repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/conf.rs | lib/src/comms/conf.rs | use std::net::IpAddr;
use std::net::SocketAddr;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use ate_comms::StreamTx;
use crate::comms::NodeId;
use crate::conf::ConfMesh;
use crate::conf::MeshAddress;
use crate::crypto::EncryptKey;
use crate::crypto::KeySize;
use crate::crypto::PrivateEncryptKey;
use crate::spec::*;
/// Transmit-side handle for an outbound (upstream) connection.
#[derive(Debug)]
pub struct Upstream {
    // Identity of the local node that opened this connection
    #[allow(dead_code)]
    pub id: NodeId,
    // Transmit half of the underlying stream
    pub outbox: StreamTx,
    // Serialization format negotiated for this connection
    #[allow(dead_code)]
    pub wire_format: SerializationFormat,
}
/// An (ip, port) pair identifying a peer; convertible into a `SocketAddr`.
#[derive(Debug, Clone)]
pub(crate) struct NodeTarget {
    pub ip: IpAddr,
    pub port: u16,
}
impl From<NodeTarget> for SocketAddr {
fn from(target: NodeTarget) -> SocketAddr {
SocketAddr::new(target.ip, target.port)
}
}
/// Runtime networking configuration for a mesh node: where to listen
/// (server builds), where to connect (client builds) and the shared
/// `ConfMesh` settings both sides draw from.
#[derive(Debug, Clone)]
pub(crate) struct MeshConfig {
    // Addresses the server will accept connections on
    #[cfg(feature = "enable_server")]
    pub listen_on: Vec<SocketAddr>,
    // Minimum key strength demanded of connecting peers, if any
    #[cfg(feature = "enable_server")]
    pub listen_min_encryption: Option<KeySize>,
    // Server certificate used for the wire-encryption handshake
    #[cfg(feature = "enable_server")]
    pub listen_cert: Option<PrivateEncryptKey>,
    // Upstream target: resolved to a socket address when DNS support is
    // compiled in, otherwise kept as a logical mesh address
    #[allow(dead_code)]
    #[cfg(feature = "enable_dns")]
    pub connect_to: Option<SocketAddr>,
    #[allow(dead_code)]
    #[cfg(not(feature = "enable_dns"))]
    pub connect_to: Option<MeshAddress>,
    // The wider mesh configuration this node was built from
    #[allow(dead_code)]
    pub cfg_mesh: ConfMesh,
}
impl MeshConfig {
    /// Creates a configuration seeded from `cfg_mesh`: server listen
    /// settings are copied across, the listen address list starts empty and
    /// no outbound target is set.
    #[allow(dead_code)]
    pub(crate) fn new(cfg_mesh: ConfMesh) -> MeshConfig {
        MeshConfig {
            #[cfg(feature = "enable_server")]
            listen_on: Vec::new(),
            #[cfg(feature = "enable_server")]
            listen_min_encryption: cfg_mesh.listen_min_encryption.clone(),
            #[cfg(feature = "enable_server")]
            listen_cert: cfg_mesh.listen_certificate.clone(),
            #[cfg(feature = "enable_dns")]
            connect_to: None,
            #[cfg(not(feature = "enable_dns"))]
            connect_to: None,
            // field-init shorthand (was the redundant `cfg_mesh: cfg_mesh`)
            cfg_mesh,
        }
    }

    /// Builder-style: adds an address for the server to listen on.
    #[cfg(feature = "enable_server")]
    pub(crate) fn listen_on(mut self, ip: IpAddr, port: u16) -> Self {
        self.listen_on
            .push(SocketAddr::from(NodeTarget { ip, port }));
        self
    }

    /// Builder-style: installs the server certificate used for wire
    /// encryption, mirroring it into the underlying `ConfMesh`.
    #[allow(dead_code)]
    #[cfg(feature = "enable_server")]
    pub(crate) fn listen_cert(mut self, certificate: PrivateEncryptKey) -> Self {
        self.cfg_mesh.listen_certificate = Some(certificate.clone());
        self.listen_cert = Some(certificate);
        self
    }

    /// Builder-style: sets the upstream address this node will connect to.
    /// With DNS enabled the mesh address is converted to a socket address
    /// immediately; otherwise it is stored as-is for later resolution.
    #[cfg(feature = "enable_client")]
    pub(crate) fn connect_to(mut self, addr: MeshAddress) -> Self {
        #[cfg(feature = "enable_dns")]
        {
            self.connect_to = Some(SocketAddr::from(NodeTarget {
                ip: addr.host,
                port: addr.port,
            }));
        }
        #[cfg(not(feature = "enable_dns"))]
        {
            self.connect_to.replace(addr);
        }
        self
    }
}
impl Upstream {
    /// Returns the encryption key negotiated for this stream, if any.
    #[allow(dead_code)]
    pub fn wire_encryption(&self) -> Option<EncryptKey> {
        self.outbox.wire_encryption()
    }
    /// Closes the outbound half of the connection.
    pub async fn close(&mut self) -> Result<(), tokio::io::Error> {
        self.outbox.close().await
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/listener.rs | lib/src/comms/listener.rs | #![allow(unused_imports)]
use crate::crypto::KeySize;
use crate::error::*;
use crate::spec::*;
use async_trait::async_trait;
use error_chain::bail;
use serde::{de::DeserializeOwned, Serialize};
use std::marker::PhantomData;
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use std::sync::Weak;
use tokio::net::TcpListener;
use tokio::net::TcpStream;
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::sync::Mutex;
use tokio::time::Duration;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::{Instrument, WithSubscriber};
use std::convert::Infallible;
#[cfg(feature = "enable_full")]
use tokio_tungstenite::tungstenite::{handshake, Error};
#[cfg(feature = "enable_full")]
use tokio_tungstenite::WebSocketStream;
use super::conf::*;
use super::helper::InboxProcessor;
use super::helper::*;
use super::key_exchange;
use super::rx_tx::*;
use super::stream::*;
use super::router::*;
use super::PacketWithContext;
use super::StreamProtocol;
use super::StreamRouter;
use super::hello::HelloMetadata;
use crate::comms::NodeId;
use crate::crypto::PrivateEncryptKey;
use crate::crypto::EncryptKey;
use crate::engine::TaskEngine;
/// A registered route entry; currently it only remembers its path.
#[derive(Debug)]
struct ListenerNode {
    #[allow(dead_code)]
    path: String,
}
/// Per-server listening state: identity, wire settings, the registered
/// routes and the handler that processes inbound packets.
pub(crate) struct Listener<M, C>
where
    M: Send + Sync + Serialize + DeserializeOwned + Clone,
    C: Send + Sync,
{
    // Identity of this server node
    server_id: NodeId,
    // Serialization format used on accepted connections
    wire_format: SerializationFormat,
    // Minimum key strength demanded of clients, if any
    min_encryption: Option<KeySize>,
    // Certificate offered during the encryption handshake, if any
    server_cert: Option<PrivateEncryptKey>,
    // Accept/handshake timeout (taken from `ConfMesh::accept_timeout`)
    timeout: Duration,
    // Application handler invoked for every inbound packet
    handler: Arc<dyn ServerProcessor<M, C>>,
    // Hello-paths registered via `add_route`
    routes: fxhash::FxHashMap<String, ListenerNode>,
    // Broadcast fired on drop so spawned accept loops shut down
    exit: broadcast::Sender<()>,
}
/// Application-side hook for a server: receives every inbound packet along
/// with a transmitter for replies, plus a shutdown notification per peer.
#[async_trait]
pub(crate) trait ServerProcessor<M, C>
where
    Self: Send + Sync,
    M: Send + Sync + Serialize + DeserializeOwned + Clone,
    C: Send + Sync,
{
    /// Handles one inbound packet; `tx` can be used to reply or broadcast.
    async fn process<'a, 'b>(
        &'a self,
        pck: PacketWithContext<M, C>,
        tx: &'b mut Tx,
    ) -> Result<(), CommsError>;
    /// Invoked when the connection from `addr` is shutting down.
    async fn shutdown(&self, addr: SocketAddr);
}
/// Adapter ("Fascade" is the codebase's spelling of facade) that pairs a
/// `ServerProcessor` with a dedicated `Tx`, letting the pair be driven
/// through the `InboxProcessor` trait.
pub(crate) struct ServerProcessorFascade<M, C>
where
    M: Send + Sync + Serialize + DeserializeOwned + Clone + Default,
    C: Send + Sync + 'static,
{
    // Transmitter handed to the handler for replies
    tx: Tx,
    // The wrapped application handler
    handler: Arc<dyn ServerProcessor<M, C>>,
}
#[async_trait]
impl<M, C> InboxProcessor<M, C> for ServerProcessorFascade<M, C>
where
    Self: Send + Sync,
    M: Send + Sync + Serialize + DeserializeOwned + Clone + Default,
    C: Send + Sync,
{
    /// Forwards each inbound packet to the wrapped handler together with
    /// this fascade's transmitter.
    async fn process(&mut self, pck: PacketWithContext<M, C>) -> Result<(), CommsError> {
        self.handler.process(pck, &mut self.tx).await
    }
    /// Propagates the shutdown notification to the wrapped handler.
    async fn shutdown(&mut self, addr: SocketAddr) {
        self.handler.shutdown(addr).await
    }
}
impl<M, C> Listener<M, C>
where
    M: Send + Sync + Serialize + DeserializeOwned + Clone + Default + 'static,
    C: Send + Sync + Default + 'static,
{
    /// Builds the listener state from the mesh configuration, validates the
    /// wire-encryption requirements and spawns an accept loop for every
    /// configured listen address.
    ///
    /// # Errors
    /// `MissingCertificate` / `CertificateTooWeak` when wire encryption is
    /// demanded but no (or too weak a) certificate was supplied.
    pub(crate) async fn new(
        conf: &MeshConfig,
        server_id: NodeId,
        inbox: Arc<dyn ServerProcessor<M, C>>,
        exit: broadcast::Sender<()>,
    ) -> Result<Arc<StdMutex<Listener<M, C>>>, CommsError> {
        // Create the node state and initialize it
        let listener = {
            Arc::new(StdMutex::new(Listener {
                server_id: server_id.clone(),
                wire_format: conf.cfg_mesh.wire_format,
                min_encryption: conf.listen_min_encryption.clone(),
                server_cert: conf.listen_cert.clone(),
                timeout: conf.cfg_mesh.accept_timeout,
                handler: Arc::clone(&inbox),
                routes: fxhash::FxHashMap::default(),
                exit: exit.clone(),
            }))
        };
        // If wire encryption is required then make sure a certificate of
        // sufficient size was supplied
        if let Some(size) = &conf.cfg_mesh.wire_encryption {
            match conf.listen_cert.as_ref() {
                None => {
                    bail!(CommsErrorKind::MissingCertificate);
                }
                Some(a) if a.size() < *size => {
                    bail!(CommsErrorKind::CertificateTooWeak(size.clone(), a.size()));
                }
                _ => {}
            }
        }
        // Create all the listeners
        for target in conf.listen_on.iter() {
            Listener::listen_on(
                // SocketAddr is Copy - no clone needed
                *target,
                server_id.clone(),
                Arc::downgrade(&listener),
                conf.cfg_mesh.wire_protocol,
                exit.clone(),
            )
            .await;
        }
        Ok(listener)
    }

    /// Registers a path with the listener so inbound hellos can be routed.
    pub(crate) fn add_route(&mut self, path: &str) -> Result<(), CommsError> {
        // Add the node to the lookup
        self.routes.insert(
            path.to_string(),
            ListenerNode {
                path: path.to_string(),
            },
        );
        // Return the node transmit and receive handlers
        Ok(())
    }

    /// Binds a TCP acceptor to `addr` and spawns a background task that
    /// accepts sockets forever, routing each one through a `StreamRouter`.
    /// Accept failures back off exponentially (capped at 10s); the loop
    /// terminates once the owning listener has been dropped.
    ///
    /// # Panics
    /// Panics when the address cannot be bound.
    async fn listen_on(
        addr: SocketAddr,
        server_id: NodeId,
        listener: Weak<StdMutex<Listener<M, C>>>,
        wire_protocol: StreamProtocol,
        exit: broadcast::Sender<()>,
    ) {
        // `unwrap_or_else` defers building the panic message until the bind
        // actually fails - the old `expect(&format!(..))` allocated the
        // message on the success path too (clippy: expect_fun_call)
        let tcp_listener = TcpListener::bind(addr).await.unwrap_or_else(|err| {
            panic!("Failed to bind listener to address ({}) - {}", addr, err)
        });
        info!("listening on: {} with proto {}", addr, wire_protocol);
        let mut exp_backoff = Duration::from_millis(100);
        TaskEngine::spawn(async move {
            loop {
                let result = tcp_listener.accept().await;
                let (stream, sock_addr) = match result {
                    Ok(a) => a,
                    Err(err) => {
                        error!("tcp-listener - {}", err.to_string());
                        // Exponential backoff so a broken acceptor does not
                        // spin the CPU
                        crate::engine::sleep(exp_backoff).await;
                        exp_backoff *= 2;
                        if exp_backoff > Duration::from_secs(10) {
                            exp_backoff = Duration::from_secs(10);
                        }
                        continue;
                    }
                };
                exp_backoff = Duration::from_millis(100);
                let listener = match Weak::upgrade(&listener) {
                    Some(a) => a,
                    None => {
                        error!("connection attempt on a terminated listener (out-of-scope)");
                        break;
                    }
                };
                setup_tcp_stream(&stream).unwrap();
                // Use the listener parameters to create a stream router with a
                // default route to the listener
                let (wire_format, min_encryption, server_cert, timeout) = {
                    let listener = listener.lock().unwrap();
                    (
                        listener.wire_format.clone(),
                        listener.min_encryption.clone(),
                        listener.server_cert.clone(),
                        listener.timeout.clone(),
                    )
                };
                let mut router = StreamRouter::new(
                    wire_format,
                    wire_protocol,
                    min_encryption,
                    server_cert,
                    server_id,
                    timeout.clone(),
                );
                let adapter = Arc::new(ListenerAdapter {
                    listener,
                    exit: exit.clone(),
                });
                router.set_default_route(adapter);
                // Upgrade and split the stream
                let (rx, tx) = match wire_protocol
                    .upgrade_server_and_split(stream, timeout)
                    .await
                {
                    Ok(a) => a,
                    Err(err) => {
                        warn!("connection-failed(accept): {}", err.to_string());
                        continue;
                    }
                };
                match router
                    .accept_socket(rx, tx, sock_addr, None, None)
                    .instrument(tracing::info_span!(
                        "server-accept",
                        id = server_id.to_short_string().as_str()
                    ))
                    .await
                {
                    Ok(a) => a,
                    // Ordinary client disconnects are logged at debug level only
                    Err(CommsError(CommsErrorKind::IO(err), _))
                        if err.kind() == std::io::ErrorKind::UnexpectedEof
                            || err.kind() == std::io::ErrorKind::ConnectionReset
                            || err.kind() == std::io::ErrorKind::ConnectionAborted
                            || err.kind() == std::io::ErrorKind::BrokenPipe
                            || err
                                .to_string()
                                .to_lowercase()
                                .contains("connection reset without closing handshake") =>
                    {
                        debug!("{:?}(accept)", err.kind())
                    }
                    Err(err) => {
                        warn!("connection-failed(accept): {}", err.to_string());
                        continue;
                    }
                }
            }
        });
    }

    /// Accepts an upgraded stream: wires up a per-connection `Tx` (joined to
    /// a fresh single-member broadcast group), wraps it in a
    /// `ServerProcessorFascade` and spawns the inbox-processing task.
    pub(crate) async fn accept_stream(
        listener: Arc<StdMutex<Listener<M, C>>>,
        rx: StreamRx,
        rx_proto: StreamProtocol,
        tx: Upstream,
        hello: HelloMetadata,
        wire_encryption: Option<EncryptKey>,
        sock_addr: SocketAddr,
        exit: broadcast::Receiver<()>,
    ) -> Result<(), CommsError> {
        debug!("accept-from: {}", sock_addr.to_string());
        // Grab all the data we need
        let (server_id, wire_format, handler) = {
            let listener = listener.lock().unwrap();
            (
                listener.server_id.clone(),
                listener.wire_format.clone(),
                listener.handler.clone(),
            )
        };
        let node_id = hello.client_id;
        let context = Arc::new(C::default());
        // Create an upstream from the tx
        let tx = Arc::new(Mutex::new(tx));
        // Create the metrics and throttles
        let metrics = Arc::new(StdMutex::new(super::metrics::Metrics::default()));
        let throttle = Arc::new(StdMutex::new(super::throttle::Throttle::default()));
        // Now lets build a Tx object that is not connected to any of transmit pipes for now
        // (later we will add other ones to create a broadcast group)
        let mut group = TxGroup::default();
        group.all.insert(node_id, Arc::downgrade(&tx));
        let tx = Tx {
            hello_path: hello.path.clone(),
            wire_format,
            direction: TxDirection::Downcast(TxGroupSpecific {
                me_id: node_id,
                me_tx: Arc::clone(&tx),
                group: Arc::new(Mutex::new(group)),
            }),
            relay: None,
            metrics: Arc::clone(&metrics),
            throttle: Arc::clone(&throttle),
            exit_dependencies: Vec::new(),
        };
        // The fascade makes the transmit object available
        // for the server processor
        let tx = ServerProcessorFascade { tx, handler };
        let tx = Box::new(tx);
        // Launch the inbox background thread
        let worker_context = Arc::clone(&context);
        TaskEngine::spawn(async move {
            let result = process_inbox(
                rx,
                rx_proto,
                tx,
                metrics,
                throttle,
                server_id,
                node_id,
                sock_addr,
                worker_context,
                wire_format,
                wire_encryption,
                exit,
            )
            .await;
            let span = span!(
                Level::DEBUG,
                "server",
                addr = sock_addr.to_string().as_str()
            );
            let _span = span.enter();
            match result {
                Ok(_) => {}
                // Ordinary disconnects are logged at debug level only
                Err(CommsError(CommsErrorKind::IO(err), _))
                    if err.kind() == std::io::ErrorKind::UnexpectedEof
                        || err.kind() == std::io::ErrorKind::ConnectionReset
                        || err.kind() == std::io::ErrorKind::ConnectionAborted
                        || err.kind() == std::io::ErrorKind::BrokenPipe
                        || err
                            .to_string()
                            .to_lowercase()
                            .contains("connection reset without closing handshake") =>
                {
                    debug!("{:?}(inbox)", err.kind())
                }
                Err(CommsError(CommsErrorKind::IO(err), _)) => warn!(
                    "connection-failed (inbox): due to an IO error(kind={:?}) - {}",
                    err.kind(),
                    err
                ),
                Err(err) => warn!("connection-failed (inbox): {}", err),
            };
            debug!("disconnected");
        });
        // Happy days
        Ok(())
    }
}
impl<M, C> Drop for Listener<M, C>
where
    M: Send + Sync + Serialize + DeserializeOwned + Clone,
    C: Send + Sync,
{
    /// Broadcasts the shutdown signal so accept loops spawned by this
    /// listener terminate; a send error (no live receivers) is ignored.
    fn drop(&mut self) {
        self.exit.send(()).ok();
    }
}
/// Default stream route that hands accepted web sockets back to a `Listener`.
struct ListenerAdapter<M, C>
where M: Send + Sync + Serialize + DeserializeOwned + Clone + Default,
    C: Send + Sync,
{
    listener: Arc<StdMutex<Listener<M, C>>>,
    // Each accepted stream subscribes to this shutdown broadcast
    exit: broadcast::Sender<()>,
}
#[async_trait]
impl<M, C> StreamRoute
    for ListenerAdapter<M, C>
where M: Send + Sync + Serialize + DeserializeOwned + Clone + Default + 'static,
    C: Send + Sync + Default + 'static,
{
    /// Invoked by the router once a web socket has completed its handshake;
    /// delegates to `Listener::accept_stream` with a fresh exit subscription.
    async fn accepted_web_socket(
        &self,
        rx: StreamRx,
        rx_proto: StreamProtocol,
        tx: Upstream,
        hello: HelloMetadata,
        sock_addr: SocketAddr,
        wire_encryption: Option<EncryptKey>,
    ) -> Result<(), CommsError>
    {
        Listener::accept_stream(
            self.listener.clone(),
            rx,
            rx_proto,
            tx,
            hello,
            wire_encryption,
            sock_addr,
            self.exit.subscribe(),
        ).await
    }
}
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/rx_tx.rs | lib/src/comms/rx_tx.rs | use fxhash::FxHashMap;
use rand::seq::SliceRandom;
use serde::{de::DeserializeOwned, Serialize};
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use std::sync::Weak;
use tokio::sync::broadcast;
use tokio::sync::Mutex;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::crypto::EncryptKey;
use crate::error::*;
use crate::prelude::SerializationFormat;
use super::conf::Upstream;
use super::Metrics;
use super::NodeId;
use super::Packet;
use super::PacketData;
use super::PacketWithContext;
use super::Throttle;
/// Which way this transmitter sends: down to a group of connected clients
/// (server builds), up to a server, or nowhere at all.
#[derive(Debug)]
pub(crate) enum TxDirection {
    // Server-side: reply/broadcast towards connected clients
    #[cfg(feature = "enable_server")]
    Downcast(TxGroupSpecific),
    // Client-side: write towards the upstream server
    #[allow(dead_code)]
    Upcast(Upstream),
    // Detached: writes are discarded
    #[allow(dead_code)]
    Nullcast,
}
/// A secondary transmit target that packets can be relayed on to,
/// possibly speaking a different wire format.
#[derive(Debug)]
pub(crate) struct TxRelay {
    pub direction: TxDirection,
    pub wire_format: SerializationFormat,
}
/// Transmit handle for a connection: a direction, the wire format, an
/// optional relay target and shared metrics/throttle state.
#[derive(Debug)]
pub(crate) struct Tx {
    // Hello path this connection was established under
    pub hello_path: String,
    pub(crate) direction: TxDirection,
    pub wire_format: SerializationFormat,
    // Optional onward target that `send_relay` forwards packets to
    pub(crate) relay: Option<TxRelay>,
    pub metrics: Arc<StdMutex<Metrics>>,
    pub throttle: Arc<StdMutex<Throttle>>,
    // Broadcast channels fired when this Tx is dropped
    pub(crate) exit_dependencies: Vec<broadcast::Sender<()>>,
}
impl Tx {
    /// Forwards an already-serialized packet to the configured relay, if one
    /// is set. The packet bytes are reused when the relay speaks the same
    /// wire format; otherwise the message is re-serialized.
    #[allow(dead_code)]
    pub async fn send_relay<M, C>(&mut self, pck: PacketWithContext<M, C>) -> Result<(), CommsError>
    where
        M: Send + Sync + Serialize + DeserializeOwned + Clone,
        C: Send + Sync,
    {
        trace!("send relay (type={})", std::any::type_name::<M>());
        let mut total_sent = 0u64;
        if let Some(relay) = self.relay.as_mut() {
            // Reuse raw bytes when the formats match; re-encode otherwise
            let pck = if self.wire_format == relay.wire_format {
                pck.data
            } else {
                Packet::from(pck.packet.msg).to_packet_data(relay.wire_format)?
            };
            match &mut relay.direction {
                #[cfg(feature = "enable_server")]
                TxDirection::Downcast(tx) => {
                    total_sent += tx.send_reply(pck).await?;
                }
                TxDirection::Upcast(tx) => {
                    total_sent += tx.outbox.write(&pck.bytes[..]).await? as u64;
                }
                TxDirection::Nullcast => {}
            }
        }
        self.metrics_add_sent(total_sent).await;
        Ok(())
    }

    /// Sends a serialized packet back along the active direction and
    /// records the bytes written in the metrics.
    pub async fn send_reply(&mut self, pck: PacketData) -> Result<(), CommsError> {
        trace!("send reply (bytes={})", pck.bytes.len());
        let total_sent = match &mut self.direction {
            #[cfg(feature = "enable_server")]
            TxDirection::Downcast(tx) => tx.send_reply(pck).await?,
            TxDirection::Upcast(tx) => tx.outbox.write(&pck.bytes[..]).await? as u64,
            TxDirection::Nullcast => 0u64,
        };
        self.metrics_add_sent(total_sent).await;
        Ok(())
    }

    /// Serializes `msg` with this transmitter's wire format and replies.
    pub async fn send_reply_msg<M>(&mut self, msg: M) -> Result<(), CommsError>
    where
        M: Send + Sync + Serialize + DeserializeOwned + Clone,
    {
        trace!("send reply msg (type={})", std::any::type_name::<M>());
        let pck = Packet::from(msg).to_packet_data(self.wire_format)?;
        self.send_reply(pck).await?;
        Ok(())
    }

    /// Broadcasts a packet to every other member of the downcast group
    /// (no-op for upcast/nullcast directions).
    #[cfg(feature = "enable_server")]
    pub async fn send_others(&mut self, pck: PacketData) {
        trace!("send others (bytes={})", pck.bytes.len());
        let total_sent = match &mut self.direction {
            #[cfg(feature = "enable_server")]
            TxDirection::Downcast(tx) => {
                tx.send_others(pck).await
            },
            _ => 0u64,
        };
        self.metrics_add_sent(total_sent).await;
    }

    /// Broadcasts a packet to every member of the group (downcast) or
    /// writes it upstream (upcast).
    pub async fn send_all(&mut self, pck: PacketData) -> Result<(), CommsError> {
        trace!("send all (bytes={})", pck.bytes.len());
        let total_sent = match &mut self.direction {
            #[cfg(feature = "enable_server")]
            TxDirection::Downcast(tx) => tx.send_all(pck).await,
            TxDirection::Upcast(tx) => tx.outbox.write(&pck.bytes[..]).await? as u64,
            TxDirection::Nullcast => 0u64,
        };
        self.metrics_add_sent(total_sent).await;
        Ok(())
    }

    /// Serializes `msg` with this transmitter's wire format and broadcasts it.
    pub async fn send_all_msg<M>(&mut self, msg: M) -> Result<(), CommsError>
    where
        M: Send + Sync + Serialize + DeserializeOwned + Clone,
    {
        trace!("send all msg (type={})", std::any::type_name::<M>());
        let pck = Packet::from(msg).to_packet_data(self.wire_format)?;
        self.send_all(pck).await?;
        Ok(())
    }

    /// Moves this node into `new_group` and out of its previous broadcast
    /// group (only meaningful for the downcast direction).
    #[cfg(feature = "enable_server")]
    pub(crate) async fn replace_group(&mut self, new_group: Arc<Mutex<TxGroup>>) {
        match &mut self.direction {
            #[cfg(feature = "enable_server")]
            TxDirection::Downcast(tx) => {
                {
                    let mut new_group = new_group.lock().await;
                    new_group.all.insert(tx.me_id, Arc::downgrade(&tx.me_tx));
                }
                let old_group = tx.replace_group(new_group);
                {
                    let mut old_group = old_group.lock().await;
                    old_group.all.remove(&tx.me_id);
                }
            }
            _ => {}
        };
    }

    /// Moves the active direction out of this transmitter (leaving it
    /// `Nullcast`) and returns a fresh `Tx` owning it, sharing the same
    /// metrics and throttle but no relay or exit dependencies.
    #[allow(dead_code)]
    pub fn take(&mut self) -> Tx {
        // `mem::replace` is the idiomatic form of the old swap-with-temp +
        // let-and-return sequence
        let direction = std::mem::replace(&mut self.direction, TxDirection::Nullcast);
        Tx {
            hello_path: self.hello_path.clone(),
            direction,
            wire_format: self.wire_format.clone(),
            relay: None,
            metrics: Arc::clone(&self.metrics),
            throttle: Arc::clone(&self.throttle),
            exit_dependencies: Vec::new(),
        }
    }

    /// Steals `tx`'s direction (leaving it `Nullcast`) and installs it as
    /// this transmitter's relay target.
    #[allow(dead_code)]
    pub fn set_relay(&mut self, mut tx: Tx) {
        let direction = std::mem::replace(&mut tx.direction, TxDirection::Nullcast);
        self.relay.replace(TxRelay {
            direction,
            wire_format: tx.wire_format,
        });
    }

    /// True when a relay target has been installed.
    #[allow(dead_code)]
    pub fn relay_is_some(&self) -> bool {
        self.relay.is_some()
    }

    /// Adds `amt` bytes to the sent counter.
    /// NOTE(review): declared `async` for call-site uniformity although it
    /// never awaits - it only takes the std mutex briefly.
    async fn metrics_add_sent(&self, amt: u64) {
        // Update the metrics with all this received data
        let mut metrics = self.metrics.lock().unwrap();
        metrics.sent += amt;
    }

    /// Resolves the wire encryption key of the active direction, if any.
    #[allow(dead_code)]
    pub async fn wire_encryption(&self) -> Option<EncryptKey> {
        self.direction.wire_encryption().await
    }

    /// Registers a broadcast channel to be fired when this Tx is dropped.
    #[allow(dead_code)]
    pub fn add_exit_dependency(&mut self, exit: broadcast::Sender<()>) {
        self.exit_dependencies.push(exit);
    }
}
impl Drop for Tx {
    /// Fires every registered exit dependency when the transmitter goes away.
    fn drop(&mut self) {
        self.exit_dependencies.drain(..).for_each(|exit| {
            exit.send(()).ok();
        });
        #[cfg(feature = "enable_super_verbose")]
        trace!("drop(node-tx)");
    }
}
/// A node's view of a broadcast group: its own id and upstream plus the
/// shared group used for send-to-others / send-to-all.
#[derive(Debug)]
pub(crate) struct TxGroupSpecific {
    #[allow(dead_code)]
    pub me_id: NodeId,
    pub me_tx: Arc<Mutex<Upstream>>,
    #[allow(dead_code)]
    pub group: Arc<Mutex<TxGroup>>,
}
impl TxGroupSpecific {
    /// Writes a packet back to only this node's upstream; returns the
    /// number of bytes written.
    #[cfg(feature = "enable_server")]
    pub async fn send_reply(&mut self, pck: PacketData) -> Result<u64, CommsError> {
        let mut upstream = self.me_tx.lock().await;
        let sent = upstream.outbox.write(&pck.bytes[..]).await? as u64;
        Ok(sent)
    }

    /// Broadcasts a packet to every member of the group except this node.
    #[cfg(feature = "enable_server")]
    pub async fn send_others(&mut self, pck: PacketData) -> u64 {
        let mut members = self.group.lock().await;
        members.send(pck, Some(self.me_id)).await
    }

    /// Broadcasts a packet to every member of the group including this node.
    #[cfg(feature = "enable_server")]
    pub async fn send_all(&mut self, pck: PacketData) -> u64 {
        let mut members = self.group.lock().await;
        members.send(pck, None).await
    }

    /// Swaps in a new broadcast group, returning the one it replaced.
    #[cfg(feature = "enable_server")]
    pub(crate) fn replace_group(&mut self, group: Arc<Mutex<TxGroup>>) -> Arc<Mutex<TxGroup>> {
        std::mem::replace(&mut self.group, group)
    }

    /// Reads the wire encryption key (if any) off this node's upstream.
    #[allow(dead_code)]
    pub async fn wire_encryption(&self) -> Option<EncryptKey> {
        self.me_tx.lock().await.wire_encryption()
    }
}
/// Weak registry of every group member's upstream keyed by node id;
/// members whose upstream has been dropped are skipped at send time.
#[derive(Debug, Default)]
pub(crate) struct TxGroup {
    #[allow(dead_code)]
    pub all: FxHashMap<NodeId, Weak<Mutex<Upstream>>>,
}
impl TxGroup {
    /// Sends `pck` to every still-alive member, optionally skipping one
    /// node (the "send to others" case). Returns the total bytes written;
    /// individual write failures are silently ignored.
    #[cfg(feature = "enable_server")]
    pub(crate) async fn send(
        &mut self,
        pck: PacketData,
        skip: Option<NodeId>,
    ) -> u64 {
        let mut sent = 0u64;
        for member in self.all.values().filter_map(Weak::upgrade) {
            let mut member = member.lock().await;
            if skip == Some(member.id) {
                continue;
            }
            if let Ok(amt) = member.outbox.write(&pck.bytes[..]).await {
                sent += amt as u64;
            }
        }
        sent
    }
}
impl TxDirection {
    /// Resolves the wire encryption key of whichever direction is active.
    #[allow(dead_code)]
    pub async fn wire_encryption(&self) -> Option<EncryptKey> {
        match self {
            TxDirection::Upcast(upstream) => upstream.wire_encryption(),
            #[cfg(feature = "enable_server")]
            TxDirection::Downcast(group) => group.wire_encryption().await,
            TxDirection::Nullcast => None,
        }
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/client.rs | lib/src/comms/client.rs | use std::ops::DerefMut;
use std::net::SocketAddr;
#[cfg(not(feature = "enable_dns"))]
use std::net::ToSocketAddrs;
use std::result::Result;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use error_chain::bail;
use fxhash::FxHashMap;
use serde::{de::DeserializeOwned, Serialize};
#[cfg(feature = "enable_full")]
use tokio::net::TcpStream;
use tokio::sync::broadcast;
use tokio::sync::Mutex;
use tokio::time::Duration;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::{Instrument, WithSubscriber};
use ate_comms::MessageProtocolApi;
#[allow(unused_imports)]
use crate::conf::*;
use crate::crypto::*;
use crate::engine::TaskEngine;
use crate::spec::*;
use crate::{comms::NodeId, error::*};
use super::hello;
use super::helper::*;
use super::key_exchange;
use super::metrics::*;
use super::rx_tx::*;
use super::throttle::*;
use super::CertificateValidation;
use super::{conf::*, hello::HelloMetadata};
#[allow(unused_imports)]
use {
super::StreamProtocol, super::StreamRx, super::StreamTx,
};
/// Dials the configured upstream (if any) and returns a `Tx` pointing up
/// the mesh; fails with `NoAddress` when no target is configured.
pub(crate) async fn connect<M, C>(
    conf: &MeshConfig,
    hello_path: String,
    node_id: NodeId,
    inbox: impl InboxProcessor<M, C> + 'static,
    metrics: Arc<StdMutex<Metrics>>,
    throttle: Arc<StdMutex<Throttle>>,
    exit: broadcast::Receiver<()>,
) -> Result<Tx, CommsError>
where
    M: Send + Sync + Serialize + DeserializeOwned + Default + Clone + 'static,
    C: Send + Sync + Default + 'static,
{
    // Without a configured upstream address there is nothing to dial
    let target = match &conf.connect_to {
        Some(target) => target.clone(),
        None => bail!(CommsErrorKind::NoAddress),
    };
    // Perform the connect operation
    let upstream = mesh_connect_to::<M, C>(
        target,
        hello_path.clone(),
        node_id,
        conf.cfg_mesh.domain_name.clone(),
        Box::new(inbox),
        conf.cfg_mesh.wire_protocol,
        conf.cfg_mesh.wire_encryption,
        conf.cfg_mesh.connect_timeout,
        conf.cfg_mesh.fail_fast,
        conf.cfg_mesh.certificate_validation.clone(),
        Arc::clone(&metrics),
        Arc::clone(&throttle),
        exit,
    )
    .await?;
    // Wrap the upstream in a transmit handle that points up the mesh
    Ok(Tx {
        hello_path,
        wire_format: conf.cfg_mesh.wire_format,
        direction: TxDirection::Upcast(upstream),
        relay: None,
        metrics,
        throttle,
        exit_dependencies: Vec::new(),
    })
}
/// Dials `addr`, performs the hello exchange (and key exchange when wire
/// encryption is requested), spawns the inbox worker for the receive half
/// and returns the transmit half as an `Upstream`.
pub(super) async fn mesh_connect_to<M, C>(
    addr: MeshConnectAddr,
    hello_path: String,
    node_id: NodeId,
    domain: String,
    inbox: Box<dyn InboxProcessor<M, C>>,
    wire_protocol: StreamProtocol,
    wire_encryption: Option<KeySize>,
    timeout: Duration,
    fail_fast: bool,
    validation: CertificateValidation,
    metrics: Arc<StdMutex<super::metrics::Metrics>>,
    throttle: Arc<StdMutex<super::throttle::Throttle>>,
    exit: broadcast::Receiver<()>,
) -> Result<Upstream, CommsError>
where
    M: Send + Sync + Serialize + DeserializeOwned + Clone + Default + 'static,
    C: Send + Sync + Default + 'static,
{
    // Make the connection (the whole prepare step is bounded by `timeout`)
    trace!("prepare connect (path={})", hello_path);
    let worker_connect = mesh_connect_prepare(
        addr.clone(),
        hello_path,
        node_id,
        domain,
        wire_protocol,
        wire_encryption,
        fail_fast,
    );
    let mut worker_connect =
        crate::engine::timeout(timeout, worker_connect).await??;
    // Wire format and server identity come from the server's hello reply
    let wire_format = worker_connect.hello_metadata.wire_format;
    let server_id = worker_connect.hello_metadata.server_id;
    // If we are using wire encryption then exchange secrets
    let ek = match wire_encryption {
        Some(key_size) => Some(
            key_exchange::mesh_key_exchange_sender(
                worker_connect.proto.deref_mut(),
                key_size,
                validation,
            )
            .await?,
        ),
        None => None,
    };
    // Split the stream
    let (rx, tx) = worker_connect.proto.split(ek);
    // background thread - connects and then runs inbox and outbox threads
    // if the upstream object signals a termination event it will exit
    trace!("spawning connect worker");
    TaskEngine::spawn(mesh_connect_worker::<M, C>(
        rx,
        wire_protocol,
        wire_format,
        addr,
        ek,
        node_id,
        server_id,
        inbox,
        metrics,
        throttle,
        exit,
    ));
    trace!("building upstream with tx channel");
    Ok(Upstream {
        id: node_id,
        outbox: tx,
        wire_format,
    })
}
/// Result of `mesh_connect_prepare`: the dialed address, the negotiated
/// protocol handle and the server's hello metadata.
struct MeshConnectContext {
    #[allow(dead_code)]
    addr: MeshConnectAddr,
    proto: Box<dyn MessageProtocolApi + Send + Sync + 'static>,
    hello_metadata: HelloMetadata,
}
/// Repeatedly attempts to establish a raw stream to `addr` (via the global
/// comm factory when one is installed, otherwise a direct TCP connect with
/// exponential backoff) and then performs the hello exchange.
#[allow(unused_variables)]
async fn mesh_connect_prepare(
    addr: MeshConnectAddr,
    hello_path: String,
    node_id: NodeId,
    domain: String,
    wire_protocol: StreamProtocol,
    wire_encryption: Option<KeySize>,
    #[allow(unused_variables)] fail_fast: bool,
) -> Result<MeshConnectContext, CommsError> {
    async move {
        #[allow(unused_mut)]
        let mut exp_backoff = Duration::from_millis(100);
        loop {
            // If we have a factory then use it
            #[allow(unused_mut)]
            let mut stream = {
                let mut factory = crate::mesh::GLOBAL_COMM_FACTORY.lock().await;
                if let Some(factory) = factory.as_mut() {
                    let create_client = Arc::clone(&factory);
                    // Drop the guard before awaiting the factory call
                    drop(factory);
                    create_client(addr.clone()).await
                } else {
                    None
                }
            };
            // If no stream yet exists then create one
            #[cfg(feature = "enable_full")]
            if stream.is_none() {
                stream = {
                    // Without DNS support the host/port pair is resolved here
                    #[cfg(not(feature = "enable_dns"))]
                    let addr = {
                        match format!("{}:{}", addr.host, addr.port)
                            .to_socket_addrs()?
                            .next()
                        {
                            Some(a) => a,
                            None => {
                                bail!(CommsErrorKind::InvalidDomainName);
                            }
                        }
                    };
                    // Transient connect errors back off exponentially
                    // (capped at 10s); a refusal fails immediately when
                    // `fail_fast` is set
                    let stream = match TcpStream::connect(addr.clone()).await {
                        Err(err)
                            if match err.kind() {
                                std::io::ErrorKind::ConnectionRefused => {
                                    if fail_fast {
                                        bail!(CommsErrorKind::Refused);
                                    }
                                    true
                                }
                                std::io::ErrorKind::ConnectionReset => true,
                                std::io::ErrorKind::ConnectionAborted => true,
                                _ => false,
                            } =>
                        {
                            debug!(
                                "connect failed: reason={}, backoff={}s",
                                err,
                                exp_backoff.as_secs_f32()
                            );
                            crate::engine::sleep(exp_backoff).await;
                            exp_backoff *= 2;
                            if exp_backoff > Duration::from_secs(10) {
                                exp_backoff = Duration::from_secs(10);
                            }
                            continue;
                        }
                        a => a?,
                    };
                    // Upgrade and split
                    let (rx, tx) = wire_protocol.upgrade_client_and_split(stream).await?;
                    Some((rx, tx))
                };
                // NOTE(review): this arm requires not(enable_full) yet sits
                // inside a block compiled only under enable_full, so it can
                // never be active here - confirm whether it was meant to
                // live outside the `if`
                #[cfg(all(feature = "enable_web_sys", not(feature = "enable_full")))]
                bail!(CommsErrorKind::InternalError(
                    "Web based clients require a GLOBAL_COMM_FACTORY".to_string()
                ));
            }
            let stream = match stream {
                Some(a) => a,
                None => {
                    bail!(CommsErrorKind::InternalError(
                        "Failed to create a client stream".to_string()
                    ));
                }
            };
            // Build the stream
            trace!("splitting stream into rx/tx");
            let (stream_rx,
                stream_tx) = stream;
            // Say hello
            let (proto, hello_metadata) = hello::mesh_hello_exchange_sender(
                stream_rx,
                stream_tx,
                node_id,
                hello_path.clone(),
                domain.clone(),
                wire_encryption,
            )
            .await?;
            // Return the result
            return Ok(
                MeshConnectContext {
                    addr,
                    proto,
                    hello_metadata,
                }
            );
        }
    }
    .instrument(tracing::info_span!("connect"))
    .await
}
/// Background task for the receive half of a client connection: runs the
/// inbox loop until the stream ends or the exit broadcast fires, treating
/// common disconnect error kinds as benign.
async fn mesh_connect_worker<M, C>(
    rx: StreamRx,
    rx_proto: StreamProtocol,
    wire_format: SerializationFormat,
    sock_addr: MeshConnectAddr,
    wire_encryption: Option<EncryptKey>,
    node_id: NodeId,
    peer_id: NodeId,
    inbox: Box<dyn InboxProcessor<M, C>>,
    metrics: Arc<StdMutex<super::metrics::Metrics>>,
    throttle: Arc<StdMutex<super::throttle::Throttle>>,
    exit: broadcast::Receiver<()>,
) where
    M: Send + Sync + Serialize + DeserializeOwned + Clone + Default + 'static,
    C: Send + Sync + Default + 'static,
{
    let span = span!(
        Level::DEBUG,
        "client",
        id = node_id.to_short_string().as_str(),
        peer = peer_id.to_short_string().as_str()
    );
    let context = Arc::new(C::default());
    match process_inbox::<M, C>(
        rx,
        rx_proto,
        inbox,
        metrics,
        throttle,
        node_id,
        peer_id,
        sock_addr.clone(),
        context,
        wire_format,
        wire_encryption,
        exit,
    )
    .instrument(span.clone())
    .await
    {
        Ok(_) => {}
        // Benign disconnect conditions are swallowed silently
        Err(CommsError(CommsErrorKind::IO(err), _))
            if match err.kind() {
                std::io::ErrorKind::BrokenPipe => true,
                std::io::ErrorKind::UnexpectedEof => true,
                std::io::ErrorKind::ConnectionReset => true,
                std::io::ErrorKind::ConnectionAborted => true,
                _ => false,
            } => {}
        Err(err) => {
            warn!("connection-failed: {}", err.to_string());
        }
    };
    let _span = span.enter();
    //#[cfg(feature = "enable_verbose")]
    debug!("disconnected-inbox: node-id={} addr={}", node_id.to_short_string().as_str(), sock_addr.to_string());
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/certificate_validation.rs | lib/src/comms/certificate_validation.rs | pub use ate_comms::CertificateValidation;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/helper.rs | lib/src/comms/helper.rs | use async_trait::async_trait;
use bytes::Bytes;
use serde::{de::DeserializeOwned, Serialize};
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use tokio::io::Error as TError;
use tokio::io::ErrorKind;
#[allow(unused_imports)]
use tokio::io::{self};
#[cfg(feature = "enable_full")]
use tokio::net::TcpStream;
use tokio::select;
use tokio::sync::broadcast;
use tokio::sync::Mutex;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::comms::NodeId;
use crate::comms::*;
use crate::crypto::*;
use crate::error::*;
use crate::spec::*;
use super::Metrics;
use super::Packet;
use super::PacketData;
use super::PacketWithContext;
use super::StreamRx;
use super::Throttle;
use crate::conf::MeshConnectAddr;
/// Consumer of inbound packets for a single connection; `shutdown` is
/// invoked when the connection terminates.
#[async_trait]
pub(crate) trait InboxProcessor<M, C>
where
    Self: Send + Sync,
    M: Send + Sync + Serialize + DeserializeOwned + Clone + Default,
    C: Send + Sync,
{
    /// Handles one decoded inbound packet.
    async fn process(&mut self, pck: PacketWithContext<M, C>) -> Result<(), CommsError>;
    /// Notifies the processor that the connection from `addr` is closing.
    async fn shutdown(&mut self, addr: MeshConnectAddr);
}
/// Applies socket options to a freshly accepted/connected TCP stream.
/// Disables Nagle's algorithm so small messages flush immediately.
#[cfg(feature = "enable_full")]
pub(super) fn setup_tcp_stream(stream: &TcpStream) -> io::Result<()> {
    // `set_nodelay` already returns `io::Result<()>`; the old `?; Ok(())`
    // wrapper was redundant
    stream.set_nodelay(true)
}
/// Receive pump for a single connection: reads framed messages from `rx`,
/// applies bandwidth throttling, deserializes each message and feeds it to
/// `inbox` until the peer disconnects, a fatal error occurs, or the `exit`
/// broadcast fires. `inbox.shutdown` is always invoked before returning.
///
/// Error handling policy: transient errors (`ReadOnly`, validation errors,
/// short WebSocket "hickups") continue the loop; connection-level I/O errors
/// and protocol/certificate failures break out of it.
#[allow(dead_code)]
#[allow(unused_variables)]
pub(super) async fn process_inbox<M, C>(
    mut rx: StreamRx,
    rx_proto: StreamProtocol,
    mut inbox: Box<dyn InboxProcessor<M, C>>,
    metrics: Arc<StdMutex<Metrics>>,
    throttle: Arc<StdMutex<Throttle>>,
    id: NodeId,
    peer_id: NodeId,
    sock_addr: MeshConnectAddr,
    context: Arc<C>,
    wire_format: SerializationFormat,
    wire_encryption: Option<EncryptKey>,
    mut exit: broadcast::Receiver<()>,
) -> Result<(), CommsError>
where
    M: Send + Sync + Serialize + DeserializeOwned + Clone + Default,
    C: Send + Sync,
{
    let ret = async {
        // Throttling variables
        let throttle_interval = chrono::Duration::milliseconds(50);
        let mut last_throttle = chrono::offset::Utc::now();
        let mut current_received = 0u64;
        let mut current_sent = 0u64;
        // Consecutive BrokenPipe errors tolerated on WebSockets before giving up
        let mut hickup_count = 0u32;
        // Main read loop
        loop {
            // Read the next request
            let buf = async {
                // If the throttle has triggered
                let now = chrono::offset::Utc::now();
                let delta = now - last_throttle;
                if delta > throttle_interval {
                    last_throttle = now;
                    // Compute the deltas (bytes moved since the last throttle tick)
                    let (mut delta_received, mut delta_sent) = {
                        let metrics = metrics.lock().unwrap();
                        let delta_received = metrics.received - current_received;
                        let delta_sent = metrics.sent - current_sent;
                        current_received = metrics.received;
                        current_sent = metrics.sent;
                        (delta_received as i64, delta_sent as i64)
                    };
                    // Normalize the delta based on the time that passed
                    // (scale to bytes-per-second)
                    delta_received *= 1000i64;
                    delta_sent *= 1000i64;
                    delta_received /= delta.num_milliseconds();
                    delta_sent /= delta.num_milliseconds();
                    // We throttle the connection based off the current metrics and a calculated wait time
                    let wait_time = {
                        let throttle = throttle.lock().unwrap();
                        let wait1 = throttle
                            .download_per_second
                            .map(|limit| limit as i64)
                            .filter(|limit| delta_sent.gt(limit))
                            .map(|limit| {
                                chrono::Duration::milliseconds(
                                    ((delta_sent - limit) * 1000i64) / limit,
                                )
                            });
                        let wait2 = throttle
                            .upload_per_second
                            .map(|limit| limit as i64)
                            .filter(|limit| delta_received.gt(limit))
                            .map(|limit| {
                                chrono::Duration::milliseconds(
                                    ((delta_received - limit) * 1000i64) / limit,
                                )
                            });
                        // Whichever is the longer wait is the one we shall do
                        match (wait1, wait2) {
                            (Some(a), Some(b)) if a >= b => Some(a),
                            (Some(_), Some(b)) => Some(b),
                            (Some(a), None) => Some(a),
                            (None, Some(b)) => Some(b),
                            (None, None) => None,
                        }
                    };
                    // We wait outside the throttle lock otherwise we will break things
                    if let Some(wait_time) = wait_time {
                        if let Ok(wait_time) = wait_time.to_std() {
                            // NOTE(review): "trottle" typo is in the runtime log
                            // string; left untouched deliberately.
                            trace!("trottle wait: {}ms", wait_time.as_millis());
                            crate::engine::sleep(wait_time).await;
                        }
                    }
                }
                rx.read().await
            };
            // Race the read against the exit broadcast
            let buf = {
                select! {
                    _ = exit.recv() => {
                        debug!("received exit broadcast - {} - id={} peer={}", sock_addr, id.to_short_string().as_str(), peer_id.to_short_string().as_str());
                        break;
                    },
                    a = buf => a
                }
            }?;
            // Update the metrics with all this received data
            {
                let mut metrics = metrics.lock().unwrap();
                metrics.received += buf.len() as u64;
                metrics.requests += 1u64;
            }
            // Deserialize it
            let msg: M = wire_format.deserialize_ref(&buf)
                .map_err(SerializationError::from)?;
            let pck = Packet { msg };
            // Process it
            let pck = PacketWithContext {
                data: PacketData {
                    bytes: Bytes::from(buf),
                    wire_format,
                },
                context: Arc::clone(&context),
                packet: pck,
                id,
                peer_id,
            };
            // Its time to process the packet
            let rcv = inbox.process(pck);
            match rcv.await {
                Ok(a) => {
                    if hickup_count > 0 {
                        debug!("inbox-recovered: recovered from hickups {}", hickup_count);
                    }
                    hickup_count = 0;
                    a
                }
                // Clean disconnect - leave the loop quietly
                Err(CommsError(CommsErrorKind::Disconnected, _)) => {
                    break;
                }
                // Broken pipes on WebSockets are tolerated a few times before
                // the connection is considered dead
                Err(CommsError(CommsErrorKind::IO(err), _))
                    if err.kind() == std::io::ErrorKind::BrokenPipe =>
                {
                    if rx_proto.is_web_socket() && hickup_count < 10 {
                        hickup_count += 1;
                        continue;
                    }
                    debug!("inbox-debug: {}", err);
                    break;
                }
                Err(CommsError(CommsErrorKind::IO(err), _))
                    if err.kind() == std::io::ErrorKind::UnexpectedEof =>
                {
                    debug!("inbox-debug: {}", err);
                    break;
                }
                Err(CommsError(CommsErrorKind::IO(err), _))
                    if err.kind() == std::io::ErrorKind::ConnectionAborted =>
                {
                    warn!("inbox-err: {}", err);
                    break;
                }
                Err(CommsError(CommsErrorKind::IO(err), _))
                    if err.kind() == std::io::ErrorKind::ConnectionReset =>
                {
                    warn!("inbox-err: {}", err);
                    break;
                }
                // Writes against a read-only chain are ignored, not fatal
                Err(CommsError(CommsErrorKind::ReadOnly, _)) => {
                    continue;
                }
                Err(CommsError(CommsErrorKind::NotYetSubscribed, _)) => {
                    error!("inbox-err: {}", CommsErrorKind::NotYetSubscribed);
                    break;
                }
                Err(CommsError(CommsErrorKind::CertificateTooWeak(needed, actual), _)) => {
                    error!(
                        "inbox-err: {}",
                        CommsErrorKind::CertificateTooWeak(needed, actual)
                    );
                    break;
                }
                Err(CommsError(CommsErrorKind::MissingCertificate, _)) => {
                    error!("inbox-err: {}", CommsErrorKind::MissingCertificate);
                    break;
                }
                Err(CommsError(CommsErrorKind::ServerCertificateValidation, _)) => {
                    error!("inbox-err: {}", CommsErrorKind::ServerCertificateValidation);
                    break;
                }
                Err(CommsError(CommsErrorKind::ServerEncryptionWeak, _)) => {
                    error!("inbox-err: {}", CommsErrorKind::ServerEncryptionWeak);
                    break;
                }
                Err(CommsError(CommsErrorKind::FatalError(err), _)) => {
                    error!("inbox-err: {}", err);
                    break;
                }
                Err(CommsError(CommsErrorKind::SendError(err), _)) => {
                    warn!("inbox-err: {}", err);
                    break;
                }
                // Validation failures are logged but never kill the connection
                Err(CommsError(
                    CommsErrorKind::ValidationError(ValidationErrorKind::Many(errs)),
                    _,
                )) => {
                    for err in errs.iter() {
                        trace!("val-err: {}", err);
                    }
                    #[cfg(debug_assertions)]
                    warn!("inbox-debug: {} validation errors", errs.len());
                    #[cfg(not(debug_assertions))]
                    debug!("inbox-debug: {} validation errors", errs.len());
                    continue;
                }
                Err(CommsError(CommsErrorKind::ValidationError(err), _)) => {
                    #[cfg(debug_assertions)]
                    warn!("inbox-debug: validation error - {}", err);
                    #[cfg(not(debug_assertions))]
                    debug!("inbox-debug: validation error - {}", err);
                    continue;
                }
                // Anything else is logged and the loop carries on
                Err(err) => {
                    warn!("inbox-error: {}", err.to_string());
                    continue;
                }
            }
        }
        Ok(())
    }
    .await;
    // Always give the processor a chance to clean up, success or failure
    inbox.shutdown(sock_addr).await;
    ret
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/hello.rs | lib/src/comms/hello.rs | pub use ate_comms::mesh_hello_exchange_receiver;
pub use ate_comms::mesh_hello_exchange_sender;
pub use ate_comms::HelloMetadata;
pub use ate_comms::MessageProtocolVersion as StreamProtocolVersion;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/mod.rs | lib/src/comms/mod.rs | #![allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
mod certificate_validation;
#[cfg(feature = "enable_client")]
mod client;
mod conf;
pub mod hello;
mod helper;
pub mod key_exchange;
#[cfg(feature = "enable_server")]
mod listener;
mod metrics;
mod packet;
mod rx_tx;
mod stream;
mod test;
mod throttle;
mod router;
pub use ate_crypto::NodeId;
pub(crate) use conf::MeshConfig;
pub(crate) use packet::Packet;
pub(crate) use packet::PacketData;
pub(crate) use packet::PacketWithContext;
#[allow(unused_imports)]
pub(crate) use rx_tx::{Tx, TxDirection, TxGroup, TxGroupSpecific};
#[cfg(feature = "enable_client")]
#[allow(unused_imports)]
pub(crate) use client::connect;
#[cfg(feature = "enable_server")]
pub(crate) use listener::Listener;
pub use super::conf::MeshConnectAddr;
pub use certificate_validation::*;
pub use metrics::Metrics;
pub use stream::StreamProtocol;
pub use stream::StreamRx;
pub use stream::StreamTx;
pub use stream::StreamReadable;
pub use stream::StreamWritable;
pub use stream::MessageProtocolVersion;
pub use stream::StreamClient;
pub use stream::StreamSecurity;
#[cfg(feature = "enable_dns")]
pub use stream::Dns;
pub use conf::Upstream;
pub use throttle::Throttle;
pub use router::*;
pub use hello::HelloMetadata;
pub(crate) use helper::InboxProcessor;
#[cfg(feature = "server")]
pub(crate) use listener::ServerProcessor;
#[cfg(feature = "server")]
pub(crate) use listener::ServerProcessorFascade;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/metrics.rs | lib/src/comms/metrics.rs | #[derive(Debug, Clone, Default)]
pub struct Metrics {
    /// Total bytes received on this connection (incremented per frame read)
    pub received: u64,
    /// Total bytes sent on this connection
    pub sent: u64,
    /// Number of messages processed
    pub requests: u64,
    /// Size of the chain - presumably in bytes; confirm against the writer
    pub chain_size: u64,
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/key_exchange.rs | lib/src/comms/key_exchange.rs | pub use ate_comms::mesh_key_exchange_receiver;
pub use ate_comms::mesh_key_exchange_sender;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/time/keeper.rs | lib/src/time/keeper.rs | #[allow(unused_imports)]
use tracing::{debug, error, info};
use crate::conf::*;
use crate::error::*;
#[cfg(feature = "enable_ntp")]
use super::worker::NtpWorker;
use super::ChainTimestamp;
#[cfg(feature = "enable_ntp")]
use std::sync::Arc;
use std::time::Duration;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
/// Clock source for the chain: yields UNIX-epoch timestamps, optionally
/// corrected by a shared background NTP worker (`enable_ntp` feature).
#[derive(Debug, Clone)]
pub struct TimeKeeper {
    /// Allowed clock skew window used by timestamp validation
    pub tolerance: Duration,
    #[cfg(feature = "enable_ntp")]
    pub ntp_pool: String,
    #[cfg(feature = "enable_ntp")]
    pub ntp_port: u16,
    /// Present only when `cfg.ntp_sync` was enabled at construction
    #[cfg(feature = "enable_ntp")]
    pub(crate) ntp_worker: Option<Arc<NtpWorker>>,
}
impl TimeKeeper {
    /// Creates a time keeper with the given tolerance (milliseconds). When
    /// the `enable_ntp` feature is active and `cfg.ntp_sync` is set, a
    /// background NTP worker is started (or reused) to correct the clock.
    #[allow(unused_variables)]
    #[allow(dead_code)]
    pub async fn new(cfg: &ConfAte, tolerance_ms: u32) -> Result<TimeKeeper, TimeError> {
        let tolerance = Duration::from_millis(tolerance_ms as u64);
        Ok(TimeKeeper {
            // field-init shorthand (was the redundant `tolerance: tolerance`)
            tolerance,
            #[cfg(feature = "enable_ntp")]
            ntp_pool: cfg.ntp_pool.clone(),
            #[cfg(feature = "enable_ntp")]
            ntp_port: cfg.ntp_port,
            #[cfg(feature = "enable_ntp")]
            ntp_worker: match cfg.ntp_sync {
                true => Some(NtpWorker::create(cfg, tolerance_ms).await?),
                false => None,
            },
        })
    }
    /// Waits until the NTP worker reports an accurate sample; immediate
    /// no-op when NTP is disabled or no worker is running.
    pub async fn wait_for_high_accuracy(&self) {
        #[cfg(feature = "enable_ntp")]
        if let Some(worker) = &self.ntp_worker {
            worker.wait_for_high_accuracy().await;
        }
    }
    /// True once the clock source is trustworthy (always true without NTP).
    pub fn has_converged(&self) -> bool {
        #[cfg(feature = "enable_ntp")]
        if let Some(worker) = &self.ntp_worker {
            return worker.is_accurate();
        }
        true
    }
    /// Current wall-clock time as a duration since the UNIX epoch,
    /// NTP-corrected when a worker is running.
    ///
    /// # Errors
    /// Fails when the system clock is before the UNIX epoch or the NTP
    /// worker reports an error.
    pub fn current_timestamp_as_duration(&self) -> Result<Duration, TimeError> {
        #[cfg(not(feature = "enable_ntp"))]
        {
            let start = SystemTime::now();
            let since_the_epoch = start.duration_since(UNIX_EPOCH)?;
            Ok(since_the_epoch)
        }
        #[cfg(feature = "enable_ntp")]
        Ok(match &self.ntp_worker {
            Some(worker) => worker.current_timestamp()?.since_the_epoch,
            None => {
                let start = SystemTime::now();
                start.duration_since(UNIX_EPOCH)?
            }
        })
    }
    /// Current time in whole milliseconds since the UNIX epoch.
    pub fn current_timestamp(&self) -> Result<ChainTimestamp, TimeError> {
        Ok(ChainTimestamp::from(
            self.current_timestamp_as_duration()?.as_millis() as u64,
        ))
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/time/timestamp.rs | lib/src/time/timestamp.rs | use serde::{Deserialize, Serialize};
/// Logical timestamp of a chain event, expressed as whole milliseconds
/// since the UNIX epoch; ordering is plain numeric ordering.
#[derive(
    Serialize, Deserialize, Debug, Clone, Copy, Hash, Default, PartialEq, Eq, PartialOrd, Ord,
)]
pub struct ChainTimestamp {
    pub time_since_epoch_ms: u64,
}
impl ChainTimestamp {
    /// Returns a copy of this timestamp advanced by one millisecond.
    /// Plain addition is used, so it panics on overflow in debug builds
    /// (u64 milliseconds cannot realistically overflow).
    pub fn inc(mut self) -> Self {
        self.time_since_epoch_ms += 1;
        self
    }
}
impl From<u64> for ChainTimestamp {
    /// Interprets the raw value as milliseconds since the UNIX epoch.
    fn from(val: u64) -> ChainTimestamp {
        ChainTimestamp { time_since_epoch_ms: val }
    }
}
impl std::fmt::Display for ChainTimestamp {
    /// Renders the timestamp as e.g. `1234ms`.
    fn fmt(&self, formatter: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let millis = self.time_since_epoch_ms;
        write!(formatter, "{}ms", millis)
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/time/worker.rs | lib/src/time/worker.rs | use fxhash::FxHashMap;
#[allow(unused_imports)]
use tracing::{debug, error, info, trace, warn};
use crate::conf::*;
use crate::engine::TaskEngine;
use crate::error::*;
use once_cell::sync::Lazy;
use std::sync::Arc;
use std::time::Duration;
use std::time::SystemTime;
use std::time::UNIX_EPOCH;
use tokio::sync::watch::*;
use tokio::sync::Mutex;
use super::ntp::NtpResult;
/// Handle to the background NTP polling task; `result` is a watch channel
/// that always holds the latest (possibly inaccurate) clock sample.
#[derive(Debug)]
pub struct NtpWorker {
    result: Receiver<NtpResult>,
}
/// Clock offset snapshot in milliseconds, with accuracy flag.
pub struct NtpOffset {
    pub offset_ms: i64,
    pub accurate: bool,
}
/// Round-trip snapshot in milliseconds, with accuracy flag.
pub struct NtpPing {
    pub roundtrip_ms: u64,
    pub accurate: bool,
}
/// NTP-corrected wall-clock time since the UNIX epoch.
pub struct NtpTimestamp {
    pub since_the_epoch: Duration,
    pub accurate: bool,
}
// Process-wide cache of NTP workers keyed by "pool:port" so multiple
// TimeKeepers share a single background poller per server (see `create`).
static TIMESTAMP_WORKER: Lazy<Mutex<FxHashMap<String, Arc<NtpWorker>>>> =
    Lazy::new(|| Mutex::new(FxHashMap::default()));
impl NtpWorker {
    /// Spawns a background task that repeatedly samples the NTP pool and
    /// publishes the best (lowest round-trip) result over a watch channel.
    /// The initial value is an inaccurate sample from the system clock so
    /// readers always have something to work with.
    async fn new(pool: String, port: u16, tolerance_ms: u32) -> Result<Arc<NtpWorker>, TimeError> {
        debug!("ntp service started for {}@{}", pool, port);
        let tolerance_ms_loop = tolerance_ms;
        // Make an inaccurate NTP result using the system clock
        let start = SystemTime::now();
        let since_the_epoch = start.duration_since(UNIX_EPOCH)?.as_nanos();
        let ntp_result = NtpResult {
            sec: (since_the_epoch / 1000000000u128) as u32,
            nsec: (since_the_epoch % 1000000000u128) as u32,
            roundtrip: u64::MAX,
            offset: 0i64,
            accurate: false,
        };
        let (tx, rx) = channel(ntp_result);
        let ret = Arc::new(NtpWorker { result: rx });
        let bt_pool = pool.clone();
        TaskEngine::spawn(async move {
            let mut backoff_time = 50;
            let mut best_ping = u32::MAX;
            loop {
                match super::ntp::query_ntp_retry(&bt_pool, port, tolerance_ms_loop, 10).await {
                    Ok(r) => {
                        // Only publish samples whose ping is not much worse
                        // than the best seen so far
                        let ping = Duration::from_micros(r.roundtrip()).as_millis() as u32;
                        if best_ping == u32::MAX || ping < best_ping + 50 {
                            best_ping = ping;
                            if let Err(err) = tx.send(r) {
                                // every receiver is gone - stop polling
                                warn!("{}", err);
                                break;
                            }
                        }
                        crate::engine::sleep(Duration::from_secs(20)).await;
                        backoff_time = 50;
                    }
                    _ => {
                        // Failed sample: back off by 20% + 50ms, capped at 10s
                        crate::engine::sleep(Duration::from_millis(backoff_time)).await;
                        backoff_time = (backoff_time * 120) / 100;
                        backoff_time += 50;
                        if backoff_time > 10000 {
                            backoff_time = 10000;
                        }
                    }
                }
            }
        });
        debug!("ntp service ready for {}@{}", pool, port);
        Ok(ret)
    }
    /// Returns the worker for this pool/port, reusing an existing one when
    /// another caller already polls the same server.
    pub async fn create(cfg: &ConfAte, tolerance_ms: u32) -> Result<Arc<NtpWorker>, TimeError> {
        let pool = cfg.ntp_pool.clone();
        let port = cfg.ntp_port;
        let ntp_worker = {
            let key = format!("{}:{}", cfg.ntp_pool, cfg.ntp_port);
            let mut guard = TIMESTAMP_WORKER.lock().await;
            match guard.get(&key) {
                Some(a) => Arc::clone(a),
                None => {
                    let worker = NtpWorker::new(pool, port, tolerance_ms).await?;
                    guard.insert(key, Arc::clone(&worker));
                    worker
                }
            }
        };
        Ok(ntp_worker)
    }
    /// Latest clock offset converted to milliseconds, with accuracy flag.
    #[allow(dead_code)]
    fn current_offset_ms(&self) -> NtpOffset {
        let guard = self.result.borrow();
        let ret = guard.offset() / 1000;
        NtpOffset {
            offset_ms: ret,
            accurate: guard.accurate,
        }
    }
    /// Latest round-trip converted to milliseconds, with accuracy flag.
    #[allow(dead_code)]
    fn current_ping_ms(&self) -> NtpPing {
        let guard = self.result.borrow();
        let ret = guard.roundtrip() / 1000;
        NtpPing {
            roundtrip_ms: ret,
            accurate: guard.accurate,
        }
    }
    /// Waits until the background task publishes an accurate sample.
    pub async fn wait_for_high_accuracy(&self) {
        let mut result = self.result.clone();
        while !result.borrow().accurate {
            if let Err(err) = result.changed().await {
                error!("{}", err);
                break;
            }
        }
    }
    /// True when the most recent sample is considered accurate.
    pub fn is_accurate(&self) -> bool {
        self.result.borrow().accurate
    }
    /// System time adjusted by the latest NTP offset (offset is in
    /// microseconds as produced by `ntp::process_response`).
    pub fn current_timestamp(&self) -> Result<NtpTimestamp, TimeError> {
        let start = SystemTime::now();
        let mut since_the_epoch = start.duration_since(UNIX_EPOCH)?;
        let guard = self.result.borrow();
        let offset = guard.offset();
        if offset >= 0 {
            since_the_epoch += Duration::from_micros(offset as u64);
        } else {
            since_the_epoch -= Duration::from_micros((-offset) as u64);
        }
        Ok(NtpTimestamp {
            since_the_epoch,
            accurate: guard.accurate,
        })
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/time/mod.rs | lib/src/time/mod.rs | mod enforcer;
mod keeper;
#[cfg(feature = "enable_ntp")]
mod ntp;
mod timestamp;
#[cfg(feature = "enable_ntp")]
mod worker;
pub use enforcer::TimestampEnforcer;
pub use keeper::TimeKeeper;
pub use timestamp::ChainTimestamp;
#[cfg(feature = "enable_ntp")]
pub use worker::NtpWorker;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/time/enforcer.rs | lib/src/time/enforcer.rs | use error_chain::bail;
#[allow(unused_imports)]
use tracing::{debug, error, info};
use crate::conf::*;
use crate::error::*;
use crate::event::EventHeader;
use crate::index::*;
use crate::lint::*;
use crate::meta::*;
use crate::plugin::*;
use crate::session::*;
use crate::sink::*;
use crate::transaction::*;
use crate::transform::*;
use crate::validator::*;
use super::keeper::TimeKeeper;
use std::sync::Arc;
use std::time::Duration;
use std::time::UNIX_EPOCH;
/// Chain plugin that stamps outgoing events with the current time and
/// rejects incoming events whose timestamps fall outside the allowed window.
#[derive(Debug, Clone)]
pub struct TimestampEnforcer {
    /// Highest timestamp seen so far in the event stream (starts at the
    /// tolerance window and is advanced by `feed`)
    pub cursor: Duration,
    /// Clock source used for stamping and validation
    pub keeper: TimeKeeper,
}
impl TimestampEnforcer {
    /// Builds the enforcer and its backing `TimeKeeper` with the supplied
    /// tolerance window in milliseconds.
    #[allow(dead_code)]
    pub async fn new(cfg: &ConfAte, tolerance_ms: u32) -> Result<TimestampEnforcer, TimeError> {
        let cursor = Duration::from_millis(tolerance_ms as u64);
        let keeper = TimeKeeper::new(cfg, tolerance_ms).await?;
        Ok(TimestampEnforcer { cursor, keeper })
    }
}
impl EventMetadataLinter for TimestampEnforcer {
    fn clone_linter(&self) -> Box<dyn EventMetadataLinter> {
        Box::new(self.clone())
    }
    /// Stamps events that do not yet carry a timestamp with the keeper's
    /// current time; events already stamped are left untouched.
    fn metadata_lint_event(
        &self,
        meta: &Metadata,
        _session: &'_ dyn AteSession,
        _trans_meta: &TransactionMetadata,
        _type_code: &str,
    ) -> Result<Vec<CoreMetadata>, LintError> {
        if meta.get_timestamp().is_some() {
            return Ok(Vec::new());
        }
        let stamp = CoreMetadata::Timestamp(self.keeper.current_timestamp()?);
        Ok(vec![stamp])
    }
}
impl EventSink for TimestampEnforcer {
    /// Advances the cursor to the newest timestamp observed in the stream.
    fn feed(
        &mut self,
        header: &EventHeader,
        _conversation: Option<&Arc<ConversationSession>>,
    ) -> Result<(), SinkError> {
        if let Some(time) = header.meta.get_timestamp() {
            let time = Duration::from_millis(time.time_since_epoch_ms);
            if time > self.cursor {
                self.cursor = time;
            }
        }
        Ok(())
    }
    /// Rewinds the cursor back to the configured tolerance window.
    fn reset(&mut self) {
        // Duration is Copy - the previous `.clone()` was redundant
        self.cursor = self.keeper.tolerance;
    }
}
impl EventIndexer for TimestampEnforcer {
    /// The enforcer maintains no index; only the mandatory clone hook is
    /// implemented, everything else uses the trait defaults.
    fn clone_indexer(&self) -> Box<dyn EventIndexer> {
        Box::new(self.clone())
    }
}
impl EventDataTransformer for TimestampEnforcer {
    /// No data transformation is performed; only the clone hook is needed.
    fn clone_transformer(&self) -> Box<dyn EventDataTransformer> {
        Box::new(self.clone())
    }
}
impl EventValidator for TimestampEnforcer {
    fn clone_validator(&self) -> Box<dyn EventValidator> {
        Box::new(self.clone())
    }
    /// Rejects events whose timestamp lies beyond the allowed window.
    ///
    /// Events without a timestamp are rejected only when they require a
    /// signature; otherwise the validator abstains. When the clock has not
    /// yet converged the bounds check is skipped entirely.
    fn validate(
        &self,
        header: &EventHeader,
        _conversation: Option<&Arc<ConversationSession>>,
    ) -> Result<ValidationResult, ValidationError> {
        // If it does not have a timestamp then we can not accept it
        let time = match header.meta.get_timestamp() {
            Some(m) => m,
            None => {
                return match header.meta.needs_signature() {
                    true => {
                        debug!("rejected event due to missing timestamp");
                        Err(ValidationErrorKind::TrustError(TrustErrorKind::TimeError(
                            TimeErrorKind::NoTimestamp,
                        ))
                        .into())
                    }
                    false => Ok(ValidationResult::Abstain),
                };
            }
        };
        // If time is not currently accurate then we can not properly validate
        // (was `== true`, dropped the redundant bool comparison)
        if self.keeper.has_converged() {
            // Check its within the time range
            let timestamp = Duration::from_millis(time.time_since_epoch_ms);
            //let min_timestamp = self.cursor - self.tolerance;
            let max_timestamp =
                self.keeper.current_timestamp_as_duration()? + self.keeper.tolerance;
            if
            //timestamp < min_timestamp ||
            timestamp > max_timestamp {
                let cursor = UNIX_EPOCH + self.cursor;
                let timestamp = UNIX_EPOCH + timestamp;
                let cursor_str = chrono::DateTime::<chrono::Utc>::from(cursor)
                    .format("%Y-%m-%d %H:%M:%S.%f")
                    .to_string();
                let timestamp_str = chrono::DateTime::<chrono::Utc>::from(timestamp)
                    .format("%Y-%m-%d %H:%M:%S.%f")
                    .to_string();
                debug!(
                    "rejected event {:?} due to out-of-bounds timestamp ({} vs {})",
                    header, cursor_str, timestamp_str
                );
                bail!(ValidationErrorKind::TrustError(TrustErrorKind::TimeError(
                    TimeErrorKind::OutOfBounds(cursor, timestamp)
                )));
            }
        }
        // All good
        Ok(ValidationResult::Abstain)
    }
    /// Identifier used in validator diagnostics.
    fn validator_name(&self) -> &str {
        "timestamp-validator"
    }
}
impl EventPlugin for TimestampEnforcer {
    /// All plugin behavior comes from the super-traits implemented above;
    /// only the clone hook is required here.
    fn clone_plugin(&self) -> Box<dyn EventPlugin> {
        Box::new(self.clone())
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/time/ntp.rs | lib/src/time/ntp.rs | #![allow(unused_imports)]
use crate::engine::timeout;
use error_chain::bail;
use std::fmt::Debug;
use std::fmt::Formatter;
use std::mem;
use std::net::{SocketAddr, ToSocketAddrs};
use std::str;
use std::time;
use std::time::Duration;
use tokio::io;
use tokio::net;
use tokio::net::UdpSocket;
use tracing::{debug, error, info};
use crate::error::*;
// Bit masks/shifts for unpacking the first NTP header byte, which packs
// leap-indicator (2 bits), version (3 bits) and mode (3 bits).
const MODE_MASK: u8 = 0b0000_0111;
const MODE_SHIFT: u8 = 0;
const VERSION_MASK: u8 = 0b0011_1000;
const VERSION_SHIFT: u8 = 3;
const LI_MASK: u8 = 0b1100_0000;
const LI_SHIFT: u8 = 6;
// Nanoseconds per second; used to carry oversized nsec values into seconds.
const NSEC_IN_SEC: u32 = 1_000_000_000;
/// In-memory representation of a 48-byte SNTP packet; field order follows
/// the wire layout, timestamps are NTP 32.32 fixed-point values.
struct NtpPacket {
    li_vn_mode: u8,
    stratum: u8,
    poll: i8,
    precision: i8,
    root_delay: u32,
    root_dispersion: u32,
    ref_id: u32,
    ref_timestamp: u64,
    origin_timestamp: u64,
    recv_timestamp: u64,
    tx_timestamp: u64,
}
/// SNTP request result representation
#[derive(Clone, Copy)]
pub(crate) struct NtpResult {
    /// NTP server seconds value
    pub sec: u32,
    /// NTP server nanoseconds value (normalized to < 1s by `new`)
    pub nsec: u32,
    /// Request roundtrip time, in microseconds
    pub roundtrip: u64,
    /// Offset of the current system time with one received from a NTP server, in microseconds
    pub offset: i64,
    /// Flag that indicates if this result is likely to be accurate or not
    pub accurate: bool,
}
impl NtpResult {
    /// Create new NTP result
    /// Args:
    /// * `sec` - number of seconds
    /// * `nsec` - number of nanoseconds (whole seconds are carried into `sec`)
    /// * `roundtrip` - calculated roundtrip in microseconds
    /// * `offset` - calculated system clock offset in microseconds
    pub(crate) fn new(sec: u32, nsec: u32, roundtrip: u64, offset: i64, accurate: bool) -> Self {
        // Normalize: carry whole seconds out of the nanoseconds field
        let residue = nsec / NSEC_IN_SEC;
        let nsec = nsec % NSEC_IN_SEC;
        let sec = sec + residue;
        NtpResult {
            sec,
            nsec,
            roundtrip,
            offset,
            accurate,
        }
    }
    /// Returns number of seconds reported by an NTP server
    #[allow(dead_code)]
    pub(crate) fn sec(&self) -> u32 {
        self.sec
    }
    /// Returns number of nanoseconds reported by an NTP server
    #[allow(dead_code)]
    pub(crate) fn nsec(&self) -> u32 {
        self.nsec
    }
    /// Returns request's roundtrip time (client -> server -> client) in microseconds
    pub(crate) fn roundtrip(&self) -> u64 {
        self.roundtrip
    }
    /// Returns system clock offset value in microseconds
    pub(crate) fn offset(&self) -> i64 {
        self.offset
    }
    /// Returns if the result is accurate or not
    #[allow(dead_code)]
    pub(crate) fn accurate(&self) -> bool {
        self.accurate
    }
}
impl Debug for NtpResult {
    /// Debug output now includes every field; the hand-written impl
    /// previously omitted `accurate`, making inaccurate samples
    /// indistinguishable from accurate ones in logs.
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("NtpResult")
            .field("sec", &self.sec)
            .field("nsec", &self.nsec)
            .field("roundtrip", &self.roundtrip)
            .field("offset", &self.offset)
            .field("accurate", &self.accurate)
            .finish()
    }
}
impl NtpPacket {
    // Seconds between the NTP epoch (1900-01-01) and the UNIX epoch (1970-01-01)
    const NTP_TIMESTAMP_DELTA: u32 = 2_208_988_800u32;
    // Mode 3 = client request
    const SNTP_CLIENT_MODE: u8 = 3;
    // Protocol version 4, pre-shifted into the version bit position
    const SNTP_VERSION: u8 = 4 << 3;
    #[allow(dead_code)]
    const LI_MASK: u8 = 0b0000_0011;
    #[allow(dead_code)]
    const VN_MASK: u8 = 0b0001_1100;
    #[allow(dead_code)]
    const MODE_MASK: u8 = 0b1110_0000;
    /// Builds a client request packet stamped with the current transmit time.
    pub(crate) fn new() -> NtpPacket {
        let tx_timestamp = get_ntp_timestamp();
        NtpPacket {
            li_vn_mode: NtpPacket::SNTP_CLIENT_MODE | NtpPacket::SNTP_VERSION,
            stratum: 0,
            poll: 0,
            precision: 0,
            root_delay: 0,
            root_dispersion: 0,
            ref_id: 0,
            ref_timestamp: 0,
            origin_timestamp: 0,
            recv_timestamp: 0,
            tx_timestamp,
        }
    }
}
/// Byte-order conversion for NTP integer fields.
///
/// NOTE(review): `ntohl` is implemented as `to_be()`, which byte-swaps on
/// little-endian hosts but is the identity on big-endian ones; combined
/// with the `from_le_bytes` reads in `From<RawNtpPacket>` this only decodes
/// correctly on little-endian targets - confirm if big-endian matters.
trait NtpNum {
    type Type;
    fn ntohl(&self) -> Self::Type;
}
impl NtpNum for u32 {
    type Type = u32;
    fn ntohl(&self) -> Self::Type {
        self.to_be()
    }
}
impl NtpNum for u64 {
    type Type = u64;
    fn ntohl(&self) -> Self::Type {
        self.to_be()
    }
}
/// Raw receive/send buffer sized to `NtpPacket`.
///
/// NOTE(review): this relies on `mem::size_of::<NtpPacket>()` equaling the
/// 48-byte wire size; the struct is not `#[repr(C, packed)]`, so this holds
/// for the current field set but is fragile to changes.
struct RawNtpPacket([u8; mem::size_of::<NtpPacket>()]);
impl Default for RawNtpPacket {
    fn default() -> Self {
        RawNtpPacket([0u8; mem::size_of::<NtpPacket>()])
    }
}
impl From<RawNtpPacket> for NtpPacket {
    /// Reinterprets the raw 48-byte buffer as packet fields.
    ///
    /// NOTE(review): multi-byte fields are read with `from_le_bytes` and
    /// later byte-swapped by `convert_from_network` (`to_be`); the pair
    /// cancels to a correct big-endian decode only on little-endian hosts.
    fn from(val: RawNtpPacket) -> Self {
        // left it here for a while, maybe in future Rust releases there
        // will be a way to use such a generic function with compile-time
        // size determination
        // const fn to_array<T: Sized>(x: &[u8]) -> [u8; mem::size_of::<T>()] {
        //     let mut temp_buf = [0u8; mem::size_of::<T>()];
        //
        //     temp_buf.copy_from_slice(x);
        //     temp_buf
        // }
        let to_array_u32 = |x: &[u8]| {
            let mut temp_buf = [0u8; mem::size_of::<u32>()];
            temp_buf.copy_from_slice(x);
            temp_buf
        };
        let to_array_u64 = |x: &[u8]| {
            let mut temp_buf = [0u8; mem::size_of::<u64>()];
            temp_buf.copy_from_slice(x);
            temp_buf
        };
        NtpPacket {
            li_vn_mode: val.0[0],
            stratum: val.0[1],
            poll: val.0[2] as i8,
            precision: val.0[3] as i8,
            root_delay: u32::from_le_bytes(to_array_u32(&val.0[4..8])),
            root_dispersion: u32::from_le_bytes(to_array_u32(&val.0[8..12])),
            ref_id: u32::from_le_bytes(to_array_u32(&val.0[12..16])),
            ref_timestamp: u64::from_le_bytes(to_array_u64(&val.0[16..24])),
            origin_timestamp: u64::from_le_bytes(to_array_u64(&val.0[24..32])),
            recv_timestamp: u64::from_le_bytes(to_array_u64(&val.0[32..40])),
            tx_timestamp: u64::from_le_bytes(to_array_u64(&val.0[40..48])),
        }
    }
}
impl From<&NtpPacket> for RawNtpPacket {
    /// Serializes the packet into its 48-byte wire form; multi-byte fields
    /// are written big-endian (network order) via `to_be_bytes`.
    fn from(val: &NtpPacket) -> Self {
        let mut tmp_buf = [0u8; mem::size_of::<NtpPacket>()];
        tmp_buf[0] = val.li_vn_mode;
        tmp_buf[1] = val.stratum;
        tmp_buf[2] = val.poll as u8;
        tmp_buf[3] = val.precision as u8;
        tmp_buf[4..8].copy_from_slice(&val.root_delay.to_be_bytes());
        tmp_buf[8..12].copy_from_slice(&val.root_dispersion.to_be_bytes());
        tmp_buf[12..16].copy_from_slice(&val.ref_id.to_be_bytes());
        tmp_buf[16..24].copy_from_slice(&val.ref_timestamp.to_be_bytes());
        tmp_buf[24..32].copy_from_slice(&val.origin_timestamp.to_be_bytes());
        tmp_buf[32..40].copy_from_slice(&val.recv_timestamp.to_be_bytes());
        tmp_buf[40..48].copy_from_slice(&val.tx_timestamp.to_be_bytes());
        RawNtpPacket(tmp_buf)
    }
}
/// Performs one SNTP request/response round-trip against `pool:port`.
///
/// # Errors
/// Returns an `io::Error` when the socket cannot be created or configured
/// (previously these paths panicked via `expect`), when the response times
/// out, arrives from an unexpected peer, has the wrong size, or fails
/// protocol validation.
pub(crate) async fn request(
    pool: &str,
    port: u16,
    timeout: time::Duration,
) -> io::Result<NtpResult> {
    // Bind an ephemeral UDP port; failures are propagated, not panicked on
    let socket = net::UdpSocket::bind("0.0.0.0:0").await?;
    let dest = format!("{}:{}", pool, port).to_socket_addrs()?;
    // Round-trip through a std socket to install a read timeout
    let socket = socket.into_std()?;
    socket.set_read_timeout(Some(timeout))?;
    let socket = net::UdpSocket::from_std(socket)?;
    let req = NtpPacket::new();
    let dest = process_request(dest, &req, &socket).await?;
    let mut buf: RawNtpPacket = RawNtpPacket::default();
    let (response, src) =
        crate::engine::timeout(timeout, socket.recv_from(buf.0.as_mut())).await??;
    let recv_timestamp = get_ntp_timestamp();
    // Guard against spoofed replies from a different peer
    if src != dest {
        return Err(io::Error::new(
            io::ErrorKind::Other,
            "SNTP response port / address mismatch",
        ));
    }
    if response == mem::size_of::<NtpPacket>() {
        let result = process_response(&req, buf, recv_timestamp);
        return match result {
            Ok(result) => Ok(result),
            Err(err_str) => Err(io::Error::new(io::ErrorKind::Other, err_str)),
        };
    }
    Err(io::Error::new(
        io::ErrorKind::Other,
        "Incorrect NTP packet size read",
    ))
}
/// Tries each resolved address in turn and returns the first one that
/// accepted the full request packet; errors out when none respond.
async fn process_request(
    dest: std::vec::IntoIter<SocketAddr>,
    req: &NtpPacket,
    socket: &UdpSocket,
) -> io::Result<SocketAddr> {
    for addr in dest {
        if let Ok(write_bytes) = send_request(&req, &socket, addr).await {
            // A partial write would be a bug in the serializer, not the peer
            assert_eq!(write_bytes, mem::size_of::<NtpPacket>());
            return Ok(addr);
        }
    }
    Err(io::Error::new(
        io::ErrorKind::AddrNotAvailable,
        "SNTP servers not responding",
    ))
}
async fn send_request(
req: &NtpPacket,
socket: &net::UdpSocket,
dest: std::net::SocketAddr,
) -> io::Result<usize> {
let buf: RawNtpPacket = req.into();
socket.send_to(&buf.0, dest).await
}
/// Validates a raw SNTP response against the original request and converts
/// it into an `NtpResult` using the standard NTP offset/delay equations.
/// Returns a static description string on any protocol violation.
fn process_response(
    req: &NtpPacket,
    resp: RawNtpPacket,
    recv_timestamp: u64,
) -> Result<NtpResult, &str> {
    const SNTP_UNICAST: u8 = 4;
    const SNTP_BROADCAST: u8 = 5;
    const LI_MAX_VALUE: u8 = 3;
    const MSEC_MASK: u64 = 0x0000_0000_ffff_ffff;
    let shifter = |val, mask, shift| (val & mask) >> shift;
    let mut packet = NtpPacket::from(resp);
    convert_from_network(&mut packet);
    // The server must echo our transmit time as its origin timestamp
    if req.tx_timestamp != packet.origin_timestamp {
        return Err("Incorrect origin timestamp");
    }
    // Shift is 0
    let mode = shifter(packet.li_vn_mode, MODE_MASK, MODE_SHIFT);
    let li = shifter(packet.li_vn_mode, LI_MASK, LI_SHIFT);
    let resp_version = shifter(packet.li_vn_mode, VERSION_MASK, VERSION_SHIFT);
    let req_version = shifter(req.li_vn_mode, VERSION_MASK, VERSION_SHIFT);
    if mode != SNTP_UNICAST && mode != SNTP_BROADCAST {
        return Err("Incorrect MODE value");
    }
    if li > LI_MAX_VALUE {
        return Err("Incorrect LI value");
    }
    if req_version != resp_version {
        return Err("Incorrect response version");
    }
    if packet.stratum == 0 {
        return Err("Incorrect STRATUM headers");
    }
    // theta = T(B) - T(A) = 1/2 * [(T2-T1) + (T3-T4)]
    // and the round-trip delay
    // delta = T(ABA) = (T4-T1) - (T3-T2).
    // where:
    // - T1 = client's TX timestamp
    // - T2 = server's RX timestamp
    // - T3 = server's TX timestamp
    // - T4 = client's RX timestamp
    let delta = (recv_timestamp - packet.origin_timestamp) as i64
        - (packet.tx_timestamp - packet.recv_timestamp) as i64;
    let theta = ((packet.recv_timestamp as i64 - packet.origin_timestamp as i64)
        + (recv_timestamp as i64 - packet.tx_timestamp as i64))
        / 2;
    // Split the 32.32 fixed-point transmit time into seconds + fraction
    let seconds = (packet.tx_timestamp >> 32) as u32;
    let nsec = (packet.tx_timestamp & MSEC_MASK) as u32;
    // Rebase from the NTP epoch (1900) to the UNIX epoch (1970)
    let tx_tm = seconds - NtpPacket::NTP_TIMESTAMP_DELTA;
    Ok(NtpResult::new(tx_tm, nsec, delta.abs() as u64, theta, true))
}
/// Converts every multi-byte field from network byte order to host order.
/// See the NOTE(review) on `NtpNum` regarding big-endian hosts.
fn convert_from_network(packet: &mut NtpPacket) {
    fn ntohl<T: NtpNum>(val: T) -> T::Type {
        val.ntohl()
    }
    packet.root_delay = ntohl(packet.root_delay);
    packet.root_dispersion = ntohl(packet.root_dispersion);
    packet.ref_id = ntohl(packet.ref_id);
    packet.ref_timestamp = ntohl(packet.ref_timestamp);
    packet.origin_timestamp = ntohl(packet.origin_timestamp);
    packet.recv_timestamp = ntohl(packet.recv_timestamp);
    packet.tx_timestamp = ntohl(packet.tx_timestamp);
}
/// Current system time in the NTP 64-bit format: seconds since 1900 in the
/// high 32 bits, sub-second microseconds (unscaled) in the low 32 bits.
fn get_ntp_timestamp() -> u64 {
    let now_since_unix = time::SystemTime::now()
        .duration_since(time::SystemTime::UNIX_EPOCH)
        .unwrap();
    let secs = now_since_unix.as_secs() + u64::from(NtpPacket::NTP_TIMESTAMP_DELTA);
    (secs << 32) + u64::from(now_since_unix.subsec_micros())
}
/// Queries the NTP pool once and rejects results whose round-trip time
/// exceeds `tolerance_ms`.
///
/// The parameter is now `&str` (was `&String`); existing `&String` call
/// sites still compile via deref coercion.
pub(crate) async fn query_ntp(
    pool: &str,
    port: u16,
    tolerance_ms: u32,
) -> Result<NtpResult, TimeError> {
    // Give the socket a little slack on top of the tolerance window
    let timeout = Duration::from_millis(tolerance_ms as u64) + Duration::from_millis(50);
    let ret = request(pool, port, timeout).await?;
    let ping = Duration::from_micros(ret.roundtrip()).as_millis() as u32;
    if ping > tolerance_ms {
        // (dropped the redundant `ping as u32` cast - ping is already u32)
        bail!(TimeErrorKind::BeyondTolerance(ping));
    }
    Ok(ret)
}
/// Retries `query_ntp_retry` forever until a sample within tolerance is
/// obtained, sleeping between failures with an exponential backoff that is
/// capped at ten seconds.
#[allow(dead_code)]
pub(crate) async fn query_ntp_with_backoff(
    pool: &String,
    port: u16,
    tolerance_ms: u32,
    samples: u32,
) -> NtpResult {
    let mut wait_time = 50;
    loop {
        if let Ok(result) = query_ntp_retry(pool, port, tolerance_ms, samples).await {
            return result;
        }
        crate::engine::sleep(Duration::from_millis(wait_time)).await;
        // grow by 20% plus 50ms, never beyond 10s
        wait_time = ((wait_time * 120) / 100 + 50).min(10000);
    }
}
/// Collects up to `samples` successful NTP responses, keeping the one with
/// the lowest round-trip time. If the best sample is within tolerance it is
/// returned; otherwise a final direct `query_ntp` decides (and its error,
/// if any, is propagated).
pub(crate) async fn query_ntp_retry(
    pool: &String,
    port: u16,
    tolerance_ms: u32,
    samples: u32,
) -> Result<NtpResult, TimeError> {
    let mut best: Option<NtpResult> = None;
    let mut positives = 0;
    let mut wait_time = 50;
    for _ in 0..samples {
        // Once a sample exists, tighten the timeout to just above its ping
        let timeout = match &best {
            Some(b) => Duration::from_micros(b.roundtrip()) + Duration::from_millis(50),
            None => Duration::from_millis(tolerance_ms as u64),
        };
        #[cfg(feature = "enable_super_verbose")]
        trace!("ntp request timeout={}ms", timeout.as_millis());
        if let Ok(ret) = request(pool.as_str(), port, timeout).await {
            #[cfg(feature = "enable_super_verbose")]
            trace!(
                "ntp response roundtrip={}, offset={}",
                ret.roundtrip,
                ret.offset
            );
            // Keep whichever sample has the lowest round-trip so far
            let current_ping = match &best {
                Some(b) => b.roundtrip(),
                None => u64::max_value(),
            };
            if ret.roundtrip() < current_ping {
                best = Some(ret);
            }
            positives = positives + 1;
            if positives >= samples {
                break;
            }
            wait_time = 50;
        } else {
            // Failed request: back off by 20% + 50ms, capped at 10 seconds
            crate::engine::sleep(Duration::from_millis(wait_time)).await;
            wait_time = (wait_time * 120) / 100;
            wait_time = wait_time + 50;
            if wait_time > 10000 {
                wait_time = 10000;
            }
        }
    }
    if let Some(ret) = best {
        let ping = Duration::from_micros(ret.roundtrip()).as_millis() as u32;
        if ping <= tolerance_ms {
            return Ok(ret);
        }
    }
    // Last resort: a single direct query decides success or failure
    query_ntp(pool, port, tolerance_ms).await
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/utils/key.rs | lib/src/utils/key.rs | use crate::prelude::*;
/// Builds a chain key from the first `len` lowercase hex characters of the
/// hash of `val`, optionally prefixed with `"{prefix}-"`. Shared by the two
/// public helpers below, which previously duplicated this logic.
fn chain_key_hex(val: &str, prefix: Option<&str>, len: usize) -> ChainKey {
    let hash = AteHash::from(val.to_string());
    let hex = hash.to_hex_string().to_lowercase();
    match prefix {
        Some(prefix) => ChainKey::new(format!("{}-{}", prefix, &hex[..len])),
        None => ChainKey::new(hex[..len].to_string()),
    }
}
/// Chain key derived from the first 16 hex characters of the value's hash.
pub fn chain_key_16hex(val: &str, prefix: Option<&str>) -> ChainKey {
    chain_key_hex(val, prefix, 16)
}
/// Chain key derived from the first 4 hex characters of the value's hash.
pub fn chain_key_4hex(val: &str, prefix: Option<&str>) -> ChainKey {
    chain_key_hex(val, prefix, 4)
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/utils/io.rs | lib/src/utils/io.rs | use std::fmt;
use std::fs::File;
use std::str::FromStr;
use std::io::{BufReader, BufRead, Error, Read};
/// Loads a list of node addresses (one per line) from the given file path,
/// expanding a leading `~`. Returns `None` when no path was supplied.
///
/// Panics with the offending path in the message if the file cannot be
/// opened, or if any line fails to read.
pub fn load_node_list(list: Option<String>) -> Option<Vec<String>>
{
    let list = list?;
    let list = shellexpand::tilde(&list).to_string();
    let file = File::open(list.as_str())
        .map_err(|err| conv_file_open_err(list.as_str(), err))
        .unwrap();
    let lines = BufReader::new(file)
        .lines()
        .map(|line| line.unwrap())
        .collect::<Vec<_>>();
    Some(lines)
}
/// Loads a numeric node id from the given file path, expanding a leading
/// `~`. Returns `None` when no path was supplied, when the file cannot be
/// read, or when its contents do not parse as a `u32`.
///
/// Panics with the offending path in the message if the file cannot be
/// opened.
pub fn load_node_id(path: Option<String>) -> Option<u32>
{
    let path = path?;
    let path = shellexpand::tilde(&path).to_string();
    let mut file = File::open(path.as_str())
        .map_err(|err| conv_file_open_err(path.as_str(), err))
        .unwrap();
    let mut contents = String::new();
    match file.read_to_string(&mut contents) {
        Ok(_) => u32::from_str(contents.as_str()).ok(),
        Err(_) => None,
    }
}
/// An I/O error annotated with the path that was being accessed when it
/// occurred; carried as the custom payload of a `std::io::Error`.
pub struct FileIOError
{
    path: String,
    inner: Error,
}

/// Wraps `inner` in a new `std::io::Error` of the same kind whose payload is
/// a [`FileIOError`], so failure messages name the file involved.
pub fn conv_file_open_err(path: &str, inner: Error) -> Error {
    let kind = inner.kind();
    let detail = FileIOError {
        path: path.to_string(),
        inner,
    };
    Error::new(kind, detail)
}

impl fmt::Display for FileIOError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            "failed while attempting to access [{}] - {}",
            self.path, self.inner
        )
    }
}

impl fmt::Debug for FileIOError {
    // Debug mirrors Display so logs show the friendly message.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        fmt::Display::fmt(self, f)
    }
}

impl std::error::Error for FileIOError {
    fn source(&self) -> Option<&(dyn std::error::Error + 'static)> {
        Some(&self.inner)
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/utils/progress.rs | lib/src/utils/progress.rs | #![allow(unused_imports, dead_code)]
use async_trait::async_trait;
use pbr::ProgressBar;
use pbr::Units;
use std::io::Write;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::error::ChainCreationError;
use crate::event::EventWeakData;
use crate::loader::LoadData;
use crate::mesh::Loader;
use crate::event::MessageBytes;
/// A `Loader` implementation that renders chain-load progress as a
/// progress bar drawn onto an arbitrary output stream.
pub struct LoadProgress<T>
where
    T: Write + Send + Sync,
{
    /// Message printed when the bar finishes; human messages received while
    /// the bar is active are appended here so they print afterwards.
    pub msg_done: String,
    /// Unit style for the bar (plain counts or byte sizes).
    pub units: pbr::Units,
    /// The active progress bar; `None` until loading starts.
    pub bar: Option<ProgressBar<T>>,
    /// The output stream; moved into the bar once it is created.
    pub writer: Option<T>,
}
impl<T> LoadProgress<T>
where
    T: Write + Send + Sync,
{
    /// Creates a progress reporter that will draw onto `writer` once
    /// loading begins. Defaults: plain-count units, "Done" on completion.
    pub fn new(writer: T) -> LoadProgress<T> {
        let msg_done = "Done".to_string();
        LoadProgress {
            writer: Some(writer),
            bar: None,
            units: pbr::Units::Default,
            msg_done,
        }
    }
}
#[async_trait]
impl<T> Loader for LoadProgress<T>
where
    T: Write + Send + Sync,
{
    /// Queues a human-readable message: while the bar is active it is
    /// appended to the completion text (so it prints after the bar
    /// finishes); otherwise it is written straight to the stream.
    fn human_message(&mut self, message: String) {
        if self.bar.is_some() {
            self.msg_done.push('\n');
            self.msg_done.push_str(message.as_str());
        } else if let Some(writer) = self.writer.as_mut() {
            let _ = writer.write_all(message.as_bytes());
        }
    }

    /// Starts the progress bar sized to `size` units, taking ownership of
    /// the writer.
    async fn start_of_history(&mut self, size: usize) {
        if let Some(writer) = self.writer.take() {
            let mut pb = ProgressBar::on(writer, size as u64);
            match &self.units {
                Units::Default => pb.set_units(Units::Default),
                Units::Bytes => pb.set_units(Units::Bytes),
            }
            pb.format("╢█▌░╟");
            self.bar.replace(pb);
        }
    }

    /// Advances the bar by one unit per replayed event.
    fn feed_events(&mut self, evts: &Vec<EventWeakData>) {
        if let Some(pb) = &mut self.bar {
            pb.add(evts.len() as u64);
        }
    }

    /// Advances the bar by the size of a loaded record: 2 bytes of overhead
    /// plus metadata bytes plus payload bytes (0 when there is no payload).
    async fn feed_load_data(&mut self, data: LoadData) {
        if let Some(pb) = &mut self.bar {
            let total = 2
                + data.header.meta_bytes.len()
                + match data.data.data_bytes {
                    MessageBytes::Some(a) => a.len(),
                    MessageBytes::LazySome(l) => l.len,
                    MessageBytes::None => 0,
                };
            pb.add(total as u64);
        }
    }

    /// Finishes the bar and prints the accumulated completion message.
    async fn end_of_history(&mut self) {
        if let Some(mut pb) = self.bar.take() {
            pb.finish_print(self.msg_done.as_str());
        }
    }

    /// Reports a load failure on the bar (or the raw stream when no bar is
    /// active) and hands the error back to the caller unchanged.
    async fn failed(&mut self, err: ChainCreationError) -> Option<ChainCreationError> {
        if let Some(mut pb) = self.bar.take() {
            pb.finish_print(err.to_string().as_str());
        } else if let Some(writer) = self.writer.as_mut() {
            let _ = writer.write_all(err.to_string().as_bytes());
        }
        Some(err)
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/utils/mod.rs | lib/src/utils/mod.rs | #![cfg_attr(debug_assertions, allow(dead_code, unused_imports))]
#![allow(unused_imports)]
use tracing::{debug, error, info};
mod key;
mod progress;
mod io;
use ate_crypto::utils;
pub use ate_crypto::utils::b64;
pub use ate_crypto::utils::log;
pub use ate_crypto::utils::test;
pub use super::utils::test::*;
pub use utils::b16_deserialize;
pub use utils::b16_serialize;
pub use utils::b24_deserialize;
pub use utils::b24_serialize;
pub use utils::b32_deserialize;
pub use utils::b32_serialize;
pub use utils::vec_deserialize;
pub use utils::vec_serialize;
pub use key::chain_key_16hex;
pub use key::chain_key_4hex;
pub use log::log_init;
pub use log::obscure_error;
pub use log::obscure_error_str;
pub use progress::LoadProgress;
pub use io::load_node_list;
pub use io::load_node_id;
pub use io::conv_file_open_err;
pub use io::FileIOError;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/bin/stress.rs | lib/src/bin/stress.rs | #![allow(unused_imports)]
use ate::prelude::*;
use serde::{Deserialize, Serialize};
use tracing::info;
/// Payload used to generate write traffic for the stress test; the fixed
/// `u128` array pads each record with several hundred bytes of data.
#[derive(Debug, Serialize, Deserialize, Clone)]
struct MyTestObject {
    firstname: String,
    lastname: String,
    data: [u128; 32],
    lines: Vec<String>,
}
// Without the server feature this binary has nothing to do.
#[cfg(not(feature = "server"))]
fn main() {}

/// Stress test: writes 200 commits of 500 records each into a local chain,
/// then reopens the chain and destroys it (cleaning up the on-disk state).
#[cfg(feature = "server")]
#[cfg_attr(feature = "enable_mt", tokio::main(flavor = "multi_thread"))]
#[cfg_attr(not(feature = "enable_mt"), tokio::main(flavor = "current_thread"))]
async fn main() -> Result<(), AteError> {
    ate::log_init(0, true);

    // The default configuration will store the redo log locally in the temporary folder
    let mut cfg_ate = ConfAte::default();
    cfg_ate.configured_for(ConfiguredFor::BestPerformance);
    let builder = ChainBuilder::new(&cfg_ate).await.build();

    {
        // We create a chain with a specific key (this is used for the file name it creates)
        let chain = builder.open(&ChainKey::from("stress")).await?;

        // Prepare a representative record to write repeatedly
        let session = AteSessionUser::new();
        let mut test_obj = MyTestObject {
            firstname: "Joe".to_string(),
            lastname: "Blogs".to_string(),
            data: [123 as u128; 32],
            lines: Vec::new(),
        };
        for n in 0..10 {
            test_obj.lines.push(format!("test {}", n));
        }

        // Do a whole lot of work: 200 commits of 500 stores each
        info!("stress::running");
        for _ in 0..200 {
            let dio = chain.dio_mut(&session).await;
            for _ in 0..500 {
                dio.store(test_obj.clone())?;
            }
            dio.commit().await?;
        }
        info!("stress::finished");
    }

    {
        // We create a chain with a specific key (this is used for the file name it creates)
        let chain = builder.open(&ChainKey::from("stress")).await?;

        // Destroy the chain
        chain.single().await.destroy().await.unwrap();
    }

    Ok(())
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/log_memdb.rs | lib/src/redo/log_memdb.rs | use async_trait::async_trait;
use bytes::*;
use error_chain::bail;
use fxhash::FxHashMap;
use std::pin::Pin;
use tokio::io::Result;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::error::*;
use crate::event::*;
use crate::loader::*;
use crate::spec::*;
use crate::{crypto::*, redo::LogLookup};
use super::*;
/// A RAM-only implementation of the redo-log backing store.
pub(super) struct LogFileMemDb {
    /// Monotonic counter; the offset assigned to the next written record.
    pub(crate) offset: u64,
    /// Maps event hashes to the location of their entry in `memdb`.
    pub(crate) lookup: FxHashMap<AteHash, LogLookup>,
    /// The stored log entries, keyed by their lookup handle.
    pub(crate) memdb: FxHashMap<LogLookup, LogEntry>,
    /// Raw header bytes associated with this log.
    pub(crate) header: Vec<u8>,
}
impl LogFileMemDb {
    /// Creates an empty in-memory log that carries the supplied header
    /// bytes.
    pub(super) async fn new(header_bytes: Vec<u8>) -> Result<Box<LogFileMemDb>> {
        Ok(Box::new(LogFileMemDb {
            offset: 0u64,
            lookup: FxHashMap::default(),
            memdb: FxHashMap::default(),
            header: header_bytes,
        }))
    }
}
#[async_trait]
impl LogFile for LogFileMemDb {
    /// Rotation is a no-op for a RAM-only log besides adopting the new
    /// header bytes.
    #[cfg(feature = "enable_rotate")]
    async fn rotate(&mut self, header_bytes: Vec<u8>) -> Result<()> {
        self.header = header_bytes;
        Ok(())
    }

    /// Nothing to back up for a RAM-only log; returns a future that
    /// resolves immediately.
    fn backup(
        &mut self,
        _include_active_files: bool,
    ) -> Result<Pin<Box<dyn futures::Future<Output = Result<()>> + Send + Sync>>> {
        let ret = async move { Ok(()) };
        Ok(Box::pin(ret))
    }

    /// Produces an independent deep copy of this log.
    async fn copy(&mut self) -> Result<Box<dyn LogFile>> {
        Ok(Box::new(LogFileMemDb {
            offset: self.offset,
            lookup: self.lookup.clone(),
            memdb: self.memdb.clone(),
            header: self.header.clone(),
        }))
    }

    /// Appends an event to the in-memory log, indexing it by its header
    /// hash, and returns the handle of the stored entry.
    async fn write(
        &mut self,
        evt: &EventWeakData,
    ) -> std::result::Result<LogLookup, SerializationError> {
        // Serialize the header (this also yields the event hash)
        let header = evt.as_header_raw()?;
        let lookup = LogLookup {
            index: 0u32,
            offset: self.offset,
        };
        self.offset += 1;

        // Record the lookup map
        self.lookup.insert(header.event_hash, lookup);

        #[cfg(feature = "enable_verbose")]
        debug!("log-write: {} - {:?}", header.event_hash, lookup);
        #[cfg(feature = "enable_super_verbose")]
        debug!("log-write: {:?} - {:?}", header, evt);

        // As we are running as a memory datachain, store the record in RAM
        self.memdb.insert(
            lookup,
            LogEntry {
                header: LogHeader {
                    offset: lookup.offset,
                    format: evt.format,
                },
                meta: header.meta_bytes.to_vec(),
                data: evt.data_bytes.clone().to_log_data(),
            },
        );

        // Return the result
        Ok(lookup)
    }

    /// Copies a single event (identified by hash) out of another log file
    /// into this one, returning the new local handle.
    async fn copy_event(
        &mut self,
        from_log: &Box<dyn LogFile>,
        hash: AteHash,
    ) -> std::result::Result<LogLookup, LoadError> {
        // Load the data from the source log file
        let result = from_log.load(&hash).await?;

        // Allocate the next local slot
        let lookup = LogLookup {
            index: 0u32,
            offset: self.offset,
        };
        self.offset += 1;

        // Record the lookup map
        self.lookup.insert(hash.clone(), lookup);

        // Insert the data
        self.memdb.insert(
            lookup,
            LogEntry {
                header: LogHeader {
                    offset: lookup.offset,
                    format: result.data.format,
                },
                meta: result.header.meta_bytes.to_vec(),
                data: result.data.data_bytes.clone().to_log_data(),
            },
        );

        Ok(lookup)
    }

    /// Loads an event by hash, rebuilding the deserialized header and weak
    /// event data from the stored bytes.
    async fn load(&self, hash: &AteHash) -> std::result::Result<LoadData, LoadError> {
        // Lookup the record in the redo log
        let lookup = match self.lookup.get(hash) {
            Some(a) => a.clone(),
            None => {
                bail!(LoadErrorKind::NotFoundByHash(hash.clone()));
            }
        };
        let _offset = lookup.offset;

        // As we are running as a memory datachain, just look up the value
        let result = match self.memdb.get(&lookup) {
            Some(a) => std::result::Result::<LogEntry, LoadError>::Ok(a.clone()),
            None => Err(LoadErrorKind::NotFoundByHash(hash.clone()).into()),
        }?;

        // Hash the body
        let data_hash = result.data.hash();
        let data_size = result.data.size();

        // Convert the result into a deserialized result
        let meta = result.header.format.meta.deserialize_ref(&result.meta[..])
            .map_err(SerializationError::from)?;
        let ret = LoadData {
            header: EventHeaderRaw::new(
                AteHash::from_bytes(&result.meta[..]),
                Bytes::from(result.meta),
                data_hash,
                data_size,
                result.header.format,
            ),
            data: EventWeakData {
                meta,
                data_bytes: match result.data {
                    LogData::Some(data) => MessageBytes::Some(Bytes::from(data)),
                    LogData::LazySome(l) => MessageBytes::LazySome(l),
                    LogData::None => MessageBytes::None,
                },
                format: result.header.format,
            },
            lookup,
        };
        assert_eq!(hash.to_string(), ret.header.event_hash.to_string());

        Ok(ret)
    }

    /// Overwrites the payload bytes of already-indexed records (used to
    /// hydrate lazily-loaded data); unknown hashes are silently ignored.
    fn prime(&mut self, records: Vec<(AteHash, Option<Bytes>)>) {
        for (record, data) in records {
            if let Some(lookup) = self.lookup.get(&record) {
                if let Some(entry) = self.memdb.get_mut(lookup) {
                    entry.data = match data {
                        Some(a) => LogData::Some(a.to_vec()),
                        None => LogData::None
                    };
                }
            }
        }
    }

    /// Nothing is buffered, so flushing is a no-op.
    async fn flush(&mut self) -> Result<()> {
        Ok(())
    }

    /// Number of records currently indexed.
    fn count(&self) -> usize {
        self.lookup.len()
    }

    /// The "size" of a RAM-only log is the number of slots allocated.
    fn size(&self) -> u64 {
        self.offset
    }

    /// A RAM-only log has exactly one (implicit) archive, index 0.
    fn index(&self) -> u32 {
        0u32
    }

    fn offset(&self) -> u64 {
        self.offset
    }

    /// Returns the header bytes (the archive index is irrelevant here).
    fn header(&self, _index: u32) -> Vec<u8> {
        self.header.clone()
    }

    /// Nothing on disk to destroy.
    fn destroy(&mut self) -> Result<()> {
        Ok(())
    }

    /// No backing file, so moving is a no-op.
    fn move_log_file(&mut self, _new_path: &String) -> Result<()> {
        Ok(())
    }

    /// Begins a flip (compaction) by creating a fresh empty in-memory log.
    async fn begin_flip(&self, header_bytes: Vec<u8>) -> Result<Box<dyn LogFile>> {
        Ok(LogFileMemDb::new(header_bytes).await?)
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/test.rs | lib/src/redo/test.rs | #![cfg(test)]
use bytes::Bytes;
use tokio::runtime::Runtime;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::chain::*;
use crate::crypto::*;
use crate::event::*;
use crate::header::*;
use crate::meta::*;
use crate::spec::*;
use super::api::LogWritable;
use super::core::RedoLog;
#[cfg(feature = "enable_local_fs")]
use super::flags::OpenFlags;
/*
TESTS
*/
/// Test helper: writes one event (with an Author metadata record) to the
/// log and returns the event's hash; optionally flushes afterwards.
#[cfg(test)]
async fn test_write_data(
    log: &mut dyn LogWritable,
    key: PrimaryKey,
    body: Option<Vec<u8>>,
    flush: bool,
    format: MessageFormat,
) -> AteHash {
    let mut meta = Metadata::for_data(key);
    meta.core
        .push(CoreMetadata::Author("test@nowhere.com".to_string()));

    // Write some data to the flipped buffer
    let body = body.map(Bytes::from);
    let evt = EventWeakData {
        meta: meta,
        data_bytes: match body {
            Some(data) => MessageBytes::Some(data),
            None => MessageBytes::None,
        },
        format: format,
    };

    let hash = evt.as_header_raw().unwrap().event_hash;
    let _ = log.write(&evt).await.expect("Failed to write the object");
    if flush {
        let _ = log.flush().await;
    }

    hash
}
/// Test helper: loads the event with hash `read_header` and asserts its
/// metadata and payload match what `test_write_data` would have stored for
/// `test_key`/`test_body`.
#[cfg(test)]
async fn test_read_data(
    log: &mut RedoLog,
    read_header: AteHash,
    test_key: PrimaryKey,
    test_body: Option<Vec<u8>>,
    format: MessageFormat,
) {
    let result = log
        .load(read_header.clone())
        .await
        // unwrap_or_else so the panic message is only built on failure
        .unwrap_or_else(|_| panic!("Failed to read the entry {:?}", read_header));

    // Rebuild the metadata exactly as test_write_data constructs it
    let mut meta = Metadata::for_data(test_key);
    meta.core
        .push(CoreMetadata::Author("test@nowhere.com".to_string()));
    let meta_bytes = Bytes::from(format.meta.serialize(&meta).unwrap());

    let test_body = test_body.map(Bytes::from);

    assert_eq!(meta_bytes, result.header.meta_bytes);
    assert_eq!(test_body, result.data.data_bytes.to_option());
}
/// End-to-end exercise of the redo log: write/read round-trips, a flip
/// (compaction) with concurrent writes, then a reopen to verify that only
/// the post-flip state plus later writes were persisted.
#[test]
fn test_redo_log() {
    crate::utils::bootstrap_test_env();

    let rt = Runtime::new().unwrap();

    let blah1 = PrimaryKey::generate();
    let blah2 = PrimaryKey::generate();
    let blah3 = PrimaryKey::generate();
    let blah4 = PrimaryKey::generate();
    let blah5 = PrimaryKey::generate();
    let blah6 = PrimaryKey::generate();
    let blah7 = PrimaryKey::generate();

    rt.block_on(async {
        let mock_cfg = crate::conf::tests::mock_test_config();
        #[allow(unused_variables)]
        let mock_chain_key = ChainKey::default().with_temp_name("test_redo".to_string());
        {
            // Open the log once for writing
            println!("test_redo_log - creating the redo log");
            #[cfg(feature = "enable_local_fs")]
            let (mut rl, _) = RedoLog::open(
                &mock_cfg,
                &mock_chain_key,
                OpenFlags::create_centralized_server(),
                Vec::new(),
            )
            .await
            .expect("Failed to load the redo log");
            #[cfg(not(feature = "enable_local_fs"))]
            let mut rl = RedoLog::open(Vec::new())
                .await
                .expect("Failed to load the redo log");

            // Test that its empty
            println!("test_redo_log - confirming no more data");
            assert_eq!(0, rl.count());

            // First test a simple case of a push and read
            println!("test_redo_log - writing test data to log - blah1");
            let halb1 =
                test_write_data(&mut rl, blah1, Some(vec![1; 10]), true, mock_cfg.log_format).await;
            assert_eq!(1, rl.count());
            println!("test_redo_log - testing read result of blah1");
            test_read_data(
                &mut rl,
                halb1,
                blah1,
                Some(vec![1; 10]),
                mock_cfg.log_format,
            )
            .await;

            // Now we push some data in to get ready for more tests
            // NOTE(review): the message below says "blah3" but this write is
            // for blah2 - the label looks copy-pasted from the next write.
            println!("test_redo_log - writing test data to log - blah3");
            let halb2 = test_write_data(&mut rl, blah2, None, true, mock_cfg.log_format).await;
            assert_eq!(2, rl.count());
            println!("test_redo_log - writing test data to log - blah3");
            let _ =
                test_write_data(&mut rl, blah3, Some(vec![3; 10]), true, mock_cfg.log_format).await;
            assert_eq!(3, rl.count());

            // Begin an operation to flip the redo log
            println!("test_redo_log - beginning the flip operation");
            let mut flip = rl.begin_flip(Vec::new()).await.unwrap();

            // Read the earlier pushed data
            println!("test_redo_log - testing read result of blah2");
            test_read_data(&mut rl, halb2, blah2, None, mock_cfg.log_format).await;

            // Write some data to the redo log and the backing redo log
            println!("test_redo_log - writing test data to flip - blah1 (again)");
            let _ = test_write_data(
                &mut flip,
                blah1,
                Some(vec![10; 10]),
                true,
                mock_cfg.log_format,
            )
            .await;
            assert_eq!(1, flip.count());
            assert_eq!(3, rl.count());
            #[allow(unused_variables)]
            let halb4 = test_write_data(
                &mut flip,
                blah4,
                Some(vec![4; 10]),
                true,
                mock_cfg.log_format,
            )
            .await;
            assert_eq!(2, flip.count());
            assert_eq!(3, rl.count());

            // Writes to the main log during a flip are deferred
            println!("test_redo_log - writing test data to log - blah5");
            let halb5 =
                test_write_data(&mut rl, blah5, Some(vec![5; 10]), true, mock_cfg.log_format).await;
            assert_eq!(4, rl.count());

            // The deferred writes do not take place until after the flip ends
            assert_eq!(2, flip.count());

            // End the flip operation; the main log now holds the flip's
            // contents (blah1', blah4) plus the deferred blah5
            println!("test_redo_log - finishing the flip operation");
            rl.finish_flip(flip, |_, _| {})
                .await
                .expect("Failed to end the flip operation");
            assert_eq!(3, rl.count());

            // Write some more data
            println!("test_redo_log - writing test data to log - blah6");
            let halb6 = test_write_data(
                &mut rl,
                blah6,
                Some(vec![6; 10]),
                false,
                mock_cfg.log_format,
            )
            .await;
            assert_eq!(4, rl.count());

            // Attempt to read the log entry
            rl.load(halb5.clone())
                .await
                .expect("This entry should be readable");

            // Reading blah6 before an explicit flush must still work
            rl.load(halb6.clone())
                .await
                .expect("The log file read should have worked now");

            println!("test_redo_log - closing redo log");
        }

        {
            // Open it up again which should check that it loads data properly
            println!("test_redo_log - reopening the redo log");
            #[cfg(feature = "enable_local_fs")]
            let (mut rl, mut loader) = RedoLog::open(
                &mock_cfg,
                &mock_chain_key,
                OpenFlags::open_centralized_server(),
                Vec::new(),
            )
            .await
            .expect("Failed to load the redo log");
            #[cfg(not(feature = "enable_local_fs"))]
            let mut rl = RedoLog::open(Vec::new())
                .await
                .expect("Failed to load the redo log");

            #[cfg(feature = "enable_local_fs")]
            {
                // Check that the correct data is read, in write order:
                // post-flip blah1, blah4, then blah5, then blah6
                println!("test_redo_log - testing read result of blah1 (again)");
                test_read_data(
                    &mut rl,
                    loader.pop_front().unwrap().header.event_hash,
                    blah1,
                    Some(vec![10; 10]),
                    mock_cfg.log_format,
                )
                .await;
                println!("test_redo_log - testing read result of blah4");
                test_read_data(
                    &mut rl,
                    loader.pop_front().unwrap().header.event_hash,
                    blah4,
                    Some(vec![4; 10]),
                    mock_cfg.log_format,
                )
                .await;
                println!("test_redo_log - testing read result of blah5");
                test_read_data(
                    &mut rl,
                    loader.pop_front().unwrap().header.event_hash,
                    blah5,
                    Some(vec![5; 10]),
                    mock_cfg.log_format,
                )
                .await;
                println!("test_redo_log - testing read result of blah6");
                test_read_data(
                    &mut rl,
                    loader.pop_front().unwrap().header.event_hash,
                    blah6,
                    Some(vec![6; 10]),
                    mock_cfg.log_format,
                )
                .await;
                println!("test_redo_log - confirming no more data");
                assert_eq!(loader.pop_front().is_none(), true);
            }

            // Write some data to the redo log and the backing redo log
            println!("test_redo_log - writing test data to log - blah7");
            let halb7 =
                test_write_data(&mut rl, blah7, Some(vec![7; 10]), true, mock_cfg.log_format).await;
            #[cfg(feature = "enable_local_fs")]
            assert_eq!(5, rl.count());
            #[cfg(not(feature = "enable_local_fs"))]
            assert_eq!(1, rl.count());

            // Read the test data again
            println!("test_redo_log - testing read result of blah7");
            test_read_data(
                &mut rl,
                halb7,
                blah7,
                Some(vec![7; 10]),
                mock_cfg.log_format,
            )
            .await;

            println!("test_redo_log - confirming no more data");
            #[cfg(feature = "enable_local_fs")]
            assert_eq!(5, rl.count());

            rl.destroy().unwrap();
        }
    });
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/log_localfs.rs | lib/src/redo/log_localfs.rs | use async_trait::async_trait;
use error_chain::bail;
use std::pin::Pin;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use bytes::Bytes;
#[allow(unused_imports)]
#[cfg(feature = "enable_caching")]
use cached::Cached;
#[cfg(feature = "enable_caching")]
use cached::*;
use fxhash::FxHashMap;
#[cfg(feature = "enable_caching")]
use std::sync::Mutex as MutexSync;
use tokio::io::ErrorKind;
use tokio::io::Result;
use crate::error::*;
use crate::event::*;
use crate::loader::*;
use crate::spec::*;
use crate::{crypto::*, redo::LogLookup};
use super::appender::*;
use super::archive::*;
use super::magic::*;
use super::*;
/// In-memory caches layered over the on-disk log.
#[cfg(feature = "enable_caching")]
pub(crate) struct LogFileCache {
    /// Records written but possibly not yet flushed to disk; always hit
    /// first on load.
    pub(crate) flush: FxHashMap<AteHash, LoadData>,
    /// Recently written records, evicted by size and age.
    pub(crate) write: TimedSizedCache<AteHash, LoadData>,
    /// Recently read records, evicted by size and age.
    pub(crate) read: TimedSizedCache<AteHash, LoadData>,
}
/// A redo-log backing store persisted on the local filesystem as a series
/// of numbered archive files plus one active appender file.
pub(super) struct LogFileLocalFs {
    /// Base path of the log; archives live at "{log_path}.{index}".
    pub(crate) log_path: String,
    /// Optional base path where backups are written/restored.
    pub(crate) backup_path: Option<String>,
    /// When true the file is deleted immediately after opening (temp log).
    pub(crate) temp: bool,
    /// Maps event hashes to their (archive index, offset) location.
    pub(crate) lookup: FxHashMap<AteHash, LogLookup>,
    /// Appender for the currently-active (highest-index) log file.
    pub(crate) appender: LogAppender,
    /// All archive files, keyed by index (includes the active one).
    pub(crate) archives: FxHashMap<u32, LogArchive>,
    #[cfg(feature = "enable_caching")]
    pub(crate) cache: MutexSync<LogFileCache>,
}
impl LogFileLocalFs {
    /// Opens (or creates) a local-filesystem log at `path_log`.
    ///
    /// Restores any backup files from `restore_path` first, then registers
    /// every existing archive file, and finally creates the appender for
    /// the active (highest-index) file. Temporary logs refuse to coexist
    /// with backups or archives, and their active file is unlinked right
    /// after creation.
    pub(super) async fn new(
        temp_file: bool,
        read_only: bool,
        path_log: String,
        backup_path: Option<String>,
        restore_path: Option<String>,
        truncate: bool,
        _cache_size: usize,
        _cache_ttl: u64,
        header_bytes: Vec<u8>,
    ) -> Result<Box<LogFileLocalFs>> {
        debug!("open at {}", path_log);

        // Load all the archives
        let mut archives = FxHashMap::default();
        let mut n = 0 as u32;

        // If there are any backups then restore them and mark them as an
        // archive file
        if let Some(restore_path) = &restore_path {
            // NOTE: this `n` shadows the outer one; the archive-scan loop
            // below still starts from the outer n == 0.
            let mut n = 0 as u32;
            loop {
                let source_path = format!("{}.{}", restore_path, n);
                let source = std::path::Path::new(source_path.as_str());
                if source.exists() == false {
                    break;
                }

                let dest_path = format!("{}.{}", path_log, n);
                let dest = std::path::Path::new(dest_path.as_str());
                // NOTE(review): this skips restoring when the backup is
                // LARGER than the local file - confirm the comparison is
                // not meant to be the other way around.
                if dest.exists() == true && source.metadata()?.len() > dest.metadata()?.len() {
                    n = n + 1;
                    continue;
                }

                // If its a temp file then fail as this would be unsupported behaviour
                if temp_file {
                    return Err(tokio::io::Error::new(
                        ErrorKind::AlreadyExists,
                        "Can not start a temporary redo log when there are existing backup files.",
                    ));
                }

                // We stage the file copy first so that if its interrupted that it will
                // not cause a partially copied log file to be loaded or the restoration
                // process from trying again
                // NOTE(review): the staging file is built from `restore_path`
                // but renamed onto a `path_log` destination - presumably it
                // should be staged next to the destination; verify (a rename
                // can also fail if the two paths are on different
                // filesystems).
                let dest_stage_path = format!("{}.{}.staged", restore_path, n);
                let dest_stage = std::path::Path::new(dest_stage_path.as_str());
                if let Err(err) = std::fs::copy(source, dest_stage) {
                    warn!("error while restoring log file({}) - {}", source_path, err);
                    return Err(err);
                }
                std::fs::rename(dest_stage, dest)?;

                // Add the file as pure archive with no appender
                archives.insert(n, LogArchive::new(path_log.clone(), n).await?);
                n = n + 1;
            }
        }

        // Now load any archives that exist but have not yet been loaded, archives
        // exist when there is more than one file remaining thus the very last
        // file is actually considered the active log file.
        loop {
            // If the next file does not exist then there are no more archives
            let test = format!("{}.{}", path_log.clone(), n + 1);
            if std::path::Path::new(test.as_str()).exists() == false {
                break;
            }

            // If its a temp file then fail as this would be unsupported behaviour
            if temp_file {
                return Err(tokio::io::Error::new(
                    ErrorKind::AlreadyExists,
                    "Can not start a temporary redo log when there are existing archives.",
                ));
            }

            // Add the file as pure archive with no appender
            archives.insert(n, LogArchive::new(path_log.clone(), n).await?);
            n = n + 1;
        }

        // Create the log appender
        let (appender, archive) =
            LogAppender::new(path_log.clone(), truncate, read_only, n, &header_bytes[..]).await?;
        archives.insert(n, archive);

        // If we are temporary log file then kill the file
        // (the open handle keeps it usable until closed)
        if temp_file && read_only == false {
            let _ = std::fs::remove_file(appender.path());
        }

        // Log file
        let ret = LogFileLocalFs {
            log_path: path_log,
            backup_path: backup_path,
            temp: temp_file,
            lookup: FxHashMap::default(),
            appender,
            #[cfg(feature = "enable_caching")]
            cache: MutexSync::new(LogFileCache {
                flush: FxHashMap::default(),
                read: TimedSizedCache::with_size_and_lifespan(_cache_size, _cache_ttl),
                write: TimedSizedCache::with_size_and_lifespan(_cache_size, _cache_ttl),
            }),
            archives,
        };

        Ok(Box::new(ret))
    }

    /// Read all the log files from all the archives including the current one representing the appender
    ///
    /// Feeds every record to `loader`, builds the in-memory hash lookup
    /// table, and returns the number of records successfully read.
    /// Individual unreadable records are logged and skipped.
    pub(super) async fn read_all(
        &mut self,
        mut loader: Box<impl Loader>,
    ) -> std::result::Result<usize, SerializationError> {
        let mut lookup = FxHashMap::default();

        let archives = self.archives.values_mut().collect::<Vec<_>>();

        // Announce the total record count first so progress reporting works
        let mut total: usize = 0;
        for archive in archives.iter() {
            total = total + archive.len().await? as usize;
        }
        loader.start_of_history(total).await;

        let mut cnt: usize = 0;
        for archive in archives {
            let mut lock = archive.lock_at(0).await?;

            // Every archive starts with a redo header; an empty file is
            // skipped with a warning
            let _version = match RedoHeader::read(&mut lock).await? {
                Some(a) => a,
                None => {
                    warn!("log-read-error: log file is empty");
                    continue;
                }
            };

            loop {
                match LogFileLocalFs::read_once_internal(&mut lock).await {
                    Ok(Some(head)) => {
                        #[cfg(feature = "enable_super_verbose")]
                        trace!("log-read: {:?}", head);

                        lookup.insert(head.header.event_hash, head.lookup);

                        loader.feed_load_data(head).await;
                        cnt = cnt + 1;
                    }
                    Ok(None) => break,
                    Err(err) => {
                        // A bad record is skipped, not fatal
                        debug!("log-load-error: {}", err.to_string());
                        continue;
                    }
                }
            }
        }

        // Later archives override earlier ones for duplicate hashes
        for (v, k) in lookup.into_iter() {
            self.lookup.insert(v, k);
        }

        loader.end_of_history().await;

        Ok(cnt)
    }

    /// Reads the next event at the guard's current position, returning
    /// `Ok(None)` at end-of-file. The returned `LoadData` carries the raw
    /// header, the (possibly lazy) payload and its (index, offset) lookup.
    async fn read_once_internal(
        guard: &mut LogArchiveGuard<'_>,
    ) -> std::result::Result<Option<LoadData>, SerializationError> {
        let offset = guard.offset();

        #[cfg(feature = "enable_super_verbose")]
        info!("log-read-event: offset={}", offset);

        // Read the log event
        let evt = match EventVersion::read(guard).await? {
            Some(e) => e,
            None => {
                return Ok(None);
            }
        };

        // Deserialize the meta bytes into a metadata object
        let meta = evt.header.format.meta.deserialize_ref(&evt.meta[..])
            .map_err(SerializationError::from)?;
        let data_hash = evt.data.hash();
        let data_size = evt.data.size();

        // Record the lookup map
        let header = EventHeaderRaw::new(
            AteHash::from_bytes(&evt.meta[..]),
            Bytes::from(evt.meta),
            data_hash,
            data_size,
            evt.header.format,
        );

        Ok(Some(LoadData {
            header,
            data: EventWeakData {
                meta: meta,
                data_bytes: match evt.data {
                    LogData::Some(data) => MessageBytes::Some(Bytes::from(data)),
                    LogData::LazySome(l) => MessageBytes::LazySome(l),
                    LogData::None => MessageBytes::None,
                },
                format: evt.header.format,
            },
            lookup: LogLookup {
                index: guard.index(),
                offset,
            },
        }))
    }
}
#[async_trait]
impl LogFile for LogFileLocalFs {
#[cfg(feature = "enable_rotate")]
async fn rotate(&mut self, header_bytes: Vec<u8>) -> Result<()> {
// If this a temporary file then fail
if self.temp {
return Err(tokio::io::Error::new(ErrorKind::PermissionDenied, "Can not rotate a temporary redo log - only persistent logs support this behaviour."));
}
// Flush and close and increment the log index
self.appender.sync().await?;
let next_index = self.appender.index + 1;
// Create a new appender
let (new_appender, new_archive) = LogAppender::new(
self.log_path.clone(),
false,
false,
next_index,
&header_bytes[..],
)
.await?;
// Set the new appender
self.archives.insert(next_index, new_archive);
self.appender = new_appender;
// Success
Ok(())
}
fn backup(
&mut self,
include_active_files: bool,
) -> Result<Pin<Box<dyn futures::Future<Output = Result<()>> + Send + Sync>>> {
// If this a temporary file then fail
if self.temp {
return Err(tokio::io::Error::new(ErrorKind::PermissionDenied, "Can not backup a temporary redo log - only persistent logs support this behaviour."));
}
// Make the actual backups but do it asynchronously
let mut delayed = Vec::new();
if let Some(restore_path) = &self.backup_path {
let end = if include_active_files {
self.appender.index + 1
} else {
self.appender.index
};
let mut n = 0 as u32;
while n < end {
let source_path = format!("{}.{}", self.log_path, n);
let source = std::path::Path::new(source_path.as_str());
if source.exists() == false {
break;
}
let dest_path = format!("{}.{}", restore_path, n);
let dest = std::path::Path::new(dest_path.as_str());
if dest.exists() == true && source.metadata()?.len() > dest.metadata()?.len() {
n = n + 1;
continue;
}
let dest_stage_path = format!("{}.{}.staged", restore_path, n);
delayed.push(async move {
let source = std::path::Path::new(source_path.as_str());
let dest = std::path::Path::new(dest_path.as_str());
let dest_stage = std::path::Path::new(dest_stage_path.as_str());
tokio::fs::copy(source, dest_stage).await?;
std::fs::rename(dest_stage, dest)?;
Ok(())
});
n = n + 1;
}
}
// Return a future that will complete all the IO copy operations
// (this is done outside this function to prevent the backup operation
// from freezing the datachain while its executing)
let ret = async move {
for delayed in delayed {
if let Err(err) = delayed.await {
warn!("error while backing up log file - {}", err);
return Err(err);
}
}
Ok(())
};
Ok(Box::pin(ret))
}
async fn copy(&mut self) -> Result<Box<dyn LogFile>> {
// Copy all the archives
let mut log_archives = FxHashMap::default();
for (k, v) in self.archives.iter() {
log_archives.insert(k.clone(), v.clone().await?);
}
#[cfg(feature = "enable_caching")]
let cache = {
let cache = self.cache.lock().unwrap();
MutexSync::new(LogFileCache {
flush: cache.flush.clone(),
read: cached::TimedSizedCache::with_size_and_lifespan(
cache.read.cache_capacity().unwrap(),
cache.read.cache_lifespan().unwrap(),
),
write: cached::TimedSizedCache::with_size_and_lifespan(
cache.write.cache_capacity().unwrap(),
cache.write.cache_lifespan().unwrap(),
),
})
};
Ok(Box::new(LogFileLocalFs {
log_path: self.log_path.clone(),
backup_path: self.backup_path.clone(),
temp: self.temp,
lookup: self.lookup.clone(),
appender: self.appender.clone().await?,
#[cfg(feature = "enable_caching")]
cache,
archives: log_archives,
}))
}
async fn write(
&mut self,
evt: &EventWeakData,
) -> std::result::Result<LogLookup, SerializationError> {
// Write the appender
let header = evt.as_header_raw()?;
#[cfg(feature = "enable_local_fs")]
let lookup = self.appender.write(evt, &header).await?;
// Record the lookup map
self.lookup.insert(header.event_hash, lookup);
#[cfg(feature = "enable_verbose")]
trace!("log-write: {} - {:?}", header.event_hash, lookup);
#[cfg(feature = "enable_super_verbose")]
trace!("log-write: {:?} - {:?}", header, evt);
// Cache the data
#[cfg(feature = "enable_caching")]
{
let mut cache = self.cache.lock().unwrap();
cache.flush.insert(
header.event_hash,
LoadData {
lookup,
header,
data: evt.clone(),
},
);
}
// Return the result
Ok(lookup)
}
async fn copy_event(
&mut self,
from_log: &Box<dyn LogFile>,
hash: AteHash,
) -> std::result::Result<LogLookup, LoadError> {
// Load the data from the log file
let result = from_log.load(&hash).await?;
// Write it to the local log
let lookup = self.appender.write(&result.data, &result.header).await?;
// Record the lookup map
self.lookup.insert(hash.clone(), lookup);
// Cache the data
#[cfg(feature = "enable_caching")]
{
let mut cache = self.cache.lock().unwrap();
cache.flush.insert(
hash.clone(),
LoadData {
header: result.header,
lookup,
data: result.data,
},
);
}
Ok(lookup)
}
/// Loads an event by hash: first from the in-memory caches, then by seeking
/// into the archive file recorded in the lookup index.
///
/// Cache order is flush (pending writes), read (recently loaded), then write
/// (recently flushed; entries are *removed* on hit since the TimedSizedCache
/// is only keeping them warm until their TTL). On a disk hit the record is
/// deserialized, re-hashed and inserted into the read cache.
async fn load(&self, hash: &AteHash) -> std::result::Result<LoadData, LoadError> {
    // Check the caches
    #[cfg(feature = "enable_caching")]
    {
        let mut cache = self.cache.lock().unwrap();
        if let Some(result) = cache.flush.get(hash) {
            return Ok(result.clone());
        }
        if let Some(result) = cache.read.cache_get(hash) {
            return Ok(result.clone());
        }
        // write-cache entries are consumed on hit (no clone needed)
        if let Some(result) = cache.write.cache_remove(hash) {
            return Ok(result);
        }
    }
    // Lookup the record in the redo log
    let lookup = match self.lookup.get(hash) {
        Some(a) => a.clone(),
        None => {
            bail!(LoadErrorKind::NotFoundByHash(hash.clone()));
        }
    };
    let _offset = lookup.offset;
    // Load the archive that holds this record (lookup.index selects the file)
    let archive = match self.archives.get(&lookup.index) {
        Some(a) => a,
        None => {
            bail!(LoadErrorKind::NotFoundByHash(hash.clone()));
        }
    };
    // First read all the data into a buffer; lock_at seeks the shared file
    // handle to the record's offset while the guard is held
    let result = {
        let mut loader = archive.lock_at(_offset).await?;
        match EventVersion::read(&mut loader).await? {
            Some(a) => a,
            None => {
                bail!(LoadErrorKind::NotFoundByHash(hash.clone()));
            }
        }
    };
    // Hash body
    let data_hash = result.data.hash();
    let data_size = result.data.size();
    // Convert the result into a deserialized result
    let meta = result.header.format.meta.deserialize_ref(&result.meta[..])
        .map_err(SerializationError::from)?;
    let ret = LoadData {
        header: EventHeaderRaw::new(
            AteHash::from_bytes(&result.meta[..]),
            Bytes::from(result.meta),
            data_hash,
            data_size,
            result.header.format,
        ),
        data: EventWeakData {
            meta,
            data_bytes: match result.data {
                LogData::Some(data) => MessageBytes::Some(Bytes::from(data)),
                LogData::LazySome(l) => MessageBytes::LazySome(l),
                LogData::None => MessageBytes::None,
            },
            format: result.header.format,
        },
        lookup,
    };
    // Invariant: the hash recomputed from the on-disk bytes must match the
    // hash we were asked for — a mismatch means log corruption (panics)
    assert_eq!(hash.to_string(), ret.header.event_hash.to_string());
    // Store it in the read cache
    #[cfg(feature = "enable_caching")]
    {
        let mut cache = self.cache.lock().unwrap();
        cache.read.cache_set(ret.header.event_hash, ret.clone());
    }
    Ok(ret)
}
/// Re-attaches payload bytes to events already sitting in the read cache
/// (e.g. replacing lazily-loaded bodies). Entries not present in the read
/// cache are ignored. The entire body is compiled out when caching is
/// disabled, making this a no-op in that configuration.
fn prime(&mut self, records: Vec<(AteHash, Option<Bytes>)>) {
    // Store it in the read cache
    #[cfg(feature = "enable_caching")]
    {
        let mut cache = self.cache.lock().unwrap();
        for (record, data) in records {
            if let Some(result) = cache.read.cache_get(&record) {
                // Rebuild the cached event, swapping only the payload;
                // meta and format are preserved from the cached copy
                let mut new_result = result.clone();
                new_result.data = EventWeakData {
                    meta: result.data.meta.clone(),
                    data_bytes: match data {
                        Some(data) => MessageBytes::Some(data),
                        None => MessageBytes::None,
                    },
                    format: result.data.format
                };
                cache.read.cache_set(record, new_result);
            }
        }
    }
}
/// Relocates this log's chain of archive files (`<path>.0`, `<path>.1`, …)
/// to `new_path`, then updates `self.log_path`.
///
/// Three passes: (1) rename any files already at the destination to
/// `.backup.N`, (2) move our files into place, (3) delete the backups.
/// Temporal (in-memory-style) logs skip the file moves entirely.
fn move_log_file(&mut self, new_path: &String) -> Result<()> {
    if self.temp == false {
        // First rename the original logs as a backup
        let mut n = 0;
        loop {
            let path_from = format!("{}.{}", new_path, n);
            let path_to = format!("{}.backup.{}", new_path, n);
            if std::path::Path::new(path_from.as_str()).exists() == false {
                break;
            }
            std::fs::rename(path_from, path_to)?;
            n = n + 1;
        }
        // Move the flipped logs over to replace the originals
        let mut n = 0;
        loop {
            let path_from = format!("{}.{}", self.log_path.clone(), n);
            let path_to = format!("{}.{}", new_path, n);
            if std::path::Path::new(path_from.as_str()).exists() == false {
                break;
            }
            std::fs::rename(path_from, path_to)?;
            n = n + 1;
        }
        // Now delete all the backups
        let mut n = 0;
        loop {
            let path_old = format!("{}.backup.{}", new_path, n);
            if std::path::Path::new(path_old.as_str()).exists() == true {
                std::fs::remove_file(path_old)?;
            } else {
                break;
            }
            n = n + 1;
        }
    }
    self.log_path = new_path.clone();
    Ok(())
}
/// Flushes buffered appender data to disk, then migrates everything from the
/// flush cache to the TTL-bounded write cache (entries naturally expire
/// after their lifespan once safely on disk).
async fn flush(&mut self) -> Result<()> {
    // Snapshot the flush-cache keys first; the lock is not held across the
    // await below
    #[cfg(feature = "enable_caching")]
    let mut keys = Vec::new();
    #[cfg(feature = "enable_caching")]
    {
        let cache = self.cache.lock().unwrap();
        for k in cache.flush.keys() {
            keys.push(k.clone());
        }
    }
    // Flush the data to disk
    self.appender.flush().await?;
    // Move the cache lines into the write cache from the flush cache which
    // will cause them to be released after the TTL is reached
    #[cfg(feature = "enable_caching")]
    {
        let mut cache = self.cache.lock().unwrap();
        for k in keys.into_iter() {
            if let Some(v) = cache.flush.remove(&k) {
                cache.write.cache_set(k, v);
            }
        }
    }
    Ok(())
}
fn count(&self) -> usize {
self.lookup.values().len()
}
/// Net bytes of event data in the active appender file
/// (total offset minus the redo-log header length).
fn size(&self) -> u64 {
    self.appender.offset() - self.appender.header().len() as u64
}
/// Index (the `.N` filename suffix) of the archive currently being appended.
fn index(&self) -> u32 {
    self.appender.index
}
/// Current write position, in bytes, within the active appender file.
fn offset(&self) -> u64 {
    self.appender.offset() as u64
}
/// Returns a copy of the redo-log header bytes for the given archive index.
///
/// `u32::MAX` (or the active appender's own index) selects the live
/// appender; an unknown index yields an empty vector.
fn header(&self, index: u32) -> Vec<u8> {
    if index == u32::MAX || index == self.appender.index {
        Vec::from(self.appender.header())
    } else {
        self.archives
            .get(&index)
            .map(|archive| Vec::from(archive.header()))
            .unwrap_or_default()
    }
}
/// Deletes every archive file in this log's chain (`<log_path>.0`,
/// `<log_path>.1`, …) until the first gap in the numbering.
fn destroy(&mut self) -> Result<()> {
    for n in 0.. {
        let path = format!("{}.{}", self.log_path, n);
        if !std::path::Path::new(path.as_str()).exists() {
            // Chain is contiguous, so the first missing file ends it
            break;
        }
        std::fs::remove_file(path)?;
    }
    Ok(())
}
/// Opens a fresh `.flip` log file (same temp flag, truncated) that will
/// receive the compacted event stream; read-cache capacity/TTL are copied
/// from this file's own cache settings.
async fn begin_flip(&self, header_bytes: Vec<u8>) -> Result<Box<dyn LogFile>> {
    let ret = {
        let path_flip = format!("{}.flip", self.log_path);
        // Mirror this log's read-cache sizing into the flip file
        #[cfg(feature = "enable_caching")]
        let (cache_size, cache_ttl) = {
            let cache = self.cache.lock().unwrap();
            let cache_size = cache.read.cache_capacity().unwrap();
            let cache_ttl = cache.read.cache_lifespan().unwrap();
            (cache_size, cache_ttl)
        };
        // Without caching the values are placeholders (size 0, unlimited TTL)
        #[cfg(not(feature = "enable_caching"))]
        let (cache_size, cache_ttl) = { (0, u64::MAX) };
        LogFileLocalFs::new(
            self.temp,
            false,
            path_flip,
            self.backup_path.clone(),
            None,
            true,
            cache_size,
            cache_ttl,
            header_bytes,
        )
    };
    Ok(ret.await?)
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/log_traits.rs | lib/src/redo/log_traits.rs | use async_trait::async_trait;
use std::pin::Pin;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use tokio::io::Result;
use bytes::Bytes;
use crate::error::*;
use crate::event::*;
use crate::loader::*;
use crate::{crypto::*, redo::LogLookup};
/// Abstraction over a redo-log backing store (local filesystem or
/// in-memory). Implementations must be shareable across threads.
#[async_trait]
pub trait LogFile
where
    Self: Sync + Send,
{
    /// Rotates to a new archive file that starts with `header_bytes`.
    #[cfg(feature = "enable_rotate")]
    async fn rotate(&mut self, header_bytes: Vec<u8>) -> Result<()>;
    /// Starts a backup of the log files; the returned future completes
    /// when the backup has finished.
    fn backup(
        &mut self,
        include_active_files: bool,
    ) -> Result<Pin<Box<dyn futures::Future<Output = Result<()>> + Send + Sync>>>;
    /// Clones this log file handle.
    async fn copy(&mut self) -> Result<Box<dyn LogFile>>;
    /// Appends an event, returning where it was written.
    async fn write(
        &mut self,
        evt: &EventWeakData,
    ) -> std::result::Result<LogLookup, SerializationError>;
    /// Copies one event (by hash) from another log into this one.
    async fn copy_event(
        &mut self,
        from_log: &Box<dyn LogFile>,
        hash: AteHash,
    ) -> std::result::Result<LogLookup, LoadError>;
    /// Loads an event by hash.
    async fn load(&self, hash: &AteHash) -> std::result::Result<LoadData, LoadError>;
    /// Relocates the backing files to a new path.
    fn move_log_file(&mut self, new_path: &String) -> Result<()>;
    /// Opens the flip-side log used during compaction.
    async fn begin_flip(&self, header_bytes: Vec<u8>) -> Result<Box<dyn LogFile>>;
    /// Flushes buffered writes to the backing store.
    async fn flush(&mut self) -> Result<()>;
    /// Number of events indexed in this log.
    fn count(&self) -> usize;
    /// Re-attaches payload bytes to cached events.
    fn prime(&mut self, records: Vec<(AteHash, Option<Bytes>)>);
    /// Bytes of event data in the active file (excluding its header).
    fn size(&self) -> u64;
    /// Index of the active archive file.
    fn index(&self) -> u32;
    /// Current write offset in the active file.
    fn offset(&self) -> u64;
    /// Header bytes for the given archive index.
    fn header(&self, index: u32) -> Vec<u8>;
    /// Deletes the backing files.
    fn destroy(&mut self) -> Result<()>;
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/archive.rs | lib/src/redo/archive.rs | #[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use async_trait::async_trait;
use std::io::SeekFrom;
use std::mem::size_of;
use tokio::fs::File;
use tokio::io::Result;
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt};
use tokio::sync::Mutex;
use tokio::sync::MutexGuard;
use super::magic::*;
use crate::spec::*;
#[derive(Debug)]
pub(crate) struct LogArchive {
pub(crate) index: u32,
pub(crate) path: String,
file: Mutex<File>,
header: Vec<u8>,
}
impl LogArchive {
    /// Opens the read-only archive file `<path>.<index>` and caches its
    /// redo-log header bytes.
    pub async fn new(path: String, index: u32) -> Result<LogArchive> {
        let path = format!("{}.{}", path.clone(), index);
        let log_random_access = tokio::fs::OpenOptions::new()
            .read(true)
            .open(path.clone())
            .await?;
        let mut ret = LogArchive {
            index,
            path,
            header: Vec::new(),
            file: Mutex::new(log_random_access),
        };
        // Read the header once up-front, then rewind so later readers see
        // the file from the start
        ret.header = {
            let mut guard = ret.lock_at(0).await?;
            let r = match RedoHeader::read(&mut guard).await? {
                Some(a) => Vec::from(a.inner().clone()),
                None => Vec::new(),
            };
            guard.seek(0).await?;
            r
        };
        Ok(ret)
    }
    /// Duplicates this archive with an independent file handle
    /// (not `Clone` — cloning a file handle is async and fallible).
    pub async fn clone(&self) -> Result<LogArchive> {
        let log_back = self.file.lock().await.try_clone().await?;
        Ok(LogArchive {
            index: self.index,
            path: self.path.clone(),
            header: self.header.clone(),
            file: Mutex::new(log_back),
        })
    }
    /// Locks the shared file handle and seeks it to `off`; the returned
    /// guard holds the lock for the duration of the read.
    pub async fn lock_at(&self, off: u64) -> Result<LogArchiveGuard<'_>> {
        let mut file = self.file.lock().await;
        file.seek(SeekFrom::Start(off)).await?;
        Ok(LogArchiveGuard {
            index: self.index,
            offset: off,
            file,
        })
    }
    /// Current length of the archive file in bytes.
    pub async fn len(&self) -> Result<u64> {
        Ok(self.file.lock().await.metadata().await?.len())
    }
    /// Cached redo-log header bytes read at open time.
    pub(crate) fn header(&self) -> &[u8] {
        &self.header[..]
    }
}
/// RAII guard over an archive's file handle: holds the mutex and tracks
/// the current seek position while reads/writes are in progress.
#[derive(Debug)]
pub(crate) struct LogArchiveGuard<'a> {
    index: u32,
    offset: u64,
    file: MutexGuard<'a, File>,
}
impl<'a> LogArchiveGuard<'a> {
    /// Index of the archive this guard belongs to.
    pub(super) fn index(&'a self) -> u32 {
        self.index
    }
}
#[async_trait]
impl<'a> LogApi for LogArchiveGuard<'a> {
    /// Position of the next read/write, tracked alongside the file cursor.
    fn offset(&self) -> u64 {
        self.offset
    }
    async fn len(&self) -> Result<u64> {
        let meta = self.file.metadata().await?;
        Ok(meta.len())
    }
    async fn seek(&mut self, off: u64) -> Result<()> {
        self.file.seek(SeekFrom::Start(off)).await?;
        self.offset = off;
        Ok(())
    }
    async fn read_u8(&mut self) -> Result<u8> {
        let val = self.file.read_u8().await?;
        self.offset += size_of::<u8>() as u64;
        Ok(val)
    }
    async fn read_u16(&mut self) -> Result<u16> {
        let val = self.file.read_u16().await?;
        self.offset += size_of::<u16>() as u64;
        Ok(val)
    }
    async fn read_u32(&mut self) -> Result<u32> {
        let val = self.file.read_u32().await?;
        self.offset += size_of::<u32>() as u64;
        Ok(val)
    }
    async fn read_u64(&mut self) -> Result<u64> {
        let val = self.file.read_u64().await?;
        self.offset += size_of::<u64>() as u64;
        Ok(val)
    }
    async fn read_exact(&mut self, buf: &mut [u8]) -> Result<()> {
        let bytes_read = self.file.read_exact(&mut buf[..]).await?;
        self.offset += bytes_read as u64;
        Ok(())
    }
    async fn write_u8(&mut self, val: u8) -> Result<()> {
        self.file.write_u8(val).await?;
        self.offset += size_of::<u8>() as u64;
        Ok(())
    }
    async fn write_u16(&mut self, val: u16) -> Result<()> {
        self.file.write_u16(val).await?;
        self.offset += size_of::<u16>() as u64;
        Ok(())
    }
    async fn write_u32(&mut self, val: u32) -> Result<()> {
        self.file.write_u32(val).await?;
        self.offset += size_of::<u32>() as u64;
        Ok(())
    }
    async fn write_u64(&mut self, val: u64) -> Result<()> {
        self.file.write_u64(val).await?;
        self.offset += size_of::<u64>() as u64;
        Ok(())
    }
    async fn write_exact(&mut self, buf: &[u8]) -> Result<()> {
        self.file.write_all(&buf[..]).await?;
        self.offset += buf.len() as u64;
        Ok(())
    }
    async fn sync(&mut self) -> Result<()> {
        self.file.sync_all().await?;
        Ok(())
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/magic.rs | lib/src/redo/magic.rs | use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use std::convert::TryFrom;
use tokio::io::ErrorKind;
use crate::spec::LogApi;
// Three-byte sentinel that precedes every redo-log header on disk; the
// scanner in RedoHeader::read resynchronizes on this sequence.
static LOG_MAGIC: &'static [u8; 3] = b"RED";
/// Version byte that follows LOG_MAGIC; decoded via TryFromPrimitive so an
/// unknown version fails to parse instead of panicking.
#[derive(Debug, Clone, Copy, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[repr(u8)]
pub enum RedoMagic {
    V2 = b'1',
}
/// Parsed redo-log header: its version and the opaque inner payload bytes.
#[derive(Debug, Clone)]
pub struct RedoHeader {
    magic: RedoMagic,
    inner: Vec<u8>,
}
/// Reads one byte from the log, mapping a clean end-of-file to `Ok(None)`
/// so callers can distinguish EOF from a genuine I/O failure.
///
/// Any other error is re-wrapped with the offset at which the read failed;
/// the original error text is appended so the root cause is not discarded
/// (the previous version dropped it entirely).
async fn read_byte(api: &mut impl LogApi) -> std::result::Result<Option<u8>, tokio::io::Error> {
    match api.read_u8().await {
        Ok(a) => Ok(Some(a)),
        Err(err) => {
            if err.kind() == ErrorKind::UnexpectedEof {
                return Ok(None);
            }
            Err(tokio::io::Error::new(
                tokio::io::ErrorKind::Other,
                format!(
                    "Failed to read the event magic number at 0x{:x} - {}",
                    api.offset(),
                    err
                ),
            ))
        }
    }
}
impl RedoHeader {
    /// Creates an empty header for the given format version.
    #[allow(dead_code)]
    pub fn new(magic: RedoMagic) -> RedoHeader {
        RedoHeader {
            magic,
            inner: Vec::new(),
        }
    }
    /// Reads the header from the log; if none is present, writes a fresh V2
    /// header containing `default_header_bytes` and syncs it to disk.
    /// Returns the header's inner bytes in either case.
    #[allow(dead_code)]
    pub(crate) async fn load(
        api: &mut impl LogApi,
        default_header_bytes: &[u8],
    ) -> Result<Vec<u8>, tokio::io::Error> {
        Ok(match RedoHeader::read(api).await? {
            Some(a) => Vec::from(a.inner().clone()),
            None => {
                let mut magic = RedoHeader::new(RedoMagic::V2);
                magic.set_inner(default_header_bytes);
                let _ = magic.write(api).await?;
                api.sync().await?;
                Vec::from(default_header_bytes)
            }
        })
    }
    /// Scans forward for the `LOG_MAGIC` byte sequence followed by a valid
    /// version byte, then reads the length-prefixed inner payload.
    ///
    /// `n` counts how many magic bytes have matched so far. On a mismatch
    /// mid-sequence (or an unrecognized version byte) the scanner resets
    /// `n` and re-examines the *same* byte as a possible start of the magic
    /// — that is what the inner `loop` with `continue` achieves. Returns
    /// `Ok(None)` when EOF is reached without finding a complete header.
    pub async fn read(api: &mut impl LogApi) -> Result<Option<RedoHeader>, tokio::io::Error> {
        let mut n = 0;
        while let Some(cur) = read_byte(api).await? {
            loop {
                if n < LOG_MAGIC.len() {
                    if cur == LOG_MAGIC[n] {
                        n = n + 1;
                        break;
                    }
                    if n > 0 {
                        // Partial match broken: restart and retest this byte
                        n = 0;
                        continue;
                    }
                    break;
                }
                // All magic bytes matched; `cur` should be the version byte
                match RedoMagic::try_from(cur) {
                    Ok(a) => {
                        let inner = match a {
                            #[allow(deprecated)]
                            RedoMagic::V2 => {
                                // u32 length prefix, then the payload bytes
                                let inner_size = api.read_u32().await?;
                                let mut inner = vec![0 as u8; inner_size as usize];
                                api.read_exact(&mut inner[..]).await?;
                                inner
                            }
                        };
                        return Ok(Some(RedoHeader { magic: a, inner }));
                    }
                    _ => {
                        // Unknown version: resync from this byte onward
                        n = 0;
                        continue;
                    }
                }
            }
        }
        return Ok(None);
    }
    /// Serializes this header: magic bytes, version byte, then (for V2) a
    /// u32 length prefix and the inner payload.
    pub async fn write(self, api: &mut impl LogApi) -> Result<(), tokio::io::Error> {
        api.write_exact(&LOG_MAGIC[..]).await?;
        api.write_u8(self.magic.into()).await?;
        match self.magic {
            RedoMagic::V2 => {
                api.write_u32(self.inner.len() as u32).await?;
                api.write_exact(&self.inner[..]).await?;
            }
        }
        Ok(())
    }
    /// Borrow of the opaque inner payload bytes.
    pub fn inner(&self) -> &[u8] {
        &self.inner[..]
    }
    /// Replaces the inner payload with a copy of `val`.
    pub fn set_inner(&mut self, val: &[u8]) {
        self.inner = Vec::from(val);
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/core.rs | lib/src/redo/core.rs | #![allow(unused_imports)]
use async_trait::async_trait;
#[cfg(feature = "enable_local_fs")]
use std::collections::VecDeque;
use std::pin::Pin;
use tokio::io::Error;
use tokio::io::ErrorKind;
use tokio::io::Result;
use tracing::{debug, error, info, trace, warn};
use bytes::Bytes;
#[cfg(feature = "enable_local_fs")]
use crate::chain::*;
#[cfg(feature = "enable_local_fs")]
use crate::conf::*;
use crate::crypto::*;
use crate::error::*;
use crate::event::*;
use crate::loader::*;
use crate::mesh::BackupMode;
use crate::redo::LogLookup;
use super::api::LogWritable;
#[cfg(feature = "enable_local_fs")]
use super::flags::OpenFlags;
use super::flip::FlippedLogFile;
use super::flip::RedoLogFlip;
#[cfg(feature = "enable_local_fs")]
use super::loader::RedoLogLoader;
#[cfg(feature = "enable_local_fs")]
use super::log_localfs::LogFileLocalFs;
use super::log_memdb::LogFileMemDb;
use super::*;
/// The redo log: wraps a concrete `LogFile` backend and coordinates the
/// "flip" (compaction) protocol on top of it.
pub struct RedoLog {
    /// On-disk path of the active log (None when running in memory).
    #[cfg(feature = "enable_local_fs")]
    log_path: Option<String>,
    /// Present while a flip is underway; collects writes to replay later.
    flip: Option<RedoLogFlip>,
    pub(super) log_file: Box<dyn LogFile>,
}
impl RedoLog {
#[cfg(feature = "enable_local_fs")]
async fn new(
path_log: Option<String>,
backup_path: Option<String>,
restore_path: Option<String>,
flags: OpenFlags,
cache_size: usize,
cache_ttl: u64,
loader: Box<impl Loader>,
header_bytes: Vec<u8>,
) -> std::result::Result<RedoLog, SerializationError> {
// Now load the real thing
let ret = RedoLog {
log_path: path_log.clone(),
log_file: match path_log {
Some(path_log) => {
let mut log_file = LogFileLocalFs::new(
flags.temporal,
flags.read_only,
path_log,
backup_path,
restore_path,
flags.truncate,
cache_size,
cache_ttl,
header_bytes,
)
.await?;
let cnt = log_file.read_all(loader).await?;
debug!(
"redo-log: loaded {} events from {} files",
cnt,
log_file.archives.len()
);
log_file
}
None => LogFileMemDb::new(header_bytes).await?,
},
flip: None,
};
Ok(ret)
}
#[cfg(not(feature = "enable_local_fs"))]
async fn new(header_bytes: Vec<u8>) -> std::result::Result<RedoLog, SerializationError> {
// Now load the real thing
let ret = RedoLog {
log_file: LogFileMemDb::new(header_bytes).await?,
flip: None,
};
Ok(ret)
}
#[cfg(feature = "enable_rotate")]
pub async fn rotate(&mut self, header_bytes: Vec<u8>) -> Result<()> {
Ok(self.log_file.rotate(header_bytes).await?)
}
pub fn backup(
&mut self,
include_active_files: bool,
) -> Result<Pin<Box<dyn futures::Future<Output = Result<()>> + Send + Sync>>> {
Ok(self.log_file.backup(include_active_files)?)
}
pub async fn begin_flip(&mut self, header_bytes: Vec<u8>) -> Result<FlippedLogFile> {
match self.flip {
None => {
let flip = {
FlippedLogFile {
log_file: self.log_file.begin_flip(header_bytes).await?,
event_summary: Vec::new(),
}
};
self.flip = Some(RedoLogFlip {
deferred: Vec::new(),
});
Ok(flip)
}
Some(_) => Result::Err(Error::new(
ErrorKind::Other,
"Flip operation is already underway",
)),
}
}
pub async fn finish_flip(
&mut self,
mut flip: FlippedLogFile,
mut deferred_write_callback: impl FnMut(LogLookup, EventHeader),
) -> std::result::Result<Vec<EventHeaderRaw>, SerializationError> {
match &mut self.flip {
Some(inside) => {
let mut event_summary = flip.drain_events();
let mut new_log_file = flip.copy_log_file().await?;
for d in inside.deferred.drain(..) {
let header = d.as_header()?;
event_summary.push(header.raw.clone());
let lookup = new_log_file.write(&d).await?;
deferred_write_callback(lookup, header);
}
new_log_file.flush().await?;
#[cfg(feature = "enable_local_fs")]
if let Some(a) = self.log_path.as_ref() {
new_log_file.move_log_file(a)?;
}
self.log_file = new_log_file;
self.flip = None;
Ok(event_summary)
}
None => Err(SerializationErrorKind::IO(Error::new(
ErrorKind::Other,
"There is no outstanding flip operation to end.",
))
.into()),
}
}
pub async fn load(&self, hash: AteHash) -> std::result::Result<LoadData, LoadError> {
Ok(self.log_file.load(&hash).await?)
}
pub fn prime(&mut self, records: Vec<(AteHash, Option<Bytes>)>) {
self.log_file.prime(records);
}
pub fn count(&self) -> usize {
self.log_file.count()
}
pub fn size(&self) -> u64 {
self.log_file.size()
}
pub fn offset(&self) -> u64 {
self.log_file.offset()
}
pub fn end(&self) -> LogLookup {
LogLookup {
index: self.log_file.index(),
offset: self.log_file.offset(),
}
}
#[cfg(feature = "enable_local_fs")]
pub async fn open(
cfg: &ConfAte,
key: &ChainKey,
flags: OpenFlags,
header_bytes: Vec<u8>,
) -> std::result::Result<(RedoLog, VecDeque<LoadData>), SerializationError> {
let (loader, mut rx) = RedoLogLoader::new();
let cfg = cfg.clone();
let key = key.clone();
let join1 = async move { RedoLog::open_ext(&cfg, &key, flags, loader, header_bytes).await };
let join2 = async move {
let mut ret = VecDeque::new();
while let Some(evt) = rx.recv().await {
ret.push_back(evt);
}
ret
};
let (log, ret) = futures::join!(join1, join2);
Ok((log?, ret))
}
#[cfg(feature = "enable_local_fs")]
pub async fn open_ext(
cfg: &ConfAte,
key: &ChainKey,
flags: OpenFlags,
loader: Box<impl Loader>,
header_bytes: Vec<u8>,
) -> std::result::Result<RedoLog, SerializationError> {
let mut key_name = key.name.clone();
if key_name.starts_with("/") {
key_name = key_name[1..].to_string();
}
trace!("temporal: {}", flags.temporal);
let path_log = match flags.temporal {
false => match cfg.log_path.as_ref() {
Some(a) if a.ends_with("/") => Some(format!("{}{}.log", a, key_name)),
Some(a) => Some(format!("{}/{}.log", a, key_name)),
None => None,
},
true => None,
};
if let Some(path_log) = path_log.as_ref() {
trace!("log-path: {}", path_log);
let path = std::path::Path::new(path_log);
let _ = std::fs::create_dir_all(path.parent().unwrap().clone());
} else {
trace!("log-path: (memory)");
}
let mut backup_path = {
match cfg.backup_path.as_ref() {
Some(a) if a.ends_with("/") => Some(format!("{}{}.log", a, key_name)),
Some(a) => Some(format!("{}/{}.log", a, key_name)),
None => None,
}
};
if let Some(backup_path) = backup_path.as_ref() {
let path = std::path::Path::new(backup_path);
let _ = std::fs::create_dir_all(path.parent().unwrap().clone());
}
let mut restore_path = backup_path.clone();
match cfg.backup_mode {
BackupMode::None => {
restore_path = None;
backup_path = None;
}
BackupMode::Restore => {
backup_path = None;
}
BackupMode::Rotating => {}
BackupMode::Full => {}
};
let log = {
RedoLog::new(
path_log.clone(),
backup_path.clone(),
restore_path.clone(),
flags,
cfg.load_cache_size,
cfg.load_cache_ttl,
loader,
header_bytes,
)
.await?
};
Ok(log)
}
#[cfg(not(feature = "enable_local_fs"))]
pub async fn open(header_bytes: Vec<u8>) -> std::result::Result<RedoLog, SerializationError> {
let log = { RedoLog::new(header_bytes).await? };
Ok(log)
}
pub fn destroy(&mut self) -> Result<()> {
self.log_file.destroy()
}
pub fn header(&self, index: u32) -> Vec<u8> {
self.log_file.header(index)
}
}
#[async_trait]
impl LogWritable for RedoLog {
    /// Appends an event to the underlying log file; while a flip is in
    /// progress the event is also queued in `flip.deferred` so it can be
    /// replayed into the new log by `finish_flip`.
    async fn write(
        &mut self,
        evt: &EventWeakData,
    ) -> std::result::Result<LogLookup, SerializationError> {
        if let Some(flip) = &mut self.flip {
            flip.deferred.push(evt.clone());
        }
        let pointer = self.log_file.write(evt).await?;
        Ok(pointer)
    }
    /// Flushes the underlying log file.
    async fn flush(&mut self) -> Result<()> {
        self.log_file.flush().await?;
        Ok(())
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/flags.rs | lib/src/redo/flags.rs | use crate::spec::*;
/// Options controlling how a redo log is opened.
#[derive(Debug, Clone, Copy)]
pub struct OpenFlags {
    // Open the log without write access
    pub read_only: bool,
    // Truncate any existing log contents on open ("create" constructors)
    pub truncate: bool,
    // Temporal logs are transient ("ethereal" constructors)
    pub temporal: bool,
    // Trust/integrity mode the chain runs under
    pub integrity: TrustMode,
}
impl OpenFlags {
pub fn create_distributed() -> OpenFlags {
OpenFlags {
read_only: false,
truncate: true,
temporal: false,
integrity: TrustMode::Distributed,
}
}
pub fn create_centralized_server() -> OpenFlags {
OpenFlags {
read_only: false,
truncate: true,
temporal: false,
integrity: TrustMode::Centralized(CentralizedRole::Server),
}
}
pub fn create_centralized_client() -> OpenFlags {
OpenFlags {
read_only: false,
truncate: true,
temporal: false,
integrity: TrustMode::Centralized(CentralizedRole::Client),
}
}
pub fn open_distributed() -> OpenFlags {
OpenFlags {
read_only: false,
truncate: false,
temporal: false,
integrity: TrustMode::Distributed,
}
}
pub fn open_centralized_server() -> OpenFlags {
OpenFlags {
read_only: false,
truncate: false,
temporal: false,
integrity: TrustMode::Centralized(CentralizedRole::Server),
}
}
pub fn open_centralized_client() -> OpenFlags {
OpenFlags {
read_only: false,
truncate: false,
temporal: false,
integrity: TrustMode::Centralized(CentralizedRole::Client),
}
}
pub fn ethereal_distributed() -> OpenFlags {
OpenFlags {
read_only: false,
truncate: false,
temporal: true,
integrity: TrustMode::Distributed,
}
}
pub fn ethereal_centralized_server() -> OpenFlags {
OpenFlags {
read_only: false,
truncate: false,
temporal: true,
integrity: TrustMode::Centralized(CentralizedRole::Server),
}
}
pub fn ethereal_centralized_client() -> OpenFlags {
OpenFlags {
read_only: false,
truncate: false,
temporal: true,
integrity: TrustMode::Centralized(CentralizedRole::Client),
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/api.rs | lib/src/redo/api.rs | #[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::error::*;
use crate::event::*;
use async_trait::async_trait;
use tokio::io::Result;
pub use crate::spec::LogLookup;
/// Sink that accepts redo-log events.
#[async_trait]
pub trait LogWritable {
    /// Writes an event to the redo log and returns the lookup (archive
    /// index + byte offset) where it was recorded.
    async fn write(
        &mut self,
        evt: &EventWeakData,
    ) -> std::result::Result<LogLookup, SerializationError>;
    /// Flushes any buffered writes to the backing store.
    async fn flush(&mut self) -> Result<()>;
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/flip.rs | lib/src/redo/flip.rs | use async_trait::async_trait;
use tokio::io::Result;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::crypto::AteHash;
use crate::error::*;
use crate::event::*;
use super::api::LogWritable;
use super::core::RedoLog;
use super::{log_traits::LogFile, LogLookup};
pub struct FlippedLogFile {
pub(super) log_file: Box<dyn LogFile>,
pub(crate) event_summary: Vec<EventHeaderRaw>,
}
#[async_trait]
impl LogWritable for FlippedLogFile {
    /// Writes an event into the flip-side log and records its raw header
    /// in the event summary returned by `drain_events`.
    #[allow(dead_code)]
    async fn write(
        &mut self,
        evt: &EventWeakData,
    ) -> std::result::Result<LogLookup, SerializationError> {
        let ret = self.log_file.write(evt).await?;
        self.event_summary.push(evt.as_header_raw()?);
        Ok(ret)
    }
    /// Flushes the flip-side log file.
    async fn flush(&mut self) -> Result<()> {
        self.log_file.flush().await
    }
}
impl FlippedLogFile {
    /// Clones the underlying flip-side log file handle.
    pub(super) async fn copy_log_file(&mut self) -> Result<Box<dyn LogFile>> {
        self.log_file.copy().await
    }
    /// Number of events written into the flip-side log so far.
    #[allow(dead_code)]
    pub(super) fn count(&self) -> usize {
        self.log_file.count()
    }
    /// Empties the accumulated event summary, returning the raw headers.
    pub(super) fn drain_events(&mut self) -> Vec<EventHeaderRaw> {
        self.event_summary.drain(..).collect()
    }
    /// Copies one event (by hash) out of `from_log` into the flip-side log.
    #[allow(dead_code)]
    pub(crate) async fn copy_event(
        &mut self,
        from_log: &RedoLog,
        from_pointer: AteHash,
    ) -> std::result::Result<LogLookup, LoadError> {
        let lookup = self
            .log_file
            .copy_event(&from_log.log_file, from_pointer)
            .await?;
        Ok(lookup)
    }
}
/// State held while a flip (compaction) is underway: events written to the
/// main log during the flip are queued here and replayed into the new log
/// when the flip finishes.
pub(super) struct RedoLogFlip {
    pub deferred: Vec<EventWeakData>,
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/model.rs | lib/src/redo/model.rs | #[allow(unused_imports)]
use tracing::{error, info, warn, debug};
use crate::{crypto::Hash};
use tokio::sync::Mutex as MutexAsync;
use cached::*;
use fxhash::FxHashMap;
use super::loader::LoadData; | rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/mod.rs | lib/src/redo/mod.rs | #[allow(unused_imports)]
use tracing::{debug, error, info, warn};
mod api;
#[cfg(feature = "enable_local_fs")]
mod appender;
#[cfg(feature = "enable_local_fs")]
mod archive;
mod core;
mod flags;
mod flip;
mod loader;
#[cfg(feature = "enable_local_fs")]
mod log_localfs;
mod log_memdb;
mod log_traits;
mod magic;
mod test;
pub use self::core::RedoLog;
pub use api::LogWritable;
pub use flags::OpenFlags;
pub use loader::RedoLogLoader;
pub(crate) use api::LogLookup;
pub use log_traits::*;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/loader.rs | lib/src/redo/loader.rs | #[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use async_trait::async_trait;
use tokio::sync::mpsc;
use crate::loader::*;
pub struct RedoLogLoader {
feed: mpsc::Sender<LoadData>,
}
impl RedoLogLoader {
    /// Builds a loader plus the receiving end of its feed channel
    /// (bounded at 1000 in-flight events).
    pub fn new() -> (Box<RedoLogLoader>, mpsc::Receiver<LoadData>) {
        let (feed, rx) = mpsc::channel(1000);
        (Box::new(RedoLogLoader { feed }), rx)
    }
}
#[async_trait]
impl Loader for RedoLogLoader {
    /// Forwards a loaded event to the channel consumer; a send error
    /// (receiver dropped) is deliberately ignored.
    async fn feed_load_data(&mut self, data: LoadData) {
        let _ = self.feed.send(data).await;
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/redo/appender.rs | lib/src/redo/appender.rs | use async_trait::async_trait;
use std::io::SeekFrom;
use std::mem::size_of;
use error_chain::bail;
use tokio::fs::File;
use tokio::fs::OpenOptions;
#[cfg(feature = "enable_buffered")]
use tokio::io::BufStream;
use tokio::io::Result;
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWriteExt};
use super::archive::*;
use super::magic::*;
use super::LogLookup;
use crate::error::*;
use crate::event::*;
use crate::spec::LogApi;
#[derive(Debug)]
pub(crate) struct LogAppender {
path: String,
pub(super) file: File,
#[cfg(feature = "enable_buffered")]
stream: BufStream<File>,
pub(super) offset: u64,
header: Vec<u8>,
pub(crate) index: u32,
}
impl LogAppender {
pub async fn new(
path_log: String,
truncate: bool,
read_only: bool,
index: u32,
header_bytes: &[u8],
) -> Result<(LogAppender, LogArchive)> {
// Compute the log file name
let log_back_path = format!("{}.{}", path_log.clone(), index);
let log_back = match read_only {
true => {
OpenOptions::new()
.read(true)
.open(log_back_path.clone())
.await?
}
false => match truncate {
true => {
OpenOptions::new()
.read(true)
.write(true)
.truncate(true)
.create(true)
.open(log_back_path.clone())
.await?
}
_ => {
OpenOptions::new()
.read(true)
.write(true)
.create(true)
.open(log_back_path.clone())
.await?
}
},
};
// Build the appender
let mut appender = LogAppender {
path: log_back_path.clone(),
#[cfg(feature = "enable_buffered")]
stream: BufStream::new(log_back.try_clone().await.unwrap()),
file: log_back,
offset: 0,
index,
header: Vec::new(),
};
// If it does not have a magic then add one - otherwise read it and check the value
appender.header = RedoHeader::load(&mut appender, header_bytes).await?;
appender.flush().await?;
// Seek to the end of the appender
appender.seek_to_end().await?;
// Create the archive
let archive = LogArchive::new(path_log, index).await?;
// Return the result
Ok((appender, archive))
}
pub(super) async fn clone(&mut self) -> Result<LogAppender> {
// We have to flush the stream in-case there is outstanding IO that is not yet written to the backing disk
#[cfg(feature = "enable_buffered")]
self.stream.flush().await?;
// Copy the file handles
Ok(LogAppender {
path: self.path.clone(),
file: self.file.try_clone().await?,
#[cfg(feature = "enable_buffered")]
stream: BufStream::new(self.file.try_clone().await?),
offset: self.offset,
index: self.index,
header: self.header.clone(),
})
}
pub(super) async fn write(
&mut self,
evt: &EventWeakData,
header: &EventHeaderRaw,
) -> std::result::Result<LogLookup, SerializationError> {
let log_header = crate::LOG_VERSION
.write(
self,
&header.meta_bytes[..],
match &evt.data_bytes {
MessageBytes::Some(d) => Some(&d[..]),
MessageBytes::LazySome(_) => bail!(SerializationErrorKind::MissingData),
MessageBytes::None => None,
},
evt.format,
)
.await?;
// Record the lookup map
let lookup = LogLookup {
index: self.index,
offset: log_header.offset,
};
// Return the log pointer
Ok(lookup)
}
pub(crate) fn path(&self) -> &String {
&self.path
}
pub(crate) fn header(&self) -> &[u8] {
&self.header[..]
}
pub(super) async fn flush(&mut self) -> Result<()> {
#[cfg(feature = "enable_buffered")]
self.stream.flush().await?;
Ok(())
}
pub(super) async fn seek_to_end(&mut self) -> Result<()> {
#[cfg(feature = "enable_buffered")]
self.stream.flush().await?;
self.offset = self.file.seek(SeekFrom::End(0)).await?;
#[cfg(feature = "enable_buffered")]
{
self.stream = BufStream::new(self.file.try_clone().await?);
}
Ok(())
}
}
#[async_trait]
impl LogApi for LogAppender {
fn offset(&self) -> u64 {
self.offset
}
async fn len(&self) -> Result<u64> {
Ok(self.file.metadata().await?.len())
}
async fn seek(&mut self, off: u64) -> Result<()> {
#[cfg(feature = "enable_buffered")]
self.stream.flush().await?;
self.file.seek(SeekFrom::Start(off)).await?;
self.offset = off;
#[cfg(feature = "enable_buffered")]
{
self.stream = BufStream::new(self.file.try_clone().await?);
}
Ok(())
}
async fn read_u8(&mut self) -> Result<u8> {
#[cfg(feature = "enable_buffered")]
let ret = self.stream.read_u8().await?;
#[cfg(not(feature = "enable_buffered"))]
let ret = self.file.read_u8().await?;
self.offset = self.offset + size_of::<u8>() as u64;
Ok(ret)
}
async fn read_u16(&mut self) -> Result<u16> {
#[cfg(feature = "enable_buffered")]
let ret = self.stream.read_u16().await?;
#[cfg(not(feature = "enable_buffered"))]
let ret = self.file.read_u16().await?;
self.offset = self.offset + size_of::<u16>() as u64;
Ok(ret)
}
async fn read_u32(&mut self) -> Result<u32> {
#[cfg(feature = "enable_buffered")]
let ret = self.stream.read_u32().await?;
#[cfg(not(feature = "enable_buffered"))]
let ret = self.file.read_u32().await?;
self.offset = self.offset + size_of::<u32>() as u64;
Ok(ret)
}
async fn read_u64(&mut self) -> Result<u64> {
#[cfg(feature = "enable_buffered")]
let ret = self.stream.read_u64().await?;
#[cfg(not(feature = "enable_buffered"))]
let ret = self.file.read_u64().await?;
self.offset = self.offset + size_of::<u64>() as u64;
Ok(ret)
}
async fn read_exact(&mut self, buf: &mut [u8]) -> Result<()> {
#[cfg(feature = "enable_buffered")]
let amt = self.stream.read_exact(&mut buf[..]).await?;
#[cfg(not(feature = "enable_buffered"))]
let amt = self.file.read_exact(&mut buf[..]).await?;
self.offset = self.offset + amt as u64;
Ok(())
}
async fn write_u8(&mut self, val: u8) -> Result<()> {
#[cfg(feature = "enable_buffered")]
self.stream.write_u8(val).await?;
#[cfg(not(feature = "enable_buffered"))]
self.file.write_u8(val).await?;
self.offset = self.offset + size_of::<u8>() as u64;
Ok(())
}
async fn write_u16(&mut self, val: u16) -> Result<()> {
#[cfg(feature = "enable_buffered")]
self.stream.write_u16(val).await?;
#[cfg(not(feature = "enable_buffered"))]
self.file.write_u16(val).await?;
self.offset = self.offset + size_of::<u16>() as u64;
Ok(())
}
async fn write_u32(&mut self, val: u32) -> Result<()> {
#[cfg(feature = "enable_buffered")]
self.stream.write_u32(val).await?;
#[cfg(not(feature = "enable_buffered"))]
self.file.write_u32(val).await?;
self.offset = self.offset + size_of::<u32>() as u64;
Ok(())
}
async fn write_u64(&mut self, val: u64) -> Result<()> {
#[cfg(feature = "enable_buffered")]
self.stream.write_u64(val).await?;
#[cfg(not(feature = "enable_buffered"))]
self.file.write_u64(val).await?;
self.offset = self.offset + size_of::<u64>() as u64;
Ok(())
}
async fn write_exact(&mut self, buf: &[u8]) -> Result<()> {
#[cfg(feature = "enable_buffered")]
self.stream.write_all(&buf[..]).await?;
#[cfg(not(feature = "enable_buffered"))]
self.file.write_all(&buf[..]).await?;
self.offset = self.offset + buf.len() as u64;
Ok(())
}
async fn sync(&mut self) -> Result<()> {
self.flush().await?;
self.file.sync_all().await?;
Ok(())
}
}
#[cfg(feature = "enable_buffered")]
impl Drop for LogAppender {
fn drop(&mut self) {
let exec = async_executor::LocalExecutor::default();
let _ = futures::executor::block_on(exec.run(self.stream.shutdown()));
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/session/session_user.rs | lib/src/session/session_user.rs | #[allow(unused_imports)]
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use crate::crypto::*;
use super::*;
pub type SessionToken = Option<EncryptedSecureData<EncryptKey>>;
/// Sessions hold facts about the user that give them certains
/// rights and abilities to view data within the chain-of-trust.
///
/// For instance - to see encrypted data for specific users they
/// must insert their `EncryptKey` into this session before
/// accessing the chain via a `Dio`.
///
/// Another example is the ability to write data. For certain
/// records within the tree if they have been protected with
/// write protection then you must insert your `PrivateKey`
/// into the session before you attempt to insert or modify these
/// records.
///
/// Sessions are never cached and only exist in memory for the
/// duration that you use them for security reasons.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct AteSessionUser {
pub user: AteGroupRole,
pub token: SessionToken,
pub identity: String,
pub broker_read: Option<PrivateEncryptKey>,
pub broker_write: Option<PrivateSignKey>,
}
impl Default for AteSessionUser {
fn default() -> AteSessionUser {
AteSessionUser {
user: AteGroupRole {
purpose: AteRolePurpose::Personal,
properties: Vec::new(),
},
token: None,
identity: "nobody@nowhere.com".to_string(),
broker_read: None,
broker_write: None,
}
}
}
impl AteSessionUser {
pub fn new() -> AteSessionUser {
AteSessionUser::default()
}
pub fn add_user_read_key(&mut self, key: &EncryptKey) {
self.user.add_read_key(key)
}
pub fn add_user_private_read_key(&mut self, key: &PrivateEncryptKey) {
self.user.add_private_read_key(key)
}
pub fn add_user_write_key(&mut self, key: &PrivateSignKey) {
self.user.add_write_key(key)
}
pub fn add_user_uid(&mut self, uid: u32) {
self.user.add_uid(uid)
}
}
impl AteSession for AteSessionUser {
fn role<'a>(&'a self, _purpose: &AteRolePurpose) -> Option<&'a AteGroupRole> {
None
}
fn read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a EncryptKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.user.read_keys());
}
let ret1 = self
.user
.read_keys()
.filter(move |_| category.includes_user_keys());
Box::new(ret1)
}
fn write_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateSignKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.user.write_keys());
}
let ret1 = self
.user
.write_keys()
.filter(move |_| category.includes_user_keys());
let ret2 = self.broker_write().filter(|_| category.includes_broker_keys()).into_iter();
Box::new(ret1.chain(ret2))
}
fn public_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PublicEncryptKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.user.public_read_keys());
}
let ret1 = self
.user
.public_read_keys()
.filter(move |_| category.includes_user_keys());
let ret2 = self.broker_read().filter(move |_| category.includes_broker_keys()).map(|a| a.as_public_key()).into_iter();
Box::new(ret1.chain(ret2))
}
fn private_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateEncryptKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.user.private_read_keys());
}
let ret1 = self
.user
.private_read_keys()
.filter(move |_| category.includes_user_keys());
let ret2 = self.broker_read().filter(move |_| category.includes_broker_keys()).into_iter();
Box::new(ret1.chain(ret2))
}
fn broker_read<'a>(&'a self) -> Option<&'a PrivateEncryptKey> {
self.broker_read.as_ref()
}
fn broker_write<'a>(&'a self) -> Option<&'a PrivateSignKey> {
self.broker_write.as_ref()
}
fn identity<'a>(&'a self) -> &'a str {
self.identity.as_str()
}
fn user<'a>(&'a self) -> &'a AteSessionUser {
self
}
fn user_mut<'a>(&'a mut self) -> &'a mut AteSessionUser {
self
}
fn uid<'a>(&'a self) -> Option<u32> {
self.user.uid()
}
fn gid<'a>(&'a self) -> Option<u32> {
if let Some(gid) = self.user.gid() {
Some(gid)
} else {
self.uid()
}
}
fn properties<'a>(&'a self) -> Box<dyn Iterator<Item = &'a AteSessionProperty> + 'a> {
let ret1 = self.user.properties.iter();
Box::new(ret1)
}
fn append<'a, 'b>(
&'a mut self,
properties: Box<dyn Iterator<Item = &'b AteSessionProperty> + 'b>,
) {
let mut properties = properties.map(|a| a.clone()).collect::<Vec<_>>();
self.user.properties.append(&mut properties);
}
fn clone_session(&self) -> Box<dyn AteSession> {
Box::new(self.clone())
}
fn clone_inner(&self) -> AteSessionInner {
AteSessionInner::User(self.clone())
}
}
impl std::fmt::Display for AteSessionUser {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "[")?;
self.user.fmt(f)?;
write!(f, "]")
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/session/group_role.rs | lib/src/session/group_role.rs | #[allow(unused_imports)]
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use crate::crypto::*;
use super::*;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct AteGroupRole {
pub purpose: AteRolePurpose,
pub properties: Vec<AteSessionProperty>,
}
impl AteGroupRole {
pub fn add_read_key(&mut self, key: &EncryptKey) {
self.properties
.push(AteSessionProperty::ReadKey(key.clone()));
}
pub fn add_private_read_key(&mut self, key: &PrivateEncryptKey) {
self.properties
.push(AteSessionProperty::PrivateReadKey(key.clone()));
}
pub fn add_write_key(&mut self, key: &PrivateSignKey) {
self.properties
.push(AteSessionProperty::WriteKey(key.clone()));
}
pub fn clear_read_keys(&mut self) {
self.properties.retain(|p| {
if let AteSessionProperty::ReadKey(_) = p {
return false;
}
return true;
});
}
pub fn clear_private_read_keys(&mut self) {
self.properties.retain(|p| {
if let AteSessionProperty::PrivateReadKey(_) = p {
return false;
}
return true;
});
}
pub fn clear_write_keys(&mut self) {
self.properties.retain(|p| {
if let AteSessionProperty::WriteKey(_) = p {
return false;
}
return true;
});
}
pub fn add_uid(&mut self, uid: u32) {
self.properties.push(AteSessionProperty::Uid(uid));
}
pub fn add_gid(&mut self, gid: u32) {
self.properties.push(AteSessionProperty::Gid(gid));
}
pub fn read_keys<'a>(&'a self) -> impl Iterator<Item = &'a EncryptKey> {
self.properties.iter().filter_map(|p| match p {
AteSessionProperty::ReadKey(k) => Some(k),
_ => None,
})
}
pub fn write_keys<'a>(&'a self) -> impl Iterator<Item = &'a PrivateSignKey> {
self.properties.iter().filter_map(|p| match p {
AteSessionProperty::WriteKey(k) => Some(k),
_ => None,
})
}
pub fn public_read_keys<'a>(&'a self) -> impl Iterator<Item = &'a PublicEncryptKey> {
self.properties.iter().filter_map(|p| match p {
AteSessionProperty::PublicReadKey(k) => Some(k),
_ => None,
})
}
pub fn private_read_keys<'a>(&'a self) -> impl Iterator<Item = &'a PrivateEncryptKey> {
self.properties.iter().filter_map(|p| match p {
AteSessionProperty::PrivateReadKey(k) => Some(k),
_ => None,
})
}
pub fn uid<'a>(&'a self) -> Option<u32> {
self.properties
.iter()
.filter_map(|p| match p {
AteSessionProperty::Uid(k) => Some(k.clone()),
_ => None,
})
.next()
}
pub fn gid<'a>(&'a self) -> Option<u32> {
self.properties
.iter()
.filter_map(|p| match p {
AteSessionProperty::Gid(k) => Some(k.clone()),
_ => None,
})
.next()
}
}
impl std::fmt::Display for AteGroupRole {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "(purpose={}", self.purpose)?;
for prop in self.properties.iter() {
write!(f, ",")?;
prop.fmt(f)?;
}
write!(f, ")")
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/session/session_property.rs | lib/src/session/session_property.rs | #[allow(unused_imports)]
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use crate::crypto::*;
#[allow(dead_code)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum AteSessionProperty {
None,
ReadKey(EncryptKey),
PrivateReadKey(PrivateEncryptKey),
PublicReadKey(PublicEncryptKey),
WriteKey(PrivateSignKey),
Uid(u32),
Gid(u32),
}
impl Default for AteSessionProperty {
fn default() -> Self {
AteSessionProperty::None
}
}
impl std::fmt::Display for AteSessionProperty {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
AteSessionProperty::None => write!(f, "none"),
AteSessionProperty::ReadKey(a) => write!(f, "read-key:{}", a),
AteSessionProperty::PrivateReadKey(a) => write!(f, "private-read-key:{}", a),
AteSessionProperty::PublicReadKey(a) => write!(f, "public-read-key:{}", a),
AteSessionProperty::WriteKey(a) => write!(f, "write-key:{}", a),
AteSessionProperty::Uid(a) => write!(f, "uid:{}", a),
AteSessionProperty::Gid(a) => write!(f, "gid:{}", a),
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/session/group.rs | lib/src/session/group.rs | #[allow(unused_imports)]
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use crate::crypto::*;
use super::*;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct AteGroup {
pub name: String,
pub roles: Vec<AteGroupRole>,
pub broker_read: Option<PrivateEncryptKey>,
pub broker_write: Option<PrivateSignKey>,
}
impl AteGroup {
pub fn get_role<'a>(&'a self, purpose: &AteRolePurpose) -> Option<&'a AteGroupRole> {
self.roles.iter().filter(|r| r.purpose == *purpose).next()
}
pub fn get_or_create_role<'a>(&'a mut self, purpose: &AteRolePurpose) -> &'a mut AteGroupRole {
if self.roles.iter().any(|r| r.purpose == *purpose) == false {
self.roles.push(AteGroupRole {
purpose: purpose.clone(),
properties: Vec::new(),
});
}
self.roles.iter_mut().filter(|r| r.purpose == *purpose).next().expect("It should not be possible for this call to fail as the line above just added the item we are searching for")
}
pub fn add_read_key(&mut self, purpose: &AteRolePurpose, key: &EncryptKey) {
let role = self.get_or_create_role(purpose);
role.properties
.push(AteSessionProperty::ReadKey(key.clone()));
}
pub fn add_private_read_key(&mut self, purpose: &AteRolePurpose, key: &PrivateEncryptKey) {
let role = self.get_or_create_role(purpose);
role.properties
.push(AteSessionProperty::PrivateReadKey(key.clone()));
}
pub fn add_write_key(&mut self, purpose: &AteRolePurpose, key: &PrivateSignKey) {
let role = self.get_or_create_role(purpose);
role.properties
.push(AteSessionProperty::WriteKey(key.clone()));
}
pub fn read_keys<'a>(&'a self) -> impl Iterator<Item = &'a EncryptKey> {
self.roles
.iter()
.flat_map(|r| r.properties.iter())
.filter_map(|p| match p {
AteSessionProperty::ReadKey(k) => Some(k),
_ => None,
})
}
pub fn write_keys<'a>(&'a self) -> impl Iterator<Item = &'a PrivateSignKey> {
self.roles
.iter()
.flat_map(|r| r.properties.iter())
.filter_map(|p| match p {
AteSessionProperty::WriteKey(k) => Some(k),
_ => None,
})
}
pub fn public_read_keys<'a>(&'a self) -> impl Iterator<Item = &'a PublicEncryptKey> {
self.roles
.iter()
.flat_map(|r| r.properties.iter())
.filter_map(|p| match p {
AteSessionProperty::PublicReadKey(k) => Some(k),
_ => None,
})
}
pub fn private_read_keys<'a>(&'a self) -> impl Iterator<Item = &'a PrivateEncryptKey> {
self.roles
.iter()
.flat_map(|r| r.properties.iter())
.filter_map(|p| match p {
AteSessionProperty::PrivateReadKey(k) => Some(k),
_ => None,
})
}
}
impl std::fmt::Display for AteGroup {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "(name={}", self.name)?;
for role in self.roles.iter() {
write!(f, ",")?;
role.fmt(f)?;
}
if let Some(k) = &self.broker_read {
write!(f, ",broker_read={}", k)?;
}
if let Some(k) = &self.broker_write {
write!(f, ",broker_write={}", k)?;
}
write!(f, ")")
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/session/session_type.rs | lib/src/session/session_type.rs | use super::*;
use crate::crypto::*;
use serde::*;
#[derive(Serialize, Deserialize, Clone, Debug)]
pub enum AteSessionType {
User(AteSessionUser),
Sudo(AteSessionSudo),
Group(AteSessionGroup),
Nothing
}
impl AteSession for AteSessionType {
fn role<'a>(&'a self, purpose: &AteRolePurpose) -> Option<&'a AteGroupRole> {
match self {
AteSessionType::User(a) => a.role(purpose),
AteSessionType::Sudo(a) => a.role(purpose),
AteSessionType::Group(a) => a.role(purpose),
AteSessionType::Nothing => None,
}
}
fn read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a EncryptKey> + 'a> {
match self {
AteSessionType::User(a) => a.read_keys(category),
AteSessionType::Sudo(a) => a.read_keys(category),
AteSessionType::Group(a) => a.read_keys(category),
AteSessionType::Nothing => Box::new(std::iter::empty())
}
}
fn write_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateSignKey> + 'a> {
match self {
AteSessionType::User(a) => a.write_keys(category),
AteSessionType::Sudo(a) => a.write_keys(category),
AteSessionType::Group(a) => a.write_keys(category),
AteSessionType::Nothing => Box::new(std::iter::empty())
}
}
fn public_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PublicEncryptKey> + 'a> {
match self {
AteSessionType::User(a) => a.public_read_keys(category),
AteSessionType::Sudo(a) => a.public_read_keys(category),
AteSessionType::Group(a) => a.public_read_keys(category),
AteSessionType::Nothing => Box::new(std::iter::empty())
}
}
fn private_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateEncryptKey> + 'a> {
match self {
AteSessionType::User(a) => a.private_read_keys(category),
AteSessionType::Sudo(a) => a.private_read_keys(category),
AteSessionType::Group(a) => a.private_read_keys(category),
AteSessionType::Nothing => Box::new(std::iter::empty())
}
}
fn broker_read<'a>(&'a self) -> Option<&'a PrivateEncryptKey> {
match self {
AteSessionType::User(a) => a.broker_read(),
AteSessionType::Sudo(a) => a.broker_read(),
AteSessionType::Group(a) => a.broker_read(),
AteSessionType::Nothing => None
}
}
fn broker_write<'a>(&'a self) -> Option<&'a PrivateSignKey> {
match self {
AteSessionType::User(a) => a.broker_write(),
AteSessionType::Sudo(a) => a.broker_write(),
AteSessionType::Group(a) => a.broker_write(),
AteSessionType::Nothing => None
}
}
fn identity<'a>(&'a self) -> &'a str {
match self {
AteSessionType::User(a) => a.identity(),
AteSessionType::Sudo(a) => a.identity(),
AteSessionType::Group(a) => a.identity(),
AteSessionType::Nothing => "nothing"
}
}
fn user<'a>(&'a self) -> &'a AteSessionUser {
match self {
AteSessionType::User(a) => a.user(),
AteSessionType::Sudo(a) => a.user(),
AteSessionType::Group(a) => a.user(),
AteSessionType::Nothing => &super::session_inner::EMPTY_SESSION_USER
}
}
fn user_mut<'a>(&'a mut self) -> &'a mut AteSessionUser {
match self {
AteSessionType::User(a) => a.user_mut(),
AteSessionType::Sudo(a) => a.user_mut(),
AteSessionType::Group(a) => a.user_mut(),
AteSessionType::Nothing => panic!("orphaned user sessions can not be mutated")
}
}
fn uid<'a>(&'a self) -> Option<u32> {
match self {
AteSessionType::User(a) => a.uid(),
AteSessionType::Sudo(a) => a.uid(),
AteSessionType::Group(a) => a.uid(),
AteSessionType::Nothing => None
}
}
fn gid<'a>(&'a self) -> Option<u32> {
match self {
AteSessionType::User(a) => a.gid(),
AteSessionType::Sudo(a) => a.gid(),
AteSessionType::Group(a) => a.gid(),
AteSessionType::Nothing => None
}
}
fn clone_session(&self) -> Box<dyn AteSession> {
Box::new(self.clone())
}
fn clone_inner(&self) -> AteSessionInner {
match self {
AteSessionType::User(a) => a.clone_inner(),
AteSessionType::Sudo(a) => a.clone_inner(),
AteSessionType::Group(a) => a.clone_inner(),
AteSessionType::Nothing => AteSessionInner::Nothing
}
}
fn properties<'a>(&'a self) -> Box<dyn Iterator<Item = &'a AteSessionProperty> + 'a> {
match self {
AteSessionType::User(a) => a.properties(),
AteSessionType::Sudo(a) => a.properties(),
AteSessionType::Group(a) => a.properties(),
AteSessionType::Nothing => Box::new(std::iter::empty())
}
}
fn append<'a, 'b>(
&'a mut self,
properties: Box<dyn Iterator<Item = &'b AteSessionProperty> + 'b>,
) {
match self {
AteSessionType::User(a) => a.append(properties),
AteSessionType::Sudo(a) => a.append(properties),
AteSessionType::Group(a) => a.append(properties),
AteSessionType::Nothing => { }
}
}
}
impl std::fmt::Display for AteSessionType {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "[")?;
match self {
AteSessionType::User(a) => write!(f, "user: {}", a),
AteSessionType::Sudo(a) => write!(f, "sudo: {}", a),
AteSessionType::Group(a) => write!(f, "group: {}", a),
AteSessionType::Nothing => write!(f, "nothing"),
}?;
write!(f, "]")
}
}
impl From<AteSessionInner> for AteSessionType {
fn from(a: AteSessionInner) -> Self {
match a {
AteSessionInner::User(a) => AteSessionType::User(a),
AteSessionInner::Sudo(a) => AteSessionType::Sudo(a),
AteSessionInner::Nothing => AteSessionType::Nothing
}
}
}
impl From<AteSessionUser> for AteSessionType {
fn from(a: AteSessionUser) -> Self {
AteSessionType::User(a)
}
}
impl From<AteSessionSudo> for AteSessionType {
fn from(a: AteSessionSudo) -> Self {
AteSessionType::Sudo(a)
}
}
impl From<AteSessionGroup> for AteSessionType {
fn from(a: AteSessionGroup) -> Self {
AteSessionType::Group(a)
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/session/role_purpose.rs | lib/src/session/role_purpose.rs | #[allow(unused_imports)]
use serde::{de::DeserializeOwned, Deserialize, Serialize};
#[derive(Debug, Serialize, Deserialize, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub enum AteRolePurpose {
Owner,
Personal,
Delegate,
Contributor,
Observer,
Finance,
WebServer,
EdgeCompute,
Other(String),
}
impl std::fmt::Display for AteRolePurpose {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
AteRolePurpose::Owner => write!(f, "owner"),
AteRolePurpose::Personal => write!(f, "personal"),
AteRolePurpose::Delegate => write!(f, "delegate"),
AteRolePurpose::Contributor => write!(f, "contributor"),
AteRolePurpose::Observer => write!(f, "observer"),
AteRolePurpose::Finance => write!(f, "finance"),
AteRolePurpose::WebServer => write!(f, "www"),
AteRolePurpose::EdgeCompute => write!(f, "edge"),
AteRolePurpose::Other(a) => write!(f, "other-{}", a),
}
}
}
impl std::str::FromStr for AteRolePurpose {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"owner" => Ok(AteRolePurpose::Owner),
"personal" => Ok(AteRolePurpose::Personal),
"delegate" => Ok(AteRolePurpose::Delegate),
"contributor" => Ok(AteRolePurpose::Contributor),
"observer" => Ok(AteRolePurpose::Observer),
"finance" => Ok(AteRolePurpose::Finance),
"www" => Ok(AteRolePurpose::WebServer),
"edge" => Ok(AteRolePurpose::EdgeCompute),
a if a.starts_with("other-") && a.len() > 6 => Ok(AteRolePurpose::Other(a["other-".len()..].to_string())),
_ => Err("valid values are 'owner', 'personal', 'delegate', 'contributor', 'observer', 'www', 'edge' and 'other-'"),
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/session/session_inner.rs | lib/src/session/session_inner.rs | use super::*;
use crate::crypto::*;
use serde::*;
use once_cell::sync::Lazy;
pub(super) static EMPTY_SESSION_USER: Lazy<AteSessionUser> =
Lazy::new(|| AteSessionUser::new());
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum AteSessionInner {
User(AteSessionUser),
Sudo(AteSessionSudo),
Nothing,
}
impl AteSession for AteSessionInner {
fn role<'a>(&'a self, purpose: &AteRolePurpose) -> Option<&'a AteGroupRole> {
match self {
AteSessionInner::User(a) => a.role(purpose),
AteSessionInner::Sudo(a) => a.role(purpose),
AteSessionInner::Nothing => None,
}
}
fn read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a EncryptKey> + 'a> {
match self {
AteSessionInner::User(a) => a.read_keys(category),
AteSessionInner::Sudo(a) => a.read_keys(category),
AteSessionInner::Nothing => Box::new(std::iter::empty())
}
}
fn write_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateSignKey> + 'a> {
match self {
AteSessionInner::User(a) => a.write_keys(category),
AteSessionInner::Sudo(a) => a.write_keys(category),
AteSessionInner::Nothing => Box::new(std::iter::empty())
}
}
fn public_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PublicEncryptKey> + 'a> {
match self {
AteSessionInner::User(a) => a.public_read_keys(category),
AteSessionInner::Sudo(a) => a.public_read_keys(category),
AteSessionInner::Nothing => Box::new(std::iter::empty())
}
}
fn private_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateEncryptKey> + 'a> {
match self {
AteSessionInner::User(a) => a.private_read_keys(category),
AteSessionInner::Sudo(a) => a.private_read_keys(category),
AteSessionInner::Nothing => Box::new(std::iter::empty())
}
}
fn broker_read<'a>(&'a self) -> Option<&'a PrivateEncryptKey> {
match self {
AteSessionInner::User(a) => a.broker_read(),
AteSessionInner::Sudo(a) => a.broker_read(),
AteSessionInner::Nothing => None
}
}
fn broker_write<'a>(&'a self) -> Option<&'a PrivateSignKey> {
match self {
AteSessionInner::User(a) => a.broker_write(),
AteSessionInner::Sudo(a) => a.broker_write(),
AteSessionInner::Nothing => None
}
}
fn identity<'a>(&'a self) -> &'a str {
match self {
AteSessionInner::User(a) => a.identity(),
AteSessionInner::Sudo(a) => a.identity(),
AteSessionInner::Nothing => "nothing"
}
}
fn user<'a>(&'a self) -> &'a AteSessionUser {
match self {
AteSessionInner::User(a) => a.user(),
AteSessionInner::Sudo(a) => a.user(),
AteSessionInner::Nothing => &EMPTY_SESSION_USER
}
}
fn user_mut<'a>(&'a mut self) -> &'a mut AteSessionUser {
match self {
AteSessionInner::User(a) => a.user_mut(),
AteSessionInner::Sudo(a) => a.user_mut(),
AteSessionInner::Nothing => panic!("nothing user sessions can not be mutated")
}
}
fn uid<'a>(&'a self) -> Option<u32> {
match self {
AteSessionInner::User(a) => a.uid(),
AteSessionInner::Sudo(a) => a.uid(),
AteSessionInner::Nothing => None,
}
}
fn gid<'a>(&'a self) -> Option<u32> {
None
}
fn clone_session(&self) -> Box<dyn AteSession> {
Box::new(self.clone())
}
fn clone_inner(&self) -> AteSessionInner {
self.clone()
}
fn properties<'a>(&'a self) -> Box<dyn Iterator<Item = &'a AteSessionProperty> + 'a> {
match self {
AteSessionInner::User(a) => a.properties(),
AteSessionInner::Sudo(a) => a.properties(),
AteSessionInner::Nothing => Box::new(std::iter::empty())
}
}
fn append<'a, 'b>(
&'a mut self,
properties: Box<dyn Iterator<Item = &'b AteSessionProperty> + 'b>,
) {
match self {
AteSessionInner::User(a) => a.append(properties),
AteSessionInner::Sudo(a) => a.append(properties),
AteSessionInner::Nothing => { }
}
}
}
impl std::fmt::Display for AteSessionInner {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "[")?;
match self {
AteSessionInner::User(a) => write!(f, "user: {}", a),
AteSessionInner::Sudo(a) => write!(f, "sudo: {}", a),
AteSessionInner::Nothing => write!(f, "nothing"),
}?;
write!(f, "]")
}
}
impl From<AteSessionUser> for AteSessionInner {
fn from(a: AteSessionUser) -> Self {
AteSessionInner::User(a)
}
}
impl From<AteSessionSudo> for AteSessionInner {
fn from(a: AteSessionSudo) -> Self {
AteSessionInner::Sudo(a)
}
} | rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/session/mod.rs | lib/src/session/mod.rs | pub mod group;
pub mod group_role;
pub mod role_purpose;
pub mod session_group;
pub mod session_inner;
pub mod session_property;
pub mod session_sudo;
pub mod session_trait;
pub mod session_type;
pub mod session_user;
pub use group::*;
pub use group_role::*;
pub use role_purpose::*;
pub use session_group::*;
pub use session_inner::*;
pub use session_property::*;
pub use session_sudo::*;
pub use session_trait::*;
pub use session_type::*;
pub use session_user::*;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/session/session_group.rs | lib/src/session/session_group.rs | #[allow(unused_imports)]
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::ops::Deref;
use std::ops::DerefMut;
use crate::crypto::*;
use super::*;
/// Sudo sessions are elevated permissions used to carry out
/// high priveledge actions
///
/// Sessions are never cached and only exist in memory for the
/// duration that you use them for security reasons.
#[allow(dead_code)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct AteSessionGroup {
pub inner: AteSessionInner,
pub group: AteGroup,
}
impl AteSessionGroup {
pub fn new(inner: AteSessionInner, group: String) -> AteSessionGroup {
AteSessionGroup {
inner,
group: AteGroup {
name: group,
roles: Vec::new(),
broker_read: None,
broker_write: None,
},
}
}
pub fn get_group_role<'a>(&'a self, purpose: &AteRolePurpose) -> Option<&'a AteGroupRole> {
self.group.get_role(purpose)
}
pub fn get_or_create_group_role<'a>(
&'a mut self,
purpose: &AteRolePurpose,
) -> &'a mut AteGroupRole {
self.group.get_or_create_role(purpose)
}
pub fn add_group_read_key(&mut self, purpose: &AteRolePurpose, key: &EncryptKey) {
let role = self.get_or_create_group_role(purpose);
role.add_read_key(key)
}
pub fn add_group_private_read_key(
&mut self,
purpose: &AteRolePurpose,
key: &PrivateEncryptKey,
) {
let role = self.get_or_create_group_role(purpose);
role.add_private_read_key(key)
}
pub fn add_group_write_key(&mut self, purpose: &AteRolePurpose, key: &PrivateSignKey) {
let role = self.get_or_create_group_role(purpose);
role.add_write_key(key)
}
pub fn add_group_gid(&mut self, purpose: &AteRolePurpose, gid: u32) {
let role = self.get_or_create_group_role(purpose);
role.add_gid(gid)
}
}
impl AteSession for AteSessionGroup {
fn role<'a>(&'a self, purpose: &AteRolePurpose) -> Option<&'a AteGroupRole> {
self.get_group_role(purpose)
}
fn read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a EncryptKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.group.roles.iter().flat_map(|a| a.read_keys()));
}
let ret1 = self.inner.read_keys(category);
let ret2 = self
.group
.roles
.iter()
.filter(move |_| category.includes_group_keys())
.flat_map(|a| a.read_keys());
Box::new(ret1.chain(ret2))
}
fn write_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateSignKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.group.roles.iter().flat_map(|a| a.write_keys()));
}
let ret1 = self.inner.write_keys(category);
let ret2 = self
.group
.roles
.iter()
.filter(move |_| category.includes_group_keys())
.flat_map(|a| a.write_keys());
let ret3 = self.broker_write().filter(move |_| category.includes_broker_keys()).into_iter();
Box::new(ret1.chain(ret2).chain(ret3))
}
fn public_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PublicEncryptKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.group.roles.iter().flat_map(|a| a.public_read_keys()));
}
let ret1 = self.inner.public_read_keys(category);
let ret2 = self
.group
.roles
.iter()
.filter(move |_| category.includes_group_keys())
.flat_map(|a| a.public_read_keys());
let ret3 = self.broker_read().filter(move |_| category.includes_broker_keys()).map(|a| a.as_public_key()).into_iter();
Box::new(ret1.chain(ret2).chain(ret3))
}
fn private_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateEncryptKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.group.roles.iter().flat_map(|a| a.private_read_keys()));
}
let ret1 = self.inner.private_read_keys(category);
let ret2 = self
.group
.roles
.iter()
.filter(move |_| category.includes_group_keys())
.flat_map(|a| a.private_read_keys());
let ret3 = self.broker_read().filter(move |_| category.includes_broker_keys()).into_iter();
Box::new(ret1.chain(ret2).chain(ret3))
}
fn broker_read<'a>(&'a self) -> Option<&'a PrivateEncryptKey> {
self.group.broker_read.as_ref()
}
fn broker_write<'a>(&'a self) -> Option<&'a PrivateSignKey> {
self.group.broker_write.as_ref()
}
fn identity<'a>(&'a self) -> &'a str {
self.group.name.as_str()
}
fn user<'a>(&'a self) -> &'a AteSessionUser {
self.inner.user()
}
fn user_mut<'a>(&'a mut self) -> &'a mut AteSessionUser {
self.inner.user_mut()
}
fn uid<'a>(&'a self) -> Option<u32> {
self.inner.uid()
}
fn gid<'a>(&'a self) -> Option<u32> {
if let Some(gid) = self.group.roles.iter().flat_map(|a| a.gid()).next() {
return Some(gid);
}
self.inner.gid()
}
fn clone_session(&self) -> Box<dyn AteSession> {
Box::new(self.clone())
}
fn clone_inner(&self) -> AteSessionInner {
self.inner.clone()
}
fn properties<'a>(&'a self) -> Box<dyn Iterator<Item = &'a AteSessionProperty> + 'a> {
let ret1 = self.inner.properties();
let ret2 = self.group.roles.iter().flat_map(|a| a.properties.iter());
Box::new(ret1.chain(ret2))
}
fn append<'a, 'b>(
&'a mut self,
properties: Box<dyn Iterator<Item = &'b AteSessionProperty> + 'b>,
) {
self.inner.append(properties);
}
}
impl Deref for AteSessionGroup {
type Target = AteSessionInner;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for AteSessionGroup {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl std::fmt::Display for AteSessionGroup {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "[inner=")?;
self.inner.fmt(f)?;
write!(f, ",group=")?;
self.group.fmt(f)?;
write!(f, "]")
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/session/session_trait.rs | lib/src/session/session_trait.rs | #[allow(unused_imports)]
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use super::session_sudo::*;
use super::session_user::*;
use super::AteGroupRole;
use super::AteRolePurpose;
use super::AteSessionGroup;
use super::AteSessionInner;
use super::AteSessionProperty;
use super::AteSessionType;
use crate::crypto::*;
pub type SessionToken = Option<EncryptedSecureData<EncryptKey>>;
/// Sessions hold facts about the user that give them certains
/// rights and abilities to view data within the chain-of-trust.
///
/// For instance - to see encrypted data for specific users they
/// must insert their `EncryptKey` into this session before
/// accessing the chain via a `Dio`.
///
/// Another example is the ability to write data. For certain
/// records within the tree if they have been protected with
/// write protection then you must insert your `PrivateKey`
/// into the session before you attempt to insert or modify these
/// records.
///
/// Sessions are never cached and only exist in memory for the
/// duration that you use them for security reasons.
pub trait AteSession: Send + Sync + std::fmt::Display {
fn role<'a>(&'a self, purpose: &AteRolePurpose) -> Option<&'a AteGroupRole>;
fn read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a EncryptKey> + 'a>;
fn write_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateSignKey> + 'a>;
fn public_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PublicEncryptKey> + 'a>;
fn private_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateEncryptKey> + 'a>;
fn broker_read<'a>(&'a self) -> Option<&'a PrivateEncryptKey>;
fn broker_write<'a>(&'a self) -> Option<&'a PrivateSignKey>;
fn identity<'a>(&'a self) -> &'a str;
fn user<'a>(&'a self) -> &'a AteSessionUser;
fn user_mut<'a>(&'a mut self) -> &'a mut AteSessionUser;
fn uid<'a>(&'a self) -> Option<u32>;
fn gid<'a>(&'a self) -> Option<u32>;
fn properties<'a>(&'a self) -> Box<dyn Iterator<Item = &'a AteSessionProperty> + 'a>;
fn append<'a, 'b>(
&'a mut self,
properties: Box<dyn Iterator<Item = &'b AteSessionProperty> + 'b>,
);
fn clone_session(&self) -> Box<dyn AteSession>;
fn clone_inner(&self) -> AteSessionInner;
}
impl std::fmt::Debug
for dyn AteSession
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
std::fmt::Display::fmt(self, f)
}
}
impl From<AteSessionUser> for Box<dyn AteSession> {
fn from(session: AteSessionUser) -> Self {
Box::new(session)
}
}
impl From<AteSessionSudo> for Box<dyn AteSession> {
fn from(session: AteSessionSudo) -> Self {
Box::new(session)
}
}
impl From<AteSessionGroup> for Box<dyn AteSession> {
fn from(session: AteSessionGroup) -> Self {
Box::new(session)
}
}
impl From<AteSessionInner> for Box<dyn AteSession> {
fn from(session: AteSessionInner) -> Self {
Box::new(session)
}
}
impl From<AteSessionType> for Box<dyn AteSession> {
fn from(session: AteSessionType) -> Self {
Box::new(session)
}
}
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum AteSessionKeyCategory {
UpperKeys,
UserKeys,
SudoKeys,
GroupKeys,
BrokerKeys,
NonGroupKeys,
AllKeys,
}
impl AteSessionKeyCategory {
pub fn includes_user_keys(&self) -> bool {
match self {
AteSessionKeyCategory::UserKeys => true,
AteSessionKeyCategory::NonGroupKeys => true,
AteSessionKeyCategory::AllKeys => true,
_ => false,
}
}
pub fn includes_sudo_keys(&self) -> bool {
match self {
AteSessionKeyCategory::SudoKeys => true,
AteSessionKeyCategory::NonGroupKeys => true,
AteSessionKeyCategory::AllKeys => true,
_ => false,
}
}
pub fn includes_group_keys(&self) -> bool {
match self {
AteSessionKeyCategory::GroupKeys => true,
AteSessionKeyCategory::AllKeys => true,
_ => false,
}
}
pub fn includes_broker_keys(&self) -> bool {
match self {
AteSessionKeyCategory::BrokerKeys => true,
AteSessionKeyCategory::AllKeys => true,
_ => false,
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/session/session_sudo.rs | lib/src/session/session_sudo.rs | #[allow(unused_imports)]
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::ops::Deref;
use std::ops::DerefMut;
use crate::crypto::*;
use super::*;
/// Sudo sessions are elevated permissions used to carry out
/// high priveledge actions
///
/// Sessions are never cached and only exist in memory for the
/// duration that you use them for security reasons.
#[allow(dead_code)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct AteSessionSudo {
pub inner: AteSessionUser,
pub sudo: AteGroupRole,
}
impl Default for AteSessionSudo {
fn default() -> AteSessionSudo {
AteSessionSudo {
inner: AteSessionUser::default(),
sudo: AteGroupRole {
purpose: AteRolePurpose::Owner,
properties: Vec::new(),
},
}
}
}
impl AteSessionSudo {
pub fn new() -> AteSessionSudo {
AteSessionSudo::default()
}
pub fn add_sudo_read_key(&mut self, key: &EncryptKey) {
self.sudo.add_read_key(key)
}
pub fn add_sudo_private_read_key(&mut self, key: &PrivateEncryptKey) {
self.sudo.add_private_read_key(key)
}
pub fn add_sudo_write_key(&mut self, key: &PrivateSignKey) {
self.sudo.add_write_key(key)
}
}
impl AteSession for AteSessionSudo {
fn role<'a>(&'a self, _purpose: &AteRolePurpose) -> Option<&'a AteGroupRole> {
None
}
fn read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a EncryptKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.sudo.read_keys());
}
let ret1 = self.inner.read_keys(category);
let ret2 = self
.sudo
.read_keys()
.filter(move |_| category.includes_sudo_keys());
Box::new(ret1.chain(ret2))
}
fn write_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateSignKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.sudo.write_keys());
}
let ret1 = self.inner.write_keys(category);
let ret2 = self
.sudo
.write_keys()
.filter(move |_| category.includes_sudo_keys());
let ret3 = self.broker_write().filter(|_| category.includes_broker_keys()).into_iter();
Box::new(ret1.chain(ret2).chain(ret3))
}
fn public_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PublicEncryptKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.sudo.public_read_keys());
}
let ret1 = self.inner.public_read_keys(category);
let ret2 = self
.sudo
.public_read_keys()
.filter(move |_| category.includes_sudo_keys());
let ret3 = self.broker_read().filter(move |_| category.includes_broker_keys()).map(|a| a.as_public_key()).into_iter();
Box::new(ret1.chain(ret2).chain(ret3))
}
fn private_read_keys<'a>(
&'a self,
category: AteSessionKeyCategory,
) -> Box<dyn Iterator<Item = &'a PrivateEncryptKey> + 'a> {
if category == AteSessionKeyCategory::UpperKeys {
return Box::new(self.sudo.private_read_keys());
}
let ret1 = self.inner.private_read_keys(category);
let ret2 = self
.sudo
.private_read_keys()
.filter(move |_| category.includes_sudo_keys());
let ret3 = self.broker_read().filter(move |_| category.includes_broker_keys()).into_iter();
Box::new(ret1.chain(ret2).chain(ret3))
}
fn broker_read<'a>(&'a self) -> Option<&'a PrivateEncryptKey> {
self.inner.broker_read()
}
fn broker_write<'a>(&'a self) -> Option<&'a PrivateSignKey> {
self.inner.broker_write()
}
fn identity<'a>(&'a self) -> &'a str {
self.inner.identity.as_str()
}
fn user<'a>(&'a self) -> &'a AteSessionUser {
self.inner.user()
}
fn user_mut<'a>(&'a mut self) -> &'a mut AteSessionUser {
self.inner.user_mut()
}
fn uid<'a>(&'a self) -> Option<u32> {
self.inner.uid()
}
fn gid<'a>(&'a self) -> Option<u32> {
self.inner.gid()
}
fn clone_session(&self) -> Box<dyn AteSession> {
Box::new(self.clone())
}
fn clone_inner(&self) -> AteSessionInner {
AteSessionInner::Sudo(self.clone())
}
fn properties<'a>(&'a self) -> Box<dyn Iterator<Item = &'a AteSessionProperty> + 'a> {
let ret1 = self.inner.properties();
let ret2 = self.sudo.properties.iter();
Box::new(ret1.chain(ret2))
}
fn append<'a, 'b>(
&'a mut self,
properties: Box<dyn Iterator<Item = &'b AteSessionProperty> + 'b>,
) {
self.inner.append(properties);
}
}
impl Deref for AteSessionSudo {
type Target = AteSessionUser;
fn deref(&self) -> &Self::Target {
&self.inner
}
}
impl DerefMut for AteSessionSudo {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.inner
}
}
impl std::fmt::Display for AteSessionSudo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "[user=")?;
self.inner.user.fmt(f)?;
write!(f, ",sudo=")?;
self.sudo.fmt(f)?;
write!(f, "]")
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/trust/tests.rs | lib/src/trust/tests.rs | #![cfg(test)]
use bytes::Bytes;
use std::sync::Arc;
use std::time::Duration;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::chain::*;
use crate::compact::*;
use crate::conf::*;
use crate::crypto::*;
use crate::error::*;
use crate::event::*;
use crate::header::*;
use crate::lint::*;
use crate::spec::*;
use crate::transaction::*;
use crate::transform::*;
use crate::validator::*;
use super::*;
pub(crate) async fn create_test_chain(
mock_cfg: &mut ConfAte,
chain_name: String,
temp: bool,
barebone: bool,
root_public_key: Option<PublicSignKey>,
) -> (Arc<Chain>, Arc<ChainBuilder>) {
// Create the chain-of-trust and a validator
let mock_chain_key = match temp {
true => ChainKey::default().with_temp_name(chain_name),
false => ChainKey::default().with_name(chain_name),
};
let mut builder = match barebone {
true => {
mock_cfg.configured_for(ConfiguredFor::Barebone);
mock_cfg.log_format.meta = SerializationFormat::Bincode;
mock_cfg.log_format.data = SerializationFormat::Json;
ChainBuilder::new(&mock_cfg)
.await
.add_validator(Box::new(RubberStampValidator::default()))
.add_data_transformer(Box::new(StaticEncryptionTransformer::new(
&EncryptKey::from_seed_string("test".to_string(), KeySize::Bit192),
)))
.add_metadata_linter(Box::new(EventAuthorLinter::default()))
}
false => {
mock_cfg.configured_for(ConfiguredFor::Balanced);
mock_cfg.log_format.meta = SerializationFormat::Json;
mock_cfg.log_format.data = SerializationFormat::Json;
ChainBuilder::new(&mock_cfg).await
}
};
if let Some(key) = root_public_key {
builder = builder.add_root_public_key(&key);
}
let builder = builder.build();
(builder.open(&mock_chain_key).await.unwrap(), builder)
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_chain() -> Result<(), AteError> {
crate::utils::bootstrap_test_env();
let key1 = PrimaryKey::generate();
let key2 = PrimaryKey::generate();
#[allow(unused_variables)]
let chain_name;
#[cfg(not(feature = "enable_local_fs"))]
#[allow(unused_variables, unused_assignments)]
let mut stored_chain = None;
{
info!("creating test chain");
let mut mock_cfg = crate::conf::tests::mock_test_config();
mock_cfg.compact_mode = CompactMode::Never;
let (chain, _builder) =
create_test_chain(&mut mock_cfg, "test_chain".to_string(), true, true, None).await;
chain_name = chain.name().await;
info!("chain-name: {}", chain_name);
{
let lock = chain.multi().await;
assert_eq!(0, lock.count().await);
let evt1 = EventWeakData::new(key1.clone(), Bytes::from(vec![1; 1]), mock_cfg.log_format);
let evt2 = EventWeakData::new(key2.clone(), Bytes::from(vec![2; 1]), mock_cfg.log_format);
// Push the first events into the chain-of-trust
let mut evts = Vec::new();
evts.push(evt1);
evts.push(evt2);
info!("feeding two events into the chain");
let trans = Transaction::from_events(
evts,
TransactionScope::Local,
false,
Duration::from_secs(30),
);
lock.pipe
.feed(ChainWork { trans })
.await
.expect("The event failed to be accepted");
drop(lock);
assert_eq!(2, chain.count().await);
}
{
let lock = chain.multi().await;
// Make sure its there in the chain
info!("checking event1 is in the chain");
let test_data = lock
.lookup_primary(&key1)
.await
.expect("Failed to find the entry after the flip");
let test_data = lock
.load(test_data.clone())
.await
.expect("Could not load the data for the entry");
assert_eq!(test_data.data.data_bytes, Some(Bytes::from(vec!(1; 1))));
// The other event we added should also still be there
info!("checking event2 is in the chain");
let test_data = lock
.lookup_primary(&key2)
.await
.expect("Failed to find the entry after the compact");
let test_data = lock.load(test_data.clone()).await?;
assert_eq!(test_data.data.data_bytes, Some(Bytes::from(vec!(2; 1))));
}
// Fliush the chain
chain.flush().await?;
// Store the chain if we are in memory mode as there is no persistence
#[cfg(not(feature = "enable_local_fs"))]
{
stored_chain = Some(chain);
}
}
{
// Reload the chain from disk and check its integrity
info!("reloading the chain");
let mut mock_cfg = crate::conf::tests::mock_test_config();
mock_cfg.compact_mode = CompactMode::Never;
#[cfg(feature = "enable_local_fs")]
let (chain, _builder) =
create_test_chain(&mut mock_cfg, chain_name.clone(), false, true, None).await;
#[cfg(not(feature = "enable_local_fs"))]
let chain = stored_chain.take().unwrap();
{
let lock = chain.multi().await;
// Make sure its there in the chain
info!("checking event1 is in the chain");
let test_data = lock
.lookup_primary(&key1)
.await
.expect("Failed to find the entry after the reload");
let test_data = lock
.load(test_data.clone())
.await
.expect("Could not load the data for the entry");
assert_eq!(test_data.data.data_bytes, Some(Bytes::from(vec!(1; 1))));
// The other event we added should also still be there
info!("checking event2 is in the chain");
let test_data = lock
.lookup_primary(&key2)
.await
.expect("Failed to find the entry after the reload");
let test_data = lock.load(test_data.clone()).await.unwrap();
assert_eq!(test_data.data.data_bytes, Some(Bytes::from(vec!(2; 1))));
// Duplicate one of the event so the compactor has something to clean
let evt1 = EventWeakData::new(key1.clone(), Bytes::from(vec![10; 1]), mock_cfg.log_format);
info!("feeding new version of event1 into the chain");
let mut evts = Vec::new();
evts.push(evt1);
let trans = Transaction::from_events(
evts,
TransactionScope::Local,
false,
Duration::from_secs(30),
);
lock.pipe
.feed(ChainWork { trans })
.await
.expect("The event failed to be accepted");
drop(lock);
assert_eq!(3, chain.count().await);
}
// Now compact the chain-of-trust which should reduce the duplicate event
info!("compacting the log and checking the counts");
assert_eq!(3, chain.count().await);
chain.compact().await.expect("Failed to compact the log");
assert_eq!(2, chain.count().await);
{
let lock = chain.multi().await;
// Read the event and make sure its the second one that results after compaction
info!("checking event1 is in the chain");
let test_data = lock
.lookup_primary(&key1)
.await
.expect("Failed to find the entry after the compact");
let test_data = lock.load(test_data.clone()).await?;
assert_eq!(test_data.data.data_bytes, Some(Bytes::from(vec!(10; 1))));
// The other event we added should also still be there
info!("checking event2 is in the chain");
let test_data = lock
.lookup_primary(&key2)
.await
.expect("Failed to find the entry after the compact");
let test_data = lock.load(test_data.clone()).await?;
assert_eq!(test_data.data.data_bytes, Some(Bytes::from(vec!(2; 1))));
}
// Store the chain if we are in memory mode as there is no persistence
#[cfg(not(feature = "enable_local_fs"))]
{
stored_chain = Some(chain);
}
}
{
// Reload the chain from disk and check its integrity
info!("reloading the chain");
let mut mock_cfg = crate::conf::tests::mock_test_config();
mock_cfg.compact_mode = CompactMode::Never;
#[cfg(feature = "enable_local_fs")]
let (chain, _builder) =
create_test_chain(&mut mock_cfg, chain_name.clone(), false, true, None).await;
#[cfg(not(feature = "enable_local_fs"))]
let chain = stored_chain.take().unwrap();
assert_eq!(2, chain.count().await);
{
let lock = chain.multi().await;
// Read the event and make sure its the second one that results after compaction
info!("checking event1 is in the chain");
let test_data = lock
.lookup_primary(&key1)
.await
.expect("Failed to find the entry after the compact");
let test_data = lock.load(test_data.clone()).await?;
assert_eq!(test_data.data.data_bytes, Some(Bytes::from(vec!(10; 1))));
// The other event we added should also still be there
info!("checking event2 is in the chain");
let test_data = lock
.lookup_primary(&key2)
.await
.expect("Failed to find the entry after the compact");
let test_data = lock.load(test_data.clone()).await?;
assert_eq!(test_data.data.data_bytes, Some(Bytes::from(vec!(2; 1))));
}
{
let lock = chain.multi().await;
// Now lets tombstone the second event
info!("tombstoning event2");
let mut evt3 = EventWeakData::barebone(mock_cfg.log_format);
evt3.meta.add_tombstone(key2);
info!("feeding the tombstone into the chain");
let mut evts = Vec::new();
evts.push(evt3.clone());
let trans = Transaction::from_events(
evts,
TransactionScope::Local,
false,
Duration::from_secs(30),
);
lock.pipe
.feed(ChainWork { trans })
.await
.expect("The event failed to be accepted");
// Number of events should have gone up by one even though there should be one less item
drop(lock);
assert_eq!(3, chain.count().await);
}
// Searching for the item we should not find it
info!("checking event2 is gone from the chain");
match chain.multi().await.lookup_primary(&key2).await {
Some(_) => panic!("The item should not be visible anymore"),
None => {}
}
// Now compact the chain-of-trust which should remove one of the events and its tombstone
info!("compacting the chain");
let before = chain.count().await;
chain.compact().await.expect("Failed to compact the log");
let after = chain.count().await;
assert_eq!(
1,
chain.count().await,
"failed - before: {} - after: {}",
before,
after
);
// Store the chain if we are in memory mode as there is no persistence
#[cfg(not(feature = "enable_local_fs"))]
{
stored_chain = Some(chain);
}
}
{
// Reload the chain from disk and check its integrity
info!("reloading the chain");
let mut mock_cfg = crate::conf::tests::mock_test_config();
mock_cfg.compact_mode = CompactMode::Never;
#[cfg(feature = "enable_local_fs")]
let (chain, _builder) =
create_test_chain(&mut mock_cfg, chain_name.clone(), false, true, None).await;
#[cfg(not(feature = "enable_local_fs"))]
let chain = stored_chain.take().unwrap();
{
let lock = chain.multi().await;
// Read the event and make sure its the second one that results after compaction
info!("checking event1 is in the chain");
let test_data = lock
.lookup_primary(&key1)
.await
.expect("Failed to find the entry after we reloaded the chain");
let test_data = lock.load(test_data).await?;
assert_eq!(test_data.data.data_bytes, Some(Bytes::from(vec!(10; 1))));
}
// Destroy the chain
info!("destroying the chain");
chain.single().await.destroy().await?;
}
Ok(())
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/trust/header.rs | lib/src/trust/header.rs | use serde::*;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::time::ChainTimestamp;
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ChainHeader {
pub cut_off: ChainTimestamp,
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/trust/chain_ref.rs | lib/src/trust/chain_ref.rs | use serde::{Deserialize, Serialize};
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use url::Url;
use super::ChainKey;
/// Unique reference to a particular chain-of-trust. The design must
/// partition their data space into seperate chains to improve scalability
/// and performance as a single chain will reside on a single node within
/// the cluster.
#[allow(dead_code)]
#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct ChainRef {
pub url: Url,
pub key: ChainKey,
}
impl std::fmt::Display for ChainRef {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}/{}", self.url, self.key)
}
}
impl ChainRef {
pub fn new(url: url::Url, key: ChainKey) -> ChainRef {
ChainRef { url: url, key: key }
}
pub fn to_string(&self) -> String {
format!("{}/{}", self.url, self.key)
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/trust/chain_of_trust.rs | lib/src/trust/chain_of_trust.rs | use std::sync::Arc;
use std::sync::Mutex as StdMutex;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use bytes::Bytes;
use crate::crypto::AteHash;
use crate::comms::Metrics;
use crate::error::*;
use crate::event::*;
use crate::header::*;
use crate::index::*;
use crate::meta::*;
use crate::redo::*;
use super::*;
pub(crate) struct ChainOfTrust {
pub(crate) debug_id: u64,
pub(crate) key: ChainKey,
pub(crate) timeline: ChainTimeline,
pub(crate) redo: RedoLog,
pub metrics: Arc<StdMutex<Metrics>>,
}
impl std::fmt::Debug for ChainOfTrust {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"chain-of-trust(key={},debug_id={})",
self.key, self.debug_id
)
}
}
impl<'a> ChainOfTrust {
pub(crate) async fn load(&self, leaf: EventLeaf) -> Result<LoadWeakResult, LoadError> {
#[cfg(feature = "enable_verbose")]
debug!("loading: {}", leaf.record);
let data = self.redo.load(leaf.record).await?;
Ok(LoadWeakResult {
lookup: data.lookup,
header: data.header,
data: data.data,
leaf: leaf,
})
}
pub(crate) async fn load_many(
&self,
leafs: Vec<EventLeaf>,
) -> Result<Vec<LoadWeakResult>, LoadError> {
let mut ret = Vec::new();
let mut futures = Vec::new();
for leaf in leafs.into_iter() {
let data = self.redo.load(leaf.record);
futures.push((data, leaf));
}
for (join, leaf) in futures.into_iter() {
let data = join.await?;
ret.push(LoadWeakResult {
lookup: data.lookup,
header: data.header,
data: data.data,
leaf,
});
}
Ok(ret)
}
pub(crate) fn prime(&mut self, records: Vec<(AteHash, Option<Bytes>)>) {
self.redo.prime(records);
}
pub(crate) fn lookup_primary(&self, key: &PrimaryKey) -> Option<EventLeaf> {
self.timeline.lookup_primary(key)
}
pub(crate) fn lookup_parent(&self, key: &PrimaryKey) -> Option<MetaParent> {
self.timeline.lookup_parent(key)
}
pub(crate) fn lookup_secondary(&self, key: &MetaCollection) -> Option<Vec<EventLeaf>> {
self.timeline.lookup_secondary(key)
}
pub(crate) fn lookup_secondary_raw(&self, key: &MetaCollection) -> Option<Vec<PrimaryKey>> {
self.timeline.lookup_secondary_raw(key)
}
pub(crate) fn roots_raw(&self) -> Vec<PrimaryKey> {
self.timeline.roots_raw()
}
pub(crate) fn invalidate_caches(&mut self) {
self.timeline.invalidate_caches();
}
pub(crate) async fn flush(&mut self) -> Result<(), tokio::io::Error> {
self.redo.flush().await
}
pub(crate) async fn destroy(&mut self) -> Result<(), tokio::io::Error> {
self.invalidate_caches();
self.redo.destroy()
}
pub(crate) fn name(&self) -> String {
self.key.name.clone()
}
pub(crate) fn add_history(&mut self, header: EventHeader) {
{
let mut metrics = self.metrics.lock().unwrap();
metrics.chain_size += header.raw.meta_bytes.len() as u64;
metrics.chain_size += header.raw.data_size as u64;
}
self.timeline.add_history(header)
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/trust/mod.rs | lib/src/trust/mod.rs | pub mod chain_of_trust;
pub mod chain_ref;
pub mod header;
pub mod load_result;
pub mod tests;
pub mod timeline;
#[allow(unused_imports)]
#[cfg(test)]
pub(crate) use tests::*;
pub(crate) use chain_of_trust::*;
pub(crate) use timeline::*;
pub use chain_ref::*;
pub use header::*;
pub use load_result::*;
pub use ate_crypto::ChainKey;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/trust/timeline.rs | lib/src/trust/timeline.rs | use btreemultimap::BTreeMultiMap;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::compact::*;
use crate::event::*;
use crate::header::*;
use crate::index::*;
use crate::meta::*;
use crate::time::*;
pub(crate) struct ChainTimeline {
pub(crate) history: BTreeMultiMap<ChainTimestamp, EventHeaderRaw>,
pub(crate) pointers: BinaryTreeIndexer,
pub(crate) compactors: Vec<Box<dyn EventCompactor>>,
}
impl<'a> ChainTimeline {
pub(crate) fn lookup_primary(&self, key: &PrimaryKey) -> Option<EventLeaf> {
self.pointers.lookup_primary(key)
}
pub(crate) fn lookup_parent(&self, key: &PrimaryKey) -> Option<MetaParent> {
self.pointers.lookup_parent(key)
}
pub(crate) fn lookup_secondary(&self, key: &MetaCollection) -> Option<Vec<EventLeaf>> {
self.pointers.lookup_secondary(key)
}
pub(crate) fn lookup_secondary_raw(&self, key: &MetaCollection) -> Option<Vec<PrimaryKey>> {
self.pointers.lookup_secondary_raw(key)
}
pub(crate) fn roots_raw(&self) -> Vec<PrimaryKey> {
self.pointers.roots_raw()
}
pub(crate) fn invalidate_caches(&mut self) {}
pub(crate) fn add_history(&mut self, header: EventHeader) {
self.pointers.feed(&header);
let raw = header.raw;
#[cfg(feature = "enable_super_verbose")]
trace!("add_history::evt[{}]", header.meta);
let timestamp = match header.meta.get_timestamp() {
Some(a) => a.clone(),
None => match self.history.iter().next_back() {
Some(a) => a.0.clone(),
None => ChainTimestamp::from(0u64),
},
};
if header.meta.include_in_history() {
self.history.insert(timestamp, raw);
}
}
#[allow(dead_code)]
pub(crate) fn start(&self) -> ChainTimestamp {
let last = self.history.iter().next();
match last {
Some(a) => a.0.clone(),
None => ChainTimestamp::from(0u64),
}
}
pub(crate) fn end(&self) -> ChainTimestamp {
let last = self.history.iter().next_back();
match last {
Some(a) => a.0.clone(),
None => ChainTimestamp::from(0u64),
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/trust/load_result.rs | lib/src/trust/load_result.rs | use crate::index::*;
use crate::{event::*, redo::LogLookup};
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
#[derive(Debug, Clone)]
pub struct LoadStrongResult {
#[allow(dead_code)]
pub(crate) lookup: LogLookup,
pub header: EventHeaderRaw,
pub data: EventStrongData,
pub leaf: EventLeaf,
}
#[derive(Debug, Clone)]
pub struct LoadWeakResult {
#[allow(dead_code)]
pub(crate) lookup: LogLookup,
pub header: EventHeaderRaw,
pub data: EventWeakData,
pub leaf: EventLeaf,
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/compact/compact_state.rs | lib/src/compact/compact_state.rs | use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
use std::time::Instant;
use tokio::select;
use tokio::sync::watch;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use super::CompactMode;
const GROWTH_FACTOR_IGNORE_SMALLER_THAN_SIZE: u64 = 2097152;
pub(crate) struct CompactNotifications {
pub log_size: watch::Sender<u64>,
}
pub(crate) struct CompactState {
pub mode: CompactMode,
pub log_size: watch::Receiver<u64>,
pub last_size: Arc<Mutex<u64>>,
pub last_compact: Arc<Mutex<Option<Instant>>>,
}
impl CompactState {
pub fn new(mode: CompactMode, size: u64) -> (CompactNotifications, CompactState) {
let (modified_tx, modified_rx) = watch::channel::<u64>(size);
(
CompactNotifications {
log_size: modified_tx,
},
CompactState {
mode,
log_size: modified_rx,
last_size: Arc::new(Mutex::new(size)),
last_compact: Arc::new(Mutex::new(None)),
},
)
}
pub async fn wait_for_compact(&mut self) -> Result<(), watch::error::RecvError> {
loop {
let initial_size = {
let mut guard = self.last_size.lock().unwrap();
let mut ret = *guard;
// If the size has gone backwards (likely due to compaction) then move the cursor back
let cur = *self.log_size.borrow();
if cur < ret {
*guard = cur;
ret = cur;
}
ret
};
let deadtime_compact = Arc::clone(&self.last_compact);
let deadtime = move |duration: Duration| {
let mut guard = deadtime_compact.lock().unwrap();
match *guard {
Some(a) => {
let already = a.elapsed();
if already > duration {
Duration::from_secs(0)
} else {
duration - already
}
}
None => {
*guard = Some(Instant::now());
duration
}
}
};
match self.mode {
CompactMode::Never => {
crate::engine::sleep(Duration::from_secs(u64::MAX)).await;
}
CompactMode::Timer(duration) => {
let deadtime = deadtime(duration);
crate::engine::sleep(deadtime).await;
break;
}
CompactMode::Modified => {
self.log_size.changed().await?;
break;
}
CompactMode::GrowthSize(target) => {
let final_size = *self.log_size.borrow();
if final_size > initial_size && final_size - initial_size >= target {
break;
}
self.log_size.changed().await?;
crate::engine::sleep(Duration::from_millis(10)).await;
}
CompactMode::GrowthFactor(target) => {
let target = 1.0f32 + target;
let target = (initial_size as f32 * target) as u64;
let cur = *self.log_size.borrow();
if cur >= GROWTH_FACTOR_IGNORE_SMALLER_THAN_SIZE && cur >= target {
break;
}
self.log_size.changed().await?;
crate::engine::sleep(Duration::from_millis(10)).await;
}
CompactMode::GrowthSizeOrTimer { growth, timer } => {
let final_size = *self.log_size.borrow();
if final_size > initial_size && final_size - initial_size >= growth {
break;
}
let deadtime = deadtime(timer);
select! {
a = self.log_size.changed() => {
a?;
crate::engine::sleep(Duration::from_millis(10)).await;
},
() = crate::engine::sleep(deadtime) => { break; },
};
}
CompactMode::GrowthFactorOrTimer { growth, timer } => {
let target = 1.0f32 + growth;
let target = (initial_size as f32 * target) as u64;
let cur = *self.log_size.borrow();
if cur >= GROWTH_FACTOR_IGNORE_SMALLER_THAN_SIZE && cur >= target {
break;
}
let deadtime = deadtime(timer);
select! {
a = self.log_size.changed() => {
a?;
crate::engine::sleep(Duration::from_millis(10)).await;
},
() = crate::engine::sleep(deadtime) => { break; },
};
}
}
}
*self.last_size.lock().unwrap() = *self.log_size.borrow();
*self.last_compact.lock().unwrap() = Some(Instant::now());
Ok(())
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/compact/event_compactor.rs | lib/src/compact/event_compactor.rs | #[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::event::*;
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum EventRelevance {
ForceKeep, // Force the event to be kept
Keep, // This event should be kept
Abstain, // Do not have an opinion on this event
Drop, // The event should be dropped
ForceDrop, // Force the event to drop
}
impl std::fmt::Display for EventRelevance {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
EventRelevance::ForceKeep => write!(f, "force-keep"),
EventRelevance::Keep => write!(f, "keep"),
EventRelevance::Abstain => write!(f, "abstain"),
EventRelevance::Drop => write!(f, "drop"),
EventRelevance::ForceDrop => write!(f, "force-drop"),
}
}
}
pub trait EventCompactor: Send + Sync {
// Decision making time - in order of back to front we now decide if we keep or drop an event
fn relevance(&self, _header: &EventHeader) -> EventRelevance {
EventRelevance::Abstain
}
fn feed(&mut self, _header: &EventHeader, _keep: bool) {}
fn clone_compactor(&self) -> Option<Box<dyn EventCompactor>>;
fn name(&self) -> &str {
"unnamed-compactor"
}
}
pub fn compute_relevance<'a>(
compactors: impl Iterator<Item = &'a Box<dyn EventCompactor>>,
header: &EventHeader,
) -> bool {
// Determine if we should drop of keep the value
let mut is_force_keep = false;
let mut is_keep = false;
let mut is_drop = false;
let mut is_force_drop = false;
for compactor in compactors {
let relevance = compactor.relevance(&header);
#[cfg(feature = "enable_verbose")]
if relevance != EventRelevance::Abstain
&& relevance != EventRelevance::ForceKeep
&& relevance != EventRelevance::Keep
{
debug!("{} on {} for {}", relevance, compactor.name(), header.meta);
}
match relevance {
EventRelevance::ForceKeep => is_force_keep = true,
EventRelevance::Keep => is_keep = true,
EventRelevance::Drop => is_drop = true,
EventRelevance::ForceDrop => is_force_drop = true,
EventRelevance::Abstain => {}
}
}
#[cfg(feature = "enable_verbose")]
if is_force_keep == false && is_keep == false && is_drop == false && is_force_drop == false {
debug!("abstain-all for {}", header.meta);
}
// Keep takes priority over drop and force takes priority over nominal indicators
// (default is to drop unless someone indicates we should keep it)
match is_force_keep {
true => true,
false if is_force_drop == true => false,
_ if is_keep == true => true,
_ if is_drop == true => false,
_ => false,
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/compact/tests.rs | lib/src/compact/tests.rs | #![cfg(test)]
use std::time::Duration;
use std::time::Instant;
use crate::engine::sleep;
use crate::engine::timeout;
use crate::engine::TaskEngine;
use crate::error::*;
use super::*;
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_compact_state_machine() -> Result<(), AteError> {
crate::utils::bootstrap_test_env();
async move {
// Test the never trigger (negative)
let (tx, mut rx) = CompactState::new(CompactMode::Never, 0);
let wait = rx.wait_for_compact();
tx.log_size.send(100u64)?;
timeout(Duration::from_millis(20), wait)
.await
.expect_err("The never event should never be triggered");
// Test the timer trigger (negative)
let (_tx, mut rx) = CompactState::new(CompactMode::Timer(Duration::from_millis(100)), 0);
let wait = rx.wait_for_compact();
timeout(Duration::from_millis(50), wait)
.await
.expect_err("The timer event should not be triggered");
// Test the timer trigger (positive)
let (_tx, mut rx) = CompactState::new(CompactMode::Timer(Duration::from_millis(100)), 0);
let wait = rx.wait_for_compact();
let start = Instant::now();
timeout(Duration::from_millis(150), wait)
.await
.expect("This should not timeout")?;
let elapsed = start.elapsed();
assert!(
elapsed > Duration::from_millis(100),
"The timer should have waited 100 milliseconds"
);
// Test the modify trigger (negative)
let (_tx, mut rx) = CompactState::new(CompactMode::Modified, 0);
let wait = rx.wait_for_compact();
timeout(Duration::from_millis(20), wait)
.await
.expect_err("The modify event should not be triggered");
// Test the modify trigger (positive)
let (tx, mut rx) = CompactState::new(CompactMode::Modified, 0);
let wait = rx.wait_for_compact();
TaskEngine::spawn(async move {
sleep(Duration::from_millis(10)).await;
let _ = tx.log_size.send(100u64);
tx
});
timeout(Duration::from_millis(100), wait)
.await
.expect("This should not timeout")?;
// Test the growth size trigger (negative I)
let (_tx, mut rx) = CompactState::new(CompactMode::GrowthSize(500), 1000);
let wait = rx.wait_for_compact();
timeout(Duration::from_millis(20), wait)
.await
.expect_err("The growth size event should not be triggered");
// Test the growth size trigger (negative II)
let (tx, mut rx) = CompactState::new(CompactMode::GrowthSize(500), 1000);
let wait = rx.wait_for_compact();
tx.log_size.send(1000u64)?;
TaskEngine::spawn(async move {
sleep(Duration::from_millis(10)).await;
let _ = tx.log_size.send(1100u64);
sleep(Duration::from_millis(100)).await;
});
timeout(Duration::from_millis(50), wait)
.await
.expect_err("The growth size event should not be triggered");
// Test the growth size trigger (positive)
let (tx, mut rx) = CompactState::new(CompactMode::GrowthSize(500), 1000);
let wait = rx.wait_for_compact();
TaskEngine::spawn(async move {
sleep(Duration::from_millis(10)).await;
let _ = tx.log_size.send(1400u64);
let _ = tx.log_size.send(1600u64);
tx
});
timeout(Duration::from_millis(100), wait)
.await
.expect("This should not timeout")?;
// Test the growth or timer trigger (negative I)
let (_tx, mut rx) = CompactState::new(
CompactMode::GrowthSizeOrTimer {
growth: 500,
timer: Duration::from_millis(100),
},
1000,
);
let wait = rx.wait_for_compact();
timeout(Duration::from_millis(20), wait)
.await
.expect_err("The growth or timer event should not be triggered");
// Test the growth or timer trigger (positive I via timer)
let (_tx, mut rx) = CompactState::new(
CompactMode::GrowthSizeOrTimer {
growth: 500,
timer: Duration::from_millis(100),
},
1000,
);
let wait = rx.wait_for_compact();
let start = Instant::now();
timeout(Duration::from_millis(150), wait)
.await
.expect("This growth or timer event should not have timeed out")?;
let elapsed = start.elapsed();
assert!(
elapsed > Duration::from_millis(100),
"The growth or timer event should have waited 100 milliseconds"
);
// Test the growth of timer trigger (positive II via growth)
let (tx, mut rx) = CompactState::new(
CompactMode::GrowthSizeOrTimer {
growth: 500,
timer: Duration::from_millis(100),
},
1000,
);
let wait = rx.wait_for_compact();
let start = Instant::now();
TaskEngine::spawn(async move {
sleep(Duration::from_millis(10)).await;
let _ = tx.log_size.send(2000u64);
tx
});
timeout(Duration::from_millis(50), wait)
.await
.expect("This growth or timer event should have triggered")?;
let elapsed = start.elapsed();
assert!(
elapsed < Duration::from_millis(100),
"The growth or timer event should not have waited 100 milliseconds"
);
Ok(())
}
.await
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/compact/indecisive_compactor.rs | lib/src/compact/indecisive_compactor.rs | use crate::event::*;
use super::*;
#[derive(Default, Clone)]
pub struct IndecisiveCompactor {}
impl EventCompactor for IndecisiveCompactor {
fn clone_compactor(&self) -> Option<Box<dyn EventCompactor>> {
Some(Box::new(self.clone()))
}
fn relevance(&self, _: &EventHeader) -> EventRelevance {
EventRelevance::Abstain
}
fn name(&self) -> &str {
"indecisive-compactor"
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/compact/compact_mode.rs | lib/src/compact/compact_mode.rs | use std::time::Duration;
/// # Compaction State Machine
///
/// State machine that will trigger a compaction only when a particular set
/// of states has been reached.
// Specifies when a compaction event on a chain will occur.
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum CompactMode {
// Compaction will never occur which effectivily means this chain is immutable
Never,
// Comapction will be triggered when the chain is modified in any way
Modified,
// Compaction will occur whenever a timer duration has been reached
Timer(Duration),
// Compaction will occur whenever growth exceeds a particular percentage
GrowthFactor(f32),
// Compaction will occur whenever growth exceeds a particular percentage or the timer is triggered
GrowthFactorOrTimer { growth: f32, timer: Duration },
// Compaction will occur whever the chain size increases by a certain absolute amount in bytes
GrowthSize(u64),
// Compaction will occur whever the chain size increases by a certain absolute amount in bytes or the timer is triggered
GrowthSizeOrTimer { growth: u64, timer: Duration },
}
impl CompactMode {
pub fn with_timer_value(self: Self, val: Duration) -> Self {
match self {
CompactMode::Timer(_) => CompactMode::Timer(val),
CompactMode::GrowthFactorOrTimer {
growth,
timer: _timer,
} => CompactMode::GrowthFactorOrTimer { growth, timer: val },
CompactMode::GrowthSizeOrTimer {
growth,
timer: _timer,
} => CompactMode::GrowthSizeOrTimer { growth, timer: val },
a => a,
}
}
pub fn with_growth_factor(self: Self, val: f32) -> Self {
match self {
CompactMode::GrowthFactor(_) => CompactMode::GrowthFactor(val),
CompactMode::GrowthFactorOrTimer {
growth: _growth,
timer,
} => CompactMode::GrowthFactorOrTimer { growth: val, timer },
a => a,
}
}
pub fn with_growth_size(self: Self, val: u64) -> Self {
match self {
CompactMode::GrowthSize(_) => CompactMode::GrowthSize(val),
CompactMode::GrowthSizeOrTimer {
growth: _growth,
timer,
} => CompactMode::GrowthSizeOrTimer { growth: val, timer },
a => a,
}
}
}
impl std::fmt::Display for CompactMode {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
match self {
CompactMode::Never => write!(f, "never"),
CompactMode::Modified => write!(f, "modified"),
CompactMode::Timer(a) => write!(f, "timer({}ms)", a.as_millis()),
CompactMode::GrowthFactor(a) => write!(f, "factor({})", a),
CompactMode::GrowthFactorOrTimer { growth, timer } => {
write!(f, "factor({})-or-timer({}ms)", growth, timer.as_millis())
}
CompactMode::GrowthSize(a) => write!(f, "size({})", a),
CompactMode::GrowthSizeOrTimer { growth, timer } => {
write!(f, "size({})-or-timer({}ms)", growth, timer.as_millis())
}
}
}
}
impl std::str::FromStr for CompactMode {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"never" => Ok(CompactMode::Never),
"immutable" => Ok(CompactMode::Never),
"modified" => Ok(CompactMode::Modified),
"timer" => Ok(CompactMode::Timer(Duration::from_secs(3600))),
"factor" => Ok(CompactMode::GrowthFactor(0.2f32)),
"size" => Ok(CompactMode::GrowthSize(104857600)),
"factor-or-timer" => Ok(CompactMode::GrowthFactorOrTimer { growth: 0.2f32, timer: Duration::from_secs(3600) }),
"size-or-timer" => Ok(CompactMode::GrowthSizeOrTimer { growth: 104857600, timer: Duration::from_secs(3600) }),
_ => Err("valid values are 'never', 'modified', 'timer', 'factor', 'size', 'factor-or-timer', 'size-or-timer'"),
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/compact/remove_duplicates.rs | lib/src/compact/remove_duplicates.rs | use fxhash::FxHashSet;
use crate::crypto::AteHash;
use crate::event::*;
use crate::{header::*, meta::MetaAuthorization};
use super::*;
#[derive(Debug, Clone, Hash, Eq, PartialEq)]
pub struct UniqueEvent {
key: PrimaryKey,
auth: Option<MetaAuthorization>,
}
#[derive(Default, Clone)]
pub struct RemoveDuplicatesCompactor {
keep: FxHashSet<AteHash>,
drop: FxHashSet<AteHash>,
already: FxHashSet<UniqueEvent>,
parent_override: FxHashSet<PrimaryKey>,
}
impl EventCompactor for RemoveDuplicatesCompactor {
fn clone_compactor(&self) -> Option<Box<dyn EventCompactor>> {
Some(Box::new(Self::default()))
}
fn relevance(&self, header: &EventHeader) -> EventRelevance {
if self.keep.contains(&header.raw.event_hash) {
return EventRelevance::Keep;
}
if self.drop.contains(&header.raw.event_hash) {
return EventRelevance::Drop;
}
if header.meta.get_data_key().is_some() {
return EventRelevance::Keep;
} else {
return EventRelevance::Abstain;
}
}
fn feed(&mut self, header: &EventHeader, _keep: bool) {
let key = match header.meta.get_data_key() {
Some(key) => key,
None => {
return;
}
};
if let Some(parent) = header.meta.get_parent() {
self.parent_override.insert(parent.vec.parent_id);
}
let unique = UniqueEvent {
key,
auth: header.meta.get_authorization().map(|a| a.clone()),
};
let keep = if self.already.contains(&unique) == false {
self.already.insert(unique);
self.parent_override.remove(&key);
true
} else if self.parent_override.remove(&key) {
true
} else {
false
};
if keep {
self.keep.insert(header.raw.event_hash);
} else {
self.drop.insert(header.raw.event_hash);
}
}
fn name(&self) -> &str {
"remove-duplicates-compactor"
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/compact/tombstone_compactor.rs | lib/src/compact/tombstone_compactor.rs | use fxhash::FxHashSet;
use crate::crypto::AteHash;
use crate::event::*;
use crate::header::*;
use crate::meta::*;
use super::*;
#[derive(Default, Clone)]
pub struct TombstoneCompactor {
ignored: FxHashSet<AteHash>,
tombstoned: FxHashSet<PrimaryKey>,
}
impl EventCompactor for TombstoneCompactor {
fn clone_compactor(&self) -> Option<Box<dyn EventCompactor>> {
Some(Box::new(Self::default()))
}
fn relevance(&self, header: &EventHeader) -> EventRelevance {
let key = match header.meta.get_data_key() {
Some(key) => key,
None => {
return EventRelevance::Abstain;
}
};
if self.ignored.contains(&header.raw.event_hash) {
return EventRelevance::Abstain;
}
match self.tombstoned.contains(&key) {
true => EventRelevance::ForceDrop,
false => EventRelevance::Abstain,
}
}
fn feed(&mut self, header: &EventHeader, _keep: bool) {
if let Some(key) = header.meta.get_tombstone() {
self.tombstoned.insert(key.clone());
} else if let Some(key) = header.meta.get_data_key() {
if self.tombstoned.contains(&key) == false {
self.ignored.insert(header.raw.event_hash);
}
}
}
fn name(&self) -> &str {
"tombstone-compactor"
}
}
impl Metadata {
pub fn get_tombstone(&self) -> Option<PrimaryKey> {
self.core
.iter()
.filter_map(|m| match m {
CoreMetadata::Tombstone(k) => Some(k.clone()),
_ => None,
})
.next()
}
pub fn add_tombstone(&mut self, key: PrimaryKey) {
let has = self.core.iter().any(|m| match m {
CoreMetadata::Tombstone(k) => *k == key,
_ => false,
});
if has == true {
return;
}
self.core.push(CoreMetadata::Tombstone(key));
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/compact/cut_off_compactor.rs | lib/src/compact/cut_off_compactor.rs | use crate::event::*;
use super::*;
use crate::time::ChainTimestamp;
#[derive(Default, Clone)]
pub struct CutOffCompactor {
pub cut_off: ChainTimestamp,
}
impl CutOffCompactor {
pub fn new(after: ChainTimestamp) -> CutOffCompactor {
CutOffCompactor { cut_off: after }
}
}
impl EventCompactor for CutOffCompactor {
fn clone_compactor(&self) -> Option<Box<dyn EventCompactor>> {
None
}
fn relevance(&self, header: &EventHeader) -> EventRelevance {
if let Some(timestamp) = header.meta.get_timestamp() {
if *timestamp >= self.cut_off {
return EventRelevance::ForceKeep;
}
}
EventRelevance::Abstain
}
fn name(&self) -> &str {
"cut-off-compactor"
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/compact/sig_compactor.rs | lib/src/compact/sig_compactor.rs | use crate::crypto::*;
use crate::event::*;
use fxhash::FxHashSet;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use super::*;
#[derive(Default, Clone)]
pub struct SignatureCompactor {
sigs: FxHashSet<AteHash>,
sigs_already: FxHashSet<AteHash>,
sign_with: FxHashSet<AteHash>,
signed_events: FxHashSet<AteHash>,
}
impl SignatureCompactor {
pub fn new() -> SignatureCompactor {
SignatureCompactor {
sigs: FxHashSet::default(),
sigs_already: FxHashSet::default(),
signed_events: FxHashSet::default(),
sign_with: FxHashSet::default(),
}
}
}
impl EventCompactor for SignatureCompactor {
fn clone_compactor(&self) -> Option<Box<dyn EventCompactor>> {
Some(Box::new(Self::default()))
}
fn relevance(&self, header: &EventHeader) -> EventRelevance {
if let Some(sig) = header.meta.get_signature() {
if self.sigs.contains(&header.raw.event_hash) {
return EventRelevance::ForceKeep;
}
if sig.hashes.iter().any(|h| self.signed_events.contains(h)) {
return EventRelevance::ForceKeep;
}
}
EventRelevance::Abstain
}
fn feed(&mut self, header: &EventHeader, keep: bool) {
if keep {
self.signed_events.insert(header.raw.event_hash);
} else {
self.signed_events.remove(&header.raw.event_hash);
}
if keep == true {
if let Some(sign_with) = header.meta.get_sign_with() {
for key in sign_with.keys.iter() {
self.sign_with.insert(*key);
}
}
}
if let Some(sig) = header.meta.get_signature() {
if self.sign_with.contains(&sig.public_key_hash) {
if self.sigs_already.contains(&sig.public_key_hash) == false {
self.sigs_already.insert(sig.public_key_hash);
self.sigs.insert(header.raw.event_hash);
}
}
}
}
fn name(&self) -> &str {
"signature-compactor"
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/compact/mod.rs | lib/src/compact/mod.rs | pub mod compact_mode;
pub mod compact_state;
pub mod cut_off_compactor;
pub mod event_compactor;
pub mod indecisive_compactor;
pub mod public_key_compactor;
pub mod remove_duplicates;
pub mod sig_compactor;
mod tests;
pub mod tombstone_compactor;
pub(crate) use compact_state::*;
pub use compact_mode::*;
pub use cut_off_compactor::*;
pub use event_compactor::*;
pub use indecisive_compactor::*;
pub use public_key_compactor::*;
pub use remove_duplicates::*;
pub use sig_compactor::*;
pub use tombstone_compactor::*;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/compact/public_key_compactor.rs | lib/src/compact/public_key_compactor.rs | use crate::crypto::*;
use crate::event::*;
use fxhash::FxHashSet;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use super::*;
#[derive(Default, Clone)]
pub struct PublicKeyCompactor {
sign_with: FxHashSet<AteHash>,
}
impl PublicKeyCompactor {
pub fn new() -> PublicKeyCompactor {
PublicKeyCompactor {
sign_with: FxHashSet::default(),
}
}
}
impl EventCompactor for PublicKeyCompactor {
fn clone_compactor(&self) -> Option<Box<dyn EventCompactor>> {
Some(Box::new(Self::default()))
}
fn relevance(&self, header: &EventHeader) -> EventRelevance {
if let Some(pk) = header.meta.get_public_key() {
let pk_hash = pk.hash();
if self.sign_with.contains(&pk_hash) {
return EventRelevance::ForceKeep;
}
}
EventRelevance::Abstain
}
fn feed(&mut self, header: &EventHeader, keep: bool) {
if keep == true {
if let Some(sign_with) = header.meta.get_sign_with() {
for key in sign_with.keys.iter() {
self.sign_with.insert(*key);
}
}
if let Some(sign) = header.meta.get_signature() {
self.sign_with.insert(sign.public_key_hash);
}
}
}
fn name(&self) -> &str {
"public-key-compactor"
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/foreign.rs | lib/src/dio/foreign.rs | use fxhash::FxHashMap;
use crate::header::*;
use serde::*;
/// Rerepresents a reference to structured data that exists in another
/// chain-of-trust
///
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct DaoForeign {
map: FxHashMap<String, PrimaryKey>,
}
impl DaoForeign {
pub fn new() -> DaoForeign {
DaoForeign {
map: FxHashMap::default(),
}
}
pub fn get_by_url(&self, chain_url: url::Url) -> Option<PrimaryKey> {
let key = chain_url.to_string();
match self.map.get(&key) {
Some(a) => Some(a.clone()),
None => None,
}
}
pub fn set_by_url(&mut self, chain_url: url::Url, key: PrimaryKey) {
let chain_key = chain_url.to_string();
self.map.insert(chain_key, key);
}
pub fn get_by_name(&self, name: String) -> Option<PrimaryKey> {
match self.map.get(&name) {
Some(a) => Some(a.clone()),
None => None,
}
}
pub fn set_by_name(&mut self, name: String, key: PrimaryKey) {
self.map.insert(name, key);
}
pub fn get<T>(&self) -> Option<PrimaryKey> {
let name = std::any::type_name::<T>().to_string();
match self.map.get(&name) {
Some(a) => Some(a.clone()),
None => None,
}
}
pub fn set<T>(&mut self, key: PrimaryKey) {
let name = std::any::type_name::<T>().to_string();
self.map.insert(name, key);
}
}
impl Default for DaoForeign {
fn default() -> DaoForeign {
DaoForeign::new()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/test.rs | lib/src/dio/test.rs | #![allow(unused_imports)]
use serde::Deserialize;
use serde::{de::DeserializeOwned, Serialize};
use std::convert::*;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::crypto::*;
use crate::dio::*;
use crate::prelude::*;
#[cfg(test)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum TestEnumDao {
None,
Blah1,
Blah2(u32),
Blah3(String),
Blah4,
Blah5,
}
#[cfg(test)]
impl Default for TestEnumDao {
fn default() -> TestEnumDao {
TestEnumDao::None
}
}
#[cfg(test)]
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
pub struct TestStructDao {
val: u32,
hidden: String,
inner: DaoVec<TestEnumDao>,
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_dio() -> Result<(), AteError> {
crate::utils::bootstrap_test_env();
info!("generating crypto keys");
let write_key = PrivateSignKey::generate(crate::crypto::KeySize::Bit192);
let write_key2 = PrivateSignKey::generate(KeySize::Bit256);
let read_key = EncryptKey::generate(crate::crypto::KeySize::Bit192);
let root_public_key = write_key.as_public_key();
info!("building the session");
let mut session = AteSessionUser::new();
session
.user
.properties
.push(AteSessionProperty::WriteKey(write_key.clone()));
session
.user
.properties
.push(AteSessionProperty::WriteKey(write_key2.clone()));
session
.user
.properties
.push(AteSessionProperty::ReadKey(read_key.clone()));
session.identity = "author@here.com".to_string();
info!("{}", session);
let key1;
let key2;
let key3;
let chain_name = format!("test_dio_{}", PrimaryKey::generate().to_string());
#[cfg(not(feature = "enable_local_fs"))]
#[allow(unused_variables, unused_assignments)]
let mut stored_chain = None;
{
info!("creating the chain-of-trust");
let mut mock_cfg = crate::conf::tests::mock_test_config();
let (chain, _builder) = crate::trust::create_test_chain(
&mut mock_cfg,
chain_name.clone(),
false,
false,
Some(root_public_key.clone()),
)
.await;
//let mut chain = create_test_chain("test_dio".to_string(), true, false, None);
// Write a value immediately from chain (this data will remain in the transaction)
{
let dio = chain.dio_mut(&session).await;
{
info!("storing data object 1");
let mut mock_dao = TestStructDao::default();
mock_dao.val = 1;
mock_dao.hidden = "This text should be hidden".to_string();
let mut dao1 = dio.store(mock_dao).unwrap();
let dao3 = dao1.as_mut().inner.push(TestEnumDao::Blah1).unwrap();
key1 = dao1.key().clone();
info!("key1: {}", key1.as_hex_string());
key3 = dao3.key().clone();
info!("key3: {}", key3.as_hex_string());
info!("loading data object 1");
info!("setting read and write crypto keys");
dao1.auth_mut().read = ReadOption::from_key(&read_key);
dao1.auth_mut().write = WriteOption::Specific(write_key2.hash());
}
dio.commit().await.expect("The DIO should commit");
}
{
info!("new DIO context");
let dio = chain.dio_mut(&session).await;
{
// Load the object again which should load it from the cache
info!("loading data object 1");
let mut dao1 = dio.load::<TestStructDao>(&key1).await.unwrap();
// When we update this value it will become dirty and hence should block future loads until its flushed or goes out of scope
info!("updating data object");
dao1.as_mut().val = 2;
// Flush the data and attempt to read it again (this should succeed)
info!("load the object again");
let test: DaoMut<TestStructDao> = dio
.load(&key1)
.await
.expect("The dirty data object should have been read after it was flushed");
assert_eq!(test.val, 2 as u32);
}
{
// Load the object again which should load it from the cache
info!("loading data object 1 in new scope");
let mut dao1 = dio.load::<TestStructDao>(&key1).await.unwrap();
// Again after changing the data reads should fail
info!("modifying data object 1");
dao1.as_mut().val = 3;
}
{
// Write a record to the chain that we will delete again later
info!("storing data object 2");
let mut dao2 = dio.store(TestEnumDao::Blah4).unwrap();
// We create a new private key for this data
info!("adding a write crypto key");
dao2.auth_mut().write = WriteOption::Specific(write_key2.as_public_key().hash());
key2 = dao2.key().clone();
info!("key2: {}", key2.as_hex_string());
}
dio.commit().await.expect("The DIO should commit");
}
{
info!("new DIO context");
let dio = chain.dio(&session).await;
// Now its out of scope it should be loadable again
info!("loading data object 1");
let test = dio
.load::<TestStructDao>(&key1)
.await
.expect("The dirty data object should have been read after it was flushed");
assert_eq!(test.val, 3);
// Read the items in the collection which we should find our second object
info!("loading children");
let test3 = test
.inner
.iter()
.await
.unwrap()
.next()
.expect("Three should be a data object in this collection");
assert_eq!(test3.key(), &key3);
}
{
info!("new DIO context");
let dio = chain.dio_mut(&session).await;
// The data we saved earlier should be accessible accross DIO scope boundaries
info!("loading data object 1");
let mut dao1: DaoMut<TestStructDao> = dio
.load(&key1)
.await
.expect("The data object should have been read");
assert_eq!(dao1.val, 3);
dao1.as_mut().val = 4;
// First attempt to read the record then delete it
info!("loading data object 2");
let dao2 = dio
.load::<TestEnumDao>(&key2)
.await
.expect("The record should load before we delete it in this session");
info!("deleting data object 2");
dao2.delete().unwrap();
// It should no longer load now that we deleted it
info!("negative test on loading data object 2");
dio.load::<TestEnumDao>(&key2)
.await
.expect_err("This load should fail as we deleted the record");
dio.commit().await.expect("The DIO should commit");
}
// Store the chain if we are in memory mode as there is no persistence
#[cfg(not(feature = "enable_local_fs"))]
{
stored_chain = Some(chain);
}
}
{
info!("reloading the chain of trust");
#[cfg(feature = "enable_local_fs")]
let mut mock_cfg = crate::conf::tests::mock_test_config();
#[cfg(feature = "enable_local_fs")]
let (chain, _builder) = crate::trust::create_test_chain(
&mut mock_cfg,
chain_name.clone(),
false,
false,
Some(root_public_key.clone()),
)
.await;
#[cfg(not(feature = "enable_local_fs"))]
let chain = stored_chain.take().unwrap();
{
let dio = chain.dio(&session).await;
// Load it again
info!("loading data object 1");
let dao1: Dao<TestStructDao> = dio
.load(&key1)
.await
.expect("The data object should have been read");
assert_eq!(dao1.val, 4);
// After going out of scope then back again we should still no longer see the record we deleted
info!("loading data object 2");
dio.load::<TestEnumDao>(&key2)
.await
.expect_err("This load should fail as we deleted the record");
}
info!("destroying the chain of trust");
chain.single().await.destroy().await.unwrap();
}
Ok(())
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/vec.rs | lib/src/dio/vec.rs | #![allow(unused_imports)]
use error_chain::bail;
use std::ops::Deref;
use std::marker::PhantomData;
use std::sync::{Arc, Weak};
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::Instrument;
use super::dio::DioWeak;
use super::dio_mut::DioMutWeak;
use crate::dio::dao::*;
use crate::dio::*;
use crate::error::*;
use crate::prelude::*;
use serde::de::*;
use serde::*;
use std::collections::VecDeque;
/// Rerepresents a vector of children attached to a parent DAO
///
/// This object does not actually store the children which are
/// actually stored within the chain-of-trust as seperate events
/// that are indexed into secondary indexes that this object queries.
///
/// Vectors can also be used as queues and as a bus for various
/// different usecases.
///
/// Storing this vector within other DAO's allows complex models
/// to be represented.
///
/// Alternatively you can store your vectors, maps and other
/// relationships as collections of `PrimaryKey`'s however you
/// will need to manage this yourselve and can not benefit from
/// publish/subscribe patterns.
///
#[derive(Serialize, Deserialize)]
pub struct DaoVec<D> {
pub(super) vec_id: u64,
#[serde(skip)]
pub(super) state: DaoVecState,
#[serde(skip)]
pub(super) dio: DioWeak,
#[serde(skip)]
pub(super) dio_mut: DioMutWeak,
#[serde(skip)]
pub(super) _phantom1: PhantomData<D>,
}
pub(super) enum DaoVecState {
Unsaved,
Saved(PrimaryKey),
}
impl Default for DaoVecState {
fn default() -> Self {
match PrimaryKey::current_get() {
Some(a) => DaoVecState::Saved(a),
None => DaoVecState::Unsaved,
}
}
}
impl Clone for DaoVecState {
fn clone(&self) -> Self {
match self {
Self::Unsaved => Self::default(),
Self::Saved(a) => Self::Saved(a.clone()),
}
}
}
impl<D> std::fmt::Debug for DaoVec<D>
where
D: std::fmt::Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let type_name = std::any::type_name::<D>();
write!(f, "dao-vec(vec_id={}, type={}", self.vec_id, type_name)
}
}
impl<D> Default for DaoVec<D> {
fn default() -> Self {
DaoVec::new()
}
}
impl<D> Clone for DaoVec<D> {
fn clone(&self) -> DaoVec<D> {
DaoVec {
state: self.state.clone(),
vec_id: self.vec_id,
dio: self.dio.clone(),
dio_mut: self.dio_mut.clone(),
_phantom1: PhantomData,
}
}
}
impl<D> DaoVec<D> {
pub fn new() -> DaoVec<D> {
DaoVec {
state: DaoVecState::Unsaved,
dio: DioWeak::Uninitialized,
dio_mut: DioMutWeak::Uninitialized,
vec_id: fastrand::u64(..),
_phantom1: PhantomData,
}
}
pub fn new_orphaned(dio: &Arc<Dio>, parent: PrimaryKey, vec_id: u64) -> DaoVec<D> {
DaoVec {
state: DaoVecState::Saved(parent),
dio: DioWeak::from(dio),
dio_mut: DioMutWeak::Uninitialized,
vec_id: vec_id,
_phantom1: PhantomData,
}
}
pub fn new_orphaned_mut(dio: &Arc<DioMut>, parent: PrimaryKey, vec_id: u64) -> DaoVec<D> {
DaoVec {
state: DaoVecState::Saved(parent),
dio: DioWeak::from(&dio.dio),
dio_mut: DioMutWeak::from(dio),
vec_id: vec_id,
_phantom1: PhantomData,
}
}
pub fn dio(&self) -> Option<Arc<Dio>> {
match &self.dio {
DioWeak::Uninitialized => None,
DioWeak::Weak(a) => Weak::upgrade(a),
}
}
pub fn dio_mut(&self) -> Option<Arc<DioMut>> {
match &self.dio_mut {
DioMutWeak::Uninitialized => None,
DioMutWeak::Weak(a) => Weak::upgrade(a),
}
}
pub fn vec_id(&self) -> u64 {
self.vec_id
}
pub async fn len(&self) -> Result<usize, LoadError> {
let len = match &self.state {
DaoVecState::Unsaved => 0usize,
DaoVecState::Saved(parent_id) => {
let dio = match self.dio() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
dio.children_keys(parent_id.clone(), self.vec_id)
.await?
.len()
}
};
Ok(len)
}
pub async fn iter(&self) -> Result<Iter<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
self.iter_ext(false, false).await
}
pub async fn clear(&mut self) -> Result<(), LoadError>
where
D: Serialize + DeserializeOwned,
{
for dao in self.iter_mut().await? {
dao.delete()?;
}
Ok(())
}
pub async fn retain<F>(&mut self, mut f: F) -> Result<(), LoadError>
where
D: Serialize + DeserializeOwned,
F: FnMut(&D) -> bool,
{
for dao in self.iter_mut().await? {
if f(dao.deref()) {
continue;
}
dao.delete()?;
}
Ok(())
}
pub async fn iter_ext(
&self,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<Iter<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
let children = match &self.state {
DaoVecState::Unsaved => vec![],
DaoVecState::Saved(parent_id) => {
if let Some(dio) = self.dio_mut() {
dio.children_ext(
parent_id.clone(),
self.vec_id,
allow_missing_keys,
allow_serialization_error,
)
.await?
.into_iter()
.map(|a: DaoMut<D>| a.inner)
.collect::<Vec<_>>()
} else {
let dio = match self.dio() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
dio.children_ext(
parent_id.clone(),
self.vec_id,
allow_missing_keys,
allow_serialization_error,
)
.await?
}
}
};
Ok(Iter::new(children))
}
pub async fn iter_mut(&mut self) -> Result<IterMut<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
self.iter_mut_ext(false, false).await
}
pub async fn iter_mut_ext(
&mut self,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<IterMut<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
let children = match &self.state {
DaoVecState::Unsaved => vec![],
DaoVecState::Saved(parent_id) => {
let dio = match self.dio_mut() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
let mut ret = Vec::default();
for child in dio
.children_ext::<D>(
parent_id.clone(),
self.vec_id,
allow_missing_keys,
allow_serialization_error,
)
.await?
{
ret.push(child)
}
ret
}
};
Ok(IterMut::new(children))
}
pub async fn iter_mut_with_dio(&self, dio: &Arc<DioMut>) -> Result<IterMut<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
self.iter_mut_ext_with_dio(dio, false, false).await
}
pub async fn iter_mut_ext_with_dio(
&self,
dio: &Arc<DioMut>,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<IterMut<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
let children = match &self.state {
DaoVecState::Unsaved => vec![],
DaoVecState::Saved(parent_id) => {
let mut ret = Vec::default();
for child in dio
.children_ext::<D>(
parent_id.clone(),
self.vec_id,
allow_missing_keys,
allow_serialization_error,
)
.await?
{
ret.push(child)
}
ret
}
};
Ok(IterMut::new(children))
}
/// Pushes a data object into this collection and stores
/// it for the next DIO commit
pub fn push(&mut self, data: D) -> Result<DaoMut<D>, SerializationError>
where
D: Clone + Serialize + DeserializeOwned,
{
let dio = match self.dio_mut() {
Some(a) => a,
None => bail!(SerializationErrorKind::WeakDio),
};
let parent_id = match &self.state {
DaoVecState::Unsaved => {
bail!(SerializationErrorKind::SaveParentFirst);
}
DaoVecState::Saved(a) => a.clone(),
};
let mut ret = dio.store(data)?;
ret.attach_ext(parent_id, self.vec_id)?;
Ok(ret)
}
pub fn push_with_key(
&mut self,
data: D,
key: PrimaryKey,
) -> Result<DaoMut<D>, SerializationError>
where
D: Clone + Serialize + DeserializeOwned,
{
let dio = match self.dio_mut() {
Some(a) => a,
None => bail!(SerializationErrorKind::WeakDio),
};
let parent_id = match &self.state {
DaoVecState::Unsaved => {
bail!(SerializationErrorKind::SaveParentFirst);
}
DaoVecState::Saved(a) => a.clone(),
};
let mut ret = dio.store_with_key(data, key)?;
ret.attach_ext(parent_id, self.vec_id)?;
Ok(ret)
}
pub fn push_with_dio(&self, dio: &Arc<DioMut>, data: D) -> Result<DaoMut<D>, SerializationError>
where
D: Clone + Serialize + DeserializeOwned,
{
let parent_id = match &self.state {
DaoVecState::Unsaved => {
bail!(SerializationErrorKind::SaveParentFirst);
}
DaoVecState::Saved(a) => a.clone(),
};
let mut ret = dio.store(data)?;
ret.attach_ext(parent_id, self.vec_id)?;
Ok(ret)
}
pub fn push_with_dio_and_key(
&self,
dio: &Arc<DioMut>,
data: D,
key: PrimaryKey,
) -> Result<DaoMut<D>, SerializationError>
where
D: Clone + Serialize + DeserializeOwned,
{
let parent_id = match &self.state {
DaoVecState::Unsaved => {
bail!(SerializationErrorKind::SaveParentFirst);
}
DaoVecState::Saved(a) => a.clone(),
};
let mut ret = dio.store_with_key(data, key)?;
ret.attach_ext(parent_id, self.vec_id)?;
Ok(ret)
}
}
pub struct Iter<D> {
vec: VecDeque<Dao<D>>,
}
impl<D> Iter<D> {
pub(super) fn new(vec: Vec<Dao<D>>) -> Iter<D> {
Iter {
vec: VecDeque::from(vec),
}
}
}
impl<D> Iterator for Iter<D> {
type Item = Dao<D>;
fn next(&mut self) -> Option<Dao<D>> {
self.vec.pop_front()
}
}
pub struct IterMut<D>
where
D: Serialize,
{
vec: VecDeque<DaoMut<D>>,
}
impl<D> IterMut<D>
where
D: Serialize,
{
pub(super) fn new(vec: Vec<DaoMut<D>>) -> IterMut<D> {
IterMut {
vec: VecDeque::from(vec),
}
}
}
impl<D> Iterator for IterMut<D>
where
D: Serialize,
{
type Item = DaoMut<D>;
fn next(&mut self) -> Option<DaoMut<D>> {
self.vec.pop_front()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/row.rs | lib/src/dio/row.rs | #![allow(unused_imports)]
use error_chain::bail;
use fxhash::FxHashSet;
use tracing::{debug, warn, error};
use bytes::Bytes;
use serde::{de::DeserializeOwned, Serialize};
use std::ops::{Deref, DerefMut};
use std::sync::Arc;
use std::sync::{Mutex, MutexGuard};
use crate::crypto::{EncryptedPrivateKey, PrivateSignKey};
use crate::{crypto::EncryptKey, session::AteSessionProperty};
use super::dio_mut::*;
use crate::crypto::AteHash;
use crate::dio::*;
use crate::error::*;
use crate::event::*;
use crate::header::*;
use crate::index::*;
use crate::meta::*;
use crate::spec::*;
pub use super::vec::DaoVec;
#[derive(Debug, Clone)]
pub(crate) struct RowHeader {
pub key: PrimaryKey,
pub parent: Option<MetaParent>,
pub auth: MetaAuthorization,
}
pub(super) struct Row<D> {
pub(super) key: PrimaryKey,
pub(super) type_name: String,
pub(super) created: u64,
pub(super) updated: u64,
pub(super) format: MessageFormat,
pub(super) data: D,
pub(super) collections: FxHashSet<MetaCollection>,
pub(super) extra_meta: Vec<CoreMetadata>,
pub(super) is_new: bool,
}
impl<D> Clone for Row<D>
where
D: Clone,
{
fn clone(&self) -> Self {
Row {
key: self.key.clone(),
type_name: self.type_name.clone(),
created: self.created,
updated: self.updated,
format: self.format,
data: self.data.clone(),
collections: self.collections.clone(),
extra_meta: self.extra_meta.clone(),
is_new: self.is_new.clone(),
}
}
}
impl<D> std::fmt::Debug for Row<D>
where
D: std::fmt::Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(
f,
"row(key={}, created={}, updated={}, data=",
self.key, self.created, self.updated
)?;
let ret = self.data.fmt(f);
write!(f, ")")?;
ret
}
}
impl<D> Row<D> {
pub(crate) fn from_event(
dio: &Arc<Dio>,
evt: &EventStrongData,
created: u64,
updated: u64,
) -> Result<(RowHeader, Row<D>), SerializationError>
where
D: DeserializeOwned,
{
let key = match evt.meta.get_data_key() {
Some(key) => key,
None => {
bail!(SerializationErrorKind::NoPrimarykey)
}
};
let mut collections = FxHashSet::default();
for a in evt.meta.get_collections() {
collections.insert(a);
}
match &evt.data_bytes {
Some(data) => {
let auth = match evt.meta.get_authorization() {
Some(a) => a.clone(),
None => MetaAuthorization::default(),
};
let parent = match evt.meta.get_parent() {
Some(a) => Some(a.clone()),
None => None,
};
let data = {
let _pop1 = DioScope::new(dio);
let _pop2 = PrimaryKeyScope::new(key);
evt.format.data.deserialize_ref(&data)
.map_err(SerializationError::from)
.map_err(|err| {
//trace!("{}", String::from_utf8_lossy(&data[..]));
err
})?
};
Ok((
RowHeader {
key: key.clone(),
parent,
auth,
},
Row {
key,
type_name: std::any::type_name::<D>().to_string(),
format: evt.format,
data,
collections,
created,
updated,
extra_meta: Vec::new(),
is_new: false,
},
))
},
None => bail!(SerializationErrorKind::NoData),
}
}
pub(crate) fn from_row_data(
dio: &Arc<Dio>,
row: &RowData,
) -> Result<(RowHeader, Row<D>), SerializationError>
where
D: DeserializeOwned,
{
let data = {
let _pop1 = DioScope::new(dio);
let _pop2 = PrimaryKeyScope::new(row.key);
row.format.data.deserialize_ref(&row.data)
.map_err(SerializationError::from)?
};
Ok((
RowHeader {
key: row.key.clone(),
parent: row.parent.clone(),
auth: row.auth.clone(),
},
Row {
key: row.key,
type_name: row.type_name.clone(),
format: row.format,
data: data,
collections: row.collections.clone(),
created: row.created,
updated: row.updated,
extra_meta: row.extra_meta.clone(),
is_new: false,
},
))
}
pub(crate) fn as_row_data(
&self,
header: &RowHeader,
) -> std::result::Result<RowData, SerializationError>
where
D: Serialize,
{
let data = Bytes::from(self.format.data.serialize(&self.data)?);
let data_hash = AteHash::from_bytes(&data[..]);
Ok(RowData {
key: self.key.clone(),
type_name: self.type_name.clone(),
format: self.format,
parent: header.parent.clone(),
data_hash,
data,
auth: header.auth.clone(),
collections: self.collections.clone(),
created: self.created,
updated: self.updated,
extra_meta: self.extra_meta.clone(),
is_new: self.is_new,
})
}
}
#[derive(Debug, Clone)]
pub(crate) struct RowData {
pub key: PrimaryKey,
pub type_name: String,
pub format: MessageFormat,
pub data_hash: AteHash,
pub data: Bytes,
pub collections: FxHashSet<MetaCollection>,
pub created: u64,
pub updated: u64,
pub extra_meta: Vec<CoreMetadata>,
pub parent: Option<MetaParent>,
pub auth: MetaAuthorization,
pub is_new: bool,
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/map.rs | lib/src/dio/map.rs | #![allow(unused_imports)]
use error_chain::bail;
use fxhash::FxHashMap;
use std::marker::PhantomData;
use std::sync::{Arc, Weak};
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::Instrument;
use super::dio::DioWeak;
use super::dio_mut::DioMutWeak;
use super::vec::DaoVecState;
use crate::dio::dao::*;
use crate::dio::*;
use crate::error::*;
use crate::prelude::*;
use serde::de::*;
use serde::*;
use std::collections::VecDeque;
#[derive(Serialize, Deserialize)]
pub struct DaoMap<K, V> {
pub(super) lookup: FxHashMap<String, PrimaryKey>,
pub(super) vec_id: u64,
#[serde(skip)]
pub(super) state: DaoMapState,
#[serde(skip)]
dio: DioWeak,
#[serde(skip)]
dio_mut: DioMutWeak,
#[serde(skip)]
_phantom1: PhantomData<K>,
#[serde(skip)]
_phantom2: PhantomData<V>,
}
pub(super) enum DaoMapState {
Unsaved,
Saved(PrimaryKey),
}
impl Default for DaoMapState {
fn default() -> Self {
match PrimaryKey::current_get() {
Some(a) => DaoMapState::Saved(a),
None => DaoMapState::Unsaved,
}
}
}
impl Clone for DaoMapState {
fn clone(&self) -> Self {
match self {
Self::Unsaved => Self::default(),
Self::Saved(a) => Self::Saved(a.clone()),
}
}
}
impl<K, V> std::fmt::Debug for DaoMap<K, V> {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let key_type_name = std::any::type_name::<K>();
let value_type_name = std::any::type_name::<V>();
write!(
f,
"dao-map(vec_id={}, key-type={}, value-type={}",
self.vec_id, key_type_name, value_type_name
)
}
}
impl<K, V> Default for DaoMap<K, V> {
fn default() -> Self {
DaoMap::new()
}
}
impl<K, V> Clone for DaoMap<K, V> {
fn clone(&self) -> DaoMap<K, V> {
DaoMap {
lookup: self.lookup.clone(),
state: self.state.clone(),
vec_id: self.vec_id,
dio: self.dio.clone(),
dio_mut: self.dio_mut.clone(),
_phantom1: PhantomData,
_phantom2: PhantomData,
}
}
}
impl<K, V> DaoMap<K, V> {
pub fn new() -> DaoMap<K, V> {
DaoMap {
lookup: FxHashMap::default(),
state: DaoMapState::Unsaved,
dio: DioWeak::Uninitialized,
dio_mut: DioMutWeak::Uninitialized,
vec_id: fastrand::u64(..),
_phantom1: PhantomData,
_phantom2: PhantomData,
}
}
}
impl<K, V> DaoMap<K, V> {
pub fn new_orphaned(dio: &Arc<Dio>, parent: PrimaryKey, vec_id: u64) -> DaoMap<K, V> {
DaoMap {
lookup: FxHashMap::default(),
state: DaoMapState::Saved(parent),
dio: DioWeak::from(dio),
dio_mut: DioMutWeak::Uninitialized,
vec_id: vec_id,
_phantom1: PhantomData,
_phantom2: PhantomData,
}
}
pub fn new_orphaned_mut(dio: &Arc<DioMut>, parent: PrimaryKey, vec_id: u64) -> DaoMap<K, V> {
DaoMap {
lookup: FxHashMap::default(),
state: DaoMapState::Saved(parent),
dio: DioWeak::from(&dio.dio),
dio_mut: DioMutWeak::from(dio),
vec_id: vec_id,
_phantom1: PhantomData,
_phantom2: PhantomData,
}
}
pub fn dio(&self) -> Option<Arc<Dio>> {
match &self.dio {
DioWeak::Uninitialized => None,
DioWeak::Weak(a) => Weak::upgrade(a),
}
}
pub fn dio_mut(&self) -> Option<Arc<DioMut>> {
match &self.dio_mut {
DioMutWeak::Uninitialized => None,
DioMutWeak::Weak(a) => Weak::upgrade(a),
}
}
pub fn as_vec(&self) -> DaoVec<V> {
DaoVec {
vec_id: self.vec_id,
state: match &self.state {
DaoMapState::Saved(a) => DaoVecState::Saved(a.clone()),
DaoMapState::Unsaved => DaoVecState::Unsaved,
},
dio: self.dio.clone(),
dio_mut: self.dio_mut.clone(),
_phantom1: PhantomData,
}
}
pub fn vec_id(&self) -> u64 {
self.vec_id
}
pub async fn len(&self) -> Result<usize, LoadError> {
let len = match &self.state {
DaoMapState::Unsaved => self.lookup.len(),
DaoMapState::Saved(parent_id) => {
let dio = match self.dio() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
dio.children_keys(parent_id.clone(), self.vec_id)
.await?
.len()
}
};
Ok(len)
}
pub async fn iter(&self) -> Result<Iter<K, V>, LoadError>
where
K: DeserializeOwned,
V: Serialize + DeserializeOwned,
{
self.iter_ext(false, false).await
}
pub async fn iter_ext(
&self,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<Iter<K, V>, LoadError>
where
K: DeserializeOwned,
V: Serialize + DeserializeOwned,
{
let mut reverse = FxHashMap::default();
for (k, v) in self.lookup.iter() {
reverse.insert(v, k);
}
let children = match &self.state {
DaoMapState::Unsaved => vec![],
DaoMapState::Saved(parent_id) => {
if let Some(dio) = self.dio_mut() {
dio.children_ext(
parent_id.clone(),
self.vec_id,
allow_missing_keys,
allow_serialization_error,
)
.await?
.into_iter()
.map(|a: DaoMut<V>| a.inner)
.collect::<Vec<_>>()
} else {
let dio = match self.dio() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
dio.children_ext(
parent_id.clone(),
self.vec_id,
allow_missing_keys,
allow_serialization_error,
)
.await?
}
}
};
let pairs = children
.into_iter()
.filter_map(|v| match reverse.get(v.key()) {
Some(k) => {
let k = base64::decode(k)
.ok()
.map(|a| bincode::deserialize(&a[..]).ok())
.flatten();
match k {
Some(k) => Some((k, v)),
None => None,
}
}
None => None,
})
.collect::<Vec<_>>();
Ok(Iter::new(pairs))
}
pub async fn iter_mut(&mut self) -> Result<IterMut<K, V>, LoadError>
where
K: DeserializeOwned,
V: Serialize + DeserializeOwned,
{
self.iter_mut_ext(false, false).await
}
pub async fn iter_mut_ext(
&mut self,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<IterMut<K, V>, LoadError>
where
K: DeserializeOwned,
V: Serialize + DeserializeOwned,
{
let dio = match self.dio_mut() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
self.iter_mut_ext_with_dio(&dio, allow_missing_keys, allow_serialization_error)
.await
}
pub async fn iter_mut_with_dio(&self, dio: &Arc<DioMut>) -> Result<IterMut<K, V>, LoadError>
where
K: DeserializeOwned,
V: Serialize + DeserializeOwned,
{
self.iter_mut_ext_with_dio(dio, false, false).await
}
pub async fn iter_mut_ext_with_dio(
&self,
dio: &Arc<DioMut>,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<IterMut<K, V>, LoadError>
where
K: DeserializeOwned,
V: Serialize + DeserializeOwned,
{
let mut reverse = FxHashMap::default();
for (k, v) in self.lookup.iter() {
reverse.insert(v, k);
}
let children = match &self.state {
DaoMapState::Unsaved => vec![],
DaoMapState::Saved(parent_id) => {
let mut ret = Vec::default();
for child in dio
.children_ext::<V>(
parent_id.clone(),
self.vec_id,
allow_missing_keys,
allow_serialization_error,
)
.await?
{
ret.push(child)
}
ret
}
};
let pairs = children
.into_iter()
.filter_map(|v| match reverse.get(v.key()) {
Some(k) => {
let k = base64::decode(k)
.ok()
.map(|a| bincode::deserialize(&a[..]).ok())
.flatten();
match k {
Some(k) => Some((k, v)),
None => None,
}
}
None => None,
})
.collect::<Vec<_>>();
Ok(IterMut::new(pairs))
}
pub async fn insert(&mut self, key: K, value: V) -> Result<(), SerializationError>
where
K: Serialize,
V: Clone + Serialize + DeserializeOwned,
{
self.insert_ret(key, value).await?;
Ok(())
}
pub async fn insert_ret(&mut self, key: K, value: V) -> Result<DaoMut<V>, SerializationError>
where
K: Serialize,
V: Clone + Serialize + DeserializeOwned,
{
let dio = match self.dio_mut() {
Some(a) => a,
None => bail!(SerializationErrorKind::WeakDio),
};
let parent_id = match &self.state {
DaoMapState::Unsaved => {
bail!(SerializationErrorKind::SaveParentFirst);
}
DaoMapState::Saved(a) => a.clone(),
};
let key = base64::encode(&bincode::serialize(&key)?[..]);
let mut ret = dio.store(value)?;
ret.attach_ext(parent_id, self.vec_id)?;
if let Some(old) = self.lookup.insert(key, ret.key().clone()) {
dio.delete(&old).await?;
}
Ok(ret)
}
pub async fn get(&self, key: &K) -> Result<Option<Dao<V>>, LoadError>
where
K: Serialize,
V: Serialize + DeserializeOwned,
{
let key = base64::encode(&bincode::serialize(key)?[..]);
let id = match self.lookup.get(&key) {
Some(a) => a,
None => {
return Ok(None);
}
};
if let Some(dio) = self.dio_mut() {
let ret = match dio.load::<V>(&id).await {
Ok(a) => Some(a.inner),
Err(LoadError(LoadErrorKind::NotFound(_), _)) => None,
Err(err) => {
bail!(err);
}
};
return Ok(ret);
}
let dio = match self.dio() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
let ret = match dio.load::<V>(&id).await {
Ok(a) => Some(a),
Err(LoadError(LoadErrorKind::NotFound(_), _)) => None,
Err(err) => {
bail!(err);
}
};
Ok(ret)
}
pub async fn get_mut(&mut self, key: &K) -> Result<Option<DaoMut<V>>, LoadError>
where
K: Serialize,
V: Serialize + DeserializeOwned,
{
let key = base64::encode(&bincode::serialize(key)?[..]);
let id = match self.lookup.get(&key) {
Some(a) => a,
None => {
return Ok(None);
}
};
let dio = match self.dio_mut() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
let ret = match dio.load::<V>(&id).await {
Ok(a) => Some(a),
Err(LoadError(LoadErrorKind::NotFound(_), _)) => None,
Err(err) => {
bail!(err);
}
};
Ok(ret)
}
pub async fn get_or_default(&mut self, key: K) -> Result<DaoMut<V>, LoadError>
where
K: Serialize,
V: Clone + Serialize + DeserializeOwned + Default,
{
self.get_or_insert_with(key, || Default::default()).await
}
pub async fn get_or_insert(&mut self, key: K, default_val: V) -> Result<DaoMut<V>, LoadError>
where
K: Serialize,
V: Clone + Serialize + DeserializeOwned + Default,
{
self.get_or_insert_with(key, || default_val).await
}
pub async fn get_or_insert_with<F>(
&mut self,
key: K,
default: F,
) -> Result<DaoMut<V>, LoadError>
where
F: FnOnce() -> V,
K: Serialize,
V: Clone + Serialize + DeserializeOwned,
{
let key = base64::encode(&bincode::serialize(&key)?[..]);
let id = self.lookup.entry(key).or_default().clone();
let dio = match self.dio_mut() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
let ret = match dio.load::<V>(&id).await {
Ok(a) => a,
Err(LoadError(LoadErrorKind::NotFound(_), _)) => {
let parent_id = match &self.state {
DaoMapState::Unsaved => {
bail!(LoadErrorKind::SerializationError(
SerializationErrorKind::SaveParentFirst
));
}
DaoMapState::Saved(a) => a.clone(),
};
let mut ret = dio.store_with_key(default(), id)?;
ret.attach_ext(parent_id, self.vec_id)?;
ret
}
Err(err) => {
bail!(err);
}
};
Ok(ret)
}
pub async fn delete(&mut self, key: &K) -> Result<bool, SerializationError>
where
K: Serialize,
V: Serialize,
{
let key = base64::encode(&bincode::serialize(key)?[..]);
let id = match self.lookup.get(&key) {
Some(a) => a,
None => {
return Ok(false);
}
};
let dio = match self.dio_mut() {
Some(a) => a,
None => bail!(SerializationErrorKind::WeakDio),
};
if dio.exists(&id).await == false {
return Ok(false);
}
dio.delete(&id).await?;
Ok(true)
}
}
pub struct Iter<K, V> {
vec: VecDeque<(K, Dao<V>)>,
}
impl<K, V> Iter<K, V> {
pub(super) fn new(vec: Vec<(K, Dao<V>)>) -> Iter<K, V> {
Iter {
vec: VecDeque::from(vec),
}
}
}
impl<K, V> Iterator for Iter<K, V> {
type Item = (K, Dao<V>);
fn next(&mut self) -> Option<(K, Dao<V>)> {
self.vec.pop_front()
}
}
pub struct IterMut<K, V>
where
V: Serialize,
{
vec: VecDeque<(K, DaoMut<V>)>,
}
impl<K, V> IterMut<K, V>
where
V: Serialize,
{
pub(super) fn new(vec: Vec<(K, DaoMut<V>)>) -> IterMut<K, V> {
IterMut {
vec: VecDeque::from(vec),
}
}
}
impl<K, V> Iterator for IterMut<K, V>
where
V: Serialize,
{
type Item = (K, DaoMut<V>);
fn next(&mut self) -> Option<(K, DaoMut<V>)> {
self.vec.pop_front()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/mod.rs | lib/src/dio/mod.rs | pub(crate) mod bus;
pub(crate) mod child;
pub(crate) mod dao;
pub(crate) mod dao_mut;
pub(crate) mod dio;
pub(crate) mod dio_mut;
pub(crate) mod foreign;
pub(crate) mod map;
pub(crate) mod row;
pub(crate) mod test;
pub(crate) mod vec;
pub(crate) mod weak;
pub use super::dio::dao_mut::DaoAuthGuard;
pub use super::dio::dao_mut::DaoMutGuard;
pub use super::dio::dao_mut::DaoMutGuardOwned;
pub use super::dio::dio::Dio;
pub use super::dio::dio::DioSessionGuard;
pub use super::dio::dio::DioSessionGuardMut;
pub use super::dio::dio_mut::DioMut;
pub use super::dio::map::DaoMap;
pub use crate::dio::bus::Bus;
pub use crate::dio::bus::BusEvent;
pub use crate::dio::bus::TryBusEvent;
pub use crate::dio::child::DaoChild;
pub use crate::dio::dao::Dao;
pub use crate::dio::dao::DaoObj;
pub use crate::dio::dao_mut::DaoMut;
pub use crate::dio::foreign::DaoForeign;
pub use crate::dio::vec::DaoVec;
pub use crate::dio::weak::DaoWeak;
pub(crate) use self::dio::DioScope;
pub(crate) use self::dio_mut::DioMutState;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/dao.rs | lib/src/dio/dao.rs | #![allow(unused_imports)]
use fxhash::FxHashSet;
use tracing::{debug, warn};
use bytes::Bytes;
use serde::{de::DeserializeOwned, Serialize};
use std::ops::{Deref, DerefMut};
use std::sync::{Arc, Weak};
use std::sync::{Mutex, MutexGuard};
use crate::crypto::{EncryptedPrivateKey, PrivateSignKey};
use crate::{
crypto::EncryptKey,
session::{AteSession, AteSessionProperty},
};
use super::dio_mut::*;
use crate::crypto::AteHash;
use crate::dio::*;
use crate::error::*;
use crate::event::*;
use crate::header::*;
use crate::index::*;
use crate::meta::*;
use crate::spec::*;
use super::row::*;
pub use super::vec::DaoVec;
pub trait DaoObj {
fn key(&self) -> &PrimaryKey;
fn auth(&self) -> &MetaAuthorization;
fn dio(&self) -> &Arc<Dio>;
fn when_created(&self) -> u64;
fn when_updated(&self) -> u64;
}
/// Represents a data object that will be represented as one or
/// more events on the redo-log and validated in the chain-of-trust.
///
/// Reading this object using none-mutable behavior will incur no IO
/// on the redo-log however if you edit the object you must commit it
/// to the `Dio` before it goes out of scope or the data will be lost
/// (in Debug mode this will even trigger an assert).
///
/// Metadata about the data object can also be accessed via this object
/// which allows you to read access rights, etc.
///
/// If you wish to actually modify the data you must first call the 'mut'
/// function on an open transaction, which will give you an object you
/// can modify
pub struct Dao<D> {
dio: Arc<Dio>,
pub(super) row_header: RowHeader,
pub(super) row: Row<D>,
}
impl<D> Clone for Dao<D>
where
D: Clone,
{
fn clone(&self) -> Self {
Dao {
dio: self.dio.clone(),
row_header: self.row_header.clone(),
row: self.row.clone(),
}
}
}
impl<D> std::fmt::Debug for Dao<D>
where
D: std::fmt::Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
self.row.fmt(f)
}
}
impl<D> PartialEq<Dao<D>>
for Dao<D>
where D: PartialEq<D>
{
fn eq(&self, other: &Dao<D>) -> bool {
if self.row.key.ne(&other.row.key) {
return false;
}
if self.row_header.auth.ne(&other.row_header.auth) {
return false;
}
if self.row_header.parent.ne(&other.row_header.parent) {
return false;
}
self.row.data.eq(&other.row.data)
}
}
impl<D> Eq
for Dao<D>
where D: Eq + PartialEq<Dao<D>>
{ }
impl<D> Dao<D> {
pub(super) fn new(dio: &Arc<Dio>, row_header: RowHeader, row: Row<D>) -> Dao<D> {
Dao {
dio: Arc::clone(dio),
row_header,
row,
}
}
pub fn take(self) -> D {
self.row.data
}
pub fn parent(&self) -> Option<MetaCollection> {
self.row_header.parent.as_ref().map(|a| a.vec.clone())
}
pub fn parent_id(&self) -> Option<PrimaryKey> {
self.row_header
.parent
.as_ref()
.map(|a| a.vec.parent_id.clone())
}
}
impl<D> DaoObj for Dao<D> {
fn auth(&self) -> &MetaAuthorization {
&self.row_header.auth
}
fn dio(&self) -> &Arc<Dio> {
&self.dio
}
fn key(&self) -> &PrimaryKey {
&self.row.key
}
fn when_created(&self) -> u64 {
self.row.created
}
fn when_updated(&self) -> u64 {
self.row.updated
}
}
impl<D> std::ops::Deref for Dao<D> {
type Target = D;
fn deref(&self) -> &Self::Target {
&self.row.data
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/child.rs | lib/src/dio/child.rs | #![allow(unused_imports)]
use error_chain::bail;
use std::marker::PhantomData;
use std::sync::{Arc, Weak};
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::Instrument;
use super::dio::DioWeak;
use super::dio_mut::DioMutWeak;
use super::vec::DaoVecState as DaoChildState;
use super::vec::Iter;
use super::vec::IterMut;
use crate::dio::dao::*;
use crate::dio::*;
use crate::error::*;
use crate::header::*;
use serde::de::*;
use serde::*;
/// Rerepresents a reference to another data object with strong
/// type linting to make the model more solidified
///
#[derive(Serialize, Deserialize)]
pub struct DaoChild<D> {
pub(super) vec_id: u64,
#[serde(skip)]
pub(super) state: DaoChildState,
#[serde(skip)]
pub(super) dio: DioWeak,
#[serde(skip)]
pub(super) dio_mut: DioMutWeak,
#[serde(skip)]
pub(super) _marker: PhantomData<D>,
}
impl<D> std::fmt::Debug for DaoChild<D>
where
D: std::fmt::Debug,
{
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let type_name = std::any::type_name::<D>();
write!(f, "dao-child(type={})", type_name)
}
}
impl<D> Default for DaoChild<D> {
fn default() -> Self {
DaoChild::new()
}
}
impl<D> Clone for DaoChild<D> {
fn clone(&self) -> Self {
DaoChild {
state: self.state.clone(),
vec_id: self.vec_id,
dio: self.dio.clone(),
dio_mut: self.dio_mut.clone(),
_marker: PhantomData,
}
}
}
impl<D> DaoChild<D> {
pub fn new() -> DaoChild<D> {
DaoChild {
state: DaoChildState::Unsaved,
dio: DioWeak::Uninitialized,
dio_mut: DioMutWeak::Uninitialized,
vec_id: fastrand::u64(..),
_marker: PhantomData,
}
}
pub fn dio(&self) -> Option<Arc<Dio>> {
match &self.dio {
DioWeak::Uninitialized => None,
DioWeak::Weak(a) => Weak::upgrade(a),
}
}
pub fn dio_mut(&self) -> Option<Arc<DioMut>> {
match &self.dio_mut {
DioMutWeak::Uninitialized => None,
DioMutWeak::Weak(a) => Weak::upgrade(a),
}
}
async fn iter(&self) -> Result<Iter<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
self.iter_ext(false, false).await
}
async fn iter_ext(
&self,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<Iter<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
let children = match &self.state {
DaoChildState::Unsaved => vec![],
DaoChildState::Saved(parent_id) => {
if let Some(dio) = self.dio_mut() {
dio.children_ext(
parent_id.clone(),
self.vec_id,
allow_missing_keys,
allow_serialization_error,
)
.await?
.into_iter()
.rev()
.map(|a: DaoMut<D>| a.inner)
.collect::<Vec<_>>()
} else {
let dio = match self.dio() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
dio.children_ext(
parent_id.clone(),
self.vec_id,
allow_missing_keys,
allow_serialization_error,
)
.await?
.into_iter()
.rev()
.collect::<Vec<_>>()
}
}
};
Ok(Iter::new(children))
}
async fn iter_mut(&mut self) -> Result<IterMut<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
self.iter_mut_ext(false, false).await
}
async fn iter_mut_ext(
&mut self,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<IterMut<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
let children = match &self.state {
DaoChildState::Unsaved => vec![],
DaoChildState::Saved(parent_id) => {
let dio = match self.dio_mut() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
let mut ret = Vec::default();
for child in dio
.children_ext::<D>(
parent_id.clone(),
self.vec_id,
allow_missing_keys,
allow_serialization_error,
)
.await?
.into_iter()
.rev()
{
ret.push(child)
}
ret
}
};
Ok(IterMut::new(children))
}
pub async fn clear(&mut self) -> Result<(), LoadError>
where
D: Serialize + DeserializeOwned,
{
for child in self.iter_mut().await? {
child.delete()?;
}
Ok(())
}
/// Loads the data object (if it exists)
pub async fn load(&self) -> Result<Option<Dao<D>>, LoadError>
where
D: Serialize + DeserializeOwned,
{
Ok(self.iter().await?.next())
}
/// Loads the data object (if it exists)
pub async fn load_mut(&mut self) -> Result<Option<DaoMut<D>>, LoadError>
where
D: Serialize + DeserializeOwned,
{
Ok(self.iter_mut().await?.next())
}
/// Stores the data within this reference
pub async fn store(&mut self, data: D) -> Result<DaoMut<D>, LoadError>
where
D: Clone + Serialize + DeserializeOwned,
{
self.store_with_key(data, PrimaryKey::generate()).await
}
/// Stores the data within this reference
pub async fn store_with_key(&mut self, data: D, key: PrimaryKey) -> Result<DaoMut<D>, LoadError>
where
D: Clone + Serialize + DeserializeOwned,
{
self.clear().await?;
let dio = match self.dio_mut() {
Some(a) => a,
None => bail!(LoadErrorKind::WeakDio),
};
let parent_id = match &self.state {
DaoChildState::Unsaved => {
bail!(LoadErrorKind::SerializationError(
SerializationErrorKind::SaveParentFirst
));
}
DaoChildState::Saved(a) => a.clone(),
};
let mut ret = dio.store_with_key(data, key)?;
ret.attach_ext(parent_id, self.vec_id)?;
Ok(ret)
}
/// Loads all the orphanes
pub async fn orphans(&mut self) -> Result<IterMut<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
let mut iter = self.iter_mut().await?;
let _top = iter.next();
Ok(iter)
}
/// Loads the data object or uses a default if none exists
pub async fn unwrap_or(&mut self, default: D) -> Result<D, LoadError>
where
D: Serialize + DeserializeOwned,
{
match self.load().await? {
Some(a) => Ok(a.take()),
None => Ok(default),
}
}
/// Loads the data object or uses a default if none exists
pub async fn unwrap_or_else<F: FnOnce() -> D>(&mut self, f: F) -> Result<D, LoadError>
where
D: Serialize + DeserializeOwned,
{
match self.load().await? {
Some(a) => Ok(a.take()),
None => Ok(f()),
}
}
/// Loads the data object or creates a new one (if it does not exist)
pub async fn unwrap_or_default(&mut self) -> Result<D, LoadError>
where
D: Serialize + DeserializeOwned + Default,
{
Ok(self
.unwrap_or_else(|| {
let ret: D = Default::default();
ret
})
.await?)
}
pub async fn expect(&self, msg: &str) -> Dao<D>
where
D: Serialize + DeserializeOwned,
{
match self.load().await {
Ok(Some(a)) => a,
Ok(None) => {
panic!("{}", msg);
}
Err(err) => {
panic!("{}: {:?}", msg, err);
}
}
}
pub async fn unwrap(&self) -> Dao<D>
where
D: Serialize + DeserializeOwned,
{
self.load()
.await
.ok()
.flatten()
.expect("called `DaoRef::unwrap()` that failed to load")
}
pub async fn is_some(&self) -> Result<bool, LoadError>
where
D: Serialize + DeserializeOwned,
{
Ok(self.iter().await?.next().is_some())
}
pub async fn is_none(&self) -> Result<bool, LoadError>
where
D: Serialize + DeserializeOwned,
{
Ok(!self.is_some().await?)
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/dio.rs | lib/src/dio/dio.rs | #![allow(unused_imports)]
use crate::prelude::*;
use error_chain::bail;
use fxhash::FxHashMap;
use fxhash::FxHashSet;
use multimap::MultiMap;
use serde::Deserialize;
use serde::{de::DeserializeOwned, de::Deserializer, Serialize, Serializer};
use std::cell::RefCell;
use std::ops::Deref;
use std::ops::DerefMut;
use std::rc::Rc;
use std::sync::Mutex as StdMutex;
use std::sync::RwLock as StdRwLock;
use std::sync::Weak;
use std::{fmt::Debug, sync::Arc};
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::Instrument;
use super::dao::*;
use super::row::*;
use super::DioMutState;
use crate::comms::*;
use crate::error::*;
use crate::event::*;
use crate::header::PrimaryKeyScope;
use crate::index::*;
use crate::lint::*;
use crate::meta::*;
use crate::spec::*;
use crate::time::*;
use crate::transaction::*;
use crate::tree::*;
use crate::trust::LoadStrongResult;
use crate::crypto::{EncryptedPrivateKey, PrivateSignKey};
use crate::{
crypto::EncryptKey,
session::{AteSession, AteSessionProperty},
};
#[derive(Debug)]
pub(crate) struct DioState {
pub(super) cache_load: FxHashMap<PrimaryKey, (Arc<EventStrongData>, EventLeaf)>,
}
/// Represents a series of mutations that the user is making on a particular chain-of-trust
/// with a specific set of facts attached to a session. All changes are stored in memory
/// until the commit function is invoked which will feed them into the chain.
///
/// If you decide to abort the transaction then call the `cancel` function before it goes
/// out of scope however if you mutate data and do not call `commit` then the data will be
/// lost (or an assert will be triggerd when in Debug mode).
///
/// These objects are multi-thread safe and allow for very high concurrency through async
/// operations.
///
/// When setting the scope for the DIO it will behave differently when the commit function
/// is invoked based on what scope you set for the transaction.
#[derive(Debug)]
pub struct Dio {
pub(super) chain: Arc<Chain>,
pub(super) multi: ChainMultiUser,
pub(super) state: StdMutex<DioState>,
pub(super) session: StdRwLock<Box<dyn AteSession>>,
pub(super) time: Arc<TimeKeeper>,
pub(crate) log_format: Option<MessageFormat>,
}
pub(crate) struct DioScope {
pop: Option<Arc<Dio>>,
_negative: Rc<()>,
}
impl DioScope {
pub fn new(dio: &Arc<Dio>) -> Self {
DioScope {
pop: Dio::current_set(Some(Arc::clone(dio))),
_negative: Rc::new(()),
}
}
}
impl Drop for DioScope {
fn drop(&mut self) {
Dio::current_set(self.pop.take());
}
}
pub(crate) enum DioWeak {
Uninitialized,
Weak(Weak<Dio>),
}
impl Default for DioWeak {
fn default() -> Self {
match Dio::current_get() {
Some(a) => DioWeak::Weak(Arc::downgrade(&a)),
None => DioWeak::Uninitialized,
}
}
}
impl Clone for DioWeak {
fn clone(&self) -> Self {
match self {
Self::Uninitialized => Self::default(),
Self::Weak(a) => Self::Weak(Weak::clone(a)),
}
}
}
impl From<&Arc<Dio>> for DioWeak {
fn from(val: &Arc<Dio>) -> Self {
DioWeak::Weak(Arc::downgrade(val))
}
}
impl From<&Arc<DioMut>> for DioWeak {
fn from(val: &Arc<DioMut>) -> Self {
DioWeak::Weak(Arc::downgrade(&val.dio))
}
}
impl Dio {
thread_local! {
static CURRENT: RefCell<Option<Arc<Dio>>> = RefCell::new(None)
}
pub(crate) fn current_get() -> Option<Arc<Dio>> {
Dio::CURRENT.with(|dio| {
let dio = dio.borrow();
return dio.clone();
})
}
fn current_set(val: Option<Arc<Dio>>) -> Option<Arc<Dio>> {
Dio::CURRENT.with(|dio| {
let mut dio = dio.borrow_mut();
match val {
Some(a) => dio.replace(a),
None => dio.take(),
}
})
}
pub fn chain(&self) -> &Arc<Chain> {
&self.chain
}
pub fn remote<'a>(&'a self) -> Option<&'a url::Url> {
self.chain.remote()
}
async fn run_async<F>(&self, future: F) -> F::Output
where
F: std::future::Future,
{
let key_str = self.chain.key().to_string();
future.instrument(span!(Level::DEBUG, "dio", key = key_str.as_str()))
.await
}
pub async fn load_raw(self: &Arc<Self>, key: &PrimaryKey) -> Result<EventStrongData, LoadError> {
self.run_async(self.__load_raw(key)).await
}
pub(super) async fn __load_raw(
self: &Arc<Self>,
key: &PrimaryKey,
) -> Result<EventStrongData, LoadError> {
let leaf = match self.multi.lookup_primary(key).await {
Some(a) => a,
None => bail!(LoadErrorKind::NotFound(key.clone())),
};
let data = self.multi.load(leaf).await?.data;
Ok(data)
}
pub async fn load<D>(self: &Arc<Self>, key: &PrimaryKey) -> Result<Dao<D>, LoadError>
where
D: DeserializeOwned,
{
self.run_async(self.__load(key)).await
}
pub(super) async fn __load<D>(self: &Arc<Self>, key: &PrimaryKey) -> Result<Dao<D>, LoadError>
where
D: DeserializeOwned,
{
{
let state = self.state.lock().unwrap();
if let Some((dao, leaf)) = state.cache_load.get(key) {
let (row_header, row) =
Row::from_event(self, dao.deref(), leaf.created, leaf.updated)?;
return Ok(Dao::new(self, row_header, row));
}
}
let leaf = match self.multi.lookup_primary(key).await {
Some(a) => a,
None => bail!(LoadErrorKind::NotFound(key.clone())),
};
Ok(self.load_from_entry(leaf).await?)
}
pub async fn load_and_take<D>(self: &Arc<Self>, key: &PrimaryKey) -> Result<D, LoadError>
where
D: DeserializeOwned,
{
let ret: Dao<D> = self.load(key).await?;
Ok(ret.take())
}
pub async fn exists(&self, key: &PrimaryKey) -> bool {
self.run_async(self.__exists(key)).await
}
pub(super) async fn __exists(&self, key: &PrimaryKey) -> bool {
{
let state = self.state.lock().unwrap();
if let Some((_, _)) = state.cache_load.get(key) {
return true;
}
}
self.multi.lookup_primary(key).await.is_some()
}
pub(crate) async fn load_from_entry<D>(
self: &Arc<Self>,
leaf: EventLeaf,
) -> Result<Dao<D>, LoadError>
where
D: DeserializeOwned,
{
self.run_async(self.__load_from_entry(leaf)).await
}
pub(super) async fn __load_from_entry<D>(
self: &Arc<Self>,
leaf: EventLeaf,
) -> Result<Dao<D>, LoadError>
where
D: DeserializeOwned,
{
let evt = self.multi.load(leaf).await?;
let session = self.session();
Ok(self.load_from_event(session.as_ref(), evt.data, evt.header.as_header()?, leaf)?)
}
pub(crate) fn load_from_event<D>(
self: &Arc<Self>,
session: &'_ dyn AteSession,
mut data: EventStrongData,
header: EventHeader,
leaf: EventLeaf,
) -> Result<Dao<D>, LoadError>
where
D: DeserializeOwned,
{
data.data_bytes = match data.data_bytes {
Some(data) => Some(self.multi.data_as_overlay(&header.meta, data, session)?),
None => None,
};
let mut state = self.state.lock().unwrap();
match header.meta.get_data_key() {
Some(key) => {
let (row_header, row) = Row::from_event(self, &data, leaf.created, leaf.updated)?;
state.cache_load.insert(key.clone(), (Arc::new(data), leaf));
Ok(Dao::new(self, row_header, row))
}
None => Err(LoadErrorKind::NoPrimaryKey.into()),
}
}
pub async fn children_keys(
self: &Arc<Self>,
parent_id: PrimaryKey,
collection_id: u64,
) -> Result<Vec<PrimaryKey>, LoadError> {
self.run_async(self.__children_keys(parent_id, collection_id))
.await
}
pub async fn __children_keys(
self: &Arc<Self>,
parent_id: PrimaryKey,
collection_id: u64,
) -> Result<Vec<PrimaryKey>, LoadError> {
// Build the secondary index key
let collection_key = MetaCollection {
parent_id,
collection_id,
};
// Build a list of keys
let keys = match self.multi.lookup_secondary_raw(&collection_key).await {
Some(a) => a,
None => return Ok(Vec::new()),
};
Ok(keys)
}
pub async fn root_keys(
self: &Arc<Self>,
) -> Vec<PrimaryKey> {
self.run_async(self.__root_keys())
.await
}
pub async fn __root_keys(
self: &Arc<Self>,
) -> Vec<PrimaryKey> {
self.multi.roots_raw().await
}
pub async fn all_keys(self: &Arc<Self>) -> Vec<PrimaryKey> {
self.run_async(self.__all_keys()).await
}
pub async fn __all_keys(self: &Arc<Self>) -> Vec<PrimaryKey> {
let guard = self.multi.inside_async.read().await;
let keys = guard.chain.timeline.pointers.all_keys();
keys.map(|a| a.clone()).collect::<Vec<_>>()
}
pub async fn children<D>(
self: &Arc<Self>,
parent_id: PrimaryKey,
collection_id: u64,
) -> Result<Vec<Dao<D>>, LoadError>
where
D: DeserializeOwned,
{
self.children_ext(parent_id, collection_id, false, false)
.await
}
pub async fn children_ext<D>(
self: &Arc<Self>,
parent_id: PrimaryKey,
collection_id: u64,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<Vec<Dao<D>>, LoadError>
where
D: DeserializeOwned,
{
self.run_async(self.__children_ext(
parent_id,
collection_id,
allow_missing_keys,
allow_serialization_error,
))
.await
}
pub(super) async fn __children_ext<D>(
self: &Arc<Self>,
parent_id: PrimaryKey,
collection_id: u64,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<Vec<Dao<D>>, LoadError>
where
D: DeserializeOwned,
{
// Load all the objects
let keys = self.__children_keys(parent_id, collection_id).await?;
Ok(self
.__load_many_ext(
keys.into_iter(),
allow_missing_keys,
allow_serialization_error,
)
.await?)
}
pub async fn roots<D>(
self: &Arc<Self>,
) -> Result<Vec<Dao<D>>, LoadError>
where
D: DeserializeOwned,
{
self.roots_ext(false, false)
.await
}
pub async fn roots_ext<D>(
self: &Arc<Self>,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<Vec<Dao<D>>, LoadError>
where
D: DeserializeOwned,
{
self.run_async(self.__roots_ext(
allow_missing_keys,
allow_serialization_error,
))
.await
}
pub(super) async fn __roots_ext<D>(
self: &Arc<Self>,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<Vec<Dao<D>>, LoadError>
where
D: DeserializeOwned,
{
// Load all the objects
let keys = self.__root_keys().await;
Ok(self
.__load_many_ext(
keys.into_iter(),
allow_missing_keys,
allow_serialization_error,
)
.await?)
}
pub async fn load_many<D>(
self: &Arc<Self>,
keys: impl Iterator<Item = PrimaryKey>,
) -> Result<Vec<Dao<D>>, LoadError>
where
D: DeserializeOwned,
{
self.load_many_ext(keys, false, false).await
}
pub async fn load_many_ext<D>(
self: &Arc<Self>,
keys: impl Iterator<Item = PrimaryKey>,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<Vec<Dao<D>>, LoadError>
where
D: DeserializeOwned,
{
self.run_async(self.__load_many_ext(keys, allow_missing_keys, allow_serialization_error))
.await
}
pub(super) async fn __load_many_ext<D>(
self: &Arc<Self>,
keys: impl Iterator<Item = PrimaryKey>,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<Vec<Dao<D>>, LoadError>
where
D: DeserializeOwned,
{
// This is the main return list
let mut already = FxHashSet::default();
let mut ret = Vec::new();
// We either find existing objects in the cache or build a list of objects to load
let to_load = {
let mut to_load = Vec::new();
let inside_async = self.multi.inside_async.read().await;
let state = self.state.lock().unwrap();
for key in keys {
if let Some((dao, leaf)) = state.cache_load.get(&key) {
let (row_header, row) =
Row::from_event(self, dao.deref(), leaf.created, leaf.updated)?;
already.insert(row.key.clone());
ret.push(Dao::new(self, row_header, row));
continue;
}
to_load.push(match inside_async.chain.lookup_primary(&key) {
Some(a) => a,
None => continue,
});
}
to_load
};
// Load all the objects that have not yet been loaded
let to_load = self.multi.load_many(to_load).await?;
// Now process all the objects
let ret = {
let mut state = self.state.lock().unwrap();
let session = self.session();
for mut evt in to_load {
let mut header = evt.header.as_header()?;
let key = match header.meta.get_data_key() {
Some(k) => k,
None => {
continue;
}
};
if let Some((dao, leaf)) = state.cache_load.get(&key) {
let (row_header, row) =
Row::from_event(self, dao.deref(), leaf.created, leaf.updated)?;
already.insert(row.key.clone());
ret.push(Dao::new(self, row_header, row));
}
let (row_header, row) = match self.__process_load_row(
session.as_ref(),
&mut evt,
&mut header.meta,
allow_missing_keys,
allow_serialization_error,
)? {
Some(a) => a,
None => {
continue;
}
};
state
.cache_load
.insert(row.key.clone(), (Arc::new(evt.data), evt.leaf));
already.insert(row.key.clone());
ret.push(Dao::new(self, row_header, row));
}
ret
};
Ok(ret)
}
pub(crate) fn data_as_overlay(
self: &Arc<Self>,
session: &'_ dyn AteSession,
data: &mut EventStrongData,
) -> Result<(), TransformError> {
data.data_bytes = match &data.data_bytes {
Some(d) => Some(self.multi.data_as_overlay(&data.meta, d.clone(), session)?),
None => None,
};
Ok(())
}
pub(super) fn __process_load_row<D>(
self: &Arc<Self>,
session: &'_ dyn AteSession,
evt: &mut LoadStrongResult,
meta: &Metadata,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<Option<(RowHeader, Row<D>)>, LoadError>
where
D: DeserializeOwned,
{
evt.data.data_bytes = match &evt.data.data_bytes {
Some(data) => {
let data = match self.multi.data_as_overlay(meta, data.clone(), session) {
Ok(a) => a,
Err(TransformError(TransformErrorKind::MissingReadKey(_hash), _))
if allow_missing_keys =>
{
//trace!("Missing read key {} - ignoring row", _hash);
return Ok(None);
}
Err(err) => {
bail!(LoadErrorKind::TransformationError(err.0));
}
};
Some(data)
}
None => {
return Ok(None);
}
};
let (row_header, row) =
match Row::from_event(self, &evt.data, evt.leaf.created, evt.leaf.updated) {
Ok(a) => a,
Err(err) => {
if allow_serialization_error {
//trace!("Serialization error {} - ignoring row", err);
return Ok(None);
}
bail!(LoadErrorKind::SerializationError(err.0));
}
};
Ok(Some((row_header, row)))
}
pub fn session<'a>(&'a self) -> DioSessionGuard<'a> {
DioSessionGuard::new(self)
}
pub fn session_mut<'a>(&'a self) -> DioSessionGuardMut<'a> {
DioSessionGuardMut::new(self)
}
pub async fn wait_for_accurate_timing(&self) {
self.time.wait_for_high_accuracy().await;
}
pub(crate) fn run_decache(self: &Arc<Dio>, mut decache: broadcast::Receiver<Vec<PrimaryKey>>) {
let dio = Arc::downgrade(self);
TaskEngine::spawn(async move {
loop {
let recv =
crate::engine::timeout(std::time::Duration::from_secs(5), decache.recv()).await;
let dio = match Weak::upgrade(&dio) {
Some(a) => a,
None => {
break;
}
};
let recv = match recv {
Ok(a) => a,
Err(_) => {
continue;
}
};
let recv = match recv {
Ok(a) => a,
Err(broadcast::error::RecvError::Closed) => {
break;
}
Err(broadcast::error::RecvError::Lagged(_)) => {
continue;
}
};
let mut state = dio.state.lock().unwrap();
for key in recv {
state.cache_load.remove(&key);
}
loop {
match decache.try_recv() {
Ok(recv) => {
for key in recv {
state.cache_load.remove(&key);
}
}
Err(_) => {
break;
}
}
}
}
});
}
}
pub struct DioSessionGuard<'a> {
lock: std::sync::RwLockReadGuard<'a, Box<dyn AteSession>>,
}
impl<'a> DioSessionGuard<'a> {
fn new(dio: &'a Dio) -> DioSessionGuard<'a> {
DioSessionGuard {
lock: dio.session.read().unwrap(),
}
}
pub fn as_ref(&self) -> &dyn AteSession {
self.lock.deref().deref()
}
}
impl<'a> Deref for DioSessionGuard<'a> {
type Target = dyn AteSession;
fn deref(&self) -> &Self::Target {
self.lock.deref().deref()
}
}
pub struct DioSessionGuardMut<'a> {
lock: std::sync::RwLockWriteGuard<'a, Box<dyn AteSession>>,
}
impl<'a> DioSessionGuardMut<'a> {
fn new(dio: &'a Dio) -> DioSessionGuardMut<'a> {
DioSessionGuardMut {
lock: dio.session.write().unwrap(),
}
}
pub fn as_ref(&self) -> &dyn AteSession {
self.lock.deref().deref()
}
pub fn as_mut(&mut self) -> &mut dyn AteSession {
self.lock.deref_mut().deref_mut()
}
}
impl<'a> Deref for DioSessionGuardMut<'a> {
type Target = dyn AteSession;
fn deref(&self) -> &Self::Target {
self.lock.deref().deref()
}
}
impl<'a> DerefMut for DioSessionGuardMut<'a> {
fn deref_mut(&mut self) -> &mut Self::Target {
self.lock.deref_mut().deref_mut()
}
}
impl Chain {
/// Opens a data access layer that allows read only access to data within the chain
/// In order to make changes to data you must use '.dio_mut', '.dio_fire', '.dio_full' or '.dio_trans'
pub async fn dio(self: &Arc<Chain>, session: &'_ dyn AteSession) -> Arc<Dio> {
let decache = self.decache.subscribe();
let multi = self.multi().await;
let ret = Dio {
chain: Arc::clone(self),
state: StdMutex::new(DioState {
cache_load: FxHashMap::default(),
}),
session: StdRwLock::new(session.clone_session()),
log_format: Some(multi.default_format.clone()),
multi,
time: Arc::clone(&self.time),
};
let ret = Arc::new(ret);
ret.run_decache(decache);
ret
}
}
impl Dio {
pub async fn as_mut(self: &Arc<Self>) -> Arc<DioMut> {
self.trans(TransactionScope::Local).await
}
pub async fn trans(self: &Arc<Self>, scope: TransactionScope) -> Arc<DioMut> {
DioMut::new(self, scope).await
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/dio_mut.rs | lib/src/dio/dio_mut.rs | #![allow(unused_imports)]
use crate::prelude::*;
use error_chain::bail;
use fxhash::FxHashMap;
use fxhash::FxHashSet;
use multimap::MultiMap;
use serde::Deserialize;
use serde::{de::DeserializeOwned, de::Deserializer, Serialize, Serializer};
use std::cell::RefCell;
use std::ops::Deref;
use std::rc::Rc;
use std::sync::Mutex;
use std::sync::Weak;
use std::time::Duration;
use std::{fmt::Debug, sync::Arc};
use tokio::sync::mpsc;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::Instrument;
use super::dao::*;
use super::dao_mut::*;
use super::dio::*;
use super::row::*;
use crate::chain::ChainWork;
use crate::comms::*;
use crate::error::*;
use crate::event::*;
use crate::header::PrimaryKeyScope;
use crate::index::*;
use crate::lint::*;
use crate::meta::*;
use crate::spec::*;
use crate::time::*;
use crate::transaction::*;
use crate::tree::*;
use crate::trust::LoadStrongResult;
use crate::crypto::{EncryptedPrivateKey, PrivateSignKey};
use crate::{
crypto::EncryptKey,
session::{AteSession, AteSessionProperty},
};
pub(crate) struct DioMutState {
pub(super) store_ordered: Vec<RowHeader>,
pub(super) store_secondary: MultiMap<MetaCollection, PrimaryKey>,
pub(super) rows: FxHashMap<PrimaryKey, RowData>,
pub(super) locked: FxHashSet<PrimaryKey>,
pub(super) deleted: FxHashSet<PrimaryKey>,
pub(super) pipe_unlock: FxHashSet<PrimaryKey>,
pub(super) auto_cancel: bool,
}
impl DioMutState {
/// Returns true if the row also needs to be updated
pub(crate) fn dirty_header(&mut self, header: RowHeader) -> bool {
if let Some(parent) = &header.parent {
let exists = self
.store_secondary
.get_vec(&parent.vec)
.iter()
.map(|a| a.iter())
.flatten()
.any(|a| *a == header.key);
if exists == false {
self.store_secondary.insert(parent.vec.clone(), header.key);
}
}
{
// If the last row is a already there then we only need update it
// and we don't need to do a complete data save
if let Some(row) = self.store_ordered.iter_mut().rev().next() {
if row.key == header.key {
*row = header;
return false;
}
}
}
self.store_ordered.push(header);
return true;
}
pub(crate) fn dirty_row(&mut self, row: RowData) -> bool {
let mut ret = true;
if let Some(existing) = self.rows.get(&row.key) {
if existing.data_hash == row.data_hash {
//trace!("skipping row that has not changed [{}]", row.key);
ret = false;
}
}
let key = row.key.clone();
let parent = row.parent.clone();
self.rows.insert(key.clone(), row);
if let Some(parent) = parent {
let exists = self
.store_secondary
.get_vec(&parent.vec)
.iter()
.map(|a| a.iter())
.flatten()
.any(|a| *a == key);
if exists == false {
self.store_secondary.insert(parent.vec, key);
}
}
ret
}
pub(super) fn lock(&mut self, key: &PrimaryKey) -> bool {
self.locked.insert(key.clone())
}
pub(super) fn unlock(&mut self, key: &PrimaryKey) -> bool {
self.locked.remove(key)
}
pub(super) fn is_locked(&self, key: &PrimaryKey) -> bool {
self.locked.contains(key)
}
pub(super) fn add_deleted(&mut self, key: PrimaryKey, parent: Option<MetaParent>) {
if self.lock(&key) == false {
eprintln!("Detected concurrent write while deleting a data object ({:?}) - the delete operation will override everything else", key);
}
self.rows.remove(&key);
if let Some(tree) = parent {
if let Some(y) = self.store_secondary.get_vec_mut(&tree.vec) {
y.retain(|x| *x == key);
}
}
self.deleted.insert(key);
}
}
impl DioMutState {
fn new() -> DioMutState {
DioMutState {
store_ordered: Vec::new(),
rows: FxHashMap::default(),
store_secondary: MultiMap::new(),
locked: FxHashSet::default(),
deleted: FxHashSet::default(),
pipe_unlock: FxHashSet::default(),
auto_cancel: true,
}
}
fn clear(&mut self) {
self.store_ordered.clear();
self.rows.clear();
self.store_secondary.clear();
self.locked.clear();
self.deleted.clear();
self.pipe_unlock.clear();
}
}
/// Represents a series of mutations that the user is making on a particular chain-of-trust
/// with a specific set of facts attached to a session. All changes are stored in memory
/// until the commit function is invoked which will feed them into the chain.
///
/// If you decide to abort the transaction then call the `cancel` function before it goes
/// out of scope however if you mutate data and do not call `commit` then the data will be
/// lost (or an assert will be triggerd when in Debug mode).
///
/// These objects are multi-thread safe and allow for very high concurrency through async
/// operations.
///
/// When setting the scope for the DIO it will behave differently when the commit function
/// is invoked based on what scope you set for the transaction.
pub struct DioMut {
pub dio: Arc<Dio>,
pub scope: TransactionScope,
pub(crate) state: Mutex<DioMutState>,
pub(super) conversation: Option<Arc<ConversationSession>>,
#[cfg(feature = "enable_dio_backtrace")]
pub backtrace_new: backtrace::Backtrace,
}
pub(crate) struct DioMutScope {
_inner: DioScope,
pop: Option<Arc<DioMut>>,
_negative: Rc<()>,
}
impl DioMutScope {
pub fn new(trans: &Arc<DioMut>) -> Self {
DioMutScope {
_inner: DioScope::new(&trans.dio),
pop: DioMut::current_set(Some(Arc::clone(trans))),
_negative: Rc::new(()),
}
}
}
impl Drop for DioMutScope {
fn drop(&mut self) {
DioMut::current_set(self.pop.take());
}
}
pub(crate) enum DioMutWeak {
Uninitialized,
Weak(Weak<DioMut>),
}
impl Default for DioMutWeak {
fn default() -> Self {
match DioMut::current_get() {
Some(a) => DioMutWeak::Weak(Arc::downgrade(&a)),
None => DioMutWeak::Uninitialized,
}
}
}
impl Clone for DioMutWeak {
fn clone(&self) -> Self {
match self {
Self::Uninitialized => Self::default(),
Self::Weak(a) => Self::Weak(Weak::clone(a)),
}
}
}
impl From<&Arc<DioMut>> for DioMutWeak {
fn from(val: &Arc<DioMut>) -> Self {
DioMutWeak::Weak(Arc::downgrade(val))
}
}
impl DioMut {
thread_local! {
static CURRENT: RefCell<Option<Arc<DioMut>>> = RefCell::new(None)
}
pub(crate) fn current_get() -> Option<Arc<DioMut>> {
DioMut::CURRENT.with(|trans| {
let trans = trans.borrow();
return trans.clone();
})
}
fn current_set(val: Option<Arc<DioMut>>) -> Option<Arc<DioMut>> {
DioMut::CURRENT.with(|trans| {
let mut trans = trans.borrow_mut();
match val {
Some(a) => trans.replace(a),
None => trans.take(),
}
})
}
pub async fn new(dio: &Arc<Dio>, scope: TransactionScope) -> Arc<DioMut> {
let ret = DioMut {
dio: Arc::clone(dio),
scope,
state: Mutex::new(DioMutState::new()),
conversation: dio.chain.pipe.conversation().await,
#[cfg(feature = "enable_dio_backtrace")]
backtrace_new: backtrace::Backtrace::new(),
};
Arc::new(ret)
}
pub fn store<D>(self: &Arc<Self>, data: D) -> Result<DaoMut<D>, SerializationError>
where
D: Clone + Serialize + DeserializeOwned,
{
self.store_with_format(data, None, self.dio.log_format)
}
pub fn store_with_key<D>(
self: &Arc<Self>,
data: D,
key: PrimaryKey,
) -> Result<DaoMut<D>, SerializationError>
where
D: Clone + Serialize + DeserializeOwned,
{
self.store_with_format(data, Some(key.clone()), self.dio.log_format)
}
pub fn store_with_format<D>(
self: &Arc<Self>,
data: D,
key: Option<PrimaryKey>,
format: Option<MessageFormat>,
) -> Result<DaoMut<D>, SerializationError>
where
D: Clone + Serialize + DeserializeOwned,
{
let format = match format {
Some(a) => a,
None => self.default_format(),
};
let key = match key {
Some(k) => k,
None => PrimaryKey::generate(),
};
// We serialize then deserialize the object so that vectors and ref
// objects get the proper references needed for the system to work
let _pop1 = DioMutScope::new(self);
let _pop2 = PrimaryKeyScope::new(key);
let data = data.clone();
let row_header = RowHeader {
key: key.clone(),
parent: None,
auth: MetaAuthorization {
read: ReadOption::Inherit,
write: WriteOption::Inherit,
},
};
let row = Row {
key,
type_name: std::any::type_name::<D>().to_string(),
data,
collections: FxHashSet::default(),
format,
created: 0,
updated: 0,
extra_meta: Vec::new(),
is_new: true,
};
let mut ret: DaoMut<D> =
DaoMut::new(Arc::clone(self), Dao::new(&self.dio, row_header, row));
ret.commit(true, true)?;
Ok(ret)
}
async fn run_async<F>(&self, future: F) -> F::Output
where
F: std::future::Future,
{
let key_str = self.chain.key().to_string();
future.instrument(span!(Level::DEBUG, "dio", key = key_str.as_str()))
.await
}
pub async fn delete(&self, key: &PrimaryKey) -> Result<(), SerializationError> {
{
let mut state = self.state.lock().unwrap();
if state.is_locked(key) {
bail!(SerializationErrorKind::ObjectStillLocked(key.clone()));
}
if state.deleted.contains(&key) {
bail!(SerializationErrorKind::AlreadyDeleted(key.clone()));
}
state.store_ordered.retain(|a| a.key != *key);
}
let parent = self.multi.lookup_parent(key).await;
self.state.lock().unwrap().add_deleted(key.clone(), parent);
Ok(())
}
}
impl Chain {
/// Opens a data access layer that allows mutable changes to data.
/// Transaction consistency on commit will be guarranted for local redo log files
pub async fn dio_mut(self: &Arc<Chain>, session: &'_ dyn AteSession) -> Arc<DioMut> {
self.dio_trans(session, TransactionScope::Local).await
}
/// Opens a data access layer that allows mutable changes to data (in a fire-and-forget mode).
/// No transaction consistency on commits will be enforced
pub async fn dio_fire(self: &Arc<Chain>, session: &'_ dyn AteSession) -> Arc<DioMut> {
self.dio_trans(session, TransactionScope::None).await
}
/// Opens a data access layer that allows mutable changes to data.
/// Transaction consistency on commit will be guarranted for all remote replicas
pub async fn dio_full(self: &Arc<Chain>, session: &'_ dyn AteSession) -> Arc<DioMut> {
self.dio_trans(session, TransactionScope::Full).await
}
/// Opens a data access layer that allows mutable changes to data.
/// Transaction consistency on commit must be specified
pub async fn dio_trans(
self: &Arc<Chain>,
session: &'_ dyn AteSession,
scope: TransactionScope,
) -> Arc<DioMut> {
let dio = self.dio(session).await;
dio.trans(scope).await
}
}
impl DioMut {
pub fn has_uncommitted(&self) -> bool {
let state = self.state.lock().unwrap();
if state.store_ordered.is_empty() && state.deleted.is_empty() {
return false;
}
return true;
}
pub fn cancel(&self) {
let mut state = self.state.lock().unwrap();
state.clear();
}
pub fn auto_cancel(&self) {
let mut state = self.state.lock().unwrap();
state.auto_cancel = true;
}
pub fn auto_panic(&self) {
let mut state = self.state.lock().unwrap();
state.auto_cancel = false;
}
pub(crate) fn default_format(&self) -> MessageFormat {
self.dio.multi.default_format.clone()
}
pub async fn commit(&self) -> Result<(), CommitError> {
let timeout = Duration::from_secs(30);
self.commit_ext(timeout).await
}
pub async fn commit_ext(&self, timeout: Duration) -> Result<(), CommitError> {
let (rows, deleted, unlocks) = {
// If we have no dirty records
let mut state = self.state.lock().unwrap();
if state.store_ordered.is_empty() && state.deleted.is_empty() {
return Ok(());
}
// Grab the rows from the state datachain
let rows = state
.store_ordered
.iter()
.filter(|a| state.deleted.contains(&a.key) == false)
.filter_map(|a| match state.rows.get(&a.key) {
Some(b) => Some((a.clone(), b.clone())),
None => None,
})
.collect::<Vec<_>>();
let deleted = state.deleted.iter().map(|a| a.clone()).collect::<Vec<_>>();
let unlocks = state
.pipe_unlock
.iter()
.map(|a| a.clone())
.collect::<Vec<_>>();
// Clear them all down as we have them now
state.clear();
// Now we process them
trace!(
"commit stored={} deleted={} unlocks={}",
rows.len(),
deleted.len(),
unlocks.len()
);
(rows, deleted, unlocks)
};
// Declare variables
let mut evts = Vec::new();
let mut trans_meta = TransactionMetadata::default();
{
// Take all the locks we need to perform the commit actions
let multi_lock = self.multi.lock().await;
let session = self.session();
// Determine the format of the message
let format = match self.log_format {
Some(a) => a,
None => self.multi.default_format,
};
// Convert all the events that we are storing into serialize data
for (row_header, row) in rows {
// Debug output
#[cfg(feature = "enable_verbose")]
trace!("store: {}@{}", row.type_name, row.key.as_hex_string());
// Build a new clean metadata header
let mut meta = Metadata::for_data(row.key);
meta.core
.push(CoreMetadata::Timestamp(self.time.current_timestamp()?));
if row_header.auth.is_relevant() {
meta.core
.push(CoreMetadata::Authorization(row_header.auth.clone()));
}
if let Some(parent) = &row_header.parent {
meta.core.push(CoreMetadata::Parent(parent.clone()))
} else {
if multi_lock.inside_async.disable_new_roots == true && row.is_new == true {
bail!(CommitErrorKind::NewRootsAreDisabled);
}
}
for extra in row.extra_meta.iter() {
meta.core.push(extra.clone());
}
if self.dio.chain.cfg_ate.record_type_name {
if meta.get_type_name().is_none() {
meta.core.push(CoreMetadata::Type(MetaType {
type_name: row.type_name.clone(),
}));
}
}
// Compute all the extra metadata for an event
let extra_meta = multi_lock.metadata_lint_event(
&mut meta,
session.deref(),
&trans_meta,
&row.type_name,
)?;
meta.core.extend(extra_meta);
// Add the data to the transaction metadata object
if let Some(key) = meta.get_data_key() {
trans_meta.auth.insert(
key,
match meta.get_authorization() {
Some(a) => a.clone(),
None => MetaAuthorization {
read: ReadOption::Inherit,
write: WriteOption::Inherit,
},
},
);
if let Some(parent) = meta.get_parent() {
if parent.vec.parent_id != key {
trans_meta.parents.insert(key, parent.clone());
}
};
}
// Perform any transformation (e.g. data encryption and compression)
let data = multi_lock.data_as_underlay(
&mut meta,
row.data.clone(),
session.deref(),
&trans_meta,
)?;
// Only once all the rows are processed will we ship it to the redo log
let evt = EventWeakData {
meta: meta,
data_bytes: MessageBytes::Some(data),
format: row.format,
};
evts.push(evt);
}
// Build events that will represent tombstones on all these records (they will be sent after the writes)
for key in deleted {
let mut meta = Metadata::default();
meta.core
.push(CoreMetadata::Timestamp(self.time.current_timestamp()?));
meta.core
.push(CoreMetadata::Authorization(MetaAuthorization {
read: ReadOption::Everyone(None),
write: WriteOption::Nobody,
}));
if let Some(parent) = multi_lock.inside_async.chain.lookup_parent(&key) {
meta.core.push(CoreMetadata::Parent(parent))
}
meta.add_tombstone(key);
// Compute all the extra metadata for an event
let extra_meta = multi_lock.metadata_lint_event(
&mut meta,
session.deref(),
&trans_meta,
"[unknown]",
)?;
meta.core.extend(extra_meta);
let evt = EventWeakData {
meta: meta,
data_bytes: MessageBytes::None,
format,
};
evts.push(evt);
}
// Lint the data
let mut lints = Vec::new();
for evt in evts.iter() {
lints.push(LintData {
data: evt,
header: evt.as_header()?,
});
}
let meta = multi_lock.metadata_lint_many(
&lints,
session.deref(),
self.conversation.as_ref(),
)?;
// If it has data then insert it at the front of these events
if meta.len() > 0 {
evts.insert(
0,
EventWeakData {
meta: Metadata { core: meta },
data_bytes: MessageBytes::None,
format,
},
);
}
}
#[cfg(feature = "enable_verbose")]
{
for evt in evts.iter() {
trace!("event: {}", evt.meta);
}
}
// Create the transaction
let trans = Transaction {
scope: self.scope.clone(),
transmit: true,
events: evts,
timeout,
conversation: match &self.conversation {
Some(c) => Some(Arc::clone(c)),
None => None,
},
};
trace!("commit events={}", trans.events.len());
// Process the transaction in the chain using its pipe
self.multi.pipe.feed(ChainWork { trans: trans }).await?;
// Last thing we do is kick off an unlock operation using fire and forget
let unlock_multi = self.multi.clone();
let unlock_me = unlocks.iter().map(|a| a.clone()).collect::<Vec<_>>();
for key in unlock_me {
let _ = unlock_multi.pipe.unlock(key).await;
}
// Success
Ok(())
}
}
impl std::ops::Deref for DioMut {
    type Target = Dio;
    /// A mutable DIO transparently exposes the read-only DIO it wraps.
    fn deref(&self) -> &Self::Target {
        &*self.dio
    }
}
impl Drop for DioMut {
    fn drop(&mut self) {
        // Check if auto-cancel is enabled.
        // Use logical `&&` (short-circuit) rather than bitwise `&` so the
        // state mutex is only taken when there actually are uncommitted
        // changes to consider discarding.
        if self.has_uncommitted() && self.state.lock().unwrap().auto_cancel {
            debug!("Data objects have been discarded due to auto-cancel and uncommitted changes");
            #[cfg(feature = "enable_dio_backtrace")]
            debug!("{:?}", self.backtrace_new);
            self.cancel();
        }
        // If the DIO has uncommitted changes then warn the caller
        debug_assert!(!self.has_uncommitted(), "dio-has-uncommitted - the DIO has uncommitted data in it - call the .commit() method before the DIO goes out of scope.");
    }
}
impl DioMut {
pub async fn try_load<D>(
self: &Arc<Self>,
key: &PrimaryKey,
) -> Result<Option<DaoMut<D>>, LoadError>
where
D: Serialize + DeserializeOwned,
{
match self.load(key).await {
Ok(a) => Ok(Some(a)),
Err(LoadError(LoadErrorKind::NotFound(_), _)) => Ok(None),
Err(err) => Err(err),
}
}
    /// Loads a single data object by key.
    ///
    /// Resolution order: rows already written (but not committed) in this
    /// transaction, then the read-only DIO's load cache, and finally the
    /// chain itself via the primary index.
    pub async fn load<D>(self: &Arc<Self>, key: &PrimaryKey) -> Result<DaoMut<D>, LoadError>
    where
        D: Serialize + DeserializeOwned,
    {
        {
            // First consult the uncommitted state of this transaction
            let state = self.state.lock().unwrap();
            let _pop1 = DioMutScope::new(self);
            if state.is_locked(key) {
                bail!(LoadErrorKind::ObjectStillLocked(key.clone()));
            }
            if state.deleted.contains(&key) {
                bail!(LoadErrorKind::AlreadyDeleted(key.clone()));
            }
            if let Some(dao) = state.rows.get(key) {
                let (row_header, row) = Row::from_row_data(&self.dio, dao.deref())?;
                return Ok(DaoMut::new(
                    Arc::clone(self),
                    Dao::<D>::new(&self.dio, row_header, row),
                ));
            }
        }
        {
            // Next try the read-only DIO's cache of previously loaded events
            let state = self.dio.state.lock().unwrap();
            let _pop1 = DioMutScope::new(self);
            if let Some((dao, leaf)) = state.cache_load.get(key) {
                let (row_header, row) =
                    Row::from_event(&self.dio, dao.deref(), leaf.created, leaf.updated)?;
                return Ok(DaoMut::new(
                    Arc::clone(self),
                    Dao::new(&self.dio, row_header, row),
                ));
            }
        }
        // Finally resolve the event leaf from the chain index and load it
        let leaf = match self.multi.lookup_primary(key).await {
            Some(a) => a,
            None => bail!(LoadErrorKind::NotFound(key.clone())),
        };
        Ok(self.load_from_entry(leaf).await?)
    }
pub(crate) async fn load_from_entry<D>(
self: &Arc<Self>,
leaf: EventLeaf,
) -> Result<DaoMut<D>, LoadError>
where
D: Serialize + DeserializeOwned,
{
self.run_async(self.__load_from_entry(leaf)).await
}
    /// Inner loader: reads the event for `leaf` from the chain, then
    /// materializes it while the DIO scope guard is held.
    pub(super) async fn __load_from_entry<D>(
        self: &Arc<Self>,
        leaf: EventLeaf,
    ) -> Result<DaoMut<D>, LoadError>
    where
        D: Serialize + DeserializeOwned,
    {
        let evt = self.multi.load(leaf).await?;
        let session = self.session();
        // Scope guard must be alive while the event is turned into a row
        let _pop1 = DioMutScope::new(self);
        Ok(self.load_from_event(session.as_ref(), evt.data, evt.header.as_header()?, leaf)?)
    }
    /// Turns a chain event into a mutable data object, stripping the storage
    /// overlay first and caching the raw event in the read-only DIO's
    /// load cache.
    pub(crate) fn load_from_event<D>(
        self: &Arc<Self>,
        session: &'_ dyn AteSession,
        mut data: EventStrongData,
        header: EventHeader,
        leaf: EventLeaf,
    ) -> Result<DaoMut<D>, LoadError>
    where
        D: Serialize + DeserializeOwned,
    {
        // Undo the storage transformation (e.g. encryption/compression)
        // using the keys held by the session
        data.data_bytes = match data.data_bytes {
            Some(data) => Some(self.multi.data_as_overlay(&header.meta, data, session)?),
            None => None,
        };
        let mut state = self.dio.state.lock().unwrap();
        let _pop1 = DioMutScope::new(self);
        match header.meta.get_data_key() {
            Some(key) => {
                let (row_header, row) =
                    Row::from_event(&self.dio, &data, leaf.created, leaf.updated)?;
                // Cache the decoded event so repeat loads skip the chain
                state.cache_load.insert(key.clone(), (Arc::new(data), leaf));
                Ok(DaoMut::new(
                    Arc::clone(self),
                    Dao::new(&self.dio, row_header, row),
                ))
            }
            // Events without a data key cannot be materialized as objects
            None => Err(LoadErrorKind::NoPrimaryKey.into()),
        }
    }
pub async fn load_and_take<D>(self: &Arc<Self>, key: &PrimaryKey) -> Result<D, LoadError>
where
D: Serialize + DeserializeOwned,
{
let ret: DaoMut<D> = self.load(key).await?;
Ok(ret.take())
}
pub async fn load_raw(self: &Arc<Self>, key: &PrimaryKey) -> Result<EventStrongData, LoadError> {
self.run_async(self.dio.__load_raw(key)).await
}
pub async fn exists(&self, key: &PrimaryKey) -> bool {
{
let state = self.state.lock().unwrap();
if state.deleted.contains(&key) {
return false;
}
if let Some(_) = state.rows.get(key) {
return true;
}
}
self.dio.__exists(key).await
}
    /// Attempts to take a pessimistic lock on `key` via the chain pipe;
    /// returns `false` when the lock could not be acquired.
    pub async fn try_lock(self: &Arc<Self>, key: PrimaryKey) -> Result<bool, CommitError> {
        self.multi.pipe.try_lock(key).await
    }
    /// Releases a pessimistic lock previously taken with [`DioMut::try_lock`].
    pub async fn unlock(self: &Arc<Self>, key: PrimaryKey) -> Result<(), CommitError> {
        self.multi.pipe.unlock(key).await
    }
pub async fn delete_all_roots(self: &Arc<Self>) -> Result<(), CommitError> {
for key in self.root_keys().await {
self.delete(&key).await?;
}
Ok(())
}
    /// Convenience wrapper around [`DioMut::children_ext`] with
    /// `allow_missing_keys` and `allow_serialization_error` both disabled.
    pub async fn children<D>(
        self: &Arc<Self>,
        parent_id: PrimaryKey,
        collection_id: u64,
    ) -> Result<Vec<DaoMut<D>>, LoadError>
    where
        D: Serialize + DeserializeOwned,
    {
        self.children_ext(parent_id, collection_id, false, false)
            .await
    }
    /// Loads the children attached to `parent_id` under `collection_id`,
    /// merging rows from the chain's secondary index with rows stored
    /// (but not yet committed) in this transaction.
    ///
    /// Fails if any locally-stored matching object is still locked.
    pub async fn children_ext<D>(
        self: &Arc<Self>,
        parent_id: PrimaryKey,
        collection_id: u64,
        allow_missing_keys: bool,
        allow_serialization_error: bool,
    ) -> Result<Vec<DaoMut<D>>, LoadError>
    where
        D: Serialize + DeserializeOwned,
    {
        // Build the secondary index key
        let collection_key = MetaCollection {
            parent_id,
            collection_id,
        };
        // Build a list of keys
        let keys = match self.multi.lookup_secondary_raw(&collection_key).await {
            Some(a) => a,
            None => return Ok(Vec::new()),
        };
        // Perform the lower level calls
        let mut ret: Vec<DaoMut<D>> = self
            .load_many_ext(
                keys.into_iter(),
                allow_missing_keys,
                allow_serialization_error,
            )
            .await?;
        // Build an already loaded list (used for de-duplication below)
        let mut already = FxHashSet::default();
        for a in ret.iter() {
            already.insert(a.key().clone());
        }
        // Now we search the secondary local index so any objects we have
        // added in this transaction scope are returned
        let state = self.state.lock().unwrap();
        let _pop1 = DioMutScope::new(self);
        if let Some(vec) = state.store_secondary.get_vec(&collection_key) {
            for a in vec {
                // This is an OR of two lists so its likely that the object
                // may already be in the return list
                if already.contains(a) {
                    continue;
                }
                if state.deleted.contains(a) {
                    continue;
                }
                // If its still locked then that is a problem
                if state.is_locked(a) {
                    bail!(LoadErrorKind::ObjectStillLocked(a.clone()));
                }
                if let Some(dao) = state.rows.get(a) {
                    let (row_header, row) = Row::from_row_data(&self.dio, dao.deref())?;
                    already.insert(row.key.clone());
                    let dao: Dao<D> = Dao::new(&self.dio, row_header, row);
                    ret.push(DaoMut::new(Arc::clone(self), dao));
                }
            }
        }
        Ok(ret)
    }
    /// Bulk-loads the given keys via [`DioMut::load_many_ext`] with
    /// `allow_missing_keys` and `allow_serialization_error` both disabled.
    pub async fn load_many<D>(
        self: &Arc<Self>,
        keys: impl Iterator<Item = PrimaryKey>,
    ) -> Result<Vec<DaoMut<D>>, LoadError>
    where
        D: Serialize + DeserializeOwned,
    {
        self.load_many_ext(keys, false, false).await
    }
pub async fn load_many_ext<D>(
self: &Arc<Self>,
keys: impl Iterator<Item = PrimaryKey>,
allow_missing_keys: bool,
allow_serialization_error: bool,
) -> Result<Vec<DaoMut<D>>, LoadError>
where
D: Serialize + DeserializeOwned,
{
// This is the main return list
let mut already = FxHashSet::default();
let mut ret = Vec::new();
// We either find existing objects in the cache or build a list of objects to load
let to_load = {
let mut to_load = Vec::new();
let inside_async = self.multi.inside_async.read().await;
let state = self.state.lock().unwrap();
let inner_state = self.dio.state.lock().unwrap();
let _pop1 = DioMutScope::new(self);
for key in keys {
if state.is_locked(&key) {
bail!(LoadErrorKind::ObjectStillLocked(key));
}
if state.deleted.contains(&key) {
continue;
}
if let Some(dao) = state.rows.get(&key) {
let (row_header, row) = Row::from_row_data(&self.dio, dao.deref())?;
already.insert(row.key.clone());
ret.push(Dao::new(&self.dio, row_header, row));
continue;
}
if let Some((dao, leaf)) = inner_state.cache_load.get(&key) {
let (row_header, row) =
Row::from_event(&self.dio, dao.deref(), leaf.created, leaf.updated)?;
already.insert(row.key.clone());
ret.push(Dao::new(&self.dio, row_header, row));
continue;
}
to_load.push(match inside_async.chain.lookup_primary(&key) {
Some(a) => a,
None => continue,
});
}
to_load
};
// Load all the objects that have not yet been loaded
let to_load = self.multi.load_many(to_load).await?;
// Now process all the objects
let ret = {
let state = self.state.lock().unwrap();
let mut inner_state = self.dio.state.lock().unwrap();
let _pop1 = DioMutScope::new(self);
let session = self.session();
for mut evt in to_load {
let mut header = evt.header.as_header()?;
let key = match header.meta.get_data_key() {
Some(k) => k,
None => {
continue;
}
};
if state.is_locked(&key) {
bail!(LoadErrorKind::ObjectStillLocked(key.clone()));
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | true |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/bus.rs | lib/src/dio/bus.rs | use error_chain::bail;
use serde::{de::DeserializeOwned, Serialize};
use std::marker::PhantomData;
use std::sync::Arc;
use std::ops::Deref;
use tokio::sync::mpsc;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use std::fmt;
use super::dao::*;
use super::dao_mut::*;
use super::dio_mut::*;
use super::vec::DaoVecState;
use super::*;
use crate::chain::*;
use crate::header::PrimaryKey;
use crate::header::PrimaryKeyScope;
use crate::{error::*, event::*, meta::MetaCollection};
/// Event delivered by a [`Bus`]: either a fresh copy of an object that was
/// updated or a notification that it was deleted.
pub enum BusEvent<D>
{
    Updated(Dao<D>),
    Deleted(PrimaryKey),
}
impl<D> BusEvent<D>
{
    /// Consumes the event, returning the payload for updates and `None`
    /// for deletions.
    pub fn data(self) -> Option<D> {
        if let BusEvent::Updated(dao) = self {
            Some(dao.take())
        } else {
            None
        }
    }
}
impl<D> fmt::Debug
for BusEvent<D>
where D: fmt::Debug
{
    /// Formats as `updated(<dao debug>)` or `deleted(<key>)`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            BusEvent::Updated(dao) => write!(f, "updated({:?})", dao),
            BusEvent::Deleted(key) => write!(f, "deleted({})", key),
        }
    }
}
impl<D> PartialEq<BusEvent<D>>
for BusEvent<D>
where D: PartialEq<D>,
      Dao<D>: PartialEq<Dao<D>>
{
    /// Two events are equal when they are the same variant and their
    /// payloads compare equal.
    fn eq(&self, other: &BusEvent<D>) -> bool {
        match (self, other) {
            (BusEvent::Updated(a), BusEvent::Updated(b)) => a.eq(b),
            (BusEvent::Deleted(a), BusEvent::Deleted(b)) => a.eq(b),
            _ => false,
        }
    }
}
// Eq follows from the PartialEq impl above (total equality for payloads).
// NOTE(review): the `D: PartialEq<BusEvent<D>>` bound looks stricter than
// necessary (plain `D: Eq` should suffice) - confirm before loosening.
impl<D> Eq
for BusEvent<D>
where D: Eq + PartialEq<BusEvent<D>>,
      Dao<D>: PartialEq<Dao<D>>
{ }
/// Non-blocking variant of [`BusEvent`]; `NoData` signals that no usable
/// event was available (empty channel or a payload that could not be read).
pub enum TryBusEvent<D>
{
    Updated(Dao<D>),
    Deleted(PrimaryKey),
    NoData,
}
impl<D> TryBusEvent<D>
{
    /// Consumes the event, returning the payload for updates and `None`
    /// for every other variant.
    pub fn data(self) -> Option<D> {
        if let TryBusEvent::Updated(dao) = self {
            Some(dao.take())
        } else {
            None
        }
    }
}
impl<D> fmt::Debug
for TryBusEvent<D>
where D: fmt::Debug
{
    /// Formats as `updated(<dao debug>)`, `deleted(<key>)` or `no-data`.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            TryBusEvent::Updated(dao) => write!(f, "updated({:?})", dao),
            TryBusEvent::Deleted(key) => write!(f, "deleted({})", key),
            TryBusEvent::NoData => write!(f, "no-data"),
        }
    }
}
impl<D> PartialEq<TryBusEvent<D>>
for TryBusEvent<D>
where D: PartialEq<D>,
      Dao<D>: PartialEq<Dao<D>>
{
    /// Two events are equal when they are the same variant and their
    /// payloads (if any) compare equal.
    fn eq(&self, other: &TryBusEvent<D>) -> bool {
        match (self, other) {
            (TryBusEvent::Updated(a), TryBusEvent::Updated(b)) => a.eq(b),
            (TryBusEvent::Deleted(a), TryBusEvent::Deleted(b)) => a.eq(b),
            (TryBusEvent::NoData, TryBusEvent::NoData) => true,
            _ => false,
        }
    }
}
// Eq follows from the PartialEq impl above (total equality for payloads).
// NOTE(review): the `D: PartialEq<TryBusEvent<D>>` bound looks stricter than
// necessary (plain `D: Eq` should suffice) - confirm before loosening.
impl<D> Eq
for TryBusEvent<D>
where D: Eq + PartialEq<TryBusEvent<D>>,
      Dao<D>: PartialEq<Dao<D>>
{ }
#[allow(dead_code)]
/// Subscription to change events for a single collection; raw events arrive
/// on an mpsc channel registered with the chain (see [`Bus::new`]).
pub struct Bus<D> {
    dio: Arc<Dio>,
    chain: Arc<Chain>,
    // Collection this bus listens on
    vec: MetaCollection,
    // Receives raw events pushed by the chain-side listener
    receiver: mpsc::Receiver<EventWeakData>,
    _marker: PhantomData<D>,
}
impl<D> Bus<D> {
    /// Creates a new bus by registering an mpsc-backed listener for the
    /// given collection in the chain's listener table.
    pub(crate) async fn new(dio: &Arc<Dio>, vec: MetaCollection) -> Bus<D> {
        // Random id distinguishes this listener from others on the same vec
        let id = fastrand::u64(..);
        // Bounded channel - a slow consumer applies back-pressure upstream
        let (tx, rx) = mpsc::channel(100);
        {
            let mut lock = dio.chain().inside_async.write().await;
            let listener = ChainListener { id: id, sender: tx };
            lock.listeners.insert(vec.clone(), listener);
        }
        Bus {
            dio: Arc::clone(&dio),
            chain: Arc::clone(dio.chain()),
            vec: vec,
            receiver: rx,
            _marker: PhantomData,
        }
    }
    /// Waits for the next update/delete event on this collection.
    ///
    /// Events that carry no usable data are skipped; returns an error once
    /// the underlying channel has been closed.
    pub async fn recv(&mut self) -> Result<BusEvent<D>, BusError>
    where
        D: DeserializeOwned,
    {
        while let Some(evt) = self.receiver.recv().await {
            match self.ret_evt(evt).await? {
                TryBusEvent::Updated(dao) => {
                    return Ok(BusEvent::Updated(dao));
                },
                TryBusEvent::Deleted(key) => {
                    return Ok(BusEvent::Deleted(key));
                },
                _ => { continue; }
            }
        }
        Err(BusErrorKind::ChannelClosed.into())
    }
    /// Non-blocking variant of [`Bus::recv`]; yields `NoData` when the
    /// channel is currently empty.
    pub async fn try_recv(&mut self) -> Result<TryBusEvent<D>, BusError>
    where
        D: DeserializeOwned,
    {
        // NOTE(review): every arm below returns, so this loop body runs at
        // most once; the `loop` could be removed without changing behavior
        loop {
            match self.receiver.try_recv() {
                Ok(evt) => {
                    match self.ret_evt(evt).await? {
                        TryBusEvent::Updated(dao) => {
                            return Ok(TryBusEvent::Updated(dao));
                        },
                        TryBusEvent::Deleted(key) => {
                            return Ok(TryBusEvent::Deleted(key));
                        },
                        TryBusEvent::NoData => {
                            return Ok(TryBusEvent::NoData);
                        }
                    }
                },
                Err(mpsc::error::TryRecvError::Empty) => {
                    return Ok(TryBusEvent::NoData);
                },
                Err(mpsc::error::TryRecvError::Disconnected) => {
                    return Err(BusErrorKind::ChannelClosed.into());
                }
            }
        }
    }
    /// Converts a raw chain event into a typed bus event, resolving lazy
    /// payloads and stripping the data overlay with the session keys.
    async fn ret_evt(&self, evt: EventWeakData) -> Result<TryBusEvent<D>, BusError>
    where
        D: DeserializeOwned,
    {
        // Tombstones represent deletions
        if let Some(key) = evt.meta.get_tombstone() {
            return Ok(TryBusEvent::Deleted(key));
        }
        if evt.data_bytes.is_none() {
            return Ok(TryBusEvent::NoData);
        }
        // Timestamp (ms) is used for both the created and updated fields below
        let when = evt.meta.get_timestamp();
        let when = match when {
            Some(t) => t.time_since_epoch_ms,
            None => 0,
        };
        let data_key = evt
            .meta
            .get_data_key();
        let mut evt = EventStrongData {
            meta: evt.meta,
            data_bytes: match evt.data_bytes {
                MessageBytes::Some(a) => Some(a),
                MessageBytes::LazySome(l) => {
                    // Lazy payloads must be fetched from the pipe on demand;
                    // failures are treated as "no data" (best effort)
                    match self.chain.pipe.load_many(vec![l.record]).await {
                        Ok(data) => {
                            if let Some(data) = data.into_iter().next() {
                                data
                            } else {
                                return Ok(TryBusEvent::NoData);
                            }
                        }
                        Err(err) => {
                            trace!("bus recv failed to load - {}", err);
                            return Ok(TryBusEvent::NoData);
                        }
                    }
                },
                MessageBytes::None => None,
            },
            format: evt.format,
        };
        let session = self.dio.session();
        // Undo the storage transformation (e.g. encryption/compression)
        evt.data_bytes = match evt.data_bytes {
            Some(data) => Some(self.dio.multi.data_as_overlay(&evt.meta, data, session.deref())?),
            None => None,
        };
        let _pop1 = DioScope::new(&self.dio);
        let _pop2 = data_key
            .as_ref()
            .map(|a| PrimaryKeyScope::new(a.clone()));
        let (row_header, row) = super::row::Row::from_event(&self.dio, &evt, when, when)?;
        return Ok(TryBusEvent::Updated(Dao::new(&self.dio, row_header, row)));
    }
    /// Receives events until one can be exclusively locked, then returns it
    /// as a mutable object flagged for deletion on commit (work-queue style).
    pub async fn process(&mut self, trans: &Arc<DioMut>) -> Result<DaoMut<D>, BusError>
    where
        D: Serialize + DeserializeOwned,
    {
        loop {
            let dao = self.recv().await?;
            if let BusEvent::Updated(dao) = dao {
                let mut dao = DaoMut::new(Arc::clone(trans), dao);
                // Skip objects some other consumer already grabbed the lock on
                if dao.try_lock_then_delete().await? == true {
                    return Ok(dao);
                }
            }
        }
    }
}
impl<D> DaoVec<D> {
    /// Opens an event bus that listens for changes on this collection.
    ///
    /// Fails if the parent object has never been saved, or if the DIO
    /// behind this vector has already been dropped.
    pub async fn bus(&self) -> Result<Bus<D>, BusError> {
        let parent_id = match &self.state {
            DaoVecState::Saved(a) => a.clone(),
            DaoVecState::Unsaved => bail!(BusErrorKind::SaveParentFirst),
        };
        let dio = match self.dio() {
            Some(d) => d,
            None => bail!(BusErrorKind::WeakDio),
        };
        let vec = MetaCollection {
            parent_id,
            collection_id: self.vec_id,
        };
        Ok(Bus::new(&dio, vec).await)
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/weak.rs | lib/src/dio/weak.rs | #![allow(unused_imports)]
use error_chain::bail;
use std::marker::PhantomData;
use std::sync::{Arc, Weak};
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::Instrument;
use super::dio::DioWeak;
use super::dio_mut::DioMutWeak;
use crate::dio::dao::*;
use crate::dio::*;
use crate::error::*;
use crate::header::*;
use serde::de::*;
use serde::*;
/// Represents a reference to another data object with strong
/// type linting to make the model more solidified
///
#[derive(Serialize, Deserialize)]
pub struct DaoWeak<D> {
    // Key of the referenced object (None when the reference is unset)
    pub(super) id: Option<PrimaryKey>,
    // Weak handle to the read-only DIO; not serialized, rebound at runtime
    #[serde(skip)]
    pub(super) dio: DioWeak,
    // Weak handle to the mutable DIO; not serialized, rebound at runtime
    #[serde(skip)]
    pub(super) dio_mut: DioMutWeak,
    #[serde(skip)]
    pub(super) _marker: PhantomData<D>,
}
impl<D> Clone for DaoWeak<D> {
fn clone(&self) -> Self {
DaoWeak {
id: self.id.clone(),
dio: self.dio.clone(),
dio_mut: self.dio_mut.clone(),
_marker: PhantomData,
}
}
}
impl<D> Default for DaoWeak<D> {
fn default() -> Self {
DaoWeak::new()
}
}
impl<D> std::fmt::Debug for DaoWeak<D>
where
    D: std::fmt::Debug,
{
    /// Formats as `dao-weak(key=…, type=…)`, omitting the key when unset.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let type_name = std::any::type_name::<D>();
        if let Some(id) = self.id {
            write!(f, "dao-weak(key={}, type={})", id, type_name)
        } else {
            write!(f, "dao-weak(type={})", type_name)
        }
    }
}
impl<D> DaoWeak<D> {
    /// Creates an empty reference that points at nothing.
    pub fn new() -> DaoWeak<D> {
        DaoWeak {
            id: None,
            dio: DioWeak::Uninitialized,
            dio_mut: DioMutWeak::Uninitialized,
            _marker: PhantomData,
        }
    }
    /// Creates a reference to `key` bound to the given mutable DIO.
    pub fn from_key(dio: &Arc<DioMut>, key: PrimaryKey) -> DaoWeak<D> {
        DaoWeak {
            id: Some(key),
            dio: DioWeak::from(&dio.dio),
            dio_mut: DioMutWeak::from(dio),
            _marker: PhantomData,
        }
    }
    /// Key of the referenced object (if set).
    pub fn key(&self) -> Option<PrimaryKey> {
        self.id
    }
    /// Points this reference at a different object.
    pub fn set_key(&mut self, val: PrimaryKey) {
        self.id = Some(val);
    }
    /// Clears the reference without touching the referenced object.
    pub fn clear(&mut self) {
        self.id = None;
    }
    /// Upgrades the weak read-only DIO handle (`None` once it was dropped).
    pub fn dio(&self) -> Option<Arc<Dio>> {
        match &self.dio {
            DioWeak::Uninitialized => None,
            DioWeak::Weak(a) => Weak::upgrade(a),
        }
    }
    /// Upgrades the weak mutable DIO handle (`None` once it was dropped).
    pub fn dio_mut(&self) -> Option<Arc<DioMut>> {
        match &self.dio_mut {
            DioMutWeak::Uninitialized => None,
            DioMutWeak::Weak(a) => Weak::upgrade(a),
        }
    }
    /// Loads the data object (if it exists)
    ///
    /// Prefers the mutable DIO when available so uncommitted rows are seen.
    pub async fn load(&self) -> Result<Option<Dao<D>>, LoadError>
    where
        D: Serialize + DeserializeOwned,
    {
        let id = match self.id {
            Some(a) => a,
            None => {
                return Ok(None);
            }
        };
        let ret = {
            if let Some(dio) = self.dio_mut() {
                match dio.load::<D>(&id).await {
                    Ok(a) => Some(a.inner),
                    Err(LoadError(LoadErrorKind::NotFound(_), _)) => None,
                    Err(err) => {
                        bail!(err);
                    }
                }
            } else {
                let dio = match self.dio() {
                    Some(a) => a,
                    None => bail!(LoadErrorKind::WeakDio),
                };
                match dio.load::<D>(&id).await {
                    Ok(a) => Some(a),
                    Err(LoadError(LoadErrorKind::NotFound(_), _)) => None,
                    Err(err) => {
                        bail!(err);
                    }
                }
            }
        };
        Ok(ret)
    }
    /// Loads the data object (if it exists) for mutation; requires the
    /// mutable DIO to still be alive.
    pub async fn load_mut(&mut self) -> Result<Option<DaoMut<D>>, LoadError>
    where
        D: Serialize + DeserializeOwned,
    {
        let id = match self.id {
            Some(a) => a,
            None => {
                return Ok(None);
            }
        };
        let dio = match self.dio_mut() {
            Some(a) => a,
            None => bail!(LoadErrorKind::WeakDio),
        };
        let ret = match dio.load::<D>(&id).await {
            Ok(a) => Some(a),
            Err(LoadError(LoadErrorKind::NotFound(_), _)) => None,
            Err(err) => {
                bail!(err);
            }
        };
        Ok(ret)
    }
    /// Stores the data within this reference
    pub fn store(&mut self, value: D) -> Result<DaoMut<D>, SerializationError>
    where
        D: Clone + Serialize + DeserializeOwned,
    {
        let dio = match self.dio_mut() {
            Some(a) => a,
            None => bail!(SerializationErrorKind::WeakDio),
        };
        let ret = dio.store::<D>(value)?;
        // Repoint this reference at the freshly stored object
        self.id = Some(ret.key().clone());
        Ok(ret)
    }
    /// Loads the data object or uses a default if none exists
    pub async fn unwrap_or(&mut self, default: D) -> Result<D, LoadError>
    where
        D: Serialize + DeserializeOwned,
    {
        match self.load().await? {
            Some(a) => Ok(a.take()),
            None => Ok(default),
        }
    }
    /// Loads the data object or computes a default if none exists
    pub async fn unwrap_or_else<F: FnOnce() -> D>(&mut self, f: F) -> Result<D, LoadError>
    where
        D: Serialize + DeserializeOwned,
    {
        match self.load().await? {
            Some(a) => Ok(a.take()),
            None => Ok(f()),
        }
    }
    /// Loads the data object or creates a new one (if it does not exist)
    pub async fn unwrap_or_default(&mut self) -> Result<D, LoadError>
    where
        D: Serialize + DeserializeOwned + Default,
    {
        Ok(self
            .unwrap_or_else(|| {
                let ret: D = Default::default();
                ret
            })
            .await?)
    }
    /// Loads the object, panicking with `msg` when missing or on error.
    pub async fn expect(&self, msg: &str) -> Dao<D>
    where
        D: Serialize + DeserializeOwned,
    {
        match self.load().await {
            Ok(Some(a)) => a,
            Ok(None) => {
                panic!("{}", msg);
            }
            Err(err) => {
                panic!("{}: {:?}", msg, err);
            }
        }
    }
    /// Loads the object, panicking when missing or on any load error.
    pub async fn unwrap(&self) -> Dao<D>
    where
        D: Serialize + DeserializeOwned,
    {
        self.load()
            .await
            .ok()
            .flatten()
            .expect("called `DaoRef::unwrap()` that failed to load")
    }
    /// Clears the reference and returns the previously referenced object
    /// (if it still exists).
    pub async fn take(&mut self) -> Result<Option<DaoMut<D>>, LoadError>
    where
        D: Serialize + DeserializeOwned,
    {
        // `Option::take` already leaves `None` behind - no extra reset needed
        let id = match self.id.take() {
            Some(a) => a,
            None => {
                return Ok(None);
            }
        };
        let dio = match self.dio_mut() {
            Some(a) => a,
            None => bail!(LoadErrorKind::WeakDio),
        };
        let ret = match dio.load::<D>(&id).await {
            Ok(a) => Some(a),
            Err(LoadError(LoadErrorKind::NotFound(_), _)) => None,
            Err(err) => {
                bail!(err);
            }
        };
        Ok(ret)
    }
    /// Stores `value`, repoints the reference at it, and returns the object
    /// that was referenced before (if any).
    pub async fn replace(&mut self, value: D) -> Result<Option<DaoMut<D>>, LoadError>
    where
        D: Clone + Serialize + DeserializeOwned,
    {
        let dio = match self.dio_mut() {
            Some(a) => a,
            None => bail!(LoadErrorKind::WeakDio),
        };
        let ret = dio.store::<D>(value)?;
        // `Option::replace` yields the previous key while installing the new one
        let key = self.id.replace(ret.key().clone());
        let id = match key {
            Some(a) => a,
            None => {
                return Ok(None);
            }
        };
        let ret = match dio.load::<D>(&id).await {
            Ok(a) => Some(a),
            Err(LoadError(LoadErrorKind::NotFound(_), _)) => None,
            Err(err) => {
                bail!(err);
            }
        };
        Ok(ret)
    }
    /// Returns `true` when the reference is set and the object exists.
    pub async fn is_some(&self) -> Result<bool, LoadError> {
        let id = match self.id {
            Some(a) => a,
            None => {
                return Ok(false);
            }
        };
        let ret = {
            if let Some(dio) = self.dio_mut() {
                dio.exists(&id).await
            } else {
                let dio = match self.dio() {
                    Some(a) => a,
                    None => bail!(LoadErrorKind::WeakDio),
                };
                dio.exists(&id).await
            }
        };
        Ok(ret)
    }
    /// Inverse of [`DaoWeak::is_some`].
    pub async fn is_none(&self) -> Result<bool, LoadError> {
        Ok(!self.is_some().await?)
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dio/dao_mut.rs | lib/src/dio/dao_mut.rs | #![allow(unused_imports)]
use async_trait::async_trait;
use fxhash::FxHashSet;
use tracing::{debug, error, info, trace, warn};
use bytes::Bytes;
use serde::{de::DeserializeOwned, Serialize};
use std::ops::{Deref, DerefMut};
use std::sync::{Arc, Weak};
use std::sync::{Mutex, MutexGuard};
use crate::crypto::{EncryptedPrivateKey, PrivateSignKey};
use crate::{
crypto::EncryptKey,
session::{AteSession, AteSessionProperty},
};
use super::dao::*;
use super::dio::*;
use super::dio_mut::*;
use crate::crypto::AteHash;
use crate::error::*;
use crate::event::*;
use crate::header::*;
use crate::index::*;
use crate::meta::*;
use crate::spec::*;
use super::row::*;
pub use super::vec::DaoVec;
/// Pessimistic lock state tracked for a mutable data object.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord)]
pub enum DaoMutLock {
    /// The DAO has no lock on it
    Unlocked,
    /// The DAO has been manually locked forcing serial access
    Locked,
    /// The dao is being processed thus holds a lock and should be deleted
    /// when it goes out of scope
    LockedThenDelete,
}
/// Mutable bookkeeping carried alongside a [`DaoMut`].
#[derive(Debug, Clone)]
pub struct DaoMutState {
    // Current pessimistic-lock state of the object
    pub(super) lock: DaoMutLock,
}
/// Internal commit surface shared by mutable data objects; pushes header
/// and/or data changes into the owning transaction.
pub(crate) trait DaoObjCommit: DaoObj {
    // `header_changed`/`data_changed` indicate which parts must be re-staged
    fn commit(
        &mut self,
        header_changed: bool,
        data_changed: bool,
    ) -> std::result::Result<(), SerializationError>;
    // Replaces the object's authorization metadata and stages the change
    fn auth_set(&mut self, auth: MetaAuthorization) -> std::result::Result<(), SerializationError>;
}
/// Represents a data object that will be represented as one or
/// more events on the redo-log and validated in the chain-of-trust.
///
/// Reading this object using non-mutable behavior will incur no IO
/// on the redo-log however if you edit the object you must commit it
/// to the `Dio` before it goes out of scope or the data will be lost
/// (in Debug mode this will even trigger an assert).
///
/// Metadata about the data object can also be accessed via this object
/// which allows you to change the read/write access rights, etc.
///
/// If you change your mind on committing the data to the redo-log then
/// you can call the `cancel` function instead.
///
/// The real version represents all operations that can be performed
/// before the object is actually saved and all those after
pub struct DaoMut<D>
where
    D: Serialize,
{
    // The immutable object being wrapped for mutation
    pub(super) inner: Dao<D>,
    // Transaction that staged changes are committed through
    trans: Arc<DioMut>,
    // Lock bookkeeping for this wrapper
    state: DaoMutState,
}
impl<D> Clone for DaoMut<D>
where
    D: Serialize + Clone,
{
    fn clone(&self) -> Self {
        // Clones share the same transaction handle (Arc clone)
        Self {
            inner: self.inner.clone(),
            trans: self.trans.clone(),
            state: self.state.clone(),
        }
    }
}
impl<D> std::fmt::Debug for DaoMut<D>
where
    D: Serialize + std::fmt::Debug,
{
    /// Formats as `dao-mut(<inner debug>)`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "dao-mut({:?})", self.inner)
    }
}
impl<D> DaoMut<D>
where
    D: Serialize,
{
    /// Wraps an immutable [`Dao`] in a mutable shell bound to a transaction.
    pub(super) fn new(trans: Arc<DioMut>, inner: Dao<D>) -> DaoMut<D> {
        DaoMut {
            trans,
            inner,
            state: DaoMutState {
                lock: DaoMutLock::Unlocked,
            },
        }
    }
    /// Returns (a clone of) the transaction this object is bound to.
    pub fn trans(&self) -> Arc<DioMut> {
        Arc::clone(&self.trans)
    }
    /// Rebinds this object to a different transaction.
    pub fn set_trans(&mut self, dio: &Arc<DioMut>) {
        self.trans = Arc::clone(dio);
    }
    /// Marks the object as deleted within the transaction; the deletion is
    /// applied when the transaction commits.
    pub fn delete(self) -> std::result::Result<(), SerializationError> {
        let key = self.key().clone();
        let mut state = self.trans.state.lock().unwrap();
        state.add_deleted(key, self.inner.row_header.parent.clone());
        Ok(())
    }
    /// Removes the parent linkage (header-only change).
    pub fn detach(&mut self) -> std::result::Result<(), SerializationError> {
        self.inner.row_header.parent = None;
        self.commit(true, false)
    }
    /// Attaches this object under `parent` with an explicit collection id
    /// (header-only change).
    pub fn attach_ext(
        &mut self,
        parent: PrimaryKey,
        collection_id: u64,
    ) -> std::result::Result<(), SerializationError> {
        self.inner.row_header.parent = Some(MetaParent {
            vec: MetaCollection {
                parent_id: parent,
                collection_id,
            },
        });
        self.commit(true, false)
    }
    /// Attaches this object under `parent` using collection id zero.
    pub fn attach_orphaned(
        &mut self,
        parent: &PrimaryKey,
    ) -> std::result::Result<(), SerializationError> {
        self.attach_ext(parent.clone(), 0u64)
    }
    /// Same as [`DaoMut::attach_orphaned`] with an explicit collection id.
    pub fn attach_orphaned_ext(
        &mut self,
        parent: &PrimaryKey,
        collection_id: u64,
    ) -> std::result::Result<(), SerializationError> {
        self.attach_ext(parent.clone(), collection_id)
    }
    /// Appends extra core metadata to the row and stages both header and
    /// data for commit.
    pub fn add_extra_metadata(
        &mut self,
        meta: CoreMetadata,
    ) -> std::result::Result<(), SerializationError> {
        self.inner.row.extra_meta.push(meta);
        self.commit(true, true)
    }
    /// Returns `true` while this wrapper holds a pessimistic lock.
    pub fn is_locked(&self) -> bool {
        match self.state.lock {
            DaoMutLock::Locked | DaoMutLock::LockedThenDelete => true,
            DaoMutLock::Unlocked => false,
        }
    }
    /// Alias for [`DaoMut::trans`].
    pub fn dio_mut(&self) -> Arc<DioMut> {
        self.trans()
    }
    /// Attaches this object to a [`DaoVec`] owned by `parent`
    /// (header-only change).
    pub fn attach(
        &mut self,
        parent: &dyn DaoObj,
        vec: &DaoVec<D>,
    ) -> std::result::Result<(), SerializationError>
    where
        D: Serialize,
    {
        self.inner.row_header.parent = Some(MetaParent {
            vec: MetaCollection {
                parent_id: parent.key().clone(),
                collection_id: vec.vec_id,
            },
        });
        self.commit(true, false)
    }
    /// Attempts to take the pessimistic lock, recording `new_state`
    /// (`Locked` or `LockedThenDelete`) on success. No-op when a lock is
    /// already held.
    async fn try_lock_ext(&mut self, new_state: DaoMutLock) -> Result<bool, LockError> {
        match self.state.lock {
            DaoMutLock::Locked | DaoMutLock::LockedThenDelete => {}
            DaoMutLock::Unlocked => {
                // Attempt the lock
                let dio = self.dio();
                if dio.multi.pipe.try_lock(self.inner.row.key.clone()).await? == false {
                    return Ok(false);
                }
                // The object is now locked
                self.state.lock = new_state;
            }
        };
        Ok(true)
    }
    /// Single, non-blocking lock attempt.
    pub async fn try_lock(&mut self) -> Result<bool, LockError> {
        self.try_lock_ext(DaoMutLock::Locked).await
    }
    /// Repeated lock attempts until `timeout` elapses: a few immediate
    /// retries first, then randomized growing sleeps (capped at 500ms).
    pub async fn try_lock_with_timeout(
        &mut self,
        timeout: std::time::Duration,
    ) -> Result<bool, LockError> {
        if self.try_lock_ext(DaoMutLock::Locked).await? == true {
            return Ok(true);
        }
        let timer = std::time::Instant::now();
        // Use an exponential backoff
        let mut spin = 3;
        let mut max_wait = 0u64;
        while timer.elapsed() < timeout {
            if self.try_lock_ext(DaoMutLock::Locked).await? == true {
                return Ok(true);
            }
            // First few attempts retry immediately without sleeping
            if spin > 0 {
                spin -= 1;
                continue;
            }
            let elapsed = timer.elapsed();
            let remaining = match timeout.checked_sub(elapsed) {
                Some(a) => a,
                None => {
                    break;
                }
            };
            // Grow the ceiling by ~20% per round, bounded at half a second
            max_wait = ((max_wait * 12u64) / 10u64) + 5u64;
            max_wait = max_wait.min(500u64);
            let min_wait = max_wait / 2u64;
            // Randomize the sleep to avoid synchronized retry storms
            let random_wait = fastrand::u64(min_wait..max_wait);
            let mut random_wait = std::time::Duration::from_millis(random_wait);
            random_wait = random_wait.min(remaining);
            crate::engine::sleep(random_wait).await;
        }
        return Ok(false);
    }
    /// Releases a lock taken with [`DaoMut::try_lock`]. Returns `false`
    /// when unlocked or when the lock is a locked-then-delete lock.
    pub async fn unlock(&mut self) -> Result<bool, LockError> {
        match self.state.lock {
            DaoMutLock::Unlocked | DaoMutLock::LockedThenDelete => {
                return Ok(false);
            }
            DaoMutLock::Locked => {
                let dio = self.inner.dio();
                dio.multi.pipe.unlock(self.inner.row.key.clone()).await?;
                self.state.lock = DaoMutLock::Unlocked;
            }
        };
        Ok(true)
    }
    /// Takes the lock and flags the object for deletion on commit.
    pub async fn try_lock_then_delete(&mut self) -> Result<bool, LockError> {
        self.try_lock_ext(DaoMutLock::LockedThenDelete).await
    }
    /// Returns a guard for editing the authorization metadata; the edited
    /// copy is written back when the guard commits or drops.
    pub fn auth_mut<'a>(&'a mut self) -> DaoAuthGuard<'a> {
        DaoAuthGuard {
            auth: self.inner.row_header.auth.clone(),
            dao: self,
            dirty: false,
        }
    }
    /// Consumes the wrapper and returns the raw data value.
    pub fn take(self) -> D {
        self.inner.row.data
    }
    /// Collection this object is attached to (if any).
    pub fn parent(&self) -> Option<MetaCollection> {
        self.inner.parent()
    }
    /// Key of the parent object (if any).
    pub fn parent_id(&self) -> Option<PrimaryKey> {
        self.inner.parent_id()
    }
    /// Returns a mutable guard; the current row is eagerly staged in the
    /// transaction so the object is registered even if left untouched.
    pub fn as_mut<'a>(&'a mut self) -> DaoMutGuard<'a, D> {
        {
            let mut state = self.trans.state.lock().unwrap();
            if state.rows.contains_key(self.inner.key()) == false {
                if let Some(row) = self.inner.row.as_row_data(&self.inner.row_header).ok() {
                    state.rows.insert(self.inner.key().clone(), row);
                }
            }
        }
        DaoMutGuard {
            dao: self,
            dirty: false,
        }
    }
    /// Read-only access to the data.
    pub fn as_ref<'a>(&'a self) -> &'a D {
        &self.inner.row.data
    }
    /// Borrows the immutable object this wrapper holds.
    pub fn as_immutable(&self) -> &Dao<D> {
        &self.inner
    }
    /// Unwraps back into the immutable object.
    pub fn to_immutable(self) -> Dao<D> {
        self.inner
    }
    /// Owned variant of [`DaoMut::as_mut`] (consumes the wrapper).
    pub fn as_mut_owned(self) -> DaoMutGuardOwned<D> {
        DaoMutGuardOwned {
            dao: self,
            dirty: false,
        }
    }
}
impl<'a, D> DaoObjCommit for DaoMut<D>
where
    D: Serialize,
{
    /// Replaces the authorization header and stages the header change.
    fn auth_set(&mut self, auth: MetaAuthorization) -> std::result::Result<(), SerializationError> {
        self.inner.row_header.auth = auth;
        self.commit(true, false)
    }
    /// Stages this object's header and/or data into the transaction state.
    ///
    /// Also releases any locks held by this wrapper; a locked-then-delete
    /// object is converted into a staged deletion instead of being written.
    fn commit(
        &mut self,
        header_changed: bool,
        data_changed: bool,
    ) -> std::result::Result<(), SerializationError>
    where
        D: Serialize,
    {
        let mut state = self.trans.state.lock().unwrap();
        // The local DIO lock gets released first
        state.unlock(&self.inner.row.key);
        // Next any pessimistic locks on the local chain
        match self.state.lock {
            DaoMutLock::Locked => {
                state.pipe_unlock.insert(self.inner.row.key.clone());
            }
            DaoMutLock::LockedThenDelete => {
                state.pipe_unlock.insert(self.inner.row.key.clone());
                let key = self.key().clone();
                state.add_deleted(key, self.inner.row_header.parent.clone());
                // A deleted object needs no further staging
                return Ok(());
            }
            _ => {}
        }
        let mut write_header = header_changed;
        let mut wrote_data = false;
        if data_changed {
            let row_data = { self.inner.row.as_row_data(&self.inner.row_header)? };
            if state.dirty_row(row_data) {
                // A freshly dirtied row implies its header must go out too
                write_header = true;
                wrote_data = true;
            }
        }
        if write_header {
            if state.dirty_header(self.inner.row_header.clone()) {
                if wrote_data == false {
                    // Header changes ride along with a copy of the row data
                    let row_data = { self.inner.row.as_row_data(&self.inner.row_header)? };
                    state.dirty_row(row_data);
                }
            }
        }
        Ok(())
    }
}
/// Lets a `DaoMut` be used directly as a read-only reference to its data.
impl<D> std::ops::Deref for DaoMut<D>
where
    D: Serialize,
{
    type Target = D;
    fn deref(&self) -> &Self::Target {
        self.inner.deref()
    }
}
/// Common DAO accessors simply delegate to the wrapped immutable DAO.
impl<D> DaoObj for DaoMut<D>
where
    D: Serialize,
{
    fn key(&self) -> &PrimaryKey {
        self.inner.key()
    }
    fn auth(&self) -> &MetaAuthorization {
        self.inner.auth()
    }
    fn dio(&self) -> &Arc<Dio> {
        self.inner.dio()
    }
    fn when_created(&self) -> u64 {
        self.inner.when_created()
    }
    fn when_updated(&self) -> u64 {
        self.inner.when_updated()
    }
}
/// Guard that exposes the authorization metadata of a DAO for mutation;
/// the change is written back when the guard is committed or dropped.
pub struct DaoAuthGuard<'a> {
    // The object whose authorization is being edited
    dao: &'a mut dyn DaoObjCommit,
    // Working copy of the authorization metadata
    auth: MetaAuthorization,
    // Set by `DerefMut`; indicates the working copy must be written back
    dirty: bool,
}
impl<'a> DaoAuthGuard<'a> {
    /// Writes the (possibly modified) authorization back to the DAO.
    /// Does nothing if the guard was never mutably dereferenced.
    pub fn commit(&mut self) -> std::result::Result<(), SerializationError> {
        if self.dirty {
            self.dirty = false;
            self.dao.auth_set(self.auth.clone())?;
        }
        Ok(())
    }
}
impl<'a> Deref for DaoAuthGuard<'a> {
    type Target = MetaAuthorization;
    fn deref(&self) -> &Self::Target {
        &self.auth
    }
}
impl<'a> DerefMut for DaoAuthGuard<'a> {
    // Any mutable access pessimistically marks the guard dirty.
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.dirty = true;
        &mut self.auth
    }
}
impl<'a> Drop for DaoAuthGuard<'a> {
    fn drop(&mut self) {
        // NOTE(review): panicking in `drop` aborts the process when it
        // occurs during unwinding from another panic.
        if self.dirty {
            self.commit()
                .expect("Failed to commit the data header after accessing it")
        }
    }
}
/// Guard that exposes the data of a `DaoMut` for mutation; the change is
/// committed into the transaction when the guard is committed or dropped.
pub struct DaoMutGuard<'a, D>
where
    D: Serialize,
{
    // The mutable handle being edited
    dao: &'a mut DaoMut<D>,
    // Set by `DerefMut`; indicates a commit is required
    dirty: bool,
}
impl<'a, D> DaoMutGuard<'a, D>
where
    D: Serialize,
{
    /// The transaction that the underlying DAO belongs to.
    pub fn trans(&self) -> Arc<DioMut> {
        self.dao.trans()
    }
    /// Pushes the mutated data into the transaction (no-op when clean).
    pub fn commit(&mut self) -> Result<(), SerializationError> {
        if self.dirty {
            self.dao.commit(false, true)?;
            self.dirty = false;
        }
        Ok(())
    }
}
impl<'a, D> Deref for DaoMutGuard<'a, D>
where
    D: Serialize,
{
    type Target = D;
    fn deref(&self) -> &Self::Target {
        &self.dao.inner.row.data
    }
}
impl<'a, D> DerefMut for DaoMutGuard<'a, D>
where
    D: Serialize,
{
    // Any mutable access pessimistically marks the guard dirty.
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.dirty = true;
        &mut self.dao.inner.row.data
    }
}
impl<'a, D> Drop for DaoMutGuard<'a, D>
where
    D: Serialize,
{
    fn drop(&mut self) {
        // NOTE(review): panicking in `drop` aborts the process when it
        // occurs during unwinding from another panic.
        if let Err(err) = self.commit() {
            error!("failed to commit the data after accessing it - {}", err);
            panic!("failed to commit the data after accessing it - {}", err);
        }
    }
}
/// Owning variant of `DaoMutGuard` - consumes the `DaoMut` rather than
/// borrowing it, which lets the guard be moved around freely.
pub struct DaoMutGuardOwned<D>
where
    D: Serialize,
{
    // The mutable handle being edited (owned)
    dao: DaoMut<D>,
    // Set by `DerefMut`; indicates a commit is required
    dirty: bool,
}
impl<D> DaoMutGuardOwned<D>
where
    D: Serialize,
{
    /// The transaction that the underlying DAO belongs to.
    pub fn trans(&self) -> Arc<DioMut> {
        self.dao.trans()
    }
    /// Pushes the mutated data into the transaction (no-op when clean).
    pub fn commit(&mut self) -> Result<(), SerializationError> {
        if self.dirty {
            self.dao.commit(false, true)?;
            self.dirty = false;
        }
        Ok(())
    }
}
/// Accessors delegate straight to the wrapped `DaoMut`.
impl<D> DaoObj for DaoMutGuardOwned<D>
where
    D: Serialize,
{
    fn key(&self) -> &PrimaryKey {
        self.dao.key()
    }
    fn auth(&self) -> &MetaAuthorization {
        self.dao.auth()
    }
    fn dio(&self) -> &Arc<Dio> {
        self.dao.dio()
    }
    fn when_created(&self) -> u64 {
        self.dao.when_created()
    }
    fn when_updated(&self) -> u64 {
        self.dao.when_updated()
    }
}
impl<D> Deref for DaoMutGuardOwned<D>
where
    D: Serialize,
{
    type Target = D;
    fn deref(&self) -> &Self::Target {
        &self.dao.inner.row.data
    }
}
impl<D> DerefMut for DaoMutGuardOwned<D>
where
    D: Serialize,
{
    // Any mutable access pessimistically marks the guard dirty.
    fn deref_mut(&mut self) -> &mut Self::Target {
        self.dirty = true;
        &mut self.dao.inner.row.data
    }
}
impl<D> Drop for DaoMutGuardOwned<D>
where
    D: Serialize,
{
    /// Flushes any pending data change when the guard goes out of scope.
    fn drop(&mut self) {
        // NOTE(review): panicking in `drop` aborts the process when it
        // occurs during unwinding from another panic.
        self.commit()
            .expect("Failed to commit the data after accessing it");
    }
}
/// Drops a mutable handle back down to the plain immutable DAO.
impl<D> From<DaoMut<D>> for Dao<D>
where
    D: Serialize,
{
    fn from(a: DaoMut<D>) -> Self {
        a.inner
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/test.rs | lib/src/mesh/test.rs | #![allow(unused_imports)]
use std::sync::Arc;
use tracing::{debug, error, info};
use serde::{Deserialize, Serialize};
use crate::error::*;
#[cfg(feature = "enable_server")]
use crate::mesh::MeshRoot;
use crate::prelude::*;
/// Simple payload used by the mesh integration tests below.
#[derive(Serialize, Deserialize, Debug, Clone, Default)]
struct TestData {
    // Arbitrary scalar payload
    pub data: u128,
    // Child collection used to exercise sub-object events on the bus
    pub inner: DaoVec<String>,
}
// The tests below run the same scenario across the full matrix of
// (centralized | distributed) x (TCP | WebSocket) x (plain | AES128 | AES256).
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_distributed_with_tcp_and_plain() {
    test_mesh_internal(false, StreamProtocol::Tcp, None).await
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_centralized_with_tcp_and_plain() {
    test_mesh_internal(true, StreamProtocol::Tcp, None).await
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_distributed_with_web_socket_and_plain() {
    test_mesh_internal(false, StreamProtocol::WebSocket, None).await
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_centralized_with_web_socket_and_plain() {
    test_mesh_internal(true, StreamProtocol::WebSocket, None).await
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_distributed_with_tcp_and_aes128() {
    test_mesh_internal(false, StreamProtocol::Tcp, Some(KeySize::Bit128)).await
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_centralized_with_tcp_and_aes128() {
    test_mesh_internal(true, StreamProtocol::Tcp, Some(KeySize::Bit128)).await
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_distributed_with_web_socket_and_aes128() {
    test_mesh_internal(false, StreamProtocol::WebSocket, Some(KeySize::Bit128)).await
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_centralized_with_web_socket_and_aes128() {
    test_mesh_internal(true, StreamProtocol::WebSocket, Some(KeySize::Bit128)).await
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_distributed_with_tcp_and_aes256() {
    test_mesh_internal(false, StreamProtocol::Tcp, Some(KeySize::Bit256)).await
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_centralized_with_tcp_and_aes256() {
    test_mesh_internal(true, StreamProtocol::Tcp, Some(KeySize::Bit256)).await
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_distributed_with_web_socket_and_aes256() {
    test_mesh_internal(false, StreamProtocol::WebSocket, Some(KeySize::Bit256)).await
}
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_mesh_centralized_with_web_socket_and_aes256() {
    test_mesh_internal(true, StreamProtocol::WebSocket, Some(KeySize::Bit256)).await
}
/// Shared end-to-end scenario: spins up a cluster of five root nodes, writes
/// objects from two clients, verifies bus events flow between the clients,
/// then reconnects via a non-owning node and confirms the data survived.
#[cfg(test)]
async fn test_mesh_internal(centralized: bool, proto: StreamProtocol, wire_encryption: Option<KeySize>) {
    crate::utils::bootstrap_test_env();
    let cfg_ate = crate::conf::tests::mock_test_config();
    let test_url = url::Url::parse(format!("{}://localhost/", proto.to_scheme()).as_str()).unwrap();
    // Create a root key that will protect the integrity of the chain
    let root_key = crate::crypto::PrivateSignKey::generate(KeySize::Bit256);
    // We offset the ports so that we don't need port re-use between tests
    let port_offset = fastrand::u16(..1000);
    let port_offset = port_offset * 10;
    let mut mesh_roots = Vec::new();
    let mut cfg_mesh = {
        let mut roots = Vec::new();
        for n in (5100 + port_offset)..(5105 + port_offset) {
            roots.push(MeshAddress::new(IpAddr::from_str("127.0.0.1").unwrap(), n));
        }
        let remote = url::Url::parse(format!("{}://localhost", proto.to_scheme()).as_str()).unwrap();
        let mut cfg_mesh = ConfMesh::new("localhost", remote, roots.iter());
        cfg_mesh.wire_protocol = proto;
        cfg_mesh.wire_encryption = wire_encryption;
        let mut mesh_root_joins = Vec::new();
        // Create the first cluster of mesh root nodes
        let certificate = PrivateEncryptKey::generate(wire_encryption.unwrap_or(KeySize::Bit192));
        #[allow(unused_variables)]
        let mut index: i32 = 0;
        for n in (5100 + port_offset)..(5105 + port_offset) {
            #[cfg(feature = "enable_dns")]
            let addr = MeshAddress::new(IpAddr::from_str("0.0.0.0").unwrap(), n);
            #[cfg(not(feature = "enable_dns"))]
            let addr = MeshAddress::new("localhost", n);
            #[allow(unused_mut)]
            let mut cfg_ate = cfg_ate.clone();
            #[cfg(feature = "enable_local_fs")]
            {
                // Give every node its own log directory
                cfg_ate.log_path = cfg_ate
                    .log_path
                    .as_ref()
                    .map(|a| format!("{}/p{}", a, index));
            }
            let mut cfg_mesh = cfg_mesh.clone();
            cfg_mesh.force_listen = Some(addr.clone());
            cfg_mesh.listen_certificate = Some(certificate.clone());
            let root_key = root_key.as_public_key().clone();
            let join = async move {
                let server = create_server(&cfg_mesh).await?;
                if centralized {
                    server
                        .add_route(
                            all_ethereal_centralized_with_root_key(root_key).await,
                            &cfg_ate,
                        )
                        .await?;
                } else {
                    server
                        .add_route(
                            all_ethereal_distributed_with_root_key(root_key).await,
                            &cfg_ate,
                        )
                        .await?;
                }
                Result::<Arc<MeshRoot>, CommsError>::Ok(server)
            };
            mesh_root_joins.push((addr, join));
            index = index + 1;
        }
        // Wait for all the servers to start
        for (addr, join) in mesh_root_joins {
            info!("creating server on {:?}", addr);
            let join = join.await.unwrap();
            mesh_roots.push(join);
        }
        // Clients must trust exactly the certificate the cluster listens with
        cfg_mesh.certificate_validation =
            CertificateValidation::AllowedCertificates(vec![certificate.hash()]);
        cfg_mesh
    };
    info!("create the mesh and connect to it with client 1");
    let client_a = create_temporal_client(&cfg_ate, &cfg_mesh);
    info!("temporal client is ready");
    let chain_a = Arc::clone(&client_a)
        .open(&test_url, &ChainKey::from("test-chain"))
        .await
        .unwrap();
    info!("connected with client 1");
    let mut session_a = AteSessionUser::new();
    session_a.add_user_write_key(&root_key);
    let dao_key1;
    let dao_key2;
    {
        let mut bus_a;
        let mut bus_b;
        let mut dao2;
        {
            let dio = chain_a.dio_trans(&session_a, TransactionScope::Full).await;
            dao2 = dio.store(TestData::default()).unwrap();
            dao_key2 = dao2.key().clone();
            let _ = dio.store(TestData::default()).unwrap();
            info!("commit on chain_a with two rows");
            dio.commit().await.unwrap();
            bus_b = dao2.as_mut().inner.bus().await.unwrap();
        }
        {
            cfg_mesh.force_listen = None;
            cfg_mesh.force_client_only = true;
            let client_b = create_temporal_client(&cfg_ate, &cfg_mesh);
            let chain_b = client_b
                .open(&test_url, &ChainKey::new("test-chain".to_string()))
                .await
                .unwrap();
            let mut session_b = AteSessionUser::new();
            session_b.add_user_write_key(&root_key);
            bus_a = dao2.as_mut().inner.bus().await.unwrap();
            {
                info!("start a DIO session for client B");
                let dio = chain_b.dio_trans(&session_b, TransactionScope::Full).await;
                info!("store data object 1");
                dao_key1 = dio.store(TestData::default()).unwrap().key().clone();
                info!("commit on chain_b with one rows");
                dio.commit().await.unwrap();
                info!("load data object 2");
                let mut dao2: DaoMut<TestData> = dio
                    .load(&dao_key2)
                    .await
                    .expect("An earlier saved object should have loaded");
                info!("add to new sub objects to the vector");
                dao2.as_mut()
                    .inner
                    .push("test_string1".to_string())
                    .unwrap();
                dio.commit().await.unwrap();
                dao2.as_mut()
                    .inner
                    .push("test_string2".to_string())
                    .unwrap();
                info!("commit on chain_b with two children");
                dio.commit().await.unwrap();
            }
        }
        info!("sync to disk");
        chain_a.sync().await.unwrap();
        info!("wait for an event on the BUS (local)");
        let task_ret = bus_a
            .recv()
            .await
            .expect("Should have received the result on the BUS");
        assert_eq!(task_ret.data(), Some("test_string1".to_string()));
        info!("wait for an event on the BUS (other)");
        let task_ret = bus_b
            .recv()
            .await
            .expect("Should have received the result on the BUS");
        assert_eq!(task_ret.data(), Some("test_string1".to_string()));
        {
            info!("new DIO session for client A");
            let dio = chain_a.dio_trans(&session_a, TransactionScope::Full).await;
            info!("processing the next event in the BUS (and lock_for_delete it)");
            let task_ret = bus_b
                .process(&dio)
                .await
                .expect("Should have received the result on the BUS for the second time");
            info!("event received");
            assert_eq!(*task_ret, "test_string2".to_string());
            info!("loading data object 1");
            dio.load::<TestData>(&dao_key1)
                .await
                .expect("The data did not not get replicated to other clients in realtime");
            info!("commit on chain_a with one processed event");
            dio.commit().await.unwrap();
        }
    }
    {
        // Find an address where the chain is 'not' owned which will mean the
        // server needs to do a cross connect in order to pass this test\
        // (this is needed for the WebAssembly model as this can not support
        // client side load-balancing)
        cfg_mesh.force_connect = cfg_mesh
            .roots
            .iter()
            .filter(|a| Some(*a) != chain_a.remote_addr())
            .map(|a| a.clone())
            .next();
        cfg_mesh.force_listen = None;
        cfg_mesh.force_client_only = true;
        let client = create_temporal_client(&cfg_ate, &cfg_mesh);
        info!("reconnecting the client");
        let chain = client
            .open(&test_url, &ChainKey::from("test-chain"))
            .await
            .unwrap();
        let session = AteSessionUser::new();
        {
            info!("loading data object 1");
            let dio = chain.dio(&session).await;
            dio.load::<TestData>(&dao_key1)
                .await
                .expect("The data did not survive between new sessions");
        }
    }
    info!("shutting down");
    //std::process::exit(0);
    // Items in a block are position-independent, so this `use` brings the
    // `BusEvent` trait into scope for the `.data()` calls above.
    use crate::dio::bus::BusEvent;
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/lock_request.rs | lib/src/mesh/lock_request.rs | use crate::engine::timeout;
use async_trait::async_trait;
use fxhash::FxHashMap;
use std::ops::Rem;
use std::sync::Mutex as StdMutex;
use std::sync::RwLock as StdRwLock;
use std::time::Duration;
use std::time::Instant;
use std::{sync::Arc, sync::Weak};
use tokio::sync::broadcast;
use tokio::sync::watch;
use tokio::sync::RwLock;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use super::core::*;
use super::msg::*;
use crate::chain::*;
use crate::conf::*;
use crate::crypto::*;
use crate::error::*;
use crate::header::*;
use crate::loader::*;
use crate::meta::*;
use crate::pipe::*;
use crate::session::*;
use crate::spec::*;
use crate::time::*;
use crate::transaction::*;
use crate::trust::*;
use crate::{anti_replay::AntiReplayPlugin, comms::*};
/// Tracks an in-flight distributed lock election; once enough votes have
/// arrived the final outcome is published on the watch channel.
#[derive(Debug)]
pub(super) struct LockRequest {
    // Number of votes required before the election can conclude
    pub(super) needed: u32,
    // Votes received in favour of granting the lock
    pub(super) positive: u32,
    // Votes received against granting the lock
    pub(super) negative: u32,
    // Waiters observe the final outcome (true = lock granted) here
    pub(super) tx: watch::Sender<bool>,
}
impl LockRequest {
    /// Registers a single vote and returns true when the election has
    /// concluded - either enough positive votes arrived to grant the lock,
    /// or enough total votes arrived that success is no longer possible.
    /// The outcome is published to the waiters on the watch channel.
    pub(super) fn entropy(&mut self, result: bool) -> bool {
        if result {
            self.positive += 1;
        } else {
            self.negative += 1;
        }
        if self.positive >= self.needed {
            let _ = self.tx.send(true);
            return true;
        }
        if self.positive + self.negative >= self.needed {
            let _ = self.tx.send(false);
            return true;
        }
        false
    }
    /// Aborts the election, signalling failure to the waiters.
    pub(super) fn cancel(&self) {
        let _ = self.tx.send(false);
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/session.rs | lib/src/mesh/session.rs | use crate::engine::timeout;
use async_trait::async_trait;
use error_chain::bail;
use fxhash::FxHashMap;
use std::net::SocketAddr;
use std::ops::Rem;
use std::sync::Mutex as StdMutex;
use std::sync::RwLock as StdRwLock;
use std::time::Duration;
use std::time::Instant;
use std::{sync::Arc, sync::Weak};
use tokio::select;
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::sync::RwLock;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::{Instrument, WithSubscriber};
use bytes::Bytes;
use super::core::*;
use super::lock_request::*;
use super::msg::*;
use super::recoverable_session_pipe::*;
use crate::chain::*;
use crate::conf::MeshConnectAddr;
use crate::conf::*;
use crate::crypto::*;
use crate::engine::*;
use crate::error::*;
use crate::header::*;
use crate::loader::*;
use crate::mesh::NodeId;
use crate::meta::*;
use crate::pipe::*;
use crate::session::*;
use crate::spec::*;
use crate::time::*;
use crate::transaction::*;
use crate::trust::*;
use crate::{anti_replay::AntiReplayPlugin, comms::*};
/// An outstanding bulk-load request: which records were asked for, and the
/// channel their payloads should be delivered on (in the same order).
pub struct LoadRequest
{
    pub records: Vec<AteHash>,
    pub tx: mpsc::Sender<Result<Vec<Option<Bytes>>, LoadError>>,
}
/// Client-side state for a connection to a remote chain; tracks everything
/// that is still waiting on a reply from the server (commits, lock votes,
/// bulk loads) plus the conversation state in each direction.
pub struct MeshSession {
    pub(super) addr: MeshAddress,
    pub(super) key: ChainKey,
    pub(super) sync_tolerance: Duration,
    // Weak so the session does not keep the chain alive on its own
    pub(super) chain: Weak<Chain>,
    // Commits awaiting server confirmation, keyed by commit id
    pub(super) commit: Arc<StdMutex<FxHashMap<u64, mpsc::Sender<Result<u64, CommitError>>>>>,
    // In-flight distributed lock elections, keyed by record
    pub(super) lock_requests: Arc<StdMutex<FxHashMap<PrimaryKey, LockRequest>>>,
    // In-flight bulk loads, keyed by request id
    pub(super) load_requests: Arc<StdMutex<FxHashMap<u64, LoadRequest>>>,
    pub(super) inbound_conversation: Arc<ConversationSession>,
    pub(super) outbound_conversation: Arc<ConversationSession>,
    pub(crate) status_tx: mpsc::Sender<ConnectionStatusChange>,
}
impl MeshSession {
    /// Opens (or creates) the local chain and connects it to the remote
    /// server at `addr`, returning the fully wired chain.
    ///
    /// The chain loads from disk in centralized mode, then runs distributed
    /// while offline; a background task auto-reconnects unless the chain is
    /// temporal.
    pub(super) async fn connect(
        builder: ChainBuilder,
        cfg_mesh: &ConfMesh,
        chain_key: &ChainKey,
        remote: url::Url,
        addr: MeshAddress,
        node_id: NodeId,
        hello_path: String,
        loader_local: impl Loader + 'static,
        loader_remote: impl Loader + 'static,
    ) -> Result<Arc<Chain>, ChainCreationError> {
        debug!("new: chain_key={}", chain_key.to_string());
        #[cfg(feature = "enable_super_verbose")]
        {
            let bt = backtrace::Backtrace::new();
            trace!("{:?}", bt);
        }
        // Temporal chains fetch their record data lazily on demand
        let temporal = builder.temporal;
        let lazy_data = temporal == true;
        // Open the chain and make a sample of the last items so that we can
        // speed up the synchronization by skipping already loaded items
        let mut chain = {
            let chain_key = chain_key.clone();
            // Generate a better key name
            let mut key_name = chain_key.name.clone();
            if key_name.starts_with("/") {
                key_name = key_name[1..].to_string();
            }
            let chain_key = ChainKey::new(format!("{}", key_name).to_string());
            // Generate the chain object
            // While we load the data on disk we run in centralized mode
            // as otherwise there could be errors loading the redo log
            trace!("perf-checkpoint: chain::new_ext");
            let mut chain = Chain::new_ext(
                builder.clone(),
                chain_key,
                Some(Box::new(loader_local)),
                true,
                TrustMode::Centralized(CentralizedRole::Client),
                TrustMode::Distributed,
            )
            .await?;
            trace!("perf-checkpoint: finished chain::new_ext");
            chain.remote = Some(remote);
            chain.remote_addr = Some(addr.clone());
            chain
        };
        // While we are running offline we run in full distributed mode until
        // we are reconnect as otherwise if the server is in distributed mode
        // it will immediately reject everything
        chain.single().await.set_integrity(TrustMode::Distributed);
        // Create a session pipe
        let chain_store = Arc::new(StdMutex::new(None));
        let session = RecoverableSessionPipe {
            cfg_mesh: cfg_mesh.clone(),
            next: NullPipe::new(),
            active: RwLock::new(None),
            lazy_data,
            mode: builder.cfg_ate.recovery_mode,
            addr,
            hello_path,
            node_id: node_id.clone(),
            key: chain_key.clone(),
            builder,
            exit: chain.exit.clone(),
            chain: Arc::clone(&chain_store),
            loader_remote: StdMutex::new(Some(Box::new(loader_remote))),
            metrics: Arc::clone(&chain.metrics),
            throttle: Arc::clone(&chain.throttle),
        };
        // Add the pipe to the chain and cement it
        chain.proxy(Box::new(session));
        let chain = Arc::new(chain);
        // Set a reference to the chain and trigger it to connect!
        chain_store.lock().unwrap().replace(Arc::downgrade(&chain));
        trace!("perf-checkpoint: pipe.connect()");
        let on_disconnect = chain.pipe.connect().await?;
        trace!("perf-checkpoint: pipe.connected");
        // Launch an automatic reconnect thread
        if temporal == false {
            trace!("launching auto-reconnect thread {}", chain_key.to_string());
            TaskEngine::spawn(RecoverableSessionPipe::auto_reconnect(
                Arc::downgrade(&chain),
                on_disconnect,
            ));
        }
        // Ok we are good!
        trace!("chain connected {}", chain_key.to_string());
        Ok(chain)
    }
pub(super) async fn inbox_human_message(
self: &Arc<MeshSession>,
message: String,
loader: &mut Option<Box<dyn Loader>>,
) -> Result<(), CommsError> {
trace!("human-message len={}", message.len());
if let Some(loader) = loader.as_mut() {
loader.human_message(message);
}
Ok(())
}
    /// Feeds a batch of events received from the server into the local
    /// chain. While a loader is active (history replay) the events are also
    /// reported to it and already-loaded duplicates are filtered out.
    pub(super) async fn inbox_events(
        self: &Arc<MeshSession>,
        evts: Vec<MessageEvent>,
        loader: &mut Option<Box<dyn Loader>>,
    ) -> Result<(), CommsError> {
        trace!("events cnt={}", evts.len());
        match self.chain.upgrade() {
            Some(chain) => {
                // Convert the events but we do this differently depending on on if we are
                // in a loading phase or a running phase
                let feed_me = MessageEvent::convert_from(evts.into_iter());
                let feed_me = match loader.as_mut() {
                    Some(l) => {
                        // Feeding the events into the loader lets proactive feedback to be given back to
                        // the user such as progress bars
                        l.feed_events(&feed_me);
                        // When we are running then we proactively remove any duplicates to reduce noise
                        // or the likelihood of errors
                        feed_me
                            .into_iter()
                            .filter(|e| l.relevance_check(e) == false)
                            .collect::<Vec<_>>()
                    }
                    None => feed_me,
                };
                // We only feed the transactions into the local chain otherwise this will
                // reflect events back into the chain-of-trust running on the server
                chain
                    .pipe
                    .feed(ChainWork {
                        trans: Transaction {
                            scope: TransactionScope::Local,
                            transmit: false,
                            events: feed_me,
                            timeout: Duration::from_secs(30),
                            conversation: Some(Arc::clone(&self.inbound_conversation)),
                        },
                    })
                    .await?;
            }
            None => {}
        };
        Ok(())
    }
pub(super) async fn inbox_confirmed(
self: &Arc<MeshSession>,
id: u64,
) -> Result<(), CommsError> {
trace!("commit_confirmed id={}", id);
let r = {
let mut lock = self.commit.lock().unwrap();
lock.remove(&id)
};
if let Some(result) = r {
result.send(Ok(id)).await?;
} else {
trace!("orphaned confirmation!");
}
Ok(())
}
pub(super) async fn inbox_commit_error(
self: &Arc<MeshSession>,
id: u64,
err: String,
) -> Result<(), CommsError> {
trace!("commit_error id={}, err={}", id, err);
let r = {
let mut lock = self.commit.lock().unwrap();
lock.remove(&id)
};
if let Some(result) = r {
result
.send(Err(CommitErrorKind::RootError(err).into()))
.await?;
}
Ok(())
}
pub(super) fn inbox_lock_result(
self: &Arc<MeshSession>,
key: PrimaryKey,
is_locked: bool,
) -> Result<(), CommsError> {
trace!(
"lock_result key={} is_locked={}",
key.to_string(),
is_locked
);
let mut remove = false;
let mut guard = self.lock_requests.lock().unwrap();
if let Some(result) = guard.get_mut(&key) {
if result.entropy(is_locked) == true {
remove = true;
}
}
if remove == true {
guard.remove(&key);
}
Ok(())
}
pub(super) fn inbox_load_result(
self: &Arc<MeshSession>,
id: u64,
) -> Result<Option<LoadRequest>, CommsError> {
trace!(
"load_result id={}",
id,
);
let mut guard = self.load_requests.lock().unwrap();
if let Some(result) = guard.remove(&id) {
return Ok(Some(result));
}
Ok(None)
}
    /// Records a 'delayed upload' marker covering the local events from
    /// `pivot` onwards so they can be pushed to the server later.
    /// A no-op when a marker already covers the range or the range is empty.
    pub(super) async fn record_delayed_upload(
        chain: &Arc<Chain>,
        pivot: ChainTimestamp,
    ) -> Result<(), CommsError> {
        let mut guard = chain.inside_async.write().await;
        let from = guard.range_keys(pivot..).next();
        if let Some(from) = from {
            if let Some(a) = guard.chain.timeline.pointers.get_delayed_upload(from) {
                trace!("delayed_upload exists: {}..{}", a.from, a.to);
                return Ok(());
            }
            let to = guard.range_keys(from..).next_back();
            if let Some(to) = to {
                trace!("delayed_upload new: {}..{}", from, to);
                guard
                    .feed_meta_data(
                        &chain.inside_sync,
                        Metadata {
                            core: vec![CoreMetadata::DelayedUpload(MetaDelayedUpload {
                                complete: false,
                                from: from.clone(),
                                to: to.clone(),
                            })],
                        },
                    )
                    .await?;
            } else {
                trace!("delayed_upload: {}..error", from);
            }
        } else {
            trace!("delayed_upload: error..error");
        }
        Ok(())
    }
    /// Marks a previously recorded delayed-upload range as fully uploaded.
    pub(super) async fn complete_delayed_upload(
        chain: &Arc<Chain>,
        from: ChainTimestamp,
        to: ChainTimestamp,
    ) -> Result<(), CommsError> {
        trace!("delayed_upload complete: {}..{}", from, to);
        let mut guard = chain.inside_async.write().await;
        let _ = guard
            .feed_meta_data(
                &chain.inside_sync,
                Metadata {
                    core: vec![CoreMetadata::DelayedUpload(MetaDelayedUpload {
                        complete: true,
                        from,
                        to,
                    })],
                },
            )
            .await?;
        Ok(())
    }
    /// Handles the header of a history replay: configures the chain's trust
    /// mode and root keys, records a delayed-upload range for local events
    /// newer than the replay window, and primes the loader with the size.
    pub(super) async fn inbox_start_of_history(
        self: &Arc<MeshSession>,
        size: usize,
        _from: Option<ChainTimestamp>,
        to: Option<ChainTimestamp>,
        loader: &mut Option<Box<dyn Loader>>,
        root_keys: Vec<PublicSignKey>,
        integrity: TrustMode,
    ) -> Result<(), CommsError> {
        // Declare variables
        let size = size;
        if let Some(chain) = self.chain.upgrade() {
            #[cfg(feature = "enable_verbose")]
            trace!("start_of_history: chain_key={}", chain.key());
            {
                // Setup the chain based on the properties given to us
                let mut lock = chain.inside_sync.write().unwrap();
                lock.set_integrity_mode(integrity);
                for plugin in lock.plugins.iter_mut() {
                    plugin.set_root_keys(&root_keys);
                }
            }
            // If we are synchronizing from an earlier point in the tree then
            // add all the events into a redo log that will be shippped
            if let Some(to) = to {
                let next = {
                    let multi = chain.multi().await;
                    let guard = multi.inside_async.read().await;
                    let mut iter = guard.range_keys(to..);
                    iter.next();
                    iter.next()
                };
                if let Some(next) = next {
                    MeshSession::record_delayed_upload(&chain, next).await?;
                }
            }
        }
        // Tell the loader that we will be starting the load process of the history
        if let Some(loader) = loader {
            loader.start_of_history(size).await;
        }
        Ok(())
    }
    /// Handles the end-of-history marker; consumes the loader, which
    /// releases anyone waiting on the chain to finish loading.
    pub(super) async fn inbox_end_of_history(
        self: &Arc<MeshSession>,
        _pck: PacketWithContext<Message, ()>,
        loader: &mut Option<Box<dyn Loader>>,
    ) -> Result<(), CommsError> {
        trace!("end_of_history");
        // The end of the history means that the chain can now be actively used, its likely that
        // a loader is waiting for this important event which will then release some caller who
        // wanted to use the data but is waiting for it to load first.
        if let Some(mut loader) = loader.take() {
            loader.end_of_history().await;
        }
        Ok(())
    }
    /// Merges session properties pushed by the server into the chain's
    /// default session.
    pub(super) async fn inbox_secure_with(
        self: &Arc<MeshSession>,
        session: crate::session::AteSessionUser,
    ) -> Result<(), CommsError> {
        if let Some(chain) = self.chain.upgrade() {
            if let Some(root) = session.user.write_keys().next() {
                trace!(
                    "received 'secure_with' secrets root_key={}",
                    root.as_public_key().hash()
                );
            } else {
                trace!("received 'secure_with' secrets no_root");
            }
            chain
                .inside_sync
                .write()
                .unwrap()
                .default_session
                .append(session.properties());
        }
        Ok(())
    }
    /// Resets both conversations and adopts the new conversation id issued
    /// by the server.
    pub(super) async fn inbox_new_conversation(
        self: &Arc<MeshSession>,
        conversation_id: AteHash,
    ) -> Result<(), CommsError> {
        self.inbound_conversation.clear();
        self.outbound_conversation.clear();
        // NOTE(review): `try_lock` here returns Option (non-std mutex);
        // failure to acquire is treated as a disconnect.
        if let Some(mut a) = self.inbound_conversation.id.try_lock() {
            a.update(Some(conversation_id));
        } else {
            error!("failed to update the inbound conversation id");
            bail!(CommsErrorKind::Disconnected);
        }
        if let Some(mut a) = self.outbound_conversation.id.try_lock() {
            a.update(Some(conversation_id));
        } else {
            error!("failed to update the outbound conversation id");
            bail!(CommsErrorKind::Disconnected);
        }
        Ok(())
    }
    /// Central dispatch for every message received from the server; routes
    /// each variant to its dedicated handler.
    pub(super) async fn inbox_packet(
        self: &Arc<MeshSession>,
        loader: &mut Option<Box<dyn Loader>>,
        pck: PacketWithContext<Message, ()>,
    ) -> Result<(), CommsError> {
        #[cfg(feature = "enable_super_verbose")]
        trace!("packet size={}", pck.data.bytes.len());
        //trace!("packet(size={} msg={:?})", pck.data.bytes.len(), pck.packet.msg);
        match pck.packet.msg {
            Message::StartOfHistory {
                size,
                from,
                to,
                root_keys,
                integrity,
            } => {
                Self::inbox_start_of_history(self, size, from, to, loader, root_keys, integrity)
                    .instrument(span!(Level::DEBUG, "start-of-history"))
                    .await?;
            }
            Message::HumanMessage { message } => {
                Self::inbox_human_message(self, message, loader)
                    .instrument(span!(Level::DEBUG, "human-message"))
                    .await?;
            }
            Message::ReadOnly => {
                // The server will not accept writes - fail all pending
                // commits with ReadOnly and notify the status channel.
                error!(
                    "chain-of-trust is currently read-only - {}",
                    self.key.to_string()
                );
                self.cancel_commits(CommitErrorKind::ReadOnly).await;
                let _ = self.status_tx.send(ConnectionStatusChange::ReadOnly).await;
            }
            Message::Events { commit, evts } => {
                let num_deletes = evts
                    .iter()
                    .filter(|a| a.meta.get_tombstone().is_some())
                    .count();
                let num_data = evts.iter().filter(|a| a.data.is_some()).count();
                // A commit id piggy-backed on the events confirms a commit;
                // process the confirmation first, then the events.
                let ret2 = if let Some(id) = commit {
                    Self::inbox_confirmed(self, id)
                        .instrument(span!(Level::DEBUG, "commit-confirmed"))
                        .await
                } else {
                    Result::<_, CommsError>::Ok(())
                };
                Self::inbox_events(self, evts, loader)
                    .instrument(span!(
                        Level::DEBUG,
                        "event",
                        delete_cnt = num_deletes,
                        data_cnt = num_data
                    ))
                    .await?;
                ret2?;
            }
            Message::Confirmed(id) => {
                Self::inbox_confirmed(self, id)
                    .instrument(span!(Level::DEBUG, "commit-confirmed"))
                    .await?;
            }
            Message::CommitError { id, err } => {
                Self::inbox_commit_error(self, id, err)
                    .instrument(span!(Level::DEBUG, "commit-error"))
                    .await?;
            }
            Message::LockResult { key, is_locked } => {
                async move { Self::inbox_lock_result(self, key, is_locked) }
                    .instrument(span!(Level::DEBUG, "lock_result"))
                    .await?;
            }
            Message::LoadManyResult { id, data } => {
                async move {
                    let sender = Self::inbox_load_result(self, id)?;
                    if let Some(load) = sender
                    {
                        // Build the records
                        let data = data.into_iter().map(|d| d.map(|d| Bytes::from(d))).collect::<Vec<_>>();
                        let records = load.records
                            .into_iter()
                            .zip(data.clone().into_iter())
                            .collect();
                        // Anything that is loaded is primed back down the pipes
                        // so that future requests do not need to go to the server
                        if let Some(chain) = self.chain.upgrade() {
                            chain
                                .pipe
                                .prime(records)
                                .await?;
                        }
                        // Inform the sender
                        let _ = load.tx.send(Ok(data)).await;
                    }
                    Result::<(), CommsError>::Ok(())
                }
                .instrument(span!(Level::DEBUG, "load_result"))
                .await?;
            }
            Message::LoadManyFailed { id, err } => {
                async move {
                    let sender = Self::inbox_load_result(self, id)?;
                    if let Some(load) = sender {
                        let _ = load.tx.send(Err(LoadErrorKind::LoadFailed(err).into())).await;
                    }
                    Result::<(), CommsError>::Ok(())
                }
                .instrument(span!(Level::DEBUG, "load_failed"))
                .await?;
            }
            Message::EndOfHistory => {
                Self::inbox_end_of_history(self, pck, loader)
                    .instrument(span!(Level::DEBUG, "end-of-history"))
                    .await?;
            }
            Message::SecuredWith(session) => {
                Self::inbox_secure_with(self, session)
                    .instrument(span!(Level::DEBUG, "secured-with"))
                    .await?;
            }
            Message::NewConversation { conversation_id } => {
                Self::inbox_new_conversation(self, conversation_id)
                    .instrument(span!(Level::DEBUG, "new-conversation"))
                    .await?;
            }
            Message::FatalTerminate(fatal) => {
                // The server rejected us outright - fail the loader (if any)
                // and drop the connection.
                async move {
                    if let Some(mut loader) = loader.take() {
                        loader
                            .failed(ChainCreationErrorKind::ServerRejected(fatal.clone()).into())
                            .await;
                    }
                    warn!("mesh-session-err: {}", fatal);
                }
                .instrument(span!(Level::DEBUG, "fatal_terminate"))
                .await;
                bail!(CommsErrorKind::Disconnected);
            }
            _ => {}
        };
        Ok(())
    }
pub(super) async fn cancel_commits(&self, reason: CommitErrorKind) {
let mut senders = Vec::new();
{
let mut guard = self.commit.lock().unwrap();
for (_, sender) in guard.drain() {
senders.push(sender);
}
}
for sender in senders.into_iter() {
let reason = match &reason {
CommitErrorKind::ReadOnly => CommitErrorKind::ReadOnly,
_ => CommitErrorKind::Aborted,
};
if let Err(err) = sender.send(Err(reason.into())).await {
warn!("mesh-session-cancel-err: {}", err.to_string());
}
}
}
pub(super) fn cancel_locks(&self) {
let mut guard = self.lock_requests.lock().unwrap();
for (_, sender) in guard.drain() {
sender.cancel();
}
}
pub(super) fn cancel_sniffers(&self) {
if let Some(guard) = self.chain.upgrade() {
let mut lock = guard.inside_sync.write().unwrap();
lock.sniffers.clear();
}
}
}
/// Per-connection processor that feeds inbound packets from the server into
/// the owning `MeshSession`.
pub(crate) struct MeshSessionProcessor {
    // Address of the remote node this processor is connected to
    pub(crate) addr: MeshAddress,
    pub(crate) node_id: NodeId,
    // Present only while the chain history is still loading
    pub(crate) loader: Option<Box<dyn Loader>>,
    // Weak so the processor does not keep the session alive on its own
    pub(crate) session: Weak<MeshSession>,
    pub(crate) status_tx: mpsc::Sender<ConnectionStatusChange>,
}
#[async_trait]
impl InboxProcessor<Message, ()> for MeshSessionProcessor {
    /// Dispatches one inbound packet; bails with `Disconnected` once the
    /// session has been dropped.
    async fn process(&mut self, pck: PacketWithContext<Message, ()>) -> Result<(), CommsError> {
        let session = match Weak::upgrade(&self.session) {
            Some(a) => a,
            None => {
                trace!("inbox-server-exit: reference dropped scope");
                bail!(CommsErrorKind::Disconnected);
            }
        };
        MeshSession::inbox_packet(&session, &mut self.loader, pck).await?;
        Ok(())
    }
    /// Aborts all pending work and notifies the status channel when the
    /// inbound connection is shut down or fails.
    async fn shutdown(&mut self, _sock_addr: MeshConnectAddr) {
        debug!("disconnected: {}:{}", self.addr.host, self.addr.port);
        if let Some(session) = self.session.upgrade() {
            session.cancel_commits(CommitErrorKind::Aborted).await;
            session.cancel_sniffers();
            session.cancel_locks();
        }
        // We should only get here if the inbound connection is shutdown or fails
        let _ = self
            .status_tx
            .send(ConnectionStatusChange::Disconnected)
            .await;
    }
}
/// Wake anything still waiting on locks or sniffing for events when the
/// session itself goes away.
impl Drop for MeshSession {
    fn drop(&mut self) {
        trace!("drop {}", self.key.to_string());
        self.cancel_locks();
        self.cancel_sniffers();
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/redirect.rs | lib/src/mesh/redirect.rs | use async_trait::async_trait;
use error_chain::bail;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::marker::PhantomData;
use std::net::SocketAddr;
use tokio::sync::broadcast;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::{Instrument, WithSubscriber};
use super::client::MeshClient;
use super::core::*;
use super::msg::*;
use super::server::SessionContext;
use super::MeshSession;
use super::Registry;
use super::*;
use crate::chain::*;
use crate::comms::ServerProcessorFascade;
use crate::comms::TxDirection;
use crate::comms::TxGroup;
use crate::comms::*;
use crate::conf::*;
use crate::crypto::AteHash;
use crate::engine::TaskEngine;
use crate::error::*;
use crate::flow::OpenAction;
use crate::flow::OpenFlow;
use crate::index::*;
use crate::prelude::*;
use crate::spec::SerializationFormat;
use crate::time::ChainTimestamp;
use crate::transaction::*;
use crate::trust::*;
/// Inbox processor that relays everything it receives straight back out on
/// `tx` - used to splice a client onto another server (see `redirect` below).
struct Redirect<C>
where
    C: Send + Sync + Default + 'static,
{
    // Transmit side of the original connection that packets are echoed to
    tx: Tx,
    // Carries the context type parameter; no `C` value is ever stored
    _marker1: PhantomData<C>,
}
impl<C: Send + Sync + Default> Drop for Redirect<C> {
    /// Purely diagnostic - records when the relay is torn down.
    fn drop(&mut self) {
        debug!("drop(redirect)");
    }
}
#[async_trait]
impl<C> InboxProcessor<Message, C> for Redirect<C>
where
    C: Send + Sync + Default + 'static,
{
    /// Echo every packet received from the redirect target back over our own
    /// transmit channel to the original caller.
    async fn process(&mut self, packet: PacketWithContext<Message, C>) -> Result<(), CommsError> {
        self.tx.send_reply(packet.data).await?;
        Ok(())
    }

    /// Log the disconnect; a redirect holds no state that needs cleanup.
    async fn shutdown(&mut self, addr: SocketAddr) {
        debug!("disconnected: {}", addr);
    }
}
/// Open a relay connection to `node_addr` on behalf of a client whose chain
/// lives on a different node, subscribe to `chain_key` there, and return the
/// transmit handle of the relay so further traffic can be forwarded.
///
/// The caller's own `tx` is wrapped in a [`Redirect`] fascade so that every
/// packet arriving from the target node is echoed straight back to the client.
pub(super) async fn redirect<C>(
    root: Arc<MeshRoot>,
    node_addr: MeshAddress,
    omit_data: bool,
    hello_path: &str,
    chain_key: ChainKey,
    from: ChainTimestamp,
    tx: Tx,
    exit: broadcast::Receiver<()>,
) -> Result<Tx, CommsError>
where
    C: Send + Sync + Default + 'static,
{
    // Reuse the caller's metrics/throttle so the relay is accounted against it
    let metrics = Arc::clone(&tx.metrics);
    let throttle = Arc::clone(&tx.throttle);
    let fascade = Redirect {
        tx,
        _marker1: PhantomData::<C>,
    };
    debug!("redirect to {}", node_addr);
    // Build a configuration that forces connecting to a specific node
    let mut conf = root.cfg_mesh.clone();
    conf.force_connect = Some(node_addr.clone());
    // Trust only our own listen certificate when we have one; otherwise the
    // node-to-node hop accepts any certificate
    if let Some(cert) = &root.cfg_mesh.listen_certificate {
        conf.certificate_validation = CertificateValidation::AllowedCertificates(vec![cert.hash()]);
    } else {
        conf.certificate_validation = CertificateValidation::AllowAll;
    }
    let conf = MeshConfig::new(conf)
        .connect_to(node_addr);
    // Attempt to connect to the other machine
    let mut relay_tx = crate::comms::connect(
        &conf,
        hello_path.to_string(),
        root.server_id,
        fascade,
        metrics,
        throttle,
        exit,
    )
    .await?;
    // Send a subscribe packet to the server (redirects are not allowed to
    // cascade - `allow_redirect: false`)
    relay_tx
        .send_all_msg(Message::Subscribe {
            chain_key,
            from,
            allow_redirect: false,
            omit_data,
        })
        .await?;
    // All done
    Ok(relay_tx)
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/core.rs | lib/src/mesh/core.rs | use crate::{header::PrimaryKey, meta::Metadata, pipe::EventPipe};
use async_trait::async_trait;
use bytes::Bytes;
use error_chain::bail;
use serde::{Deserialize, Serialize};
use std::ops::*;
use std::{collections::BTreeMap, sync::Arc};
use tokio::sync::mpsc;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::chain::*;
use crate::comms::{PacketData, NodeId};
use crate::comms::StreamTx;
use crate::comms::Tx;
use crate::conf::*;
use crate::crypto::*;
use crate::error::*;
use crate::event::*;
use crate::index::*;
use crate::mesh::msg::*;
use crate::mesh::MeshSession;
use crate::redo::LogLookup;
use crate::spec::*;
use crate::time::ChainTimestamp;
use crate::trust::*;
/// Determines how the file-system will react while it is nominal and when it is
/// recovering from a communication failure (valid options are 'async', 'readonly-async',
/// 'readonly-sync' or 'sync')
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum RecoveryMode {
    /// Fully asynchronous mode which allows staging of all writes locally giving
    /// maximum availability however split-brain scenarios are the responsibility
    /// of the user
    Async,
    /// While in a nominal state the file-system will make asynchronous writes however
    /// if a communication failure occurs the local file-system will switch to read-only
    /// mode and upon restoring the connectivity the last few writes that had not been
    /// sent will be retransmitted.
    ReadOnlyAsync,
    /// While in a nominal state the file-system will make synchronous writes to the
    /// remote location however if a break in communication occurs the local file-system
    /// will switch to read-only mode until communication is restored.
    ReadOnlySync,
    /// Fully synchronous mode meaning all reads and all writes are committed to
    /// local and remote locations at all times. This gives maximum integrity however
    /// nominal writes will be considerable slower while reads will be blocked when in
    /// a disconnected state
    Sync,
}
impl RecoveryMode {
    /// True when a communication failure should flip the chain to read-only.
    pub fn should_go_readonly(&self) -> bool {
        matches!(
            self,
            RecoveryMode::ReadOnlyAsync | RecoveryMode::ReadOnlySync
        )
    }

    /// True when operations should fail (rather than stage) while disconnected.
    pub fn should_error_out(&self) -> bool {
        !matches!(self, RecoveryMode::Async)
    }

    /// True when data writes must be confirmed by the remote synchronously.
    pub fn is_sync(&self) -> bool {
        matches!(self, RecoveryMode::Sync | RecoveryMode::ReadOnlySync)
    }

    /// True when metadata writes must be confirmed by the remote synchronously.
    pub fn is_meta_sync(&self) -> bool {
        !matches!(self, RecoveryMode::Async)
    }
}
impl std::str::FromStr for RecoveryMode {
    type Err = &'static str;

    /// Parse the textual recovery-mode flag (e.g. from the command line).
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        Ok(match s {
            "async" => RecoveryMode::Async,
            "readonly-async" => RecoveryMode::ReadOnlyAsync,
            "readonly-sync" => RecoveryMode::ReadOnlySync,
            "sync" => RecoveryMode::Sync,
            _ => {
                return Err(
                    "valid values are 'async', 'readonly-async', 'readonly-sync' and 'sync'",
                )
            }
        })
    }
}
/// Determines how the redo-log engine will perform its backup and restoration
/// actions. Backup and restoration is required for expansions of the cluster
/// and for storage capacity management.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum BackupMode {
    /// No backups or restorations will take place for this set of redo logs. Using this
    /// mode does not improve performance but can save disk space and simplify the
    /// deployment model. This comes at the price of essentially having no backups.
    None,
    /// The system will not automatically backup data but it will restore data files from
    /// the backup store before creating new empty log files. This is ideal for migration
    /// environments or replicas.
    Restore,
    /// Backups will be made whenever the log files rotate and when the system loads then
    /// the restoration folder will be checked before it brings online any backups.
    Rotating,
    /// ATE will automatically backup data to the backup location whenever the log files
    /// rotate or the process shuts down. Upon bringing online a new chain-of-trust the
    /// backup files will be checked first before starting a new log-file thus providing
    /// an automatic migration and restoration system.
    Full,
}
impl std::str::FromStr for BackupMode {
    type Err = &'static str;

    /// Parse the textual backup-mode flag; several aliases map onto the same
    /// mode ('off'/'none', and 'full'/'auto'/'on').
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "off" | "none" => Ok(BackupMode::None),
            "restore" => Ok(BackupMode::Restore),
            "rotating" => Ok(BackupMode::Rotating),
            "full" | "auto" | "on" => Ok(BackupMode::Full),
            _ => Err("valid values are 'none', 'restore', 'rotating' and 'full'"),
        }
    }
}
/// Result of opening a chain-of-trust
pub struct OpenedChain {
    // The chain itself
    pub chain: Arc<Chain>,
    // Trust/integrity mode the chain was opened with
    pub integrity: TrustMode,
    // Optional server-supplied message to surface to the user
    pub message_of_the_day: Option<String>,
}
/// Consistent-hash style table that maps chain keys onto the configured root
/// node addresses (see `MeshHashTable::lookup`).
#[derive(Default)]
pub struct MeshHashTable {
    // Root node addresses, indexed by position in the configuration
    pub(super) address_lookup: Vec<MeshAddress>,
    // Ordered map of address-hash -> index into `address_lookup`
    pub(super) hash_table: BTreeMap<AteHash, usize>,
}
impl MeshHashTable {
    /// Map a chain key onto the root node responsible for it, returning the
    /// node's address and its index. Walks the ordered hash table and picks
    /// the entry preceding the first key greater than the chain-key hash
    /// (wrapping to the last entry when none is greater). Returns `None`
    /// when no root addresses are configured.
    pub fn lookup(&self, key: &ChainKey) -> Option<(MeshAddress, u32)> {
        let hash = key.hash();
        let mut pointer: Option<usize> = None;
        for (k, v) in self.hash_table.iter() {
            if *k > hash {
                // NOTE(review): the `Some(a)` arm re-assigns the value it
                // already holds - it keeps the predecessor entry and stops.
                match pointer {
                    Some(a) => {
                        pointer = Some(a.clone());
                        break;
                    }
                    None => {
                        pointer = Some(v.clone());
                        break;
                    }
                };
            }
            pointer = Some(v.clone());
        }
        if let Some(a) = pointer {
            // Modulo guards against indices beyond the address list
            let index = a % self.address_lookup.len();
            if let Some(a) = self.address_lookup.get(index) {
                return Some((a.clone(), index as u32));
            }
        }
        None
    }
    /// Find the index of `addr` within the configured root addresses.
    /// With DNS enabled, a loopback listing matches any loopback/unspecified
    /// address on the same port; otherwise an exact comparison is used.
    pub fn derive_id(&self, addr: &MeshAddress) -> Option<u32> {
        let mut n = 0usize;
        while n < self.address_lookup.len() {
            let test = &self.address_lookup[n];
            #[cfg(feature = "enable_dns")]
            match test.host.is_loopback() {
                true if test.port == addr.port => {
                    if addr.host.is_loopback() || addr.host.is_unspecified() {
                        return Some(n as u32);
                    }
                }
                _ => {
                    if *test == *addr {
                        return Some(n as u32);
                    }
                }
            }
            #[cfg(not(feature = "enable_dns"))]
            if *test == *addr {
                return Some(n as u32);
            }
            n = n + 1;
        }
        None
    }
    /// Compute this server's node id - either from an explicitly forced id or
    /// by locating one of our own addresses in the root list. Fails with
    /// `RequiredExplicitNodeId` when neither is available.
    pub fn compute_node_id(&self, force_node_id: Option<u32>) -> Result<NodeId, CommsError> {
        let node_id = match force_node_id {
            Some(a) => a,
            None => {
                match self.address_lookup
                    .iter()
                    .filter_map(|a| self.derive_id(a))
                    .next()
                {
                    Some(a) => a,
                    None => {
                        bail!(CommsErrorKind::RequiredExplicitNodeId);
                    }
                }
            }
        };
        let node_id = NodeId::generate_server_id(node_id);
        Ok(node_id)
    }
    /// Build the table from the root addresses listed in the mesh config,
    /// preserving their configuration order as the index.
    pub fn new(cfg_mesh: &ConfMesh) -> MeshHashTable {
        let mut index: usize = 0;
        let mut addresses = Vec::new();
        let mut hash_table = BTreeMap::new();
        for addr in cfg_mesh.roots.iter() {
            addresses.push(addr.clone());
            hash_table.insert(addr.hash(), index);
            index = index + 1;
        }
        MeshHashTable {
            address_lookup: addresses,
            hash_table,
        }
    }
}
/// Stream every event in `range` from the chain to the peer on `tx`.
/// Events are sent in throttled batches; signatures can be stripped and any
/// data payload larger than `strip_data` bytes is replaced by a lazy-load
/// reference instead of the bytes themselves.
async fn stream_events<R>(
    chain: &Arc<Chain>,
    range: R,
    tx: &mut Tx,
    strip_signatures: bool,
    strip_data: usize,
) -> Result<(), CommsError>
where
    R: RangeBounds<ChainTimestamp>,
{
    // Declare vars
    let multi = chain.multi().await;
    let mut skip = 0usize;
    // Resolve the starting timestamp: first event in the chain for an
    // unbounded start; +1ms converts an excluded bound into an included one
    let mut start = match range.start_bound() {
        Bound::Unbounded => {
            let guard = multi.inside_async.read().await;
            let r = match guard.range(..).map(|a| a.0).next() {
                Some(a) => a.clone(),
                None => return Ok(()),
            };
            drop(guard);
            r
        }
        Bound::Included(a) => a.clone(),
        Bound::Excluded(a) => ChainTimestamp::from(a.time_since_epoch_ms + 1u64),
    };
    let end = match range.end_bound() {
        Bound::Unbounded => Bound::Unbounded,
        Bound::Included(a) => Bound::Included(a.clone()),
        Bound::Excluded(a) => Bound::Excluded(a.clone()),
    };
    // We work in batches of up to 5000 events releasing the lock between iterations so that
    // the server has time to process new events (capped at 512KB of data per send)
    let max_send: usize = 512 * 1024;
    loop {
        let mut leafs = Vec::new();
        {
            let guard = multi.inside_async.read().await;
            let mut iter = guard
                .range((Bound::Included(start), end))
                .skip(skip)
                .take(5000);
            let mut amount = 0usize;
            while let Some((k, v)) = iter.next() {
                // `start`/`skip` together form a resume cursor: `skip` counts
                // how many events at timestamp `start` were already emitted
                if *k != start {
                    start = k.clone();
                    skip = 1;
                } else {
                    skip = skip + 1;
                }
                leafs.push(EventLeaf {
                    record: v.event_hash,
                    created: 0,
                    updated: 0,
                });
                amount = amount + v.meta_bytes.len() + v.data_size;
                if amount > max_send {
                    break;
                }
            }
            // Nothing gathered means the range is exhausted
            if amount <= 0 {
                return Ok(());
            }
        }
        // Load the gathered leafs and convert them into wire messages
        let mut evts = Vec::new();
        for evt in multi.load_many(leafs).await? {
            let mut meta = evt.data.meta.clone();
            if strip_signatures {
                meta.strip_signatures();
            }
            let evt = MessageEvent {
                meta,
                data: match evt.data.data_bytes {
                    // Small payloads are sent inline; larger ones lazily
                    Some(a) if a.len() <= strip_data => MessageData::Some(a.to_vec()),
                    Some(a) => {
                        let data = a.to_vec();
                        MessageData::LazySome(LazyData {
                            record: evt.leaf.record,
                            hash: AteHash::from_bytes(&data[..]),
                            len: data.len(),
                        })
                    },
                    None => MessageData::None
                },
                format: evt.header.format,
            };
            evts.push(evt);
        }
        trace!("sending {} events", evts.len());
        tx.send_reply_msg(Message::Events { commit: None, evts })
            .await?;
    }
}
/// Send a zero-length history to the peer: a `StartOfHistory` carrying only
/// the chain's root keys and integrity mode, followed immediately by
/// `EndOfHistory`. No events are transmitted.
pub(super) async fn stream_empty_history(
    chain: Arc<Chain>,
    to: Option<ChainTimestamp>,
    tx: &mut StreamTx,
    wire_format: SerializationFormat,
) -> Result<(), CommsError> {
    // Pull the root keys and integrity mode out from under the sync lock
    let (integrity, root_keys) = {
        let guard = chain.inside_sync.read().unwrap();
        let keys = guard
            .plugins
            .iter()
            .flat_map(|p| p.root_keys())
            .collect::<Vec<_>>();
        (guard.integrity, keys)
    };

    // Announce the (empty) history stream to the caller...
    trace!("sending start-of-history (size={})", 0);
    let start = Message::StartOfHistory {
        size: 0,
        from: None,
        to,
        root_keys,
        integrity,
    };
    PacketData::reply_at(tx, wire_format, start).await?;

    // ...and terminate it straight away
    trace!("sending end-of-history");
    PacketData::reply_at(tx, wire_format, Message::EndOfHistory).await?;
    Ok(())
}
/// Stream the chain's history for `range` to the peer: a `StartOfHistory`
/// header (size, bounds, root keys, integrity), the events themselves via
/// `stream_events`, then an `EndOfHistory` terminator.
pub(super) async fn stream_history_range<R>(
    chain: Arc<Chain>,
    range: R,
    tx: &mut Tx,
    strip_signatures: bool,
    strip_data: usize,
) -> Result<(), CommsError>
where
    R: RangeBounds<ChainTimestamp>,
{
    // Extract the root keys and integrity mode
    let (integrity, root_keys) = {
        let chain = chain.inside_sync.read().unwrap();
        let root_keys = chain
            .plugins
            .iter()
            .flat_map(|p| p.root_keys())
            .collect::<Vec<_>>();
        (chain.integrity, root_keys)
    };
    // Determine how many more events are left to sync
    let size = {
        let guard = chain.multi().await;
        let guard = guard.inside_async.read().await;
        guard
            .range((range.start_bound(), range.end_bound()))
            .count()
    };
    // Let the caller know we will be streaming them events
    trace!("sending start-of-history (size={})", size);
    tx.send_reply_msg(Message::StartOfHistory {
        size,
        // Both bound kinds collapse to their timestamp on the wire
        from: match range.start_bound() {
            Bound::Unbounded => None,
            Bound::Included(a) | Bound::Excluded(a) => Some(a.clone()),
        },
        to: match range.end_bound() {
            Bound::Unbounded => None,
            Bound::Included(a) | Bound::Excluded(a) => Some(a.clone()),
        },
        root_keys,
        integrity: integrity.as_client(),
    })
    .await?;
    // Only if there are things to send
    if size > 0 {
        // Sync the events
        trace!("streaming requested events");
        stream_events(&chain, range, tx, strip_signatures, strip_data).await?;
    }
    // Let caller know we have sent all the events that were requested
    trace!("sending end-of-history");
    tx.send_reply_msg(Message::EndOfHistory).await?;
    Ok(())
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/recoverable_session_pipe.rs | lib/src/mesh/recoverable_session_pipe.rs | use crate::engine::timeout;
use async_trait::async_trait;
use error_chain::bail;
use fxhash::FxHashMap;
use std::ops::Deref;
use std::ops::DerefMut;
use std::ops::Rem;
use std::sync::Mutex as StdMutex;
use std::sync::RwLock as StdRwLock;
use std::time::Duration;
use std::time::Instant;
use std::{sync::Arc, sync::Weak};
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::sync::RwLock;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use super::active_session_pipe::*;
use super::core::*;
use super::lock_request::*;
use super::msg::*;
use super::session::*;
use super::*;
use crate::chain::*;
use crate::conf::*;
use crate::crypto::*;
use crate::error::*;
use crate::header::*;
use crate::loader::*;
use crate::mesh::NodeId;
use crate::meta::*;
use crate::pipe::*;
use crate::session::*;
use crate::spec::*;
use crate::time::*;
use crate::transaction::*;
use crate::trust::*;
use crate::{anti_replay::AntiReplayPlugin, comms::*};
/// Event pipe that wraps an [`ActiveSessionPipe`] and can transparently
/// reconnect and resynchronize with the remote server after a link failure
/// (see `connect` / `auto_reconnect`).
pub(super) struct RecoverableSessionPipe {
    // Passes onto the next pipe
    pub(super) next: Arc<Box<dyn EventPipe>>,
    // Currently active connection; `None` while disconnected
    pub(super) active: RwLock<Option<ActiveSessionPipe>>,
    // Governs read-only / error-out behaviour while disconnected
    pub(super) mode: RecoveryMode,
    // Configuration
    pub(super) cfg_mesh: ConfMesh,
    // Used to create new active pipes
    pub(super) addr: MeshAddress,
    pub(super) lazy_data: bool,
    pub(super) hello_path: String,
    pub(super) node_id: NodeId,
    pub(super) key: ChainKey,
    pub(super) builder: ChainBuilder,
    // Broadcast used to signal shutdown to connections this pipe spawns
    pub(super) exit: broadcast::Sender<()>,
    // Weak back-reference to the chain; must be set (via 'set_chain') before
    // an active pipe can be created
    pub(super) chain: Arc<StdMutex<Option<Weak<Chain>>>>,
    // Remote loader handed over on the next (re)connect
    pub(super) loader_remote: StdMutex<Option<Box<dyn Loader + 'static>>>,
    pub(crate) metrics: Arc<StdMutex<Metrics>>,
    pub(crate) throttle: Arc<StdMutex<Throttle>>,
}
impl RecoverableSessionPipe {
    /// Stub used when the client feature is disabled - always errors.
    #[cfg(not(feature = "enable_client"))]
    pub(super) async fn create_active_pipe(
        &self,
        _loader: impl Loader + 'static,
        _status_tx: mpsc::Sender<ConnectionStatusChange>,
        _exit: broadcast::Receiver<()>,
    ) -> Result<ActiveSessionPipe, CommsError> {
        return Err(CommsErrorKind::InternalError(
            "client connections are unsupported".to_string(),
        )
        .into());
    }
    /// Establish a fresh connection to the remote server and subscribe to the
    /// chain, returning the resulting [`ActiveSessionPipe`] (not yet marked
    /// connected). Panics if `set_chain` has not been called first.
    #[cfg(feature = "enable_client")]
    pub(super) async fn create_active_pipe(
        &self,
        loader: impl Loader + 'static,
        status_tx: mpsc::Sender<ConnectionStatusChange>,
        exit: broadcast::Receiver<()>,
    ) -> Result<ActiveSessionPipe, CommsError> {
        trace!("creating active pipe");
        // Shared state between the session and the returned pipe
        let commit = Arc::new(StdMutex::new(FxHashMap::default()));
        let lock_requests = Arc::new(StdMutex::new(FxHashMap::default()));
        let load_requests = Arc::new(StdMutex::new(FxHashMap::default()));
        // Create pipes to all the target root nodes
        trace!("building node cfg connect to");
        let node_cfg = MeshConfig::new(self.cfg_mesh.clone())
            .connect_to(self.addr.clone());
        let inbound_conversation = Arc::new(ConversationSession::default());
        let outbound_conversation = Arc::new(ConversationSession::default());
        let session = Arc::new(MeshSession {
            addr: self.addr.clone(),
            key: self.key.clone(),
            sync_tolerance: self.builder.cfg_ate.sync_tolerance,
            commit: Arc::clone(&commit),
            chain: Weak::clone(
                self.chain
                    .lock()
                    .unwrap()
                    .as_ref()
                    .expect("You must call the 'set_chain' before invoking this method."),
            ),
            lock_requests: Arc::clone(&lock_requests),
            load_requests: Arc::clone(&load_requests),
            inbound_conversation: Arc::clone(&inbound_conversation),
            outbound_conversation: Arc::clone(&outbound_conversation),
            status_tx: status_tx.clone(),
        });
        // Inbox processor holds only a weak session reference so the session
        // can be dropped independently of the connection
        let inbox = MeshSessionProcessor {
            addr: self.addr.clone(),
            node_id: self.node_id,
            session: Arc::downgrade(&session),
            loader: Some(Box::new(loader)),
            status_tx,
        };
        let mut node_tx = crate::comms::connect(
            &node_cfg,
            self.hello_path.clone(),
            self.node_id.clone(),
            inbox,
            Arc::clone(&self.metrics),
            Arc::clone(&self.throttle),
            exit,
        )
        .await?;
        // Compute an end time that we will sync from based off whats already in the
        // chain-of-trust minus a small tolerance that helps in edge-cases - this will
        // cause a minor number duplicate events to be ignored but it is needed to
        // reduce the chances of data loss.
        trace!("computing timeline end");
        let from = {
            let tolerance_ms = self.builder.cfg_ate.sync_tolerance.as_millis() as u64;
            let chain = {
                let lock = self.chain.lock().unwrap();
                lock.as_ref().map(|a| Weak::upgrade(a)).flatten()
            };
            if let Some(chain) = chain {
                let lock = chain.inside_async.read().await;
                let mut ret = lock.chain.timeline.end();
                if ret.time_since_epoch_ms > tolerance_ms {
                    ret.time_since_epoch_ms = ret.time_since_epoch_ms - tolerance_ms;
                }
                // If the chain has a cut-off value then the subscription point must be less than
                // this value to avoid the situation where a compacted chain reloads values that
                // have already been deleted
                let chain_header = lock.chain.redo.read_chain_header()?;
                if chain_header.cut_off > ret {
                    ret = chain_header.cut_off;
                }
                ret
            } else {
                ChainTimestamp::from(0u64)
            }
        };
        // Now we subscribe to the chain
        trace!("sending subscribe (key={}, omit_data={})", self.key, self.lazy_data);
        node_tx
            .send_reply_msg(Message::Subscribe {
                chain_key: self.key.clone(),
                from,
                allow_redirect: true,
                omit_data: self.lazy_data,
            })
            .await?;
        // Set the pipe and drop the lock so that events can be fed correctly
        Ok(ActiveSessionPipe {
            key: self.key.clone(),
            connected: false,
            likely_read_only: false,
            mode: self.mode,
            session: Arc::clone(&session),
            tx: node_tx,
            commit: Arc::clone(&commit),
            lock_attempt_timeout: self.builder.cfg_ate.lock_attempt_timeout,
            lock_requests: Arc::clone(&lock_requests),
            load_timeout: self.builder.cfg_ate.load_timeout,
            load_requests: Arc::clone(&load_requests),
            outbound_conversation: Arc::clone(&outbound_conversation),
        })
    }
    /// Background task: watches for status changes on the chain's pipe and
    /// reconnects with exponential backoff whenever the link drops. Runs
    /// until the chain itself is dropped.
    pub(super) async fn auto_reconnect(
        chain: Weak<Chain>,
        mut status_change: mpsc::Receiver<ConnectionStatusChange>,
    ) -> Result<(), ChainCreationError> {
        // Enter a loop
        let mut exp_backoff = 1;
        loop {
            // Upgrade to a full reference long enough to get a channel clone
            // if we can not get a full reference then the chain has been destroyed
            // and we should exit
            let pipe = {
                let chain = match Weak::upgrade(&chain) {
                    Some(a) => a,
                    None => {
                        break;
                    }
                };
                Arc::clone(&chain.pipe)
            };
            // Wait on it to disconnect
            let now = Instant::now();
            match status_change.recv().await {
                Some(ConnectionStatusChange::Disconnected) => {
                    pipe.on_disconnect().await?;
                }
                Some(ConnectionStatusChange::ReadOnly) => {
                    pipe.on_read_only().await?;
                    continue;
                }
                None => {
                    break;
                }
            }
            // Enter a reconnect loop
            while chain.strong_count() > 0 {
                // If we had a good run then reset the exponential backoff
                if now.elapsed().as_secs() > 60 {
                    exp_backoff = 1;
                }
                // Wait a fixed amount of time to prevent thrashing and increase the exp backoff
                crate::engine::sleep(Duration::from_secs(exp_backoff)).await;
                exp_backoff = (exp_backoff * 2) + 4;
                if exp_backoff > 60 {
                    exp_backoff = 60;
                }
                // Reconnect
                status_change = match pipe.connect().await {
                    Ok(a) => a,
                    Err(ChainCreationError(
                        ChainCreationErrorKind::CommsError(CommsErrorKind::Refused),
                        _,
                    )) => {
                        trace!("recoverable_session_pipe reconnect has failed - refused");
                        exp_backoff = 4;
                        continue;
                    }
                    Err(err) => {
                        warn!("recoverable_session_pipe reconnect has failed - {}", err);
                        continue;
                    }
                };
                break;
            }
        }
        // Success
        Ok(())
    }
}
impl Drop for RecoverableSessionPipe {
    /// Purely diagnostic - records which chain/address pipe is going away.
    fn drop(&mut self) {
        let key = self.key.to_string();
        trace!("drop {} @ {}", key, self.addr);
    }
}
#[async_trait]
impl EventPipe for RecoverableSessionPipe {
    /// True only once an active pipe exists and has been marked connected.
    async fn is_connected(&self) -> bool {
        let lock = self.active.read().await;
        if let Some(pipe) = lock.as_ref() {
            return pipe.is_connected();
        }
        false
    }
    /// Switch the active pipe (if any) into read-only mode.
    async fn on_read_only(&self) -> Result<(), CommsError> {
        let mut lock = self.active.write().await;
        if let Some(pipe) = lock.as_mut() {
            pipe.on_read_only();
        }
        Ok(())
    }
    /// Forward a disconnect notification to the active pipe (if any).
    async fn on_disconnect(&self) -> Result<(), CommsError> {
        let lock = self.active.read().await;
        if let Some(pipe) = lock.as_ref() {
            return pipe.on_disconnect().await;
        }
        Ok(())
    }
    /// (Re)establish the connection to the server, wait for the chain to load,
    /// flush any delayed uploads, then mark the pipe connected. Returns the
    /// receiver on which later status changes (used by `auto_reconnect`) will
    /// be delivered.
    async fn connect(
        &self,
    ) -> Result<mpsc::Receiver<ConnectionStatusChange>, ChainCreationError> {
        trace!("connecting to {}", self.addr);
        // Remove the pipe which will mean if we are in a particular recovery
        // mode then all write IO will be blocked
        self.active.write().await.take();
        // We build an anti-replay loader and fill it with the events we already have
        // This is because the sync design has a tolerance in what it replays back
        // to the consumer meaning duplicate events will be received from the remote
        // chain
        trace!("building anti-reply loader");
        let mut anti_replay = Box::new(AntiReplayPlugin::default());
        {
            let chain = self.chain.lock().unwrap().as_ref().map(|a| a.upgrade());
            if let Some(Some(chain)) = chain {
                let guard = chain.inside_async.read().await;
                for evt in guard.chain.timeline.history.iter() {
                    anti_replay.push(evt.1.event_hash);
                }
            }
        }
        // Run the loaders and the message processor
        trace!("building composite loader");
        let mut loader = self.loader_remote.lock().unwrap().take();
        let (loading_sender, mut loading_receiver) = mpsc::channel(1);
        let notify_loaded = Box::new(crate::loader::NotificationLoader::new(loading_sender));
        let mut composite_loader = crate::loader::CompositionLoader::default();
        composite_loader.loaders.push(anti_replay);
        composite_loader.loaders.push(notify_loaded);
        if let Some(loader) = loader.take() {
            composite_loader.loaders.push(loader);
        }
        trace!("perf-checkpoint: create_active_pipe");
        // Set the pipe and drop the lock so that events can be fed correctly
        let (status_tx, status_rx) = mpsc::channel(1);
        let pipe = self
            .create_active_pipe(composite_loader, status_tx, self.exit.subscribe())
            .await?;
        // We replace the new pipe which will mean the chain becomes active again
        // before its completed all the load operations however this is required
        // as otherwise when events are received on the inbox they will not feed
        // properly. A consequence of this is that write operations will succeed
        // again (if they are ASYNC) however any confirmation will not be received
        // until all the chain is loaded
        self.active.write().await.replace(pipe);
        trace!("perf-checkpoint: pre-loading");
        // Wait for all the messages to start loading
        match loading_receiver.recv().await {
            Some(result) => result?,
            None => {
                bail!(ChainCreationErrorKind::ServerRejected(
                    FatalTerminate::Other {
                        err: "Server disconnected before it started loading the chain.".to_string()
                    }
                ));
            }
        }
        debug!("loading {}", self.key.to_string());
        trace!("perf-checkpoint: chain::loading");
        // Wait for all the messages to load before we give it to the caller
        match loading_receiver.recv().await {
            Some(result) => result?,
            None => {
                warn!("Service disconnected before it loaded the chain of trust");
                bail!(ChainCreationErrorKind::ServerRejected(
                    FatalTerminate::Other {
                        err: "Server disconnected before it loaded the chain.".to_string()
                    }
                ));
            }
        }
        debug!("loaded {}", self.key.to_string());
        trace!("perf-checkpoint: chain::loaded");
        // Now we need to send all the events over that have been delayed
        let chain = self.chain.lock().unwrap().as_ref().map(|a| a.upgrade());
        if let Some(Some(chain)) = chain {
            for delayed_upload in chain.get_pending_uploads().await {
                debug!(
                    "sending pending upload [{}..{}]",
                    delayed_upload.from, delayed_upload.to
                );
                let mut lock = self.active.write().await;
                if let Some(pipe_tx) = lock.as_mut().map(|a| &mut a.tx) {
                    trace!("perf-checkpoint: streaming events to server");
                    // We send all the events for this delayed upload to the server by streaming
                    // it in a controlled and throttled way
                    stream_history_range(
                        Arc::clone(&chain),
                        delayed_upload.from..delayed_upload.to,
                        pipe_tx,
                        false,
                        usize::MAX,
                    )
                    .await?;
                    trace!("perf-checkpoint: streamed events to the server");
                    // We complete a dummy transaction to confirm that all the data has been
                    // successfully received by the server and processed before we clear our flag
                    trace!("perf-checkpoint: sync");
                    match chain.multi().await.sync().await {
                        Ok(_) => {
                            // Finally we clear the pending upload by writing a record for it
                            MeshSession::complete_delayed_upload(
                                &chain,
                                delayed_upload.from,
                                delayed_upload.to,
                            )
                            .await?;
                        }
                        Err(err) => {
                            debug!("failed sending pending upload - {}", err);
                        }
                    };
                }
            }
        }
        trace!("local upload complete {}", self.key.to_string());
        // Mark the pipe as connected
        {
            let mut lock = self.active.write().await;
            if let Some(pipe) = lock.as_mut() {
                pipe.mark_connected();
            }
            trace!("pipe connected {}", self.key.to_string());
        }
        trace!("perf-checkpoint: pipe::connected");
        Ok(status_rx)
    }
    /// Load events by leaf hash - tries the next (local) pipe first and falls
    /// back to the remote connection when the data is missing locally.
    async fn load_many(&self, leafs: Vec<AteHash>) -> Result<Vec<Option<Bytes>>, LoadError> {
        let ret = match self.next.load_many(leafs.clone()).await
        {
            Ok(a) => a,
            Err(LoadError(LoadErrorKind::MissingData, _)) |
            Err(LoadError(LoadErrorKind::Disconnected, _)) => {
                let mut lock = self.active.write().await;
                if let Some(active) = lock.as_mut() {
                    active.load_many(leafs).await?
                } else {
                    bail!(LoadErrorKind::Disconnected)
                }
            },
            Err(err) => return Err(err)
        };
        Ok(ret)
    }
    async fn prime(&self, _records: Vec<(AteHash, Option<Bytes>)>) -> Result<(), CommsError>
    {
        // We don't do anything here as the server is the one that send it to us in
        // the first place so what would be the point in sending it back to them again?
        Ok(())
    }
    /// Push a transaction to the server (honouring the recovery mode when
    /// disconnected), optionally waiting for the remote commit before passing
    /// the work down to the local chain.
    async fn feed(&self, mut work: ChainWork) -> Result<(), CommitError> {
        trace!(
            "feed trans(cnt={}, scope={})",
            work.trans.events.len(),
            work.trans.scope
        );
        let timeout = work.trans.timeout.clone();
        let receiver = {
            let mut lock = self.active.write().await;
            if let Some(pipe) = lock.as_mut() {
                pipe.feed(&mut work.trans).await?
            } else if self.mode.should_error_out() {
                bail!(CommitErrorKind::CommsError(CommsErrorKind::Disconnected));
            } else if self.mode.should_go_readonly() {
                bail!(CommitErrorKind::CommsError(CommsErrorKind::ReadOnly));
            } else {
                None
            }
        };
        // If we need to wait for the transaction to commit then do so
        if let Some(mut receiver) = receiver {
            trace!("waiting for transaction to commit");
            match crate::engine::timeout(timeout, receiver.recv()).await {
                Ok(Some(result)) => {
                    {
                        // A successful commit proves the server accepts writes
                        let mut lock = self.active.write().await;
                        if let Some(pipe) = lock.as_mut() {
                            pipe.likely_read_only = false;
                        }
                    }
                    let commit_id = result?;
                    trace!("transaction committed: {}", commit_id);
                }
                Ok(None) => {
                    debug!("transaction has aborted");
                    bail!(CommitErrorKind::Aborted);
                }
                Err(elapsed) => {
                    debug!("transaction has timed out");
                    bail!(CommitErrorKind::Timeout(elapsed.to_string()));
                }
            };
        }
        // Now we pass on the transaction work to the local chain
        self.next.feed(work).await
    }
    /// Attempt to lock a row, first locally (to avoid needless round-trips on
    /// local contention) then at the server. Returns false when disconnected
    /// (unless the recovery mode mandates an error).
    async fn try_lock(&self, key: PrimaryKey) -> Result<bool, CommitError> {
        // If we are not active then fail
        let mut lock = self.active.write().await;
        if lock.is_none() {
            return Ok(false);
        }
        // First we do a lock locally so that we reduce the number of
        // collisions on the main server itself
        if self.next.try_lock(key).await? == false {
            return Ok(false);
        }
        // Now process it in the active pipe
        if let Some(pipe) = lock.as_mut() {
            return pipe.try_lock(key).await;
        } else if self.mode.should_error_out() {
            bail!(CommitErrorKind::CommsError(CommsErrorKind::Disconnected));
        } else if self.mode.should_go_readonly() {
            bail!(CommitErrorKind::CommsError(CommsErrorKind::ReadOnly));
        } else {
            return Ok(false);
        }
    }
    /// Release only the local lock; the server-side lock is untouched.
    fn unlock_local(&self, key: PrimaryKey) -> Result<(), CommitError> {
        self.next.unlock_local(key)
    }
    /// Release a lock locally and then at the server (honouring the recovery
    /// mode when disconnected).
    async fn unlock(&self, key: PrimaryKey) -> Result<(), CommitError> {
        // First we unlock any local locks so errors do not kill access
        // to the data object
        self.next.unlock(key).await?;
        // Now unlock it at the server
        let mut lock = self.active.write().await;
        if let Some(pipe) = lock.as_mut() {
            pipe.unlock(key).await?
        } else if self.mode.should_error_out() {
            bail!(CommitErrorKind::CommsError(CommsErrorKind::Disconnected));
        } else if self.mode.should_go_readonly() {
            bail!(CommitErrorKind::CommsError(CommsErrorKind::ReadOnly));
        }
        Ok(())
    }
    /// Replace the downstream pipe this one forwards to.
    fn set_next(&mut self, next: Arc<Box<dyn EventPipe>>) {
        let _ = std::mem::replace(&mut self.next, next);
    }
    /// Conversation of the active session, if one is connected.
    async fn conversation(&self) -> Option<Arc<ConversationSession>> {
        let lock = self.active.read().await;
        if let Some(pipe) = lock.as_ref() {
            return pipe.conversation();
        }
        None
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/client.rs | lib/src/mesh/client.rs | use crate::{header::PrimaryKey, pipe::EventPipe};
use async_trait::async_trait;
use error_chain::bail;
use fxhash::FxHashMap;
use std::sync::Weak;
use std::time::Duration;
use std::{collections::hash_map::Entry, sync::Arc};
use tokio::sync::Mutex;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::{Instrument, WithSubscriber};
use super::core::*;
use super::msg::*;
use super::session::*;
use crate::chain::*;
use crate::comms::StreamProtocol;
use crate::conf::*;
use crate::error::*;
use crate::loader::Loader;
use crate::prelude::NodeId;
use crate::prelude::TaskEngine;
use crate::transaction::*;
use crate::trust::*;
/// Client-side entry point into the mesh: resolves chain keys to root nodes
/// and caches one [`MeshClientSession`] per chain key.
pub struct MeshClient {
    cfg_ate: ConfAte,
    cfg_mesh: ConfMesh,
    // Hash ring used to map chain keys onto root node addresses
    lookup: MeshHashTable,
    node_id: NodeId,
    // Forwarded to `ChainBuilder::temporal` when opening chains
    temporal: bool,
    // One session per chain key, created lazily on open
    sessions: Mutex<FxHashMap<ChainKey, Arc<MeshClientSession>>>,
}
/// Per-chain-key session cache entry. Holds only a weak reference to the
/// chain so it is released once all strong users drop it.
pub struct MeshClientSession {
    key: ChainKey,
    chain: Mutex<Weak<Chain>>,
}
impl MeshClientSession {
    /// Return the already-open chain for this session if it is still alive;
    /// never initiates a connection.
    pub async fn try_open_ext<'a>(
        &'a self,
    ) -> Result<Option<Arc<Chain>>, ChainCreationError> {
        let chain = self.chain.lock().await;
        if let Some(chain) = chain.upgrade() {
            trace!("reusing chain {}", self.key);
            return Ok(Some(chain));
        }
        Ok(None)
    }
    /// Return the cached chain if still alive, otherwise connect and cache a
    /// new one. The mutex is held across the connect so that concurrent
    /// opens for the same key coalesce into a single connection.
    pub async fn open_ext<'a>(
        &'a self,
        client: &MeshClient,
        hello_path: String,
        loader_local: impl Loader + 'static,
        loader_remote: impl Loader + 'static,
    ) -> Result<Arc<Chain>, ChainCreationError> {
        let mut chain = self.chain.lock().await;
        if let Some(chain) = chain.upgrade() {
            trace!("reusing chain {}", self.key);
            return Ok(chain);
        }
        trace!("creating chain {}", self.key);
        let ret = self
            .open_ext_internal(client, hello_path, loader_local, loader_remote)
            .await?;
        *chain = Arc::downgrade(&ret);
        Ok(ret)
    }
    /// Resolve the responsible root node for this chain key (or the forced
    /// address, when configured) and connect a new [`MeshSession`] to it.
    pub async fn open_ext_internal<'a>(
        &'a self,
        client: &MeshClient,
        hello_path: String,
        loader_local: impl Loader + 'static,
        loader_remote: impl Loader + 'static,
    ) -> Result<Arc<Chain>, ChainCreationError> {
        debug!(key = self.key.to_string().as_str());
        debug!(path = hello_path.as_str());
        let (peer_addr, _) = match client.lookup.lookup(&self.key) {
            Some(a) => a,
            None => {
                bail!(ChainCreationErrorKind::NoRootFoundInConfig);
            }
        };
        // A forced address (used e.g. for redirects) overrides the ring lookup
        let addr = match &client.cfg_mesh.force_connect {
            Some(a) => a.clone(),
            None => peer_addr,
        };
        let builder = ChainBuilder::new(&client.cfg_ate)
            .await
            .node_id(client.node_id.clone())
            .temporal(client.temporal);
        trace!("connecting to {}", addr);
        let chain = MeshSession::connect(
            builder,
            &client.cfg_mesh,
            &self.key,
            client.cfg_mesh.remote.clone(),
            addr,
            client.node_id.clone(),
            hello_path,
            loader_local,
            loader_remote,
        )
        .await?;
        Ok(chain)
    }
}
impl MeshClient {
    /// Builds a new mesh client with an empty session cache.
    pub(super) fn new(
        cfg_ate: &ConfAte,
        cfg_mesh: &ConfMesh,
        node_id: NodeId,
        temporal: bool,
    ) -> Arc<MeshClient> {
        let lookup = MeshHashTable::new(cfg_mesh);
        Arc::new(MeshClient {
            cfg_ate: cfg_ate.clone(),
            cfg_mesh: cfg_mesh.clone(),
            lookup,
            node_id,
            temporal,
            sessions: Mutex::new(FxHashMap::default()),
        })
    }
    /// Attempts to reuse an already-open chain; never connects.
    pub async fn try_open_ext<'a>(
        &'a self,
        key: &ChainKey,
    ) -> Result<Option<Arc<Chain>>, ChainCreationError> {
        // Grab a strong handle to the session (if any) and release the map lock
        let session = {
            let sessions = self.sessions.lock().await;
            match sessions.get(key) {
                Some(record) => Arc::clone(record),
                None => return Ok(None),
            }
        };
        session.try_open_ext().await
    }
    /// Opens (or reuses) the chain for `key`, creating the session entry
    /// on demand.
    pub async fn open_ext<'a>(
        &'a self,
        key: &ChainKey,
        hello_path: String,
        loader_local: impl Loader + 'static,
        loader_remote: impl Loader + 'static,
    ) -> Result<Arc<Chain>, ChainCreationError> {
        // Insert-or-fetch the session while holding the map lock, then drop it
        let session = {
            let mut sessions = self.sessions.lock().await;
            let record = sessions.entry(key.clone()).or_insert_with(|| {
                Arc::new(MeshClientSession {
                    key: key.clone(),
                    chain: Mutex::new(Weak::new()),
                })
            });
            Arc::clone(record)
        };
        session
            .open_ext(self, hello_path, loader_local, loader_remote)
            .await
    }
    /// Builder-style setter for temporal (non-persistent) mode.
    pub fn temporal(mut self, val: bool) -> Self {
        self.temporal = val;
        self
    }
}
impl Drop for MeshClient {
    fn drop(&mut self) {
        // Emit the trace inside a span identifying this client node
        let id = self.node_id.to_short_string();
        let span = span!(Level::TRACE, "client", id = id.as_str());
        let _enter = span.enter();
        trace!("drop (out-of-scope)");
    }
}
impl MeshClient {
    /// Opens the chain identified by `key` on the mesh behind `url`, using
    /// no-op loaders and the URL path as the hello path.
    pub async fn open(
        self: &Arc<MeshClient>,
        url: &'_ url::Url,
        key: &'_ ChainKey,
    ) -> Result<Arc<Chain>, ChainCreationError> {
        let loader_local = crate::loader::DummyLoader::default();
        let loader_remote = crate::loader::DummyLoader::default();
        let hello_path = url.path().to_string();
        // `key` is already a reference; pass it through directly instead of
        // taking a needless second borrow (`&key` was `&&ChainKey`)
        self.open_ext(key, hello_path, loader_local, loader_remote)
            .await
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/registry.rs | lib/src/mesh/registry.rs | #![allow(unused_imports)]
use async_trait::async_trait;
use error_chain::bail;
use fxhash::FxHashMap;
use once_cell::sync::Lazy;
use derivative::*;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::net::Ipv4Addr;
use std::net::Ipv6Addr;
use std::net::SocketAddr;
use std::net::ToSocketAddrs;
use std::ops::Deref;
use std::str::FromStr;
use std::sync::Mutex as StdMutex;
use std::sync::RwLock as StdRwLock;
use std::time::Duration;
use std::{net::IpAddr, sync::Arc};
use tokio::sync::Mutex;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use url::Url;
use crate::chain::Chain;
use crate::chain::ChainKey;
#[cfg(feature = "enable_dns")]
use crate::dns::*;
use crate::engine::TaskEngine;
use crate::error::*;
use crate::loader;
use crate::mesh::*;
use crate::prelude::*;
use crate::service::Service;
use crate::utils::chain_key_16hex;
use crate::{conf::ConfAte, error::ChainCreationError};
/// Central registry of mesh clients and services.
///
/// Caches one `MeshClient` per remote URL so repeated opens of chains on the
/// same server share connections; also holds client-wide policy flags
/// (temporal mode, fail-fast, certificate handling, keep-alive).
#[derive(Derivative)]
#[derivative(Debug)]
pub struct Registry {
    pub cfg_ate: ConfAte,
    #[derivative(Debug = "ignore")]
    #[cfg(feature = "enable_dns")]
    // DNS resolver used to discover root nodes and published certificates
    dns: Mutex<DnsClient>,
    // When true, chains are opened without local persistence
    pub temporal: bool,
    pub node_id: NodeId,
    // Fail connection attempts quickly instead of retrying
    pub fail_fast: bool,
    // If set, dropped ChainGuards keep their chain warm for this duration
    pub keep_alive: Option<Duration>,
    // Accept any server certificate (also auto-enabled for localhost)
    pub ignore_certificates: bool,
    // Cached random command-chain key per URL (see chain_key_cmd)
    cmd_key: StdMutex<FxHashMap<url::Url, String>>,
    #[derivative(Debug = "ignore")]
    #[cfg(feature = "enable_client")]
    // One mesh client per remote URL
    remotes: Mutex<FxHashMap<url::Url, Arc<MeshClient>>>,
    #[derivative(Debug = "ignore")]
    pub(crate) services: StdMutex<Vec<Arc<dyn Service>>>,
}
impl Registry {
    /// Builds a new registry from the given ATE configuration.
    ///
    /// Connects the DNS client up-front (when the `enable_dns` feature is
    /// active) and generates a fresh client node id. `temporal` defaults to
    /// true unless a local log path is configured.
    pub async fn new(cfg_ate: &ConfAte) -> Registry {
        #[cfg(feature = "enable_dns")]
        let dns = {
            let dns = DnsClient::connect(cfg_ate).await;
            Mutex::new(dns)
        };
        let node_id = NodeId::generate_client_id();
        Registry {
            cfg_ate: cfg_ate.clone(),
            fail_fast: true,
            #[cfg(feature = "enable_dns")]
            dns,
            node_id,
            // Persistent only when a local log path exists; otherwise temporal
            #[cfg(feature = "enable_local_fs")]
            temporal: cfg_ate.log_path.is_none(),
            #[cfg(not(feature = "enable_local_fs"))]
            temporal: true,
            ignore_certificates: false,
            cmd_key: StdMutex::new(FxHashMap::default()),
            #[cfg(feature = "enable_client")]
            remotes: Mutex::new(FxHashMap::default()),
            services: StdMutex::new(Vec::new()),
            keep_alive: None,
        }
    }
    /// Keeps chains warm for `duration` after their last guard is dropped.
    pub fn keep_alive(mut self, duration: Duration) -> Self {
        self.keep_alive = Some(duration);
        self
    }
    /// Sets whether chains are opened in temporal (non-persistent) mode.
    pub fn temporal(mut self, temporal: bool) -> Self {
        self.temporal = temporal;
        self
    }
    /// Sets whether connection attempts fail fast rather than retry.
    pub fn fail_fast(mut self, fail_fast: bool) -> Self {
        self.fail_fast = fail_fast;
        self
    }
    /// Disables server certificate validation (intended for testing only).
    pub fn ignore_certificates(mut self) -> Self {
        self.ignore_certificates = true;
        self
    }
    /// Freezes the builder into a shareable `Arc<Registry>`.
    pub fn cement(self) -> Arc<Self> {
        Arc::new(self)
    }
pub async fn open_cmd(&self, url: &Url) -> Result<ChainGuard, ChainCreationError> {
async {
if let Some(a) = self.try_reuse(url, &self.chain_key_cmd(url, true)).await.ok().flatten() {
Ok(a)
} else {
Ok(self.open(url, &self.chain_key_cmd(url, false), true).await?)
}
}
.await
}
pub async fn open(&self, url: &Url, key: &ChainKey, force_temporal: bool) -> Result<ChainGuard, ChainCreationError> {
let loader_local = loader::DummyLoader::default();
let loader_remote = loader::DummyLoader::default();
Ok(self
.open_ext(url, key, force_temporal, loader_local, loader_remote)
.await?)
}
pub async fn try_reuse(
&self,
url: &Url,
key: &ChainKey,
) -> Result<Option<ChainGuard>, ChainCreationError> {
Ok(self.try_reuse_ext(url, key).await?)
}
#[cfg(feature = "enable_client")]
pub async fn try_reuse_ext(
&self,
url: &Url,
key: &ChainKey,
) -> Result<Option<ChainGuard>, ChainCreationError> {
let client = {
let lock = self.remotes.lock().await;
match lock.get(&url) {
Some(a) => Arc::clone(a),
None => {
trace!("no chain to reuse for chain ({})", key);
return Ok(None);
}
}
};
trace!("trying reuse chain ({}) on mesh client for {}", key, url);
let ret = client.try_open_ext(&key).await?;
let ret = match ret {
Some(a) => a,
None => {
trace!("reuse not possible for chain ({})", key);
return Ok(None);
}
};
Ok(Some(ChainGuard {
chain: ret,
keep_alive: self.keep_alive.clone(),
}))
}
#[cfg(not(feature = "enable_client"))]
pub async fn try_open_ext(
&self,
_url: &Url,
_key: &ChainKey,
) -> Result<Option<ChainGuard>, ChainCreationError> {
return Err(ChainCreationErrorKind::InternalError(
"client connections are unsupported".to_string(),
)
.into());
}
#[cfg(feature = "enable_client")]
pub async fn open_ext(
&self,
url: &Url,
key: &ChainKey,
force_temporal: bool,
loader_local: impl loader::Loader + 'static,
loader_remote: impl loader::Loader + 'static,
) -> Result<ChainGuard, ChainCreationError> {
let client = {
let mut lock = self.remotes.lock().await;
match lock.get(&url) {
Some(a) => Arc::clone(a),
None => {
trace!("perf-checkpoint: creating mesh client");
trace!("building mesh client for {}", url);
let cfg_mesh = self.cfg_for_url(url).await?;
let mesh = MeshClient::new(
&self.cfg_ate,
&cfg_mesh,
self.node_id.clone(),
force_temporal | self.temporal,
);
lock.insert(url.clone(), Arc::clone(&mesh));
Arc::clone(&mesh)
}
}
};
trace!("opening chain ({}) on mesh client for {}", key, url);
trace!("perf-checkpoint: open_ext (hello_path={})", url.path());
let hello_path = url.path().to_string();
let ret = client
.open_ext(&key, hello_path, loader_local, loader_remote)
.await?;
Ok(ChainGuard {
chain: ret,
keep_alive: self.keep_alive.clone(),
})
}
#[cfg(not(feature = "enable_client"))]
pub async fn open_ext(
&self,
_url: &Url,
_key: &ChainKey,
_loader_local: impl loader::Loader + 'static,
_loader_remote: impl loader::Loader + 'static,
) -> Result<ChainGuard, ChainCreationError> {
return Err(ChainCreationErrorKind::InternalError(
"client connections are unsupported".to_string(),
)
.into());
}
    /// Builds a mesh configuration for the given URL.
    ///
    /// Derives the wire protocol and port from the URL, resolves root nodes
    /// for the domain, and assembles the certificate-validation policy:
    /// AllowAll for localhost or when validation is disabled, otherwise the
    /// global certificates plus (with `enable_dns`) DNS-published ones.
    pub async fn cfg_for_url(&self, url: &Url) -> Result<ConfMesh, ChainCreationError> {
        let protocol = StreamProtocol::parse(url)?;
        let port = match url.port() {
            Some(a) => a,
            None => protocol.default_port(),
        };
        let domain = match url.domain() {
            Some(a) => a,
            None => {
                bail!(ChainCreationErrorKind::NoValidDomain(url.to_string()));
            }
        };
        let mut ret = self.cfg_for_domain(domain, port).await?;
        ret.remote = url.clone();
        ret.wire_protocol = protocol;
        // Set the fail fast
        ret.fail_fast = self.fail_fast;
        // Set the ignore certificates
        if self.ignore_certificates {
            ret.certificate_validation = CertificateValidation::AllowAll;
        } else if url.domain() == Some("localhost") {
            ret.certificate_validation = CertificateValidation::AllowAll;
        }
        // Add all the global certificates
        if let CertificateValidation::AllowedCertificates(allowed) = &mut ret.certificate_validation
        {
            for cert in ate_comms::get_global_certificates() {
                allowed.push(cert.clone());
            }
        }
        // Perform a DNS query on the domain and pull down TXT records
        #[cfg(feature = "enable_dns")]
        if let CertificateValidation::AllowedCertificates(allowed) = &mut ret.certificate_validation
        {
            let mut certs = self.dns_certs(domain).await?;
            allowed.append(&mut certs);
        }
        Ok(ret)
    }
    /// Resolves the set of root node addresses for a domain.
    ///
    /// Explicitly configured nodes (`cfg_ate.nodes`) win outright; otherwise
    /// the domain is resolved via DNS (or, without `enable_dns`, used
    /// verbatim as a host name). Errors when no root can be found.
    async fn cfg_roots(
        &self,
        domain: &str,
        port: u16,
    ) -> Result<Vec<MeshAddress>, ChainCreationError> {
        let mut roots = Vec::new();
        // Explicit node list takes precedence over any DNS discovery
        if let Some(nodes) = &self.cfg_ate.nodes {
            let mut r = Vec::new();
            for node in nodes.iter() {
                r.push(MeshAddress {
                    port,
                    // With DNS enabled the configured nodes must be IPs
                    #[cfg(feature = "enable_dns")]
                    host: IpAddr::from_str(node.as_str())
                        .map_err(|err| {
                            ChainCreationError::from(
                                ChainCreationErrorKind::InternalError(err.to_string())
                            )
                        })?,
                    #[cfg(not(feature = "enable_dns"))]
                    host: node.clone(),
                });
            }
            return Ok(r);
        };
        // Search DNS for entries for this server (Ipv6 takes priority over Ipv4)
        #[cfg(feature = "enable_dns")]
        {
            let mut addrs = self.dns_query(domain).await?;
            if addrs.len() <= 0 {
                debug!("no nodes found for {}", domain);
            }
            addrs.sort();
            for addr in addrs.iter() {
                debug!("found node {}", addr);
            }
            // Add the cluster to the configuration
            for addr in addrs {
                let addr = MeshAddress::new(addr, port);
                roots.push(addr);
            }
        };
        #[cfg(not(feature = "enable_dns"))]
        {
            let addr = MeshAddress::new(domain, port);
            roots.push(addr);
        }
        if roots.len() <= 0 {
            bail!(ChainCreationErrorKind::NoRootFoundForDomain(
                domain.to_string()
            ));
        }
        Ok(roots)
    }
#[cfg(feature = "enable_dns")]
pub async fn dns_certs(&self, name: &str) -> Result<Vec<AteHash>, ClientError> {
match name.to_lowercase().as_str() {
"localhost" => {
return Ok(Vec::new());
}
_ => {}
};
if let Ok(_) = IpAddr::from_str(name) {
return Ok(Vec::new());
}
trace!("dns_query for {}", name);
let mut client = self.dns.lock().await;
let mut txts = Vec::new();
if let Some(response) = client
.query(Name::from_str(name).unwrap(), DNSClass::IN, RecordType::TXT)
.await
.ok()
{
for answer in response.answers() {
if let RData::TXT(ref txt) = *answer.rdata() {
txts.push(txt.to_string());
}
}
}
let prefix = "ate-cert-";
let mut certs = Vec::new();
for txt in txts {
let txt = txt.replace(" ", "");
if txt.trim().starts_with(prefix) {
let start = prefix.len();
let hash = &txt.trim()[start..];
if let Some(hash) = AteHash::from_hex_string(hash) {
trace!("found certificate({}) for {}", hash, name);
certs.push(hash);
}
}
}
trace!(
"dns_query for {} returned {} certificates",
name,
certs.len()
);
Ok(certs)
}
#[cfg(feature = "enable_dns")]
pub async fn dns_query(&self, name: &str) -> Result<Vec<IpAddr>, ClientError> {
match name.to_lowercase().as_str() {
"localhost" => return Ok(vec![IpAddr::V4(Ipv4Addr::from_str("127.0.0.1").unwrap())]),
_ => {}
};
if let Ok(ip) = IpAddr::from_str(name) {
return Ok(vec![ip]);
}
trace!("dns_query for {}", name);
let mut client = self.dns.lock().await;
let mut addrs = Vec::new();
if let Some(response) = client
.query(
Name::from_str(name).unwrap(),
DNSClass::IN,
RecordType::A,
)
.await
.ok()
{
for answer in response.answers() {
if let RData::A(ref address) = *answer.rdata() {
addrs.push(IpAddr::V4(address.clone()));
}
}
}
if addrs.len() <= 0 {
let response = client
.query(
Name::from_str(name).unwrap(),
DNSClass::IN,
RecordType::AAAA)
.await?;
for answer in response.answers() {
if let RData::AAAA(ref address) = *answer.rdata() {
addrs.push(IpAddr::V6(address.clone()));
}
}
}
trace!("dns_query for {} returned {} addresses", name, addrs.len());
Ok(addrs)
}
pub(crate) async fn cfg_for_domain(
&self,
domain_name: &str,
port: u16,
) -> Result<ConfMesh, ChainCreationError> {
let roots = self.cfg_roots(domain_name, port).await?;
let remote = url::Url::parse(format!("{}://{}", Self::guess_schema(port), domain_name).as_str())?;
let ret = ConfMesh::new(domain_name, remote, roots.iter());
Ok(ret)
}
pub fn guess_schema(port: u16) -> &'static str {
match port {
80 => "ws",
443 => "wss",
_ => "tcp"
}
}
    /// Will generate a random command key - reused for 30 seconds to improve performance
    /// (note: this cache time must be less than the server cache time on commands)
    ///
    /// NOTE(review): entries in `cmd_key` are never evicted here, so they
    /// appear to live for the registry lifetime rather than 30 seconds —
    /// confirm the aging happens elsewhere.
    fn chain_key_cmd(&self, url: &url::Url, reuse: bool) -> ChainKey {
        // Synchronous mutex is fine: no await happens while it is held
        let mut guard = self.cmd_key.lock().unwrap();
        if reuse {
            if let Some(hex) = guard.get(url) {
                return chain_key_16hex(hex.as_str(), Some("cmd"));
            }
        }
        // Mint a new random key and remember it for later reuse
        let hex = AteHash::generate().to_hex_string();
        guard.insert(url.clone(), hex.clone());
        chain_key_16hex(hex.as_str(), Some("cmd"))
    }
}
/// Cloneable handle to an open chain.
///
/// When a guard is dropped and `keep_alive` is set, the chain is kept warm
/// for that duration before a shutdown is attempted (see the Drop impl).
#[derive(Clone)]
pub struct ChainGuard {
    // Warm-down duration applied on drop (None = no warm-down task)
    keep_alive: Option<Duration>,
    chain: Arc<Chain>,
}
impl ChainGuard {
    /// Borrows the underlying chain.
    pub fn as_ref(&self) -> &Chain {
        self.chain.deref()
    }
    /// Returns a new strong reference to the underlying chain.
    pub fn as_arc(&self) -> Arc<Chain> {
        Arc::clone(&self.chain)
    }
    /// Opens a read-only data access layer on the chain.
    pub async fn dio(&self, session: &'_ dyn AteSession) -> Arc<Dio> {
        self.chain.dio(session).await
    }
    /// Opens a data access layer that allows mutable changes to data.
    /// Transaction consistency on commit will be guaranteed for local redo log files
    pub async fn dio_mut(&self, session: &'_ dyn AteSession) -> Arc<DioMut> {
        self.chain.dio_mut(session).await
    }
    /// Opens a data access layer that allows mutable changes to data (in a fire-and-forget mode).
    /// No transaction consistency on commits will be enforced
    pub async fn dio_fire(&self, session: &'_ dyn AteSession) -> Arc<DioMut> {
        self.chain.dio_fire(session).await
    }
    /// Opens a data access layer that allows mutable changes to data.
    /// Transaction consistency on commit will be guaranteed for all remote replicas
    pub async fn dio_full(&self, session: &'_ dyn AteSession) -> Arc<DioMut> {
        self.chain.dio_full(session).await
    }
    /// Opens a data access layer that allows mutable changes to data.
    /// Transaction consistency on commit must be specified
    pub async fn dio_trans(
        &self,
        session: &'_ dyn AteSession,
        scope: TransactionScope,
    ) -> Arc<DioMut> {
        self.chain.dio_trans(session, scope).await
    }
    /// Invokes a request/response service call on this chain.
    pub async fn invoke<REQ, RES, ERR>(&self, request: REQ) -> Result<Result<RES, ERR>, InvokeError>
    where
        REQ: Clone + Serialize + DeserializeOwned + Sync + Send + ?Sized,
        RES: Serialize + DeserializeOwned + Sync + Send + ?Sized,
        ERR: Serialize + DeserializeOwned + Sync + Send + ?Sized,
    {
        self.as_arc().invoke(request).await
    }
    /// Invokes a service call with an explicit session and timeout.
    pub async fn invoke_ext<REQ, RES, ERR>(
        &self,
        session: Option<&'_ dyn AteSession>,
        request: REQ,
        timeout: Duration,
    ) -> Result<Result<RES, ERR>, InvokeError>
    where
        REQ: Clone + Serialize + DeserializeOwned + Sync + Send + ?Sized,
        RES: Serialize + DeserializeOwned + Sync + Send + ?Sized,
        ERR: Serialize + DeserializeOwned + Sync + Send + ?Sized,
    {
        self.as_arc().invoke_ext(session, request, timeout).await
    }
}
impl Deref for ChainGuard {
    type Target = Chain;
    /// Dereferences straight through to the wrapped chain.
    fn deref(&self) -> &Self::Target {
        &*self.chain
    }
}
impl Drop for ChainGuard {
    /// On drop, optionally keeps the chain warm for the configured period,
    /// then attempts a shutdown if this guard held the last strong reference.
    fn drop(&mut self) {
        if let Some(duration) = &self.keep_alive {
            // The spawned task owns its own Arc, extending the chain lifetime
            let chain = Arc::clone(&self.chain);
            let duration = duration.clone();
            TaskEngine::spawn(async move {
                trace!("keep-alive: warm down for {}", chain.key());
                crate::engine::sleep(duration).await;
                // If we are the last then do a cleanup
                if Arc::strong_count(&chain) <= 1 {
                    let ret = chain.shutdown().await;
                    if let Err(err) = ret {
                        error!("shutdown failed during guard drop - {}", err);
                    }
                }
                drop(chain);
            });
        }
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/mod.rs | lib/src/mesh/mod.rs | #![cfg_attr(debug_assertions, allow(dead_code, unused_imports))]
#![allow(unused_imports)]
use tracing::trace;
use tracing::{debug, error, info};
mod active_session_pipe;
#[cfg(feature = "enable_client")]
mod client;
mod core;
mod lock_request;
mod msg;
mod recoverable_session_pipe;
#[cfg(feature = "enable_server")]
mod redirect;
mod registry;
#[cfg(feature = "enable_server")]
mod server;
mod session;
mod test;
use async_trait::async_trait;
use bytes::Bytes;
use fxhash::FxHashMap;
use once_cell::sync::Lazy;
use serde::{Deserialize, Serialize};
use std::future::Future;
use std::pin::Pin;
use std::sync::Mutex as StdMutex;
use std::sync::RwLock as StdRwLock;
use std::sync::Weak;
use std::{collections::hash_map::Entry, collections::BTreeMap, sync::Arc};
use std::{
net::{IpAddr, Ipv6Addr},
str::FromStr,
};
use tokio::sync::mpsc;
use tokio::sync::{Mutex, RwLock};
use tokio::io::{AsyncRead, AsyncWrite};
use super::chain::*;
use super::chain::*;
use super::comms::*;
use super::conf::*;
use super::crypto::AteHash;
use super::error::*;
use super::event::*;
#[cfg(feature = "enable_server")]
use super::flow::*;
use super::session::*;
use super::transaction::*;
use super::trust::*;
use crate::dio::*;
use crate::engine::TaskEngine;
#[cfg(feature = "enable_server")]
use crate::flow::basic::*;
use crate::mesh::msg::*;
use crate::meta::*;
use crate::pipe::*;
#[cfg(feature = "enable_client")]
pub(crate) use crate::mesh::client::MeshClient;
pub(crate) use session::MeshSession;
pub use crate::mesh::core::MeshHashTable;
pub use self::core::BackupMode;
pub use self::core::RecoveryMode;
pub use self::msg::FatalTerminate;
pub use crate::loader::Loader;
pub use crate::mesh::registry::ChainGuard;
pub use crate::mesh::registry::Registry;
#[cfg(feature = "enable_server")]
pub use crate::mesh::server::MeshRoot;
/// Computes which addresses this process should listen on and the full set
/// of root addresses, based on configuration and (with `enable_dns`) the
/// local network interfaces.
fn create_prepare<'a, 'b>(cfg_mesh: &'b ConfMesh) -> (Vec<MeshAddress>, Vec<MeshAddress>) {
    // NOTE(review): hash_table is built but never read in this function —
    // confirm whether it can be removed.
    let mut hash_table = BTreeMap::new();
    for addr in cfg_mesh.roots.iter() {
        hash_table.insert(addr.hash(), addr.clone());
    }
    #[allow(unused_mut)]
    let mut listen_root_addresses = Vec::new();
    #[allow(unused_mut)]
    let mut all_root_addresses = Vec::new();
    // An explicitly forced listen address always wins
    #[cfg(feature = "enable_server")]
    if let Some(addr) = &cfg_mesh.force_listen {
        listen_root_addresses.push(addr.clone());
        all_root_addresses.push(addr.clone());
    }
    #[cfg(feature = "enable_dns")]
    {
        let local_ips = pnet::datalink::interfaces()
            .iter()
            .flat_map(|i| i.ips.iter())
            .map(|i| i.ip())
            .collect::<Vec<_>>();
        // Listen on any configured root whose host matches one of our IPs
        if listen_root_addresses.len() <= 0 && cfg_mesh.force_client_only == false {
            for local_ip in local_ips.iter() {
                trace!("Found Local IP - {}", local_ip);
                for root in cfg_mesh.roots.iter() {
                    if root.host == *local_ip {
                        listen_root_addresses.push(root.clone());
                    }
                }
            }
        }
        // Port priority: forced port > forced listen addr > remote URL > 443
        #[cfg(feature = "enable_server")]
        let port = {
            match cfg_mesh.force_port {
                Some(a) => a,
                None => match &cfg_mesh.force_listen {
                    Some(a) => a.port,
                    None => {
                        match StreamProtocol::parse(&cfg_mesh.remote) {
                            Ok(protocol) => {
                                cfg_mesh.remote.port().unwrap_or(protocol.default_port())
                            }
                            _ => 443
                        }
                    }
                }
            }
        };
        #[cfg(not(feature = "enable_server"))]
        let port = match StreamProtocol::parse(&cfg_mesh.remote) {
            Ok(protocol) => {
                cfg_mesh.remote.port().unwrap_or(protocol.default_port())
            }
            _ => 443
        };
        // Every local IP counts as a root address at the derived port
        for local_ip in local_ips.iter() {
            all_root_addresses.push(MeshAddress {
                host: local_ip.clone(),
                port,
            });
        }
    }
    (listen_root_addresses, all_root_addresses)
}
#[cfg(feature = "enable_server")]
pub async fn create_persistent_centralized_server(
    cfg_ate: &ConfAte,
    cfg_mesh: &ConfMesh,
) -> Result<Arc<MeshRoot>, CommsError> {
    // Spin up the server, then attach the persistent + centralized flow
    let root = create_server(cfg_mesh).await?;
    let flow = super::flow::all_persistent_and_centralized().await;
    root.add_route(flow, cfg_ate).await?;
    Ok(root)
}
#[cfg(feature = "enable_server")]
pub async fn create_persistent_distributed_server(
    cfg_ate: &ConfAte,
    cfg_mesh: &ConfMesh,
) -> Result<Arc<MeshRoot>, CommsError> {
    // Spin up the server, then attach the persistent + distributed flow
    let root = create_server(cfg_mesh).await?;
    let flow = super::flow::all_persistent_and_distributed().await;
    root.add_route(flow, cfg_ate).await?;
    Ok(root)
}
#[cfg(feature = "enable_server")]
pub async fn create_ethereal_centralized_server(
    cfg_ate: &ConfAte,
    cfg_mesh: &ConfMesh,
) -> Result<Arc<MeshRoot>, CommsError> {
    // Spin up the server, then attach the ethereal + centralized flow
    let root = create_server(cfg_mesh).await?;
    let flow = super::flow::all_ethereal_centralized().await;
    root.add_route(flow, cfg_ate).await?;
    Ok(root)
}
#[cfg(feature = "enable_server")]
pub async fn create_ethereal_distributed_server(
    cfg_ate: &ConfAte,
    cfg_mesh: &ConfMesh,
) -> Result<Arc<MeshRoot>, CommsError> {
    // Spin up the server, then attach the ethereal + distributed flow
    let root = create_server(cfg_mesh).await?;
    let flow = super::flow::all_ethereal_distributed().await;
    root.add_route(flow, cfg_ate).await?;
    Ok(root)
}
#[cfg(feature = "enable_server")]
pub async fn create_server(cfg_mesh: &ConfMesh) -> Result<Arc<MeshRoot>, CommsError> {
    // Work out what to listen on, trace it, then build the root
    let (listen_root_addresses, all_root_addresses) = create_prepare(cfg_mesh);
    listen_root_addresses
        .iter()
        .for_each(|addr| trace!("listen address: {}", addr));
    MeshRoot::new(cfg_mesh, listen_root_addresses, all_root_addresses).await
}
#[cfg(feature = "enable_client")]
pub fn create_client(cfg_ate: &ConfAte, cfg_mesh: &ConfMesh, temporal: bool) -> Arc<MeshClient> {
    // Each client gets a freshly generated identity
    MeshClient::new(cfg_ate, cfg_mesh, NodeId::generate_client_id(), temporal)
}
#[cfg(feature = "enable_client")]
pub fn create_persistent_client(cfg_ate: &ConfAte, cfg_mesh: &ConfMesh) -> Arc<MeshClient> {
    // Persistent client: temporal mode disabled
    MeshClient::new(cfg_ate, cfg_mesh, NodeId::generate_client_id(), false)
}
#[cfg(feature = "enable_client")]
pub fn create_temporal_client(cfg_ate: &ConfAte, cfg_mesh: &ConfMesh) -> Arc<MeshClient> {
    // Temporal client: nothing is persisted locally
    MeshClient::new(cfg_ate, cfg_mesh, NodeId::generate_client_id(), true)
}
pub use ate_comms::add_global_certificate;
/// Global override for how raw communication streams are created.
///
/// When set (see `set_comm_factory`), the mesh layer asks this factory for
/// an `(AsyncRead, AsyncWrite)` pair for a given connect address instead of
/// opening a socket itself; `None` from the factory means no stream could
/// be produced.
pub(crate) static GLOBAL_COMM_FACTORY: Lazy<
    Mutex<
        Option<
            Arc<
                dyn Fn(
                    MeshConnectAddr,
                )
                    -> Pin<Box<dyn Future<Output = Option<
                    (
                        Box<dyn AsyncRead + Send + Sync + Unpin + 'static>,
                        Box<dyn AsyncWrite + Send + Sync + Unpin + 'static>
                    )
                >> + Send + Sync + 'static>>
                    + Send
                    + Sync
                    + 'static,
            >,
        >,
    >,
> = Lazy::new(|| Mutex::new(None));
/// Installs the global communication factory used by `GLOBAL_COMM_FACTORY`.
///
/// The factory maps a connect address to an optional duplex byte stream;
/// installing a new factory replaces (and drops) any previous one.
pub async fn set_comm_factory(
    funct: impl Fn(MeshConnectAddr) -> Pin<Box<dyn Future<Output = Option<
        (
            Box<dyn AsyncRead + Send + Sync + Unpin + 'static>,
            Box<dyn AsyncWrite + Send + Sync + Unpin + 'static>
        )
    >> + Send + Sync + 'static>>
    + Send
    + Sync
    + 'static,
) {
    GLOBAL_COMM_FACTORY.lock().await.replace(Arc::new(funct));
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/msg.rs | lib/src/mesh/msg.rs | use async_trait::async_trait;
use bytes::Bytes;
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use crate::chain::Chain;
use crate::chain::ChainKey;
use crate::crypto::AteHash;
use crate::crypto::PublicSignKey;
use crate::error::*;
use crate::event::*;
use crate::redo::LogLookup;
use crate::header::PrimaryKey;
use crate::pipe::EventPipe;
use crate::session::AteSessionUser;
use crate::spec::*;
use crate::time::ChainTimestamp;
use crate::{
crypto::{PrivateEncryptKey, PrivateSignKey},
meta::{CoreMetadata, Metadata},
};
use super::NodeId;
pub type MessageData = LogData;
pub type MessageDataRef<'a> = LogDataRef<'a>;
/// Wire representation of a single chain event: metadata plus an optional
/// payload (inline, lazily-referenced, or absent) and its serialization
/// format.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub(super) struct MessageEvent {
    pub(crate) meta: Metadata,
    // Payload: inline bytes, a lazy reference, or none
    pub(crate) data: MessageData,
    pub(crate) format: MessageFormat,
}
impl MessageEvent {
    /// Converts in-memory weak events into their wire form.
    pub(crate) fn convert_to(evts: &Vec<EventWeakData>) -> Vec<MessageEvent> {
        evts.iter()
            .map(|evt| MessageEvent {
                meta: evt.meta.clone(),
                data: match &evt.data_bytes {
                    MessageBytes::Some(d) => MessageData::Some(d.to_vec()),
                    MessageBytes::LazySome(l) => MessageData::LazySome(l.clone()),
                    MessageBytes::None => MessageData::None,
                },
                format: evt.format,
            })
            .collect()
    }
    /// Converts one wire event back into an in-memory weak event.
    pub(crate) fn convert_from_single(evt: MessageEvent) -> EventWeakData {
        let data_bytes = match evt.data {
            MessageData::Some(d) => MessageBytes::Some(Bytes::from(d)),
            MessageData::LazySome(l) => MessageBytes::LazySome(l.clone()),
            MessageData::None => MessageBytes::None,
        };
        EventWeakData {
            meta: evt.meta.clone(),
            data_bytes,
            format: evt.format,
        }
    }
    /// Converts a stream of wire events back into in-memory weak events.
    pub(crate) fn convert_from(evts: impl Iterator<Item = MessageEvent>) -> Vec<EventWeakData> {
        evts.map(MessageEvent::convert_from_single).collect()
    }
    /// Hash of the payload, when a payload (inline or lazy) is present.
    pub(crate) fn data_hash(&self) -> Option<AteHash> {
        match self.data.as_ref() {
            MessageDataRef::Some(d) => Some(AteHash::from_bytes(&d[..])),
            MessageDataRef::LazySome(l) => Some(l.hash),
            MessageDataRef::None => None,
        }
    }
}
/// Reasons a server fatally terminates a client stream.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum FatalTerminate {
    // Client acted before subscribing to a chain
    NotYetSubscribed,
    // The requested chain does not exist
    NotFound,
    // This root does not host the requested chain-of-trust
    NotThisRoot,
    // Client must reconnect to the node identified by `expected`
    RootRedirect { expected: u32, actual: u32 },
    // Access denied with a human-readable reason
    Denied { reason: String },
    // Catch-all for other fatal errors
    Other { err: String },
}
impl std::fmt::Display for FatalTerminate {
    /// Human-readable description of each fatal termination reason
    /// (used in logs and in errors surfaced to peers).
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            FatalTerminate::NotYetSubscribed => {
                write!(f, "Performed an action while the chain is not subscribed")
            }
            FatalTerminate::NotFound => {
                write!(f, "The chain is not found")
            }
            FatalTerminate::NotThisRoot => {
                write!(
                    f,
                    "Failed to create chain-of-trust as this is the wrong root node"
                )
            }
            FatalTerminate::RootRedirect { expected, actual } => {
                write!(f, "Failed to create chain-of-trust as the server you connected (node_id={}) is not hosting these chains - instead you must connect to another node (node_id={})", actual, expected)
            }
            FatalTerminate::Denied { reason } => {
                write!(f, "Access to this chain is denied - {}", reason)
            }
            FatalTerminate::Other { err } => {
                write!(f, "Fatal error occured - {}", err)
            }
        }
    }
}
/// Wire protocol messages exchanged between mesh clients and servers.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub(super) enum Message {
    Noop,
    // Client asks to subscribe to a chain from a point in time
    Subscribe {
        chain_key: ChainKey,
        from: ChainTimestamp,
        allow_redirect: bool,
        omit_data: bool,
    },
    // Free-form textual message for humans (diagnostics)
    HumanMessage {
        message: String,
    },
    ReadOnly,
    // Distributed lock acquire/release on a primary key
    Lock {
        key: PrimaryKey,
    },
    Unlock {
        key: PrimaryKey,
    },
    LockResult {
        key: PrimaryKey,
        is_locked: bool,
    },
    NewConversation {
        conversation_id: AteHash,
    },
    // Server announces the bounds and trust parameters of a history replay
    StartOfHistory {
        size: usize,
        from: Option<ChainTimestamp>,
        to: Option<ChainTimestamp>,
        integrity: TrustMode,
        root_keys: Vec<PublicSignKey>,
    },
    // A batch of events, optionally tied to a commit id
    Events {
        commit: Option<u64>,
        evts: Vec<MessageEvent>,
    },
    EndOfHistory,
    /// Asks to confirm all events are up-to-date for transaction keeping purposes
    Confirmed(u64),
    CommitError {
        id: u64,
        err: String,
    },
    FatalTerminate(FatalTerminate),
    SecuredWith(AteSessionUser),
    // Bulk load of lazy event payloads by hash
    LoadMany {
        id: u64,
        leafs: Vec<AteHash>,
    },
    LoadManyResult {
        id: u64,
        data: Vec<Option<Vec<u8>>>,
    },
    LoadManyFailed {
        id: u64,
        err: String,
    }
}
impl std::fmt::Display for Message {
    /// Compact single-line rendering of each message variant for logging.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            Message::Noop => write!(f, "noop"),
            Message::Subscribe { chain_key, from, allow_redirect, omit_data} => {
                if *omit_data {
                    if *allow_redirect {
                        write!(f, "subscribe(chain_key={}, from={}, omit_data, allow_redirect)", chain_key, from)
                    } else {
                        write!(f, "subscribe(chain_key={}, from={}, omit_data)", chain_key, from)
                    }
                } else {
                    if *allow_redirect {
                        write!(f, "subscribe(chain_key={}, from={}, allow_redirect)", chain_key, from)
                    } else {
                        write!(f, "subscribe(chain_key={}, from={})", chain_key, from)
                    }
                }
            },
            Message::HumanMessage { message } => write!(f, "human-message('{}')", message),
            Message::ReadOnly => write!(f, "read-only"),
            Message::Lock { key } => write!(f, "lock(key={})", key),
            Message::Unlock { key } => write!(f, "unlock(key={})", key),
            Message::LockResult { key, is_locked } => {
                if *is_locked {
                    write!(f, "lock-result(key={}, locked)", key)
                } else {
                    write!(f, "lock-result(key={}, unlocked)", key)
                }
            },
            Message::NewConversation { conversation_id } => write!(f, "new-conversation(id={})", conversation_id),
            Message::StartOfHistory { size, from, to, integrity, root_keys } => {
                write!(f, "start-of-history(size={}", size)?;
                if let Some(from) = from {
                    write!(f, ", from={}", from)?;
                }
                if let Some(to) = to {
                    write!(f, ", to={}", to)?;
                }
                write!(f, ", integrity={}, root_key_cnt={})", integrity, root_keys.len())
            },
            Message::Events { commit, evts } => {
                if let Some(commit) = commit {
                    write!(f, "events(commit={}, evt_cnt={})", commit, evts.len())
                } else {
                    write!(f, "events(evt_cnt={})", evts.len())
                }
            },
            Message::EndOfHistory => write!(f, "end-of-history"),
            Message::Confirmed(id) => write!(f, "confirmed({})", id),
            Message::CommitError { id, err } => write!(f, "commit-error(id={}, err='{}')", id, err),
            Message::FatalTerminate(why) => write!(f, "fatal-terminate({})", why),
            Message::SecuredWith(sess) => write!(f, "secured-with({})", sess),
            Message::LoadMany { id, leafs } => write!(f, "load-many(id={}, cnt={})", id, leafs.len()),
            Message::LoadManyResult { id, data } => write!(f, "load-many-result(id={}, cnt={})", id, data.len()),
            Message::LoadManyFailed { id, err } => write!(f, "load-many-failed(id={})-{}", id, err),
        }
    }
}
impl Default for Message {
fn default() -> Message {
Message::Noop
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/server.rs | lib/src/mesh/server.rs | use crate::{header::PrimaryKey, pipe::EventPipe};
use async_trait::async_trait;
use error_chain::bail;
use fxhash::FxHashMap;
use fxhash::FxHashSet;
use serde::de::DeserializeOwned;
use serde::{Deserialize, Serialize};
use std::future::Future;
use std::net::SocketAddr;
use std::sync::Mutex as StdMutex;
use std::sync::Weak;
use std::time::Duration;
use std::{
borrow::Borrow,
net::{IpAddr, Ipv4Addr, Ipv6Addr},
ops::Deref,
};
use std::{collections::hash_map::Entry, sync::Arc};
use tokio::sync::broadcast;
use tokio::sync::mpsc;
use tokio::sync::Mutex;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::{Instrument, WithSubscriber};
use bytes::Bytes;
use super::client::MeshClient;
use super::core::*;
use super::msg::*;
use super::MeshSession;
use super::Registry;
use crate::chain::*;
use crate::comms::TxDirection;
use crate::comms::TxGroup;
use crate::comms::*;
use crate::conf::*;
use crate::crypto::AteHash;
use crate::engine::TaskEngine;
use crate::error::*;
use crate::flow::OpenAction;
use crate::flow::OpenFlow;
use crate::index::*;
use crate::prelude::*;
use crate::spec::SerializationFormat;
use crate::time::ChainTimestamp;
use crate::transaction::*;
use crate::trust::*;
/// Composite routing key: a named route plus the chain key within it.
#[derive(Serialize, Deserialize, Debug, Clone, Default, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct RouteChain {
    pub route: String,
    pub chain: ChainKey,
}
/// A route registered on the server: its hello path, configuration and the
/// open-flow used to create/open chains under it.
pub struct MeshRoute {
    pub hello_path: String,
    pub cfg_ate: ConfAte,
    pub cfg_mesh: ConfMesh,
    // Flow invoked when a client subscribes to a chain on this route
    pub flow: Box<dyn OpenFlow>,
    pub flow_type: String,
}
/// A chain hosted by this server, with its trust mode and the transmit
/// group used to broadcast events to subscribers.
pub struct MeshChain {
    chain: Arc<Chain>,
    integrity: TrustMode,
    tx_group: Arc<Mutex<TxGroup>>,
}
/// Server-side root of the mesh: owns the hosted chains, the routing table
/// and the network listener.
pub struct MeshRoot {
    pub(super) cfg_mesh: ConfMesh,
    // Full node identity of this server
    pub(super) server_id: NodeId,
    // Numeric id derived from the hash table (or forced via configuration)
    pub(super) node_id: u32,
    pub(super) lookup: MeshHashTable,
    pub(super) addrs: Vec<MeshAddress>,
    // Chains currently hosted, keyed by (route, chain-key)
    pub(super) chains: Mutex<FxHashMap<RouteChain, MeshChain>>,
    pub(super) listener: StdMutex<Option<Arc<StdMutex<Listener<Message, SessionContext>>>>>,
    pub(super) routes: StdMutex<FxHashMap<String, Arc<Mutex<MeshRoute>>>>,
    // Broadcast used to signal shutdown to background tasks
    pub(super) exit: broadcast::Sender<()>,
}
/// Mutable per-session state guarded by the context mutex.
#[derive(Clone)]
struct SessionContextProtected {
    // Chain this session is subscribed to, if any
    chain: Option<Arc<Chain>>,
    // Primary keys currently locked by this session
    locks: FxHashSet<PrimaryKey>,
}
/// Per-connection server state: the protected session data plus the
/// conversation shared with the client.
pub(super) struct SessionContext {
    inside: StdMutex<SessionContextProtected>,
    conversation: Arc<ConversationSession>,
}
impl Default for SessionContext {
    /// Builds a context with no subscribed chain, no held locks and a
    /// fresh (default) conversation session.
    fn default() -> SessionContext {
        let protected = SessionContextProtected {
            chain: None,
            locks: FxHashSet::default(),
        };
        SessionContext {
            inside: StdMutex::new(protected),
            conversation: Arc::new(ConversationSession::default()),
        }
    }
}
impl Drop for SessionContext {
    /// Releases any locks still held when the session context goes away.
    /// A failure panics in debug builds (via debug_assert) and is only
    /// logged as a warning in release builds.
    fn drop(&mut self) {
        // Clone the protected state out so the lock is not held during cleanup
        let context = self.inside.lock().unwrap().clone();
        if let Err(err) = disconnected(context) {
            debug_assert!(false, "mesh-root-err {:?}", err);
            warn!("mesh-root-err: {}", err.to_string());
        }
    }
}
impl MeshRoot {
    /// Creates a new mesh root server.
    ///
    /// The node id is taken from `cfg.force_node_id` when set; otherwise it
    /// is derived from the first entry of `all_addrs` that the hash table
    /// can map to an id. Fails with `RequiredExplicitNodeId` when neither
    /// source yields an id.
    pub(super) async fn new(
        cfg: &ConfMesh,
        listen_addrs: Vec<MeshAddress>,
        all_addrs: Vec<MeshAddress>,
    ) -> Result<Arc<Self>, CommsError> {
        let lookup = MeshHashTable::new(&cfg);
        let node_id = match cfg.force_node_id {
            Some(a) => a,
            None => {
                // Derive the id from the first address that resolves in the
                // mesh hash table
                match all_addrs
                    .iter()
                    .filter_map(|a| lookup.derive_id(a))
                    .next()
                {
                    Some(a) => a,
                    None => {
                        bail!(CommsErrorKind::RequiredExplicitNodeId);
                    }
                }
            }
        };
        // All tracing output of this server is tagged with e.g. "n7"
        let server_id = format!("n{}", node_id);
        Self::new_ext(cfg, lookup, node_id, listen_addrs)
            .instrument(span!(
                Level::INFO,
                "server",
                id = server_id.as_str()
            ))
            .await
    }

    /// Creates the mesh root with an explicit hash table and node id,
    /// binds the listener sockets and spawns the background cleaner task.
    pub async fn new_ext(
        cfg: &ConfMesh,
        lookup: MeshHashTable,
        node_id: u32,
        listen_addrs: Vec<MeshAddress>,
    ) -> Result<Arc<Self>, CommsError> {
        let mut cfg = MeshConfig::new(cfg.clone());
        // Collect the distinct ports to listen on; a forced port replaces
        // everything that came from the addresses
        let mut listen_ports = listen_addrs.iter().map(|a| a.port).collect::<Vec<_>>();
        if let Some(port) = cfg.cfg_mesh.force_port {
            listen_ports.clear();
            listen_ports.push(port);
        }
        listen_ports.sort();
        listen_ports.dedup();
        for port in listen_ports.iter() {
            // Bind on the IPv6 unspecified address (all interfaces)
            cfg = cfg.listen_on(
                IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 0)),
                port.clone(),
            );
        }
        if let Some(cert) = cfg.listen_cert.as_ref() {
            trace!("using certificate: {}", cert.hash());
        }
        // Exit channel used to signal listeners and relays to shut down
        let (exit_tx, _) = broadcast::channel(1);
        let server_id = NodeId::generate_server_id(node_id);
        let root = Arc::new(MeshRoot {
            cfg_mesh: cfg.cfg_mesh.clone(),
            addrs: listen_addrs,
            lookup,
            server_id: server_id.clone(),
            node_id,
            chains: Mutex::new(FxHashMap::default()),
            listener: StdMutex::new(None),
            routes: StdMutex::new(FxHashMap::default()),
            exit: exit_tx.clone(),
        });
        // The processor holds only a weak reference so that dropping the
        // root is enough to stop packet processing
        let processor = Arc::new(MeshRootProcessor {
            root: Arc::downgrade(&root),
        });
        let listener =
            crate::comms::Listener::new(&cfg, server_id, processor, exit_tx.clone()).await?;
        {
            let mut guard = root.listener.lock().unwrap();
            guard.replace(listener);
        }
        // Background task that periodically drops unused chains
        {
            let root = Arc::clone(&root);
            TaskEngine::spawn(async move {
                root.auto_clean().await;
            });
        }
        Ok(root)
    }

    /// Loops until the root is dropped, cleaning unused chains every five
    /// seconds.
    async fn auto_clean(self: Arc<Self>) {
        // Hold only a weak reference so this task does not keep the root alive
        let chain = Arc::downgrade(&self);
        loop {
            crate::engine::sleep(std::time::Duration::from_secs(5)).await;
            let chain = match Weak::upgrade(&chain) {
                Some(a) => a,
                None => {
                    // Root has been dropped - stop the cleaner
                    break;
                }
            };
            chain.clean().await;
        }
    }

    /// Registers a flow under its hello path so that clients connecting
    /// with that path can open chains through it.
    pub async fn add_route<F>(
        self: &Arc<Self>,
        open_flow: Box<F>,
        cfg_ate: &ConfAte,
    ) -> Result<(), CommsError>
    where
        F: OpenFlow + 'static,
    {
        let hello_path = open_flow.hello_path().to_string();
        let route = MeshRoute {
            hello_path: hello_path.clone(),
            cfg_ate: cfg_ate.clone(),
            cfg_mesh: self.cfg_mesh.clone(),
            flow: open_flow,
            flow_type: std::any::type_name::<F>().to_string(),
        };
        {
            let mut routes = self.routes.lock().unwrap();
            routes.insert(hello_path.clone(), Arc::new(Mutex::new(route)));
        }
        // Tell the listener (when active) to accept this hello path
        {
            let listener = self.listener.lock().unwrap();
            if let Some(listener) = listener.deref() {
                let mut listener = listener.lock().unwrap();
                listener.add_route(hello_path.as_str())?
            }
        };
        Ok(())
    }

    /// Evicts chains that no-one references any more (a strong count of one
    /// means only this cache holds them) and shuts them down outside of the
    /// cache lock.
    pub async fn clean(self: &Arc<Self>) {
        let mut shutdown_me = Vec::new();
        {
            let mut guard = self.chains.lock().await;
            guard.retain(|_k, v| {
                if Arc::strong_count(&v.chain) <= 1 {
                    shutdown_me.push(Arc::clone(&v.chain));
                    false
                } else {
                    true
                }
            });
        }
        // Shutdown happens after releasing the cache lock
        for chain in shutdown_me {
            if let Err(err) = chain.shutdown().await {
                error!("failed to shutdown chain - {}", err);
            }
        }
    }

    /// Returns this server's node identity.
    pub fn server_id(&self) -> NodeId {
        self.server_id.clone()
    }

    /// Stops the listener, drops all routes and shuts down every cached
    /// chain.
    pub async fn shutdown(self: &Arc<Self>) {
        {
            let mut guard = self.listener.lock().unwrap();
            guard.take();
        }
        {
            let mut guard = self.routes.lock().unwrap();
            guard.clear();
        }
        {
            let mut guard = self.chains.lock().await;
            for (_, v) in guard.drain() {
                if let Err(err) = v.chain.shutdown().await {
                    error!("failed to shutdown chain - {}", err);
                }
            }
        }
    }
}
#[async_trait]
impl StreamRoute
for MeshRoot
{
    /// Hands a freshly accepted web socket over to the active listener.
    /// Refuses the stream when the listener has already been shut down.
    async fn accepted_web_socket(
        &self,
        rx: StreamRx,
        rx_proto: StreamProtocol,
        tx: Upstream,
        hello: HelloMetadata,
        sock_addr: SocketAddr,
        wire_encryption: Option<EncryptKey>,
    ) -> Result<(), CommsError> {
        // Grab a strong reference to the listener without holding the lock
        // across the await below
        let listener = {
            let guard = self.listener.lock().unwrap();
            if let Some(listener) = guard.as_ref() {
                Arc::clone(&listener)
            } else {
                warn!("listener is inactive - lost stream");
                bail!(CommsErrorKind::Refused);
            }
        };
        Listener::accept_stream(listener, rx, rx_proto, tx, hello, wire_encryption, sock_addr, self.exit.subscribe()).await?;
        Ok(())
    }
}
/// Releases every local lock still held by a disconnecting session and
/// clears its chain reference.
///
/// All keys are attempted even when some fail to unlock; previously the
/// `?` operator bailed on the first failure and left the remaining locks
/// held. The first error encountered (if any) is returned after the sweep.
fn disconnected(mut context: SessionContextProtected) -> Result<(), CommsError> {
    let mut first_err = None;
    if let Some(chain) = context.chain.take() {
        for key in context.locks.iter() {
            // Best effort: keep unlocking the rest even when one fails
            if let Err(err) = chain.pipe.unlock_local(key.clone()) {
                if first_err.is_none() {
                    first_err = Some(err);
                }
            }
        }
    }
    match first_err {
        // Same conversion the old `?` performed (CommitError -> CommsError)
        Some(err) => Err(err.into()),
        None => Ok(()),
    }
}
/// Event pipe stage that broadcasts transmitted transactions to every
/// client connected to the same chain before handing the work on to the
/// next stage.
struct ServerPipe {
    // Key of the chain this pipe serves. NOTE(review): not read anywhere in
    // the visible impl - confirm whether this field is still needed
    chain_key: ChainKey,
    // Broadcast group of all clients subscribed to this chain
    tx_group: Arc<Mutex<TxGroup>>,
    // Serialization format used when packing broadcast packets
    wire_format: SerializationFormat,
    // Next stage in the pipe chain
    next: Arc<Box<dyn EventPipe>>,
}
#[async_trait]
impl EventPipe for ServerPipe {
    /// Broadcasts transmit-flagged transactions to all subscribed clients,
    /// then forwards the work to the next pipe stage.
    async fn feed(&self, work: ChainWork) -> Result<(), CommitError> {
        // If this packet is being broadcast then send it to all the other nodes too
        if work.trans.transmit {
            let evts = MessageEvent::convert_to(&work.trans.events);
            // commit is None here: broadcasts never ask for confirmation
            let pck = Packet::from(Message::Events {
                commit: None,
                evts: evts.clone(),
            })
            .to_packet_data(self.wire_format)?;
            let mut tx = self.tx_group.lock().await;
            tx.send(pck, None).await;
        }
        // Hand over to the next pipe as this transaction
        self.next.feed(work).await
    }

    // Locking operations are simply delegated down the pipe
    async fn try_lock(&self, key: PrimaryKey) -> Result<bool, CommitError> {
        self.next.try_lock(key).await
    }

    fn unlock_local(&self, key: PrimaryKey) -> Result<(), CommitError> {
        self.next.unlock_local(key)
    }

    async fn unlock(&self, key: PrimaryKey) -> Result<(), CommitError> {
        self.next.unlock(key).await
    }

    fn set_next(&mut self, next: Arc<Box<dyn EventPipe>>) {
        let _ = std::mem::replace(&mut self.next, next);
    }

    // This pipe stage carries no conversation of its own
    async fn conversation(&self) -> Option<Arc<ConversationSession>> {
        None
    }

    async fn load_many(&self, leafs: Vec<AteHash>) -> Result<Vec<Option<Bytes>>, LoadError> {
        self.next.load_many(leafs).await
    }

    async fn prime(&self, records: Vec<(AteHash, Option<Bytes>)>) -> Result<(), CommsError> {
        self.next.prime(records).await
    }
}
/// Opens (or reuses) the chain identified by `route_chain` on behalf of a
/// subscribing client.
///
/// A cached chain is returned directly; otherwise the route's flow is
/// asked to create it, a broadcasting `ServerPipe` is attached, and the
/// result is inserted into the cache. The caller's `tx` is joined onto the
/// chain's broadcast group in both cases.
async fn open_internal<'b>(
    root: Arc<MeshRoot>,
    route_chain: RouteChain,
    tx: &'b mut Tx,
) -> Result<OpenedChain, ChainCreationError> {
    debug!(
        "open_internal {} - {}",
        route_chain.route, route_chain.chain
    );
    // Perform a clean of any chains that are out of scope
    root.clean().await;
    // Determine the route (if any)
    let route = {
        let routes = root.routes.lock().unwrap();
        match routes.get(&route_chain.route) {
            Some(a) => Arc::clone(a),
            None => {
                bail!(ChainCreationErrorKind::InvalidRoute(route_chain.route))
            }
        }
    };
    // Fast path: the chain is already open - join its broadcast group
    {
        let chains = root.chains.lock().await;
        if let Some(chain) = chains.get(&route_chain) {
            tx.replace_group(Arc::clone(&chain.tx_group)).await;
            let route = route.lock().await;
            return Ok(OpenedChain {
                integrity: chain.integrity,
                message_of_the_day: route.flow.message_of_the_day(&chain.chain).await?,
                chain: Arc::clone(&chain.chain),
            });
        }
    }
    // Get the configuration, metrics and throttle
    let cfg_ate = {
        let route = route.lock().await;
        route.cfg_ate.clone()
    };
    // Create a chain builder
    let mut builder = ChainBuilder::new(&cfg_ate)
        .await
        .node_id(root.server_id.clone())
        .with_metrics(&tx.metrics)
        .with_throttle(&tx.throttle);
    // Postfix the hello_path
    #[cfg(feature = "enable_local_fs")]
    {
        builder = builder.postfix_log_path(route_chain.route.as_str());
    }
    // Create the broadcast group
    let new_tx_group = { Arc::new(Mutex::new(TxGroup::default())) };
    // Add a pipe that will broadcast message to the connected clients
    let pipe = Box::new(ServerPipe {
        chain_key: route_chain.chain.clone(),
        tx_group: Arc::clone(&new_tx_group),
        wire_format: root.cfg_mesh.wire_format.clone(),
        next: crate::pipe::NullPipe::new(),
    });
    builder = builder.add_pipe(pipe);
    // Create the chain using the chain flow builder
    let integrity;
    let wire_encryption = tx.wire_encryption().await.map(|a| a.size());
    let new_chain = {
        let route = route.lock().await;
        debug!("open_flow: {}", route.flow_type);
        match route
            .flow
            .open(builder, &route_chain.chain, wire_encryption)
            .await?
        {
            // Private chains hand the client a session first and are
            // treated as centralized (server-signed)
            OpenAction::PrivateChain { chain, session } => {
                let msg = Message::SecuredWith(session);
                let pck = Packet::from(msg).to_packet_data(root.cfg_mesh.wire_format)?;
                tx.send_reply(pck).await?;
                integrity = TrustMode::Centralized(CentralizedRole::Server);
                chain
            }
            OpenAction::DistributedChain { chain } => {
                integrity = TrustMode::Distributed;
                chain
            }
            OpenAction::CentralizedChain { chain } => {
                integrity = TrustMode::Centralized(CentralizedRole::Server);
                chain
            }
            OpenAction::Deny { reason } => {
                bail!(ChainCreationErrorKind::ServerRejected(
                    FatalTerminate::Denied { reason }
                ));
            }
        }
    };
    new_chain.single().await.set_integrity(integrity);
    // Insert it into the cache so future requests can reuse the reference to the chain
    // (another task may have opened it concurrently, hence the entry check)
    let mut chains = root.chains.lock().await;
    let new_chain = match chains.entry(route_chain.clone()) {
        Entry::Occupied(o) => {
            // Lost the race - join the existing chain's broadcast group
            let o = o.into_mut();
            tx.replace_group(Arc::clone(&o.tx_group)).await;
            o
        }
        Entry::Vacant(v) => {
            tx.replace_group(Arc::clone(&new_tx_group)).await;
            v.insert(MeshChain {
                integrity,
                chain: Arc::clone(&new_chain),
                tx_group: new_tx_group,
            })
        }
    };
    let route = route.lock().await;
    Ok(OpenedChain {
        integrity,
        message_of_the_day: route.flow.message_of_the_day(&new_chain.chain).await?,
        chain: Arc::clone(&new_chain.chain),
    })
}
/// Packet processor handed to the comms listener; holds only a weak
/// reference so the listener does not keep the root alive.
#[derive(Clone)]
struct MeshRootProcessor {
    root: Weak<MeshRoot>,
}
#[async_trait]
impl ServerProcessor<Message, SessionContext> for MeshRootProcessor {
    /// Forwards an inbound packet to the root's inbox, failing with a
    /// disconnect error when the root has already been dropped.
    async fn process<'a, 'b>(
        &'a self,
        pck: PacketWithContext<Message, SessionContext>,
        tx: &'b mut Tx,
    ) -> Result<(), CommsError> {
        let root = if let Some(root) = Weak::upgrade(&self.root) {
            root
        } else {
            debug!("inbox-server-exit: reference dropped scope");
            bail!(CommsErrorKind::Disconnected);
        };
        inbox_packet(root, pck, tx).await
    }

    /// Logs the departure of a client connection.
    async fn shutdown(&self, addr: SocketAddr) {
        debug!("disconnected: {}", addr.to_string());
    }
}
/// Feeds a batch of events received from a client into the chain of trust
/// and, on success, relays the raw packet to the other members of the
/// broadcast group.
///
/// When the client requested a synchronous commit (`commit` is `Some`) a
/// `Message::Confirmed` reply is sent once the feed succeeds. Fails with
/// `NotYetSubscribed` when no chain has been subscribed on this session.
///
/// Cleanup vs the previous version: the nested `match ret` inside the
/// `Ok(_)` arm (whose `Err` branch was unreachable) and the redundant
/// `commit.clone()` on a `Copy` value have been removed; behaviour is
/// unchanged.
async fn inbox_event<'b>(
    context: Arc<SessionContext>,
    commit: Option<u64>,
    evts: Vec<MessageEvent>,
    tx: &'b mut Tx,
    pck_data: PacketData,
) -> Result<(), CommsError> {
    trace!(evts.cnt = evts.len());
    #[cfg(feature = "enable_verbose")]
    {
        for evt in evts.iter() {
            trace!("event: {}", evt.meta);
        }
    }
    // The session must have subscribed to a chain before it may feed events
    let chain = context.inside.lock().unwrap().chain.clone();
    let chain = match chain {
        Some(a) => a,
        None => {
            tx.send_reply_msg(Message::FatalTerminate(FatalTerminate::NotYetSubscribed))
                .await?;
            bail!(CommsErrorKind::NotYetSubscribed);
        }
    };
    // Feed the events into the chain of trust
    let evts = MessageEvent::convert_from(evts.into_iter());
    let ret = chain
        .pipe
        .feed(ChainWork {
            trans: Transaction {
                scope: TransactionScope::None,
                transmit: false,
                events: evts,
                timeout: Duration::from_secs(30),
                conversation: Some(Arc::clone(&context.conversation)),
            },
        })
        .await;
    match ret {
        Ok(()) => {
            // If the client asked for a synchronous commit then confirm it
            // (the feed already succeeded, so no error branch is needed here)
            if let Some(id) = commit {
                trace!("send::commit_confirmed id={}", id);
                tx.send_reply_msg(Message::Confirmed(id)).await?;
            }
            // Send the packet data onto the others in this broadcast group
            tx.send_others(pck_data).await;
            Ok(())
        }
        Err(err) => {
            Err(CommsErrorKind::InternalError(format!("feed-failed - {}", err.to_string())).into())
        }
    }
}
/// Attempts to take a local lock on `key` on behalf of the client and
/// replies with the outcome via `Message::LockResult`.
async fn inbox_lock<'b>(
    context: Arc<SessionContext>,
    key: PrimaryKey,
    tx: &'b mut Tx,
) -> Result<(), CommsError> {
    trace!("lock {}", key);
    // A chain must be subscribed before locks can be requested
    let chain = context.inside.lock().unwrap().chain.clone();
    let chain = match chain {
        Some(a) => a,
        None => {
            tx.send_reply_msg(Message::FatalTerminate(FatalTerminate::NotYetSubscribed))
                .await?;
            bail!(CommsErrorKind::NotYetSubscribed);
        }
    };
    let is_locked = chain.pipe.try_lock(key.clone()).await?;
    // NOTE(review): the key is recorded as held even when is_locked is
    // false - confirm whether failed attempts should be tracked here
    context.inside.lock().unwrap().locks.insert(key.clone());
    tx.send_reply_msg(Message::LockResult {
        key: key.clone(),
        is_locked,
    })
    .await
}
/// Loads the requested record payloads from the chain and replies with
/// either `LoadManyResult` or `LoadManyFailed`, tagged with the caller's
/// request id.
async fn inbox_load_many<'b>(
    context: Arc<SessionContext>,
    id: u64,
    leafs: Vec<AteHash>,
    tx: &'b mut Tx,
) -> Result<(), CommsError> {
    trace!("load id={}, leafs={}", id, leafs.len());
    // A chain must be subscribed before records can be loaded
    let chain = context.inside.lock().unwrap().chain.clone();
    let chain = match chain {
        Some(a) => a,
        None => {
            tx.send_reply_msg(Message::FatalTerminate(FatalTerminate::NotYetSubscribed))
                .await?;
            bail!(CommsErrorKind::NotYetSubscribed);
        }
    };
    // Load failures are reported back to the client rather than
    // terminating the connection
    let ret = match chain.pipe.load_many(leafs).await {
        Ok(d) => Message::LoadManyResult {
            id,
            data: d.into_iter().map(|d| d.map(|d| d.to_vec())).collect()
        },
        Err(err) => Message::LoadManyFailed {
            id,
            err: err.to_string(),
        }
    };
    tx.send_reply_msg(ret)
        .await
}
/// Releases a lock previously taken via `inbox_lock` and removes it from
/// the session's tracked lock set.
async fn inbox_unlock<'b>(
    context: Arc<SessionContext>,
    key: PrimaryKey,
    tx: &'b mut Tx,
) -> Result<(), CommsError> {
    trace!("unlock {}", key);
    // A chain must be subscribed before locks can be released
    let chain = context.inside.lock().unwrap().chain.clone();
    let chain = match chain {
        Some(a) => a,
        None => {
            tx.send_reply_msg(Message::FatalTerminate(FatalTerminate::NotYetSubscribed))
                .await?;
            bail!(CommsErrorKind::NotYetSubscribed);
        }
    };
    // Stop tracking the key before performing the actual unlock
    context.inside.lock().unwrap().locks.remove(&key);
    chain.pipe.unlock(key).await?;
    Ok(())
}
/// Handles a client `Subscribe` request: starts a new conversation, checks
/// that this node owns the chain (redirecting or refusing otherwise),
/// opens the chain and finally streams its history to the client.
async fn inbox_subscribe<'b>(
    root: Arc<MeshRoot>,
    hello_path: &str,
    chain_key: ChainKey,
    from: ChainTimestamp,
    redirect: bool,
    omit_data: bool,
    context: Arc<SessionContext>,
    tx: &'b mut Tx,
) -> Result<(), CommsError> {
    trace!("subscribe: (key={}, omit_data={})", chain_key.to_string(), omit_data);
    // Randomize the conversation ID and clear its state
    context.conversation.clear();
    let conv_id = AteHash::generate();
    let conv_updated = if let Some(mut a) = context.conversation.id.try_lock() {
        a.update(Some(conv_id));
        true
    } else {
        false
    };
    if conv_updated {
        trace!("sending Message::NewConversation(conv_id={})", conv_id);
        tx.send_reply_msg(Message::NewConversation {
            conversation_id: conv_id,
        })
        .await?;
    } else {
        // Could not take the conversation lock - abort the subscribe cleanly
        let err = "failed to generate a new conversation id".to_string();
        trace!("sending Message::FatalTerminate(other={})", err);
        tx.send_reply_msg(Message::FatalTerminate(FatalTerminate::Other {
            err,
        }))
        .await?;
        return Ok(());
    }
    // First lets check if this connection is meant for this group of servers that make
    // up the distributed chain table.
    let (node_addr, node_id) = match root.lookup.lookup(&chain_key) {
        Some(a) => a,
        None => {
            trace!("sending Message::FatalTerminate(not_this_root)");
            tx.send_reply_msg(Message::FatalTerminate(FatalTerminate::NotThisRoot))
                .await?;
            return Ok(());
        }
    };
    // Reject the request if its from the wrong machine
    // Or... if we can perform a redirect then do so
    if root.node_id != node_id {
        if redirect {
            // From here on, packets from this client are relayed to the
            // node that actually owns the chain
            let (exit_tx, exit_rx) = broadcast::channel(1);
            let relay_tx = super::redirect::redirect::<SessionContext>(
                root,
                node_addr,
                omit_data,
                hello_path,
                chain_key,
                from,
                tx.take(),
                exit_rx,
            )
            .await?;
            tx.set_relay(relay_tx);
            tx.add_exit_dependency(exit_tx);
            return Ok(());
        } else {
            // Fail to redirect
            trace!("sending Message::FatalTerminate(redirect actual={} expected={})", node_id, root.node_id);
            tx.send_reply_msg(Message::FatalTerminate(FatalTerminate::RootRedirect {
                actual: node_id,
                expected: root.node_id,
            }))
            .await?;
            return Ok(());
        }
    }
    // Create the open context
    let route = RouteChain {
        route: hello_path.to_string(),
        chain: chain_key.clone(),
    };
    // If we can't find a chain for this subscription then fail and tell the caller
    let opened_chain = match open_internal(Arc::clone(&root), route.clone(), tx).await {
        Err(ChainCreationError(ChainCreationErrorKind::NotThisRoot, _)) => {
            trace!("sending Message::FatalTerminate(not_this_root)");
            tx.send_reply_msg(Message::FatalTerminate(FatalTerminate::NotThisRoot))
                .await?;
            return Ok(());
        }
        Err(ChainCreationError(ChainCreationErrorKind::NoRootFoundInConfig, _)) => {
            trace!("sending Message::FatalTerminate(not_this_root)");
            tx.send_reply_msg(Message::FatalTerminate(FatalTerminate::NotThisRoot))
                .await?;
            return Ok(());
        }
        a => {
            let chain = match a {
                Ok(a) => a,
                Err(err) => {
                    // Any other failure is fatal for this connection
                    let err = err.to_string();
                    trace!("sending Message::FatalTerminate(other={})", err);
                    tx.send_reply_msg(Message::FatalTerminate(FatalTerminate::Other {
                        err: err.clone(),
                    }))
                    .await?;
                    bail!(CommsErrorKind::FatalError(err));
                }
            };
            chain
        }
    };
    let chain = opened_chain.chain;
    // Replace the metrics and throttle with the one stored in the chain
    tx.metrics = Arc::clone(&chain.metrics);
    tx.throttle = Arc::clone(&chain.throttle);
    // If there is a message of the day then transmit it to the caller
    if let Some(message_of_the_day) = opened_chain.message_of_the_day {
        trace!("sending Message::HumanMessage(msg={})", message_of_the_day);
        tx.send_reply_msg(Message::HumanMessage {
            message: message_of_the_day,
        })
        .await?;
    }
    // Update the context with the latest chain-key
    {
        let mut guard = context.inside.lock().unwrap();
        guard.chain.replace(Arc::clone(&chain));
    }
    // Stream the data back to the client
    debug!("starting the streaming process");
    // Centralized chains do not carry per-event signatures on the wire
    let strip_signatures = opened_chain.integrity.is_centralized();
    // NOTE(review): 64 appears to be an inline-payload size threshold when
    // data is omitted - confirm against stream_history_range
    let strip_data = match omit_data {
        true => 64usize,
        false => usize::MAX
    };
    stream_history_range(Arc::clone(&chain), from.., tx, strip_signatures, strip_data).await?;
    Ok(())
}
/// Handles an unsubscribe request by detaching the active chain from the
/// session context. The chain itself stays alive in the server cache;
/// only this session's reference to it is dropped.
async fn inbox_unsubscribe<'b>(
    _root: Arc<MeshRoot>,
    chain_key: ChainKey,
    _tx: &'b mut StreamTx,
    context: Arc<SessionContext>,
) -> Result<(), CommsError> {
    debug!(" unsubscribe: {}", chain_key.to_string());
    // Drop the chain reference held by this session (if any)
    context.inside.lock().unwrap().chain.take();
    Ok(())
}
/// Top-level dispatcher for packets arriving from a client: relays when in
/// redirect mode, otherwise routes the message to the matching `inbox_*`
/// handler under a tracing span.
async fn inbox_packet<'b>(
    root: Arc<MeshRoot>,
    pck: PacketWithContext<Message, SessionContext>,
    tx: &'b mut Tx,
) -> Result<(), CommsError> {
    let context = pck.context.clone();
    // Extract the client id and build the span (used for tracing)
    let span = span!(
        Level::DEBUG,
        "server",
        id = pck.id.to_short_string().as_str(),
        peer = pck.peer_id.to_short_string().as_str()
    );
    // If we are in relay mode then send it on to the other server
    if tx.relay_is_some() {
        tx.send_relay(pck).await?;
        return Ok(());
    }
    // Now process the packet under the span
    async move {
        trace!(packet_size = pck.data.bytes.len());
        let pck_data = pck.data;
        let pck = pck.packet;
        // When throttled into delete-only mode, events carrying data are refused
        let delete_only = {
            let throttle = tx.throttle.lock().unwrap();
            throttle.delete_only
        };
        match pck.msg {
            Message::Subscribe {
                chain_key,
                from,
                allow_redirect: redirect,
                omit_data,
            } => {
                let hello_path = tx.hello_path.clone();
                inbox_subscribe(
                    root,
                    hello_path.as_str(),
                    chain_key,
                    from,
                    redirect,
                    omit_data,
                    context,
                    tx,
                )
                .instrument(span!(Level::DEBUG, "subscribe"))
                .await?;
            }
            Message::Events { commit, evts } => {
                // Count tombstones and data-carrying events for the span and
                // for the read-only check below
                let num_deletes = evts
                    .iter()
                    .filter(|a| a.meta.get_tombstone().is_some())
                    .count();
                let num_data = evts.iter().filter(|a| a.data.is_some()).count();
                if delete_only && num_data > 0 {
                    debug!("event aborted - channel is currently read-only");
                    tx.send_reply_msg(Message::ReadOnly).await?;
                    return Ok(());
                }
                inbox_event(context, commit, evts, tx, pck_data)
                    .instrument(span!(
                        Level::DEBUG,
                        "event",
                        delete_cnt = num_deletes,
                        data_cnt = num_data
                    ))
                    .await?;
            }
            Message::Lock { key } => {
                inbox_lock(context, key, tx)
                    .instrument(span!(Level::DEBUG, "lock"))
                    .await?;
            }
            Message::Unlock { key } => {
                inbox_unlock(context, key, tx)
                    .instrument(span!(Level::DEBUG, "unlock"))
                    .await?;
            }
            Message::LoadMany { id, leafs } => {
                inbox_load_many(context, id, leafs, tx)
                    .instrument(span!(Level::DEBUG, "load-many"))
                    .await?;
            }
            // All other message types are ignored by the server inbox
            _ => {}
        };
        Ok(())
    }
    .instrument(span)
    .await
}
impl Drop for MeshRoot {
    /// Signals the exit channel so background tasks and listeners shut
    /// down when the root goes away; a send error (no receivers left)
    /// is deliberately ignored.
    fn drop(&mut self) {
        debug!("drop (MeshRoot)");
        self.exit.send(()).ok();
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/mesh/active_session_pipe.rs | lib/src/mesh/active_session_pipe.rs | use crate::engine::timeout;
use async_trait::async_trait;
use error_chain::bail;
use fxhash::FxHashMap;
use std::ops::Rem;
use std::sync::Mutex as StdMutex;
use std::sync::RwLock as StdRwLock;
use std::time::Duration;
use std::time::Instant;
use std::{sync::Arc, sync::Weak};
use tokio::sync::broadcast;
use tokio::sync::watch;
use tokio::sync::RwLock;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use super::core::*;
use super::lock_request::*;
use super::msg::*;
use super::recoverable_session_pipe::*;
use super::*;
use super::session::LoadRequest;
use crate::chain::*;
use crate::conf::*;
use crate::crypto::*;
use crate::error::*;
use crate::header::*;
use crate::loader::*;
use crate::meta::*;
use crate::pipe::*;
use crate::session::*;
use crate::spec::*;
use crate::time::*;
use crate::transaction::*;
use crate::trust::*;
use crate::{anti_replay::AntiReplayPlugin, comms::*};
/// The live (connected) half of a session pipe: holds the transmit channel
/// to the server together with the in-flight commit, lock and load request
/// tables that are completed when the matching replies arrive.
pub(super) struct ActiveSessionPipe {
    // Key of the chain this pipe belongs to
    pub(super) key: ChainKey,
    // Transmit side of the connection to the server
    pub(super) tx: Tx,
    // Governs behaviour while disconnected or read-only (error out /
    // go read-only / silently drop - see `feed`)
    pub(super) mode: RecoveryMode,
    pub(super) session: Arc<MeshSession>,
    // True once the connection handshake has completed
    pub(super) connected: bool,
    // Set when the server reported the channel as read-only; cleared when
    // replies start arriving again
    pub(super) likely_read_only: bool,
    // Outstanding synchronous commits awaiting a Confirmed/CommitError reply
    pub(super) commit: Arc<StdMutex<FxHashMap<u64, mpsc::Sender<Result<u64, CommitError>>>>>,
    pub(super) lock_attempt_timeout: Duration,
    // Outstanding lock attempts awaiting a LockResult reply
    pub(super) lock_requests: Arc<StdMutex<FxHashMap<PrimaryKey, LockRequest>>>,
    pub(super) load_timeout: Duration,
    // Outstanding LoadMany requests awaiting their payloads
    pub(super) load_requests: Arc<StdMutex<FxHashMap<u64, LoadRequest>>>,
    pub(super) outbound_conversation: Arc<ConversationSession>,
}
impl ActiveSessionPipe {
    /// Flags the connection handshake as complete; until then `feed`
    /// refuses (or silently drops) transactions depending on the mode.
    pub(super) fn mark_connected(&mut self) {
        self.connected = true;
    }

    /// True once `mark_connected` has been called.
    /// (Simplified from the old `if self.connected == false { return false; } true`.)
    pub(super) fn is_connected(&self) -> bool {
        self.connected
    }

    /// Records that the server reported this channel as read-only.
    pub(super) fn on_read_only(&mut self) {
        self.likely_read_only = true;
    }

    /// Called when the connection drops.
    ///
    /// Switch over to a distributed integrity mode as while we are in an
    /// offline state we need to make sure we sign all the records. Its only
    /// the server and the fact we trust it that we can omit signatures.
    pub(super) async fn on_disconnect(&self) -> Result<(), CommsError> {
        if let Some(chain) = self.session.chain.upgrade() {
            chain.single().await.set_integrity(TrustMode::Distributed);
        }
        Ok(())
    }

    /// Sends the transaction's events to the server. For `Full` scope a
    /// commit id is registered and the returned receiver completes when the
    /// server confirms (or rejects) the commit.
    pub(super) async fn feed_internal(
        &mut self,
        trans: &mut Transaction,
    ) -> Result<Option<mpsc::Receiver<Result<u64, CommitError>>>, CommitError> {
        // Convert the event data into message events
        let evts = MessageEvent::convert_to(&trans.events);
        // If the scope requires synchronization with the remote server then allocate a commit ID
        let (commit, receiver) = match &trans.scope {
            TransactionScope::Full => {
                // Generate a sender/receiver pair
                let (sender, receiver) = mpsc::channel(1);
                // Register a commit ID that will receive the response
                let id = fastrand::u64(..);
                self.commit.lock().unwrap().insert(id, sender);
                (Some(id), Some(receiver))
            }
            _ => (None, None),
        };
        // Send the same packet to all the transmit nodes
        trace!("tx wire_format={}", self.tx.wire_format);
        self.tx
            .send_all_msg(Message::Events { commit, evts })
            .await?;
        Ok(receiver)
    }
}
impl ActiveSessionPipe {
    /// Feeds a transaction to the server (when `trans.transmit` is set).
    ///
    /// While likely read-only the scope is upgraded to `Full` so writes are
    /// confirmed synchronously. If not yet connected the behaviour depends
    /// on the recovery mode: error out, report read-only, or silently drop.
    pub(super) async fn feed(&mut self, trans: &mut Transaction) -> Result<Option<mpsc::Receiver<Result<u64, CommitError>>>, CommitError> {
        // Only transmit the packet if we are meant to
        let ret = if trans.transmit == true {
            // If we are likely in a read only situation then all transactions
            // should go to the server in synchronous mode until we can confirm
            // normal writability is restored
            if self.likely_read_only && self.mode.should_go_readonly() {
                trans.scope = TransactionScope::Full;
            }
            // If we are still connecting then don't do it
            if self.connected == false {
                if self.mode.should_error_out() {
                    return Err(CommitErrorKind::CommsError(CommsErrorKind::Disconnected).into());
                } else if self.mode.should_go_readonly() {
                    return Err(CommitErrorKind::CommsError(CommsErrorKind::ReadOnly).into());
                } else {
                    // Queueing mode: silently accept without transmitting
                    return Ok(None);
                }
            }
            // Feed the transaction into the pipe
            self.feed_internal(trans).await?
        } else {
            None
        };
        Ok(ret)
    }

    /// Requests the payloads for `leafs` from the server and waits (up to
    /// `load_timeout`) for the reply that carries them.
    pub(super) async fn load_many(&mut self, leafs: Vec<AteHash>) -> Result<Vec<Option<Bytes>>, LoadError> {
        // Register a load ID that will receive the response
        let (tx, mut rx) = mpsc::channel(1);
        let id = fastrand::u64(..);
        self.load_requests.lock().unwrap().insert(id, LoadRequest {
            records: leafs.clone(),
            tx
        });
        // Inform the server that we want these records
        self.tx
            .send_all_msg(Message::LoadMany { id, leafs: leafs })
            .await
            .map_err(|err| {
                trace!("load failed: {}", err);
                LoadErrorKind::Disconnected
            })?;
        // Wait for the response from the server (or a timeout); on failure
        // the pending request is removed so it does not leak
        match crate::engine::timeout(self.load_timeout, rx.recv()).await {
            Ok(Some(a)) => {
                // A reply arrived - reset the read-only hint
                self.likely_read_only = false;
                return a;
            }
            Ok(None) => {
                // Sender side dropped - treat as a disconnect
                self.load_requests.lock().unwrap().remove(&id);
                bail!(LoadErrorKind::Disconnected);
            }
            Err(_) => {
                self.load_requests.lock().unwrap().remove(&id);
                bail!(LoadErrorKind::Timeout)
            },
        };
    }

    /// Asks the server for a lock on `key`, waiting up to
    /// `lock_attempt_timeout` for the result.
    pub(super) async fn try_lock(&mut self, key: PrimaryKey) -> Result<bool, CommitError> {
        // If we are still connecting then don't do it
        if self.connected == false {
            bail!(CommitErrorKind::LockError(CommsErrorKind::Disconnected));
        }
        // Write an entry into the lookup table
        let (tx, mut rx) = watch::channel(false);
        let my_lock = LockRequest {
            needed: 1,
            positive: 0,
            negative: 0,
            tx,
        };
        self.lock_requests
            .lock()
            .unwrap()
            .insert(key.clone(), my_lock);
        // Send a message up to the main server asking for a lock on the data object
        trace!("tx lock key={}", key);
        self.tx
            .send_all_msg(Message::Lock { key: key.clone() })
            .await?;
        // Wait for the response from the server
        let ret = match crate::engine::timeout(self.lock_attempt_timeout, rx.changed()).await {
            Ok(a) => {
                // A reply arrived - reset the read-only hint
                self.likely_read_only = false;
                if let Err(_) = a {
                    // Watch sender dropped - treat as a disconnect
                    bail!(CommitErrorKind::LockError(
                        CommsErrorKind::Disconnected.into()
                    ));
                }
                *rx.borrow()
            }
            Err(_) => {
                // Timed out - clean up the pending request
                self.lock_requests.lock().unwrap().remove(&key);
                bail!(CommitErrorKind::LockError(CommsErrorKind::Timeout.into()))
            },
        };
        Ok(ret)
    }

    /// Tells the server to release the lock on `key` (fire and forget).
    pub(super) async fn unlock(&mut self, key: PrimaryKey) -> Result<(), CommitError> {
        // If we are still connecting then don't do it
        if self.connected == false {
            bail!(CommitErrorKind::CommsError(CommsErrorKind::Disconnected));
        }
        // Send a message up to the main server asking for an unlock on the data object
        trace!("tx unlock key={}", key);
        self.tx
            .send_all_msg(Message::Unlock { key: key.clone() })
            .await?;
        // Success
        Ok(())
    }

    /// Conversation used for outbound transactions on this pipe.
    pub(super) fn conversation(&self) -> Option<Arc<ConversationSession>> {
        Some(Arc::clone(&self.outbound_conversation))
    }
}
impl Drop for ActiveSessionPipe {
    fn drop(&mut self) {
        // Only log the drop when verbose diagnostics are compiled in
        #[cfg(feature = "enable_verbose")]
        debug!("drop {}", self.key.to_string());
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/conf/tests.rs | lib/src/conf/tests.rs | #![cfg(test)]
#[cfg(feature = "enable_dns")]
use std::{net::IpAddr, str::FromStr};
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::mesh::Registry;
use super::*;
/// Builds an ATE configuration suitable for unit tests: logs under
/// /tmp/ate when local-fs support is compiled in, and NTP sync disabled
/// when NTP support is compiled in.
pub(crate) fn mock_test_config() -> ConfAte {
    #[allow(unused_mut)]
    let mut cfg = ConfAte::default();
    #[cfg(feature = "enable_local_fs")]
    {
        cfg.log_path = Some("/tmp/ate".to_string());
    }
    #[cfg(feature = "enable_ntp")]
    {
        cfg.ntp_sync = false;
    }
    cfg
}
/// Builds a single-node mesh configuration that points at localhost on
/// the given port, guessing the URL schema from the port number.
pub(crate) fn mock_test_mesh(port: u16) -> ConfMesh {
    #[cfg(feature = "enable_dns")]
    let roots = vec![MeshAddress::new(
        IpAddr::from_str("127.0.0.1").unwrap(),
        port,
    )];
    #[cfg(not(feature = "enable_dns"))]
    let roots = vec![MeshAddress::new("localhost", port)];
    let schema = Registry::guess_schema(port);
    let remote = url::Url::parse(format!("{}://localhost", schema).as_str()).unwrap();
    ConfMesh::new("localhost", remote, roots.iter())
}
#[test]
fn test_config_mocking() {
    crate::utils::bootstrap_test_env();
    // The mocked mesh must expose loopback as its first (and only) root
    let cfg = mock_test_mesh(4001);
    let first_root = cfg.roots.iter().next().unwrap();
    assert_eq!(first_root.host.to_string(), "127.0.0.1");
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/conf/mesh_address.rs | lib/src/conf/mesh_address.rs | use serde::{Deserialize, Serialize};
#[cfg(feature = "enable_dns")]
use std::net::IpAddr;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::crypto::AteHash;
/// Represents a target node within a mesh
#[derive(Serialize, Deserialize, Debug, Clone, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct MeshAddress {
    /// Host the node lives on: an IP address when DNS support is compiled
    /// in, otherwise a host name string
    #[cfg(feature = "enable_dns")]
    pub host: IpAddr,
    #[cfg(not(feature = "enable_dns"))]
    pub host: String,
    /// TCP port the node listens on
    pub port: u16,
}
/// Address form used when establishing a connection: a resolved socket
/// address when DNS support is enabled
#[cfg(feature = "enable_dns")]
pub type MeshConnectAddr = std::net::SocketAddr;
/// Without DNS support, connections are addressed by host name and port
#[cfg(not(feature = "enable_dns"))]
pub type MeshConnectAddr = crate::conf::MeshAddress;
impl MeshAddress {
    /// Builds an address from an IP and port (DNS-enabled builds)
    #[cfg(feature = "enable_dns")]
    #[allow(dead_code)]
    pub fn new(ip: IpAddr, port: u16) -> MeshAddress {
        MeshAddress { host: ip, port }
    }

    /// Builds an address from a domain name and port (non-DNS builds)
    #[cfg(not(feature = "enable_dns"))]
    #[allow(dead_code)]
    pub fn new(domain: &str, port: u16) -> MeshAddress {
        MeshAddress {
            host: domain.to_string(),
            port,
        }
    }

    /// Computes a stable content hash over the host and (big-endian) port
    pub fn hash(&self) -> AteHash {
        #[cfg(feature = "enable_dns")]
        match self.host {
            IpAddr::V4(host) => AteHash::from_bytes_twice(&host.octets(), &self.port.to_be_bytes()),
            IpAddr::V6(host) => AteHash::from_bytes_twice(&host.octets(), &self.port.to_be_bytes()),
        }
        #[cfg(not(feature = "enable_dns"))]
        AteHash::from_bytes_twice(self.host.as_bytes(), &self.port.to_be_bytes())
    }

    /// Returns the port component of the address
    pub fn port(&self) -> u16 {
        self.port
    }
}
impl std::fmt::Display for MeshAddress {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
write!(f, "{}:{}", self.host, self.port)
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/conf/mesh.rs | lib/src/conf/mesh.rs | #![allow(unused_imports)]
use error_chain::bail;
use std::iter::Iterator;
use std::net::IpAddr;
use std::time::Duration;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::comms::CertificateValidation;
use crate::conf::ConfAte;
use crate::crypto::KeySize;
use crate::mesh::Registry;
use crate::prelude::*;
use crate::{comms::StreamProtocol, error::CommsError};
use super::*;
/// Represents all nodes within this cluster. All the chains
/// are spread evenly across the nodes within a cluster using a hashing
/// algorithm. Care must be taken when added new addresses that the
/// redo logs are not lost during a respreading of addresses. The recommended
/// way to grow clusters is to first run an ATE mirror on the new cluster
/// nodes then once its running switch to an active cluster
#[derive(Debug, Clone)]
pub struct ConfMesh {
/// Domain name that this mesh is running on
pub domain_name: String,
/// URL of the target remote location
pub remote: url::Url,
/// List of all the allowed certificates for authenticated servers
pub certificate_validation: CertificateValidation,
/// List of all the addresses that the root nodes exists on
pub roots: Vec<MeshAddress>,
/// Forces ATE to act as a client even if its local IP address is one
/// of the node machines in the clusters (normally ATE would automatically
/// listen for connections)
#[cfg(feature = "enable_client")]
pub force_client_only: bool,
/// Forces ATE to listen on a particular address for connections even if
/// the address is not in the list of cluster nodes.
#[cfg(feature = "enable_server")]
pub force_listen: Option<MeshAddress>,
/// Forces ATE to listen on a specific port
#[cfg(feature = "enable_server")]
pub force_port: Option<u16>,
/// When listening for connections the minimum level of encryption to
/// force clients to upgrade to during handshaking.
/// Note: Settings this value may mean that some connections (e.g. browser)
/// and rely on TLS encryption may not be able to connect
#[cfg(feature = "enable_server")]
pub listen_min_encryption: Option<KeySize>,
/// When listening for connections the server will use the certificate
/// below when establishing secure connections.
#[cfg(feature = "enable_server")]
pub listen_certificate: Option<PrivateEncryptKey>,
/// Forces ATE to process all requests related to this particular node_id.
/// Use this property when the node_id can not be derived from the list
/// of addresses and your listen address. For instance when behind a load
/// balancer
#[cfg(feature = "enable_server")]
pub force_node_id: Option<u32>,
/// Forces ATE to connect to a specific address for connections even if
/// chain is not owned by that particular node in the cluster
#[cfg(feature = "enable_client")]
pub force_connect: Option<MeshAddress>,
/// Flag that indicates if encryption will be used for the underlying
/// connections over the wire. When using a ATE's in built encryption
/// and quantum resistant signatures it is not mandatory to use
/// wire encryption as confidentially and integrity are already enforced however
/// for best security it is advisable to apply a layered defence, of
/// which double encrypting your data and the metadata around it is
/// another defence.
pub wire_encryption: Option<KeySize>,
/// Time to wait for a connection to a server before it times out
pub connect_timeout: Duration,
/// Time to wait for a connection to be accepted during handshaking
#[cfg(feature = "enable_server")]
pub accept_timeout: Duration,
/// Connection attempts will abort quickly in the scenario that something is wrong rather
/// than retrying in an exponential backoff
pub fail_fast: bool,
/// Serialization format of the data on the network pipes between nodes and clients
pub wire_format: SerializationFormat,
/// The transport protocol that will be used for communication. When compiled
/// with the right features this will allow the caller to specify different
/// underlying communication channels
pub wire_protocol: StreamProtocol,
/// Size of the buffer on mesh clients, tweak this number with care
#[cfg(feature = "enable_client")]
pub buffer_size_client: usize,
/// Size of the buffer on mesh servers, tweak this number with care
#[cfg(feature = "enable_server")]
pub buffer_size_server: usize,
}
impl ConfMesh {
/// Represents a skeleton server that can manually receive new connections
#[cfg(feature = "enable_dns")]
#[cfg(feature = "enable_server")]
pub async fn skeleton(
cfg_ate: &ConfAte,
domain: String,
connect_port: u16,
node_id: Option<u32>,
) -> Result<ConfMesh, CommsError> {
let registry = Registry::new(cfg_ate).await;
let mut cfg_mesh = registry
.cfg_for_domain(domain.as_str(), connect_port)
.await?;
cfg_mesh.force_client_only = true;
cfg_mesh.force_node_id = node_id;
Ok(cfg_mesh)
}
/// Represents a single server listening on all available addresses. All chains
/// will be stored locally to this server and there is no replication
#[cfg(feature = "enable_dns")]
#[cfg(feature = "enable_server")]
pub async fn solo(
cfg_ate: &ConfAte,
listen: &IpAddr,
listen_port: Option<u16>,
domain: String,
connect_port: u16,
node_id: Option<u32>
) -> Result<ConfMesh, CommsError> {
let registry = Registry::new(cfg_ate).await;
let addr = MeshAddress::new(listen.clone(), connect_port);
let mut cfg_mesh = registry
.cfg_for_domain(domain.as_str(), connect_port)
.await?;
cfg_mesh.force_client_only = false;
cfg_mesh.force_listen = Some(addr.clone());
cfg_mesh.force_node_id = node_id;
cfg_mesh.force_port = listen_port;
Ok(cfg_mesh)
}
/// Represents a single server listening on all available addresses. All chains
/// will be stored locally to this server and there is no replication
#[cfg(not(feature = "enable_dns"))]
#[cfg(feature = "enable_server")]
pub async fn solo(
cfg_ate: &ConfAte,
domain: String,
listen_port: Option<u16>,
connect_port: u16,
node_id: Option<u32>,
) -> Result<ConfMesh, CommsError> {
let registry = Registry::new(cfg_ate).await;
let addr = MeshAddress::new(domain.as_str(), port);
let mut cfg_mesh = registry.cfg_for_domain(domain.as_str(), port).await?;
cfg_mesh.force_client_only = false;
cfg_mesh.force_listen = Some(addr.clone());
cfg_mesh.force_node_id = node_id;
Ok(cfg_mesh)
}
#[cfg(feature = "enable_dns")]
#[cfg(feature = "enable_server")]
pub async fn solo_from_url(
cfg_ate: &ConfAte,
url: &url::Url,
listen: &IpAddr,
listen_port: Option<u16>,
node_id: Option<u32>,
) -> Result<ConfMesh, CommsError> {
let protocol = StreamProtocol::parse(url)?;
let port = url.port().unwrap_or(protocol.default_port());
let domain = match url.domain() {
Some(a) => a.to_string(),
None => {
bail!(CommsErrorKind::InvalidDomainName);
}
};
let mut ret = ConfMesh::solo(cfg_ate, listen, listen_port, domain, port, node_id).await?;
ret.force_node_id = match node_id {
Some(a) => Some(a),
None => match ret.roots.len() {
1 => Some(0u32),
_ => None,
},
};
Ok(ret)
}
#[cfg(not(feature = "enable_dns"))]
#[cfg(feature = "enable_server")]
pub fn solo_from_url(
cfg_ate: &ConfAte,
url: &url::Url,
node_id: Option<u32>,
) -> Result<ConfMesh, CommsError> {
let protocol = StreamProtocol::parse(url)?;
let port = url.port().unwrap_or(protocol.default_port());
let domain = match url.domain() {
Some(a) => a.to_string(),
None => {
return Err(CommsError::InvalidDomainName);
}
};
ConfMesh::solo(cfg_ate, domain, port, node_id)
}
pub(crate) fn new<'a, 'b>(
domain_name: &'a str,
remote: url::Url,
roots: impl Iterator<Item = &'b MeshAddress>,
) -> ConfMesh {
ConfMesh {
roots: roots.map(|a| a.clone()).collect::<Vec<_>>(),
domain_name: domain_name.to_string(),
remote,
certificate_validation: CertificateValidation::AllowedCertificates(Vec::new()),
#[cfg(feature = "enable_server")]
listen_min_encryption: None,
#[cfg(feature = "enable_server")]
listen_certificate: None,
#[cfg(feature = "enable_client")]
force_client_only: false,
#[cfg(feature = "enable_server")]
force_listen: None,
#[cfg(feature = "enable_server")]
force_port: None,
#[cfg(feature = "enable_server")]
force_node_id: None,
#[cfg(feature = "enable_client")]
force_connect: None,
wire_encryption: Some(KeySize::Bit128),
wire_protocol: StreamProtocol::WebSocket,
wire_format: SerializationFormat::Bincode,
connect_timeout: Duration::from_secs(30),
#[cfg(feature = "enable_server")]
accept_timeout: Duration::from_secs(10),
fail_fast: false,
#[cfg(feature = "enable_client")]
buffer_size_client: 2,
#[cfg(feature = "enable_server")]
buffer_size_server: 10,
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/conf/chain_builder.rs | lib/src/conf/chain_builder.rs | use std::sync::Arc;
use std::sync::Mutex as StdMutex;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::anti_replay::AntiReplayPlugin;
use crate::chain::Chain;
use crate::comms::Metrics;
use crate::comms::NodeId;
use crate::comms::Throttle;
use crate::compact::*;
use crate::crypto::PublicSignKey;
use crate::error::*;
use crate::index::*;
use crate::lint::*;
use crate::pipe::*;
use crate::plugin::*;
use crate::prelude::CentralizedRole;
use crate::prelude::TrustMode;
use crate::session::AteSession;
use crate::session::AteSessionUser;
use crate::time::TimestampEnforcer;
use crate::transform::*;
use crate::tree::TreeAuthorityPlugin;
use crate::trust::ChainKey;
use crate::validator::*;
use super::*;
/// Building class used to construct a chain-of-trust with
/// its user defined plugins and configuration. Nearly always
/// this builder will be used to create and load your chains.
pub struct ChainBuilder {
pub(crate) cfg_ate: ConfAte,
pub(crate) node_id: NodeId,
pub(crate) configured_for: ConfiguredFor,
pub(crate) validators: Vec<Box<dyn EventValidator>>,
pub(crate) compactors: Vec<Box<dyn EventCompactor>>,
pub(crate) linters: Vec<Box<dyn EventMetadataLinter>>,
pub(crate) transformers: Vec<Box<dyn EventDataTransformer>>,
pub(crate) indexers: Vec<Box<dyn EventIndexer>>,
pub(crate) plugins: Vec<Box<dyn EventPlugin>>,
pub(crate) pipes: Option<Arc<Box<dyn EventPipe>>>,
pub(crate) tree: Option<TreeAuthorityPlugin>,
pub(crate) truncate: bool,
pub(crate) temporal: bool,
pub(crate) session: Box<dyn AteSession>,
pub(crate) metrics: Arc<StdMutex<Metrics>>,
pub(crate) throttle: Arc<StdMutex<Throttle>>,
pub(crate) load_integrity: TrustMode,
pub(crate) idle_integrity: TrustMode,
}
impl Clone for ChainBuilder {
fn clone(&self) -> Self {
ChainBuilder {
cfg_ate: self.cfg_ate.clone(),
node_id: self.node_id.clone(),
configured_for: self.configured_for.clone(),
validators: self
.validators
.iter()
.map(|a| a.clone_validator())
.collect::<Vec<_>>(),
compactors: self
.compactors
.iter()
.filter_map(|a| a.clone_compactor())
.collect::<Vec<_>>(),
linters: self
.linters
.iter()
.map(|a| a.clone_linter())
.collect::<Vec<_>>(),
transformers: self
.transformers
.iter()
.map(|a| a.clone_transformer())
.collect::<Vec<_>>(),
indexers: self
.indexers
.iter()
.map(|a| a.clone_indexer())
.collect::<Vec<_>>(),
plugins: self
.plugins
.iter()
.map(|a| a.clone_plugin())
.collect::<Vec<_>>(),
pipes: self.pipes.clone(),
tree: self.tree.clone(),
session: self.session.clone_session(),
truncate: self.truncate,
temporal: self.temporal,
metrics: Arc::clone(&self.metrics),
throttle: Arc::clone(&self.throttle),
load_integrity: self.load_integrity,
idle_integrity: self.idle_integrity,
}
}
}
impl ChainBuilder {
#[allow(dead_code)]
pub async fn new(cfg_ate: &ConfAte) -> ChainBuilder {
ChainBuilder {
cfg_ate: cfg_ate.clone(),
node_id: crate::comms::NodeId::generate_client_id(),
configured_for: cfg_ate.configured_for.clone(),
validators: Vec::new(),
indexers: Vec::new(),
compactors: Vec::new(),
linters: Vec::new(),
transformers: Vec::new(),
plugins: Vec::new(),
pipes: None,
tree: None,
session: AteSessionUser::new().into(),
truncate: false,
temporal: false,
metrics: Arc::new(StdMutex::new(Metrics::default())),
throttle: Arc::new(StdMutex::new(Throttle::default())),
load_integrity: TrustMode::Centralized(CentralizedRole::Client),
idle_integrity: TrustMode::Distributed,
}
.with_defaults()
.await
}
#[allow(dead_code)]
pub async fn with_defaults(mut self) -> Self {
self.validators.clear();
self.indexers.clear();
self.linters.clear();
self.transformers.clear();
self.plugins.clear();
self.compactors.clear();
self.tree = None;
self.truncate = false;
if self.configured_for == ConfiguredFor::Raw {
return self;
}
self.compactors
.push(Box::new(PublicKeyCompactor::default()));
self.compactors
.push(Box::new(SignatureCompactor::default()));
self.compactors
.push(Box::new(RemoveDuplicatesCompactor::default()));
self.compactors
.push(Box::new(TombstoneCompactor::default()));
self.plugins.push(Box::new(AntiReplayPlugin::default()));
match self.configured_for {
ConfiguredFor::SmallestSize => {
self.transformers
.insert(0, Box::new(CompressorWithSnapTransformer::default()));
}
ConfiguredFor::Balanced => {}
ConfiguredFor::BestSecurity => {
self.cfg_ate.dns_sec = true;
}
_ => {}
}
if self.configured_for == ConfiguredFor::Barebone {
self.validators
.push(Box::new(RubberStampValidator::default()));
return self;
} else {
self.tree = Some(crate::tree::TreeAuthorityPlugin::new());
let tolerance = self.configured_for.ntp_tolerance();
self.plugins.push(Box::new(
TimestampEnforcer::new(&self.cfg_ate, tolerance)
.await
.unwrap(),
));
}
self
}
#[allow(dead_code)]
pub fn without_defaults(mut self) -> Self {
self.validators.clear();
self.indexers.clear();
self.compactors.clear();
self.linters.clear();
self.transformers.clear();
self.plugins.clear();
self.tree = None;
self.truncate = false;
self
}
#[allow(dead_code)]
pub fn with_metrics(mut self, metrics: &Arc<StdMutex<Metrics>>) -> Self {
self.metrics = Arc::clone(metrics);
self
}
#[allow(dead_code)]
pub fn with_throttle(mut self, throttle: &Arc<StdMutex<Throttle>>) -> Self {
self.throttle = Arc::clone(throttle);
self
}
#[allow(dead_code)]
pub fn load_integrity(mut self, trust: TrustMode) -> Self {
self.load_integrity = trust;
self
}
#[allow(dead_code)]
pub fn idle_integrity(mut self, trust: TrustMode) -> Self {
self.idle_integrity = trust;
self
}
#[allow(dead_code)]
pub fn add_compactor(mut self, compactor: Box<dyn EventCompactor>) -> Self {
self.compactors.push(compactor);
self
}
#[allow(dead_code)]
pub fn add_validator(mut self, validator: Box<dyn EventValidator>) -> Self {
self.validators.push(validator);
self
}
#[allow(dead_code)]
pub fn add_metadata_linter(mut self, linter: Box<dyn EventMetadataLinter>) -> Self {
self.linters.push(linter);
self
}
#[allow(dead_code)]
pub fn add_data_transformer(mut self, transformer: Box<dyn EventDataTransformer>) -> Self {
self.transformers.push(transformer);
self
}
#[allow(dead_code)]
pub fn add_indexer(mut self, indexer: Box<dyn EventIndexer>) -> Self {
self.indexers.push(indexer);
self
}
#[allow(dead_code)]
pub fn add_plugin(mut self, plugin: Box<dyn EventPlugin>) -> Self {
self.plugins.push(plugin);
self
}
#[allow(dead_code)]
pub fn add_root_public_key(mut self, key: &PublicSignKey) -> Self {
if let Some(tree) = &mut self.tree {
tree.add_root_public_key(key);
}
self
}
#[allow(dead_code)]
pub(crate) fn add_pipe(mut self, mut pipe: Box<dyn EventPipe>) -> Self {
let next = self.pipes.take();
if let Some(next) = next {
pipe.set_next(next);
}
self.pipes = Some(Arc::new(pipe));
self
}
#[cfg(feature = "enable_local_fs")]
#[allow(dead_code)]
pub fn postfix_log_path(mut self, postfix: &str) -> Self {
let orig_path = match self.cfg_ate.log_path.as_ref() {
Some(a) => a.clone(),
None => {
return self;
}
};
// Remove any prefix slash as this will already be there
let mut postfix = postfix.to_string();
while postfix.starts_with("/") {
postfix = postfix[1..].to_string();
}
if postfix.len() <= 0 {
return self;
}
let path = match orig_path.ends_with("/") {
true => format!("{}{}", orig_path, postfix),
false => format!("{}/{}", orig_path, postfix),
};
self.cfg_ate.log_path = Some(path);
// Also update the backup path
if let Some(backup_path) = self.cfg_ate.backup_path.as_ref() {
let backup_path = backup_path.clone();
let path = match backup_path.ends_with("/") {
true => format!("{}{}", backup_path, postfix),
false => format!("{}/{}", backup_path, postfix),
};
self.cfg_ate.backup_path = Some(path);
}
self
}
#[allow(dead_code)]
pub fn set_session(mut self, session: Box<dyn AteSession>) -> Self {
self.session = session;
self
}
#[allow(dead_code)]
pub fn truncate(mut self, val: bool) -> Self {
self.truncate = val;
self
}
#[allow(dead_code)]
pub fn temporal(mut self, val: bool) -> Self {
self.temporal = val;
self
}
pub fn node_id(mut self, client_id: NodeId) -> Self {
self.node_id = client_id;
self
}
pub fn cfg_ate(&self) -> &ConfAte {
&self.cfg_ate
}
#[allow(dead_code)]
pub fn build(self) -> Arc<ChainBuilder> {
Arc::new(self)
}
pub async fn open(self: &Arc<Self>, key: &ChainKey) -> Result<Arc<Chain>, ChainCreationError> {
let ret = Arc::new(
Chain::new(
(**self).clone(),
key,
self.load_integrity,
self.idle_integrity,
)
.await?,
);
Ok(ret)
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/conf/mod.rs | lib/src/conf/mod.rs | pub mod chain_builder;
pub mod conf_ate;
pub mod configured_for;
pub mod mesh;
pub mod mesh_address;
pub mod tests;
pub use chain_builder::*;
pub use conf_ate::*;
pub use configured_for::*;
pub use mesh::*;
pub use mesh_address::*;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/conf/conf_ate.rs | lib/src/conf/conf_ate.rs | use std::time::Duration;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::compact::CompactMode;
use crate::mesh::BackupMode;
use crate::mesh::RecoveryMode;
use crate::spec::*;
use super::*;
/// Configuration settings for the ATE datastore
///
#[derive(Debug, Clone)]
pub struct ConfAte {
/// Optimizes ATE for a specific group of usecases.
pub(super) configured_for: ConfiguredFor,
/// Specifies the recovery mode that the mesh will take.
pub recovery_mode: RecoveryMode,
/// Specifies the log compaction mode for the redo log.
pub compact_mode: CompactMode,
/// Compacts the redo log on bootstrapping of the program.
pub compact_bootstrap: bool,
/// Compacts the redo log on cleanup
pub compact_cleanup: bool,
/// Directory path that the redo logs will be stored.
/// (if this option is none then the logs will be stored in memory)
#[cfg(feature = "enable_local_fs")]
pub log_path: Option<String>,
/// (Optional) List of nodes that make up the mesh, otherwise they will be
/// built from the DNS A records if not supplied here
pub nodes: Option<Vec<String>>,
/// Directory path that the backup files will be stored and fetched.
/// (if this option is none then the logs will not be backed up)
#[cfg(feature = "enable_local_fs")]
pub backup_path: Option<String>,
/// Specifies the backup mode that the mesh will undertake
pub backup_mode: BackupMode,
/// NTP pool server which ATE will synchronize its clocks with, its
/// important to have synchronized clocks with ATE as it uses time as
/// digest to prevent replay attacks
#[cfg(feature = "enable_ntp")]
pub ntp_pool: String,
/// Port that the NTP server is listening on (defaults to 123)
#[cfg(feature = "enable_ntp")]
pub ntp_port: u16,
/// Flag that indicates if the time keeper will sync with NTP or not
/// (avoiding NTP sync means one can run fully offline but time drift
/// will cause issues with multi factor authentication and timestamps)
#[cfg(feature = "enable_ntp")]
pub ntp_sync: bool,
/// Flag that determines if ATE will use DNSSec or just plain DNS
pub dns_sec: bool,
/// DNS server that queries will be made do by the chain registry
pub dns_server: String,
/// Synchronization tolerance whereby event duplication during connection phases
/// and compaction efficiency are impacted. Greater tolerance will reduce the
/// possibility of data lose on specific edge-cases while shorter tolerance will
/// improve space and network efficiency. It is not recommended to select a value
/// lower than a few seconds while increasing the value to days will impact performance.
/// (default=30 seconds)
pub sync_tolerance: Duration,
/// Size of the local cache that stores redo log entries in memory
#[cfg(feature = "enable_local_fs")]
pub load_cache_size: usize,
/// Number of seconds that redo log entries will remain in memory before
/// they are evicted
#[cfg(feature = "enable_local_fs")]
pub load_cache_ttl: u64,
/// Serialization format of the log files
pub log_format: MessageFormat,
/// Size of the buffer used by the chain-of-trust
pub buffer_size_chain: usize,
/// Timeout before an attempt to lock a data object fails
pub lock_attempt_timeout: Duration,
/// Timeout before an attempt to load a data object fails
pub load_timeout: Duration,
/// Flag that indicates if the type name should always be saved in the event log.
/// Added the type-name consumes space but gives extra debug information
pub record_type_name: bool,
}
impl Default for ConfAte {
fn default() -> ConfAte {
ConfAte {
#[cfg(feature = "enable_local_fs")]
log_path: None,
dns_sec: false,
dns_server: "8.8.8.8".to_string(),
recovery_mode: RecoveryMode::ReadOnlyAsync,
#[cfg(feature = "enable_local_fs")]
backup_path: None,
backup_mode: BackupMode::Full,
compact_mode: CompactMode::Never,
compact_bootstrap: false,
compact_cleanup: false,
sync_tolerance: Duration::from_secs(30),
#[cfg(feature = "enable_ntp")]
ntp_sync: true,
#[cfg(feature = "enable_ntp")]
ntp_pool: "pool.ntp.org".to_string(),
#[cfg(feature = "enable_ntp")]
ntp_port: 123,
configured_for: ConfiguredFor::default(),
#[cfg(feature = "enable_local_fs")]
load_cache_size: 1000,
#[cfg(feature = "enable_local_fs")]
load_cache_ttl: 30,
log_format: MessageFormat {
meta: SerializationFormat::Bincode,
data: SerializationFormat::Json,
},
buffer_size_chain: 1,
lock_attempt_timeout: Duration::from_secs(20),
load_timeout: Duration::from_secs(20),
record_type_name: false,
nodes: None,
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/conf/configured_for.rs | lib/src/conf/configured_for.rs | use crate::spec::*;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use super::*;
/// Determines what optimizes and defaults ATE selects based of a particular
/// group of usecases
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq)]
pub enum ConfiguredFor {
/// ATE is left completely unconfigured with no-assumptions and no default functionality
Raw,
/// ATE is configured with the minimum that is considered at least functional
Barebone,
/// ATE will optimize its usage for the redo-logs with the smallest size possible, this
/// includes using compression on the data streams by default.
SmallestSize,
/// ATE will use serializers that are much faster than normal however they do not support
/// forward or backwards compatibility meaning changes to the data object schemas will
/// break your trees thus you will need to handle versioning yourself manually.
BestPerformance,
/// ATE will use serializers that provide both forward and backward compatibility for changes
/// to the metadata schema and the data schema. This format while slower than the performance
/// setting allows seamless upgrades and changes to your model without breaking existing data.
BestCompatibility,
/// A balance between performance, compatibility and security that gives a bit of each without
/// without going towards the extremes of any. For instance, the data model is forwards and
/// backwards compatible however the metadata is not. Encryption is good eno\for all known
/// attacks of today but less protected against unknown attacks of the future.
Balanced,
/// Provides the best encryption routines available at the expense of performance and size
BestSecurity,
}
impl ConfiguredFor {
pub fn ntp_tolerance(&self) -> u32 {
match self {
ConfiguredFor::BestPerformance => 60000u32,
ConfiguredFor::BestSecurity => 30000u32,
_ => 40000u32,
}
}
}
impl std::str::FromStr for ConfiguredFor {
type Err = &'static str;
fn from_str(s: &str) -> Result<Self, Self::Err> {
match s {
"raw" => Ok(ConfiguredFor::Raw),
"barebone" => Ok(ConfiguredFor::Barebone),
"best_performance" => Ok(ConfiguredFor::BestPerformance),
"performance" => Ok(ConfiguredFor::BestPerformance),
"speed" => Ok(ConfiguredFor::BestPerformance),
"best_compatibility" => Ok(ConfiguredFor::BestCompatibility),
"compatibility" => Ok(ConfiguredFor::BestCompatibility),
"balanced" => Ok(ConfiguredFor::Balanced),
"best_security" => Ok(ConfiguredFor::BestSecurity),
"security" => Ok(ConfiguredFor::BestSecurity),
_ => Err("valid values are 'raw', 'barebone', 'best_performance', 'performance', 'speed', 'best_compatibility', 'compatibility', 'balanced', 'best_security' and 'security'"),
}
}
}
impl Default for ConfiguredFor {
fn default() -> ConfiguredFor {
ConfiguredFor::Balanced
}
}
impl ConfAte {
pub fn configured_for(&mut self, configured_for: ConfiguredFor) {
self.configured_for = configured_for;
match configured_for {
ConfiguredFor::BestPerformance => {
self.log_format.meta = SerializationFormat::Bincode;
self.log_format.data = SerializationFormat::Bincode;
}
ConfiguredFor::BestCompatibility => {
self.log_format.meta = SerializationFormat::Json;
self.log_format.data = SerializationFormat::Json;
}
_ => {
self.log_format.meta = SerializationFormat::Bincode;
self.log_format.data = SerializationFormat::Json;
}
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/tree/linter.rs | lib/src/tree/linter.rs | use error_chain::bail;
use std::sync::Arc;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::error::*;
use crate::lint::*;
use crate::meta::*;
use crate::session::*;
use crate::signature::*;
use crate::transaction::*;
use super::*;
impl EventMetadataLinter for TreeAuthorityPlugin {
fn clone_linter(&self) -> Box<dyn EventMetadataLinter> {
Box::new(self.clone())
}
fn metadata_lint_many<'a>(
&self,
headers: &Vec<LintData<'a>>,
session: &'_ dyn AteSession,
conversation: Option<&Arc<ConversationSession>>,
) -> Result<Vec<CoreMetadata>, LintError> {
let mut ret = Vec::new();
let mut other = self
.signature_plugin
.metadata_lint_many(headers, session, conversation)?;
ret.append(&mut other);
Ok(ret)
}
fn metadata_lint_event(
&self,
meta: &Metadata,
session: &'_ dyn AteSession,
trans_meta: &TransactionMetadata,
type_code: &str,
) -> Result<Vec<CoreMetadata>, LintError> {
let mut ret = Vec::new();
let mut sign_with = Vec::new();
// Signatures a done using the authorizations before its attached
let auth = self.compute_auth(meta, trans_meta, ComputePhase::BeforeStore)?;
match auth.write {
WriteOption::Specific(_) | WriteOption::Any(_) => {
for write_hash in auth.write.vals().iter() {
// Add any signing keys that we have
sign_with.append(
&mut session
.write_keys(AteSessionKeyCategory::AllKeys)
.filter(|p| p.hash() == *write_hash)
.map(|p| p.hash())
.collect::<Vec<_>>(),
);
}
if meta.needs_signature() && sign_with.len() <= 0 {
// This record has no authorization
return match meta.get_data_key() {
Some(key) => Err(LintErrorKind::TrustError(
TrustErrorKind::NoAuthorizationWrite(
type_code.to_string(),
key,
auth.write,
),
)
.into()),
None => Err(LintErrorKind::TrustError(
TrustErrorKind::NoAuthorizationOrphan,
)
.into()),
};
}
// Add the signing key hashes for the later stages
if sign_with.len() > 0 {
ret.push(CoreMetadata::SignWith(MetaSignWith { keys: sign_with }));
}
}
WriteOption::Inherit => {
bail!(LintErrorKind::TrustError(
TrustErrorKind::UnspecifiedWritability
));
}
WriteOption::Everyone => {}
WriteOption::Nobody => {
bail!(LintErrorKind::TrustError(TrustErrorKind::OwnedByNobody(
type_code.to_string()
)));
}
}
// Now lets add all the encryption keys
let auth = self.compute_auth(meta, trans_meta, ComputePhase::AfterStore)?;
let key_hash = match &auth.read {
ReadOption::Everyone(key) => match key {
Some(a) => Some(a.short_hash()),
None => None,
},
ReadOption::Specific(read_hash, derived) => {
let mut ret = session
.read_keys(AteSessionKeyCategory::AllKeys)
.filter(|p| p.hash() == *read_hash)
.filter_map(|p| derived.transmute(p).ok())
.map(|p| p.short_hash())
.next();
if ret.is_none() {
ret = session
.private_read_keys(AteSessionKeyCategory::AllKeys)
.filter(|p| p.hash() == *read_hash)
.filter_map(|p| derived.transmute_private(p).ok())
.map(|p| p.short_hash())
.next();
}
if ret.is_none() {
if let Some(key) = meta.get_data_key() {
bail!(LintErrorKind::TrustError(
TrustErrorKind::NoAuthorizationRead(
type_code.to_string(),
key,
auth.read
)
));
}
}
ret
}
_ => None,
};
if let Some(key_hash) = key_hash {
ret.push(CoreMetadata::Confidentiality(MetaConfidentiality {
hash: key_hash,
_cache: Some(auth.read),
}));
}
// Now run the signature plugin
ret.extend(
self.signature_plugin
.metadata_lint_event(meta, session, trans_meta, type_code)?,
);
// We are done
Ok(ret)
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/tree/sink.rs | lib/src/tree/sink.rs | use std::sync::Arc;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::error::*;
use crate::event::*;
use crate::meta::*;
use crate::sink::*;
use crate::transaction::*;
use super::*;
impl EventSink for TreeAuthorityPlugin {
fn feed(
&mut self,
header: &EventHeader,
conversation: Option<&Arc<ConversationSession>>,
) -> Result<(), SinkError> {
if let Some(key) = header.meta.get_tombstone() {
self.auth.remove(&key);
self.parents.remove(&key);
} else if let Some(key) = header.meta.get_data_key() {
self.auth.insert(
key,
match header.meta.get_authorization() {
Some(a) => a.clone(),
None => MetaAuthorization {
read: ReadOption::Inherit,
write: WriteOption::Inherit,
},
},
);
if let Some(parent) = header.meta.get_parent() {
if parent.vec.parent_id != key {
self.parents.insert(key, parent.clone());
}
}
}
self.signature_plugin.feed(header, conversation)?;
Ok(())
}
fn reset(&mut self) {
self.auth.clear();
self.parents.clear();
self.signature_plugin.reset();
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/tree/mod.rs | lib/src/tree/mod.rs | pub mod compute;
pub mod generate_encrypt_key;
pub mod get_encrypt_key;
pub mod linter;
pub mod plugin;
pub mod sink;
pub mod transformer;
pub mod validator;
pub use generate_encrypt_key::*;
pub use get_encrypt_key::*;
pub use linter::*;
pub use plugin::*;
pub use sink::*;
pub use transformer::*;
pub use validator::*;
pub(self) use compute::*;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/tree/generate_encrypt_key.rs | lib/src/tree/generate_encrypt_key.rs | #[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::crypto::*;
use crate::error::*;
use crate::meta::*;
use crate::session::*;
use super::*;
impl TreeAuthorityPlugin {
pub(super) fn generate_encrypt_key(
&self,
auth: &ReadOption,
session: &'_ dyn AteSession,
) -> Result<Option<(InitializationVector, EncryptKey)>, TransformError> {
match auth {
ReadOption::Inherit => Err(TransformErrorKind::UnspecifiedReadability.into()),
ReadOption::Everyone(_key) => Ok(None),
ReadOption::Specific(key_hash, derived) => {
for key in session.read_keys(AteSessionKeyCategory::AllKeys) {
if key.hash() == *key_hash {
return Ok(Some((
InitializationVector::generate(),
derived.transmute(key)?,
)));
}
}
for key in session.private_read_keys(AteSessionKeyCategory::AllKeys) {
if key.hash() == *key_hash {
return Ok(Some((
InitializationVector::generate(),
derived.transmute_private(key)?,
)));
}
}
Err(TransformErrorKind::MissingReadKey(key_hash.to_hex_string()).into())
}
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.