repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/examples/condvar/src/main.rs | examples/condvar/src/main.rs | //#![feature(unboxed_closures)]
//#![feature(fn_traits)]
use std::{
thread,
time::Duration,
sync::{
Arc,
Mutex,
Condvar,
},
};
fn main() {
    // A shared (flag, condvar) pair: a worker thread flips the flag and
    // signals, while the main thread sleeps on the condvar until it
    // observes the flip.
    let shared = Arc::new((Mutex::new(false), Condvar::new()));
    let (lock, cvar) = &*shared;

    // Take the lock before spawning so the worker cannot set the flag
    // until we are parked inside cvar.wait().
    let mut flag = lock.lock().unwrap();
    println!("condvar1 thread spawn");

    let worker_shared = Arc::clone(&shared);
    thread::spawn(move || {
        {
            println!("condvar1 thread started");
            let (lock, cvar) = &*worker_shared;
            println!("condvar1 thread sleep(1sec) start");
            thread::sleep(Duration::from_secs(1));
            println!("condvar1 thread sleep(1sec) end");
            let mut flag = lock.lock().unwrap();
            *flag = true;
            println!("condvar1 thread set condition");
            // Wake exactly one waiter (the main thread).
            cvar.notify_one();
            println!("condvar1 thread notify");
        } // guard dropped here so the main thread can re-acquire the mutex
        thread::sleep(Duration::from_millis(50));
        println!("condvar1 thread exit");
    });

    thread::sleep(Duration::from_millis(100));
    println!("condvar loop");
    // Re-check the flag on every wakeup to guard against spurious wakeups.
    loop {
        if *flag {
            break;
        }
        println!("condvar wait");
        flag = cvar.wait(flag).unwrap();
        println!("condvar woken");
    }
    println!("condvar parent done");
    thread::sleep(Duration::from_millis(100));
    println!("all done");
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/examples/multi-threading/src/main.rs | examples/multi-threading/src/main.rs | //#![feature(unboxed_closures)]
//#![feature(fn_traits)]
use std::{
thread,
time::Duration,
};
fn main() {
    // Spawn nine workers, staggered 100ms apart, then block until every
    // one of them has finished.
    let handles: Vec<_> = (1..10u32)
        .map(|id| {
            let handle = thread::spawn(move || {
                println!("thread {} started", id);
                thread::sleep(Duration::from_secs(4));
                println!("thread {} finished", id);
            });
            // Stagger the spawns so the start messages interleave visibly.
            thread::sleep(Duration::from_millis(100));
            handle
        })
        .collect();
    println!("waiting for threads");
    for handle in handles {
        handle.join().unwrap();
    }
    println!("all done");
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/examples/wasm64-example/src/main.rs | examples/wasm64-example/src/main.rs | fn main() {
println!("Hello World.");
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/examples/ws-client/src/main.rs | examples/ws-client/src/main.rs | use wasmer_bus_ws::prelude::*;
/// Opens a websocket to a public echo endpoint, sends three bytes and
/// asserts that exactly the same bytes come back.
fn main() {
    println!("creating web socket and opening");
    // Blocking connect; panics if the endpoint is unreachable.
    let ws = SocketBuilder::new_str("wss://ws.postman-echo.com/raw")
        .unwrap()
        .blocking_open()
        .unwrap();
    let data = vec![ 1u8, 2u8, 3u8 ];
    println!("sending data to socket");
    // Split into independent send and receive halves.
    let (mut tx, mut rx) = ws.split();
    tx.blocking_send(data).unwrap();
    println!("receiving data from socket");
    let test = rx.blocking_recv();
    // The echo server must return the payload unchanged.
    assert!(test == Some(vec![ 1u8, 2u8, 3u8 ]), "data is not the same");
    println!("success");
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/examples/sub-process/src/main.rs | examples/sub-process/src/main.rs | use wasmer_bus_process::prelude::*;
/// Runs `ls` as a child process with stdin/stdout/stderr inherited from
/// this process, then waits for it to exit.
fn main() {
    let mut task = Command::new("ls")
        .stdin(Stdio::inherit())
        .stderr(Stdio::inherit())
        .stdout(Stdio::inherit())
        .spawn()
        .expect("ls command failed to start");
    // Block until the child exits; panics if waiting fails.
    task.wait().unwrap();
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/examples/thread-local/src/main.rs | examples/thread-local/src/main.rs | use std::cell::Cell;
use std::thread;
use std::time::Duration;
/// Test payload used to exercise thread-local storage with variants of
/// different payload sizes (unit, u32 and u128).
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
enum TestEnum {
    SecondEnum(u32),
    FirstEnum,
    ThirdEnum(u128)
}
// Thread-local slot: each thread gets its own copy, starting at FirstEnum.
thread_local! { static VAR1: Cell<TestEnum> = Cell::new(TestEnum::FirstEnum); }
/// Worker body: checks this thread's VAR1 starts at its initial value
/// (writes in other threads must not leak in), then mutates it and checks
/// the mutation persists across a sleep.
fn xs() {
    println!("VAR1 in thread step 1: {:?}",VAR1.with(|v| {v.get()}));
    assert_eq!(VAR1.with(|v| {v.get()}), TestEnum::FirstEnum);
    std::thread::sleep(Duration::from_millis(400));
    // Still the initial value even though main has written by now.
    println!("VAR1 in thread step 2: {:?}",VAR1.with(|v| {v.get()}));
    assert_eq!(VAR1.with(|v| {v.get()}), TestEnum::FirstEnum);
    VAR1.with(|v| {v.set(TestEnum::SecondEnum(4))});
    println!("VAR1 in thread step 3: {:?}",VAR1.with(|v| {v.get()}));
    assert_eq!(VAR1.with(|v| {v.get()}), TestEnum::SecondEnum(4));
    std::thread::sleep(Duration::from_millis(100));
    // Our own write is still visible after sleeping.
    println!("VAR1 in thread step 4: {:?}",VAR1.with(|v| {v.get()}));
    assert_eq!(VAR1.with(|v| {v.get()}), TestEnum::SecondEnum(4));
}
fn main() {
println!("VAR1 in main before change: {:?}",VAR1.with(|v| {v.get()}));
assert_eq!(VAR1.with(|v| {v.get()}), TestEnum::FirstEnum);
let mut joins = Vec::new();
for _ in 0..2 {
let t1 = thread::spawn(xs);
joins.push(t1);
}
VAR1.with(|v| {v.set(TestEnum::ThirdEnum(u128::MAX))});
println!("VAR1 in main after change: {:?}",VAR1.with(|v| {v.get()}));
assert_eq!(VAR1.with(|v| {v.get()}), TestEnum::ThirdEnum(u128::MAX));
let mut joins = Vec::new();
for _ in 0..10 {
let t1 = thread::spawn(xs);
std::thread::sleep(Duration::from_millis(50));
joins.push(t1);
}
std::thread::sleep(Duration::from_millis(500));
VAR1.with(|v| {v.set(TestEnum::SecondEnum(998877))});
println!("VAR1 in main after thread midpoint: {:?}",VAR1.with(|v| {v.get()}));
assert_eq!(VAR1.with(|v| {v.get()}), TestEnum::SecondEnum(998877));
for t1 in joins {
t1.join().unwrap();
}
println!("VAR1 in main after thread join: {:?}",VAR1.with(|v| {v.get()}));
assert_eq!(VAR1.with(|v| {v.get()}), TestEnum::SecondEnum(998877));
} | rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/examples/tcp-listener/src/main.rs | examples/tcp-listener/src/main.rs | use std::net::TcpListener;
/// Minimal TCP listener: accepts connections forever on 127.0.0.1:7878
/// and drops each one immediately after logging it.
fn main() {
    let listener = TcpListener::bind("127.0.0.1:7878").unwrap();
    println!("Listening on {}", listener.local_addr().unwrap());
    // incoming() is an endless iterator of accept() results.
    for stream in listener.incoming() {
        let _stream = stream.unwrap();
        println!("Connection established!");
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/examples/fuse/src/main.rs | examples/fuse/src/main.rs | use std::sync::Arc;
use std::sync::atomic::AtomicU64;
use std::sync::atomic::Ordering;
use wasmer_bus_fuse::api;
use wasmer_bus_fuse::api::FuseSimplified;
use wasmer_bus_fuse::prelude::*;
use wasmer_bus_fuse::api::FileSystemSimplified;
use wasmer_bus_fuse::api::FileIOSimplified;
use wasmer_bus_fuse::api::OpenedFileSimplified;
/// Fuse service entry point: hands out a filesystem for any mount name.
#[derive(Debug)]
struct MyFuse { }
#[async_trait]
impl FuseSimplified
for MyFuse {
    /// Ignores the requested mount name and always serves the same
    /// read-only, in-memory filesystem.
    async fn mount(&self, _name: String) -> Result<Arc<dyn api::FileSystem>, BusError> {
        Ok(Arc::new(
            MyFileSystem { }
        ))
    }
}
/// Static, read-only filesystem exposing a single file: /readme.md.
#[derive(Debug)]
struct MyFileSystem { }
/// Metadata template for the root directory (timestamps fixed at 0).
/// NOTE(review): `len` is set to the README's byte length even though
/// this describes a directory — looks copy-pasted from META_FILE below;
/// confirm whether 0 was intended.
static META_DIR: Metadata = Metadata {
    ft: api::FileType {
        dir: true,
        file: false,
        symlink: false,
        char_device: false,
        block_device: false,
        socket: false,
        fifo: false,
    },
    accessed: 0,
    created: 0,
    modified: 0,
    len: README.as_bytes().len() as u64,
};
/// Metadata template for readme.md: a regular file whose length is the
/// README's byte count (timestamps fixed at 0).
static META_FILE: Metadata = Metadata {
    ft: api::FileType {
        dir: false,
        file: true,
        symlink: false,
        char_device: false,
        block_device: false,
        socket: false,
        fifo: false,
    },
    accessed: 0,
    created: 0,
    modified: 0,
    len: README.as_bytes().len() as u64,
};
#[async_trait]
impl FileSystemSimplified
for MyFileSystem
{
    /// No state to set up.
    async fn init(&self) -> FsResult<()> {
        Ok(())
    }
    /// Only the root directory exists; it lists itself (".") and readme.md.
    async fn read_dir(&self, path: String) -> FsResult<Dir> {
        if path == "/" {
            FsResult::Ok(Dir {
                data: vec![
                    api::DirEntry {
                        path: ".".to_string(),
                        metadata: Some(META_DIR.clone()),
                    },
                    api::DirEntry {
                        path: "readme.md".to_string(),
                        metadata: Some(META_FILE.clone()),
                    },
                ]
            })
        } else {
            FsResult::Err(FsError::EntityNotFound)
        }
    }
    // The filesystem is immutable: every mutating operation is refused.
    async fn create_dir(&self, _path: String) -> FsResult<Metadata> {
        FsResult::Err(FsError::PermissionDenied)
    }
    async fn remove_dir(&self, _path: String) -> FsResult<()> {
        FsResult::Err(FsError::PermissionDenied)
    }
    async fn rename(&self, _from: String, _to: String) -> FsResult<()> {
        FsResult::Err(FsError::PermissionDenied)
    }
    async fn remove_file(&self, _path: String) -> FsResult<()> {
        FsResult::Err(FsError::PermissionDenied)
    }
    /// Metadata for the two known paths; anything else does not exist.
    async fn read_metadata(&self, path: String) -> FsResult<Metadata> {
        if path == "/" || path == "." {
            FsResult::Ok(META_DIR.clone())
        } else if path == "/readme.md" {
            FsResult::Ok(META_FILE.clone())
        } else {
            FsResult::Err(FsError::EntityNotFound)
        }
    }
    /// No symlinks exist, so symlink metadata is plain metadata.
    async fn read_symlink_metadata(&self, path: String) -> FsResult<Metadata> {
        self.read_metadata(path).await
    }
    /// Open options are ignored; only /readme.md can be opened.
    async fn open(&self, path: String, _options: api::OpenOptions) -> Result<Arc<dyn api::OpenedFile>, BusError> {
        if path == "/readme.md" {
            Result::Ok(Arc::new(MyFile::default()))
        } else {
            Result::Err(BusError::Aborted)
        }
    }
}
// The single file's contents served by this example filesystem.
static README: &'static str = r#"# Example Readme
This is an example readme file from the fuse example service.
"#;
/// An opened handle onto readme.md; `pos` is the read cursor.  Because it
/// sits behind an Arc, clones of this handle share the same cursor.
#[derive(Debug, Default, Clone)]
struct MyFile {
    pos: Arc<AtomicU64>,
}
#[async_trait]
impl OpenedFileSimplified
for MyFile {
    async fn meta(&self) -> FsResult<Metadata> {
        FsResult::Ok(META_FILE.clone())
    }
    /// Read-only filesystem: deletion refused.
    async fn unlink(&self) -> FsResult<()> {
        FsResult::Err(FsError::PermissionDenied)
    }
    /// Read-only filesystem: truncation/extension refused.
    async fn set_len(&self, _len: u64) -> FsResult<()> {
        FsResult::Err(FsError::PermissionDenied)
    }
    /// The handle itself implements FileIO; the clone shares `pos`.
    async fn io(&self) -> Result<Arc<dyn api::FileIO>, BusError> {
        Result::Ok(
            Arc::new(self.clone())
        )
    }
}
#[async_trait]
impl FileIOSimplified
for MyFile {
    /// Repositions the read cursor and returns the new position.
    ///
    /// Negative relative offsets work through two's-complement wrap-around:
    /// casting a negative `i64` to `u64` and doing a wrapping add is
    /// equivalent to subtraction modulo 2^64.
    async fn seek(&self, from: api::SeekFrom) -> FsResult<u64> {
        FsResult::Ok(
            match from {
                api::SeekFrom::Current(a) => {
                    let a = a as u64;
                    // fetch_add always wraps, but the original `prev + a`
                    // was a plain add that panics in debug builds whenever
                    // `a` is a cast negative offset; wrapping_add keeps the
                    // intended modular arithmetic in all build modes.
                    self.pos.fetch_add(a, Ordering::AcqRel).wrapping_add(a)
                },
                api::SeekFrom::End(a) => {
                    let a = (README.as_bytes().len() as i64) + a;
                    let a = a as u64;
                    self.pos.store(a, Ordering::Release);
                    a
                },
                api::SeekFrom::Start(a) => {
                    let a = a as u64;
                    self.pos.store(a, Ordering::Release);
                    a
                }
            }
        )
    }
    /// Nothing is buffered, so flushing is a no-op.
    async fn flush(&self) -> FsResult<()> {
        FsResult::Ok(())
    }
    /// The file is read-only.
    async fn write(&self, _data: Vec<u8>) -> FsResult<u64> {
        FsResult::Err(FsError::PermissionDenied)
    }
    /// Reads up to `len` bytes starting at the current cursor, returning an
    /// empty buffer once the cursor is at or past end-of-file.
    ///
    /// NOTE(review): reading does not advance the cursor — callers must
    /// seek explicitly; confirm this matches the FileIO contract.
    async fn read(&self, len: u64) -> FsResult<Vec<u8>> {
        let buf = README.as_bytes();
        let pos = self.pos.load(Ordering::Acquire) as usize;
        if pos >= buf.len() {
            FsResult::Ok(Vec::new())
        } else {
            // saturating_add avoids the debug-build overflow panic that
            // `pos + (len as usize)` would hit for very large `len`.
            let pos_end = buf.len().min(pos.saturating_add(len as usize));
            FsResult::Ok(buf[pos..pos_end].to_vec())
        }
    }
}
// Multi-threaded tokio runtime that hosts the fuse service.
#[tokio::main(flavor = "multi_thread")]
async fn main() {
    let fuse = MyFuse { };
    // Register the service, then drive incoming requests until it ends.
    api::FuseService::listen(Arc::new(fuse));
    api::FuseService::serve().await;
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/stream.rs | ateweb/src/stream.rs | use core::task::{Context, Poll};
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::net::TcpStream;
use tokio_rustls::server::TlsStream;
#[allow(unused_imports, dead_code)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use hyper_tungstenite::WebSocketStream;
use hyper_tungstenite::tungstenite::Message;
use futures::stream::SplitSink;
use futures::stream::SplitStream;
use futures::SinkExt;
use futures_util::StreamExt;
use bytes::Bytes;
/// A hyper-compatible connection that is either plain TCP or TLS over TCP,
/// always paired with the remote peer's address.
pub enum HyperStream
where
    Self: Send + Sync,
{
    PlainTcp((TcpStream, SocketAddr)),
    Tls((TlsStream<TcpStream>, SocketAddr)),
}
impl HyperStream {
    /// Address of the remote peer, regardless of transport.
    pub fn remote_addr(&self) -> &SocketAddr {
        match self {
            HyperStream::PlainTcp((_, addr)) => addr,
            HyperStream::Tls((_, addr)) => addr,
        }
    }
}
// AsyncRead/AsyncWrite simply delegate every call to whichever inner
// stream is active.
impl AsyncRead for HyperStream {
    fn poll_read(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        match self.get_mut() {
            HyperStream::PlainTcp((a, _)) => Pin::new(a).poll_read(cx, buf),
            HyperStream::Tls((a, _)) => Pin::new(a).poll_read(cx, buf),
        }
    }
}
impl AsyncWrite for HyperStream {
    fn poll_write(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<io::Result<usize>> {
        match self.get_mut() {
            HyperStream::PlainTcp((a, _)) => Pin::new(a).poll_write(cx, buf),
            HyperStream::Tls((a, _)) => Pin::new(a).poll_write(cx, buf),
        }
    }
    fn poll_write_vectored(
        self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        bufs: &[io::IoSlice<'_>],
    ) -> Poll<io::Result<usize>> {
        match self.get_mut() {
            HyperStream::PlainTcp((a, _)) => Pin::new(a).poll_write_vectored(cx, bufs),
            HyperStream::Tls((a, _)) => Pin::new(a).poll_write_vectored(cx, bufs),
        }
    }
    fn is_write_vectored(&self) -> bool {
        match self {
            HyperStream::PlainTcp((a, _)) => Pin::new(a).is_write_vectored(),
            HyperStream::Tls((a, _)) => Pin::new(a).is_write_vectored(),
        }
    }
    #[inline]
    fn poll_flush(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        match self.get_mut() {
            HyperStream::PlainTcp((a, _)) => Pin::new(a).poll_flush(cx),
            HyperStream::Tls((a, _)) => Pin::new(a).poll_flush(cx),
        }
    }
    fn poll_shutdown(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<io::Result<()>> {
        match self.get_mut() {
            HyperStream::PlainTcp((a, _)) => Pin::new(a).poll_shutdown(cx),
            HyperStream::Tls((a, _)) => Pin::new(a).poll_shutdown(cx),
        }
    }
}
/// Write half of a split websocket: each `poll_write` becomes one binary
/// websocket frame.
#[derive(Debug)]
pub struct SendHalf<S>
where S: AsyncRead + AsyncWrite + Unpin
{
    sink: SplitSink<WebSocketStream<S>, Message>,
}
impl<S> SendHalf<S>
where S: AsyncRead + AsyncWrite + Unpin
{
    /// Wraps the sink half produced by `WebSocketStream::split`.
    pub fn new(sink: SplitSink<WebSocketStream<S>, Message>) -> Self {
        Self {
            sink,
        }
    }
}
impl<S> AsyncWrite
for SendHalf<S>
where S: AsyncRead + AsyncWrite + Unpin
{
    /// Sends `buf` as a single binary frame once the sink is ready.
    /// Sink errors surface as `BrokenPipe` I/O errors.
    fn poll_write(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &[u8],
    ) -> Poll<Result<usize, io::Error>>
    {
        match self.sink.poll_ready_unpin(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Ok(_)) => {
                match self.sink.start_send_unpin(Message::Binary(buf.to_vec())) {
                    // The whole buffer was queued as one frame.
                    Ok(_) => Poll::Ready(Ok(buf.len())),
                    Err(err) => {
                        return Poll::Ready(Err(
                            io::Error::new(io::ErrorKind::BrokenPipe, err.to_string())
                        ));
                    }
                }
            }
            Poll::Ready(Err(err)) => {
                return Poll::Ready(Err(
                    io::Error::new(io::ErrorKind::BrokenPipe, err.to_string())
                ));
            }
        }
    }
    /// Delegates to the sink's flush, mapping errors to `BrokenPipe`.
    fn poll_flush(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>
    ) -> Poll<Result<(), io::Error>>
    {
        match self.sink.poll_flush_unpin(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Ok(_)) => Poll::Ready(Ok(())),
            Poll::Ready(Err(err)) => {
                Poll::Ready(Err(
                    io::Error::new(io::ErrorKind::BrokenPipe, err.to_string())
                ))
            }
        }
    }
    /// Flushes outstanding frames, then closes the websocket sink.
    fn poll_shutdown(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>
    ) -> Poll<Result<(), io::Error>>
    {
        // A flush error is deliberately ignored here; the close below is
        // still attempted so the peer sees the connection end.
        match self.sink.poll_flush_unpin(cx) {
            Poll::Pending => {
                return Poll::Pending;
            },
            _ => { }
        }
        match self.sink.poll_close_unpin(cx) {
            Poll::Pending => Poll::Pending,
            Poll::Ready(Ok(a)) => Poll::Ready(Ok(a)),
            Poll::Ready(Err(err)) => {
                Poll::Ready(Err(
                    io::Error::new(io::ErrorKind::Other, err.to_string())
                ))
            }
        }
    }
}
/// Read half of a split websocket, adapting binary frames to `AsyncRead`.
/// `buffer` holds the unread tail of a frame that was larger than the
/// caller's last read buffer.
#[derive(Debug)]
pub struct RecvHalf<S>
where S: AsyncRead + AsyncWrite + Unpin
{
    stream: SplitStream<WebSocketStream<S>>,
    buffer: Option<Bytes>,
}
impl<S> RecvHalf<S>
where S: AsyncRead + AsyncWrite + Unpin
{
    /// Wraps the stream half produced by `WebSocketStream::split`.
    pub fn new(stream: SplitStream<WebSocketStream<S>>) -> Self {
        Self {
            stream,
            buffer: None,
        }
    }
}
impl<S> AsyncRead
for RecvHalf<S>
where S: AsyncRead + AsyncWrite + Unpin
{
    /// Bridges websocket binary frames into the `AsyncRead` byte-stream
    /// interface.
    ///
    /// Frames larger than the caller's buffer are partially delivered and
    /// the remainder stashed in `self.buffer` for the next call.  Close
    /// frames and stream termination surface as errors; other frame types
    /// are rejected as protocol violations.
    fn poll_read(
        mut self: Pin<&mut Self>,
        cx: &mut Context<'_>,
        buf: &mut ReadBuf<'_>,
    ) -> Poll<io::Result<()>> {
        // Drain left-over bytes from a previously received frame first.
        if let Some(pending) = self.buffer.take() {
            if pending.len() <= buf.remaining() {
                buf.put_slice(&pending[..]);
            } else {
                let end = buf.remaining();
                buf.put_slice(&pending[..end]);
                self.buffer.replace(pending.slice(end..));
            }
            return Poll::Ready(Ok(()));
        }
        loop {
            return match self.stream.poll_next_unpin(cx) {
                Poll::Pending => Poll::Pending,
                Poll::Ready(None) => {
                    Poll::Ready(Err(tokio::io::Error::new(
                        tokio::io::ErrorKind::BrokenPipe,
                        format!("Failed to receive data from websocket"),
                    )))
                },
                Poll::Ready(Some(Err(err))) => {
                    Poll::Ready(Err(tokio::io::Error::new(
                        tokio::io::ErrorKind::BrokenPipe,
                        format!(
                            "Failed to receive data from websocket - {}",
                            err.to_string()
                        ),
                    )))
                },
                Poll::Ready(Some(Ok(Message::Binary(data)))) => {
                    // BUGFIX: an empty binary frame used to return Ok(())
                    // with zero bytes written, which AsyncRead consumers
                    // interpret as EOF.  Skip it and poll the next frame.
                    if data.is_empty() {
                        continue;
                    }
                    if data.len() <= buf.remaining() {
                        buf.put_slice(&data[..]);
                    } else {
                        // Deliver what fits; stash the rest for later.
                        let end = buf.remaining();
                        buf.put_slice(&data[..end]);
                        self.buffer.replace(Bytes::from(data).slice(end..));
                    }
                    Poll::Ready(Ok(()))
                },
                Poll::Ready(Some(Ok(Message::Close(_)))) => {
                    Poll::Ready(Err(io::Error::new(
                        io::ErrorKind::NotConnected,
                        "web socket connection has closed"
                    )))
                },
                Poll::Ready(Some(Ok(_))) => {
                    // Text/ping/pong frames are not expected on this channel.
                    Poll::Ready(Err(tokio::io::Error::new(
                        tokio::io::ErrorKind::BrokenPipe,
                        format!("Failed to receive data from websocket as the message was the wrong type")
                    )))
                },
            };
        }
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/router.rs | ateweb/src/router.rs | use async_trait::async_trait;
use std::net::SocketAddr;
#[allow(unused_imports, dead_code)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use http::*;
use hyper::upgrade::Upgraded;
use hyper_tungstenite::WebSocketStream;
use std::result::Result;
use ate::comms::StreamRouter;
use super::server::ServerCallback;
/// Lets an ate `StreamRouter` be plugged straight into the web server as
/// its callback implementation.
#[async_trait]
impl ServerCallback for StreamRouter {
    /// Adapts an upgraded websocket into the router's byte-stream
    /// interface by splitting it and wrapping each half with the
    /// AsyncRead/AsyncWrite adapters from `super::stream`.
    async fn web_socket(
        &self,
        ws: WebSocketStream<Upgraded>,
        sock_addr: SocketAddr,
        uri: Option<http::Uri>,
        headers: Option<http::HeaderMap>
    ) -> Result<(), Box<dyn std::error::Error>>
    {
        use futures_util::StreamExt;
        let (sink, stream) = ws.split();
        let rx = Box::new(super::stream::RecvHalf::new(stream));
        let tx = Box::new(super::stream::SendHalf::new(sink));
        self.accept_socket(rx, tx, sock_addr, uri, headers).await?;
        Ok(())
    }
    /// Forwards a POST body to the router's own handler.
    async fn post_request(
        &self,
        body: Vec<u8>,
        sock_addr: SocketAddr,
        uri: http::Uri,
        headers: http::HeaderMap,
    ) -> Result<Vec<u8>, (Vec<u8>, StatusCode)> {
        StreamRouter::post_request(self, body, sock_addr, uri, headers).await
    }
    /// Forwards a PUT body to the router's own handler.
    async fn put_request(
        &self,
        body: Vec<u8>,
        sock_addr: SocketAddr,
        uri: http::Uri,
        headers: http::HeaderMap,
    ) -> Result<Vec<u8>, (Vec<u8>, StatusCode)> {
        StreamRouter::put_request(self, body, sock_addr, uri, headers).await
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/builder.rs | ateweb/src/builder.rs | use std::net::{IpAddr, SocketAddr};
use std::sync::Arc;
use std::time::Duration;
use url::Url;
use ate::prelude::*;
use super::conf::*;
use super::server::*;
/// Collects everything needed to construct an ateweb `Server`.
pub struct ServerBuilder {
    pub(crate) remote: Url,
    pub(crate) auth_url: Url,
    pub(crate) conf: ServerConf,
    // NOTE(review): key presumably used for hosted web content encryption;
    // its exact role is defined by Server::new — confirm there.
    pub(crate) web_master_key: Option<EncryptKey>,
    pub(crate) session_cert_store: Option<AteSessionGroup>,
    pub(crate) callback: Option<Arc<dyn ServerCallback>>,
}
impl ServerBuilder {
    /// Starts a builder pointing at the given remote and auth endpoints,
    /// with default configuration, no keys, and no callback.
    pub fn new(remote: Url, auth_url: Url) -> ServerBuilder {
        ServerBuilder {
            remote,
            auth_url,
            conf: ServerConf::default(),
            web_master_key: None,
            session_cert_store: None,
            callback: None,
        }
    }
    pub fn with_web_master_key(mut self, key: EncryptKey) -> Self {
        self.web_master_key = Some(key);
        self
    }
    /// Replaces the embedded ATE configuration with a copy of `cfg`.
    pub fn with_conf(mut self, cfg: &ConfAte) -> Self {
        self.conf.cfg_ate = cfg.clone();
        self
    }
    pub fn with_cert_store_session(mut self, session_cert_store: AteSessionGroup) -> Self {
        self.session_cert_store = Some(session_cert_store);
        self
    }
    /// Installs the request/websocket callback (wrapped in an Arc).
    pub fn with_callback(mut self, callback: impl ServerCallback + 'static) -> Self {
        let callback = Arc::new(callback);
        self.callback = Some(callback);
        self
    }
    pub fn ttl(mut self, ttl: Duration) -> Self {
        self.conf.ttl = ttl;
        self
    }
    /// Adds one listening endpoint; may be called multiple times.
    pub fn add_listener(mut self, ip: IpAddr, port: u16, tls: bool) -> Self {
        self.conf.listen.push(ServerListen {
            addr: SocketAddr::new(ip, port),
            tls,
        });
        self
    }
    /// Consumes the builder and constructs the server.
    pub async fn build(self) -> Result<Arc<Server>, AteError> {
        Server::new(self).await
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/lib.rs | ateweb/src/lib.rs | pub mod builder;
pub mod conf;
pub mod error;
pub mod helper;
pub mod model;
pub mod opt;
pub mod server;
pub mod acceptor;
pub mod acme;
pub mod router;
pub mod stream;
// Re-export the most commonly used entry points at the crate root.
pub use acceptor::*;
pub use acme::*;
pub use builder::ServerBuilder;
pub use conf::ServerConf;
pub use server::Server;
pub use stream::*;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/conf.rs | ateweb/src/conf.rs | use std::net::SocketAddr;
use std::time::Duration;
use ate::prelude::*;
/// One listening endpoint: socket address plus whether TLS is enabled.
#[derive(Debug, Clone)]
pub struct ServerListen {
    pub addr: SocketAddr,
    pub tls: bool,
}
/// Top-level server configuration.
#[derive(Debug)]
pub struct ServerConf {
    // Underlying ATE configuration.
    pub cfg_ate: ConfAte,
    // NOTE(review): what this TTL applies to is defined by the server
    // implementation — confirm before relying on it.
    pub ttl: Duration,
    pub listen: Vec<ServerListen>,
}
impl Default for ServerConf {
    /// Defaults: stock ATE config, 60-second TTL, no listeners.
    fn default() -> Self {
        ServerConf {
            cfg_ate: ConfAte::default(),
            ttl: Duration::from_secs(60),
            listen: Vec::new(),
        }
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/error.rs | ateweb/src/error.rs | use ate::error::*;
use ate_files::error::FileSystemError;
use ate_files::error::FileSystemErrorKind;
use error_chain::error_chain;
use hyper::StatusCode;
// Error type for the web server request path, generated by error_chain:
// `links` chain onto other error_chain types, `foreign_links` wrap plain
// error types via From, and `errors` declares this module's own variants.
error_chain! {
    types {
        WebServerError, WebServerErrorKind, WebServerResultExt, WebServerResult;
    }
    links {
        LoadError(LoadError, LoadErrorKind);
        SerializationError(SerializationError, SerializationErrorKind);
        ChainCreationError(ChainCreationError, ChainCreationErrorKind);
        LockError(LockError, LockErrorKind);
        TransformError(TransformError, TransformErrorKind);
        FileSystemError(FileSystemError, FileSystemErrorKind);
    }
    foreign_links {
        HeaderStrError(http::header::ToStrError);
        HeaderValueError(http::header::InvalidHeaderValue);
        TokioTungsteniteError(tokio_tungstenite::tungstenite::error::ProtocolError);
        HyperTungsteniteError(hyper_tungstenite::tungstenite::error::ProtocolError);
    }
    errors {
        BadHost(host: String) {
            description("Bad Host"),
            display("Bad Host - {}", host),
        }
        BadConfiguration(err: String) {
            description("Bad Configuration"),
            display("Bad Configuration - {}", err),
        }
        BadRequest(err: String) {
            description("Bad Request"),
            display("Bad Request - {}", err),
        }
        UnknownHost {
            description("Unknown Host"),
            display("Unknown Host"),
        }
    }
}
impl WebServerError {
    /// Maps this error to the HTTP status code sent back to the client.
    /// Anything not explicitly matched is a 500.
    pub fn status_code(&self) -> StatusCode {
        match self {
            WebServerError(WebServerErrorKind::BadHost(_), _) => StatusCode::BAD_GATEWAY,
            WebServerError(WebServerErrorKind::BadRequest(_), _) => StatusCode::BAD_REQUEST,
            WebServerError(WebServerErrorKind::UnknownHost, _) => StatusCode::BAD_REQUEST,
            WebServerError(
                WebServerErrorKind::FileSystemError(FileSystemErrorKind::DoesNotExist),
                _,
            ) => StatusCode::NOT_FOUND,
            _ => StatusCode::INTERNAL_SERVER_ERROR,
        }
    }
    /// Renders the error as a response body, guaranteeing a trailing
    /// newline.  (The original routed this through a single-arm
    /// `match self { err => err.to_string() }`, which is just
    /// `self.to_string()`.)
    pub fn response_body(&self) -> String {
        let mut ret = self.to_string();
        if !ret.ends_with('\n') {
            ret.push('\n');
        }
        ret
    }
}
// Errors raised while ordering/renewing certificates from the ACME
// provider.
error_chain! {
    types {
        OrderError, OrderErrorKind, OrderResultExt, OrderResult;
    }
    links {
        SerializationError(SerializationError, SerializationErrorKind);
        CommitError(CommitError, CommitErrorKind);
        AcmeError(AcmeError, AcmeErrorKind);
        FileSystemError(FileSystemError, FileSystemErrorKind);
    }
    errors {
        Pem(err: pem::PemError) {
            description("could not parse pem"),
            display("could not parse pem: {0}", err)
        }
        Rcgen(err: rcgen::RcgenError) {
            description("certificate generation error"),
            display("certificate generation error: {0}", err)
        }
        BadOrder(order: crate::acme::Order) {
            description("bad order object"),
            display("bad order object: {0:?}", order)
        }
        Timeout {
            description("timeout while waiting for certificate"),
            display("timeout while waiting for certificate")
        }
        BadAuth(auth: crate::acme::Auth) {
            description("bad auth object"),
            display("bad auth object: {0:?}", auth)
        }
        TooManyAttemptsAuth(domain: String) {
            description("authorization failed too many times"),
            display("authorization for {0} failed too many times", domain)
        }
    }
}
// Manual From conversions so `?` works directly on pem/rcgen errors in
// order-handling code, routing them into the variants declared above.
impl From<pem::PemError> for OrderError {
    fn from(err: pem::PemError) -> OrderError {
        OrderErrorKind::Pem(err).into()
    }
}
impl From<rcgen::RcgenError> for OrderError {
    fn from(err: rcgen::RcgenError) -> OrderError {
        OrderErrorKind::Rcgen(err).into()
    }
}
// Errors from key handling / signing.
error_chain! {
    types {
        SecurityError, SecurityErrorKind, SecurityResultExt, SecurityResult;
    }
    foreign_links {
        Json(serde_json::Error);
        Crypto(ring::error::Unspecified);
    }
}
// Errors talking to the ACME (letsencrypt) HTTP API.
error_chain! {
    types {
        AcmeError, AcmeErrorKind, AcmeResultExt, AcmeResult;
    }
    foreign_links {
        Io(std::io::Error);
        HyperError(hyper::Error);
        Rcgen(rcgen::RcgenError);
        Security(SecurityError);
        Json(serde_json::Error);
        KeyRejected(ring::error::KeyRejected);
        Crypto(ring::error::Unspecified);
        ToStrError(http::header::ToStrError);
    }
    errors {
        MissingHeader(name: &'static str) {
            description("missing header"),
            display("missing header ({})", name)
        }
        BadResponse(code: u16, response: String) {
            description("letsencrypt returned a bad response"),
            display("letsencrypt returned a bad response (code={}) - {}", code, response),
        }
        ApiError(err: super::ApiError) {
            description("letsencrypt returned an error"),
            display("letsencrypt returned an error (code={}) - {}", err.typ, err.detail),
        }
        NoTlsAlpn01Challenge {
            description("no tls alpn 01 challenge"),
            display("no tls alpn 01 challenge")
        }
    }
}
// Miscellaneous HTTP-side errors.
error_chain! {
    types {
        HttpError, HttpErrorKind, HttpResultExt, HttpResult;
    }
    foreign_links {
        IO(std::io::Error);
    }
    errors {
        UndefinedHost {
            description("could not determine host from url"),
            display("could not determine host from url")
        }
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/helper.rs | ateweb/src/helper.rs | pub fn redirect_body(target: &str) -> String {
format!(
r#"<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01//EN" "http://www.w3.org/TR/html4/strict.dtd">
<html>
<head>
<title>Permanent Redirect</title>
<meta http-equiv="refresh" content="0; url={}">
</head>
<body>
<p>
The document has been moved to <a href="{}">{}</a>.
</p>
</body>
</html>"#,
target, target, target
)
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/acceptor.rs | ateweb/src/acceptor.rs | use super::acme::ACME_TLS_ALPN_NAME;
use core::task::{Context, Poll};
use std::future::Future;
use std::io;
use std::net::SocketAddr;
use std::pin::Pin;
use std::sync::Arc;
use tokio::net::TcpListener;
use tokio::net::TcpStream;
use tokio_rustls::TlsAcceptor;
#[allow(unused_imports, dead_code)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use hyper;
use super::acme::*;
use super::stream::*;
/// Accepts TCP connections for hyper, optionally upgrading them to TLS,
/// and tracks in-flight TLS handshakes.
pub struct HyperAcceptor
where
    Self: Send,
{
    pub tcp: TcpListener,
    // None => serve plain HTTP; Some => terminate TLS with this acceptor.
    pub tls: Option<TlsAcceptor>,
    pub acme: Arc<AcmeResolver>,
    // TLS handshakes in progress, driven from poll_accept.
    pub accepting:
        Vec<Pin<Box<dyn Future<Output = Result<HyperStream, Box<dyn std::error::Error>>> + Send>>>,
}
impl HyperAcceptor {
    /// Builds an acceptor over `listener`.  When `enable_tls` is set, a
    /// rustls server config is created whose certificates are resolved on
    /// demand by the ACME resolver, advertising h2, http/1.1 and the
    /// acme-tls/1 ALPN protocol (used for TLS-ALPN-01 challenges).
    pub fn new(listener: TcpListener, acme: Arc<AcmeResolver>, enable_tls: bool) -> HyperAcceptor {
        let tls = match enable_tls {
            false => None,
            true => {
                let acme = Arc::clone(&acme);
                let tls_cfg = {
                    let mut cfg = rustls::ServerConfig::new(rustls::NoClientAuth::new());
                    cfg.cert_resolver = acme;
                    cfg.set_protocols(&[
                        b"h2".to_vec(),
                        b"http/1.1".to_vec(),
                        b"acme-tls/1".to_vec(),
                    ]);
                    Arc::new(cfg)
                };
                Some(TlsAcceptor::from(tls_cfg))
            }
        };
        HyperAcceptor {
            tcp: listener,
            tls,
            acme,
            accepting: Vec::new(),
        }
    }
    /// Completes a TLS accept: peeks at the ClientHello (without consuming
    /// it) so the SNI host's certificate can be (pre)loaded via ACME, then
    /// performs the actual TLS handshake.
    pub async fn accept(
        tls: TlsAcceptor,
        acme: Arc<AcmeResolver>,
        socket: TcpStream,
        addr: SocketAddr,
    ) -> Result<HyperStream, Box<dyn std::error::Error>> {
        // Enter a loop peeking for the hello client message; the peek
        // window doubles each pass (256..32768 bytes) so the loop is
        // bounded even if the hello never parses.
        let mut peek_size = 128usize;
        while peek_size <= 16384usize {
            peek_size *= 2usize;
            // Keep peeking at the stream until we have a TlsMessage
            let mut buf = vec![0; peek_size];
            let n = socket.peek(&mut buf).await?;
            if n <= 0 {
                continue;
            }
            // Attempt to get a TlsMessage
            let record = match tls_parser::parse_tls_plaintext(&buf[..n]) {
                Ok((_rem, record)) => record,
                Err(tls_parser::Err::Incomplete(_needed)) => {
                    // Not enough bytes yet - retry with a bigger window.
                    continue;
                }
                Err(e) => {
                    warn!("parse_tls_record_with_header failed: {:?}", e);
                    break;
                }
            };
            // Find the handshake / client hello message
            let msg = record
                .msg
                .iter()
                .filter_map(|a| match a {
                    tls_parser::TlsMessage::Handshake(
                        tls_parser::TlsMessageHandshake::ClientHello(hello),
                    ) => Some(hello),
                    _ => None,
                })
                .next();
            let hello = match msg {
                Some(a) => a,
                None => {
                    continue;
                }
            };
            // Grab all the extensions
            let exts = if let Some(hello_ext) = hello.ext {
                if let Ok((_rem, exts)) = tls_parser::parse_tls_extensions(hello_ext) {
                    exts
                } else {
                    break;
                }
            } else {
                break;
            };
            // If it has an ACME ALPN extension then we dont want to trigger another certificate for it
            // so we instead just attempt to accept the connection
            let mut alpn = false;
            for ext in exts.iter() {
                if let tls_parser::TlsExtension::ALPN(alpn_exts) = ext {
                    for alpn_ext in alpn_exts {
                        if ACME_TLS_ALPN_NAME.eq(*alpn_ext) {
                            alpn = true;
                        }
                    }
                }
            }
            // We are looking for the SNI extension
            let sni = exts
                .iter()
                .filter_map(|a| match a {
                    tls_parser::TlsExtension::SNI(snis) => snis
                        .iter()
                        .filter_map(|a| match a {
                            (tls_parser::SNIType::HostName, sni_bytes) => {
                                Some(String::from_utf8_lossy(sni_bytes))
                            }
                            _ => None,
                        })
                        .next(),
                    _ => None,
                })
                .next();
            let sni = match sni {
                Some(a) => a,
                None => {
                    break;
                }
            };
            // Load the object
            if alpn {
                trace!("alpn challenge for SNI: {}", sni);
                acme.touch_alpn(sni.to_string()).await?;
            } else {
                trace!("connection attempt SNI: {}", sni);
                acme.touch_web(sni.to_string(), chrono::Duration::days(30))
                    .await?;
            }
            break;
        }
        // Its time to now accept the connect (if the preload failed, then so be it, things will still
        // work they will just get a error message on the first request to this web server as it wont
        // have the server ceritifcate loaded yet and will need to be loaded asynchronously)
        let stream = tls.accept(socket).await?;
        Ok(HyperStream::Tls((stream, addr)))
    }
}
impl hyper::server::accept::Accept for HyperAcceptor {
    type Conn = HyperStream;
    type Error = io::Error;
    /// Drains every connection the listener has ready into in-flight TLS
    /// handshakes (plain-HTTP sockets are yielded immediately), then polls
    /// those handshakes, yielding at most one ready connection per call.
    fn poll_accept(
        mut self: Pin<&mut Self>,
        cx: &mut Context,
    ) -> Poll<Option<Result<Self::Conn, Self::Error>>> {
        loop {
            match self.tcp.poll_accept(cx) {
                Poll::Pending => break,
                Poll::Ready(Err(err)) => {
                    return Poll::Ready(Some(Err(err)));
                }
                Poll::Ready(Ok((socket, addr))) => {
                    // For HTTP streams there is nothing more to do
                    let tls = match &self.tls {
                        None => {
                            return Poll::Ready(Some(Ok(HyperStream::PlainTcp((socket, addr)))));
                        }
                        Some(tls) => tls.clone(),
                    };
                    // Otherwise its time to accept the TLS connection
                    let acme = self.acme.clone();
                    let accept = HyperAcceptor::accept(tls, acme, socket, addr);
                    self.accepting.push(Box::pin(accept));
                }
            };
        }
        let mut ret = None;
        // Swap the in-flight list out so unfinished futures can be pushed
        // back onto self.accepting while iterating.
        let drained = {
            let mut drained = Vec::with_capacity(self.accepting.capacity());
            std::mem::swap(self.accepting.as_mut(), &mut drained);
            drained
        };
        for mut accept in drained {
            // NOTE(review): once one stream is ready, the remaining futures
            // are re-queued WITHOUT being polled this call; a brand-new
            // handshake added above then has no waker registered yet and
            // relies on a later wakeup — confirm this cannot stall.
            if ret.is_some() {
                self.accepting.push(accept);
                continue;
            }
            match accept.as_mut().poll(cx) {
                Poll::Pending => {
                    self.accepting.push(accept);
                }
                Poll::Ready(Ok(stream)) => {
                    ret = Some(stream);
                }
                Poll::Ready(Err(err)) => {
                    // Failed handshakes are logged and dropped; the server
                    // keeps accepting other connections.
                    warn!("failed to accept TLS stream - {}", err);
                    continue;
                }
            }
        }
        if let Some(stream) = ret {
            return Poll::Ready(Some(Ok(stream)));
        }
        Poll::Pending
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/server.rs | ateweb/src/server.rs | use async_trait::async_trait;
use error_chain::bail;
use fxhash::FxHashMap;
use std::collections::hash_map::Entry as StdEntry;
use std::convert::Infallible;
use std::net::SocketAddr;
use std::ops::Deref;
use std::sync::Arc;
use std::sync::Weak;
use std::time::Duration;
use std::time::Instant;
use tokio::net::TcpListener;
use tokio::sync::Mutex;
#[allow(unused_imports, dead_code)]
use tracing::{debug, error, info, instrument, span, trace, warn, event, Level};
use wasmer_auth::service::AuthService;
use wasmer_auth::cmd::gather_command;
use hyper;
use hyper::header::HeaderValue;
use hyper::service::{make_service_fn, service_fn};
use hyper::upgrade::Upgraded;
pub use hyper::Body;
use hyper::Method;
use hyper::Request;
use hyper::Response;
use hyper::StatusCode;
use hyper_tungstenite::WebSocketStream;
use ate::prelude::*;
use ate_files::prelude::*;
use ate_files::repo::*;
use crate::model::WebConf;
use super::acceptor::*;
use super::acme::AcmeResolver;
use super::builder::*;
use super::conf::*;
use super::error::WebServerError;
use super::error::WebServerErrorKind;
use super::model::*;
use super::stream::*;
/// Cached per-host web configuration together with the instant it was last
/// refreshed (used by `Server::get_conf` to re-read the config after a few
/// seconds).
pub struct ServerWebConf {
    // Most recently loaded configuration for this host.
    web_conf: WebConf,
    // When the configuration was last fetched; `None` means never loaded.
    web_conf_when: Option<Instant>,
}
/// Hooks that let an embedding application extend the web server with
/// web-socket handling and POST/PUT processing.  All methods have safe
/// defaults: web sockets are accepted and dropped, while POST and PUT
/// reply with `400 Bad Request`.
#[async_trait]
pub trait ServerCallback: Send + Sync {
    /// Invoked with the stream of a successful web-socket upgrade; the
    /// default implementation simply drops it.
    async fn web_socket(
        &self,
        _ws: WebSocketStream<Upgraded>,
        _sock_addr: SocketAddr,
        _uri: Option<http::Uri>,
        _headers: Option<http::HeaderMap>,
    ) -> Result<(), Box<dyn std::error::Error>> {
        Ok(())
    }

    /// Handles a POST request body, returning either the response body on
    /// success or an error body plus status code.
    async fn post_request(
        &self,
        _body: Vec<u8>,
        _sock_addr: SocketAddr,
        _uri: http::Uri,
        _headers: http::HeaderMap,
    ) -> Result<Vec<u8>, (Vec<u8>, StatusCode)> {
        // Plain literal - the previous format!() call allocated needlessly.
        let msg = "Bad Request (Not Implemented)".as_bytes().to_vec();
        Err((msg, StatusCode::BAD_REQUEST))
    }

    /// Handles a PUT request body; same contract as `post_request`.
    async fn put_request(
        &self,
        _body: Vec<u8>,
        _sock_addr: SocketAddr,
        _uri: http::Uri,
        _headers: http::HeaderMap,
    ) -> Result<Vec<u8>, (Vec<u8>, StatusCode)> {
        let msg = "Bad Request (Not Implemented)".as_bytes().to_vec();
        Err((msg, StatusCode::BAD_REQUEST))
    }
}
/// The ateweb HTTP/HTTPS server: serves files out of per-host ATE chains,
/// optionally delegating web sockets and POST/PUT to a [`ServerCallback`].
pub struct Server {
    // Repository of per-host file-system chains (the content source).
    repo: Arc<Repository>,
    // Per-host cached `web.yaml` configuration.
    web_conf: Mutex<FxHashMap<String, ServerWebConf>>,
    // Static server configuration (listeners, TTL, ...).
    server_conf: ServerConf,
    // Optional application hooks for web sockets and POST/PUT.
    callback: Option<Arc<dyn ServerCallback + 'static>>,
    // File-extension -> MIME-type lookup table.
    mime: FxHashMap<String, String>,
}
/// Hyper entry point: forwards the request to [`Server::process`] and maps
/// error cases onto HTTP responses (403 for file-system access denial,
/// otherwise the error's own status code) so hyper itself never sees an
/// application failure.
async fn process(
    server: Arc<Server>,
    listen: Arc<ServerListen>,
    req: Request<Body>,
    sock_addr: SocketAddr,
) -> Result<Response<Body>, hyper::Error> {
    trace!("perf-checkpoint: hyper process (addr={})", sock_addr);
    // Keep a copy of the path for error messages; `req` is consumed below.
    let path = req.uri().path().to_string();
    match server.process(req, sock_addr, listen.deref()).await {
        Ok(resp) => {
            trace!("perf-checkpoint: hyper finished");
            trace!("res: status={}", resp.status().as_u16());
            Ok(resp)
        }
        // File-system permission failures become a 403 naming the path.
        Err(WebServerError(
            WebServerErrorKind::FileSystemError(FileSystemErrorKind::NoAccess),
            _,
        )) => {
            let err = format!("Access Denied - {}\n", path);
            let mut resp = Response::new(Body::from(err));
            *resp.status_mut() = StatusCode::FORBIDDEN;
            trace!("res: status={}", resp.status().as_u16());
            return Ok(resp);
        }
        // Every other error renders its own body and status code.
        Err(err) => {
            let mut resp = Response::new(Body::from(err.response_body()));
            *resp.status_mut() = err.status_code();
            trace!("res: status={}", resp.status().as_u16());
            return Ok(resp);
        }
    }
}
impl Server {
/// Builds the server from the supplied builder: pins the ATE configuration
/// to read-only-sync recovery with backups disabled, constructs the chain
/// registry plus a session factory, then opens the file repository the
/// HTTP handlers serve from.
pub(crate) async fn new(mut builder: ServerBuilder) -> Result<Arc<Server>, AteError>
{
    // There are few more tweaks we need to make to the configuration
    builder.conf.cfg_ate.recovery_mode = RecoveryMode::ReadOnlySync;
    builder.conf.cfg_ate.backup_mode = BackupMode::None;
    // Now we are ready
    let registry = Arc::new(Registry::new(&builder.conf.cfg_ate).await);
    // Sessions are minted per chain using the web master key.
    let session_factory = SessionFactory {
        auth_url: builder.auth_url.clone(),
        registry: registry.clone(),
        master_key: builder.web_master_key.clone(),
    };
    let repo = Repository::new(
        &registry,
        builder.remote.clone(),
        builder.auth_url.clone(),
        Box::new(session_factory),
        builder.conf.ttl,
    )
    .await?;
    Ok(Arc::new(Server {
        repo,
        web_conf: Mutex::new(FxHashMap::default()),
        server_conf: builder.conf,
        callback: builder.callback,
        mime: Server::init_mime(),
    }))
}
/// Starts one hyper server per configured listener (plain HTTP or TLS via
/// the ACME resolver), spawns a background task that periodically evicts
/// expired cached chains, then awaits all listeners until they finish.
pub async fn run(self: &Arc<Self>) -> Result<(), Box<dyn std::error::Error>> {
    trace!("running web server");
    let acme = AcmeResolver::new(&self.repo).await?;
    let mut joins = Vec::new();
    for listen in self.server_conf.listen.iter() {
        // Each accepted connection gets a service closure capturing the
        // peer address so the request handler can log it.
        let make_service = {
            let server = Arc::clone(self);
            let listen = Arc::new(listen.clone());
            make_service_fn(move |conn: &HyperStream| {
                let addr = conn.remote_addr().clone();
                let server = server.clone();
                let listen = listen.clone();
                async move {
                    Ok::<_, Infallible>(service_fn(move |req| {
                        process(server.clone(), listen.clone(), req, addr)
                    }))
                }
            })
        };
        let acme = acme.clone();
        let tcp_listener = TcpListener::bind(&listen.addr).await?;
        let acceptor = HyperAcceptor::new(tcp_listener, acme, listen.tls);
        let server = hyper::Server::builder(acceptor)
            .http1_preserve_header_case(true)
            .http1_title_case_headers(true)
            .serve(make_service);
        println!("Listening on {}", listen.addr);
        joins.push(server);
    }
    // This next background thread will terminate any chains that have gone
    // out-of-scope due to expired TTL (caching cleanup)
    {
        let ttl = self.server_conf.ttl.as_secs();
        let ttl_check = u64::min(ttl, 30u64);
        let server = Arc::clone(self);
        TaskEngine::spawn(async move {
            // Hold only a weak reference so this task cannot keep the
            // server alive after all strong handles are dropped.
            let server = {
                let s = Arc::downgrade(&server);
                drop(server);
                s
            };
            let mut n = 0u64;
            loop {
                ate::engine::sleep(Duration::from_secs(1)).await;
                let server = match Weak::upgrade(&server) {
                    Some(a) => a,
                    None => break,
                };
                n += 1;
                // Run house keeping once per TTL window, capped at 30s.
                if n >= ttl_check {
                    server.house_keeping().await;
                    n = 0;
                }
            }
        });
    }
    for res in futures::future::join_all(joins).await {
        if let Err(e) = res {
            eprintln!("server error: {}", e);
        }
    }
    Ok(())
}
/// Evicts cached chains whose TTL has expired (delegates to the repository).
async fn house_keeping(&self) {
    self.repo.house_keeping().await;
}
/// Determines the host this request is addressed to, preferring the
/// authority embedded in the request URI (absolute-form requests) and
/// falling back to the `Host` header; errors when neither is present.
pub(crate) fn get_host(&self, req: &Request<Body>) -> Result<String, WebServerError> {
    if let Some(host) = req.uri().host() {
        Ok(host.to_string())
    } else if let Some(header) = req.headers().get("Host") {
        // `to_str` fails on header values that are not visible ASCII.
        Ok(header.to_str()?.to_string())
    } else {
        bail!(WebServerErrorKind::UnknownHost)
    }
}
/// Returns the `web.yaml` configuration for `host`, caching it for ~4
/// seconds.  When no configuration file exists a default one (index.html
/// default page, force-https on) is synthesized and written back on a
/// best-effort basis.
pub(crate) async fn get_conf(&self, host: &str) -> Result<WebConf, WebServerError> {
    let host = host.to_string();
    let mut lock = self.web_conf.lock().await;
    // Ensure a cache slot exists for this host.
    let conf = match lock.entry(host.clone()) {
        StdEntry::Occupied(a) => a.into_mut(),
        StdEntry::Vacant(a) => a.insert(ServerWebConf {
            web_conf: WebConf::default(),
            web_conf_when: None,
        }),
    };
    // Reload when never loaded or when the cached copy is older than 4s;
    // otherwise serve straight from the cache.
    let trigger = match &conf.web_conf_when {
        Some(a) if a.elapsed().as_millis() > 4000u128 => true,
        None => true,
        _ => {
            return Ok(conf.web_conf.clone());
        }
    };
    if trigger {
        let key = ChainKey::from(format!("{}/www", host));
        conf.web_conf_when = Some(Instant::now());
        conf.web_conf = match self
            .repo
            .get_file(&key, host.as_str(), WEB_CONF_FILES_CONF)
            .await
            .ok()
            .flatten()
        {
            Some(data) => {
                // Parse the YAML; a malformed file surfaces as a
                // BadConfiguration error rather than being ignored.
                let data = String::from_utf8_lossy(&data[..]);
                serde_yaml::from_str::<WebConf>(&data).map_err(|err| {
                    WebServerError::from_kind(WebServerErrorKind::BadConfiguration(
                        err.to_string(),
                    ))
                })?
            }
            None => {
                // No configuration yet - synthesize a default and try to
                // persist it (a failed save is only logged).
                let mut ret = WebConf::default();
                ret.default_page = Some("index.html".to_string());
                ret.force_https = true;
                if let Some(ret_str) = serde_yaml::to_string(&ret).ok() {
                    let err = self
                        .repo
                        .set_file(&key, host.as_str(), WEB_CONF_FILES_CONF, ret_str.as_bytes())
                        .await;
                    if let Err(err) = err {
                        info!("failed to save default web.yaml - {}", err);
                    }
                }
                ret
            }
        };
    }
    // Trace the effective configuration for debugging.
    match serde_yaml::to_string(&conf.web_conf) {
        Ok(conf) => trace!("web-conf: {}", conf),
        Err(err) => trace!("web-conf-err: {}", err),
    };
    Ok(conf.web_conf.clone())
}
/// Issues a permanent redirect to the https:// version of this request,
/// preserving host, path and query.  Fails when no host can be derived
/// from either the URI authority or the `Host` header.
pub(crate) async fn force_https(
    &self,
    req: Request<Body>,
) -> Result<Response<Body>, WebServerError> {
    let host = match req.uri().authority() {
        Some(a) => a.to_string(),
        None => match req.headers().get("Host") {
            Some(a) => a.to_str()?.to_string(),
            None => {
                bail!(WebServerErrorKind::BadRequest(
                    "unknown host address needed for redirect to https".to_string()
                ))
            }
        },
    };
    // Rebuild the URI with the https scheme, keeping path and query.
    let mut uri = http::Uri::builder()
        .authority(host.as_str())
        .scheme("https");
    if let Some(path_and_query) = req.uri().path_and_query() {
        uri = uri.path_and_query(path_and_query.clone());
    }
    let uri = match uri.build() {
        Ok(uri) => uri,
        Err(err) => {
            bail!(WebServerErrorKind::BadRequest(err.to_string()))
        }
    }
    .to_string();
    self.process_redirect(uri.as_str()).await
}
/// Redirects the request to a different authority (`redirect`), keeping
/// the original scheme when present (otherwise inferred from the
/// listener's TLS flag) together with the path and query.
pub(crate) async fn process_redirect_host(
    &self,
    req: Request<Body>,
    listen: &ServerListen,
    redirect: &str,
) -> Result<Response<Body>, WebServerError> {
    let mut uri = http::Uri::builder().authority(redirect);
    if let Some(scheme) = req.uri().scheme() {
        uri = uri.scheme(scheme.clone());
    } else if listen.tls {
        uri = uri.scheme("https");
    } else {
        uri = uri.scheme("http");
    }
    if let Some(path_and_query) = req.uri().path_and_query() {
        uri = uri.path_and_query(path_and_query.clone());
    }
    let uri = match uri.build() {
        Ok(uri) => uri,
        Err(err) => {
            bail!(WebServerErrorKind::BadRequest(err.to_string()))
        }
    }
    .to_string();
    self.process_redirect(uri.as_str()).await
}
/// Builds a `308 Permanent Redirect` to `uri` with a small HTML body
/// pointing at the new location.
pub(crate) async fn process_redirect(
    &self,
    uri: &str,
) -> Result<Response<Body>, WebServerError> {
    let body = Body::from(crate::helper::redirect_body(uri));
    let mut resp = Response::new(body);
    *resp.status_mut() = StatusCode::PERMANENT_REDIRECT;
    resp.headers_mut()
        .append("Location", HeaderValue::from_str(uri)?);
    Ok(resp)
}
/// Validates a request path: after stripping leading slashes it must not
/// contain a `..` traversal sequence nor point into the reserved web
/// configuration area.
pub(crate) fn sanitize(&self, path: &str) -> Result<(), WebServerError> {
    // Ignore any number of leading slashes before applying the checks.
    let trimmed = path.trim_start_matches('/');
    // Reject parent-directory traversal anywhere in the path.
    if trimmed.contains("..") {
        bail!(WebServerErrorKind::BadRequest(
            "Accessing parent directories is forbidden".to_string()
        ));
    }
    // The configuration files are never served directly.
    if trimmed.starts_with(WEB_CONF_FILES) {
        bail!(WebServerErrorKind::BadRequest(
            "Accessing configuration files is forbidden".to_string()
        ));
    }
    Ok(())
}
/// Fetches `path` from the host's chain: `Ok(None)` when the file does not
/// exist, otherwise a 200 response with Content-Length, MIME type and
/// (when `conf.coop` is set) cross-origin isolation headers.  HEAD
/// requests receive the headers with an empty body.
pub(crate) async fn process_get(
    &self,
    host: &str,
    path: &str,
    is_head: bool,
    conf: &WebConf,
) -> Result<Option<Response<Body>>, WebServerError> {
    self.sanitize(path)?;
    let key = ChainKey::from(format!("{}/www", host));
    trace!("perf-checkpoint: get_file (path={})", path);
    if let Some(data) = self.repo.get_file(&key, host, path).await? {
        // Content-Length reflects the real file size even for HEAD.
        let len_str = data.len().to_string();
        trace!("perf-checkpoint: got_file (data_len={})", len_str);
        let mut resp = if is_head {
            Response::new(Body::empty())
        } else {
            Response::new(Body::from(data))
        };
        resp.headers_mut()
            .append("Content-Length", HeaderValue::from_str(len_str.as_str())?);
        self.apply_mime(path, &mut resp)?;
        // COEP/COOP headers enable cross-origin isolation when configured.
        if conf.coop {
            resp.headers_mut().append(
                "Cross-Origin-Embedder-Policy",
                HeaderValue::from_str("require-corp")?,
            );
            resp.headers_mut().append(
                "Cross-Origin-Opener-Policy",
                HeaderValue::from_str("same-origin")?,
            );
        }
        *resp.status_mut() = StatusCode::OK;
        Ok(Some(resp))
    } else {
        Ok(None)
    }
}
/// Sets the `Content-Type` header on `resp` based on the extension of
/// `path` (everything after the final '.'), looked up in the server's
/// MIME table; unknown extensions leave the response untouched.
pub(crate) fn apply_mime(
    &self,
    path: &str,
    resp: &mut Response<Body>,
) -> Result<(), WebServerError> {
    // rsplit('.').next() yields the segment after the last dot; for a
    // path with no dot it yields the whole path, which matches the old
    // split+collect+rev behavior (such a lookup simply misses the table).
    if let Some(ext) = path.rsplit('.').next() {
        if let Some(mime) = self.mime.get(ext) {
            resp.headers_mut()
                .append("Content-Type", HeaderValue::from_str(mime.as_str())?);
        }
    }
    Ok(())
}
fn init_mime() -> FxHashMap<String, String> {
let mut ret = FxHashMap::default();
ret.insert("aac".to_string(), "audio/aac".to_string());
ret.insert("abw".to_string(), "application/x-abiword".to_string());
ret.insert("arc".to_string(), "application/x-freearc".to_string());
ret.insert("avi".to_string(), "video/x-msvideo".to_string());
ret.insert(
"azw".to_string(),
"application/vnd.amazon.ebook".to_string(),
);
ret.insert("bin".to_string(), "application/octet-stream".to_string());
ret.insert("bmp".to_string(), "image/bmp".to_string());
ret.insert("bz".to_string(), "application/x-bzip".to_string());
ret.insert("bz2".to_string(), "application/x-bzip2".to_string());
ret.insert("cda".to_string(), "application/x-cdf".to_string());
ret.insert("csh".to_string(), "application/x-csh".to_string());
ret.insert("css".to_string(), "text/css".to_string());
ret.insert("csv".to_string(), "text/csv".to_string());
ret.insert("doc".to_string(), "application/msword".to_string());
ret.insert(
"docx".to_string(),
"application/vnd.openxmlformats-officedocument.wordprocessingml.document".to_string(),
);
ret.insert(
"eot".to_string(),
"application/vnd.ms-fontobject".to_string(),
);
ret.insert("epub".to_string(), "application/epub+zip".to_string());
ret.insert("gz".to_string(), "application/gzip".to_string());
ret.insert("gif".to_string(), "image/gif".to_string());
ret.insert("htm".to_string(), "text/html".to_string());
ret.insert("html".to_string(), "text/html".to_string());
ret.insert("ico".to_string(), "image/vnd.microsoft.icon".to_string());
ret.insert("ics".to_string(), "text/calendar".to_string());
ret.insert("jar".to_string(), "application/java-archive".to_string());
ret.insert("jpeg".to_string(), "image/jpeg".to_string());
ret.insert("jpg".to_string(), "image/jpeg".to_string());
ret.insert("js".to_string(), "text/javascript".to_string());
ret.insert("json".to_string(), "application/json".to_string());
ret.insert("jsonld".to_string(), "application/ld+json".to_string());
ret.insert("mid".to_string(), "audio/midi".to_string());
ret.insert("midi".to_string(), "audio/midi".to_string());
ret.insert("mjs".to_string(), "text/javascript".to_string());
ret.insert("mp3".to_string(), "audio/mpeg".to_string());
ret.insert("mp4".to_string(), "video/mp4".to_string());
ret.insert("mpeg".to_string(), "video/mpeg".to_string());
ret.insert(
"mpkg".to_string(),
"application/vnd.apple.installer+xml".to_string(),
);
ret.insert(
"odp".to_string(),
"application/vnd.oasis.opendocument.presentation".to_string(),
);
ret.insert(
"ods".to_string(),
"application/vnd.oasis.opendocument.spreadsheet".to_string(),
);
ret.insert(
"odt".to_string(),
"application/vnd.oasis.opendocument.text".to_string(),
);
ret.insert("oga".to_string(), "audio/ogg".to_string());
ret.insert("ogv".to_string(), "video/ogg".to_string());
ret.insert("ogx".to_string(), "application/ogg".to_string());
ret.insert("opus".to_string(), "audio/opus".to_string());
ret.insert("otf".to_string(), "font/otf".to_string());
ret.insert("png".to_string(), "image/png".to_string());
ret.insert("pdf".to_string(), "application/pdf".to_string());
ret.insert("php".to_string(), "application/x-httpd-php".to_string());
ret.insert(
"ppt".to_string(),
"application/vnd.ms-powerpoint".to_string(),
);
ret.insert(
"pptx".to_string(),
"application/vnd.openxmlformats-officedocument.presentationml.presentation".to_string(),
);
ret.insert("rar".to_string(), "application/vnd.rar".to_string());
ret.insert("rtf".to_string(), "application/rtf".to_string());
ret.insert("sh".to_string(), "application/x-sh".to_string());
ret.insert("svg".to_string(), "image/svg+xml".to_string());
ret.insert(
"swf".to_string(),
"application/x-shockwave-flash".to_string(),
);
ret.insert("tar".to_string(), "application/x-tar".to_string());
ret.insert("tif".to_string(), "image/tiff".to_string());
ret.insert("tiff".to_string(), "image/tiff".to_string());
ret.insert("ts".to_string(), "video/mp2t".to_string());
ret.insert("ttf".to_string(), "font/ttf".to_string());
ret.insert("txt".to_string(), "text/plain".to_string());
ret.insert("vsd".to_string(), "application/vnd.visio".to_string());
ret.insert("wav".to_string(), "audio/wav".to_string());
ret.insert("wasm".to_string(), "application/wasm".to_string());
ret.insert("weba".to_string(), "audio/webm".to_string());
ret.insert("webm".to_string(), "video/webm".to_string());
ret.insert("webp".to_string(), "image/webp".to_string());
ret.insert("woff".to_string(), "font/woff".to_string());
ret.insert("woff2".to_string(), "font/woff2".to_string());
ret.insert("xhtml".to_string(), "application/xhtml+xml".to_string());
ret.insert("xls".to_string(), "application/vnd.ms-excel".to_string());
ret.insert(
"xlsx".to_string(),
"application/vnd.openxmlformats-officedocument.spreadsheetml.sheet".to_string(),
);
ret.insert("xml".to_string(), "application/xml".to_string());
ret.insert(
"xul".to_string(),
"application/vnd.mozilla.xul+xml".to_string(),
);
ret.insert("zip".to_string(), "application/zip".to_string());
ret.insert("7z".to_string(), "application/x-7z-compressed".to_string());
ret
}
/// Fetches `path`, falling back to the configured default page (e.g.
/// `index.html`) when the path itself is absent, and renders a 404 naming
/// the path when neither exists.
pub(crate) async fn process_get_with_default(
    &self,
    host: &str,
    path: &str,
    is_head: bool,
    conf: &WebConf,
) -> Result<Response<Body>, WebServerError> {
    self.sanitize(path)?;
    // If it has parameters passed to the web server we ignore them
    // (NOTE(review): `Uri::path()` should never contain '?', so this
    // split looks like a no-op in practice - confirm before removing).
    let path = if let Some((left, _right)) = path.split_once("?") {
        left.to_string()
    } else {
        path.to_string()
    };
    // Attempt to get the file
    trace!("perf-checkpoint: process_get");
    match self.process_get(host, path.as_str(), is_head, conf).await? {
        Some(a) => {
            return Ok(a);
        }
        None => {
            // Otherwise we attempt to get a default file
            let default_page = conf.default_page.as_ref();
            if let Some(default_page) = default_page {
                // Join with '/' unless the path already ends in one.
                let path = if path.ends_with("/") == false {
                    format!("{}/{}", path, default_page)
                } else {
                    format!("{}{}", path, default_page)
                };
                if let Some(ret) = self.process_get(host, path.as_str(), is_head, conf).await? {
                    return Ok(ret);
                }
            }
        }
    }
    trace!("perf-checkpoint: response from data");
    let data = format!("File Not Found - {}\n", path);
    let mut resp = Response::new(Body::from(data));
    *resp.status_mut() = StatusCode::NOT_FOUND;
    Ok(resp)
}
/// Top-level request dispatcher: handles web-socket upgrade requests,
/// routes POST/PUT to the application callback (when one is registered),
/// and passes everything else to `process_internal`, mapping failures onto
/// the host's configured status pages where possible.
pub(crate) async fn process(
    &self,
    req: Request<Body>,
    sock_addr: SocketAddr,
    listen: &ServerListen,
) -> Result<Response<Body>, WebServerError> {
    trace!("req: {:?}", req);
    if hyper_tungstenite::is_upgrade_request(&req) {
        trace!("perf-checkpoint: hyper upgrade request");
        return self.process_upgrade(req, sock_addr).await;
    }
    let uri = req.uri().clone();
    let method = req.method().clone();
    // POST/PUT are only meaningful with an application callback; without
    // one they fall through to the normal (file-serving) path below.
    if method == Method::POST || method == Method::PUT {
        trace!("perf-checkpoint: put/post");
        if let Some(callback) = &self.callback {
            let headers = req.headers().clone();
            if let Some(body) = hyper::body::to_bytes(req.into_body()).await.ok() {
                let body = body.to_vec();
                let ret = match method {
                    Method::POST => callback.post_request(body, sock_addr, uri, headers).await,
                    Method::PUT => callback.put_request(body, sock_addr, uri, headers).await,
                    _ => Err((Vec::new(), StatusCode::BAD_REQUEST))
                };
                match ret {
                    Ok(resp) => {
                        let resp = Response::new(Body::from(resp));
                        trace!("res: status={}", resp.status().as_u16());
                        return Ok(resp);
                    }
                    Err((resp, status)) => {
                        let mut resp = Response::new(Body::from(resp));
                        *resp.status_mut() = status;
                        trace!("res: status={}", resp.status().as_u16());
                        return Ok(resp);
                    }
                };
            } else {
                // Body could not be read - reply 400 immediately.
                let status = StatusCode::BAD_REQUEST;
                let err = status.as_str().to_string();
                let mut resp = Response::new(Body::from(err));
                *resp.status_mut() = status;
                trace!("res: status={}", resp.status().as_u16());
                return Ok(resp);
            }
        }
        trace!("perf-checkpoint: finished put/post");
    }
    let is_head = method == Method::HEAD;
    let host = self.get_host(&req)?;
    let conf = self.get_conf(host.as_str()).await?;
    let ret = self.process_internal(req, listen, &conf).await;
    match ret {
        Ok(a) => {
            info!("http peer={} method={} path={} - {}", sock_addr, method, uri, a.status());
            Ok(a)
        },
        Err(err) => {
            info!("http peer={} method={} path={} err={}", sock_addr, method, uri, err);
            // Serve the host's custom page for this status code when one
            // is configured; otherwise propagate the error.
            let page = conf
                .status_pages
                .get(&err.status_code().as_u16())
                .map(|a| a.clone());
            if let Some(page) = page {
                trace!("perf-checkpoint: load error page");
                if let Some(ret) = self
                    .process_get(host.as_str(), page.as_str(), is_head, &conf)
                    .await?
                {
                    return Ok(ret);
                }
            }
            return Err(err);
        }
    }
}
/// Performs the web-socket handshake and hands the upgraded stream to the
/// application callback on a background task; rejects the upgrade when no
/// callback is registered.
pub(crate) async fn process_upgrade(
    &self,
    req: Request<Body>,
    sock_addr: SocketAddr,
) -> Result<Response<Body>, WebServerError> {
    if let Some(callback) = &self.callback {
        let uri = req.uri().clone();
        let headers = req.headers().clone();
        let callback = Arc::clone(callback);
        let (response, websocket) = hyper_tungstenite::upgrade(req, None)?;
        // The upgrade completes asynchronously after `response` has been
        // sent, so the callback is driven on a spawned task.
        TaskEngine::spawn(async move {
            match websocket.await {
                Ok(websocket) => {
                    trace!("perf-checkpoint: begin callback.web_socket");
                    let ret = callback.web_socket(websocket, sock_addr, Some(uri), Some(headers)).await;
                    trace!("perf-checkpoint: finish callback.web_socket");
                    if let Err(err) = ret {
                        error!("web socket failed(1) - {}", err);
                    }
                }
                Err(err) => {
                    error!("web socket failed(2) - {}", err);
                }
            }
        });
        Ok(response)
    } else {
        Err(WebServerErrorKind::BadRequest("websockets are not supported".to_string()).into())
    }
}
/// CORS proxy: forwards the request (method, headers and body) to an
/// allow-listed https upstream named by `target` and relays the response,
/// filling in permissive CORS headers wherever the upstream left them
/// unset.  Failures map to bare status codes (400/401).
pub(crate) async fn process_cors(
    &self,
    req: Request<Body>,
    _listen: &ServerListen,
    conf: &WebConf,
    target: String
) -> Result<Response<Body>, StatusCode> {
    // Rebuild the upstream URL (always https), keeping the query string.
    let mut uri = format!("https://{}", target);
    if let Some(query) = req.uri().query() {
        uri += "?";
        uri += query;
    }
    if let Ok(uri) = http::uri::Uri::from_str(uri.as_str())
    {
        // Check if its allowed
        if conf.cors_proxy
            .iter()
            .map(|cors| Some(cors.as_str()))
            .any(|cors| cors == uri.authority().map(|a| a.as_str()))
        {
            let method = req.method().clone();
            let client = reqwest::ClientBuilder::default().build().map_err(|err| {
                debug!("failed to build reqwest client - {}", err);
                StatusCode::BAD_REQUEST
            })?;
            // Forward the method, headers and body unchanged.
            let mut builder = client.request(method, uri.to_string().as_str());
            for (header, val) in req.headers() {
                builder = builder.header(header, val);
            }
            let body = hyper::body::to_bytes(req.into_body()).await
                .map_err(|err| {
                    debug!("failed to build reqwest body - {}", err);
                    StatusCode::BAD_REQUEST
                })?;
            builder = builder.body(reqwest::Body::from(body));
            let request = builder.build().map_err(|err| {
                debug!("failed to convert request (url={}) - {}", uri, err);
                StatusCode::BAD_REQUEST
            })?;
            let response = client.execute(request).await.map_err(|err| {
                debug!("failed to execute reqest - {}", err);
                StatusCode::BAD_REQUEST
            })?;
            // Relay status, headers and body back to the caller.
            let status = response.status();
            let headers = response.headers().clone();
            let data = response.bytes().await.map_err(|err| {
                debug!("failed to read response bytes - {}", err);
                StatusCode::BAD_REQUEST
            })?;
            let data = data.to_vec();
            let mut resp = Response::new(Body::from(data));
            for (header, val) in headers {
                if let Some(header) = header {
                    resp.headers_mut()
                        .append(header, val);
                }
            }
            // Permissive CORS defaults, applied only where the upstream
            // did not set its own values.
            if resp.headers().contains_key(http::header::ACCESS_CONTROL_ALLOW_ORIGIN) == false {
                resp.headers_mut()
                    .append(http::header::ACCESS_CONTROL_ALLOW_ORIGIN, "*".parse().unwrap());
            }
            if resp.headers().contains_key(http::header::ACCESS_CONTROL_ALLOW_METHODS) == false {
                resp.headers_mut()
                    .append(http::header::ACCESS_CONTROL_ALLOW_METHODS, "*".parse().unwrap());
            }
            if resp.headers().contains_key(http::header::ACCESS_CONTROL_ALLOW_HEADERS) == false {
                resp.headers_mut()
                    .append(http::header::ACCESS_CONTROL_ALLOW_HEADERS, "*".parse().unwrap());
            }
            if resp.headers().contains_key(http::header::ACCESS_CONTROL_MAX_AGE) == false {
                resp.headers_mut()
                    .append(http::header::ACCESS_CONTROL_MAX_AGE, "86400".parse().unwrap());
            }
            *resp.status_mut() = status;
            return Ok(resp);
        } else {
            return Err(StatusCode::UNAUTHORIZED);
        }
    }
    return Err(StatusCode::BAD_REQUEST);
}
pub(crate) async fn process_internal(
&self,
req: Request<Body>,
listen: &ServerListen,
conf: &WebConf,
) -> Result<Response<Body>, WebServerError> {
if let Some(redirect) = conf.redirect.as_ref() {
trace!("perf-checkpoint: redirect host");
return self.process_redirect_host(req, listen, &redirect).await;
}
if conf.force_https && listen.tls == false {
trace!("perf-checkpoint: force_https");
return self.force_https(req).await;
}
let mut cors_proxy = req.uri().path().split("https://");
cors_proxy.next();
if let Some(next) = cors_proxy.next() {
trace!("perf-checkpoint: cors proxy");
let next = next.to_string();
return Ok(self.process_cors(req, listen, conf, next).await
.unwrap_or_else(|code| {
let mut resp = Response::new(Body::from(code.as_str().to_string()));
*resp.status_mut() = code;
resp
}));
}
let host = self.get_host(&req)?;
let is_head = req.method() == Method::HEAD || req.method() == Method::OPTIONS;
let path = req.uri().path();
match req.method() {
&Method::OPTIONS | &Method::HEAD | &Method::GET => {
trace!("perf-checkpoint: options/head/get");
self.sanitize(path)?;
self.process_get_with_default(host.as_str(), path, is_head, conf)
.await
}
_ => {
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | true |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/acme/security.rs | ateweb/src/acme/security.rs | use base64::URL_SAFE_NO_PAD;
use ring::digest::{digest, Digest, SHA256};
use ring::rand::SystemRandom;
use ring::signature::{EcdsaKeyPair, KeyPair};
use serde::Serialize;
use crate::error::*;
/// Produces an ACME JWS (flattened JSON serialization) over `payload` for
/// `url`.  The protected header carries either the full JWK (before an
/// account exists, i.e. `kid == None`) or the account's key identifier.
pub fn sign(
    key: &EcdsaKeyPair,
    kid: Option<&str>,
    nonce: String,
    url: &str,
    payload: &str,
) -> Result<String, SecurityError> {
    // Use "jwk" when no account identifier exists yet, "kid" afterwards.
    let jwk = match kid {
        None => Some(Jwk::new(key)),
        Some(_) => None,
    };
    let protected = Protected::base64(jwk, kid, nonce, url)?;
    let payload = base64::encode_config(payload, URL_SAFE_NO_PAD);
    // The ES256 signature covers "<protected>.<payload>".
    let combined = format!("{}.{}", &protected, &payload);
    let signature = key.sign(&SystemRandom::new(), combined.as_bytes())?;
    let signature = base64::encode_config(signature.as_ref(), URL_SAFE_NO_PAD);
    let body = Body {
        protected,
        payload,
        signature,
    };
    Ok(serde_json::to_string(&body)?)
}
/// Computes the SHA-256 digest of the key authorization string
/// `"<token>.<jwk-thumbprint>"` for the given account key and challenge
/// token.
pub fn key_authorization_sha256(key: &EcdsaKeyPair, token: &str) -> Result<Digest, SecurityError> {
    let thumb = Jwk::new(key).thumb_sha256_base64()?;
    let key_authorization = format!("{}.{}", token, thumb);
    Ok(digest(&SHA256, key_authorization.as_bytes()))
}
/// Flattened JWS JSON body posted to the ACME server.
#[derive(Serialize)]
pub struct Body {
    // base64url-encoded protected header.
    protected: String,
    // base64url-encoded payload.
    payload: String,
    // base64url-encoded signature over "protected.payload".
    signature: String,
}

/// JWS protected header; at most one of `jwk` / `kid` is serialized.
#[derive(Serialize)]
pub struct Protected<'a> {
    alg: &'static str,
    #[serde(skip_serializing_if = "Option::is_none")]
    jwk: Option<Jwk>,
    #[serde(skip_serializing_if = "Option::is_none")]
    kid: Option<&'a str>,
    nonce: String,
    url: &'a str,
}
impl<'a> Protected<'a> {
    /// Serializes an ES256 protected header to JSON and base64url-encodes
    /// it (no padding), ready for inclusion in a JWS.
    pub fn base64(
        jwk: Option<Jwk>,
        kid: Option<&'a str>,
        nonce: String,
        url: &'a str,
    ) -> Result<String, SecurityError> {
        let protected = Self {
            alg: "ES256",
            jwk,
            kid,
            nonce,
            url,
        };
        let protected = serde_json::to_vec(&protected)?;
        Ok(base64::encode_config(protected, URL_SAFE_NO_PAD))
    }
}
/// JSON Web Key describing a P-256 ECDSA public signing key.
#[derive(Serialize)]
pub struct Jwk {
    alg: &'static str,
    crv: &'static str,
    kty: &'static str,
    // Serialized as the JWK "use" member ("use" is a Rust keyword).
    #[serde(rename = "use")]
    u: &'static str,
    // base64url-encoded X coordinate of the public key point.
    x: String,
    // base64url-encoded Y coordinate of the public key point.
    y: String,
}
impl Jwk {
    /// Builds the JWK from the key pair's uncompressed public key bytes:
    /// byte 0 is the point-format tag, followed by 32 bytes of X and then
    /// 32 bytes of Y.
    pub fn new(key: &EcdsaKeyPair) -> Self {
        let (x, y) = key.public_key().as_ref()[1..].split_at(32);
        Self {
            alg: "ES256",
            crv: "P-256",
            kty: "EC",
            u: "sig",
            x: base64::encode_config(x, URL_SAFE_NO_PAD),
            y: base64::encode_config(y, URL_SAFE_NO_PAD),
        }
    }

    /// Computes the JWK thumbprint: SHA-256 over the JSON of the
    /// (crv, kty, x, y) subset, base64url-encoded without padding.
    pub fn thumb_sha256_base64(&self) -> Result<String, SecurityError> {
        let jwk_thumb = JwkThumb {
            crv: self.crv,
            kty: self.kty,
            x: &self.x,
            y: &self.y,
        };
        let json = serde_json::to_vec(&jwk_thumb)?;
        let hash = digest(&SHA256, &json);
        Ok(base64::encode_config(hash, URL_SAFE_NO_PAD))
    }
}
/// Minimal key subset hashed for the JWK thumbprint; serialized in the
/// declared field order (crv, kty, x, y).
#[derive(Serialize)]
pub struct JwkThumb<'a> {
    crv: &'a str,
    kty: &'a str,
    x: &'a str,
    y: &'a str,
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/acme/acme.rs | ateweb/src/acme/acme.rs | use base64::URL_SAFE_NO_PAD;
use error_chain::bail;
use rcgen::{Certificate, CustomExtension, PKCS_ECDSA_P256_SHA256};
use ring::rand::SystemRandom;
use ring::signature::{EcdsaKeyPair, ECDSA_P256_SHA256_FIXED_SIGNING};
use serde::{Deserialize, Serialize};
use serde_json::json;
use std::sync::Arc;
use tokio_rustls::rustls::sign::{any_ecdsa_type, CertifiedKey};
use tokio_rustls::rustls::PrivateKey;
#[allow(unused_imports, dead_code)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use http::HeaderMap;
use http::HeaderValue;
use hyper::Body;
use hyper::Client;
use hyper::Method;
use hyper::Request;
use hyper_tls::HttpsConnector;
use super::security::*;
use crate::error::*;
/// Let's Encrypt staging environment directory URL.
pub const LETS_ENCRYPT_STAGING_DIRECTORY: &str =
    "https://acme-staging-v02.api.letsencrypt.org/directory";
/// Let's Encrypt production directory URL.
pub const LETS_ENCRYPT_PRODUCTION_DIRECTORY: &str =
    "https://acme-v02.api.letsencrypt.org/directory";
/// Local pebble test server; `Directory::discover` disables TLS
/// verification when this URL is used.
pub const PEBBLE_DIRECTORY: &str = "https://localhost:14000/dir";
/// ALPN protocol name advertised for the TLS-ALPN-01 challenge.
pub const ACME_TLS_ALPN_NAME: &[u8] = b"acme-tls/1";

/// A registered ACME account: its signing key, the directory it was
/// created against, and the key identifier (kid) the server assigned.
#[derive(Debug)]
pub struct Account {
    pub key_pair: EcdsaKeyPair,
    pub directory: Directory,
    pub kid: String,
}
impl Account {
    /// Creates a new account on the ACME directory with a freshly
    /// generated P-256 key and the supplied contacts, agreeing to the
    /// terms of service; the returned account records the kid taken from
    /// the `Location` response header.
    pub async fn load_or_create<'a, S, I>(
        directory: Directory,
        contact: I,
    ) -> Result<Self, AcmeError>
    where
        S: AsRef<str> + 'a,
        I: IntoIterator<Item = &'a S>,
    {
        let alg = &ECDSA_P256_SHA256_FIXED_SIGNING;
        let contact: Vec<&'a str> = contact.into_iter().map(AsRef::<str>::as_ref).collect();
        // NOTE(review): despite the name, this always generates a fresh
        // key; nothing is loaded from storage here - confirm intent.
        let key_pair = {
            info!("creating a new account key");
            let rng = SystemRandom::new();
            let pkcs8 = EcdsaKeyPair::generate_pkcs8(alg, &rng)?;
            EcdsaKeyPair::from_pkcs8(alg, pkcs8.as_ref())?
        };
        let payload = json!({
            "termsOfServiceAgreed": true,
            "contact": contact,
        })
        .to_string();
        let body = sign(
            &key_pair,
            None,
            directory.nonce().await?,
            &directory.new_account,
            &payload,
        )?;
        let (_, headers) = api_call(
            &directory.new_account,
            Method::POST,
            Some(body),
            directory.insecure,
        )
        .await?;
        // The account URL (kid) is returned in the Location header.
        let kid = get_header(&headers, "Location")?;
        Ok(Account {
            key_pair,
            kid,
            directory,
        })
    }

    /// Sends a signed POST to `url`, retrying up to five times when the
    /// server rejects the nonce (`urn:ietf:params:acme:error:badNonce`);
    /// a fresh nonce is fetched on every attempt.
    pub async fn request(
        &self,
        url: &str,
        payload: &str,
    ) -> Result<(String, HeaderMap), AcmeError> {
        let mut n = 0;
        loop {
            let body = sign(
                &self.key_pair,
                Some(&self.kid),
                self.directory.nonce().await?,
                url,
                payload,
            )?;
            match api_call(url, Method::POST, Some(body), self.directory.insecure).await {
                Ok((body, headers)) => {
                    //debug!("response: {:?}", body);
                    return Ok((body, headers));
                }
                Err(AcmeError(AcmeErrorKind::ApiError(err), _)) => {
                    if err.typ == "urn:ietf:params:acme:error:badNonce" && n < 5 {
                        n += 1;
                        continue;
                    }
                    bail!(AcmeErrorKind::ApiError(err));
                }
                Err(err) => {
                    return Err(err);
                }
            };
        }
    }

    /// Fetches and parses an authorization object from `url`.
    pub async fn auth(&self, url: &str) -> Result<Auth, AcmeError> {
        let payload = "".to_string();
        let (response, _) = self.request(url, &payload).await?;
        Ok(serde_json::from_str(&response)?)
    }

    /// Signals that the challenge at `url` is ready for validation.
    pub async fn challenge(&self, url: &str) -> Result<(), AcmeError> {
        self.request(url, "{}").await?;
        Ok(())
    }

    /// Creates a certificate order for `domains`; returns the parsed
    /// order together with its URL (taken from the Location header).
    pub async fn new_order(&self, domains: Vec<String>) -> Result<(Order, String), AcmeError> {
        //let _not_before = not_before.to_rfc3339();
        // let _not_after = not_after.to_rfc3339();
        let domains: Vec<Identifier> = domains.into_iter().map(|d| Identifier::Dns(d)).collect();
        //let payload = format!("{{\"identifiers\":{},\"notBefore\":\"{}\",\"notAfter\":\"{}\"}}", serde_json::to_string(&domains)?, not_before, not_after);
        let payload = format!("{{\"identifiers\":{}}}", serde_json::to_string(&domains)?);
        let (response, headers) = self.request(&self.directory.new_order, &payload).await?;
        let order = serde_json::from_str(&response)?;
        let kid = get_header(&headers, "Location")?;
        Ok((order, kid))
    }

    /// Submits the DER-encoded CSR (base64url, no padding) to the order's
    /// finalize URL and returns the updated order.
    pub async fn finalize(&self, url: &str, csr: Vec<u8>) -> Result<Order, AcmeError> {
        let payload = format!(
            "{{\"csr\":\"{}\"}}",
            base64::encode_config(csr, URL_SAFE_NO_PAD)
        );
        let (response, _) = self.request(url, &payload).await?;
        Ok(serde_json::from_str(&response)?)
    }

    /// Downloads the issued certificate text from `url`.
    pub async fn certificate(&self, url: &str) -> Result<String, AcmeError> {
        let (ret, _) = self.request(url, "").await?;
        Ok(ret)
    }

    /// Re-reads an order to observe its current state.
    pub async fn check(&self, url: &str) -> Result<Order, AcmeError> {
        let (response, _) = self.request(url, "").await?;
        Ok(serde_json::from_str(&response)?)
    }

    /// Answers a TLS-ALPN-01 challenge: picks the tls-alpn-01 entry from
    /// `challenges` and builds a self-signed certificate for `domain`
    /// carrying the acmeIdentifier extension with the SHA-256 key
    /// authorization.  Returns the challenge, the certified key, and the
    /// certificate/private-key PEM strings.
    pub fn tls_alpn_01<'a>(
        &self,
        challenges: &'a Vec<Challenge>,
        domain: String,
    ) -> Result<(&'a Challenge, CertifiedKey, String, String), AcmeError> {
        let challenge = challenges
            .iter()
            .filter(|c| c.typ == ChallengeType::TlsAlpn01)
            .next();
        let challenge = match challenge {
            Some(challenge) => challenge,
            None => return Err(AcmeErrorKind::NoTlsAlpn01Challenge.into()),
        };
        let mut params = rcgen::CertificateParams::new(vec![domain]);
        let key_auth = key_authorization_sha256(&self.key_pair, &*challenge.token)?;
        params.alg = &PKCS_ECDSA_P256_SHA256;
        params.custom_extensions = vec![CustomExtension::new_acme_identifier(key_auth.as_ref())];
        let cert = Certificate::from_params(params)?;
        let cert_pem = cert.serialize_pem()?;
        let pk_pem = cert.serialize_private_key_pem();
        let pk = any_ecdsa_type(&PrivateKey(cert.serialize_private_key_der())).unwrap();
        let certified_key = CertifiedKey::new(
            vec![tokio_rustls::rustls::Certificate(cert.serialize_der()?)],
            Arc::new(pk),
        );
        Ok((challenge, certified_key, cert_pem, pk_pem))
    }
}
/// Subset of the ACME directory document (RFC 8555 §7.1.1) used by this module.
#[derive(Debug, Clone, Deserialize)]
#[serde(rename_all = "camelCase")]
pub struct Directory {
    /// URL for fetching fresh anti-replay nonces.
    pub new_nonce: String,
    /// URL for creating or looking up accounts.
    pub new_account: String,
    /// URL for creating new certificate orders.
    pub new_order: String,
    /// Not part of the wire format: true when talking to the local Pebble
    /// test server, in which case TLS certificate verification is disabled.
    #[serde(skip)]
    pub insecure: bool,
}
impl Directory {
    /// Fetch and parse the directory document from `url`.
    ///
    /// When the URL is the local Pebble test endpoint, certificate
    /// verification is disabled for this and all subsequent calls.
    pub async fn discover(url: &str) -> Result<Self, AcmeError> {
        let insecure = url == PEBBLE_DIRECTORY;
        let (body, _) = api_call(url, Method::GET, None, insecure).await?;
        let mut ret: Directory = serde_json::from_str(body.as_str())?;
        ret.insecure = insecure;
        Ok(ret)
    }
    /// Request a fresh anti-replay nonce via a HEAD call to `newNonce`.
    pub async fn nonce(&self) -> Result<String, AcmeError> {
        // Idiom fix: pass the &str directly rather than a needless extra
        // reference (`&self.new_nonce.as_str()` was a `&&str`).
        let (_, headers) =
            api_call(self.new_nonce.as_str(), Method::HEAD, None, self.insecure).await?;
        get_header(&headers, "replay-nonce")
    }
}
/// ACME challenge types that may appear in an authorization object.
#[derive(Debug, Deserialize, Eq, PartialEq)]
pub enum ChallengeType {
    #[serde(rename = "http-01")]
    Http01,
    #[serde(rename = "dns-01")]
    Dns01,
    /// The only challenge type this module actually performs (see `tls_alpn_01`).
    #[serde(rename = "tls-alpn-01")]
    TlsAlpn01,
}
/// ACME order object, discriminated by its `status` field.
#[derive(Debug, Deserialize)]
#[serde(tag = "status", rename_all = "camelCase")]
pub enum Order {
    /// Authorizations still need to be completed.
    Pending {
        authorizations: Vec<String>,
        finalize: String,
    },
    /// All authorizations done; the CSR may be submitted to `finalize`.
    Ready {
        finalize: String,
    },
    /// Certificate issued and downloadable from `certificate`.
    Valid {
        certificate: String,
    },
    Invalid,
    Processing,
}
/// ACME authorization object, discriminated by its `status` field.
#[derive(Debug, Deserialize)]
#[serde(tag = "status", rename_all = "camelCase")]
pub enum Auth {
    /// One of the offered challenges must still be completed.
    Pending {
        identifier: Identifier,
        challenges: Vec<Challenge>,
    },
    Valid,
    Invalid,
    Revoked,
    Expired,
}
/// Identifier being authorized; only DNS names are supported here.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(tag = "type", content = "value", rename_all = "camelCase")]
pub enum Identifier {
    Dns(String),
}
/// A single challenge offered inside an authorization.
#[derive(Debug, Deserialize)]
pub struct Challenge {
    #[serde(rename = "type")]
    pub typ: ChallengeType,
    /// URL to POST to when we are ready for the server to validate.
    pub url: String,
    /// Token incorporated into the key authorization.
    pub token: String,
}
/// Problem document returned by the ACME server on HTTP 400 responses.
#[derive(Debug, Deserialize)]
pub struct ApiError {
    #[serde(rename = "type")]
    pub typ: String,
    pub detail: String,
    pub status: u16,
}
/// Extract a single response header as an owned `String`, failing with
/// `MissingHeader` when the header is absent.
fn get_header(
    response: &HeaderMap<HeaderValue>,
    header: &'static str,
) -> Result<String, AcmeError> {
    match response.get(header) {
        None => bail!(AcmeErrorKind::MissingHeader(header)),
        Some(value) => {
            let text = value.to_str()?;
            Ok(text.to_string())
        }
    }
}
/// Perform one HTTP(S) call and return the (pretty-printed) body plus the
/// response headers.
///
/// `insecure` disables TLS certificate verification (used only for the local
/// Pebble test server). Non-2xx responses become `AcmeError`s; an HTTP 400
/// with a parsable problem document is surfaced as `ApiError`.
async fn api_call(
    req_url: &str,
    method: Method,
    req: Option<String>,
    insecure: bool,
) -> Result<(String, HeaderMap<HeaderValue>), AcmeError> {
    // Build the request
    let req_url = req_url.to_string();
    /*
    if let Some(req_str) = req.as_ref() {
        debug!("Request: {:?}@{}", req_str, req_url);
    } else {
        debug!("Request: @{}", req_url);
    }
    */
    // Create the HTTPS client
    // NOTE(review): a fresh client (and connection pool) is created per call;
    // acceptable given ACME's low call volume.
    let client = {
        let tls_connector = hyper_tls::native_tls::TlsConnector::builder()
            .danger_accept_invalid_certs(insecure)
            .build()
            .unwrap();
        let mut http_connector = hyper::client::HttpConnector::new();
        // Allow https:// URIs to pass through the underlying HTTP connector.
        http_connector.enforce_http(false);
        let https_connector = HttpsConnector::from((http_connector, tls_connector.into()));
        Client::builder().build::<_, hyper::Body>(https_connector)
    };
    // Make the request object
    let builder = Request::builder()
        .method(method)
        .uri(req_url)
        .header("Content-Type", "application/jose+json");
    let req = if let Some(req_str) = req {
        builder.body(Body::from(req_str)).unwrap()
    } else {
        builder.body(Body::empty()).unwrap()
    };
    let mut res = client.request(req).await?;
    let status = res.status();
    //debug!("Response: {}", status);
    //debug!("Headers: {:#?}\n", res.headers());
    let headers = res.headers().clone();
    let res = hyper::body::to_bytes(res.body_mut()).await?;
    let orig_res = String::from_utf8(res.into_iter().collect()).unwrap();
    // Pretty print (fall back to the raw body when it is not valid JSON)
    let res = match jsonxf::pretty_print(orig_res.as_str()) {
        Ok(a) => a,
        Err(err) => {
            error!("{}", err);
            orig_res.clone()
        }
    };
    // If an error occurred then fail
    if !status.is_success() {
        warn!("{}", res);
        if status.as_u16() == 400 {
            if let Some(err) = serde_json::from_str::<ApiError>(res.as_str()).ok() {
                bail!(AcmeErrorKind::ApiError(err));
            }
        }
        bail!(AcmeErrorKind::BadResponse(status.as_u16(), orig_res));
    }
    //debug!("Body: {}", res);
    Ok((res, headers))
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/acme/mod.rs | ateweb/src/acme/mod.rs | mod acme;
mod resolver;
mod security;
pub use acme::*;
pub use resolver::*;
pub use security::*;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/acme/resolver.rs | ateweb/src/acme/resolver.rs | use super::acme::{
Account,
Auth,
Directory,
Identifier,
Order,
ACME_TLS_ALPN_NAME,
LETS_ENCRYPT_PRODUCTION_DIRECTORY,
//LETS_ENCRYPT_STAGING_DIRECTORY,
//PEBBLE_DIRECTORY,
};
use ate::prelude::*;
use bytes::Bytes;
use futures::future::try_join_all;
use fxhash::FxHashMap;
use rcgen::{CertificateParams, DistinguishedName, PKCS_ECDSA_P256_SHA256};
use rustls::sign::any_supported_type;
use rustls::sign::CertifiedKey;
use rustls::Certificate as RustlsCertificate;
use rustls::ClientHello;
use rustls::PrivateKey;
use rustls::ResolvesServerCert;
use std::collections::hash_map::Entry;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use std::sync::RwLock as StdRwLock;
use std::time::Duration;
use tokio::sync::Mutex;
#[allow(unused_imports, dead_code)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use ttl_cache::TtlCache;
use x509_parser::parse_x509_certificate;
use ate_files::repo::*;
use crate::error::*;
use crate::model::*;
/// Per-domain bookkeeping for certificate ordering attempts.
#[derive(Default)]
pub struct AcmeState {
    // Number of consecutive failed order attempts (drives exponential backoff).
    err_cnt: i64,
    // Earliest time another attempt may be made; None means try immediately.
    next_try: Option<chrono::DateTime<chrono::Utc>>,
}
/// Resolves TLS certificates for incoming connections, ordering and renewing
/// them through ACME on demand.
pub struct AcmeResolver {
    /// Backing repository where certificates and keys are persisted per site.
    pub repo: Arc<Repository>,
    /// Cache of regular web certificates keyed by SNI.
    pub certs: StdRwLock<TtlCache<String, CertifiedKey>>,
    /// Cache of TLS-ALPN-01 challenge certificates keyed by SNI.
    pub auths: StdRwLock<TtlCache<String, CertifiedKey>>,
    /// Per-domain locks serializing certificate ordering.
    pub locks: StdMutex<FxHashMap<String, Arc<Mutex<AcmeState>>>>,
}
impl AcmeResolver {
    /// Construct a resolver backed by `repo` with empty certificate caches.
    pub async fn new(repo: &Arc<Repository>) -> Result<Arc<AcmeResolver>, AteError> {
        Ok(Arc::new(AcmeResolver {
            repo: Arc::clone(repo),
            certs: StdRwLock::new(TtlCache::new(65536usize)),
            auths: StdRwLock::new(TtlCache::new(1024usize)),
            locks: StdMutex::new(FxHashMap::default()),
        }))
    }
}
impl AcmeResolver {
async fn process_cert(
&self,
sni: &str,
cert: Bytes,
key: Bytes,
) -> Result<Option<CertifiedKey>, Box<dyn std::error::Error>> {
let key = pem::parse(&key[..])?;
let pems = pem::parse_many(&cert[..]);
if pems.len() < 1 {
error!("expected 1 or more pem in {}, got: {}", sni, pems.len());
return Ok(None);
}
let pk = match any_supported_type(&PrivateKey(key.contents)) {
Ok(pk) => pk,
Err(_) => {
error!("{} does not contain an ecdsa private key", sni);
return Ok(None);
}
};
let cert_chain: Vec<RustlsCertificate> = pems
.into_iter()
.map(|p| RustlsCertificate(p.contents))
.collect();
let cert_key = CertifiedKey::new(cert_chain, Arc::new(pk));
Ok(Some(cert_key))
}
    /// Ensure the TLS-ALPN-01 challenge certificate for `sni` is cached in
    /// `self.auths`, loading it from the site's backing file system if needed.
    pub async fn touch_alpn(&self, sni: String) -> Result<(), Box<dyn std::error::Error>> {
        // Fast path: already cached.
        {
            let guard = self.auths.read().unwrap();
            if guard.contains_key(&sni) {
                return Ok(());
            }
        }
        // Load the certificates
        let web_key = ChainKey::from(format!("{}/www", sni));
        let cert = self
            .repo
            .get_file(&web_key, sni.as_str(), WEB_CONF_FILES_ALPN_CERT)
            .await?;
        let key = self
            .repo
            .get_file(&web_key, sni.as_str(), WEB_CONF_FILES_ALPN_KEY)
            .await?;
        if let Some(cert) = cert {
            if let Some(key) = key {
                if let Some(cert_key) = self.process_cert(sni.as_str(), cert, key).await? {
                    // Cache for five minutes; challenge certs are short-lived.
                    let mut guard = self.auths.write().unwrap();
                    guard.insert(sni.to_string(), cert_key, Duration::from_secs(300));
                    return Ok(());
                }
            } else {
                warn!("missing alpn private key for {}", sni);
            }
        } else {
            warn!("missing alpn chain for {}", sni);
        }
        // No certificate :-( — drop any stale cache entry.
        let mut guard = self.auths.write().unwrap();
        guard.remove(&sni);
        Ok(())
    }
    /// Make sure a usable TLS certificate for `sni` is cached, ordering (or
    /// renewing) one through ACME when the cached/stored certificate is
    /// within `renewal` of its expiry.
    pub async fn touch_web(
        &self,
        sni: String,
        renewal: chrono::Duration,
    ) -> Result<(), Box<dyn std::error::Error>> {
        // Fast path: cached cert that does not yet need renewal.
        {
            let guard = self.certs.read().unwrap();
            if let Some(cert) = guard.get(&sni) {
                let d = self.duration_until_renewal_attempt(cert, renewal);
                if d.as_secs() > 0 {
                    trace!("next renewal attempt in {}s", d.as_secs());
                    return Ok(());
                }
            }
        }
        let web_key = ChainKey::from(format!("{}/www", sni));
        // Per-domain async lock so only one task orders a certificate at a time.
        let lock = {
            let mut guard = self.locks.lock().unwrap();
            match guard.entry(sni.clone()) {
                Entry::Occupied(a) => Arc::clone(a.get()),
                Entry::Vacant(a) => {
                    let ret = Arc::new(Mutex::new(AcmeState::default()));
                    a.insert(Arc::clone(&ret));
                    ret
                }
            }
        };
        let mut lock = lock.lock().await;
        // Slow path: re-check under the lock (another task may have renewed).
        let loaded = {
            let guard = self.certs.read().unwrap();
            if let Some(cert) = guard.get(&sni) {
                let d = self.duration_until_renewal_attempt(cert, renewal);
                if d.as_secs() > 0 {
                    trace!("next renewal attempt in {}s", d.as_secs());
                    return Ok(());
                }
                true
            } else {
                false
            }
        };
        // If we have never loaded the certificates from disk then load them now
        if loaded == false {
            let cert = self
                .repo
                .get_file(&web_key, sni.as_str(), WEB_CONF_FILES_WEB_CERT)
                .await?;
            let key = self
                .repo
                .get_file(&web_key, sni.as_str(), WEB_CONF_FILES_WEB_KEY)
                .await?;
            if let Some(cert) = cert {
                if let Some(key) = key {
                    if let Some(cert_key) = self.process_cert(sni.as_str(), cert, key).await? {
                        let mut guard = self.certs.write().unwrap();
                        guard.insert(sni.to_string(), cert_key.clone(), Duration::from_secs(3600));
                        let d = self.duration_until_renewal_attempt(&cert_key, renewal);
                        if d.as_secs() > 0 {
                            trace!("next renewal attempt in {}s", d.as_secs());
                            return Ok(());
                        } else {
                            info!("certificate will be renewed for {}", sni);
                        }
                    } else {
                        warn!("failed to process certificate");
                    }
                } else {
                    warn!("missing certificate private key for {}", sni);
                }
            } else {
                warn!("missing certificate chain for {}", sni);
            }
            // If the file system that backs this web site is not even in existence then we should
            // not try and generate a certificate as we have nowhere to save it
            let accessor = self.repo.get_accessor(&web_key, sni.as_str()).await?;
            if accessor
                .root(&ate_files::prelude::RequestContext::default())
                .await?
                .is_none()
            {
                trace!("aborting attempt due to uninitialized backing file system");
                return Ok(());
            }
        }
        // Check for exponential backoff
        if let Some(next_try) = lock.next_try {
            if next_try.gt(&chrono::Utc::now()) {
                trace!("aborting attempt due to exponential backoff");
                return Ok(());
            }
        }
        let directory_url = LETS_ENCRYPT_PRODUCTION_DIRECTORY;
        //let directory_url = LETS_ENCRYPT_STAGING_DIRECTORY;
        //let directory_url = PEBBLE_DIRECTORY;
        let expires = chrono::Duration::days(90);
        // Order the certificate using lets encrypt
        debug!("ordering of certificate started");
        match self.order(&directory_url, sni.as_str(), expires).await {
            Ok((cert_key, cert_pem, pk_pem)) => {
                debug!("successfully ordered certificate");
                lock.err_cnt = 0i64;
                lock.next_try = None;
                // Persist the new certificate so later restarts can reload it.
                self.repo
                    .set_file(&web_key, sni.as_str(), WEB_CONF_FILES_WEB_CERT, cert_pem.as_bytes())
                    .await?;
                self.repo
                    .set_file(&web_key, sni.as_str(), WEB_CONF_FILES_WEB_KEY, pk_pem.as_bytes())
                    .await?;
                let mut guard = self.certs.write().unwrap();
                guard.insert(sni.to_string(), cert_key, Duration::from_secs(3600));
            }
            Err(err) => {
                // Exponential backoff: 2^err_cnt seconds until the next attempt.
                warn!("ordering certificate failed: {}", err);
                lock.err_cnt += 1i64;
                let retry_time = chrono::Duration::seconds(1 << lock.err_cnt);
                let retry_time = chrono::Utc::now() + retry_time;
                lock.next_try = Some(retry_time);
            }
        };
        Ok(())
    }
    /// Seconds until the certificate should next be renewed: the time until
    /// `notAfter` minus the `renewal` head-start, clamped at zero. Falls back
    /// to one year when no certificate in the chain parses.
    fn duration_until_renewal_attempt(
        &self,
        cert_key: &CertifiedKey,
        renewal: chrono::Duration,
    ) -> Duration {
        for cert in cert_key.cert.iter() {
            if let Ok((_, cert)) = parse_x509_certificate(cert.0.as_slice()) {
                let time_stamp = chrono::DateTime::<chrono::Utc>::from_utc(
                    chrono::NaiveDateTime::from_timestamp(cert.validity().not_after.timestamp(), 0),
                    chrono::Utc,
                );
                trace!("valid until {}", time_stamp);
                let valid_until = cert.validity().not_after.timestamp();
                let valid_secs = (valid_until - chrono::Utc::now().timestamp()).max(0);
                let valid_secs = (valid_secs - renewal.num_seconds()).max(0);
                // NOTE(review): only the first parsable certificate in the
                // chain is considered — presumably the leaf; confirm ordering.
                return Duration::from_secs(valid_secs as u64);
            }
        }
        chrono::Duration::days(365).to_std().unwrap()
    }
    /// Run the full ACME order state machine for `domain`, returning the
    /// certified key plus the certificate and private-key PEMs.
    async fn order(
        &self,
        directory_url: &str,
        domain: &str,
        duration: chrono::Duration,
    ) -> Result<(CertifiedKey, String, String), OrderError> {
        let contacts = vec![format!("mailto:info@{}", domain)];
        let domains = vec![domain.to_string()];
        let not_before = chrono::Utc::now();
        let mut not_after = not_before.clone();
        if let Some(not_after_next) = not_before.checked_add_signed(duration) {
            not_after = not_after_next;
        }
        // Generate the key pair and CSR parameters for the new certificate.
        let mut params = CertificateParams::new(domains.clone());
        params.distinguished_name = DistinguishedName::new();
        params.alg = &PKCS_ECDSA_P256_SHA256;
        params.not_before = not_before;
        params.not_after = not_after;
        let cert = rcgen::Certificate::from_params(params)?;
        let pk_pem = cert.serialize_private_key_pem();
        let pk_bytes = cert.serialize_private_key_der();
        let pk = any_supported_type(&PrivateKey(pk_bytes.clone())).unwrap();
        debug!("load_or_create account");
        let directory = Directory::discover(directory_url).await?;
        let account = Account::load_or_create(directory, &contacts).await?;
        debug!("new order for {:?}", domains);
        let mut wait = 0u32;
        let (mut order, kid) = account.new_order(domains.clone()).await?;
        // Drive the order through Pending -> Ready -> Processing -> Valid.
        loop {
            order = match order {
                Order::Pending {
                    authorizations,
                    finalize,
                } => {
                    let auth_futures = authorizations
                        .iter()
                        .map(|url| self.authorize(&account, domain, url));
                    try_join_all(auth_futures).await?;
                    debug!("completed all authorizations");
                    Order::Ready { finalize }
                }
                Order::Ready { finalize } => {
                    debug!("sending csr");
                    let csr = cert.serialize_request_der()?;
                    account.finalize(finalize.as_str(), csr).await?
                }
                Order::Processing => {
                    debug!("processing certificate");
                    // Poll at one-second intervals, giving up after 30 polls.
                    wait += 1;
                    if wait > 30 {
                        return Err(OrderErrorKind::Timeout.into());
                    }
                    ate::engine::sleep(Duration::from_secs(1)).await;
                    account.check(kid.as_str()).await?
                }
                Order::Valid { certificate } => {
                    debug!("download certificate");
                    let acme_cert_pem = account.certificate(certificate.as_str()).await?;
                    // Repair PEM markers that come back without internal spacing.
                    let acme_cert_pem = acme_cert_pem.replace(
                        "-----BEGINCERTIFICATE-----",
                        "-----BEGIN CERTIFICATE-----\n",
                    );
                    let acme_cert_pem = acme_cert_pem
                        .replace("-----ENDCERTIFICATE-----", "\n-----END CERTIFICATE-----\n");
                    let pems = pem::parse_many(&acme_cert_pem);
                    let cert_chain: Vec<rustls::Certificate> = pems
                        .into_iter()
                        .map(|p| RustlsCertificate(p.contents))
                        .collect();
                    let cert_key = CertifiedKey::new(cert_chain, Arc::new(pk));
                    return Ok((cert_key, acme_cert_pem, pk_pem));
                }
                Order::Invalid => return Err(OrderErrorKind::BadOrder(order).into()),
            }
        }
    }
    /// Complete a single ACME authorization via the TLS-ALPN-01 challenge,
    /// persisting the challenge certificate so TLS resolvers can serve it.
    async fn authorize(
        &self,
        account: &Account,
        sni: &str,
        url: &String,
    ) -> Result<(), OrderError> {
        debug!("starting authorization for {}", url);
        let (domain, challenge_url) = match account.auth(url).await? {
            Auth::Pending {
                identifier,
                challenges,
            } => {
                let Identifier::Dns(domain) = identifier;
                info!("trigger challenge for {}", &domain);
                let (challenge, _auth_key, cert_pem, pk_pem) =
                    account.tls_alpn_01(&challenges, domain.clone())?;
                // Store the challenge certificate in the site's file system so
                // any server instance can answer the TLS-ALPN handshake.
                let key = ChainKey::from(format!("{}/www", sni));
                self.repo
                    .set_file(&key, sni, WEB_CONF_FILES_ALPN_CERT, cert_pem.as_bytes())
                    .await?;
                self.repo
                    .set_file(&key, sni, WEB_CONF_FILES_ALPN_KEY, pk_pem.as_bytes())
                    .await?;
                // Drop any stale cached challenge certificate for this domain.
                self.auths.write().unwrap().remove(&domain);
                /*
                self.auths
                    .write()
                    .unwrap()
                    .insert(domain.clone(), _auth_key, Duration::from_secs(300));
                */
                account.challenge(&challenge.url).await?;
                (domain, challenge.url.clone())
            }
            Auth::Valid => return Ok(()),
            auth => return Err(OrderErrorKind::BadAuth(auth).into()),
        };
        // Poll with exponential backoff (1, 2, 4, 8, 16 seconds).
        for i in 0u64..5 {
            ate::engine::sleep(Duration::from_secs(1 << i)).await;
            match account.auth(url).await? {
                Auth::Pending { .. } => {
                    info!("authorization for {} still pending", &domain);
                    account.challenge(&challenge_url).await?
                }
                Auth::Valid => return Ok(()),
                auth => return Err(OrderErrorKind::BadAuth(auth).into()),
            }
        }
        Err(OrderErrorKind::TooManyAttemptsAuth(domain).into())
    }
}
impl ResolvesServerCert for AcmeResolver {
    /// Pick the certificate to present for a TLS handshake.
    ///
    /// ACME TLS-ALPN-01 handshakes (signalled via the ALPN protocol list) are
    /// served from the `auths` cache; regular connections from `certs`.
    fn resolve(&self, client_hello: ClientHello) -> Option<CertifiedKey> {
        if let Some(sni) = client_hello.server_name() {
            let sni = sni.to_owned();
            let sni: String = AsRef::<str>::as_ref(&sni).to_string();
            if client_hello.alpn() == Some(&[ACME_TLS_ALPN_NAME]) {
                let guard = self.auths.read().unwrap();
                if let Some(cert) = guard.get(&sni) {
                    trace!("tls_challenge: auth_hit={:?}", sni);
                    return Some(cert.clone());
                } else {
                    trace!("tls_challenge: auth_miss={:?}", sni);
                    return None;
                }
            }
            let guard = self.certs.read().unwrap();
            return if let Some(cert) = guard.get(&sni) {
                trace!("tls_hello: cert_hit={:?}", sni);
                Some(cert.clone())
            } else {
                trace!("tls_hello: cert_miss={:?}", sni);
                None
            };
        } else {
            // Without SNI we cannot know which certificate to serve.
            debug!("rejected connection (SNI was missing)");
        }
        None
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/model/web_conf.rs | ateweb/src/model/web_conf.rs | use fxhash::FxHashMap;
use serde::*;
/// Per-site web server configuration (presumably deserialized from the
/// site's `.conf/web.yaml` — confirm against the loader).
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct WebConf {
    /// Forces the host to be redirected to a new URL
    #[serde(default)]
    pub redirect: Option<String>,
    /// Hosts the ATE web sockets and other end-points such as HTTP on this site
    #[serde(default)]
    pub ate_proxy: bool,
    /// Enable COOP (Cross-Origin-Opener-Policy: same-origin)
    #[serde(default)]
    pub coop: bool,
    /// Force (by direction) all requests to HTTPS
    #[serde(default)]
    pub force_https: bool,
    /// The default page
    #[serde(default)]
    pub default_page: Option<String>,
    /// Redirects certain status codes to specific pages
    #[serde(default)]
    pub status_pages: FxHashMap<u16, String>,
    /// List of the domains that this domain will reverse proxy for cors
    #[serde(default)]
    pub cors_proxy: Vec<String>,
}
impl Default for WebConf {
    /// Everything off: no redirect, no proxying, no custom pages.
    fn default() -> Self {
        Self {
            redirect: None,
            ate_proxy: false,
            coop: false,
            force_https: false,
            default_page: None,
            status_pages: FxHashMap::default(),
            cors_proxy: Vec::new(),
        }
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/model/mod.rs | ateweb/src/model/mod.rs | mod web_conf;
pub use web_conf::*;
/// Directory (inside each site's file system) that holds server configuration.
pub const WEB_CONF_FILES: &'static str = ".conf/";
/// Main web configuration file for a site.
pub const WEB_CONF_FILES_CONF: &'static str = ".conf/web.yaml";
/// TLS certificate chain (PEM) served for regular HTTPS traffic.
pub const WEB_CONF_FILES_WEB_CERT: &'static str = ".conf/cert.pem";
/// Private key (PEM) matching the web certificate.
pub const WEB_CONF_FILES_WEB_KEY: &'static str = ".conf/key.pem";
/// Short-lived certificate used to answer ACME TLS-ALPN-01 challenges.
pub const WEB_CONF_FILES_ALPN_CERT: &'static str = ".conf/alpn/cert.pem";
/// Private key matching the ALPN challenge certificate.
pub const WEB_CONF_FILES_ALPN_KEY: &'static str = ".conf/alpn/key.pem";
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/opt/all.rs | ateweb/src/opt/all.rs | use std::net::IpAddr;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use url::Url;
use clap::Parser;
/// Runs a web server that will serve content from a Wasmer file system
// Fix: "isten" -> "listen" typo in user-visible clap help text.
#[derive(Parser)]
pub struct OptsAll {
    /// IP address that the datachain server will listen on
    #[clap(short, long, default_value = "::")]
    pub listen: IpAddr,
    /// Port that the server will listen on for HTTP requests
    #[clap(long, default_value = "80")]
    pub port: u16,
    /// Number of seconds that a website will remain idle in memory before it is evicted
    #[clap(long, default_value = "60")]
    pub ttl: u64,
    /// URL where the data is remotely stored on a distributed commit log.
    #[clap(short, long, default_value = "ws://wasmer.sh/db")]
    pub remote: Url,
    /// Location where all the websites will be cached
    #[clap(long, default_value = "/tmp/www")]
    pub log_path: String,
    /// Path to the secret key that helps protect key operations like creating users and resetting passwords
    #[clap(long, default_value = "~/wasmer/auth.key")]
    pub auth_key_path: String,
    /// Path to the log files where all the authentication data is stored
    #[clap(long, default_value = "~/wasmer/auth")]
    pub auth_logs_path: String,
    /// Address that the authentication server(s) are listening and that
    /// this server can connect to if the chain is on another mesh node
    #[clap(short, long, default_value = "ws://localhost:5001/auth")]
    pub auth_url: url::Url,
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/opt/core.rs | ateweb/src/opt/core.rs | use std::net::IpAddr;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use url::Url;
use clap::Parser;
use super::OptsAuth;
// Top-level command-line options; the subcommand selects auth/web/all mode.
// (Plain `//` comments are used here because `///` doc comments feed clap's
// generated help output and would change runtime behavior.)
#[derive(Parser)]
#[clap(version = "1.6", author = "John S. <johnathan.sharratt@gmail.com>")]
pub struct Opts {
    /// Sets the level of log verbosity, can be used multiple times
    #[allow(dead_code)]
    #[clap(short, long, parse(from_occurrences))]
    pub verbose: i32,
    /// URL where the user is authenticated
    #[clap(short, long, default_value = "ws://wasmer.sh/auth")]
    pub auth: Url,
    /// No NTP server will be used to synchronize the time thus the server time
    /// will be used instead
    #[clap(long)]
    pub no_ntp: bool,
    /// NTP server address that the file-system will synchronize with
    #[clap(long)]
    pub ntp_pool: Option<String>,
    /// NTP server port that the file-system will synchronize with
    #[clap(long)]
    pub ntp_port: Option<u16>,
    /// Logs debug info to the console
    #[clap(short, long)]
    pub debug: bool,
    /// Determines if ATE will use DNSSec or just plain DNS
    #[clap(long)]
    pub dns_sec: bool,
    /// Address that DNS queries will be sent to
    #[clap(long, default_value = "8.8.8.8")]
    pub dns_server: String,
    /// Token file to read that holds a previously created token to be used for this operation
    #[clap(long, default_value = "~/wasmer/token")]
    pub token_path: String,
    #[clap(subcommand)]
    pub subcmd: SubCommand,
}
/// Runs a web server that will serve content from a Wasmer file system
// Fix: "isten" -> "listen" typo in user-visible clap help text.
#[derive(Parser)]
pub struct OptsWeb {
    /// IP address that the datachain server will listen on
    #[clap(short, long, default_value = "::")]
    pub listen: IpAddr,
    /// Port that the server will listen on for HTTP requests
    #[clap(long, default_value = "80")]
    pub port: u16,
    /// Number of seconds that a website will remain idle in memory before it is evicted
    #[clap(long, default_value = "60")]
    pub ttl: u64,
    /// URL where the data is remotely stored on a distributed commit log.
    #[clap(short, long, default_value = "ws://wasmer.sh/db")]
    pub remote: Url,
    /// URL where the authentication requests will be lodged.
    #[clap(short, long, default_value = "ws://wasmer.sh/auth")]
    pub auth_url: Url,
    /// Path to the secret key that grants access to the WebServer role within groups
    #[clap(long, default_value = "~/wasmer/web.key")]
    pub web_key_path: String,
    /// Location where all the websites will be cached
    #[clap(long, default_value = "/tmp/www")]
    pub log_path: String,
}
/// Runs a web server that will serve content from a Wasmer file system
// Fixes: "isten" -> "listen" and "an listening" -> "any listening" typos in
// user-visible clap help text.
#[derive(Parser)]
pub struct OptsAll {
    /// Optional list of the nodes that make up this cluster
    #[clap(long)]
    pub nodes_list: Option<String>,
    /// IP address that the datachain server will listen on
    #[clap(short, long, default_value = "::")]
    pub listen: IpAddr,
    /// Port that the server will listen on for HTTP requests
    #[clap(long, default_value = "80")]
    pub port: u16,
    /// Number of seconds that a website will remain idle in memory before it is evicted
    #[clap(long, default_value = "60")]
    pub ttl: u64,
    /// Location where all the websites will be cached
    #[clap(long, default_value = "/tmp/www")]
    pub log_path: String,
    /// Path to the secret key that helps protect key operations like creating users and resetting passwords
    #[clap(long, default_value = "~/wasmer/auth.key")]
    pub auth_key_path: String,
    /// Path to the secret key that grants access to the WebServer role within groups
    #[clap(long, default_value = "~/wasmer/web.key")]
    pub web_key_path: String,
    /// Path to the secret key that grants access to the EdgeCompute role within groups
    #[clap(long, default_value = "~/wasmer/edge.key")]
    pub edge_key_path: String,
    /// Path to the secret key that grants access to the contracts
    #[clap(long, default_value = "~/wasmer/contract.key")]
    pub contract_key_path: String,
    /// Path to the certificate file that will be used by any listening servers
    /// (there must be TXT records in the host domain servers for this cert)
    #[clap(long, default_value = "~/wasmer/cert")]
    pub cert_path: String,
    /// Path to the log files where all the authentication data is stored
    #[clap(long, default_value = "~/wasmer/auth")]
    pub auth_logs_path: String,
    /// Path to the backup and restore location of log files
    #[clap(short, long)]
    pub backup_path: Option<String>,
    /// URL where the data is remotely stored on a distributed commit log.
    #[clap(short, long, default_value = "ws://wasmer.sh/db")]
    pub remote: Url,
    /// Address that the authentication server(s) are listening and that
    /// this server can connect to if the chain is on another mesh node
    #[clap(short, long, default_value = "ws://localhost:5001/auth")]
    pub auth_url: url::Url,
    /// Ensures that this authentication server runs as a specific node_id
    #[clap(short, long)]
    pub node_id: Option<u32>,
}
// Selects which server role(s) this process runs. (Plain `//` comments used
// so clap's generated help output is not altered.)
#[derive(Parser)]
pub enum SubCommand {
    /// Hosts the authentication service
    #[clap()]
    Auth(OptsAuth),
    /// Starts a web server that will load Wasmer file systems and serve
    /// them directly as HTML content
    #[clap()]
    Web(OptsWeb),
    /// Starts a web server that will load Wasmer file systems and serve
    /// them directly as HTML content along with a database engine and
    /// authentication server
    #[clap()]
    All(OptsAll),
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/opt/auth.rs | ateweb/src/opt/auth.rs | use clap::Parser;
use std::net::IpAddr;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
/// Runs the login authentication and authorization server
#[derive(Parser)]
pub struct OptsAuth {
/// Optional list of the nodes that make up this cluster
#[clap(long)]
pub nodes_list: Option<String>,
/// Path to the secret key that helps protect key operations like creating users and resetting passwords
#[clap(long, default_value = "~/wasmer/auth.key")]
pub auth_key_path: String,
/// Path to the secret key that grants access to the WebServer role within groups
#[clap(long, default_value = "~/wasmer/web.key")]
pub web_key_path: String,
/// Path to the secret key that grants access to the EdgeCompute role within groups
#[clap(long, default_value = "~/wasmer/edge.key")]
pub edge_key_path: String,
/// Path to the secret key that grants access to the contracts
#[clap(long, default_value = "~/wasmer/contract.key")]
pub contract_key_path: String,
/// Path to the certificate file that will be used by an listening servers
/// (there must be TXT records in the host domain servers for this cert)
#[clap(long, default_value = "~/wasmer/cert")]
pub cert_path: String,
/// Path to the log files where all the authentication data is stored
#[clap(index = 1, default_value = "~/wasmer/auth")]
pub logs_path: String,
/// Path to the backup and restore location of log files
#[clap(short, long)]
pub backup_path: Option<String>,
/// Address that the authentication server(s) are listening and that
/// this server can connect to if the chain is on another mesh node
#[clap(short, long, default_value = "ws://localhost:5001/auth")]
pub url: url::Url,
/// IP address that the authentication server will isten on
#[clap(short, long, default_value = "::")]
pub listen: IpAddr,
/// Ensures that this authentication server runs as a specific node_id
#[clap(short, long)]
pub node_id: Option<u32>,
} | rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/opt/mod.rs | ateweb/src/opt/mod.rs | mod all;
mod core;
mod web;
mod auth;
pub use self::core::*;
pub use all::*;
pub use web::*;
pub use auth::*; | rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/opt/web.rs | ateweb/src/opt/web.rs | use std::net::IpAddr;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use url::Url;
use clap::Parser;
/// Runs a web server that will serve content from a Wasmer file system
// Fix: "isten" -> "listen" typo in user-visible clap help text.
#[derive(Parser)]
pub struct OptsWeb {
    /// IP address that the web server will listen on
    #[clap(short, long, default_value = "::")]
    pub listen: IpAddr,
    /// Port that the server will listen on for HTTP requests
    #[clap(long, default_value = "80")]
    pub port: u16,
    /// Number of seconds that a website will remain idle in memory before it is evicted
    #[clap(long, default_value = "60")]
    pub ttl: u64,
    /// URL where the data is remotely stored on a distributed commit log.
    #[clap(short, long, default_value = "ws://wasmer.sh/db")]
    pub remote: Url,
    /// Location where all the websites will be cached
    #[clap(long, default_value = "/tmp/www")]
    pub log_path: String,
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/ateweb/src/bin/ateweb.rs | ateweb/src/bin/ateweb.rs | use std::time::Duration;
use ate::utils::load_node_list;
use wasmer_auth::flow::ChainFlow;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use clap::Parser;
use ate::prelude::*;
use wasmer_auth::helper::*;
use ateweb::opt::*;
use ateweb::*;
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Parse the command line and initialise the tracing subsystem.
    let opts: Opts = Opts::parse();
    ate::log_init(opts.verbose, opts.debug);
    // Build the base ATE configuration from the common switches.
    let mut conf = AteConfig::default();
    conf.dns_sec = opts.dns_sec;
    conf.dns_server = opts.dns_server;
    // NTP sync stays on unless explicitly disabled.
    conf.ntp_sync = opts.no_ntp == false;
    if let Some(pool) = opts.ntp_pool {
        conf.ntp_pool = pool;
    }
    if let Some(port) = opts.ntp_port {
        conf.ntp_port = port;
    }
    // Run the server
    match opts.subcmd {
        // Runs only the authentication chain server.
        SubCommand::Auth(run) => {
            conf.nodes = load_node_list(run.nodes_list);
            // Open the key file
            // (root signing/read keys plus the per-purpose master keys).
            let root_write_key: PrivateSignKey = load_key(run.auth_key_path.clone(), ".write");
            let root_read_key: EncryptKey = load_key(run.auth_key_path.clone(), ".read");
            let root_cert_key: PrivateEncryptKey = load_key(run.cert_path.clone(), "");
            let web_key: EncryptKey = load_key(run.web_key_path.clone(), ".read");
            let edge_key: EncryptKey = load_key(run.edge_key_path.clone(), ".read");
            let contract_key: EncryptKey = load_key(run.contract_key_path.clone(), ".read");
            // The server session carries the root keys so it can access the chains.
            let mut session = AteSessionUser::new();
            session.user.add_read_key(&root_read_key);
            session.user.add_write_key(&root_write_key);
            // Create the server and listen
            let mut flow = ChainFlow::new(
                &conf,
                root_write_key,
                session,
                web_key,
                edge_key,
                contract_key,
                &run.url,
            );
            flow.terms_and_conditions = Some(wasmer_auth::GENERIC_TERMS_AND_CONDITIONS.to_string());
            let mut cfg_mesh =
                ConfMesh::solo_from_url(&conf, &run.url, &run.listen, None, run.node_id).await?;
            cfg_mesh.wire_protocol = StreamProtocol::parse(&run.url)?;
            cfg_mesh.listen_certificate = Some(root_cert_key);
            let server = create_server(&cfg_mesh).await?;
            server.add_route(Box::new(flow), &conf).await?;
            // Wait for ctrl-c
            let mut exit = ctrl_channel();
            while *exit.borrow() == false {
                exit.changed().await.unwrap();
            }
            println!("Shutting down...");
            server.shutdown().await;
            println!("Goodbye!");
        }
        // Runs only the web server.
        SubCommand::Web(run) => {
            let web_key: EncryptKey = load_key(run.web_key_path.clone(), ".read");
            conf.log_path = Some(run.log_path);
            let server = ServerBuilder::new(run.remote, run.auth_url)
                .with_web_master_key(web_key)
                .with_conf(&conf)
                .ttl(Duration::from_secs(run.ttl))
                // Port 443 implies TLS on the listener.
                .add_listener(run.listen, run.port, run.port == 443u16)
                .build()
                .await?;
            server.run().await?;
        }
        // Runs the auth chain server and the web server inside one process.
        SubCommand::All(run) => {
            conf.nodes = load_node_list(run.nodes_list);
            // Open the key file
            let root_write_key: PrivateSignKey = load_key(run.auth_key_path.clone(), ".write");
            let root_read_key: EncryptKey = load_key(run.auth_key_path.clone(), ".read");
            let root_cert_key: PrivateEncryptKey = load_key(run.cert_path.clone(), "");
            let web_key: EncryptKey = load_key(run.web_key_path.clone(), ".read");
            let edge_key: EncryptKey = load_key(run.edge_key_path.clone(), ".read");
            let contract_key: EncryptKey = load_key(run.contract_key_path.clone(), ".read");
            let mut session = AteSessionUser::new();
            session.user.add_read_key(&root_read_key);
            session.user.add_write_key(&root_write_key);
            // Derive the mesh skeleton (protocol, port, domain) from the auth URL.
            let protocol = StreamProtocol::parse(&run.auth_url)?;
            let port = run.auth_url.port().unwrap_or(protocol.default_port());
            let domain = run.auth_url.domain().unwrap_or("localhost").to_string();
            let mut cfg_mesh = ConfMesh::skeleton(&conf, domain, port, Some(0u32)).await?;
            cfg_mesh.wire_protocol = protocol;
            cfg_mesh.wire_encryption = Some(KeySize::Bit192);
            cfg_mesh.listen_certificate = Some(root_cert_key);
            let root = create_server(&cfg_mesh).await?;
            // Create the server and listen
            let mut flow = ChainFlow::new(
                &conf,
                root_write_key,
                session,
                web_key,
                edge_key,
                contract_key,
                &run.auth_url,
            );
            flow.terms_and_conditions = Some(wasmer_auth::GENERIC_TERMS_AND_CONDITIONS.to_string());
            root.add_route(Box::new(flow), &conf).await?;
            // The stream router forwards raw connections to the chain server by default.
            let mut router = ate::comms::StreamRouter::new(
                cfg_mesh.wire_format.clone(),
                cfg_mesh.wire_protocol.clone(),
                None,
                cfg_mesh.listen_certificate.clone(),
                root.server_id(),
                cfg_mesh.accept_timeout,
            );
            router.set_default_route(root);
            conf.log_path = Some(run.log_path);
            let server = ServerBuilder::new(run.remote, run.auth_url)
                .with_web_master_key(web_key)
                .with_conf(&conf)
                .ttl(Duration::from_secs(run.ttl))
                .with_callback(router)
                .add_listener(run.listen, run.port, run.port == 443u16)
                .build()
                .await?;
            server.run().await?;
        }
    }
    info!("ateweb::shutdown");
    Ok(())
}
fn ctrl_channel() -> tokio::sync::watch::Receiver<bool> {
let (sender, receiver) = tokio::sync::watch::channel(false);
ctrlc_async::set_handler(move || {
let _ = sender.send(true);
})
.unwrap();
receiver
} | rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/wasmer-term/build.rs | wasmer-term/build.rs | extern crate build_deps;
fn main() {
#[cfg(feature = "embedded_files")]
build_deps::rerun_if_changed_paths( "public/bin/*" ).unwrap();
#[cfg(feature = "embedded_files")]
build_deps::rerun_if_changed_paths( "public/*" ).unwrap();
#[cfg(feature = "embedded_files")]
build_deps::rerun_if_changed_paths( "public/bin" ).unwrap();
#[cfg(feature = "embedded_files")]
build_deps::rerun_if_changed_paths( "public" ).unwrap();
} | rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/wasmer-term/src/lib.rs | wasmer-term/src/lib.rs | pub mod system;
pub mod utils;
pub mod ws;
pub use wasmer_os;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/wasmer-term/src/system.rs | wasmer-term/src/system.rs | use async_trait::async_trait;
#[cfg(feature = "embedded_files")]
use include_dir::{include_dir, Dir};
use wasmer_os::wasmer::{Module, Store};
use wasmer_os::wasmer::vm::{VMMemory, VMSharedMemory};
use wasmer_os::wasmer_wasi::WasiThreadError;
use std::cell::RefCell;
use std::convert::TryFrom;
use std::future::Future;
use std::io::{self, Read, Write};
use std::path::PathBuf;
use std::pin::Pin;
use std::rc::Rc;
use std::sync::Arc;
use std::sync::Mutex;
use std::time::Duration;
use wasmer_os::api::abi::*;
use wasmer_os::api::AsyncResult;
use wasmer_os::api::SerializationFormat;
use wasmer_os::api::ThreadLocal;
use wasmer_os::api::WebSocketAbi;
use wasmer_os::api::WebGlAbi;
use wasmer_os::err;
use tokio::runtime::Builder;
use tokio::runtime::Runtime;
use tokio::sync::mpsc;
use tokio::sync::watch;
#[allow(unused_imports, dead_code)]
use tracing::{debug, error, info, trace, warn};
use crate::ws::SysWebSocket;
#[cfg(feature = "embedded_files")]
static PUBLIC_DIR: Dir = include_dir!("$CARGO_MANIFEST_DIR/public");
thread_local!(static THREAD_LOCAL: Rc<RefCell<ThreadLocal>> = Rc::new(RefCell::new(ThreadLocal::default())));
#[derive(Debug, Clone)]
pub struct SysSystem {
    // Broadcasts `true` when the process should shut down.
    exit_tx: Arc<watch::Sender<bool>>,
    // Tokio runtime every task is spawned onto.
    runtime: Arc<Runtime>,
    // Serialises access to stdout/stderr across tasks.
    stdio_lock: Arc<Mutex<()>>,
    // Optional on-disk directory used by `fetch_file` to resolve native files.
    native_files_path: Option<PathBuf>,
}
impl SysSystem {
    /// Creates the system ABI on a freshly built multi-threaded tokio runtime.
    pub fn new(native_files_path: Option<String>, exit: watch::Sender<bool>) -> SysSystem {
        let runtime = Builder::new_multi_thread().enable_all().build().unwrap();
        Self::assemble(native_files_path, exit, Arc::new(runtime))
    }

    /// Creates the system ABI on an already running runtime supplied by the caller.
    pub fn new_with_runtime(native_files_path: Option<String>, exit: watch::Sender<bool>, runtime: Arc<Runtime>) -> SysSystem {
        Self::assemble(native_files_path, exit, runtime)
    }

    // Shared constructor logic: wrap the exit channel and convert the optional path.
    fn assemble(native_files_path: Option<String>, exit: watch::Sender<bool>, runtime: Arc<Runtime>) -> SysSystem {
        SysSystem {
            exit_tx: Arc::new(exit),
            runtime,
            stdio_lock: Arc::new(Mutex::new(())),
            native_files_path: native_files_path.map(PathBuf::from),
        }
    }

    /// Blocks the calling thread until `future` completes on the runtime.
    pub fn block_on<F: Future>(&self, future: F) -> F::Output {
        self.runtime.block_on(future)
    }
}
#[async_trait]
impl SystemAbi for SysSystem {
    /// Starts an asynchronous task that will run on a shared worker pool
    /// This task must not block the execution or it could cause a deadlock
    fn task_shared(
        &self,
        task: Box<
            dyn FnOnce() -> Pin<Box<dyn Future<Output = ()> + Send + 'static>> + Send + 'static,
        >,
    ) {
        self.runtime.spawn(async move {
            let fut = task();
            fut.await
        });
    }
    /// Starts an asynchronous task will will run on a dedicated thread
    /// pulled from the worker pool that has a stateful thread local variable
    /// It is ok for this task to block execution and any async futures within its scope
    fn task_wasm(
        &self,
        task: Box<dyn FnOnce(Store, Module, Option<VMMemory>) -> Pin<Box<dyn Future<Output = ()> + 'static>> + Send + 'static>,
        store: Store,
        module: Module,
        memory_spawn: SpawnType,
    ) -> Result<(), WasiThreadError> {
        use tracing::error;
        // Materialise the memory the spawned wasm instance should start with.
        // NOTE(review): failure to create a shared memory panics via unwrap()
        // after logging - confirm crashing the process is intended here.
        let memory: Option<VMMemory> = match memory_spawn {
            SpawnType::CreateWithType(mem) => {
                Some(
                    VMSharedMemory::new(&mem.ty, &mem.style)
                        .map_err(|err| {
                            error!("failed to create memory - {}", err);
                        })
                        .unwrap()
                        .into()
                )
            },
            SpawnType::NewThread(mem) => Some(mem),
            SpawnType::Create => None,
        };
        // Drive the (potentially blocking) wasm future on a blocking-pool
        // thread using a handle back to this same runtime.
        let rt = self.runtime.clone();
        self.runtime.spawn_blocking(move || {
            // Invoke the callback
            let fut = task(store, module, memory);
            rt.block_on(fut)
        });
        Ok(())
    }
    /// Starts an synchronous task will will run on a dedicated thread
    /// pulled from the worker pool. It is ok for this task to block execution
    /// and any async futures within its scope
    fn task_dedicated(
        &self,
        task: Box<dyn FnOnce() + Send + 'static>,
    ) {
        self.runtime.spawn_blocking(move || {
            task();
        });
    }
    /// Starts an asynchronous task will will run on a dedicated thread
    /// pulled from the worker pool. It is ok for this task to block execution
    /// and any async futures within its scope
    fn task_dedicated_async(
        &self,
        task: Box<dyn FnOnce() -> Pin<Box<dyn Future<Output = ()> + 'static>> + Send + 'static>,
    ) {
        let rt = self.runtime.clone();
        self.runtime.spawn_blocking(move || {
            let fut = task();
            rt.block_on(fut)
        });
    }
    /// Starts an asynchronous task on the current thread. This is useful for
    /// launching background work with variables that are not Send.
    // NOTE(review): tokio's spawn_local requires a LocalSet to be active on
    // this thread - confirm callers guarantee that.
    fn task_local(&self, task: Pin<Box<dyn Future<Output = ()> + 'static>>) {
        tokio::task::spawn_local(async move {
            task.await;
        });
    }
    /// Puts the current thread to sleep for a fixed number of milliseconds
    // (implemented as a shared async timer; completion is signalled over a channel).
    fn sleep(&self, ms: u128) -> AsyncResult<()> {
        let (tx_done, rx_done) = mpsc::channel(1);
        self.task_shared(Box::new(move || {
            Box::pin(async move {
                tokio::time::sleep(Duration::from_millis(ms as u64)).await;
                let _ = tx_done.send(()).await;
            })
        }));
        AsyncResult::new(SerializationFormat::Json, rx_done)
    }
    /// Fetches a data file from the local context of the process
    ///
    /// Resolution order: the embedded `public/` directory (only with the
    /// `embedded_files` feature), then the optional `native_files_path`
    /// directory on disk.
    #[allow(unused)]
    fn fetch_file(&self, path: &str) -> AsyncResult<Result<Vec<u8>, u32>> {
        // Normalise away a single leading slash so lookups are relative.
        let mut path = path.to_string();
        if path.starts_with("/") {
            path = path[1..].to_string();
        };
        let native_files_path = self.native_files_path.clone();
        let (tx_done, rx_done) = mpsc::channel(1);
        self.task_dedicated_async(Box::new(move || {
            Box::pin(async move {
                #[cfg(not(feature = "embedded_files"))]
                let mut ret = Err(err::ERR_ENOENT);
                #[cfg(feature = "embedded_files")]
                let mut ret = PUBLIC_DIR
                    .get_file(path.as_str())
                    .map_or(Err(err::ERR_ENOENT), |file| Ok(file.contents().to_vec()));
                if ret.is_err() {
                    if let Some(native_files) = native_files_path.as_ref() {
                        // Reject path patterns that could escape the root directory.
                        if path.contains("..") || path.contains("~") || path.contains("//") {
                            warn!("relative paths are a security risk - {}", path);
                            ret = Err(err::ERR_EACCES);
                        } else {
                            let mut path = path.as_str();
                            while path.starts_with("/") {
                                path = &path[1..];
                            }
                            let path = native_files.join(path);
                            // Attempt to open the file
                            ret = match std::fs::File::open(path.clone()) {
                                Ok(mut file) => {
                                    let mut data = Vec::new();
                                    file
                                        .read_to_end(&mut data)
                                        .map_err(|err| {
                                            debug!("failed to read local file ({}) - {}", path.to_string_lossy(), err);
                                            err::ERR_EIO
                                        })
                                        .map(|_| data)
                                },
                                Err(err) => {
                                    debug!("failed to open local file ({}) - {}", path.to_string_lossy(), err);
                                    Err(err::ERR_EIO)
                                }
                            };
                        }
                    }
                }
                let _ = tx_done.send(ret).await;
            })
        }));
        AsyncResult::new(SerializationFormat::Bincode, rx_done)
    }
    /// Performs a HTTP or HTTPS request to a destination URL
    fn reqwest(
        &self,
        url: &str,
        method: &str,
        _options: ReqwestOptions,
        headers: Vec<(String, String)>,
        data: Option<Vec<u8>>,
    ) -> AsyncResult<Result<ReqwestResponse, u32>> {
        let method = method.to_string();
        let url = url.to_string();
        let (tx_done, rx_done) = mpsc::channel(1);
        self.task_shared(Box::new(move || {
            Box::pin(async move {
                // Build and execute the request inside a closure so `?` can be
                // used; every failure path is logged and mapped onto ERR_EIO.
                let ret = move || async move {
                    let method = reqwest::Method::try_from(method.as_str()).map_err(|err| {
                        debug!("failed to convert method ({}) - {}", method, err);
                        err::ERR_EIO
                    })?;
                    let client = reqwest::ClientBuilder::default().build().map_err(|err| {
                        debug!("failed to build reqwest client - {}", err);
                        err::ERR_EIO
                    })?;
                    let mut builder = client.request(method, url.as_str());
                    for (header, val) in headers {
                        if let Ok(header) =
                            reqwest::header::HeaderName::from_bytes(header.as_bytes())
                        {
                            builder = builder.header(header, val);
                        } else {
                            // Invalid header names are skipped, not fatal.
                            debug!("failed to parse header - {}", header);
                        }
                    }
                    if let Some(data) = data {
                        builder = builder.body(reqwest::Body::from(data));
                    }
                    let request = builder.build().map_err(|err| {
                        debug!("failed to convert request (url={}) - {}", url.as_str(), err);
                        err::ERR_EIO
                    })?;
                    let response = client.execute(request).await.map_err(|err| {
                        debug!("failed to execute reqest - {}", err);
                        err::ERR_EIO
                    })?;
                    let status = response.status().as_u16();
                    let status_text = response.status().as_str().to_string();
                    let data = response.bytes().await.map_err(|err| {
                        debug!("failed to read response bytes - {}", err);
                        err::ERR_EIO
                    })?;
                    let data = data.to_vec();
                    // NOTE(review): response headers are not propagated and
                    // `redirected`/`ok` are hard-coded - confirm this is intended.
                    Ok(ReqwestResponse {
                        pos: 0usize,
                        ok: true,
                        status,
                        status_text,
                        redirected: false,
                        data: Some(data),
                        headers: Vec::new(),
                    })
                };
                let ret = ret().await;
                let _ = tx_done.send(ret).await;
            })
        }));
        AsyncResult::new(SerializationFormat::Bincode, rx_done)
    }
    /// Opens a web socket connection to `url`.
    async fn web_socket(&self, url: &str) -> Result<Box<dyn WebSocketAbi>, String> {
        return Ok(Box::new(SysWebSocket::new(url).await?));
    }
    // WebGL is not supported here
    async fn webgl(&self) -> Option<Box<dyn WebGlAbi>> {
        None
    }
}
#[async_trait]
impl ConsoleAbi for SysSystem {
    /// Writes raw bytes to stdout with the tty guarded (raw mode preserved).
    async fn stdout(&self, data: Vec<u8>) {
        use raw_tty::GuardMode;
        // Serialise all stdio access so interleaved output stays coherent.
        let _guard = self.stdio_lock.lock().unwrap();
        if let Ok(mut stdout) = io::stdout().guard_mode() {
            stdout.write_all(&data[..]).unwrap();
            stdout.flush().unwrap();
        }
    }
    /// Writes raw bytes to stderr with the tty guarded.
    async fn stderr(&self, data: Vec<u8>) {
        use raw_tty::GuardMode;
        let _guard = self.stdio_lock.lock().unwrap();
        if let Ok(mut stderr) = io::stderr().guard_mode() {
            stderr.write_all(&data[..]).unwrap();
            stderr.flush().unwrap();
        }
    }
    /// Flushes both stdout and stderr.
    async fn flush(&self) {
        use raw_tty::GuardMode;
        let _guard = self.stdio_lock.lock().unwrap();
        if let Ok(mut stdout) = io::stdout().guard_mode() {
            stdout.flush().unwrap();
        }
        if let Ok(mut stderr) = io::stderr().guard_mode() {
            stderr.flush().unwrap();
        }
    }
    /// Writes output to the log
    // (stderr, CR-LF terminated because the tty is in raw mode).
    async fn log(&self, text: String) {
        use raw_tty::GuardMode;
        let _guard = self.stdio_lock.lock().unwrap();
        if let Ok(mut stderr) = io::stderr().guard_mode() {
            write!(&mut *stderr, "{}\r\n", text).unwrap();
            stderr.flush().unwrap();
        }
    }
    /// Gets the number of columns and rows in the terminal
    // Falls back to the classic 80x25 when the size cannot be queried.
    async fn console_rect(&self) -> ConsoleRect {
        if let Some((w, h)) = term_size::dimensions() {
            ConsoleRect {
                cols: w as u32,
                rows: h as u32,
            }
        } else {
            ConsoleRect { cols: 80, rows: 25 }
        }
    }
    /// Clears the terminal
    // ANSI escape sequence: ESC [2J.
    async fn cls(&self) {
        print!("{}[2J", 27 as char);
    }
    /// Signals the main loop to exit via the watch channel.
    async fn exit(&self) {
        let _ = self.exit_tx.send(true);
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/wasmer-term/src/utils.rs | wasmer-term/src/utils.rs | #![allow(unused_imports)]
use tracing::metadata::LevelFilter;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_subscriber::fmt::SubscriberBuilder;
use tracing_subscriber::EnvFilter;
#[cfg(unix)]
use {
libc::{
c_int, tcsetattr, termios, ECHO, ECHONL, ICANON, ICRNL, IEXTEN, ISIG, IXON, OPOST, TCSANOW,
},
std::mem,
std::os::unix::io::AsRawFd,
};
/// Initialises the global tracing subscriber.
///
/// `verbose` counts the `-v` occurrences on the command line and maps to an
/// increasingly chatty maximum level; `debug` forces DEBUG regardless of
/// `verbose`. When no explicit level is selected, the `RUST_LOG` environment
/// filter is honoured instead.
pub fn log_init(verbose: i32, debug: bool) {
    let mut log_level = match verbose {
        0 => None,
        1 => Some(LevelFilter::WARN),
        2 => Some(LevelFilter::INFO),
        3 => Some(LevelFilter::DEBUG),
        // Fix: previously only exactly 4 mapped to TRACE, so five or more
        // `-v` flags silently fell back to the env filter; cap at TRACE.
        v if v >= 4 => Some(LevelFilter::TRACE),
        _ => None,
    };
    if debug {
        log_level = Some(LevelFilter::DEBUG);
    }
    if let Some(log_level) = log_level {
        SubscriberBuilder::default()
            .with_max_level(log_level)
            .init();
    } else {
        // No explicit level: defer to RUST_LOG.
        SubscriberBuilder::default()
            .with_env_filter(EnvFilter::from_default_env())
            .init();
    }
}
#[cfg(unix)]
/// Converts a libc-style return code (0 == success) into an `io::Result`,
/// capturing `errno` on failure.
pub fn io_result(ret: libc::c_int) -> std::io::Result<()> {
    if ret == 0 {
        Ok(())
    } else {
        Err(std::io::Error::last_os_error())
    }
}
#[cfg(unix)]
/// Switches the controlling tty into raw (no-echo) mode and returns the tty
/// handle so the caller can keep reading keystrokes from it.
pub fn set_mode_no_echo() -> std::fs::File {
    let tty = std::fs::File::open("/dev/tty").unwrap();
    let fd = tty.as_raw_fd();

    let mut termios = mem::MaybeUninit::<termios>::uninit();
    io_result(unsafe { ::libc::tcgetattr(fd, termios.as_mut_ptr()) }).unwrap();
    let mut termios = unsafe { termios.assume_init() };

    // Local flags: disable echo, canonical (line-buffered) input, signal
    // generation and implementation-defined input extensions.
    termios.c_lflag &= !(ECHO | ICANON | ISIG | IEXTEN);
    // Fix: ICRNL (CR->NL translation) and IXON (XON/XOFF flow control) are
    // *input* flags and live in c_iflag - the previous version cleared the
    // same-named bit positions in c_lflag, which had no such effect.
    termios.c_iflag &= !(ICRNL | IXON);
    // Fix: OPOST (output post-processing) is an *output* flag in c_oflag.
    termios.c_oflag &= !OPOST;

    unsafe { tcsetattr(fd, TCSANOW, &termios) };
    tty
}
#[cfg(unix)]
/// Restores the controlling tty to cooked (echoing, line-buffered) mode and
/// returns the tty handle.
pub fn set_mode_echo() -> std::fs::File {
    let tty = std::fs::File::open("/dev/tty").unwrap();
    let fd = tty.as_raw_fd();

    let mut termios = mem::MaybeUninit::<termios>::uninit();
    io_result(unsafe { ::libc::tcgetattr(fd, termios.as_mut_ptr()) }).unwrap();
    let mut termios = unsafe { termios.assume_init() };

    // Local flags: re-enable echo, canonical input, signals and extensions.
    termios.c_lflag |= ECHO | ICANON | ISIG | IEXTEN;
    // Fix: ICRNL and IXON belong to the input flags (c_iflag), not c_lflag.
    termios.c_iflag |= ICRNL | IXON;
    // Fix: OPOST belongs to the output flags (c_oflag).
    termios.c_oflag |= OPOST;

    unsafe { tcsetattr(fd, TCSANOW, &termios) };
    tty
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/wasmer-term/src/ws.rs | wasmer-term/src/ws.rs | use async_trait::async_trait;
use futures::stream::SplitSink;
use futures::stream::SplitStream;
use futures::SinkExt;
use futures_util::StreamExt;
use std::sync::Arc;
use std::sync::Mutex;
use wasmer_os::api::System;
use wasmer_os::api::SystemAbiExt;
use wasmer_os::api::WebSocketAbi;
use tokio::net::TcpStream;
use tokio_tungstenite::{connect_async, tungstenite::protocol::Message};
use tokio_tungstenite::{MaybeTlsStream, WebSocketStream};
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
// Native (tokio-tungstenite backed) implementation of the WebSocketAbi.
pub struct SysWebSocket {
    // System ABI used to spawn the receive pump task.
    system: System,
    // Send half of the split socket.
    sink: SplitSink<WebSocketStream<MaybeTlsStream<TcpStream>>, Message>,
    // Receive half; taken by `set_onmessage` when the pump starts.
    stream: Option<SplitStream<WebSocketStream<MaybeTlsStream<TcpStream>>>>,
    // Optional callback fired when the receive stream ends.
    on_close: Arc<Mutex<Option<Box<dyn Fn() + Send + 'static>>>>,
}
impl SysWebSocket {
    /// Connects to `url` and returns a ready-to-use web socket wrapper.
    ///
    /// The stream is split immediately: the send half lives on the struct
    /// while the receive half waits for `set_onmessage` to start the pump.
    pub async fn new(url: &str) -> Result<SysWebSocket, String> {
        let target = url::Url::parse(url).map_err(|err| err.to_string())?;
        let (ws_stream, _) = match connect_async(target).await {
            Ok(conn) => conn,
            Err(err) => return Err(format!("failed to connect - {}", err)),
        };
        let (tx_half, rx_half) = ws_stream.split();
        Ok(SysWebSocket {
            system: System::default(),
            sink: tx_half,
            stream: Some(rx_half),
            on_close: Arc::new(Mutex::new(None)),
        })
    }
}
#[async_trait]
impl WebSocketAbi for SysWebSocket {
    /// Registers the open callback.
    fn set_onopen(&mut self, mut callback: Box<dyn FnMut()>) {
        // We instantly notify that we are open
        // (the connection was already established in `new`).
        callback();
    }
    /// Registers the close callback, replacing any previous one.
    fn set_onclose(&mut self, callback: Box<dyn Fn() + Send + 'static>) {
        let mut guard = self.on_close.lock().unwrap();
        guard.replace(callback);
    }
    /// Registers the message callback and starts pumping the receive half of
    /// the socket on the shared worker pool. Only the first call has any
    /// effect: the receive stream is `take`n here.
    fn set_onmessage(&mut self, callback: Box<dyn Fn(Vec<u8>) + Send + 'static>) {
        if let Some(mut stream) = self.stream.take() {
            let on_close = self.on_close.clone();
            self.system.fork_shared(move || async move {
                while let Some(msg) = stream.next().await {
                    match msg {
                        Ok(Message::Binary(msg)) => {
                            callback(msg);
                        }
                        // Non-binary frames (text/ping/close) and errors are
                        // logged and otherwise ignored.
                        a => {
                            debug!("received invalid msg: {:?}", a);
                        }
                    }
                }
                // Stream ended - fire the close callback if one is registered.
                let on_close = on_close.lock().unwrap();
                if let Some(on_close) = on_close.as_ref() {
                    on_close();
                }
            });
        }
    }
    /// Sends a binary frame over the socket.
    async fn send(&mut self, data: Vec<u8>) -> Result<(), String> {
        self.sink
            .send(Message::binary(data))
            .await
            .map_err(|err| err.to_string())?;
        Ok(())
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/wasmer-term/src/bin/wasmer-terminal.rs | wasmer-term/src/bin/wasmer-terminal.rs | #![allow(unused_imports)]
use clap::Parser;
use raw_tty::GuardMode;
use std::io::Read;
use std::sync::Arc;
use wasmer_os::api::*;
use wasmer_os::console::Console;
use tokio::io;
use tokio::select;
use tokio::sync::watch;
use wasmer_term::wasmer_os::bin_factory::CachedCompiledModules;
use wasmer_term::utils::*;
use tracing::{debug, error, info, warn};
#[cfg(unix)]
use {
libc::{c_int, tcsetattr, termios, ECHO, ECHONL, ICANON, TCSANOW},
std::mem,
std::os::unix::io::AsRawFd,
};
// Command line options for the terminal binary (parsed with clap derive).
// NOTE: only `//` comments added here - a `///` struct doc would become the
// clap `about` text and change the --help output.
#[allow(dead_code)]
#[derive(Parser)]
#[clap(version = "1.0", author = "Wasmer Inc <info@wasmer.io>")]
struct Opts {
    /// Sets the level of log verbosity, can be used multiple times
    #[clap(short, long, parse(from_occurrences))]
    pub verbose: i32,
    /// Logs debug info to the console
    #[clap(short, long)]
    pub debug: bool,
    /// Determines which compiler to use
    #[clap(short, long, default_value = "default")]
    pub compiler: wasmer_os::eval::Compiler,
    /// Location where cached compiled modules are stored
    #[clap(long, default_value = "~/wasmer/compiled")]
    pub compiler_cache_path: String,
    /// Uses a local directory for native files rather than the published ate chain
    #[clap(long)]
    pub native_files_path: Option<String>,
    /// Runs a particular command after loading
    #[clap(index = 1)]
    pub run: Option<String>,
}
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let opts = Opts::parse();
    // Set the panic hook that will terminate the process
    // (restoring the tty to cooked mode first so the shell is usable again).
    let mut tty = set_mode_no_echo();
    let old_panic_hook = std::panic::take_hook();
    std::panic::set_hook(Box::new(move |panic_info| {
        set_mode_echo();
        old_panic_hook(panic_info);
        std::process::exit(1);
    }));
    // Initialize the logging engine
    wasmer_term::utils::log_init(opts.verbose, opts.debug);
    // Set the system
    let (tx_exit, mut rx_exit) = watch::channel(false);
    let sys = wasmer_term::system::SysSystem::new(opts.native_files_path, tx_exit);
    let con = Arc::new(sys.clone());
    wasmer_os::api::set_system_abi(sys.clone());
    let system = System::default();
    // Read keys in a dedicated thread
    // and forward them to the console loop over an mpsc channel.
    let (tx_data, mut rx_data) = tokio::sync::mpsc::channel(wasmer_os::common::MAX_MPSC);
    system.fork_dedicated_async(move || async move {
        let mut buf = [0u8; 1024];
        while let Ok(read) = tty.read(&mut buf) {
            let buf = &buf[..read];
            // NOTE(review): raw tty bytes are treated as UTF-8 without
            // validation here - confirm multi-byte sequences split across
            // reads cannot corrupt the stream.
            unsafe {
                let _ = tx_data
                    .send(String::from_utf8_unchecked(buf.to_vec()))
                    .await;
            }
        }
    });
    // Build the compiled modules
    let compiled_modules = Arc::new(CachedCompiledModules::new(Some(opts.compiler_cache_path)));
    // If a command is passed in then pass it into the console
    let location = if let Some(run) = opts.run {
        format!("wss://localhost/?no_welcome&init={}", run)
    } else {
        format!("wss://localhost/")
    };
    // Now we run the actual console under the runtime
    let fs = wasmer_os::fs::create_root_fs(None);
    let con = con.clone();
    let compiler = opts.compiler;
    sys.block_on(async move {
        let user_agent = "noagent".to_string();
        let mut console = Console::new(
            location,
            user_agent,
            compiler,
            con.clone(),
            None,
            fs,
            compiled_modules,
        );
        console.init().await;
        // Process data until the console closes
        // (either the exit flag flips or the key channel closes).
        while *rx_exit.borrow() == false {
            select! {
                data = rx_data.recv() => {
                    if let Some(data) = data {
                        console.on_data(data).await;
                    } else {
                        break;
                    }
                }
                _ = rx_exit.changed() => {
                }
            }
        }
        // Clear the screen
        let _ = con.stdout("\r\n".to_string().into_bytes()).await;
    });
    // We are done
    set_mode_echo();
    Ok(())
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/misc.rs | lib/src/misc.rs | rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false | |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/event.rs | lib/src/event.rs | use bytes::Bytes;
use crate::crypto::{AteHash, DoubleHash};
use super::error::*;
use super::header::*;
use super::meta::*;
use super::spec::*;
pub use super::spec::LazyData;
/// Represents the raw bytes that can describe what the event is
#[derive(Debug, Clone)]
pub struct EventHeaderRaw {
    // Hash of the serialized metadata bytes below.
    pub meta_hash: super::crypto::AteHash,
    // Metadata serialized with `format.meta`.
    pub meta_bytes: Bytes,
    // Hash of the data payload, when one exists.
    pub data_hash: Option<super::crypto::AteHash>,
    // Size of the data payload in bytes (0 when absent).
    pub data_size: usize,
    // Combined hash over metadata (and data when present) - the event identity.
    pub event_hash: super::crypto::AteHash,
    // Serialization formats used for the metadata and data.
    pub format: MessageFormat,
}
impl std::hash::Hash for EventHeaderRaw {
    // Hash only the combined event hash: it already commits to both the
    // metadata hash and the optional data hash (see `event_sig_hash`).
    fn hash<H: std::hash::Hasher>(&self, state: &mut H) {
        self.event_hash.hash(state);
    }
}
/// Combined identity hash of an event: a double hash over metadata and data
/// when a payload exists, otherwise just the metadata hash.
pub(crate) fn event_sig_hash(
    meta_hash: &super::crypto::AteHash,
    data_hash: &Option<super::crypto::AteHash>,
) -> AteHash {
    if let Some(data) = data_hash {
        DoubleHash::from_hashes(meta_hash, data).hash()
    } else {
        meta_hash.clone()
    }
}
impl EventHeaderRaw {
    /// Assembles a raw header from pre-computed pieces, deriving the combined
    /// event hash from the metadata hash and the optional data hash.
    pub(crate) fn new(
        meta_hash: super::crypto::AteHash,
        meta_bytes: Bytes,
        data_hash: Option<super::crypto::AteHash>,
        data_size: usize,
        format: MessageFormat,
    ) -> EventHeaderRaw {
        EventHeaderRaw {
            event_hash: event_sig_hash(&meta_hash, &data_hash),
            meta_hash,
            meta_bytes,
            data_hash,
            data_size,
            format,
        }
    }
    /// Deserializes the metadata bytes into a structured header.
    pub fn as_header(&self) -> Result<EventHeader, SerializationError> {
        Ok(EventHeader {
            raw: self.clone(),
            meta: self.format.meta.deserialize_ref(&self.meta_bytes)
                .map_err(SerializationError::from)?,
        })
    }
}
/// Describes what the event is and includes a structured object to represent it
#[derive(Debug, Clone)]
pub struct EventHeader {
    // The raw (serialized + hashed) form of this header.
    pub raw: EventHeaderRaw,
    // The deserialized metadata matching `raw.meta_bytes`.
    pub meta: Metadata,
}
impl EventHeader {
    /// Content hash that uniquely identifies this event.
    pub fn hash(&self) -> AteHash {
        self.raw.event_hash
    }
    /// Returns true when the event carries neither metadata nor a data
    /// payload. (Rewritten from the `== false` / `return true;` chain into a
    /// single idiomatic boolean expression - behavior is unchanged.)
    pub fn is_empty(&self) -> bool {
        self.meta.is_empty() && self.raw.data_size == 0
    }
}
// Borrowed view of an event payload: eager bytes, a lazy reference into the
// log, or nothing.
#[derive(Debug, Clone)]
pub enum MessageBytesRef<'a>
{
    Some(&'a Bytes),
    LazySome(&'a LazyData),
    None,
}
impl<'a> MessageBytesRef<'a>
{
    /// Unwraps the eagerly-loaded variant; lazy or absent payloads yield `None`.
    pub const fn as_some(self) -> Option<&'a Bytes> {
        if let MessageBytesRef::Some(bytes) = self {
            Some(bytes)
        } else {
            None
        }
    }
}
// Owned event payload: bytes already in memory, a lazily-loaded reference
// into the log, or nothing at all.
#[derive(Debug, Clone)]
pub enum MessageBytes
{
    Some(Bytes),
    LazySome(LazyData),
    None,
}
impl MessageBytes
{
    /// True when no payload is present at all.
    pub fn is_none(&self) -> bool {
        // Idiom fix: `!x` instead of `x == false`.
        !self.is_some()
    }
    /// True when a payload exists, whether already loaded or lazy.
    pub fn is_some(&self) -> bool {
        // Kept exhaustive so adding a variant forces a review here.
        match self {
            MessageBytes::Some(_) => true,
            MessageBytes::LazySome(_) => true,
            MessageBytes::None => false,
        }
    }
    /// True only for payloads that still need to be loaded on demand.
    pub fn is_lazy(&self) -> bool {
        // Idiom fix: `matches!` instead of an if-let returning bools.
        matches!(self, MessageBytes::LazySome(_))
    }
    /// Borrowed view over the payload without cloning it.
    pub const fn as_ref<'a>(&'a self) -> MessageBytesRef<'a> {
        match *self {
            MessageBytes::Some(ref a) => MessageBytesRef::Some(a),
            MessageBytes::LazySome(ref a) => MessageBytesRef::LazySome(a),
            MessageBytes::None => MessageBytesRef::None,
        }
    }
    /// Borrows the eagerly-loaded bytes, if any; lazy payloads yield `None`.
    pub const fn as_option<'a>(&'a self) -> Option<&'a Bytes> {
        match *self {
            MessageBytes::Some(ref a) => Some(a),
            _ => None
        }
    }
    /// Consumes self, returning the eagerly-loaded bytes, if any.
    pub fn to_option(self) -> Option<Bytes> {
        match self {
            MessageBytes::Some(a) => Some(a),
            _ => None
        }
    }
    /// Converts into the on-disk log representation, copying eager bytes.
    pub fn to_log_data(self) -> LogData {
        match self {
            MessageBytes::Some(a) => LogData::Some(a.to_vec()),
            MessageBytes::LazySome(l) => LogData::LazySome(l),
            MessageBytes::None => LogData::None,
        }
    }
}
/// Represents an event that has not yet been stored anywhere
#[derive(Debug, Clone)]
pub struct EventWeakData
where
    Self: Send + Sync,
{
    // Structured metadata attached to the event.
    pub meta: Metadata,
    // Payload: eager bytes, a lazy reference, or none.
    pub data_bytes: MessageBytes,
    // Serialization formats for metadata and data.
    pub format: MessageFormat,
}
impl std::fmt::Display for EventWeakData {
    /// Renders the format, metadata and (when present) the payload size.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self.data_bytes {
            MessageBytes::Some(data) => {
                write!(f, "format={}, meta={}, data={}", self.format, self.meta, data.len())
            }
            MessageBytes::LazySome(lazy) => {
                write!(f, "format={}, meta={}, data={}", self.format, self.meta, lazy.len)
            }
            MessageBytes::None => {
                write!(f, "format={}, meta={}", self.format, self.meta)
            }
        }
    }
}
impl EventWeakData {
    /// Creates an event carrying `data` under the given primary key.
    pub fn new(key: PrimaryKey, data: Bytes, format: MessageFormat) -> EventWeakData {
        EventWeakData {
            meta: Metadata::for_data(key),
            data_bytes: MessageBytes::Some(data),
            format,
        }
    }
    /// Creates an event with no payload and default (empty) metadata.
    pub fn barebone(format: MessageFormat) -> EventWeakData {
        EventWeakData {
            meta: Metadata::default(),
            data_bytes: MessageBytes::None,
            format,
        }
    }
    /// Serializes the metadata and computes the hashes that make up the raw
    /// header for this event.
    pub fn as_header_raw(&self) -> Result<EventHeaderRaw, SerializationError> {
        // Resolve the payload hash and size in a single match over the
        // variant (previously two separate matches) so they cannot disagree.
        let (data_hash, data_size) = match &self.data_bytes {
            MessageBytes::Some(d) => (Some(AteHash::from_bytes(&d[..])), d.len() as usize),
            MessageBytes::LazySome(lazy) => (Some(lazy.hash), lazy.len),
            MessageBytes::None => (None, 0),
        };
        let meta_bytes = Bytes::from(self.format.meta.serialize(&self.meta)?);
        let meta_hash = AteHash::from_bytes(&meta_bytes[..]);
        Ok(EventHeaderRaw::new(
            meta_hash,
            meta_bytes,
            data_hash,
            data_size,
            self.format,
        ))
    }
    /// Builds a full header: the raw header plus a clone of the metadata.
    pub fn as_header(&self) -> Result<EventHeader, SerializationError> {
        Ok(EventHeader {
            raw: self.as_header_raw()?,
            meta: self.meta.clone(),
        })
    }
    /// Appends one more core metadata entry, builder style.
    pub fn with_core_metadata(mut self, core: CoreMetadata) -> Self {
        self.meta.core.push(core);
        self
    }
}
/// Represents an event that has not yet been stored anywhere
#[derive(Debug, Clone)]
pub struct EventStrongData
where
    Self: Send + Sync,
{
    // Structured metadata attached to the event.
    pub meta: Metadata,
    // Payload bytes; unlike `EventWeakData` there is no lazy variant here.
    pub data_bytes: Option<Bytes>,
    // Serialization formats for metadata and data.
    pub format: MessageFormat,
}
impl std::fmt::Display for EventStrongData {
    /// Renders the format, metadata and (when present) the payload size.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match &self.data_bytes {
            Some(data) => write!(f, "format={}, meta={}, data={}", self.format, self.meta, data.len()),
            None => write!(f, "format={}, meta={}", self.format, self.meta),
        }
    }
}
impl EventStrongData {
    /// Creates an event carrying `data` under the given primary key.
    pub fn new(key: PrimaryKey, data: Bytes, format: MessageFormat) -> EventStrongData {
        EventStrongData {
            meta: Metadata::for_data(key),
            data_bytes: Some(data),
            format,
        }
    }
    /// Creates an event with no payload and default (empty) metadata.
    pub fn barebone(format: MessageFormat) -> EventStrongData {
        EventStrongData {
            meta: Metadata::default(),
            data_bytes: None,
            format,
        }
    }
    /// Appends one more core metadata entry, builder style.
    pub fn with_core_metadata(mut self, core: CoreMetadata) -> Self {
        self.meta.core.push(core);
        self
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/prelude.rs | lib/src/prelude.rs | pub use crate::compact::CompactMode;
pub use crate::conf::ConfAte as AteConfig;
pub use crate::conf::ConfAte;
pub use crate::conf::ConfMesh;
pub use crate::conf::ConfiguredFor;
pub use crate::error::*;
pub use crate::header::PrimaryKey;
pub use crate::comms::Metrics as ChainMetrics;
pub use crate::comms::Throttle as ChainThrottle;
pub use crate::conf::MeshConnectAddr;
pub use crate::crypto::AteHash;
pub use crate::crypto::DerivedEncryptKey;
pub use crate::crypto::EncryptKey;
pub use crate::crypto::EncryptedSecureData;
pub use crate::crypto::KeySize;
pub use crate::crypto::MultiEncryptedSecureData;
pub use crate::crypto::PrivateEncryptKey;
pub use crate::crypto::PrivateSignKey;
pub use crate::crypto::PublicEncryptKey;
pub use crate::crypto::PublicEncryptedSecureData;
pub use crate::crypto::PublicSignKey;
pub use crate::crypto::SignedProtectedData;
pub use crate::meta::ReadOption;
pub use crate::meta::WriteOption;
#[cfg(feature = "enable_server")]
pub use crate::flow::all_ethereal_centralized;
#[cfg(feature = "enable_server")]
pub use crate::flow::all_ethereal_centralized_with_root_key;
#[cfg(feature = "enable_server")]
pub use crate::flow::all_ethereal_distributed;
#[cfg(feature = "enable_server")]
pub use crate::flow::all_ethereal_distributed_with_root_key;
#[cfg(feature = "enable_server")]
pub use crate::flow::all_persistent_and_centralized;
#[cfg(feature = "enable_server")]
pub use crate::flow::all_persistent_and_centralized_with_root_key;
#[cfg(feature = "enable_server")]
pub use crate::flow::all_persistent_and_distributed;
#[cfg(feature = "enable_server")]
pub use crate::flow::all_persistent_and_distributed_with_root_key;
#[cfg(feature = "enable_server")]
pub use crate::flow::OpenAction;
#[cfg(feature = "enable_server")]
pub use crate::flow::OpenFlow;
pub use crate::utils::chain_key_16hex;
pub use crate::utils::chain_key_4hex;
pub use crate::chain::Chain;
pub use crate::conf::ChainBuilder;
pub use crate::mesh::ChainGuard;
pub use crate::trust::ChainKey;
pub use crate::trust::ChainRef;
pub use crate::dio::Bus;
pub use crate::dio::BusEvent;
pub use crate::dio::TryBusEvent;
pub use crate::dio::Dao;
pub use crate::dio::DaoAuthGuard;
pub use crate::dio::DaoChild;
pub use crate::dio::DaoForeign;
pub use crate::dio::DaoMap;
pub use crate::dio::DaoMut;
pub use crate::dio::DaoMutGuard;
pub use crate::dio::DaoMutGuardOwned;
pub use crate::dio::DaoObj;
pub use crate::dio::DaoVec;
pub use crate::dio::DaoWeak;
pub use crate::dio::Dio;
pub use crate::dio::DioMut;
pub use crate::dio::DioSessionGuard;
pub use crate::dio::DioSessionGuardMut;
pub use crate::multi::ChainMultiUser;
pub use crate::session::AteGroup;
pub use crate::session::AteGroupRole;
pub use crate::session::AteRolePurpose;
pub use crate::session::AteSession;
pub use crate::session::AteSessionGroup;
pub use crate::session::AteSessionInner;
pub use crate::session::AteSessionKeyCategory;
pub use crate::session::AteSessionProperty;
pub use crate::session::AteSessionSudo;
pub use crate::session::AteSessionType;
pub use crate::session::AteSessionUser;
pub use crate::single::ChainSingleUser;
pub use crate::spec::SerializationFormat;
pub use crate::transaction::TransactionScope;
pub use crate::service::ServiceHandler;
pub use crate::comms::CertificateValidation;
pub use crate::comms::NodeId;
pub use crate::comms::StreamProtocol;
pub use crate::conf::MeshAddress;
pub use crate::engine::TaskEngine;
pub use crate::mesh::BackupMode;
pub use crate::mesh::RecoveryMode;
pub use crate::mesh::Registry;
pub use crate::spec::CentralizedRole;
pub use crate::spec::TrustMode;
pub use std::{
net::{IpAddr, Ipv4Addr, Ipv6Addr},
str::FromStr,
};
pub use crate::mesh::add_global_certificate;
#[cfg(feature = "enable_client")]
pub use crate::mesh::create_client;
#[cfg(feature = "enable_server")]
pub use crate::mesh::create_ethereal_centralized_server;
#[cfg(feature = "enable_server")]
pub use crate::mesh::create_ethereal_distributed_server;
#[cfg(feature = "enable_server")]
pub use crate::mesh::create_persistent_centralized_server;
#[cfg(feature = "enable_client")]
pub use crate::mesh::create_persistent_client;
#[cfg(feature = "enable_server")]
pub use crate::mesh::create_persistent_distributed_server;
#[cfg(feature = "enable_server")]
pub use crate::mesh::create_server;
#[cfg(feature = "enable_client")]
pub use crate::mesh::create_temporal_client;
pub use crate::mesh::set_comm_factory;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/engine.rs | lib/src/engine.rs | #![allow(unused_imports)]
use cooked_waker::*;
use fxhash::FxHashMap;
use once_cell::sync::Lazy;
use pin_project_lite::pin_project;
use std::cell::RefCell;
use std::collections::VecDeque;
use std::future::Future;
use std::ops::DerefMut;
use std::pin::Pin;
use std::sync::atomic::*;
use std::sync::Arc;
use std::sync::Mutex;
use std::task::*;
use std::thread::AccessError;
use std::time::Duration;
use std::time::Instant;
use tokio::sync::broadcast;
use tokio::sync::oneshot;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
pub struct TaskEngine {}
impl TaskEngine {
pub fn spawn<T>(task: T) -> tokio::task::JoinHandle<T::Output>
where
T: Future + Send + 'static,
T::Output: Send + 'static,
{
tokio::spawn(task)
}
pub async fn spawn_blocking<F, R>(f: F) -> R
where
F: FnOnce() -> R + Send + 'static,
R: Send + 'static,
{
let ret = tokio::task::spawn_blocking(f).await;
ret.unwrap()
}
}
#[cfg(target_family = "wasm")]
pub async fn sleep(duration: Duration) {
wasmer_bus_time::prelude::sleep(duration).await;
}
#[cfg(target_family = "wasm")]
pub async fn timeout<T>(
duration: Duration,
future: T,
) -> Result<T::Output, wasmer_bus_time::prelude::Elapsed>
where
T: Future,
{
wasmer_bus_time::prelude::timeout(duration, future).await
}
#[cfg(not(target_family = "wasm"))]
pub async fn sleep(duration: Duration) {
tokio::time::sleep(duration).await;
}
#[cfg(not(target_family = "wasm"))]
pub fn timeout<T>(duration: Duration, future: T) -> tokio::time::Timeout<T>
where
T: Future,
{
tokio::time::timeout(duration, future)
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/lib.rs | lib/src/lib.rs | #![cfg_attr(
not(debug_assertions),
allow(dead_code, unused_imports, unused_variables)
)]
#![warn(unused_extern_crates)]
/// You can change the log file format with these features
/// - feature = "use_version1"
/// - feature = "use_version2"
pub const LOG_VERSION: spec::EventVersion = spec::EventVersion::V2;
pub mod anti_replay;
pub mod chain;
pub mod comms;
pub mod compact;
pub mod conf;
pub mod dio;
#[cfg(feature = "enable_dns")]
pub mod dns;
pub mod engine;
pub mod error;
pub mod event;
#[cfg(feature = "enable_server")]
pub mod flow;
pub mod header;
pub mod index;
pub mod lint;
pub mod loader;
pub mod mesh;
pub mod meta;
pub mod multi;
pub mod pipe;
pub mod plugin;
pub mod prelude;
pub mod redo;
pub mod service;
pub mod session;
pub mod signature;
pub mod single;
pub mod sink;
pub mod spec;
pub mod time;
pub mod transaction;
pub mod transform;
pub mod tree;
pub mod trust;
pub mod utils;
pub mod validator;
pub use ate_crypto::crypto;
pub use ate_crypto::utils::log_init;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/index.rs | lib/src/index.rs | use fxhash::FxHashMap;
use fxhash::FxHashSet;
use multimap::MultiMap;
use super::error::*;
use super::event::*;
use super::header::*;
use super::meta::*;
use super::sink::*;
use super::time::*;
pub trait EventIndexer
where
Self: EventSink + Send + Sync + std::fmt::Debug,
{
fn rebuild(&mut self, _data: &Vec<EventHeader>) -> Result<(), SinkError> {
Ok(())
}
fn clone_indexer(&self) -> Box<dyn EventIndexer>;
}
#[derive(Debug, Copy, Clone)]
pub struct EventLeaf {
pub record: super::crypto::AteHash,
pub created: u64,
pub updated: u64,
}
#[derive(Default, Debug)]
pub(crate) struct BinaryTreeIndexer {
roots: FxHashSet<PrimaryKey>,
primary: FxHashMap<PrimaryKey, EventLeaf>,
secondary: MultiMap<MetaCollection, PrimaryKey>,
parents: FxHashMap<PrimaryKey, MetaParent>,
uploads: FxHashMap<ChainTimestamp, MetaDelayedUpload>,
}
impl BinaryTreeIndexer {
#[allow(dead_code)]
pub(crate) fn contains_key(&self, key: &PrimaryKey) -> bool {
self.primary.contains_key(key)
}
#[allow(dead_code)]
pub(crate) fn count(&self) -> usize {
self.primary.iter().count()
}
#[allow(dead_code)]
pub(crate) fn feed(&mut self, entry: &EventHeader) {
for core in entry.meta.core.iter() {
match core {
CoreMetadata::Tombstone(key) => {
self.roots.remove(key);
self.primary.remove(&key);
if let Some(tree) = self.parents.remove(&key) {
if let Some(vec) = self.secondary.get_vec_mut(&tree.vec) {
vec.retain(|x| *x != *key);
}
}
return;
}
_ => {}
}
}
let has_parent = entry.meta.core
.iter()
.any(|a| if let CoreMetadata::Parent(..) = a { true } else { false });
for core in entry.meta.core.iter() {
match core {
CoreMetadata::Data(key) => {
if entry.raw.data_hash.is_none() {
continue;
}
if has_parent {
self.roots.insert(key.clone());
}
let when = entry.meta.get_timestamp();
let v = self.primary.entry(key.clone()).or_insert(EventLeaf {
record: crate::crypto::AteHash { val: [0; 16] },
created: match when {
Some(t) => t.time_since_epoch_ms,
None => 0,
},
updated: 0,
});
v.record = entry.raw.event_hash.clone();
v.updated = match when {
Some(t) => t.time_since_epoch_ms,
None => 0,
};
}
CoreMetadata::Parent(parent) => {
if let Some(key) = entry.meta.get_data_key() {
if let Some(parent) = self.parents.remove(&key) {
if let Some(vec) = self.secondary.get_vec_mut(&parent.vec) {
vec.retain(|x| *x != key);
}
}
self.parents.insert(key.clone(), parent.clone());
let vec = parent.vec.clone();
let exists = match self.secondary.get_vec(&vec) {
Some(a) => a.contains(&key),
None => false,
};
if exists == false {
self.secondary.insert(vec, key);
}
}
}
CoreMetadata::DelayedUpload(upload) => {
self.uploads.insert(upload.from, upload.clone());
}
_ => {}
}
}
}
pub(crate) fn lookup_primary(&self, key: &PrimaryKey) -> Option<EventLeaf> {
match self.primary.get(key) {
None => None,
Some(a) => Some(a.clone()),
}
}
pub(crate) fn lookup_parent(&self, key: &PrimaryKey) -> Option<MetaParent> {
match self.parents.get(key) {
None => None,
Some(a) => Some(a.clone()),
}
}
pub(crate) fn lookup_secondary(&self, key: &MetaCollection) -> Option<Vec<EventLeaf>> {
match self.secondary.get_vec(key) {
Some(vec) => Some(
vec.iter()
.map(|a| a.clone())
.filter_map(|a| self.primary.get(&a))
.map(|a| a.clone())
.collect::<Vec<_>>(),
),
None => None,
}
}
pub(crate) fn lookup_secondary_raw(&self, key: &MetaCollection) -> Option<Vec<PrimaryKey>> {
match self.secondary.get_vec(key) {
Some(vec) => Some(vec.iter().map(|a| a.clone()).collect::<Vec<_>>()),
None => None,
}
}
pub(crate) fn roots_raw(&self) -> Vec<PrimaryKey> {
self.roots
.iter()
.map(|a| a.clone())
.collect()
}
pub(crate) fn get_delayed_upload(&self, from: ChainTimestamp) -> Option<MetaDelayedUpload> {
self.uploads.get(&from).map(|e| e.clone())
}
pub(crate) fn get_pending_uploads(&self) -> Vec<MetaDelayedUpload> {
self.uploads
.values()
.filter(|d| d.complete == false)
.map(|d| d.clone())
.collect::<Vec<_>>()
}
pub(crate) fn all_keys(&self) -> impl Iterator<Item = &PrimaryKey> {
self.primary.keys()
}
}
#[derive(Default, Debug)]
pub struct UselessIndexer {}
impl EventSink for UselessIndexer {}
impl EventIndexer for UselessIndexer {
fn clone_indexer(&self) -> Box<dyn EventIndexer> {
Box::new(UselessIndexer::default())
}
fn rebuild(&mut self, _headers: &Vec<EventHeader>) -> Result<(), SinkError> {
Ok(())
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/signature.rs | lib/src/signature.rs | #[allow(unused_imports)]
use crate::crypto::{AteHash, DoubleHash, EncryptedPrivateKey, PublicSignKey};
#[allow(unused_imports)]
use crate::session::{AteSession, AteSessionKeyCategory, AteSessionProperty};
use crate::spec::*;
use crate::utils::vec_deserialize;
use crate::utils::vec_serialize;
use error_chain::bail;
use fxhash::FxHashMap;
use multimap::MultiMap;
#[allow(unused_imports)]
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::ops::Deref;
use std::sync::Arc;
#[allow(unused_imports)]
use tracing::{debug, error, info, trace, warn};
use super::lint::EventMetadataLinter;
#[allow(unused_imports)]
use super::sink::EventSink;
use super::transform::EventDataTransformer;
use super::validator::EventValidator;
use super::error::*;
use super::event::*;
use super::lint::*;
use super::meta::*;
use super::plugin::*;
use super::transaction::*;
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct MetaSignature {
pub hashes: Vec<AteHash>,
#[serde(serialize_with = "vec_serialize", deserialize_with = "vec_deserialize")]
pub signature: Vec<u8>,
pub public_key_hash: AteHash,
}
impl std::fmt::Display for MetaSignature {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut first = true;
for hash in self.hashes.iter() {
if first {
first = false;
} else {
write!(f, "+")?;
}
write!(f, "{}", hash)?;
}
write!(f, "={}", self.public_key_hash)
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct MetaSignWith {
pub keys: Vec<AteHash>,
}
impl std::fmt::Display for MetaSignWith {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut first = true;
for key in self.keys.iter() {
if first {
first = false;
} else {
write!(f, ",")?;
}
write!(f, "{}", key)?;
}
Ok(())
}
}
#[derive(Debug, Clone)]
pub struct SignaturePlugin {
pk: FxHashMap<AteHash, PublicSignKey>,
sigs: MultiMap<AteHash, AteHash>,
integrity: TrustMode,
}
impl SignaturePlugin {
pub fn new() -> SignaturePlugin {
SignaturePlugin {
pk: FxHashMap::default(),
sigs: MultiMap::default(),
integrity: TrustMode::Distributed,
}
}
pub fn get_verified_signatures(&self, data_hash: &AteHash) -> Option<&Vec<AteHash>> {
match self.sigs.get_vec(data_hash) {
Some(a) => Some(a),
None => None,
}
}
#[allow(dead_code)]
pub fn has_public_key(&self, key_hash: &AteHash) -> bool {
self.pk.contains_key(&key_hash)
}
}
impl EventSink for SignaturePlugin {
fn feed(
&mut self,
header: &EventHeader,
conversation: Option<&Arc<ConversationSession>>,
) -> Result<(), SinkError> {
// Store the public key and encrypt private keys into the index
for m in header.meta.core.iter() {
match m {
CoreMetadata::PublicKey(pk) => {
self.pk.insert(pk.hash(), pk.clone());
}
_ => {}
}
}
// The signatures need to be validated after the public keys are processed or
// there will be a race condition
for m in header.meta.core.iter() {
match m {
CoreMetadata::Signature(sig) => {
if self.integrity == TrustMode::Distributed
|| self.integrity == TrustMode::Centralized(CentralizedRole::Server)
{
let pk = match self.pk.get(&sig.public_key_hash) {
Some(pk) => pk,
None => {
trace!("signature missing public key: {}", sig.public_key_hash);
bail!(SinkErrorKind::MissingPublicKey(sig.public_key_hash))
}
};
let hashes_bytes: Vec<u8> = sig
.hashes
.iter()
.flat_map(|h| Vec::from(h.val).into_iter())
.collect();
let hash_of_hashes = AteHash::from_bytes(&hashes_bytes[..]);
let result = match pk.verify(&hash_of_hashes.val[..], &sig.signature[..]) {
Ok(r) => r,
Err(err) => {
trace!("signature is invalid: {}", err);
bail!(SinkErrorKind::InvalidSignature(
sig.public_key_hash,
Some(err)
))
}
};
if result == false {
trace!("signature failed validate - {}", sig.public_key_hash);
bail!(SinkErrorKind::InvalidSignature(sig.public_key_hash, None));
}
}
// Add all the validated hashes
for sig_hash in &sig.hashes {
self.sigs.insert(sig_hash.clone(), sig.public_key_hash);
}
// If we in a conversation and integrity is centrally managed then update the
// conversation so that we record that a signature was validated for a hash
// which is clear proof of ownershp
if self.integrity.is_centralized() {
if let Some(conversation) = &conversation {
if let Some(conv_id) = conversation.id.read() {
if sig.hashes.contains(conv_id.deref()) {
let mut lock = conversation.signatures.write().unwrap();
lock.insert(sig.public_key_hash);
}
}
}
}
}
_ => {}
}
}
Ok(())
}
fn reset(&mut self) {
self.pk.clear();
self.sigs.clear();
}
}
impl EventValidator for SignaturePlugin {
fn clone_validator(&self) -> Box<dyn EventValidator> {
Box::new(self.clone())
}
fn set_integrity_mode(&mut self, mode: TrustMode) {
self.integrity = mode;
}
fn validator_name(&self) -> &str {
"signature-validator"
}
}
impl EventMetadataLinter for SignaturePlugin {
fn clone_linter(&self) -> Box<dyn EventMetadataLinter> {
Box::new(self.clone())
}
fn metadata_lint_many<'a>(
&self,
raw: &Vec<LintData<'a>>,
session: &'_ dyn AteSession,
conversation: Option<&Arc<ConversationSession>>,
) -> Result<Vec<CoreMetadata>, LintError> {
// If there is no data then we are already done
let mut ret = Vec::new();
if raw.len() <= 0 {
return Ok(ret);
}
// Build a list of all the authorizations we need to write
let mut auths = raw
.iter()
.filter_map(|e| e.data.meta.get_sign_with())
.flat_map(|a| a.keys.iter())
.collect::<Vec<_>>();
auths.sort();
auths.dedup();
// Check the fast path... if we are under centralized integrity and the destination
// has already got proof that we own the authentication key then we are done
if self.integrity.is_centralized() {
if let Some(conversation) = &conversation {
let lock = conversation.signatures.read().unwrap();
auths.retain(|h| lock.contains(h) == false);
}
}
// Loop through each unique write key that we need to write with
for auth in auths.into_iter() {
// Find the session key for it (if one does not exist we have a problem!)
let sk = match session
.write_keys(AteSessionKeyCategory::AllKeys)
.filter(|k| k.hash() == *auth)
.next()
{
Some(sk) => sk,
None => bail!(LintErrorKind::MissingWriteKey(auth.clone())),
};
// Compute a hash of the hashesevt
let mut data_hashes = Vec::new();
if self.integrity.is_centralized() {
if let Some(conversation) = &conversation {
if let Some(conv_id) = conversation.id.read() {
let conv_id = conv_id.deref().clone();
data_hashes.push(conv_id);
}
}
}
for e in raw.iter() {
if let Some(a) = e.data.meta.get_sign_with() {
if a.keys.contains(&auth) == true {
data_hashes.push(e.header.raw.event_hash);
}
}
}
let hashes_bytes = data_hashes
.iter()
.flat_map(|h| Vec::from(h.clone().val).into_iter())
.collect::<Vec<_>>();
let hash_of_hashes = AteHash::from_bytes(&hashes_bytes[..]);
// Add the public key side into the chain-of-trust if it is not present yet
if self.pk.get(&auth).is_none() || self.integrity.is_centralized() {
ret.push(CoreMetadata::PublicKey(sk.as_public_key().clone()));
};
// Next we need to decrypt the private key and use it to sign the hashes
let sig = sk.sign(&hash_of_hashes.val[..])?;
let sig = MetaSignature {
hashes: data_hashes,
signature: sig,
public_key_hash: auth.clone(),
};
// Push the signature
ret.push(CoreMetadata::Signature(sig));
// Save signatures we have sent over this specific conversation so that future
// transmissions do not need to prove it again (this makes the fast path quicker)
if self.integrity.is_centralized() {
if let Some(conversation) = &conversation {
let mut lock = conversation.signatures.write().unwrap();
lock.insert((*auth).clone());
}
}
}
// All ok
Ok(ret)
}
}
impl EventDataTransformer for SignaturePlugin {
fn clone_transformer(&self) -> Box<dyn EventDataTransformer> {
Box::new(self.clone())
}
}
impl EventPlugin for SignaturePlugin {
fn clone_plugin(&self) -> Box<dyn EventPlugin> {
Box::new(self.clone())
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/transform.rs | lib/src/transform.rs | use super::crypto::*;
use super::error::*;
use super::meta::*;
use super::session::*;
use super::transaction::TransactionMetadata;
use bytes::{Buf, Bytes};
use snap::read::FrameDecoder;
use snap::read::FrameEncoder;
pub trait EventDataTransformer: Send + Sync {
/// Callback when data is stored in the event
fn data_as_underlay(
&self,
_meta: &mut Metadata,
with: Bytes,
_session: &'_ dyn AteSession,
_trans_meta: &TransactionMetadata,
) -> Result<Bytes, TransformError> {
Ok(with)
}
/// Callback before data in an event is actually used by an actual user
fn data_as_overlay(
&self,
_meta: &Metadata,
with: Bytes,
_session: &'_ dyn AteSession,
) -> Result<Bytes, TransformError> {
Ok(with)
}
fn clone_transformer(&self) -> Box<dyn EventDataTransformer>;
}
#[derive(Debug, Default, Clone)]
pub struct CompressorWithSnapTransformer {}
impl EventDataTransformer for CompressorWithSnapTransformer {
fn clone_transformer(&self) -> Box<dyn EventDataTransformer> {
Box::new(self.clone())
}
#[allow(unused_variables)]
fn data_as_underlay(
&self,
meta: &mut Metadata,
with: Bytes,
_session: &'_ dyn AteSession,
_trans_meta: &TransactionMetadata,
) -> Result<Bytes, TransformError> {
let mut reader = FrameEncoder::new(with.reader());
let mut compressed = Vec::new();
std::io::copy(&mut reader, &mut compressed)?;
Ok(Bytes::from(compressed))
}
#[allow(unused_variables)]
fn data_as_overlay(
&self,
meta: &Metadata,
with: Bytes,
_session: &'_ dyn AteSession,
) -> Result<Bytes, TransformError> {
let mut reader = FrameDecoder::new(with.reader());
let mut decompressed = Vec::new();
std::io::copy(&mut reader, &mut decompressed)?;
Ok(Bytes::from(decompressed))
}
}
#[derive(Clone)]
pub struct StaticEncryptionTransformer {
key: EncryptKey,
}
impl StaticEncryptionTransformer {
#[allow(dead_code)]
pub fn new(key: &EncryptKey) -> StaticEncryptionTransformer {
StaticEncryptionTransformer { key: key.clone() }
}
}
impl EventDataTransformer for StaticEncryptionTransformer {
fn clone_transformer(&self) -> Box<dyn EventDataTransformer> {
Box::new(self.clone())
}
#[allow(unused_variables)]
fn data_as_underlay(
&self,
meta: &mut Metadata,
with: Bytes,
_session: &'_ dyn AteSession,
_trans_meta: &TransactionMetadata,
) -> Result<Bytes, TransformError> {
let iv = meta.generate_iv();
let encrypted = self.key.encrypt_with_iv(&iv, &with[..]);
Ok(Bytes::from(encrypted))
}
#[allow(unused_variables)]
fn data_as_overlay(
&self,
meta: &Metadata,
with: Bytes,
_session: &'_ dyn AteSession,
) -> Result<Bytes, TransformError> {
let iv = meta.get_iv()?;
let decrypted = self.key.decrypt(&iv, &with[..]);
Ok(Bytes::from(decrypted))
}
}
#[test]
fn test_encrypter() {
crate::utils::bootstrap_test_env();
let key = EncryptKey::from_seed_string("test".to_string(), KeySize::Bit192);
let encrypter = StaticEncryptionTransformer::new(&key);
let trans_meta = TransactionMetadata::default();
let test_bytes = Bytes::from_static(b"Some Crypto Text");
let mut meta = Metadata::default();
let encrypted = encrypter
.data_as_underlay(
&mut meta,
test_bytes.clone(),
&AteSessionUser::new(),
&trans_meta,
)
.unwrap();
println!("metadata: {:?}", meta);
println!("data_test: {:X}", &test_bytes);
println!("data_encrypted: {:X}", &encrypted);
assert_ne!(&test_bytes, &encrypted);
let decrypted = encrypter
.data_as_overlay(&mut meta, encrypted, &AteSessionUser::new())
.unwrap();
println!("data_decrypted: {:X}", &decrypted);
assert_eq!(&test_bytes, &decrypted);
}
#[test]
fn test_compressor() {
crate::utils::bootstrap_test_env();
let compressor = CompressorWithSnapTransformer::default();
let trans_meta = TransactionMetadata::default();
let test_bytes = Bytes::from("test".as_bytes());
let mut meta = Metadata::default();
let compressed = compressor
.data_as_underlay(
&mut meta,
test_bytes.clone(),
&AteSessionUser::new(),
&trans_meta,
)
.unwrap();
println!("metadata: {:?}", meta);
println!("data_test: {:X}", &test_bytes);
println!("data_compressed: {:X}", &compressed);
assert_ne!(&test_bytes, &compressed);
let decompressed = compressor
.data_as_overlay(&mut meta, compressed, &AteSessionUser::new())
.unwrap();
println!("data_decompressed: {:X}", &decompressed);
assert_eq!(&test_bytes, &decompressed);
}
#[test]
fn test_crypto() {
crate::utils::bootstrap_test_env();
let key = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F";
let cipher = crate::crypto::EncryptKey::Aes128(key.clone());
let data = b"Some Crypto Text";
let iv = b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07";
let ciphertext = cipher.encrypt_with_iv(&InitializationVector::from(iv), data);
assert_eq!(
[110, 148, 177, 161, 48, 153, 25, 114, 206, 212, 126, 250, 70, 201, 154, 141],
&ciphertext[..]
);
let key = b"\x00\x01\x02\x03\x04\x05\x06\x07\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F";
let cipher = crate::crypto::EncryptKey::Aes128(key.clone());
let iv = b"\x00\x01\x02\x03\x04\x05\x06\x07\x00\x01\x02\x03\x04\x05\x06\x07";
let data = cipher.decrypt(&InitializationVector::from(iv), &ciphertext[..]);
assert_eq!(b"Some Crypto Text", &data[..]);
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/anti_replay.rs | lib/src/anti_replay.rs | #![allow(unused_imports)]
use std::sync::Arc;
use tracing::{debug, error, info};
use crate::crypto::AteHash;
use fxhash::FxHashSet;
use super::lint::EventMetadataLinter;
use super::sink::EventSink;
use super::transform::EventDataTransformer;
use super::validator::EventValidator;
use super::error::*;
use super::event::*;
use super::loader::*;
use super::plugin::*;
use super::transaction::ConversationSession;
use super::validator::ValidationResult;
#[derive(Debug, Default, Clone)]
pub struct AntiReplayPlugin {
seen: FxHashSet<AteHash>,
}
impl AntiReplayPlugin {
pub fn new() -> AntiReplayPlugin {
AntiReplayPlugin {
seen: FxHashSet::default(),
}
}
pub fn push(&mut self, hash: AteHash) {
self.seen.insert(hash);
}
}
impl EventSink for AntiReplayPlugin {
fn feed(
&mut self,
header: &EventHeader,
_conversation: Option<&Arc<ConversationSession>>,
) -> Result<(), SinkError> {
self.seen.insert(header.raw.event_hash);
Ok(())
}
fn reset(&mut self) {
self.seen.clear();
}
}
impl EventValidator for AntiReplayPlugin {
fn validate(
&self,
header: &EventHeader,
_conversation: Option<&Arc<ConversationSession>>,
) -> Result<ValidationResult, ValidationError> {
match self.seen.contains(&header.raw.event_hash) {
true => {
#[cfg(feature = "enable_verbose")]
debug!(
"rejected event as it is a duplicate - {}",
header.raw.event_hash
);
Ok(ValidationResult::Deny)
}
false => Ok(ValidationResult::Abstain),
}
}
fn clone_validator(&self) -> Box<dyn EventValidator> {
Box::new(self.clone())
}
fn validator_name(&self) -> &str {
"anti-reply-validator"
}
}
impl EventMetadataLinter for AntiReplayPlugin {
fn clone_linter(&self) -> Box<dyn EventMetadataLinter> {
Box::new(self.clone())
}
}
impl EventDataTransformer for AntiReplayPlugin {
fn clone_transformer(&self) -> Box<dyn EventDataTransformer> {
Box::new(self.clone())
}
}
impl EventPlugin for AntiReplayPlugin {
fn clone_plugin(&self) -> Box<dyn EventPlugin> {
Box::new(self.clone())
}
}
impl Loader for AntiReplayPlugin {
fn relevance_check(&mut self, header: &EventWeakData) -> bool {
match header.as_header_raw() {
Ok(a) => {
let ret = self.seen.contains(&a.event_hash);
self.seen.insert(a.event_hash);
ret
}
_ => false,
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/lint.rs | lib/src/lint.rs | use crate::session::AteSession;
use std::sync::Arc;
use super::error::*;
use super::event::*;
use super::meta::*;
use super::transaction::*;
pub struct LintData<'a> {
pub data: &'a EventWeakData,
pub header: EventHeader,
}
pub trait EventMetadataLinter: Send + Sync {
/// Called just before the metadata is pushed into the redo log
fn metadata_lint_many<'a>(
&self,
_lints: &Vec<LintData<'a>>,
_session: &'_ dyn AteSession,
_conversation: Option<&Arc<ConversationSession>>,
) -> Result<Vec<CoreMetadata>, LintError> {
Ok(Vec::new())
}
// Lint an exact event
fn metadata_lint_event(
&self,
_meta: &Metadata,
_session: &'_ dyn AteSession,
_trans_meta: &TransactionMetadata,
_type_code: &str,
) -> Result<Vec<CoreMetadata>, LintError> {
Ok(Vec::new())
}
fn clone_linter(&self) -> Box<dyn EventMetadataLinter>;
}
#[derive(Default, Clone)]
pub struct EventAuthorLinter {}
impl EventMetadataLinter for EventAuthorLinter {
fn clone_linter(&self) -> Box<dyn EventMetadataLinter> {
Box::new(self.clone())
}
fn metadata_lint_event(
&self,
_meta: &Metadata,
session: &'_ dyn AteSession,
_trans_meta: &TransactionMetadata,
_type_code: &str,
) -> Result<Vec<CoreMetadata>, LintError> {
let mut ret = Vec::new();
ret.push(CoreMetadata::Author(session.identity().to_string()));
Ok(ret)
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/dns.rs | lib/src/dns.rs | #![allow(unused_imports)]
use std::net::SocketAddr;
use std::net::ToSocketAddrs;
#[cfg(feature = "enable_full")]
use tokio::net::TcpStream as TokioTcpStream;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::conf::ConfAte;
use crate::engine::TaskEngine;
use {
trust_dns_client::client::*, trust_dns_client::op::DnsResponse, trust_dns_client::tcp::*,
trust_dns_proto::iocompat::AsyncIoTokioAsStd, trust_dns_proto::DnssecDnsHandle,
};
pub use {trust_dns_client::error::ClientError, trust_dns_client::rr::*};
pub enum DnsClient {
Dns {
cfg: ConfAte,
client: MemoizeClientHandle<AsyncClient>,
},
DnsSec {
cfg: ConfAte,
client: DnssecDnsHandle<MemoizeClientHandle<AsyncClient>>,
},
}
impl DnsClient {
#[cfg(feature = "enable_full")]
pub async fn connect(cfg: &ConfAte) -> DnsClient {
debug!("using DNS server: {}", cfg.dns_server);
let addr: SocketAddr = (cfg.dns_server.clone(), 53)
.to_socket_addrs()
.unwrap()
.next()
.unwrap();
let (stream, sender) = TcpClientStream::<AsyncIoTokioAsStd<TokioTcpStream>>::new(addr);
let client = AsyncClient::new(stream, sender, None);
let (client, bg) = client.await.expect("client failed to connect");
TaskEngine::spawn(bg);
let client = MemoizeClientHandle::new(client);
match cfg.dns_sec {
false => {
debug!("configured for DNSSec");
DnsClient::Dns {
cfg: cfg.clone(),
client,
}
}
true => {
debug!("configured for plain DNS");
DnsClient::DnsSec {
cfg: cfg.clone(),
client: DnssecDnsHandle::new(client.clone()),
}
}
}
}
pub async fn reconnect(&mut self) {
let cfg = match self {
DnsClient::Dns { cfg, client: _ } => cfg.clone(),
DnsClient::DnsSec { cfg, client: _ } => cfg.clone(),
};
*self = DnsClient::connect(&cfg).await;
}
pub async fn query(
&mut self,
name: Name,
query_class: DNSClass,
query_type: RecordType,
) -> Result<DnsResponse, ClientError> {
let ret = {
match self {
DnsClient::Dns { cfg: _, client: c } => {
c.query(name.clone(), query_class, query_type).await
}
DnsClient::DnsSec { cfg: _, client: c } => {
c.query(name.clone(), query_class, query_type).await
}
}
};
match ret {
Ok(a) => Ok(a),
Err(_) => {
self.reconnect().await;
match self {
DnsClient::Dns { cfg: _, client: c } => {
c.query(name, query_class, query_type).await
}
DnsClient::DnsSec { cfg: _, client: c } => {
c.query(name, query_class, query_type).await
}
}
}
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/header.rs | lib/src/header.rs | use std::rc::Rc;
#[allow(unused_imports)]
use super::meta::*;
#[allow(unused_imports)]
use fastrand::u64;
pub use ate_crypto::spec::PrimaryKey;
pub(crate) struct PrimaryKeyScope {
pop: Option<PrimaryKey>,
_negative: Rc<()>,
}
impl PrimaryKeyScope {
pub fn new(key: PrimaryKey) -> Self {
PrimaryKeyScope {
pop: PrimaryKey::current_set(Some(key)),
_negative: Rc::new(()),
}
}
}
impl Drop for PrimaryKeyScope {
fn drop(&mut self) {
PrimaryKey::current_set(self.pop.take());
}
}
impl Metadata {
pub fn for_data(key: PrimaryKey) -> Metadata {
let mut ret = Metadata::default();
ret.core.push(CoreMetadata::Data(key));
return ret;
}
pub fn get_data_key(&self) -> Option<PrimaryKey> {
self.core
.iter()
.filter_map(|m| match m {
CoreMetadata::Data(k) => Some(k.clone()),
CoreMetadata::Tombstone(k) => Some(k.clone()),
_ => None,
})
.next()
}
#[allow(dead_code)]
pub fn set_data_key(&mut self, key: PrimaryKey) {
for core in self.core.iter_mut() {
match core {
CoreMetadata::Data(k) => {
if *k == key {
return;
}
*k = key;
return;
}
_ => {}
}
}
self.core.push(CoreMetadata::Data(key));
}
} | rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/sink.rs | lib/src/sink.rs | use std::sync::Arc;
use super::error::*;
use super::event::*;
use super::transaction::ConversationSession;
pub trait EventSink {
fn feed(
&mut self,
_header: &EventHeader,
_conversation: Option<&Arc<ConversationSession>>,
) -> Result<(), SinkError> {
Ok(())
}
fn reset(&mut self) {}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/loader.rs | lib/src/loader.rs | use async_trait::async_trait;
use tokio::sync::mpsc;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::error::*;
use crate::event::*;
use crate::redo::LogLookup;
#[derive(Debug, Clone)]
pub struct LoadData {
pub(crate) lookup: LogLookup,
pub header: EventHeaderRaw,
pub data: EventWeakData,
}
#[async_trait]
pub trait Loader: Send + Sync {
/// Function invoked when the start of the history is being loaded
async fn start_of_history(&mut self, _size: usize) {}
/// Human message sent from the server to this process
fn human_message(&mut self, _message: String) {}
/// Events are being processed
fn feed_events(&mut self, _evts: &Vec<EventWeakData>) {}
/// Load data is being processed
async fn feed_load_data(&mut self, _data: LoadData) {}
/// The last event is now received
async fn end_of_history(&mut self) {}
/// Callback when the load has failed
async fn failed(&mut self, err: ChainCreationError) -> Option<ChainCreationError> {
Some(err)
}
fn relevance_check(&mut self, _header: &EventWeakData) -> bool {
false
}
}
#[derive(Debug, Clone, Default)]
pub struct DummyLoader {}
impl Loader for DummyLoader {}
#[derive(Default)]
pub struct CompositionLoader {
pub loaders: Vec<Box<dyn Loader>>,
}
#[async_trait]
impl Loader for CompositionLoader {
async fn start_of_history(&mut self, size: usize) {
for loader in self.loaders.iter_mut() {
loader.start_of_history(size).await;
}
}
fn human_message(&mut self, message: String) {
for loader in self.loaders.iter_mut() {
loader.human_message(message.clone());
}
}
fn feed_events(&mut self, evts: &Vec<EventWeakData>) {
for loader in self.loaders.iter_mut() {
loader.feed_events(evts);
}
}
async fn feed_load_data(&mut self, data: LoadData) {
for loader in self.loaders.iter_mut() {
loader.feed_load_data(data.clone()).await;
}
}
async fn end_of_history(&mut self) {
for loader in self.loaders.iter_mut() {
loader.end_of_history().await;
}
}
async fn failed(&mut self, mut err: ChainCreationError) -> Option<ChainCreationError> {
let err_msg = err.to_string();
for loader in self.loaders.iter_mut() {
err = match loader.failed(err).await {
Some(a) => a,
None => ChainCreationErrorKind::InternalError(err_msg.clone()).into(),
};
}
Some(err)
}
fn relevance_check(&mut self, header: &EventWeakData) -> bool {
for loader in self.loaders.iter_mut() {
if loader.relevance_check(header) {
return true;
}
}
false
}
}
pub struct NotificationLoader {
notify: mpsc::Sender<Result<(), ChainCreationError>>,
}
impl NotificationLoader {
pub fn new(notify: mpsc::Sender<Result<(), ChainCreationError>>) -> NotificationLoader {
NotificationLoader { notify }
}
}
#[async_trait]
impl Loader for NotificationLoader {
    /// Signals the listener that history playback has begun; a closed
    /// channel is deliberately ignored.
    async fn start_of_history(&mut self, _size: usize) {
        trace!("sending notify");
        self.notify.send(Ok(())).await.ok();
    }
    /// Signals the listener that history playback has completed.
    async fn end_of_history(&mut self) {
        trace!("sending notify");
        self.notify.send(Ok(())).await.ok();
    }
    /// Hands the failure to the listener and consumes it (returns `None`).
    async fn failed(&mut self, err: ChainCreationError) -> Option<ChainCreationError> {
        self.notify.send(Err(err)).await.ok();
        None
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/validator.rs | lib/src/validator.rs | use std::sync::Arc;
use super::crypto::*;
use super::error::*;
use super::event::*;
use super::meta::*;
use super::signature::MetaSignature;
use super::transaction::*;
use crate::spec::TrustMode;
/// Verdict returned by an `EventValidator` for a single event.
#[derive(Debug)]
pub enum ValidationResult {
    // Reject the event outright.
    Deny,
    // Accept the event.
    Allow,
    // Express no opinion; another validator decides.
    #[allow(dead_code)]
    Abstain,
}
/// Validates events before they are accepted into a chain-of-trust.
pub trait EventValidator: Send + Sync {
    /// Judges a single event; the default implementation abstains.
    fn validate(
        &self,
        _header: &EventHeader,
        _conversation: Option<&Arc<ConversationSession>>,
    ) -> Result<ValidationResult, ValidationError> {
        Ok(ValidationResult::Abstain)
    }
    /// Informs the validator of the chain's trust mode; no-op by default.
    fn set_integrity_mode(&mut self, _mode: TrustMode) {}
    /// Produces a boxed copy of this validator (object-safe clone).
    fn clone_validator(&self) -> Box<dyn EventValidator>;
    /// Stable name used to identify this validator in diagnostics.
    fn validator_name(&self) -> &str;
}
/// A validator that approves every event unconditionally.
#[derive(Default, Clone)]
pub struct RubberStampValidator {}
impl EventValidator for RubberStampValidator {
    fn clone_validator(&self) -> Box<dyn EventValidator> {
        Box::new(self.clone())
    }
    /// Approves every event without inspection.
    // The previous `#[allow(unused_variables)]` was redundant: both
    // parameters are already underscore-prefixed.
    fn validate(
        &self,
        _header: &EventHeader,
        _conversation: Option<&Arc<ConversationSession>>,
    ) -> Result<ValidationResult, ValidationError> {
        Ok(ValidationResult::Allow)
    }
    fn validator_name(&self) -> &str {
        "rubber-stamp-validator"
    }
}
/// A validator configured with a fixed public signing key.
#[derive(Debug, Clone)]
pub struct StaticSignatureValidator {
    // The key events are expected to be signed with.
    #[allow(dead_code)]
    pk: PublicSignKey,
}
impl StaticSignatureValidator {
#[allow(dead_code)]
pub fn new(key: &PublicSignKey) -> StaticSignatureValidator {
StaticSignatureValidator { pk: key.clone() }
}
}
impl EventValidator for StaticSignatureValidator {
    fn clone_validator(&self) -> Box<dyn EventValidator> {
        Box::new(self.clone())
    }
    /// Currently allows every event.
    ///
    /// NOTE(review): despite holding `pk`, no signature is actually checked
    /// here — confirm whether verification happens elsewhere or is pending.
    // The previous `#[allow(unused_variables)]` was redundant: both
    // parameters are already underscore-prefixed.
    fn validate(
        &self,
        _header: &EventHeader,
        _conversation: Option<&Arc<ConversationSession>>,
    ) -> Result<ValidationResult, ValidationError> {
        Ok(ValidationResult::Allow)
    }
    fn validator_name(&self) -> &str {
        "static-signature-validator"
    }
}
impl Metadata {
    /// Attaches a signature to this metadata set.
    ///
    /// NOTE(review): currently a no-op — the signature is discarded; confirm
    /// whether callers depend on this being stubbed out.
    #[allow(dead_code)]
    pub fn add_signature(&mut self, _sig: MetaSignature) {}
    /// Returns the first signature entry in the core metadata, if any.
    pub fn get_signature<'a>(&'a self) -> Option<&'a MetaSignature> {
        // find_map short-circuits on the first Signature variant
        // (equivalent to the former filter_map(..).next()).
        self.core.iter().find_map(|m| match m {
            CoreMetadata::Signature(k) => Some(k),
            _ => None,
        })
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/pipe.rs | lib/src/pipe.rs | use super::error::*;
use super::transaction::*;
use crate::chain::ChainWork;
use crate::header::PrimaryKey;
#[allow(unused_imports)]
use crate::meta::*;
use async_trait::async_trait;
use std::sync::Arc;
use tokio::sync::mpsc;
use bytes::Bytes;
use crate::crypto::AteHash;
/// Status transitions a pipe can report back to its owner after `connect`.
pub enum ConnectionStatusChange {
    // The connection was lost entirely.
    Disconnected,
    // The connection degraded to read-only operation.
    ReadOnly,
}
/// Transport abstraction that carries chain events (and lock/lazy-data
/// traffic) towards their destination. Pipes can be chained via `set_next`.
#[async_trait]
pub(crate) trait EventPipe: Send + Sync {
    /// Whether the pipe currently has a live connection; defaults to true.
    async fn is_connected(&self) -> bool {
        true
    }
    /// Establishes the connection and returns a stream of status changes.
    /// Default: not supported by this pipe.
    async fn connect(
        &self,
    ) -> Result<mpsc::Receiver<ConnectionStatusChange>, ChainCreationError> {
        Err(ChainCreationErrorKind::NotImplemented.into())
    }
    /// Invoked when the connection is lost; no-op by default.
    async fn on_disconnect(&self) -> Result<(), CommsError> {
        Ok(())
    }
    /// Invoked when the connection degrades to read-only; no-op by default.
    async fn on_read_only(&self) -> Result<(), CommsError> {
        Ok(())
    }
    /// Resolves lazily-stored event payloads by record hash; one slot per
    /// requested leaf, `None` where the payload is unknown.
    async fn load_many(&self, leafs: Vec<AteHash>) -> Result<Vec<Option<Bytes>>, LoadError>;
    /// Pushes a unit of chain work (a transaction) through the pipe.
    async fn feed(&self, work: ChainWork) -> Result<(), CommitError>;
    /// Pre-seeds the pipe with known record payloads.
    async fn prime(&self, records: Vec<(AteHash, Option<Bytes>)>) -> Result<(), CommsError>;
    /// Attempts to take a distributed lock on a key; false when contended.
    async fn try_lock(&self, key: PrimaryKey) -> Result<bool, CommitError>;
    /// Releases a distributed lock on a key.
    async fn unlock(&self, key: PrimaryKey) -> Result<(), CommitError>;
    /// Releases only the local half of a lock (synchronous).
    fn unlock_local(&self, key: PrimaryKey) -> Result<(), CommitError>;
    /// Chains another pipe after this one.
    fn set_next(&mut self, next: Arc<Box<dyn EventPipe>>);
    /// The conversation session this pipe participates in, if any.
    async fn conversation(&self) -> Option<Arc<ConversationSession>>;
}
/// A terminal pipe that accepts everything and stores/forwards nothing.
#[derive(Debug, Default, Clone, Copy)]
pub(crate) struct NullPipe {}
impl NullPipe {
    /// Creates a ready-to-use terminal pipe that swallows all traffic.
    pub fn new() -> Arc<Box<dyn EventPipe>> {
        Arc::new(Box::new(Self {}))
    }
}
#[async_trait]
impl EventPipe for NullPipe {
    /// Accepts and discards the work.
    async fn feed(&self, _work: ChainWork) -> Result<(), CommitError> {
        Ok(())
    }
    /// Resolves every requested leaf to `None` — nothing is stored here.
    async fn load_many(&self, leafs: Vec<AteHash>) -> Result<Vec<Option<Bytes>>, LoadError> {
        Ok(vec![None; leafs.len()])
    }
    /// Ignores the supplied records.
    async fn prime(&self, _records: Vec<(AteHash, Option<Bytes>)>) -> Result<(), CommsError> {
        Ok(())
    }
    /// Locks are never granted by the null pipe.
    async fn try_lock(&self, _key: PrimaryKey) -> Result<bool, CommitError> {
        Ok(false)
    }
    async fn unlock(&self, _key: PrimaryKey) -> Result<(), CommitError> {
        Ok(())
    }
    fn unlock_local(&self, _key: PrimaryKey) -> Result<(), CommitError> {
        Ok(())
    }
    /// Chaining is a no-op — the null pipe is always terminal.
    fn set_next(&mut self, _next: Arc<Box<dyn EventPipe>>) {}
    async fn conversation(&self) -> Option<Arc<ConversationSession>> {
        None
    }
}
/// A pipe that mirrors every operation onto two underlying pipes.
#[derive(Clone)]
pub(crate) struct DuelPipe {
    first: Arc<Box<dyn EventPipe>>,
    second: Arc<Box<dyn EventPipe>>,
}
impl DuelPipe {
    /// Combines two pipes so that operations are fanned out to both.
    pub fn new(first: Arc<Box<dyn EventPipe>>, second: Arc<Box<dyn EventPipe>>) -> DuelPipe {
        Self { first, second }
    }
}
#[async_trait]
impl EventPipe for DuelPipe {
    /// Connected only when *both* underlying pipes report connected.
    async fn is_connected(&self) -> bool {
        if self.first.is_connected().await == false {
            return false;
        }
        if self.second.is_connected().await == false {
            return false;
        }
        true
    }
    /// Notifies both pipes; succeeds when *either* handled the event and
    /// reports `ShouldBlock` only when both failed.
    async fn on_disconnect(&self) -> Result<(), CommsError> {
        let ret1 = self.first.on_disconnect().await;
        let ret2 = self.second.on_disconnect().await;
        if let Ok(_) = ret1 {
            return Ok(());
        }
        if let Ok(_) = ret2 {
            return Ok(());
        }
        Err(CommsErrorKind::ShouldBlock.into())
    }
    /// Same either-succeeds semantics as `on_disconnect`.
    async fn on_read_only(&self) -> Result<(), CommsError> {
        let ret1 = self.first.on_read_only().await;
        let ret2 = self.second.on_read_only().await;
        if let Ok(_) = ret1 {
            return Ok(());
        }
        if let Ok(_) = ret2 {
            return Ok(());
        }
        Err(CommsErrorKind::ShouldBlock.into())
    }
    /// Tries the first pipe; falls back to the second only when the first
    /// reports `NotImplemented`. Any other error aborts immediately.
    async fn connect(
        &self,
    ) -> Result<mpsc::Receiver<ConnectionStatusChange>, ChainCreationError> {
        match self.first.connect().await {
            Ok(a) => {
                return Ok(a);
            }
            Err(ChainCreationError(ChainCreationErrorKind::NotImplemented, _)) => {}
            Err(err) => {
                return Err(err);
            }
        }
        match self.second.connect().await {
            Ok(a) => {
                return Ok(a);
            }
            Err(ChainCreationError(ChainCreationErrorKind::NotImplemented, _)) => {}
            Err(err) => {
                return Err(err);
            }
        }
        Err(ChainCreationErrorKind::NotImplemented.into())
    }
    /// Feeds both pipes concurrently; either failure fails the whole call.
    async fn feed(&self, work: ChainWork) -> Result<(), CommitError> {
        let join1 = self.first.feed(work.clone());
        let join2 = self.second.feed(work);
        let (notify1, notify2) = futures::join!(join1, join2);
        notify1?;
        notify2?;
        Ok(())
    }
    /// Queries both pipes concurrently and merges the answers.
    ///
    /// When both succeed, results are walked in lock-step (both iterators
    /// advance each round) and the first pipe's answer wins per slot; a
    /// present payload from either side beats a `None`. When only one side
    /// succeeds, its result is used verbatim; when both fail, the first
    /// error is returned.
    /// NOTE(review): on length mismatch the loop stops at the shorter side —
    /// confirm the two pipes are expected to return equally sized vectors.
    async fn load_many(&self, leafs: Vec<AteHash>) -> Result<Vec<Option<Bytes>>, LoadError> {
        let join1 = self.first.load_many(leafs.clone());
        let join2 = self.second.load_many(leafs);
        let (notify1, notify2) = futures::join!(join1, join2);
        let rets = match (notify1, notify2) {
            (Ok(notify1), Ok(notify2)) => {
                let max = notify1.len().max(notify2.len());
                let mut notify1 = notify1.into_iter();
                let mut notify2 = notify2.into_iter();
                let mut rets = Vec::new();
                for _ in 0..max {
                    match (notify1.next(), notify2.next()) {
                        (Some(Some(a)), _) => rets.push(Some(a)),
                        (_, Some(Some(b))) => rets.push(Some(b)),
                        (Some(None), _) => rets.push(None),
                        (_, Some(None)) => rets.push(None),
                        (_, _) => break,
                    };
                }
                rets
            },
            (Ok(notify1), Err(_)) => notify1,
            (Err(_), Ok(notify2)) => notify2,
            (Err(err), Err(_)) => {
                return Err(err);
            }
        };
        Ok(rets)
    }
    /// Primes both pipes concurrently; either failure fails the call.
    async fn prime(&self, records: Vec<(AteHash, Option<Bytes>)>) -> Result<(), CommsError> {
        let join1 = self.first.prime(records.clone());
        let join2 = self.second.prime(records);
        let (notify1, notify2) = futures::join!(join1, join2);
        notify1?;
        notify2?;
        Ok(())
    }
    /// Locked when either pipe grants the lock (second is skipped if the
    /// first already granted it, due to short-circuit ||).
    async fn try_lock(&self, key: PrimaryKey) -> Result<bool, CommitError> {
        Ok(self.first.try_lock(key).await? || self.second.try_lock(key).await?)
    }
    async fn unlock(&self, key: PrimaryKey) -> Result<(), CommitError> {
        self.first.unlock(key).await?;
        self.second.unlock(key).await?;
        Ok(())
    }
    fn unlock_local(&self, key: PrimaryKey) -> Result<(), CommitError> {
        self.first.unlock_local(key)?;
        self.second.unlock_local(key)?;
        Ok(())
    }
    /// Chaining is not supported on a duel pipe.
    fn set_next(&mut self, _next: Arc<Box<dyn EventPipe>>) {}
    /// Returns the first pipe's conversation, falling back to the second.
    async fn conversation(&self) -> Option<Arc<ConversationSession>> {
        if let Some(ret) = self.first.conversation().await {
            return Some(ret);
        }
        if let Some(ret) = self.second.conversation().await {
            return Some(ret);
        }
        None
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/single.rs | lib/src/single.rs | use std::sync::Arc;
use std::sync::RwLock as StdRwLock;
use tokio::sync::RwLock;
use tokio::sync::RwLockWriteGuard;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use super::chain::*;
use crate::spec::TrustMode;
/// Represents an exclusive lock on a chain-of-trust that allows the
/// user to execute mutations that would otherwise have an immediate
/// impact on other users.
pub struct ChainSingleUser<'a> {
    // Exclusive write guard over the async half of the chain state.
    pub(crate) inside_async: RwLockWriteGuard<'a, ChainProtectedAsync>,
    // Shared handle to the sync half; locked on demand.
    pub(crate) inside_sync: Arc<StdRwLock<ChainProtectedSync>>,
}
impl<'a> ChainSingleUser<'a> {
    /// Acquires exclusive access over the given chain.
    pub(crate) async fn new(accessor: &'a Chain) -> ChainSingleUser<'a> {
        Self::new_ext(&accessor.inside_async, &accessor.inside_sync).await
    }
    /// Acquires exclusive access from the raw protected halves of a chain.
    pub(crate) async fn new_ext(
        inside_async: &'a Arc<RwLock<ChainProtectedAsync>>,
        inside_sync: &'a Arc<StdRwLock<ChainProtectedSync>>,
    ) -> ChainSingleUser<'a> {
        let write_guard = inside_async.write().await;
        ChainSingleUser {
            inside_async: write_guard,
            inside_sync: Arc::clone(inside_sync),
        }
    }
    /// Destroys the underlying chain storage.
    #[allow(dead_code)]
    pub async fn destroy(&mut self) -> Result<(), tokio::io::Error> {
        self.inside_async.chain.destroy().await
    }
    /// Name of the underlying chain.
    #[allow(dead_code)]
    pub fn name(&self) -> String {
        self.inside_async.chain.name()
    }
    /// Prevents any further root nodes from being created on this chain.
    pub fn disable_new_roots(&mut self) {
        self.inside_async.disable_new_roots = true;
    }
    /// Applies the trust mode to both halves of the chain state.
    pub fn set_integrity(&mut self, mode: TrustMode) {
        self.inside_async.set_integrity_mode(mode);
        let mut guard = self.inside_sync.write().unwrap();
        guard.set_integrity_mode(mode);
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/multi.rs | lib/src/multi.rs | use std::sync::RwLock as StdRwLock;
#[allow(unused_imports)]
use std::sync::{Arc, Weak};
use std::time::Duration;
use tokio::sync::RwLock;
use derivative::*;
use error_chain::bail;
use crate::event::EventStrongData;
use crate::session::AteSession;
use super::chain::*;
use super::error::*;
use super::header::*;
use super::index::*;
use super::lint::*;
use super::meta::*;
use super::pipe::*;
use super::spec::MessageFormat;
use super::transaction::*;
use super::trust::*;
use super::event::MessageBytes;
use bytes::Bytes;
/// Holds simultaneous read locks over both halves of the chain state so a
/// caller can inspect them consistently.
pub(crate) struct ChainMultiUserLock<'a> {
    pub inside_async: tokio::sync::RwLockReadGuard<'a, ChainProtectedAsync>,
    pub inside_sync: std::sync::RwLockReadGuard<'a, ChainProtectedSync>,
}
impl<'a> std::ops::Deref for ChainMultiUserLock<'a> {
    type Target = ChainProtectedSync;
    /// Dereferences straight through the sync-side read guard so the
    /// combined lock can be used as a `ChainProtectedSync`.
    fn deref(&self) -> &ChainProtectedSync {
        &*self.inside_sync
    }
}
/// Shared (multi-reader) accessor to a chain-of-trust; cheap to clone as it
/// only holds `Arc` handles.
#[derive(Derivative, Clone)]
#[derivative(Debug)]
pub struct ChainMultiUser {
    pub(super) inside_async: Arc<RwLock<ChainProtectedAsync>>,
    #[derivative(Debug = "ignore")]
    pub(super) inside_sync: Arc<StdRwLock<ChainProtectedSync>>,
    // Transport used for lazy payload loads and transaction feeds.
    #[derivative(Debug = "ignore")]
    pub(super) pipe: Arc<Box<dyn EventPipe>>,
    pub(super) default_format: MessageFormat,
}
impl ChainMultiUser {
    /// Creates a shared accessor over an existing chain.
    pub(crate) async fn new(chain: &Chain) -> ChainMultiUser {
        ChainMultiUser {
            inside_async: Arc::clone(&chain.inside_async),
            inside_sync: Arc::clone(&chain.inside_sync),
            pipe: Arc::clone(&chain.pipe),
            default_format: chain.default_format,
        }
    }
    /// Creates a shared accessor from the raw chain components; reads the
    /// default format from the async half.
    pub(crate) async fn new_ext(
        inside_async: &Arc<RwLock<ChainProtectedAsync>>,
        inside_sync: &Arc<StdRwLock<ChainProtectedSync>>,
        pipe: &Arc<Box<dyn EventPipe>>,
    ) -> ChainMultiUser {
        let guard = inside_async.read().await;
        ChainMultiUser {
            inside_async: Arc::clone(inside_async),
            inside_sync: Arc::clone(inside_sync),
            pipe: Arc::clone(pipe),
            default_format: guard.default_format,
        }
    }
    /// Loads one event, resolving any lazily-stored payload through the pipe.
    /// Fails with `MissingData` when the pipe knows nothing about the record
    /// and `Disconnected` when the pipe returned no answer at all.
    pub async fn load(&self, leaf: EventLeaf) -> Result<LoadStrongResult, LoadError> {
        let ret = self.inside_async.read().await.chain.load(leaf).await?;
        let ret = LoadStrongResult {
            lookup: ret.lookup,
            header: ret.header,
            data: EventStrongData {
                meta: ret.data.meta,
                format: ret.data.format,
                data_bytes: match ret.data.data_bytes {
                    MessageBytes::Some(a) => Some(a),
                    MessageBytes::LazySome(l) => {
                        // Resolve the lazy record via the pipe.
                        let data = self.pipe.load_many(vec![ l.record ]).await?;
                        match data.into_iter().next() {
                            Some(Some(a)) => Some(a),
                            Some(None) => {
                                bail!(LoadErrorKind::MissingData)
                            },
                            _ => {
                                bail!(LoadErrorKind::Disconnected)
                            },
                        }
                    },
                    MessageBytes::None => None,
                }
            },
            leaf: ret.leaf,
        };
        Ok(ret)
    }
    /// Loads a batch of events. If any payload is lazy, a single pipe
    /// round-trip resolves them; the pipe result is walked in the same order
    /// as the requested leafs. Any still-lazy payload afterwards is an error.
    pub async fn load_many(&self, leafs: Vec<EventLeaf>) -> Result<Vec<LoadStrongResult>, LoadError> {
        let mut data = self.inside_async.read().await.chain.load_many(leafs.clone()).await?;
        let lazy = data.iter().any(|r| r.data.data_bytes.is_lazy());
        if lazy {
            let leafs = leafs.into_iter().map(|l| l.record).collect();
            let mut other = self.pipe.load_many(leafs).await?.into_iter();
            for ret in data.iter_mut() {
                // Iterators advance together so slot i matches leaf i.
                let other = other.next();
                if ret.data.data_bytes.is_lazy() {
                    ret.data.data_bytes = match other {
                        Some(Some(a)) => MessageBytes::Some(a),
                        Some(None) => {
                            bail!(LoadErrorKind::MissingData)
                        },
                        _ => {
                            bail!(LoadErrorKind::Disconnected)
                        },
                    }
                }
            }
        }
        let mut rets = Vec::new();
        for l in data {
            rets.push(
                LoadStrongResult {
                    lookup: l.lookup,
                    header: l.header,
                    data: EventStrongData {
                        meta: l.data.meta,
                        format: l.data.format,
                        data_bytes: match l.data.data_bytes {
                            MessageBytes::Some(a) => Some(a),
                            MessageBytes::LazySome(_) => {
                                bail!(LoadErrorKind::MissingData)
                            },
                            MessageBytes::None => None,
                        }
                    },
                    leaf: l.leaf,
                }
            );
        }
        Ok(rets)
    }
    /// Looks up the event leaf for a primary key.
    pub async fn lookup_primary(&self, key: &PrimaryKey) -> Option<EventLeaf> {
        self.inside_async.read().await.chain.lookup_primary(key)
    }
    /// Looks up all event leafs belonging to a secondary (collection) key.
    pub async fn lookup_secondary(&self, key: &MetaCollection) -> Option<Vec<EventLeaf>> {
        self.inside_async.read().await.chain.lookup_secondary(key)
    }
    /// Looks up the raw primary keys belonging to a secondary key.
    pub async fn lookup_secondary_raw(&self, key: &MetaCollection) -> Option<Vec<PrimaryKey>> {
        self.inside_async
            .read()
            .await
            .chain
            .lookup_secondary_raw(key)
    }
    /// Looks up the parent reference of a primary key.
    pub async fn lookup_parent(&self, key: &PrimaryKey) -> Option<MetaParent> {
        self.inside_async.read().await.chain.lookup_parent(key)
    }
    /// All root keys currently known to the chain.
    pub async fn roots_raw(&self) -> Vec<PrimaryKey> {
        self.inside_async
            .read()
            .await
            .chain
            .roots_raw()
    }
    // Delegates to ChainProtectedSync::metadata_lint_many under the sync lock.
    #[allow(dead_code)]
    pub(crate) fn metadata_lint_many<'a>(
        &self,
        lints: &Vec<LintData<'a>>,
        session: &'_ dyn AteSession,
        conversation: Option<&Arc<ConversationSession>>,
    ) -> Result<Vec<CoreMetadata>, LintError> {
        let guard = self.inside_sync.read().unwrap();
        guard.metadata_lint_many(lints, session, conversation)
    }
    // Delegates to ChainProtectedSync::metadata_lint_event under the sync lock.
    #[allow(dead_code)]
    pub(crate) fn metadata_lint_event(
        &self,
        meta: &mut Metadata,
        session: &'_ dyn AteSession,
        trans_meta: &TransactionMetadata,
        type_code: &str,
    ) -> Result<Vec<CoreMetadata>, LintError> {
        let guard = self.inside_sync.read().unwrap();
        guard.metadata_lint_event(meta, session, trans_meta, type_code)
    }
    // Delegates to ChainProtectedSync::data_as_overlay under the sync lock.
    #[allow(dead_code)]
    pub(crate) fn data_as_overlay(
        &self,
        meta: &Metadata,
        data: Bytes,
        session: &'_ dyn AteSession,
    ) -> Result<Bytes, TransformError> {
        let guard = self.inside_sync.read().unwrap();
        guard.data_as_overlay(meta, data, session)
    }
    // Delegates to ChainProtectedSync::data_as_underlay under the sync lock.
    #[allow(dead_code)]
    pub(crate) fn data_as_underlay(
        &self,
        meta: &mut Metadata,
        data: Bytes,
        session: &'_ dyn AteSession,
        trans_meta: &TransactionMetadata,
    ) -> Result<Bytes, TransformError> {
        let guard = self.inside_sync.read().unwrap();
        guard.data_as_underlay(meta, data, session, trans_meta)
    }
    /// Number of entries in the redo log.
    pub async fn count(&self) -> usize {
        self.inside_async.read().await.chain.redo.count()
    }
    /// Takes read locks over both halves of the chain state at once.
    pub(crate) async fn lock<'a>(&'a self) -> ChainMultiUserLock<'a> {
        ChainMultiUserLock {
            inside_async: self.inside_async.read().await,
            inside_sync: self.inside_sync.read().unwrap(),
        }
    }
    /// Flushes the chain end-to-end with a default 30 second timeout.
    pub async fn sync(&self) -> Result<(), CommitError> {
        let timeout = Duration::from_secs(30);
        self.sync_ext(timeout).await
    }
    /// Flushes the chain by pushing an empty full-scope transaction through
    /// the pipe and waiting (up to `timeout`) for it to be processed.
    pub async fn sync_ext(&self, timeout: Duration) -> Result<(), CommitError> {
        // Create the transaction
        let trans = Transaction {
            scope: TransactionScope::Full,
            transmit: true,
            events: Vec::new(),
            timeout,
            conversation: None,
        };
        let work = ChainWork { trans };
        // Process the transaction in the chain using its pipe
        self.pipe.feed(work).await?;
        Ok(())
    }
}
impl ChainProtectedSync {
    /// Runs every linter then every plugin over the batch, concatenating the
    /// metadata each one emits.
    pub(crate) fn metadata_lint_many<'a>(
        &self,
        lints: &Vec<LintData<'a>>,
        session: &'_ dyn AteSession,
        conversation: Option<&Arc<ConversationSession>>,
    ) -> Result<Vec<CoreMetadata>, LintError> {
        let mut collected = Vec::new();
        for linter in &self.linters {
            collected.extend(linter.metadata_lint_many(lints, session, conversation)?);
        }
        for plugin in &self.plugins {
            collected.extend(plugin.metadata_lint_many(lints, session, conversation)?);
        }
        Ok(collected)
    }
    /// Runs every linter then every plugin over a single event, collecting
    /// the metadata they contribute.
    pub(crate) fn metadata_lint_event(
        &self,
        meta: &mut Metadata,
        session: &'_ dyn AteSession,
        trans_meta: &TransactionMetadata,
        type_code: &str,
    ) -> Result<Vec<CoreMetadata>, LintError> {
        let mut collected = Vec::new();
        for linter in &self.linters {
            collected.extend(linter.metadata_lint_event(meta, session, trans_meta, type_code)?);
        }
        for plugin in &self.plugins {
            collected.extend(plugin.metadata_lint_event(meta, session, trans_meta, type_code)?);
        }
        Ok(collected)
    }
    /// Unwinds the underlay stages: plugins first, then transformers, each
    /// walked back-to-front (the reverse of `data_as_underlay`).
    pub(crate) fn data_as_overlay(
        &self,
        meta: &Metadata,
        data: Bytes,
        session: &'_ dyn AteSession,
    ) -> Result<Bytes, TransformError> {
        let mut payload = data;
        for plugin in self.plugins.iter().rev() {
            payload = plugin.data_as_overlay(meta, payload, session)?;
        }
        for transformer in self.transformers.iter().rev() {
            payload = transformer.data_as_overlay(meta, payload, session)?;
        }
        Ok(payload)
    }
    /// Applies the underlay stages in forward order: transformers first,
    /// then plugins.
    pub(crate) fn data_as_underlay(
        &self,
        meta: &mut Metadata,
        data: Bytes,
        session: &'_ dyn AteSession,
        trans_meta: &TransactionMetadata,
    ) -> Result<Bytes, TransformError> {
        let mut payload = data;
        for transformer in &self.transformers {
            payload = transformer.data_as_underlay(meta, payload, session, trans_meta)?;
        }
        for plugin in &self.plugins {
            payload = plugin.data_as_underlay(meta, payload, session, trans_meta)?;
        }
        Ok(payload)
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/transaction.rs | lib/src/transaction.rs | #![allow(unused_imports)]
use crate::meta::MetaParent;
use fxhash::FxHashMap;
use fxhash::FxHashSet;
use rcu_cell::RcuCell;
use std::sync::Arc;
use std::sync::RwLock as StdRwLock;
use std::time::Duration;
use tokio::sync::mpsc;
use super::crypto::AteHash;
use super::error::*;
use super::event::*;
use super::header::*;
use super::mesh::MeshSession;
use super::meta::*;
use super::trust::*;
/// Represents the scope of `Dio` transaction for all the data
/// it is gathering up locally. Once the user calls the `commit`
/// method it will push the data into the redo log following one
/// of the behaviours defined in this enum.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TransactionScope {
    /// The thread will not wait for any data storage confirmation
    #[allow(dead_code)]
    None,
    /// Data must be flushed to local disk
    #[allow(dead_code)]
    Local,
    /// The data must be flushed to the root server disks before the event is considered processed
    #[allow(dead_code)]
    Full,
}
impl std::fmt::Display for TransactionScope {
    /// Renders the scope as its lowercase keyword.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(match self {
            TransactionScope::None => "none",
            TransactionScope::Local => "local",
            TransactionScope::Full => "full",
        })
    }
}
/// Shared state tracking an ongoing conversation with a peer.
#[derive(Debug, Default)]
pub struct ConversationSession {
    // Identity negotiated for this conversation, if established.
    pub id: RcuCell<AteHash>,
    // When set, validation requirements are relaxed for this conversation.
    pub weaken_validation: bool,
    // Signature hashes already observed during this conversation.
    pub signatures: StdRwLock<FxHashSet<AteHash>>,
}
impl ConversationSession {
    /// Resets the conversation: drops the negotiated id and forgets all
    /// signatures seen so far.
    ///
    /// NOTE(review): if the RcuCell is contended, `try_lock` fails and the
    /// id is silently left in place while the signatures are still cleared —
    /// confirm this partial reset is acceptable.
    pub fn clear(&self) {
        if let Some(mut guard) = self.id.try_lock() {
            guard.update(None);
        }
        let mut guard = self.signatures.write().unwrap();
        guard.clear();
    }
}
/// A unit of work pushed into the redo log when a `Dio` commits.
#[derive(Debug, Clone)]
pub(crate) struct Transaction {
    // How far the commit must be confirmed before it is considered done.
    pub(crate) scope: TransactionScope,
    // Whether the events should also be transmitted to peers.
    pub(crate) transmit: bool,
    pub(crate) events: Vec<EventWeakData>,
    // How long to wait for confirmation before giving up.
    pub(crate) timeout: Duration,
    pub(crate) conversation: Option<Arc<ConversationSession>>,
}
impl Transaction {
#[allow(dead_code)]
pub(crate) fn from_events(
events: Vec<EventWeakData>,
scope: TransactionScope,
transmit: bool,
timeout: Duration,
) -> Transaction {
Transaction {
scope,
transmit,
events,
timeout,
conversation: None,
}
}
}
/// Per-transaction bookkeeping gathered while events are being prepared.
#[derive(Debug, Clone, Default)]
pub struct TransactionMetadata {
    // Authorization records keyed by the object they apply to.
    pub auth: FxHashMap<PrimaryKey, MetaAuthorization>,
    // Parent links keyed by the child object.
    pub parents: FxHashMap<PrimaryKey, MetaParent>,
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/spec.rs | lib/src/spec.rs | use num_enum::IntoPrimitive;
use num_enum::TryFromPrimitive;
use serde::{Deserialize, Serialize};
use std::convert::TryFrom;
use super::error::*;
use super::crypto::AteHash;
use async_trait::async_trait;
use tokio::io::ErrorKind;
pub use ate_crypto::SerializationFormat;
/// Pair of serialization formats used for an event: one for its metadata
/// and one for its data payload.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct MessageFormat {
    pub meta: SerializationFormat,
    pub data: SerializationFormat,
}
impl std::fmt::Display for MessageFormat {
    /// Renders as `meta=<fmt>, data=<fmt>`.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "meta={}", self.meta)?;
        write!(f, ", data={}", self.data)
    }
}
/// Which side of a centralized trust relationship a node plays.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum CentralizedRole {
    Server,
    Client,
}
impl std::fmt::Display for CentralizedRole {
    /// Renders the role as its lowercase keyword.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        let text = match self {
            CentralizedRole::Server => "server",
            CentralizedRole::Client => "client",
        };
        f.write_str(text)
    }
}
/// Trust model a chain operates under: fully distributed, or centralized
/// with an explicit server/client role.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub enum TrustMode {
    Distributed,
    Centralized(CentralizedRole),
}
impl TrustMode {
    /// True when the chain uses a centralized trust model (either role).
    pub fn is_centralized(&self) -> bool {
        // matches! replaces the previous manual if-let-to-bool pattern.
        matches!(self, TrustMode::Centralized(_))
    }
    /// Same mode with any centralized role switched to client;
    /// `Distributed` is returned unchanged.
    pub fn as_client(&self) -> TrustMode {
        match self {
            TrustMode::Centralized(_) => TrustMode::Centralized(CentralizedRole::Client),
            // TrustMode is Copy, so deref instead of the former clone().
            a => *a,
        }
    }
    /// Same mode with any centralized role switched to server;
    /// `Distributed` is returned unchanged.
    pub fn as_server(&self) -> TrustMode {
        match self {
            TrustMode::Centralized(_) => TrustMode::Centralized(CentralizedRole::Server),
            a => *a,
        }
    }
}
impl std::fmt::Display for TrustMode {
    /// Renders `distributed` or `centralized(<role>)`.
    fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
        match self {
            TrustMode::Distributed => write!(f, "distributed"),
            TrustMode::Centralized(role) => write!(f, "centralized({})", role),
        }
    }
}
impl std::str::FromStr for TrustMode {
    type Err = &'static str;
    /// Parses the textual form produced by `Display`; the bare
    /// "centralized" shorthand defaults to the server role.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s {
            "distributed" => Ok(TrustMode::Distributed),
            "centralized" | "centralized(server)" => {
                Ok(TrustMode::Centralized(CentralizedRole::Server))
            }
            "centralized(client)" => Ok(TrustMode::Centralized(CentralizedRole::Client)),
            _ => Err("valid values are 'distributed', 'centralized'"),
        }
    }
}
/// On-disk tag describing how many bytes encode a blob's length.
#[derive(Debug, Clone, Copy, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[repr(u8)]
pub enum BlobSize {
    U8 = 1,
    U16 = 2,
    U32 = 3,
    U64 = 4,
}
// Marker bytes that prefix every event record in the log.
static LOG_MAGIC: &'static [u8; 3] = b"Ate";
/// Low-level random-access primitive the log serializer reads and writes
/// through. Multi-byte values use a fixed integer encoding supplied by the
/// implementor.
#[async_trait]
pub trait LogApi {
    /// Current cursor position in bytes.
    fn offset(&self) -> u64;
    /// Total length of the underlying log in bytes.
    async fn len(&self) -> Result<u64, tokio::io::Error>;
    /// Moves the cursor to an absolute offset.
    async fn seek(&mut self, off: u64) -> Result<(), tokio::io::Error>;
    async fn read_u8(&mut self) -> Result<u8, tokio::io::Error>;
    async fn read_u16(&mut self) -> Result<u16, tokio::io::Error>;
    async fn read_u32(&mut self) -> Result<u32, tokio::io::Error>;
    async fn read_u64(&mut self) -> Result<u64, tokio::io::Error>;
    /// Fills the whole buffer or fails.
    async fn read_exact(&mut self, buf: &mut [u8]) -> Result<(), tokio::io::Error>;
    async fn write_u8(&mut self, val: u8) -> Result<(), tokio::io::Error>;
    async fn write_u16(&mut self, val: u16) -> Result<(), tokio::io::Error>;
    async fn write_u32(&mut self, val: u32) -> Result<(), tokio::io::Error>;
    async fn write_u64(&mut self, val: u64) -> Result<(), tokio::io::Error>;
    /// Writes the whole buffer or fails.
    async fn write_exact(&mut self, buf: &[u8]) -> Result<(), tokio::io::Error>;
    /// Flushes pending writes to durable storage.
    async fn sync(&mut self) -> Result<(), tokio::io::Error>;
}
/// Location and serialization formats of one entry in the log.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub struct LogHeader {
    // Byte offset of the entry's magic marker within the log.
    pub offset: u64,
    pub format: MessageFormat,
}
/// Addresses an entry by log-file index plus byte offset within that file.
#[derive(Serialize, Deserialize, Debug, Clone, Copy, Hash, PartialEq, Eq, PartialOrd, Ord)]
pub struct LogLookup {
    pub index: u32,
    pub offset: u64,
}
/// Reference to a payload that is stored elsewhere and fetched on demand.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct LazyData
{
    // Record hash used to fetch the payload.
    pub record: AteHash,
    // Hash of the payload contents themselves.
    pub hash: AteHash,
    // Payload length in bytes.
    pub len: usize,
}
/// Payload of a log entry: inline bytes, a lazy reference, or nothing.
#[derive(Serialize, Deserialize, Debug, Clone)]
pub enum LogData
{
    Some(Vec<u8>),
    LazySome(LazyData),
    None,
}
/// Borrowed view of a `LogData` payload.
#[derive(Debug, Clone)]
pub enum LogDataRef<'a>
{
    Some(&'a Vec<u8>),
    LazySome(&'a LazyData),
    None,
}
impl LogData
{
    /// True when no payload is carried.
    pub fn is_none(&self) -> bool {
        // `!` replaces the former `== false` comparison.
        !self.is_some()
    }
    /// True when a payload is carried, either inline or lazily referenced.
    pub fn is_some(&self) -> bool {
        matches!(self, LogData::Some(_) | LogData::LazySome(_))
    }
    /// Borrowed view of the payload.
    pub const fn as_ref<'a>(&'a self) -> LogDataRef<'a> {
        match self {
            LogData::Some(ref a) => LogDataRef::Some(a),
            LogData::LazySome(a) => LogDataRef::LazySome(a),
            LogData::None => LogDataRef::None,
        }
    }
    /// Borrows the inline payload; lazy references yield `None`.
    pub const fn as_option<'a>(&'a self) -> Option<&'a Vec<u8>> {
        match *self {
            LogData::Some(ref a) => Some(a),
            _ => None
        }
    }
    /// Consumes self and returns the inline payload; lazy references
    /// yield `None`.
    pub fn to_option(self) -> Option<Vec<u8>> {
        match self {
            LogData::Some(a) => Some(a),
            _ => None
        }
    }
    /// Hash of the payload: computed on the fly for inline data, taken from
    /// the record for lazy data.
    pub fn hash(&self) -> Option<AteHash> {
        match self {
            LogData::Some(a) => Some(AteHash::from_bytes(&a[..])),
            LogData::LazySome(l) => Some(l.hash.clone()),
            LogData::None => None,
        }
    }
    /// Payload length in bytes (0 when there is none).
    pub fn size(&self) -> usize {
        match self {
            LogData::Some(a) => a.len(),
            LogData::LazySome(l) => l.len,
            LogData::None => 0usize,
        }
    }
}
/// One fully decoded record from the log.
#[derive(Debug, Clone)]
pub struct LogEntry {
    pub header: LogHeader,
    // Serialized metadata bytes (format given by header.format.meta).
    pub meta: Vec<u8>,
    pub data: LogData,
}
/// Wire-format version byte that follows the "Ate" magic marker.
#[derive(Debug, Clone, Copy, Eq, PartialEq, IntoPrimitive, TryFromPrimitive)]
#[repr(u8)]
pub enum EventVersion {
    /*
    #[deprecated(
        since = "0.3.0",
        note = "This message format is deprecated and will be removed in a future release."
    )]
    V1 = b'!',
    */
    V2 = b'1',
}
impl EventVersion {
    // Reads one byte, mapping clean EOF to Ok(None) so callers can treat
    // end-of-log as a normal stop condition rather than an error.
    async fn read_byte(
        api: &mut impl LogApi,
    ) -> std::result::Result<Option<u8>, SerializationError> {
        match api.read_u8().await {
            Ok(a) => Ok(Some(a)),
            Err(err) => {
                if err.kind() == ErrorKind::UnexpectedEof {
                    return Ok(None);
                }
                Err(SerializationErrorKind::IO(tokio::io::Error::new(
                    tokio::io::ErrorKind::Other,
                    format!(
                        "Failed to read the event magic number at 0x{:x}",
                        api.offset()
                    ),
                ))
                .into())
            }
        }
    }
    // Scans forward for the "Ate" magic marker and decodes the version byte
    // that follows it. `n` counts how many magic bytes have matched so far;
    // on a mismatch the same byte is re-examined with n reset to 0, which
    // resynchronizes the scan after garbage or a partial match.
    async fn read_version(
        api: &mut impl LogApi,
    ) -> std::result::Result<Option<EventVersion>, SerializationError> {
        let mut n = 0;
        while let Some(cur) = EventVersion::read_byte(api).await? {
            loop {
                if n < LOG_MAGIC.len() {
                    if cur == LOG_MAGIC[n] {
                        n = n + 1;
                        break;
                    }
                    if n > 0 {
                        n = 0;
                        continue;
                    }
                    break;
                }
                // Full magic matched: the next byte names the version. An
                // unknown version byte restarts the magic scan on this byte.
                match EventVersion::try_from(cur) {
                    Ok(a) => {
                        return Ok(Some(a));
                    }
                    _ => {
                        n = 0;
                        continue;
                    }
                }
            }
        }
        return Ok(None);
    }
    // Reads a length-prefixed size: one BlobSize tag byte followed by an
    // integer of the indicated width.
    async fn read_blob_size(&self, api: &mut impl LogApi) -> Result<usize, SerializationError> {
        match self {
            EventVersion::V2 => match BlobSize::try_from(api.read_u8().await?) {
                Ok(BlobSize::U8) => Ok(api.read_u8().await? as usize),
                Ok(BlobSize::U16) => Ok(api.read_u16().await? as usize),
                Ok(BlobSize::U32) => Ok(api.read_u32().await? as usize),
                Ok(BlobSize::U64) => Ok(api.read_u64().await? as usize),
                Err(err) => Err(SerializationErrorKind::IO(tokio::io::Error::new(
                    tokio::io::ErrorKind::Other,
                    format!("Failed to read data at 0x{:x} - {}", api.offset(), err),
                ))
                .into()),
            },
        }
    }
    // Writes a size with the smallest width that can represent it (values
    // equal to a type's MAX are promoted to the next width).
    // NOTE(review): the final `_ => BlobSize::U32` arm is reachable only for
    // val == u64::MAX and would truncate; it looks like it should be U64 —
    // confirm before relying on sizes that large.
    async fn write_blob_size(
        &self,
        api: &mut impl LogApi,
        val: usize,
    ) -> Result<(), SerializationError> {
        match self {
            EventVersion::V2 => {
                let blob_size = match val {
                    _ if val < u8::MAX as usize => BlobSize::U8,
                    _ if val < u16::MAX as usize => BlobSize::U16,
                    _ if val < u32::MAX as usize => BlobSize::U32,
                    _ if val < u64::MAX as usize => BlobSize::U64,
                    _ => BlobSize::U32,
                };
                api.write_u8(blob_size.into()).await?;
                match blob_size {
                    BlobSize::U8 => Ok(api.write_u8(val as u8).await?),
                    BlobSize::U16 => Ok(api.write_u16(val as u16).await?),
                    BlobSize::U32 => Ok(api.write_u32(val as u32).await?),
                    BlobSize::U64 => Ok(api.write_u64(val as u64).await?),
                }
            }
        }
    }
    // Reads a one-byte serialization-format tag.
    async fn read_format(
        &self,
        api: &mut impl LogApi,
    ) -> Result<SerializationFormat, SerializationError> {
        match SerializationFormat::try_from(api.read_u8().await?) {
            Ok(a) => Ok(a),
            Err(_) => Err(SerializationErrorKind::InvalidSerializationFormat.into()),
        }
    }
    // Writes a one-byte serialization-format tag.
    async fn write_format(
        &self,
        api: &mut impl LogApi,
        format: SerializationFormat,
    ) -> Result<(), SerializationError> {
        match self {
            EventVersion::V2 => match api.write_u8(format.into()).await {
                Ok(_) => Ok(()),
                Err(err) => Err(SerializationErrorKind::IO(tokio::io::Error::new(
                    tokio::io::ErrorKind::Other,
                    format!("Failed to write data at 0x{:x} - {}", api.offset(), err),
                ))
                .into()),
            },
        }
    }
    /// Reads the next entry from the log, or `None` at end of log.
    ///
    /// Wire layout after the "Ate" magic + version byte:
    /// [meta format][meta size][meta bytes][data format][data size][data bytes]
    /// A data size of zero decodes to `LogData::None`.
    pub async fn read(api: &mut impl LogApi) -> Result<Option<LogEntry>, SerializationError> {
        let offset = api.offset();
        let version = match Self::read_version(api).await? {
            Some(a) => a,
            None => {
                return Ok(None);
            }
        };
        let format_meta = version.read_format(api).await?;
        let meta_size = version.read_blob_size(api).await?;
        let mut meta = vec![0 as u8; meta_size];
        api.read_exact(&mut meta[..]).await?;
        let format_data = version.read_format(api).await?;
        let data_size = version.read_blob_size(api).await?;
        let data = if data_size > 0 {
            let mut data = vec![0 as u8; data_size];
            api.read_exact(&mut data[..]).await?;
            Some(data)
        } else {
            None
        };
        Ok(Some(LogEntry {
            header: LogHeader {
                offset,
                format: MessageFormat {
                    meta: format_meta,
                    data: format_data,
                },
            },
            meta,
            data: match data {
                Some(a) => LogData::Some(a),
                None => LogData::None,
            },
        }))
    }
    /// Writes one entry in the layout documented on `read`; a missing data
    /// payload is encoded as a zero size.
    pub async fn write(
        &self,
        api: &mut impl LogApi,
        meta: &[u8],
        data: Option<&[u8]>,
        format: MessageFormat,
    ) -> Result<LogHeader, SerializationError> {
        let offset = api.offset();
        api.write_exact(&LOG_MAGIC[..]).await?;
        api.write_u8((*self).into()).await?;
        self.write_format(api, format.meta).await?;
        self.write_blob_size(api, meta.len()).await?;
        api.write_exact(&meta[..]).await?;
        self.write_format(api, format.data).await?;
        match data {
            Some(data) => {
                self.write_blob_size(api, data.len()).await?;
                api.write_exact(&data[..]).await?;
            }
            None => {
                self.write_blob_size(api, 0).await?;
            }
        };
        Ok(LogHeader { offset, format })
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/plugin.rs | lib/src/plugin.rs | use std::sync::Arc;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::lint::EventMetadataLinter;
use crate::transform::EventDataTransformer;
use super::crypto::*;
use super::error::*;
use super::event::*;
use super::sink::*;
use super::transaction::ConversationSession;
use super::validator::*;
pub trait EventPlugin
where
Self: EventValidator + EventSink + EventMetadataLinter + EventDataTransformer + Send + Sync,
{
fn rebuild(
&mut self,
headers: &Vec<EventHeader>,
conversation: Option<&Arc<ConversationSession>>,
) -> Result<(), SinkError> {
self.reset();
for header in headers {
match self.feed(header, conversation) {
Ok(_) => {}
Err(err) => {
debug!("feed error: {}", err);
}
}
}
Ok(())
}
fn clone_plugin(&self) -> Box<dyn EventPlugin>;
fn root_keys(&self) -> Vec<PublicSignKey> {
Vec::new()
}
fn set_root_keys(&mut self, _root_keys: &Vec<PublicSignKey>) {}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/chain/protected_async.rs | lib/src/chain/protected_async.rs | use error_chain::bail;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::error::*;
use crate::event::*;
use crate::redo::LogWritable;
use crate::transaction::*;
use fxhash::FxHashSet;
use multimap::MultiMap;
use std::ops::*;
use std::sync::Arc;
use std::sync::RwLock as StdRwLock;
use std::sync::RwLockWriteGuard as StdRwLockWriteGuard;
use std::time::Duration;
use tokio::sync::RwLock;
use crate::meta::*;
use crate::spec::*;
use crate::time::*;
use crate::trust::*;
use super::*;
#[derive(Debug)]
pub(crate) struct ChainProtectedAsync {
pub(crate) chain: ChainOfTrust,
pub(crate) default_format: MessageFormat,
pub(crate) disable_new_roots: bool,
pub(crate) sync_tolerance: Duration,
pub(crate) listeners: MultiMap<MetaCollection, ChainListener>,
pub(crate) is_shutdown: bool,
pub(crate) integrity: TrustMode,
}
impl ChainProtectedAsync {
pub(super) fn process(
&mut self,
mut sync: StdRwLockWriteGuard<ChainProtectedSync>,
headers: Vec<EventHeader>,
conversation: Option<&Arc<ConversationSession>>,
) -> Result<(), ChainCreationError> {
let mut ret = ProcessError::default();
for header in headers.into_iter() {
if let Result::Err(err) = sync.validate_event(&header, conversation) {
ret.validation_errors.push(err);
}
for indexer in sync.indexers.iter_mut() {
if let Err(err) = indexer.feed(&header, conversation) {
ret.sink_errors.push(err);
}
}
for plugin in sync.plugins.iter_mut() {
if let Err(err) = plugin.feed(&header, conversation) {
ret.sink_errors.push(err);
}
}
self.chain.add_history(header);
}
Ok(ret.as_result()?)
}
pub(crate) async fn feed_meta_data(
&mut self,
sync: &Arc<StdRwLock<ChainProtectedSync>>,
meta: Metadata,
) -> Result<(), CommitError> {
let data = EventWeakData {
meta,
data_bytes: MessageBytes::None,
format: MessageFormat {
meta: SerializationFormat::Json,
data: SerializationFormat::Json,
},
};
let evts = vec![data];
self.feed_async_internal(sync, &evts, None).await
}
pub(super) async fn feed_async_internal(
&mut self,
sync: &Arc<StdRwLock<ChainProtectedSync>>,
evts: &Vec<EventWeakData>,
conversation: Option<&Arc<ConversationSession>>,
) -> Result<(), CommitError> {
let mut errors = Vec::new();
let mut validated_evts = Vec::new();
{
let mut sync = sync.write().unwrap();
for evt in evts.iter() {
let header = evt.as_header()?;
#[cfg(feature = "enable_verbose")]
trace!(
"chain::evt[key={}]",
header
.meta
.get_data_key()
.map_or_else(|| "none".to_string(), |h| h.to_string())
);
match sync.validate_event(&header, conversation) {
Err(err) => {
#[cfg(feature = "enable_verbose")]
debug!("chain::feed-validation-err: {}", err);
errors.push(err);
continue;
}
_ => {}
}
for indexer in sync.indexers.iter_mut() {
indexer.feed(&header, conversation)?;
}
for plugin in sync.plugins.iter_mut() {
plugin.feed(&header, conversation)?;
}
validated_evts.push((evt, header));
}
}
for (evt, mut header) in validated_evts.into_iter() {
if self.integrity.is_centralized() {
header.meta.strip_signatures();
header.meta.strip_public_keys();
}
if header.is_empty() == false {
let _lookup = self.chain.redo.write(evt).await?;
self.chain.add_history(header);
}
}
if errors.len() > 0 {
if errors.len() == 1 {
let err = errors.into_iter().next().unwrap();
bail!(CommitErrorKind::ValidationError(err.0));
}
bail!(CommitErrorKind::ValidationError(ValidationErrorKind::Many(
errors
)));
}
Ok(())
}
pub fn range<'a, R>(
&'a self,
range: R,
) -> impl DoubleEndedIterator<Item = (&'a ChainTimestamp, &'a EventHeaderRaw)>
where
R: RangeBounds<ChainTimestamp>,
{
self.chain.timeline.history.range(range)
}
pub fn range_keys<'a, R>(
&'a self,
range: R,
) -> impl DoubleEndedIterator<Item = ChainTimestamp> + 'a
where
R: RangeBounds<ChainTimestamp>,
{
let mut ret = self.range(range).map(|e| e.0).collect::<Vec<_>>();
ret.dedup();
ret.into_iter().map(|a| a.clone())
}
#[allow(dead_code)]
pub fn range_values<'a, R>(
&'a self,
range: R,
) -> impl DoubleEndedIterator<Item = &'a EventHeaderRaw>
where
R: RangeBounds<ChainTimestamp>,
{
self.range(range).map(|e| e.1)
}
pub(crate) async fn notify(lock: Arc<RwLock<ChainProtectedAsync>>, evts: Vec<EventWeakData>) {
// Build a map of event parents that will be used in the BUS notifications
let mut notify_map = MultiMap::new();
for evt in evts {
if let Some(parent) = evt.meta.get_parent() {
notify_map.insert(parent.vec.clone(), evt);
}
}
let mut to_remove = MultiMap::new();
if notify_map.is_empty() == false {
{
// Push the events to all the listeners
let lock = lock.read().await;
for (k, v) in notify_map {
if let Some(targets) = lock.listeners.get_vec(&k) {
for target in targets {
for evt in v.iter() {
match target.sender.send(evt.clone()).await {
Ok(()) => {}
Err(_) => {
to_remove.insert(k.clone(), target.id);
break;
}
}
}
}
}
}
}
// If any listeners have disconnected then remove them
if to_remove.is_empty() == false {
let mut lock = lock.write().await;
for (k, to_remove) in to_remove {
let to_remove = to_remove.into_iter().collect::<FxHashSet<u64>>();
if let Some(targets) = lock.listeners.get_vec_mut(&k) {
targets.retain(|a| to_remove.contains(&a.id) == false);
}
}
}
}
}
pub fn set_integrity_mode(&mut self, mode: TrustMode) {
debug!("switching to {}", mode);
self.integrity = mode;
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/chain/compact.rs | lib/src/chain/compact.rs | use btreemultimap::BTreeMultiMap;
use std::sync::Arc;
use std::sync::RwLock as StdRwLock;
use tokio::sync::RwLock;
#[allow(unused_imports)]
use tracing::{debug, error, info, trace, warn};
use super::*;
use crate::compact::*;
use crate::error::*;
use crate::index::*;
use crate::multi::ChainMultiUser;
use crate::pipe::EventPipe;
use crate::redo::*;
use crate::session::*;
use crate::single::ChainSingleUser;
use crate::spec::*;
use crate::time::*;
use crate::transaction::*;
use crate::trust::*;
impl<'a> Chain {
pub async fn compact(self: &'a Chain) -> Result<(), CompactError> {
Chain::compact_ext(
Arc::clone(&self.inside_async),
Arc::clone(&self.inside_sync),
Arc::clone(&self.pipe),
Arc::clone(&self.time),
)
.await
}
pub(crate) async fn compact_ext(
inside_async: Arc<RwLock<ChainProtectedAsync>>,
inside_sync: Arc<StdRwLock<ChainProtectedSync>>,
pipe: Arc<Box<dyn EventPipe>>,
time: Arc<TimeKeeper>,
) -> Result<(), CompactError> {
// Compacting requires an accure time
time.wait_for_high_accuracy().await;
// compute a cut-off using the current time and the sync tolerance
let cut_off = {
let guard = inside_async.read().await;
let key = guard.chain.key.to_string();
// Compute the minimum cut off which is whatever is recorded in the header
// as otherwise the repeated compaction would reload data
let min_cut_off = guard.chain.redo.read_chain_header()?.cut_off;
// The maximum cut off is to prevent very recent events from being lost
// due to a compaction which creates a hard cut off while events are still
// being streamed
let max_cut_off = time.current_timestamp()?.time_since_epoch_ms
- guard.sync_tolerance.as_millis() as u64;
let max_cut_off = ChainTimestamp::from(max_cut_off);
debug!(
"compacting chain: {} min {} max {}",
key, min_cut_off, max_cut_off
);
// The cut-off can not be higher than the actual history
let mut end = guard.chain.timeline.end();
if end > ChainTimestamp::from(0u64) {
end = end.inc();
}
min_cut_off.max(max_cut_off.min(end))
};
// prepare
let mut new_timeline = ChainTimeline {
history: BTreeMultiMap::new(),
pointers: BinaryTreeIndexer::default(),
compactors: Vec::new(),
};
// create the flip
let mut flip = {
let mut single = ChainSingleUser::new_ext(&inside_async, &inside_sync).await;
// Build the header
let header = ChainHeader { cut_off };
let header_bytes = SerializationFormat::Json.serialize(&header)
.map_err(SerializationError::from)?;
// Now start the flip
let ret = single
.inside_async
.chain
.redo
.begin_flip(header_bytes)
.await?;
single.inside_async.chain.redo.flush().await?;
ret
};
{
let multi = ChainMultiUser::new_ext(&inside_async, &inside_sync, &pipe).await;
let guard_async = multi.inside_async.read().await;
// step0 - zip up the headers with keep status flags
let mut headers = guard_async
.chain
.timeline
.history
.iter()
.filter_map(|a| {
if let Some(header) = a.1.as_header().ok() {
Some((header, false))
} else {
None
}
})
.collect::<Vec<_>>();
let total = headers.len() as u64;
#[cfg(feature = "enable_super_verbose")]
{
debug!("step-p");
headers.iter().for_each(|a| debug!("=> [{}]", a.0.meta));
debug!("step0");
headers
.iter()
.for_each(|a| debug!("[{}]->{}", a.1, a.0.raw.event_hash));
}
// step1 - reset all the compactors
for compactor in &guard_async.chain.timeline.compactors {
if let Some(compactor) = compactor.clone_compactor() {
new_timeline.compactors.push(compactor);
}
}
// step2 - add a compactor that will add all events close to the current time within a particular
// tolerance as multi-consumers could be in need of these events
new_timeline
.compactors
.push(Box::new(CutOffCompactor::new(cut_off)));
// step3 - feed all the events into the compactors so they charged up and ready to make decisions
// (we keep looping until the keep status stops changing which means we have reached equilibrium)
loop {
let mut changed = false;
// We feed the events into the compactors in reverse order
for (header, keep) in headers.iter_mut().rev() {
for compactor in new_timeline.compactors.iter_mut() {
compactor.feed(&header, *keep);
}
}
// Next we update all the keep status flags and detect if the state changed at all
for (header, keep) in headers.iter_mut() {
let test =
crate::compact::compute_relevance(new_timeline.compactors.iter(), header);
if *keep != test {
*keep = test;
changed = true;
}
}
#[cfg(feature = "enable_super_verbose")]
{
debug!("step3");
headers
.iter()
.for_each(|a| debug!("[{}]->{}", a.1, a.0.raw.event_hash));
}
// If nother changed on this run then we have reached equilibrum
if changed == false {
break;
}
}
// step4 - create a fake sync that will be used by the validators
let mut sync = {
let guard_sync = multi.inside_sync.read().unwrap();
ChainProtectedSync {
sniffers: Vec::new(),
services: Vec::new(),
indexers: Vec::new(),
plugins: guard_sync
.plugins
.iter()
.map(|a| a.clone_plugin())
.collect::<Vec<_>>(),
linters: Vec::new(),
validators: guard_sync
.validators
.iter()
.map(|a| a.clone_validator())
.collect::<Vec<_>>(),
transformers: Vec::new(),
default_session: AteSessionUser::default().into(),
integrity: guard_sync.integrity,
}
};
sync.plugins.iter_mut().for_each(|a| a.reset());
// step5 - run all the validators over the events to make sure only a valid
// chain of trust will be stored
let mut conversation = ConversationSession::default();
conversation.weaken_validation = true;
let conversation = Arc::new(conversation);
for (header, keep) in headers.iter_mut().filter(|a| a.1) {
if let Ok(_err) = sync.validate_event(&header, Some(&conversation)) {
for plugin in sync.plugins.iter_mut() {
let _r = plugin.feed(&header, Some(&conversation));
#[cfg(feature = "enable_verbose")]
if let Err(_err) = _r {
debug!("err-while-compacting: {}", _err);
}
}
} else {
*keep = false;
}
}
#[cfg(feature = "enable_super_verbose")]
{
debug!("step5");
headers
.iter()
.for_each(|a| debug!("[{}]->{}", a.1, a.0.raw.event_hash));
}
// write the events out only loading the ones that are actually needed
let how_many_keepers = headers.iter().filter(|a| a.1).count();
debug!(
"compact: kept {} events of {} events for cut-off {}",
how_many_keepers, total, cut_off
);
// step6 - build a list of the events that are actually relevant to a compacted log
for header in headers.into_iter().filter(|a| a.1).map(|a| a.0) {
flip.event_summary.push(header.raw.clone());
let _lookup = flip
.copy_event(&guard_async.chain.redo, header.raw.event_hash)
.await?;
new_timeline.add_history(header);
}
}
// Opening this lock will prevent writes while we are flipping
let mut single = ChainSingleUser::new_ext(&inside_async, &inside_sync).await;
// finish the flips
debug!("compact: finished the flip");
let new_events = single
.inside_async
.chain
.redo
.finish_flip(flip, |_l, h| {
new_timeline.add_history(h);
})
.await?;
// complete the transaction under another lock
{
let mut lock = single.inside_sync.write().unwrap();
let new_events = new_events
.into_iter()
.map(|e| e.as_header())
.collect::<Result<Vec<_>, _>>()?;
// Flip all the indexes
let chain = &mut single.inside_async.chain;
chain.timeline = new_timeline;
debug!("compact: rebuilding indexes");
let conversation = Arc::new(ConversationSession::default());
for indexer in lock.indexers.iter_mut() {
indexer.rebuild(&new_events)?;
}
for plugin in lock.plugins.iter_mut() {
plugin.rebuild(&new_events, Some(&conversation))?;
}
}
// Flush the log again
single.inside_async.chain.flush().await?;
single.inside_async.chain.invalidate_caches();
// success
Ok(())
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/chain/inbox_pipe.rs | lib/src/chain/inbox_pipe.rs | use tokio::sync::broadcast;
use error_chain::bail;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use async_trait::async_trait;
use fxhash::FxHashSet;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use bytes::Bytes;
use tokio::sync::RwLock;
use super::workers::ChainWorkProcessor;
use crate::error::*;
use crate::event::MessageBytes;
use crate::header::PrimaryKey;
use crate::pipe::*;
use crate::transaction::*;
use crate::chain::ChainProtectedAsync;
use crate::crypto::AteHash;
use crate::index::*;
use super::workers::*;
pub(super) struct InboxPipe {
pub(super) inbox: ChainWorkProcessor,
pub(super) decache: broadcast::Sender<Vec<PrimaryKey>>,
pub(super) locks: StdMutex<FxHashSet<PrimaryKey>>,
pub(super) inside_async: Arc<RwLock<ChainProtectedAsync>>,
}
#[async_trait]
impl EventPipe for InboxPipe {
async fn feed(&self, work: ChainWork) -> Result<(), CommitError> {
// Prepare the work and submit it
let decache = work
.trans
.events
.iter()
.filter_map(|a| a.meta.get_data_key())
.collect::<Vec<_>>();
// Submit the work
let ret = self.inbox.process(work).await?;
// Clear all the caches
let _ = self.decache.send(decache);
// Success
Ok(ret)
}
async fn load_many(&self, leafs: Vec<AteHash>) -> Result<Vec<Option<Bytes>>, LoadError> {
let leafs = leafs
.into_iter()
.map(|r| EventLeaf {
record: r,
created: 0,
updated: 0
})
.collect();
let guard = self.inside_async.read().await;
let data = guard.chain.load_many(leafs).await?;
let mut ret = Vec::new();
for l in data {
ret.push(
match l.data.data_bytes {
MessageBytes::Some(a) => Some(a),
MessageBytes::LazySome(_) => {
bail!(LoadErrorKind::MissingData)
},
MessageBytes::None => None,
}
);
}
Ok(ret)
}
async fn prime(&self, records: Vec<(AteHash, Option<Bytes>)>) -> Result<(), CommsError> {
let mut guard = self.inside_async.write().await;
guard.chain.prime(records);
Ok(())
}
#[allow(dead_code)]
async fn try_lock(&self, key: PrimaryKey) -> Result<bool, CommitError> {
let mut guard = self.locks.lock().unwrap();
if guard.contains(&key) {
return Ok(false);
}
guard.insert(key.clone());
Ok(true)
}
#[allow(dead_code)]
fn unlock_local(&self, key: PrimaryKey) -> Result<(), CommitError> {
let mut guard = self.locks.lock().unwrap();
guard.remove(&key);
Ok(())
}
#[allow(dead_code)]
async fn unlock(&self, key: PrimaryKey) -> Result<(), CommitError> {
Ok(self.unlock_local(key)?)
}
fn set_next(&mut self, _next: Arc<Box<dyn EventPipe>>) {}
async fn conversation(&self) -> Option<Arc<ConversationSession>> {
None
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/chain/core.rs | lib/src/chain/core.rs | use std::sync::Mutex as StdMutex;
use std::time::Duration;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use derivative::*;
use crate::error::*;
use crate::comms::Metrics;
use crate::comms::NodeId;
use crate::comms::Throttle;
use crate::transaction::*;
use std::sync::Arc;
use std::sync::RwLock as StdRwLock;
use tokio::sync::broadcast;
use tokio::sync::RwLock;
use crate::conf::ConfAte;
use crate::conf::MeshAddress;
use crate::mesh::BackupMode;
use crate::meta::*;
use crate::multi::*;
use crate::pipe::*;
use crate::prelude::PrimaryKey;
use crate::redo::RedoLog;
use crate::single::*;
use crate::spec::*;
use crate::time::TimeKeeper;
use crate::transaction::TransactionScope;
use crate::trust::ChainHeader;
use crate::trust::ChainKey;
use super::*;
/// Represents the main API to access a specific chain-of-trust
///
/// This object must stay within scope for the duration of its
/// use which has been optimized for infrequent initialization as
/// creating this object will reload the entire chain's metadata
/// into memory.
///
/// The actual data of the chain is stored locally on disk thus
/// huge chains can be stored here however very random access on
/// large chains will result in random access IO on the disk.
///
/// Chains also allow subscribe/publish models to be applied to
/// particular vectors (see the examples for details)
///
#[derive(Derivative, Clone)]
#[derivative(Debug)]
pub struct Chain {
pub(crate) key: ChainKey,
#[allow(dead_code)]
pub(crate) node_id: NodeId,
pub(crate) cfg_ate: ConfAte,
pub(crate) remote: Option<url::Url>,
pub(crate) remote_addr: Option<MeshAddress>,
pub(crate) default_format: MessageFormat,
#[derivative(Debug = "ignore")]
pub(crate) inside_sync: Arc<StdRwLock<ChainProtectedSync>>,
pub(crate) inside_async: Arc<RwLock<ChainProtectedAsync>>,
#[derivative(Debug = "ignore")]
pub(crate) pipe: Arc<Box<dyn EventPipe>>,
pub(crate) time: Arc<TimeKeeper>,
pub(crate) exit: broadcast::Sender<()>,
pub(crate) decache: broadcast::Sender<Vec<PrimaryKey>>,
pub(crate) metrics: Arc<StdMutex<Metrics>>,
pub(crate) throttle: Arc<StdMutex<Throttle>>,
}
impl<'a> Chain {
pub(crate) fn proxy(&mut self, mut proxy: Box<dyn EventPipe>) {
proxy.set_next(Arc::clone(&self.pipe));
let proxy = Arc::new(proxy);
let _ = std::mem::replace(&mut self.pipe, proxy);
}
pub fn key(&'a self) -> &'a ChainKey {
&self.key
}
pub fn remote(&'a self) -> Option<&'a url::Url> {
self.remote.as_ref()
}
pub fn remote_addr(&'a self) -> Option<&'a MeshAddress> {
self.remote_addr.as_ref()
}
pub async fn single(&'a self) -> ChainSingleUser<'a> {
ChainSingleUser::new(self).await
}
pub async fn multi(&'a self) -> ChainMultiUser {
ChainMultiUser::new(self).await
}
pub async fn name(&'a self) -> String {
self.single().await.name()
}
pub fn default_format(&'a self) -> MessageFormat {
self.default_format.clone()
}
pub async fn count(&'a self) -> usize {
self.inside_async.read().await.chain.redo.count()
}
pub async fn flush(&'a self) -> Result<(), tokio::io::Error> {
Ok(self.inside_async.write().await.chain.flush().await?)
}
pub async fn sync(&'a self) -> Result<(), CommitError> {
let timeout = Duration::from_secs(30);
self.sync_ext(timeout).await
}
pub async fn sync_ext(&'a self, timeout: Duration) -> Result<(), CommitError> {
// Create the transaction
let trans = Transaction {
scope: TransactionScope::Full,
transmit: true,
events: Vec::new(),
timeout,
conversation: None,
};
// Feed the transaction into the chain
let pipe = self.pipe.clone();
pipe.feed(ChainWork { trans }).await?;
// Success!
Ok(())
}
pub(crate) async fn get_pending_uploads(&self) -> Vec<MetaDelayedUpload> {
let guard = self.inside_async.read().await;
guard.chain.timeline.pointers.get_pending_uploads()
}
pub fn metrics(&'a self) -> &'a Arc<StdMutex<Metrics>> {
&self.metrics
}
pub fn throttle(&'a self) -> &'a Arc<StdMutex<Throttle>> {
&self.throttle
}
pub async fn shutdown(&self) -> Result<(), CompactError> {
let include_active_files = match self.cfg_ate.backup_mode {
BackupMode::None => {
return Ok(());
}
BackupMode::Restore => {
return Ok(());
}
BackupMode::Rotating => false,
BackupMode::Full => true,
};
let mut single = self.single().await;
let we_are_the_one = if single.inside_async.is_shutdown == false {
single.inside_async.is_shutdown = true;
single
.inside_async
.chain
.redo
.backup(include_active_files)?
.await?;
true
} else {
false
};
drop(single);
if we_are_the_one {
#[cfg(feature = "enable_local_fs")]
if self.cfg_ate.log_path.is_some() && self.cfg_ate.compact_cleanup {
self.compact().await?;
}
}
Ok(())
}
}
impl Drop for Chain {
fn drop(&mut self) {
trace!("drop {}", self.key.to_string());
let _ = self.exit.send(());
}
}
impl RedoLog {
pub(crate) fn read_chain_header(&self) -> Result<ChainHeader, SerializationError> {
let header_bytes = self.header(u32::MAX);
Ok(if header_bytes.len() > 0 {
SerializationFormat::Json.deserialize(header_bytes)
.map_err(SerializationError::from)?
} else {
ChainHeader::default()
})
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/chain/listener.rs | lib/src/chain/listener.rs | #[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::event::*;
use tokio::sync::mpsc;
#[derive(Debug)]
pub(crate) struct ChainListener {
pub(crate) id: u64,
pub(crate) sender: mpsc::Sender<EventWeakData>,
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/chain/mod.rs | lib/src/chain/mod.rs | mod backup;
mod compact;
mod core;
mod inbox_pipe;
mod listener;
mod new;
mod protected_async;
mod protected_sync;
#[cfg(feature = "enable_rotate")]
mod rotate;
mod workers;
pub use self::core::*;
pub use compact::*;
pub(crate) use listener::*;
pub use new::*;
pub(crate) use protected_async::*;
pub(crate) use protected_sync::*;
pub(crate) use workers::*;
pub use crate::trust::ChainKey;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/chain/backup.rs | lib/src/chain/backup.rs | #[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::error::*;
use super::*;
impl<'a> Chain {
pub async fn backup(&'a self, include_active_files: bool) -> Result<(), SerializationError> {
let delayed_operations = {
let mut single = self.single().await;
single
.inside_async
.chain
.redo
.backup(include_active_files)?
};
delayed_operations.await?;
Ok(())
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/chain/rotate.rs | lib/src/chain/rotate.rs | #[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::error::*;
use crate::spec::*;
use crate::trust::ChainHeader;
use super::*;
impl<'a> Chain {
#[cfg(feature = "enable_rotate")]
pub async fn rotate(&'a self) -> Result<(), SerializationError> {
let delayed_operations = {
// Switch to single-user mode while we make the rotation
// of the log file - this will effectively freeze all IO
// operations on this datachain while the rotate happens
let mut single = self.single().await;
// Build the header
let header = ChainHeader {
cut_off: single.inside_async.chain.timeline.end(),
};
let header_bytes = SerializationFormat::Json.serialize(&header)?;
// Rotate the log
single.inside_async.chain.redo.rotate(header_bytes).await?;
// If there are any backups then we should run these on any
// of the archive files that are now in a state where backup
// can take place
single.inside_async.chain.redo.backup(false)?
};
delayed_operations.await?;
Ok(())
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/chain/workers.rs | lib/src/chain/workers.rs | #[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::chain::Chain;
use crate::compact::*;
use crate::engine::TaskEngine;
use crate::error::*;
use crate::pipe::*;
use crate::time::*;
use crate::transaction::TransactionScope;
use crate::transaction::*;
use std::sync::Arc;
use std::sync::RwLock as StdRwLock;
use tokio::select;
use tokio::sync::broadcast;
use tokio::sync::RwLock;
use super::*;
#[derive(Debug, Clone)]
pub(crate) struct ChainWork {
pub(crate) trans: Transaction,
}
pub(crate) struct ChainWorkProcessor {
pub(crate) inside_async: Arc<RwLock<ChainProtectedAsync>>,
pub(crate) inside_sync: Arc<StdRwLock<ChainProtectedSync>>,
pub(crate) compact_tx: CompactNotifications,
}
impl ChainWorkProcessor {
pub(crate) fn new(
inside_async: Arc<RwLock<ChainProtectedAsync>>,
inside_sync: Arc<StdRwLock<ChainProtectedSync>>,
compact_tx: CompactNotifications,
) -> ChainWorkProcessor {
ChainWorkProcessor {
inside_async,
inside_sync,
compact_tx,
}
}
pub(crate) async fn process(&self, work: ChainWork) -> Result<(), CommitError> {
// Check all the sniffers
let notifies = crate::service::callback_events_prepare(
&self.inside_sync.read().unwrap(),
&work.trans.events,
);
let trans = work.trans;
// Convert the events to weak events
// We lock the chain of trust while we update the local chain
let mut lock = self.inside_async.write().await;
// Push the events into the chain of trust and release the lock on it before
// we transmit the result so that there is less lock thrashing
match lock
.feed_async_internal(
&self.inside_sync,
&trans.events,
trans.conversation.as_ref(),
)
.await
{
Ok(_) => {
let log_size = lock.chain.redo.size() as u64;
let _ = self.compact_tx.log_size.send(log_size);
Ok(())
}
Err(err) => Err(err),
}?;
// If the scope requires it then we flush
let late_flush = match trans.scope {
TransactionScope::Full => {
lock.chain.flush().await.unwrap();
false
}
_ => true,
};
// Drop the lock
drop(lock);
{
let inside_async = Arc::clone(&self.inside_async);
TaskEngine::spawn(async move {
ChainProtectedAsync::notify(inside_async, trans.events).await;
});
}
TaskEngine::spawn(async move {
match crate::service::callback_events_notify(notifies).await {
Ok(_) => {}
Err(err) => {
#[cfg(debug_assertions)]
warn!("notify-err - {}", err);
#[cfg(not(debug_assertions))]
debug!("notify-err - {}", err);
}
};
});
// If we have a late flush in play then execute it
if late_flush {
let flush_async = self.inside_async.clone();
let mut lock = flush_async.write().await;
let _ = lock.chain.flush().await;
};
Ok(())
}
}
struct ChainExitNotifier {
exit: broadcast::Sender<()>,
}
impl Drop for ChainExitNotifier {
fn drop(&mut self) {
let _ = self.exit.send(());
}
}
impl<'a> Chain {
pub(super) async fn worker_compactor(
inside_async: Arc<RwLock<ChainProtectedAsync>>,
inside_sync: Arc<StdRwLock<ChainProtectedSync>>,
pipe: Arc<Box<dyn EventPipe>>,
time: Arc<TimeKeeper>,
mut compact_state: CompactState,
mut exit: broadcast::Receiver<()>,
) -> Result<(), CompactError> {
loop {
select! {
a = compact_state.wait_for_compact() => { a?; },
a = exit.recv() => {
a?;
break;
}
}
let inside_async = Arc::clone(&inside_async);
let inside_sync = Arc::clone(&inside_sync);
let pipe = Arc::clone(&pipe);
let time = Arc::clone(&time);
Chain::compact_ext(inside_async, inside_sync, pipe, time).await?;
}
Ok(())
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/chain/protected_sync.rs | lib/src/chain/protected_sync.rs | use error_chain::bail;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::error::*;
use crate::event::*;
use crate::index::*;
use crate::plugin::*;
use crate::transaction::*;
use crate::validator::*;
use std::sync::Arc;
use crate::lint::*;
use crate::service::*;
use crate::session::AteSession;
use crate::spec::*;
use crate::transform::*;
pub(crate) struct ChainProtectedSync {
pub(crate) integrity: TrustMode,
pub(crate) default_session: Box<dyn AteSession>,
pub(crate) sniffers: Vec<ChainSniffer>,
pub(crate) plugins: Vec<Box<dyn EventPlugin>>,
pub(crate) indexers: Vec<Box<dyn EventIndexer>>,
pub(crate) linters: Vec<Box<dyn EventMetadataLinter>>,
pub(crate) transformers: Vec<Box<dyn EventDataTransformer>>,
pub(crate) validators: Vec<Box<dyn EventValidator>>,
pub(crate) services: Vec<Arc<dyn Service>>,
}
impl ChainProtectedSync {
#[allow(dead_code)]
pub(super) fn validate_event(
&self,
header: &EventHeader,
conversation: Option<&Arc<ConversationSession>>,
) -> Result<ValidationResult, ValidationError> {
let mut deny_reason = String::default();
let mut is_deny = false;
let mut is_allow = false;
for validator in self.validators.iter() {
match validator.validate(header, conversation) {
Ok(ValidationResult::Deny) => {
if deny_reason.is_empty() == false {
deny_reason.push_str(" + ");
};
deny_reason.push_str(
format!("denied by validator({})", validator.validator_name()).as_str(),
);
is_deny = true
}
Ok(ValidationResult::Allow) => is_allow = true,
Ok(ValidationResult::Abstain) => {}
Err(ValidationError(ValidationErrorKind::Denied(reason), _)) => {
if deny_reason.is_empty() == false {
deny_reason.push_str(" + ");
};
deny_reason.push_str(reason.as_str());
is_deny = true
}
Err(ValidationError(ValidationErrorKind::Detached, _)) => is_deny = true,
Err(ValidationError(ValidationErrorKind::AllAbstained, _)) => {}
Err(ValidationError(ValidationErrorKind::NoSignatures, _)) => {
if deny_reason.is_empty() == false {
deny_reason.push_str(" + ");
};
deny_reason.push_str("no signatures");
is_deny = true
}
Err(ValidationError(ValidationErrorKind::Many(errors), _)) => {
for err in errors {
if deny_reason.is_empty() == false {
deny_reason.push_str(" + ");
};
deny_reason.push_str(err.to_string().as_str());
is_deny = true
}
}
Err(err) => {
if deny_reason.is_empty() == false {
deny_reason.push_str(" + ");
};
deny_reason.push_str(err.to_string().as_str());
is_deny = true
}
}
}
for plugin in self.plugins.iter() {
match plugin.validate(header, conversation) {
Ok(ValidationResult::Deny) => {
if deny_reason.is_empty() == false {
deny_reason.push_str(" + ");
};
deny_reason.push_str(
format!("denied by validator({})", plugin.validator_name()).as_str(),
);
is_deny = true
}
Ok(ValidationResult::Allow) => is_allow = true,
Ok(ValidationResult::Abstain) => {}
Err(ValidationError(ValidationErrorKind::Denied(reason), _)) => {
if deny_reason.is_empty() == false {
deny_reason.push_str(" + ");
};
deny_reason.push_str(reason.as_str());
is_deny = true
}
Err(ValidationError(ValidationErrorKind::Detached, _)) => is_deny = true,
Err(ValidationError(ValidationErrorKind::AllAbstained, _)) => {}
Err(ValidationError(ValidationErrorKind::NoSignatures, _)) => {
if deny_reason.is_empty() == false {
deny_reason.push_str(" + ");
};
deny_reason.push_str("no signatures");
is_deny = true
}
Err(ValidationError(ValidationErrorKind::Many(errors), _)) => {
for err in errors {
if deny_reason.is_empty() == false {
deny_reason.push_str(" + ");
};
deny_reason.push_str(err.to_string().as_str());
is_deny = true
}
}
Err(err) => {
if deny_reason.is_empty() == false {
deny_reason.push_str(" + ");
};
deny_reason.push_str(err.to_string().as_str());
is_deny = true
}
}
}
if is_deny == true {
bail!(ValidationErrorKind::Denied(deny_reason))
}
if is_allow == false {
bail!(ValidationErrorKind::AllAbstained);
}
Ok(ValidationResult::Allow)
}
pub fn set_integrity_mode(&mut self, mode: TrustMode) {
debug!("switching to {}", mode);
self.integrity = mode;
for val in self.validators.iter_mut() {
val.set_integrity_mode(mode);
}
for val in self.plugins.iter_mut() {
val.set_integrity_mode(mode);
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/chain/new.rs | lib/src/chain/new.rs | #![allow(unused_imports)]
use error_chain::bail;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tracing_futures::Instrument;
use btreemultimap::BTreeMultiMap;
use multimap::MultiMap;
use tokio::sync::broadcast;
use crate::compact::*;
use crate::conf::*;
use crate::error::*;
use crate::index::*;
use crate::transaction::*;
use fxhash::FxHashSet;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use std::sync::RwLock as StdRwLock;
use tokio::sync::RwLock;
use crate::engine::TaskEngine;
use crate::event::EventHeader;
use crate::loader::*;
use crate::pipe::*;
use crate::redo::*;
use crate::spec::SerializationFormat;
use crate::spec::TrustMode;
use crate::time::TimeKeeper;
use crate::trust::*;
use crate::trust::ChainKey;
use super::inbox_pipe::*;
use super::workers::ChainWorkProcessor;
use super::*;
impl<'a> Chain {
    /// Opens (or creates) the chain-of-trust identified by `key` using the
    /// settings held in `builder`. Events are replayed under `load_integrity`
    /// and the chain switches to `idle_integrity` once loading completes.
    #[allow(dead_code)]
    pub(crate) async fn new(
        builder: ChainBuilder,
        key: &ChainKey,
        load_integrity: TrustMode,
        idle_integrity: TrustMode,
    ) -> Result<Chain, ChainCreationError> {
        Chain::new_ext(
            builder,
            key.clone(),
            None,
            true,
            load_integrity,
            idle_integrity,
        )
        .await
    }

    /// Extended open: optionally chains an `extra_loader` into the load
    /// pipeline and, when `allow_process_errors` is true, tolerates errors
    /// while replaying historic events instead of failing the open.
    #[allow(dead_code)]
    pub async fn new_ext(
        builder: ChainBuilder,
        key: ChainKey,
        extra_loader: Option<Box<dyn Loader>>,
        allow_process_errors: bool,
        load_integrity: TrustMode,
        idle_integrity: TrustMode,
    ) -> Result<Chain, ChainCreationError> {
        debug!("open: {}", key);

        // Compute the open flags
        #[cfg(feature = "enable_local_fs")]
        let flags = OpenFlags {
            truncate: builder.truncate,
            temporal: builder.temporal,
            integrity: load_integrity,
            read_only: false,
        };
        let compact_mode = builder.cfg_ate.compact_mode;
        let compact_bootstrap = builder.cfg_ate.compact_bootstrap;

        // Create a redo log loader which will listen to all the events as they are
        // streamed in and extract the event headers
        #[cfg(feature = "enable_local_fs")]
        let (loader, mut rx) = RedoLogLoader::new();

        // We create a composite loader that includes any user defined loader
        let mut composite_loader = Box::new(crate::loader::CompositionLoader::default());
        #[cfg(feature = "enable_local_fs")]
        composite_loader.loaders.push(loader);
        if let Some(a) = extra_loader {
            composite_loader.loaders.push(a);
        }

        // Build the header
        let header = ChainHeader::default();
        let header_bytes = SerializationFormat::Json.serialize(&header)
            .map_err(SerializationError::from)?;

        // Create the redo log itself which will open the files and stream in the events
        // in a background thread
        #[cfg(feature = "enable_local_fs")]
        let redo_log = {
            let key = key.clone();
            let builder = builder.clone();
            async move {
                RedoLog::open_ext(
                    &builder.cfg_ate,
                    &key,
                    flags,
                    composite_loader,
                    header_bytes,
                )
                .await
            }
        };
        // Without a local file system there is nothing to replay from disk
        #[cfg(not(feature = "enable_local_fs"))]
        let redo_log = { async move { RedoLog::open(header_bytes).await } };

        // While the events are streamed in we build a list of all the event headers
        // but we strip off the data itself
        let process_local = async move {
            #[allow(unused_mut)]
            let mut headers = Vec::new();
            #[cfg(feature = "enable_local_fs")]
            while let Some(result) = rx.recv().await {
                headers.push(result.header.as_header()?);
            }
            Result::<Vec<EventHeader>, SerializationError>::Ok(headers)
        };

        // Join the redo log thread earlier after the events were successfully streamed in
        let (redo_log, process_local) = futures::join!(redo_log, process_local);
        let headers = process_local?;
        let redo_log = redo_log?;

        // Construct the chain-of-trust on top of the redo-log
        let chain = ChainOfTrust {
            debug_id: fastrand::u64(..),
            key: key.clone(),
            redo: redo_log,
            timeline: ChainTimeline {
                history: BTreeMultiMap::new(),
                pointers: BinaryTreeIndexer::default(),
                compactors: builder.compactors,
            },
            metrics: Arc::clone(&builder.metrics),
        };

        // Construct all the protected fields that are behind a synchronous critical section
        // that does not wait
        let mut inside_sync = ChainProtectedSync {
            sniffers: Vec::new(),
            services: Vec::new(),
            indexers: builder.indexers,
            plugins: builder.plugins,
            linters: builder.linters,
            validators: builder.validators,
            transformers: builder.transformers,
            default_session: builder.session,
            integrity: load_integrity,
        };
        // Add a tree authority plug if one is in the builder
        if let Some(tree) = builder.tree {
            inside_sync.plugins.push(Box::new(tree));
        }
        // Set the integrity mode on all the validators
        inside_sync.set_integrity_mode(load_integrity);
        // Wrap the sync object
        let inside_sync = Arc::new(StdRwLock::new(inside_sync));

        // Create an exit watcher
        let (exit_tx, _) = broadcast::channel(1);

        // The asynchronous critical section protects the chain-of-trust itself and
        // will have longer waits on it when there are writes occurring
        let mut inside_async = ChainProtectedAsync {
            chain,
            default_format: builder.cfg_ate.log_format,
            disable_new_roots: false,
            sync_tolerance: builder.cfg_ate.sync_tolerance,
            listeners: MultiMap::new(),
            is_shutdown: false,
            integrity: load_integrity,
        };

        // Check all the process events
        #[cfg(feature = "enable_verbose")]
        for a in headers.iter() {
            match a.meta.get_data_key() {
                Some(key) => debug!("loaded: {} data {}", a.raw.event_hash, key),
                None => debug!("loaded: {}", a.raw.event_hash),
            }
        }

        // Process all the events in the chain-of-trust
        let mut conversation = ConversationSession::default();
        if let TrustMode::Centralized(_) = load_integrity {
            // Centralized trust lets the server vouch for events, so local
            // validation can be weakened while loading
            conversation.weaken_validation = true;
        }
        let conversation = Arc::new(conversation);
        if let Err(err) =
            inside_async.process(inside_sync.write().unwrap(), headers, Some(&conversation))
        {
            if allow_process_errors == false {
                return Err(err);
            }
        }

        // Now switch to the integrity mode we will use after loading
        inside_sync
            .write()
            .unwrap()
            .set_integrity_mode(idle_integrity);

        // Create the compaction state (which later we will pass to the compaction thread)
        let (compact_tx, compact_rx) =
            CompactState::new(compact_mode, inside_async.chain.redo.size() as u64);

        // Make the inside async immutable
        let inside_async = Arc::new(RwLock::new(inside_async));

        // The worker thread processes events that come in
        let worker_inside_async = Arc::clone(&inside_async);
        let worker_inside_sync = Arc::clone(&inside_sync);

        // background thread - receives events and processes them
        let processor = ChainWorkProcessor::new(worker_inside_async, worker_inside_sync, compact_tx);

        // decache subscription
        let (decache_tx, _) = broadcast::channel(1000);

        // The inbox pipe intercepts requests to and processes them
        let mut pipe: Arc<Box<dyn EventPipe>> = Arc::new(Box::new(InboxPipe {
            inbox: processor,
            decache: decache_tx.clone(),
            inside_async: inside_async.clone(),
            locks: StdMutex::new(FxHashSet::default()),
        }));
        if let Some(second) = builder.pipes {
            pipe = Arc::new(Box::new(DuelPipe::new(second, pipe)));
        };

        // Create the NTP worker thats needed to build the timeline
        let tolerance = builder.configured_for.ntp_tolerance();
        let time = Arc::new(TimeKeeper::new(&builder.cfg_ate, tolerance).await?);

        // Create the chain that will be returned to the caller
        let chain = Chain {
            key: key.clone(),
            node_id: builder.node_id.clone(),
            cfg_ate: builder.cfg_ate.clone(),
            remote: None,
            remote_addr: None,
            default_format: builder.cfg_ate.log_format,
            inside_sync,
            inside_async,
            pipe,
            time,
            exit: exit_tx.clone(),
            decache: decache_tx,
            metrics: Arc::clone(&builder.metrics),
            throttle: Arc::clone(&builder.throttle),
        };

        // If we are to compact the log on bootstrap then do so
        debug!("compact-now: {}", compact_bootstrap);
        if compact_bootstrap {
            chain.compact().await?;
        }

        // Start the compactor worker thread on the chain
        if builder.cfg_ate.compact_mode != CompactMode::Never {
            debug!("compact-mode-on: {}", builder.cfg_ate.compact_mode);
            let worker_exit = exit_tx.subscribe();
            let worker_inside_async = Arc::clone(&chain.inside_async);
            let worker_inside_sync = Arc::clone(&chain.inside_sync);
            let worker_pipe = Arc::clone(&chain.pipe);
            let time = Arc::clone(&chain.time);

            // background thread - periodically compacts the chain into a smaller memory footprint
            TaskEngine::spawn(Chain::worker_compactor(
                worker_inside_async,
                worker_inside_sync,
                worker_pipe,
                time,
                compact_rx,
                worker_exit,
            ));
        } else {
            debug!("compact-mode-off: {}", builder.cfg_ate.compact_mode);
        }

        // Create the chain
        Ok(chain)
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/service/notify.rs | lib/src/service/notify.rs | use std::sync::Arc;
use tokio::sync::mpsc;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::error::*;
use crate::header::*;
use super::*;
/// Destination of a service notification: either a one-shot sniffer
/// channel or a registered service object.
pub(crate) enum NotifyWho {
    // A waiting caller (e.g. a command sniffer) expecting a primary key
    Sender(mpsc::Sender<PrimaryKey>),
    // A registered service whose notify() handles the key
    Service(Arc<dyn Service>),
}
impl std::fmt::Debug for NotifyWho {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "notify-who:sender")
}
}
/// A pending notification: delivers `key` to the target described by `who`.
#[derive(Debug)]
pub(crate) struct Notify {
    // Primary key of the data object that triggered the notification
    pub(crate) key: PrimaryKey,
    // Where the key should be delivered
    pub(crate) who: NotifyWho,
}
impl Notify {
pub(crate) async fn notify(self) -> Result<(), InvokeError> {
match self.who {
NotifyWho::Sender(sender) => sender.send(self.key).await?,
NotifyWho::Service(service) => service.notify(self.key).await?,
}
Ok(())
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/service/chain_invoke.rs | lib/src/service/chain_invoke.rs | use error_chain::bail;
use serde::{de::DeserializeOwned, Serialize};
#[allow(unused_imports)]
use std::ops::Deref;
use std::sync::Arc;
use std::time::Duration;
use tokio::select;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::chain::*;
use crate::dio::*;
use crate::meta::*;
use crate::session::*;
use crate::transaction::TransactionScope;
use crate::{error::*, meta::CoreMetadata};
use super::*;
impl Chain {
    /// Invokes a service attached to this chain with `request`, using the
    /// chain's default session and a 30 second timeout.
    pub async fn invoke<REQ, RES, ERR>(
        self: Arc<Self>,
        request: REQ,
    ) -> Result<Result<RES, ERR>, InvokeError>
    where
        REQ: Clone + Serialize + DeserializeOwned + Sync + Send + ?Sized,
        RES: Serialize + DeserializeOwned + Sync + Send + ?Sized,
        ERR: Serialize + DeserializeOwned + Sync + Send + ?Sized,
    {
        self.invoke_ext(None, request, std::time::Duration::from_secs(30))
            .await
    }

    /// Invokes a service attached to this chain: stores `request` as a data
    /// object, then waits up to `timeout` for either a `RES` (success) or
    /// `ERR` (failure) object that replies to it, loading and deleting the
    /// reply before returning it.
    pub async fn invoke_ext<REQ, RES, ERR>(
        self: Arc<Self>,
        session: Option<&'_ dyn AteSession>,
        request: REQ,
        timeout: Duration,
    ) -> Result<Result<RES, ERR>, InvokeError>
    where
        REQ: Clone + Serialize + DeserializeOwned + Sync + Send + ?Sized,
        RES: Serialize + DeserializeOwned + Sync + Send + ?Sized,
        ERR: Serialize + DeserializeOwned + Sync + Send + ?Sized,
    {
        // If no session was provided then use the empty one
        let session_store;
        let session = match session {
            Some(a) => a,
            None => {
                session_store = self
                    .inside_sync
                    .read()
                    .unwrap()
                    .default_session
                    .clone_session();
                session_store.deref()
            }
        };

        // Build the command object
        let dio = self.dio_trans(session, TransactionScope::None).await;
        let (join_res, join_err) = {
            dio.auto_cancel();
            let mut cmd = dio.store(request)?;

            // Add an encryption key on the command (if the session has one)
            if let Some(key) = session
                .read_keys(AteSessionKeyCategory::AllKeys)
                .into_iter()
                .next()
            {
                cmd.auth_mut().read = ReadOption::from_key(key);
            }
            // With no write keys in the session the command must be writable
            // by everyone or the server could not consume it
            if session
                .write_keys(AteSessionKeyCategory::AllKeys)
                .next()
                .is_none()
            {
                cmd.auth_mut().write = WriteOption::Everyone;
            }

            // Add the extra metadata about the type so the other side can find it
            cmd.add_extra_metadata(CoreMetadata::Type(MetaType {
                type_name: std::any::type_name::<REQ>().to_string(),
            }))?;

            // Sniff out the response object: one sniffer for the success reply
            // and one for the error reply, both keyed on the command id
            let cmd_id = cmd.key().clone();
            let response_type_name = std::any::type_name::<RES>().to_string();
            let error_type_name = std::any::type_name::<ERR>().to_string();
            let sniff_res = sniff_for_command_begin(
                Arc::downgrade(&self),
                Box::new(move |h| {
                    if let Some(reply) = h.meta.is_reply_to_what() {
                        if reply == cmd_id {
                            if let Some(t) = h.meta.get_type_name() {
                                return t.type_name == response_type_name;
                            }
                        }
                    }
                    false
                }),
            );
            let sniff_err = sniff_for_command_begin(
                Arc::downgrade(&self),
                Box::new(move |h| {
                    if let Some(reply) = h.meta.is_reply_to_what() {
                        if reply == cmd_id {
                            if let Some(t) = h.meta.get_type_name() {
                                return t.type_name == error_type_name;
                            }
                        }
                    }
                    false
                }),
            );

            // Send our command
            dio.commit().await?;

            // Wait for the response
            let join_res = sniff_for_command_finish(sniff_res);
            let join_err = sniff_for_command_finish(sniff_err);
            (join_res, join_err)
        };

        // The caller will wait on the response from the sniff that is looking for a reply object
        select! {
            key = join_res => {
                let key = match key {
                    Some(a) => a,
                    None => { bail!(InvokeErrorKind::Aborted); }
                };
                // Consume the reply and best-effort clean it off the chain
                let ret = dio.load_and_take::<RES>(&key).await?;
                if dio.delete(&key).await.is_ok() {
                    let _ = dio.commit().await;
                }
                Ok(Ok(ret))
            },
            key = join_err => {
                let key = match key {
                    Some(a) => a,
                    None => { bail!(InvokeErrorKind::Aborted); }
                };
                let ret = dio.load_and_take::<ERR>(&key).await?;
                if dio.delete(&key).await.is_ok() {
                    let _ = dio.commit().await;
                }
                Ok(Err(ret))
            },
            _ = crate::engine::sleep(timeout) => {
                Err(InvokeErrorKind::Timeout.into())
            }
        }
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/service/tests.rs | lib/src/service/tests.rs | #![cfg(test)]
use serde::{Deserialize, Serialize};
use std::sync::Arc;
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::error::*;
use crate::session::*;
/// Request fixture for the ping/pong service test.
#[derive(Clone, Serialize, Deserialize)]
struct Ping {
    msg: String, // message the service echoes back
}
/// Success-reply fixture for the ping/pong service test.
#[derive(Serialize, Deserialize)]
struct Pong {
    msg: String, // echoed message
}
/// Error-reply fixture for the ping/pong service test (never produced).
#[derive(Serialize, Deserialize, Debug)]
struct Noise {
    dummy: u64, // placeholder payload
}
/// Stateless context object for the test service.
#[derive(Default)]
struct PingPongTable {}
impl PingPongTable {
async fn process(self: Arc<PingPongTable>, ping: Ping) -> Result<Pong, Noise> {
Ok(Pong { msg: ping.msg })
}
}
/// End-to-end check: registers the ping/pong service on a freshly created
/// test chain and round-trips a single request through `Chain::invoke`.
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_service() -> Result<(), AteError> {
    crate::utils::bootstrap_test_env();
    info!("creating test chain");
    let mut mock_cfg = crate::conf::tests::mock_test_config();
    let (chain, _builder) =
        crate::trust::create_test_chain(&mut mock_cfg, "test_chain".to_string(), true, true, None)
            .await;
    info!("start the service on the chain");
    // An empty user session - the test data is not encrypted
    let session = AteSessionUser::new();
    chain.add_service(
        &session,
        Arc::new(PingPongTable::default()),
        PingPongTable::process,
    );
    info!("sending ping");
    let pong: Result<Pong, Noise> = chain
        .invoke(Ping {
            msg: "hi".to_string(),
        })
        .await?;
    // The service never errors, so the inner result must be Ok
    let pong = pong.unwrap();
    info!("received pong with msg [{}]", pong.msg);
    Ok(())
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/service/service_hook.rs | lib/src/service/service_hook.rs | use async_trait::async_trait;
use bytes::Bytes;
use error_chain::bail;
use fxhash::FxHashSet;
use std::ops::Deref;
use std::sync::{Arc, Weak};
#[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use crate::chain::*;
use crate::dio::row::RowData;
use crate::dio::row::RowHeader;
use crate::engine::TaskEngine;
use crate::header::*;
use crate::meta::*;
use crate::prelude::DioMut;
use crate::prelude::TransactionScope;
use crate::session::*;
use crate::{crypto::AteHash, error::*, event::*, meta::CoreMetadata, spec::MessageFormat};
use super::*;
/// Glue between a chain and a registered service handler: it is attached to
/// the chain's service list and invokes `handler` for each matching request.
pub struct ServiceHook {
    // Session used to decrypt requests and encrypt replies
    pub session: Box<dyn AteSession>,
    // Transaction scope used for the reply commit
    pub scope: TransactionScope,
    // Type-erased service implementation
    handler: Arc<dyn ServiceInvoker>,
    // Weak so the hook never keeps the chain alive on its own
    chain: Weak<Chain>,
}
impl ServiceHook {
    /// Builds a hook for `handler` that holds only a weak reference to
    /// `chain`; replies are committed with `TransactionScope::None`.
    pub(crate) fn new(
        chain: &Arc<Chain>,
        session: Box<dyn AteSession>,
        handler: &Arc<dyn ServiceInvoker>,
    ) -> ServiceHook {
        let chain = Arc::downgrade(chain);
        let handler = Arc::clone(handler);
        ServiceHook {
            session,
            scope: TransactionScope::None,
            handler,
            chain,
        }
    }
}
#[async_trait]
impl Service for ServiceHook {
    /// Matches only events whose type name equals the handler's request type.
    fn filter(&self, evt: &EventWeakData) -> bool {
        if let Some(t) = evt.meta.get_type_name() {
            return t.type_name == self.handler.request_type_name();
        }
        false
    }

    /// Processes one request object: locks it, decrypts its payload, invokes
    /// the handler and writes the reply (or error) back onto the chain.
    async fn notify(&self, key: PrimaryKey) -> Result<(), InvokeError> {
        // Get a reference to the chain
        let chain = match self.chain.upgrade() {
            Some(a) => a,
            None => {
                bail!(InvokeErrorKind::Aborted);
            }
        };

        // Build the data access layer
        let dio = chain.dio_trans(self.session.deref(), self.scope).await;
        dio.auto_cancel();

        // Lock the data row
        if dio.try_lock(key).await? == false {
            debug!("service call skipped - someone else locked it");
            return Ok(());
        }

        // Load the object and lock it (to prevent others processing it)
        let mut evt = dio.load_raw(&key).await?;

        // Convert the data using the encryption and decryption routines
        dio.data_as_overlay(self.session.deref(), &mut evt)?;
        let req = match evt.data_bytes {
            Some(a) => a,
            None => {
                bail!(InvokeErrorKind::NoData);
            }
        };

        // Invoke the callback in the service
        let ret = self.handler.invoke(req).await?;

        // Commit the results - If an error occurs cancel everything and delete the command
        if let Err(_) = &ret {
            dio.cancel();
        }

        // We delete the row under a concurrent task to prevent deadlocks
        dio.delete(&key).await?;

        // Process the results
        let reply_ret = match ret {
            Ok(res) => {
                debug!(
                    "service [{}] sending OK({})",
                    self.handler.request_type_name(),
                    self.handler.response_type_name()
                );
                self.send_reply(&dio, key, res, self.handler.response_type_name())
            }
            Err(err) => {
                debug!(
                    "service [{}] sending ERR({})",
                    self.handler.request_type_name(),
                    self.handler.error_type_name()
                );
                self.send_reply(&dio, key, err, self.handler.error_type_name())
            }
        };

        // We commit the transactions that holds the reply message under a concurrent
        // thread to prevent deadlocks
        TaskEngine::spawn(async move {
            let ret = dio.commit().await;
            if let Err(err) = ret {
                debug!("notify-err - {}", err);
            }
        });

        // If the reply failed to send then return that error - otherwise success!
        reply_ret?;
        Ok(())
    }
}
impl ServiceHook {
    /// Stages a reply row (marked as a reply to `req`) directly into the
    /// dio's dirty state; the caller is responsible for committing it.
    fn send_reply(
        &self,
        dio: &Arc<DioMut>,
        req: PrimaryKey,
        res: Bytes,
        res_type: String,
    ) -> Result<(), InvokeError> {
        let key = PrimaryKey::generate();
        let format = self.handler.data_format();
        let data = res;
        let data_hash = AteHash::from_bytes(&data[..]);

        // Encrypt the reply with the session's first read key (if any);
        // write rights are inherited from the parent scope
        let mut auth = MetaAuthorization::default();
        if let Some(key) = self
            .session
            .read_keys(AteSessionKeyCategory::AllKeys)
            .into_iter()
            .map(|a| a.clone())
            .next()
        {
            auth.read = ReadOption::from_key(&key);
        }
        auth.write = WriteOption::Inherit;

        // Tag the row with its type name and the request it replies to so
        // the invoking side's sniffers can find it
        let mut extra_meta = Vec::new();
        extra_meta.push(CoreMetadata::Type(MetaType {
            type_name: res_type.clone(),
        }));
        extra_meta.push(CoreMetadata::Reply(req));

        // Mark the header and row as dirty so the next commit writes them
        let mut state = dio.state.lock().unwrap();
        state.dirty_header(RowHeader {
            key,
            parent: None,
            auth: auth.clone(),
        });
        state.dirty_row(RowData {
            key,
            type_name: res_type,
            format: MessageFormat {
                data: format,
                meta: dio.default_format().meta,
            },
            data_hash,
            data,
            collections: FxHashSet::default(),
            created: 0,
            updated: 0,
            extra_meta,
            parent: None,
            auth,
            is_new: true,
        });
        Ok(())
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/service/service_handler.rs | lib/src/service/service_handler.rs | use async_trait::async_trait;
use bytes::Bytes;
use serde::{de::DeserializeOwned, Serialize};
use std::future::Future;
use std::marker::PhantomData;
use std::sync::Arc;
use tokio::sync::Mutex;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::error::*;
use crate::spec::SerializationFormat;
/// Type-erased entry point for a registered service: takes the raw request
/// bytes and produces the serialized reply (Ok) or error (Err) bytes.
#[async_trait]
pub trait ServiceInvoker
where
    Self: Send + Sync,
{
    /// Deserializes `request`, runs the handler and serializes its outcome;
    /// the outer Result reports serialization failures only
    async fn invoke(&self, request: Bytes) -> Result<Result<Bytes, Bytes>, SerializationError>;
    /// Serialization format used for requests and replies
    fn data_format(&self) -> SerializationFormat;
    /// Fully-qualified type name of the request object
    fn request_type_name(&self) -> String;
    /// Fully-qualified type name of the success reply object
    fn response_type_name(&self) -> String;
    /// Fully-qualified type name of the error reply object
    fn error_type_name(&self) -> String;
}
/// Typed adapter that wraps an async callback `C: Fn(Arc<CTX>, REQ) -> F`
/// so it can be exposed through the type-erased `ServiceInvoker` trait.
pub struct ServiceHandler<CTX, REQ, RES, ERR, C, F>
where
    CTX: Send + Sync,
    REQ: DeserializeOwned + Send + Sync,
    RES: Serialize + Send + Sync,
    ERR: Serialize + Send + Sync,
    C: Fn(Arc<CTX>, REQ) -> F + Send,
    F: Future<Output = Result<RES, ERR>> + Send,
{
    // Shared state handed to every invocation of the callback
    context: Arc<CTX>,
    // The user callback; locked only while creating each invocation future
    callback: Mutex<C>,
    // Markers pin down the generic parameters that appear only in the
    // callback's signature
    _marker1: PhantomData<REQ>,
    _marker2: PhantomData<RES>,
    _marker3: PhantomData<ERR>,
}
impl<CTX, REQ, RES, ERR, C, F> ServiceHandler<CTX, REQ, RES, ERR, C, F>
where
    Self: Sync + Send,
    CTX: Send + Sync,
    REQ: DeserializeOwned + Send + Sync,
    RES: Serialize + Send + Sync,
    ERR: Serialize + Send + Sync,
    C: Fn(Arc<CTX>, REQ) -> F + Send,
    F: Future<Output = Result<RES, ERR>> + Send,
{
    /// Wraps `callback` together with its shared `context` in a handler
    /// ready to be registered as a type-erased service invoker.
    pub fn new(context: Arc<CTX>, callback: C) -> Arc<ServiceHandler<CTX, REQ, RES, ERR, C, F>> {
        Arc::new(ServiceHandler {
            context,
            callback: Mutex::new(callback),
            _marker1: PhantomData,
            _marker2: PhantomData,
            _marker3: PhantomData,
        })
    }
}
#[async_trait]
impl<CTX, REQ, RES, ERR, C, F> ServiceInvoker for ServiceHandler<CTX, REQ, RES, ERR, C, F>
where
    Self: Sync + Send,
    CTX: Send + Sync,
    REQ: DeserializeOwned + Send + Sync,
    RES: Serialize + Send + Sync,
    ERR: Serialize + Send + Sync,
    C: Fn(Arc<CTX>, REQ) -> F + Send,
    F: Future<Output = Result<RES, ERR>> + Send,
{
    /// Deserializes the request, runs the callback and serializes whatever
    /// it returns; the inner Result carries the service's own outcome.
    async fn invoke(&self, req: Bytes) -> Result<Result<Bytes, Bytes>, SerializationError> {
        let format = self.data_format();
        let req = format.deserialize_ref::<REQ>(&req[..])
            .map_err(SerializationError::from)?;
        let ctx = Arc::clone(&self.context);
        // Hold the callback lock only long enough to create the future;
        // the future itself is awaited outside the lock
        let ret = {
            let callback = self.callback.lock().await;
            (callback)(ctx, req)
        };
        let ret = ret.await;
        let ret = match ret {
            Ok(res) => Ok(Bytes::from(format.serialize_ref::<RES>(&res)
                .map_err(SerializationError::from)?)),
            Err(err) => Err(Bytes::from(format.serialize_ref::<ERR>(&err)
                .map_err(SerializationError::from)?)),
        };
        Ok(ret)
    }

    /// Requests and replies are always encoded as JSON
    fn data_format(&self) -> SerializationFormat {
        SerializationFormat::Json
    }

    fn request_type_name(&self) -> String {
        std::any::type_name::<REQ>().to_string()
    }

    fn response_type_name(&self) -> String {
        std::any::type_name::<RES>().to_string()
    }

    fn error_type_name(&self) -> String {
        std::any::type_name::<ERR>().to_string()
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/service/service.rs | lib/src/service/service.rs | use async_trait::async_trait;
use serde::{de::DeserializeOwned, Serialize};
use std::future::Future;
use std::sync::Arc;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::chain::Chain;
use crate::error::*;
use crate::event::*;
use crate::header::*;
use crate::service::ServiceHook;
use crate::session::AteSession;
use super::*;
/// A service attached to a chain: `filter` selects the events it handles
/// and `notify` is called with the key of each matching data object.
#[async_trait]
pub trait Service
where
    Self: Send + Sync,
{
    /// Returns true when this service should handle the given event
    fn filter(&self, evt: &EventWeakData) -> bool;
    /// Processes the data object identified by `key`
    async fn notify(&self, key: PrimaryKey) -> Result<(), InvokeError>;
}
impl Chain {
    /// Registers a typed async `callback` as a service on this chain; each
    /// incoming `REQ` object is passed to it together with `context` and the
    /// returned `Result<RES, ERR>` is written back as the reply.
    pub fn add_service<CTX, REQ, RES, ERR, C, F>(
        self: &Arc<Self>,
        session: &'_ dyn AteSession,
        context: Arc<CTX>,
        callback: C,
    ) -> Arc<ServiceHook>
    where
        CTX: Send + Sync + 'static,
        REQ: DeserializeOwned + Send + Sync + Sized + 'static,
        RES: Serialize + Send + Sync + Sized + 'static,
        ERR: Serialize + Send + Sync + Sized + 'static,
        C: Fn(Arc<CTX>, REQ) -> F + Send + 'static,
        F: Future<Output = Result<RES, ERR>> + Send + 'static,
    {
        let svr = ServiceHandler::new(context, callback);
        let svr: Arc<dyn ServiceInvoker> = svr;
        self.add_generic_service(session.clone_session(), &svr)
    }

    /// Registers an already type-erased service `handler` under `session`.
    pub fn add_generic_service(
        self: &Arc<Self>,
        session: Box<dyn AteSession>,
        handler: &Arc<dyn ServiceInvoker>,
    ) -> Arc<ServiceHook> {
        let ret = Arc::new(ServiceHook::new(self, session, handler));
        {
            // Attach the hook to the chain's service list under the sync lock
            let svr = Arc::clone(&ret);
            let svr: Arc<dyn Service> = svr;
            let mut guard = self.inside_sync.write().unwrap();
            guard.services.push(svr);
        }
        ret
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/service/helper.rs | lib/src/service/helper.rs | use std::sync::Arc;
use std::sync::RwLockReadGuard as StdRwLockReadGuard;
use std::sync::Weak;
use tokio::sync::mpsc;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::chain::*;
use crate::header::*;
use crate::{error::*, event::*};
use super::*;
/// Builds the list of notifications that `events` should trigger.
///
/// Sniffers fire at most once (the first matching event wins) while
/// services are notified for every matching event that carries a data key.
///
/// The parameter type was generalized from `&Vec<EventWeakData>` to
/// `&[EventWeakData]` (idiomatic, and backward-compatible because `&Vec<T>`
/// deref-coerces to `&[T]` at every call site).
pub(crate) fn callback_events_prepare(
    guard: &StdRwLockReadGuard<ChainProtectedSync>,
    events: &[EventWeakData],
) -> Vec<Notify> {
    let mut ret = Vec::new();

    // One-shot sniffers: deliver only the first matching data key
    for sniffer in guard.sniffers.iter() {
        if let Some(key) = events
            .iter()
            .filter_map(|e| match (*sniffer.filter)(e) {
                true => e.meta.get_data_key(),
                false => None,
            })
            .next()
        {
            ret.push(sniffer.convert(key));
        }
    }

    // Services: deliver every matching data key
    for service in guard.services.iter() {
        for key in events
            .iter()
            .filter(|e| service.filter(&e))
            .filter_map(|e| e.meta.get_data_key())
        {
            ret.push(Notify {
                key,
                who: NotifyWho::Service(Arc::clone(service)),
            });
        }
    }
    ret
}
/// Fires all the queued notifications concurrently and waits for the whole
/// batch; individual failures are logged and never abort the batch.
pub(crate) async fn callback_events_notify(mut notifies: Vec<Notify>) -> Result<(), InvokeError> {
    let pending: Vec<_> = notifies.drain(..).map(|n| n.notify()).collect();
    for outcome in futures::future::join_all(pending).await {
        if let Err(err) = outcome {
            // Noisy in debug builds, quiet in release builds
            #[cfg(debug_assertions)]
            warn!("notify-err - {}", err);
            #[cfg(not(debug_assertions))]
            debug!("notify-err - {}", err);
        }
    }
    Ok(())
}
/// Handle returned by `sniff_for_command_begin`; holds the receiving end of
/// the sniffer channel so the caller can await the matching event key.
pub(super) struct SniffCommandHandle {
    // Unique id used to deregister the sniffer afterwards
    id: u64,
    // Receives the primary key of the first matching event
    rx: mpsc::Receiver<PrimaryKey>,
    // Weak so an in-flight sniff never keeps the chain alive
    chain: Weak<Chain>,
}
/// Registers a one-shot sniffer on `chain` that fires when `what` matches
/// an event; the returned handle is awaited via `sniff_for_command_finish`.
pub(super) fn sniff_for_command_begin(
    chain: Weak<Chain>,
    what: Box<dyn Fn(&EventWeakData) -> bool + Send + Sync>,
) -> SniffCommandHandle {
    let id = fastrand::u64(..);
    let (tx, rx) = mpsc::channel(1);

    // Register the sniffer while briefly holding the sync lock; when the
    // chain is already gone the sender is simply dropped
    if let Some(strong) = chain.upgrade() {
        strong
            .inside_sync
            .write()
            .unwrap()
            .sniffers
            .push(ChainSniffer {
                id,
                filter: what,
                notify: tx,
            });
    }

    SniffCommandHandle { id, rx, chain }
}
/// Awaits the sniffer registered by `sniff_for_command_begin` and then
/// removes it from the chain; returns the matched key, or `None` when the
/// sender side was dropped.
pub(super) async fn sniff_for_command_finish(mut handle: SniffCommandHandle) -> Option<PrimaryKey> {
    // Block until the sniffer fires (or its sender is dropped)
    let key = handle.rx.recv().await;

    // Deregister the sniffer under the sync lock
    if let Some(chain) = handle.chain.upgrade() {
        chain
            .inside_sync
            .write()
            .unwrap()
            .sniffers
            .retain(|s| s.id != handle.id);
    }

    key
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/service/mod.rs | lib/src/service/mod.rs | pub mod chain_invoke;
pub mod chain_sniffer;
pub mod helper;
pub mod notify;
pub mod service;
pub mod service_handler;
pub mod service_hook;
pub mod tests;
pub(crate) use chain_sniffer::*;
pub(crate) use helper::*;
pub(crate) use notify::*;
pub use chain_invoke::*;
pub use service::*;
pub use service_handler::*;
pub use service_hook::*;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/service/chain_sniffer.rs | lib/src/service/chain_sniffer.rs | use tokio::sync::mpsc;
#[allow(unused_imports)]
use tracing::{debug, error, info, warn};
use crate::event::*;
use crate::header::*;
use super::*;
/// An event sniffer registered on a chain: when `filter` matches an event,
/// that event's primary key is pushed down the `notify` channel.
pub(crate) struct ChainSniffer {
    // Unique id used to remove the sniffer later
    pub(crate) id: u64,
    // Predicate deciding whether an event is interesting
    pub(crate) filter: Box<dyn Fn(&EventWeakData) -> bool + Send + Sync>,
    // Channel that receives the matching event's key
    pub(crate) notify: mpsc::Sender<PrimaryKey>,
}
impl ChainSniffer {
    /// Wraps `key` in a notification addressed at this sniffer's channel.
    pub(super) fn convert(&self, key: PrimaryKey) -> Notify {
        let who = NotifyWho::Sender(self.notify.clone());
        Notify { key, who }
    }
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/commit_error.rs | lib/src/error/commit_error.rs | use error_chain::error_chain;
// Errors raised while committing transactions to a chain-of-trust.
// (Plain `//` comments are stripped by the lexer and never reach the macro.)
error_chain! {
    types {
        CommitError, CommitErrorKind, ResultExt, Result;
    }
    // Errors from other subsystems that can surface during a commit
    links {
        CommsError(super::CommsError, super::CommsErrorKind);
        ValidationError(super::ValidationError, super::ValidationErrorKind);
        TransformError(super::TransformError, super::TransformErrorKind);
        LintError(super::LintError, super::LintErrorKind);
        TimeError(super::TimeError, super::TimeErrorKind);
        SinkError(super::SinkError, super::SinkErrorKind);
        SerializationError(super::SerializationError, super::SerializationErrorKind);
    }
    foreign_links {
        IO(::tokio::io::Error);
    }
    // Commit-specific failure modes
    errors {
        Aborted {
            description("the transaction aborted before it could be completed"),
            display("the transaction aborted before it could be completed"),
        }
        Timeout(elapsed: String) {
            description("the transaction has timed out"),
            display("the transaction has timed out after {}", elapsed),
        }
        ReadOnly {
            description("the chain of trust is currently read only"),
            display("the chain of trust is currently read only")
        }
        LockError(err: super::CommsErrorKind) {
            description("failed to lock the data due to an error in communication"),
            display("failed to lock the data due to an error in communication - {}", err.to_string()),
        }
        NewRootsAreDisabled {
            description("new root objects are currently not allowed for this chain"),
            display("new root objects are currently not allowed for this chain"),
        }
        PipeError(err: String) {
            description("failed to commit the data due to an error receiving the result in the interprocess pipe"),
            display("failed to commit the data due to an error receiving the result in the interprocess pipe - {}", err.to_string()),
        }
        RootError(err: String) {
            description("failed to commit the data due to an error at the root server while processing the events"),
            display("failed to commit the data due to an error at the root server while processing the events - {}", err.to_string()),
        }
    }
}
impl<T> From<tokio::sync::mpsc::error::SendError<T>> for CommitError {
fn from(err: tokio::sync::mpsc::error::SendError<T>) -> CommitError {
CommitErrorKind::PipeError(err.to_string()).into()
}
}
impl<T> From<tokio::sync::broadcast::error::SendError<T>> for CommitError {
fn from(err: tokio::sync::broadcast::error::SendError<T>) -> CommitError {
CommitErrorKind::PipeError(err.to_string()).into()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/compact_error.rs | lib/src/error/compact_error.rs | use error_chain::error_chain;
use tokio::sync::broadcast;
use tokio::sync::watch;
// Errors raised while compacting a chain's redo log.
error_chain! {
    types {
        CompactError, CompactErrorKind, ResultExt, Result;
    }
    // Errors from other subsystems that can surface during compaction
    links {
        SinkError(super::SinkError, super::SinkErrorKind);
        TimeError(super::TimeError, super::TimeErrorKind);
        SerializationError(super::SerializationError, super::SerializationErrorKind);
        LoadError(super::LoadError, super::LoadErrorKind);
    }
    foreign_links {
        IO(tokio::io::Error);
    }
    // Compaction-specific failure modes
    errors {
        WatchError(err: String) {
            description("failed to compact the chain due to an error in watch notification"),
            display("failed to compact the chain due to an error in watch notification - {}", err),
        }
        BroadcastError(err: String) {
            description("failed to compact the chain due to an error in broadcast notification"),
            display("failed to compact the chain due to an error in broadcast notification - {}", err)
        }
        Aborted {
            description("compacting has been aborted")
            display("compacting has been aborted")
        }
    }
}
impl From<watch::error::RecvError> for CompactError {
fn from(err: watch::error::RecvError) -> CompactError {
CompactErrorKind::WatchError(err.to_string()).into()
}
}
impl<T> From<watch::error::SendError<T>> for CompactError
where
T: std::fmt::Debug,
{
fn from(err: watch::error::SendError<T>) -> CompactError {
CompactErrorKind::WatchError(err.to_string()).into()
}
}
impl From<broadcast::error::RecvError> for CompactError {
fn from(err: broadcast::error::RecvError) -> CompactError {
CompactErrorKind::BroadcastError(err.to_string()).into()
}
}
impl<T> From<broadcast::error::SendError<T>> for CompactError
where
T: std::fmt::Debug,
{
fn from(err: broadcast::error::SendError<T>) -> CompactError {
CompactErrorKind::BroadcastError(err.to_string()).into()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/lint_error.rs | lib/src/error/lint_error.rs | use error_chain::error_chain;
use crate::crypto::AteHash;
error_chain! {
types {
LintError, LintErrorKind, ResultExt, Result;
}
links {
TrustError(super::TrustError, super::TrustErrorKind);
TimeError(super::TimeError, super::TimeErrorKind);
SerializationError(super::SerializationError, super::SerializationErrorKind);
}
foreign_links {
IO(std::io::Error);
}
errors {
MissingWriteKey(hash: AteHash) {
description("could not find the write public key in the session"),
display("could not find the write public key ({}) in the session", hash.to_string()),
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/load_error.rs | lib/src/error/load_error.rs | use error_chain::error_chain;
use rmp_serde::decode::Error as RmpDecodeError;
use rmp_serde::encode::Error as RmpEncodeError;
use crate::crypto::AteHash;
use crate::header::PrimaryKey;
error_chain! {
types {
LoadError, LoadErrorKind, ResultExt, Result;
}
links {
SerializationError(super::SerializationError, super::SerializationErrorKind);
TransformationError(super::TransformError, super::TransformErrorKind);
}
errors {
IO(err: String) {
description("IO error")
display("{}", err)
}
NotFound(key: PrimaryKey) {
description("data object with key could not be found"),
display("data object with key ({}) could not be found", key.as_hex_string()),
}
NoPrimaryKey {
description("entry has no primary could and hence could not be loaded")
display("entry has no primary could and hence could not be loaded")
}
VersionMismatch {
description("entry has an invalid version for this log file")
display("entry has an invalid version for this log file")
}
NotFoundByHash(hash: AteHash) {
description("data object with hash could not be found"),
display("data object with hash ({}) could not be found", hash.to_string()),
}
ObjectStillLocked(key: PrimaryKey) {
description("data object with key is still being edited in the current scope"),
display("data object with key ({}) is still being edited in the current scope", key.as_hex_string()),
}
AlreadyDeleted(key: PrimaryKey) {
description("data object with key has already been deleted"),
display("data object with key ({}) has already been deleted", key.as_hex_string()),
}
Tombstoned(key: PrimaryKey) {
description("data object with key has already been tombstoned"),
display("data object with key ({}) has already been tombstoned", key.as_hex_string()),
}
ChainCreationError(err: String) {
description("chain creation error while attempting to load data object"),
display("chain creation error while attempting to load data object - {}", err),
}
NoRepository {
description("chain has no repository thus could not load foreign object")
display("chain has no repository thus could not load foreign object")
}
MissingData {
description("the data is missing for this record")
display("the data is missing for this record")
}
Disconnected {
description("unable to load record as the client is currently disconnected from the server")
display("unable to load record as the client is currently disconnected from the server")
}
Timeout {
description("timeout while waiting for the data from the server")
display("timeout while waiting for the data from the server")
}
LoadFailed(err: String) {
description("failed to load the data from the server"),
display("failed to load the data from the server - {}", err),
}
CollectionDetached {
description("collection is detached from its parent, it must be attached before it can be used")
display("collection is detached from its parent, it must be attached before it can be used")
}
WeakDio {
description("the dio that created this object has gone out of scope")
display("the dio that created this object has gone out of scope")
}
}
}
impl From<tokio::io::Error> for LoadError {
fn from(err: tokio::io::Error) -> LoadError {
LoadErrorKind::IO(err.to_string()).into()
}
}
impl From<RmpEncodeError> for LoadError {
fn from(err: RmpEncodeError) -> LoadError {
LoadErrorKind::SerializationError(super::SerializationErrorKind::EncodeError(err).into())
.into()
}
}
impl From<RmpDecodeError> for LoadError {
fn from(err: RmpDecodeError) -> LoadError {
LoadErrorKind::SerializationError(super::SerializationErrorKind::DecodeError(err).into())
.into()
}
}
impl From<bincode::Error> for LoadError {
fn from(err: bincode::Error) -> LoadError {
LoadErrorKind::SerializationError(super::SerializationErrorKind::BincodeError(err).into())
.into()
}
}
impl From<super::ChainCreationError> for LoadError {
fn from(err: super::ChainCreationError) -> LoadError {
LoadErrorKind::ChainCreationError(err.to_string()).into()
}
}
impl From<super::ChainCreationErrorKind> for LoadError {
fn from(err: super::ChainCreationErrorKind) -> LoadError {
LoadErrorKind::ChainCreationError(err.to_string()).into()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/ate_error.rs | lib/src/error/ate_error.rs | use error_chain::error_chain;
error_chain! {
types {
AteError, AteErrorKind, ResultExt, Result;
}
links {
BusError(super::BusError, super::BusErrorKind);
ChainCreationError(super::ChainCreationError, super::ChainCreationErrorKind);
CommitError(super::CommitError, super::CommitErrorKind);
CommsError(super::CommsError, super::CommsErrorKind);
CompactError(super::CompactError, super::CompactErrorKind);
CryptoError(super::CryptoError, super::CryptoErrorKind);
InvokeError(super::InvokeError, super::InvokeErrorKind);
LintError(super::LintError, super::LintErrorKind);
LoadError(super::LoadError, super::LoadErrorKind);
LockError(super::LockError, super::LockErrorKind);
SerializationError(super::SerializationError, super::SerializationErrorKind);
SinkError(super::SinkError, super::SinkErrorKind);
TimeError(super::TimeError, super::TimeErrorKind);
TransformError(super::TransformError, super::TransformErrorKind);
TrustError(super::TrustError, super::TrustErrorKind);
ValidationError(super::ValidationError, super::ValidationErrorKind);
}
foreign_links {
IO(::tokio::io::Error);
UrlInvalid(::url::ParseError);
ProcessError(super::process_error::ProcessError);
}
errors {
NotImplemented {
description("not implemented")
display("not implemented")
}
ServiceError(err: String) {
description("service error"),
display("service error - {}", err)
}
}
}
impl From<serde_json::Error> for AteError {
fn from(err: serde_json::Error) -> AteError {
AteErrorKind::SerializationError(
super::SerializationErrorKind::SerdeError(err.to_string()).into(),
)
.into()
}
}
impl From<tokio::sync::watch::error::RecvError> for AteError {
fn from(err: tokio::sync::watch::error::RecvError) -> AteError {
AteErrorKind::IO(tokio::io::Error::new(
tokio::io::ErrorKind::Other,
err.to_string(),
))
.into()
}
}
impl<T> From<tokio::sync::watch::error::SendError<T>> for AteError
where
T: std::fmt::Debug,
{
fn from(err: tokio::sync::watch::error::SendError<T>) -> AteError {
AteErrorKind::IO(tokio::io::Error::new(
tokio::io::ErrorKind::Other,
err.to_string(),
))
.into()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/transform_error.rs | lib/src/error/transform_error.rs | use error_chain::error_chain;
error_chain! {
types {
TransformError, TransformErrorKind, ResultExt, Result;
}
links {
CryptoError(super::CryptoError, super::CryptoErrorKind);
TrustError(super::TrustError, super::TrustErrorKind);
}
foreign_links {
IO(std::io::Error);
}
errors {
#[cfg(feature = "enable_openssl")]
EncryptionError(stack: openssl::error::ErrorStack) {
description("encryption error while transforming event data"),
display("encryption error while transforming event data - {}", err),
}
MissingData {
description("missing data for this record")
display("missing data for this record")
}
MissingReadKey(hash: String) {
description("missing the read key needed to encrypt/decrypt this data object"),
display("missing the read key ({}) needed to encrypt/decrypt this data object", hash)
}
UnspecifiedReadability {
description("the readability for this data object has not been specified")
display("the readability for this data object has not been specified")
}
}
}
#[cfg(feature = "enable_openssl")]
impl From<openssl::error::ErrorStack> for Error {
fn from(err: openssl::error::ErrorStack) -> Error {
ErrorKind::EncryptionError(err).into()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/trust_error.rs | lib/src/error/trust_error.rs | use error_chain::error_chain;
use crate::header::PrimaryKey;
error_chain! {
types {
TrustError, TrustErrorKind, ResultExt, Result;
}
links {
TimeError(super::TimeError, super::TimeErrorKind);
}
errors {
NoAuthorizationWrite(type_code: String, key: PrimaryKey, write: crate::meta::WriteOption) {
description("data object with key could not be written as the current session has no signature key for this authorization"),
display("data object of type ({}) with key ({}) could not be written as the current session has no signature key for this authorization ({})", type_code, key.as_hex_string(), write),
}
NoAuthorizationRead(type_code: String, key: PrimaryKey, read: crate::meta::ReadOption) {
description("data object with key could not be read as the current session has no encryption key for this authorization"),
display("data object of type ({}) with key ({}) could not be read as the current session has no encryption key for this authorization ({})", type_code, key.as_hex_string(), read),
}
OwnedByNobody(type_code: String) {
description("data object can not be modified or deleted as it is owned by nobody"),
display("data object of type ({}) can not be modified or deleted as it is owned by nobody", type_code),
}
NoAuthorizationOrphan {
description("data objects without a primary key has no write authorization")
display("data objects without a primary key has no write authorization")
}
MissingParent(key: PrimaryKey) {
description("data object references a parent object that does not exist"),
display("data object references a parent object that does not exist ({})", key.as_hex_string()),
}
UnspecifiedWritability {
description("the writability of this data object has not been specified")
display("the writability of this data object has not been specified")
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/invoke_error.rs | lib/src/error/invoke_error.rs | use error_chain::error_chain;
error_chain! {
types {
InvokeError, InvokeErrorKind, ResultExt, Result;
}
links {
LoadError(super::LoadError, super::LoadErrorKind);
SerializationError(super::SerializationError, super::SerializationErrorKind);
CommitError(super::CommitError, super::CommitErrorKind);
TransformError(super::TransformError, super::TransformErrorKind);
LockError(super::LockError, super::LockErrorKind);
}
foreign_links {
IO(std::io::Error);
}
errors {
PipeError(err: String) {
description("command failed due to pipe error"),
display("command failed due to pipe error - {}", err)
}
ServiceError(err: String) {
description("command failed due to an error at the service"),
display("command failed due to an error at the service - {}", err)
}
Timeout {
description("command failed due to a timeout"),
display("command failed due to a timeout")
}
Aborted {
description("command failed as it was aborted"),
display("command failed as it was aborted")
}
NoData {
description("command failed as there was no data"),
display("command failed as there was no data")
}
MissingData {
description("command failed as the data is missing"),
display("command failed as the data is missing"),
}
}
}
impl<T> From<tokio::sync::mpsc::error::SendError<T>> for InvokeError {
fn from(err: tokio::sync::mpsc::error::SendError<T>) -> InvokeError {
InvokeErrorKind::PipeError(err.to_string()).into()
}
}
impl From<tokio::time::error::Elapsed> for InvokeError {
fn from(_elapsed: tokio::time::error::Elapsed) -> InvokeError {
InvokeErrorKind::Timeout.into()
}
}
#[cfg(target_family = "wasm")]
impl From<wasmer_bus_time::prelude::Elapsed> for InvokeError {
fn from(_elapsed: wasmer_bus_time::prelude::Elapsed) -> InvokeError {
InvokeErrorKind::Timeout.into()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/time_error.rs | lib/src/error/time_error.rs | use chrono::DateTime;
use chrono::Utc;
use error_chain::error_chain;
use std::time::SystemTime;
use std::time::SystemTimeError;
error_chain! {
types {
TimeError, TimeErrorKind, ResultExt, Result;
}
foreign_links {
IO(std::io::Error);
SystemTimeError(SystemTimeError);
}
errors {
BeyondTolerance(tolerance: u32) {
description("the network latency is beyond tolerance to synchronize the clocks"),
display("the network latency is beyond tolerance ({}) to synchronize the clocks", tolerance.to_string()),
}
NoTimestamp {
description("the data object has no timestamp metadata attached to it")
display("the data object has no timestamp metadata attached to it")
}
OutOfBounds(cursor: SystemTime, timestamp: SystemTime) {
description("the network latency is out of bound"),
display("the network latency is out of bounds - cursor:{}, timestamp:{}",
DateTime::<Utc>::from(*cursor).format("%Y-%m-%d %H:%M:%S.%f").to_string(),
DateTime::<Utc>::from(*timestamp).format("%Y-%m-%d %H:%M:%S.%f").to_string())
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/mod.rs | lib/src/error/mod.rs | pub mod ate_error;
pub mod bus_error;
pub mod chain_creation_error;
pub mod commit_error;
pub mod comms_error;
pub mod compact_error;
pub mod invoke_error;
pub mod lint_error;
pub mod load_error;
pub mod lock_error;
pub mod process_error;
pub mod sink_error;
pub mod time_error;
pub mod transform_error;
pub mod trust_error;
pub mod validation_error;
pub use ate_error::AteError;
pub use ate_error::AteErrorKind;
pub use bus_error::BusError;
pub use bus_error::BusErrorKind;
pub use chain_creation_error::ChainCreationError;
pub use chain_creation_error::ChainCreationErrorKind;
pub use commit_error::CommitError;
pub use commit_error::CommitErrorKind;
pub use comms_error::CommsError;
pub use comms_error::CommsErrorKind;
pub use compact_error::CompactError;
pub use compact_error::CompactErrorKind;
pub use ate_crypto::error::CryptoError;
pub use ate_crypto::error::CryptoErrorKind;
pub use invoke_error::InvokeError;
pub use invoke_error::InvokeErrorKind;
pub use lint_error::LintError;
pub use lint_error::LintErrorKind;
pub use load_error::LoadError;
pub use load_error::LoadErrorKind;
pub use lock_error::LockError;
pub use lock_error::LockErrorKind;
pub use process_error::ProcessError;
pub use ate_crypto::error::SerializationError;
pub use ate_crypto::error::SerializationErrorKind;
pub use sink_error::SinkError;
pub use sink_error::SinkErrorKind;
pub use time_error::TimeError;
pub use time_error::TimeErrorKind;
pub use transform_error::TransformError;
pub use transform_error::TransformErrorKind;
pub use trust_error::TrustError;
pub use trust_error::TrustErrorKind;
pub use validation_error::ValidationError;
pub use validation_error::ValidationErrorKind;
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/sink_error.rs | lib/src/error/sink_error.rs | use error_chain::error_chain;
use crate::crypto::AteHash;
error_chain! {
types {
SinkError, SinkErrorKind, ResultExt, Result;
}
links {
TrustError(super::TrustError, super::TrustErrorKind);
}
errors {
MissingPublicKey(hash: AteHash) {
description("the public key for signature could not be found in the chain-of-trust"),
display("the public key ({}) for signature could not be found in the chain-of-trust", hash.to_string()),
}
InvalidSignature(hash: AteHash, err: Option<pqcrypto_traits_wasi::Error>) {
description("failed verification of hash while using public key"),
display("failed verification of hash while using public key ({}) - {}", hash.to_string(), err.map(|a| a.to_string()).unwrap_or_else(|| "unknown reason".to_string()))
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/lock_error.rs | lib/src/error/lock_error.rs | use error_chain::error_chain;
error_chain! {
types {
LockError, LockErrorKind, ResultExt, Result;
}
links {
SerializationError(super::SerializationError, super::SerializationErrorKind);
LintError(super::LintError, super::LintErrorKind);
}
errors {
CommitError(err: String) {
description("failed to lock the data object due to issue committing the event to the pipe"),
display("failed to lock the data object due to issue committing the event to the pipe - {}", err),
}
ReceiveError(err: String) {
description("failed to lock the data object due to an error receiving on the pipe"),
display("failed to lock the data object due to an error receiving on the pipe - {}", err),
}
WeakDio {
description("the dIO that created this object has gone out of scope")
display("the dIO that created this object has gone out of scope")
}
}
}
impl From<super::CommitError> for LockError {
fn from(err: super::CommitError) -> LockError {
LockErrorKind::CommitError(err.to_string()).into()
}
}
impl From<super::CommitErrorKind> for LockError {
fn from(err: super::CommitErrorKind) -> LockError {
LockErrorKind::CommitError(err.to_string()).into()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/process_error.rs | lib/src/error/process_error.rs | #[allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use super::*;
#[derive(Debug, Default)]
pub struct ProcessError {
pub sink_errors: Vec<SinkError>,
pub validation_errors: Vec<ValidationError>,
}
impl ProcessError {
pub fn has_errors(&self) -> bool {
if self.sink_errors.is_empty() == false {
return true;
}
if self.validation_errors.is_empty() == false {
return true;
}
false
}
pub fn as_result(self) -> Result<(), ProcessError> {
match self.has_errors() {
true => Err(self),
false => Ok(()),
}
}
}
impl std::fmt::Display for ProcessError {
fn fmt(&self, f: &mut std::fmt::Formatter) -> std::fmt::Result {
let mut err = "Processing error - ".to_string();
for sink in self.sink_errors.iter() {
err = err + &sink.to_string()[..] + " - ";
}
for validation in self.validation_errors.iter() {
err = err + &validation.to_string()[..] + " - ";
}
write!(f, "{}", err)
}
}
impl std::error::Error for ProcessError {}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/comms_error.rs | lib/src/error/comms_error.rs | use error_chain::error_chain;
use rmp_serde::decode::Error as RmpDecodeError;
use serde_json::Error as JsonError;
use tokio::sync::mpsc;
use crate::crypto::KeySize;
error_chain! {
types {
CommsError, CommsErrorKind, ResultExt, Result;
}
links {
SerializationError(super::SerializationError, super::SerializationErrorKind);
ValidationError(super::ValidationError, super::ValidationErrorKind);
LoadError(super::LoadError, super::LoadErrorKind);
}
foreign_links {
IO(::tokio::io::Error);
JoinError(::tokio::task::JoinError);
UrlError(::url::ParseError);
}
errors {
SendError(err: String) {
description("sending error while processing communication"),
display("sending error while processing communication - {}", err),
}
ReceiveError(err: String) {
description("receiving error while processing communication"),
display("receiving error while processing communication - {}", err),
}
MissingCertificate {
description("the server requires wire encryption but you did not supply a certificate"),
display("the server requires wire encryption but you did not supply a certificate"),
}
CertificateTooWeak(needed: KeySize, actual: KeySize) {
description("the server requires strong wire encryption then available in the certificate you supplied"),
display("the server requires strong wire encryption({}) then available in the certificate you supplied({})", needed, actual),
}
ServerCertificateValidation {
description("the server certificate failed the clients validation check"),
display("the server certificate failed the clients validation check"),
}
ServerEncryptionWeak {
description("the server encryption strength is too weak"),
display("the server encryption strength is too weak"),
}
RedirectNotSupported {
description("redirecting to another address is not supported by this process")
display("redirecting to another address is not supported by this process")
}
Disconnected {
description("channel has been disconnected")
display("channel has been disconnected")
}
ReadOnly {
description("the chain is currently read-only")
display("the chain is currently read-only")
}
Timeout {
description("io timeout")
display("io timeout")
}
NoAddress {
description("no address to connect to")
display("no address to connect to")
}
Refused {
description("connection was refused by the destination address")
display("connection was refused by the destination address")
}
ShouldBlock {
description("operation should have blocked but it didnt")
display("operation should have blocked but it didnt")
}
InvalidDomainName {
description("the supplied domain name is not valid")
display("the supplied domain name is not valid")
}
RequiredExplicitNodeId {
description("ate is unable to determine the node_id of this root and thus you must explicily specify it in cfg")
display("ate is unable to determine the node_id of this root and thus you must explicily specify it in cfg")
}
ListenAddressInvalid(addr: String) {
description("could not listen on the address as it is not a valid IPv4/IPv6 address"),
display("could not listen on the address ({}) as it is not a valid IPv4/IPv6 address", addr),
}
NotYetSubscribed {
description("attempted to perform a chain operation on a connection that is not yet subscribed to chain")
}
FatalError(err: String) {
description("error at the root server while processing communication which has terminated the connection"),
display("error at the root server while processing communication which has terminated the connection - {}", err),
}
InternalError(err: String) {
description("internal comms error"),
display("internal comms error - {}", err),
}
WebSocketError(err: String) {
description("web socket error"),
display("web socket error - {}", err),
}
WebSocketInternalError(err: String) {
description("web socket internal error"),
display("web socket internal error - {}", err),
}
UnsupportedProtocolError(proto: String) {
description("unsupported wire protocol"),
display("unsupported wire protocol ({})", proto),
}
}
}
impl From<tokio::time::error::Elapsed> for CommsError {
fn from(_err: tokio::time::error::Elapsed) -> CommsError {
CommsErrorKind::IO(std::io::Error::new(
std::io::ErrorKind::TimedOut,
format!("Timeout while waiting for communication channel").to_string(),
))
.into()
}
}
#[cfg(target_family = "wasm")]
impl From<wasmer_bus_time::prelude::Elapsed> for CommsError {
fn from(_err: wasmer_bus_time::prelude::Elapsed) -> CommsError {
CommsErrorKind::IO(std::io::Error::new(
std::io::ErrorKind::TimedOut,
format!("Timeout while waiting for communication channel").to_string(),
))
.into()
}
}
impl<T> From<mpsc::error::SendError<T>> for CommsError {
fn from(err: mpsc::error::SendError<T>) -> CommsError {
CommsErrorKind::SendError(err.to_string()).into()
}
}
#[cfg(feature = "enable_full")]
impl From<tokio_tungstenite::tungstenite::Error> for CommsError {
fn from(err: tokio_tungstenite::tungstenite::Error) -> CommsError {
CommsErrorKind::WebSocketError(err.to_string()).into()
}
}
#[cfg(feature = "enable_full")]
impl From<tokio_tungstenite::tungstenite::http::uri::InvalidUri> for CommsError {
fn from(err: tokio_tungstenite::tungstenite::http::uri::InvalidUri) -> CommsError {
CommsErrorKind::WebSocketInternalError(format!(
"Failed to establish websocket due to an invalid URI - {}",
err.to_string()
))
.into()
}
}
impl<T> From<tokio::sync::broadcast::error::SendError<T>> for CommsError {
fn from(err: tokio::sync::broadcast::error::SendError<T>) -> CommsError {
CommsErrorKind::SendError(err.to_string()).into()
}
}
impl From<tokio::sync::broadcast::error::RecvError> for CommsError {
fn from(err: tokio::sync::broadcast::error::RecvError) -> CommsError {
CommsErrorKind::ReceiveError(err.to_string()).into()
}
}
impl From<super::CommitError> for CommsError {
fn from(err: super::CommitError) -> CommsError {
match err {
super::CommitError(super::CommitErrorKind::ValidationError(errs), _) => {
CommsErrorKind::ValidationError(errs).into()
}
err => {
CommsErrorKind::InternalError(format!("commit-failed - {}", err.to_string())).into()
}
}
}
}
impl From<super::ChainCreationError> for CommsError {
fn from(err: super::ChainCreationError) -> CommsError {
CommsErrorKind::FatalError(err.to_string()).into()
}
}
impl From<super::ChainCreationErrorKind> for CommsError {
fn from(err: super::ChainCreationErrorKind) -> CommsError {
CommsErrorKind::FatalError(err.to_string()).into()
}
}
impl From<bincode::Error> for CommsError {
fn from(err: bincode::Error) -> CommsError {
CommsErrorKind::SerializationError(super::SerializationErrorKind::BincodeError(err).into())
.into()
}
}
impl From<RmpDecodeError> for CommsError {
fn from(err: RmpDecodeError) -> CommsError {
CommsErrorKind::SerializationError(super::SerializationErrorKind::DecodeError(err).into())
.into()
}
}
impl From<JsonError> for CommsError {
fn from(err: JsonError) -> CommsError {
CommsErrorKind::SerializationError(super::SerializationErrorKind::JsonError(err).into())
.into()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/chain_creation_error.rs | lib/src/error/chain_creation_error.rs | use error_chain::error_chain;
error_chain! {
types {
ChainCreationError, ChainCreationErrorKind, ResultExt, Result;
}
links {
CompactError(super::CompactError, super::CompactErrorKind);
TimeError(super::TimeError, super::TimeErrorKind);
CommsError(super::CommsError, super::CommsErrorKind);
SerializationError(super::SerializationError, super::SerializationErrorKind);
InvokeError(super::InvokeError, super::InvokeErrorKind);
LoadError(super::LoadError, super::LoadErrorKind);
}
foreign_links {
IO(::tokio::io::Error);
UrlInvalid(::url::ParseError);
ProcessError(super::ProcessError);
}
errors {
NoRootFoundInConfig {
description("failed to create chain-of-trust as the root node is not found in the configuration settings"),
display("failed to create chain-of-trust as the root node is not found in the configuration settings"),
}
RootRedirect(expected: u32, actual: u32) {
description("failed to create chain-of-trust as the server you connected is not hosting these chains"),
display("failed to create chain-of-trust as the server you connected (node_id={}) is not hosting these chains - instead you must connect to another node (node_id={})", actual, expected),
}
NoRootFoundForDomain(domain: String) {
description("failed to create chain-of-trust as the root node is not found in the domain"),
display("failed to create chain-of-trust as the root node is not found in the domain [{}]", domain),
}
UnsupportedProtocol(proto: String) {
description("failed to create chain-of-trust as the protocol is not supported"),
display("failed to create chain-of-trust as the protocol is not supported ({})", proto),
}
NotSupported {
description("failed to create chain-of-trust as the operation is not supported. possible causes are calling 'open_by_key' on a Registry which only supports the 'open_by_url'."),
display("failed to create chain-of-trust as the operation is not supported. possible causes are calling 'open_by_key' on a Registry which only supports the 'open_by_url'."),
}
NotThisRoot {
description("failed to create chain-of-trust as this is the wrong root node"),
display("failed to create chain-of-trust as this is the wrong root node"),
}
NotImplemented {
description("failed to create chain-of-trust as the method is not implemented"),
display("failed to create chain-of-trust as the method is not implemented"),
}
NoValidDomain(domain: String) {
description("failed to create chain-of-trust as the address does not have a valid domain name"),
display("failed to create chain-of-trust as the address does not have a valid domain name [{}]", domain),
}
InvalidRoute(route: String) {
description("failed to create chain-of-trust as the chain path is not hosted as a route"),
display("failed to create chain-of-trust as the chain path ({}) is not hosted as a route", route),
}
ServerRejected(reason: crate::mesh::FatalTerminate) {
description("failed to create chain-of-trust as the server refused to create the chain"),
display("failed to create chain-of-trust as the server refused to create the chain ({})", reason),
}
#[cfg(feature="enable_dns")]
DnsProtoError(err: String) {
description("failed to create chain-of-trust due to a DNS error"),
display("failed to create chain-of-trust due to a DNS error - {}", err),
}
#[cfg(feature="enable_dns")]
DnsClientError(err: String) {
description("failed to create chain-of-trust due to a DNS error"),
display("failed to create chain-of-trust due to a DNS error - {}", err),
}
InternalError(err: String) {
description("internal error"),
display("{}", err),
}
}
}
#[cfg(feature = "enable_dns")]
impl From<::trust_dns_proto::error::ProtoError> for ChainCreationError {
fn from(err: ::trust_dns_proto::error::ProtoError) -> ChainCreationError {
ChainCreationErrorKind::DnsProtoError(err.to_string()).into()
}
}
#[cfg(feature = "enable_dns")]
impl From<::trust_dns_client::error::ClientError> for ChainCreationError {
fn from(err: ::trust_dns_client::error::ClientError) -> ChainCreationError {
ChainCreationErrorKind::DnsClientError(err.to_string()).into()
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/bus_error.rs | lib/src/error/bus_error.rs | use error_chain::error_chain;
error_chain! {
types {
BusError, BusErrorKind, ResultExt, Result;
}
links {
LoadError(super::LoadError, super::LoadErrorKind);
SerializationError(super::SerializationError, super::SerializationErrorKind);
LockError(super::LockError, super::LockErrorKind);
TransformError(super::TransformError, super::TransformErrorKind);
}
errors {
ReceiveError(err: String) {
description("failed to receive event from bus due to an internal error"),
display("failed to receive event from bus due to an internal error: '{}'", err),
}
ChannelClosed {
description("failed to receive event from bus as the channel is closed"),
display("failed to receive event from bus as the channel is closed"),
}
SaveParentFirst {
description("you must save the parent object before attempting to initiate a bus from this vector"),
display("you must save the parent object before attempting to initiate a bus from this vector"),
}
WeakDio {
description("the dio that created this object has gone out of scope"),
display("the dio that created this object has gone out of scope"),
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/error/validation_error.rs | lib/src/error/validation_error.rs | use error_chain::error_chain;
error_chain! {
types {
ValidationError, ValidationErrorKind, ResultExt, Result;
}
links {
TrustError(super::TrustError, super::TrustErrorKind);
TimeError(super::TimeError, super::TimeErrorKind);
}
errors {
Denied(reason: String) {
description("the data was rejected"),
display("the data was rejected - {}", reason),
}
Many(errors: Vec<ValidationError>) {
description("the data was rejected by one (or more) of the validators"),
display("the data was rejected by {} of the validators", errors.len()),
}
AllAbstained {
description("none of the validators approved this data object event")
display("none of the validators approved this data object event")
}
Detached {
description("the data object event is detached from the chain of trust")
display("the data object event is detached from the chain of trust")
}
NoSignatures {
description("the data object event has no signatures and one is required to store it at this specific location within the chain of trust")
display("the data object event has no signatures and one is required to store it at this specific location within the chain of trust")
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/flow/mod.rs | lib/src/flow/mod.rs | #![allow(unused_imports)]
use async_trait::async_trait;
pub mod basic;
use crate::{crypto::EncryptKey, session::AteSessionUser};
use super::chain::Chain;
use super::chain::ChainKey;
use super::conf::ChainBuilder;
use super::conf::ConfAte;
use super::crypto::PrivateSignKey;
use super::crypto::PublicSignKey;
use super::error::ChainCreationError;
use super::spec::TrustMode;
use crate::crypto::KeySize;
use std::sync::Arc;
pub type MessageOfTheDay = Option<String>;
pub enum OpenAction {
/// The open request will be denied (with the following reason)
Deny { reason: String },
/// The open action has resulted in a chain that can be consumed as a distributed chain
/// (distributed chains can be validated without the need for a central authority as the
/// signatures are cryptographically signed)
DistributedChain { chain: Arc<Chain> },
/// The open action has resulted in a chain that can be consumed as a centralized chain
/// (centralized chains are higher performance as signatures are not needed to verify the
/// integrity of the tree however it requires the clients to trust the integrity checks
/// of the server they are connecting to)
CentralizedChain { chain: Arc<Chain> },
/// The open action has resulted in a private chain that can only be consumed if
/// the caller has a copy of the encryption key
PrivateChain {
chain: Arc<Chain>,
session: AteSessionUser,
},
}
#[async_trait]
pub trait OpenFlow
where
Self: Send + Sync,
{
async fn open(
&self,
builder: ChainBuilder,
key: &ChainKey,
wire_encryption: Option<KeySize>,
) -> Result<OpenAction, ChainCreationError>;
async fn message_of_the_day(
&self,
chain: &Arc<Chain>,
) -> Result<Option<String>, ChainCreationError>;
fn hello_path(&self) -> &str;
}
pub async fn all_persistent_and_centralized() -> Box<basic::OpenStaticBuilder> {
Box::new(basic::OpenStaticBuilder::all_persistent_and_centralized().await)
}
pub async fn all_persistent_and_distributed() -> Box<basic::OpenStaticBuilder> {
Box::new(basic::OpenStaticBuilder::all_persistent_and_distributed().await)
}
pub async fn all_ethereal_distributed() -> Box<basic::OpenStaticBuilder> {
Box::new(basic::OpenStaticBuilder::all_ethereal_distributed().await)
}
pub async fn all_ethereal_centralized() -> Box<basic::OpenStaticBuilder> {
Box::new(basic::OpenStaticBuilder::all_ethereal_centralized().await)
}
pub async fn all_persistent_and_centralized_with_root_key(
root_key: PublicSignKey,
) -> Box<basic::OpenStaticBuilder> {
Box::new(basic::OpenStaticBuilder::all_persistent_and_centralized_with_root_key(root_key).await)
}
pub async fn all_persistent_and_distributed_with_root_key(
root_key: PublicSignKey,
) -> Box<basic::OpenStaticBuilder> {
Box::new(basic::OpenStaticBuilder::all_persistent_and_distributed_with_root_key(root_key).await)
}
pub async fn all_ethereal_centralized_with_root_key(
root_key: PublicSignKey,
) -> Box<basic::OpenStaticBuilder> {
Box::new(basic::OpenStaticBuilder::all_ethereal_centralized_with_root_key(root_key).await)
}
pub async fn all_ethereal_distributed_with_root_key(
root_key: PublicSignKey,
) -> Box<basic::OpenStaticBuilder> {
Box::new(basic::OpenStaticBuilder::all_ethereal_distributed_with_root_key(root_key).await)
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/flow/basic.rs | lib/src/flow/basic.rs | #![allow(unused_imports)]
use async_trait::async_trait;
use std::sync::Arc;
use tracing::{debug, error, info};
use super::OpenAction;
use super::OpenFlow;
use crate::chain::Chain;
use crate::chain::ChainKey;
use crate::conf::ChainBuilder;
use crate::conf::ConfAte;
use crate::crypto::AteHash;
use crate::crypto::EncryptKey;
use crate::crypto::KeySize;
use crate::crypto::PublicSignKey;
use crate::error::*;
use crate::spec::*;
pub struct OpenStaticBuilder {
temporal: bool,
root_key: Option<PublicSignKey>,
centralized_integrity: bool,
}
impl OpenStaticBuilder {
fn new(
temporal: bool,
centralized_integrity: bool,
root_key: Option<PublicSignKey>,
) -> OpenStaticBuilder {
OpenStaticBuilder {
temporal,
centralized_integrity,
root_key,
}
}
pub async fn all_persistent_and_centralized() -> OpenStaticBuilder {
OpenStaticBuilder::new(false, true, None)
}
pub async fn all_persistent_and_distributed() -> OpenStaticBuilder {
OpenStaticBuilder::new(false, false, None)
}
pub async fn all_ethereal_centralized() -> OpenStaticBuilder {
OpenStaticBuilder::new(true, true, None)
}
pub async fn all_ethereal_distributed() -> OpenStaticBuilder {
OpenStaticBuilder::new(true, false, None)
}
pub async fn all_persistent_and_centralized_with_root_key(
root_key: PublicSignKey,
) -> OpenStaticBuilder {
OpenStaticBuilder::new(false, true, Some(root_key))
}
pub async fn all_persistent_and_distributed_with_root_key(
root_key: PublicSignKey,
) -> OpenStaticBuilder {
OpenStaticBuilder::new(false, false, Some(root_key))
}
pub async fn all_ethereal_centralized_with_root_key(
root_key: PublicSignKey,
) -> OpenStaticBuilder {
OpenStaticBuilder::new(true, true, Some(root_key))
}
pub async fn all_ethereal_distributed_with_root_key(
root_key: PublicSignKey,
) -> OpenStaticBuilder {
OpenStaticBuilder::new(true, false, Some(root_key))
}
}
#[async_trait]
impl OpenFlow for OpenStaticBuilder {
fn hello_path(&self) -> &str {
"/"
}
async fn message_of_the_day(
&self,
_chain: &Arc<Chain>,
) -> Result<Option<String>, ChainCreationError> {
Ok(None)
}
async fn open(
&self,
mut builder: ChainBuilder,
key: &ChainKey,
_wire_encryption: Option<KeySize>,
) -> Result<OpenAction, ChainCreationError> {
debug!("open_static: {}", key.to_string());
if let Some(root_key) = &self.root_key {
builder = builder.add_root_public_key(root_key);
}
Ok(match self.centralized_integrity {
true => {
debug!("chain-builder: centralized integrity");
OpenAction::CentralizedChain {
chain: builder.temporal(self.temporal).build().open(&key).await?,
}
}
false => {
debug!("chain-builder: distributed integrity");
OpenAction::DistributedChain {
chain: builder.temporal(self.temporal).build().open(&key).await?,
}
}
})
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/stream.rs | lib/src/comms/stream.rs | use crate::crypto::InitializationVector;
use crate::engine::timeout as tokio_timeout;
use bytes::BytesMut;
use error_chain::bail;
use std::io;
use std::ops::DerefMut;
use std::collections::VecDeque;
use std::fs::File;
use std::pin::Pin;
use std::task::Context;
use std::task::Poll;
use std::net::SocketAddr;
use std::result::Result;
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use std::io::Error as TError;
use std::io::ErrorKind as TErrorKind;
#[cfg(feature = "enable_full")]
use tokio::net::tcp::OwnedReadHalf;
#[cfg(feature = "enable_full")]
use tokio::net::tcp::OwnedWriteHalf;
#[cfg(feature = "enable_full")]
use tokio::net::TcpStream;
use tokio::sync::mpsc;
use tokio::io::AsyncRead;
use tokio::io::AsyncWrite;
use tokio::io::ReadBuf;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use wasmer_bus_ws::prelude::RecvHalf as WasmRecvHalf;
use wasmer_bus_ws::prelude::SendHalf as WasmSendHalf;
use wasmer_bus_ws::prelude::WebSocket as WasmWebSocket;
use bytes::Bytes;
use crate::comms::PacketData;
use crate::crypto::EncryptKey;
#[cfg(feature = "enable_full")]
use super::helper::setup_tcp_stream;
pub use ate_comms::StreamRx;
pub use ate_comms::StreamTx;
pub use ate_comms::StreamReadable;
pub use ate_comms::StreamWritable;
pub use ate_comms::MessageProtocolVersion;
pub use ate_comms::StreamClient;
pub use ate_comms::StreamSecurity;
#[cfg(feature = "enable_dns")]
pub use ate_comms::Dns;
#[cfg(feature = "enable_server")]
use {
hyper_tungstenite::hyper::upgrade::Upgraded as HyperUpgraded,
hyper_tungstenite::tungstenite::Error as HyperError,
hyper_tungstenite::tungstenite::Message as HyperMessage,
hyper_tungstenite::WebSocketStream as HyperWebSocket,
};
#[cfg(feature = "enable_full")]
use {
tokio::io::{AsyncReadExt, AsyncWriteExt},
tokio_tungstenite::{tungstenite::Message, WebSocketStream},
};
use crate::error::*;
use super::NodeId;
use ate_comms::MessageProtocolApi;
#[derive(Debug, Clone, Copy)]
pub enum StreamProtocol {
Tcp,
WebSocket,
SecureWebSocket,
}
impl std::str::FromStr for StreamProtocol {
type Err = CommsError;
fn from_str(s: &str) -> Result<StreamProtocol, CommsError> {
let ret = match s {
"tcp" => StreamProtocol::Tcp,
"ws" => StreamProtocol::WebSocket,
"wss" => StreamProtocol::SecureWebSocket,
_ => {
bail!(CommsErrorKind::UnsupportedProtocolError(s.to_string()));
}
};
Ok(ret)
}
}
impl StreamProtocol {
pub fn to_scheme(&self) -> String {
let ret = match self {
StreamProtocol::Tcp => "tcp",
StreamProtocol::WebSocket => "ws",
StreamProtocol::SecureWebSocket => "wss",
};
ret.to_string()
}
pub fn to_string(&self) -> String {
self.to_scheme()
}
pub fn default_port(&self) -> u16 {
match self {
StreamProtocol::Tcp => 5000,
StreamProtocol::WebSocket => 80,
StreamProtocol::SecureWebSocket => 443,
}
}
pub fn is_tcp(&self) -> bool {
match self {
StreamProtocol::Tcp => true,
StreamProtocol::WebSocket => false,
StreamProtocol::SecureWebSocket => false,
}
}
pub fn is_web_socket(&self) -> bool {
match self {
StreamProtocol::Tcp => false,
StreamProtocol::WebSocket => true,
StreamProtocol::SecureWebSocket => true,
}
}
}
impl std::fmt::Display for StreamProtocol {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
write!(f, "{}", self.to_scheme())
}
}
impl StreamProtocol {
pub fn make_url(
&self,
domain: String,
port: u16,
path: String,
) -> Result<url::Url, url::ParseError> {
let scheme = self.to_scheme();
let input = match port {
a if a == self.default_port() => match path.starts_with("/") {
true => format!("{}://{}:{}{}", scheme, domain, port, path),
false => format!("{}://{}:{}/{}", scheme, domain, port, path),
},
_ => match path.starts_with("/") {
true => format!("{}://{}{}", scheme, domain, path),
false => format!("{}://{}/{}", scheme, domain, path),
},
};
url::Url::parse(input.as_str())
}
pub fn parse(url: &url::Url) -> Result<StreamProtocol, CommsError> {
let scheme = url.scheme().to_string().to_lowercase();
StreamProtocol::from_str(scheme.as_str())
}
#[cfg(feature = "enable_full")]
pub async fn upgrade_client_and_split(&self, stream: TcpStream) -> Result<
(
Box<dyn AsyncRead + Send + Sync + Unpin + 'static>,
Box<dyn AsyncWrite + Send + Sync + Unpin + 'static>
), CommsError>
{
// Setup the TCP stream
setup_tcp_stream(&stream)?;
// Convert the stream into rx/tx
match self {
StreamProtocol::Tcp => {
let (rx, tx) = stream.into_split();
Ok((
Box::new(rx),
Box::new(tx)
))
},
wire_protocol if self.is_web_socket() => {
let port = match wire_protocol {
StreamProtocol::SecureWebSocket => 443,
_ => 80
};
let url = StreamProtocol::WebSocket.make_url(
"localhost".to_string(),
port,
"/".to_string(),
)?;
let mut request = tokio_tungstenite::tungstenite::http::Request::new(());
*request.uri_mut() =
tokio_tungstenite::tungstenite::http::Uri::from_str(url.as_str())?;
let (stream, response) = tokio_tungstenite::client_async(request, stream).await?;
if response.status().is_client_error() {
bail!(CommsErrorKind::WebSocketInternalError(format!(
"HTTP error while performing WebSocket handshack - status-code={}",
response.status().as_u16()
)));
}
use futures_util::StreamExt;
let (sink, stream) = stream.split();
Ok((
Box::new(wasmer_bus_ws::ws::RecvHalf::new(stream)),
Box::new(wasmer_bus_ws::ws::SendHalf::new(sink))
))
},
wire_protocol => {
bail!(CommsErrorKind::UnsupportedProtocolError(format!("the protocol isnt supported - {}", wire_protocol)));
}
}
}
#[cfg(feature = "enable_full")]
pub async fn upgrade_server_and_split(&self, stream: TcpStream, timeout: Duration) -> Result<
(
Box<dyn AsyncRead + Send + Sync + Unpin + 'static>,
Box<dyn AsyncWrite + Send + Sync + Unpin + 'static>
), CommsError>
{
// Setup the TCP stream
setup_tcp_stream(&stream)?;
// Convert the stream into rx/tx
match self {
StreamProtocol::Tcp => {
let (rx, tx) = stream.into_split();
Ok((
Box::new(rx),
Box::new(tx)
))
},
StreamProtocol::WebSocket |
StreamProtocol::SecureWebSocket => {
let wait = tokio_tungstenite::accept_async(stream);
let socket = tokio_timeout(timeout, wait).await??;
//use tokio::io::*;
use futures_util::StreamExt;
let (sink, stream) = socket.split();
Ok((
Box::new(wasmer_bus_ws::ws::RecvHalf::new(stream)),
Box::new(wasmer_bus_ws::ws::SendHalf::new(sink))
))
}
}
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/test.rs | lib/src/comms/test.rs | #![allow(unused_imports)]
#[cfg(feature = "enable_server")]
use super::Listener;
use super::MeshConfig;
use crate::comms::Metrics;
use crate::comms::NodeId;
use crate::comms::PacketData;
use crate::comms::PacketWithContext;
#[cfg(feature = "enable_server")]
use crate::comms::ServerProcessor;
use crate::comms::Throttle;
use crate::comms::Tx;
use crate::crypto::{EncryptKey, InitializationVector, PrivateEncryptKey, PublicEncryptKey};
use crate::engine::TaskEngine;
use crate::error::*;
use crate::prelude::*;
use async_trait::async_trait;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::net::SocketAddr;
use std::sync::Arc;
use std::sync::Mutex as StdMutex;
use tokio::sync::broadcast;
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
#[cfg(test)]
#[derive(Serialize, Deserialize, Debug, Clone)]
enum TestMessage {
Noop,
Rejected(Box<TestMessage>),
Ping(String),
Pong(String),
}
#[cfg(test)]
impl Default for TestMessage {
fn default() -> TestMessage {
TestMessage::Noop
}
}
#[derive(Default)]
struct DummyContext {}
#[cfg(all(feature = "enable_server", feature = "enable_client"))]
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_server_client_for_comms_with_tcp() -> Result<(), AteError> {
test_server_client_for_comms(StreamProtocol::Tcp, 4001).await
}
#[cfg(all(feature = "enable_server", feature = "enable_client"))]
#[tokio::main(flavor = "current_thread")]
#[test]
async fn test_server_client_for_comms_with_websocket() -> Result<(), AteError> {
test_server_client_for_comms(StreamProtocol::WebSocket, 4011).await
}
#[cfg(test)]
pub(crate) fn mock_test_mesh(port: u16) -> ConfMesh {
let mut roots = Vec::new();
#[cfg(feature = "enable_dns")]
roots.push(MeshAddress::new(
IpAddr::from_str("127.0.0.1").unwrap(),
port,
));
#[cfg(not(feature = "enable_dns"))]
roots.push(MeshAddress::new("localhost", port));
let remote = url::Url::parse(format!("{}://localhost", Registry::guess_schema(port)).as_str()).unwrap();
let ret = ConfMesh::new("localhost", remote, roots.iter());
ret
}
#[cfg(all(feature = "enable_server", feature = "enable_client"))]
#[cfg(test)]
async fn test_server_client_for_comms(
wire_protocol: StreamProtocol,
port: u16,
) -> Result<(), AteError> {
use crate::comms::helper::InboxProcessor;
async move {
crate::utils::bootstrap_test_env();
let listener;
let wire_format = SerializationFormat::MessagePack;
let cert = PrivateEncryptKey::generate(KeySize::Bit192);
{
// Start the server
info!("starting listen server on 127.0.0.1");
let mut cfg = mock_test_mesh(port);
cfg.wire_protocol = wire_protocol;
cfg.wire_format = wire_format;
cfg.wire_encryption = Some(KeySize::Bit192);
let cfg = MeshConfig::new(cfg)
.listen_on(IpAddr::from_str("127.0.0.1").unwrap(), port)
.listen_cert(cert.clone());
#[derive(Debug, Clone, Default)]
struct Handler {}
#[async_trait]
impl ServerProcessor<TestMessage, DummyContext> for Handler {
async fn process(
&'_ self,
pck: PacketWithContext<TestMessage, DummyContext>,
tx: &'_ mut Tx,
) -> Result<(), CommsError> {
let pck: super::Packet<TestMessage> = pck.packet;
match &pck.msg {
TestMessage::Ping(txt) => {
tx.send_reply_msg(TestMessage::Pong(txt.clone()))
.await
.unwrap();
}
_ => {}
};
Ok(())
}
async fn shutdown(&self, _addr: SocketAddr) {}
}
let (exit_tx, _exit_rx) = broadcast::channel(1);
let server_id = NodeId::generate_server_id(0);
listener =
Listener::new(&cfg, server_id, Arc::new(Handler::default()), exit_tx).await?;
{
let mut guard = listener.lock().unwrap();
guard.add_route("/comm-test")?;
};
};
#[cfg(feature = "enable_dns")]
{
// Start the client
info!("start another client that will connect to the server");
#[derive(Debug, Clone, Default)]
struct Handler {}
#[async_trait]
impl InboxProcessor<TestMessage, ()> for Handler {
async fn process(
&mut self,
pck: PacketWithContext<TestMessage, ()>,
) -> Result<(), CommsError> {
let pck: super::Packet<TestMessage> = pck.packet;
if let TestMessage::Pong(txt) = pck.msg {
assert_eq!("hello", txt.as_str());
} else {
panic!("Wrong message type returned")
}
Ok(())
}
async fn shutdown(&mut self, _addr: SocketAddr) {}
}
let inbox = Handler::default();
let client_id = NodeId::generate_client_id();
let metrics = Arc::new(StdMutex::new(Metrics::default()));
let throttle = Arc::new(StdMutex::new(Throttle::default()));
let (_exit_tx, exit_rx) = broadcast::channel(1);
let mut cfg = mock_test_mesh(port);
cfg.wire_protocol = wire_protocol;
cfg.wire_format = wire_format;
cfg.wire_encryption = Some(KeySize::Bit192);
cfg.certificate_validation =
CertificateValidation::AllowedCertificates(vec![cert.hash()]);
let cfg = MeshConfig::new(cfg).connect_to(MeshAddress {
host: IpAddr::from_str("127.0.0.1").unwrap(),
port,
});
let mut client_tx = super::connect(
&cfg,
"/comm-test".to_string(),
client_id,
inbox,
metrics,
throttle,
exit_rx,
)
.await?;
// We need to test it alot
info!("send lots of hellos");
for _n in 0..1000 {
// Send a ping
let test = "hello".to_string();
client_tx
.send_reply_msg(TestMessage::Ping(test.clone()))
.await
.unwrap();
}
}
Ok(())
}
.await
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/throttle.rs | lib/src/comms/throttle.rs | #[derive(Debug, Clone, Default)]
pub struct Throttle {
pub download_per_second: Option<u64>,
pub upload_per_second: Option<u64>,
pub delete_only: bool,
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/packet.rs | lib/src/comms/packet.rs | #![allow(unused_imports)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use tokio::sync::mpsc;
use crate::comms::*;
use crate::error::*;
use crate::spec::*;
use bytes::Bytes;
use serde::{de::DeserializeOwned, Deserialize, Serialize};
use std::sync::Arc;
#[derive(Debug, Clone)]
pub struct PacketData {
pub bytes: Bytes,
pub wire_format: SerializationFormat,
}
#[derive(Debug)]
pub(crate) struct PacketWithContext<M, C>
where
M: Send + Sync + Clone,
C: Send + Sync,
{
pub packet: Packet<M>,
pub data: PacketData,
#[allow(dead_code)]
pub context: Arc<C>,
#[allow(dead_code)]
pub id: NodeId,
#[allow(dead_code)]
pub peer_id: NodeId,
}
impl<M, C> PacketWithContext<M, C>
where
M: Send + Sync + Serialize + DeserializeOwned + Clone,
C: Send + Sync,
{
#[allow(dead_code)]
pub(crate) async fn reply(&self, tx: &mut StreamTx, msg: M) -> Result<(), CommsError> {
Ok(Self::reply_at(tx, self.data.wire_format, msg).await?)
}
#[allow(dead_code)]
pub(crate) async fn reply_at(
tx: &mut StreamTx,
format: SerializationFormat,
msg: M,
) -> Result<(), CommsError> {
Ok(PacketData::reply_at(tx, format, msg).await?)
}
}
impl PacketData {
#[allow(dead_code)]
pub(crate) async fn reply<M>(&self, tx: &mut StreamTx, msg: M) -> Result<(), CommsError>
where
M: Send + Sync + Serialize + DeserializeOwned + Clone,
{
Ok(Self::reply_at(tx, self.wire_format, msg).await?)
}
#[allow(dead_code)]
pub(crate) async fn reply_at<M>(
tx: &mut StreamTx,
wire_format: SerializationFormat,
msg: M,
) -> Result<(), CommsError>
where
M: Send + Sync + Serialize + DeserializeOwned + Clone,
{
let pck = PacketData {
bytes: Bytes::from(wire_format.serialize(msg)
.map_err(SerializationError::from)?),
wire_format,
};
tx.write(&pck.bytes[..]).await?;
Ok(())
}
}
#[derive(Serialize, Deserialize, Debug, Clone)]
pub(crate) struct Packet<M>
where
M: Send + Sync + Clone,
{
pub msg: M,
}
impl<M> From<M> for Packet<M>
where
M: Send + Sync + Serialize + DeserializeOwned + Clone,
{
fn from(msg: M) -> Packet<M> {
Packet { msg }
}
}
impl<M> Packet<M>
where
M: Send + Sync + Serialize + DeserializeOwned + Clone,
{
pub(crate) fn to_packet_data(
self,
wire_format: SerializationFormat,
) -> Result<PacketData, CommsError> {
let buf = wire_format.serialize(self.msg)
.map_err(SerializationError::from)?;
Ok(PacketData {
bytes: Bytes::from(buf),
wire_format,
})
}
}
| rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
wasmerio/ate | https://github.com/wasmerio/ate/blob/87635b5b49c4163885ce840f4f1c2f30977f40cc/lib/src/comms/router.rs | lib/src/comms/router.rs | use async_trait::async_trait;
use error_chain::bail;
use std::net::SocketAddr;
use std::ops::DerefMut;
use tokio::sync::Mutex;
use tokio::io::AsyncRead;
use tokio::io::AsyncWrite;
#[cfg(feature = "enable_full")]
use tokio::net::TcpStream;
use std::sync::Arc;
use std::time::Duration;
#[allow(unused_imports, dead_code)]
use tracing::{debug, error, info, instrument, span, trace, warn, Level};
use fxhash::FxHashMap;
use http::*;
use std::result::Result;
use crate::comms::{
StreamRx,
StreamTx,
Upstream,
StreamProtocol,
NodeId,
hello::{
HelloMetadata,
},
key_exchange,
};
#[cfg(feature = "enable_server")]
use crate::comms::{
hello::{
mesh_hello_exchange_receiver
},
};
use crate::spec::SerializationFormat;
use crate::crypto::{
KeySize,
PrivateEncryptKey,
EncryptKey,
};
use crate::error::{
CommsError,
CommsErrorKind
};
#[async_trait]
pub trait StreamRoute
where Self: Send + Sync
{
async fn accepted_web_socket(
&self,
rx: StreamRx,
rx_proto: StreamProtocol,
tx: Upstream,
hello: HelloMetadata,
sock_addr: SocketAddr,
wire_encryption: Option<EncryptKey>,
) -> Result<(), CommsError>;
}
#[async_trait]
pub trait RawStreamRoute
where Self: Send + Sync
{
async fn accepted_raw_web_socket(
&self,
rx: Box<dyn AsyncRead + Send + Sync + Unpin + 'static>,
tx: Box<dyn AsyncWrite + Send + Sync + Unpin + 'static>,
uri: http::Uri,
headers: http::HeaderMap,
sock_addr: SocketAddr,
server_id: NodeId,
) -> Result<(), CommsError>;
}
#[async_trait]
pub trait RawWebRoute
where Self: Send + Sync
{
async fn accepted_raw_post_request(
&self,
uri: http::Uri,
headers: http::HeaderMap,
sock_addr: SocketAddr,
server_id: NodeId,
body: Vec<u8>,
) -> Result<Vec<u8>, (Vec<u8>, StatusCode)>;
async fn accepted_raw_put_request(
&self,
uri: http::Uri,
headers: http::HeaderMap,
sock_addr: SocketAddr,
server_id: NodeId,
body: Vec<u8>,
) -> Result<Vec<u8>, (Vec<u8>, StatusCode)>;
}
#[allow(dead_code)]
pub struct StreamRouter {
wire_format: SerializationFormat,
wire_protocol: StreamProtocol,
min_encryption: Option<KeySize>,
server_cert: Option<PrivateEncryptKey>,
server_id: NodeId,
timeout: Duration,
post_routes: Mutex<FxHashMap<String, Arc<dyn RawWebRoute>>>,
put_routes: Mutex<FxHashMap<String, Arc<dyn RawWebRoute>>>,
raw_routes: Mutex<FxHashMap<String, Arc<dyn RawStreamRoute>>>,
routes: Mutex<FxHashMap<String, Arc<dyn StreamRoute>>>,
default_route: Option<Arc<dyn StreamRoute>>,
}
impl StreamRouter {
pub fn new(format: SerializationFormat, protocol: StreamProtocol, min_encryption: Option<KeySize>, server_cert: Option<PrivateEncryptKey>, server_id: NodeId, timeout: Duration) -> Self {
StreamRouter {
wire_format: format,
wire_protocol: protocol,
min_encryption,
server_cert,
server_id,
timeout,
post_routes: Mutex::new(FxHashMap::default()),
put_routes: Mutex::new(FxHashMap::default()),
raw_routes: Mutex::new(FxHashMap::default()),
routes: Mutex::new(FxHashMap::default()),
default_route: None,
}
}
pub fn set_default_route(&mut self, route: Arc<dyn StreamRoute>) {
self.default_route = Some(route);
}
pub async fn add_socket_route(&mut self, path: &str, route: Arc<dyn StreamRoute>) {
let mut guard = self.routes.lock().await;
guard.insert(path.to_string(), route);
}
pub async fn add_raw_route(&mut self, path: &str, raw_route: Arc<dyn RawStreamRoute>) {
let mut guard = self.raw_routes.lock().await;
guard.insert(path.to_string(), raw_route);
}
pub async fn add_post_route(&mut self, path: &str, web_route: Arc<dyn RawWebRoute>) {
let mut guard = self.post_routes.lock().await;
guard.insert(path.to_string(), web_route);
}
pub async fn add_put_route(&mut self, path: &str, web_route: Arc<dyn RawWebRoute>) {
let mut guard = self.put_routes.lock().await;
guard.insert(path.to_string(), web_route);
}
#[cfg(feature = "enable_server")]
pub async fn try_web_request(
&self,
_body: Vec<u8>,
_sock_addr: SocketAddr,
uri: uri::Uri,
_headers: http::HeaderMap,
) -> Result<Vec<u8>, StatusCode> {
let path = uri.path();
let _route = {
let request_routes = self.post_routes.lock().await;
match request_routes
.iter()
.filter(|(k, _)| path.starts_with(k.as_str()))
.next()
{
Some(r) => r.1.clone(),
None => {
return Err(StatusCode::BAD_REQUEST);
}
}
};
Err(StatusCode::BAD_REQUEST)
}
#[cfg(feature = "enable_server")]
pub async fn accept_socket(
&self,
rx: Box<dyn AsyncRead + Send + Sync + Unpin + 'static>,
tx: Box<dyn AsyncWrite + Send + Sync + Unpin + 'static>,
sock_addr: SocketAddr,
uri: Option<http::Uri>,
headers: Option<http::HeaderMap>
) -> Result<(), CommsError>
{
// Attempt to open it with as a raw stream (if a URI is supplied)
if let (Some(uri), Some(headers)) = (uri, headers)
{
let path = uri.path().to_string();
let raw_routes = self.raw_routes.lock().await;
for (test, raw_route) in raw_routes.iter() {
if path.starts_with(test) {
drop(test);
let route = {
let r = raw_route.clone();
drop(raw_route);
r
};
drop(raw_routes);
// Execute the accept command
route.accepted_raw_web_socket(rx, tx, uri, headers, sock_addr, self.server_id).await?;
return Ok(());
}
}
}
// Say hello
let (mut proto, hello_meta) = mesh_hello_exchange_receiver(
rx,
tx,
self.server_id,
self.min_encryption.clone(),
self.wire_format,
)
.await?;
let wire_encryption = hello_meta.encryption;
let node_id = hello_meta.client_id;
// If wire encryption is required then make sure a certificate of sufficient size was supplied
let ek = match &wire_encryption {
Some(size) => {
match self.server_cert.as_ref() {
None => {
return Err(CommsError::from(CommsErrorKind::MissingCertificate).into());
}
Some(a) if a.size() < *size => {
return Err(CommsError::from(CommsErrorKind::CertificateTooWeak(size.clone(), a.size())).into());
}
Some(server_key) =>
{
// If we are using wire encryption then exchange secrets
let ek = key_exchange::mesh_key_exchange_receiver(proto.deref_mut(), server_key.clone())
.await?;
Some(ek)
}
}
}
None => None
};
let (rx, tx) = proto.split(ek);
let tx = Upstream {
id: node_id,
outbox: tx,
wire_format: self.wire_format,
};
// Look for a registered route for this path
{
let routes = self.routes.lock().await;
for (test, route) in routes.iter() {
if hello_meta.path.starts_with(test) {
drop(test);
let route = {
let r = route.clone();
drop(route);
r
};
drop(routes);
// Execute the accept command
route.accepted_web_socket(rx, self.wire_protocol, tx, hello_meta, sock_addr, ek).await?;
return Ok(());
}
}
}
// Check the default route and execute the accept command
if let Some(route) = &self.default_route {
route.accepted_web_socket(rx, self.wire_protocol, tx, hello_meta, sock_addr, ek).await?;
return Ok(());
}
// Fail as no routes are found
error!(
"There are no routes for this connection path ({})",
hello_meta.path,
);
return Ok(());
}
#[cfg(feature = "enable_server")]
pub async fn post_request(
&self,
body: Vec<u8>,
sock_addr: SocketAddr,
uri: http::Uri,
headers: http::HeaderMap,
) -> Result<Vec<u8>, (Vec<u8>, StatusCode)> {
// Get the path
let path = uri.path();
// Look for a registered route for this path
let routes = self.post_routes.lock().await;
for (test, route) in routes.iter() {
if path.starts_with(test) {
drop(test);
let route = {
let r = route.clone();
drop(route);
r
};
drop(routes);
// Execute the accept command
return route.accepted_raw_post_request(uri, headers, sock_addr, self.server_id, body)
.await;
}
}
// Fail
let msg = format!("Bad Request (No Route)").as_bytes().to_vec();
return Err((msg, StatusCode::BAD_REQUEST));
}
#[cfg(feature = "enable_server")]
pub async fn put_request(
&self,
body: Vec<u8>,
sock_addr: SocketAddr,
uri: http::Uri,
headers: http::HeaderMap,
) -> Result<Vec<u8>, (Vec<u8>, StatusCode)> {
// Get the path
let path = uri.path();
// Look for a registered route for this path
let routes = self.post_routes.lock().await;
for (test, route) in routes.iter() {
if path.starts_with(test) {
drop(test);
let route = {
let r = route.clone();
drop(route);
r
};
drop(routes);
// Execute the accept command
return route.accepted_raw_put_request(uri, headers, sock_addr, self.server_id, body)
.await;
}
}
// Fail
let msg = format!("Bad Request (No Route)").as_bytes().to_vec();
return Err((msg, StatusCode::BAD_REQUEST));
}
} | rust | Apache-2.0 | 87635b5b49c4163885ce840f4f1c2f30977f40cc | 2026-01-04T20:14:33.413949Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.