repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/src/v4/mod.rs
src/v4/mod.rs
//! # DHCPv4 //! //! This module provides types and utility functions for encoding/decoding a DHCPv4 message. //! //! ## Example - constructing messages //! //! ```rust //! # fn main() -> Result<(), Box<dyn std::error::Error>> { //! use dhcproto::{v4, Encodable, Encoder}; //! // arbitrary hardware addr //! let chaddr = vec![ //! 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, //! ]; //! // construct a new Message //! let mut msg = v4::Message::default(); //! msg.set_flags(v4::Flags::default().set_broadcast()) // set broadcast to true //! .set_chaddr(&chaddr) // set chaddr //! .opts_mut() //! .insert(v4::DhcpOption::MessageType(v4::MessageType::Discover)); // set msg type //! //! // set some more options //! msg.opts_mut() //! .insert(v4::DhcpOption::ParameterRequestList(vec![ //! v4::OptionCode::SubnetMask, //! v4::OptionCode::Router, //! v4::OptionCode::DomainNameServer, //! v4::OptionCode::DomainName, //! ])); //! msg.opts_mut() //! .insert(v4::DhcpOption::ClientIdentifier(chaddr)); //! //! // now encode to bytes //! let mut buf = Vec::new(); //! let mut e = Encoder::new(&mut buf); //! msg.encode(&mut e)?; //! //! // buf now has the contents of the encoded DHCP message //! # Ok(()) } //! ``` //! //! ## Example - decoding messages //! //! ```rust //! # fn bootreq() -> Vec<u8> { //! # vec![ //! # 1u8, // op //! # 2, // htype //! # 3, // hlen //! # 4, // ops //! # 5, 6, 7, 8, // xid //! # 9, 10, // secs //! # 11, 12, // flags //! # 13, 14, 15, 16, // ciaddr //! # 17, 18, 19, 20, // yiaddr //! # 21, 22, 23, 24, // siaddr //! # 25, 26, 27, 28, // giaddr //! # 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, // chaddr //! # 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, //! # 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, //! # 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, //! 
# 0, // sname: "-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijk", //! # 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, //! # 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, //! # 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, //! # 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, //! # 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, //! # 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, //! # 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, //! # 109, 0, 0, 0, 0, 0, 0, 0, //! # 0, // file: "mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}m", //! # 99, 130, 83, 99, // magic cookie //! # ] //! # } //! # fn main() -> Result<(), Box<dyn std::error::Error>> { //! use dhcproto::{v4::Message, Decoder, Decodable}; //! let offer = bootreq(); //! let msg = Message::decode(&mut Decoder::new(&offer))?; //! # Ok(()) } //! ``` //! 
use core::{fmt, net::Ipv4Addr, str::Utf8Error}; use alloc::{string::String, vec::Vec}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; pub mod bulk_query; mod flags; pub mod fqdn; mod htype; mod opcode; mod options; pub mod borrowed; pub mod relay; // re-export submodules from proto::msg pub use self::{flags::*, htype::*, opcode::*, options::*}; pub use crate::{ decoder::{Decodable, Decoder}, encoder::{Encodable, Encoder}, error::*, }; pub const MAGIC: [u8; 4] = [99, 130, 83, 99]; pub const MIN_PACKET_SIZE: usize = 300; /// default dhcpv4 server port pub const SERVER_PORT: u16 = 67; /// default dhcpv4 client port pub const CLIENT_PORT: u16 = 68; /// [Dynamic Host Configuration Protocol](https://tools.ietf.org/html/rfc2131#section-2) /// ///```text /// 0 1 2 3 /// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 /// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ /// | op (1) | htype (1) | hlen (1) | hops (1) | /// +---------------+---------------+---------------+---------------+ /// | xid (4) | /// +-------------------------------+-------------------------------+ /// | secs (2) | flags (2) | /// +-------------------------------+-------------------------------+ /// | ciaddr (4) | /// +---------------------------------------------------------------+ /// | yiaddr (4) | /// +---------------------------------------------------------------+ /// | siaddr (4) | /// +---------------------------------------------------------------+ /// | giaddr (4) | /// +---------------------------------------------------------------+ /// | chaddr (16) | /// +---------------------------------------------------------------+ /// | sname (64) | /// +---------------------------------------------------------------+ /// | file (128) | /// +---------------------------------------------------------------+ /// | options (variable) | /// +---------------------------------------------------------------+ /// ``` #[cfg_attr(feature = "serde", 
derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub struct Message { /// op code / message type opcode: Opcode, /// Hardware address type: <https://tools.ietf.org/html/rfc3232> htype: HType, /// Hardware address length hlen: u8, /// Client sets to zero, optionally used by relay agents when booting via a relay agent. hops: u8, /// Transaction ID, a random number chosen by the client xid: u32, /// seconds elapsed since client began address acquisition or renewal process secs: u16, /// Flags flags: Flags, /// Client IP ciaddr: Ipv4Addr, /// Your IP yiaddr: Ipv4Addr, /// Server IP siaddr: Ipv4Addr, /// Gateway IP giaddr: Ipv4Addr, /// Client hardware address chaddr: [u8; 16], /// Server hostname sname: Option<Vec<u8>>, // File name fname: Option<Vec<u8>>, magic: [u8; 4], opts: DhcpOptions, } impl Default for Message { fn default() -> Self { Self { opcode: Opcode::BootRequest, htype: HType::Eth, hlen: 0, hops: 0, xid: rand::random(), secs: 0, flags: Flags::default(), ciaddr: Ipv4Addr::UNSPECIFIED, yiaddr: Ipv4Addr::UNSPECIFIED, siaddr: Ipv4Addr::UNSPECIFIED, giaddr: Ipv4Addr::UNSPECIFIED, chaddr: [0; 16], sname: None, fname: None, magic: MAGIC, opts: DhcpOptions::default(), } } } impl Message { /// returns a new Message with OpCode set to BootRequest and a new random id /// # Panic /// panics if chaddr is greater len than 16 pub fn new( ciaddr: Ipv4Addr, yiaddr: Ipv4Addr, siaddr: Ipv4Addr, giaddr: Ipv4Addr, chaddr: &[u8], ) -> Self { Self::new_with_id(rand::random(), ciaddr, yiaddr, siaddr, giaddr, chaddr) } /// returns a new Message with OpCode set to BootRequest /// # Panic /// panics if chaddr is greater len than 16 pub fn new_with_id( xid: u32, ciaddr: Ipv4Addr, yiaddr: Ipv4Addr, siaddr: Ipv4Addr, giaddr: Ipv4Addr, chaddr: &[u8], ) -> Self { assert!(chaddr.len() <= 16); // copy our chaddr into static array let mut new_chaddr = [0; 16]; let len = chaddr.len(); new_chaddr[..len].copy_from_slice(chaddr); Self { hlen: len as u8, xid, flags: 
Flags::default(), ciaddr, yiaddr, siaddr, giaddr, chaddr: new_chaddr, ..Self::default() } } /// Get the message's opcode. /// op code / message type pub fn opcode(&self) -> Opcode { self.opcode } /// Set the message's opcode. /// op code / message type pub fn set_opcode(&mut self, opcode: Opcode) -> &mut Self { self.opcode = opcode; self } /// Get the message's hardware type. pub fn htype(&self) -> HType { self.htype } /// Set the message's hardware type. pub fn set_htype(&mut self, htype: HType) -> &mut Self { self.htype = htype; self } /// Get the message's hardware len (len of chaddr). pub fn hlen(&self) -> u8 { self.hlen } /// Get the message's hops. /// Client sets to zero, optionally used by relay agents when booting via a relay agent. pub fn hops(&self) -> u8 { self.hops } /// Set the message's hops. /// Client sets to zero, optionally used by relay agents when booting via a relay agent. pub fn set_hops(&mut self, hops: u8) -> &mut Self { self.hops = hops; self } /// Get the message's chaddr. pub fn chaddr(&self) -> &[u8] { &self.chaddr[..(self.hlen as usize)] } /// Set the message's chaddr. `chaddr` can only up to 16 bytes in length pub fn set_chaddr(&mut self, chaddr: &[u8]) -> &mut Self { let mut new_chaddr = [0; 16]; self.hlen = chaddr.len() as u8; if chaddr.len() >= 16 { new_chaddr.copy_from_slice(&chaddr[..16]); self.hlen = 16 } else { new_chaddr[..chaddr.len()].copy_from_slice(chaddr); } self.chaddr = new_chaddr; self } /// Get the message's giaddr. /// Gateway IP pub fn giaddr(&self) -> Ipv4Addr { self.giaddr } /// Set the message's giaddr. /// Gateway IP pub fn set_giaddr<I: Into<Ipv4Addr>>(&mut self, giaddr: I) -> &mut Self { self.giaddr = giaddr.into(); self } /// Get the message's siaddr. /// Server IP pub fn siaddr(&self) -> Ipv4Addr { self.siaddr } /// Set the message's siaddr. /// Server IP pub fn set_siaddr<I: Into<Ipv4Addr>>(&mut self, siaddr: I) -> &mut Self { self.siaddr = siaddr.into(); self } /// Get the message's yiaddr. 
/// Your IP /// In an OFFER this is the ip the server is offering pub fn yiaddr(&self) -> Ipv4Addr { self.yiaddr } /// Set the message's siaddr. /// Your IP pub fn set_yiaddr<I: Into<Ipv4Addr>>(&mut self, yiaddr: I) -> &mut Self { self.yiaddr = yiaddr.into(); self } /// Get the message's ciaddr. /// Client IP pub fn ciaddr(&self) -> Ipv4Addr { self.ciaddr } /// Set the message's siaddr. /// Client IP pub fn set_ciaddr<I: Into<Ipv4Addr>>(&mut self, ciaddr: I) -> &mut Self { self.ciaddr = ciaddr.into(); self } /// clear addrs pub fn clear_addrs(&mut self) -> &mut Self { self.ciaddr = Ipv4Addr::UNSPECIFIED; self.yiaddr = Ipv4Addr::UNSPECIFIED; self.siaddr = Ipv4Addr::UNSPECIFIED; self.giaddr = Ipv4Addr::UNSPECIFIED; self } /// Get the message's flags. pub fn flags(&self) -> Flags { self.flags } /// Set the message's flags. pub fn set_flags(&mut self, flags: Flags) -> &mut Self { self.flags = flags; self } /// Get the message's secs. pub fn secs(&self) -> u16 { self.secs } /// Set the message's secs. pub fn set_secs(&mut self, secs: u16) -> &mut Self { self.secs = secs; self } /// Get the message's xid. /// Transaction ID, a random number chosen by the client pub fn xid(&self) -> u32 { self.xid } /// Set the message's xid. /// Transaction ID, a random number chosen by the client pub fn set_xid(&mut self, xid: u32) -> &mut Self { self.xid = xid; self } /// Get a reference to the message's fname. No particular encoding is enforced. pub fn fname(&self) -> Option<&[u8]> { self.fname.as_deref() } /// Clear the `fname` header field. 
pub fn clear_fname(&mut self) { self.fname = None; } /// Get a reference to the message's fname, UTF-8 encoded pub fn fname_str(&self) -> Option<Result<&str, Utf8Error>> { self.fname().map(core::str::from_utf8) } /// Set the message's fname using a UTF-8 string /// # Panic /// panics if file is greater than 128 bytes long pub fn set_fname_str<S: AsRef<str>>(&mut self, file: S) -> &mut Self { let file = file.as_ref().as_bytes(); assert!(file.len() <= 128); self.fname = Some(file.to_vec()); self } /// Set the message's fname. No particular encoding is enforced. /// # Panic /// panics if file is greater than 128 bytes long pub fn set_fname(&mut self, file: &[u8]) -> &mut Self { assert!(file.len() <= 128); self.fname = Some(file.to_vec()); self } /// Get a reference to the message's sname. No particular encoding is enforced. pub fn sname(&self) -> Option<&[u8]> { self.sname.as_deref() } /// Clear the `sname` header field. pub fn clear_sname(&mut self) { self.sname = None; } /// Get a reference to the message's sname as a UTF-8 encoded string. pub fn sname_str(&self) -> Option<Result<&str, Utf8Error>> { self.sname().map(core::str::from_utf8) } /// Set the message's sname. No particular encoding is enforced. /// # Panic /// panics will if sname is greater than 64 bytes long pub fn set_sname(&mut self, sname: &[u8]) -> &mut Self { assert!(sname.len() <= 64); self.sname = Some(sname.to_vec()); self } /// Set the message's sname using a UTF-8 string /// # Panic /// panics will if sname is greater than 64 bytes long pub fn set_sname_str<S: AsRef<str>>(&mut self, sname: S) -> &mut Self { let sname = sname.as_ref().as_bytes(); assert!(sname.len() <= 64); self.sname = Some(sname.to_vec()); self } /// Get a reference to the message's opts. pub fn opts(&self) -> &DhcpOptions { &self.opts } /// Set the DHCP options pub fn set_opts(&mut self, opts: DhcpOptions) -> &mut Self { self.opts = opts; self } /// Get a mutable reference to the message's options. 
pub fn opts_mut(&mut self) -> &mut DhcpOptions { &mut self.opts } } impl Decodable for Message { fn decode(decoder: &mut Decoder<'_>) -> DecodeResult<Self> { Ok(Message { opcode: Opcode::decode(decoder)?, htype: decoder.read_u8()?.into(), hlen: decoder.read_u8()?, hops: decoder.read_u8()?, xid: decoder.read_u32()?, secs: decoder.read_u16()?, flags: decoder.read_u16()?.into(), ciaddr: decoder.read_u32()?.into(), yiaddr: decoder.read_u32()?.into(), siaddr: decoder.read_u32()?.into(), giaddr: decoder.read_u32()?.into(), chaddr: decoder.read::<16>()?, sname: decoder.read_nul_bytes::<64>()?, fname: decoder.read_nul_bytes::<128>()?, // TODO: check magic bytes against expected? magic: decoder.read::<4>()?, opts: DhcpOptions::decode(decoder)?, }) } } impl Encodable for Message { fn encode(&self, e: &mut Encoder<'_>) -> EncodeResult<()> { self.opcode.encode(e)?; self.htype.encode(e)?; e.write_u8(self.hlen)?; e.write_u8(self.hops)?; e.write_u32(self.xid)?; e.write_u16(self.secs)?; e.write_u16(self.flags.into())?; e.write_u32(self.ciaddr.into())?; e.write_u32(self.yiaddr.into())?; e.write_u32(self.siaddr.into())?; e.write_u32(self.giaddr.into())?; e.write_slice(&self.chaddr[..])?; e.write_fill(&self.sname, 64)?; e.write_fill(&self.fname, 128)?; e.write(self.magic)?; self.opts.encode(e)?; Ok(()) } } impl fmt::Display for Message { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Message") .field("xid", &self.xid()) .field("broadcast_flag", &self.flags().broadcast()) .field("ciaddr", &self.ciaddr()) .field("yiaddr", &self.yiaddr()) .field("siaddr", &self.siaddr()) .field("giaddr", &self.giaddr()) .field( "chaddr", &bytes_to_hex(self.chaddr()) .chars() .enumerate() .flat_map(|(i, c)| { if i != 0 && i % 2 == 0 { Some(':') } else { None } .into_iter() .chain(core::iter::once(c)) }) .collect::<String>(), ) .field( "opts", &self.opts().iter().map(|(_, v)| v).collect::<Vec<_>>(), ) .finish() } } fn bytes_to_hex(bytes: &[u8]) -> String { let mut ret = 
String::with_capacity(bytes.len() * 2); for b in bytes { ret.push_str(&alloc::format!("{:02x}", b)); } ret } #[cfg(test)] mod tests { use alloc::boxed::Box; use super::*; type Result<T> = core::result::Result<T, Box<dyn core::error::Error>>; fn decode_ipv4(input: Vec<u8>, expected: MessageType) -> Result<()> { // decode let msg = Message::decode(&mut Decoder::new(&input))?; dbg!(&msg); assert_eq!(msg.opts().msg_type().unwrap(), expected); // now encode let mut buf = Vec::new(); let mut e = Encoder::new(&mut buf); msg.encode(&mut e)?; println!("{buf:?}"); println!("{input:?}"); // decode again let res = Message::decode(&mut Decoder::new(&buf))?; // check Messages are equal after decoding/encoding assert_eq!(msg, res); Ok(()) } #[test] fn test_hex() { let data: &[u8] = &[0xDE, 0xAD, 0xBE, 0xEF]; let hex = bytes_to_hex(data); assert_eq!(&hex, "deadbeef"); } #[test] fn decode_offer() -> Result<()> { decode_ipv4(offer(), MessageType::Offer)?; Ok(()) } #[test] fn decode_discover() -> Result<()> { decode_ipv4(discover(), MessageType::Discover)?; Ok(()) } #[test] fn decode_offer_two() -> Result<()> { decode_ipv4(other_offer(), MessageType::Offer)?; Ok(()) } #[test] fn decode_bootreq() -> Result<()> { let offer = bootreq(); let msg = Message::decode(&mut Decoder::new(&offer))?; println!("{msg:?}"); // now encode let mut buf = Vec::new(); let mut e = Encoder::new(&mut buf); msg.encode(&mut e)?; assert_eq!(buf, bootreq()); Ok(()) } #[test] fn test_set_chaddr() -> Result<()> { let mut msg = Message::new( Ipv4Addr::UNSPECIFIED, Ipv4Addr::UNSPECIFIED, Ipv4Addr::UNSPECIFIED, Ipv4Addr::UNSPECIFIED, &[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15], ); msg.set_chaddr(&[0, 1, 2, 3, 4, 5]); assert_eq!(msg.chaddr().len(), 6); msg.set_chaddr(&[ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, ]); assert_eq!(msg.chaddr().len(), 16); Ok(()) } #[cfg(feature = "serde")] #[test] fn test_json() -> Result<()> { let msg = Message::decode(&mut 
Decoder::new(&offer()))?; let s = serde_json::to_string_pretty(&msg)?; println!("{s}"); let other = serde_json::from_str(&s)?; assert_eq!(msg, other); Ok(()) } fn offer() -> Vec<u8> { vec![ 0x02, 0x01, 0x06, 0x00, 0x00, 0x00, 0x15, 0x5c, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x00, 0x0a, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x02, 0x36, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x33, 0x04, 0x00, 0x00, 0x00, 0x3c, 0x3a, 0x04, 0x00, 0x00, 0x00, 0x1e, 0x3b, 0x04, 0x00, 0x00, 0x00, 0x34, 0x01, 0x04, 0xff, 0xff, 0xff, 0x00, 0x03, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x06, 0x08, 0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, 0x01, 0x01, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ] } fn 
bootreq() -> Vec<u8> { vec![ 1u8, // op 2, // htype 3, // hlen 4, // ops 5, 6, 7, 8, // xid 9, 10, // secs 11, 12, // flags 13, 14, 15, 16, // ciaddr 17, 18, 19, 20, // yiaddr 21, 22, 23, 24, // siaddr 25, 26, 27, 28, // giaddr 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, // chaddr 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 0, // sname: "-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijk", 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 0, 0, 0, 0, 0, 0, 0, 0, // file: "mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}m", 99, 130, 83, 99, // magic cookie ] } fn discover() -> Vec<u8> { vec![ 0x01, 0x01, 0x06, 0x00, 0xa6, 0x80, 0x56, 0x74, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xc0, 0xde, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x01, 0x37, 0x40, 0xfc, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x43, 0x42, 0x33, 0x04, 0x00, 0x00, 0x00, 0x01, 0xff, ] } fn other_offer() -> Vec<u8> { vec![ 0x02, 0x01, 0x06, 0x00, 0xa6, 0x80, 0x56, 0x74, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x95, 0xc0, 0xa8, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xc0, 0xde, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x02, 0x36, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x33, 0x04, 0x00, 0x00, 0x00, 0x78, 0x3a, 0x04, 0x00, 0x00, 0x00, 0x3c, 0x3b, 0x04, 0x00, 0x00, 0x00, 0x69, 0x01, 0x04, 0xff, 0xff, 0xff, 0x00, 0x1c, 0x04, 0xc0, 0xa8, 0x00, 0xff, 0x06, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x03, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ] } }
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
false
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/src/v4/fqdn.rs
src/v4/fqdn.rs
use core::fmt; use hickory_proto::rr::Name; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; /// A client FQDN #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Clone, PartialEq, Eq, Debug)] pub struct ClientFQDN { pub(crate) flags: FqdnFlags, pub(crate) r1: u8, pub(crate) r2: u8, pub(crate) domain: Name, } impl ClientFQDN { // creates a new client fqdn setting the rcode1/rcode2 fields to 255 pub fn new(flags: FqdnFlags, domain: Name) -> Self { Self { flags, r1: 0xFF, r2: 0xFF, domain, } } pub fn flags(&self) -> FqdnFlags { self.flags } pub fn set_flags(&mut self, flags: FqdnFlags) -> &mut Self { self.flags = flags; self } pub fn r1(&self) -> u8 { self.r1 } pub fn set_r1(&mut self, rcode1: u8) -> &mut Self { self.r1 = rcode1; self } pub fn r2(&self) -> u8 { self.r2 } pub fn set_r2(&mut self, rcode2: u8) -> &mut Self { self.r2 = rcode2; self } pub fn domain(&self) -> &Name { &self.domain } pub fn set_domain(&mut self, domain: Name) -> &mut Self { self.domain = domain; self } pub fn domain_mut(&mut self) -> &mut Name { &mut self.domain } } /// Represents available flags on message #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Copy, Default, Clone, PartialEq, Eq, Hash)] pub struct FqdnFlags(u8); impl fmt::Debug for FqdnFlags { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("FqdnFlags") .field("N", &self.n()) .field("E", &self.e()) .field("O", &self.o()) .field("S", &self.s()) .finish() } } impl fmt::Display for FqdnFlags { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{self:?}") } } impl FqdnFlags { /// Create new FqdnFlags from u8 pub fn new(n: u8) -> Self { Self(n) } /// get the status of the n flag pub fn n(&self) -> bool { (self.0 & 0x08) > 0 } /// set the n bit, if true will also set the s bit to false. 
pub fn set_n(mut self, bit: bool) -> Self { if bit { self.0 |= 0x08; // 1000 self = self.set_s(false); } else { self.0 &= 0xf7; // 0111 } self } pub fn set_n_mut(&mut self, bit: bool) -> &mut Self { *self = self.set_n(bit); self } /// get the status of the e flag pub fn e(&self) -> bool { (self.0 & 0x04) > 0 } /// set the e bit pub fn set_e(mut self, bit: bool) -> Self { if bit { self.0 |= 0x04; // 0100 } else { self.0 &= 0xfb; // 1011 } self } pub fn set_e_mut(&mut self, bit: bool) -> &mut Self { *self = self.set_e(bit); self } /// get the status of the o flag pub fn o(&self) -> bool { (self.0 & 0x02) > 0 } /// set the o bit pub fn set_o(mut self, bit: bool) -> Self { if bit { self.0 |= 0x02; // 0010 } else { self.0 &= 0xfd; // 1101 } self } pub fn set_o_mut(&mut self, bit: bool) -> &mut Self { *self = self.set_o(bit); self } /// get the status of the s flag pub fn s(&self) -> bool { (self.0 & 0x01) > 0 } /// set the s bit. Indicates whether the server should perform an A RR update pub fn set_s(mut self, bit: bool) -> Self { if bit { self.0 |= 0x01; // 0001 } else { self.0 &= 0xfe; // 1110 } self } pub fn set_s_mut(&mut self, bit: bool) -> &mut Self { *self = self.set_s(bit); self } } impl From<u8> for FqdnFlags { fn from(n: u8) -> Self { Self(n) } } impl From<FqdnFlags> for u8 { fn from(f: FqdnFlags) -> Self { f.0 } } #[cfg(test)] mod tests { use super::*; #[test] fn test_fqdn_flags() { let mut flag = FqdnFlags::default(); assert_eq!(flag.0, 0); flag.set_s_mut(true); // passing true clears the s bit flag.set_n_mut(true); assert!(flag.n()); assert!(!flag.s()); assert_eq!(flag.0, 0x08); flag.set_n_mut(false); assert!(!flag.n()); assert!(!flag.s()); assert_eq!(flag.0, 0x00); let flag = FqdnFlags::new(0x40).set_s(true); assert!(!flag.e()); assert!(flag.s()); assert!(!flag.n()); assert!(!flag.o()); assert_eq!(flag.0, 0x41); let mut flag = flag.set_e(true); assert!(flag.e() && flag.s()); flag.set_e_mut(false); assert_eq!(flag.0, 0x41); flag.set_s_mut(false); 
assert_eq!(flag.0, 0x40); assert!(!flag.s()); let flag = FqdnFlags::default().set_e(true); assert!(flag.e()); assert_eq!(flag.0, 0x04); let flag = flag.set_s(true); assert_eq!(flag.0, 0x05); } }
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
false
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/src/v4/borrowed.rs
src/v4/borrowed.rs
use alloc::borrow::Cow; use core::{fmt::Debug, net::Ipv4Addr}; use crate::{ Decoder, error::DecodeError, v4::{DecodeResult, Flags, HType, Opcode, OptionCode}, }; /// A lazily decoded DHCPv4 message. /// It holds a reference to the original byte buffer and provides /// methods to access fields. Most fields are parsed on-demand. #[derive(Debug)] pub struct Message<'a> { buffer: &'a [u8], } impl<'a> Message<'a> { /// Creates a new `Message` from a byte slice. /// This is a zero-copy operation and does not perform any allocations. pub fn new(buffer: &'a [u8]) -> Result<Self, DecodeError> { if buffer.len() < 240 { return Err(DecodeError::NotEnoughBytes); } Ok(Self { buffer }) } /// opcode pub fn opcode(&self) -> Opcode { self.buffer[0].into() } /// hardware type pub fn htype(&self) -> HType { self.buffer[1].into() } /// hardware len pub fn hlen(&self) -> u8 { self.buffer[2] } /// hops pub fn hops(&self) -> u8 { self.buffer[3] } /// transaction id pub fn xid(&self) -> u32 { u32::from_be_bytes(self.buffer[4..=7].try_into().unwrap()) } /// seconds elapsed since client began address acquisition or renewal pub fn secs(&self) -> u16 { u16::from_be_bytes(self.buffer[8..=9].try_into().unwrap()) } pub fn flags(&self) -> Flags { u16::from_be_bytes(self.buffer[10..=11].try_into().unwrap()).into() } /// client addr (siaddr) pub fn ciaddr(&self) -> Ipv4Addr { TryInto::<[u8; 4]>::try_into(&self.buffer[12..=15]) .unwrap() .into() } /// your addr (siaddr) pub fn yiaddr(&self) -> Ipv4Addr { TryInto::<[u8; 4]>::try_into(&self.buffer[16..=19]) .unwrap() .into() } /// server addr (siaddr) pub fn siaddr(&self) -> Ipv4Addr { TryInto::<[u8; 4]>::try_into(&self.buffer[20..=23]) .unwrap() .into() } /// gateway addr (giaddr) pub fn giaddr(&self) -> Ipv4Addr { TryInto::<[u8; 4]>::try_into(&self.buffer[24..=27]) .unwrap() .into() } /// chaddr pub fn chaddr(&self) -> &'a [u8] { &self.buffer[28..28 + self.hlen() as usize] } // For variable-length fields, we can return slices // The sname and file 
fields are null-terminated strings /// server name pub fn sname(&self) -> &'a [u8] { debug_assert!( self.buffer.get(44..108).is_some(), "not enough bytes for sname" ); let sname_bytes = &self.buffer[44..108]; let end = sname_bytes .iter() .position(|&b| b == 0) .unwrap_or(sname_bytes.len()); &sname_bytes[..end] } /// file name pub fn fname(&self) -> &'a [u8] { debug_assert!( self.buffer.get(108..236).is_some(), "not enough bytes for fname" ); let file_bytes = &self.buffer[108..236]; let end = file_bytes .iter() .position(|&b| b == 0) .unwrap_or(file_bytes.len()); &file_bytes[..end] } /// Returns a `DhcpOptions` iterator that lazily parses DHCP options. pub fn opts(&self) -> DhcpOptionIterator<'a> { // Magic cookie check if self.buffer[236..240] != crate::v4::MAGIC { return DhcpOptionIterator::empty(); } DhcpOptionIterator::new(&self.buffer[240..]) } } /// An iterator over DHCP options. Handles long-form encoding #[derive(Debug)] pub struct DhcpOptionIterator<'a> { decoder: Decoder<'a>, } /// Represents a single DHCP option, which may be concatenated from multiple parts. #[derive(Debug)] pub struct DhcpOption<'a> { code: OptionCode, data: Cow<'a, [u8]>, } impl DhcpOption<'_> { /// option code pub fn code(&self) -> OptionCode { self.code } /// data len pub fn len(&self) -> usize { self.data.len() } /// is empty pub fn is_empty(&self) -> bool { self.len() == 0 } /// option data pub fn data(&self) -> &[u8] { self.data.as_ref() } /// Consumes the raw option and attempts to parse it into owned `DhcpOption`. 
/// This method will do allocations pub fn into_option(self) -> DecodeResult<crate::v4::options::DhcpOption> { let mut decoder = Decoder::new(&self.data); crate::v4::decode_inner(self.code(), self.len(), &mut decoder) } } impl<'a> DhcpOptionIterator<'a> { pub fn new(buffer: &'a [u8]) -> Self { Self { decoder: Decoder::new(buffer), } } pub fn empty() -> DhcpOptionIterator<'a> { Self { decoder: Decoder::new(&[]), } } } impl<'a> Iterator for DhcpOptionIterator<'a> { type Item = DhcpOption<'a>; fn next(&mut self) -> Option<Self::Item> { loop { let code = self.decoder.read_u8().ok()?; match code { 0 => continue, // Pad 255 => return None, // End _ => { let len = self.decoder.read_u8().ok()?; let data = self.decoder.read_slice(len as usize).ok()?; let mut buf = Cow::Borrowed(data); let mut lookahead = self.decoder; let mut bytes_consumed = 0; while let Ok(next_code) = lookahead.peek_u8() { if next_code == code { // Advance past the code we just peeked lookahead.read_u8().ok()?; let next_len = lookahead.read_u8().ok()?; let next_data = lookahead.read_slice(next_len as usize).ok()?; buf.to_mut().extend_from_slice(next_data); bytes_consumed += 1 + 1 + next_len as usize; } else { break; } } if bytes_consumed > 0 { self.decoder.read_slice(bytes_consumed).unwrap(); } return Some(DhcpOption { code: OptionCode::from(code), data: buf, }); } } } } } #[cfg(test)] mod tests { use super::*; fn bootreq() -> Vec<u8> { vec![ 1u8, // op 2, // htype 3, // hlen 4, // ops 5, 6, 7, 8, // xid 9, 10, // secs 11, 12, // flags 13, 14, 15, 16, // ciaddr 17, 18, 19, 20, // yiaddr 21, 22, 23, 24, // siaddr 25, 26, 27, 28, // giaddr 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, // chaddr 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 0, // sname: 
"-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijk", 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 0, 0, 0, 0, 0, 0, 0, 0, // file: "mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}m", 99, 130, 83, 99, // magic cookie ] } #[test] fn test_bootreq() { let buf = bootreq(); let msg = Message::new(&buf).unwrap(); assert_eq!(msg.opcode(), Opcode::BootRequest); assert_eq!(msg.htype(), HType::ExperimentalEth); assert_eq!(msg.hlen(), 3); assert_eq!(msg.hops(), 4); assert_eq!(msg.xid(), 0x05060708); assert_eq!(msg.secs(), 0x090A); assert_eq!(u16::from(msg.flags()), 0x0B0C); assert_eq!(msg.ciaddr(), Ipv4Addr::new(13, 14, 15, 16)); assert_eq!(msg.yiaddr(), Ipv4Addr::new(17, 18, 19, 20)); assert_eq!(msg.siaddr(), Ipv4Addr::new(21, 22, 23, 24)); assert_eq!(msg.giaddr(), Ipv4Addr::new(25, 26, 27, 28)); assert_eq!(msg.chaddr(), &[29, 30, 31]); assert_eq!( msg.sname(), &[ 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107 ] ); assert_eq!( msg.fname(), &[ 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 
115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109 ] ); assert!(msg.opts().next().is_none()); } #[test] fn test_empty() { // Empty buffer let buffer = []; let mut iter = DhcpOptionIterator::new(&buffer); assert!(iter.next().is_none()); // Just the end option let buffer = [255]; let mut iter = DhcpOptionIterator::new(&buffer); assert!(iter.next().is_none()); } #[test] fn test_pad() { // Padding before an option and end let buffer = [0, 0, 53, 1, 1, 0, 255]; let mut iter = DhcpOptionIterator::new(&buffer); let option = iter.next().unwrap(); assert_eq!(option.code, OptionCode::MessageType); assert_eq!(option.data, Cow::from(&[1][..])); assert!(iter.next().is_none()); } #[test] fn test_simple_options() { let buffer = [ 53, 1, 1, // DHCP Message Type: Discover (1) 61, 7, 1, 11, 22, 33, 44, 55, 66, // Client Identifier 55, 2, 1, 3, // Parameter Request List: Subnet Mask (1), Router (3) 255, // End ]; let mut iter = DhcpOptionIterator::new(&buffer); let opt1 = iter.next().unwrap(); assert_eq!(opt1.code, OptionCode::MessageType); assert_eq!(opt1.data, Cow::from(&[1][..])); let opt2 = iter.next().unwrap(); assert_eq!(opt2.code, OptionCode::ClientIdentifier); assert_eq!(opt2.data, Cow::from(&[1, 11, 22, 33, 44, 55, 66][..])); let opt3 = iter.next().unwrap(); assert_eq!(opt3.code, OptionCode::ParameterRequestList); assert_eq!(opt3.data, Cow::from(&[1, 3][..])); assert!(iter.next().is_none()); } #[test] fn test_concatenated_option() { let buffer = [ 12, 5, b'h', b'e', b'l', b'l', b'o', // Host Name part 1 12, 6, b' ', b'w', b'o', b'r', b'l', b'd', // Host Name part 2 255, ]; let mut iter = DhcpOptionIterator::new(&buffer); let option = 
iter.next().unwrap(); assert_eq!(option.code, OptionCode::Hostname); // Check that the data is concatenated and is now owned let expected_data = b"hello world"; assert_eq!(option.data, Cow::from(&expected_data[..])); assert!(matches!(option.data, Cow::Owned(_))); assert!(iter.next().is_none()); } #[test] fn test_mixed_simple_and_concatenated() { let buffer = [ 53, 1, 1, // Simple option 50, 4, 192, 168, 1, 100, // Another simple option 43, 3, b'd', b'e', b'f', // Concatenated option part 1 43, 3, b'g', b'h', b'i', // Concatenated option part 2 54, 4, 192, 168, 1, 1, // A final simple option 255, ]; let mut iter = DhcpOptionIterator::new(&buffer); let opt1 = iter.next().unwrap(); assert_eq!(opt1.code, OptionCode::MessageType); assert_eq!(opt1.data, Cow::from(&[1][..])); let opt3 = iter.next().unwrap(); assert_eq!(opt3.code, OptionCode::RequestedIpAddress); assert_eq!(opt3.data, Cow::from(&[192, 168, 1, 100][..])); let opt2 = iter.next().unwrap(); assert_eq!(opt2.code, OptionCode::VendorExtensions); assert_eq!(opt2.data, Cow::from(&b"defghi"[..])); assert!(matches!(opt2.data, Cow::Owned(_))); let opt4 = iter.next().unwrap(); assert_eq!(opt4.code, OptionCode::ServerIdentifier); assert_eq!(opt4.data, Cow::from(&[192, 168, 1, 1][..])); assert!(iter.next().is_none()); } #[test] fn test_malformed_length() { // Length of 10 but only 3 bytes remaining let buffer = [1, 10, 1, 2, 3]; let mut iter = DhcpOptionIterator::new(&buffer); // The parser should detect this and stop, returning None. assert!(iter.next().is_none()); } #[test] fn test_malformed_abrupt_end() { // Buffer ends right after an option code let buffer = [53]; let mut iter = DhcpOptionIterator::new(&buffer); assert!(iter.next().is_none()); // Buffer ends after a length field let buffer = [53, 5]; let mut iter = DhcpOptionIterator::new(&buffer); assert!(iter.next().is_none()); } }
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
false
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/src/v6/oro_codes.rs
src/v6/oro_codes.rs
//! Valid Option Codes for ORO //! <https://datatracker.ietf.org/doc/html/rfc8415#section-24> #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use crate::v6::OptionCode; #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum OROCode { /// Optional VendorOpts, SipServerD, SipServerA, DomainNameServers, DomainSearchList, NisServers, NispServers, NisDomainName, NispDomainName, SntpServers, /// Required for Information-request InformationRefreshTime, BcmcsServerD, BcmcsServerA, GeoconfCivic, ClientFqdn, PanaAgent, NewPosixTimezone, NewTzdbTimezone, Mip6Hnidf, Mip6Vdinf, V6Lost, CapwapAcV6, Ipv6AddressMoS, Ipv6FQDNMoS, NtpServer, V6AccessDomain, SipUaCsList, OptBootfileUrl, OptBootfileParam, Nii, Geolocation, AftrName, ErpLocalDomainName, PdExclude, Mip6Idinf, Mip6Udinf, Mip6Hnp, Mip6Haa, Mip6Haf, RdnssSelection, KrbPrincipalName, KrbRealmName, KrbDefaultRealmName, KrbKdc, /// Required for Solicit SolMaxRt, /// Required for Information-request InfMaxRt, Addrsel, AddrselTable, V6PcpServer, Dhcp4ODhcp6Server, S46ContMape, S46ContMapt, S46ContLw, _4Rd, _4RdMapRule, _4RdNonMapRule, DhcpCaptivePortal, MplParameters, S46Priority, V6Prefix64, Ipv6AddressANDSF, /// Avalible for future codes. Unknown(u16), } impl From<OROCode> for u16 { fn from(opt: OROCode) -> Self { OptionCode::from(opt).into() } } // should this be a TryFrom? 
impl From<u16> for OROCode { fn from(opt: u16) -> Self { OptionCode::from(opt) .try_into() .unwrap_or(OROCode::Unknown(opt)) } } impl TryFrom<OptionCode> for OROCode { type Error = &'static str; fn try_from(opt: OptionCode) -> Result<OROCode, Self::Error> { match opt { OptionCode::VendorOpts => Ok(OROCode::VendorOpts), OptionCode::SipServerD => Ok(OROCode::SipServerD), OptionCode::SipServerA => Ok(OROCode::SipServerA), OptionCode::DomainNameServers => Ok(OROCode::DomainNameServers), OptionCode::DomainSearchList => Ok(OROCode::DomainSearchList), OptionCode::NisServers => Ok(OROCode::NisServers), OptionCode::NispServers => Ok(OROCode::NispServers), OptionCode::NisDomainName => Ok(OROCode::NisDomainName), OptionCode::NispDomainName => Ok(OROCode::NispDomainName), OptionCode::SntpServers => Ok(OROCode::SntpServers), OptionCode::InformationRefreshTime => Ok(OROCode::InformationRefreshTime), OptionCode::BcmcsServerD => Ok(OROCode::BcmcsServerD), OptionCode::BcmcsServerA => Ok(OROCode::BcmcsServerA), OptionCode::GeoconfCivic => Ok(OROCode::GeoconfCivic), OptionCode::ClientFqdn => Ok(OROCode::ClientFqdn), OptionCode::PanaAgent => Ok(OROCode::PanaAgent), OptionCode::NewPosixTimezone => Ok(OROCode::NewPosixTimezone), OptionCode::NewTzdbTimezone => Ok(OROCode::NewTzdbTimezone), OptionCode::Mip6Hnidf => Ok(OROCode::Mip6Hnidf), OptionCode::Mip6Vdinf => Ok(OROCode::Mip6Vdinf), OptionCode::V6Lost => Ok(OROCode::V6Lost), OptionCode::CapwapAcV6 => Ok(OROCode::CapwapAcV6), OptionCode::Ipv6AddressMoS => Ok(OROCode::Ipv6AddressMoS), OptionCode::Ipv6FQDNMoS => Ok(OROCode::Ipv6FQDNMoS), OptionCode::NtpServer => Ok(OROCode::NtpServer), OptionCode::V6AccessDomain => Ok(OROCode::V6AccessDomain), OptionCode::SipUaCsList => Ok(OROCode::SipUaCsList), OptionCode::OptBootfileUrl => Ok(OROCode::OptBootfileUrl), OptionCode::OptBootfileParam => Ok(OROCode::OptBootfileParam), OptionCode::Nii => Ok(OROCode::Nii), OptionCode::Geolocation => Ok(OROCode::Geolocation), OptionCode::AftrName => 
Ok(OROCode::AftrName), OptionCode::ErpLocalDomainName => Ok(OROCode::ErpLocalDomainName), OptionCode::PdExclude => Ok(OROCode::PdExclude), OptionCode::Mip6Idinf => Ok(OROCode::Mip6Idinf), OptionCode::Mip6Udinf => Ok(OROCode::Mip6Udinf), OptionCode::Mip6Hnp => Ok(OROCode::Mip6Hnp), OptionCode::Mip6Haa => Ok(OROCode::Mip6Haa), OptionCode::Mip6Haf => Ok(OROCode::Mip6Haf), OptionCode::RdnssSelection => Ok(OROCode::RdnssSelection), OptionCode::KrbPrincipalName => Ok(OROCode::KrbPrincipalName), OptionCode::KrbRealmName => Ok(OROCode::KrbRealmName), OptionCode::KrbDefaultRealmName => Ok(OROCode::KrbDefaultRealmName), OptionCode::KrbKdc => Ok(OROCode::KrbKdc), OptionCode::SolMaxRt => Ok(OROCode::SolMaxRt), OptionCode::InfMaxRt => Ok(OROCode::InfMaxRt), OptionCode::Addrsel => Ok(OROCode::Addrsel), OptionCode::AddrselTable => Ok(OROCode::AddrselTable), OptionCode::V6PcpServer => Ok(OROCode::V6PcpServer), OptionCode::Dhcp4ODhcp6Server => Ok(OROCode::Dhcp4ODhcp6Server), OptionCode::S46ContMape => Ok(OROCode::S46ContMape), OptionCode::S46ContMapt => Ok(OROCode::S46ContMapt), OptionCode::S46ContLw => Ok(OROCode::S46ContLw), OptionCode::_4Rd => Ok(OROCode::_4Rd), OptionCode::_4RdMapRule => Ok(OROCode::_4RdMapRule), OptionCode::_4RdNonMapRule => Ok(OROCode::_4RdNonMapRule), OptionCode::DhcpCaptivePortal => Ok(OROCode::DhcpCaptivePortal), OptionCode::MplParameters => Ok(OROCode::MplParameters), OptionCode::S46Priority => Ok(OROCode::S46Priority), OptionCode::V6Prefix64 => Ok(OROCode::V6Prefix64), OptionCode::Ipv6AddressANDSF => Ok(OROCode::Ipv6AddressANDSF), OptionCode::Unknown(u16) => Ok(OROCode::Unknown(u16)), _ => Err("conversion error, is not a valid OROCode"), } } } impl From<OROCode> for OptionCode { fn from(opt: OROCode) -> OptionCode { match opt { OROCode::VendorOpts => OptionCode::VendorOpts, OROCode::SipServerD => OptionCode::SipServerD, OROCode::SipServerA => OptionCode::SipServerA, OROCode::DomainNameServers => OptionCode::DomainNameServers, OROCode::DomainSearchList => 
OptionCode::DomainSearchList, OROCode::NisServers => OptionCode::NisServers, OROCode::NispServers => OptionCode::NispServers, OROCode::NisDomainName => OptionCode::NisDomainName, OROCode::NispDomainName => OptionCode::NispDomainName, OROCode::SntpServers => OptionCode::SntpServers, OROCode::InformationRefreshTime => OptionCode::InformationRefreshTime, OROCode::BcmcsServerD => OptionCode::BcmcsServerD, OROCode::BcmcsServerA => OptionCode::BcmcsServerA, OROCode::GeoconfCivic => OptionCode::GeoconfCivic, OROCode::ClientFqdn => OptionCode::ClientFqdn, OROCode::PanaAgent => OptionCode::PanaAgent, OROCode::NewPosixTimezone => OptionCode::NewPosixTimezone, OROCode::NewTzdbTimezone => OptionCode::NewTzdbTimezone, OROCode::Mip6Hnidf => OptionCode::Mip6Hnidf, OROCode::Mip6Vdinf => OptionCode::Mip6Vdinf, OROCode::V6Lost => OptionCode::V6Lost, OROCode::CapwapAcV6 => OptionCode::CapwapAcV6, OROCode::Ipv6AddressMoS => OptionCode::Ipv6AddressMoS, OROCode::Ipv6FQDNMoS => OptionCode::Ipv6FQDNMoS, OROCode::NtpServer => OptionCode::NtpServer, OROCode::V6AccessDomain => OptionCode::V6AccessDomain, OROCode::SipUaCsList => OptionCode::SipUaCsList, OROCode::OptBootfileUrl => OptionCode::OptBootfileUrl, OROCode::OptBootfileParam => OptionCode::OptBootfileParam, OROCode::Nii => OptionCode::Nii, OROCode::Geolocation => OptionCode::Geolocation, OROCode::AftrName => OptionCode::AftrName, OROCode::ErpLocalDomainName => OptionCode::ErpLocalDomainName, OROCode::PdExclude => OptionCode::PdExclude, OROCode::Mip6Idinf => OptionCode::Mip6Idinf, OROCode::Mip6Udinf => OptionCode::Mip6Udinf, OROCode::Mip6Hnp => OptionCode::Mip6Hnp, OROCode::Mip6Haa => OptionCode::Mip6Haa, OROCode::Mip6Haf => OptionCode::Mip6Haf, OROCode::RdnssSelection => OptionCode::RdnssSelection, OROCode::KrbPrincipalName => OptionCode::KrbPrincipalName, OROCode::KrbRealmName => OptionCode::KrbRealmName, OROCode::KrbDefaultRealmName => OptionCode::KrbDefaultRealmName, OROCode::KrbKdc => OptionCode::KrbKdc, OROCode::SolMaxRt => 
OptionCode::SolMaxRt, OROCode::InfMaxRt => OptionCode::InfMaxRt, OROCode::Addrsel => OptionCode::Addrsel, OROCode::AddrselTable => OptionCode::AddrselTable, OROCode::V6PcpServer => OptionCode::V6PcpServer, OROCode::Dhcp4ODhcp6Server => OptionCode::Dhcp4ODhcp6Server, OROCode::S46ContMape => OptionCode::S46ContMape, OROCode::S46ContMapt => OptionCode::S46ContMapt, OROCode::S46ContLw => OptionCode::S46ContLw, OROCode::_4Rd => OptionCode::_4Rd, OROCode::_4RdMapRule => OptionCode::_4RdMapRule, OROCode::_4RdNonMapRule => OptionCode::_4RdNonMapRule, OROCode::DhcpCaptivePortal => OptionCode::DhcpCaptivePortal, OROCode::MplParameters => OptionCode::MplParameters, OROCode::S46Priority => OptionCode::S46Priority, OROCode::V6Prefix64 => OptionCode::V6Prefix64, OROCode::Ipv6AddressANDSF => OptionCode::Ipv6AddressANDSF, OROCode::Unknown(u16) => OptionCode::Unknown(u16), } } }
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
false
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/src/v6/option_codes.rs
src/v6/option_codes.rs
use crate::v6::{UnknownOption, options::DhcpOption}; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; /// option code type #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum OptionCode { ClientId, ServerId, IANA, IATA, IAAddr, ORO, Preference, ElapsedTime, RelayMsg, Authentication, ServerUnicast, StatusCode, RapidCommit, UserClass, VendorClass, VendorOpts, InterfaceId, ReconfMsg, ReconfAccept, SipServerD, SipServerA, DomainNameServers, DomainSearchList, IAPD, IAPrefix, NisServers, NispServers, NisDomainName, NispDomainName, SntpServers, InformationRefreshTime, BcmcsServerD, BcmcsServerA, GeoconfCivic, RemoteId, SubscriberId, ClientFqdn, PanaAgent, NewPosixTimezone, NewTzdbTimezone, ERO, LqQuery, ClientData, CltTime, LqRelayData, LqClientLink, Mip6Hnidf, Mip6Vdinf, V6Lost, CapwapAcV6, RelayId, Ipv6AddressMoS, Ipv6FQDNMoS, NtpServer, V6AccessDomain, SipUaCsList, OptBootfileUrl, OptBootfileParam, ClientArchType, Nii, Geolocation, AftrName, ErpLocalDomainName, Rsoo, PdExclude, Vss, Mip6Idinf, Mip6Udinf, Mip6Hnp, Mip6Haa, Mip6Haf, RdnssSelection, KrbPrincipalName, KrbRealmName, KrbDefaultRealmName, KrbKdc, ClientLinklayerAddr, LinkAddress, Radius, SolMaxRt, InfMaxRt, Addrsel, AddrselTable, V6PcpServer, Dhcpv4Msg, Dhcp4ODhcp6Server, S46Rule, S46Br, S46Dmr, S46V4v6bind, S46Portparams, S46ContMape, S46ContMapt, S46ContLw, _4Rd, _4RdMapRule, _4RdNonMapRule, LqBaseTime, LqStartTime, LqEndTime, DhcpCaptivePortal, MplParameters, AniAtt, AniNetworkName, AniApName, AniApBssid, AniOperatorId, AniOperatorRealm, S46Priority, MudUrlV6, V6Prefix64, FBindingStatus, FConnectFlags, Fdnsremovalinfo, FDNSHostName, FDNSZoneName, Fdnsflags, Fexpirationtime, FMaxUnackedBndupd, FMclt, FPartnerLifetime, FPartnerLifetimeSent, FPartnerDownTime, FPartnerRawCltTime, FProtocolVersion, FKeepaliveTime, FReconfigureData, FRelationshipName, FServerFlags, FServerState, FStartTimeOfState, FStateExpirationTime, 
RelayPort, Ipv6AddressANDSF, Unknown(u16), } impl From<OptionCode> for u16 { fn from(opt: OptionCode) -> Self { use OptionCode as O; match opt { O::ClientId => 1, O::ServerId => 2, O::IANA => 3, O::IATA => 4, O::IAAddr => 5, O::ORO => 6, O::Preference => 7, O::ElapsedTime => 8, O::RelayMsg => 9, O::Authentication => 11, O::ServerUnicast => 12, O::StatusCode => 13, O::RapidCommit => 14, O::UserClass => 15, O::VendorClass => 16, O::VendorOpts => 17, O::InterfaceId => 18, O::ReconfMsg => 19, O::ReconfAccept => 20, O::SipServerD => 21, O::SipServerA => 22, O::DomainNameServers => 23, O::DomainSearchList => 24, O::IAPD => 25, O::IAPrefix => 26, O::NisServers => 27, O::NispServers => 28, O::NisDomainName => 29, O::NispDomainName => 30, O::SntpServers => 31, O::InformationRefreshTime => 32, O::BcmcsServerD => 33, O::BcmcsServerA => 34, O::GeoconfCivic => 36, O::RemoteId => 37, O::SubscriberId => 38, O::ClientFqdn => 39, O::PanaAgent => 40, O::NewPosixTimezone => 41, O::NewTzdbTimezone => 42, O::ERO => 43, O::LqQuery => 44, O::ClientData => 45, O::CltTime => 46, O::LqRelayData => 47, O::LqClientLink => 48, O::Mip6Hnidf => 49, O::Mip6Vdinf => 50, O::V6Lost => 51, O::CapwapAcV6 => 52, O::RelayId => 53, O::Ipv6AddressMoS => 54, O::Ipv6FQDNMoS => 55, O::NtpServer => 56, O::V6AccessDomain => 57, O::SipUaCsList => 58, O::OptBootfileUrl => 59, O::OptBootfileParam => 60, O::ClientArchType => 61, O::Nii => 62, O::Geolocation => 63, O::AftrName => 64, O::ErpLocalDomainName => 65, O::Rsoo => 66, O::PdExclude => 67, O::Vss => 68, O::Mip6Idinf => 69, O::Mip6Udinf => 70, O::Mip6Hnp => 71, O::Mip6Haa => 72, O::Mip6Haf => 73, O::RdnssSelection => 74, O::KrbPrincipalName => 75, O::KrbRealmName => 76, O::KrbDefaultRealmName => 77, O::KrbKdc => 78, O::ClientLinklayerAddr => 79, O::LinkAddress => 80, O::Radius => 81, O::SolMaxRt => 82, O::InfMaxRt => 83, O::Addrsel => 84, O::AddrselTable => 85, O::V6PcpServer => 86, O::Dhcpv4Msg => 87, O::Dhcp4ODhcp6Server => 88, O::S46Rule => 89, O::S46Br => 
90, O::S46Dmr => 91, O::S46V4v6bind => 92, O::S46Portparams => 93, O::S46ContMape => 94, O::S46ContMapt => 95, O::S46ContLw => 96, O::_4Rd => 97, O::_4RdMapRule => 98, O::_4RdNonMapRule => 99, O::LqBaseTime => 100, O::LqStartTime => 101, O::LqEndTime => 102, O::DhcpCaptivePortal => 103, O::MplParameters => 104, O::AniAtt => 105, O::AniNetworkName => 106, O::AniApName => 107, O::AniApBssid => 108, O::AniOperatorId => 109, O::AniOperatorRealm => 110, O::S46Priority => 111, O::MudUrlV6 => 112, O::V6Prefix64 => 113, O::FBindingStatus => 114, O::FConnectFlags => 115, O::Fdnsremovalinfo => 116, O::FDNSHostName => 117, O::FDNSZoneName => 118, O::Fdnsflags => 119, O::Fexpirationtime => 120, O::FMaxUnackedBndupd => 121, O::FMclt => 122, O::FPartnerLifetime => 123, O::FPartnerLifetimeSent => 124, O::FPartnerDownTime => 125, O::FPartnerRawCltTime => 126, O::FProtocolVersion => 127, O::FKeepaliveTime => 128, O::FReconfigureData => 129, O::FRelationshipName => 130, O::FServerFlags => 131, O::FServerState => 132, O::FStartTimeOfState => 133, O::FStateExpirationTime => 134, O::RelayPort => 135, O::Ipv6AddressANDSF => 143, O::Unknown(n) => n, } } } impl From<u16> for OptionCode { fn from(n: u16) -> Self { use OptionCode::*; match n { 1 => ClientId, 2 => ServerId, 3 => IANA, 4 => IATA, 5 => IAAddr, 6 => ORO, 7 => Preference, 8 => ElapsedTime, 9 => RelayMsg, 11 => Authentication, 12 => ServerUnicast, 13 => StatusCode, 14 => RapidCommit, 15 => UserClass, 16 => VendorClass, 17 => VendorOpts, 18 => InterfaceId, 19 => ReconfMsg, 20 => ReconfAccept, 21 => SipServerD, 22 => SipServerA, 23 => DomainNameServers, 24 => DomainSearchList, 25 => IAPD, 26 => IAPrefix, 27 => NisServers, 28 => NispServers, 29 => NisDomainName, 30 => NispDomainName, 31 => SntpServers, 32 => InformationRefreshTime, 33 => BcmcsServerD, 34 => BcmcsServerA, 36 => GeoconfCivic, 37 => RemoteId, 38 => SubscriberId, 39 => ClientFqdn, 40 => PanaAgent, 41 => NewPosixTimezone, 42 => NewTzdbTimezone, 43 => ERO, 44 => LqQuery, 
45 => ClientData, 46 => CltTime, 47 => LqRelayData, 48 => LqClientLink, 49 => Mip6Hnidf, 50 => Mip6Vdinf, 51 => V6Lost, 52 => CapwapAcV6, 53 => RelayId, 54 => Ipv6AddressMoS, 55 => Ipv6FQDNMoS, 56 => NtpServer, 57 => V6AccessDomain, 58 => SipUaCsList, 59 => OptBootfileUrl, 60 => OptBootfileParam, 61 => ClientArchType, 62 => Nii, 63 => Geolocation, 64 => AftrName, 65 => ErpLocalDomainName, 66 => Rsoo, 67 => PdExclude, 68 => Vss, 69 => Mip6Idinf, 70 => Mip6Udinf, 71 => Mip6Hnp, 72 => Mip6Haa, 73 => Mip6Haf, 74 => RdnssSelection, 75 => KrbPrincipalName, 76 => KrbRealmName, 77 => KrbDefaultRealmName, 78 => KrbKdc, 79 => ClientLinklayerAddr, 80 => LinkAddress, 81 => Radius, 82 => SolMaxRt, 83 => InfMaxRt, 84 => Addrsel, 85 => AddrselTable, 86 => V6PcpServer, 87 => Dhcpv4Msg, 88 => Dhcp4ODhcp6Server, 89 => S46Rule, 90 => S46Br, 91 => S46Dmr, 92 => S46V4v6bind, 93 => S46Portparams, 94 => S46ContMape, 95 => S46ContMapt, 96 => S46ContLw, 97 => _4Rd, 98 => _4RdMapRule, 99 => _4RdNonMapRule, 100 => LqBaseTime, 101 => LqStartTime, 102 => LqEndTime, 103 => DhcpCaptivePortal, 104 => MplParameters, 105 => AniAtt, 106 => AniNetworkName, 107 => AniApName, 108 => AniApBssid, 109 => AniOperatorId, 110 => AniOperatorRealm, 111 => S46Priority, 112 => MudUrlV6, 113 => V6Prefix64, 114 => FBindingStatus, 115 => FConnectFlags, 116 => Fdnsremovalinfo, 117 => FDNSHostName, 118 => FDNSZoneName, 119 => Fdnsflags, 120 => Fexpirationtime, 121 => FMaxUnackedBndupd, 122 => FMclt, 123 => FPartnerLifetime, 124 => FPartnerLifetimeSent, 125 => FPartnerDownTime, 126 => FPartnerRawCltTime, 127 => FProtocolVersion, 128 => FKeepaliveTime, 129 => FReconfigureData, 130 => FRelationshipName, 131 => FServerFlags, 132 => FServerState, 133 => FStartTimeOfState, 134 => FStateExpirationTime, 135 => RelayPort, 143 => Ipv6AddressANDSF, _ => Unknown(n), } } } impl PartialOrd for OptionCode { fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> { Some(self.cmp(other)) } } impl Ord for OptionCode { fn 
cmp(&self, other: &Self) -> core::cmp::Ordering { u16::from(*self).cmp(&u16::from(*other)) } } impl From<&DhcpOption> for OptionCode { fn from(opt: &DhcpOption) -> Self { use DhcpOption as O; match opt { O::ClientId(_) => OptionCode::ClientId, O::ServerId(_) => OptionCode::ServerId, O::IANA(_) => OptionCode::IANA, O::IATA(_) => OptionCode::IATA, O::IAAddr(_) => OptionCode::IAAddr, O::ORO(_) => OptionCode::ORO, O::Preference(_) => OptionCode::Preference, O::ElapsedTime(_) => OptionCode::ElapsedTime, O::RelayMsg(_) => OptionCode::RelayMsg, O::Authentication(_) => OptionCode::Authentication, O::ServerUnicast(_) => OptionCode::ServerUnicast, O::StatusCode(_) => OptionCode::StatusCode, O::RapidCommit => OptionCode::RapidCommit, O::UserClass(_) => OptionCode::UserClass, O::VendorClass(_) => OptionCode::VendorClass, O::VendorOpts(_) => OptionCode::VendorOpts, O::InterfaceId(_) => OptionCode::InterfaceId, O::ReconfMsg(_) => OptionCode::ReconfMsg, O::ReconfAccept => OptionCode::ReconfAccept, O::DomainNameServers(_) => OptionCode::DomainNameServers, O::DomainSearchList(_) => OptionCode::DomainSearchList, O::IAPD(_) => OptionCode::IAPD, O::IAPrefix(_) => OptionCode::IAPrefix, O::InformationRefreshTime(_) => OptionCode::InformationRefreshTime, O::NtpServer(_) => OptionCode::NtpServer, // SolMaxRt(_) => OptionCode::SolMaxRt, // InfMaxRt(_) => OptionCode::InfMaxRt, // LqQuery(_) => OptionCode::LqQuery, // ClientData(_) => OptionCode::ClientData, // CltTime(_) => OptionCode::CltTime, // LqRelayData(_) => OptionCode::LqRelayData, // LqClientLink(_) => OptionCode::LqClientLink, // RelayId(_) => OptionCode::RelayId, // LinkAddress(_) => OptionCode::LinkAddress, O::Unknown(UnknownOption { code, .. }) => OptionCode::Unknown(*code), } } }
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
false
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/src/v6/htype.rs
src/v6/htype.rs
/// In DHCPv6, the Hardware Types are expanded from u8 to u16, referring to https://www.iana.org/assignments/arp-parameters/arp-parameters.xhtml for implementation. use crate::{ decoder::{Decodable, Decoder}, encoder::{Encodable, Encoder}, error::{DecodeResult, EncodeResult}, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; /// Hardware type of message #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Copy, Hash, Clone, PartialEq, Eq)] pub enum HType { /// 1 Ethernet Eth, /// 2 Experimental Ethernet ExperimentalEth, /// 3 Amateur Radio AX25 AmRadioAX25, /// 4 Proteon Token Ring ProteonTokenRing, /// 5 Chaos Chaos, /// 6 IEEE.802 IEEE802, /// 7 ARCNET ARCNET, /// 8 Hyperchannel Hyperchannel, /// 9 LANSTAR Lanstar, /// 10 Autonet Short Addr AutonetShortAddr, /// 11 LocalTalk LocalTalk, /// 12 LocalNet LocalNet, /// 13 Ultralink Ultralink, /// 14 SMDS SMDS, /// 15 FrameRelay FrameRelay, /// 17 HDLC HDLC, /// 18 FibreChannel FibreChannel, /// 20 SerialLine SerialLine, /// 22 Mil STD MilStd188220, /// 23 Metricom Metricom, /// 24 IEEE1394.1995 IEEE13941995, /// 25 MAPOS MAPOS, /// 26 Twinaxial Twinaxial, /// 27 EUI64 EUI64, /// 28 HIPARP HIPARP, /// 29 IP and ARP over ISO 7816-3 IPandARPoverISO78163, /// 30 ARPSec ARPSec, /// 31 IPsec tunnel IPsecTunnel, /// 32 Infiniband Infiniband, /// 33 TIA-102 Project 25 Common Air Interface (CAI) CAI, /// 34 WeigandInt WiegandInt, /// 35 PureIP PureIP, /// 36 HW_EXP1 HWExp1, /// 37 HFI HFI, /// 38 Unified BUS(UB), UB, /// 256 HW_EXP2 HWExp2, /// 257 AEthernet AEthernet, /// 65535 Reserved Reserved, /// Unknown or not yet implemented htype Unknown(u16), } impl From<u16> for HType { fn from(n: u16) -> Self { use HType::*; match n { 1 => Eth, 2 => ExperimentalEth, 3 => AmRadioAX25, 4 => ProteonTokenRing, 5 => Chaos, 6 => IEEE802, 7 => ARCNET, 8 => Hyperchannel, 9 => Lanstar, 10 => AutonetShortAddr, 11 => LocalTalk, 12 => LocalNet, 13 => Ultralink, 14 => SMDS, 15 => FrameRelay, 17 => HDLC, 
18 => FibreChannel, 20 => SerialLine, 22 => MilStd188220, 23 => Metricom, 24 => IEEE13941995, 25 => MAPOS, 26 => Twinaxial, 27 => EUI64, 28 => HIPARP, 29 => IPandARPoverISO78163, 30 => ARPSec, 31 => IPsecTunnel, 32 => Infiniband, 33 => CAI, 34 => WiegandInt, 35 => PureIP, 36 => HWExp1, 37 => HFI, 38 => UB, 256 => HWExp2, 257 => AEthernet, 65535 => Reserved, n => Unknown(n), } } } impl From<HType> for u16 { fn from(n: HType) -> Self { use HType as H; match n { H::Eth => 1, H::ExperimentalEth => 2, H::AmRadioAX25 => 3, H::ProteonTokenRing => 4, H::Chaos => 5, H::IEEE802 => 6, H::ARCNET => 7, H::Hyperchannel => 8, H::Lanstar => 9, H::AutonetShortAddr => 10, H::LocalTalk => 11, H::LocalNet => 12, H::Ultralink => 13, H::SMDS => 14, H::FrameRelay => 15, H::HDLC => 17, H::FibreChannel => 18, H::SerialLine => 20, H::MilStd188220 => 22, H::Metricom => 23, H::IEEE13941995 => 24, H::MAPOS => 25, H::Twinaxial => 26, H::EUI64 => 27, H::HIPARP => 28, H::IPandARPoverISO78163 => 29, H::ARPSec => 30, H::IPsecTunnel => 31, H::Infiniband => 32, H::CAI => 33, H::WiegandInt => 34, H::PureIP => 35, H::HWExp1 => 36, H::HFI => 37, H::UB => 38, H::HWExp2 => 256, H::AEthernet => 257, H::Reserved => 65535, H::Unknown(n) => n, } } } impl Decodable for HType { fn decode(decoder: &mut Decoder<'_>) -> DecodeResult<Self> { Ok(decoder.read_u16()?.into()) } } impl Encodable for HType { fn encode(&self, e: &mut Encoder<'_>) -> EncodeResult<()> { e.write_u16((*self).into()) } }
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
false
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/src/v6/options.rs
src/v6/options.rs
use hickory_proto::{ rr::Name, serialize::binary::{BinDecodable, BinDecoder, BinEncodable, BinEncoder}, }; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use alloc::{string::String, vec::Vec}; use core::{cmp::Ordering, net::Ipv6Addr, ops::RangeInclusive}; use crate::v6::option_codes::OptionCode; use crate::{ decoder::{Decodable, Decoder}, encoder::{Encodable, Encoder}, error::{DecodeResult, EncodeResult}, v6::{MessageType, RelayMessage}, }; // server can send multiple IA_NA options to request multiple addresses // so we must be able to handle multiple of the same option type // <https://datatracker.ietf.org/doc/html/rfc8415#section-6.6> // TODO: consider HashMap<OptionCode, TinyVec<DhcpOption>> /// <https://datatracker.ietf.org/doc/html/rfc8415#section-21> #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq, Default)] pub struct DhcpOptions(Vec<DhcpOption>); // vec maintains sorted on OptionCode impl DhcpOptions { /// construct empty DhcpOptions pub fn new() -> Self { Self::default() } /// get the first element matching this option code pub fn get(&self, code: OptionCode) -> Option<&DhcpOption> { let first = first(&self.0, |x| OptionCode::from(x).cmp(&code))?; // get_unchecked? 
self.0.get(first) } /// get all elements matching this option code pub fn get_all(&self, code: OptionCode) -> Option<&[DhcpOption]> { let range = range_binsearch(&self.0, |x| OptionCode::from(x).cmp(&code))?; Some(&self.0[range]) } /// get the first element matching this option code pub fn get_mut(&mut self, code: OptionCode) -> Option<&mut DhcpOption> { let first = first(&self.0, |x| OptionCode::from(x).cmp(&code))?; self.0.get_mut(first) } /// get all elements matching this option code pub fn get_mut_all(&mut self, code: OptionCode) -> Option<&mut [DhcpOption]> { let range = range_binsearch(&self.0, |x| OptionCode::from(x).cmp(&code))?; Some(&mut self.0[range]) } /// remove the first element with a matching option code pub fn remove(&mut self, code: OptionCode) -> Option<DhcpOption> { let first = first(&self.0, |x| OptionCode::from(x).cmp(&code))?; Some(self.0.remove(first)) } /// remove all elements with a matching option code pub fn remove_all( &mut self, code: OptionCode, ) -> Option<impl Iterator<Item = DhcpOption> + '_> { let range = range_binsearch(&self.0, |x| OptionCode::from(x).cmp(&code))?; Some(self.0.drain(range)) } /// insert a new option into the list of opts pub fn insert(&mut self, opt: DhcpOption) { let i = self.0.partition_point(|x| x < &opt); self.0.insert(i, opt) } /// return a reference to an iterator pub fn iter(&self) -> impl Iterator<Item = &DhcpOption> { self.0.iter() } /// return a mutable ref to an iterator pub fn iter_mut(&mut self) -> impl Iterator<Item = &mut DhcpOption> { self.0.iter_mut() } } impl IntoIterator for DhcpOptions { type Item = DhcpOption; type IntoIter = alloc::vec::IntoIter<Self::Item>; fn into_iter(self) -> Self::IntoIter { self.0.into_iter() } } impl FromIterator<DhcpOption> for DhcpOptions { fn from_iter<T: IntoIterator<Item = DhcpOption>>(iter: T) -> Self { let mut opts = iter.into_iter().collect::<Vec<_>>(); opts.sort_unstable(); DhcpOptions(opts) } } /// DHCPv6 option types #[cfg_attr(feature = "serde", 
derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub enum DhcpOption { /// 1 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.2> ClientId(Vec<u8>), // should duid for this be bytes or string? /// 2 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.3> ServerId(Vec<u8>), /// 3 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.4> IANA(IANA), /// 4 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.5> IATA(IATA), /// 5 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.6> IAAddr(IAAddr), /// 6 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.7> ORO(ORO), /// 7 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.8> Preference(u8), /// 8 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.9> /// Elapsed time in millis ElapsedTime(u16), /// 9 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.10> RelayMsg(RelayMessage), /// 11 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.11> Authentication(Authentication), /// 12 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.12> ServerUnicast(Ipv6Addr), /// 13 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.13> StatusCode(StatusCode), /// 14 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.14> RapidCommit, /// 15 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.15> UserClass(UserClass), /// 16 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.16> VendorClass(VendorClass), /// 17 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.17> VendorOpts(VendorOpts), /// 18 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.18> InterfaceId(Vec<u8>), /// 19 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.19> ReconfMsg(MessageType), /// 20 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.20> ReconfAccept, /// 23 - <https://datatracker.ietf.org/doc/html/rfc3646> DomainNameServers(Vec<Ipv6Addr>), /// 24 - 
<https://datatracker.ietf.org/doc/html/rfc3646> DomainSearchList(Vec<Name>), /// 25 - <https://datatracker.ietf.org/doc/html/rfc8415#section-21.21> IAPD(IAPD), /// 26 - <https://datatracker.ietf.org/doc/html/rfc3633#section-10> IAPrefix(IAPrefix), InformationRefreshTime(u32), /// 56 - <https://datatracker.ietf.org/doc/html/rfc5908> NtpServer(Vec<NtpSuboption>), // SolMaxRt(u32), // InfMaxRt(u32), // LqQuery(_), // ClientData(_), // CltTime(_), // LqRelayData(_), // LqClientLink(_), // RelayId(_), // LinkAddress(_), /// An unknown or unimplemented option type Unknown(UnknownOption), } impl PartialOrd for DhcpOption { fn partial_cmp(&self, other: &Self) -> Option<core::cmp::Ordering> { Some(self.cmp(other)) } } impl Ord for DhcpOption { fn cmp(&self, other: &Self) -> core::cmp::Ordering { OptionCode::from(self).cmp(&OptionCode::from(other)) } } /// wrapper around interface id #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct InterfaceId { pub id: String, } /// vendor options #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub struct VendorOpts { pub num: u32, // encapsulated options values pub opts: DhcpOptions, } /// vendor class #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct VendorClass { pub num: u32, pub data: Vec<Vec<u8>>, // each item in data is [len (2 bytes) | data] } /// user class #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct UserClass { pub data: Vec<Vec<u8>>, // each item in data is [len (2 bytes) | data] } #[inline] fn decode_data(decoder: &'_ mut Decoder<'_>) -> Vec<Vec<u8>> { let mut data = Vec::new(); while let Ok(len) = decoder.read_u16() { // if we can read the len and the string match decoder.read_slice(len as usize) { Ok(s) => data.push(s.to_vec()), // push, otherwise stop _ => break, } } data } /// Server Unicast #[cfg_attr(feature = "serde", 
derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct StatusCode { pub status: Status, // 2 + len pub msg: String, } /// Status code for Server Unicast #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum Status { Success, UnspecFail, NoAddrsAvail, NoBinding, NotOnLink, UseMulticast, NoPrefixAvail, UnknownQueryType, MalformedQuery, NotConfigured, NotAllowed, QueryTerminated, DataMissing, CatchUpComplete, NotSupported, TLSConnectionRefused, AddressInUse, ConfigurationConflict, MissingBindingInformation, OutdatedBindingInformation, ServerShuttingDown, DNSUpdateNotSupported, ExcessiveTimeSkew, /// unknown/unimplemented message type Unknown(u16), } impl From<u16> for Status { fn from(n: u16) -> Self { use Status::*; match n { 0 => Success, 1 => UnspecFail, 2 => NoAddrsAvail, 3 => NoBinding, 4 => NotOnLink, 5 => UseMulticast, 6 => NoPrefixAvail, 7 => UnknownQueryType, 8 => MalformedQuery, 9 => NotConfigured, 10 => NotAllowed, 11 => QueryTerminated, 12 => DataMissing, 13 => CatchUpComplete, 14 => NotSupported, 15 => TLSConnectionRefused, 16 => AddressInUse, 17 => ConfigurationConflict, 18 => MissingBindingInformation, 19 => OutdatedBindingInformation, 20 => ServerShuttingDown, 21 => DNSUpdateNotSupported, 22 => ExcessiveTimeSkew, _ => Unknown(n), } } } impl From<Status> for u16 { fn from(n: Status) -> Self { use Status as S; match n { S::Success => 0, S::UnspecFail => 1, S::NoAddrsAvail => 2, S::NoBinding => 3, S::NotOnLink => 4, S::UseMulticast => 5, S::NoPrefixAvail => 6, S::UnknownQueryType => 7, S::MalformedQuery => 8, S::NotConfigured => 9, S::NotAllowed => 10, S::QueryTerminated => 11, S::DataMissing => 12, S::CatchUpComplete => 13, S::NotSupported => 14, S::TLSConnectionRefused => 15, S::AddressInUse => 16, S::ConfigurationConflict => 17, S::MissingBindingInformation => 18, S::OutdatedBindingInformation => 19, S::ServerShuttingDown => 20, 
S::DNSUpdateNotSupported => 21, S::ExcessiveTimeSkew => 22, S::Unknown(n) => n, } } } /// Authentication #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct Authentication { pub proto: u8, pub algo: u8, pub rdm: u8, pub replay_detection: u64, // 11 + len pub info: Vec<u8>, } impl Decodable for Authentication { fn decode(decoder: &'_ mut Decoder<'_>) -> DecodeResult<Self> { let len = decoder.buffer().len(); Ok(Authentication { proto: decoder.read_u8()?, algo: decoder.read_u8()?, rdm: decoder.read_u8()?, replay_detection: decoder.read_u64()?, info: decoder.read_slice(len - 11)?.to_vec(), }) } } /// Option Request Option /// <https://datatracker.ietf.org/doc/html/rfc8415#section-21.7> #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct ORO { // 2 * num opts pub opts: Vec<OptionCode>, } impl Decodable for ORO { fn decode(decoder: &'_ mut Decoder<'_>) -> DecodeResult<Self> { let len = decoder.buffer().len(); Ok(ORO { opts: { decoder .read_slice(len)? .chunks_exact(2) // TODO: use .array_chunks::<2>() when stable .map(|code| OptionCode::from(u16::from_be_bytes([code[0], code[1]]))) .collect() }, }) } } /// Identity Association for Temporary Addresses #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub struct IATA { pub id: u32, // 4 + opts.len() // should this be Vec<DhcpOption> ? 
// the RFC suggests it 'encapsulates options' pub opts: DhcpOptions, } impl Decodable for IATA { fn decode(decoder: &'_ mut Decoder<'_>) -> DecodeResult<Self> { Ok(IATA { id: decoder.read_u32()?, opts: DhcpOptions::decode(decoder)?, }) } } /// Identity Association for Non-Temporary Addresses #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub struct IANA { pub id: u32, pub t1: u32, pub t2: u32, // 12 + opts.len() pub opts: DhcpOptions, } impl Decodable for IANA { fn decode(decoder: &'_ mut Decoder<'_>) -> DecodeResult<Self> { Ok(IANA { id: decoder.read_u32()?, t1: decoder.read_u32()?, t2: decoder.read_u32()?, opts: DhcpOptions::decode(decoder)?, }) } } /// Identity Association Prefix Delegation #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub struct IAPD { pub id: u32, pub t1: u32, pub t2: u32, // 12 + opts.len() pub opts: DhcpOptions, } impl Decodable for IAPD { fn decode(decoder: &'_ mut Decoder<'_>) -> DecodeResult<Self> { Ok(IAPD { id: decoder.read_u32()?, t1: decoder.read_u32()?, t2: decoder.read_u32()?, opts: DhcpOptions::decode(decoder)?, }) } } /// Identity Association Prefix Delegation Prefix Option #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub struct IAPrefix { pub preferred_lifetime: u32, pub valid_lifetime: u32, pub prefix_len: u8, pub prefix_ip: Ipv6Addr, // 25 + opts.len() pub opts: DhcpOptions, } impl Decodable for IAPrefix { fn decode(decoder: &'_ mut Decoder<'_>) -> DecodeResult<Self> { Ok(IAPrefix { preferred_lifetime: decoder.read_u32()?, valid_lifetime: decoder.read_u32()?, prefix_len: decoder.read_u8()?, prefix_ip: decoder.read::<16>()?.into(), opts: DhcpOptions::decode(decoder)?, }) } } /// Identity Association Address #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub struct IAAddr { pub addr: Ipv6Addr, pub 
preferred_life: u32, pub valid_life: u32, // 24 + opts.len() // should this be DhcpOptions ? // the RFC suggests it 'encapsulates options' pub opts: DhcpOptions, } impl Decodable for IAAddr { fn decode(decoder: &'_ mut Decoder<'_>) -> DecodeResult<Self> { Ok(IAAddr { addr: decoder.read::<16>()?.into(), preferred_life: decoder.read_u32()?, valid_life: decoder.read_u32()?, opts: DhcpOptions::decode(decoder)?, }) } } #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub enum NtpSuboption { ServerAddress(Ipv6Addr), MulticastAddress(Ipv6Addr), FQDN(Name), } impl Decodable for NtpSuboption { fn decode(decoder: &mut Decoder<'_>) -> DecodeResult<Self> { let code = decoder.read_u16()?; match code { 1 | 2 => { let len = decoder.read_u16()?; if len != 16 { return Err(super::DecodeError::NotEnoughBytes); } let addr: Ipv6Addr = decoder.read::<16>()?.into(); let option = if addr.is_multicast() { NtpSuboption::MulticastAddress(addr) } else { NtpSuboption::ServerAddress(addr) }; Ok(option) } 3 => { let len = decoder.read_u16()? 
as _; let mut name_decoder = BinDecoder::new(decoder.read_slice(len)?); Ok(NtpSuboption::FQDN(Name::read(&mut name_decoder)?)) } code => Err(super::DecodeError::InvalidData( code as u32, "invalid ntp suboption code", )), } } } impl Encodable for NtpSuboption { fn encode(&self, e: &mut Encoder<'_>) -> EncodeResult<()> { match self { NtpSuboption::ServerAddress(addr) | NtpSuboption::MulticastAddress(addr) => { let code = if addr.is_multicast() { 2 } else { 1 }; e.write_u16(code)?; e.write_u16(16)?; e.write::<16>(addr.octets())?; } NtpSuboption::FQDN(name) => { let mut buf = Vec::new(); let mut name_encoder = BinEncoder::new(&mut buf); name.emit(&mut name_encoder)?; e.write_u16(3)?; e.write_u16(buf.len() as _)?; e.write_slice(&buf)?; } } Ok(()) } } /// fallback for options not yet implemented #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq, Hash)] pub struct UnknownOption { pub(crate) code: u16, pub(crate) data: Vec<u8>, } impl UnknownOption { pub fn new(code: OptionCode, data: Vec<u8>) -> Self { Self { code: code.into(), data, } } /// return the option code pub fn code(&self) -> OptionCode { self.code.into() } /// return the data for this option pub fn data(&self) -> &[u8] { &self.data } /// consume option into its components pub fn into_parts(self) -> (OptionCode, Vec<u8>) { (self.code.into(), self.data) } } impl Decodable for DhcpOptions { fn decode(decoder: &mut Decoder<'_>) -> DecodeResult<Self> { let mut opts = Vec::new(); while let Ok(opt) = DhcpOption::decode(decoder) { opts.push(opt); } // sorts by OptionCode opts.sort_unstable(); Ok(DhcpOptions(opts)) } } impl Encodable for DhcpOptions { fn encode(&self, e: &'_ mut Encoder<'_>) -> EncodeResult<()> { self.0.iter().try_for_each(|opt| opt.encode(e)) } } impl Decodable for DhcpOption { fn decode(decoder: &mut Decoder<'_>) -> DecodeResult<Self> { let code = decoder.read_u16()?.into(); let len = decoder.read_u16()? 
as usize; Ok(match code { OptionCode::ClientId => DhcpOption::ClientId(decoder.read_slice(len)?.to_vec()), OptionCode::ServerId => DhcpOption::ServerId(decoder.read_slice(len)?.to_vec()), OptionCode::IANA => { let mut dec = Decoder::new(decoder.read_slice(len)?); DhcpOption::IANA(IANA::decode(&mut dec)?) } OptionCode::IATA => { let mut dec = Decoder::new(decoder.read_slice(len)?); DhcpOption::IATA(IATA::decode(&mut dec)?) } OptionCode::IAAddr => { let mut dec = Decoder::new(decoder.read_slice(len)?); DhcpOption::IAAddr(IAAddr::decode(&mut dec)?) } OptionCode::ORO => { let mut dec = Decoder::new(decoder.read_slice(len)?); DhcpOption::ORO(ORO::decode(&mut dec)?) } OptionCode::Preference => DhcpOption::Preference(decoder.read_u8()?), OptionCode::ElapsedTime => DhcpOption::ElapsedTime(decoder.read_u16()?), OptionCode::RelayMsg => { let mut relay_dec = Decoder::new(decoder.read_slice(len)?); DhcpOption::RelayMsg(RelayMessage::decode(&mut relay_dec)?) } OptionCode::Authentication => { let mut dec = Decoder::new(decoder.read_slice(len)?); DhcpOption::Authentication(Authentication::decode(&mut dec)?) } OptionCode::ServerUnicast => DhcpOption::ServerUnicast(decoder.read::<16>()?.into()), OptionCode::StatusCode => DhcpOption::StatusCode(StatusCode { status: decoder.read_u16()?.into(), msg: decoder.read_string(len - core::mem::size_of::<u16>())?, }), OptionCode::RapidCommit => DhcpOption::RapidCommit, OptionCode::UserClass => { let buf = decoder.read_slice(len)?; DhcpOption::UserClass(UserClass { data: decode_data(&mut Decoder::new(buf)), }) } OptionCode::VendorClass => { let num = decoder.read_u32()?; let buf = decoder.read_slice(len - 4)?; DhcpOption::VendorClass(VendorClass { num, data: decode_data(&mut Decoder::new(buf)), }) } OptionCode::VendorOpts => DhcpOption::VendorOpts(VendorOpts { num: decoder.read_u32()?, opts: { let mut opt_decoder = Decoder::new(decoder.read_slice(len - 4)?); DhcpOptions::decode(&mut opt_decoder)? 
}, }), OptionCode::InterfaceId => DhcpOption::InterfaceId(decoder.read_slice(len)?.to_vec()), OptionCode::ReconfMsg => DhcpOption::ReconfMsg(decoder.read_u8()?.into()), OptionCode::ReconfAccept => DhcpOption::ReconfAccept, OptionCode::DomainNameServers => { DhcpOption::DomainNameServers(decoder.read_ipv6s(len)?) } OptionCode::IAPD => { let mut dec = Decoder::new(decoder.read_slice(len)?); DhcpOption::IAPD(IAPD::decode(&mut dec)?) } OptionCode::IAPrefix => { let mut dec = Decoder::new(decoder.read_slice(len)?); DhcpOption::IAPrefix(IAPrefix::decode(&mut dec)?) } OptionCode::DomainSearchList => { let mut name_decoder = BinDecoder::new(decoder.read_slice(len)?); let mut names = Vec::new(); while let Ok(name) = Name::read(&mut name_decoder) { names.push(name); } DhcpOption::DomainSearchList(names) } OptionCode::NtpServer => { let mut dec = Decoder::new(decoder.read_slice(len)?); let mut suboptions = Vec::new(); while !dec.buffer().is_empty() { suboptions.push(NtpSuboption::decode(&mut dec)?); } DhcpOption::NtpServer(suboptions) } // not yet implemented OptionCode::Unknown(code) => DhcpOption::Unknown(UnknownOption { code, data: decoder.read_slice(len)?.to_vec(), }), _ => DhcpOption::Unknown(UnknownOption { code: code.into(), data: decoder.read_slice(len)?.to_vec(), }), }) } } impl Encodable for DhcpOption { fn encode(&self, e: &'_ mut Encoder<'_>) -> EncodeResult<()> { let code: OptionCode = self.into(); e.write_u16(code.into())?; match self { DhcpOption::ClientId(duid) | DhcpOption::ServerId(duid) => { e.write_u16(duid.len() as u16)?; e.write_slice(duid)?; } DhcpOption::IANA(IANA { id, t1, t2, opts }) | DhcpOption::IAPD(IAPD { id, t1, t2, opts }) => { // write len let mut buf = Vec::new(); let mut opt_enc = Encoder::new(&mut buf); opts.encode(&mut opt_enc)?; // buf now has total len e.write_u16(12 + buf.len() as u16)?; // write data e.write_u32(*id)?; e.write_u32(*t1)?; e.write_u32(*t2)?; e.write_slice(&buf)?; } DhcpOption::IATA(IATA { id, opts }) => { // write len 
let mut buf = Vec::new(); let mut opt_enc = Encoder::new(&mut buf); opts.encode(&mut opt_enc)?; // buf now has total len e.write_u16(4 + buf.len() as u16)?; // data e.write_u32(*id)?; e.write_slice(&buf)?; } DhcpOption::IAAddr(IAAddr { addr, preferred_life, valid_life, opts, }) => { // write len let mut buf = Vec::new(); let mut opt_enc = Encoder::new(&mut buf); opts.encode(&mut opt_enc)?; // buf now has total len e.write_u16(24 + buf.len() as u16)?; // data e.write_u128((*addr).into())?; e.write_u32(*preferred_life)?; e.write_u32(*valid_life)?; e.write_slice(&buf)?; } DhcpOption::ORO(ORO { opts }) => { // write len e.write_u16(2 * opts.len() as u16)?; // data for code in opts { e.write_u16(u16::from(*code))?; } } DhcpOption::Preference(pref) => { e.write_u16(1)?; e.write_u8(*pref)?; } DhcpOption::ElapsedTime(elapsed) => { e.write_u16(2)?; e.write_u16(*elapsed)?; } DhcpOption::RelayMsg(msg) => { let mut buf = Vec::new(); let mut relay_enc = Encoder::new(&mut buf); msg.encode(&mut relay_enc)?; e.write_u16(buf.len() as u16)?; e.write_slice(&buf)?; } DhcpOption::Authentication(Authentication { proto, algo, rdm, replay_detection, info, }) => { e.write_u16(11 + info.len() as u16)?; e.write_u8(*proto)?; e.write_u8(*algo)?; e.write_u8(*rdm)?; e.write_u64(*replay_detection)?; e.write_slice(info)?; } DhcpOption::ServerUnicast(addr) => { e.write_u16(16)?; e.write_u128((*addr).into())?; } DhcpOption::StatusCode(StatusCode { status, msg }) => { e.write_u16(2 + msg.len() as u16)?; e.write_u16((*status).into())?; e.write_slice(msg.as_bytes())?; } DhcpOption::RapidCommit => { e.write_u16(0)?; } DhcpOption::UserClass(UserClass { data }) => { e.write_u16(data.len() as u16)?; for s in data { e.write_u16(s.len() as u16)?; e.write_slice(s)?; } } DhcpOption::VendorClass(VendorClass { num, data }) => { e.write_u16(4 + data.len() as u16)?; e.write_u32(*num)?; for s in data { e.write_u16(s.len() as u16)?; e.write_slice(s)?; } } DhcpOption::VendorOpts(VendorOpts { num, opts }) => { let mut 
buf = Vec::new(); let mut opt_enc = Encoder::new(&mut buf); opts.encode(&mut opt_enc)?; // buf now has total len e.write_u16(4 + buf.len() as u16)?; e.write_u32(*num)?; e.write_slice(&buf)?; } DhcpOption::InterfaceId(id) => { e.write_u16(id.len() as u16)?; e.write_slice(id)?; } DhcpOption::ReconfMsg(msg_type) => { e.write_u16(1)?; e.write_u8((*msg_type).into())?; } DhcpOption::ReconfAccept => { e.write_u16(0)?; } DhcpOption::DomainNameServers(addrs) => { e.write_u16(addrs.len() as u16 * 16)?; for addr in addrs { e.write_u128((*addr).into())?; } } DhcpOption::DomainSearchList(names) => { let mut buf = Vec::new(); let mut name_encoder = BinEncoder::new(&mut buf); for name in names { name.emit(&mut name_encoder)?; } e.write_u16(buf.len() as u16)?; e.write_slice(&buf)?; } DhcpOption::IAPrefix(IAPrefix { preferred_lifetime, valid_lifetime, prefix_len, prefix_ip, opts, }) => { let mut buf = Vec::new(); let mut opt_enc = Encoder::new(&mut buf); opts.encode(&mut opt_enc)?; // buf now has total len e.write_u16(25 + buf.len() as u16)?; // write data e.write_u32(*preferred_lifetime)?; e.write_u32(*valid_lifetime)?; e.write_u8(*prefix_len)?; e.write_u128((*prefix_ip).into())?; e.write_slice(&buf)?; } DhcpOption::InformationRefreshTime(time) => { e.write_u16(4)?; e.write_u32(*time)?; } DhcpOption::NtpServer(suboptions) => { let mut buf = Vec::new(); let mut subopt_enc = Encoder::new(&mut buf); for suboption in suboptions { suboption.encode(&mut subopt_enc)?; } e.write_u16(buf.len() as _)?; e.write_slice(&buf)?; } DhcpOption::Unknown(UnknownOption { data, .. 
}) => { e.write_u16(data.len() as u16)?; e.write_slice(data)?; } }; Ok(()) } } #[inline] fn first<T, F>(arr: &[T], f: F) -> Option<usize> where T: Ord, F: Fn(&T) -> Ordering, { if arr.is_empty() { return None; } let mut l = 0; let mut r = arr.len() - 1; while l <= r { let mid = (l + r) >> 1; // SAFETY: we know it is within the length let mid_cmp = f(unsafe { arr.get_unchecked(mid) }); let prev_cmp = if mid > 0 { f(unsafe { arr.get_unchecked(mid - 1) }) == Ordering::Less } else { false }; match mid_cmp { Ordering::Less => l = mid + 1, Ordering::Equal if (mid == 0 || prev_cmp) => return Some(mid), Ordering::Greater | Ordering::Equal if mid == 0 => return None, Ordering::Greater | Ordering::Equal => r = mid - 1, } } None } #[inline] fn last<T, F>(arr: &[T], f: F) -> Option<usize> where T: Ord, F: Fn(&T) -> Ordering, { if arr.is_empty() { return None; } let n = arr.len(); let mut l = 0; let mut r = n - 1; while l <= r { let mid = (l + r) >> 1; // SAFETY: we know it is within the length let mid_cmp = f(unsafe { arr.get_unchecked(mid) }); let nxt_cmp = if mid < n && mid != n - 1 { f(unsafe { arr.get_unchecked(mid + 1) }) == Ordering::Greater } else { false }; match mid_cmp { Ordering::Greater => r = mid - 1, Ordering::Equal if (mid == n - 1 || nxt_cmp) => return Some(mid), Ordering::Less | Ordering::Equal if mid == n - 1 => return None, Ordering::Less | Ordering::Equal => l = mid + 1, } } None } #[inline] fn range_binsearch<T, F>(arr: &[T], f: F) -> Option<RangeInclusive<usize>> where T: Ord, F: Fn(&T) -> Ordering, { let first = first(arr, &f)?; let last = last(arr, &f)?; Some(first..=last) } #[cfg(test)] mod tests { use alloc::vec; use core::str::FromStr; use super::*; #[test] fn test_range_binsearch() { let arr = vec![0, 1, 1, 1, 1, 4, 6, 7, 9, 9, 10];
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
true
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/src/v6/mod.rs
src/v6/mod.rs
//! # DHCPv6 //! //! This module provides types and utility functions for encoding/decoding a DHCPv4 message. //! //! ## Example - constructing messages //! //! ```rust //! # fn main() -> Result<(), Box<dyn std::error::Error>> { //! use dhcproto::{v6, Encodable, Encoder}; //! // arbitrary DUID //! let duid = vec![ //! 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, //! ]; //! // construct a new Message with a random xid //! let mut msg = v6::Message::new(v6::MessageType::Solicit); //! // set an option //! msg.opts_mut() //! .insert(v6::DhcpOption::ClientId(duid)); //! //! // now encode to bytes //! let mut buf = Vec::new(); //! let mut e = Encoder::new(&mut buf); //! msg.encode(&mut e)?; //! //! // buf now has the contents of the encoded DHCP message //! # Ok(()) } //! ``` //! //! ## Example - encoding/decoding messages //! //! ```rust //! # fn solicit() -> Vec<u8> { //! # vec![ //! # 0x01, 0x10, 0x08, 0x74, 0x00, 0x01, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x01, 0x1c, 0x39, //! # 0xcf, 0x88, 0x08, 0x00, 0x27, 0xfe, 0x8f, 0x95, 0x00, 0x06, 0x00, 0x04, 0x00, 0x17, //! # 0x00, 0x18, 0x00, 0x08, 0x00, 0x02, 0x00, 0x00, 0x00, 0x19, 0x00, 0x0c, 0x27, 0xfe, //! # 0x8f, 0x95, 0x00, 0x00, 0x0e, 0x10, 0x00, 0x00, 0x15, 0x18, //! # ] //! # } //! # fn main() -> Result<(), Box<dyn std::error::Error>> { //! use dhcproto::{v6::Message, Decoder, Decodable, Encoder, Encodable}; //! // example message //! let solicit = solicit(); //! // decode //! let msg = Message::decode(&mut Decoder::new(&solicit))?; //! // now encode //! let mut buf = Vec::new(); //! let mut e = Encoder::new(&mut buf); //! msg.encode(&mut e)?; //! //! assert_eq!(solicit, buf); //! # Ok(()) } //! ``` //! 
pub mod duid; mod htype; mod option_codes; mod options; mod oro_codes; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use core::{convert::TryInto, fmt, net::Ipv6Addr}; // re-export submodules from v6 pub use self::htype::*; pub use self::option_codes::*; pub use self::options::*; pub use self::oro_codes::*; pub use crate::{ decoder::{Decodable, Decoder}, encoder::{Encodable, Encoder}, error::*, }; /// default dhcpv6 server port pub const SERVER_PORT: u16 = 547; /// default dhcpv6 client port pub const CLIENT_PORT: u16 = 546; /// See RFC 8415 for updated DHCPv6 info /// [DHCP for Ipv6](https://datatracker.ietf.org/doc/html/rfc8415) /// /// All DHCP messages sent between clients and servers share an identical /// fixed-format header and a variable-format area for options. /// /// All values in the message header and in options are in network byte /// order. /// /// Options are stored serially in the "options" field, with no padding /// between the options. Options are byte-aligned but are not aligned in /// any other way (such as on 2-byte or 4-byte boundaries). /// /// The following diagram illustrates the format of DHCP messages sent /// between clients and servers: /// /// ```text /// 0 1 2 3 /// 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 /// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ /// | msg-type | transaction-id | /// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ /// | | /// . options . /// . (variable number and length) . /// | | /// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ /// /// msg-type Identifies the DHCP message type; the /// available message types are listed in /// Section 7.3. A 1-octet field. /// /// transaction-id The transaction ID for this message exchange. /// A 3-octet field. /// /// options Options carried in this message; options are /// described in Section 21. A variable-length /// field (4 octets less than the size of the /// message). 
/// ``` #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub struct Message { /// message type /// <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> msg_type: MessageType, /// transaction id /// trns id must be the same for all messages in a DHCP transaction /// <https://datatracker.ietf.org/doc/html/rfc8415#section-16.1> xid: [u8; 3], /// Options /// <https://datatracker.ietf.org/doc/html/rfc8415#section-21> opts: DhcpOptions, } impl Default for Message { fn default() -> Self { Self { msg_type: MessageType::Solicit, xid: rand::random(), opts: DhcpOptions::new(), } } } impl Message { /// returns a new `Message` with a random xid and empty opt section pub fn new(msg_type: MessageType) -> Self { Self { msg_type, ..Self::default() } } /// returns a new `Message` with a given xid and message type and empty opt section pub fn new_with_id(msg_type: MessageType, xid: [u8; 3]) -> Self { Self { msg_type, xid, ..Self::default() } } /// Get the message's message type. pub fn msg_type(&self) -> MessageType { self.msg_type } /// Set message type pub fn set_msg_type(&mut self, msg_type: MessageType) -> &mut Self { self.msg_type = msg_type; self } /// Get the message's transaction id. pub fn xid(&self) -> [u8; 3] { self.xid } /// Get the msgs transaction id as a number pub fn xid_num(&self) -> u32 { u32::from_be_bytes([0, self.xid[0], self.xid[1], self.xid[2]]) } /// Set transaction id pub fn set_xid(&mut self, xid: [u8; 3]) -> &mut Self { self.xid = xid; self } /// Set transaction id from u32, will only use last 3 bytes pub fn set_xid_num(&mut self, xid: u32) -> &mut Self { let arr = xid.to_be_bytes(); self.xid = arr[1..=3] .try_into() .expect("a u32 has 4 bytes so this shouldn't fail"); self } /// Get a reference to the message's options. 
pub fn opts(&self) -> &DhcpOptions { &self.opts } /// Set DHCP opts pub fn set_opts(&mut self, opts: DhcpOptions) -> &mut Self { self.opts = opts; self } /// Get a mutable reference to the message's options. pub fn opts_mut(&mut self) -> &mut DhcpOptions { &mut self.opts } } /// DHCPv6 message types /// <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Copy, Clone, PartialEq, Eq, Hash)] pub enum MessageType { // RFC 3315 /// client solicit - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> Solicit, /// server advertise - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> Advertise, /// request - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> Request, /// confirm - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> Confirm, /// renew - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> Renew, /// rebind - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> Rebind, /// reply - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> Reply, /// release message type - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> Release, /// decline - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> Decline, /// reconfigure - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> Reconfigure, /// information request - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> InformationRequest, /// relay forward - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> RelayForw, /// relay reply - <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> RelayRepl, // RFC 5007 /// lease query - <https://datatracker.ietf.org/doc/html/rfc5007#section-4.2.1> LeaseQuery, /// lease query reply - <https://datatracker.ietf.org/doc/html/rfc5007#section-4.2.2> LeaseQueryReply, // RFC 5460 /// lease query done - <https://datatracker.ietf.org/doc/html/rfc5460#section-5.2.2> LeaseQueryDone, /// lease query data 
- <https://datatracker.ietf.org/doc/html/rfc5460#section-5.2.1> LeaseQueryData, // RFC 6977 /// reconfigure request - <https://datatracker.ietf.org/doc/html/rfc6977#section-6.2.1> ReconfigureRequest, /// reconfigure reply - <https://datatracker.ietf.org/doc/html/rfc6977#section-6.2.2> ReconfigureReply, // RFC 7341 /// dhcpv4 query - <https://datatracker.ietf.org/doc/html/rfc7341#section-6.2> DHCPv4Query, /// dhcpv4 response - <https://datatracker.ietf.org/doc/html/rfc7341#section-6.2> DHCPv4Response, /// unknown/unimplemented message type Unknown(u8), } impl From<u8> for MessageType { fn from(n: u8) -> Self { use MessageType::*; match n { // RFC 3315 1 => Solicit, 2 => Advertise, 3 => Request, 4 => Confirm, 5 => Renew, 6 => Rebind, 7 => Reply, 8 => Release, 9 => Decline, 10 => Reconfigure, 11 => InformationRequest, 12 => RelayForw, 13 => RelayRepl, // RFC 5007 14 => LeaseQuery, 15 => LeaseQueryReply, // RFC 5460 16 => LeaseQueryDone, 17 => LeaseQueryData, // RFC 6977 18 => ReconfigureRequest, 19 => ReconfigureReply, // RFC 7341 20 => DHCPv4Query, 21 => DHCPv4Response, n => Unknown(n), } } } impl From<MessageType> for u8 { fn from(m: MessageType) -> Self { use MessageType as M; match m { // RFC 3315 M::Solicit => 1, M::Advertise => 2, M::Request => 3, M::Confirm => 4, M::Renew => 5, M::Rebind => 6, M::Reply => 7, M::Release => 8, M::Decline => 9, M::Reconfigure => 10, M::InformationRequest => 11, M::RelayForw => 12, M::RelayRepl => 13, // RFC 5007 M::LeaseQuery => 14, M::LeaseQueryReply => 15, // RFC 5460 M::LeaseQueryDone => 16, M::LeaseQueryData => 17, // RFC 6977 M::ReconfigureRequest => 18, M::ReconfigureReply => 19, // RFC 7341 M::DHCPv4Query => 20, M::DHCPv4Response => 21, M::Unknown(n) => n, } } } impl Decodable for Message { fn decode(decoder: &mut Decoder<'_>) -> DecodeResult<Self> { Ok(Message { msg_type: decoder.read_u8()?.into(), xid: decoder.read::<3>()?, opts: DhcpOptions::decode(decoder)?, }) } } impl Encodable for Message { fn encode(&self, e: &mut 
Encoder<'_>) -> EncodeResult<()> { e.write_u8(self.msg_type.into())?; e.write(self.xid)?; self.opts.encode(e)?; Ok(()) } } impl fmt::Display for Message { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("Message") .field("xid", &self.xid_num()) .field("msg_type", &self.msg_type()) .field("opts", &self.opts()) .finish() } } #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub struct RelayMessage { /// message type /// <https://datatracker.ietf.org/doc/html/rfc8415#section-7.3> msg_type: MessageType, /// hop count /// <https://datatracker.ietf.org/doc/html/rfc8415#section-9> hop_count: u8, /// link address /// <https://datatracker.ietf.org/doc/html/rfc8415#section-9> link_addr: Ipv6Addr, /// peer address /// <https://datatracker.ietf.org/doc/html/rfc8415#section-9> peer_addr: Ipv6Addr, /// Options /// <https://datatracker.ietf.org/doc/html/rfc8415#section-21> opts: DhcpOptions, } impl RelayMessage { pub fn msg_type(&self) -> MessageType { self.msg_type } pub fn hop_count(&self) -> u8 { self.hop_count } pub fn link_addr(&self) -> Ipv6Addr { self.link_addr } pub fn peer_addr(&self) -> Ipv6Addr { self.peer_addr } /// Get a reference to the message's options. pub fn opts(&self) -> &DhcpOptions { &self.opts } /// Set DHCP opts pub fn set_opts(&mut self, opts: DhcpOptions) -> &mut Self { self.opts = opts; self } /// Get a mutable reference to the message's options. 
pub fn opts_mut(&mut self) -> &mut DhcpOptions { &mut self.opts } } impl Decodable for RelayMessage { fn decode(decoder: &mut Decoder<'_>) -> DecodeResult<Self> { Ok(Self { msg_type: decoder.read_u8()?.into(), hop_count: decoder.read_u8()?, link_addr: decoder.read::<16>()?.into(), peer_addr: decoder.read::<16>()?.into(), opts: DhcpOptions::decode(decoder)?, }) } } impl Encodable for RelayMessage { fn encode(&self, e: &mut Encoder<'_>) -> EncodeResult<()> { e.write_u8(self.msg_type.into())?; e.write_u8(self.hop_count)?; e.write_slice(&self.link_addr.octets())?; e.write_slice(&self.peer_addr.octets())?; self.opts.encode(e)?; Ok(()) } } impl fmt::Display for RelayMessage { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("RelayMessage") .field("msg_type", &self.msg_type()) .field("hop_count", &self.hop_count()) .field("link_addr", &self.link_addr()) .field("peer_addr", &self.peer_addr()) .field("opts", &self.opts()) .finish() } } #[cfg(test)] mod tests { use super::*; type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>; fn decode_ipv6(input: Vec<u8>, mtype: MessageType) -> Result<()> { // decode let msg = Message::decode(&mut Decoder::new(&input))?; dbg!(&msg); assert_eq!(mtype, msg.msg_type); // now encode let mut buf = Vec::new(); let mut e = Encoder::new(&mut buf); msg.encode(&mut e)?; println!("{buf:?}"); println!("{input:?}"); // no PAD bytes or hashmap with ipv6 so the lens will be exact assert_eq!(buf.len(), input.len()); // decode again let res = Message::decode(&mut Decoder::new(&buf))?; // check Messages are equal after decoding/encoding assert_eq!(msg, res); Ok(()) } #[test] fn decode_solicit() -> Result<()> { decode_ipv6(solicit(), MessageType::Solicit)?; Ok(()) } #[test] fn decode_advertise() -> Result<()> { decode_ipv6(advertise(), MessageType::Advertise)?; Ok(()) } #[test] fn decode_request() -> Result<()> { decode_ipv6(request(), MessageType::Request)?; Ok(()) } #[test] fn decode_reply() -> Result<()> { 
decode_ipv6(reply(), MessageType::Reply)?; Ok(()) } #[test] fn xid_num() { let mut msg = Message::default(); msg.set_xid_num(16_777_215); assert_eq!(msg.xid_num(), 16_777_215); msg.set_xid_num(16_777_000); assert_eq!(msg.xid_num(), 16_777_000); msg.set_xid_num(8); assert_eq!(msg.xid_num(), 8); } #[cfg(feature = "serde")] #[test] fn test_json_v6() -> Result<()> { let msg = Message::decode(&mut Decoder::new(&solicit()))?; let s = serde_json::to_string_pretty(&msg)?; println!("{s}"); let other = serde_json::from_str(&s)?; assert_eq!(msg, other); Ok(()) } fn solicit() -> Vec<u8> { vec![ 0x01, 0x10, 0x08, 0x74, 0x00, 0x01, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x01, 0x1c, 0x39, 0xcf, 0x88, 0x08, 0x00, 0x27, 0xfe, 0x8f, 0x95, 0x00, 0x06, 0x00, 0x04, 0x00, 0x17, 0x00, 0x18, 0x00, 0x08, 0x00, 0x02, 0x00, 0x00, 0x00, 0x19, 0x00, 0x0c, 0x27, 0xfe, 0x8f, 0x95, 0x00, 0x00, 0x0e, 0x10, 0x00, 0x00, 0x15, 0x18, ] } fn advertise() -> Vec<u8> { vec![ 0x02, 0x10, 0x08, 0x74, 0x00, 0x19, 0x00, 0x29, 0x27, 0xfe, 0x8f, 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x19, 0x00, 0x00, 0x11, 0x94, 0x00, 0x00, 0x1c, 0x20, 0x40, 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x01, 0x1c, 0x39, 0xcf, 0x88, 0x08, 0x00, 0x27, 0xfe, 0x8f, 0x95, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x01, 0x1c, 0x38, 0x25, 0xe8, 0x08, 0x00, 0x27, 0xd4, 0x10, 0xbb, ] } fn request() -> Vec<u8> { vec![ 0x03, 0x49, 0x17, 0x4e, 0x00, 0x01, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x01, 0x1c, 0x39, 0xcf, 0x88, 0x08, 0x00, 0x27, 0xfe, 0x8f, 0x95, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x01, 0x1c, 0x38, 0x25, 0xe8, 0x08, 0x00, 0x27, 0xd4, 0x10, 0xbb, 0x00, 0x06, 0x00, 0x04, 0x00, 0x17, 0x00, 0x18, 0x00, 0x08, 0x00, 0x02, 0x00, 0x00, 0x00, 0x19, 0x00, 0x29, 0x27, 0xfe, 0x8f, 0x95, 0x00, 0x00, 0x0e, 0x10, 0x00, 0x00, 0x15, 0x18, 0x00, 0x1a, 0x00, 0x19, 0x00, 0x00, 0x1c, 0x20, 0x00, 0x00, 0x1d, 0x4c, 0x40, 0x20, 0x01, 
0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ] } fn reply() -> Vec<u8> { vec![ 0x07, 0x49, 0x17, 0x4e, 0x00, 0x19, 0x00, 0x29, 0x27, 0xfe, 0x8f, 0x95, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x1a, 0x00, 0x19, 0x00, 0x00, 0x11, 0x94, 0x00, 0x00, 0x1c, 0x20, 0x40, 0x20, 0x01, 0x00, 0x00, 0x00, 0x00, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x01, 0x1c, 0x39, 0xcf, 0x88, 0x08, 0x00, 0x27, 0xfe, 0x8f, 0x95, 0x00, 0x02, 0x00, 0x0e, 0x00, 0x01, 0x00, 0x01, 0x1c, 0x38, 0x25, 0xe8, 0x08, 0x00, 0x27, 0xd4, 0x10, 0xbb, ] } }
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
false
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/src/v6/duid.rs
src/v6/duid.rs
use alloc::vec::Vec; use core::net::Ipv6Addr; #[cfg(feature = "serde")] use serde::{Deserialize, Serialize}; use crate::Encoder; use crate::v6::HType; /// Duid helper type #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub struct Duid(Vec<u8>); // TODO: define specific duid types impl Duid { /// new DUID link layer address with time pub fn link_layer_time(htype: HType, time: u32, addr: Ipv6Addr) -> Self { let mut buf = Vec::new(); let mut e = Encoder::new(&mut buf); e.write_u16(1).unwrap(); // duid type e.write_u16(u16::from(htype)).unwrap(); e.write_u32(time).unwrap(); e.write_u128(addr.into()).unwrap(); Self(buf) } /// new DUID enterprise number pub fn enterprise(enterprise: u32, id: &[u8]) -> Self { let mut buf = Vec::new(); let mut e = Encoder::new(&mut buf); e.write_u16(2).unwrap(); // duid type e.write_u32(enterprise).unwrap(); e.write_slice(id).unwrap(); Self(buf) } /// new link layer DUID pub fn link_layer(htype: HType, addr: Ipv6Addr) -> Self { let mut buf = Vec::new(); let mut e = Encoder::new(&mut buf); e.write_u16(3).unwrap(); // duid type e.write_u16(u16::from(htype)).unwrap(); e.write_u128(addr.into()).unwrap(); Self(buf) } /// new DUID-UUID /// `uuid` must be 16 bytes long pub fn uuid(uuid: &[u8]) -> Self { assert!(uuid.len() == 16); let mut buf = Vec::new(); let mut e = Encoder::new(&mut buf); e.write_u16(4).unwrap(); // duid type e.write_slice(uuid).unwrap(); Self(buf) } /// create a DUID of unknown type pub fn unknown(duid: &[u8]) -> Self { Self(duid.to_vec()) } /// total length of contained DUID pub fn len(&self) -> usize { self.0.len() } /// is contained DUID empty pub fn is_empty(&self) -> bool { self.len() == 0 } } impl AsRef<[u8]> for Duid { fn as_ref(&self) -> &[u8] { &self.0 } } impl From<Vec<u8>> for Duid { fn from(v: Vec<u8>) -> Self { Self(v) } }
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
false
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/benches/encode.rs
benches/encode.rs
use criterion::{Criterion, criterion_group, criterion_main}; use dhcproto::v4::Message; use dhcproto::{Decodable, Encodable, Encoder}; fn encode_benches(c: &mut Criterion) { let mut g = c.benchmark_group("encode"); g.bench_function("encode_offer", |b| { let offer: &[u8] = &[ 0x02, 0x01, 0x06, 0x00, 0x00, 0x00, 0x15, 0x5c, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x00, 0x0a, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x02, 0x36, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x33, 0x04, 0x00, 0x00, 0x00, 0x3c, 0x3a, 0x04, 0x00, 0x00, 0x00, 0x1e, 0x3b, 0x04, 0x00, 0x00, 0x00, 0x34, 0x01, 0x04, 0xff, 0xff, 0xff, 0x00, 0x03, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x06, 0x08, 0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, 
0x01, 0x01, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]; let message = Message::from_bytes(offer).unwrap(); let mut bytes = Vec::with_capacity(offer.len()); b.iter(|| { message.encode(&mut Encoder::new(&mut bytes)).unwrap(); }); }); g.bench_function("encode_bootreq", |b| { let bootreq: &[u8] = &[ 1u8, // op 2, // htype 3, // hlen 4, // ops 5, 6, 7, 8, // xid 9, 10, // secs 11, 12, // flags 13, 14, 15, 16, // ciaddr 17, 18, 19, 20, // yiaddr 21, 22, 23, 24, // siaddr 25, 26, 27, 28, // giaddr 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, // chaddr 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 0, // sname: "-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijk", 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 0, 0, 0, 0, 0, 0, 0, 0, // file: "mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}m", 99, 130, 83, 99, // magic cookie ]; let message = Message::from_bytes(bootreq).unwrap(); let mut bytes = Vec::with_capacity(bootreq.len()); b.iter(|| { message.encode(&mut Encoder::new(&mut bytes)).unwrap(); }); }); g.bench_function("encode_discover", |b| { let discover: &[u8] = &[ 0x01, 0x01, 0x06, 0x00, 
0xa6, 0x80, 0x56, 0x74, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xc0, 0xde, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x01, 0x37, 0x40, 0xfc, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x43, 0x42, 0x33, 0x04, 0x00, 0x00, 0x00, 0x01, 0xff, ]; let message = Message::from_bytes(discover).unwrap(); let mut bytes = Vec::with_capacity(discover.len()); b.iter(|| { 
message.encode(&mut Encoder::new(&mut bytes)).unwrap(); }); }); g.bench_function("encode_other_offer", |b| { let other_offer: &[u8] = &[ 0x02, 0x01, 0x06, 0x00, 0xa6, 0x80, 0x56, 0x74, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x95, 0xc0, 0xa8, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xc0, 0xde, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x02, 0x36, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x33, 0x04, 0x00, 0x00, 0x00, 0x78, 0x3a, 0x04, 0x00, 0x00, 0x00, 0x3c, 0x3b, 0x04, 0x00, 0x00, 0x00, 0x69, 0x01, 0x04, 0xff, 0xff, 0xff, 0x00, 0x1c, 0x04, 0xc0, 0xa8, 0x00, 0xff, 0x06, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x03, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]; let message = Message::from_bytes(other_offer).unwrap(); 
let mut bytes = Vec::with_capacity(other_offer.len()); b.iter(|| { message.encode(&mut Encoder::new(&mut bytes)).unwrap(); }); }); g.bench_function("encode_opts", |b| { let opts: &[u8] = &[ 53, 1, 2, 54, 4, 192, 168, 0, 1, 51, 4, 0, 0, 0, 60, 58, 4, 0, 0, 0, 30, 59, 4, 0, 0, 0, 52, 1, 4, 255, 255, 255, 0, 3, 4, 192, 168, 0, 1, 6, 8, 192, 168, 0, 1, 192, 168, 1, 1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; let x = dhcproto::v4::DhcpOptions::from_bytes(opts).unwrap(); let mut bytes = Vec::with_capacity(opts.len()); b.iter(|| { x.encode(&mut Encoder::new(&mut bytes)).unwrap(); }); }); } criterion_group!(benches, encode_benches); criterion_main!(benches);
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
false
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/benches/decode.rs
benches/decode.rs
use criterion::{Criterion, criterion_group, criterion_main}; use dhcproto::Decodable; fn decode_benches(c: &mut Criterion) { let mut g = c.benchmark_group("decode"); g.bench_function("decode_offer", |b| { let offer: &[u8] = &[ 0x02, 0x01, 0x06, 0x00, 0x00, 0x00, 0x15, 0x5c, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x03, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xcc, 0x00, 0x0a, 0xc4, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x02, 0x36, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x33, 0x04, 0x00, 0x00, 0x00, 0x3c, 0x3a, 0x04, 0x00, 0x00, 0x00, 0x1e, 0x3b, 0x04, 0x00, 0x00, 0x00, 0x34, 0x01, 0x04, 0xff, 0xff, 0xff, 0x00, 0x03, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x06, 0x08, 0xc0, 0xa8, 0x00, 0x01, 0xc0, 0xa8, 0x01, 0x01, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, ]; b.iter(|| { dhcproto::v4::Message::from_bytes(offer).unwrap(); }); }); g.bench_function("decode_bootreq", |b| { let bootreq: &[u8] = &[ 1u8, // op 2, // htype 3, // hlen 4, // ops 5, 6, 7, 8, // xid 9, 10, // secs 11, 12, // flags 13, 14, 15, 16, // ciaddr 17, 18, 19, 20, // yiaddr 21, 22, 23, 24, // siaddr 25, 26, 27, 28, // giaddr 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, // chaddr 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107, 0, // sname: "-./0123456789:;<=>?@ABCDEFGHIJKLMNOPQRSTUVWXYZ[\\]^_`abcdefghijk", 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 109, 0, 0, 0, 0, 0, 0, 0, 0, // file: "mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}mnopqrstuvwxyz{|}m", 99, 130, 83, 99, // magic cookie ]; b.iter(|| { dhcproto::v4::Message::from_bytes(bootreq).unwrap(); }); }); g.bench_function("decode_discover", |b| { let discover: &[u8] = &[ 0x01, 0x01, 0x06, 0x00, 0xa6, 0x80, 0x56, 0x74, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xc0, 0xde, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x01, 0x37, 0x40, 0xfc, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09, 0x0a, 0x0b, 0x0c, 0x0d, 0x0e, 0x0f, 0x10, 0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18, 0x19, 0x1a, 0x1b, 0x1c, 0x1d, 0x1e, 0x1f, 0x20, 0x21, 0x22, 0x23, 0x24, 0x25, 0x26, 0x27, 0x28, 0x29, 0x2a, 0x2b, 0x2c, 0x2d, 0x2e, 0x2f, 0x30, 0x31, 0x32, 0x33, 0x34, 0x35, 0x36, 0x37, 0x38, 0x39, 0x3a, 0x3b, 0x3c, 0x3d, 0x43, 0x42, 0x33, 0x04, 0x00, 0x00, 0x00, 0x01, 0xff, ]; b.iter(|| { dhcproto::v4::Message::from_bytes(discover).unwrap(); }); }); g.bench_function("decode_other_offer", |b| { let other_offer: &[u8] = &[ 0x02, 0x01, 0x06, 0x00, 0xa6, 0x80, 0x56, 0x74, 0x00, 0x00, 0x80, 0x00, 0x00, 0x00, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x95, 0xc0, 0xa8, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0xde, 0xad, 0xc0, 0xde, 0xca, 0xfe, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x63, 0x82, 0x53, 0x63, 0x35, 0x01, 0x02, 0x36, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x33, 0x04, 0x00, 0x00, 0x00, 0x78, 0x3a, 0x04, 0x00, 0x00, 0x00, 0x3c, 0x3b, 0x04, 0x00, 0x00, 0x00, 0x69, 0x01, 0x04, 0xff, 0xff, 0xff, 0x00, 0x1c, 0x04, 0xc0, 0xa8, 0x00, 0xff, 0x06, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0x03, 0x04, 0xc0, 0xa8, 0x00, 0x01, 0xff, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, ]; b.iter(|| { dhcproto::v4::Message::from_bytes(other_offer).unwrap(); }); }); g.bench_function("decode_opts", |b| { let opts: &[u8] = &[ 53, 1, 2, 54, 4, 192, 168, 0, 1, 51, 4, 0, 0, 0, 60, 58, 4, 0, 0, 0, 30, 59, 4, 0, 0, 0, 52, 1, 4, 255, 255, 255, 0, 3, 4, 192, 168, 0, 1, 6, 8, 192, 168, 0, 1, 192, 168, 1, 1, 255, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ]; b.iter(|| dhcproto::v4::DhcpOptions::from_bytes(opts).unwrap()); }); } 
criterion_group!(benches, decode_benches); criterion_main!(benches);
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
false
bluecatengineering/dhcproto
https://github.com/bluecatengineering/dhcproto/blob/b4ea30defc01e7ae66e7075d6a0533d9bb9503dc/dhcproto-macros/src/lib.rs
dhcproto-macros/src/lib.rs
use proc_macro::TokenStream; use quote::quote; use syn::{ Ident, LitInt, LitStr, Token, Type, parse::{Parse, ParseStream, Result}, parse_macro_input, }; // parses a single entry in the format: // {code, id, "description", (Type1, Type2, ...)} struct Entry { code: u8, id: Ident, description: String, data_types: Option<Vec<Type>>, } impl Parse for Entry { // {code, id, "description", (Type1, Type2, ...)} fn parse(input: ParseStream) -> Result<Self> { let content; syn::braced!(content in input); let code: LitInt = content.parse()?; content.parse::<Token![,]>()?; let id: Ident = content.parse()?; content.parse::<Token![,]>()?; let description: LitStr = content.parse()?; let data_types = if content.peek(Token![,]) && content.peek2(syn::token::Paren) { content.parse::<Token![,]>()?; let types_content; syn::parenthesized!(types_content in content); let mut types = Vec::new(); if !types_content.is_empty() { types.push(types_content.parse()?); while types_content.peek(Token![,]) { types_content.parse::<Token![,]>()?; if !types_content.is_empty() { types.push(types_content.parse()?); } } } Some(types) } else { None }; Ok(Entry { code: code.base10_parse()?, id, description: description.value(), data_types, }) } } struct DeclareCodesInput { entries: Vec<Entry>, } impl Parse for DeclareCodesInput { fn parse(input: ParseStream) -> Result<Self> { let mut entries = Vec::new(); while !input.is_empty() { entries.push(input.parse()?); if input.peek(Token![,]) { input.parse::<Token![,]>()?; } } Ok(DeclareCodesInput { entries }) } } fn generate_option_code_enum(entries: &[Entry]) -> proc_macro2::TokenStream { let variants = entries.iter().map(|e| { let id = &e.id; let code = e.code; let description = &e.description; let doc = format!("{code} - {description}"); quote! { #[doc = #doc] #id, } }); quote! 
{ /// DHCP Options #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum OptionCode { #(#variants)* /// Unknown code Unknown(u8), } } } fn generate_option_code_from_u8(entries: &[Entry]) -> proc_macro2::TokenStream { let match_arms = entries.iter().map(|e| { let id = &e.id; let code = e.code; quote! { #code => Self::#id, } }); quote! { impl core::convert::From<u8> for OptionCode { fn from(x: u8) -> Self { match x { #(#match_arms)* _ => Self::Unknown(x), } } } } } fn generate_u8_from_option_code(entries: &[Entry]) -> proc_macro2::TokenStream { let match_arms = entries.iter().map(|e| { let id = &e.id; let code = e.code; quote! { OptionCode::#id => #code, } }); quote! { impl core::convert::From<OptionCode> for u8 { fn from(x: OptionCode) -> Self { match x { #(#match_arms)* OptionCode::Unknown(code) => code, } } } } } fn generate_dhcp_option_enum(entries: &[Entry]) -> proc_macro2::TokenStream { let variants = entries.iter().map(|e| { let id = &e.id; let code = e.code; let description = &e.description; let doc = format!("{code} - {description}"); match &e.data_types { Some(types) => { quote! { #[doc = #doc] #id(#(#types),*), } } None => { quote! { #[doc = #doc] #id, } } } }); quote! { /// DHCP Options #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, PartialEq, Eq)] pub enum DhcpOption { #(#variants)* /// Unknown option Unknown(UnknownOption), } } } fn generate_option_code_from_dhcp_option(entries: &[Entry]) -> proc_macro2::TokenStream { let match_arms = entries.iter().map(|e| { let id = &e.id; match &e.data_types { Some(types) => { let wildcards = types.iter().map(|_| quote! { _ }); quote! { DhcpOption::#id(#(#wildcards),*) => OptionCode::#id, } } None => quote! { DhcpOption::#id => OptionCode::#id, }, } }); quote! 
{ impl core::convert::From<&DhcpOption> for OptionCode { fn from(opt: &DhcpOption) -> Self { use DhcpOption as O; match opt { #(#match_arms)* O::Unknown(n) => OptionCode::Unknown(n.code), } } } } } #[proc_macro] pub fn declare_codes(input: TokenStream) -> TokenStream { let input = parse_macro_input!(input as DeclareCodesInput); let entries = &input.entries; let option_code_enum = generate_option_code_enum(entries); let option_code_from_u8 = generate_option_code_from_u8(entries); let u8_from_option_code = generate_u8_from_option_code(entries); let dhcp_option_enum = generate_dhcp_option_enum(entries); let option_code_from_dhcp_option = generate_option_code_from_dhcp_option(entries); let expanded = quote! { #option_code_enum #option_code_from_u8 #u8_from_option_code #dhcp_option_enum #option_code_from_dhcp_option }; TokenStream::from(expanded) } #[cfg(test)] mod tests { use super::*; use quote::quote; use syn::parse_quote; #[test] fn test_macro_expansion() { let input: DeclareCodesInput = parse_quote! { {1, SubnetMask, "Subnet Mask", (Ipv4Addr)}, {53, MessageType, "Message Type", (MessageType)}, }; let opt_code = generate_option_code_enum(&input.entries); // Check that it contains expected variants let expected = quote! { /// DHCP Options #[cfg_attr(feature = "serde", derive(Serialize, Deserialize))] #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum OptionCode { #[doc = "1 - Subnet Mask"] SubnetMask, #[doc = "53 - Message Type"] MessageType, /// Unknown code Unknown(u8), } }; println!("Generated OptionCode enum: {}", opt_code); // Compare token streams (this is approximate) assert_eq!(opt_code.to_string(), expected.to_string()); } }
rust
MIT
b4ea30defc01e7ae66e7075d6a0533d9bb9503dc
2026-01-04T20:19:33.979507Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/src/lib.rs
src/lib.rs
// Copyright 2018 The immense Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. //! immense describes 3D structures with simple composable rules and outputs them as Wavefront //! object files you can plug into your renderer of choice. //! //! # Demo //! //!``` //! # use immense::*; //! Rule::new().push(vec![ //! Replicate::n(1, vec![Tf::saturation(0.8), Tf::hue(160.0)]), //! Replicate::n(36, vec![Tf::rz(10.0), Tf::ty(0.1)]), //! Replicate::n(36, vec![Tf::ry(10.0), Tf::tz(1.2), Tf::hue(3.4)]), //! ], //! cube(), //!) //! # ; //!``` //! //! ![](https://i.imgur.com/1Emik4Z.png) //! //! # Table of Contents //! //! 1. [Intro](#intro) //! 2. [Composing Rules](#composing_rules) //! 1. [Recursion](#recursion) //! 2. [Randomness](#randomness) //! 3. [Color](#color) //! 4. [Ergonomics Macros](#ergonomics-macros) //! 5. [Custom Meshes](#custom-meshes) //! //! # Intro //! //! In immense, you create a [Rule][self::rule::Rule] that describes your structure, which is //! ultimately composed of [meshes](https://en.wikipedia.org/wiki/Polygon_mesh). immense provides //! some builtin meshes, such as [cube][self::rule::builtin::cube], and you can create your own rules //! by using these builtins which you'll see in the next section. //! //! After you've built your [Rule][self::rule::Rule], you can export the meshes it expands to as a //! Wavefront object file for the next part of your workflow, whether that is rendering it in Blender, //! 
printing it in your 3D printer, or importing it into your game! //! //! # Composing Rules //! //! Let's start with a cube. You probably want to write your meshes to a file and watch them in a //! viewer with autoreload. [Meshlab](http://www.meshlab.net/) is a great viewer (and much more) //! that can reload your meshes when changed. Check out [ExportConfig][self::export::ExportConfig] //! to see what options you can set that will work best for your rendering or printing workflow. //! //! ```` //! # use failure::{Error}; //! # let _ = || -> Result<(), Error> { //! use immense::*; //! use std::fs::File; //! //! let rule = cube(); //! let meshes = rule.generate(); //! let mut output_file = File::create("my_mesh.obj")?; //! write_meshes(ExportConfig::default(), meshes, &mut output_file)?; //! # Ok(()) //! # }; //! ```` //! //! //! ![](https://i.imgur.com/s68Kk0U.png) //! //! We can translate the cube with the `Tf::t*` family of functions which generate translate //! transforms. We'll apply [Tf::tx][rule::transforms::Transform::tx] by creating our own rule and //! invoking the cube rule with a transform. //! //! ```` //! # use immense::*; //! let rule = Rule::new().push(Tf::tx(3.0), cube()); //! ```` //! //! ![](https://i.imgur.com/1nALK9q.png) //! //! We can replicate transforms with [Replicate][rule::transforms::Replicate] which generates //! multiple invocations of a subrule, each with more applications of the same transform applied to //! it. //! //! ```` //! # use immense::*; //! let rule = Rule::new().push(Replicate::n(3, Tf::ty(1.1)), cube()); //! ```` //! //! Notice that our translation is 1.1 and that that is 0.1 more than the length of our cube. That's //! no coincidence! All the built in meshes are 1 in length so that you can use convenient //! measurements like this, even when deep in a transform stack. //! //! ![](https://i.imgur.com/xqufPmN.png) //! //! ## Recursion //! //! 
You can generate rules recursively with the api we've covered so far, but doing so would put //! your entire rule tree in memory at one time, which can become a problem. immense provides a //! trait, [ToRule][rule::ToRule], so you can give it types that can instantiate rules when needed. //! //! ```` //! # use immense::*; //! struct RecursiveTile { //! depth_budget: usize, //! } //! //! impl ToRule for RecursiveTile { //! fn to_rule(&self) -> Rule { //! let rule = Rule::new() //! .push(vec![Tf::t(0.25, 0.25, 0.0), Tf::s(0.4)], cube()) //! .push(vec![Tf::t(-0.25, -0.25, 0.0), Tf::s(0.4)], cube()) //! .push(vec![Tf::t(-0.25, 0.25, 0.0), Tf::s(0.4)], cube()); //! if self.depth_budget > 0 { //! rule.push( //! vec![Tf::t(0.25, -0.25, 0.0), Tf::s(0.4)], //! RecursiveTile { //! depth_budget: self.depth_budget - 1, //! }, //! ) //! } else { //! rule //! } //! } //! } //! //! let rule = RecursiveTile { //! depth_budget: 3 //! }.to_rule(); //! ```` //! //! ![](https://i.imgur.com/huqVLHE.png) //! //! ## Randomness //! //! Using [ToRule][rule::ToRule] to delay rule construction, we can sample some random values //! each time our type builds a rule. //! //! ```` //! # use immense::*; //! # use rand::*; //! struct RandCube; //! //! impl ToRule for RandCube { //! fn to_rule(&self) -> Rule { //! Rule::new().push( //! *thread_rng() //! .choose(&[Tf::tx(0.1), //! Tf::tx(-0.1), //! Tf::tx(0.2), //! Tf::tx(-0.2)]) //! .unwrap(), //! cube(), //! ) //! } //! } //! //! let rule = Rule::new().push(Replicate::n(4, Tf::ty(1.0)), //! RandCube {}); //! ```` //! //! ![](https://i.imgur.com/bSNc6jw.png) //! //! # Color //! //! immense can export some colors alongside your mesh, by linking the object file output to an //! mtl file (material library). Set the output mtl file in //! [export_colors][crate::export::ExportConfig::export_colors] and immense will write out colors. //! //! You can specify colors overrides and transforms in HSV color space using Ogeon's [palette][palette]. //! 
See [Tf::color][crate::rule::transforms::Transform::color], [Tf::hue][crate::rule::transforms::Transform::hue], //! [Tf::saturation][crate::rule::transforms::Transform::saturation], [Tf::value][crate::rule::transforms::Transform::value]. //! //! # Ergonomics Macros //! //! immense provides two ergonomics macros that make defining rules and transform sequences a little //! easier once you have an intuition for their semantics. They are [`rule!`] and [`tf!`], which //! help compose rules and transform sequences respectively. //! //! They transform the demo code above into: //! //! ```` //! # use immense::*; //! rule![ //! tf![ //! Tf::saturation(0.8), //! Tf::hue(160.0), //! Replicate::n(36, vec![Tf::rz(10.0), Tf::ty(0.1)]), //! Replicate::n(36, vec![Tf::ry(10.0), Tf::tz(1.2), Tf::hue(3.4)]), //! ] => cube(), //! ] //! # ; //! ```` //! //! # Custom Meshes //! //! You can create meshes on your own and use them as rules by calling //! [Mesh::from][self::mesh::Mesh::from] if you format your meshes according to object file format. //! //! Meshes can be expensive to allocate. immense handles the primitives on your behalf, but if you //! introduce your own meshes you must be careful not to allocate them more than once. One million //! references to a sphere are fine, one million spheres will probably kill the process. //! //! An example is the [sphere][self::rule::builtin::sphere] builtin which allows you to create a //! potentially expensive sphere estimation: //! //! ```` //! # use immense::*; //! # use std::rc::Rc; //! let sphere: Rc<Mesh> = sphere(/*resolution=*/4); //! let rule = Rule::new().push(Tf::s(2.0), sphere); //! 
```` mod error; mod export; mod mesh; mod rule; pub use crate::error::Error; pub use crate::export::{ExportConfig, MeshGrouping}; pub use crate::mesh::{vertex, Mesh, Vertex}; pub use crate::rule::*; pub use palette::{Hsv, RgbHue}; use crate::error::Result; use std::io; /// Writes out meshes as a Wavefront object file to the given [Write][io::Write] sink. pub fn write_meshes( config: ExportConfig, meshes: impl Iterator<Item = OutputMesh>, sink: impl io::Write, ) -> Result<()> { export::write_meshes(config, meshes, sink)?; Ok(()) }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/src/mesh.rs
src/mesh.rs
// Copyright 2018 The immense Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::Tf; use genmesh::generators::{IcoSphere, IndexedPolygon, SharedVertex}; use lazy_static::lazy_static; use nalgebra::base::dimension::{U1, U4}; use std::rc::Rc; /// A type for custom mesh vertices. Initialize with [vertex][self::vertex]. pub type Vertex = nalgebra::Matrix<f32, U4, U1, nalgebra::MatrixArray<f32, U4, U1>>; /// Initializes a vertex for a custom mesh. pub fn vertex(x: f32, y: f32, z: f32) -> Vertex { Vertex::new(x, y, z, 1.0) } pub(crate) fn sphere_of_resolution(resolution: usize) -> Mesh { Mesh::new( IcoSphere::subdivide(resolution) .shared_vertex_iter() .map(|v| Tf::s(0.5).apply_to(vertex(v.pos.x, v.pos.y, v.pos.z))) .collect(), Some( IcoSphere::subdivide(resolution) .shared_vertex_iter() .map(|v| vertex(v.normal.x, v.normal.y, v.normal.z)) .collect(), ), IcoSphere::subdivide(resolution) .indexed_polygon_iter() .map(|t| vec![t.x + 1, t.y + 1, t.z + 1]) .collect(), ) } lazy_static! 
{ static ref CUBE_MESH: Mesh = Mesh::new( vec![ vertex(-0.5, 0.5, 0.5), vertex(-0.5, -0.5, 0.5), vertex(0.5, -0.5, 0.5), vertex(0.5, 0.5, 0.5), vertex(-0.5, 0.5, -0.5), vertex(-0.5, -0.5, -0.5), vertex(0.5, -0.5, -0.5), vertex(0.5, 0.5, -0.5), ], None, vec![ vec![1, 2, 3, 4], vec![8, 7, 6, 5], vec![4, 3, 7, 8], vec![5, 1, 4, 8], vec![5, 6, 2, 1], vec![2, 6, 7, 3], ] ); static ref ICO_SPHERE: Mesh = sphere_of_resolution(0); } /// A custom mesh definition described by a set of vertices, normals, and faces. /// /// This is a low-level type and you are expected to know what you are doing in this part of the API. /// /// 1. There should be a normal for each vertex if you provide any normals at all. /// 2. Each face is a set of indices to the vertices that the face connects. /// 3. Vertex indices start at 1, according to the object file standard. #[derive(Debug)] pub struct Mesh { vertices: Vec<Vertex>, normals: Option<Vec<Vertex>>, faces: Vec<Vec<usize>>, } impl Mesh { /// Allocates a mesh from the given vertices, normals, and faces, which can invoked as rules. pub fn from( vertices: Vec<Vertex>, normals: Option<Vec<Vertex>>, faces: Vec<Vec<usize>>, ) -> Rc<Self> { Rc::new(Self::new(vertices, normals, faces)) } pub(crate) fn new( vertices: Vec<Vertex>, normals: Option<Vec<Vertex>>, faces: Vec<Vec<usize>>, ) -> Self { Self { vertices, normals, faces: faces, } } pub(crate) fn vertices<'a>(&'a self) -> &'a [Vertex] { self.vertices.as_slice() } pub(crate) fn normals<'a>(&'a self) -> Option<&'a [Vertex]> { self.normals.as_ref().map(|ns| ns.as_slice()) } pub(crate) fn faces<'a>(&'a self) -> impl Iterator<Item = &'a [usize]> { self.faces.iter().map(|f| f.as_slice()) } } #[derive(Clone, Debug)] pub enum PrimitiveMesh { Cube, IcoSphere, } impl PrimitiveMesh { pub(crate) fn mesh(&self) -> &'static Mesh { match *self { PrimitiveMesh::Cube => &*CUBE_MESH, PrimitiveMesh::IcoSphere => &*ICO_SPHERE, } } }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/src/error.rs
src/error.rs
// Copyright 2018 The immense Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use auto_from::auto_from; use crate::export::ExportError; use failure_derive::Fail; use std; pub type Result<T> = std::result::Result<T, Error>; /// immense Error type. #[auto_from] #[derive(Fail, Debug)] pub enum Error { #[fail(display = "Error exporting mesh.")] Export(ExportError), }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/src/export.rs
src/export.rs
// Copyright 2018 The immense Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::rule::OutputMesh; use failure_derive::Fail; use std::fs::File; use std::io; #[derive(Fail, Debug)] pub enum ExportError { #[fail(display = "Failed to write to obj file.")] ObjWriteError { #[cause] write_error: io::Error, }, #[fail(display = "Failed to write to material file.")] MtlWriteError { #[cause] write_error: io::Error, }, } macro_rules! try_write_obj { ($expr:expr) => { match $expr { Ok(val) => val, Err(err) => return Err(ExportError::ObjWriteError { write_error: err }), } }; ($expr:expr,) => { try!($expr) }; } macro_rules! try_write_mtl { ($expr:expr) => { match $expr { Ok(val) => val, Err(err) => return Err(ExportError::MtlWriteError { write_error: err }), } }; ($expr:expr,) => { try!($expr) }; } /// A policy for grouping meshes in the object file. /// /// Use this to specify how you want to work with your meshes later. E.g. if you want to use Blender /// to procedurally material each mesh based on their location, you want /// [MeshGrouping::Individual][MeshGrouping::Individual], but if you want to print the mesh with a /// 3D printer, you want [MeshGrouping::AllTogether][MeshGrouping::AllTogether]. #[derive(Copy, Clone, Debug)] pub enum MeshGrouping { /// All meshes will be combined into one object. AllTogether, /// Each mesh will be its own object. Individual, /// Each mesh is grouped with others of the same color. 
ByColor, } /// The default is [MeshGrouping::AllTogether][MeshGrouping::AllTogether]. impl Default for MeshGrouping { fn default() -> MeshGrouping { MeshGrouping::AllTogether } } /// Configuration for Wavefront object file output. #[derive(Clone, Debug, Default)] pub struct ExportConfig { /// Mesh grouping policy. pub grouping: MeshGrouping, /// Material definition sink to export colors to. /// /// This will write each color to a material lib file named by this parameter and reference /// those materials in the output object file. pub export_colors: Option<String>, } /// Writes out meshes as a Wavefront object file to the given [Write][io::Write] sink. pub fn write_meshes( config: ExportConfig, meshes: impl Iterator<Item = OutputMesh>, mut sink: impl io::Write, ) -> Result<(), ExportError> { let mut mtl_file = if let Some(ref mtl_filename) = config.export_colors { let mtl_file = try_write_mtl!(File::create(mtl_filename)); try_write_obj!(write!(&mut sink, "mtllib {}\n", mtl_filename)); Some(mtl_file) } else { None }; let mut vertex_offset = 0; let mut normal_offset = 0; for mesh in meshes { let vertex_count = mesh.mesh().vertices().len(); let normal_count = mesh.mesh().normals().map(|ns| ns.len()).unwrap_or(0); render_obj( &config, mesh, vertex_offset, normal_offset, &mut sink, mtl_file.as_mut(), )?; normal_offset += normal_count; vertex_offset += vertex_count; } Ok(()) } fn render_obj( config: &ExportConfig, output_mesh: OutputMesh, vertex_offset: usize, normal_offset: usize, mut sink: impl io::Write, material_sink: Option<impl io::Write>, ) -> Result<(), ExportError> { let color = output_mesh.color(); let color_hex = format!("#{:x}", color.into_format::<u8>()); match config.grouping { MeshGrouping::Individual => try_write_obj!(write!(&mut sink, "g g{}\n", vertex_offset)), MeshGrouping::ByColor => try_write_obj!(write!(&mut sink, "g {}\n", color_hex)), _ => (), }; if let Some(mut material_sink) = material_sink { try_write_obj!(write!(&mut sink, "usemtl {}\n", 
color_hex)); try_write_mtl!(write!( &mut material_sink, "newmtl {}\nKd {} {} {}\nillum 0\n", color_hex, color.red, color.green, color.blue )); } for vertex in output_mesh.vertices() { try_write_obj!(write!( &mut sink, "v {} {} {}\n", vertex.x, vertex.y, vertex.z )); } if let Some(normals) = output_mesh.normals() { for normal in normals { try_write_obj!(write!( &mut sink, "vn {} {} {}\n", normal.x, normal.y, normal.z )); } } let write_face_vertex = |sink: &mut io::Write, vertex_index| -> Result<(), ExportError> { if let Some(_) = output_mesh.normals() { try_write_mtl!(write!( sink, " {}//{}", vertex_index + vertex_offset, vertex_index + normal_offset )); } else { try_write_mtl!(write!(sink, " {}", vertex_index + vertex_offset)); }; Ok(()) }; for face in output_mesh.faces() { try_write_obj!(write!(&mut sink, "f ")); for vertex_index in face { write_face_vertex(&mut sink, vertex_index)?; } try_write_obj!(write!(&mut sink, "\n")); } Ok(()) }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/src/rule.rs
src/rule.rs
// Copyright 2018 The immense Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. mod builtin; mod transforms; pub use self::builtin::*; pub use self::transforms::*; use auto_from::auto_from; use crate::mesh::{Mesh, PrimitiveMesh, Vertex}; use palette::rgb::Rgb; use std::rc::Rc; /// A composition of subrules to expand until meshes are generated. #[derive(Clone)] pub struct Rule { invocations: Vec<(Option<Transform>, RuleInternal)>, } /// An ergonomics macro for defining rules out of transformed subrule invocations. /// /// Where normally you would have to write /// /// ```` /// # use immense::*; /// let rule = Rule::new().push(Tf::tx(2.0), cube()) /// .push(Tf::s(0.5), cube()); /// ```` /// /// you can write /// /// ```` /// # use immense::*; /// let rule = rule![Tf::tx(2.0) => cube(), /// Tf::s(0.5) => cube()]; /// ```` #[macro_export] macro_rules! rule { ($($transforms:expr => $subrule:expr),+ $(,)*) => ({ let mut rule = Rule::new(); $(let rule = rule.push($transforms, $subrule);)* rule }); } impl Rule { /// Returns a new rule that contains no subrules. 
pub fn new() -> Rule { Rule { invocations: vec![], } } pub(crate) fn primitive(mesh: PrimitiveMesh) -> Self { let mut rule = Rule::new(); rule.invocations .push((None, RuleInternal::Mesh(OutputMeshSource::Primitive(mesh)))); rule } pub(crate) fn mesh(mesh: Rc<Mesh>) -> Self { let mut rule = Rule::new(); rule.invocations .push((None, RuleInternal::Mesh(OutputMeshSource::Dynamic(mesh)))); rule } /// Adds a subrule to the Rule. pub fn push(mut self, transforms: impl Into<TransformArgument>, rule: impl ToRule) -> Rule { match transforms.into() { TransformArgument::Single(transform) => { self.invocations .push((Some(transform), RuleInternal::Invocations(Rc::new(rule)))); } TransformArgument::Many(ref transforms) if !transforms.is_empty() => { let rule = Rc::new(rule); self.invocations.append( &mut transforms .into_iter() .map(|transform| { (Some(*transform), RuleInternal::Invocations(rule.clone())) }) .collect(), ); } _ => self .invocations .push((None, RuleInternal::Invocations(Rc::new(rule)))), }; self } /// Returns an iterator expands the Rule's subrules, outputting the meshes it generates until /// all rules have been fully expanded. As an iterator the meshes are computed lazily so you can /// use this method and terminate with [take][std::iter::Iterator::take], or /// [until][std::iter::Iterator::take_while], etc if your rule tree is infinite. pub fn generate(self) -> impl Iterator<Item = OutputMesh> { let root = RuleInternal::Invocations(Rc::new(self)); MeshIter::new(vec![(None, root)]) } } /// An iterator that iterates over a [Rule][self::Rule]'s generated meshes. pub struct MeshIter { rules: Vec<(Option<Transform>, RuleInternal)>, } impl MeshIter { fn new(rules: Vec<(Option<Transform>, RuleInternal)>) -> Self { Self { rules } } } /// An OutputMesh can be written out in an object file. 
#[derive(Debug)] pub struct OutputMesh { transform: Option<Transform>, source: OutputMeshSource, } #[derive(Debug, Clone)] enum OutputMeshSource { Primitive(PrimitiveMesh), Dynamic(Rc<Mesh>), } impl OutputMesh { pub(crate) fn color(&self) -> Rgb { self.transform.unwrap_or(Transform::default()).get_color() } /// An iterator over the vertices that compose the mesh. Access `.x`, `.y`, and `.z`. pub fn vertices<'a>(&'a self) -> impl Iterator<Item = Vertex> + 'a { self.mesh() .vertices() .iter() .map(move |v: &'a Vertex| -> Vertex { self.transform.map(|t| t.apply_to(*v)).unwrap_or(*v) }) } /// An iterator over the normals of each vertex if they are defined for the mesh. pub fn normals<'a>(&'a self) -> Option<impl Iterator<Item = Vertex> + 'a> { match self.mesh().normals() { Some(ref normals) => Some(normals.iter().map(move |v: &Vertex| -> Vertex { self.transform.map(|t| t.apply_to(*v)).unwrap_or(*v) })), None => None, } } /// An iterator over the faces of the output mesh. /// /// Important things to note if you are not writing out an object file: /// /// * These faces are not necessarily triangles. /// * The vertex indices start at 1. 
pub fn faces<'a>(&'a self) -> impl Iterator<Item = &'a [usize]> { self.mesh().faces() } pub(crate) fn mesh<'a>(&'a self) -> &'a Mesh { match self.source { OutputMeshSource::Primitive(ref primitive) => primitive.mesh(), OutputMeshSource::Dynamic(ref mesh) => mesh.as_ref(), } } } impl Iterator for MeshIter { type Item = OutputMesh; fn next(&mut self) -> Option<Self::Item> { while let Some((transform, rule)) = self.rules.pop() { match rule { RuleInternal::Mesh(mesh) => { return Some(OutputMesh { transform, source: mesh, }) } RuleInternal::Invocations(composite_rule) => { let composite_rule = composite_rule.to_rule(); self.rules.reserve(composite_rule.invocations.len()); for (sub_transform, sub_rule) in composite_rule.invocations { self.rules.push(( match (transform, sub_transform) { (None, None) => None, (Some(parent), None) => Some(parent), (Some(parent), Some(child)) => Some(parent.cons(child)), (None, Some(child)) => Some(child), }, sub_rule, )); } } } } None } } /// A trait for types that can become rules. pub trait ToRule: 'static { fn to_rule(&self) -> Rule; } impl ToRule for Rule { fn to_rule(&self) -> Rule { self.clone() } } impl ToRule for Rc<Mesh> { fn to_rule(&self) -> Rule { Rule::mesh(self.clone()) } } #[auto_from] #[derive(Clone)] enum RuleInternal { Mesh(OutputMeshSource), Invocations(Rc<ToRule>), }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/src/rule/transforms.rs
src/rule/transforms.rs
// Copyright 2018 The immense Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::mesh::Vertex; use nalgebra::Matrix4; use palette::{encoding::srgb::Srgb, rgb::Rgb, Hsv, RgbHue}; use std::iter; fn identity() -> Matrix4<f32> { Matrix4::new( 1.0, 0.0, 0.0, 0.0, // 0.0, 1.0, 0.0, 0.0, // 0.0, 0.0, 1.0, 0.0, // 0.0, 0.0, 0.0, 1.0, ) } /// An ergonomic alias for [Transform][self::Transform]. pub type Tf = Transform; /// A Transform, when applied, modifies a mesh. When applied to a rule, it transforms all the meshes /// that rule eventually expands to. Transforms may be translations, scales, rotations, etc. /// /// It may be helpful to think of transforms to rules as transforming the space in which the rule or /// mesh is evaluated. For example this rule will translate a cube 4.0 on the x axis in our root /// frame of reference: /// /// ```` /// # use immense::*; /// let our_translated_cube = Rule::new().push(Tf::tx(4.0), cube()); /// # ; /// ```` /// /// This rule will translate a cube -4.0 on the x axis in our root frame of reference: /// /// ```` /// # use immense::*; /// # let our_translated_cube = Rule::new().push(Tf::tx(4.0), cube()); ///let containing_rule = Rule::new().push(Tf::ry(180.0), our_translated_cube); /// # ; /// ```` /// /// This rule will translate a half-sized cube 2.0 on the x axis in our root frame of reference. 
/// /// ```` /// # use immense::*; /// # let our_translated_cube = Rule::new().push(Tf::tx(4.0), cube()); /// let containing_rule = Rule::new().push(Tf::s(0.5), our_translated_cube) /// # ; /// ```` #[derive(Copy, Clone, Debug)] pub struct Transform { spatial: Matrix4<f32>, color: ColorTransform, } #[derive(Copy, Clone, Debug)] enum ColorTransform { Override(Hsv), Delta(Hsv), } impl Default for ColorTransform { fn default() -> ColorTransform { ColorTransform::Delta(Hsv::new(0.0, 1.0, 1.0)) } } impl ColorTransform { fn cons(self, other: ColorTransform) -> Self { match (self, other) { (_, ColorTransform::Override(color)) => ColorTransform::Override(color), (ColorTransform::Override(color), ColorTransform::Delta(delta)) => { ColorTransform::Override(Hsv::new( color.hue + delta.hue, color.saturation * delta.saturation, color.value * delta.value, )) } (ColorTransform::Delta(delta_a), ColorTransform::Delta(delta_b)) => { ColorTransform::Delta(Hsv::new( delta_a.hue + delta_b.hue, delta_a.saturation * delta_b.saturation, delta_a.value * delta_b.value, )) } } } fn color(self) -> Hsv { match self { ColorTransform::Override(color) => color, ColorTransform::Delta(delta) => { let color = Hsv::new(0.0, 1.0, 1.0); Hsv::new( color.hue + delta.hue, color.saturation * delta.saturation, color.value * delta.value, ) } } } } impl Transform { pub(crate) fn cons(&self, other: Transform) -> Transform { // TODO: determine when translation to origin is necessary if ever. Transform { spatial: self.spatial * other.spatial, color: self.color.cons(other.color), } } pub(crate) fn apply_to(&self, vertex: Vertex) -> Vertex { self.spatial * vertex } pub(crate) fn get_color(&self) -> Rgb<Srgb, f32> { Rgb::from( ColorTransform::Override(Hsv::new(0.0, 1.0, 1.0)) .cons(self.color) .color(), ) } /// A translation on all axes. pub fn t(x: f32, y: f32, z: f32) -> Self { Self { spatial: Translate::by(x, y, z), ..Self::default() } } /// A translation on the x axis. 
pub fn tx(x: f32) -> Self { Self { spatial: Translate::x(x), ..Self::default() } } /// A translation on the y axis. pub fn ty(y: f32) -> Self { Self { spatial: Translate::y(y), ..Self::default() } } /// A translation on the z axis. pub fn tz(z: f32) -> Self { Self { spatial: Translate::z(z), ..Self::default() } } /// A uniform scale in all dimensions. pub fn s(factor: f32) -> Self { Self { spatial: Scale::all(factor), ..Self::default() } } /// A scale in all dimensions. pub fn sby(x: f32, y: f32, z: f32) -> Self { Self { spatial: Scale::by(x, y, z), ..Self::default() } } /// A rotation about the x axis. pub fn rx(x: f32) -> Self { Self { spatial: Rotate::x(x), ..Self::default() } } /// A rotation about the y axis. pub fn ry(y: f32) -> Self { Self { spatial: Rotate::y(y), ..Self::default() } } /// A rotation about the z axis. pub fn rz(z: f32) -> Self { Self { spatial: Rotate::z(z), ..Self::default() } } /// A color override that takes precedence over colors set higher in the rule tree. pub fn color(color: Hsv) -> Self { Self { color: ColorTransform::Override(color), ..Self::default() } } /// Adds `delta` to the current color hue. pub fn hue(delta: impl Into<RgbHue<f32>>) -> Self { Self { color: ColorTransform::Delta(Hsv::new(delta, 1.0, 1.0)), ..Self::default() } } /// Multiplies the current color saturation by `factor` pub fn saturation(factor: f32) -> Self { Self { color: ColorTransform::Delta(Hsv::new(0.0, factor, 1.0)), ..Self::default() } } /// Multiplies the current color value by `factor`. pub fn value(factor: f32) -> Self { Self { color: ColorTransform::Delta(Hsv::new(0.0, 1.0, factor)), ..Self::default() } } // Multiplicatively branch transforms. 
fn cross(parents: Vec<Transform>, children: Vec<Transform>) -> Vec<Transform> { let mut emitted = vec![]; emitted.reserve(parents.len() * children.len()); for parent in parents { for child in &children { emitted.push(parent.cons(*child)); } } emitted } fn coalesce(default: Option<Transform>, source: impl Iterator<Item = Transform>) -> Self { source.fold(default.unwrap_or(Transform::default()), |prefix, suffix| { prefix.cons(suffix) }) } fn stack(self, n: usize) -> Self { Transform::coalesce(Some(self), iter::repeat(self).take(n)) } } impl Default for Transform { fn default() -> Self { Self { spatial: identity(), color: ColorTransform::default(), } } } /// A TransformArgument is a transform that should be applied to the invocation of a /// [Rule][crate::rule::Rule]. /// /// See the [From][std::convert::From] and [Into][std::convert::Into] implementations /// which produce this type to find out what kind of argument each type becomes. #[derive(Debug)] pub enum TransformArgument { /// A single transform that corresponds to one invocation with the given transform. Single(Transform), /// An arbitrary number of transforms (e.g. from [Replicate][self::Replicate]) that correspond /// to one invocation each. Many(Vec<Transform>), } /// An ergonomics macro for listing transforms that will apply in order and branch on replications. /// /// Where normally you would have to write /// /// ```` /// # use immense::*; /// let transforms_with_a_replication = vec![Replicate::n(1, Tf::tx(2.0)), /// Replicate::n(3, Tf::ty(1.0))]; /// ```` /// /// you can write /// /// ```` /// # use immense::*; /// let transforms_with_a_replication = tf![Tf::tx(2.0), Replicate::n(3, Tf::ty(1.0))]; /// ```` /// /// and the branching of transforms will be the same. Both these when passed to a rule invocation /// will invoke a rule twice, applying `Tf::tx(2.0)` and `Tf::ty(1.0)` to the first invocation, and /// `Tf::tx(2.0)` and `Tf::ty(2.0)` to the second invocation. #[macro_export] macro_rules! 
tf { ($($transform:expr),+ $(,)*) => ({ let mut args: Vec<TransformArgument> = vec![]; $(args.push($transform.into());)* args }); } impl Into<Vec<Transform>> for TransformArgument { fn into(self) -> Vec<Transform> { match self { TransformArgument::Single(transform) => vec![transform], TransformArgument::Many(transforms) => transforms, } } } /// A single transform will correspond to one invocation. impl From<Transform> for TransformArgument { fn from(transform: Transform) -> Self { TransformArgument::Single(transform) } } /// A vector of transforms will be sequentially composed into a single transform and correspond to /// one invocation. impl From<Vec<Transform>> for TransformArgument { fn from(transforms: Vec<Transform>) -> Self { TransformArgument::Single(Transform::coalesce(None, transforms.into_iter())) } } /// A slice of transforms will be sequentially composed into a single transform and correspond to /// one invocation. impl From<&[Transform]> for TransformArgument { fn from(transforms: &[Transform]) -> Self { TransformArgument::Single(Transform::coalesce(None, transforms.iter().map(|t| *t))) } } impl From<Vec<TransformArgument>> for TransformArgument { fn from(args: Vec<TransformArgument>) -> Self { let mut emitted = vec![Transform::default()]; for arg in args { emitted = Transform::cross(emitted, arg.into()); } TransformArgument::Many(emitted) } } /// An optional transform will of course correspond to one invocation. This implementation /// also allows you to pass [None][std::option::Option::None] to invoke rules unmodified. impl From<Option<Transform>> for TransformArgument { fn from(maybe_input: Option<Transform>) -> Self { match maybe_input { Some(input) => input.into(), None => TransformArgument::Many(vec![]), } } } /// A vector of replications will be composed sequentially, which means the number of corresponding /// rule invocations is the product of each replication. 
A vector with a replication of transform A /// 36 times then replication of B 10 times will yield transforms for every sequence of A then B /// (e.g. (A_1, B_1), (A_1, B_2), ..., (A_36, B_36)), so 360 total. impl From<Vec<Replicate>> for TransformArgument { fn from(replications: Vec<Replicate>) -> TransformArgument { let mut emitted = vec![]; for replication in replications.into_iter().map(|r| -> Vec<Transform> { let input: TransformArgument = r.into(); input.into() }) { emitted = if emitted.is_empty() { replication } else { Transform::cross(emitted, replication) }; } TransformArgument::Many(emitted) } } /// Replicates a transform n times. /// /// The transforms will stack, so ```Replicate::n(2, Tf::x(1.0))``` on some rule will result in two /// invocations of the rule with ```Tf::x(1.0)``` and ```Tf::x(2.0)```. pub struct Replicate { n: usize, source: TransformArgument, } impl Replicate { pub fn n(n: usize, source: impl Into<TransformArgument>) -> Self { Self { n, source: source.into(), } } } /// The replication will become ```n``` transforms, corresponding to one invocation each. 
impl Into<TransformArgument> for Replicate { fn into(self) -> TransformArgument { match self.source { TransformArgument::Single(transform) => { TransformArgument::Many((0..self.n).map(|i| transform.stack(i)).collect()) } TransformArgument::Many(transforms) => TransformArgument::Many({ let mut emitted = vec![]; for transform in transforms { for i in 0..self.n { emitted.push(transform.stack(i)); } } emitted }), } } } #[derive(Default, Clone, Copy, Debug)] struct Translate; impl Translate { pub fn by(x: f32, y: f32, z: f32) -> Matrix4<f32> { Matrix4::new( 1.0, 0.0, 0.0, x, // 0.0, 1.0, 0.0, y, // 0.0, 0.0, 1.0, z, // 0.0, 0.0, 0.0, 1.0, ) } pub fn x(x: f32) -> Matrix4<f32> { Translate::by(x, 0.0, 0.0) } pub fn y(y: f32) -> Matrix4<f32> { Translate::by(0.0, y, 0.0) } pub fn z(z: f32) -> Matrix4<f32> { Translate::by(0.0, 0.0, z) } } #[derive(Default, Clone, Copy, Debug)] struct Scale; impl Scale { pub fn all(factor: f32) -> Matrix4<f32> { Scale::by(factor, factor, factor) } pub fn by(x: f32, y: f32, z: f32) -> Matrix4<f32> { Matrix4::new( x, 0.0, 0.0, 0.0, // 0.0, y, 0.0, 0.0, // 0.0, 0.0, z, 0.0, // 0.0, 0.0, 0.0, 1.0, ) } } #[derive(Clone, Copy, Debug)] struct Rotate; impl Rotate { #[rustfmt::skip] pub fn x(x: f32) -> Matrix4<f32> { let r = x.to_radians(); Translate::by(0.0, 0.5, 0.5) * Matrix4::new( 1.0, 0.0, 0.0, 0.0, // 0.0, r.cos(), -r.sin(), 0.0, // 0.0, r.sin(), r.cos(), 0.0, // 0.0, 0.0, 0.0, 1.0 ) * Translate::by(0.0, -0.5, -0.5) } #[rustfmt::skip] pub fn y(y: f32) -> Matrix4<f32> { let r = y.to_radians(); Translate::by(0.5, 0.0, 0.5) * Matrix4::new( r.cos(), 0.0, r.sin(), 0.0, // 0.0, 1.0, 0.0, 0.0, // -r.sin(), 0.0, r.cos(), 0.0, // 0.0, 0.0, 0.0, 1.0 )* Translate::by(-0.5, 0.0, -0.5) } #[rustfmt::skip] pub fn z(z: f32) -> Matrix4<f32> { let r = z.to_radians(); Translate::by(0.5, 0.5, 0.0) * Matrix4::new( r.cos(), -r.sin(), 0.0, 0.0, // r.sin(), r.cos(), 0.0, 0.0, // 0.0, 0.0, 1.0, 0.0, // 0.0, 0.0, 0.0, 1.0 ) * Translate::by(-0.5, -0.5, 0.0) } }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/src/rule/builtin.rs
src/rule/builtin.rs
// Copyright 2018 The immense Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // https://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use crate::mesh::{sphere_of_resolution, Mesh, PrimitiveMesh}; use crate::rule::Rule; use std::rc::Rc; /// A cube of size 1 whose center is at the origin. pub fn cube() -> Rule { Rule::primitive(PrimitiveMesh::Cube) } /// An icosphere of diameter 1. pub fn icosphere() -> Rule { Rule::primitive(PrimitiveMesh::IcoSphere) } /// A sphere of the given resolution. Produces 20 * 4 ^ resolution polygons to estimate the sphere. /// /// This is an expensive mesh. Try to call this function once and use the Rc wherever needed. pub fn sphere(resolution: usize) -> Rc<Mesh> { Rc::new(sphere_of_resolution(resolution)) }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/examples/torus.rs
examples/torus.rs
use immense::*; use std::fs::File; fn main() { let meshes = rule![ tf![ Tf::saturation(0.8), Tf::hue(160.0), Replicate::n(36, vec![Tf::rz(10.0), Tf::ty(0.1)]), Replicate::n(36, vec![Tf::ry(10.0), Tf::tz(1.2), Tf::hue(3.4)]), ] => cube(), ] .generate(); let mut output = File::create("torus.obj").expect("obj file"); write_meshes( ExportConfig { grouping: MeshGrouping::ByColor, export_colors: Some(String::from("torus.mtl")), }, meshes, &mut output, ) .expect("rendered scene"); }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/examples/randtower.rs
examples/randtower.rs
use immense::*; use rand::*; use std::fs::File; #[derive(Debug)] struct RandCube; impl ToRule for RandCube { fn to_rule(&self) -> Rule { rule![ *thread_rng() .choose(&[Tf::tx(0.1), Tf::tx(-0.1), Tf::tx(0.2), Tf::tx(-0.2)]) .unwrap() => cube() ] } } fn main() { let meshes = Rule::new() .push(Replicate::n(4, Tf::ty(1.0)), RandCube {}) .generate(); let mut output = File::create("randtower.obj").expect("obj file"); write_meshes(ExportConfig::default(), meshes, &mut output).expect("rendered scene"); }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/examples/grid2d.rs
examples/grid2d.rs
use immense::*; use std::fs::File; struct Grid2D { rows: usize, cols: usize, } impl ToRule for Grid2D { fn to_rule(&self) -> Rule { rule![ tf![ Replicate::n(self.rows, Tf::ty(1.1)), Replicate::n(self.cols, Tf::tx(1.1)), ] => cube(), ] } } fn main() { let mut output = File::create("grid2d.obj").expect("obj file"); let meshes = Grid2D { rows: 2, cols: 2 }.to_rule().generate(); write_meshes(ExportConfig::default(), meshes, &mut output).expect("rendered scene"); }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/examples/playpen.rs
examples/playpen.rs
use hex; use immense::*; use itertools::iproduct; use lazy_static::lazy_static; use noise::{Fbm, NoiseFn}; use palette::encoding::pixel::Pixel; use palette::encoding::srgb::Srgb; use palette::rgb::Rgb; use rand::seq::SliceRandom; use rand::thread_rng; use rand::Rng; use std::fs::File; use std::io::BufWriter; use std::rc::Rc; const SPHERE_RESOLUTION: usize = 0; lazy_static! { static ref PALETTE: [Hsv; 5] = [ hexcolor("4F4052"), hexcolor("6D7577"), hexcolor("95A8A9"), hexcolor("A8C4BE"), hexcolor("AFD8DB"), ]; } fn hexcolor(hex: &str) -> Hsv { let bytes: Vec<u8> = hex::decode(hex).expect("raw bytes from hex"); let fmt = |i| bytes[i] as f32 / 255.0; let color: Rgb<Srgb, f32> = *Rgb::from_raw(&[fmt(0), fmt(1), fmt(2)]); Hsv::from(color) } trait Tilable: Clone + 'static { fn to_tile(&self, row: usize, col: usize) -> Rule; } struct GridTile<T> { row: usize, col: usize, tilable: T, } impl<T: Tilable> ToRule for GridTile<T> { fn to_rule(&self) -> Rule { self.tilable.to_tile(self.row, self.col) } } fn grid(rows: usize, cols: usize, rule: impl Tilable) -> Rule { rule![ tf![Tf::tx((cols - 1) as f32 / -2.0), Tf::tz((rows - 1) as f32 / -2.0)] => iproduct!(0..rows, 0..cols).fold(Rule::new(), |root_rule, (r, c)| { root_rule.push(tf![ Tf::tx(c as f32), Tf::tz(r as f32) ], GridTile { row: r, col: c, tilable: rule.clone() }) }) ] } #[derive(Clone)] struct Pyramid { levels: usize, sphere: Rc<Mesh>, } impl ToRule for Pyramid { fn to_rule(&self) -> Rule { (0..self.levels).fold(Rule::new(), |rule, i| { let target_downscale = 1.0 - ((i + 1) as f32 / self.levels as f32); rule.push( tf![ Tf::sby( target_downscale, ((1.0 / self.levels as f32) * 1.6).powi(2), target_downscale ), Tf::ty(i as f32), Tf::color(*PALETTE.choose(&mut thread_rng()).unwrap()) ], (*&[cube(), self.sphere.to_rule()] .choose(&mut thread_rng()) .unwrap()) .clone(), ) }) } } #[derive(Clone)] struct Tower; impl ToRule for Tower { fn to_rule(&self) -> Rule { let thin = 0.002; let bars = thread_rng().gen_range(4, 20); let 
height = 0.03 * (1.0 / bars as f32); let bar = rule![ tf![Tf::color(*PALETTE.choose(&mut thread_rng()).unwrap()), Tf::tx(-0.5), Tf::sby(thin, height, 1.0)] => cube(), tf![Tf::color(*PALETTE.choose(&mut thread_rng()).unwrap()), Tf::tx(0.5), Tf::sby(thin, height, 1.0)] => cube(), tf![Tf::color(*PALETTE.choose(&mut thread_rng()).unwrap()), Tf::tz(-0.5), Tf::sby(1.0, height, thin)] => cube(), tf![Tf::color(*PALETTE.choose(&mut thread_rng()).unwrap()), Tf::tz(0.5), Tf::sby(1.0, height, thin)] => cube(), tf![Tf::color(*PALETTE.choose(&mut thread_rng()).unwrap()), Tf::ty(0.0), Tf::s(0.2)] => icosphere(), ]; rule![ Replicate::n(bars, Tf::ty(height * 13.0)) => bar, ] } } #[derive(Clone)] struct CityBlock { sphere: Rc<Mesh>, noise: Fbm, depth: usize, } impl Tilable for CityBlock { fn to_tile(&self, row: usize, col: usize) -> Rule { let division = (self.noise.get([row as f64, col as f64]).abs() * 10.0) as usize + 4; let mut candidates = vec![ rule![None => Pyramid { levels: division, sphere: self.sphere.clone(), }], rule![None => Tower {}], ]; if self.depth < 3 { candidates.push(rule![tf![Tf::sby(0.5, 0.5, 0.5)] => grid( 2, 2, CityBlock { sphere: self.sphere.clone(), noise: self.noise.clone(), depth: self.depth + 1, }, )]); } let mut rng = thread_rng(); (&candidates).choose(&mut rng).unwrap().clone() } } #[derive(Copy, Clone)] struct Wire; impl Tilable for Wire { fn to_tile(&self, _: usize, _: usize) -> Rule { let height = 0.03; let thin = 0.05; rule![Tf::color(*PALETTE.choose(&mut thread_rng()).unwrap()) => rule![ tf![Tf::tx(-0.5), Tf::sby(thin, height, 1.0)] => cube(), tf![Tf::tx(0.5), Tf::sby(thin, height, 1.0)] => cube(), tf![Tf::tz(-0.5), Tf::sby(1.0, height, thin)] => cube(), tf![Tf::tz(0.5), Tf::sby(1.0, height, thin)] => cube() ]] } } fn main() { let grid_size = 10; let meshes = rule![ tf![ Tf::sby(grid_size as f32, 1.0, grid_size as f32), Tf::ty(-0.5), Tf::color(Hsv::new(0.0, 0.0, 1.0)) ] => cube(), None => grid(grid_size, grid_size, CityBlock { sphere: 
sphere(SPHERE_RESOLUTION), noise: Fbm::new(), depth: 0 }), None => grid(grid_size, grid_size, Wire {}) ] .generate(); let output = File::create("pyramid.obj").expect("obj file"); write_meshes( ExportConfig { grouping: MeshGrouping::ByColor, export_colors: Some(String::from("pyramid_colors.mtl")), }, meshes, &mut BufWriter::new(output), ) .expect("rendered scene"); }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
turnage/immense
https://github.com/turnage/immense/blob/cf415f6dc6edd0b88b27c48d6a55ad7c5173000b/examples/recursive_tile.rs
examples/recursive_tile.rs
use immense::*; use std::fs::File; struct RecursiveTile { depth_budget: usize, } impl ToRule for RecursiveTile { fn to_rule(&self) -> Rule { let rule = rule![ tf![Tf::t(0.25, 0.25, 0.0), Tf::s(0.4)] => icosphere(), tf![Tf::t(-0.25, -0.25, 0.0), Tf::s(0.4)] => icosphere(), tf![Tf::t(-0.25, 0.25, 0.0), Tf::s(0.4)] => icosphere(), ]; if self.depth_budget > 0 { rule.push( vec![Tf::t(0.25, -0.25, 0.0), Tf::s(0.4)], RecursiveTile { depth_budget: self.depth_budget - 1, }, ) } else { rule } } } fn main() { let meshes = Rule::new() .push(None, icosphere()) .push(Tf::tx(1.0), cube()) .generate(); //RecursiveTile { depth_budget: 3 }.to_rule().generate(); let mut output = File::create("recursive_tile.obj").expect("obj file"); write_meshes(ExportConfig::default(), meshes, &mut output).expect("rendered scene"); }
rust
Apache-2.0
cf415f6dc6edd0b88b27c48d6a55ad7c5173000b
2026-01-04T20:19:37.243955Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/main.rs
src/main.rs
extern crate rltk; use rltk::Rltk; mod game; fn main() { let gs = game::State::new_menu(); let mut context = Rltk::init_simple8x8(80, 50, "Rusty Roguelike", "resources"); context.with_post_scanlines(true); rltk::main_loop(context, gs); }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/fighter.rs
src/game/fighter.rs
use std::cmp::{min}; use super::{Player, Mob}; extern crate serde; use serde::{Serialize, Deserialize}; #[derive(Serialize, Deserialize, Clone)] pub struct Fighter { pub max_hp : i32, pub hp: i32, pub defense: i32, pub power: i32, pub dead: bool, pub xp_value : i32 } impl Fighter { pub fn new(max_hp: i32, defense: i32, power: i32, xp:i32) -> Fighter { Fighter{ max_hp, hp: max_hp, defense, power, dead: false, xp_value : xp } } } pub trait Combat { fn get_power(&self)->i32; fn get_defense(&self)->i32; fn take_damage(&mut self, amount:i32); fn heal_damage(&mut self, amount:i32); fn get_name(&self)->String; fn get_hp(&self)->i32; fn kill(&mut self); fn xp_value(&self)->i32 { 0 } } pub fn attack(instigator_name: String, instigator_power : i32, target: &mut Combat) -> (i32, Vec<String>) { let mut results = Vec::new(); let mut xp = 0; let damage = instigator_power - target.get_defense(); if damage > 0 { target.take_damage(damage); results.push(format!("{} attacks {}, for {} hit points of damage.", instigator_name, target.get_name(), damage)); results.push(format!("{} has {} remaining hit points.", target.get_name(), target.get_hp())); if target.get_hp() < 1 { results.push(format!("{} is dead.", target.get_name())); target.kill(); xp += target.xp_value(); } } else { results.push(format!("{} attacks {}, but lacks the power to do anything useful.", instigator_name, target.get_name())); } (xp, results) } impl Combat for Player { fn take_damage(&mut self, amount:i32) { self.fighter.hp -= amount; } fn heal_damage(&mut self, amount:i32) { self.fighter.hp = min(self.fighter.max_hp, self.fighter.hp + amount); } fn get_name(&self) -> String { "Player".to_string() } fn get_defense(&self) -> i32 { let mut defense = self.fighter.defense; for item in self.inventory.equipped.iter() { defense += item.equippable.unwrap().defense_bonus; } defense } fn get_power(&self) -> i32 { let mut power = self.fighter.power; for item in self.inventory.equipped.iter() { power += 
item.equippable.unwrap().power_bonus; } power } fn get_hp(&self) -> i32 { self.fighter.hp } fn kill(&mut self) { self.fighter.dead = true; } } impl Combat for Mob { fn take_damage(&mut self, amount:i32) { self.fighter.hp -= amount; } fn heal_damage(&mut self, amount:i32) { self.fighter.hp = min(self.fighter.max_hp, self.fighter.hp + amount); } fn get_name(&self) -> String { self.name.clone() } fn get_defense(&self) -> i32 { self.fighter.defense } fn get_power(&self) -> i32 { self.fighter.power } fn get_hp(&self) -> i32 { self.fighter.hp } fn kill(&mut self) { self.fighter.dead = true; } fn xp_value(&self)->i32 { self.fighter.xp_value } }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/random.rs
src/game/random.rs
use rand::Rng; pub fn random_choice(table : Vec<(String, i32)>) -> String { let mut rng = rand::thread_rng(); let n = rng.gen_range(1,100); let mut running_sum = 0; for (opt,chance) in table.iter() { if n < chance+running_sum { return opt.to_string() } running_sum += chance; } table[0].0.to_string() }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/inventory.rs
src/game/inventory.rs
use super::{Item, gui, gui::ItemMenuResult, State, TickType, BaseEntity, player}; use crate::rltk; use rltk::Rltk; extern crate serde; use serde::{Serialize, Deserialize}; #[derive(Serialize, Deserialize, Clone)] pub struct Inventory { pub items : Vec<Item>, pub equipped : Vec<Item>, capacity: i32 } impl Inventory { pub fn new(capacity : i32) -> Inventory { Inventory{ items: Vec::new(), capacity, equipped: Vec::new() } } pub fn add_item(&mut self, item : Item) -> Vec<String> { let mut result : Vec<String> = Vec::new(); if self.items.len() as i32 > self.capacity { result.push("You cannot carry any more!".to_string()); } else { result.push(format!("You pick up the {}", item.name)); self.items.push(item); } result } pub fn remove_item_return_clone(&mut self, item_index: i32) -> Item { let item_copy = self.items[item_index as usize].clone(); self.items.remove(item_index as usize); item_copy } pub fn get_equippable_items(&self) -> Vec<i32> { let mut result = Vec::new(); for (i,item) in self.items.iter().enumerate() { match item.equippable { None => {} Some(_) => { result.push(i as i32); } } } result } } pub fn pickup(gs : &mut State) { let mut item_index = 0; let ppos = gs.player().position; for (i,e) in gs.entities.iter_mut().enumerate() { if e.can_pickup() && e.get_position() == ppos { // We can do it! item_index = i; } } if item_index > 0 { let cloned_item = gs.entities[item_index].as_item().unwrap().clone(); let results = gs.player_mut().inventory.add_item(cloned_item); gs.entities.remove(item_index); for s in results.iter() { gs.add_log_entry(s.clone()); } } else { gs.add_log_entry("There is nothing to pick up.".to_string()); } } pub fn use_item(gs : &mut State, ctx : &mut Rltk) { let (result, selection) = gui::handle_item_menu(gs, ctx, "Use which item? 
(or ESC)"); match result { ItemMenuResult::NoResponse => {} ItemMenuResult::Selected => { let result = player::use_item(selection, gs); for s in result.iter() { gs.add_log_entry(s.to_string()); } } ItemMenuResult::Cancel => { gs.game_state = TickType::PlayersTurn } } } pub fn drop_item(gs : &mut State, ctx : &mut Rltk) { let (result, selection) = gui::handle_item_menu(gs, ctx, "Drop which item? (or ESC)"); match result { ItemMenuResult::NoResponse => {} ItemMenuResult::Selected => { let mut item_copy = gs.player_mut().inventory.remove_item_return_clone(selection); item_copy.position = gs.player().get_position(); gs.add_log_entry(format!("You drop the {}", item_copy.name)); gs.entities.push(Box::new(item_copy)); gs.game_state = TickType::EnemyTurn; } ItemMenuResult::Cancel => { gs.game_state = TickType::PlayersTurn } } } pub fn wield_item(gs : &mut State, ctx : &mut Rltk) { let (result, selection) = gui::handle_equippable_menu(gs, ctx, "Wield which item? (or ESC)"); match result { ItemMenuResult::NoResponse => {} ItemMenuResult::Selected => { let result = wield_item_final(selection, gs); for s in result.iter() { gs.add_log_entry(s.to_string()); } } ItemMenuResult::Cancel => { gs.game_state = TickType::PlayersTurn } } } fn wield_item_final(item_index : i32, gs : &mut State) -> Vec<String> { let mut result = Vec::new(); let slot = gs.player().inventory.items[item_index as usize].equippable.unwrap().slot; // Do we already have anything in that slot? 
If so, move it to the inventory let mut already_equipped : Vec<Item> = Vec::new(); for equipped in gs.player().inventory.equipped.iter() { if equipped.equippable.unwrap().slot == slot { result.push(format!("You unequip the {}", equipped.name)); already_equipped.push(equipped.clone()); } } gs.player_mut().inventory.equipped.retain(|a| a.equippable.unwrap().slot != slot); for item in already_equipped { gs.player_mut().inventory.items.push(item); } // Put the item in the equip list and remove it from the backpack let item = gs.player_mut().inventory.items[item_index as usize].clone(); result.push(format!("You equip the {}", item.name)); gs.player_mut().inventory.items.remove(item_index as usize); gs.player_mut().inventory.equipped.push(item); gs.game_state = TickType::EnemyTurn; result } pub fn unequip_item(gs : &mut State, ctx : &mut Rltk) { let (result, selection) = gui::handle_equipped_menu(gs, ctx, "Unequip which item? (or ESC)"); match result { ItemMenuResult::NoResponse => {} ItemMenuResult::Selected => { let result = unequip_item_final(selection, gs); for s in result.iter() { gs.add_log_entry(s.to_string()); } } ItemMenuResult::Cancel => { gs.game_state = TickType::PlayersTurn } } } fn unequip_item_final(item_index : i32, gs : &mut State) -> Vec<String> { let mut result = Vec::new(); let item = gs.player().inventory.equipped[item_index as usize].clone(); result.push(format!("You remove the {}", item.name)); gs.player_mut().inventory.equipped.remove(item_index as usize); gs.player_mut().inventory.items.push(item); gs.game_state = TickType::EnemyTurn; result } pub fn item_targeting(gs : &mut State, ctx : &mut Rltk) { let result = gui::handle_item_targeting(gs, ctx, "Select your target with cursor keys or mouse, Escape to cancel."); match result { ItemMenuResult::NoResponse => {} ItemMenuResult::Cancel => { gs.game_state = TickType::PlayersTurn } ItemMenuResult::Selected => { player::use_area_item(gs); } } }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/player.rs
src/game/player.rs
use crate::rltk; use rltk::{RGB, Point, Rltk, field_of_view, Algorithm2D, VirtualKeyCode}; use super::{fighter::Fighter, Inventory, BaseEntity, Combat, Map, ItemType, State, attack, TickType, inventory, item_effects, TileType, Particle}; extern crate serde; use serde::{Serialize, Deserialize}; #[derive(Serialize, Deserialize, Clone)] pub struct Player { pub position : Point, pub glyph: u8, pub fg : RGB, pub visible_tiles : Vec<Point>, pub fighter : Fighter, pub inventory : Inventory, pub dungeon_level : i32, pub xp : i32, pub level : i32 } impl Player { pub fn new(x:i32, y:i32, glyph:u8, fg : RGB) -> Player { Player{ position: Point::new(x, y), glyph, fg, visible_tiles: Vec::new(), fighter: Fighter::new(10, 0, 1, 0), inventory: Inventory::new(26), dungeon_level : 0, xp : 0, level : 1 } } pub fn copy_from_other_player(&mut self, other : &Player) { self.glyph = other.glyph; self.fg = other.fg; self.fighter = other.fighter.clone(); self.inventory = other.inventory.clone(); self.dungeon_level = other.dungeon_level; self.fighter.hp = self.fighter.max_hp; // Not copying visible tiles or position, since this is intended for map transition } pub fn xp_to_level(&self) -> i32 { 200 + (self.level * 150) } } #[typetag::serde(name = "BEPlayer")] impl BaseEntity for Player { fn get_position(&self) -> Point { self.position } fn get_fg_color(&self) -> RGB { self.fg } fn get_glyph(&self) -> u8 { self.glyph } fn as_player(&self) -> Option<&Player> { Some(self) } fn as_player_mut(&mut self) -> Option<&mut Player> { Some(self) } fn as_combat(&mut self) -> Option<&mut Combat> { Some(self) } fn plot_visibility(&mut self, map : &Map) { self.visible_tiles = field_of_view(self.get_position(), 6, map); } fn get_tooltip_text(&self) -> String { "It's you!".to_string() } fn get_name(&self) -> String { "Player".to_string() } fn is_player(&self) -> bool { true } } // Handlers for gameplay #[derive(PartialEq)] pub enum PlayerTickResult { None, NextMap } pub fn player_tick(gs : &mut State, ctx : 
&mut Rltk) -> PlayerTickResult { let player_ro = gs.player(); if player_ro.xp > player_ro.xp_to_level() { let player_rw = gs.player_mut(); player_rw.level += 1; let new_level = player_rw.level; player_rw.fighter.hp = player_rw.fighter.max_hp; gs.add_log_entry(format!("You are now level {}! Your wounds heal.", new_level)); gs.game_state = TickType::LevelUpMenu; return PlayerTickResult::None; } let mut turn_ended = false; let mut attack_target : Option<usize> = None; if let Some(key) = ctx.key { match key { VirtualKeyCode::Escape => { gs.save(); gs.game_state = TickType::MainMenu; } // Numpad VirtualKeyCode::Numpad8 => { attack_target = move_player(gs, 0, -1); turn_ended = true; } VirtualKeyCode::Numpad4 => { attack_target = move_player(gs, -1, 0); turn_ended = true; } VirtualKeyCode::Numpad6 => { attack_target = move_player(gs, 1, 0); turn_ended = true; } VirtualKeyCode::Numpad2 => { attack_target = move_player(gs, 0, 1); turn_ended = true; } VirtualKeyCode::Numpad7 => { attack_target = move_player(gs, -1, -1); turn_ended = true; } VirtualKeyCode::Numpad9 => { attack_target = move_player(gs, 1, -1); turn_ended = true; } VirtualKeyCode::Numpad1 => { attack_target = move_player(gs, -1, 1); turn_ended = true; } VirtualKeyCode::Numpad3 => { attack_target = move_player(gs, 1, 1); turn_ended = true; } // Cursors VirtualKeyCode::Up => { attack_target = move_player(gs, 0, -1); turn_ended = true; } VirtualKeyCode::Down => { attack_target = move_player(gs, 0, 1); turn_ended = true; } VirtualKeyCode::Left => { attack_target = move_player(gs, -1, 0); turn_ended = true; } VirtualKeyCode::Right => { attack_target = move_player(gs, 1, 0); turn_ended = true; } // Wait VirtualKeyCode::Numpad5 => { turn_ended = true; } VirtualKeyCode::W => { turn_ended = true; } // Items VirtualKeyCode::G => { inventory::pickup(gs); turn_ended = true; } VirtualKeyCode::U => { use_menu(gs); } VirtualKeyCode::D => { drop_menu(gs); } VirtualKeyCode::E => { equip_menu(gs); } VirtualKeyCode::R => { 
unequip_menu(gs); } // Level Change VirtualKeyCode::Period => { if gs.map.tiles[gs.map.point2d_to_index(gs.player().position) as usize] == TileType::Stairs { return PlayerTickResult::NextMap; } else { gs.add_log_entry("You aren't on stairs".to_string()); } } // Character Info VirtualKeyCode::C => { gs.game_state = TickType::CharacterMenu; } VirtualKeyCode::Slash => { gs.game_state = TickType::HelpMenu; } _ => {} } } if let Some(target) = attack_target { gs.vfx.push(Particle::new(gs.entities[target].get_position(), RGB::named(rltk::RED), RGB::named(rltk::BLACK), 176, 200.0)); let player = gs.player_as_combat(); let (xp, result) = attack(player.get_name(), player.get_power(), gs.entities[target].as_combat().unwrap()); for s in result { gs.add_log_entry(s.to_string()); } gs.entities.retain(|e| !e.is_dead()); let p = gs.player_mut(); p.xp += xp; } if turn_ended { gs.update_visibility(); gs.game_state = TickType::EnemyTurn; } PlayerTickResult::None } // Returns the ID of the target if we're attacking fn move_player(gs : &mut State, delta_x : i32, delta_y: i32) -> Option<usize> { let mut result : Option<usize> = None; let new_x = gs.player().position.x + delta_x; let new_y = gs.player().position.y + delta_y; let mut can_move : bool = true; if new_x > 0 && new_x < 79 && new_y > 0 && new_y < 49 && gs.map.is_walkable(new_x, new_y) { // Lets see if we are bumping a mob let new_pos = Point::new(new_x, new_y); for (i,e) in gs.entities.iter_mut().enumerate() { if e.get_position() == new_pos && e.blocks_tile() { // Tile is indeed blocked can_move = false; if e.can_be_attacked() { // Attack it! 
result = Some(i); } } } if can_move { gs.player_mut().position.x = new_x; gs.player_mut().position.y = new_y; } } result } fn use_menu(gs : &mut State) { if gs.player().inventory.items.is_empty() { gs.add_log_entry("You don't have any usable items.".to_string()); } else { gs.game_state = TickType::UseMenu; } } fn drop_menu(gs : &mut State) { if gs.player().inventory.items.is_empty() { gs.add_log_entry("You don't have any items to drop!".to_string()); } else { gs.game_state = TickType::DropMenu; } } fn equip_menu(gs : &mut State) { if gs.player().inventory.get_equippable_items().is_empty() { gs.add_log_entry("You don't have any equippable items.".to_string()); } else { gs.game_state = TickType::WieldMenu; } } fn unequip_menu(gs : &mut State) { if gs.player().inventory.equipped.is_empty() { gs.add_log_entry("You don't have any equipped items.".to_string()); } else { gs.game_state = TickType::UnequipMenu; } } pub fn use_item(item_index : i32, gs : &mut State) -> Vec<String> { let mut result = Vec::new(); if gs.player().inventory.items[item_index as usize].requires_targeting_mode { gs.game_state = TickType::TargetingItem; gs.target_cell = gs.player().position; gs.targeting_item = item_index; result.push("Select a target tile".to_string()); return result; } let item_type = gs.player().inventory.items[item_index as usize].item_type; match item_type { ItemType::HealthPotion => { item_effects::use_health_potion(item_index, gs, &mut result) } ItemType::ZapScroll => { item_effects::use_zap_scroll(item_index, gs, &mut result) } ItemType::ConfusionScroll => { item_effects::use_confusion_scroll(item_index, gs, &mut result) } _ => {} } gs.game_state = TickType::PlayersTurn; result } pub fn use_area_item(gs : &mut State) { let mut result = Vec::new(); let item_type = gs.player().inventory.items[gs.targeting_item as usize].item_type; if let ItemType::FireballScroll = item_type { item_effects::use_fireball_scroll(gs, &mut result) } }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/entity.rs
src/game/entity.rs
use crate::rltk; use rltk::{RGB, Rltk, Point, Console}; use super::{Map, Player, Combat, Mob, Item}; extern crate typetag; #[typetag::serde(tag = "BaseEntity")] pub trait BaseEntity { fn get_position(&self) -> Point; fn get_fg_color(&self) -> RGB; fn get_glyph(&self) -> u8; fn draw_to_map(&self, ctx : &mut Rltk, map : &Map) { if map.is_tile_visible(self.get_position()) { let pos = self.get_position(); ctx.set(pos.x, pos.y, self.get_fg_color(), RGB::named(rltk::BLACK), self.get_glyph()); } } fn as_player(&self) -> Option<&Player> { None } fn as_player_mut(&mut self) -> Option<&mut Player> { None } fn as_combat(&mut self) -> Option<&mut Combat> { None } fn as_mob_mut(&mut self) ->Option<&mut Mob> { None } fn as_item(&self) -> Option<&Item> { None } fn plot_visibility(&mut self, map : &Map); fn get_tooltip_text(&self) -> String; fn blocks_tile(&self) -> bool { false } fn can_be_attacked(&self) -> bool { false } fn is_dead(&self) -> bool { false } fn is_mob(&self) -> bool { false } fn get_name(&self) -> String; fn can_pickup(&self) -> bool { false } fn is_player(&self) -> bool { false } }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/item_effects.rs
src/game/item_effects.rs
use super::{State, BaseEntity, TickType, Combat, Particle}; use crate::rltk; use rltk::{RGB}; pub fn use_health_potion(item_index : i32, gs : &mut State, result : &mut Vec<String>) { let player = &mut gs.player_mut(); if player.fighter.hp == player.fighter.max_hp { result.push("You are already at maximum health.".to_string()); } else { player.fighter.hp = player.fighter.max_hp; // Cheezed due to confusion over borrowing result.push("You are healed!".to_string()); player.inventory.remove_item_return_clone(item_index); } } pub fn use_zap_scroll(item_index : i32, gs : &mut State, result : &mut Vec<String>) { let mut possible_targets : Vec<(usize, f32)> = Vec::new(); let visible_tiles = gs.player().visible_tiles.clone(); let my_pos = gs.player().get_position(); for (i,potential_target) in gs.entities.iter().enumerate() { if potential_target.is_mob() { let target_pos = potential_target.get_position(); if visible_tiles.contains(&target_pos) { possible_targets.push((i, rltk::DistanceAlg::Pythagoras.distance2d(my_pos, target_pos))); } } } if possible_targets.is_empty() { result.push("You can't see anyone to zap, so you put the scroll away.".to_string()); } else { possible_targets.sort_by(|a,b| a.1.partial_cmp(&b.1).unwrap()); let target = &mut gs.entities[possible_targets[0].0].as_mob_mut().unwrap(); let tp = target.get_position(); let line = rltk::line2d(rltk::LineAlg::Bresenham, tp, my_pos); for zap in line { gs.vfx.push(Particle::new(zap, RGB::named(rltk::CYAN), RGB::named(rltk::BLACK), 15, 200.0)); } result.push(format!("Lightning from the scroll zaps {} for 8 points of damage.", target.name)); target.take_damage(8); if target.fighter.hp < 1 { target.kill(); result.push(format!("{} is burned to a crisp.", target.name)); gs.player_mut().xp += target.fighter.xp_value; } gs.entities.retain(|e| !e.is_dead()); // Remove the scroll gs.player_mut().inventory.remove_item_return_clone(item_index); } } pub fn use_fireball_scroll(gs : &mut State, result : &mut Vec<String>) { 
result.push("You launch a fireball!".to_string()); let target = gs.target_cell; let item_index = gs.targeting_item; let area_of_effect = rltk::field_of_view(target, 3, &gs.map); for pos in area_of_effect.iter() { gs.vfx.push(Particle::new(*pos, RGB::named(rltk::RED), RGB::named(rltk::YELLOW), 176, 200.0)); } let mut targets : Vec<usize> = Vec::new(); for (i,e) in gs.entities.iter().enumerate() { if area_of_effect.contains(&e.get_position()) && e.can_be_attacked() { targets.push(i); } } for target_id in targets { let target = gs.entities[target_id].as_combat(); match target { None => {} Some(target) => { result.push(format!("{} is burned for 8 points of damage.", target.get_name())); target.take_damage(8); if target.get_hp() < 1 { result.push(format!("{} is dead.", target.get_name())); target.kill(); gs.player_mut().xp += target.xp_value(); } } } } gs.entities.retain(|e| !(e.is_dead() && !e.is_player())); // Remove the scroll gs.player_mut().inventory.remove_item_return_clone(item_index); gs.game_state = TickType::EnemyTurn; for r in result { gs.add_log_entry(r.to_string()); } } pub fn use_confusion_scroll(item_index : i32, gs : &mut State, result : &mut Vec<String>) { let mut possible_targets : Vec<(usize, f32)> = Vec::new(); let visible_tiles = gs.player().visible_tiles.clone(); let my_pos = gs.player().get_position(); for (i,potential_target) in gs.entities.iter().enumerate() { if potential_target.is_mob() { let target_pos = potential_target.get_position(); if visible_tiles.contains(&target_pos) { possible_targets.push((i, rltk::DistanceAlg::Pythagoras.distance2d(my_pos, target_pos))); } } } if possible_targets.is_empty() { result.push("You can't see anyone to zap, so you put the scroll away.".to_string()); } else { possible_targets.sort_by(|a,b| a.1.partial_cmp(&b.1).unwrap()); let mut target = &mut gs.entities[possible_targets[0].0].as_mob_mut().unwrap(); result.push(format!("{} is confused.", target.name)); target.confused = Some(5); // Remove the scroll 
gs.player_mut().inventory.remove_item_return_clone(item_index); } }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/gui.rs
src/game/gui.rs
use crate ::rltk; use crate ::rltk::Console; use rltk::{Rltk, Point, RGB, Algorithm2D, VirtualKeyCode}; use super::{Map, TileType, State, TickType}; use std::cmp::{max, min}; use serde::{Serialize, Deserialize}; use rand::Rng; use std::path::Path; pub enum ItemMenuResult { Cancel, NoResponse, Selected } pub fn render(gs : &State, ctx : &mut Rltk, map : &Map) { draw_map(ctx, map); draw_entities(gs, ctx, map); draw_user_interface(gs, ctx); draw_mouse_info(gs, ctx, map); for p in gs.vfx.iter() { p.render(ctx); } } fn draw_map(ctx : &mut Rltk, map : &Map) { ctx.cls(); let mut idx = 0; for y in 0 .. map.height { for x in 0 .. map.width { // You wouldn't normally make this mess - clean up! if map.revealed[idx] { if map.visible[idx] { match map.tiles[idx] { TileType::Floor => { ctx.print_color(x, y, RGB::named(rltk::DARK_GREEN), RGB::named(rltk::BLACK), ".") } TileType::Wall => { ctx.set(x, y, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), decorate_wall_tile(map, Point::new(x,y))) } TileType::Stairs => { ctx.print_color(x, y, RGB::named(rltk::MAGENTA), RGB::named(rltk::BLACK), ">") } } } else { match map.tiles[idx] { TileType::Floor => { ctx.print_color(x, y, RGB::named(rltk::GREY), RGB::named(rltk::BLACK), ".") } TileType::Wall => { ctx.set(x, y, RGB::named(rltk::GREY), RGB::named(rltk::BLACK), decorate_wall_tile(map, Point::new(x,y))) } TileType::Stairs => { ctx.print_color(x, y, RGB::named(rltk::GREY), RGB::named(rltk::BLACK), ">") } } } } idx += 1; } } } fn is_revealed_and_wall(map : &Map, coord: Point) -> bool { let idx = map.point2d_to_index(coord) as usize; map.tiles[idx] == TileType::Wall && map.revealed[idx] } fn decorate_wall_tile(map : &Map, coord: Point) -> u8 { if coord.x == 0 || coord.x == map.width || coord.y == 0 || coord.y == map.height { return 35; } let mut mask : u8 = 0; if is_revealed_and_wall(map, Point::new(coord.x, coord.y - 1)) { mask += 1; } if is_revealed_and_wall(map, Point::new(coord.x, coord.y + 1)) { mask += 2; } if 
is_revealed_and_wall(map, Point::new(coord.x - 1, coord.y)) { mask += 4; } if is_revealed_and_wall(map, Point::new(coord.x + 1, coord.y)) { mask += 8; } match mask { 0 => { 9 } // Pillar because we can't see neighbors 1 => { 186 } // Wall only to the north 2 => { 186 } // Wall only to the south 3 => { 186 } // Wall to the north and south 4 => { 205 } // Wall only to the west 5 => { 188 } // Wall to the north and west 6 => { 187 } // Wall to the south and west 7 => { 185 } // Wall to the north, south and west 8 => { 205 } // Wall only to the east 9 => { 200 } // Wall to the north and east 10 => { 201 } // Wall to the south and east 11 => { 204 } // Wall to the north, south and east 12 => { 205 } // Wall to the east and west 13 => { 202 } // Wall to the east, west, and south 14 => { 203 } // Wall to the east, west, and north _ => { 35 } // We missed one? } } fn draw_entities(gs: &State, ctx: &mut Rltk, map : &Map) { for e in gs.entities.iter() { e.draw_to_map(ctx, &map); } } fn draw_user_interface(gs: &State, ctx : &mut Rltk) { let mouse_pos = ctx.mouse_pos(); ctx.set_bg(mouse_pos.0, mouse_pos.1, RGB::named(rltk::MAGENTA)); ctx.draw_box(1, 43, 78, 6, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK)); let maplvl = format!("Depth: {} ", gs.player().dungeon_level); ctx.print_color(3, 43, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), &maplvl); let health = format!(" HP: {} / {} ", gs.player().fighter.hp, gs.player().fighter.max_hp); ctx.print_color(12, 43, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), &health); ctx.draw_bar_horizontal(28, 43, 51, gs.player().fighter.hp, gs.player().fighter.max_hp, RGB::named(rltk::RED), RGB::named(rltk::BLACK)); let mut y = 44; for s in gs.log.iter() { ctx.print(2, y, &s.to_string()); y += 1; } } fn draw_mouse_info(gs : &State, ctx : &mut Rltk, map: &Map) { let mouse_pos = ctx.mouse_pos(); if map.is_tile_visible(Point::new(mouse_pos.0, mouse_pos.1)) { let mut tooltip : Vec<String> = Vec::new(); let tile_info = 
map.tile_description(Point::new(mouse_pos.0, mouse_pos.1)); tooltip.push(format!("Tile: {}", tile_info)); for e in gs.entities.iter() { if e.get_position() == Point::new(mouse_pos.0, mouse_pos.1) { tooltip.push(e.get_tooltip_text()); } } if !tooltip.is_empty() { let mut width :i32 = 0; for s in tooltip.iter() { if width < s.len() as i32 { width = s.len() as i32; } } width += 3; if mouse_pos.0 > 40 { let arrow_pos = Point::new(mouse_pos.0 - 2, mouse_pos.1); let left_x = mouse_pos.0 - width; let mut y = mouse_pos.1; for s in tooltip.iter() { ctx.print_color(left_x, y, RGB::named(rltk::WHITE), RGB::named(rltk::GREY), &s.to_string()); let padding = (width - s.len() as i32)-1; for i in 0..padding { ctx.print_color(arrow_pos.x - i, y, RGB::named(rltk::WHITE), RGB::named(rltk::GREY), &" ".to_string()); } y += 1; } ctx.print_color(arrow_pos.x, arrow_pos.y, RGB::named(rltk::WHITE), RGB::named(rltk::GREY), &"->".to_string()); } else { let arrow_pos = Point::new(mouse_pos.0 + 1, mouse_pos.1); let left_x = mouse_pos.0 +3; let mut y = mouse_pos.1; for s in tooltip.iter() { ctx.print_color(left_x, y, RGB::named(rltk::WHITE), RGB::named(rltk::GREY), &s.to_string()); let padding = (width - s.len() as i32)-1; for i in 0..padding { ctx.print_color(left_x + s.len() as i32 + i, y, RGB::named(rltk::WHITE), RGB::named(rltk::GREY), &" ".to_string()); } y += 1; } ctx.print_color(arrow_pos.x, arrow_pos.y, RGB::named(rltk::WHITE), RGB::named(rltk::GREY), &"<-".to_string()); } } } } #[allow(non_snake_case)] pub fn handle_item_menu<S: ToString>(gs : &mut State, ctx: &mut Rltk, title: S) -> (ItemMenuResult, i32) { let count = gs.player().inventory.items.len(); let mut y = (25 - (count / 2)) as i32; ctx.draw_box(15, y-2, 31, (count+3) as i32, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK)); ctx.print_color(18, y-2, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), &title.to_string()); for (j,i) in gs.player().inventory.items.iter().enumerate() { ctx.set(17, y, RGB::named(rltk::WHITE), 
RGB::named(rltk::BLACK), 40); ctx.set(18, y, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), 97+j as u8); ctx.set(19, y, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), 41); ctx.print(21, y, &i.name.to_string()); y += 1; } match ctx.key { None => {} Some(KEY) => { match KEY { VirtualKeyCode::Escape => { return (ItemMenuResult::Cancel, 0) } _ => { let selection = rltk::letter_to_option(KEY); if selection > -1 && selection < gs.player().inventory.items.len() as i32 { return (ItemMenuResult::Selected, selection); } return (ItemMenuResult::NoResponse, 0); } } } } (ItemMenuResult::NoResponse, 0) } #[allow(non_snake_case)] pub fn handle_equippable_menu<S: ToString>(gs : &mut State, ctx: &mut Rltk, title: S) -> (ItemMenuResult, i32) { let equippable = gs.player().inventory.get_equippable_items(); let count = equippable.len(); let mut y = (25 - (count / 2)) as i32; ctx.draw_box(15, y-2, 31, (count+3) as i32, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK)); ctx.print_color(18, y-2, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), &title.to_string()); for (j,i) in equippable.iter().enumerate() { ctx.set(17, y, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), 40); ctx.set(18, y, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), 97+j as u8); ctx.set(19, y, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), 41); ctx.print(21, y, &gs.player().inventory.items[*i as usize].name.to_string()); y += 1; } match ctx.key { None => {} Some(KEY) => { match KEY { VirtualKeyCode::Escape => { return (ItemMenuResult::Cancel, 0) } _ => { let selection = rltk::letter_to_option(KEY); if selection > -1 && selection < gs.player().inventory.items.len() as i32 { return (ItemMenuResult::Selected, equippable[selection as usize]); } return (ItemMenuResult::NoResponse, 0); } } } } (ItemMenuResult::NoResponse, 0) } #[allow(non_snake_case)] pub fn handle_equipped_menu<S: ToString>(gs : &mut State, ctx: &mut Rltk, title: S) -> (ItemMenuResult, i32) { let count = 
gs.player().inventory.equipped.len(); let mut y = (25 - (count / 2)) as i32; ctx.draw_box(15, y-2, 31, (count+3) as i32, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK)); ctx.print_color(18, y-2, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), &title.to_string()); for (j,i) in gs.player().inventory.equipped.iter().enumerate() { ctx.set(17, y, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), 40); ctx.set(18, y, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), 97+j as u8); ctx.set(19, y, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), 41); ctx.print(21, y, &i.name.to_string()); y += 1; } match ctx.key { None => {} Some(KEY) => { match KEY { VirtualKeyCode::Escape => { return (ItemMenuResult::Cancel, 0) } _ => { let selection = rltk::letter_to_option(KEY); if selection > -1 && selection < gs.player().inventory.equipped.len() as i32 { return (ItemMenuResult::Selected, selection); } return (ItemMenuResult::NoResponse, 0); } } } } (ItemMenuResult::NoResponse, 0) } pub fn display_game_over_and_handle_quit(ctx : &mut Rltk, gs : &mut State) { ctx.cls(); ctx.print_color(33, 25, RGB::named(rltk::RED), RGB::named(rltk::BLACK), &"You are dead.".to_string()); ctx.print_color(28, 27, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), &"Press any key for the menu.".to_string()); if let Some(_) = ctx.key { gs.game_state = TickType::MainMenu } } #[allow(non_snake_case)] pub fn handle_item_targeting<S: ToString>(gs : &mut State, ctx: &mut Rltk, title: S) -> ItemMenuResult { ctx.print_color(0,0, RGB::named(rltk::YELLOW), RGB::named(rltk::RED), &title.to_string()); let mouse_tuple = ctx.mouse_pos(); let mouse_pos = Point::new(mouse_tuple.0, mouse_tuple.1); let previous_mouse = gs.prev_mouse_for_targeting; if mouse_pos != previous_mouse && mouse_pos.x > 0 && mouse_pos.x < 79 && mouse_pos.y > 0 && mouse_pos.y < 40 { gs.target_cell = mouse_pos; } if gs.target_cell.x < 1 { gs.target_cell.x = 1; } if gs.target_cell.x > 79 { gs.target_cell.x = 79; } if gs.target_cell.y < 1 { 
gs.target_cell.y = 1; } if gs.target_cell.y > 39 { gs.target_cell.y = 39; } let possible = gs.map.is_tile_visible(gs.target_cell); if possible { ctx.set_bg(gs.target_cell.x, gs.target_cell.y, RGB::named(rltk::RED)); if ctx.left_click { return ItemMenuResult::Selected; } } match ctx.key { None => {} Some(KEY) => { match KEY { VirtualKeyCode::Escape => { return ItemMenuResult::Cancel } VirtualKeyCode::Return => { if possible { return ItemMenuResult::Selected } } VirtualKeyCode::Space => { if possible { return ItemMenuResult::Selected } } VirtualKeyCode::Left => { gs.target_cell.x = max(gs.target_cell.x-1, 1) } VirtualKeyCode::Right => { gs.target_cell.x = min(gs.target_cell.x+1, 79) } VirtualKeyCode::Up => { gs.target_cell.y = max(gs.target_cell.y-1, 1) } VirtualKeyCode::Down => { gs.target_cell.y = min(gs.target_cell.y+1, 40) } VirtualKeyCode::Numpad4 => { gs.target_cell.x = max(gs.target_cell.x-1, 1) } VirtualKeyCode::Numpad6 => { gs.target_cell.x = min(gs.target_cell.x+1, 79) } VirtualKeyCode::Numpad8 => { gs.target_cell.y = max(gs.target_cell.y-1, 1) } VirtualKeyCode::Numpad2 => { gs.target_cell.y = min(gs.target_cell.y+1, 40) } VirtualKeyCode::Numpad7 => { gs.target_cell = Point::new( max(gs.target_cell.x-1, 1), max(gs.target_cell.y-1, 1) ) } VirtualKeyCode::Numpad9 => { gs.target_cell = Point::new( min(gs.target_cell.x+1, 79), max(gs.target_cell.y-1, 1) ) } VirtualKeyCode::Numpad1 => { gs.target_cell = Point::new( max(gs.target_cell.x-1, 1), min(gs.target_cell.y+1, 40) ) } VirtualKeyCode::Numpad3 => { gs.target_cell = Point::new( min(gs.target_cell.x+1, 79), min(gs.target_cell.y+1, 40) ) } _ => { } } } } ItemMenuResult::NoResponse } const STORY_TYPES : & [& str] = &["Tales", "Sagas", "Adventures", "Anecdotes", "Fables", "Narratives"]; const STORY_NOUNS : & [& str] = &["Heroism", "Cowardice", "Vengeance", "Heroism", "Exploration", "Delving", "Dungeoneering"]; #[derive(Serialize, Deserialize)] pub struct MenuState { random : Vec<usize>, save_exists : bool, 
current_menu_option : i32, backdrop : Vec<(u8, f32)> } impl MenuState { pub fn new() -> MenuState { let mut rng = rand::thread_rng(); let save_exists = Path::new("./savegame.json").exists(); let mut cmo = 1; if save_exists { cmo = 0; } let mut bd : Vec<(u8, f32)> = Vec::new(); for _i in 0..(80*50) { let bg_i = rng.gen_range(0, 192); let bg : f32 = bg_i as f32 / 255.0; bd.push((rng.gen_range(32, 62) as u8, bg)); } MenuState{ random: vec![rng.gen_range(0, 6), rng.gen_range(0, 7), rng.gen_range(0, 7)], save_exists, current_menu_option : cmo, backdrop : bd } } } pub enum MainMenuResult { None, Continue, New, Quit } #[allow(non_snake_case)] pub fn display_main_menu(ctx : &mut Rltk, ms : &mut MenuState) -> MainMenuResult { let mut rng = rand::thread_rng(); ctx.cls(); // Backdrop for y in 0..50 { for x in 0..80 { let idx = (y*80)+x; ctx.set(x, y, RGB::from_f32(0.0, ms.backdrop[idx as usize].1, 0.0), RGB::named(rltk::BLACK), ms.backdrop[idx as usize].0); } } for x in 0..80 { for y in (1..50).rev() { let idx = (y * 80) + x; let above_idx = ((y-1) * 80) + x; ms.backdrop[idx] = ms.backdrop[above_idx]; ms.backdrop[idx].1 -= 0.02; if ms.backdrop[idx].1 < 0.0 { let bg_i = rng.gen_range(0, 192); let bg : f32 = bg_i as f32 / 255.0; ms.backdrop[idx] = (rng.gen_range(32, 62) as u8, bg); } } let y = 0; let idx = (y * 80) + x; let bg_i = rng.gen_range(0, 192); let bg : f32 = bg_i as f32 / 255.0; ms.backdrop[idx] = (rng.gen_range(32, 62) as u8, bg); } // Header ctx.draw_box(15, 8, 50, 11, RGB::named(rltk::GREEN), RGB::named(rltk::BLACK)); ctx.print_color_centered(10, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "Rusty Roguelike v1.0"); ctx.print_color_centered(12, RGB::named(rltk::RED), RGB::named(rltk::BLACK), &format!("{} in {} and {}", STORY_TYPES[ms.random[0]], STORY_NOUNS[ms.random[1]], STORY_NOUNS[ms.random[2]])); // Menu render let mut y = 15; if ms.save_exists { if ms.current_menu_option == 0 { ctx.print_color_centered(y, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), 
"(C)ontinue Saved Game"); } else { ctx.print_color_centered(y, RGB::named(rltk::GREY), RGB::named(rltk::BLACK), "(C)ontinue Saved Game"); } y += 1; } if ms.current_menu_option == 1 { ctx.print_color_centered(y, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), "(N)ew Game"); } else { ctx.print_color_centered(y, RGB::named(rltk::GREY), RGB::named(rltk::BLACK), "(N)ew Game"); } y += 1; if ms.current_menu_option == 2 { ctx.print_color_centered(y, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), "(Q)uit"); } else { ctx.print_color_centered(y, RGB::named(rltk::GREY), RGB::named(rltk::BLACK), "(Q)uit"); } // Copyright blurb ctx.print_color_centered(42, RGB::named(rltk::GREY), RGB::named(rltk::BLACK), "/r/roguelikedev Roguelike Tutorial Series"); ctx.print_color_centered(43, RGB::named(rltk::GREY), RGB::named(rltk::BLACK), "https://github.com/thebracket/rustyroguelike"); ctx.print_color_centered(44, RGB::named(rltk::GREY), RGB::named(rltk::BLACK), "(c) 2019 Bracket Productions"); // Keyboard input match ctx.key { None => {} Some(KEY) => { match KEY { VirtualKeyCode::Escape => { return MainMenuResult::Quit } VirtualKeyCode::Q => { return MainMenuResult::Quit } VirtualKeyCode::N => { return MainMenuResult::New } VirtualKeyCode::C => { if ms.save_exists { return MainMenuResult::Continue } } VirtualKeyCode::Up => { ms.current_menu_option -= 1; if ms.save_exists && ms.current_menu_option < 0 { ms.current_menu_option = 2 } if (!ms.save_exists) && ms.current_menu_option < 1 { ms.current_menu_option = 1 } } VirtualKeyCode::Down => { ms.current_menu_option += 1; if ms.save_exists && ms.current_menu_option > 2 { ms.current_menu_option = 0 } if (!ms.save_exists) && ms.current_menu_option > 2 { ms.current_menu_option = 1 } } VirtualKeyCode::Return => { match ms.current_menu_option { 0 => { return MainMenuResult::Continue } 1 => { return MainMenuResult::New } 2 => { return MainMenuResult::Quit } _ => {} } } _ => {} } } } MainMenuResult::None } #[allow(non_snake_case)] pub fn 
handle_level_up(ctx : &mut Rltk, gs : &mut State) { ctx.draw_box(10, 8, 60, 18, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK)); ctx.print_color_centered(10, RGB::named(rltk::WHITE), RGB::named(rltk::RED), &format!("Congratulations, you are now level {}!", gs.player().level)); ctx.print_color_centered(12, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "Your experience has improved your battle prowess."); ctx.print_color_centered(13, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "Select one of the following to improve:"); ctx.print_color_centered(15, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), "(A) Give me more hit points."); ctx.print_color_centered(16, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), "(B) I'd like to do more damage."); // Keyboard input match ctx.key { None => {} Some(KEY) => { match KEY { VirtualKeyCode::A => { gs.player_mut().fighter.max_hp += 10; gs.player_mut().fighter.hp = gs.player().fighter.max_hp; gs.game_state = TickType::PlayersTurn; } VirtualKeyCode::B => { gs.player_mut().fighter.power += 1; gs.game_state = TickType::PlayersTurn; } _ => {} } } } } #[allow(non_snake_case)] pub fn display_character_info(ctx : &mut Rltk, gs : &mut State) { let player = gs.player(); ctx.draw_box(10, 8, 60, 16, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK)); ctx.print_color_centered(10, RGB::named(rltk::WHITE), RGB::named(rltk::RED), "Character Information"); ctx.print_color_centered(12, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "You are not dead yet. 
That's something."); ctx.print_color_centered(13, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), &format!("You have beaten {} dungeon levels.", player.dungeon_level)); ctx.print_color_centered(14, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), &format!("You have {} experience points, needing {} to level.", player.xp, player.xp_to_level())); ctx.print_color_centered(15, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), &format!("You are level {}.", player.level)); ctx.print_color_centered(16, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), &format!("You have {} hit points, out of {}.", player.fighter.hp, player.fighter.max_hp)); ctx.print_color_centered(17, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), &format!("Your hit power is {}.", player.fighter.power)); ctx.print_color_centered(18, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), &format!("Your defense power is {}.", player.fighter.defense)); ctx.print_color_centered(20, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), "Press any key to resume dungeon bashing!"); match ctx.key { None => {} Some(_) => { gs.game_state = TickType::PlayersTurn; } } } #[allow(non_snake_case)] pub fn display_help_info(ctx : &mut Rltk, gs : &mut State) { ctx.draw_box(10, 8, 60, 17, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK)); ctx.print_color_centered(10, RGB::named(rltk::WHITE), RGB::named(rltk::RED), "Controls"); ctx.print_color_centered(12, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "Arrow keys or NumPad keys to move."); ctx.print_color_centered(13, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "Walk into a monster to attack it."); ctx.print_color_centered(14, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "NumPad 5, or W to Wait."); ctx.print_color_centered(15, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "G to Get an item from the ground."); ctx.print_color_centered(16, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "U to Use an item from your inventory."); ctx.print_color_centered(17, 
RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "E to Equip an item from your inventory."); ctx.print_color_centered(17, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "R to Remove an item you are using."); ctx.print_color_centered(17, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "D to Drop an item from your inventory."); ctx.print_color_centered(18, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "> to go down stairs, if you are standing on them."); ctx.print_color_centered(19, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "C for Character Info."); ctx.print_color_centered(20, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "? for this help menu. You've found this one."); ctx.print_color_centered(21, RGB::named(rltk::WHITE), RGB::named(rltk::BLACK), "ESCAPE to save the game and quit to the menu."); ctx.print_color_centered(23, RGB::named(rltk::YELLOW), RGB::named(rltk::BLACK), "Press any key to resume dungeon bashing!"); match ctx.key { None => {} Some(_) => { gs.game_state = TickType::PlayersTurn; } } }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/mob.rs
src/game/mob.rs
use crate::rltk; use rltk::{RGB, Point, Algorithm2D, a_star_search, field_of_view}; use super::{fighter::Fighter, Map, Combat, BaseEntity, State, attack, random_choice, Particle}; use rand::Rng; extern crate serde; use serde::{Serialize, Deserialize}; #[derive(Serialize, Deserialize)] pub struct Mob { pub position : Point, pub glyph: u8, pub fg : RGB, pub visible_tiles : Vec<Point>, pub name : String, pub fighter : Fighter, pub confused: Option<i32> } impl Mob { pub fn new_random(x:i32, y:i32) -> Mob { let choice = random_choice(vec![("Wight".to_string(), 10), ("Hound".to_string(), 45), ("Itereater".to_string(), 45)]); if choice == "Wight" { Mob::new_wight(x, y) } else if choice == "Hound" { Mob::new_hound(x, y) } else { Mob::new_iter(x, y) } } fn new_wight(x:i32, y:i32) -> Mob { Mob{ position: Point::new(x, y), glyph: 38, fg: RGB::named(rltk::RED), visible_tiles: Vec::new(), name: "Borrow Wight".to_string(), fighter: Fighter::new(2, 0, 1, 60), confused: None } } fn new_hound(x:i32, y:i32) -> Mob { Mob{ position: Point::new(x, y), glyph: 109, fg: RGB::named(rltk::RED), visible_tiles: Vec::new(), name: "Mut Hound".to_string(), fighter: Fighter::new(1, 0, 1, 30), confused: None } } fn new_iter(x:i32, y:i32) -> Mob { Mob{ position: Point::new(x, y), glyph: 105, fg: RGB::named(rltk::RED), visible_tiles: Vec::new(), name: "Itereater Beast".to_string(), fighter: Fighter::new(1, 0, 1, 30), confused: None } } pub fn turn_tick(&mut self, player_pos : Point, map : &mut Map) -> bool { if let Some(turns) = self.confused { let new_turns = turns-1; if new_turns == 0 { self.confused = None; } else { self.confused = Some(new_turns); } let mut rng = rand::thread_rng(); let delta_x = rng.gen_range(0, 3)-1; let delta_y = rng.gen_range(0, 3)-1; let new_loc = Point::new(self.position.x + delta_x, self.position.y + delta_y); if map.is_walkable(new_loc.x, new_loc.y) && !map.is_tile_blocked(map.point2d_to_index(new_loc)) { self.position = new_loc; } return false; } let can_see_player = 
self.visible_tiles.contains(&player_pos); if can_see_player { let distance = rltk::DistanceAlg::Pythagoras.distance2d(player_pos, self.position); if distance < 1.5 { return true; } else { self.path_to_player(player_pos, map); } } false } fn path_to_player(&mut self, player_pos : Point, map : &mut Map) { let path = a_star_search(map.point2d_to_index(self.position), map.point2d_to_index(player_pos), map); if path.success { let idx = path.steps[1]; if !map.is_tile_blocked(idx) { let old_idx = (self.position.y * map.width) + self.position.x; map.clear_tile_blocked(old_idx); map.set_tile_blocked(idx); self.position = map.index_to_point2d(idx); } } } } #[typetag::serde(name = "BEMob")] impl BaseEntity for Mob { fn get_position(&self) -> Point { self.position } fn get_fg_color(&self) -> RGB { self.fg } fn get_glyph(&self) -> u8 { self.glyph } fn as_combat(&mut self) -> Option<&mut Combat> { Some(self) } fn plot_visibility(&mut self, map : &Map) { self.visible_tiles = field_of_view(self.get_position(), 6, map); } fn get_tooltip_text(&self) -> String { format!("Enemy: {}", self.name) } fn blocks_tile(&self) -> bool { true } fn can_be_attacked(&self) -> bool { true } fn is_dead(&self) -> bool { self.fighter.dead } fn is_mob(&self) -> bool { true } fn as_mob_mut(&mut self) ->Option<&mut Mob> { Some(self) } fn get_name(&self) -> String { self.name.to_string() } } pub fn mob_tick(gs : &mut State) { // Build the master map of unavailable tiles gs.map.refresh_blocked(); for e in gs.entities.iter() { if e.blocks_tile() { let pos = e.get_position(); gs.map.set_tile_blocked(gs.map.point2d_to_index(pos)); } } let mut active_mobs : Vec<usize> = Vec::new(); for (i,e) in gs.entities.iter_mut().enumerate() { if e.is_mob() { active_mobs.push(i); } } let ppos = gs.player().position; let mut attacking_mobs : Vec<usize> = Vec::new(); for id in active_mobs { let mob = gs.entities[id].as_mob_mut().unwrap(); if mob.turn_tick(ppos, &mut gs.map) { attacking_mobs.push(id); } } let mut tmp : 
Vec<String> = Vec::new(); for id in attacking_mobs { let attacker_name = gs.entities[id].get_name(); let attacker_power = gs.entities[id].as_combat().unwrap().get_power(); gs.vfx.push(Particle::new(gs.player().get_position(), RGB::named(rltk::RED), RGB::named(rltk::BLACK), 176, 200.0)); let (_xp, result) = attack(attacker_name, attacker_power, gs.player_as_combat()); for r in result { tmp.push(r); } } for s in tmp { gs.add_log_entry(s); } }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/rect.rs
src/game/rect.rs
pub struct Rect { pub x1 : i32, pub x2 : i32, pub y1 : i32, pub y2 : i32 } impl Rect { pub fn new(x1:i32, y1: i32, x2:i32, y2:i32) -> Rect { Rect{x1, y1, x2, y2} } // Returns true if this overlaps with other pub fn intersect(&self, other:&Rect) -> bool { self.x1 <= other.x2 && self.x2 >= other.x1 && self.y1 <= other.y2 && self.y2 >= other.y1 } pub fn center(&self) -> (i32, i32) { ((self.x1 + self.x2)/2, (self.y1 + self.y2)/2) } }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/map.rs
src/game/map.rs
use crate::rltk; use rltk::{ Point, Algorithm2D, BaseMap }; use super::TileType; extern crate serde; use serde::{Serialize, Deserialize}; #[derive(Serialize, Deserialize)] pub struct Map { pub tiles : Vec<TileType>, pub visible : Vec<bool>, pub revealed : Vec<bool>, pub width: i32, pub height: i32, pub blocked : Vec<bool> } impl Map { pub fn new(w:i32, h:i32) -> Map { let mut visible = Vec::new(); let mut blank_map = Vec::new(); let mut revealed = Vec::new(); let mut blocked = Vec::new(); for _i in 0 .. (w*h) { blank_map.push(TileType::Wall); visible.push(false); revealed.push(false); blocked.push(false); } Map{tiles : blank_map, visible, revealed, width: w, height: h, blocked } } pub fn set_visibility(&mut self, vis : &[Point]) { for v in self.visible.iter_mut() { *v = false; } for pt in vis { let idx = self.tile_idx(pt.x, pt.y); if let Some(x) = idx { self.visible[x] = true; self.revealed[x] = true; } } } // Utility function: find the index of a tile at x/y fn tile_idx(&self, x:i32, y:i32) -> Option<usize> { if self.valid_tile(x, y) { Some(((y*self.width)+x) as usize) } else { None } } // Utility function: bounds checking fn valid_tile(&self, x:i32, y:i32) -> bool { x > 0 && x < self.width-1 && y > 0 && y < self.height-1 } // Utility function: is a tile walkable pub fn is_walkable(&self, x:i32, y:i32) -> bool { let idx = self.tile_idx(x, y); match idx { Some(idx) => { match self.tiles[idx] { TileType::Floor => { true } TileType::Wall => { false } TileType::Stairs => { true } } } None => { false } } } // Utility function: is a tile walkable pub fn is_transparent(&self, x:i32, y:i32) -> bool { let idx = self.tile_idx(x, y); match idx { Some(idx) => { match self.tiles[idx] { TileType::Floor => { false } TileType::Wall => { true } TileType::Stairs => { false } } } None => { false } } } pub fn is_tile_visible(&self, pos : Point) -> bool { let idx = self.tile_idx(pos.x, pos.y); match idx { None => { false } Some(x) => { self.visible[x] } } } pub fn 
tile_description(&self, pos : Point) -> String { let idx = self.tile_idx(pos.x, pos.y); match idx { None => { return "".to_string(); } Some(x) => { if self.visible[x] { match self.tiles[x] { TileType::Floor => { return "Floor".to_string() } TileType::Wall => { return "Wall".to_string() } TileType::Stairs => { return "Stairs".to_string() } } } } } "".to_string() } pub fn refresh_blocked(&mut self) { for y in 0..self.height { for x in 0..self.width { let idx = (y * self.width) + x; self.blocked[idx as usize] = !self.is_walkable(x, y); } } } pub fn set_tile_blocked(&mut self, idx : i32) { self.blocked[idx as usize] = true; } pub fn clear_tile_blocked(&mut self, idx : i32) { self.blocked[idx as usize] = false; } pub fn is_exit_valid(&self, x:i32, y:i32) -> bool { if x < 1 || x > self.width-1 || y < 1 || y > self.height-1 { return false; } let idx = (y * self.width) + x; !self.blocked[idx as usize] } pub fn is_tile_blocked(&self, idx: i32) -> bool { self.blocked[idx as usize] } } impl BaseMap for Map { fn is_opaque(&self, idx: i32) -> bool { self.is_transparent(idx % self.width, idx / self.width) } fn get_available_exits(&self, idx:i32) -> Vec<(i32, f32)> { let mut exits : Vec<(i32, f32)> = Vec::new(); let x = idx % self.width; let y = idx / self.width; // Cardinal directions if self.is_exit_valid(x-1, y) { exits.push((idx-1, 1.0)) }; if self.is_exit_valid(x+1, y) { exits.push((idx+1, 1.0)) }; if self.is_exit_valid(x, y-1) { exits.push((idx-self.width, 1.0)) }; if self.is_exit_valid(x, y+1) { exits.push((idx+self.width, 1.0)) }; // Diagonals if self.is_exit_valid(x-1, y-1) { exits.push(((idx-self.width)-1, 1.4)); } if self.is_exit_valid(x+1, y-1) { exits.push(((idx-self.width)+1, 1.4)); } if self.is_exit_valid(x-1, y+1) { exits.push(((idx+self.width)-1, 1.4)); } if self.is_exit_valid(x+1, y+1) { exits.push(((idx+self.width)+1, 1.4)); } exits } fn get_pathing_distance(&self, idx1:i32, idx2:i32) -> f32 { let p1 = Point::new(idx1 % self.width, idx1 / self.width); let p2 = 
Point::new(idx2 % self.width, idx2 / self.width); rltk::DistanceAlg::Pythagoras.distance2d(p1, p2) } } impl Algorithm2D for Map { fn point2d_to_index(&self, pt : Point) -> i32 { (pt.y * self.width) + pt.x } fn index_to_point2d(&self, idx:i32) -> Point { Point{ x: idx % self.width, y: idx / self.width } } }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/ticktype.rs
src/game/ticktype.rs
extern crate serde; use serde::{Serialize, Deserialize}; #[derive(Serialize, Deserialize, PartialEq)] pub enum TickType { None, MainMenu, PlayersTurn, EnemyTurn, GameOver, UseMenu, DropMenu, TargetingItem, LevelUpMenu, CharacterMenu, HelpMenu, WieldMenu, UnequipMenu }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/mod.rs
src/game/mod.rs
use crate::rltk; use rltk::{ GameState }; extern crate serde; mod entity; pub use entity::BaseEntity; mod tiletype; pub use tiletype::TileType; mod ticktype; pub use ticktype::TickType; mod fighter; pub use fighter::Fighter; pub use fighter::Combat; pub use fighter::attack; mod player; pub use player::Player; mod mob; pub use mob::Mob; mod rect; pub use rect::Rect; mod map; pub use map::Map; mod item; use item::Item; use item::ItemType; mod inventory; use inventory::Inventory; mod item_effects; extern crate rand; mod map_builder; mod gui; mod gamestate; pub use gamestate::State; mod random; pub use random::random_choice; mod vfx; pub use vfx::Particle;
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/vfx.rs
src/game/vfx.rs
use crate::rltk; use crate ::rltk::Console; use rltk::{Point, RGB, Rltk}; use super::{ State }; extern crate serde; use serde::{Serialize, Deserialize}; #[derive(Serialize, Deserialize, Clone, Copy)] pub struct Particle { position : Point, lifetime_ms : f32, fg : RGB, bg : RGB, glyph : u8 } impl Particle { pub fn new(position:Point, fg:RGB, bg:RGB, glyph: u8, lifetime_ms : f32) -> Particle { Particle{ position, fg, bg, glyph, lifetime_ms } } pub fn render(&self, ctx : &mut Rltk) { ctx.set(self.position.x, self.position.y, self.fg, self.bg, self.glyph); } } pub fn age_particles(gs : &mut State, ctx : &mut Rltk) { for p in gs.vfx.iter_mut() { p.lifetime_ms -= ctx.frame_time_ms; } gs.vfx.retain(|a| a.lifetime_ms > 0.0); }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/tiletype.rs
src/game/tiletype.rs
extern crate serde; use serde::{Serialize, Deserialize}; #[derive(Serialize, Deserialize, PartialEq)] pub enum TileType { Wall, Floor, Stairs }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/item.rs
src/game/item.rs
use crate::rltk; use rltk::{RGB, Point}; use super::{BaseEntity, Map, random_choice}; extern crate serde; use serde::{Serialize, Deserialize}; #[derive(PartialEq, Clone, Copy, Serialize, Deserialize)] pub enum ItemType { HealthPotion, ZapScroll, FireballScroll, ConfusionScroll, Sword, Shield } #[derive(PartialEq, Clone, Copy, Serialize, Deserialize)] pub enum ItemSlot { MainHand, OffHand } #[derive(PartialEq, Clone, Copy, Serialize, Deserialize)] pub struct Equippable { pub slot : ItemSlot, pub power_bonus : i32, pub defense_bonus : i32 } #[derive(PartialEq, Clone, Serialize, Deserialize)] pub struct Item { pub position : Point, pub glyph: u8, pub fg : RGB, pub name : String, pub item_type : ItemType, pub requires_targeting_mode : bool, pub equippable : Option<Equippable> } impl Item { pub fn new_random(x:i32, y:i32) -> Item { let choice = random_choice(vec![ ("Health".to_string(), 45), ("Zap".to_string(), 10), ("Fireball".to_string(), 10), ("Confusion".to_string(), 10), ("Sword".to_string(), 10), ("Shield".to_string(), 10), ("Dagger".to_string(), 5), ]); if choice == "Health" { Item::new_health_potion(x,y) } else if choice == "Zap" { Item::new_zap_scroll(x,y) } else if choice == "Fireball" { Item::new_fireball_scroll(x,y) } else if choice == "Sword" { Item::new_sword(x,y) } else if choice == "Shield" { Item::new_shield(x,y) } else if choice == "Dagger" { Item::new_dagger(x,y) } else { Item::new_confusion_scroll(x,y) } } pub fn new_health_potion(x:i32, y:i32) -> Item { Item{ position: Point::new(x, y), glyph: 173, fg: RGB::named(rltk::MAGENTA), name: "Health Potion".to_string(), item_type: ItemType::HealthPotion, requires_targeting_mode : false, equippable: None } } pub fn new_zap_scroll(x:i32, y:i32) -> Item { Item{ position: Point::new(x, y), glyph: 63, fg: RGB::named(rltk::CYAN), name: "Zap Scroll".to_string(), item_type: ItemType::ZapScroll, requires_targeting_mode : false, equippable: None } } pub fn new_fireball_scroll(x:i32, y:i32) -> Item { Item{ position: 
Point::new(x, y), glyph: 63, fg: RGB::named(rltk::ORANGE), name: "Fireball Scroll".to_string(), item_type: ItemType::FireballScroll, requires_targeting_mode : true, equippable: None } } pub fn new_confusion_scroll(x:i32, y:i32) -> Item { Item{ position: Point::new(x, y), glyph: 63, fg: RGB::named(rltk::BLUE), name: "Confusion Scroll".to_string(), item_type: ItemType::ConfusionScroll, requires_targeting_mode : false, equippable: None } } pub fn new_sword(x:i32, y:i32) -> Item { Item{ position: Point::new(x, y), glyph: 47, fg: RGB::named(rltk::CYAN), name: "Sword".to_string(), item_type: ItemType::Sword, requires_targeting_mode : false, equippable: Some(Equippable{ slot : ItemSlot::MainHand, power_bonus: 1, defense_bonus: 0 }) } } pub fn new_dagger(x:i32, y:i32) -> Item { Item{ position: Point::new(x, y), glyph: 47, fg: RGB::named(rltk::GREEN), name: "Dagger".to_string(), item_type: ItemType::Sword, requires_targeting_mode : false, equippable: Some(Equippable{ slot : ItemSlot::MainHand, power_bonus: 2, defense_bonus: 0 }) } } pub fn new_shield(x:i32, y:i32) -> Item { Item{ position: Point::new(x, y), glyph: 93, fg: RGB::named(rltk::BROWN1), name: "Shield".to_string(), item_type: ItemType::Shield, requires_targeting_mode : false, equippable: Some(Equippable{ slot : ItemSlot::OffHand, power_bonus: 0, defense_bonus: 1 }) } } } #[typetag::serde(name = "BEItem")] impl BaseEntity for Item { fn get_position(&self) -> Point { self.position } fn get_fg_color(&self) -> RGB { self.fg } fn get_glyph(&self) -> u8 { self.glyph } fn plot_visibility(&mut self, _map : &Map) {} fn get_tooltip_text(&self) -> String { format!("Item: {}", self.name) } fn get_name(&self) -> String { self.name.clone() } fn can_pickup(&self) -> bool { true } fn as_item(&self) -> Option<&Item> { Some(self) } }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/gamestate.rs
src/game/gamestate.rs
use super::{gui, TickType, inventory, Map, Player, map_builder, Combat, BaseEntity, GameState, rltk, player, mob, TileType, Particle, vfx}; use rltk::{Rltk, RGB, Point}; use serde::{Serialize, Deserialize}; use std::fs; use std::fs::File; use std::io::Write; use std::path::Path; #[derive(Serialize, Deserialize)] pub struct State { pub map : Map, pub game_state : TickType, pub log : Vec<String>, pub entities : Vec<Box<BaseEntity>>, pub target_cell : Point, pub targeting_item : i32, pub prev_mouse_for_targeting : Point, pub menu_state : gui::MenuState, pub vfx : Vec<Particle> } impl GameState for State { fn tick(&mut self, ctx : &mut Rltk) { vfx::age_particles(self, ctx); if self.game_state != TickType::MainMenu { gui::render(self, ctx, &self.map); } match self.game_state { TickType::MainMenu => { let result = gui::display_main_menu(ctx, &mut self.menu_state); match result { gui::MainMenuResult::Quit => { ctx.quit() } gui::MainMenuResult::Continue => { let saved = State::load_saved(); self.map = saved.map; self.game_state = saved.game_state; self.log = saved.log; self.entities = saved.entities; self.target_cell = saved.target_cell; self.targeting_item = saved.targeting_item; self.prev_mouse_for_targeting = saved.prev_mouse_for_targeting; } gui::MainMenuResult::New => { let saved = State::new(0); self.map = saved.map; self.game_state = saved.game_state; self.log = saved.log; self.entities = saved.entities; self.target_cell = saved.target_cell; self.targeting_item = saved.targeting_item; self.prev_mouse_for_targeting = saved.prev_mouse_for_targeting; } _ => {} } } TickType::PlayersTurn => { let result = player::player_tick(self, ctx); if result == player::PlayerTickResult::NextMap { // Move to next level self.player_mut().dungeon_level += 1; let mut saved = State::new(self.player().dungeon_level); saved.player_mut().copy_from_other_player(self.player()); self.map = saved.map; self.entities = saved.entities; self.add_log_entry("You descend to the next level, and take a 
moment to rest.".to_string()); } } TickType::EnemyTurn => { mob::mob_tick(self); self.game_state = TickType::PlayersTurn; if self.player().fighter.dead { self.game_state = TickType::GameOver; if Path::new("./savegame.json").exists() { std::fs::remove_file("./savegame.json").expect("Unable to delete file"); } } } TickType::GameOver => { gui::display_game_over_and_handle_quit(ctx, self); } TickType::UseMenu => { inventory::use_item(self, ctx); } TickType::DropMenu => { inventory::drop_item(self, ctx); } TickType::WieldMenu => { inventory::wield_item(self, ctx); } TickType::UnequipMenu => { inventory::unequip_item(self, ctx); } TickType::TargetingItem => { inventory::item_targeting(self, ctx); } TickType::LevelUpMenu => { gui::handle_level_up(ctx, self); } TickType::CharacterMenu => { gui::display_character_info(ctx, self); } TickType::HelpMenu => { gui::display_help_info(ctx, self); } TickType::None => {} } } } impl State { pub fn new_menu() -> State { State{ map: Map::new(80, 43), game_state: TickType::MainMenu, log: Vec::new(), entities : Vec::new(), target_cell : Point::new(-1,-1), targeting_item : -1, prev_mouse_for_targeting : Point::new(-1,-1), menu_state: gui::MenuState::new(), vfx : Vec::new() } } pub fn load_saved() -> State { let data = fs::read_to_string("./savegame.json").expect("Unable to read file"); let loaded : State = serde_json::from_str(&data).unwrap(); std::fs::remove_file("./savegame.json").expect("Unable to delete file"); loaded } pub fn new(depth: i32) -> State { if Path::new("./savegame.json").exists() { std::fs::remove_file("./savegame.json").expect("Unable to delete file"); } let mut entities : Vec<Box<BaseEntity>> = Vec::new(); let mut map = Map::new(80, 43); let rooms = map_builder::random_rooms_tut3(&mut map); let (player_x, player_y) = rooms[0].center(); let mobs = map_builder::spawn_mobs(&rooms, depth); let items = map_builder::spawn_items(&rooms, &mobs, depth); let mut player = Player::new(player_x, player_y, 64, 
RGB::named(rltk::YELLOW)); let stairs_pos = rooms[rooms.len()-1].center(); map.tiles[((stairs_pos.1 * 80) + stairs_pos.0) as usize] = TileType::Stairs; // Start with a viewshed player.plot_visibility(&map); map.set_visibility(&player.visible_tiles); entities.push(Box::new(player)); for m in mobs { entities.push(Box::new(m)); } for i in items { entities.push(Box::new(i)); } State{ map, game_state: TickType::PlayersTurn, log: Vec::new(), entities, target_cell : Point::new(-1,-1), targeting_item : -1, prev_mouse_for_targeting : Point::new(-1,-1), menu_state : gui::MenuState::new(), vfx : Vec::new() } } pub fn player(&self) -> &Player { self.entities[0].as_player().unwrap() } pub fn player_mut(&mut self) -> &mut Player { self.entities[0].as_player_mut().unwrap() } pub fn player_as_combat(&mut self) -> &mut Combat { self.entities[0].as_combat().unwrap() } pub fn update_visibility(&mut self) { for e in self.entities.iter_mut() { e.plot_visibility(&self.map); } let vt = self.player().visible_tiles.clone(); self.map.set_visibility(&vt); } pub fn add_log_entry(&mut self, line : String) { self.log.insert(0, line.clone()); while self.log.len() > 5 { self.log.remove(4); } } pub fn save(&self) { let data = serde_json::to_string(&self).unwrap(); let mut f = File::create("./savegame.json").expect("Unable to create file"); f.write_all(data.as_bytes()).expect("Unable to write data"); } }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
thebracket/rustyroguelike
https://github.com/thebracket/rustyroguelike/blob/06f6ef51829b07962e58896e0fad13a78bd1a92d/src/game/map_builder.rs
src/game/map_builder.rs
use super::{ Map, Rect, TileType, Mob, Item }; use rand::Rng; use std::cmp::{max, min}; const ROOM_MAX_SIZE : i32 = 10; const ROOM_MIN_SIZE : i32 = 6; const MAX_ROOMS : i32 = 30; pub fn random_rooms_tut3(map : &mut Map) -> Vec<Rect> { let mut rng = rand::thread_rng(); let mut rooms : Vec<Rect> = Vec::new(); for _i in 1..MAX_ROOMS { let w = rng.gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE); let h = rng.gen_range(ROOM_MIN_SIZE, ROOM_MAX_SIZE); let x = rng.gen_range(1, map.width - w - 1); let y = rng.gen_range(1, map.height - h - 1); let room_candidate = Rect::new(x, y, x+w, y+h); let mut collides = false; for room in rooms.iter() { if room_candidate.intersect(room) { collides = true; } } if !collides { apply_room(map, &room_candidate); if !rooms.is_empty() { let (new_x, new_y) = room_candidate.center(); let (prev_x, prev_y) = rooms[rooms.len()-1].center(); if rng.gen_range(0,1)==1 { apply_horizontal_tunnel(map, prev_x, new_x, prev_y); apply_vertical_tunnel(map, prev_y, new_y, new_x); } else { apply_vertical_tunnel(map, prev_y, new_y, prev_x); apply_horizontal_tunnel(map, prev_x, new_x, new_y); } } rooms.push(room_candidate); } } rooms } // Applies a rectangle room to the map fn apply_room(map : &mut Map, rect : &Rect) { for y in min(rect.y1, rect.y2) .. max(rect.y1, rect.y2) { for x in min(rect.x1, rect.x2) .. 
max(rect.x1, rect.x2) { let idx = (y * map.width) + x; if idx > 0 && idx < map.width*map.height { map.tiles[idx as usize] = TileType::Floor; } } } } fn apply_horizontal_tunnel(map: &mut Map, x1:i32, x2:i32, y:i32) { for x in min(x1,x2) ..= max(x1,x2) { let idx = (y * map.width) + x; if idx > 0 && idx < map.width*map.height { map.tiles[idx as usize] = TileType::Floor; } } } fn apply_vertical_tunnel(map: &mut Map, y1:i32, y2:i32, x:i32) { for y in min(y1,y2) ..= max(y1,y2) { let idx = (y * map.width) + x; if idx > 0 && idx < map.width*map.height { map.tiles[idx as usize] = TileType::Floor; } } } pub fn spawn_mobs(rooms: &[Rect], dungeon_level : i32) -> Vec<Mob> { let mut rng = rand::thread_rng(); let mut mobs : Vec<Mob> = Vec::new(); for i in 1 .. rooms.len() { let number_of_mobs = rng.gen_range(1, dungeon_level+3); if number_of_mobs > 0 { for _mobn in 1 .. number_of_mobs { let mob_x = rng.gen_range(rooms[i].x1+1, rooms[i].x2-1); let mob_y = rng.gen_range(rooms[i].y1+1, rooms[i].y2-1); let mut found = false; for existing_mob in mobs.iter() { if existing_mob.position.x == mob_x && existing_mob.position.y == mob_y { found = true; } } if !found { let mob = Mob::new_random(mob_x, mob_y); mobs.push(mob); } } } } mobs } pub fn spawn_items(rooms: &[Rect], mobs: &[Mob], dungeon_level : i32) -> Vec<Item> { let mut rng = rand::thread_rng(); let mut items : Vec<Item> = Vec::new(); for i in 1 .. rooms.len() { let number_of_items = rng.gen_range(1, dungeon_level+3); if number_of_items > 0 { for _itemn in 1 .. number_of_items { let item_x = rng.gen_range(rooms[i].x1+1, rooms[i].x2-1); let item_y = rng.gen_range(rooms[i].y1+1, rooms[i].y2-1); let mut found = false; for existing_mob in mobs.iter() { if existing_mob.position.x == item_x && existing_mob.position.y == item_y { found = true; } } if !found { let item = Item::new_random(item_x, item_y); items.push(item); } } } } items }
rust
MIT
06f6ef51829b07962e58896e0fad13a78bd1a92d
2026-01-04T20:19:35.207300Z
false
acidnik/ppcp
https://github.com/acidnik/ppcp/blob/258f91eade20e9bbfb48fa022b0f028e056556ac/src/app.rs
src/app.rs
use clap::ArgMatches; use indicatif::*; use std::ops::{Deref, DerefMut}; use std::path::PathBuf; use std::sync::mpsc::*; use std::thread; use std::time::*; use crate::avgspeed::*; use crate::copy::*; use anyhow::Result; /// utility to track changes of variable #[derive(Default, Clone)] pub struct TrackChange<T: PartialEq> { val: T, changed: bool, } impl<T: PartialEq> TrackChange<T> { pub fn new(val: T) -> Self { TrackChange { val, changed: false, } } pub fn changed(&mut self) -> bool { let r = self.changed; self.changed = false; r } pub fn set(&mut self, val: T) { if val == self.val { return; } self.changed = true; self.val = val; } } impl<T: PartialEq> Deref for TrackChange<T> { type Target = T; fn deref(&self) -> &T { &self.val } } impl<T: PartialEq> DerefMut for TrackChange<T> { fn deref_mut(&mut self) -> &mut T { self.changed = true; // XXX not checking prev value &mut self.val } } pub struct OperationStats { files_done: u32, bytes_done: u64, files_total: TrackChange<u64>, bytes_total: TrackChange<u64>, current_total: TrackChange<u64>, current_done: u64, current_path: TrackChange<PathBuf>, current_start: Instant, } impl Default for OperationStats { fn default() -> Self { OperationStats { files_done: 0, bytes_done: 0, files_total: TrackChange::new(0), bytes_total: TrackChange::new(0), current_total: TrackChange::new(0), current_done: 0, current_path: TrackChange::new(PathBuf::new()), current_start: Instant::now(), } } } struct SourceWalker {} impl SourceWalker { fn run(tx: Sender<(PathBuf, PathBuf, u64, std::fs::Permissions, bool)>, sources: Vec<PathBuf>) { thread::spawn(move || { for src in sources { // let src = PathAbs::new(&src).unwrap().as_path().to_owned(); let src = src.canonicalize().unwrap(); for entry in walkdir::WalkDir::new(src.clone()) { match entry { Ok(entry) => { if entry.file_type().is_file() || entry.path_is_symlink() { let m = entry.metadata().unwrap(); let size = m.len(); let perm = m.permissions(); let is_link = m.file_type().is_symlink(); 
tx.send((src.clone(), entry.into_path(), size, perm, is_link)) .expect("send"); } } Err(_) => { // TODO } } } } }); } } pub struct App { pb_curr: ProgressBar, pb_files: ProgressBar, pb_bytes: ProgressBar, pb_name: ProgressBar, last_update: Instant, avg_speed: AvgSpeed, } impl App { pub fn new() -> Self { let pb_name = ProgressBar::with_draw_target(Some(10_u64), ProgressDrawTarget::stdout()); // \u{00A0} (nbsp) to make indicatif draw lines as wide as possible // otherwise it leaves leftovers from prev lines at the end of lines pb_name.set_style( ProgressStyle::default_spinner() .template("{spinner} {wide_msg} \u{00A0}") .unwrap(), ); let pb_curr = ProgressBar::new(10); pb_curr.set_style(ProgressStyle::default_bar() .template("current {bar:40.} {bytes:>10} / {total_bytes:<10} {elapsed:>5} ETA {eta} {wide_msg} \u{00A0}").unwrap() ); let pb_files = ProgressBar::with_draw_target(Some(10_u64), ProgressDrawTarget::stdout()); pb_files.set_style( ProgressStyle::default_bar() .template("files {bar:40} {pos:>10} / {len:<10} {wide_msg} \u{00A0}") .unwrap(), ); let pb_bytes = ProgressBar::with_draw_target(Some(10), ProgressDrawTarget::stdout()); pb_bytes.set_style(ProgressStyle::default_bar() .template("bytes {bar:40} {bytes:>10} / {total_bytes:<10} {elapsed:>5} ETA {eta} {wide_msg} \u{00A0}").unwrap() // .progress_chars("=> ") ); let multi_pb = MultiProgress::new(); let pb_name = multi_pb.add(pb_name); let pb_curr = multi_pb.add(pb_curr); let pb_files = multi_pb.add(pb_files); let pb_bytes = multi_pb.add(pb_bytes); multi_pb.set_move_cursor(true); App { pb_curr, pb_files, pb_bytes, pb_name, last_update: Instant::now(), avg_speed: AvgSpeed::new(), } } // fn error_ask(&self, err: String) -> OperationControl { // OperationControl::Skip // TODO // } fn update_progress(&mut self, stats: &mut OperationStats) { // return; if Instant::now().duration_since(self.last_update) < Duration::from_millis(97) { return; } self.last_update = Instant::now(); self.pb_name.tick(); // spin the 
spinner if stats.current_path.changed() { self.pb_name .set_message(format!("{}", stats.current_path.display())); self.pb_curr.set_length(*stats.current_total); stats.current_start = Instant::now(); // This is inaccurate. Init current_start in copy worker and send instant with path? self.pb_curr.reset_elapsed(); self.pb_curr.reset_eta(); } self.pb_curr.set_position(stats.current_done); self.avg_speed.add(stats.bytes_done); self.pb_curr .set_message(format!("{}/s", HumanBytes(self.avg_speed.get()))); if stats.files_total.changed() { self.pb_files.set_length(*stats.files_total); } self.pb_files.set_position(u64::from(stats.files_done)); if stats.bytes_total.changed() { self.pb_bytes.set_length(*stats.bytes_total); } self.pb_bytes.set_position(stats.bytes_done); } pub fn run(&mut self, matches: &ArgMatches) -> Result<()> { // for sending errors, progress info and other events from worker to ui: let (worker_tx, worker_rx) = channel::<WorkerEvent>(); // TODO for sending user input (retry/skip/abort) to worker: let (_user_tx, user_rx) = channel::<OperationControl>(); // fs walker sends files to operation let (src_tx, src_rx) = channel(); let operation = OperationCopy::new(matches, user_rx, worker_tx, src_rx)?; let search_path = operation.search_path(); assert!(!search_path.is_empty()); SourceWalker::run(src_tx, search_path); let mut stats: OperationStats = Default::default(); let start = Instant::now(); while let Ok(event) = worker_rx.recv() { match event { WorkerEvent::Stat(StatsChange::FileDone) => { stats.files_done += 1 } WorkerEvent::Stat(StatsChange::BytesTotal(n)) => { *stats.bytes_total += n; *stats.files_total += 1; }, WorkerEvent::Stat(StatsChange::Current(p, chunk, done, todo)) => { stats.current_path.set(p); stats.current_total.set(todo); stats.current_done = done; stats.bytes_done += u64::from(chunk); } // WorkerEvent::Status(OperationStatus::Error(err)) => { // let answer = self.error_ask(err); // user_tx.send(answer).expect("send"); // }, // _ => {}, } 
self.update_progress(&mut stats); } self.pb_curr.finish(); self.pb_files.finish(); self.pb_bytes.finish(); self.pb_name.finish(); let ela = Instant::now().duration_since(start); println!( "copied {} files ({}) in {} {}/s", *stats.files_total, HumanBytes(*stats.bytes_total), HumanDuration(ela), HumanBytes(get_speed(*stats.bytes_total, &ela)) ); Ok(()) } }
rust
MIT
258f91eade20e9bbfb48fa022b0f028e056556ac
2026-01-04T20:19:37.965610Z
false
acidnik/ppcp
https://github.com/acidnik/ppcp/blob/258f91eade20e9bbfb48fa022b0f028e056556ac/src/avgspeed.rs
src/avgspeed.rs
use std::collections::VecDeque; use std::ops::*; use std::time::{Duration, Instant}; /// moving (rolling) average pub struct RollingAverage<T> { hist: VecDeque<T>, sum: T, size: usize, } impl<T> RollingAverage<T> // moments like this I miss dating duck typing where T: AddAssign + SubAssign + Div + std::convert::From<u64> + std::convert::From<<T as std::ops::Div>::Output> + Copy, { pub fn new(size: usize) -> Self { RollingAverage { hist: VecDeque::with_capacity(size), sum: 0_u64.into(), size, } } pub fn add(&mut self, val: T) { self.hist.push_back(val); self.sum += val; if self.hist.len() > self.size { self.sum -= self.hist.pop_front().unwrap(); } } pub fn get(&self) -> T { (self.sum / (self.hist.len() as u64).into()).into() } } pub struct AvgSpeed { avg: RollingAverage<u64>, prev_bytes: u64, last_chunk: Instant, } impl AvgSpeed { pub fn new() -> Self { AvgSpeed { avg: RollingAverage::new(100), prev_bytes: 0, last_chunk: Instant::now(), } } pub fn add(&mut self, total_bytes: u64) { let db = total_bytes - self.prev_bytes; self.avg.add(get_speed( db, &Instant::now().duration_since(self.last_chunk), )); self.last_chunk = Instant::now(); self.prev_bytes = total_bytes; } pub fn get(&self) -> u64 { self.avg.get() } } pub fn get_speed(x: u64, ela: &Duration) -> u64 { if *ela >= Duration::from_nanos(1) && x < std::u64::MAX / 1_000_000_000 { x * 1_000_000_000 / ela.as_nanos() as u64 } else if *ela >= Duration::from_micros(1) && x < std::u64::MAX / 1_000_000 { x * 1_000_000 / ela.as_micros() as u64 } else if *ela >= Duration::from_millis(1) && x < std::u64::MAX / 1_000 { x * 1_000 / ela.as_millis() as u64 } else if *ela >= Duration::from_secs(1) { x / ela.as_secs() } else { // what the hell are you? std::u64::MAX } }
rust
MIT
258f91eade20e9bbfb48fa022b0f028e056556ac
2026-01-04T20:19:37.965610Z
false
acidnik/ppcp
https://github.com/acidnik/ppcp/blob/258f91eade20e9bbfb48fa022b0f028e056556ac/src/copy.rs
src/copy.rs
use anyhow::Result; use clap::ArgMatches; use std::collections::HashSet; use std::fs::{self, *}; use std::io::{self, *}; use std::path::PathBuf; use std::sync::mpsc::{channel, Receiver, Sender}; use std::thread; use std::time::Duration; use thiserror::Error; #[derive(Clone, PartialEq, Debug)] pub enum StatsChange { FileDone, BytesTotal(u64), Current(PathBuf, u32, u64, u64), } #[derive(Clone, PartialEq, Debug)] pub enum OperationStatus { // Running, // Error(String), // Done, } pub enum OperationControl { // Abort, // Skip, // Retry, // SkipAll, } #[derive(Debug)] pub enum WorkerEvent { Stat(StatsChange), // Status(OperationStatus), } pub trait Operation { fn search_path(&self) -> Vec<PathBuf>; } pub struct OperationCopy { sources: Vec<PathBuf>, } impl Operation for OperationCopy { fn search_path(&self) -> Vec<PathBuf> { self.sources.clone() } } #[derive(Error, Debug)] pub enum OperationError { #[error("Arguments missing")] ArgumentsMissing, #[error("Can not copy directory {src:?} to file {dest:?}")] DirOverFile { src: String, dest: String }, } impl OperationCopy { pub fn new( matches: &ArgMatches, _user_rx: Receiver<OperationControl>, worker_tx: Sender<WorkerEvent>, src_rx: Receiver<(PathBuf, PathBuf, u64, Permissions, bool)>, ) -> Result<Self> { let source: Vec<PathBuf> = matches .get_many::<PathBuf>("source") .ok_or(OperationError::ArgumentsMissing)? .cloned() .collect(); let dest: &PathBuf = matches .get_one::<PathBuf>("destination") .ok_or(OperationError::ArgumentsMissing)?; let dest_parent = dest .parent() .ok_or_else(|| io::Error::new(io::ErrorKind::Other, "dest.parent?"))? .to_owned(); if !dest_parent.exists() { fs::create_dir_all(&dest_parent)?; } let (dest_is_file, dest_dir) = if !dest.exists() { // if dest not exists - consider it a dir // cp /path/to/dir . -> must create dir and set it as dest // cp /dir1 /dir2 /file . 
-> cp /dir1/* ./dir; cp /dir2/* ./dir2; cp /file ./ (false, dest.clone()) } else { let meta = fs::symlink_metadata(dest)?; if meta.is_file() { // cp /path/to/file.txt ./here/file.txt: dest_dir = ./here (true, dest_parent) } else { // cp /path/to/dir ./here/foo -> copy to/dir/* ./here/foo (false, dest.clone()) } }; for src in source.iter() { let meta = fs::symlink_metadata(src)?; if dest_is_file && meta.is_dir() { Err(OperationError::DirOverFile { src: src.display().to_string(), dest: dest.display().to_string(), })? } } if !dest_is_file && !dest_dir.exists() { fs::create_dir_all(&dest_dir)? } let dest_dir = dest_dir.canonicalize()?; let (q_tx, q_rx) = channel::<(PathBuf, PathBuf, u64, Permissions, bool)>(); // source_path, source_file, total, let (d_tx, d_rx) = channel::<(PathBuf, u32, u64, u64)>(); // src_path, chunk, done, total CopyWorker::run(dest_dir, d_tx, q_rx); // MockCopyWorker::run(dest_dir, d_tx, q_rx); { let worker_tx = worker_tx.clone(); thread::spawn(move || { for (p, chunk, done, todo) in d_rx.iter() { worker_tx .send(WorkerEvent::Stat(StatsChange::Current( p, chunk, done, todo, ))) .expect("send"); if done >= todo { worker_tx .send(WorkerEvent::Stat(StatsChange::FileDone)) .expect("send"); } } }); } thread::spawn(move || { // let mut question = "".to_string(); // let mut skip_all = true; while let Ok((src, path, size, perm, is_link)) = src_rx.recv() { worker_tx .send(WorkerEvent::Stat(StatsChange::BytesTotal(size))) .expect("send"); q_tx.send((src, path, size, perm, is_link)).expect("send"); } }); Ok(OperationCopy { sources: source }) } } struct CopyWorker {} impl CopyWorker { fn run( dest: PathBuf, tx: Sender<(PathBuf, u32, u64, u64)>, rx: Receiver<(PathBuf, PathBuf, u64, Permissions, bool)>, ) { thread::spawn(move || { let mut mkdird = HashSet::new(); for (src, p, sz, perm, is_link) in rx.iter() { let r = if src.is_file() { p.file_name().unwrap().into() } else { // cp /dir1 d/ // src = /dir1 p = /dir1/inner/inner2/f.txt // dest_dir = 
d/dir1/inner/inner2/f.txt // diff(/dir1 /dir1/inner/inner2/f.txt) = inner/inner2/f.txt let p_parent: PathBuf = src.file_name().unwrap().into(); p_parent.join(pathdiff::diff_paths(&p, &src).unwrap()) }; let dest_file = dest.join(r.clone()); let dest_dir = dest_file.parent().unwrap().to_owned(); if !mkdird.contains(&dest_dir) { // TODO : this will make dir foo/bar/baz and then foo/bar again fs::create_dir_all(&dest_dir).unwrap(); mkdird.insert(dest_dir.clone()); } if is_link { let link_dest = std::fs::read_link(&p).unwrap(); std::os::unix::fs::symlink(&link_dest, &dest_file).unwrap_or_else(|err| { eprintln!("Error creating symlink: {}", err); }); // FIXME tx.send((p, sz as u32, sz, sz)).unwrap(); continue; } let fwh = File::create(&dest_file).unwrap(); fwh.set_permissions(perm).unwrap_or(()); // works on unix fs only let mut fr = BufReader::new(File::open(&p).unwrap()); let mut fw = BufWriter::new(fwh); let mut buf = vec![0; 10_000_000]; let mut s: u64 = 0; loop { match fr.read(&mut buf) { Ok(ds) => { s += ds as u64; if ds == 0 { break; } fw.write_all(&buf[..ds]).unwrap(); tx.send((p.clone(), ds as u32, s, sz)).unwrap(); } Err(e) => { println!("{:?}", e); break; } } } } }); } } struct _MockCopyWorker {} impl _MockCopyWorker { fn _run( _dest: PathBuf, tx: Sender<(PathBuf, u32, u64, u64)>, rx: Receiver<(PathBuf, PathBuf, u64)>, ) { let chunk = 1_048_576; thread::spawn(move || { for (_src, p, sz) in rx.iter() { let mut s = 0; while s < sz { let ds = if s + chunk > sz { sz - s } else { chunk }; s += ds; let delay = Duration::from_micros(ds / chunk * 100_000); tx.send((p.clone(), ds as u32, s, sz)).unwrap(); thread::sleep(delay); } } }); } }
rust
MIT
258f91eade20e9bbfb48fa022b0f028e056556ac
2026-01-04T20:19:37.965610Z
false
acidnik/ppcp
https://github.com/acidnik/ppcp/blob/258f91eade20e9bbfb48fa022b0f028e056556ac/src/main.rs
src/main.rs
use clap::{command, value_parser, ArgAction}; use std::error; use std::path::PathBuf; mod app; mod avgspeed; mod copy; use clap::Arg; fn main() -> Result<(), Box<dyn error::Error>> { let matches = command!() .version("0.0.1") .author("Nikita Bilous <nikita@bilous.me>") .about("Copy files in console with progress bar") .arg( Arg::new("source") .required(true) .num_args(1..) .action(ArgAction::Append) .index(1) .value_parser(value_parser!(PathBuf)), ) .arg( Arg::new("destination") .required(true) .index(2) // .last(true) .value_parser(value_parser!(PathBuf)), ) .get_matches(); let mut app = app::App::new(); app.run(&matches)?; Ok(()) }
rust
MIT
258f91eade20e9bbfb48fa022b0f028e056556ac
2026-01-04T20:19:37.965610Z
false
AlexAltea/milli-py
https://github.com/AlexAltea/milli-py/blob/a2a92e14710b2dc9a9505f2534ad2d389153041a/src/lib.rs
src/lib.rs
extern crate milli as mi; use std::ops::Deref; use pyo3::prelude::*; use pyo3::types::*; use mi::{DocumentId, Index, Search}; use mi::documents::{DocumentsBatchBuilder, DocumentsBatchReader}; use mi::update::{ClearDocuments, DocumentAdditionResult, IndexerConfig, IndexDocumentsConfig, IndexDocumentsMethod, IndexDocuments}; use serde::Deserializer; mod conv; // Helpers macro_rules! obkv_to_pydict { ($self:ident, $py:ident, $rtxn:ident, $obkv:ident) => {{ let fields = $self.index.fields_ids_map(&$rtxn).unwrap(); let dict = PyDict::new($py); for (id, bytes) in $obkv.iter() { let key = fields.name(id); let mut deserializer = serde_json::Deserializer::from_slice(&bytes); let value = conv::ObkvValue::new($py); let value = deserializer.deserialize_any(value).unwrap(); dict.set_item(key, value).unwrap(); } dict }}; } #[pyclass(name="Index")] struct PyIndex { index: Index, } #[pymethods] impl PyIndex { #[new] fn new(path: String, map_size: Option<usize>) -> Self { let mut options = mi::heed::EnvOpenOptions::new(); if map_size.is_some() { options.map_size(map_size.unwrap()); } let index = Index::new(options, &path).unwrap(); return PyIndex{ index }; } fn add_documents(&self, py: Python<'_>, list: &PyList, update_method: Option<PyIndexDocumentsMethod>) -> PyResult<PyDocumentAdditionResult> { let mut config = IndexDocumentsConfig::default(); if update_method.is_some() { config.update_method = update_method.unwrap().into(); } let mut wtxn = self.write_txn().unwrap(); let indexer_config = IndexerConfig::default(); let builder = IndexDocuments::new( &mut wtxn, &self, &indexer_config, config.clone(), |_| (), || false).unwrap(); // Convert Python array into Vec<milli::Object> let list = list.to_object(py); let list = conv::to_json(py, &list)?; let mut docbuilder = DocumentsBatchBuilder::new(Vec::new()); for item in list.as_array().unwrap() { let object = item.as_object().unwrap(); docbuilder.append_json_object(object).unwrap(); } let vector = docbuilder.into_inner().unwrap(); let 
reader = DocumentsBatchReader::from_reader(std::io::Cursor::new(vector)).unwrap(); let (builder, _user_error) = builder.add_documents(reader).unwrap(); let result = builder.execute().unwrap(); wtxn.commit().unwrap(); Ok(result.into()) } fn all_documents(&self, py: Python<'_>) -> PyResult<Py<PyIterator>> { let rtxn = self.read_txn().unwrap(); let docs = self.index.all_documents(&rtxn).unwrap(); // TODO: Wrap as a Python iterator without converting to list let list = PyList::empty(py); for document in docs { let (docid, obkv) = document.unwrap(); let doc = obkv_to_pydict!(self, py, rtxn, obkv); let tuple = PyTuple::new(py, &[docid.into_py(py), doc.into()]); list.append(tuple).unwrap(); } let iter = PyIterator::from_object(py, list).unwrap(); Ok(iter.into()) } fn clear_documents(&self) -> PyResult<u64> { let mut wtxn = self.write_txn().unwrap(); let builder = ClearDocuments::new(&mut wtxn, self); let result = builder.execute().unwrap(); wtxn.commit().unwrap(); Ok(result.into()) } fn delete_documents(&self, ids: Vec<String>) -> PyResult<u64> { let config = IndexDocumentsConfig::default(); let indexer_config = IndexerConfig::default(); let mut wtxn = self.write_txn().unwrap(); let builder = IndexDocuments::new( &mut wtxn, &self, &indexer_config, config.clone(), |_| (), || false).unwrap(); let (builder, removed) = builder.remove_documents(ids).unwrap(); let _result = builder.execute().unwrap(); wtxn.commit().unwrap(); Ok(removed.unwrap().into()) } fn get_document(&self, py: Python<'_>, id: DocumentId) -> PyResult<Py<PyDict>> { let rtxn = self.read_txn().unwrap(); let (_docid, obkv) = self.index.documents(&rtxn, [id]).unwrap()[0]; let dict = obkv_to_pydict!(self, py, rtxn, obkv); Ok(dict.into()) } fn get_documents(&self, py: Python<'_>, ids: Vec<DocumentId>) -> PyResult<Py<PyList>> { let rtxn = self.read_txn().unwrap(); let docs = self.documents(&rtxn, ids).unwrap(); let list = PyList::empty(py); for (_docid, obkv) in docs { list.append(obkv_to_pydict!(self, py, rtxn, 
obkv)).unwrap(); } Ok(list.into()) } fn primary_key(&self) -> PyResult<Option<String>> { let rtxn = self.read_txn().unwrap(); let result = self.index.primary_key(&rtxn).unwrap(); let converted_result = result.map(|s| s.to_string()); Ok(converted_result) } fn search(&self, query: String) -> Vec<DocumentId> { let rtxn = self.read_txn().unwrap(); let mut search = Search::new(&rtxn, &self); search.query(query); let results = search.execute().unwrap(); return results.documents_ids; } } impl Deref for PyIndex { type Target = Index; fn deref(&self) -> &Self::Target { &self.index } } impl Drop for PyIndex { fn drop(&mut self) { self.index.clone().prepare_for_closing(); } } #[derive(Clone)] #[pyclass(name="IndexDocumentsMethod")] enum PyIndexDocumentsMethod { ReplaceDocuments, UpdateDocuments, } impl From<PyIndexDocumentsMethod> for IndexDocumentsMethod { fn from(value: PyIndexDocumentsMethod) -> Self { match value { PyIndexDocumentsMethod::ReplaceDocuments => Self::ReplaceDocuments, PyIndexDocumentsMethod::UpdateDocuments => Self::UpdateDocuments, } } } #[pyclass(name="DocumentAdditionResult")] struct PyDocumentAdditionResult { #[pyo3(get, set)] indexed_documents: u64, #[pyo3(get, set)] number_of_documents: u64, } impl From<DocumentAdditionResult> for PyDocumentAdditionResult { fn from(value: DocumentAdditionResult) -> Self { PyDocumentAdditionResult{ indexed_documents: value.indexed_documents, number_of_documents: value.number_of_documents, } } } #[pymodule] fn milli(_py: Python<'_>, m: &PyModule) -> PyResult<()> { m.add_class::<PyIndex>()?; m.add_class::<PyIndexDocumentsMethod>()?; m.add_class::<PyDocumentAdditionResult>()?; Ok(()) }
rust
MIT
a2a92e14710b2dc9a9505f2534ad2d389153041a
2026-01-04T20:19:37.508142Z
false
AlexAltea/milli-py
https://github.com/AlexAltea/milli-py/blob/a2a92e14710b2dc9a9505f2534ad2d389153041a/src/conv.rs
src/conv.rs
use std::collections::BTreeMap; use std::fmt; use std::marker::PhantomData; use pyo3::exceptions::PyTypeError; use pyo3::prelude::*; use pyo3::types::{PyDict, PyFloat, PyList, PyTuple}; use serde::de::{DeserializeSeed, Deserializer, MapAccess, SeqAccess, Visitor}; // From https://github.com/mozilla-services/python-canonicaljson-rs/blob/62599b246055a1c8a78e5777acdfe0fd594be3d8/src/lib.rs#L87-L167 #[derive(Debug)] pub enum PyCanonicalJSONError { InvalidConversion { error: String }, PyErr { error: String }, DictKeyNotSerializable { typename: String }, InvalidFloat { value: PyObject }, InvalidCast { typename: String }, } impl From<pyo3::PyErr> for PyCanonicalJSONError { fn from(error: pyo3::PyErr) -> PyCanonicalJSONError { PyCanonicalJSONError::PyErr { error: format!("{:?}", error), } } } impl From<PyCanonicalJSONError> for pyo3::PyErr { fn from(e: PyCanonicalJSONError) -> pyo3::PyErr { match e { PyCanonicalJSONError::InvalidConversion { error } => { PyErr::new::<PyTypeError, _>(format!("Conversion error: {:?}", error)) } PyCanonicalJSONError::PyErr { error } => { PyErr::new::<PyTypeError, _>(format!("Python Runtime exception: {}", error)) } PyCanonicalJSONError::DictKeyNotSerializable { typename } => { PyErr::new::<PyTypeError, _>(format!( "Dictionary key is not serializable: {}", typename )) } PyCanonicalJSONError::InvalidFloat { value } => { PyErr::new::<PyTypeError, _>(format!("Invalid float: {:?}", value)) } PyCanonicalJSONError::InvalidCast { typename } => { PyErr::new::<PyTypeError, _>(format!("Invalid type: {}", typename)) } } } } pub fn to_json(py: Python, obj: &PyObject) -> Result<serde_json::Value, PyCanonicalJSONError> { macro_rules! return_cast { ($t:ty, $f:expr) => { if let Ok(val) = obj.downcast::<$t>(py) { return $f(val); } }; } macro_rules! 
return_to_value { ($t:ty) => { if let Ok(val) = obj.extract::<$t>(py) { return serde_json::value::to_value(val).map_err(|error| { PyCanonicalJSONError::InvalidConversion { error: format!("{}", error), } }); } }; } if obj.as_ref(py).eq(&py.None())? { return Ok(serde_json::Value::Null); } return_to_value!(String); return_to_value!(bool); return_to_value!(u64); return_to_value!(i64); return_cast!(PyDict, |x: &PyDict| { let mut map = serde_json::Map::new(); for (key_obj, value) in x.iter() { let key = if key_obj.eq(py.None().as_ref(py))? { Ok("null".to_string()) } else if let Ok(val) = key_obj.extract::<bool>() { Ok(if val { "true".to_string() } else { "false".to_string() }) } else if let Ok(val) = key_obj.str() { Ok(val.to_string()) } else { Err(PyCanonicalJSONError::DictKeyNotSerializable { typename: key_obj .to_object(py) .as_ref(py) .get_type() .name()? .to_string(), }) }; map.insert(key?, to_json(py, &value.to_object(py))?); } Ok(serde_json::Value::Object(map)) }); return_cast!(PyList, |x: &PyList| Ok(serde_json::Value::Array( x.iter().map(|x| to_json(py, &x.to_object(py)).unwrap()).collect() ))); return_cast!(PyTuple, |x: &PyTuple| Ok(serde_json::Value::Array( x.iter().map(|x| to_json(py, &x.to_object(py)).unwrap()).collect() ))); return_cast!(PyFloat, |x: &PyFloat| { match serde_json::Number::from_f64(x.value()) { Some(n) => Ok(serde_json::Value::Number(n)), None => Err(PyCanonicalJSONError::InvalidFloat { value: x.to_object(py), }), } }); // At this point we can't cast it, set up the error object Err(PyCanonicalJSONError::InvalidCast { typename: obj.as_ref(py).get_type().name()?.to_string(), }) } // From https://github.com/mre/hyperjson/blob/87335d442869832b46e7e9f10800a27360dd8169/src/lib.rs#L397 #[derive(Copy, Clone)] pub struct ObkvValue<'a> { py: Python<'a>, } impl<'a> ObkvValue<'a> { pub fn new(py: Python<'a>) -> ObkvValue<'a> { ObkvValue { py } } } impl<'de, 'a> DeserializeSeed<'de> for ObkvValue<'a> { type Value = PyObject; fn deserialize<D>(self, 
deserializer: D) -> Result<Self::Value, D::Error> where D: Deserializer<'de>, { deserializer.deserialize_any(self) } } impl<'de, 'a> Visitor<'de> for ObkvValue<'a> { type Value = PyObject; fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result { formatter.write_str("any valid JSON value") } fn visit_bool<E>(self, value: bool) -> Result<Self::Value, E> where E: serde::de::Error { Ok(value.to_object(self.py)) } fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E> where E: serde::de::Error { Ok(value.to_object(self.py)) } fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E> where E: serde::de::Error { Ok(value.to_object(self.py)) } fn visit_f64<E>(self, value: f64) -> Result<Self::Value, E> where E: serde::de::Error { Ok(value.to_object(self.py)) } fn visit_str<E>(self, value: &str) -> Result<Self::Value, E> where E: serde::de::Error { Ok(value.to_object(self.py)) } fn visit_unit<E>(self) -> Result<Self::Value, E> { Ok(self.py.None()) } fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error> where A: SeqAccess<'de> { let mut elements = Vec::new(); while let Some(elem) = seq.next_element_seed(self)? { elements.push(elem); } Ok(elements.to_object(self.py)) } fn visit_map<A>(self, mut map: A) -> Result<Self::Value, A::Error> where A: MapAccess<'de> { let mut entries = BTreeMap::new(); while let Some((key, value)) = map.next_entry_seed(PhantomData::<String>, self)? { entries.insert(key, value); } Ok(entries.to_object(self.py)) } }
rust
MIT
a2a92e14710b2dc9a9505f2534ad2d389153041a
2026-01-04T20:19:37.508142Z
false
leakec/multitask
https://github.com/leakec/multitask/blob/acba08810ae8bac60f156361078cef0b5cff0453/src/parallel_tasks.rs
src/parallel_tasks.rs
use zellij_tile::prelude::{PaneManifest, rename_terminal_pane}; use std::fmt; #[derive(Default, Debug)] pub struct ParallelTasks { pub run_tasks: Vec<RunTask> } #[derive(Default, Debug)] pub struct RunTask { pub command: String, pub args: Vec<String>, pub terminal_pane_id: Option<u32>, pub is_complete: bool, pub succeeded: bool, pub title: Option<String>, } impl ParallelTasks { pub fn new(run_tasks: Vec<RunTask>) -> Self { ParallelTasks { run_tasks, } } pub fn all_tasks_completed_successfully(&self) -> bool { self.run_tasks.iter().all(|t| t.succeeded()) } pub fn pane_ids(&self) -> Vec<u32> { let mut pane_ids = vec![]; for task in &self.run_tasks { if let Some(terminal_pane_id) = task.terminal_pane_id { pane_ids.push(terminal_pane_id); } } pane_ids } pub fn update_task_status(&mut self, pane_manifest: &PaneManifest) { for (_tab_id, panes) in &pane_manifest.panes { for pane in panes { for task in &mut self.run_tasks { let stringified_task = task.to_string(); if Some(stringified_task) == pane.terminal_command { if task.terminal_pane_id.is_none() { task.mark_pane_id(pane.id); if let Some(title) = &task.title { rename_terminal_pane(pane.id as u32, title); } } if !task.is_complete() && pane.exited { task.mark_complete(pane.exit_status); break; } } } } } } } impl fmt::Display for RunTask { fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { if self.args.is_empty() { write!(f, "{}", self.command) } else { write!(f, "{} {}", self.command, self.args.join(" ")) } } } impl RunTask { pub fn new<T: AsRef<str>>(mut command_and_args: Vec<T>) -> Self { RunTask { command: command_and_args.remove(0).as_ref().to_owned(), args: command_and_args.iter().map(|c| c.as_ref().to_owned()).collect(), ..Default::default() } } pub fn from_file_line(shell: &str, file_line: &str, step_number: usize) -> Self { Self::new(vec![shell, "-c", file_line]) .pane_title(format!("STEP {} - {}", step_number, file_line)) } pub fn pane_title(mut self, title: String) -> Self { self.title = Some(title); self } 
pub fn is_complete(&self) -> bool { self.is_complete } pub fn succeeded(&self) -> bool { self.is_complete && self.succeeded } pub fn mark_pane_id(&mut self, pane_id: u32) { self.terminal_pane_id = Some(pane_id); } pub fn mark_complete(&mut self, exit_status: Option<i32>) { self.is_complete = true; match exit_status { Some(exit_status) => { self.succeeded = exit_status == 0; }, None => { self.succeeded = true; } } } }
rust
MIT
acba08810ae8bac60f156361078cef0b5cff0453
2026-01-04T20:19:38.611485Z
false
leakec/multitask
https://github.com/leakec/multitask/blob/acba08810ae8bac60f156361078cef0b5cff0453/src/multitask_file.rs
src/multitask_file.rs
use zellij_tile::prelude::*; use std::path::{PathBuf, Path}; use std::io::prelude::*; use crate::parallel_tasks::{ParallelTasks, RunTask}; pub fn create_file_with_text(path: &Path, text: &str) { if !path.exists() { // Only create the file if it does not already exists. Otherwise, use the file that is // already there. if let Err(e) = std::fs::File::create(PathBuf::from("/host").join(path)).and_then(|mut f| { f.write_all(text.as_bytes()) }) { eprintln!("Failed to create file with error: {}", e); }; } } pub fn parse_multitask_file(filename: PathBuf, shell: &str) -> Result<Vec<ParallelTasks>, std::io::Error> { let stringified_file = std::fs::read_to_string(filename)?; let mut parallel_tasks = vec![]; let mut current_tasks = vec![]; let mut current_step = 1; for line in stringified_file.lines() { let line = line.to_string(); let line_is_empty = line.trim().is_empty(); if !line.starts_with("#") && !line_is_empty { let task = RunTask::from_file_line(shell, &line, current_step); current_tasks.push(task); } else if line_is_empty && !current_tasks.is_empty() { parallel_tasks.push(ParallelTasks::new(current_tasks.drain(..).collect())); current_step += 1; } } if !current_tasks.is_empty() { parallel_tasks.push(ParallelTasks::new(current_tasks.drain(..).collect())); } Ok(parallel_tasks) }
rust
MIT
acba08810ae8bac60f156361078cef0b5cff0453
2026-01-04T20:19:38.611485Z
false
leakec/multitask
https://github.com/leakec/multitask/blob/acba08810ae8bac60f156361078cef0b5cff0453/src/main.rs
src/main.rs
mod parallel_tasks; mod multitask_file; use zellij_tile::prelude::*; use std::collections::{VecDeque, BTreeMap}; use std::path::PathBuf; use std::time::Instant; use parallel_tasks::ParallelTasks; use multitask_file::{parse_multitask_file, create_file_with_text}; #[derive(Default)] struct State { tasks: VecDeque<ParallelTasks>, running_tasks: Option<ParallelTasks>, multitask_file: PathBuf, multitask_file_name: String, completed_task_ids: Vec<u32>, edit_pane_id: Option<u32>, last_run: Option<Instant>, is_hidden: bool, plugin_id: Option<u32>, shell: String, ccwd: Option<PathBuf>, layout: String, } impl ZellijPlugin for State { fn load(&mut self, config: BTreeMap<String, String>) { request_permission(&[PermissionType::ReadApplicationState, PermissionType::ChangeApplicationState, PermissionType::RunCommands, PermissionType::OpenFiles]); subscribe(&[EventType::PaneUpdate]); self.plugin_id = Some(get_plugin_ids().plugin_id); self.multitask_file_name = match config.get("multitask_file_name") { Some(s) => format!("{}", s), _ => format!(".multitask{}",get_plugin_ids().plugin_id.to_string()), }; self.shell = match config.get("shell") { Some(s) => String::from(s), _ => String::from("bash") }; self.ccwd = match config.get("ccwd") { Some(s) => Some(PathBuf::from(s)), _ => None }; // Get the user's layout for multitask. 
If not defined, then fallback to the // assets/multitask_layout.kdl self.layout = match config.get("layout") { Some(s) => { s.to_string() }, _ => String::from(include_str!("assets/multitask_layout.kdl")) }; self.layout = self.layout.replace(".multitask",self.multitask_file_name.as_str()); self.multitask_file = PathBuf::from("/host").join(self.multitask_file_name.clone()); show_self(true); } fn pipe(&mut self, pipe_message: PipeMessage) -> bool { match pipe_message.payload { Some(msg) => { if msg == "multitask_run" { self.stop_run_and_reparse_file(); } } _ => () } return false; } fn update(&mut self, event: Event) -> bool { match event { Event::PaneUpdate(pane_manifest) => { if self.gained_focus(&pane_manifest) { // whenever the plugin gains focus, eg. with the `LaunchOrFocusPlugin` keybind // we clean up our state and start over, allowing the plugin to be triggered by // a keybinding hide_self(); self.start_multitask_env(); } else if let Some(running_tasks) = &mut self.running_tasks { running_tasks.update_task_status(&pane_manifest); if running_tasks.all_tasks_completed_successfully() { self.progress_running_tasks(); } } } _ => (), }; return false; // this plugin never renders } fn render(&mut self, _rows: usize, _cols: usize) {} // no ui, no problems! 
} impl State { pub fn start_current_tasks(&mut self) { if let Some(running_tasks) = &self.running_tasks { for task in &running_tasks.run_tasks { let cmd = CommandToRun { path: (&task.command).into(), args: task.args.clone(), cwd: self.ccwd.clone() }; open_command_pane_floating(cmd, None, BTreeMap::<String, String>::new()); } } } pub fn progress_running_tasks(&mut self) { if let Some(running_tasks) = self.running_tasks.as_ref() { for task in &running_tasks.run_tasks { if let Some(terminal_pane_id) = task.terminal_pane_id { focus_terminal_pane(terminal_pane_id as u32, true); toggle_pane_embed_or_eject(); self.completed_task_ids.push(terminal_pane_id); } } if let Some(edit_pane_id) = self.edit_pane_id { focus_terminal_pane(edit_pane_id as u32, false); } } self.running_tasks = None; if let Some(tasks) = self.tasks.remove(0) { self.running_tasks = Some(tasks); self.start_current_tasks(); } } pub fn stop_run(&mut self) { let mut all_tasks = vec![]; if let Some(running_tasks) = self.running_tasks.as_mut() { all_tasks.append(&mut running_tasks.pane_ids()); } all_tasks.append(&mut self.completed_task_ids.drain(..).collect()); for pane_id in all_tasks { close_terminal_pane(pane_id as u32); } self.running_tasks = None; self.completed_task_ids = vec![]; } pub fn parse_file(&mut self) -> bool { match parse_multitask_file(self.multitask_file.clone(), self.shell.as_str()) { Ok(new_tasks) => { self.tasks = new_tasks.into(); return true; }, Err(e) => { eprintln!("Failed to parse multitask file: {}", e); return false; } }; } pub fn stop_run_and_reparse_file(&mut self) { self.stop_run(); let file_changed = self.parse_file(); if file_changed { self.last_run = Some(Instant::now()); self.progress_running_tasks(); } } pub fn start_multitask_env(&mut self) { self.stop_run(); create_file_with_text( &self.multitask_file, &format!("{}{}\n#\n{}\n{}\n{}\n{}\n", "#!", self.shell, "# Hi there! 
Anything below these lines will be executed on save.", "# One command per line.", "# Place empty lines between steps that should run in parallel.", "# Enjoy!" ) ); new_tabs_with_layout(&self.layout); } pub fn gained_focus(&mut self, pane_manifest: &PaneManifest) -> bool { if let Some(own_plugin_id) = self.plugin_id { for (_tab_id, panes) in &pane_manifest.panes { for pane in panes { let is_own_plugin_pane = pane.is_plugin && own_plugin_id == pane.id; if is_own_plugin_pane && pane.is_focused && !self.is_hidden { self.is_hidden = true; return true; } } } } return false; } } register_plugin!(State);
rust
MIT
acba08810ae8bac60f156361078cef0b5cff0453
2026-01-04T20:19:38.611485Z
false
integer32llc/margo
https://github.com/integer32llc/margo/blob/c29ba4782272af3bac69425eb3382f07687d63d1/src/html.rs
src/html.rs
use indoc::formatdoc; use maud::{html, Markup, PreEscaped, DOCTYPE}; use semver::Version; use snafu::prelude::*; use std::{fs, io, path::PathBuf}; use crate::{index_entry, ConfigV1, Index, ListAll, Registry}; #[rustfmt::skip] mod assets; pub fn write(registry: &Registry) -> Result<(), Error> { use error::*; let crates = registry.list_all()?; let index = index(&registry.config, &crates).into_string(); let index_path = registry.path.join("index.html"); fs::write(&index_path, index).context(WriteIndexSnafu { path: index_path })?; let assets_dir = registry.path.join("assets"); fs::create_dir_all(&assets_dir).context(AssetDirSnafu { path: &assets_dir })?; let css_path = assets_dir.join(assets::CSS_NAME); fs::write(&css_path, assets::CSS).context(CssSnafu { path: &css_path })?; let css_map_path = { let mut css_map_path = css_path; css_map_path.as_mut_os_string().push(".map"); css_map_path }; fs::write(&css_map_path, assets::CSS_MAP).context(CssMapSnafu { path: &css_map_path, })?; let js_path = assets_dir.join(assets::JS_NAME); fs::write(&js_path, assets::JS).context(JsSnafu { path: &js_path })?; let js_map_path = { let mut js_map_path = js_path; js_map_path.as_mut_os_string().push(".map"); js_map_path }; fs::write(&js_map_path, assets::JS_MAP).context(JsMapSnafu { path: &js_map_path })?; Ok(()) } #[derive(Debug, Snafu)] #[snafu(module)] pub enum Error { #[snafu(display("Could not list the crates"))] #[snafu(context(false))] ListAll { source: crate::ListAllError }, #[snafu(display("Could not write the HTML index page to {}", path.display()))] WriteIndex { source: io::Error, path: PathBuf }, #[snafu(display("Could not create the HTML asset directory at {}", path.display()))] AssetDir { source: io::Error, path: PathBuf }, #[snafu(display("Could not write the CSS file to {}", path.display()))] Css { source: io::Error, path: PathBuf }, #[snafu(display("Could not write the CSS sourcemap file to {}", path.display()))] CssMap { source: io::Error, path: PathBuf }, 
#[snafu(display("Could not write the JS file to {}", path.display()))] Js { source: io::Error, path: PathBuf }, #[snafu(display("Could not write the JS sourcemap file to {}", path.display()))] JsMap { source: io::Error, path: PathBuf }, } const CARGO_DOCS: &str = "https://doc.rust-lang.org/cargo/reference/registries.html#using-an-alternate-registry"; fn index(config: &ConfigV1, crates: &ListAll) -> Markup { let base_url = &config.base_url; let suggested_name = config.html.suggested_registry_name(); let asset_head_elements = PreEscaped(assets::INDEX); fn link(href: &str, content: &str) -> Markup { html! { a href=(href) class="underline text-blue-600 hover:text-blue-800 visited:text-purple-600" { (content) } } } fn section(name: &str, id: &str, content: Markup) -> Markup { html! { section class="p-1" { h1 class="text-2xl" { a class="hover:after:content-['_§']" id=(id) href={"#" (id)} { (name) } } (content) } } } fn code_block(content: impl AsRef<str>) -> Markup { let content = content.as_ref(); let span_class = "col-start-1 row-start-1 leading-none p-1"; html! { mg-copy { pre class="relative border border-black bg-theme-rose-light m-1 p-1 overflow-x-auto" { button class="hidden absolute top-0 right-0 grid" data-target="copy" { span class=(span_class) data-target="state0" { "Copy" } span class={(span_class) " invisible"} data-target="state1" { "Copied" } } code data-target="content" { (content) } } } } } let config_stanza = formatdoc! {r#" [registries] {suggested_name} = {{ index = "sparse+{base_url}" }} "#}; let cargo_add_stanza = formatdoc! {" cargo add --registry {suggested_name} some-crate-name "}; html! 
{ (DOCTYPE) html lang="en-US" { head { meta charset="utf-8"; meta name="viewport" content="width=device-width, initial-scale=1"; title { "Margo Crate Registry" }; (asset_head_elements); } body class="flex flex-col min-h-screen bg-theme-salmon-light" { header { h1 class="text-3xl font-bold bg-theme-purple text-theme-salmon-light p-2 drop-shadow-xl" { "Margo Crate Registry" } } (section("Getting started", "getting-started", html! { ol class="list-inside list-decimal" { li { "Add the registry definition to your " code { ".cargo/config.toml" } ":" (code_block(config_stanza)) } li { "Add your dependency to your project:" (code_block(cargo_add_stanza)) } } "For complete details, check the " (link(CARGO_DOCS, "Cargo documentation")) "." })) (section("Available crates", "crates", html! { table class="table-fixed w-full" { thead { tr { th class="w-4/5 text-left" { "Name" } th { "Versions" } } } tbody { @for (c, v) in crates { tr class="hover:bg-theme-orange" { td { span class="truncate" { (c.as_str()) } } td { select class="w-full bg-white" name="version" { @for (v, c, select) in most_interesting(v) { @let suffix = if c.yanked { " (yanked)" } else { "" }; option selected[select] { (v) (suffix) } } } } } } } } })) footer class="grow place-content-end text-center" { span class="border-t border-dashed border-theme-purple" { "Powered by " (link("https://github.com/integer32llc/margo", "Margo")) } } } } } } fn most_interesting(i: &Index) -> impl Iterator<Item = (&Version, &index_entry::Root, bool)> { let last_non_yanked = i.iter().rfind(|(_, c)| !c.yanked).map(|(v, _)| v); i.iter() .map(move |(v, c)| (v, c, Some(v) == last_non_yanked)) }
rust
Apache-2.0
c29ba4782272af3bac69425eb3382f07687d63d1
2026-01-04T20:19:37.290944Z
false
integer32llc/margo
https://github.com/integer32llc/margo/blob/c29ba4782272af3bac69425eb3382f07687d63d1/src/main.rs
src/main.rs
use common::CrateName; use semver::Version; use serde::{Deserialize, Serialize}; use snafu::prelude::*; use std::{ collections::{BTreeMap, BTreeSet}, env, fmt, fs::{self, File}, io::{self, BufRead, BufReader, BufWriter, Read, Write}, path::{Component, Path, PathBuf}, str, }; use url::Url; #[cfg(feature = "html")] mod html; #[derive(Debug, argh::FromArgs)] /// Manage a static crate registry struct Args { #[argh(subcommand)] subcommand: Subcommand, } #[derive(Debug, argh::FromArgs)] #[argh(subcommand)] enum Subcommand { Init(InitArgs), Add(AddArgs), Remove(RemoveArgs), Yank(YankArgs), List(ListArgs), GenerateHtml(GenerateHtmlArgs), } /// Initialize a new registry #[derive(Debug, argh::FromArgs)] #[argh(subcommand)] #[argh(name = "init")] struct InitArgs { /// the URL that the registry is hosted at #[argh(option)] base_url: Option<Url>, /// use default values where possible, instead of prompting for them #[argh(switch)] defaults: bool, /// require HTTP authentication to access crates #[argh(option)] auth_required: Option<bool>, /// generate an HTML file showing crates in the index #[argh(option)] html: Option<bool>, /// name you'd like to suggest other people call your registry #[argh(option)] html_suggested_registry_name: Option<String>, #[argh(positional)] path: PathBuf, } /// Add a crate to the registry #[derive(Debug, argh::FromArgs)] #[argh(subcommand)] #[argh(name = "add")] struct AddArgs { /// path to the registry to modify #[argh(option)] registry: Option<PathBuf>, #[argh(positional)] path: Vec<PathBuf>, } /// Remove a crate from the registry #[derive(Debug, argh::FromArgs)] #[argh(subcommand)] #[argh(name = "rm")] struct RemoveArgs { /// path to the registry to modify #[argh(option)] registry: Option<PathBuf>, // FUTURE: Allow removing all versions at once? 
/// the version of the crate #[argh(option)] version: Version, #[argh(positional)] name: CrateName, } /// Generate an HTML index for the registry #[derive(Debug, argh::FromArgs)] #[argh(subcommand)] #[argh(name = "generate-html")] struct GenerateHtmlArgs { /// path to the registry to modify #[argh(option)] registry: Option<PathBuf>, } /// Yank a version of a crate from the registry #[derive(Debug, argh::FromArgs)] #[argh(subcommand)] #[argh(name = "yank")] struct YankArgs { /// path to the registry to modify #[argh(option)] registry: Option<PathBuf>, /// undo a previous yank #[argh(switch)] undo: bool, /// the version of the crate #[argh(option)] version: Version, /// the name of the crate #[argh(positional)] name: CrateName, } /// List all crates and their versions in the registry #[derive(Debug, argh::FromArgs)] #[argh(subcommand)] #[argh(name = "list")] struct ListArgs { /// path to the registry to list #[argh(option)] registry: Option<PathBuf>, } #[snafu::report] fn main() -> Result<(), Error> { let args: Args = argh::from_env(); let global = Global::new()?; let global = Box::leak(Box::new(global)); match args.subcommand { Subcommand::Init(init) => do_init(global, init)?, Subcommand::Add(add) => do_add(global, add)?, Subcommand::Remove(rm) => do_remove(global, rm)?, Subcommand::Yank(yank) => do_yank(global, yank)?, Subcommand::List(list) => do_list(global, list)?, Subcommand::GenerateHtml(html) => do_generate_html(global, html)?, } Ok(()) } #[derive(Debug, Snafu)] enum Error { #[snafu(display("Could not initialize global variables"))] #[snafu(context(false))] Global { #[snafu(source(from(GlobalError, Box::new)))] source: Box<GlobalError>, }, #[snafu(transparent)] Initialize { #[snafu(source(from(DoInitializeError, Box::new)))] source: Box<DoInitializeError>, }, #[snafu(transparent)] Open { #[snafu(source(from(DiscoverRegistryError, Box::new)))] source: Box<DiscoverRegistryError>, }, #[snafu(transparent)] Add { #[snafu(source(from(AddError, Box::new)))] source: 
Box<AddError>, }, #[snafu(transparent)] Remove { #[snafu(source(from(RemoveError, Box::new)))] source: Box<RemoveError>, }, #[snafu(transparent)] Html { #[snafu(source(from(HtmlError, Box::new)))] source: Box<HtmlError>, }, #[snafu(transparent)] Yank { #[snafu(source(from(YankError, Box::new)))] source: Box<YankError>, }, } trait UnwrapOrDialog<T> { fn apply_default(self, use_default: bool, value: impl Into<T>) -> Self; fn unwrap_or_dialog(self, f: impl FnOnce() -> dialoguer::Result<T>) -> dialoguer::Result<T>; } impl<T> UnwrapOrDialog<T> for Option<T> { fn apply_default(self, use_default: bool, value: impl Into<T>) -> Self { if self.is_none() && use_default { Some(value.into()) } else { self } } fn unwrap_or_dialog(self, f: impl FnOnce() -> dialoguer::Result<T>) -> dialoguer::Result<T> { match self { Some(v) => Ok(v), None => f(), } } } fn do_init(_global: &Global, init: InitArgs) -> Result<(), DoInitializeError> { use do_initialize_error::*; let base_url = init .base_url .unwrap_or_dialog(|| { dialoguer::Input::new() .with_prompt("What URL will the registry be served from") .interact() }) .context(BaseUrlSnafu)?; let auth_required = init .auth_required .apply_default(init.defaults, ConfigV1::USER_DEFAULT_AUTH_REQUIRED) .unwrap_or_dialog(|| { dialoguer::Confirm::new() .default(ConfigV1::USER_DEFAULT_AUTH_REQUIRED) .show_default(true) .with_prompt("Require HTTP authentication to access crates?") .interact() }) .context(AuthRequiredSnafu)?; let enabled = init .html .apply_default(init.defaults, ConfigV1Html::USER_DEFAULT_ENABLED) .unwrap_or_dialog(|| { dialoguer::Confirm::new() .default(ConfigV1Html::USER_DEFAULT_ENABLED) .show_default(true) .with_prompt("Enable HTML index generation?") .interact() }) .context(HtmlEnabledSnafu)?; let suggested_registry_name = if enabled { let name = init .html_suggested_registry_name .apply_default( init.defaults, ConfigV1Html::USER_DEFAULT_SUGGESTED_REGISTRY_NAME, ) .unwrap_or_dialog(|| { dialoguer::Input::new() 
.default(ConfigV1Html::USER_DEFAULT_SUGGESTED_REGISTRY_NAME.to_owned()) .show_default(true) .with_prompt("Name you'd like to suggest other people call your registry") .interact() }) .context(HtmlSuggestedRegistryNameSnafu)?; Some(name) } else { None }; let config = ConfigV1 { base_url, auth_required, html: ConfigV1Html { enabled, suggested_registry_name, }, }; let r = Registry::initialize(config, &init.path)?; if r.config.html.enabled { let res = r.generate_html(); if cfg!(feature = "html") { res?; } else if let Err(e) = res { eprintln!("Warning: {e}"); } } Ok(()) } #[derive(Debug, Snafu)] #[snafu(module)] enum DoInitializeError { #[snafu(display("Could not determine the base URL"))] BaseUrl { source: dialoguer::Error }, #[snafu(display("Could not determine if HTTP authorization is required"))] AuthRequired { source: dialoguer::Error }, #[snafu(display("Could not determine if HTML generation is enabled"))] HtmlEnabled { source: dialoguer::Error }, #[snafu(display("Could not determine the suggested registry name"))] HtmlSuggestedRegistryName { source: dialoguer::Error }, #[snafu(transparent)] Initialize { source: InitializeError }, #[snafu(transparent)] Html { source: HtmlError }, } fn do_add(global: &Global, add: AddArgs) -> Result<(), Error> { let r = discover_registry(add.registry)?; for i in add.path { r.add(global, i)?; } r.maybe_generate_html()?; Ok(()) } fn do_remove(_global: &Global, rm: RemoveArgs) -> Result<(), Error> { let r = discover_registry(rm.registry)?; r.remove(rm.name, rm.version)?; r.maybe_generate_html()?; Ok(()) } fn do_generate_html(_global: &Global, html: GenerateHtmlArgs) -> Result<(), Error> { let r = discover_registry(html.registry)?; r.generate_html()?; Ok(()) } fn do_yank(_global: &Global, yank: YankArgs) -> Result<(), Error> { let r = discover_registry(yank.registry)?; r.yank(yank.name, yank.version, !yank.undo)?; r.maybe_generate_html()?; Ok(()) } fn do_list(_global: &Global, list: ListArgs) -> Result<(), Error> { let r = 
discover_registry(list.registry)?; let crates = r.list_all().unwrap(); #[derive(Default)] struct Max(usize, String); impl Max { fn push(&mut self, v: impl fmt::Display) { use std::fmt::Write; let Self(m, s) = self; s.clear(); _ = write!(s, "{v}"); *m = usize::max(*m, s.len()); } fn max(&self) -> usize { self.0 } } let mut max_c = Max::default(); let mut max_v = Max::default(); for (crate_, versions) in &crates { max_c.push(crate_); for version in versions.keys() { max_v.push(version); } } let max_c = max_c.max(); let max_v = max_v.max(); for (crate_, versions) in crates { for version in versions.keys() { println!("{crate_:<max_c$} {version:<max_v$}"); } } Ok(()) } fn discover_registry(path: Option<PathBuf>) -> Result<Registry, DiscoverRegistryError> { use discover_registry_error::*; match path { Some(p) => Registry::open(p).context(OpenSnafu), None => { let cwd = env::current_dir().context(CurrentDirSnafu)?; match Registry::open(cwd) { Ok(r) => Ok(r), Err(e) if e.is_not_found() => FallbackNotFoundSnafu.fail(), Err(e) => Err(e).context(FallbackOpenSnafu)?, } } } } #[derive(Debug, Snafu)] #[snafu(module)] enum DiscoverRegistryError { #[snafu(display("Could not open the specified registry"))] Open { source: OpenError }, #[snafu(display("Could not determine the current directory, {}", Self::TRY_THIS))] CurrentDir { source: io::Error }, #[snafu(display( "The current directory does not contain a registry, {}", Self::TRY_THIS, ))] FallbackNotFound, #[snafu(display("Could not open the registry in the current directory"))] FallbackOpen { source: OpenError }, } impl DiscoverRegistryError { const TRY_THIS: &'static str = "please use the `--registry` command line option"; } #[derive(Debug)] struct Registry { path: PathBuf, config: ConfigV1, } type Index = BTreeMap<Version, index_entry::Root>; type ListAll = BTreeMap<CrateName, Index>; impl Registry { fn initialize(config: ConfigV1, path: impl Into<PathBuf>) -> Result<Self, InitializeError> { use initialize_error::*; let config 
= config.normalize(); let path = path.into(); println!("Initializing registry in `{}`", path.display()); fs::create_dir_all(&path).context(RegistryCreateSnafu)?; let config_toml_path = path.join(CONFIG_FILE_NAME); let config = Config::V1(config); let config_toml = toml::to_string(&config).context(ConfigTomlSerializeSnafu)?; fs::write(&config_toml_path, config_toml).context(ConfigTomlWriteSnafu { path: &config_toml_path, })?; let Config::V1(config) = config; let dl = format!( "{base_url}crates/{{lowerprefix}}/{{crate}}/{{version}}.crate", base_url = config.base_url, ); let auth_required = config.auth_required; let this = Self { path, config }; let config_json_path = this.config_json_path(); let config_json = config_json::Root { dl, api: None, auth_required, }; let config_json = serde_json::to_string(&config_json).context(ConfigJsonSerializeSnafu)?; fs::write(&config_json_path, config_json).context(ConfigJsonWriteSnafu { path: &config_json_path, })?; Ok(this) } fn open(path: impl Into<PathBuf>) -> Result<Self, OpenError> { use open_error::*; let path = path.into(); let config_path = path.join(CONFIG_FILE_NAME); let config = fs::read_to_string(&config_path).context(ReadSnafu { path: &config_path })?; let Config::V1(config) = toml::from_str(&config).context(DeserializeSnafu { path: &config_path })?; Ok(Self { path, config }) } fn add(&self, global: &Global, crate_path: impl AsRef<Path>) -> Result<(), AddError> { use add_error::*; let crate_path = crate_path.as_ref(); println!("Adding crate `{}` to registry", crate_path.display()); let crate_file = fs::read(crate_path).context(ReadCrateSnafu)?; use sha2::Digest; let checksum = sha2::Sha256::digest(&crate_file); let checksum_hex = hex::encode(checksum); let cargo_toml = extract_root_cargo_toml(&crate_file)?.context(CargoTomlMissingSnafu)?; let cargo_toml = String::from_utf8(cargo_toml).context(CargoTomlUtf8Snafu)?; let cargo_toml = toml::from_str(&cargo_toml).context(CargoTomlMalformedSnafu)?; let index_entry = 
adapt_cargo_toml_to_index_entry(global, &self.config, cargo_toml, checksum_hex); let index_path = self.index_file_path_for(&index_entry.name); if let Some(path) = index_path.parent() { fs::create_dir_all(path).context(IndexDirSnafu { path })?; } let crate_file_path = self.crate_file_path_for(&index_entry.name, &index_entry.vers); if let Some(path) = crate_file_path.parent() { fs::create_dir_all(path).context(CrateDirSnafu { path })?; } // FUTURE: Stronger file system consistency (atomic file overwrites, rollbacks on error) // FUTURE: "transactional" adding of multiple crates self.read_modify_write(&index_entry.name.clone(), |index_file| { index_file.insert(index_entry.vers.clone(), index_entry); Ok::<_, AddError>(()) })?; println!("Wrote crate index to `{}`", index_path.display()); fs::write(&crate_file_path, &crate_file).context(CrateWriteSnafu { path: &crate_file_path, })?; println!("Wrote crate to `{}`", crate_file_path.display()); Ok(()) } fn remove(&self, name: CrateName, version: Version) -> Result<(), RemoveError> { use remove_error::*; self.read_modify_write(&name, |index| { index.remove(&version); Ok::<_, RemoveError>(()) })?; let crate_file = self.crate_file_path_for(&name, &version); match fs::remove_file(&crate_file) { Ok(()) => Ok(()), Err(e) if e.kind() == io::ErrorKind::NotFound => Ok(()), Err(e) => Err(e).context(DeleteSnafu { path: crate_file }), } } #[cfg(feature = "html")] fn generate_html(&self) -> Result<(), HtmlError> { html::write(self) } #[cfg(not(feature = "html"))] fn generate_html(&self) -> Result<(), HtmlError> { Err(HtmlError) } fn maybe_generate_html(&self) -> Result<(), HtmlError> { if self.config.html.enabled { self.generate_html() } else { Ok(()) } } fn yank(&self, name: CrateName, version: Version, yanked: bool) -> Result<(), YankError> { use yank_error::*; self.read_modify_write(&name, |index| { let entry = index.get_mut(&version).context(VersionSnafu)?; entry.yanked = yanked; Ok(()) }) } fn read_modify_write<T, E>( &self, name: 
&CrateName, modify: impl FnOnce(&mut Index) -> Result<T, E>, ) -> Result<T, E> where E: From<ReadModifyWriteError>, { use read_modify_write_error::*; let path = self.index_file_path_for(name); let mut index = Self::parse_index_file(&path).context(IndexParseSnafu { path: &path })?; let val = modify(&mut index)?; Self::write_index_file(index, &path).context(IndexWriteSnafu { path })?; Ok(val) } fn list_crate_files( crate_dir: &Path, ) -> impl Iterator<Item = walkdir::Result<walkdir::DirEntry>> { walkdir::WalkDir::new(crate_dir) .into_iter() .flat_map(|entry| { let Ok(entry) = entry else { return Some(entry) }; let fname = entry.path().file_name()?; let fname = Path::new(fname); let extension = fname.extension()?; if extension == "crate" { Some(Ok(entry)) } else { None } }) } fn list_index_files(&self) -> Result<BTreeSet<PathBuf>, ListIndexFilesError> { use list_index_files_error::*; let crate_dir = self.crate_dir(); let index_files = Self::list_crate_files(&crate_dir) .map(|entry| { let entry = entry.context(WalkdirSnafu { path: &crate_dir })?; let mut path = entry.into_path(); path.pop(); let subdir = path.strip_prefix(&crate_dir).context(PrefixSnafu { path: &path, prefix: &crate_dir, })?; let index_path = self.path.join(subdir); Ok(index_path) }) .collect::<Result<BTreeSet<_>, ListIndexFilesError>>(); match index_files { Err(e) if e.is_not_found() => Ok(Default::default()), r => r, } } fn list_all(&self) -> Result<ListAll, ListAllError> { use list_all_error::*; let mut crates = BTreeMap::new(); for path in self.list_index_files()? 
{ let index = Self::parse_index_file(&path).context(ParseSnafu { path })?; if let Some(entry) = index.values().next() { crates.insert(entry.name.clone(), index); } } Ok(crates) } fn parse_index_file(path: &Path) -> Result<Index, ParseIndexError> { use parse_index_error::*; let index_file = match File::open(path) { Ok(f) => f, Err(e) if e.kind() == io::ErrorKind::NotFound => return Ok(Default::default()), Err(e) => Err(e).context(OpenSnafu)?, }; let index_file = BufReader::new(index_file); let mut index = BTreeMap::new(); for (i, line) in index_file.lines().enumerate() { let line = line.context(ReadSnafu { line: i })?; let entry = serde_json::from_str::<index_entry::Root>(&line).context(ParseSnafu { line: i })?; index.insert(entry.vers.clone(), entry); } Ok(index) } fn write_index_file(index_file: Index, path: &Path) -> Result<(), WriteIndexError> { use write_index_error::*; let file = File::create(path).context(OpenSnafu)?; let mut file = BufWriter::new(file); for entry in index_file.values() { serde_json::to_writer(&mut file, entry).context(EntrySerializeSnafu)?; file.write_all(b"\n").context(EntryNewlineSnafu)?; } Ok(()) } fn crate_dir(&self) -> PathBuf { self.path.join(CRATE_DIR_NAME) } #[cfg(test)] fn margo_config_toml_path(&self) -> PathBuf { self.path.join(CONFIG_FILE_NAME) } fn config_json_path(&self) -> PathBuf { self.path.join("config.json") } fn index_file_path_for(&self, name: &CrateName) -> PathBuf { let mut index_path = self.path.clone(); name.append_prefix_directories(&mut index_path); index_path.push(name); index_path } fn crate_dir_for(&self, name: &CrateName) -> PathBuf { let mut crate_dir = self.crate_dir(); name.append_prefix_directories(&mut crate_dir); crate_dir.push(name); crate_dir } fn crate_file_path_for(&self, name: &CrateName, version: &Version) -> PathBuf { let mut crate_file_path = self.crate_dir_for(name); crate_file_path.push(format!("{}.crate", version)); crate_file_path } } #[derive(Debug, Snafu)] #[snafu(module)] enum 
InitializeError { #[snafu(display("Could not create the registry directory"))] RegistryCreate { source: io::Error }, #[snafu(display("Could not serialize the registry's internal configuration"))] ConfigTomlSerialize { source: toml::ser::Error }, #[snafu(display("Could not write the registry's internal configuration to {}", path.display()))] ConfigTomlWrite { source: io::Error, path: PathBuf }, #[snafu(display("Could not serialize the registry's public configuration"))] ConfigJsonSerialize { source: serde_json::Error }, #[snafu(display("Could not write the registry's public configuration to {}", path.display()))] ConfigJsonWrite { source: io::Error, path: PathBuf }, } #[derive(Debug, Snafu)] #[snafu(module)] enum OpenError { #[snafu(display("Could not open the registry's internal configuration at {}", path.display()))] Read { source: io::Error, path: PathBuf }, #[snafu(display("Could not deserialize the registry's internal configuration at {}", path.display()))] Deserialize { source: toml::de::Error, path: PathBuf, }, } impl OpenError { fn is_not_found(&self) -> bool { match self { Self::Read { source, .. } => source.kind() == io::ErrorKind::NotFound, Self::Deserialize { .. 
} => false, } } } #[derive(Debug, Snafu)] #[snafu(module)] enum AddError { #[snafu(display("Could not read the crate package"))] ReadCrate { source: io::Error }, #[snafu(transparent)] CargoTomlExtract { source: ExtractRootCargoTomlError }, #[snafu(display("The crate package does not contain a Cargo.toml file"))] CargoTomlMissing, #[snafu(display("The crate's Cargo.toml is not valid UTF-8"))] CargoTomlUtf8 { source: std::string::FromUtf8Error }, #[snafu(display("The crate's Cargo.toml is malformed"))] CargoTomlMalformed { source: toml::de::Error }, #[snafu(display("Could not create the crate's index directory {}", path.display()))] IndexDir { source: io::Error, path: PathBuf }, #[snafu(transparent)] IndexModify { source: ReadModifyWriteError }, #[snafu(display("Could not create the crate directory {}", path.display()))] CrateDir { source: io::Error, path: PathBuf }, #[snafu(display("Could not write the crate {}", path.display()))] CrateWrite { source: io::Error, path: PathBuf }, } #[derive(Debug, Snafu)] #[snafu(module)] enum RemoveError { #[snafu(transparent)] IndexModify { source: ReadModifyWriteError }, #[snafu(display("Could not delete the crate file {}", path.display()))] Delete { source: io::Error, path: PathBuf }, } #[cfg(feature = "html")] use html::Error as HtmlError; #[cfg(not(feature = "html"))] #[derive(Debug, Snafu)] #[snafu(display("Margo was not compiled with the HTML feature enabled. 
This binary will not be able to generate HTML files"))] struct HtmlError; #[derive(Debug, Snafu)] #[snafu(module)] enum YankError { #[snafu(display("The version does not exist in the index"))] Version, #[snafu(transparent)] Modify { source: ReadModifyWriteError }, } #[derive(Debug, Snafu)] #[snafu(module)] enum ReadModifyWriteError { #[snafu(display("Could not parse the crate's index file {}", path.display()))] IndexParse { source: ParseIndexError, path: PathBuf, }, #[snafu(display("Could not write the crate's index file {}", path.display()))] IndexWrite { source: WriteIndexError, path: PathBuf, }, } #[derive(Debug, Snafu)] #[snafu(module)] enum ListIndexFilesError { #[snafu(display("Could not enumerate the crate directory `{}`", path.display()))] Walkdir { source: walkdir::Error, path: PathBuf, }, #[snafu(display( "Could not remove the path prefix `{prefix}` from the crate package entry `{path}`", prefix = prefix.display(), path = path.display(), ))] Prefix { source: std::path::StripPrefixError, path: PathBuf, prefix: PathBuf, }, } impl ListIndexFilesError { fn is_not_found(&self) -> bool { if let Self::Walkdir { source, .. 
} = self { if let Some(e) = source.io_error() { if e.kind() == io::ErrorKind::NotFound { return true; } } } false } } #[derive(Debug, Snafu)] #[snafu(module)] enum ListAllError { #[snafu(display("Unable to list the crate index files"))] #[snafu(context(false))] ListIndex { source: ListIndexFilesError }, #[snafu(display("Unable to parse the crate index file at `{}`", path.display()))] Parse { source: ParseIndexError, path: PathBuf, }, } #[derive(Debug, Snafu)] #[snafu(module)] enum ParseIndexError { #[snafu(display("Could not open the file"))] Open { source: io::Error }, #[snafu(display("Could not read line {line}"))] Read { source: io::Error, line: usize }, #[snafu(display("Could not parse line {line}"))] Parse { source: serde_json::Error, line: usize, }, } #[derive(Debug, Snafu)] #[snafu(module)] enum WriteIndexError { #[snafu(display("Could not open the file"))] Open { source: io::Error }, #[snafu(display("Could not serialize the entry"))] EntrySerialize { source: serde_json::Error }, #[snafu(display("Could not write the entry's newline"))] EntryNewline { source: io::Error }, } fn extract_root_cargo_toml( crate_data: &[u8], ) -> Result<Option<Vec<u8>>, ExtractRootCargoTomlError> { use extract_root_cargo_toml_error::*; let crate_data = flate2::read::GzDecoder::new(crate_data); let mut crate_data = tar::Archive::new(crate_data); let entries = crate_data.entries().context(EntriesSnafu)?; let mut dirname = None; for entry in entries { let mut entry = entry.context(EntrySnafu)?; let path = entry.path().context(PathSnafu)?; let dirname = match &mut dirname { Some(v) => v, None => { let Some(Component::Normal(first)) = path.components().next() else { return MalformedSnafu.fail(); }; dirname.insert(first.to_owned()) } }; let fname = path.strip_prefix(dirname).context(PrefixSnafu)?; if fname == Path::new("Cargo.toml") { let mut data = vec![]; entry.read_to_end(&mut data).context(ReadSnafu)?; return Ok(Some(data)); } } Ok(None) } #[derive(Debug, Snafu)] #[snafu(module)] 
enum ExtractRootCargoTomlError { #[snafu(display("Could not get the entries of the crate package"))] Entries { source: io::Error }, #[snafu(display("Could not get the next crate package entry"))] Entry { source: io::Error }, #[snafu(display("Could not get the path of the crate package entry"))] Path { source: io::Error }, #[snafu(display("The crate package was malformed"))] Malformed, #[snafu(display("Could not remove the path prefix from the crate package entry"))] Prefix { source: std::path::StripPrefixError }, #[snafu(display("Could not read the crate package entry for Cargo.toml"))] Read { source: io::Error }, } fn adapt_cargo_toml_to_index_entry( global: &Global, config: &ConfigV1, mut cargo_toml: cargo_toml::Root, checksum_hex: String, ) -> index_entry::Root { // Remove features that refer to dev-dependencies as we don't // track those anyway. { // Ignore dependencies that also occur as a regular or build // dependency, as we *do* track those. let reg_dep_names = cargo_toml.dependencies.keys(); let build_dep_names = cargo_toml.build_dependencies.keys(); let mut only_dev_dep_names = cargo_toml.dev_dependencies.keys().collect::<BTreeSet<_>>(); for name in reg_dep_names.chain(build_dep_names) { only_dev_dep_names.remove(name); } for name in only_dev_dep_names { // We don't care about the official package name here as the // feature syntax has to match the user-specified dependency // name. 
let prefix = format!("{name}/"); for enabled in cargo_toml.features.values_mut() { enabled.retain(|enable| !enable.starts_with(&prefix)); } } } let mut deps: Vec<_> = cargo_toml .dependencies .into_iter() .map(|(name, dep)| adapt_dependency(global, config, dep, name)) .collect(); let build_deps = cargo_toml .build_dependencies .into_iter() .map(|(name, dep)| { let mut dep = adapt_dependency(global, config, dep, name); dep.kind = index_entry::DependencyKind::Build; dep }); deps.extend(build_deps); for (target, defn) in cargo_toml.target { let target_deps = defn.dependencies.into_iter().map(|(name, dep)| { let mut dep = adapt_dependency(global, config, dep, name); dep.target = Some(target.clone()); dep }); deps.extend(target_deps); } // FUTURE: Opt-in to checking that all dependencies already exist index_entry::Root { name: cargo_toml.package.name, vers: cargo_toml.package.version, deps, cksum: checksum_hex, features: cargo_toml.features, yanked: false, links: cargo_toml.package.links, v: 2, features2: Default::default(), rust_version: cargo_toml.package.rust_version, } } fn adapt_dependency( global: &Global, config: &ConfigV1, dep: cargo_toml::Dependency, name: String, ) -> index_entry::Dependency { let cargo_toml::Dependency { version, features, optional, default_features, registry_index, package, } = dep; index_entry::Dependency { name, req: version, features, optional, default_features, target: None, kind: index_entry::DependencyKind::Normal, registry: adapt_index(global, config, registry_index), package, } } fn adapt_index(global: &Global, config: &ConfigV1, registry_index: Option<Url>) -> Option<Url> { // The dependency is in... match registry_index { // ...crates.io None => Some(global.crates_io_index_url.clone()), // ...this registry Some(url) if url == config.base_url => None, // ...another registry r => r, } } /// Only intended for the normalized Cargo.toml created for the /// packaged crate. 
mod cargo_toml { use semver::{Version, VersionReq}; use serde::Deserialize; use std::collections::BTreeMap; use url::Url; use crate::common::{CrateName, RustVersion}; pub type Dependencies = BTreeMap<String, Dependency>; #[derive(Debug, Deserialize)] #[serde(rename_all = "kebab-case")] pub struct Root { pub package: Package, #[serde(default)] pub features: BTreeMap<String, Vec<String>>, #[serde(default)] pub dependencies: Dependencies, #[serde(default)] pub build_dependencies: Dependencies, #[serde(default)] pub dev_dependencies: Dependencies, #[serde(default)] pub target: BTreeMap<String, Target>, }
rust
Apache-2.0
c29ba4782272af3bac69425eb3382f07687d63d1
2026-01-04T20:19:37.290944Z
true
integer32llc/margo
https://github.com/integer32llc/margo/blob/c29ba4782272af3bac69425eb3382f07687d63d1/xtask/src/main.rs
xtask/src/main.rs
use notify::{RecursiveMode, Watcher}; use quote::quote; use regex::Regex; use snafu::prelude::*; use std::{ env, fs, io, path::{Path, PathBuf}, process::Command, sync::mpsc, thread, time::Duration, }; use toml_edit::{DocumentMut, Item}; /// Build tools for Margo #[derive(Debug, argh::FromArgs)] struct Args { #[argh(subcommand)] subcommand: Subcommand, } #[derive(Debug, argh::FromArgs)] #[argh(subcommand)] enum Subcommand { Assets(AssetsArgs), PrepareRelease(PrepareReleaseArgs), } /// Manage assets #[derive(Debug, argh::FromArgs)] #[argh(subcommand)] #[argh(name = "assets")] struct AssetsArgs { /// rebuild assets as they change #[argh(switch)] watch: bool, } /// Prepare a release #[derive(Debug, argh::FromArgs)] #[argh(subcommand)] #[argh(name = "prepare-release")] struct PrepareReleaseArgs { #[argh(positional)] tag: String, } #[snafu::report] fn main() -> Result<(), Error> { let args: Args = argh::from_env(); match args.subcommand { Subcommand::Assets(args) => do_assets(args)?, Subcommand::PrepareRelease(args) => do_prepare_release(args)?, } Ok(()) } #[derive(Debug, Snafu)] enum Error { #[snafu(transparent)] Assets { source: AssetsError }, #[snafu(transparent)] PrepareRelease { source: PrepareReleaseError }, } fn do_assets(args: AssetsArgs) -> Result<(), AssetsError> { use assets_error::*; let root = env::var("CARGO_MANIFEST_DIR").context(CargoManifestSnafu)?; let mut root = PathBuf::from(root); root.pop(); // Exit the `xtask` directory let asset_root = join!(&root, "ui", "dist"); let asset_index = join!(&asset_root, "ui.html"); pnpm!("install")?; if args.watch { do_assets_watch(&root, &asset_root, &asset_index).context(WatchSnafu { asset_index })?; } else { do_assets_once(&root, &asset_root, &asset_index).context(OnceSnafu { asset_index })?; } Ok(()) } #[derive(Debug, Snafu)] #[snafu(module)] enum AssetsError { #[snafu(display("`CARGO_MANIFEST_DIR` must be set"))] CargoManifest { source: env::VarError }, #[snafu(display("Could not install JS dependencies"))] 
#[snafu(context(false))] PnpmInstall { source: PnpmError }, #[snafu(display("Could not extract the assets from {}", asset_index.display()))] Watch { source: AssetsWatchError, asset_index: PathBuf, }, #[snafu(display("Could not extract the assets from {}", asset_index.display()))] Once { source: AssetsOnceError, asset_index: PathBuf, }, } fn do_assets_watch( root: &Path, asset_root: &Path, asset_index: &Path, ) -> Result<(), AssetsWatchError> { use assets_watch_error::*; // The directory needs to exist before we can watch it. std::fs::create_dir_all(asset_root).context(AssetDirectoryCreateSnafu { path: asset_root })?; let (tx, rx) = mpsc::channel(); let mut watcher = notify::recommended_watcher(move |evt: notify::Result<notify::Event>| { if let Ok(evt) = evt { if evt.paths.iter().any(|p| is_asset_file(p).unwrap_or(false)) { let _ = tx.send(()); } } }) .context(WatcherCreateSnafu)?; watcher .watch(asset_root, RecursiveMode::NonRecursive) .context(WatcherWatchSnafu)?; let rust_file_thread = thread::spawn({ let root = root.to_owned(); let asset_root = asset_root.to_owned(); let asset_index = asset_index.to_owned(); move || loop { recv_debounced(&rx)?; rebuild_asset_file(&root, &asset_root, &asset_index)?; } }); let pnpm_thread = thread::spawn(|| { pnpm!("watch")?; Ok(()) }); // Wait for the first thread to exit. Ah, to have the comforts // of async... 
let mut threads = [Some(rust_file_thread), Some(pnpm_thread)]; loop { for thread_slot in &mut threads { if let Some(t) = thread_slot.take() { if dbg!(t.is_finished()) { return match t.join() { Ok(Ok(())) => ThreadExitSnafu.fail(), Ok(e) => e, Err(e) => std::panic::resume_unwind(e), }; } *thread_slot = Some(t); } } thread::sleep(Duration::from_millis(250)); } } #[derive(Debug, Snafu)] #[snafu(module)] enum AssetsWatchError { #[snafu(display("Could not create the asset directory"))] AssetDirectoryCreate { source: io::Error, path: PathBuf }, #[snafu(display("Could not create the filesystem watcher"))] WatcherCreate { source: notify::Error }, #[snafu(display("Could not watch the asset directory"))] WatcherWatch { source: notify::Error }, #[snafu(display("Event channel receiver closed unexpectedly"))] #[snafu(context(false))] RxClosed { source: mpsc::RecvError }, #[snafu(transparent)] Rebuild { source: RebuildAssetFileError }, #[snafu(display("Could not watch assets"))] #[snafu(context(false))] PnpmWatch { source: PnpmError }, #[snafu(display("Thread exited"))] ThreadExit, } fn is_asset_file(p: &Path) -> Option<bool> { let fname = p.file_name()?; let fname = Path::new(fname); let ext = fname.extension()?; let matched = if ext == "js" || ext == "css" || ext == "html" { true } else if ext == "map" { let stem = fname.file_stem()?; let stem = Path::new(stem); let ext = stem.extension()?; ext == "js" || ext == "css" || ext == "html" } else { false }; Some(matched) } fn recv_debounced(rx: &mpsc::Receiver<()>) -> Result<(), mpsc::RecvError> { // Wait for an initial event rx.recv()?; loop { // Wait for subsequent events to stop coming in match rx.recv_timeout(Duration::from_millis(50)) { Ok(()) => continue, Err(mpsc::RecvTimeoutError::Timeout) => return Ok(()), _ => return Err(mpsc::RecvError), }; } } fn do_assets_once( root: &Path, asset_root: &Path, asset_index: &Path, ) -> Result<(), AssetsOnceError> { pnpm!("build")?; rebuild_asset_file(root, asset_root, asset_index)?; 
Ok(()) } #[derive(Debug, Snafu)] #[snafu(module)] enum AssetsOnceError { #[snafu(display("Could not build assets"))] #[snafu(context(false))] PnpmBuild { source: PnpmError }, #[snafu(transparent)] Rebuild { source: RebuildAssetFileError }, } fn rebuild_asset_file( root: &Path, asset_root: &Path, asset_index: &Path, ) -> Result<(), RebuildAssetFileError> { use rebuild_asset_file_error::*; let entry = fs::read_to_string(asset_index).context(ReadEntrypointSnafu { path: asset_index })?; let (css_name, css, css_map) = extract_asset( &entry, asset_root, r#"href=(?:")?assets/(ui.[a-zA-Z0-9]+.css)"#, ) .context(ExtractCssSnafu)?; let (js_name, js, js_map) = extract_asset( &entry, asset_root, r#"src=(?:")?assets/(ui.[a-zA-Z0-9]+.js)"#, ) .context(ExtractJsSnafu)?; let html_dir = join!(root, "src", "html"); fs::create_dir_all(&html_dir).context(CreateHtmlDirSnafu { path: &html_dir })?; let asset_src = quote! { pub const INDEX: &str = #entry; pub const CSS_NAME: &str = #css_name; pub const CSS: &str = #css; pub const CSS_MAP: &str = #css_map; pub const JS_NAME: &str = #js_name; pub const JS: &str = #js; pub const JS_MAP: &str = #js_map; }; let out_path = join!(html_dir, "assets.rs"); fs::write(&out_path, asset_src.to_string()).context(WriteAssetFileSnafu { path: out_path })?; Ok(()) } #[derive(Debug, Snafu)] #[snafu(module)] enum RebuildAssetFileError { #[snafu(display("Could not read the UI entrypoint from `{}`", path.display()))] ReadEntrypoint { source: io::Error, path: PathBuf }, #[snafu(display("Could not extract the CSS filename"))] ExtractCss { source: ExtractAssetError }, #[snafu(display("Could not extract the JS filename"))] ExtractJs { source: ExtractAssetError }, #[snafu(display("Could not create the HTML assets directory `{}`", path.display()))] CreateHtmlDir { source: io::Error, path: PathBuf }, #[snafu(display("Could not write HTML assets file `{}`", path.display()))] WriteAssetFile { source: io::Error, path: PathBuf }, } fn extract_asset<'a>( entry: &'a str, 
asset_root: &Path, re: &str, ) -> Result<(&'a str, String, String), ExtractAssetError> { use extract_asset_error::*; let find_asset = Regex::new(re)?; let (_, [asset_name]) = find_asset .captures(entry) .context(AssetMissingSnafu)? .extract(); let asset = join!(&asset_root, asset_name); let asset_map = { let mut a = asset.clone(); a.as_mut_os_string().push(".map"); a }; let asset = fs::read_to_string(&asset).context(ReadAssetSnafu { path: asset })?; let asset_map = fs::read_to_string(&asset_map).context(ReadAssetMapSnafu { path: asset_map })?; Ok((asset_name, asset, asset_map)) } #[derive(Debug, Snafu)] #[snafu(module)] enum ExtractAssetError { #[snafu(display("Invalid asset regex"))] #[snafu(context(false))] Regex { source: regex::Error }, #[snafu(display("Could not find asset"))] AssetMissing, #[snafu(display("Could not read the asset from `{}`", path.display()))] ReadAsset { source: io::Error, path: PathBuf }, #[snafu(display("Could not read the asset sourcemap from `{}`", path.display()))] ReadAssetMap { source: io::Error, path: PathBuf }, } macro_rules! join { ($base:expr, $($c:expr),+ $(,)?) 
=> {{ let mut base = PathBuf::from($base); $( base.push($c); )* base }}; } use join; fn do_prepare_release(args: PrepareReleaseArgs) -> Result<(), PrepareReleaseError> { use prepare_release_error::*; let PrepareReleaseArgs { tag } = args; do_assets(AssetsArgs { watch: false })?; const ASSET_FILE: &str = "src/html/assets.rs"; const CARGO_TOML_FILE: &str = "Cargo.toml"; const CARGO_LOCK_FILE: &str = "Cargo.lock"; let add_msg = format!("Commit assets for release {tag}"); let update_msg = format!("Release {tag}"); let rm_msg = format!("Remove assets for release {tag}"); git!("add", "--force", ASSET_FILE).context(AssetAddSnafu)?; git!("commit", "--message", add_msg).context(AssetAddCommitSnafu)?; set_version(CARGO_TOML_FILE, &tag)?; cargo!("update", "margo").context(VersionLockUpdateSnafu)?; git!("add", CARGO_TOML_FILE, CARGO_LOCK_FILE).context(VersionAddSnafu)?; git!("commit", "--message", update_msg).context(VersionCommitSnafu)?; git!("tag", tag).context(VersionTagSnafu)?; git!("rm", ASSET_FILE).context(AssetRmSnafu)?; git!("commit", "--message", rm_msg).context(AssetRmCommitSnafu)?; Ok(()) } #[derive(Debug, Snafu)] #[snafu(module)] enum PrepareReleaseError { #[snafu(transparent)] AssetBuild { source: AssetsError }, #[snafu(display("Could not add the asset file to git"))] AssetAdd { source: GitError }, #[snafu(display("Could not commit the asset file addition to git"))] AssetAddCommit { source: GitError }, #[snafu(transparent)] VersionSet { source: SetVersionError }, #[snafu(display("Could not update Cargo.lock"))] VersionLockUpdate { source: CargoError }, #[snafu(display("Could not add Cargo.toml and Cargo.lock to git"))] VersionAdd { source: GitError }, #[snafu(display("Could not commit Cargo.toml and Cargo.lock to git"))] VersionCommit { source: GitError }, #[snafu(display("Could not tag the release commit in git"))] VersionTag { source: GitError }, #[snafu(display("Could not remove the asset file from git"))] AssetRm { source: GitError }, #[snafu(display("Could 
not commit the asset file removal to git"))] AssetRmCommit { source: GitError }, } fn set_version(fname: impl AsRef<Path>, version: &str) -> Result<(), SetVersionError> { use set_version_error::*; let fname = fname.as_ref(); let cargo_toml = fs::read_to_string(fname).context(ReadSnafu)?; let mut cargo_toml: DocumentMut = cargo_toml.parse().context(ParseSnafu)?; *cargo_toml .get_mut("package") .context(PackageSnafu)? .get_mut("version") .context(VersionSnafu)? = Item::Value(version.into()); let cargo_toml = cargo_toml.to_string(); fs::write(fname, cargo_toml).context(WriteSnafu)?; Ok(()) } #[derive(Debug, Snafu)] #[snafu(module)] enum SetVersionError { #[snafu(display("Could not read the file"))] Read { source: io::Error }, #[snafu(display("Could not parse the file"))] Parse { source: toml_edit::TomlError }, #[snafu(display("The file did not contain a package table"))] Package, #[snafu(display("The file did not contain a version field"))] Version, #[snafu(display("Could not write the file"))] Write { source: io::Error }, } macro_rules! pnpm { ($cmd:expr $(, $arg:expr)* $(,)?) => { command!("pnpm", $cmd $(, $arg)*).map_err(PnpmError::from) }; } use pnpm; #[derive(Debug, Snafu)] #[snafu(display("Executing `pnpm` failed"))] #[snafu(context(false))] struct PnpmError { source: ProcessError, } macro_rules! git { ($cmd:expr $(, $arg:expr)* $(,)?) => { command!("git", $cmd $(, $arg)*).map_err(GitError::from) }; } use git; #[derive(Debug, Snafu)] #[snafu(display("Executing `git` failed"))] #[snafu(context(false))] struct GitError { source: ProcessError, } macro_rules! cargo { ($cmd:expr $(, $arg:expr)* $(,)?) => { command!("cargo", $cmd $(, $arg)*).map_err(CargoError::from) }; } use cargo; #[derive(Debug, Snafu)] #[snafu(display("Executing `cargo` failed"))] #[snafu(context(false))] struct CargoError { source: ProcessError, } macro_rules! command { ($cmd:expr $(, $arg:expr)* $(,)?) 
=> { (|| -> Result<(), ProcessError> { use process_error::*; let mut cmd = Command::new($cmd); $( cmd.arg($arg); )* let status = cmd.status() .context(SpawnSnafu)?; ensure!(status.success(), SuccessSnafu); Ok(()) })() }; } use command; #[derive(Debug, Snafu)] #[snafu(module)] enum ProcessError { #[snafu(display("Could not start the process"))] Spawn { source: io::Error }, #[snafu(display("The process did not succeed"))] Success, }
rust
Apache-2.0
c29ba4782272af3bac69425eb3382f07687d63d1
2026-01-04T20:19:37.290944Z
false
integer32llc/margo
https://github.com/integer32llc/margo/blob/c29ba4782272af3bac69425eb3382f07687d63d1/conformance/src/main.rs
conformance/src/main.rs
use axum::{ extract::Request, http::StatusCode, middleware::{self, Next}, response::{IntoResponse, Response}, Router, }; use axum_extra::{ headers::{self, authorization::Basic}, TypedHeader, }; use registry_conformance::{CommandExt, CreatedCrate, Registry, RegistryBuilder}; use snafu::prelude::*; use std::{ env, future::IntoFuture, io, net::SocketAddr, path::{Path, PathBuf}, process::ExitCode, }; use tokio::{net::TcpListener, process::Command, task::JoinHandle}; use tokio_util::sync::CancellationToken; use tower_http::services::ServeDir; #[tokio::main] async fn main() -> Result<ExitCode, BuildError> { if env::var_os("MARGO_BINARY").is_none() { Margo::build().await?; } Ok(registry_conformance::test_conformance::<Margo>(std::env::args()).await) } type BasicAuth = Option<(String, String)>; #[derive(Debug, Default)] pub struct MargoBuilder { webserver_basic_auth: BasicAuth, } impl MargoBuilder { fn enable_basic_auth_(mut self, username: &str, password: &str) -> Self { self.webserver_basic_auth = Some((username.into(), password.into())); self } async fn start_( self, directory: impl Into<PathBuf>, ) -> Result<<Self as RegistryBuilder>::Registry, StartError> { use start_error::*; let Self { webserver_basic_auth, } = self; let auth_required = webserver_basic_auth.is_some(); let directory = directory.into(); let webserver_cancel = CancellationToken::new(); let address = "127.0.0.1:0"; let listener = TcpListener::bind(address) .await .context(BindSnafu { address })?; let webserver_address = listener.local_addr().context(AddressSnafu)?; let serve_files = ServeDir::new(&directory); let auth_middleware = middleware::from_fn(move |hdr, req, next| { let webserver_basic_auth = webserver_basic_auth.clone(); auth(webserver_basic_auth, hdr, req, next) }); let serve_files = Router::new() .fallback_service(serve_files) .layer(auth_middleware); let webserver = axum::serve(listener, serve_files) .with_graceful_shutdown(webserver_cancel.clone().cancelled_owned()) .into_future(); let 
webserver = tokio::spawn(webserver); let this = Margo { directory, webserver_cancel, webserver_address, webserver, }; let mut cmd = this.command(); cmd.arg("init") .args(["--base-url", &format!("http://{webserver_address}")]) .arg("--defaults"); if auth_required { cmd.args(["--auth-required", "true"]); } cmd.arg(&this.directory) .expect_success() .await .context(ExecutionSnafu)?; Ok(this) } } async fn auth( webserver_basic_auth: BasicAuth, auth_header: Option<TypedHeader<headers::Authorization<Basic>>>, req: Request, next: Next, ) -> Result<Response, StatusCode> { if let Some((username, password)) = webserver_basic_auth { let creds_match = auth_header .as_ref() .is_some_and(|auth| auth.username() == username && auth.password() == password); if !creds_match { return Err(StatusCode::UNAUTHORIZED); } } Ok(next.run(req).await.into_response()) } #[derive(Debug, Snafu)] #[snafu(module)] pub enum StartError { #[snafu(display("Could not bind to address {address}"))] Bind { source: std::io::Error, address: String, }, #[snafu(display("Could not get the listening address"))] Address { source: std::io::Error }, #[snafu(display("Could not initialize the registry"))] Execution { source: registry_conformance::CommandError, }, } pub struct Margo { directory: PathBuf, webserver_cancel: CancellationToken, webserver_address: SocketAddr, webserver: JoinHandle<io::Result<()>>, } impl Margo { const EXE_PATH: &'static str = "../target/debug/margo"; async fn build() -> Result<(), BuildError> { use build_error::*; Command::new("cargo") .current_dir("..") .arg("build") .expect_success() .await .map(drop) .context(ExecutionSnafu) } async fn publish_crate_(&mut self, crate_: &CreatedCrate) -> Result<(), PublishError> { use publish_error::*; let package_path = crate_.package().await.context(PackageSnafu)?; self.command() .arg("add") .arg("--registry") .arg(&self.directory) .arg(package_path) .expect_success() .await .context(ExecutionSnafu)?; Ok(()) } async fn remove_crate_(&mut self, crate_: 
&CreatedCrate) -> Result<(), RemoveError> { use remove_error::*; self.command() .arg("rm") .arg("--registry") .arg(&self.directory) .arg(crate_.name()) .args(["--version", crate_.version()]) .expect_success() .await .context(ExecutionSnafu)?; Ok(()) } async fn yank_crate_(&mut self, crate_: &CreatedCrate, yanked: bool) -> Result<(), YankError> { use yank_error::*; let mut cmd = self.command(); cmd.arg("yank") .arg("--registry") .arg(&self.directory) .arg(crate_.name()) .args(["--version", crate_.version()]); if !yanked { cmd.arg("--undo"); } cmd.expect_success().await.context(ExecutionSnafu)?; Ok(()) } async fn shutdown_(self) -> Result<(), ShutdownError> { use shutdown_error::*; self.webserver_cancel.cancel(); self.webserver .await .context(JoinSnafu)? .context(ServeSnafu)?; Ok(()) } fn command(&self) -> Command { let exe_path = env::var_os("MARGO_BINARY").map(PathBuf::from); let exe_path = exe_path .as_deref() .unwrap_or_else(|| Path::new(Self::EXE_PATH)); let mut cmd = Command::new(exe_path); cmd.kill_on_drop(true); cmd } } #[derive(Debug, Snafu)] #[snafu(module)] pub enum BuildError { #[snafu(display("Could not build the registry"))] Execution { source: registry_conformance::CommandError, }, } #[derive(Debug, Snafu)] #[snafu(module)] pub enum YankError { #[snafu(display("Could not yank the crate from the registry"))] Execution { source: registry_conformance::CommandError, }, } #[derive(Debug, Snafu)] #[snafu(module)] pub enum PublishError { #[snafu(display("Could not package the crate"))] Package { source: registry_conformance::PackageError, }, #[snafu(display("Could not add the crate to the registry"))] Execution { source: registry_conformance::CommandError, }, } #[derive(Debug, Snafu)] #[snafu(module)] pub enum RemoveError { #[snafu(display("Could not remove the crate from the registry"))] Execution { source: registry_conformance::CommandError, }, } #[derive(Debug, Snafu)] #[snafu(module)] pub enum ShutdownError { #[snafu(display("The webserver task 
panicked"))] Join { source: tokio::task::JoinError }, #[snafu(display("The webserver had an error"))] Serve { source: std::io::Error }, } impl RegistryBuilder for MargoBuilder { type Registry = Margo; type Error = Error; fn enable_basic_auth(self, username: &str, password: &str) -> Self { self.enable_basic_auth_(username, password) } async fn start(self, directory: impl Into<PathBuf>) -> Result<Self::Registry, Error> { Ok(self.start_(directory).await?) } } impl Registry for Margo { type Builder = MargoBuilder; type Error = Error; async fn registry_url(&self) -> String { format!("sparse+http://{}/", self.webserver_address) } async fn publish_crate(&mut self, crate_: &CreatedCrate) -> Result<(), Error> { Ok(self.publish_crate_(crate_).await?) } async fn remove_crate(&mut self, crate_: &CreatedCrate) -> Result<(), Error> { Ok(self.remove_crate_(crate_).await?) } async fn yank_crate(&mut self, crate_: &CreatedCrate) -> Result<(), Error> { Ok(self.yank_crate_(crate_, true).await?) } async fn unyank_crate(&mut self, crate_: &CreatedCrate) -> Result<(), Error> { Ok(self.yank_crate_(crate_, false).await?) } async fn shutdown(self) -> Result<(), Error> { Ok(self.shutdown_().await?) } } #[derive(Debug, Snafu)] #[snafu(module)] pub enum Error { #[snafu(transparent)] Start { source: StartError }, #[snafu(transparent)] Publish { source: PublishError }, #[snafu(transparent)] Remove { source: RemoveError }, #[snafu(transparent)] Yank { source: YankError }, #[snafu(transparent)] Shutdown { source: ShutdownError }, }
rust
Apache-2.0
c29ba4782272af3bac69425eb3382f07687d63d1
2026-01-04T20:19:37.290944Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/build.rs
build.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use rand::Rng as _; use rand_distr::Distribution; use std::{ collections::BTreeSet, fs::File, io::{BufRead, BufReader, Write}, ops::Bound, path::Path, }; /// Increment to indicate that random_dots.rs generated by an older build.rs must be re-generated. const GEN: usize = 1; fn main() { println!("cargo:rustc-link-arg-benches=-rdynamic"); println!("cargo:rerun-if-changed=build.rs"); let gencheck = format!("// gen-version={GEN}"); // generate sets of random Dots to use in benchmarks. // we generate this file rather than committing it so that the logic for generating it is // checked in somewhere and documented, which also means we can change it if we wish. the // variability of generated dots themselves should (hopefully) not influence the measurements // too much. and even if they do, that's also a valuable signal. let out_dir = std::env::var_os("OUT_DIR").unwrap(); let dest_path = Path::new(&out_dir).join("random_dots.rs"); // don't re-generate if it already exists to ensure benchmarks // of re-builds (e.g., tango) use the same data sets. // but _do_ generate if build.rs has changed its code version. if dest_path.exists() { if BufReader::new(File::open(&dest_path).unwrap()) .lines() .next() .unwrap() .unwrap() == gencheck { eprintln!("existing random_dots.rs is new enough"); return; } else { eprintln!("existing random_dots.rs is outdated"); } } else { eprintln!("no existing random_dots.rs, so generating"); } let mut out = File::create(dest_path).unwrap(); writeln!(out, "{gencheck}").unwrap(); let mut rng = rand::rng(); // NOTE: 2.2 is arbitrarily chosen here as a "quite skewed" distribution, meaning // we'll get nearly all dots near the beginning of the sequence (i.e., consecutive dots), // and only a few sparse dots in the tail (i.e., in the dot cloud). 
let zipf = rand_distr::Zipf::new(TOP as f64, 2.2).unwrap(); const TOP: usize = 8192; const IDS: usize = 8; for i in [1, 2] { let mut all = BTreeSet::new(); for id in 1..=IDS { // higher ids have fewer holes because we sample more for _ in 0..(id * 4 * TOP) { let sample = zipf.sample(&mut rng) as usize; all.insert((id, sample)); } } writeln!(out, "#[allow(dead_code)]").unwrap(); writeln!(out, "pub(crate) const BIG{i}: &[Dot] = &[").unwrap(); for &(id, seq) in &all { writeln!(out, "Dot::mint(Identifier::new({id}, 0), {seq}),").unwrap(); } writeln!(out, "];").unwrap(); let mut in_small = BTreeSet::new(); // small only has new dots from a subset of the IDs, as that's more common // (this goes to 3, not IDS) for id in 1..=3 { // one sender has a fair number of new dots, one has some, one has few let adj = match id { 1 => 1.0, 2 => 0.5, 3 => 0.1, _ => unreachable!(), }; let max = all .range((Bound::Included((id, 1)), Bound::Included((id, usize::MAX)))) .next_back() .map(|(_, seq)| seq) .copied() .unwrap_or(1); for seq in 1..=TOP { if all.contains(&(id, seq)) { // small should contain a few dots (~5) that are in BIG if rng.random_bool(adj * 10.0 / TOP as f64) { in_small.insert((id, seq)); } } else { // some that fill holes (preferring early ones) // NOTE: the * 2.0 in there is because we're iterating through all the // seqs, so the last value will average to 0.5, which would otherwise // effectively halve the n/TOP value. 
if seq < max { if rng.random_bool( adj * (20.0 / TOP as f64) * 2.0 * (1.0 - seq as f64 / max as f64), ) { in_small.insert((id, seq)); } } else { // and a small amount that extend the max // biased towards those close to max if rng.random_bool( adj * (5.0 / TOP as f64) * 2.0 * (1.0 - (seq - max) as f64 / (TOP - max) as f64), ) { in_small.insert((id, seq)); } } } } } writeln!(out, "#[allow(dead_code)]").unwrap(); writeln!(out, "pub(crate) const SMALL{i}: &[Dot] = &[").unwrap(); for &(id, seq) in &in_small { writeln!(out, "Dot::mint(Identifier::new({id}, 0), {seq}),").unwrap(); } writeln!(out, "];").unwrap(); } out.flush().unwrap(); }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/dotstores.rs
src/dotstores.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! # Dot Stores //! //! This module defines the core data structures, known as "dot stores", that underpin the DSON //! (JSON CRDT Using Delta-Mutations For Document Stores) implementation. The concepts and data //! structures defined here are based on the research paper "[DSON: JSON CRDT Using //! Delta-Mutations For Document Stores](dson_paper.txt)". //! //! ## Overview //! //! At the heart of DSON is the idea of a **dot store**, a container for data-type-specific //! information that stores the state of a delta-based CRDT. Each dot store is paired with a //! [`CausalContext`], which tracks the set of observed events (dots) across replicas. This //! combination, encapsulated in the [`CausalDotStore`] struct, forms the basis for building //! CRDTs. //! //! The primary dot stores defined in this module are: //! //! - [`DotFun`]: A map from [`Dot`]s to values, where the set of dots is its keyset. This is used //! to implement simple CRDTs like [`MvReg`](crate::crdts::mvreg::MvReg) (multi-value registers). //! - [`DotMap`]: A map from an arbitrary key type to a `DotStore`, where the computed dots are the //! union of the dots of its values. This is used to implement OR-Maps (Observed-Remove Maps). //! - [`DotFunMap`]: A map from [`Dot`]s to `DotStore`s, combining the properties of `DotFun` and //! `DotMap`. This is used to implement OR-Arrays (Observed-Remove Arrays). //! //! These dot stores are designed to be composable, allowing for the construction of arbitrarily //! nested JSON-like structures. //! //! ## Join Operations //! //! The core of the CRDT logic is the `join` operation, defined in the [`DotStoreJoin`] trait. The //! `join` operation merges the state of two `CausalDotStore`s, resolving conflicts in a //! deterministic way. The exact semantics of the join operation vary depending on the concrete //! dot store type, but the general principle is to keep the most up-to-date values and discard //! 
those that have been causally overwritten. //! //! ## References //! //! The theoretical foundations for the dot stores and their join operations are detailed in //! the DSON paper. In particular, see the following sections: //! //! - **Section 3.3**: Introduces the concept of dot stores and defines `DotFun` and `DotMap`. //! - **Section 4**: Describes the observed-remove semantics used in DSON. //! - **Section 5**: Introduces the `CompDotFun` (here named `DotFunMap`) and the OR-Array //! algorithm. //! //! The original work on delta-based CRDTs can be found in the 2018 paper _Delta state replicated //! data types_ by Paulo Sérgio Almeida, Ali Shoker, and Carlos Baquero. use crate::{ CausalContext, Dot, DsonRandomState, create_map, create_map_with_capacity, sentinel::{DummySentinel, KeySentinel, Sentinel, ValueSentinel, Visit}, }; use smallvec::SmallVec; use std::{borrow::Borrow, collections::HashMap, fmt, hash::Hash, ops::Index}; /// A [`DotStore`] paired with a [`CausalContext`]. /// /// This is the fundamental building block of the DSON CRDT. It combines a `DotStore`, which holds /// the state of a specific data type, with a `CausalContext`, which tracks the set of observed /// events (dots) across replicas. This pairing allows for the implementation of delta-based /// CRDTs, where changes can be calculated and transmitted as deltas rather than entire states. #[derive(Debug, Clone, PartialEq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct CausalDotStore<DS> { /// The data-type-specific information. pub store: DS, /// The causal context, tracking observed events. 
pub context: CausalContext, } impl<'cs, DS> From<&'cs CausalDotStore<DS>> for (&'cs DS, &'cs CausalContext) { fn from(cds: &'cs CausalDotStore<DS>) -> Self { (&cds.store, &cds.context) } } impl<'cs, DS> From<&'cs mut CausalDotStore<DS>> for (&'cs DS, &'cs mut CausalContext) { fn from(cds: &'cs mut CausalDotStore<DS>) -> Self { (&cds.store, &mut cds.context) } } impl<DS> CausalDotStore<DS> where DS: DotStore, { /// Returns true if this is ⊥ (that is, empty). /// /// NOTE: the DSON paper does not explicitly define what a bottom is for a Causal⟨DotStore⟩, but /// it does provide that "For any 𝑋 ∈ Causal⟨DotStore⟩, 𝑋 ⊔ ⊥ = 𝑋", which constrains it to ⊥ = /// ({}, {}), since that is the only value that satisfies that equation and Equation 4 for any /// arbitrary 𝑋. pub fn is_bottom(&self) -> bool { self.store.is_bottom() && self.context.is_empty() } /// Returns a subset-CRDT derived from `self` that allows inflating state at the vector time /// `frontier` to what is in `self`. /// /// Does not include deletions not represented by `self.context - frontier` (that is, deletions of /// already-known store entries); that is left as an exercise for the caller. pub fn subset_for_inflation_from(&self, frontier: &CausalContext) -> CausalDotStore<DS> where DS: Clone + Default, { // our goal here is to produce a CRDT that contains aspects of `self.store` that are _not_ // known to `frontier`. this could be additions (the easy case), but could also be deletes. // since deletes are handled via the _absence_ of state plus the _presence_ of a dot, we // need to carefully construct both the (delta) store and (delta) context here. // // the state isn't too bad: we call subset_for_inflation_from recursively all the way // "down" the CRDT, keeping only entries whose dot is not in frontier, or that are on the // path _to_ such a dot. // // the context is trickier: we can't send all of self.context since we're excluding values // that aren't in context. 
more concretely, consider what happens if (0, 1) => 'a', and the // dot (0, 1) is in both self.context and frontier. we will _not_ include it in the delta // of store (since the node represented by frontier already knows it). if we nevertheless // include (0, 1) in the delta context, the semantics of that is a _deletion_ of (0, 1) // [see DotFun::join], which obviously isn't correct. instead, we produce the delta context // // self.context - frontier + delta_store.dots() // // this indicates that we a) have included anything we know that frontier does not, and b) // acknowledges that some dots are included simply by virtue of being on the path to // new/updated values. let delta_store = self.store.subset_for_inflation_from(frontier); let mut delta_context = &self.context - frontier; // NOTE: it _could_ be that nothing bad happens if we don't add in delta_store.dots(), // but that relies on the join at the other end not getting confused by the presence of a // dot in store but not in context. Feels safer to just join them here anyway. delta_store.add_dots_to(&mut delta_context); // unfortunately, this doesn't capture deletions. remember from above, the way a delete is // represented is the _presence_ of the dot that inserted a value `context`, coupled with // the _absence_ of its value in `store`. // // consider what happens if A and B are fully synchronized, and both hold, say, just an // MVReg with (1, 1) => 'x' as well as the causal context {(1,1)}. now, A deletes 'x'. this // does not generate a dot, so A's causal context is the same. when A and B next // synchronize, A does not know solely from B's causal context ({(1,1)}) that it is missing // the deletion of (1, 1) => 'x'. the store won't include (1,1) [and shouldn't], // self.context - frontier is empty, and so is delta_store.dots() [since delta_store is // bottom]. 
// // even if we _do_ associate a dot with a deletion (e.g., writing the ALIVE field like we // do for maps and arrays), it doesn't solve this problem. A would then generate (1,2) for // the deletion, and would realize B doesn't have (1,2), *but* it won't know that that // implies that the causal context of the delta it sends to be should therefore include // (1,1). it doesn't know the relationship between (1,2) and (1,1). // // we could keep a "graveyard" that holds // // Dot(A, B) ---(deleted)----> Dot(X, Y) // // and then here _also_ add in Dot(X, Y) for any Dot(A, B) not in frontier, but that raises // the question of how to garbage-collect said graveyard. it's also not clear what happens // for types where a deletion actually implies _multiple_ removed dots. // // for now, we leave making the context reflect deletions as an exercise for the caller. CausalDotStore { store: delta_store, context: delta_context, } } } impl<DS> Default for CausalDotStore<DS> where DS: Default, { fn default() -> Self { Self { store: Default::default(), context: Default::default(), } } } impl<DS> CausalDotStore<DS> { /// Constructs a new empty [`CausalDotStore`]. pub fn new() -> Self where DS: Default, { Self::default() } } #[cfg(any(test, feature = "arbitrary"))] use crate::dotstores::recording_sentinel::RecordingSentinel; impl<DS> CausalDotStore<DS> { /// Joins the given [`CausalDotStore`] with this one, and returns the join. /// /// This is a convenience function around [`CausalDotStore::join_with`]. 
pub fn join<S>(mut self, other: Self, sentinel: &mut S) -> Result<CausalDotStore<DS>, S::Error> where S: Sentinel, DS: DotStoreJoin<S> + Default, { self.consume(other, sentinel)?; Ok(CausalDotStore { store: self.store, context: self.context, }) } // variant of join intended for tests, so it is not built for performance (for example, it clones eagerly // internally to make the interface more convenient) and it exposes internal bits (like // `on_dot_change`) #[cfg(any(test, feature = "arbitrary"))] pub fn test_join<S>( &self, other: &Self, on_dot_change: &mut dyn FnMut(DotChange), sentinel: &mut S, ) -> Result<CausalDotStore<DS>, S::Error> where S: Sentinel, DS: DotStoreJoin<S> + DotStoreJoin<RecordingSentinel> + Default + Clone, { let mut this = self.clone(); this.test_join_with_and_track( other.store.clone(), &other.context, on_dot_change, sentinel, )?; Ok(CausalDotStore { store: this.store, context: this.context, }) } #[cfg(any(test, feature = "arbitrary"))] pub fn test_join_with_and_track<S>( &mut self, store: DS, context: &CausalContext, on_dot_change: &mut dyn FnMut(DotChange), sentinel: &mut S, ) -> Result<(), S::Error> where DS: DotStoreJoin<S> + DotStoreJoin<RecordingSentinel> + Clone + Default, S: Sentinel, { #[cfg(debug_assertions)] { // We do a dry_join here first, to ensure that dry-join // and join always result in the same set of calls being // made to Sentinel. This is an invariant that we want to always // hold, so we check it in debug builds for all test cases using this function. 
let mut dry_join_sentinel = RecordingSentinel::new(); let dry_result = <DS as DotStoreJoin<RecordingSentinel>>::dry_join( (&self.store, &self.context), (&store, context), &mut dry_join_sentinel, ) .expect("RecordingSentinel is infallible"); let mut full_run_sentinel = RecordingSentinel::new(); let full_result = DS::join( (self.store.clone(), &self.context), (store.clone(), context), &mut |_| {}, &mut full_run_sentinel, ) .expect("RecordingSentinel is infallible"); assert_eq!( dry_join_sentinel.changes_seen, full_run_sentinel.changes_seen ); assert_eq!(dry_result.is_bottom(), full_result.is_bottom()); } self.join_with_and_track(store, context, on_dot_change, sentinel)?; Ok(()) } /// Joins the given [`CausalDotStore`] into this one. /// /// This is a convenience function around [`CausalDotStore::join_with`]. pub fn consume<S>( &mut self, other: CausalDotStore<DS>, sentinel: &mut S, ) -> Result<(), S::Error> where DS: DotStoreJoin<S> + Default, S: Sentinel, { self.join_with(other.store, &other.context, sentinel) } /// Joins the given [`CausalDotStore`] into this one. /// /// This is a convenience function around [`CausalDotStore::join_with`]. #[cfg(any(test, feature = "arbitrary"))] pub fn test_consume(&mut self, other: CausalDotStore<DS>) where DS: DotStoreJoin<RecordingSentinel> + Clone + Default, { self.test_join_with(other.store, &other.context) } /// Joins or replaces the current [`CausalDotStore`] with the provided one. /// /// If the current value is bottom, it is replaced wholesale, bypassing the /// join. This method does not accept a sentinel as changes cannot always /// be tracked. 
pub fn join_or_replace_with(&mut self, store: DS, context: &CausalContext) where DS: DotStoreJoin<DummySentinel> + Default, { if self.is_bottom() { *self = CausalDotStore { store, context: context.clone(), }; } else { self.join_with(store, context, &mut DummySentinel) .expect("DummySentinel is Infallible"); } } /// Joins the given [`DotStore`]-[`CausalContext`] pair into those in `self`. /// /// Prefer this method when you need to avoid cloning the [`CausalContext`]. pub fn join_with<S>( &mut self, store: DS, context: &CausalContext, sentinel: &mut S, ) -> Result<(), S::Error> where DS: DotStoreJoin<S> + Default, S: Sentinel, { self.join_with_and_track(store, context, &mut |_| (), sentinel) } /// Joins the given [`DotStore`]-[`CausalContext`] pair into those in `self`. /// /// Prefer this method when you need to avoid cloning the [`CausalContext`]. #[cfg(any(test, feature = "arbitrary"))] pub fn test_join_with(&mut self, store: DS, context: &CausalContext) where DS: DotStoreJoin<RecordingSentinel> + Clone + Default, { self.test_join_with_and_track(store, context, &mut |_| (), &mut RecordingSentinel::new()) .expect("RecordingSentinel is infallible"); } fn join_with_and_track<S>( &mut self, store: DS, context: &CausalContext, on_dot_change: &mut dyn FnMut(DotChange), sentinel: &mut S, ) -> Result<(), S::Error> where DS: DotStoreJoin<S> + Default, S: Sentinel, { let old_store = std::mem::take(&mut self.store); self.store = DS::join( (old_store, &self.context), (store, context), on_dot_change, sentinel, )?; self.context.union(context); Ok(()) } } impl<DS> CausalDotStore<DS> { /// Constructs a new [`CausalDotStore`] by applying the given function to the current store. /// /// This method keeps the causal context as-is. pub fn map_store<DS2>(self, m: impl FnOnce(DS) -> DS2) -> CausalDotStore<DS2> { CausalDotStore { store: (m)(self.store), context: self.context, } } /// Constructs a new [`CausalDotStore`] by applying the given function to the current context. 
/// /// This method keeps the store as-is. pub fn map_context(self, m: impl FnOnce(CausalContext) -> CausalContext) -> CausalDotStore<DS> { CausalDotStore { store: self.store, context: (m)(self.context), } } /// Calls a function with a reference to the contained store. /// /// Returns the original [`CausalDotStore`] unchanged. pub fn inspect(self, f: impl FnOnce(&DS)) -> CausalDotStore<DS> { f(&self.store); self } } /// A container for data-type specific information that stores the state of a 𝛿-based CRDT. /// /// This trait defines the common interface for all dot stores. It provides methods for querying /// the dots contained within the store, checking if the store is empty (i.e., ⊥), and creating a /// subset of the store for inflation. pub trait DotStore { /// Queries the set of event identifiers (ie, dots) currently stored in the dot store. /// /// Has a default implementation that creates an empty [`CausalContext`] and invokes /// `add_dots_to`. fn dots(&self) -> CausalContext { let mut cc = CausalContext::default(); self.add_dots_to(&mut cc); cc } /// Add the set of event identifiers (ie, dots) currently stored in the dot store to `other`. /// /// Should not compact the resulting `CausalContext`. fn add_dots_to(&self, other: &mut CausalContext); /// Returns true if this dot store is ⊥ (ie, empty). fn is_bottom(&self) -> bool; /// Returns a subset-CRDT derived from `self` that allows inflating state at the vector time /// `frontier` to what's in `self`. fn subset_for_inflation_from(&self, frontier: &CausalContext) -> Self; } /// An observed change to a dot store. #[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] pub enum DotChange { /// The given dot was added to the store. Add(Dot), /// The given dot was removed from the store. Remove(Dot), } /// The outcome of performing a dry-join. /// /// When doing a dry-join, we don't perform the join completely. 
/// However, we often need to know whether the join would have /// been bottom or not, had it been carried out. This single /// bit of information is significantly cheaper to calculate than /// a full join. #[derive(Debug)] pub struct DryJoinOutput { /// True if the output of the dry-join was ⊥ (bottom). is_bottom: bool, } impl DryJoinOutput { /// Create a join-result representing ⊥ pub fn bottom() -> Self { Self { is_bottom: true } } /// Update self, setting it to 'not bottom' pub fn set_is_not_bottom(&mut self) { self.is_bottom = false; } pub fn new(is_bottom: bool) -> Self { Self { is_bottom } } /// If parameter is false, set self.is_bottom to false. /// Intuitively this is a join of two dry-join results. pub fn union_with(&mut self, other: DryJoinOutput) { if !other.is_bottom { self.is_bottom = false; } } /// Like `join_with`, but returns the result instead. pub fn union(&self, other: Self) -> Self { Self { is_bottom: self.is_bottom && other.is_bottom, } } /// Returns true if this instance represents ⊥ (bottom) pub fn is_bottom(&self) -> bool { self.is_bottom } } /// A trait for dot stores that can be joined. /// /// This trait defines the `join` and `dry_join` operations, which are the core of the CRDT /// logic. The `join` operation merges the state of two dot stores, while `dry_join` simulates a /// join without actually modifying the state, which is useful for validation. pub trait DotStoreJoin<S>: DotStore { /// Computes the join (⊔) between two CausalDotStores. /// /// Note that for efficiency this does not take a [`CausalDotStore`] directly, but instead /// takes owned [`DotStore`]s and a shared reference to the [`CausalContext`] to avoid /// excessive cloning. /// /// Quoth the DSON paper: /// /// > For any 𝑋 ∈ Causal⟨DotStore⟩, 𝑋 ⊔ ⊥ = 𝑋. /// > /// > For two elements 𝑋1, 𝑋2 ∈ Causal⟨DotStore⟩ /// > we say that 𝑋1 < 𝑋2 iff ∃𝑋 ≠ ⊥ ∈ Causal⟨DotStore⟩ such that 𝑋1 ⊔ 𝑋 = 𝑋2. 
/// /// > An example of a ⊥ value is a merge between two elements of the Causal⟨DotFun⟩ /// > semilattice, where the domains are disjoint but all mappings are in the others causal /// > history. Consider for example a write 𝑤1 that precedes a write 𝑤2, i.e., 𝑤1 ≺𝜎 𝑤2, then /// > the dot generated by 𝑤1 is in the causal context of the delta generated by 𝑤2. By the /// > definition of join, the mapping doesn’t “survive” the join, and therefore the old value /// > (written by 𝑤1) is overwritten – it isn’t present in the range of the map after 𝑤2. /// /// The exact semantics of a DotStore's join varies depending on the concrete type used. /// /// # Observing changes /// Join (⊔) operations are commutative, i.e. 𝑋1 ⊔ 𝑋2 = 𝑋2 ⊔ 𝑋1, so the order of arguments /// ds1 and ds2 doesn't matter w.r.t. the final result. However, conventionally we interpret /// ds1 as the current state and ds2 as an incoming delta, so from the perspective of the /// sentinel, changes are applied from ds2 into ds1. The same applies to `on_dot_change`. fn join( ds1: (Self, &CausalContext), ds2: (Self, &CausalContext), on_dot_change: &mut dyn FnMut(DotChange), sentinel: &mut S, ) -> Result<Self, S::Error> where Self: Sized, S: Sentinel; // YADR: 2024-10-10 Implementation of DryJoin // // In the context of ensuring the schema-conformance of deltas, we faced the challenge of // how to efficiently validate such deltas prior to merging them into the canonical root // document. // // We decided for adding a "dry" join that duplicates the join logic but doesn't change any // document state as it runs, and neglected alternatives that would require cloning the root // document, keeping an undo log, or allow a sentinel to exit after applying a subset of the // changes (further details at the end of the YADR). 
// // We did this to achieve minimal performance overhead (cloning), complexity (undo-log), and // surprise factor (partial application) of performing delta validations, accepting the // duplication of code between join and dry-join as well as the maintenance burden this // brings when adding or updating CRDTs. // // We think this is the right trade-off because adding new CRDTs and changing old ones is // uncommon, performance of validation is paramount given its frequency, and debugging // two nearly-identical implementations is likely to be easier than debugging a // join + undo-log combination. // Alternatives that were considered and rejected: // // * Do the regular join, but keep an undo-log. If a validation error is detected, // the delta is undone. This was deemed relatively hard to implement. // * Clone the entire document before the join, then do a regular join. This // does not perform well. Experiments show a cost of ~50ms on fast machines, for reasonably // sized documents (in the 10s of megabytes). // * Do a regular join, but allow the sentinel to veto changes. This was deemed difficult // to implement, and to define the semantics of. It would also result in partial updated, // which is generally undesirable and hard to reason about. // * Implement DryJoin and regular join using the same code. Create abstractions and // use generics as needed to achieve this. A simplified, but functional, prototype using this // approach was written. The complexity was deemed undesirable. /// Simulates a [`DotStoreJoin::join`] without constructing the output of the join. /// /// This simulation allows a sentinel to observe a join without committing its result, /// such as to validate a delta prior to joining it. /// /// Since this method does not have to construct the join output, it does not need to take /// ownership of its parameters (ie, it can be run on shared references to the dot stores). 
/// /// This method returns an indicator determining if the result of the real join would have /// been the bottom type. fn dry_join( ds1: (&Self, &CausalContext), ds2: (&Self, &CausalContext), sentinel: &mut S, ) -> Result<DryJoinOutput, S::Error> where Self: Sized, S: Sentinel; } /// A map from [`Dot`] to `V` whose computed dots is its keyset. /// /// Quoth the DSON paper: /// /// > A join of Causal⟨DotFun⟩ keeps values that exist in both of the mappings and merges their /// > respective values, or that exist in either one of the mappings and are “new” to the other in /// > the sense that they are not in its causal history. /// /// In practice, this means that a join of two [`DotFun`] will keep only up-to-date elements. In /// particular, if instance X1 has observed some [`Dot`] that exists in X2, but that [`Dot`] is not /// present in X1, then that [`Dot`] is _not_ preserved (as it has presumably been removed). #[derive(Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct DotFun<V> { // NOTE: the store is explicitly ordered by dot so that self-healing conflicts arising due // to out-of-order delivery of messages can be dealt with by final consumers by just taking // the last value among the conflicts, thus avoiding the need to access the dots directly. This // implicit resolution strategy works as long as the entry is only ever mutated by a single // `Identifier`, as in that case it is guaranteed that later/higher dots will override their // predecessors once all dots have eventually been observed. state: SmallVec<[(Dot, V); 1]>, } impl<V: fmt::Debug> fmt::Debug for DotFun<V> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_map().entries(self.iter()).finish() } } // manual impl because auto-derive'd `Clone` requires `V: Clone`. impl<V> Default for DotFun<V> { fn default() -> Self { Self { state: Default::default(), } } } /// An iterator over the values of a [`DotFun`]. 
pub struct DotFunValueIter<'df, V> { it: std::slice::Iter<'df, (Dot, V)>, } impl<'df, V> Iterator for DotFunValueIter<'df, V> { type Item = &'df V; fn next(&mut self) -> Option<Self::Item> { self.it.next().map(|(_, v)| v) } fn size_hint(&self) -> (usize, Option<usize>) { self.it.size_hint() } fn count(self) -> usize where Self: Sized, { self.it.count() } fn last(self) -> Option<Self::Item> where Self: Sized, { self.it.last().map(|(_, v)| v) } fn nth(&mut self, n: usize) -> Option<Self::Item> { self.it.nth(n).map(|(_, v)| v) } } impl<V> ExactSizeIterator for DotFunValueIter<'_, V> {} impl<V> Clone for DotFunValueIter<'_, V> { fn clone(&self) -> Self { Self { it: self.it.clone(), } } } impl<V> fmt::Debug for DotFunValueIter<'_, V> where V: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.it.fmt(f) } } impl<V> DotFun<V> { /// Constructs a [`DotFun`] with the given initial capacity. pub fn with_capacity(capacity: usize) -> Self { Self { state: SmallVec::with_capacity(capacity), } } /// Produces an iterator over the map's keys and values. pub fn iter(&self) -> impl ExactSizeIterator<Item = (Dot, &V)> { self.state.iter().map(|(k, v)| (*k, v)) } /// Produces an iterator over the map's keys. pub fn keys(&self) -> impl ExactSizeIterator<Item = Dot> + '_ { self.iter().map(|(k, _)| k) } /// Produces an iterator over the map's values. pub fn values(&self) -> DotFunValueIter<'_, V> { DotFunValueIter { it: self.state.iter(), } } /// Returns the number of keys in the map. pub fn len(&self) -> usize { self.state.len() } /// Returns true if the map is empty. pub fn is_empty(&self) -> bool { self.state.is_empty() } fn get_index(&self, dot: &Dot) -> Option<usize> { self.state .as_slice() .binary_search_by_key(dot, |(k, _)| *k) .ok() } /// Retrieves the associated value, if any, for the given [`Dot`]. 
pub fn get(&self, dot: &Dot) -> Option<&V> { self.get_index(dot).map(|idx| &self.state[idx].1) } /// Retrieves a mutable reference to the associated value, if any, for the given [`Dot`]. pub fn get_mut(&mut self, dot: &Dot) -> Option<&mut V> { self.get_index(dot).map(|idx| &mut self.state[idx].1) } /// Returns `true` if the given [`Dot`] has a value in this map. pub fn has(&self, dot: &Dot) -> bool { self.get_index(dot).is_some() } /// Associates the value with the given [`Dot`]. /// /// Returns the previous value if any. pub fn set(&mut self, dot: Dot, value: V) -> Option<V> { if let Some(v) = self.get_mut(&dot) { Some(std::mem::replace(v, value)) } else { let idx = self.state.partition_point(|(d, _)| *d < dot); self.state.insert(idx, (dot, value)); None } } /// Removes and returns the value associated with a [`Dot`], if the dot exists. pub fn remove(&mut self, dot: &Dot) -> Option<V> { if let Some(idx) = self.get_index(dot) { // as tempting as it may be, we shouldn't use swap_remove here as we // want to keep the list sorted Some(self.state.remove(idx).1) } else { None } } /// Retains only the values for which a predicate is true. 
pub fn retain(&mut self, mut f: impl FnMut(&Dot, &mut V) -> bool) { self.state.retain(|(k, v)| f(k, v)) } } impl<V> DotStore for DotFun<V> where V: PartialEq + fmt::Debug + Clone, { fn add_dots_to(&self, other: &mut CausalContext) { other.insert_dots(self.keys()); } fn is_bottom(&self) -> bool { self.is_empty() } fn subset_for_inflation_from(&self, frontier: &CausalContext) -> Self { Self { state: self .state .iter() .filter(|(dot, _)| !frontier.dot_in(*dot)) .map(|(dot, v)| (*dot, v.clone())) .collect(), } } } impl<V, S> DotStoreJoin<S> for DotFun<V> where S: ValueSentinel<V>, V: PartialEq + fmt::Debug + Clone, { /// Formally (Equation 4): /// ```text /// > (𝑚, 𝑐) ⊔ (𝑚′, 𝑐′) = /// > ( /// > {𝑑 ↦ 𝑚[𝑑] ⊔ 𝑚′ [𝑑] | 𝑑 ∈ dom 𝑚 ∩ dom 𝑚′} /// > ∪ {(𝑑, 𝑣) ∈ 𝑚 | 𝑑 ∉ 𝑐′} /// > ∪ {(𝑑, 𝑣) ∈ 𝑚′ | 𝑑 ∉ 𝑐} /// > , 𝑐 ∪ 𝑐′ /// > ) /// ``` /// /// Informally: /// - for dots in both stores, join the values /// - for dots in store 1 that haven't been observed by store 2, keep the value /// - for dots in store 2 that haven't been observed by store 1, keep the value /// - don't keep other dots /// - the resulting causal context is the union of the provided causal contexts fn join( (m1, cc1): (Self, &CausalContext), (mut m2, cc2): (Self, &CausalContext), on_dot_change: &mut dyn FnMut(DotChange), sentinel: &mut S, ) -> Result<Self, S::Error> where S: Sentinel, { // NOTE! When making changes to this method, consider if corresponding // changes need to be done to ::dry_join as well! let mut res_m = Self::with_capacity(m1.len().max(m2.len())); for (dot, v1) in m1.state { if let Some(v2) = m2.remove(&dot) { // dots are assumed to be unique, so there's no need to join these as they must by // implication be identical. if v1 != v2 {
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
true
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/lib.rs
src/lib.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! # DSON: A Delta-State CRDT for JSON-like Data Structures //! //! This crate provides a Rust implementation of **DSON**, a space-efficient, //! delta-state Conflict-Free Replicated Datatype (CRDT) for JSON-like data structures. //! It is based on the research paper ["DSON: JSON CRDT Using Delta-Mutations For Document Stores"][paper] //! and inspired by the original author's [JavaScript implementation][js-impl]. //! //! The primary goal of this library is to enable robust, and efficient //! multi-writer collaboration in extremely constrained environments (high //! latency and low bandwidth; opportunistic networking). //! //! Unlike other CRDT libraries that expose a single "Document" type, DSON provides a set of //! composable primitives. This allows you to build the exact data structure you need. The most //! common top-level structure is an [`OrMap`], which can contain other CRDTs, enabling nested, //! JSON-like objects. The entire state is typically wrapped in a [`CausalDotStore`], which //! tracks the causal history. //! //! [paper]: https://dl.acm.org/doi/10.14778/3510397.3510403 //! [oppnet]: https://hal.science/hal-03405138/document "Frédéric Guidec, Yves Mahéo, Camille Noûs. Delta-State-Based Synchronization of CRDTs in Opportunistic Networks. In 2021 IEEE 46th Conference on Local Computer Networks (LCN). doi:10.1109/LCN52139.2021.9524978" //! [js-impl]: https://github.com/crdt-ibm-research/json-delta-crdt //! //! ## Core Concepts //! //! DSON provides three fundamental, composable CRDTs: //! //! - [`OrMap`]: An **Observed-Remove Map**, mapping arbitrary keys to other CRDT values. //! - [`OrArray`]: An **Observed-Remove Array**, providing a list-like structure. //! - [`MvReg`]: A **Multi-Value Register**, for storing primitive values. When //! concurrent writes occur, the register holds all conflicting values. This is //! the only CRDT in this library that can represent value conflicts. //! //! 
These primitives can be nested to create arbitrarily complex data structures, such as a map //! containing an array of other maps. //! //! All modifications produce a **delta**. Instead of sending the entire state after each //! change, only this small delta needs to be transmitted to other replicas. //! //! ## Observed-Remove Semantics //! //! DSON uses **Observed-Remove (OR)** semantics for its collections. This means //! an element can only be removed if its addition has been observed. If an //! element is updated concurrently with its removal, the update "wins," and the //! element is preserved. OR-semantics are intuitive, and this is often the //! desired behavior. //! Consider a collaborative shopping list: //! //! 1. **Initial State**: Both Alice and Bob see `["apples", "bananas"]`. //! 2. **Alice's Action**: Alice updates "bananas" to "blueberries". //! 3. **Bob's Action**: Concurrently, Bob removes "bananas". //! //! With OR-semantics, the final list will be `["apples", "blueberries"]`. Bob's removal //! is overridden by Alice's concurrent update because the update implies the continued //! existence of the item. //! //! DSON can be extended with special CRDTs providing different semantics //! for specific use cases though. //! //! ## Causal CRDTs and Tombstone-Free Removals //! //! DSON is a **causal** CRDT, meaning it uses causal history to resolve conflicts. //! This history is tracked in a [`CausalContext`], which contains a set of "dots"—unique //! identifiers for every operation. //! //! ### Dots //! //! A **dot** is a globally unique identifier for an operation (for example, adding or updating a //! value). It is the fundamental unit for tracking causality. //! //! A [`Dot`] is a tuple `(Identifier, Sequence)`: //! //! - **[`Identifier`]**: A unique ID for the actor (a specific application instance on a //! specific node) that performed the operation. It is composed of a `NodeId` and an //! `ApplicationId`. 
This structure allows multiple applications on the same machine to //! collaborate without their histories conflicting. //! - **`Sequence`**: A monotonically increasing number (effectively a Lamport timestamp) //! that is unique to that actor. //! //! When a replica makes a change, it generates a new dot. This dot is broadcast to other //! replicas along with the **delta** describing the change. //! //! The collection of all dots a replica has observed forms its [`CausalContext`]. This //! context represents the replica's knowledge of the document's history. By comparing its //! local `CausalContext` with the context from a received delta, a replica can determine //! which operations are new, which are concurrent, and which have already been seen. This //! allows DSON to merge changes correctly and guarantee convergence. //! //! A key advantage of this model is the elimination of **tombstones**. In many other //! CRDTs, when an item is deleted, a "tombstone" marker is left behind to signify //! its removal. These tombstones are never garbage-collected and can cause unbounded //! metadata growth in long-lived documents. //! //! DSON avoids this growth by tracking which operations are "live". A removal is simply the //! absence of an operation's dot from the causal context. When replicas sync, they //! can determine which items have been deleted by comparing their causal contexts, //! without needing explicit tombstone markers. This ensures that the metadata size //! remains proportional to the size of the live data, not the entire history of //! operations. //! //! ## Scope of this Crate //! //! This crate provides the core data structures and algorithms for DSON. It is //! responsible for generating deltas from mutations and merging them to ensure //! eventual consistency. It is up to you to build your document structure by //! composing the provided CRDT primitives, most commonly by wrapping an [`OrMap`] //! in a [`CausalDotStore`]. //! //! 
Note that this is a low-level library. You will likely want to build a //! typed abstraction layer on top of `dson` rather than use it directly in your //! application code. //! //! **It does not include any networking protocols.** //! //! You are responsible for implementing the transport layer to broadcast deltas //! to other replicas. The correctness of this library, particularly its //! **causal consistency** guarantees, relies on the transport layer delivering //! deltas in an order that respects the causal history of events. This is typically //! achieved with an anti-entropy algorithm that exchanges deltas and their //! causal metadata ([`CausalContext`]). //! //! ## Getting Started: A Simple Conflict //! //! This example demonstrates how two users (Alice and Bob) concurrently edit the same //! data, creating a conflict that DSON resolves gracefully using the transaction API. //! //! ```rust //! use dson::{ //! crdts::{mvreg::MvRegValue, snapshot::ToValue}, //! CausalDotStore, Identifier, OrMap, //! }; //! //! // 1. SETUP: TWO REPLICAS //! // Create two replicas, Alice and Bob, each with a unique ID. //! let alice_id = Identifier::new(0, 0); //! let mut alice_store = CausalDotStore::<OrMap<String>>::default(); //! //! let bob_id = Identifier::new(1, 0); //! let mut bob_store = CausalDotStore::<OrMap<String>>::default(); //! //! // 2. INITIAL STATE //! // Alice creates an initial value using the transaction API. //! let key = "document".to_string(); //! let delta_from_alice = { //! let mut tx = alice_store.transact(alice_id); //! tx.write_register(&key, MvRegValue::String("initial value".to_string())); //! tx.commit() //! }; //! //! // 3. SYNC //! // Bob receives Alice's initial change. //! bob_store.join_or_replace_with(delta_from_alice.0.store, &delta_from_alice.0.context); //! assert_eq!(alice_store, bob_store); //! //! // 4. CONCURRENT EDITS //! // Now Alice and Bob make changes without syncing. //! //! // Alice updates the value to "from Alice". //! 
let delta_alice_edit = { //! let mut tx = alice_store.transact(alice_id); //! tx.write_register(&key, MvRegValue::String("from Alice".to_string())); //! tx.commit() //! }; //! //! // Concurrently, Bob updates the value to "from Bob". //! let delta_bob_edit = { //! let mut tx = bob_store.transact(bob_id); //! tx.write_register(&key, MvRegValue::String("from Bob".to_string())); //! tx.commit() //! }; //! //! // 5. MERGE //! // The replicas exchange their changes. //! alice_store.join_or_replace_with(delta_bob_edit.0.store, &delta_bob_edit.0.context); //! bob_store.join_or_replace_with(delta_alice_edit.0.store, &delta_alice_edit.0.context); //! //! // After merging, both stores are identical. //! assert_eq!(alice_store, bob_store); //! //! // 6. VERIFY CONFLICT //! // The concurrent writes are preserved as a conflict in the register. //! // The transaction API exposes this through the CrdtValue enum. //! use dson::transaction::CrdtValue; //! //! let tx = alice_store.transact(alice_id); //! match tx.get(&key) { //! Some(CrdtValue::Register(reg)) => { //! // Read all concurrent values //! let values: Vec<_> = reg.values().into_iter().collect(); //! assert_eq!(values.len(), 2); //! assert!(values.contains(&&MvRegValue::String("from Alice".to_string()))); //! assert!(values.contains(&&MvRegValue::String("from Bob".to_string()))); //! } //! _ => panic!("Expected register with conflict"), //! } //! ``` //! //! For more examples of the transaction API, including nested structures and performance //! considerations, see the [`transaction`] module documentation. //! //! ## Advanced Topics //! //! ### The Extension System //! //! DSON includes an extension system that allows developers to define custom CRDTs by //! implementing the [`ExtensionType`] trait. This is for building domain-specific data //! structures that go beyond the standard JSON-like primitives. //! //! By implementing the [`ExtensionType`] trait, you define how your custom type should be //! 
serialized, deserialized, and merged. The system handles conflict resolution based on //! the rules you define. //! //! This can be used to implement custom data structures like counters, text objects, or //! more efficient state representation. //! //! ### Validation and Observation //! //! DSON provides a [`Sentinel`](crate::sentinel::Sentinel) trait that allows you to observe or //! validate changes as they are applied during a merge. This can be used for implementing //! authorization, logging, or triggering side effects. //! //! ## Network and Consistency //! //! DSON's delta-based approach minimizes the amount of data that needs to be transmitted //! between replicas, making it efficient for low-bandwidth or high-latency networks. //! //! However, much of the complexity of using DSON in practice lies in the correct design and //! implementation of the gossip protocol used to exchange deltas between replicas. An //! efficient gossip protocol is not trivial to implement. For guidance, refer to the //! research on [opportunistic networking (oppnet)][oppnet]. //! //! It is also important to understand that DSON's causal consistency guarantees are provided on //! a per-register basis. This means that while individual values are guaranteed to be causally //! consistent, the relationships between different values are not. This can lead to very //! unintuitive behavior. //! For example, if you have two registers, `x` and `y`, you write to `x` and then to `y`, //! another replica might see the write to `y` before the write to `x`. //! //! ## License //! //! This project is licensed under either of //! //! - Apache License, Version 2.0, ([LICENSE-APACHE](LICENSE-APACHE) or http://www.apache.org/licenses/LICENSE-2.0) //! - MIT license ([LICENSE-MIT](LICENSE-MIT) or http://opensource.org/licenses/MIT) //! //! at your option. //! //! ## Features //! //! - `json`: Enables serialization and deserialization of DSON documents to and from //! `serde_json::Value`. 
This feature is enabled by default. //! - `serde`: Provides `serde` support for all CRDT types. //! - `arbitrary`: Implements `quickcheck::Arbitrary` for CRDT types, useful for property-based testing. //! - `chrono`: Enables `chrono` support for `Timestamp`. This feature is enabled by default. //! - `ulid`: Enables registers to hold ulids. This feature is enabled by default. #[cfg(test)] #[macro_use(quickcheck)] extern crate quickcheck_macros; use ahash::RandomState; use std::{ fmt, hash::BuildHasher, ops::BitAnd, sync::atomic::{AtomicBool, Ordering}, }; // Use a constant seed for hashing to make performance benchmarks have less variance. pub(crate) const DETERMINISTIC_HASHER: RandomState = RandomState::with_seeds(48, 1516, 23, 42); pub mod causal_context; pub use causal_context::{ CausalContext, Dot, Identifier, MAX_APPLICATION_ID, NodeId, Priority, ROOT_APP_ID, }; mod dotstores; pub use dotstores::{ CausalDotStore, DotChange, DotFun, DotFunMap, DotFunValueIter, DotMap, DotStore, DotStoreJoin, DryJoinOutput, }; pub mod crdts; pub use crdts::{mvreg::MvReg, orarray::OrArray, ormap::OrMap}; pub mod api; /// Transaction-based API for ergonomic CRDT mutations. /// /// See [`transaction`] module documentation for details and examples. pub mod transaction; pub use transaction::Delta; #[cfg(feature = "chrono")] pub mod datetime_literal; pub mod either; #[cfg(feature = "json")] mod json; /// Macros usable for tests and initialization pub mod macros; pub mod sentinel; // re-export for the datetime-literal macro #[cfg(feature = "chrono")] pub use chrono; // for [``] auto-linking #[cfg(doc)] use crdts::TypeVariantValue; static ENABLE_DETERMINISM: AtomicBool = AtomicBool::new(false); /// Makes all data structures behave deterministically. /// /// This should only be enabled for testing, as it increases the odds of DoS /// scenarios. #[doc(hidden)] pub fn enable_determinism() { ENABLE_DETERMINISM.store(true, Ordering::Release); } /// Checks if determinism is enabled. 
/// /// Should be used internally and for testing. #[doc(hidden)] pub fn determinism_enabled() -> bool { ENABLE_DETERMINISM.load(Ordering::Acquire) } /// Create a random state for a hashmap. /// If `enable_determinism` has been used, this will return a deterministic /// decidedly non-random RandomState, useful in tests. #[inline] fn make_random_state() -> RandomState { if determinism_enabled() { DETERMINISTIC_HASHER } else { // Create an instance of the standard ahash random state. // This will be random, and will not be the same for any two runs. RandomState::new() } } fn create_map<K, V>() -> std::collections::HashMap<K, V, DsonRandomState> { std::collections::HashMap::with_hasher(DsonRandomState::default()) } fn create_map_with_capacity<K, V>( capacity: usize, ) -> std::collections::HashMap<K, V, DsonRandomState> { std::collections::HashMap::with_capacity_and_hasher(capacity, DsonRandomState::default()) } /// This is a small wrapper around the standard RandomState. /// This allows us to easily switch to a non-random RandomState for use in tests. #[derive(Clone)] pub struct DsonRandomState { inner: RandomState, } // Implement default, falling back on regular ahash::RandomState except // when 'enable_determinism' has been called, in which case a static // only-for-test RandomState is used. impl Default for DsonRandomState { #[inline] fn default() -> Self { Self { inner: make_random_state(), } } } // We implement BuildHasher for DsonRandomState, but all we do is delegate to // the wrapped 'inner' RandomState. // // This construct allows us to easily use a deterministic RandomState (i.e, not random :-) ), // for tests. // // Since DsonRandomState implements default, the user doesn't have to do anything more than // specialize their hashmap using DsonRandomState instead of RandomState. 
impl BuildHasher for DsonRandomState { type Hasher = <RandomState as BuildHasher>::Hasher; #[inline] fn build_hasher(&self) -> Self::Hasher { self.inner.build_hasher() } } /// A type that extends [`TypeVariantValue`] and friends with additional value types. /// /// If you are looking for an implementor of this trait to stick with the standard DSON/JSON types, /// use [`crdts::NoExtensionTypes`]. /// /// The compiler should guide you towards all the various other traits and types you need in order /// to satisfy this trait once you add an impl of it. /// /// In terms of mental model, think of the type that directly implements this trait as a direct /// analogue of [`TypeVariantValue`]. That is, it should generally be a struct with one `Option` /// field for each possible kind of custom value type. It needs to be a struct, not an enum, so /// that it can represent conflicts in type changes (for example, one writer sets a value to custom kind A /// and another sets it to custom kind B concurrently). [`ExtensionType::Value`] is used in /// situations where it is known that only a single kind is held. /// [`ExtensionType::coerce_to_value_ref`] is the main way in which such type conflicts are /// resolved. /// /// The sub-types ("kinds") of a custom extension type must all be CRDTs, which in turn makes the /// implementing type also a CRDT assuming it follows the directions above. This is represented by /// the requirement that both `Self` and `ExtensionType::Value` implement [`DotStore`]. /// /// Implementors of this trait are generally used wherever `<Custom>` or `<C>` appears. pub trait ExtensionType: DotStore + Default { /// Represents the kind of the underlying type without holding any data. /// /// This is the extension equivalent of [`crdts::ValueType`], and will likely be a simple /// data-less enum. type ValueKind: Copy + fmt::Debug; /// Type that holds a known, single kind of this type. 
/// /// This is the extension equivalent of [`crdts::Value`], and will likely be an enum where each /// variant holds one of the field types of `Self`. /// /// Since each sub-type should be a CRDT, this type should trivially implement [`DotStore`] by /// forwarding to the [`DotStore`] implementation of the contained sub-type. /// /// Since `Self` is expected to be able to hold all sub-types (potentially more than one at a /// time), this type should be trivial to turn into `Self`. type Value: fmt::Debug + Clone + PartialEq + DotStore + Into<Self>; /// Type that holds a reference to a known, single kind of this type. /// /// This is the extension equivalent of [`crdts::ValueRef`], and will likely be an enum where /// each variant holds a `&` to one of the field types of `Self` (as indicated by the /// `From<&Self::Value>` requirement). /// /// This type is generally used to represent a view into sub-tree of a DSON document. That /// sub-tree is then read using [`crdts::snapshot::ToValue`]. /// /// Since this type is required to implement `Copy` (it is supposed to just be a reference /// type), it is expected to directly implement [`Into`] for [`ExtensionType::ValueKind`] as /// opposed to going via a `&self` method. /// /// The requirement of `Into<Self::Value>` may seem odd, but serves as a replacement for /// [`Clone`]. We can't use `Clone` since `Clone` is "special" when it comes to `&` -- the /// compiler knows that when you call `Clone` on a `&T`, you want a `T` back, but it wouldn't /// be as smart for `ValueRef`. type ValueRef<'doc>: Copy + fmt::Debug + From<&'doc Self::Value> + Into<Self::Value> + crdts::snapshot::ToValue + Into<Self::ValueKind> where Self: 'doc; /// Coerces the potentially type-conflicted value in `self` into a single-typed /// [`Self::ValueRef`]. /// /// This is an inherently lossy operation -- if a type conflict exists in `self`, this has to /// pick which type should be exposed when the document is read. 
This is required since the /// types in [`crdts::snapshot`] cannot represent type conflicts, only value conflicts. /// /// This is the extension equivalent of [`TypeVariantValue::coerce_to_value_ref`], and will /// generally be an `if-let` chain that returns a [`Self::ValueRef`] for the "first" sub-type /// of `self` that is set. The ordering of the fields checked in the chain dictates the /// inference-precedence for coercion in type conflicts. fn coerce_to_value_ref(&self) -> Self::ValueRef<'_>; /// Gives a short name to describe a given custom value type. /// /// Called by [`crdts::Value::type_name`] and [`crdts::ValueRef::type_name`]. fn type_name(value: &Self::ValueRef<'_>) -> &'static str; /// Get the bottom value of this type fn bottom() -> Self; } // NOTE: three arguments all of the same type -- big nope to have them be regular fn args. pub struct ComputeDeletionsArg<'a> { /// Should be the causal context (ie, `.context`) of the more up to date `CausalDotStore`. pub known_dots: &'a CausalContext, /// Should be `store.dots()` of the more up to date `CausalDotStore`. pub live_dots: &'a CausalContext, /// Should be `store.dots()` of the `CausalDotStore` that may be missing deletes. pub ignorant: &'a CausalContext, } /// Returns dots that `known_dots` has deleted (by virtue of not being in `live_dots`) that /// are still present in `ignorant`. /// /// Conceptually computes `(known_dots - live_dots) & ignorant`. pub fn compute_deletions_unknown_to( ComputeDeletionsArg { known_dots, live_dots, ignorant, }: ComputeDeletionsArg, ) -> CausalContext { // conceptually, this is: // // let deletes_ever = known_dots - live_dots; // let relevant_deletes = deletes_ever & ignorant; // // however, deletes_ever ends up quite large, as it holds all deletes ever, which is // wasteful since most of those dots then go away in the following set-intersection. 
// we can use set theory to our advantage here[1], which states that (with \ denoting // set subtraction): // // (L \ M) ∩ R = (L ∩ R) \ (M ∩ R) // = (L ∩ R) \ M // = L ∩ (R \ M) // // with // // L = known_dots // M = live_dots // R = ignorant // // [1]: https://en.wikipedia.org/wiki/List_of_set_identities_and_relations#(L\M)_%E2%81%8E_R // // many of these are significantly cheaper to compute than the original (both in memory // and compute), especially when we take into account that intersection and subtraction // are both O(left operand size). in particular, since ∩ is commutative, we can compute: let only_in_ignorant = ignorant - live_dots; only_in_ignorant.bitand(known_dots) // the first part will be O(.store.dots()), and should result in a very small set. the // second part iterates only over that small set, which should be cheap. at no point do // we materialize a big set. its worth noting that all the sets involved here _should_ // already be fully compacted, but if that weren't the case we'd want compacted sets to // be on the left-hand side. }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts.rs
src/crdts.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! # Composable CRDTs for JSON-like Data //! //! This module provides a set of composable, conflict-free replicated data types (CRDTs) //! that can be used to build complex, JSON-like data structures. These CRDTs are the //! building blocks of the DSON library and are designed to be nested //! to create arbitrarily complex documents. //! //! ## Core CRDTs //! //! The fundamental CRDTs provided are: //! //! - **[`OrMap`]**: An **Observed-Remove Map**, which maps arbitrary keys to other CRDT //! values. It allows for the creation of nested objects. //! //! - **[`OrArray`]**: An **Observed-Remove Array**, which provides a list-like structure //! that can hold other CRDTs. //! //! - **[`MvReg`]**: A **Multi-Value Register**, used for storing primitive values. When //! concurrent writes occur, the register holds all conflicting values, allowing the //! application to resolve them. //! //! ## Type-Safe Composition and Extensibility //! //! The CRDTs in this module are designed to be composable. The //! [`TypeVariantValue`] can hold any of the core CRDTs, as well as custom types //! defined through the [`ExtensionType`] trait. //! //! ### Type Conflicts //! //! [`TypeVariantValue`]s can also represent **type conflicts**. //! If one replica updates a field to be a map while another concurrently updates it to //! be an array, the [`TypeVariantValue`] will hold both the map and the array. //! This preserves all concurrent updates. 
use self::{mvreg::MvRegValue, orarray::Uid, snapshot::ToValue}; use crate::{ CausalContext, DotStoreJoin, ExtensionType, MvReg, OrArray, OrMap, dotstores::{DotChange, DotStore, DryJoinOutput}, either::Either, sentinel::{KeySentinel, Sentinel, TypeSentinel, ValueSentinel, Visit}, }; use std::{fmt, hash::Hash}; /* * YADR: 2024-05-06 Removal of `.alive` tracking * * In the context of DSON's ability to represent empty collections (maps and arrays), we faced * a question of whether to accept the significant number of extra updates supporting such * collections cause to _all_ map and array operations due to the requisite write of the `.alive` * field. * * We decided for removing the ability to represent empty collections by removing all use of the * `.alive` field, and neglected keeping the DSON implementation in line with the original paper * (which does support such collections). * * We did this to achieve (significantly) smaller deltas when updating fields in deeply nested * documents, specifically by avoiding the extra `DotFun`s that need to be transmitted for the * `.alive` of each nesting level, accepting the inability to distinguish between an unset * array/map and a set-but-empty one. * * We think this is the right trade-off because in practice we suspect most users of this crate * will use some kind of schema mechanism (even if it's just `serde_json`) that will be able to * interpret missing collection types as empty ones. * * For some added context, the original OrArray and OrMap implementations carry an `.alive` field * to distinguish an empty array from an undefined array. This field must be updated on every * insert/apply to maintain observe-removed semantics; if `.alive` wasn't written to, then a * concurrent unsetting of the array would lead to `.alive` being unset, which would in turn lead * to the array being in an inconsistent state (has elements but not `.alive`). 
Unfortunately, that * extra update for each OrArray or OrMap operation requires sending extra dots and bools with * _every_ update, one for each level of nesting, which adds significant overhead to even trivial * updates if they are deeply nested in a document. * * There was a brief discussion about the need to update this field with the original DSON paper * authors over email on 2023-08-25; they wrote: * * > This is intentional so the alive fields exists passed a concurrent delete. The field * > is a MVREG, so if an inner insert is concurrent with a delete and we don't update the * > alive field, then it gets deleted. If we then delete the value they we may delete the * > array rather than leaving it as empty. There may be an optimization that allows for * > forgoing this. * * We have not found such an optimization, and so have decided to opt for the weakened semantics * instead. * * It's worth noting that, prior to its removal, this crate's code had some bug fixes for the * handling of `.alive` compared to the research code published by the original authors. These are * still represented by NOTE comments at the time of writing, but interested parties may want to * inspect the commit prior to this YADR's introduction to also see the relevant code. */ pub mod mvreg; pub mod orarray; pub mod ormap; pub mod snapshot; // TODO: should we also provide more handy register types like counters? #[cfg(any(test, feature = "arbitrary"))] mod test_util; /// Indicator that only the basic DSON types should be supported. /// /// For use as the type value of the `<Custom>` type parameter to many of DSON's types. #[derive(Debug, Default, Clone, Copy, Hash, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct NoExtensionTypes; /// Always-uninhabited instance of [`NoExtensionTypes`]. 
#[derive(Debug, Clone, Copy, Hash, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] // TODO: potentially replace with ! when https://github.com/rust-lang/rust/issues/35121 lands. pub enum NoExtensionTypesType {} #[derive(Clone, PartialEq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] #[cfg_attr( feature = "serde", serde(bound = " Custom: ::serde::Serialize, for<'dea> Custom: ::serde::Deserialize<'dea>, <Custom as ExtensionType>::Value: ::serde::Serialize, for<'deb> <Custom as ExtensionType>::Value: ::serde::Deserialize<'deb>, ") )] pub enum Value<Custom> where Custom: ExtensionType, { Map(OrMap<String, Custom>), Array(OrArray<Custom>), Register(MvReg), Custom(<Custom as ExtensionType>::Value), } impl<C> fmt::Debug for Value<C> where C: fmt::Debug + ExtensionType, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Value::Map(m) => f.debug_tuple("Value::Map").field(m).finish(), Value::Array(a) => f.debug_tuple("Value::Array").field(a).finish(), Value::Register(r) => f.debug_tuple("Value::Register").field(r).finish(), Value::Custom(c) => f.debug_tuple("Value::Custom").field(c).finish(), } } } impl<C> Value<C> where C: ExtensionType, { fn is_bottom(&self) -> bool { match self { Value::Map(m) => m.is_bottom(), Value::Array(a) => a.is_bottom(), Value::Register(r) => r.is_bottom(), Value::Custom(c) => c.is_bottom(), } } #[cfg(any(test, feature = "arbitrary"))] fn dots(&self) -> CausalContext { match self { Value::Map(m) => m.dots(), Value::Array(a) => a.dots(), Value::Register(r) => r.dots(), Value::Custom(c) => c.dots(), } } pub fn type_name(&self) -> &'static str { match self { Self::Map(_) => "Map", Self::Array(_) => "Array", Self::Register(_) => "Register", Self::Custom(c) => C::type_name(&C::ValueRef::from(c)), } } } pub enum ValueRef<'a, Custom> where Custom: ExtensionType, Custom::ValueRef<'a>: Copy, { Map(&'a OrMap<String, Custom>), Array(&'a 
OrArray<Custom>), Register(&'a MvReg), Custom(Custom::ValueRef<'a>), } // NOTE: the Clone, Copy, PartialEq, and Debug impls must be manual (ie, they can't be // derived) so we get the right bounds; ref https://github.com/rust-lang/rust/issues/26925. impl<C> Clone for ValueRef<'_, C> where C: ExtensionType, { fn clone(&self) -> Self { *self } } impl<C> Copy for ValueRef<'_, C> where C: ExtensionType {} impl<C> PartialEq for ValueRef<'_, C> where C: ExtensionType + PartialEq, for<'doc> C::ValueRef<'doc>: PartialEq, { fn eq(&self, other: &Self) -> bool { match (*self, *other) { (ValueRef::Map(m1), ValueRef::Map(m2)) => m1.eq(m2), (ValueRef::Array(a1), ValueRef::Array(a2)) => a1.eq(a2), (ValueRef::Register(r1), ValueRef::Register(r2)) => r1.eq(r2), (ValueRef::Custom(c1), ValueRef::Custom(c2)) => c1.eq(&c2), _ => false, } } } macro_rules! impl_partial_eq { ({$($t:ty),+}) => { $(impl_partial_eq!($t);)+ }; ($t:ty) => { impl<C> PartialEq<$t> for ValueRef<'_, C> where C: ExtensionType, { fn eq(&self, other: &$t) -> bool { matches!(*self, ValueRef::Register(r1) if r1 == other) } } }; } impl_partial_eq!({[u8], &[u8], str, &str, bool, f64, u64, i64}); // i32 because it's the "default" inference integer type impl_partial_eq!(i32); // byte literals impl<C, const N: usize> PartialEq<&[u8; N]> for ValueRef<'_, C> where C: ExtensionType, { fn eq(&self, other: &&[u8; N]) -> bool { matches!(*self, ValueRef::Register(r1) if r1 == other) } } impl<C> fmt::Debug for ValueRef<'_, C> where C: fmt::Debug + ExtensionType, for<'a> C::ValueRef<'a>: fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { ValueRef::Map(m) => f.debug_tuple("ValueRef::Map").field(m).finish(), ValueRef::Array(a) => f.debug_tuple("ValueRef::Array").field(a).finish(), ValueRef::Register(r) => f.debug_tuple("ValueRef::Register").field(r).finish(), ValueRef::Custom(c) => f.debug_tuple("ValueRef::Custom").field(c).finish(), } } } impl<C> ValueRef<'_, C> where C: ExtensionType, { pub fn 
type_name(&self) -> &'static str { match self { Self::Map(_) => "Map", Self::Array(_) => "Array", Self::Register(_) => "Register", Self::Custom(c) => C::type_name(c), } } } impl<C> From<ValueRef<'_, C>> for Value<C> where C: ExtensionType + Clone, { fn from(val: ValueRef<'_, C>) -> Self { match val { ValueRef::Map(m) => Value::Map(m.clone()), ValueRef::Array(a) => Value::Array(a.clone()), ValueRef::Register(r) => Value::Register(r.clone()), ValueRef::Custom(c) => Value::Custom(c.into()), } } } impl<'a, C> From<&'a Value<C>> for ValueRef<'a, C> where C: ExtensionType, { fn from(val: &'a Value<C>) -> Self { match val { Value::Map(m) => ValueRef::Map(m), Value::Array(a) => ValueRef::Array(a), Value::Register(r) => ValueRef::Register(r), Value::Custom(c) => ValueRef::Custom(c.into()), } } } impl<'a, C> From<&'a OrMap<String, C>> for ValueRef<'a, C> where C: ExtensionType, { fn from(value: &'a OrMap<String, C>) -> Self { Self::Map(value) } } impl<'a, C> From<&'a OrArray<C>> for ValueRef<'a, C> where C: ExtensionType, { fn from(value: &'a OrArray<C>) -> Self { Self::Array(value) } } impl<'a, C> From<&'a MvReg> for ValueRef<'a, C> where C: ExtensionType, { fn from(value: &'a MvReg) -> Self { Self::Register(value) } } // we can't impl From<C> for ValueRef<C> since those are overlap hazards with the above impls, so // we instead provide a convenience variant for use with customs in place of `ValueRef::from`. 
impl<'a, C> ValueRef<'a, C> where C: ExtensionType, { pub fn custom(value: impl Into<C::ValueRef<'a>>) -> Self { Self::Custom(value.into()) } } impl<C> From<OrMap<String, C>> for Value<C> where C: ExtensionType, { fn from(value: OrMap<String, C>) -> Self { Self::Map(value) } } impl<C> From<OrArray<C>> for Value<C> where C: ExtensionType, { fn from(value: OrArray<C>) -> Self { Self::Array(value) } } impl<C> From<MvReg> for Value<C> where C: ExtensionType, { fn from(value: MvReg) -> Self { Self::Register(value) } } // ditto as for ValueRef::custom impl<C> Value<C> where C: ExtensionType, { pub fn custom(value: impl Into<C::Value>) -> Self { Self::Custom(value.into()) } } #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug)] pub enum ValueType<C> { Map, Array, Register, Custom(C), } impl<C> From<ValueRef<'_, C>> for ValueType<C::ValueKind> where C: ExtensionType, { fn from(value: ValueRef<'_, C>) -> Self { match value { ValueRef::Map(_) => Self::Map, ValueRef::Array(_) => Self::Array, ValueRef::Register(_) => Self::Register, ValueRef::Custom(c) => Self::Custom(c.into()), } } } /// A container for a value that can be one of several types. /// /// # Concurrent Mutations and Type Conflicts /// /// It is possible for different actors to concurrently modify the same piece of /// data. This can lead to situations where an actor updates a given value as a map, /// and another one updates it concurrently as an array. DSON /// is designed to represent these conflicts. /// /// `TypeVariantValue` is a struct rather than an enum precisely to manage these type conflicts. /// If it were an enum, one variant would have to be chosen over the other, potentially /// losing the concurrent update. Instead, `TypeVariantValue` can hold multiple types /// simultaneously. For instance, if one actor writes a map and another concurrently writes an /// array to the same logical field, the resulting `TypeVariantValue` will contain both the map /// and the array. 
/// /// This approach preserves all concurrent writes -- both on the value as well as on the type /// level -- allowing the application layer to decide how to resolve the conflict. #[derive(Clone, Default, PartialEq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct TypeVariantValue<Custom> { // NOTE: We decided for the OrMap to be not generic over the key type. If you require an // `OrMap` with another key type, consider using implementing an [`ExtensionType`]. pub map: OrMap<String, Custom>, pub array: OrArray<Custom>, pub reg: MvReg, #[cfg_attr(feature = "serde", serde(flatten))] pub custom: Custom, } impl<C> fmt::Debug for TypeVariantValue<C> where C: ExtensionType + fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match ( self.map.is_bottom(), self.array.is_bottom(), self.reg.is_bottom(), self.custom.is_bottom(), ) { (false, true, true, true) => self.map.fmt(f), (true, false, true, true) => self.array.fmt(f), (true, true, false, true) => self.reg.fmt(f), (true, true, true, false) => self.custom.fmt(f), _ => { let mut w = f.debug_struct("TypeVariantValue"); if !self.map.is_bottom() { w.field("map", &self.map); } if !self.array.is_bottom() { w.field("array", &self.array); } if !self.reg.is_bottom() { w.field("reg", &self.reg); } if !self.custom.is_bottom() { w.field("custom", &self.custom); } w.finish_non_exhaustive() } } } } impl<C> TypeVariantValue<C> { /// Coerces the potentially type-conflicted value in `self` into a single-typed /// [`ValueRef`]. 
/// /// NOTE: as written in the trait's [`ExtensionType::coerce_to_value_ref`] /// documentation, this is a lossy operation: if the underlying value is /// type conflicted, an arbitrary but deterministic variant is chosen pub fn coerce_to_value_ref(&self) -> ValueRef<'_, C> where C: ExtensionType, { if !self.custom.is_bottom() { ValueRef::Custom(self.custom.coerce_to_value_ref()) } else if !self.map.is_bottom() { ValueRef::Map(&self.map) } else if !self.array.is_bottom() { ValueRef::Array(&self.array) } else if !self.reg.is_bottom() { ValueRef::Register(&self.reg) } else { // TODO: how is this possible? empty InnerMaps should not be left in the // map. it can perhaps happen if someone tries to read out of a CRDT that // represents a removal CRDT. panic!("attempt to coerce empty TypeVariantValue to ValueRef"); } } } impl<C> DotStore for TypeVariantValue<C> where C: ExtensionType, { fn add_dots_to(&self, other: &mut CausalContext) { self.map.add_dots_to(other); self.array.add_dots_to(other); self.reg.add_dots_to(other); self.custom.add_dots_to(other); } fn is_bottom(&self) -> bool { self.map.is_bottom() && self.array.is_bottom() && self.reg.is_bottom() && self.custom.is_bottom() } fn subset_for_inflation_from(&self, frontier: &CausalContext) -> Self { Self { map: self.map.subset_for_inflation_from(frontier), array: self.array.subset_for_inflation_from(frontier), reg: self.reg.subset_for_inflation_from(frontier), custom: self.custom.subset_for_inflation_from(frontier), } } } impl<C, S> DotStoreJoin<S> for TypeVariantValue<C> where S: Visit<String> + Visit<Uid> + KeySentinel + ValueSentinel<MvRegValue> + TypeSentinel<C::ValueKind>, C: ExtensionType + DotStoreJoin<S> + Clone + PartialEq + fmt::Debug, { fn join( ds1: (Self, &CausalContext), ds2: (Self, &CausalContext), on_dot_change: &mut dyn FnMut(DotChange), sentinel: &mut S, ) -> Result<Self, S::Error> where Self: Sized, S: Sentinel, { // NOTE! 
When making changes to this method, consider if corresponding // changes need to be done to ::dry_join as well! let (m1, cc1) = ds1; let (m2, cc2) = ds2; let types_before = [ !m1.map.is_bottom(), !m1.array.is_bottom(), !m1.reg.is_bottom(), ]; let map = OrMap::join((m1.map, cc1), (m2.map, cc2), on_dot_change, sentinel)?; let array = OrArray::join((m1.array, cc1), (m2.array, cc2), on_dot_change, sentinel)?; let reg = MvReg::join((m1.reg, cc1), (m2.reg, cc2), on_dot_change, sentinel)?; let custom = C::join((m1.custom, cc1), (m2.custom, cc2), on_dot_change, sentinel)?; let types_after = [!map.is_bottom(), !array.is_bottom(), !reg.is_bottom()]; // Normally we either go from no type to 1 type (join bottom with non-bottom) or 1 type to 1 // type. But in case of conflicts we may end up with multiple types being set or unset. // NOTE: The following loop does not call set_type when transiting to Custom(C) and does not // call unset_type when transiting away from Custom(C). Those calls are emitted from the // C::join further up. 
for (ty, (before, after)) in [ValueType::Map, ValueType::Array, ValueType::Register] .into_iter() .zip(types_before.into_iter().zip(types_after)) { match (before, after) { (true, false) => sentinel.unset_type(ty)?, (false, true) => sentinel.set_type(ty)?, _ => (), } } Ok(TypeVariantValue { map, array, reg, custom, }) } fn dry_join( ds1: (&Self, &CausalContext), ds2: (&Self, &CausalContext), sentinel: &mut S, ) -> Result<DryJoinOutput, S::Error> where Self: Sized, S: Sentinel, { let (m1, cc1) = ds1; let (m2, cc2) = ds2; let types_before = [ !m1.map.is_bottom(), !m1.array.is_bottom(), !m1.reg.is_bottom(), ]; let map = OrMap::dry_join((&m1.map, cc1), (&m2.map, cc2), sentinel)?; let array = OrArray::dry_join((&m1.array, cc1), (&m2.array, cc2), sentinel)?; let reg = MvReg::dry_join((&m1.reg, cc1), (&m2.reg, cc2), sentinel)?; let custom = C::dry_join((&m1.custom, cc1), (&m2.custom, cc2), sentinel)?; let types_after = [!map.is_bottom(), !array.is_bottom(), !reg.is_bottom()]; // Normally we either go from no type to 1 type (join bottom with non-bottom) or 1 type to 1 // type. But in case of conflicts we may end up with multiple types being set or unset. // NOTE: The following loop does not call set_type when transiting to Custom(C) and does not // call unset_type when transiting away from Custom(C). Those calls are emitted from the // C::join further up. 
for (ty, (before, after)) in [ValueType::Map, ValueType::Array, ValueType::Register] .into_iter() .zip(types_before.into_iter().zip(types_after)) { match (before, after) { (true, false) => sentinel.unset_type(ty)?, (false, true) => sentinel.set_type(ty)?, _ => (), } } let result_is_non_bottom = types_after.iter().any(|x| *x) || !custom.is_bottom(); Ok(DryJoinOutput::new(!result_is_non_bottom)) } } impl<C> From<Value<C>> for TypeVariantValue<C> where C: ExtensionType, { fn from(value: Value<C>) -> Self { match value { Value::Map(m) => TypeVariantValue { map: m, array: Default::default(), reg: Default::default(), custom: Default::default(), }, Value::Array(a) => TypeVariantValue { map: Default::default(), array: a, reg: Default::default(), custom: Default::default(), }, Value::Register(r) => TypeVariantValue { map: Default::default(), array: Default::default(), reg: r, custom: Default::default(), }, Value::Custom(c) => TypeVariantValue { map: Default::default(), array: Default::default(), reg: Default::default(), custom: c.into(), }, } } } impl DotStore for NoExtensionTypes { fn add_dots_to(&self, _: &mut CausalContext) {} fn is_bottom(&self) -> bool { true } fn subset_for_inflation_from(&self, _: &CausalContext) -> Self { Self } } impl DotStore for NoExtensionTypesType { fn add_dots_to(&self, _: &mut CausalContext) { match *self {} } fn is_bottom(&self) -> bool { match *self {} } fn subset_for_inflation_from(&self, _: &CausalContext) -> Self { match *self {} } } impl DotStore for () { fn add_dots_to(&self, _: &mut CausalContext) {} fn is_bottom(&self) -> bool { true } fn subset_for_inflation_from(&self, _: &CausalContext) -> Self {} } impl<S> DotStoreJoin<S> for NoExtensionTypes { fn join( _: (Self, &CausalContext), _: (Self, &CausalContext), _: &mut dyn FnMut(DotChange), _: &mut S, ) -> Result<Self, <S>::Error> where Self: Sized, S: Sentinel, { // NOTE! When making changes to this method, consider if corresponding // changes need to be done to ::dry_join as well! 
Ok(Self) } fn dry_join( _ds1: (&Self, &CausalContext), _ds2: (&Self, &CausalContext), _sentinel: &mut S, ) -> Result<DryJoinOutput, S::Error> where Self: Sized, S: Sentinel, { Ok(DryJoinOutput::bottom()) } } #[cfg(feature = "json")] impl From<&'_ NoExtensionTypes> for serde_json::Value { fn from(_: &'_ NoExtensionTypes) -> Self { serde_json::Value::Null } } #[cfg(feature = "json")] impl From<NoExtensionTypes> for serde_json::Value { fn from(_: NoExtensionTypes) -> Self { serde_json::Value::Null } } impl From<NoExtensionTypesType> for NoExtensionTypes { fn from(v: NoExtensionTypesType) -> Self { match v {} } } impl From<&Self> for NoExtensionTypesType { fn from(v: &Self) -> Self { match *v {} } } impl From<NoExtensionTypes> for () { fn from(_: NoExtensionTypes) -> Self { Self::default() } } impl ToValue for NoExtensionTypesType { type Values = (); type Value = (); type LeafValue = (); fn values(self) -> Self::Values { match self {} } fn value(self) -> Result<Self::Value, Box<snapshot::SingleValueError<Self::LeafValue>>> { match self {} } } impl ExtensionType for NoExtensionTypes { type ValueKind = NoExtensionTypesType; type Value = NoExtensionTypesType; type ValueRef<'doc> = NoExtensionTypesType; fn coerce_to_value_ref(&self) -> Self::ValueRef<'_> { panic!("NoExtensionTypes is always bottom, and cannot be coerced into a ValueRef"); } fn type_name(value: &Self::ValueRef<'_>) -> &'static str { match *value {} } fn bottom() -> Self { Self } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/json.rs
src/json.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! JSON representation //! //! Value-level conflicts, which can only occur in [`MvReg`s](crate::crdts::MvReg), are //! represented as a JSON array of the conflicting values in an **arbitrary but deterministic order**. //! //! # Examples //! //! ## A simple document without conflicts //! //! ```json //! { //! "name": "John Doe", //! "age": 43, //! "phones": [ //! "+44 1234567", //! "+44 2345678" //! ] //! } //! ``` //! //! ## A document with a value conflict //! //! If two users concurrently edit the "name" field, the conflict is preserved. //! //! ```json //! { //! "name": ["John Doe", "Jon Dough"], //! "age": 43, //! "phones": [ //! "+44 1234567", //! "+44 2345678" //! ] //! } //! ``` use crate::{ ExtensionType, api::timestamp, crdts::{ ValueRef, mvreg::MvRegValue, snapshot::{self, ToValue}, }, }; use serde_json::Value; use std::{fmt, hash::Hash}; /// Converts a [`MvRegValue`] to a [`serde_json::Value`]. impl From<MvRegValue> for Value { fn from(val: MvRegValue) -> Self { match val { MvRegValue::Bytes(v) => { base64::Engine::encode(&base64::engine::general_purpose::STANDARD, v).into() } MvRegValue::String(v) => v.into(), MvRegValue::Float(v) => v.into(), MvRegValue::Double(v) => v.into(), MvRegValue::U64(v) => v.into(), MvRegValue::I64(v) => v.into(), MvRegValue::Bool(v) => v.into(), MvRegValue::Timestamp(v) => timestamp_to_json(v), #[cfg(feature = "ulid")] MvRegValue::Ulid(v) => serde_json::to_value(v).expect("ULID is JSON serializable"), } } } #[cfg(feature = "chrono")] fn timestamp_to_json(v: timestamp::Timestamp) -> Value { v.into() } #[cfg(not(feature = "chrono"))] fn timestamp_to_json(v: timestamp::Timestamp) -> Value { v.as_millis().into() } /// Converts a [`snapshot::AllValues`] to a [`serde_json::Value`]. 
impl<C> From<snapshot::AllValues<'_, C>> for Value where C: ToValue, serde_json::Value: From<C::Values>, { fn from(value: snapshot::AllValues<'_, C>) -> Self { match value { snapshot::AllValues::Register(reg) => reg.into(), snapshot::AllValues::Map(map) => map.into(), snapshot::AllValues::Array(arr) => arr.into(), snapshot::AllValues::Custom(c) => c.into(), } } } /// Converts a [`ValueRef`] to a `serde_json::Value`. impl<C> From<ValueRef<'_, C>> for Value where C: ExtensionType, for<'doc> serde_json::Value: From<<C::ValueRef<'doc> as ToValue>::Values>, { fn from(value: ValueRef<'_, C>) -> Self { value.values().into() } } /// Converts a [`snapshot::OrMap`] to a `serde_json::Value`. impl<K, V> From<snapshot::OrMap<'_, K, V>> for serde_json::Value where K: Hash + Eq + fmt::Display, V: Into<serde_json::Value>, { fn from(value: snapshot::OrMap<'_, K, V>) -> Self { let obj = value .map .into_iter() .map(|(k, v)| (k.to_string(), v.into())) .collect(); serde_json::Value::Object(obj) } } /// Converts a [`snapshot::OrArray`] to a `serde_json::Value`. impl<V> From<snapshot::OrArray<V>> for serde_json::Value where V: Into<serde_json::Value>, { fn from(value: snapshot::OrArray<V>) -> Self { // NOTE: items are sorted by the dot, which we need for handling // single-writer (temporary) conflicts client-side. let arr = value.list.into_iter().map(Into::into).collect(); serde_json::Value::Array(arr) } } /// Converts a [`snapshot::MvReg`] to a `serde_json::Value`. /// /// * If the register is empty, it returns `Null`. /// * If the register has one value, it returns that value. /// * If the register has multiple values, it returns an array of those values. impl From<snapshot::MvReg<'_>> for serde_json::Value { fn from(reg: snapshot::MvReg<'_>) -> Self { match reg.values.len() { 0 => serde_json::Value::Null, 1 => (reg.get(0).expect("len > 0")).clone().into(), _ => serde_json::Value::Array(reg.into_iter().map(|x| (*x).clone().into()).collect()), } } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/sentinel.rs
src/sentinel.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! Observe and validate changes to a CRDT. //! //! Sentinels are types that can be used to inspect the changes being applied to a CRDT. They are //! useful for validating that the changes conform to a schema, or simply to observe the changes //! for any other purpose (for example, logging, metrics, etc). //! //! The main entry point for this module is the [`Sentinel`] trait, which is composed of more //! specialized traits that can be implemented to observe different kinds of changes. //! //! For a testing-oriented example, see the `recording_sentinel` module. use crate::crdts::ValueType; use std::convert::Infallible; /// Observes and optionally stops a change being applied to a CRDT. /// /// This is the base trait that all Sentinels should implement. Different Sentinels may observe /// different changes, and different data structures may produce different changes, so we've split /// the actual behaviour into several specialized traits. /// /// This trait should normally be paired with [`Visit`] so that the Sentinel can keep track of /// which node is currently being modified in the document tree. /// /// If Error = Infallible, the Sentinel is referred to as an Observer. If it can produce an error, it /// may be referred to as a Validator. pub trait Sentinel { type Error; } /// Observe when a key is added or removed from the document tree. /// /// This is how Sentinels can track when container nodes are created. Register values changes can /// be tracked via the [`ValueSentinel`] trait. pub trait KeySentinel: Sentinel { /// Observe and validate the creation of a new entry under the current path. /// /// This method may be called _after_ [`ValueSentinel::set`] for a given entry. fn create_key(&mut self) -> Result<(), Self::Error> { Ok(()) } /// Observe and validate the deletion of the entry under the current path. 
fn delete_key(&mut self) -> Result<(), Self::Error> { Ok(()) } } /// Observes when a value's type changes. /// /// This is useful for tracking changes involving container values - particularly, when /// transitioning a value from a container to register or vice-versa, or when creating empty /// containers. The first case is because updates that switch to/from register values only produce /// one [`ValueSentinel`] event, as there is no set/unset counterpart for the container value. /// This leads to incorrectly interpreting the change as an addition or removal. The second case /// is because no [`ValueSentinel`] events are produced at all, which leaves the container type /// ambiguous. In either case these type change events are the only way to get the complete picture. #[expect(unused_variables)] pub trait TypeSentinel<Custom>: Sentinel { /// Observe and validate setting a type at the current path. fn set_type(&mut self, value_type: ValueType<Custom>) -> Result<(), Self::Error> { Ok(()) } /// Observe and validate unsetting a type at the current path. fn unset_type(&mut self, value_type: ValueType<Custom>) -> Result<(), Self::Error> { Ok(()) } } /// Observe when values are set or unset at the current path. /// /// Updates are represented as a value unset and another one set. There are no ordering /// guarantees between the calls. #[expect(unused_variables)] pub trait ValueSentinel<V>: Sentinel { /// Observe and validate setting a new value under the current path. fn set(&mut self, value: &V) -> Result<(), Self::Error> { Ok(()) } /// Observe and validate unsetting the value under the current path. fn unset(&mut self, value: V) -> Result<(), Self::Error> { Ok(()) } } /// Enables a Sentinel to keep track of document traversal. 
/// /// During a document mutation (typically via [`DotStoreJoin::join`](crate::DotStoreJoin)), the /// document tree is traversed in a depth-first manner and each map field or array element visited /// is reported via this interface, so that the Sentinel can update its internal pointer. /// /// Typically, you want to implement this for [`String`] (to visit [`OrMap`](crate::OrMap) values) /// and [`Uid`](crate::crdts::orarray::Uid) (to visit [`OrArray`](crate::OrArray)), for example. /// /// NOTE: any nodes in the document tree may be visited, regardless of whether they contain a change. /// Additionally, nodes that are visited may not exist in the final tree. #[expect(unused_variables)] pub trait Visit<K>: Sentinel { /// Descend into a map field or array element. fn enter(&mut self, key: &K) -> Result<(), Self::Error> { Ok(()) } /// Backtrack to the parent container. /// /// NOTE: may not be called if the Sentinel produces an Err. fn exit(&mut self) -> Result<(), Self::Error> { Ok(()) } } /// A Sentinel that does nothing. /// /// This is useful when the join doesn't need any introspection. Using it helps the compiler /// optimise some code away. pub struct DummySentinel; impl Sentinel for DummySentinel { type Error = Infallible; } impl KeySentinel for DummySentinel {} impl<C> TypeSentinel<C> for DummySentinel {} impl<K> Visit<K> for DummySentinel {} impl<V> ValueSentinel<V> for DummySentinel {} #[cfg(test)] pub(crate) mod test { use super::*; use std::{collections::BTreeMap, fmt}; /// A Sentinel that always rejects changes. 
pub struct NoChangeValidator; impl Sentinel for NoChangeValidator { type Error = (); } impl<K> Visit<K> for NoChangeValidator {} impl KeySentinel for NoChangeValidator { fn create_key(&mut self) -> Result<(), Self::Error> { Err(()) } fn delete_key(&mut self) -> Result<(), Self::Error> { Err(()) } } impl<C> TypeSentinel<C> for NoChangeValidator { fn set_type(&mut self, _value_type: ValueType<C>) -> Result<(), Self::Error> { Err(()) } fn unset_type(&mut self, _value_type: ValueType<C>) -> Result<(), Self::Error> { Err(()) } } impl<V> ValueSentinel<V> for NoChangeValidator { fn set(&mut self, _value: &V) -> Result<(), Self::Error> { Err(()) } fn unset(&mut self, _value: V) -> Result<(), Self::Error> { Err(()) } } /// A Sentinel that counts keys added or removed and rejects other changes. #[derive(Debug, Default)] pub struct KeyCountingValidator { pub added: usize, pub removed: usize, } impl Sentinel for KeyCountingValidator { type Error = (); } impl<K> Visit<K> for KeyCountingValidator {} impl KeySentinel for KeyCountingValidator { fn create_key(&mut self) -> Result<(), Self::Error> { self.added += 1; Ok(()) } fn delete_key(&mut self) -> Result<(), Self::Error> { self.removed += 1; Ok(()) } } impl<C> TypeSentinel<C> for KeyCountingValidator { fn set_type(&mut self, _value_type: crate::crdts::ValueType<C>) -> Result<(), Self::Error> { Err(()) } fn unset_type( &mut self, _value_type: crate::crdts::ValueType<C>, ) -> Result<(), Self::Error> { Err(()) } } impl<V> ValueSentinel<V> for KeyCountingValidator {} /// A Sentinel that counts changes to values and rejects other changes. /// /// Setting `permissive` to true disables erroring on key and type changes. 
#[derive(Debug)] pub struct ValueCountingValidator<V> { pub added: BTreeMap<V, usize>, pub removed: BTreeMap<V, usize>, path: Vec<String>, permissive: bool, } impl<V> Default for ValueCountingValidator<V> { fn default() -> Self { Self { added: Default::default(), removed: Default::default(), path: Default::default(), permissive: false, } } } impl<V> ValueCountingValidator<V> { pub fn new(permissive: bool) -> Self { Self { permissive, ..Default::default() } } } impl<V> Sentinel for ValueCountingValidator<V> { type Error = String; } impl<K, V> Visit<K> for ValueCountingValidator<V> where K: std::fmt::Debug, { fn enter(&mut self, key: &K) -> Result<(), Self::Error> { self.path.push(format!("{key:?}")); Ok(()) } fn exit(&mut self) -> Result<(), Self::Error> { self.path.pop(); Ok(()) } } impl<V> KeySentinel for ValueCountingValidator<V> { fn create_key(&mut self) -> Result<(), Self::Error> { self.permissive .then_some(()) .ok_or(format!("create_key at {}", self.path.join("/"))) } fn delete_key(&mut self) -> Result<(), Self::Error> { self.permissive .then_some(()) .ok_or(format!("delete_key at {}", self.path.join("/"))) } } impl<C, V> TypeSentinel<C> for ValueCountingValidator<V> where C: fmt::Debug, { fn set_type(&mut self, value_type: crate::crdts::ValueType<C>) -> Result<(), Self::Error> { self.permissive.then_some(()).ok_or(format!( "set_type: {value_type:?} at {}", self.path.join("/") )) } fn unset_type( &mut self, value_type: crate::crdts::ValueType<C>, ) -> Result<(), Self::Error> { self.permissive.then_some(()).ok_or(format!( "unset_type: {value_type:?} at {}", self.path.join("/") )) } } impl<V> ValueSentinel<V> for ValueCountingValidator<V> where V: std::fmt::Debug + Ord + Clone, { fn set(&mut self, value: &V) -> Result<(), Self::Error> { *self.added.entry(value.clone()).or_default() += 1; Ok(()) } fn unset(&mut self, value: V) -> Result<(), Self::Error> { *self.removed.entry(value).or_default() += 1; Ok(()) } } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/causal_context.rs
src/causal_context.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! # Causal Context //! //! This module provides the core data structures for tracking causality in DSON. //! Causal consistency is maintained by tracking the history of operations using //! `Dot`s, which are globally unique identifiers for each operation. The set of //! all observed dots forms a `CausalContext`. //! //! - **[`Identifier`]**: A unique identifier for an actor in the system. It consists of //! of a `NodeId` and an `ApplicationId` to distinguish between different //! applications running on the same node. //! //! - **[`Dot`]**: A globally unique identifier for a single operation (for example, an insert //! or update). It consists of an `Identifier` and a sequence number, which is //! monotonically increasing for that specific actor. //! //! - **[`CausalContext`]**: A data structure that stores the set of all `Dot`s that a //! replica has observed. It represents the replica's knowledge of the system's //! history. By comparing `CausalContext`s, replicas can determine which //! operations are new, concurrent, or have already been seen, enabling correct //! merging of states. //! //! The `CausalContext` is implemented using a `BTreeMap` of `Identifier`s to //! `IntervalSet`s, which efficiently stores contiguous ranges of sequence numbers. //! This allows for a compact representation of the causal history. use self::interval::{Interval, IntervalSet}; use interval::IntervalError; use std::{ cmp::Ordering, collections::{BTreeMap, btree_map::Entry}, fmt, num::NonZeroU64, ops::{BitAnd, Sub}, }; mod interval; /// Maximum representable application id. /// /// Note that id 0 is reserved, so the number of applications that can be /// registered is one lower. 
pub const MAX_APPLICATION_ID: u16 = (1 << 12) - 1; /// Error returned when attempting to create an [`Identifier`] from bits with an invalid [`Priority`] #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub struct InvalidPriority(pub u8); impl fmt::Display for InvalidPriority { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "invalid priority {}", self.0) } } impl std::error::Error for InvalidPriority {} /// Error returned when attempting to create an [`Identifier`] from an invalid bits sequence #[derive(Debug, Eq, PartialEq, Copy, Clone)] pub enum IdentifierError { /// Invalid priority Priority(InvalidPriority), /// Bits extracted from the value are invalid InvalidBits { field: &'static str, value: u32 }, } impl fmt::Display for IdentifierError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { IdentifierError::Priority(err) => write!(f, "{err}"), IdentifierError::InvalidBits { field, value } => { write!(f, "invalid value {value} for field {field}") } } } } impl std::error::Error for IdentifierError {} impl From<InvalidPriority> for IdentifierError { fn from(value: InvalidPriority) -> Self { Self::Priority(value) } } /// Indicates the priority level a given CRDT update is associated with. #[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] #[repr(u8)] // really 3 bits, so max value is 7 pub enum Priority { /// Update that should not leave the current node. Local = 0, /// Update that should be synchronized after all others. Low = 2, /// Update that should be synchronized as necessary. Medium = 4, /// Update that should be synchronized ahead of all others. 
High = 6, } pub const PRIORITY_MAX: Priority = Priority::High; impl TryFrom<u8> for Priority { type Error = InvalidPriority; fn try_from(value: u8) -> Result<Self, Self::Error> { Ok(match value { 0 => Priority::Local, 2 => Priority::Low, 4 => Priority::Medium, 6 => Priority::High, _ => return Err(InvalidPriority(value)), }) } } /// The application-id used for the root document. /// /// When [`Identifier`] instances are used to identify a node, they use this application value. pub const ROOT_APP_ID: u16 = 0; /// The identifier we choose to use for actors in the system. /// /// It is space-efficient and is passed around _everywhere_. /// /// This identifier is composed of a node identifier, and an application identifier. /// This is so that all the applications running _on_ a node can be modeled as /// independent actors as far as the CRDT is concerned. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] #[cfg_attr(feature = "serde", serde(transparent))] #[repr(transparent)] pub struct Identifier { /// ```text /// 0 7 8 19 21 24 32 /// +--------+------------+-+---+--------+ /// | N | A |R| P | unused | /// +--------+------------+-+---+--------+ /// ``` /// /// - N: node identifier (8 bits, so 256 nodes) /// - A: application identifier (12 bits, so 4096 applications per node) /// - R: reserved bit (should be 0) /// - P: priority (3 bits, so 8 priority levels) /// /// Note, bit 0 is the most significant bit in the diagram above. /* * YADR: 2024-04-19 Allocation of identifier bits * * In the context of reducing bandwidth footprint, we faced the question of how we should * represent participant identifiers in memory. 
* * We decided for making them stored as a packed 24-bit integer in a 32-bit integer with 8 bits * for node IDs, 12 bits for application IDs, and 3 bits for priority levels, and neglected * using structured types, tuples, differently-sized integer types, or other allocations of * bits. * * We did this to achieve a balance between the size of the identifier space (ie, how many * unique identifiers we can have) and the amount of bytes we need to send over the wire, * accepting a relatively small but realistic hard limit on the number of nodes, applications, * and priority levels. * * We think this is the right trade-off because identifiers are included _everywhere_ and thus * even a single excess byte translates into potentially kB included in CRDT deltas. * * We expect that there should never be more than 256 nodes (8 bits) in a single network -- * beyond that size the network should instead be multiple smaller networks that selectively * exchange (and potentially aggregate) data. * * We expect that there should never be more than 4096 applications (12 bits) connected to a * single instance. If there is, it suggests a broken use pattern where applications are * constantly establishing new connections, which comes with its own set of problems and should * be discouraged. We know that browsers sometime recycle connections, but even with occasional * recycling, 4k seems like a reasonable limit between "acceptable" and "something needs to * change". * * We expect that 8 priority levels (3 bits) should be sufficient for most applications. Even 8 * is potentially excessive, as it already captures high, medium, low, and local with four * levels to spare. It seems unlikely to us that finer-grained priorities (like 15 being * different to 16) are worth the extra bits. We did not limit to 2 bits so that we _do_ have * some room for growing requirements. 
* * We left a single reserved bit for unforeseen future uses, rather than allocating them to any * of the aforementioned limits (which we think are all reasonable already). * * We stayed within 24 bits so that identifiers can be sent as just 3 bytes on the wire rather * than 4, which potentially saves a significant amount of bandwidth if hundreds are sent in a * delta. */ bits: u32, } /// Custom implementation that renders the virtual components of this struct. impl std::fmt::Debug for Identifier { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self.priority() { Priority::High => write!(f, "@{}.{}h", self.node(), self.app()), Priority::Medium => write!(f, "@{}.{}", self.node(), self.app()), Priority::Low => write!(f, "@{}.{}l", self.node(), self.app()), Priority::Local => write!(f, "@{}.{}-", self.node(), self.app()), } } } impl From<(u8, u16)> for Identifier { fn from((node, application): (u8, u16)) -> Self { Identifier::new(node, application) } } impl PartialEq<(u8, u16)> for Identifier { fn eq(&self, &(node, application): &(u8, u16)) -> bool { self == &Identifier::new(node, application) } } impl TryFrom<u32> for Identifier { type Error = IdentifierError; fn try_from(value: u32) -> Result<Self, Self::Error> { const BIT_FIELDS: [(u32, u32); 5] = [(0, 7), (8, 19), (20, 20), (21, 23), (24, 31)]; let [_node_id, _app_id, reserved, priority, unused] = BIT_FIELDS.map(|(start, end)| { let bits_count = end - start + 1; let mask = (!0u32) >> (u32::BITS - bits_count); let shift = u32::BITS - end - 1; (value >> shift) & mask }); let _priority = Priority::try_from(priority as u8)?; if reserved != 0 { return Err(IdentifierError::InvalidBits { field: "reserved", value: reserved, }); } if unused != 0 { return Err(IdentifierError::InvalidBits { field: "unused", value: unused, }); } Ok(Self { bits: value }) } } impl Identifier { /// Constructs a new Identifier for the given node-application pair. 
/// /// Application must be a valid u12 (meaning a u16 with the high four bits unset), or the /// function will panic. pub const fn new(node: u8, application: u16) -> Self { if application > MAX_APPLICATION_ID { // NOTE: cannot print the value since we're in a const fn panic!("application exceeds u12"); } Self { bits: ((node as u32) << (12 + 1 + 3 + 8)) | ((application as u32) << (1 + 3 + 8)), } .with_priority(Priority::Medium) } /// Get the representable 'next larger' Identifier. /// Returns None if no such identifier exists. /// This does not take priority into account. pub fn checked_successor(self) -> Option<Identifier> { if self.app() != MAX_APPLICATION_ID { return Some(Identifier::new(self.node().value(), self.app() + 1)); } if self.node() != NodeId::MAX { return Some(Identifier::new(self.node().value() + 1, self.app())); } None } pub const fn node(&self) -> NodeId { NodeId { node_id: (self.bits >> (12 + 1 + 3 + 8)) as u8, } } pub const fn app(&self) -> u16 { ((self.bits >> (1 + 3 + 8)) & 0xfff) as u16 } pub const fn priority(&self) -> Priority { let bits = ((self.bits >> 8) & 0b111) as u8; match bits { 0 if Priority::Local as u8 == 0 => Priority::Local, 2 if Priority::Low as u8 == 2 => Priority::Low, 4 if Priority::Medium as u8 == 4 => Priority::Medium, 6 if Priority::High as u8 == 6 => Priority::High, _ if cfg!(debug_assertions) => { // NOTE: cannot print bits since we're in a const fn panic!("illegal priority") } // SAFETY: it's only possible to set the priority bits using methods on `Identifier`, // and those all take `Priority`, whose values we check for above. should we have // missed any (ie, because `Priority` was modified), that'll be caught at test time // with the branch above (+ quickcheck). at release time, this shouldn't be reachable. _ => unsafe { std::hint::unreachable_unchecked() }, } } // TODO: have this be more like a `resolve_priority` method that decides whether to use // the provided priority or the already-set one. 
we could have a number of heuristics for how // to decide (probably: last wins). we'll probably want to also include a `bool` argument here // to indicate "was overridden", which should take precedence over something from the schema, // and we can maybe tuck that flag into the one remaining bit we have in `Identifier.0`. pub const fn with_priority(self, priority: Priority) -> Self { Self { bits: (self.bits & !(0b111 << 8)) | ((priority as u32) << 8), } } pub const fn bits(&self) -> u32 { self.bits } } /// A unique identifier for a single node in the network. /// /// All applications in a single node have the same NodeId, /// even though they have different [`Identifier`]s. #[derive(Clone, Copy, PartialEq, Eq, Hash, Ord, PartialOrd)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct NodeId { node_id: u8, } impl NodeId { pub const MAX: NodeId = NodeId::new(u8::MAX); pub const fn value(self) -> u8 { self.node_id } pub const fn new(node_id: u8) -> Self { NodeId { node_id } } /// Returns the main-identifier for this node (that is, for application 0) pub fn identifier(self) -> Identifier { Identifier::new(self.node_id, ROOT_APP_ID) } } impl std::fmt::Debug for NodeId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.node_id) } } impl std::fmt::Display for NodeId { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { std::fmt::Debug::fmt(&self.node_id, f) } } /// A unique identifier for an operation. /// /// Every DSON operation is assigned a unique operation in the form of a `Dot`. These are a /// combination of a unique node identifier and an ever-increasing sequence number. /// /// Dots are ordered by the sequence number _first_ and _then_ the actor identifier. 
#[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct Dot(Identifier, NonZeroU64); impl std::fmt::Debug for Dot { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "({:?}, {})", self.0, self.1) } } impl<I> From<(I, NonZeroU64)> for Dot where I: Into<Identifier>, { fn from((id, seq): (I, NonZeroU64)) -> Self { Self(id.into(), seq) } } impl Dot { /// Get the 'next' larger dot. /// This method never changes the identifier, it only increments the sequence number. /// This wraps around, keeping the same id, in case of overflow. pub fn successor(&self) -> Dot { Dot::mint(self.0, self.1.get().wrapping_add(1).max(1)) } /// Creates a new [`Dot`] out of thin air. /// /// All real dots should be made through the use of a [`CausalContext`]. /// This constructor is mainly useful for tests and documentation examples. /// /// # Panics /// /// If `seq == 0`. pub const fn mint(id: Identifier, seq: u64) -> Self { Self( id, if let Some(seq) = NonZeroU64::new(seq) { seq } else { panic!("attempted to construct Dot for 0th sequence number"); }, ) } pub const fn with_priority(self, priority: Priority) -> Self { Self(self.0.with_priority(priority), self.1) } /// Returns the [`Identifier`] of the actor that produced this [`Dot`]. pub fn actor(&self) -> Identifier { self.0 } /// Returns the sequence number (ie, per-actor operation index) of this [`Dot`]. pub fn sequence(&self) -> NonZeroU64 { self.1 } } impl PartialEq<(Identifier, u64)> for Dot { fn eq(&self, other: &(Identifier, u64)) -> bool { self.0 == other.0 && self.1.get() == other.1 } } impl PartialEq<((u8, u16), u64)> for Dot { fn eq(&self, other: &((u8, u16), u64)) -> bool { self.0 == Identifier::from(other.0) && self.1.get() == other.1 } } /// Tracks the set of sequence numbers observed from each actor in the system. 
/// /// This type can be used both to track observed causal context, and to produce new `Dot`s. If only /// needed for the former, construct using [`CausalContext::default()`]. To produce new /// `Dot`s as well, use the [`CausalContext::new`] constructor to also supply the /// current actor's identifier. /// /// # Examples /// /// ## Producing [`Dot`]s /// /// ```rust /// # use dson::{CausalContext, Dot, Identifier}; /// let id = Identifier::new(0, 0); /// let mut cause = CausalContext::new(); /// /// // The causal context can be used to produce new dots: /// let dot1 = cause.next_dot_for(id); /// // New dots are not implicitly absorbed: /// assert_eq!(cause.next_dot_for(id), dot1); /// // You must explicitly add them to generate newer dots: /// cause.insert_next_dot(dot1); /// let dot2 = cause.next_dot_for(id); /// assert_ne!(dot1, dot2); /// cause.insert_next_dot(dot2); /// /// // The first dot produced will have sequence number 1: /// assert_eq!(dot1, ((0, 0), 1)); /// /// // If one dot is produced after another, it is also ordered after: /// assert!(dot2 > dot1); /// /// // The causal context considers any dot produced as observed: /// assert!(cause.dot_in(dot1)); /// assert!(cause.dot_in(dot2)); /// ``` /// /// ## Tracking causal context /// /// ```rust /// # use dson::{CausalContext, Dot}; /// let mut cause = CausalContext::default(); /// /// // With no observed causal relationships, no dots are in the context: /// assert!(!cause.dot_in(Dot::mint((0, 0).into(), 1))); /// /// // Once a dot is observed, that dot is in the causal context, but no others: /// cause.extend([Dot::mint((0, 0).into(), 1)]); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 1))); /// assert!(!cause.dot_in(Dot::mint((0, 0).into(), 2))); /// assert!(!cause.dot_in(Dot::mint((1, 0).into(), 1))); /// /// // The context can track causal context across multiple nodes: /// cause.extend([Dot::mint((1, 0).into(), 1)]); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 1))); /// 
assert!(cause.dot_in(Dot::mint((1, 0).into(), 1))); /// /// // and the context can track out-of-order dots: /// cause.extend([Dot::mint((0, 0).into(), 10)]); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 1))); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 10))); /// assert!(!cause.dot_in(Dot::mint((0, 0).into(), 2))); /// assert!(!cause.dot_in(Dot::mint((0, 0).into(), 9))); /// assert!(!cause.dot_in(Dot::mint((0, 0).into(), 11))); /// /// // If more consecutive dots from the same actor are observed, they are stored compactly: /// let before = cause.size(); /// cause.extend([ /// Dot::mint((0, 0).into(), 2), /// Dot::mint((0, 0).into(), 3) /// ]); /// assert_eq!(before, cause.size()); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 1))); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 2))); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 3))); /// assert!(cause.dot_in(Dot::mint((1, 0).into(), 1))); /// assert!(!cause.dot_in(Dot::mint((0, 0).into(), 4))); /// assert!(!cause.dot_in(Dot::mint((1, 0).into(), 2))); /// ``` // TODO: here we spend a lot of time accessing map entries in batch operations. we could probably do // a lot better with btree cursors, which unfortunately is only on nightly right now. see // https://github.com/rust-lang/rust/issues/107540. // Also, we can add a temporary dot cloud as a way to buffer inserts before // applying them as a batch operation. This would require significant changes in // many places, including `partial_cmp_dots`. // TODO: check if `self.dots` is compacted on serialization. #[derive(Default, Clone)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct CausalContext { dots: BTreeMap<Identifier, IntervalSet>, } impl std::fmt::Debug for CausalContext { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_tuple("CausalContext").field(&self.dots).finish() } } impl CausalContext { /// Constructs a new [`CausalContext`]. 
pub fn new() -> Self { Self::default() } /// Create a [`CausalContext`] from a sequence of raw intervals pub fn from_intervals<I, RI>(iter: I) -> Result<Self, IntervalError> where I: IntoIterator<Item = (Identifier, RI)>, RI: IntoIterator<Item = (NonZeroU64, Option<NonZeroU64>)>, { let mut dots = BTreeMap::new(); for (id, bounds) in iter { dots.insert(id, IntervalSet::from_intervals(bounds)?); } Ok(Self { dots }) } /// Produces the next unused [`Dot`] for this node. pub fn next_dot_for(&self, id: Identifier) -> Dot { self.next_n_dots_for(1, id) .next() .expect("yields as many as are indicated") } /// Attempt to find an Identifier that is not present in this causal context. /// If one exists, it is returned. Otherwise, None is returned. pub fn unused_identifier(&self) -> Option<Identifier> { let mut candidate = Identifier::new(0, 0); for key in self.dots.keys() { if candidate < *key { // candidate is always set to the next representable Identifier. If // this is not the next key in the map, we have found an unused identifier. // It works for the first element, since we initialize candidate to (0,0), // and if the first key in the map isn't (0,0), then (0,0) is obviosly unused. return Some(candidate); } candidate = key.checked_successor()?; } Some(candidate) } /// Returns the largest sequence number that exists in the causal context /// for each identifier belonging to a single `node`. pub fn largest_for_node(&self, node: u8) -> impl Iterator<Item = Dot> + '_ { self.dots .range(Identifier::new(node, 0)..=Identifier::new(node, MAX_APPLICATION_ID)) .filter_map(|(k, v)| v.last().map(|last_element| Dot(*k, last_element.end()))) } /// Produces `n` unused [`Dot`]s for this node. pub fn next_n_dots_for( &self, n: u8, id: Identifier, ) -> impl Iterator<Item = Dot> + Clone + use<> { let spans = self.dots.get(&id); // NOTE: this method and its use implicitly assumes that a node's sequence numbers are // always produced in sequence _and_ always compacted. 
let's make sure that that's actually // the case: debug_assert!( spans .map(|seqs| seqs.len() == 1 && seqs.first().expect("not empty").start() == NonZeroU64::MIN) .unwrap_or(true), "dots for self.id are not sequential and compacted in {:?}", self.dots ); let next_seq = spans .map(IntervalSet::next_after) .unwrap_or(NonZeroU64::MIN); // TODO: avoid the extra NonZero wrapping once we get // https://github.com/rust-lang/libs-team/issues/130 (next_seq.get()..next_seq.get() + u64::from(n)).map(move |seq| { // SAFETY: the start of the range was a NonZeroU64, and we're adding unsigned u8s let seq = unsafe { NonZeroU64::new_unchecked(seq) }; Dot(id, seq) }) } #[cfg(test)] pub fn dots_for(&self, id: Identifier) -> impl Iterator<Item = Dot> + '_ { let dots: Vec<_> = self .dots .get(&id) .iter() .flat_map(|ivals| ivals.seqs().map(|seq| Dot::mint(id, seq.get()))) .collect(); dots.into_iter() } /// Iterator over all the dots that the context holds pub fn dots(&self) -> impl Iterator<Item = Dot> + '_ { self.dots .iter() .flat_map(|(id, ivals)| ivals.seqs().map(|seq| (*id, seq).into())) } /// True if there are no dots in this causal context. pub fn is_empty(&self) -> bool { debug_assert!( self.dots.values().all(|v| !v.is_empty()), "should not retain empty interval sets" ); self.dots.is_empty() } /// The approximate size of this causal context including compaction. pub fn size(&self) -> usize { std::mem::size_of::<Self>() + self.dots.len() * (std::mem::size_of::<Identifier>() + std::mem::size_of::<IntervalSet>()) + self .dots .values() .map(|ivals| ivals.len() * std::mem::size_of::<Interval>()) .sum::<usize>() } /// Return the total number of dots. #[must_use] pub fn dot_count(&self) -> u64 { self.dots .values() .map(|ivals| ivals.total_interval_length()) .sum() } /// Determines if the given `dot` is in the current causal context. 
#[must_use] pub fn dot_in(&self, dot: Dot) -> bool { self.dots .get(&dot.actor()) .is_some_and(|s| s.contains(dot.sequence())) } /// Returns an arbitrary [`Dot`] among those in this context. /// /// No guarantee is given about which [`Dot`] is returned if there are multiple. pub fn one(&self) -> Option<Dot> { self.dots .iter() .flat_map(|(id, ivals)| ivals.first().map(|ival| Dot::from((*id, ival.start())))) .next() } pub fn is_compact_for_node(&self, node: u8) -> bool { self.dots .range( Identifier::new(node, 0) ..=Identifier::new(node, MAX_APPLICATION_ID).with_priority(PRIORITY_MAX), ) .all(|(_, spans)| { spans.len() == 1 && spans.first().expect("not empty").start() == NonZeroU64::MIN }) } /// Records a new observed [`Dot`] in the causal context. /// ///This is a "dangerous" method, since it can break some internal ///assumptions about dots being contiguous and the causal context being ///compacted. /// /// Will not compact automatically. pub fn insert_dot(&mut self, dot: Dot) { self.dots .entry(dot.actor()) .or_insert_with(IntervalSet::new) .insert(dot.sequence()); } /// Records a newly generated [`Dot`] in the causal context. pub fn insert_next_dot(&mut self, dot: Dot) { match self.dots.entry(dot.actor()) { Entry::Vacant(v) => { assert_eq!(dot.sequence(), NonZeroU64::MIN); v.insert(IntervalSet::single(dot.sequence())); } Entry::Occupied(mut o) => { let next = o.get().next_after(); assert_eq!(dot.sequence(), next); o.get_mut().extend_end_by_one(); } } } /// Records multiple observed [`Dot`] in the causal context. /// /// Will not compact automatically. pub(crate) fn insert_dots(&mut self, dots: impl IntoIterator<Item = Dot>) { // TODO: batching would really help here, since the `Entry` API is a // huge bottleneck. we could just temporarily stash dots in a dot cloud // using something like a `BTreeSet` and then later make a single call // to `.entry` per actor when compacting (since dots will be sorted by // actor). 
for dot in dots { match self.dots.entry(dot.actor()) { Entry::Vacant(v) => { v.insert(IntervalSet::single(dot.sequence())); } Entry::Occupied(mut o) => { o.get_mut().insert(dot.sequence()); } } } } /// Removes the given `dot` from the causal context. /// /// Returns `true` if the `dot` was in the causal context. pub fn remove_dot(&mut self, dot: Dot) -> bool { let Some(ivals) = self.dots.get_mut(&dot.actor()) else { return false; }; let removed = ivals.remove(dot.sequence()); if ivals.is_empty() { self.dots.remove(&dot.actor()); } removed } /// Removes all the dots in the given causal context from this causal context. pub fn remove_dots_in(&mut self, remove: &CausalContext) { self.dots.retain(|k, v1| { if let Some(v2) = remove.dots.get(k) { *v1 = v1.difference(v2); !v1.is_empty() } else { true } }) } /// Incorporates the observations from another causal context into this one. /// /// After the `union`, all [`Dot`]s known to `other` will be considered observed by `self`. /// /// ```rust /// # use dson::{CausalContext, Dot}; /// let mut cause1 = CausalContext::default(); /// let mut cause2 = CausalContext::default(); /// /// cause1.extend([ /// Dot::mint((0, 0).into(), 1), /// Dot::mint((0, 0).into(), 2), /// Dot::mint((0, 0).into(), 4), /// Dot::mint((1, 0).into(), 2), /// ]); /// /// cause2.extend([ /// Dot::mint((0, 0).into(), 1), /// Dot::mint((0, 0).into(), 5), /// Dot::mint((1, 0).into(), 1), /// ]); /// /// cause1.union(&cause2); /// let cause = cause1; /// /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 1))); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 2))); /// assert!(!cause.dot_in(Dot::mint((0, 0).into(), 3))); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 4))); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 5))); /// assert!(!cause.dot_in(Dot::mint((0, 0).into(), 6))); /// assert!(cause.dot_in(Dot::mint((1, 0).into(), 1))); /// assert!(cause.dot_in(Dot::mint((1, 0).into(), 2))); /// assert!(!cause.dot_in(Dot::mint((1, 0).into(), 3))); /// 
``` pub fn union(&mut self, other: &CausalContext) { for (k, v1) in &mut self.dots { if let Some(v2) = other.dots.get(k) { // see note on union as to why we don't unite in-place *v1 = v1.union(v2); } } for (k, v2) in &other.dots { if !self.dots.contains_key(k) { self.dots.insert(*k, v2.clone()); } } } /// Retains only dots whose [`Identifier`] the provided closure returns `true` for. /// /// ```rust /// # use dson::{CausalContext, Dot}; /// let mut cause = CausalContext::default(); /// /// cause.extend([ /// Dot::mint((0, 0).into(), 1), /// Dot::mint((0, 0).into(), 2), /// Dot::mint((1, 0).into(), 2), /// Dot::mint((2, 0).into(), 1), /// ]); /// /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 1))); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 2))); /// assert!(cause.dot_in(Dot::mint((1, 0).into(), 2))); /// assert!(cause.dot_in(Dot::mint((2, 0).into(), 1))); /// /// cause.retain_from(|id| id != (1, 0)); /// /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 1))); /// assert!(cause.dot_in(Dot::mint((0, 0).into(), 2))); /// assert!(!cause.dot_in(Dot::mint((1, 0).into(), 2))); /// assert!(cause.dot_in(Dot::mint((2, 0).into(), 1))); /// ``` pub fn retain_from(&mut self, mut f: impl FnMut(Identifier) -> bool) { self.dots.retain(|&id, _| f(id)); } /// Returns true if the provided closure returns `true` for the [`Identifier`]s of any [`Dot`] /// in this context. 
/// /// ```rust /// # use dson::{CausalContext, Dot}; /// let mut cause = CausalContext::default(); /// /// cause.extend([ /// Dot::mint((0, 0).into(), 1), /// Dot::mint((0, 0).into(), 2), /// Dot::mint((2, 0).into(), 1), /// ]); /// /// assert!(cause.any_dot_id_with(|id| id == (0, 0))); /// assert!(!cause.any_dot_id_with(|id| id == (1, 0))); /// assert!(cause.any_dot_id_with(|id| id == (2, 0))); /// ``` pub fn any_dot_id_with(&self, mut f: impl FnMut(Identifier) -> bool) -> bool { self.dots.keys().any(|&id| f(id)) } /// Returns true if the provided context contains at least one [`Dot`] that also exists in this /// context.
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
true
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/api.rs
src/api.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. pub mod array; pub mod map; pub mod register; pub mod timestamp;
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/either.rs
src/either.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! The enum Either with variants Left and Right is a general purpose sum type //! with two cases. #[derive(Debug, Clone, Ord, PartialOrd, PartialEq, Eq)] pub enum Either<A, B> { Left(A), Right(B), } impl<A, B> Either<Either<A, B>, B> { /// Converts from `Either<Either<A, B, B>>` to `Either<A, B>`. pub fn flatten(self) -> Either<A, B> { match self { Either::Left(nested) => nested, Either::Right(b) => Either::Right(b), } } } impl<A, B> Either<A, Either<A, B>> { /// Converts from `Either<A, Either<A, B>>` to `Either<A, B>`. pub fn flatten(self) -> Either<A, B> { match self { Either::Left(a) => Either::Left(a), Either::Right(nested) => nested, } } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/macros.rs
src/macros.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. /// Convenience macro for creating dot values. /// /// NOTE! This is mostly useful for tests, since it does not provide control /// over the app or priority fields of a dot. #[macro_export] macro_rules! dot { ($seq:expr) => { const { $crate::causal_context::Dot::mint( $crate::causal_context::Identifier::new(1, 1), $seq, ) } }; ($node:expr, $seq:expr) => { const { $crate::causal_context::Dot::mint( $crate::causal_context::Identifier::new($node, 1), $seq, ) } }; ($node:expr, $app:expr, $seq:expr) => { const { $crate::causal_context::Dot::mint( $crate::causal_context::Identifier::new($node, $app), $seq, ) } }; } /// Convenience macro for creating a OrMap instance. /// /// Use the [`crdt_map_store`](crate::crdt_map_store) literal to also create a matching /// CausalContext. /// /// ```rust /// # use dson::{crdt_map_literal, dot}; /// let map = crdt_map_literal! { /// "field_x" => ("Hello", dot!(1,2)), /// "field_y" => ("World", dot!(1,3)), /// "field_z" => { /// "field_x" => ("Nested", dot!(1,4)), /// "field_y" => ("Nested", dot!(1,5)) /// } /// }; /// ``` /// #[macro_export] macro_rules! crdt_map_literal { ($($k:literal => $v:tt),*) => { $crate::crdt_literal!( { $( $k => $v ),* } ).map }; } /// Convenience macro for creating a TypeVariantValue, of either map, array or register type. 
/// /// /// Register literal: /// ```rust /// # use dson::{crdt_literal, dot}; /// let reg = crdt_literal!( ("hello", dot!(1))); /// ``` /// /// Conflicted register literal: /// ```rust /// # use dson::{crdt_literal, dot}; /// let reg = crdt_literal!( ("Hello", dot!(1); "Bonjour", dot!(2); )); /// ``` /// /// Map literal (note the '{' and '}'): /// ```rust /// # use dson::{crdt_literal, dot}; /// let reg = crdt_literal!( { /// "Greeting" => ("Hello", dot!(1)) /// } ); /// ``` /// /// Array literal (note the '[' and ']'): /// ```rust /// # use dson::{crdt_literal, dot}; /// let reg = crdt_literal!( [ /// (("Banana", dot!(3)), dot!(4), dot!(5), dot!(6), 42.0), /// (("Cantaloupe", dot!(7)), dot!(8), dot!(9), dot!(10), 43.0) /// ] ); /// ``` /// The first tuple is the actual value in the array, with its dot. /// The remaining 4 parameters are: Uid, 2 array position dots (for dotfunmap + /// dotfun), and the f64 value that decides the sorting order of the array. /// /// See section 5, about the OrArray algorithm, in the DSON paper for more information. /// /// Note that this macro does not generate a CausalContext. #[macro_export] macro_rules! crdt_literal { // Map ({$($k:literal => $v:tt),*}) => { { let mut map = $crate::OrMap::<String, $crate::crdts::NoExtensionTypes>::default(); $( { $crate::crdt_literal!(map_insert, map, $k, $v); } )* $crate::crdts::TypeVariantValue { map, ..$crate::crdts::TypeVariantValue::<$crate::crdts::NoExtensionTypes>::default() } } }; // Array ([$($v:tt),*]) => { { let mut array = $crate::OrArray::<$crate::crdts::NoExtensionTypes>::default(); $( $crate::crdt_literal!(array_element, array, $v); )* $crate::crdts::TypeVariantValue { array, ..$crate::crdts::TypeVariantValue::<$crate::crdts::NoExtensionTypes>::default() } } }; // Mvreg ( ($($v:expr, $dot:expr $(;)? 
)* ) ) => { { let mut reg = $crate::crdts::mvreg::MvReg::default(); $( reg.push($dot, $v); )* $crate::crdts::TypeVariantValue { reg, ..$crate::crdts::TypeVariantValue::<$crate::crdts::NoExtensionTypes>::default() } } }; // Helper for creating map elements (map_insert, $temp:ident, $k:literal , $v: tt) => { $temp.insert($k.into(), $crate::crdt_literal!($v)); }; // Helper for creating array elements (array_element, $temp:ident, ($v:tt, $uid: expr, $dot1:expr, $dot2:expr, $pos_f64:expr)) => { let val = $crate::crdt_literal!($v); $temp.insert_raw($crate::crdts::orarray::Uid::from($uid), std::iter::once(($dot1,$dot2,$pos_f64)), val); }; } #[macro_export] macro_rules! crdt_map_store { ($($k:literal => $v:tt),*) => { { use $crate::{DotStore, CausalDotStore}; let ormap = $crate::crdt_map_literal!($($k => $v),*); let dots = ormap.dots(); CausalDotStore { store: ormap, context: dots } } } } #[cfg(test)] mod tests { use crate::enable_determinism; use insta::assert_debug_snapshot; #[test] fn crdt_map_literal_macro() { enable_determinism(); let map = crdt_map_literal! { "field_x" => ("Hello", dot!(1,2)), "field_y" => ("World", dot!(1,3)), "field_z" => { "field_x" => ("Nested", dot!(1,4)), "field_y" => ("Nested", dot!(1,5)) } }; assert_debug_snapshot!(map); } #[test] fn crdt_map_store_macro() { enable_determinism(); let map = crdt_map_store! { "field_x" => ("Hello", dot!(1,2)), "field_y" => ("World", dot!(1,3)), "field_z" => { "field_x" => ("Nested", dot!(1,4)), "field_y" => ("Nested", dot!(1,5)) } }; assert_debug_snapshot!(map); } #[test] fn crdt_map_literal_macro_array() { enable_determinism(); let map = crdt_map_literal! { "field_x" => ("Hello", dot!(1)), "field_y" => ("World", dot!(2)), "field_z" => [ (("Banana", dot!(3)), dot!(4), dot!(5), dot!(6), 42.0), (("Cantaloupe", dot!(7)), dot!(8), dot!(9), dot!(10), 43.0) ] }; assert_debug_snapshot!(map); } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/datetime_literal.rs
src/datetime_literal.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. /// Declarative macro to create a [`chrono::DateTime<chrono::Utc>`] suitable /// for const evaluation, as this is otherwise cumbersome. /// /// Usage: /// ```rust /// # use chrono::{DateTime, Utc}; /// # use dson::datetime; /// let datetime: DateTime<Utc> = datetime!( 2024-12-24 15:00:00 Z); /// # let _ = datetime; /// ``` #[macro_export] macro_rules! datetime { ( $year:literal-$month:literal-$day:literal $(T)? $hour:literal:$min:literal:$second:literal Z) => { const { #[allow(clippy::zero_prefixed_literal)] $crate::chrono::DateTime::<$crate::chrono::Utc>::from_naive_utc_and_offset( datetime!($year - $month - $day $hour:$min:$second), $crate::chrono::Utc ) } }; ( $year:literal-$month:literal-$day:literal $(T)? $hour:literal:$min:literal:$second:literal) => { const { #[allow(clippy::zero_prefixed_literal)] $crate::chrono::NaiveDateTime::new( match $crate::chrono::NaiveDate::from_ymd_opt($year, $month, $day) { Some(date) => date, None => ::std::panic!("year-month-day outside expected range.") }, match $crate::chrono::NaiveTime::from_hms_opt($hour, $min, $second) { Some(time) => time, None => ::std::panic!("hour:min:second outside expected range.") } ) } }; }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/mvreg.rs
src/crdts/mvreg.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use super::snapshot::{self, SingleValueError, SingleValueIssue, ToValue}; use crate::{ CausalContext, CausalDotStore, Dot, DotFun, DotStoreJoin, Identifier, api, dotstores::{DotChange, DotStore, DryJoinOutput}, sentinel::{Sentinel, ValueSentinel}, }; use std::cmp::Ordering; /// A **Multi-Value Register**, a CRDT for storing a single, atomic value. /// /// `MvReg` is one of the three core CRDT primitives provided by this crate, alongside [`crate::OrMap`] and /// [`crate::OrArray`]. It is used to hold primitive values like integers, strings, or booleans. /// /// ## Conflict Handling /// /// When two replicas concurrently write different values to the same `MvReg`, the register will /// hold both values simultaneously. This is the "multi-value" aspect. A subsequent read will return /// all conflicting values, allowing the application to resolve the conflict in a way that makes /// sense for its use case. A subsequent write will overwrite all conflicting values, resolving the /// conflict by establishing a new, single value. /// /// If a value is concurrently cleared and overwritten, the written value "wins" and the register /// will contain the new value. /// /// ## Usage /// /// An `MvReg` is typically used as a value within an [`crate::OrMap`] or [`crate::OrArray`]. /// It is not usually used as a top-level CRDT. /// /// ```rust /// # use dson::{CausalDotStore, MvReg, crdts::{mvreg::MvRegValue, snapshot::ToValue}, Identifier, sentinel::DummySentinel}; /// // Create a new CausalDotStore containing an MvReg. /// let mut doc: CausalDotStore<MvReg> = CausalDotStore::new(); /// let id = Identifier::new(0, 0); /// /// // Create a delta to write a value. /// let delta = doc.store.write(MvRegValue::U64(42), &doc.context, id); /// /// // Merge the delta into the document. /// doc = doc.join(delta, &mut DummySentinel).unwrap(); /// /// // The value can now be read from the register. 
/// assert_eq!(*doc.store.value().unwrap(), MvRegValue::U64(42)); /// ``` /// /// You can find more convenient, higher-level APIs for manipulating `MvReg` in the /// [`api::register`] module. #[derive(Clone, Default, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct MvReg(pub DotFun<MvRegValue>); impl std::fmt::Debug for MvReg { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "${:?}", self.0) } } macro_rules! impl_partial_eq { ({$($t:ty),+}) => { $(impl_partial_eq!($t);)+ }; ($t:ty) => { impl PartialEq<$t> for MvReg { fn eq(&self, other: &$t) -> bool { self.values().into_iter().any(|v| v == other) } } }; } impl_partial_eq!({[u8], &[u8], str, &str, bool, f64, u64, i64}); // i32 because it's the "default" inference integer type impl_partial_eq!(i32); // byte literals impl<const N: usize> PartialEq<&[u8; N]> for MvReg { fn eq(&self, other: &&[u8; N]) -> bool { self.values().into_iter().any(|v| v.eq(other)) } } impl DotStore for MvReg { fn dots(&self) -> CausalContext { self.0.dots() } fn add_dots_to(&self, other: &mut CausalContext) { self.0.add_dots_to(other); } fn is_bottom(&self) -> bool { self.0.is_bottom() } fn subset_for_inflation_from(&self, frontier: &CausalContext) -> Self { Self(DotFun::subset_for_inflation_from(&self.0, frontier)) } } impl<S> DotStoreJoin<S> for MvReg where S: ValueSentinel<MvRegValue>, { fn join( (m1, cc1): (Self, &CausalContext), (m2, cc2): (Self, &CausalContext), on_dot_change: &mut dyn FnMut(DotChange), sentinel: &mut S, ) -> Result<Self, S::Error> where Self: Sized, S: Sentinel, { // NOTE! When making changes to this method, consider if corresponding // changes need to be done to ::dry_join as well! 
Ok(Self(DotFun::join( (m1.0, cc1), (m2.0, cc2), on_dot_change, sentinel, )?)) } fn dry_join( (m1, cc1): (&Self, &CausalContext), (m2, cc2): (&Self, &CausalContext), sentinel: &mut S, ) -> Result<DryJoinOutput, S::Error> where Self: Sized, S: Sentinel, { DotFun::dry_join((&m1.0, cc1), (&m2.0, cc2), sentinel) } } /// The value stored in a [`MvReg`]. /// /// This enum represents the different types of values that can be stored in a multi-value /// register. // NOTE: Why no U32 or I32? Make this a serialization concern. #[derive(Clone)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] // TODO(jon): should we make this #[non_exhaustive] so we can add to it without breaking? pub enum MvRegValue { // NOTE: the #[serde] here is needed to get efficient encoding of byte-arrays for // protocols that support it (like msgpack): // <https://docs.rs/rmp-serde/1/rmp_serde/index.html#efficient-storage-of-u8-types> Bytes(#[cfg_attr(feature = "serde", serde(with = "serde_bytes"))] Vec<u8>), String(String), Float(f32), Double(f64), U64(u64), I64(i64), Bool(bool), Timestamp(api::timestamp::Timestamp), #[cfg(feature = "ulid")] Ulid(ulid::Ulid), } impl MvRegValue { /// When ordering MvRegValue instances of different types, we order them /// according to this order. const fn comparison_order(&self) -> usize { // Desired order: Bytes > String > Ulid > Timestamp > Double > U64 > I64 > Bool match self { MvRegValue::Bytes(_) => 8, MvRegValue::String(_) => 7, #[cfg(feature = "ulid")] MvRegValue::Ulid(_) => 6, MvRegValue::Timestamp(_) => 5, MvRegValue::Double(_) => 4, MvRegValue::Float(_) => 3, MvRegValue::U64(_) => 2, MvRegValue::I64(_) => 1, MvRegValue::Bool(_) => 0, } } } macro_rules! impl_from { ( $( $source:ty => $target:ident $(with $conv:ident)? ),* $(,)? 
) => { $( impl From<$source> for MvRegValue { fn from(value: $source) -> Self { Self::$target(impl_from!(value$(, $conv)?)) } } )* }; ($value:ident, $conv:ident) => { $value.$conv() }; ($value:ident) => { $value }; } impl_from!( &[u8] => Bytes with into, Vec<u8> => Bytes, String => String, &str => String with to_string, f64 => Double, u8 => U64 with into, u16 => U64 with into, u32 => U64 with into, u64 => U64, i8 => I64 with into, i16 => I64 with into, i32 => I64 with into, i64 => I64, bool => Bool, ); #[cfg(feature = "ulid")] impl From<ulid::Ulid> for MvRegValue { fn from(value: ulid::Ulid) -> Self { Self::Ulid(value) } } impl std::fmt::Debug for MvRegValue { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { match self { Self::Bytes(inner) => write!(f, "{inner:02X?}"), Self::String(inner) => inner.fmt(f), Self::Bool(inner) => inner.fmt(f), // Make sure to always print at least 1 decimal, so we can non-ambiguously // tell apart I64 and floats (this is achieved by {:?} instead of {}). 
Self::Float(inner) => write!(f, "{inner:?}f"), Self::Double(inner) => write!(f, "{inner:?}d"), Self::U64(inner) => write!(f, "{inner}u"), Self::I64(inner) => write!(f, "{inner}"), Self::Timestamp(inner) => inner.fmt(f), #[cfg(feature = "ulid")] Self::Ulid(inner) => inner.fmt(f), } } } impl PartialEq for MvRegValue { fn eq(&self, other: &Self) -> bool { use MvRegValue::*; match (self, other) { (Bytes(b1), Bytes(b2)) => b1.eq(b2), (String(s1), String(s2)) => s1.eq(s2), (Double(d1), Double(d2)) => d1.total_cmp(d2).is_eq(), (Float(d1), Float(d2)) => d1.total_cmp(d2).is_eq(), (U64(u1), U64(u2)) => u1.eq(u2), (I64(i1), I64(i2)) => i1.eq(i2), (Bool(b1), Bool(b2)) => b1.eq(b2), (Timestamp(t1), Timestamp(t2)) => t1.eq(t2), #[cfg(feature = "ulid")] (Ulid(ulid1), Ulid(ulid2)) => ulid1.eq(ulid2), _ => false, } } } impl Eq for MvRegValue {} impl PartialEq<[u8]> for MvRegValue { fn eq(&self, other: &[u8]) -> bool { matches!(self, Self::Bytes(b) if b == other) } } impl PartialEq<&[u8]> for MvRegValue { fn eq(&self, other: &&[u8]) -> bool { matches!(self, Self::Bytes(b) if b == other) } } impl PartialEq<str> for MvRegValue { fn eq(&self, other: &str) -> bool { matches!(self, Self::String(s) if s == other) } } impl PartialEq<&str> for MvRegValue { fn eq(&self, other: &&str) -> bool { matches!(self, Self::String(s) if s == other) } } impl PartialEq<bool> for MvRegValue { fn eq(&self, other: &bool) -> bool { matches!(self, Self::Bool(b) if b == other) } } impl PartialEq<f64> for MvRegValue { fn eq(&self, other: &f64) -> bool { matches!(self, Self::Double(f) if f == other) } } impl PartialEq<u64> for MvRegValue { fn eq(&self, other: &u64) -> bool { match self { Self::U64(u) => u == other, Self::I64(i) => u64::try_from(*i).is_ok_and(|u| &u == other), Self::Bytes(_) | Self::String(_) | Self::Double(_) | Self::Float(_) | Self::Bool(_) | Self::Timestamp(_) => false, #[cfg(feature = "ulid")] Self::Ulid(_) => false, } } } impl PartialEq<i64> for MvRegValue { fn eq(&self, other: &i64) -> 
bool { match self { Self::U64(u) => i64::try_from(*u).is_ok_and(|i| &i == other), Self::I64(i) => i == other, Self::Bytes(_) | Self::String(_) | Self::Float(_) | Self::Double(_) | Self::Bool(_) | Self::Timestamp(_) => false, #[cfg(feature = "ulid")] Self::Ulid(_) => false, } } } // i32 because it's the "default" inference integer type impl PartialEq<i32> for MvRegValue { fn eq(&self, other: &i32) -> bool { match self { Self::U64(u) => i32::try_from(*u).is_ok_and(|i| &i == other), Self::I64(i) => i32::try_from(*i).is_ok_and(|i| &i == other), Self::Bytes(_) | Self::String(_) | Self::Double(_) | Self::Float(_) | Self::Bool(_) | Self::Timestamp(_) => false, #[cfg(feature = "ulid")] Self::Ulid(_) => false, } } } // byte literals impl<const N: usize> PartialEq<&[u8; N]> for MvRegValue { fn eq(&self, other: &&[u8; N]) -> bool { matches!(self, Self::Bytes(b) if b == other) } } impl PartialOrd for MvRegValue { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { Some(self.cmp(other)) } } impl Ord for MvRegValue { fn cmp(&self, other: &Self) -> Ordering { use MvRegValue::*; // For order of cross-variant comparisons, see: // [`MvRegValue::comparison_order`] match (self, other) { (Bytes(b1), Bytes(b2)) => b1.cmp(b2), (String(s1), String(s2)) => s1.cmp(s2), (Double(d1), Double(d2)) => d1.total_cmp(d2), (Float(d1), Float(d2)) => d1.total_cmp(d2), (U64(u1), U64(u2)) => u1.cmp(u2), (I64(i1), I64(i2)) => i1.cmp(i2), (Bool(b1), Bool(b2)) => b1.cmp(b2), (Timestamp(t1), Timestamp(t2)) => t1.cmp(t2), #[cfg(feature = "ulid")] (Ulid(ulid1), Ulid(ulid2)) => ulid1.cmp(ulid2), (a, b) => { let a_order = a.comparison_order(); let b_order = b.comparison_order(); debug_assert_ne!( a_order, b_order, "match must handle all comparisons between similar variants" ); a_order.cmp(&b_order) } } } } impl<'doc> ToValue for &'doc MvReg { type Values = snapshot::MvReg<'doc>; type Value = &'doc MvRegValue; type LeafValue = MvRegValue; /// Returns the set of all possible values for this register in an 
arbitrary /// order. /// /// NOTE: values are ordered by the sequence number of their associated dot. fn values(self) -> Self::Values { snapshot::MvReg { values: self.0.values(), } } /// Returns the single value of the MvReg. /// /// If the value has been cleared, [`SingleValueIssue::Cleared`] is returned as `Err`. /// /// If there are multiple (ie, conflicting) values, [`SingleValueIssue::HasConflict`] is /// returned as `Err`. fn value(self) -> Result<Self::Value, Box<SingleValueError<Self::LeafValue>>> { match self.0.len() { 0 => Err(Box::new(SingleValueError { path: Vec::new(), issue: SingleValueIssue::Cleared, })), 1 => { let a_dot = self .dots() .one() .expect("if we have values, we should also have dots"); Ok(self.0.get(&a_dot).expect( ".dots is the keys of the map, so if we get a Dot back, it must be present", )) } _ => { let conflicts = self.0.values().cloned().collect(); Err(Box::new(SingleValueError { path: Vec::new(), issue: SingleValueIssue::HasConflict(conflicts), })) } } } } impl MvReg { #[doc(hidden)] pub fn push(&mut self, dot: Dot, value: impl Into<MvRegValue>) { self.0.set(dot, value.into()); } /// Creates a CRDT that represents the overwrite of all past values of this /// register with the value in `self`. pub fn write(&self, v: MvRegValue, cc: &CausalContext, id: Identifier) -> CausalDotStore<Self> { let dot = cc.next_dot_for(id); // write collapses the state of the value let mut new_state = DotFun::default(); new_state.set(dot, v); let mut new_cc = CausalContext::new(); new_cc.insert_dot(dot); self.add_dots_to(&mut new_cc); CausalDotStore { store: Self(new_state), context: new_cc, } } /// Creates a CRDT that represents the erasure of all past values of this register. pub fn clear(&self) -> CausalDotStore<Self> { CausalDotStore { store: Self::default(), context: self.dots(), } } /// Directly retains only the values for which a predicate is true. 
/// /// This change is not represented as a delta-CRDT, meaning this may cause unintended /// consequences if `self` is later distributed along with an unmodified [`CausalContext`]. Only /// use this method if you know what you are doing. pub fn retain_immediately(&mut self, f: impl FnMut(&Dot, &mut MvRegValue) -> bool) { self.0.retain(f) } } #[cfg(test)] mod tests { use super::*; use crate::{ Dot, crdts::test_util::join_harness, sentinel::{DummySentinel, test::NoChangeValidator}, }; #[test] fn empty() { let cds = CausalDotStore::<MvReg>::default(); assert_eq!( cds.store.value().unwrap_err().issue, SingleValueIssue::Cleared ); assert_eq!(cds.store.values().len(), 0); assert_eq!(cds.store.values().get(0), None); } #[test] fn clear_and_write() { join_harness( MvReg::default(), |CausalDotStore { store: m, context: cc, }, id| { m.write(MvRegValue::Bool(false), &cc, id) }, |m, cc, id| m.write(MvRegValue::Bool(true), &cc, id), |m, _cc, _id| m.clear(), NoChangeValidator, |CausalDotStore { store: m, .. 
}, _| { // for a concurrent clear and write, only the written value should remain assert!(!m.is_bottom()); let values = m.values(); assert_eq!(values.len(), 1); assert!(values.into_iter().any(|v| v == &MvRegValue::Bool(true))); }, ); } #[quickcheck] fn values(vs: Vec<(Dot, MvRegValue)>) { // We shouldn't have the same dot for multiple values let mut dedup_dots = std::collections::HashSet::new(); let vs: Vec<_> = vs.into_iter().filter(|x| dedup_dots.insert(x.0)).collect(); let mut cds = CausalDotStore::<MvReg>::default(); let mut possible_values = Vec::<MvRegValue>::default(); for (dot, v) in vs.clone() { cds.store.0.set(dot, v.clone()); possible_values.push(v); } { let mut a = possible_values.clone(); let mut values_in_store = cds.store.values().into_iter().cloned().collect::<Vec<_>>(); a.sort_unstable(); values_in_store.sort_unstable(); assert_eq!(a, values_in_store); } let expected_value = if possible_values.len() == 1 { Ok(possible_values.first().unwrap()) } else if !possible_values.is_empty() { Err(Box::new(SingleValueError { path: Vec::new(), issue: SingleValueIssue::HasConflict(possible_values.into_iter().collect()), })) } else { Err(Box::new(SingleValueError { path: Vec::new(), issue: SingleValueIssue::Cleared, })) }; assert_eq!(cds.store.value(), expected_value, "input: {vs:?}"); } #[quickcheck] fn write(vs: Vec<(Dot, MvRegValue)>, new: MvRegValue) { let mut dedup_dots = std::collections::HashSet::new(); let vs: Vec<_> = vs.into_iter().filter(|x| dedup_dots.insert(x.0)).collect(); let mut cds = CausalDotStore::<MvReg>::new(); for &(dot, ref v) in &vs { cds.store.0.set(dot, v.clone()); cds.store.add_dots_to(&mut cds.context); } // Find an unused causal track, which we can use as 'our' id, so // we're sure to have a compact track (otherwise we will trigger asserts). 
let id = cds .context .unused_identifier() .expect("test case is not large enough to have used all identifiers"); // write a new value that dominates all the past writes let delta = cds.store.write(new, &cds.context, id); assert_eq!(delta.store.0.len(), 1); let new_dot = delta.store.0.keys().next().unwrap(); for &(dot, _) in &vs { assert!(delta.context.dot_in(dot)); } // check that the delta takes effect when joined into the original state let CausalDotStore { store, context } = cds.join(delta, &mut DummySentinel).unwrap(); assert_eq!(store.values().len(), 1); // clear the map, which will include clearing the new value let clear = store.clear(); assert_eq!(clear.store.0.len(), 0); // NOTE: one might expect that clear.context would also contain all the dots in vs, // but it does not. instead, it only contains the dot that it _observed_, which is the dot // from the write which in turn dominates all the dots in vs. for &(dot, _) in &vs { assert!(!clear.context.dot_in(dot)); } assert!(clear.context.dot_in(new_dot)); // check that the delta takes effect when joined into the original state let store = MvReg::join( (store, &context), (clear.store, &clear.context), &mut |_| {}, &mut DummySentinel, ) .unwrap(); assert_eq!(store.values().len(), 0); assert_eq!(store.value().unwrap_err().issue, SingleValueIssue::Cleared); } #[quickcheck] fn partial_cmp_is_involutive(v1: MvRegValue, v2: MvRegValue) { assert_eq!(v1.cmp(&v2), v2.cmp(&v1).reverse()); } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/test_util.rs
src/crdts/test_util.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use super::Value; use crate::{ CausalContext, CausalDotStore, DotStoreJoin, Identifier, dotstores::recording_sentinel::RecordingSentinel, sentinel::{DummySentinel, Sentinel}, }; use quickcheck::Gen; use std::{fmt, ops::RangeBounds}; mod arbitrary_delta_impls; mod qc_arbitrary_impls; mod qc_arbitrary_ops; #[cfg_attr(feature = "arbitrary", allow(dead_code))] pub(crate) fn join_harness<DS, Init, W1, W2, S, C>( zero: DS, init: Init, w1: W1, w2: W2, mut sentinel: S, check: C, ) where DS: DotStoreJoin<S> + DotStoreJoin<RecordingSentinel> + Default + Clone, S: Sentinel, S::Error: fmt::Debug, Init: FnOnce(CausalDotStore<DS>, Identifier) -> CausalDotStore<DS>, W1: FnOnce(&DS, CausalContext, Identifier) -> CausalDotStore<DS>, W2: FnOnce(&DS, CausalContext, Identifier) -> CausalDotStore<DS>, C: FnOnce(CausalDotStore<DS>, S), { let v = zero; let init_id = Identifier::new(9, 0); let v = init( CausalDotStore { store: v, context: CausalContext::new(), }, init_id, ); let w1_id = Identifier::new(0, 0); let mut w1_v = w1(&v.store, v.context.clone(), w1_id); let w2_id = Identifier::new(1, 0); let w2_v = w2(&v.store, v.context.clone(), w2_id); w1_v.test_join_with_and_track(w2_v.store, &w2_v.context, &mut |_| {}, &mut sentinel) .unwrap(); check(w1_v, sentinel) } /// Types that can construct descriptors of an arbitrary modification to themselves. pub(crate) trait ArbitraryDelta: Sized { #[cfg(not(feature = "serde"))] /// The type of the descriptor. type Delta: Delta<DS = Self>; #[cfg(feature = "serde")] type Delta: Delta<DS = Self> + ::serde::Serialize + ::serde::de::DeserializeOwned; /// Produces a descriptor for an arbitrary modification to `&self`. /// /// If the descriptor produces a new key in `self`, it should represent that key as a `usize` /// as returned by the `add_*_key` methods on [`KeyTracker`]. Any deltas to inner collections /// should be passed `&mut keys.inner_keys[keyi]` so they can also track their collections. 
/// /// `depth` is used solely to produce visual guides (eg, indents) so that nested calls to /// `arbitrary_delta` are easier to distinguish. fn arbitrary_delta( &self, cc: &CausalContext, id: Identifier, keys: &mut KeyTracker, g: &mut Gen, depth: usize, ) -> (Self::Delta, CausalDotStore<Self>); } /// Types that describe a modification to an instance of [`Delta::DS`]. pub(crate) trait Delta: Sized + fmt::Display { /// The [`DotStore`] type that this delta applies to. type DS: DotStoreJoin<DummySentinel>; /// Returns true if this delta specifically depends on a key in the given keyi range. /// /// Some examples: /// /// - an `Update(keyi = 42)` should return `true` when passed a range `16..`. /// - an `Update(keyi = 4)` should return `false` when passed a range `16..`. /// - an `Insert(keyi = 42)` should return `true` when passed a range `16..`. /// - a `Clear` should return `false` when passed a range `16..`. fn depends_on_keyi_in<R: RangeBounds<usize>>(&self, range: R) -> bool; /// Turns this modification description into a CRDT over `ds` that, when joined with `ds`, will /// produce the desired modification. /// /// `keys` tracks the sequence of keys produced so far. See [`ArbitraryDelta::arbitrary_delta`] /// for details. #[cfg_attr(feature = "arbitrary", allow(dead_code))] fn into_crdt( self, ds: &Self::DS, cc: &CausalContext, id: Identifier, keys: &mut KeyTracker, ) -> CausalDotStore<Self::DS>; } pub(crate) use qc_arbitrary_ops::KeyTracker; #[cfg(test)] pub(crate) use qc_arbitrary_ops::Ops;
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/ormap.rs
src/crdts/ormap.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use super::{ Either, NoExtensionTypes, TypeVariantValue, Value, ValueRef, mvreg::MvRegValue, orarray::Uid, snapshot::{self, AllValues, CollapsedValue, SingleValueError, SingleValueIssue, ToValue}, }; use crate::{ CausalContext, CausalDotStore, DotMap, DotStoreJoin, ExtensionType, Identifier, MvReg, OrArray, dotstores::{DotChange, DotStore, DryJoinOutput}, sentinel::{KeySentinel, TypeSentinel, ValueSentinel, Visit}, }; use std::{borrow::Borrow, fmt, hash::Hash, ops::Index}; /// An **Observed-Remove Map**, a map-like CRDT that allows for concurrent creation, updates, and /// removals of key-value pairs. /// /// `OrMap` is one of the three core CRDT primitives provided by this crate, alongside [`OrArray`] /// and [`MvReg`]. It is the most common choice for a top-level CRDT, as it can hold other CRDTs as /// values, allowing for the creation of nested, JSON-like data structures. /// /// ## Usage /// /// An `OrMap` is typically wrapped in a [`CausalDotStore`], which tracks the causal history of /// operations. Modifications are performed by creating a "delta" CRDT, which is then merged back /// into the original `CausalDotStore`. /// /// ```rust /// # use dson::{CausalDotStore, OrMap, MvReg, crdts::{mvreg::MvRegValue, snapshot::{ToValue, CollapsedValue}}, Identifier, sentinel::DummySentinel}; /// // Create a new CausalDotStore containing an OrMap. /// let mut doc: CausalDotStore<OrMap<String>> = CausalDotStore::new(); /// let id = Identifier::new(0, 0); /// /// // Create a delta to insert a value. /// let delta = doc.store.apply_to_register( /// |reg, cc, id| reg.write(MvRegValue::U64(42), cc, id), /// "key".into(), /// &doc.context, /// id, /// ); /// /// // Merge the delta into the document. /// doc = doc.join(delta, &mut DummySentinel).unwrap(); /// /// // The value can now be read from the map. 
/// let val = doc.store.get("key").unwrap(); /// assert_eq!(val.reg.value().unwrap(), &MvRegValue::U64(42)); /// ``` /// /// You can find more convenient, higher-level APIs for manipulating `OrMap` in the /// [`api::map`](crate::api::map) module. The methods on `OrMap` itself are low-level and /// intended for use when implementing custom CRDTs or when you need fine-grained control over /// delta creation. /// /// This type is a composable mapping of keys (`K`) to an arbitrary 𝛿-based CRDT such as an /// [`OrArray`], a [`MvReg`], or a nested [`OrMap`] (all represented via [`TypeVariantValue`]). #[derive(Clone, PartialEq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct OrMap<K: Hash + Eq, C = NoExtensionTypes>(pub(super) DotMap<K, TypeVariantValue<C>>); impl<K, C> Default for OrMap<K, C> where K: Hash + Eq, { fn default() -> Self { Self(Default::default()) } } impl<K, C> std::fmt::Debug for OrMap<K, C> where K: Hash + Eq + std::fmt::Debug, C: fmt::Debug + ExtensionType, { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.0.fmt(f) } } impl<K, C> FromIterator<(K, TypeVariantValue<C>)> for OrMap<K, C> where K: Eq + Hash, { fn from_iter<T: IntoIterator<Item = (K, TypeVariantValue<C>)>>(iter: T) -> Self { Self(DotMap::from_iter(iter)) } } impl<K, Q, C> Index<&Q> for OrMap<K, C> where K: Eq + Hash + Borrow<Q>, Q: Eq + Hash + ?Sized, { type Output = TypeVariantValue<C>; fn index(&self, index: &Q) -> &Self::Output { self.0.index(index) } } impl<K, C> DotStore for OrMap<K, C> where K: Hash + Eq + fmt::Debug + Clone, C: ExtensionType, { fn dots(&self) -> CausalContext { self.0.dots() } fn add_dots_to(&self, other: &mut CausalContext) { self.0.add_dots_to(other); } fn is_bottom(&self) -> bool { self.0.is_bottom() } fn subset_for_inflation_from(&self, frontier: &CausalContext) -> Self { Self(DotMap::subset_for_inflation_from(&self.0, frontier)) } } impl<K, C, S> DotStoreJoin<S> for OrMap<K, C> where K: Hash + 
Eq + fmt::Debug + Clone, C: ExtensionType + DotStoreJoin<S> + fmt::Debug + Clone + PartialEq, S: Visit<K> + Visit<String> + Visit<Uid> + KeySentinel + TypeSentinel<C::ValueKind> + ValueSentinel<MvRegValue>, { fn join( (m1, cc1): (Self, &CausalContext), (m2, cc2): (Self, &CausalContext), on_dot_change: &mut dyn FnMut(DotChange), sentinel: &mut S, ) -> Result<Self, S::Error> where Self: Sized, S: KeySentinel, { // NOTE! When making changes to this method, consider if corresponding // changes need to be done to ::dry_join as well! Ok(Self(DotMap::join( (m1.0, cc1), (m2.0, cc2), on_dot_change, sentinel, )?)) } fn dry_join( (m1, cc1): (&Self, &CausalContext), (m2, cc2): (&Self, &CausalContext), sentinel: &mut S, ) -> Result<DryJoinOutput, S::Error> where Self: Sized, S: KeySentinel, { DotMap::dry_join((&m1.0, cc1), (&m2.0, cc2), sentinel) } } impl<'doc, K, C> ToValue for &'doc OrMap<K, C> where K: Hash + Eq + fmt::Display, C: ExtensionType, { type Values = snapshot::OrMap<'doc, K, AllValues<'doc, C::ValueRef<'doc>>>; type Value = snapshot::OrMap<'doc, K, CollapsedValue<'doc, C::ValueRef<'doc>>>; type LeafValue = Either<MvRegValue, <C::ValueRef<'doc> as ToValue>::LeafValue>; fn values(self) -> Self::Values { let mut ret_map = snapshot::OrMap::default(); for (key, inner_map) in self.0.iter() { let v = match inner_map.coerce_to_value_ref() { ValueRef::Map(m) => AllValues::Map(m.values()), ValueRef::Array(a) => AllValues::Array(a.values()), ValueRef::Register(r) => AllValues::Register(r.values()), ValueRef::Custom(c) => AllValues::Custom(c.values()), }; ret_map.map.insert(key.borrow(), v); } ret_map } fn value(self) -> Result<Self::Value, Box<SingleValueError<Self::LeafValue>>> { let mut ret_map = snapshot::OrMap::default(); for (key, inner_map) in self.0.iter() { let v = match inner_map.coerce_to_value_ref() { ValueRef::Map(m) => m.value().map(CollapsedValue::Map).map(Some), ValueRef::Array(a) => a.value().map(CollapsedValue::Array).map(Some), ValueRef::Register(r) => { 
match r.value() { Ok(v) => Ok(Some(CollapsedValue::Register(v))), // don't include empty values in the map // // NOTE: this means that clearing an `MvReg` that's held in a map // effectively removes the element, but does *not* actually remove it from the // map (ie, its `InnerMap` is still there). is that a problem? Err(e) if e.issue == SingleValueIssue::Cleared => Ok(None), Err(mut e) => { // make errors more helpful by including the path to the MvReg with conflicts e.path.push(key.to_string()); Err(e.map_values(Either::Left)) } } } ValueRef::Custom(c) => c .value() .map(CollapsedValue::Custom) .map(Some) .map_err(|v| v.map_values(Either::Right)), }?; if let Some(v) = v { ret_map.map.insert(key.borrow(), v); } } Ok(ret_map) } } impl<K, C> OrMap<K, C> where K: Hash + Eq, { /// Returns a reference to the element at the given key, if any. pub fn get<Q>(&self, key: &Q) -> Option<&TypeVariantValue<C>> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { self.0.get(key) } /// Returns a mutable reference to the element at the given key, if any. /// /// Invalidates the dots cache for the given map entry, so calling `.dots()` on this collection /// after invoking this method may be slower as it has to call `.dots()` on this entry to /// re-compute. pub fn get_mut_and_invalidate<Q>(&mut self, key: &Q) -> Option<&mut TypeVariantValue<C>> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { self.0.get_mut_and_invalidate(key) } /// Returns the number of elements in this map. pub fn len(&self) -> usize { self.0.len() } /// Returns true if this map has no elements. pub fn is_empty(&self) -> bool { self.0.is_empty() } // Insert an element into the map. // // Note, this is a low level operation. CRDT types should generally // not be manipulated directly by user code. For one thing, you'd normally // want to also modify a CausalContext every time an OrMap is modified. 
#[doc(hidden)] pub fn insert(&mut self, key: K, value: TypeVariantValue<C>) { self.0.insert(key, value); } /// Iterates over key-value pairs in this CRDT, mutably, in arbitrary order. /// /// Invalidates the dots cache for all the map's entries, so calling `.dots()` on this /// collection after invoking this method may be quite slow (it has to call `.dots()` on all /// the entries). pub fn iter_mut_and_invalidate( &mut self, ) -> impl ExactSizeIterator<Item = (&K, &mut TypeVariantValue<C>)> { self.0.iter_mut_and_invalidate() } /// Retain only the entries for which a predicate is true. /// /// Invalidates the dots cache for all the map's entries, so calling `.dots()` on this /// collection after invoking this method may be quite slow (it has to call `.dots()` on all /// the entries). pub fn retain_and_invalidate(&mut self, f: impl FnMut(&K, &mut TypeVariantValue<C>) -> bool) { self.0.retain_and_invalidate(f) } pub fn inner(&self) -> &DotMap<K, TypeVariantValue<C>> { &self.0 } } macro_rules! apply_to_X { ($name:ident, $frag:literal, $field:ident, [$($others:ident),*], $innerType:ty) => { /// Updates the value at key `k` to be #[doc = $frag] /// using `o`. /// /// This is mostly a convenience wrapper around [`OrMap::apply`]. /// See that method for more details. /// /// # Multiple Operations /// /// Multiple operations within the closure `o` require manual context management. /// Each operation needs a context containing dots from previous operations. /// Call this method multiple times to avoid manual context handling. pub fn $name<'data, O>(&'data self, o: O, k: K, cc: &'_ CausalContext, id: Identifier) -> CausalDotStore<Self> where O: for<'cc, 'v> FnOnce( &'v $innerType, &'cc CausalContext, Identifier, ) -> CausalDotStore<$innerType>, { let CausalDotStore { store: ret_map, context: mut ret_cc, } = self.apply( move |m, cc, id| { // NOTE: the original code calls ORMap.apply again here because everything // is just weakly-typed stringly maps. 
we use structured types, so can't easily // call ORMap.apply recursively here. that mostly shouldn't be a problem, // though there is one difference that I don't _think_ matters: ORMap.apply // _always_ injects an ALIVE key into the map it generates, which means that it // injects an ALIVE field into the equivalent of InnerMap as well! That extra // ALIVE is, as far as I can tell, not used or relevant, but I wanted to call // it out nonetheless. o(&m.$field, cc, id).map_store(Value::from) }, k.clone(), cc, id ); // recommitted value of type $field, delete the other two ($others). if let Some(inner) = self.0.get(&k) { $( inner.$others.add_dots_to(&mut ret_cc); )* } CausalDotStore { store: ret_map, context: ret_cc, } } }; } impl<K, C> OrMap<K, C> where K: Hash + Eq + fmt::Debug + Clone, C: ExtensionType, { /// Creates a CRDT for the creation of a new empty [`OrMap`]. pub fn create(&self, _cc: &CausalContext, _id: Identifier) -> CausalDotStore<Self> { // NOTE: the original OrMap implementation also sets an `.alive` field here. // see the YADR in `mod crdts` for why we don't do that. CausalDotStore { store: Self(Default::default()), context: CausalContext::default(), } } apply_to_X!( apply_to_map, "an [`OrMap`]", map, [array, reg, custom], OrMap<String, C> ); apply_to_X!( apply_to_array, "an [`OrArray`]", array, [map, reg, custom], OrArray<C> ); apply_to_X!( apply_to_register, "an [`MvReg`]", reg, [map, array, custom], MvReg ); /// Updates the value at key `k` to be a custom type using `o`. /// /// This is mostly a convenience wrapper around [`OrMap::apply`]. /// See that method for more details. /// /// # Multiple Operations /// /// Multiple operations within the closure `o` require manual context management. /// Each operation needs a context containing dots from previous operations. /// Call this method multiple times to avoid manual context handling. // NOTE(ow): Can't use the `apply_to_X` macro above, as `O` goes from // `C` to `C::Value`. 
pub fn apply_to_custom<'data, O>( &'data self, o: O, k: K, cc: &'_ CausalContext, id: Identifier, ) -> CausalDotStore<Self> where O: for<'cc, 'v> FnOnce(&'v C, &'cc CausalContext, Identifier) -> CausalDotStore<C::Value>, { let CausalDotStore { store: ret_map, context: mut ret_cc, } = self.apply( move |m, cc, id| { let y = o(&m.custom, cc, id); y.map_store(Value::Custom) }, k.clone(), cc, id, ); if let Some(inner) = self.0.get(&k) { inner.map.add_dots_to(&mut ret_cc); inner.array.add_dots_to(&mut ret_cc); inner.reg.add_dots_to(&mut ret_cc); } CausalDotStore { store: ret_map, context: ret_cc, } } /// Creates a CRDT that represents `O` applied to the [`Value`] of the element with key `key`, /// if any, and written back to that same key in the map. /// /// `O` will be passed `None` if there is currently no value with key `key`, such as when apply /// is used on an empty map or on an [`OrMap`] CRDT that doesn't _change_ the value at `key`. /// /// # Multiple Operations /// /// Multiple operations within the closure require manual context management. Each operation /// needs a context containing dots from previous operations. Call `apply` multiple times or /// use the transaction API to avoid manual context handling. pub fn apply<'data, O>( &'data self, o: O, key: K, cc: &'_ CausalContext, id: Identifier, ) -> CausalDotStore<Self> where O: for<'cc, 'v> FnOnce( &'v TypeVariantValue<C>, &'cc CausalContext, Identifier, ) -> CausalDotStore<Value<C>>, { let mut ret_dot_map = Self::default(); let v = if let Some(v) = self.get(&key) { v } else { &TypeVariantValue::default() }; // NOTE: the original OrArray implementation also updates an `.alive` field here. // see the YADR in `mod crdts` for why we don't do that. 
// ask `O` to generate the new value for this key, // remembering to incorporate the `alive` change into what o receives let CausalDotStore { store: new_v, context: ret_cc, } = o(v, cc, id); ret_dot_map.0.set(key, new_v.into()); CausalDotStore { store: ret_dot_map, context: ret_cc, } } /// Creates a CRDT that represents the removal of the element with key `k`. /// /// A removed element, if there was one, is represented by a store only including the bottom /// element ⊥ and the embedded dots of the removed value. pub fn remove<Q>(&self, k: &Q, _cc: &CausalContext, _id: Identifier) -> CausalDotStore<Self> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { let Some(inner_map) = self.0.get(k) else { // If there's no inner map, there's nothing to change, // and an empty dot store is sufficient. return CausalDotStore::new(); }; // NOTE: the original implementation does not write alive here, but that means // deleting from an array can make it bottom, which doesn't align with the behavior seen // when clearing. one of the authors of the original DSON paper confirmed by email on // 2023-08-25 that the right thing to do here is likely to write alive in remove as well. // _but_ since we don't use `.alive` in this implementation (see YADR in `mod crdts`), we // do nothing. // mark all dots as seen so that joining with this CRDT will erase all other entries. let ret_cc = inner_map.dots(); CausalDotStore { store: Self(Default::default()), context: ret_cc, } } /// Creates a CRDT that represents the erasure of all elements values of this array. /// /// A cleared map is represented by a store only including the alive field. This means that it /// is not equal to the bottom element ⊥, and thus signals an empty map. It also includes all /// embedded dots of the map to make it clear that it has seen those writes and did not want /// them to continue existing. 
pub fn clear(&self, _cc: &CausalContext, _id: Identifier) -> CausalDotStore<Self> { // NOTE: the original OrArray implementation also updates an `.alive` field here. // see the YADR in `mod crdts` for why we don't do that. // mark all dots as seen so that joining with this CRDT will erase all other entries. let ret_cc = self.dots(); CausalDotStore { store: Self(Default::default()), context: ret_cc, } } /// Removes the entry for key `k` from this CRDT directly. /// /// This change is not represented as a delta-CRDT, meaning this may cause unintended /// consequences if `self` is later distributed along with an unmodified [`CausalContext`]. You /// almost certainly don't want to use this method and want [`OrMap::remove`] instead. pub fn remove_immediately<Q>(&mut self, k: &Q) -> Option<TypeVariantValue<C>> where K: Borrow<Q>, Q: Hash + Eq + ?Sized, { self.0.remove(k) } } #[cfg(test)] mod tests { use super::*; use crate::{ crdts::{ NoExtensionTypes, test_util::{Ops, join_harness}, }, sentinel::{DummySentinel, test::ValueCountingValidator}, }; use std::collections::BTreeMap; type OrMap<K> = super::OrMap<K, NoExtensionTypes>; #[test] fn empty() { let cds = CausalDotStore::<OrMap<String>>::default(); assert!(cds.store.is_bottom()); assert!(cds.store.value().unwrap().is_empty()); assert_eq!(cds.store.values().len(), 0); } #[test] fn created_is_bottom() { let map = OrMap::<String>::default(); let cc = CausalContext::new(); let id = Identifier::new(0, 0); let m = map.create(&cc, id); assert!(m.store.is_bottom()); assert_eq!(map, m.store); } #[test] fn cleared_is_bottom() { let map = OrMap::<String>::default(); let cc = CausalContext::new(); let id = Identifier::new(0, 0); let m = map.create(&cc, id); let m = m.store.clear(&m.context, id); assert!(m.store.is_bottom()); } #[test] fn set_get_remove() { let map = OrMap::<String>::default(); let cc = CausalContext::new(); let id = Identifier::new(0, 0); let m = map.apply_to_register( |_old, cc, id| 
MvReg::default().write(MvRegValue::Bool(true), cc, id), "foo".into(), &cc, id, ); assert!(!m.store.is_bottom()); assert_eq!( m.store.value().unwrap().get(&String::from("foo")).cloned(), Some(CollapsedValue::Register(&MvRegValue::Bool(true))) ); assert_eq!(m.store.len(), 1); // count the number of dots we generated under `id` assert_eq!( m.context.next_dot_for(id).sequence().get() - 1, 1 /* mvreg.write */ ); let m = m.store.remove("foo", &cc, id); assert!(m.store.is_bottom()); // empty maps become bottom assert_eq!(m.store.value().unwrap().get(&String::from("foo")), None); assert_eq!(m.store.len(), 0); assert_eq!(m.context.next_dot_for(id).sequence().get() - 1, 1); } #[test] fn set_one_key_then_another() { let map = CausalDotStore::<OrMap<String>>::new(); let id = Identifier::new(0, 0); let delta = map.store.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), "true".into(), &map.context, id, ); assert!(!delta.store.is_bottom()); assert_eq!( delta .store .value() .unwrap() .get(&String::from("true")) .cloned(), Some(CollapsedValue::Register(&MvRegValue::Bool(true))) ); assert_eq!(delta.store.len(), 1); let map = map.join(delta, &mut DummySentinel).unwrap(); let delta = map.store.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::Bool(false), cc, id), "false".into(), &map.context, id, ); assert!(!delta.store.is_bottom()); assert_eq!( delta .store .value() .unwrap() .get(&String::from("false")) .cloned(), Some(CollapsedValue::Register(&MvRegValue::Bool(false))) ); assert_eq!(delta.store.len(), 1); let map = map.join(delta, &mut DummySentinel).unwrap(); assert!(!map.store.is_bottom()); assert_eq!( map.store .value() .unwrap() .get(&String::from("true")) .cloned(), Some(CollapsedValue::Register(&MvRegValue::Bool(true))) ); assert_eq!( map.store .value() .unwrap() .get(&String::from("false")) .cloned(), Some(CollapsedValue::Register(&MvRegValue::Bool(false))) ); assert_eq!(map.store.len(), 2); } #[test] fn 
independent_keys() { join_harness( OrMap::<String>::default(), |cds, _| cds, |m, cc, id| { m.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), "foo".into(), &cc, id, ) }, |m, cc, id| { m.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::U64(42), cc, id), "bar".into(), &cc, id, ) }, DummySentinel, |CausalDotStore { store: m, .. }, _| { assert!(!m.is_bottom()); assert_eq!( m.value().unwrap().get(&String::from("foo")).cloned(), Some(CollapsedValue::Register(&MvRegValue::Bool(true))) ); assert_eq!( m.value().unwrap().get(&String::from("bar")).cloned(), Some(CollapsedValue::Register(&MvRegValue::U64(42))) ); }, ); } #[test] fn conflicting_reg_value() { join_harness( OrMap::<String>::default(), |cds, _| cds, |m, cc, id| { m.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), "foo".into(), &cc, id, ) }, |m, cc, id| { m.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::U64(42), cc, id), "foo".into(), &cc, id, ) }, ValueCountingValidator::default(), |CausalDotStore { store: m, .. }, sentinel| { assert!(!m.is_bottom()); let values = m.values(); let AllValues::Register(v) = values.get(&String::from("foo")).unwrap() else { panic!("foo isn't a register even though we only wrote registers"); }; assert_eq!(v.len(), 2); assert!(v.contains(&MvRegValue::Bool(true))); assert!(v.contains(&MvRegValue::U64(42))); // we end up with two values, but only added 1 in the join assert_eq!(sentinel.added, BTreeMap::from([(MvRegValue::U64(42), 1)])); assert!(sentinel.removed.is_empty()); }, ); } #[test] fn concurrent_clear() { join_harness( OrMap::<String>::default(), |CausalDotStore { store: m, context: cc, }, id| { m.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), "foo".into(), &cc, id, ) }, |m, cc, id| m.clear(&cc, id), |m, cc, id| m.clear(&cc, id), DummySentinel, |CausalDotStore { store: m, .. 
}, _| { // empty maps become bottom assert!(m.is_bottom()); let values = m.values(); assert_eq!(values.len(), 0); }, ); } #[test] fn remove_reg_value() { join_harness( OrMap::<String>::default(), |CausalDotStore { store: m, context: cc, }, id| { m.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), "foo".into(), &cc, id, ) }, |m, cc, id| m.clear(&cc, id), |m, cc, _| CausalDotStore { store: m.clone(), context: cc, }, ValueCountingValidator::new(true), |CausalDotStore { store: m, .. }, sentinel| { // empty maps become bottom assert!(m.is_bottom()); let values = m.values(); assert_eq!(values.get(&String::from("foo")), None); assert!(sentinel.added.is_empty()); // conventionally the left side is the base state and the right side is the delta, // so this join semantically just discards the delta since the start and end states // are empty assert!(sentinel.removed.is_empty()); }, ); join_harness( OrMap::<String>::default(), |CausalDotStore { store: m, context: cc, }, id| { m.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), "foo".into(), &cc, id, ) }, |m, cc, _| CausalDotStore { store: m.clone(), context: cc, }, |m, cc, id| m.clear(&cc, id), ValueCountingValidator::new(true), |CausalDotStore { store: m, .. 
}, sentinel| { // empty maps become bottom assert!(m.is_bottom()); let values = m.values(); assert_eq!(values.get(&String::from("foo")), None); assert!(sentinel.added.is_empty()); // now we start with a non-empty value, so a change is observed assert_eq!( sentinel.removed, BTreeMap::from([(MvRegValue::Bool(true), 1)]) ); }, ); } #[test] fn update_vs_remove() { join_harness( OrMap::<String>::default(), |CausalDotStore { store: m, context: cc, }, id| { // start out with a map with the "foo" key set m.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::U64(42), cc, id), "foo".into(), &cc, id, ) }, |m, cc, id| { // one writer updates foo m.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), "foo".into(), &cc, id, ) }, |m, cc, id| { // the other writer removes foo m.remove("foo", &cc, id) }, ValueCountingValidator::default(), |CausalDotStore { store: m, .. }, sentinel| { // _not_ bottom since the map isn't empty assert!(!m.is_bottom()); // the semantics of observed-remove (remember "*OR*map") // is that updates concurrent with removes leave the updates intact let values = m.values(); let AllValues::Register(v) = values.get(&String::from("foo")).unwrap() else { panic!("foo isn't a register even though we only wrote registers"); }; assert_eq!(v, [MvRegValue::Bool(true)]); assert!(sentinel.added.is_empty()); assert!(sentinel.removed.is_empty()); }, ); } #[test] fn nested_update_vs_remove() { join_harness( OrMap::<String>::default(), |CausalDotStore { store: m, context: cc, }, id| { // start out with a map like {foo: {bar: 42}} m.apply_to_map( |_old, cc, id| { OrMap::default().apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::U64(42), cc, id), "bar".into(), cc, id, ) }, "foo".into(), &cc, id, ) }, |m, cc, id| { // one writer adds a field (baz) to the inner map m.apply_to_map( |old, cc, id| { old.apply_to_register( |_old, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id),
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
true
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/orarray.rs
src/crdts/orarray.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use super::{ Either, NoExtensionTypes, TypeVariantValue, Value, ValueRef, mvreg::MvRegValue, snapshot::{self, AllValues, CollapsedValue, SingleValueError, SingleValueIssue, ToValue}, }; use crate::{ CausalContext, CausalDotStore, DETERMINISTIC_HASHER, Dot, DotFun, DotFunMap, DotMap, DotStoreJoin, ExtensionType, Identifier, MvReg, OrMap, dotstores::{DotChange, DotStore, DryJoinOutput}, sentinel::{DummySentinel, KeySentinel, Sentinel, TypeSentinel, ValueSentinel, Visit}, }; pub use position::Position; use std::{convert::Infallible, fmt}; pub(super) mod position; #[derive(Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct Uid(Dot); impl From<Dot> for Uid { fn from(value: Dot) -> Self { Self(value) } } impl fmt::Debug for Uid { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("Uid") .field(&self.0.actor()) .field(&self.0.sequence()) .finish() } } impl Uid { // needs to be public for integration tests #[doc(hidden)] pub fn dot(&self) -> Dot { self.0 } } /// An **Observed-Remove Array**, a list-like CRDT that supports concurrent insertions, updates, /// removals, and moves of elements. /// /// `OrArray` is one of the three core CRDT primitives provided by this crate, alongside [`OrMap`] /// and [`MvReg`]. It can be nested within other CRDTs to build complex, JSON-like data structures. /// /// ## Stable Positioning /// /// This type does not use integer indices directly as the position would require updating multiple /// element positions following an insertion or deletion, which isn't feasible to do in a CRDT /// context. Therefore, the implementation uses _stable identifiers_ (Martin Kleppmann; Moving /// elements in list CRDTs (2020)) in the form of real numbers (`f64`), which allow insertion and /// deletion without updating other elements. 
In this model, an insertion of an element between an /// element at position 𝑝1 and one at 𝑝2, inserts an element at position (𝑝1 + 𝑝2)/2 (see /// [`Position::between`]. The array is then the sequence of values sorted by the corresponding /// position in ascending order. For example, the array [𝑎, 𝑏, 𝑐] can be represented by the set /// {(𝑎, 1.0), (𝑏, 7.5), (𝑐, 42.7)}. /// /// This type allows and reconciles concurrent operations (as all CRDTs) do. As a result, a given /// position may be ambiguous, similarly to the value of an [`MvReg`] under concurrent writes. To /// this end, the position is actually a set of _possible positions_. For example, if element 𝑏 is /// concurrently moved before 𝑎 and after 𝑐, it’s position may be {0.3, 54}. To sort the array, /// replicas deterministically choose an element from the position set (eg, the maximum) to use for /// ordering the values. Since all mutations explicitly also set the position of an element, the /// set of positions is collapsed any time a mutation happens. /// /// ## Unique Identifiers /// /// To keep track of values as they change positions under concurrent operations, we identify /// each array element by a unique identifier. Concretely, we use the dot with which the element /// was created as the creation point is unique. The representation of an [`OrArray`] is therefore /// a map from unique identifiers to value-positions pairs. For example, the aforementioned array /// can be represented as the map: /// {(𝑖, 1) ↦ (𝑎, {1.0}), /// (𝑗, 1) ↦ (𝑏, {0.3, 54}), /// (𝑖, 2) ↦ (𝑐, {42.7}) /// } /// /// where (𝑖, 1) is the dot marking the creation even of element 𝑎, (𝑗, 1) is the creation event of /// 𝑏, and (𝑖, 2) is the creation event of 𝑐. /// /// When an element is inserted for the first time, the API receives the unique identifier that /// identifies the inserted element until its deletion. /// /// ## Usage /// /// Like `OrMap`, an `OrArray` is typically wrapped in a [`CausalDotStore`]. 
Modifications are /// performed by creating a "delta" CRDT, which is then merged back into the original. /// /// ```rust /// # use dson::{CausalDotStore, OrArray, MvReg, crdts::{Value, mvreg::MvRegValue, orarray::{Position, Uid}, snapshot::{CollapsedValue, ToValue}}, Identifier, sentinel::DummySentinel}; /// // Create a new CausalDotStore containing an OrArray. /// let mut doc: CausalDotStore<OrArray> = CausalDotStore::new(); /// let id = Identifier::new(0, 0); /// /// // Create a delta to insert a value at the beginning of the array. /// let delta = dson::api::array::insert( /// |cc, id| MvReg::default().write(MvRegValue::U64(42), cc, id).map_store(Value::Register), /// doc.store.len(), /// )(&doc.store, &doc.context, id); /// /// // Merge the delta into the document. /// doc = doc.join(delta, &mut DummySentinel).unwrap(); /// /// // The value can now be read from the array. /// let array = doc.store.value().unwrap(); /// let val = doc.store.get(0).unwrap(); /// assert_eq!(val.reg.value().unwrap(), &MvRegValue::U64(42)); /// ``` /// /// You can find more convenient, higher-level APIs for manipulating `OrArray` in the /// [`api::array`](crate::api::array) module. The methods on `OrArray` itself are low-level and /// useful when implementing custom CRDTs or when you need fine-grained control over /// delta creation. 
#[derive(Clone, Default, PartialEq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct OrArray<C = NoExtensionTypes>(pub(super) DotMap<Uid, PairMap<C>>); impl<C> fmt::Debug for OrArray<C> where C: fmt::Debug + ExtensionType, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "[]{:?}", self.0) } } impl<C> DotStore for OrArray<C> where C: ExtensionType, { fn dots(&self) -> CausalContext { self.0.dots() } fn add_dots_to(&self, other: &mut CausalContext) { self.0.add_dots_to(other); } fn is_bottom(&self) -> bool { self.0.is_bottom() } fn subset_for_inflation_from(&self, frontier: &CausalContext) -> Self { Self(DotMap::subset_for_inflation_from(&self.0, frontier)) } } impl<C, S> DotStoreJoin<S> for OrArray<C> where C: ExtensionType + DotStoreJoin<S> + Default + fmt::Debug + Clone + PartialEq, S: Visit<Uid> + Visit<String> + KeySentinel + TypeSentinel<C::ValueKind> + ValueSentinel<MvRegValue>, { fn join( (m1, cc1): (Self, &CausalContext), (m2, cc2): (Self, &CausalContext), on_dot_change: &mut dyn FnMut(DotChange), sentinel: &mut S, ) -> Result<Self, S::Error> where Self: Sized, S: Sentinel, { // NOTE! When making changes to this method, consider if corresponding // changes need to be done to ::dry_join as well! Ok(Self(DotMap::join( (m1.0, cc1), (m2.0, cc2), on_dot_change, sentinel, )?)) } fn dry_join( (m1, cc1): (&Self, &CausalContext), (m2, cc2): (&Self, &CausalContext), sentinel: &mut S, ) -> Result<DryJoinOutput, S::Error> where Self: Sized, S: Sentinel, { DotMap::dry_join((&m1.0, cc1), (&m2.0, cc2), sentinel) } } /// The position and value information for a given entry in an [`OrArray`]. #[derive(Clone, Default, PartialEq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub(super) struct PairMap<Custom> { /// The value assigned to this element. /// /// For reference, this field is called `first` in the original DSON paper and implementation. 
#[doc(alias = "first")] pub(super) value: TypeVariantValue<Custom>, /// The set of positions assigned to this element. /// /// For reference, this field is called `second` in the original DSON paper and implementation. #[doc(alias = "second")] pub(super) positions: DotFunMap<DotFun<Position>>, } impl<C> fmt::Debug for PairMap<C> where C: ExtensionType + fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_tuple("") .field(&format_args!("{:?}", self.value)) .field(&format_args!("pos={:?}", self.positions)) .finish() } } impl<C> DotStore for PairMap<C> where C: ExtensionType, { fn add_dots_to(&self, other: &mut CausalContext) { self.value.add_dots_to(other); self.positions.add_dots_to(other); } fn is_bottom(&self) -> bool { self.value.is_bottom() && self.positions.is_bottom() } fn subset_for_inflation_from(&self, frontier: &CausalContext) -> Self { Self { value: self.value.subset_for_inflation_from(frontier), positions: self.positions.subset_for_inflation_from(frontier), } } } impl<C, S> DotStoreJoin<S> for PairMap<C> where C: ExtensionType + DotStoreJoin<S> + fmt::Debug + Clone + PartialEq, S: Visit<String> + Visit<Uid> + KeySentinel + TypeSentinel<C::ValueKind> + ValueSentinel<MvRegValue>, { fn join( ds1: (Self, &CausalContext), ds2: (Self, &CausalContext), on_dot_change: &mut dyn FnMut(DotChange), sentinel: &mut S, ) -> Result<Self, S::Error> where Self: Sized, S: Sentinel, { // NOTE! When making changes to this method, consider if corresponding // changes need to be done to ::dry_join as well! 
let (m1, cc1) = ds1; let (m2, cc2) = ds2; let value = DotStoreJoin::join((m1.value, cc1), (m2.value, cc2), on_dot_change, sentinel)?; let positions = DotStoreJoin::join( (m1.positions, cc1), (m2.positions, cc2), on_dot_change, // We don't consider the Position as a value, even though it works much like a MvReg so // changes are not observed by the sentinel &mut DummySentinel, ) .expect("DummySentinel is Infallible"); Ok(PairMap { value, positions }) } fn dry_join( ds1: (&Self, &CausalContext), ds2: (&Self, &CausalContext), sentinel: &mut S, ) -> Result<DryJoinOutput, S::Error> where Self: Sized, S: Sentinel, { let (m1, cc1) = ds1; let (m2, cc2) = ds2; let value = DotStoreJoin::dry_join((&m1.value, cc1), (&m2.value, cc2), sentinel)?; let positions = DotStoreJoin::dry_join( (&m1.positions, cc1), (&m2.positions, cc2), // We don't consider the Position as a value, even though it works much like a MvReg so // changes are not observed by the sentinel &mut DummySentinel, ) .expect("DummySentinel is Infallible"); Ok(positions.union(value)) } } impl<C> OrArray<C> { /// Create an array from raw entries pub fn from_entries<I>(iter: I) -> Self where I: IntoIterator<Item = (Uid, TypeVariantValue<C>, DotFunMap<DotFun<Position>>)>, { let iter = iter .into_iter() .map(|(uid, value, positions)| (uid, PairMap { value, positions })); Self(DotMap::from_iter(iter)) } #[doc(hidden)] pub fn insert_raw( &mut self, uid: Uid, pos: impl Iterator<Item = (Dot, Dot, f64)>, value: TypeVariantValue<C>, ) { let mut dotfunmap = DotFunMap::<DotFun<Position>>::default(); for (dot1, dot2, ordering_f64) in pos { let mut dotfun = DotFun::<Position>::default(); dotfun.set(dot2, Position(ordering_f64)); dotfunmap.set(dot1, dotfun); } self.0.insert( uid, PairMap { value, positions: dotfunmap, }, ) } /// Yields the array's elements in random order. 
/// /// This deterministically resolves any ambiguities around each element's position (eg, if it /// was concurrently moved by multiple actors) and returns the elements along with those /// resolved positions. Given the inherently concurrent nature of this data structure, calling /// `iter_as_is` after joining with another [`OrArray`] may yield significantly different /// positions. pub fn iter_as_is(&self) -> impl Iterator<Item = (&TypeVariantValue<C>, Uid, Position)> { self.0.iter().map(|(uid, pair)| { let inner_map = &pair.value; // choose which position among the possible positions to use for this element. // there may be more than one in the presence of concurrent moves. this operation // needs to happen identically across all replicas that have the same `OrArray` // (ie, it must be deterministic), so we always pick the max. let positions = &pair.positions; // positions is a Dot => { Dot => Position }. // the outer Dot is "someone created a position set" // the inner Dot is "someone updated a position set" // so if we have: // // {d1 => {d2 => 0.5, d3 => 1.5}, d4 => {100}} // // it implies that d2 and d3 were two simultaneous `mv`s of the element when its // value was the one created at d1. d4, meanwhile, was a concurrent apply to the // element (that may or may not have changed its original index). // // in this case, we're going to (arbitrarily but deterministically) pick the max // position of the max outer dot, so (assuming d4 > d3 > d2 > d1): 100. // // NOTE: we cannot rely on the position always being available. // // Consider the following sequence of events: // // 1. A composite array element is created by node A: // // store = {Uid(1) => ( // value => {"foo": {2 => true}}, // pos => {3 => {4 => 100.0}})} // cc = {1..=4} // // 2. An internal mutation is made to this element: // // store = {Uid(1) => ( // value => {"foo": {2 => true}, "bar": {5 => true}}, // pos => {6 => {7 => 50.0}} )} // cc = {1..=7} // // 3. 
Now node B receives only this second update. // // delta_store = {Uid(1) => ( // value => {"bar": {5 => true}}, pos => {6 => {7 => 50.0}})} // delta_cc = {3..=7} // // >> Results in state: // // store = {Uid(1) => (value => {"bar": {5 => true}}, pos => {6 => {7 => 50.0}})} // cc = {3..=7} // // 4. Node B now deletes the element: // // store = {} // cc = {3..=7} // // 5. And then sends an update to A: // // delta_store = {} // delta_cc = {5..=7} // // >> Results in state // // store = {Uid(1) => (value => {"foo": {2 => true}}, pos => {})} // cc = {1..=7} // // This is an example of a scenario where we end up with an array element which contains // a non-bottom value, yet doesn't have a position defined. Importantly, this is a // permanent situation: node A can later send a catch-up delta to B, which will achieve // global consistency, but not resolve the incomplete state. // // To get out of this situation, a node must actively modify the element by generating a // new APPLY operation. This will create a new position root, so the element will have a // well defined index again. But if no action is taken, the best we can do is synthesize // a position from the uid, so we have an arbitrary but deterministic ordering. That's // what we do here. // // The rationale for this choice is detailed in the following YADR. // // YADR: 2024-06-18 Array elements without a position defined // // In the context of dealing with array elements which don't have a position defined, // we faced a decision of how to expose these elements through the public OrArray API. // // We decided for assigning these elements an arbitrary but deterministic position, as // a function of their uid, and neglected to attempt to map them to the start or end of // the array, or provide a separate API for access to position-less elements, when their // uid is not yet known. 
// // We did this to achieve minimal impact to the user-facing API, to avoid increasing the // cognitive burden of using this crate, and to ensure that every node has a consistent // view of the array when they share the same state, accepting that users may be // surprised to find that a non-move operation (like a delete) can result in an element // being assigned a different position. // // We think this is the right trade-off because this is a rare edge case, and placing // the burden of handling it on users (by providing a separate access interface) // would've been unreasonable. Additionally, non-deterministic views of the array would // have violated a core assumption that nodes in sync with each other have the same view // of the state. let p = if let Some(max_root) = positions.keys().max() { let at_max_root = positions .get(&max_root) .expect("this is the max key from just above, so must exist"); let max_dot = at_max_root .keys() .max() .expect("every position set has at least one position (from its creator)"); at_max_root .get(&max_dot) .expect("this is the max key from just above, so must exist") } else { const MASK: u64 = u64::MAX >> (u64::BITS - f64::MANTISSA_DIGITS + 1); const SCALE_FACTOR: f64 = Position::UPPER / MASK as f64; let value = (DETERMINISTIC_HASHER.hash_one(uid) & MASK) as f64; &Position::from_raw(value * SCALE_FACTOR).expect("within range") }; (inner_map, *uid, *p) }) } /// Iterates over the raw entries of this array pub fn iter_entries( &self, ) -> impl Iterator<Item = (Uid, &TypeVariantValue<C>, &DotFunMap<DotFun<Position>>)> { self.0.iter().map(|(uid, pair)| { let value = &pair.value; let positions = &pair.positions; (*uid, value, positions) }) } /// Iterates over array elements in an arbitrary order with mutable access. /// /// This is similar to `iter_as_is`, but does not resolve ambiguities around each element's /// position, thus it is faster. 
/// /// Invalidates the dots cache for all the array's entries, so calling `.dots()` on this /// collection after invoking this method may be quite slow (it has to call `.dots()` on all /// the entries). pub fn iter_mut_and_invalidate( &mut self, ) -> impl ExactSizeIterator<Item = &mut TypeVariantValue<C>> { self.0 .iter_mut_and_invalidate() .map(|(_, pair)| &mut pair.value) } /// Keeps only the values for which a predicate is true. /// /// Iteration is done in some arbitrary order. /// /// Invalidates the dots cache for all the array's entries, so calling `.dots()` on this /// collection after invoking this method may be quite slow (it has to call `.dots()` on all /// the entries). pub fn retain_values_and_invalidate( &mut self, mut f: impl FnMut(&mut TypeVariantValue<C>) -> bool, ) { self.0.retain_and_invalidate(|_, pair| f(&mut pair.value)); } /// Generates the array's by-index representation. /// /// This deterministically resolves any ambiguities around each element's position (eg, if it /// was concurrently moved by multiple actors) and returns the elements according to those /// resolved positions. Given the inherently concurrent nature of this data structure, calling /// `with_list` after joining with another [`OrArray`] may yield significantly different /// orderings. /// /// The provided `map` function allows skipping over elements when the entire list is not /// needed (eg, when filtering) and propagating error cases (eg, for single-value collapsed /// reads in the presence of conflicts). // TODO: doctest once we have a reasonably-easy constructor // TODO: keep an internal materialization of the numerical index -> uid mapping so that we // don't need to re-compute this unnecessarily. maybe that materialization can even // be maintained incrementally through join! 
pub fn with_list<'ds, F, R, E>(&'ds self, mut map: F) -> Result<Vec<(R, Uid, Position)>, E> where F: FnMut(&'ds TypeVariantValue<C>, Uid, Position) -> Result<Option<R>, E>, { let mut result: Vec<_> = self .iter_as_is() .filter_map(|(inner_map, uid, p)| -> Option<Result<_, E>> { // NOTE: the transpose here is so that we get the `Option` on the _outside_ // and can use `?` at least once. i wanted `O` to have a return value of // `Result<Option>` so that it's easier to use `?` in the definition of `O` to // handle errors, but that has an impedance mismatch with the signature required by // `filter_map` which we're in the context of here. let v = match (map)(inner_map, uid, p).transpose()? { Ok(v) => v, Err(e) => return Some(Err(e)), }; Some(Ok((v, uid, p))) }) .collect::<Result<_, E>>()?; // NOTE: the original implementation sorts _only_ by Position here, but i belive that // to be wrong -- Position is not guaranteed to be unique (eg, if two nodes concurrently // push), which would mean a non-deterministic sort. so, we make it determinstic by also // sorting by the uid. result.sort_unstable_by_key(|&(_, uid, p)| (p, uid)); Ok(result) } /// Returns the element at the given index. /// /// Since the array is not actually stored in index order, this requires processing and sorting /// the entire array, which can be quite slow! pub fn get(&self, idx: usize) -> Option<&TypeVariantValue<C>> { self.get_entry(idx).map(|(_, v)| v) } /// Returns the element at the given index and its [`Uid`]. /// /// Since the array is not actually stored in index order, this requires processing and sorting /// the entire array, which can be quite slow! 
pub fn get_entry(&self, idx: usize) -> Option<(Uid, &TypeVariantValue<C>)> { if idx >= self.0.len() { return None; } if idx == 0 { // short-circuit head access which doesn't require collecting entire list let first = self .iter_as_is() .min_by_key(|&(_, _, p)| p) .expect("0 >= len, so len > 0"); return Some((first.1, first.0)); } if idx == self.len() - 1 { // short-circuit last element access which doesn't require collecting entire list let last = self .iter_as_is() .max_by_key(|&(_, _, p)| p) .expect("0 >= len, so len > 0"); return Some((last.1, last.0)); } // TODO(https://github.com/rust-lang/rust/issues/61695): use into_ok let mut result = self .with_list(|v, u, _| Ok::<_, Infallible>(Some((u, v)))) .expect("E == Infallible"); // NOTE: swap_remove is okay here since we're throwing away the array anyway Some(result.swap_remove(idx).0) } /// Returns a reference to the element at the given [`Uid`], if any. pub fn get_by_uid(&self, uid: Uid) -> Option<&TypeVariantValue<C>> { self.0.get(&uid).map(|pm| &pm.value) } /// Returns a mutable reference to the element at the given [`Uid`], if any. /// /// Invalidates the dots cache for the given array entry, so calling `.dots()` on this /// collection after invoking this method may be slower as it has to call `.dots()` on this /// entry to re-compute. pub fn get_by_uid_mut_and_invalidate(&mut self, uid: Uid) -> Option<&mut TypeVariantValue<C>> { self.0.get_mut_and_invalidate(&uid).map(|pm| &mut pm.value) } /// Returns the number of elements in this array. pub fn len(&self) -> usize { // NOTE: the original has to walk the fields to filter out alive, we don't \o/ self.0.len() } /// Returns true if this array has no elements. 
pub fn is_empty(&self) -> bool { self.0.is_empty() } } impl<'doc, C> ToValue for &'doc OrArray<C> where C: ExtensionType, { type Values = snapshot::OrArray<AllValues<'doc, C::ValueRef<'doc>>>; type Value = snapshot::OrArray<CollapsedValue<'doc, C::ValueRef<'doc>>>; type LeafValue = Either<MvRegValue, <C::ValueRef<'doc> as ToValue>::LeafValue>; fn values(self) -> Self::Values { let result = self.with_list(|v, _, _| match v.coerce_to_value_ref() { ValueRef::Map(m) => Ok::<_, Infallible>(Some(AllValues::Map(m.values()))), ValueRef::Array(a) => Ok(Some(AllValues::Array(a.values()))), ValueRef::Register(r) => Ok(Some(AllValues::Register(r.values()))), ValueRef::Custom(c) => Ok(Some(AllValues::Custom(c.values()))), }); // TODO(https://github.com/rust-lang/rust/issues/61695): use into_ok let list = result.unwrap().into_iter().map(|(v, _, _)| v).collect(); snapshot::OrArray { list } } fn value(self) -> Result<Self::Value, Box<SingleValueError<Self::LeafValue>>> { let result = self.with_list(|v, uid, p| match v.coerce_to_value_ref() { ValueRef::Map(m) => Ok(Some(CollapsedValue::Map(m.value()?))), ValueRef::Array(a) => Ok(Some(CollapsedValue::Array(a.value()?))), ValueRef::Custom(c) => Ok(Some(CollapsedValue::Custom( c.value().map_err(|v| v.map_values(Either::Right))?, ))), ValueRef::Register(r) => match r.value() { Ok(v) => Ok(Some(CollapsedValue::Register(v))), // don't include empty values in the array // // NOTE: this means that clearing an `MvReg` that's held in an array // effectively removes the element, but does *not* actually remove it from the // array (ie, its `PairMap` is still there). is that a problem? Err(e) if e.issue == SingleValueIssue::Cleared => Ok(None), Err(mut e) => { // make errors more helpful by including the path to the MvReg with conflicts e.path.push(format!("[{uid:?}@{}]", p.0)); Err(e.map_values(Either::Left)) } }, }); let list = result?.into_iter().map(|(v, _, _)| v).collect(); Ok(snapshot::OrArray { list }) } } macro_rules! 
apply_to_X { ($name:ident, $frag:literal, $field:ident, [$($others:ident),*], $innerType:ty) => { /// Updates the value at position `p` to be #[doc = $frag] /// using `o`. /// /// This is mostly a convenience wrapper around [`OrArray::apply`]. /// See that method for more details. pub fn $name<'data, O>( &'data self, uid: Uid, o: O, p: Position, cc: &'_ CausalContext, id: Identifier, ) -> CausalDotStore<Self> where O: for<'cc> FnOnce( &'data $innerType, &'cc CausalContext, Identifier ) -> CausalDotStore<$innerType>, { let CausalDotStore { store: ret_map, context: mut ret_cc, } = self.apply( uid, move |m, cc, id| { // NOTE: in the original code, this calls ORMap.apply since there `store` // is just a DotMap with the keys MAP, ARRAY, and VALUE. In our case, we have a // more strongly typed variant where we can write the fields directly. Like for // apply_to_X in ormap.rs, we just apply to the indicated field directly // instead and have all the other fields be `None`. o(&m.$field, cc, id).map_store(Value::from) }, p, cc, id ); // recommitted value of type $field, delete the other two ($others). if let Some(inner) = self.0.get(&uid).map(|pm| &pm.value) { $( inner.$others.add_dots_to(&mut ret_cc); )* } CausalDotStore { store: ret_map, context: ret_cc, } } }; } macro_rules! insert_X { ($name:ident, $frag:literal, $field:ident, [$($others:ident),*], $innerType:ty) => { /// Inserts #[doc = $frag] /// value produced by `O` at position `p`. /// /// This is mostly a convenience wrapper around [`OrArray::insert`]. /// See that method for more details. 
pub fn $name<O>( &self, uid: Uid, o: O, p: Position, cc: &'_ CausalContext, id: Identifier, ) -> CausalDotStore<Self> where O: for<'cc> FnOnce(&'cc CausalContext, Identifier) -> CausalDotStore<$innerType>, { self.insert( uid, move |cc, id| { // NOTE: see comment in apply_to_X about ORMap::apply // NOTE: the original code is provided with the old `PairMap` (which we // assume is always `None`; see comment in `fn insert`) which it // then passes directly to `o`. however, it never calls .get(FIRST) // to project out the value, so any `o`s that were passed in would // have a hard time access the value of the current element if it // _did_ ever exist. which is all to say i believe the original // implementation of `insert` never said the `PairMap` be `Some` for // `insert`, which supports our assumption in `fn insert`. o(cc, id).map_store(Value::from) }, p, cc, id, ) } }; } impl<C> OrArray<C> where C: ExtensionType + fmt::Debug + PartialEq, { /// Creates a CRDT for the creation of a new empty [`OrArray`]. pub fn create(&self, _cc: &CausalContext, _id: Identifier) -> CausalDotStore<Self> { // NOTE: the original OrArray implementation also sets an `.alive` field here. // see the YADR in `mod crdts` for why we don't do that. CausalDotStore { store: Self(Default::default()), context: CausalContext::default(), } } apply_to_X!( apply_to_map, "an [`OrMap`]", map, [array, reg], OrMap<String, C> ); apply_to_X!(apply_to_array, "an [`OrArray`]", array, [map, reg], Self); apply_to_X!(apply_to_register, "an [`MvReg`]", reg, [map, array], MvReg); insert_X!(insert_map, "an [`OrMap`]", map, [array, reg], OrMap<String, C>); insert_X!(insert_array, "an [`OrArray`]", array, [map, reg], Self);
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
true
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/snapshot.rs
src/crdts/snapshot.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! Provides snapshots of CRDTs for inspection. //! //! A snapshot is a read-only, immutable view of the state of a CRDT at a //! particular point in time. //! //! This module provides two ways to get a snapshot of a CRDT, exposed via the //! [`ToValue`] trait: //! //! 1. **Conflict-preserving snapshots** via [`ToValue::values()`]: This method returns an //! [`AllValues`] snapshot. In this representation, any multi-value registers //! ([`MvReg`]) will contain all of their concurrently written values. This is useful when you //! expect conflicts and want to handle them in your application logic (e.g., by merging, //! presenting them to the user, or picking one). //! //! 2. **Conflict-collapsing snapshots** via [`ToValue::value()`]: This method returns a //! [`CollapsedValue`] snapshot. It assumes that there are no conflicts in the CRDT. If any //! multi-value register holds more than one value, this method will return an error. This //! provides a more convenient, "normal" data view when you don't expect conflicts or have a //! logic in place to resolve them before reading. //! //! The snapshot types like [`OrMap`], [`OrArray`], and [`MvReg`] mirror the structure of the //! actual CRDTs but are read-only. use super::{Either, ValueRef, mvreg::MvRegValue}; use crate::{DsonRandomState, ExtensionType, create_map, dotstores::DotFunValueIter}; use std::{ collections::HashMap, error, fmt, hash::Hash, ops::{Deref, Index}, }; /// A type that holds values that may or may not feature conflicts. pub trait ToValue { /// The conflict-preserving value type. type Values; /// The conflict-less value type. type Value; /// The (owned) leaf value type. /// /// This is the value type of the outmost nested CRDT, like a [`MvRegValue`]. type LeafValue; /// Returns the values of this type without collapsing conflicts. 
/// /// That is, any [`MvReg`](crate::MvReg) nested below this value will produce the full _set_ of /// possible values, not just a single (arbitrarily chosen) value. fn values(self) -> Self::Values; /// Returns the values of this type assuming (and asserting) no conflicts on element values. /// /// That is, for any [`MvReg`](crate::MvReg) nested below this value, this method asserts that /// the `MvReg` has only a single possible value (ie, it ultimately calls [`ToValue::value`] on /// each such `MvReg`), and returns just that one value. /// /// This makes for a more ergonomic API than [`ToValue::values`], but comes at the cost of /// erroring when conflicts are found. /// /// If a contained [`MvReg`](crate::MvReg) has conflicting values, this method returns an `Err` /// with [`SingleValueIssue::HasConflict`]. fn value(self) -> Result<Self::Value, Box<SingleValueError<Self::LeafValue>>>; } impl<'doc, C> ToValue for ValueRef<'doc, C> where C: ExtensionType, C::ValueRef<'doc>: ToValue, { type Values = AllValues<'doc, C::ValueRef<'doc>>; type Value = CollapsedValue<'doc, C::ValueRef<'doc>>; type LeafValue = Either<MvRegValue, <C::ValueRef<'doc> as ToValue>::LeafValue>; fn values(self) -> Self::Values { match self { ValueRef::Map(map) => AllValues::Map(map.values()), ValueRef::Array(arr) => AllValues::Array(arr.values()), ValueRef::Register(reg) => AllValues::Register(reg.values()), ValueRef::Custom(custom) => AllValues::Custom(custom.values()), } } fn value(self) -> Result<Self::Value, Box<SingleValueError<Self::LeafValue>>> { Ok(match self { ValueRef::Map(map) => CollapsedValue::Map(map.value()?), ValueRef::Array(arr) => CollapsedValue::Array(arr.value()?), ValueRef::Register(reg) => { CollapsedValue::Register(reg.value().map_err(|v| v.map_values(Either::Left))?) } ValueRef::Custom(custom) => { CollapsedValue::Custom(custom.value().map_err(|v| v.map_values(Either::Right))?) } }) } } macro_rules! 
impl_partial_eq { ($on:ty, *; {$($t:ty),+}) => { $(impl_partial_eq!($on, *; $t);)+ }; ($on:ty; {$($t:ty),+}) => { $(impl_partial_eq!($on; $t);)+ }; ($on:ty$(, $map:tt)?; $t:ty) => { impl<C> PartialEq<$t> for $on where C: ToValue, { fn eq(&self, other: &$t) -> bool { matches!($($map)? self, Self::Register(r1) if r1 == other) } } }; } /// A representation of all values in a CRDT while preserving any potentially conflicting leaf /// values. /// /// This is a snapshot of the CRDT that can be used to inspect the current state of the CRDT. /// However, it is not a CRDT, and cannot be modified. /// /// See [`ToValue::values`]. #[derive(Debug, Clone, PartialEq)] pub enum AllValues<'doc, Custom> where Custom: ToValue, { /// A multi-value register, which can hold multiple values at the same time. Register(MvReg<'doc>), /// An observed-remove map, which is a map that supports concurrent removal of keys. Map(OrMap<'doc, String, AllValues<'doc, Custom>>), /// An observed-remove array, which is an array that supports concurrent removal of elements. Array(OrArray<AllValues<'doc, Custom>>), /// A custom CRDT type. Custom(Custom::Values), } impl_partial_eq!(AllValues<'_, C>; {[u8], &[u8], str, &str, bool, f64, u64, i64}); // i32 because it's the "default" inference integer type impl_partial_eq!(AllValues<'_, C>; i32); // byte literals impl<C, const N: usize> PartialEq<&[u8; N]> for AllValues<'_, C> where C: ToValue, { fn eq(&self, other: &&[u8; N]) -> bool { matches!(self, Self::Register(r1) if r1 == other) } } /// A representation of all values in a CRDT where there are no conflicting values at the leaves. /// /// This is a snapshot of the CRDT that can be used to inspect the current state of the CRDT. /// However, it is not a CRDT, and cannot be modified. /// /// See [`ToValue::value`]. #[derive(Debug, Clone, PartialEq)] pub enum CollapsedValue<'doc, Custom> where Custom: ToValue, { /// A multi-value register, which can hold multiple values at the same time. 
Register(&'doc MvRegValue), /// An observed-remove map, which is a map that supports concurrent removal of keys. Map(OrMap<'doc, String, CollapsedValue<'doc, Custom>>), /// An observed-remove array, which is an array that supports concurrent removal of elements. Array(OrArray<CollapsedValue<'doc, Custom>>), /// A custom CRDT type. Custom(Custom::Value), } impl_partial_eq!(CollapsedValue<'_, C>, *; {[u8], &[u8], str, &str, bool, f64, u64, i64}); // i32 because it's the "default" inference integer type impl_partial_eq!(CollapsedValue<'_, C>, *; i32); // byte literals impl<C, const N: usize> PartialEq<&[u8; N]> for CollapsedValue<'_, C> where C: ToValue, { fn eq(&self, other: &&[u8; N]) -> bool { matches!(*self, Self::Register(r1) if r1 == other) } } /// An error that occurs when trying to collapse a CRDT with conflicting values. #[derive(Debug, Clone)] pub struct SingleValueError<T> { /// The path to the value that has an issue. pub path: Vec<String>, /// The issue that occurred. pub issue: SingleValueIssue<T>, } // We can't derive `PartialEq` because `SingleValueIssue` implements it manually // to provide order-insensitive equality for conflicts. For that we need the // additional `Ord` bound. impl<V: PartialEq + Ord> PartialEq for SingleValueError<V> { fn eq(&self, other: &Self) -> bool { self.path == other.path && self.issue == other.issue } } impl<T> SingleValueError<T> { /// Maps the values within the error to a different type. 
pub fn map_values<Other>(self, f: impl Fn(T) -> Other) -> Box<SingleValueError<Other>> { let Self { path, issue } = self; let issue = match issue { SingleValueIssue::HasConflict(conflicts) => { SingleValueIssue::HasConflict(conflicts.into_iter().map(f).collect()) } SingleValueIssue::Cleared => SingleValueIssue::Cleared, }; Box::new(SingleValueError { path, issue }) } } impl<T> fmt::Display for SingleValueError<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "at <self>")?; // reverse since paths are appended as the error bubbles outwards for p in self.path.iter().rev() { write!(f, ".{p}")?; } Ok(()) } } impl<V: fmt::Debug + 'static> error::Error for SingleValueError<V> { fn source(&self) -> Option<&(dyn error::Error + 'static)> { Some(&self.issue) } } /// An issue that can occur when trying to collapse a CRDT with conflicting values. #[derive(Debug, Clone)] pub enum SingleValueIssue<V> { // NOTE: Contrary to `DotFun`, there are _NO_ ordering guarantees on the // conflicted values HasConflict(smallvec::SmallVec<[V; 2]>), Cleared, } impl<V: PartialEq + Ord> PartialEq for SingleValueIssue<V> { fn eq(&self, other: &Self) -> bool { match (self, other) { (Self::HasConflict(l0), Self::HasConflict(r0)) => { let mut l0 = l0.iter().collect::<Vec<_>>(); let mut r0 = r0.iter().collect::<Vec<_>>(); l0.sort_unstable(); r0.sort_unstable(); l0 == r0 } (Self::Cleared, Self::Cleared) => true, _ => false, } } } impl<V: Eq + Ord> Eq for SingleValueIssue<V> {} impl<T> fmt::Display for SingleValueIssue<T> { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { SingleValueIssue::HasConflict(set) => write!(f, "has {} possible values", set.len()), SingleValueIssue::Cleared => write!(f, "has been cleared"), } } } impl<T: fmt::Debug> error::Error for SingleValueIssue<T> {} // NOTE: This does not expose the HashMap so we can change its inner type later. 
/// A snapshot of an [`OrMap`](crate::OrMap) that can be used to inspect the current state of the /// CRDT. #[derive(Debug, PartialEq, Eq)] pub struct OrMap<'doc, K, V> where K: Hash + Eq + ?Sized + 'doc, V: 'doc, { // TODO: is it worthwhile to keep &K here instead of K? // it ends up making the ergonomics for accessing this map worse since &String doesn't // implement Borrow<str> (only String does). pub(crate) map: HashMap<&'doc K, V, DsonRandomState>, } impl<K, V> Clone for OrMap<'_, K, V> where K: Hash + Eq + ?Sized, V: Clone, { fn clone(&self) -> Self { Self { map: self.map.clone(), } } } impl<K, V> Default for OrMap<'_, K, V> where K: Hash + Eq + ?Sized, { fn default() -> Self { Self { map: create_map() } } } impl<K, V> Index<&K> for OrMap<'_, K, V> where K: Eq + Hash, { type Output = V; fn index(&self, index: &K) -> &Self::Output { self.map.index(index) } } impl<K, V> OrMap<'_, K, V> where K: Hash + Eq + ?Sized, { pub fn len(&self) -> usize { self.map.len() } pub fn is_empty(&self) -> bool { self.map.is_empty() } pub fn contains_key(&self, key: &K) -> bool { self.map.contains_key(key) } pub fn get(&self, key: &K) -> Option<&V> { self.map.get(key) } pub fn iter(&self) -> impl ExactSizeIterator<Item = (&K, &V)> { self.map.iter().map(|(&k, v)| (k, v)) } pub fn keys(&self) -> impl ExactSizeIterator<Item = &K> { self.map.keys().copied() } pub fn values(&self) -> impl ExactSizeIterator<Item = &V> { self.map.values() } } impl<'doc, K, V> IntoIterator for OrMap<'doc, K, V> where K: Hash + Eq + ?Sized, { type Item = (&'doc K, V); type IntoIter = std::collections::hash_map::IntoIter<&'doc K, V>; fn into_iter(self) -> Self::IntoIter { self.map.into_iter() } } /// A snapshot of an [`OrArray`](crate::OrArray) that can be used to inspect the current state of /// the CRDT. 
#[derive(Debug, Clone, PartialEq, Eq)] pub struct OrArray<V> { pub(crate) list: Vec<V>, } impl<V> Default for OrArray<V> { fn default() -> Self { Self { list: Default::default(), } } } impl<V> Deref for OrArray<V> { type Target = [V]; fn deref(&self) -> &Self::Target { &self.list[..] } } impl<V> AsRef<[V]> for OrArray<V> { fn as_ref(&self) -> &[V] { &self.list[..] } } impl<V> OrArray<V> { pub fn len(&self) -> usize { self.list.len() } pub fn is_empty(&self) -> bool { self.list.is_empty() } pub fn get(&self, i: usize) -> Option<&V> { self.list.get(i) } pub fn iter(&self) -> impl ExactSizeIterator<Item = &V> { self.list.iter() } } impl<V> IntoIterator for OrArray<V> { type Item = V; type IntoIter = std::vec::IntoIter<V>; fn into_iter(self) -> Self::IntoIter { self.list.into_iter() } } /// A snapshot of an [`MvReg`](crate::MvReg) that can be used to inspect the current state of the /// CRDT. #[derive(Debug, Clone)] pub struct MvReg<'doc> { // NOTE: DotFunValueIter is basically a std::slice::Iter, so is cheap to Clone pub(crate) values: DotFunValueIter<'doc, MvRegValue>, } impl<'doc> MvReg<'doc> { pub fn len(&self) -> usize { self.values.len() } pub fn is_empty(&self) -> bool { self.values.len() == 0 } pub fn get(&self, i: usize) -> Option<&'doc MvRegValue> { self.values.clone().nth(i) } pub fn contains(&self, x: &'_ MvRegValue) -> bool { self.values.clone().any(|v| v == x) } } impl PartialEq for MvReg<'_> { fn eq(&self, other: &Self) -> bool { self.values.clone().eq(other.clone()) } } impl Eq for MvReg<'_> {} macro_rules! 
impl_partial_eq { ({$($t:ty),+}) => { $(impl_partial_eq!($t);)+ }; ($t:ty) => { impl PartialEq<$t> for MvReg<'_> { fn eq(&self, other: &$t) -> bool { self.values.clone().any(|v| v == other) } } }; } impl_partial_eq!({[u8], &[u8], str, &str, bool, f64, u64, i64}); // i32 because it's the "default" inference integer type impl_partial_eq!(i32); // byte literals impl<const N: usize> PartialEq<&[u8; N]> for MvReg<'_> { fn eq(&self, other: &&[u8; N]) -> bool { self.values.clone().any(|v| v == other) } } impl PartialEq<[MvRegValue]> for MvReg<'_> { fn eq(&self, other: &[MvRegValue]) -> bool { (self.values.clone()).eq(other.iter()) } } impl PartialEq<[MvRegValue]> for &'_ MvReg<'_> { fn eq(&self, other: &[MvRegValue]) -> bool { (self.values.clone()).eq(other.iter()) } } impl<const N: usize> PartialEq<[MvRegValue; N]> for MvReg<'_> { fn eq(&self, other: &[MvRegValue; N]) -> bool { (self.values.clone()).eq(other.iter()) } } impl<const N: usize> PartialEq<[MvRegValue; N]> for &'_ MvReg<'_> { fn eq(&self, other: &[MvRegValue; N]) -> bool { (self.values.clone()).eq(other.iter()) } } impl<'doc> IntoIterator for MvReg<'doc> { type Item = &'doc MvRegValue; type IntoIter = DotFunValueIter<'doc, MvRegValue>; fn into_iter(self) -> Self::IntoIter { self.values } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/test_util/qc_arbitrary_ops.rs
src/crdts/test_util/qc_arbitrary_ops.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! This module implements Arbitrary for sequences of operations. use super::ArbitraryDelta; use crate::{ CausalDotStore, ComputeDeletionsArg, DotStoreJoin, Identifier, compute_deletions_unknown_to, crdts::{orarray::Uid, test_util::Delta}, dotstores::recording_sentinel::RecordingSentinel, sentinel::DummySentinel, }; use bimap::BiHashMap; use quickcheck::Gen; use std::{collections::HashMap, fmt}; /// A type that tracks the keys in a collection, and in any inner collections. /// /// This type exists to break the constraint that the exact same keys need to be used when /// _generating_ a [`Ops`] and when _executing_ one. This is desirable to enable shrinking -- we /// may want to eliminate some operations from the trace to produce more minimal reproducing /// examples, but this may in turn change what keys are generated (eg, for [`OrArray`] which use /// whatever the next [`Dot`] is as the key). By using an index, a [`Delta`] can store that it /// updates "the nth created key", whatever that happens to be in the current execution. #[derive(Debug, Clone, Default)] pub(crate) struct KeyTracker { /// Bijective (ie, 1:1) mapping between OrMap String keys and their keyi. pub(crate) map_keys: BiHashMap<String, usize>, /// Bijective (ie, 1:1) mapping between OrArray Uid keys and their keyi. pub(crate) array_keys: BiHashMap<Uid, usize>, /// Mapping from keyi to the KeyTracker for any inner collection. pub(crate) inner_keys: Vec<KeyTracker>, } impl KeyTracker { /// Returns the number of keys currently tracked. pub(crate) fn len(&self) -> usize { self.inner_keys.len() } /// Tracks a new map (ie, [`String`]-based) key, and returns its keyi. pub(crate) fn add_map_key(&mut self, key: String) -> usize { let keyi = self.len(); self.map_keys.insert_no_overwrite(key, keyi).unwrap(); self.inner_keys.push(Default::default()); keyi } /// Tracks a new array (ie, [`Uid`]-based) key, and returns its keyi. 
pub(crate) fn add_array_key(&mut self, key: Uid) -> usize { let keyi = self.len(); self.array_keys.insert_no_overwrite(key, keyi).unwrap(); self.inner_keys.push(Default::default()); keyi } } /// A single operation that a node may perform during a distributed systems trace. #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] #[derive(Debug, Clone)] struct Op<Delta> { /// The identifier of the node that should perform this action. by: Identifier, /// The action the node should take. action: Action<Delta>, } /// An action a distributed node can take with respects to its current state. #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] #[derive(Debug, Clone)] enum Action<Delta> { /// Synchronize with (specifically *from*) the node with the given identifier. Sync(Identifier), /// Apply the given modification to the current state. Data(Delta), } /// A sequence of legal per-node operations over a `DS: DotStore`. /// /// Specifically, this type constains a sequence of per-node operations over a [`DotStore`] in such /// a way that each node takes legal-but-arbitrary actions at each step based on its own current /// view of the world. Synchronization between nodes are also modeled explicitly to effectively /// produce a distributed systems trace. /// /// Since this type implements [`quickcheck::Arbitrary`], it can be used to fuzz-test the /// distributed operation of any [`DotStore`]. The most basic test to perform over any such /// sequence is the order-invariance of the produced CRDTs, which is done by /// [`Ops::check_order_invariance`]. /// /// The operations also model nested [`DotStore`]s (eg, an array of maps of registers), though /// avoids very deeply nested or large sub-elements as bugs tend to only require one level of /// nesting to present. 
/// /// ## A note about correctness /// /// The "simulator" that `impl Arbitrary for Ops` implements to determine what the set of legal /// actions are for each node at each point in time makes use of the [`DotStore`] to track the /// current state. For example, it uses the [`OrMap`] CRDT to determine which keys are valid to /// then generate operations against the OrMap CRDT. This is a circular assumption -- if there's a /// bug in the CRDT logic, it may cause us to then generate invalid operations (or not a full /// subset of legal ones). This is done for the sake of our collective sanity. Trust me on this /// one; modeling the set of keys that a node "should" know about _without_ using CRDTs in the face /// of arbitrary sync operations is _very_ painful. You basically end up re-inventing CRDTs one /// observed corner case at a time. /// /// Despite this limitation, this kind of circular-assumption simulation is still useful. It /// explores a wide range of operation sequences that _may_ end up surfacing bugs (often when /// _generating_ the sequence in the first place), and it will still produce a set of CRDTs that we /// can then check the order-invariance of. #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] #[cfg_attr(not(feature = "serde"), derive(::core::fmt::Debug))] pub(crate) struct Ops<DS> where DS: ArbitraryDelta, { /// The sequence of operations to perform. ops: Vec<Op<DS::Delta>>, /// The number of distinct top-level keys used in self.ops. /// /// This value is stored just so that [`OpsShrinker`] knows when it is including all keys. 
nkeys: usize, } #[cfg(feature = "json")] impl<DS> fmt::Debug for Ops<DS> where DS: ArbitraryDelta, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", serde_json::to_string(&self).unwrap()) } } // NOTE: manual impl so we don't require that DS: Clone (which derive(Clone) would) impl<DS> Clone for Ops<DS> where DS: ArbitraryDelta, DS::Delta: Clone, { fn clone(&self) -> Self { Self { ops: self.ops.clone(), nkeys: self.nkeys, } } } impl<DS> quickcheck::Arbitrary for Ops<DS> where DS: ArbitraryDelta + DotStoreJoin<RecordingSentinel> + Clone + Default + 'static, DS::Delta: Clone, { fn arbitrary(g: &mut quickcheck::Gen) -> Self { eprintln!("\n:: Generate a new test case"); // rather than storing the keys over and over, we use indices let mut key_tracker = KeyTracker::default(); // no need to have more than 4 nodes for _most_ distributed systems problems let nodes = 0..((u8::arbitrary(g) % 4) + 1/* at least one */); let mut ccs: Vec<(Identifier, CausalDotStore<DS>)> = nodes .map(|i| { let id = Identifier::new(1, i as u16); (id, CausalDotStore::new()) }) .collect(); assert!(!ccs.is_empty()); // restrict the amount of state generated per element of the ops vector. // we also avoid generating ops sequences that are _too_ long. very very few bugs // require 256+ operations to reproduce, and if they do, the chance of finding them by // fuzzing is quite small. the main reason to not make this number even smaller is that // a longer sequence means we'll explore more possible operation interleavings in one // execution of the test (at the cost of longer test times). 256 means the test takes // ~12s, which feels about right. 
let n = g.size().min(256) as u8; let mut g = Gen::new(g.size() / ccs.len()); let g = &mut g; let mut ops = Vec::with_capacity(usize::from(n)); for _ in 0..n { // choose which node should perform the next operation let id = { let &(id, _) = g.choose(&ccs).unwrap(); id }; // sometimes, nodes should sync with other nodes: // this is u8::arbitrary not bool::arbitrary so that we can make it less than 50% if u8::arbitrary(g) < 64 { // sync means we'll be joining the other node's causal context. // it's unidirectional, so if a syncs from b, it does _not_ // mean that b sees a's state. // choose who we'll synchronize with let (wid, with) = g.choose(&ccs).unwrap().clone(); // sync with self is a no-op if wid == id { continue; } eprintln!("==> {id:?} syncs from {wid:?}"); // make sure the executor also knows to sync ops.push(Op { by: id, action: Action::Sync(wid), }); // sync is then just to absorb the other node's state let (_, cc) = ccs.iter_mut().find(|&&mut (ccid, _)| ccid == id).unwrap(); cc.test_join_with(with.store.clone(), &with.context); continue; }; // determine the node's current view of the world let (_, cc) = ccs.iter_mut().find(|&&mut (ccid, _)| ccid == id).unwrap(); // okay, pick a random (legal) operation to perform eprintln!("==> {id:?} generates data delta"); let (op, crdt) = DS::arbitrary_delta(&cc.store, &cc.context, id, &mut key_tracker, g, 1); // merge the associated CRDT to update this node's view of the world. cc.test_join_with(crdt.store.clone(), &crdt.context); ops.push(Op { by: id, action: Action::Data(op), }); } let mut s = Self { ops, nkeys: key_tracker.inner_keys.len(), }; s.prune_unnecessary(); s } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { if self.ops.is_empty() { return quickcheck::empty_shrinker(); } Box::new(OpsShrinker { seed: self.clone(), key_subset: 1, size: 0, }) } } impl<DS> Ops<DS> where DS: ArbitraryDelta, DS::Delta: Clone, { /// Removes sub-sequences of operations that have no observable effect. 
fn prune_unnecessary(&mut self) { // keep track of what a node last did to see if sync is useful let mut previous_for_node = HashMap::new(); let mut i = 0; self.ops.retain(|op| { i += 1; let id = op.by; let mut keep = true; match op.action { // TODO: some repeated data operations can be pruned or combined, such as // clears that happen immediately after each other (on a given node) or a delete // that is followed by a clear. but leave that for future work. Action::Data(_) => {} // sync is unecessary if the other node has done nothing since the last time we // synced with them. // // TODO: two syncs with the same node right after each other is also // unnecessary. Action::Sync(oid) if id == oid => { // we _shouldn't_ be generating self-syncs, but handle them just in case. keep = false; } Action::Sync(oid) => { let other_last = previous_for_node.get(&oid).cloned(); // if the last thing _we_ did was a sync if let Some(&(synci, Action::Sync(loid))) = previous_for_node.get(&id) { // and that sync was with the same node if loid == oid { // and _that_ node didn't do anything in between if other_last.is_none_or(|(lasti, _)| lasti < synci) { keep = false; } } } } } if keep { previous_for_node.insert(id, (i, op.action.clone())); } keep }); } } impl<DS> fmt::Display for Ops<DS> where DS: ArbitraryDelta, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { for (i, op) in self.ops.iter().enumerate() { if i != 0 && i % 10 == 0 { writeln!(f, "--- {i} ---")?; } let id = op.by; match &op.action { Action::Data(op) => { writeln!(f, " -> {id:?} {op}")?; } Action::Sync(i2) => writeln!(f, " -> {id:?} syncs from {i2:?}")?, } } Ok(()) } } impl<DS> Ops<DS> where DS: DotStoreJoin<RecordingSentinel>, DS: ArbitraryDelta + fmt::Debug, { /// Produces a sequence of CRDTs for the operations in `self` and checks that the CRDTs resolve /// to the same final state no matter how they are combined. 
#[cfg_attr(feature = "arbitrary", allow(dead_code))] pub fn check_order_invariance(self, seed: u64) -> quickcheck::TestResult where DS: DotStoreJoin<DummySentinel> + Default + Clone + PartialEq, { eprintln!("\n:: Running test case:"); eprint!("{self}"); eprintln!("==> Executing steps"); if self.ops.is_empty() { return quickcheck::TestResult::passed(); } // we need to construct a new, fresh KeyTracker since shrinking may have happened between // when this [`Ops`] was generated and when this function is called. if it has, different // keys may end up being generated, and we need to allow for that. note also that we use a // single KeyTracker across all actors. this is needed so that node A can resolve a keyi // in a Delta for a key that was inserted by node B after A has synced from B. let mut keys = KeyTracker::default(); // set up a new, empty state for a node that hasn't seen anything let fresh = CausalDotStore::<DS>::new(); // apply all ops to synthesize a final state with local modifications and all deltas let (final_state, ordered_deltas) = self.ops.into_iter().enumerate().fold( (HashMap::new(), vec![]), |(mut causals, mut deltas), (opi, op)| { eprintln!(" -> executing #{opi}"); // grab the executing node's internal state let id = op.by; let mut causal = causals .entry(id) .or_insert_with(|| CausalDotStore::<DS>::new()); // generate the crdt for the indicated operation match op.action { Action::Data(op) => { let modded: CausalDotStore<DS> = op.into_crdt(&causal.store, &causal.context, id, &mut keys); // merge the crdt into the joined state _and_ keep track of the delta causal .join_with(modded.store.clone(), &modded.context, &mut DummySentinel) .unwrap(); deltas.push(modded); } Action::Sync(other) => { let other = causals .entry(other) .or_insert_with(|| CausalDotStore::<DS>::new()) .clone(); // re-borrow causal so we get to temporarily borrow causals above causal = causals.get_mut(&id).unwrap(); // pull out the delta to inflate this node to match the other's 
state let mut inflate = other.subset_for_inflation_from(&causal.context); inflate .context .union(&compute_deletions_unknown_to(ComputeDeletionsArg { known_dots: &other.context, live_dots: &other.store.dots(), ignorant: &causal.store.dots(), })); // while we're at it, check that inflation subset has the same effect as // merging with the full state let mut full_sync = causal.clone(); full_sync .join_with(other.store.clone(), &other.context, &mut DummySentinel) .unwrap(); let mut partial_sync = causal.clone(); partial_sync .join_with(inflate.store.clone(), &inflate.context, &mut DummySentinel) .unwrap(); assert_eq!(full_sync, partial_sync, "inflating with {inflate:?}"); *causal = partial_sync; // TODO: test that if a node syncs an "overheard" ::Update, it doesn't // botch their state (esp. around deletions). deltas.push(inflate); } } (causals, deltas) }, ); // compute a final state that joins the final state of all the nodes let final_state = final_state .into_values() .reduce(|acc, delta| acc.join(delta, &mut DummySentinel).unwrap()) .unwrap(); // apply all CRDTs one by one to the initial state in-order let state_ordered = ordered_deltas .clone() .into_iter() .fold(fresh.clone(), |acc, delta| { acc.join(delta, &mut DummySentinel).unwrap() }); // merge all CRDTs into a single one in-order, and then apply that to the initial state let merged_delta = ordered_deltas .clone() .into_iter() .reduce(|acc, delta| acc.join(delta, &mut DummySentinel).unwrap()) .unwrap(); let state_ordered_merged = { let mut fresh = fresh.clone(); fresh .join_with( merged_delta.store, &merged_delta.context, &mut DummySentinel, ) .unwrap(); fresh }; // apply all CRDTs one by one to the initial state in random order let shuffled_deltas = { use rand::{SeedableRng, seq::SliceRandom}; let mut rng = rand::rngs::SmallRng::seed_from_u64(seed); let mut tmp = ordered_deltas; tmp.shuffle(&mut rng); tmp }; let state_shuffled = shuffled_deltas.into_iter().fold(fresh, |acc, delta| { acc.join(delta, &mut 
DummySentinel).unwrap() }); // moment of truth -- are they all the same (ie, eventually consistent)? quickcheck::TestResult::from_bool( dbg!(final_state == state_ordered_merged) && dbg!(final_state == state_ordered) && dbg!(final_state == state_shuffled), ) } } /// An iterator that produces smaller-but-still-legal versions of an [`Ops`]. /// /// The "but still legal" part makes shrinking tricky since we're working with a distributed /// systems trace. This shrinker currently explores two shrinking dimensions of [`Ops`]: /// /// - the length of the operational sequence, since every prefix is valid by construction. /// - the set of keys that are used, since every trace remains valid if you take out all operations /// pertaining to a particular key (ie, there are no cross-key dependencies). struct OpsShrinker<DS> where DS: ArbitraryDelta, { /// The original `Ops` that we're shrinking. seed: Ops<DS>, /// The subset of keys ([..key_subset]) we're currently including. key_subset: usize, /// The subset of ops ([..size]) we're currently including. size: usize, } impl<DS> Iterator for OpsShrinker<DS> where DS: ArbitraryDelta, DS::Delta: Clone, { type Item = Ops<DS>; fn next(&mut self) -> Option<Self::Item> { // the general guidance for quickcheck shrinking appears to be to create "smaller" inputs // first. my understanding is that quickcheck walks the iterator until it first replicates // a failure. then it discards the rest of that iterator and instead shrinks starting from // that replication point. naturally it follows that it's best to produce the "most // reduced" candidates first. // // NOTE: we only generate items that are strictly smaller along at least one // dimension. that means we'll never here yield `seed` again, as that would just lead to // infinite recursion. 
// try to see how an empty set of ops does first if self.size == 0 { self.size = 1; return Some(Ops { ops: Vec::new(), nkeys: 0, }); } // first, try to yield a short (and then longer and longer) prefix of ops, as fewer ops // means simpler traces. it has to be a prefix since only prefixes are guaranteed to still // hold only legal operations. if self.size < self.seed.ops.len() { let ops = Vec::from(&self.seed.ops[..self.size]); // NOTE: grow by 2x to avoid very slow shrinking self.size *= 2; return Some(Ops { ops, nkeys: self.seed.nkeys, }); } // if we get here it means no shorter prefix reproduces the problem. so, instead of // reducing the trace length, we reduce its "breadth". specifically, we keep only ops // related to a subset of the keys, starting with just a single key. this works because // there is no dependence between the state of different keys, so erasing all ops for a // particular key leaves the ops on other keys still in a legal state. // // NOTE: there is one caveat to this, which is that removing the insertion of a key // would alter the observed keyi for all subsequent. we solve for that by eliminating keys // by index. ie, we will remove all the keys with keyi >= n, which ensures that there are // no later keyis that can be affected by the lack of an insert. if self.key_subset < self.seed.nkeys { let nkeys = self.key_subset; // NOTE: grow by 2x to avoid very slow shrinking self.key_subset *= 2; let ops = self .seed .ops .iter() .filter(|op| { if let Action::Data(d) = &op.action { !d.depends_on_keyi_in(nkeys..) } else { true } }) .cloned() .collect(); // re-prune since removing key ops may cause clears/syncs to now appear right after // each other, and thus become redundant. 
let mut s = Ops { ops, nkeys }; s.prune_unnecessary(); return Some(s); } // TODO: other prune opportunities: // - all operations each node does after it is last synced _from_ None } } #[cfg(test)] mod tests { use super::*; use crate::{ OrMap, crdts::{ NoExtensionTypes, mvreg::MvRegValue, test_util::arbitrary_delta_impls::{MapOp, RegisterOp, ValueDelta}, }, }; use quickcheck::Arbitrary; #[test] fn shrink_empty() { let ops = Ops::<OrMap<String, NoExtensionTypes>> { ops: vec![], nkeys: 0, }; assert_eq!(ops.shrink().count(), 0); } #[test] fn shrink_single_key() { let ops = Ops::<OrMap<String, NoExtensionTypes>> { ops: vec![ Op { by: (0, 0).into(), action: Action::Data(MapOp::Apply( 0, Some(String::from("key0")), Box::new(ValueDelta::Register(RegisterOp(Some(MvRegValue::Bool( true, ))))), )), }, Op { by: (0, 0).into(), action: Action::Data(MapOp::Apply( 0, None, Box::new(ValueDelta::Register(RegisterOp(Some(MvRegValue::Bool( false, ))))), )), }, ], nkeys: 1, }; // we should see 2 shrinks: let shrinks: Vec<_> = ops.shrink().collect(); // first, an empty set of ops assert_eq!(shrinks[0].ops.len(), 0); // then, only 1 of the 2 ops assert_eq!(shrinks[1].ops.len(), 1); assert!(matches!( shrinks[1].ops[0].action, Action::Data(MapOp::Apply(_, Some(_), _)) )); // then, no more (2/2 ops with all the keys present wouldn't be a shrink) assert_eq!(shrinks.len(), 2); } #[test] fn shrink_keys() { let ops = Ops::<OrMap<String, NoExtensionTypes>> { ops: vec![ Op { by: (0, 0).into(), action: Action::Data(MapOp::Apply( 0, Some(String::from("key0")), Box::new(ValueDelta::Register(RegisterOp(Some(MvRegValue::Bool( true, ))))), )), }, Op { by: (0, 0).into(), action: Action::Data(MapOp::Apply( 1, Some(String::from("key1")), Box::new(ValueDelta::Register(RegisterOp(Some(MvRegValue::Bool( true, ))))), )), }, Op { by: (0, 0).into(), action: Action::Data(MapOp::Apply( 0, None, Box::new(ValueDelta::Register(RegisterOp(Some(MvRegValue::Bool( false, ))))), )), }, ], nkeys: 2, }; // we should see 4 
shrinks: let shrinks: Vec<_> = ops.shrink().collect(); // first, an empty set of ops assert_eq!(shrinks[0].ops.len(), 0); // then, only 1 of the 3 ops assert_eq!(shrinks[1].ops.len(), 1); assert!(matches!( shrinks[1].ops[0].action, Action::Data(MapOp::Apply(0, Some(_), _)) )); // then, only 2 of the 3 ops assert_eq!(shrinks[2].ops.len(), 2); assert!(matches!( shrinks[2].ops[0].action, Action::Data(MapOp::Apply(0, Some(_), _)) )); assert!(matches!( shrinks[2].ops[1].action, Action::Data(MapOp::Apply(1, Some(_), _)) )); // then, one with half the keys pruned (so only op[0] and op[2]) assert_eq!(shrinks[3].ops.len(), 2); assert!(matches!( shrinks[3].ops[0].action, Action::Data(MapOp::Apply(0, Some(_), _)) )); assert!(matches!( shrinks[3].ops[1].action, Action::Data(MapOp::Apply(0, None, _)) )); // then, no more assert_eq!(shrinks.len(), 4); } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/test_util/arbitrary_delta_impls.rs
src/crdts/test_util/arbitrary_delta_impls.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. pub(crate) mod mvreg; pub(crate) mod orarray; pub(crate) mod ormap; pub(crate) use mvreg::RegisterOp; pub(crate) use orarray::ArrayOp; pub(crate) use ormap::MapOp; /// A type that holds a [`Delta`] for one of the known CRDT [`Delta`] types. /// /// This exists so that [`MapOp`] and [`ArrayOp`] don't need a separate operation type for each /// type of inner value they may want to insert or update at a given key. /// #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] #[derive(Debug, Clone)] pub(crate) enum ValueDelta { Map(MapOp), Array(ArrayOp), Register(RegisterOp), }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/test_util/qc_arbitrary_impls.rs
src/crdts/test_util/qc_arbitrary_impls.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! Implementation of the quickcheck::Arbitrary trait for all of CRDT types. use crate::{ CausalContext, CausalDotStore, Dot, DotFun, DotFunMap, DotStore, DotStoreJoin, ExtensionType, Identifier, MvReg, OrArray, OrMap, api::timestamp::Timestamp, crdts::{ NoExtensionTypes, TypeVariantValue, Value, mvreg::MvRegValue, orarray::{PairMap, Position, Uid}, }, dotstores::{DotMapValue, recording_sentinel::RecordingSentinel}, }; use quickcheck::{Arbitrary, Gen}; use std::{collections::HashMap, fmt, hash::Hash, num::NonZeroU64}; impl Arbitrary for NoExtensionTypes { fn arbitrary(_: &mut Gen) -> Self { Self } } impl quickcheck::Arbitrary for Identifier { fn arbitrary(g: &mut quickcheck::Gen) -> Self { // Skew the distribution to increase the likelihood of triggering bugs. // Most interesting behavior occurs when the same Identifier occurs multiple times // in the same test. let node_choices = [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, u8::arbitrary(g).saturating_add(1), ]; let app_choices = [ 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, u16::arbitrary(g).saturating_add(1) % 4096, ]; let node = g.choose(&node_choices).unwrap(); let app = g.choose(&app_choices).unwrap(); Self::new(*node, *app) } } impl quickcheck::Arbitrary for Dot { fn arbitrary(g: &mut quickcheck::Gen) -> Self { // Skew the distribution to increase the likelihood of triggering bugs. // Large distinct values of u64:s often cause the same code paths. // Interesting code paths often happen when several values are close to each other. 
let seq_choices = [1, 1, 1, 2, 2, 3, 4, 5, u64::arbitrary(g).saturating_add(1)]; let seq = g.choose(&seq_choices).unwrap(); (Identifier::arbitrary(g), NonZeroU64::new(*seq).unwrap()).into() } } impl<C> Arbitrary for Value<C> where C: Arbitrary + ExtensionType + DotStoreJoin<RecordingSentinel> + fmt::Debug + Clone + PartialEq, C::Value: Arbitrary, { fn arbitrary(g: &mut Gen) -> Self { match *g.choose(&["map", "array", "reg"]).unwrap() { "map" => { // penalize nesting let mut g = Gen::new(g.size() / 2); Self::Map(OrMap::arbitrary(&mut g)) } "array" => { // penalize nesting let mut g = Gen::new(g.size() / 2); Self::Array(OrArray::arbitrary(&mut g)) } "reg" => Self::Register(MvReg::arbitrary(g)), _ => unreachable!(), } } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { match self { Self::Map(m) => Box::new(m.shrink().map(Self::Map)), Self::Array(a) => Box::new(a.shrink().map(Self::Array)), Self::Register(r) => Box::new(r.shrink().map(Self::Register)), Self::Custom(c) => Box::new(c.shrink().map(Self::Custom)), } } } impl<C> Arbitrary for TypeVariantValue<C> where C: Arbitrary + ExtensionType + DotStoreJoin<RecordingSentinel> + fmt::Debug + Clone + PartialEq, C::Value: Arbitrary, { fn arbitrary(g: &mut Gen) -> Self { Self { map: if bool::arbitrary(g) { // penalize nesting let mut g = Gen::new(g.size() / 2); <_>::arbitrary(&mut g) } else { Default::default() }, array: if bool::arbitrary(g) { // penalize nesting let mut g = Gen::new(g.size() / 2); <_>::arbitrary(&mut g) } else { Default::default() }, reg: <_>::arbitrary(g), custom: <_>::arbitrary(g), } } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { let mut vars = Vec::new(); if !self.map.is_bottom() && (!self.array.is_bottom() || !self.reg.is_bottom()) { vars.push({ let mut v = self.clone(); v.map = Default::default(); v }) } if !self.array.is_bottom() && (!self.map.is_bottom() || !self.reg.is_bottom()) { vars.push({ let mut v = self.clone(); v.array = Default::default(); v }) } if !self.reg.is_bottom() && 
(!self.array.is_bottom() || !self.map.is_bottom()) { vars.push({ let mut v = self.clone(); v.reg = Default::default(); v }) } Box::new(vars.into_iter()) } } impl Arbitrary for MvReg { fn arbitrary(g: &mut Gen) -> Self { if g.size() == 0 || bool::arbitrary(g) { MvReg::default() } else { let reg = MvReg::default(); let src = MvRegValue::arbitrary(g); let id = Identifier::arbitrary(g); let cc = CausalContext::new(); reg.write(src, &cc, id).store } } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { if self.is_bottom() { quickcheck::empty_shrinker() } else { assert_eq!( self.0.len(), 1, "our arbitrary never generates multi-dot MvRegs (atm)" ); let dot = self.0.keys().next().unwrap(); let v = self.0.values().next().unwrap().clone(); Box::new(Arbitrary::shrink(&(dot, v)).map(|(dot, v)| { let mut dot_fun = DotFun::default(); dot_fun.set(dot, v); Self(dot_fun) })) } } } // This cannot be implemented from chrono's arbitrary trait. // 1. The implementation is for arbitrary crate, not quickcheck // 2. 
This would produce values outside of dson::timestamp::Timestamp's supported range #[cfg(feature = "chrono")] impl Arbitrary for Timestamp { // A random date between 0 and 9999 fn arbitrary(g: &mut Gen) -> Timestamp { use chrono::DateTime; let range = Timestamp::MAX.as_datetime().timestamp() - Timestamp::MIN.as_datetime().timestamp(); let random_number: i64 = Arbitrary::arbitrary(g); let random_secs = Timestamp::MIN.as_datetime().timestamp() + (random_number.rem_euclid(range)); let random_number: u32 = Arbitrary::arbitrary(g); let random_nanoseconds = random_number % 1_000_000_000; let random_datetime = DateTime::from_timestamp(random_secs, random_nanoseconds) .expect("random timestamp is within accepted range"); Timestamp::new(random_datetime).expect("random timestamp is within accepted range") } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { use chrono::{DateTime, Datelike, NaiveTime, Utc}; let datetime = self.as_datetime(); let midnight = datetime.date_naive().and_time(NaiveTime::MIN); let first_day_of_the_month = datetime .date_naive() .with_day(1) .expect("1 is a valid day") .and_time(NaiveTime::MIN); let first_month_of_the_year = datetime .date_naive() .with_month(1) .expect("1 is a valid month") .and_time(NaiveTime::MIN); let epoch = DateTime::<Utc>::UNIX_EPOCH; let this = *self; let shrunk_datetimes = [midnight, first_day_of_the_month, first_month_of_the_year] .into_iter() .map(|dt| DateTime::<Utc>::from_naive_utc_and_offset(dt, Utc)) .chain(std::iter::once(epoch)) .map(|dt| Timestamp::new(dt).expect("Static datetime is a valid Timestamp")) // repeated calls to shrink must eventually end up with an empty result // so we make sure the shrunk stamps are always strictly smaller than 'self' .filter(move |x| x < &this); Box::new(shrunk_datetimes) } } #[cfg(not(feature = "chrono"))] impl Arbitrary for Timestamp { fn arbitrary(g: &mut Gen) -> Timestamp { Timestamp::new(i64::arbitrary(g)).unwrap() } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { 
Box::new( self.as_millis() .shrink() .map(|v| Timestamp::new(v).unwrap()), ) } } impl Arbitrary for MvRegValue { fn arbitrary(g: &mut Gen) -> Self { let mut choices = vec![ "bytes", "string", "double", "u64", "i64", "bool", "timestamp", ]; if cfg!(feature = "ulid") { choices.push("ulid"); } match *g.choose(&choices).unwrap() { "bytes" => Self::Bytes(<_>::arbitrary(g)), "string" => Self::String(<_>::arbitrary(g)), "double" => Self::Double(<_>::arbitrary(g)), "u64" => Self::U64(<_>::arbitrary(g)), "i64" => Self::I64(<_>::arbitrary(g)), "bool" => Self::Bool(<_>::arbitrary(g)), "timestamp" => Self::Timestamp(<_>::arbitrary(g)), #[cfg(feature = "ulid")] "ulid" => Self::Ulid(ulid::Ulid(<_>::arbitrary(g))), _ => unreachable!(), } } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { match self { MvRegValue::Bytes(v) => Box::new( v.shrink() .map(MvRegValue::Bytes) .chain(std::iter::once(MvRegValue::Bool(true))), ), MvRegValue::String(v) => Box::new( v.shrink() .map(MvRegValue::String) .chain(std::iter::once(MvRegValue::Bool(true))), ), MvRegValue::Double(v) => Box::new( v.shrink() .map(MvRegValue::Double) .chain(std::iter::once(MvRegValue::Bool(true))), ), MvRegValue::Float(v) => Box::new( v.shrink() .map(MvRegValue::Float) .chain(std::iter::once(MvRegValue::Bool(true))), ), MvRegValue::U64(v) => Box::new( v.shrink() .map(MvRegValue::U64) .chain(std::iter::once(MvRegValue::Bool(true))), ), MvRegValue::I64(v) => Box::new( v.shrink() .map(MvRegValue::I64) .chain(std::iter::once(MvRegValue::Bool(true))), ), MvRegValue::Bool(v) => Box::new(v.shrink().map(MvRegValue::Bool)), MvRegValue::Timestamp(v) => Box::new( v.shrink() .map(MvRegValue::Timestamp) .chain(std::iter::once(MvRegValue::Bool(true))), ), #[cfg(feature = "ulid")] MvRegValue::Ulid(v) => Box::new( v.0.shrink() .map(|u| MvRegValue::Ulid(ulid::Ulid(u))) .chain(std::iter::once(MvRegValue::Bool(true))), ), } } } impl<V> Arbitrary for DotMapValue<V> where V: Arbitrary, { fn arbitrary(g: &mut Gen) -> Self { Self { dots: 
None, value: V::arbitrary(g), } } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { Box::new(self.value.shrink().map(|value| Self { dots: None, value })) } } impl<K, C> Arbitrary for OrMap<K, C> where K: Hash + Eq + fmt::Debug + Clone + Arbitrary, C: Arbitrary + ExtensionType + DotStoreJoin<RecordingSentinel> + Clone + PartialEq + fmt::Debug, C::Value: Arbitrary, { fn arbitrary(g: &mut Gen) -> Self { if g.size() == 0 || bool::arbitrary(g) { OrMap::default() } else { let map = OrMap::default(); let src = HashMap::<K, super::Value<C>>::arbitrary(g); let id = Identifier::arbitrary(g); let cc = CausalContext::new(); src.into_iter() .fold(map.create(&cc, id), |mut map, (k, v)| { let map_update = map.store.apply( |_old, cc, _id| CausalDotStore { store: v, context: cc.clone(), }, k, &map.context, id, ); map.test_consume(map_update); map }) .store } } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { if self.is_bottom() { return quickcheck::empty_shrinker(); } let mut vars = Vec::new(); vars.push(OrMap::default()); for map in self.0.shrink() { vars.push(Self(map)); } Box::new(vars.into_iter()) } } impl<C> Arbitrary for OrArray<C> where C: Arbitrary + ExtensionType + DotStoreJoin<RecordingSentinel> + Clone + PartialEq + fmt::Debug, C::Value: Arbitrary, { fn arbitrary(g: &mut Gen) -> Self { if g.size() == 0 || bool::arbitrary(g) { OrArray::default() } else { let list = OrArray::default(); let src = Vec::<super::Value<C>>::arbitrary(g); let cc = CausalContext::new(); // Find an unused causal track, which we can use as 'our' id, so // we're sure to have a compact track (otherwise we will trigger asserts). 
let id = src .iter() .map(|x| x.dots()) .fold(CausalContext::default(), |mut a, b| { a.union(&b); a }) .unused_identifier() .expect("test case is small enough that some Identifier is always unused"); src.into_iter() .fold(list.create(&cc, id), |mut list, v| { let uid = list.context.next_dot_for(id).into(); let list_update = list.store.insert( uid, |cc, _id| { let mut cc = cc.clone(); cc.union(&v.dots()); CausalDotStore { store: v, context: cc.clone(), } }, Position::arbitrary(g), &list.context, id, ); list.test_consume(list_update); list }) .store } } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { if self.is_bottom() { return quickcheck::empty_shrinker(); } let mut vars = Vec::new(); vars.push(OrArray::default()); for map in self.0.shrink() { vars.push(Self(map)); } Box::new(vars.into_iter()) } } impl<C> Arbitrary for PairMap<C> where C: Arbitrary + ExtensionType + DotStoreJoin<RecordingSentinel> + fmt::Debug + Clone + PartialEq, C::Value: Arbitrary, { fn arbitrary(g: &mut Gen) -> Self { let first = if g.size() == 0 { // bottom value Default::default() } else { Arbitrary::arbitrary(g) }; let mut dot_fun_map = DotFunMap::default(); for (d, p) in Vec::<(Dot, Position)>::arbitrary(g) { let mut dot_fun = DotFun::default(); dot_fun.set(d, p); dot_fun_map.set(d, dot_fun); } Self { value: first, positions: dot_fun_map, } } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { if self.value.is_bottom() { return quickcheck::empty_shrinker(); } let mut vars = Vec::new(); vars.push(Self { value: Default::default(), positions: self.positions.clone(), }); // NOTE: based on the impl Arbitrary for tuples, // the recommendation is that shrinking is one-at-a-time // rather than carthesian product. 
for shrunk in self.value.shrink() { vars.push(Self { value: shrunk, positions: self.positions.clone(), }); } let dps: Vec<(Dot, Position)> = self .positions .values() .flat_map(|v| v.iter()) .map(|(dot, &p)| (dot, p)) .collect(); for shrunk in dps.shrink() { let mut dot_fun_map = DotFunMap::default(); for (d, p) in shrunk { let mut dot_fun = DotFun::default(); dot_fun.set(d, p); dot_fun_map.set(d, dot_fun); } vars.push(Self { value: self.value.clone(), positions: dot_fun_map.clone(), }); } Box::new(vars.into_iter()) } } impl Arbitrary for Position { fn arbitrary(g: &mut Gen) -> Self { let val = u64::arbitrary(g) % (Position::UPPER as u64); Self( val as f64 / g.choose(&[1.0, 2.0, 5.0, 10.0]) .expect("choose non empty slice"), ) } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { Box::new(self.0.shrink().map(Self)) } } impl Arbitrary for Uid { fn arbitrary(g: &mut Gen) -> Self { Self::from(Dot::arbitrary(g)) } fn shrink(&self) -> Box<dyn Iterator<Item = Self>> { Box::new(self.dot().shrink().map(Self::from)) } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/test_util/arbitrary_delta_impls/mvreg.rs
src/crdts/test_util/arbitrary_delta_impls/mvreg.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use crate::{ CausalContext, CausalDotStore, Identifier, MvReg, crdts::{ mvreg::MvRegValue, test_util::{ArbitraryDelta, Delta, KeyTracker}, }, }; use quickcheck::{Arbitrary, Gen}; use std::{fmt, ops::RangeBounds}; #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] #[derive(Debug, Clone)] pub(crate) struct RegisterOp(pub(crate) Option<MvRegValue>); impl fmt::Display for RegisterOp { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { if self.0.is_some() { write!(f, "writes a value to the register") } else { write!(f, "clears the register") } } } impl ArbitraryDelta for MvReg { type Delta = RegisterOp; fn arbitrary_delta( &self, cc: &CausalContext, id: Identifier, _keys: &mut KeyTracker, g: &mut Gen, depth: usize, ) -> (Self::Delta, CausalDotStore<Self>) { // NOTE: it's tempting to assert that keys.inner_keys.is_empty(), but since we // generate traces where values change _type_, inner_keys may actually hold things for // "when this value is an array". let indent = " ".repeat(depth); // TODO: we currently do not generate clear()s as they do _really_ weird things to // registers. see the OrArray push_bottom test. #[expect(clippy::overly_complex_bool_expr)] if false && bool::arbitrary(g) { eprintln!("{indent} -> clearing register"); (RegisterOp(None), self.clear()) } else { let v = MvRegValue::arbitrary(g); eprintln!("{indent} -> writing to register ({v:?})"); (RegisterOp(Some(v.clone())), self.write(v, cc, id)) } } } impl Delta for RegisterOp { type DS = MvReg; fn depends_on_keyi_in<R: RangeBounds<usize>>(&self, _range: R) -> bool { // TODO: how can we support shrinking MvRegs given they don't have keys? 
false } fn into_crdt( self, ds: &Self::DS, cc: &CausalContext, id: Identifier, _keys: &mut KeyTracker, ) -> CausalDotStore<Self::DS> { // NOTE: same as in arbitrary_delta, we cannot assert that keys.inner_keys.is_empty() if let Some(v) = self.0 { ds.write(v, cc, id) } else { ds.clear() } } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/test_util/arbitrary_delta_impls/ormap.rs
src/crdts/test_util/arbitrary_delta_impls/ormap.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use super::ValueDelta; use crate::{ CausalContext, CausalDotStore, DotStore, Identifier, MvReg, OrArray, OrMap, crdts::{ NoExtensionTypes, Value, test_util::{ArbitraryDelta, Delta, KeyTracker}, }, }; use quickcheck::{Arbitrary, Gen}; use std::{fmt, ops::RangeBounds}; // NOTE: Box is needed here to allow arbitrary nesting, otherwise the type isn't Sized. #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] #[derive(Debug, Clone)] pub(crate) enum MapOp { Apply(usize, Option<String>, Box<ValueDelta>), Remove(usize), Clear, } impl fmt::Display for MapOp { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Apply(keyi, Some(key), _) => write!(f, "inserts key #{keyi} ({key})"), Self::Apply(keyi, None, _) => write!(f, "updates key #{keyi}"), Self::Remove(keyi) => write!(f, "deletes key #{keyi}"), Self::Clear => write!(f, "clears the map"), } } } impl Delta for MapOp { type DS = OrMap<String, NoExtensionTypes>; fn depends_on_keyi_in<R: RangeBounds<usize>>(&self, range: R) -> bool { match *self { Self::Apply(keyi, _, _) | Self::Remove(keyi) => range.contains(&keyi), Self::Clear => false, } } fn into_crdt( self, ds: &Self::DS, cc: &CausalContext, id: Identifier, keys: &mut KeyTracker, ) -> CausalDotStore<Self::DS> { match self { Self::Apply(expected_keyi, insert_key, v) => { let keyi = expected_keyi; // apply is both insert and update -- figure out which one this is let should_exist = if let Some(insert_key) = insert_key { if let Some(&keyi) = keys.map_keys.get_by_left(&insert_key) { // this means two actors simulteneously inserted with the same key // that's totally possible! in that case we want to make sure they share // the same keyi and thus the same KeyTracker so that we accurately track // inner keys and dependent operations (for shrinking). 
assert_eq!(keyi, expected_keyi); } else { // this is the actually-first insert of this key, so set up the necessary // state for tracking inner keys. let keyi = keys.add_map_key(insert_key); assert_eq!(keyi, expected_keyi); } // NOTE: even for a simultaneous insert, _this_ node should not see the // key as already existing at the time of this op. false } else { true }; let inner_keys = &mut keys.inner_keys[keyi]; let key = keys.map_keys.get_by_right(&keyi).unwrap(); ds.apply( |old, cc, id| { if should_exist { assert!(!old.is_bottom()); } else { assert!(old.is_bottom()); } match *v { ValueDelta::Map(m) => m .into_crdt(&old.map, cc, id, inner_keys) .map_store(Value::Map), ValueDelta::Array(a) => a .into_crdt(&old.array, cc, id, inner_keys) .map_store(Value::Array), ValueDelta::Register(r) => r .into_crdt(&old.reg, cc, id, inner_keys) .map_store(Value::Register), } }, key.clone(), cc, id, ) } Self::Remove(keyi) => { let key = keys.map_keys.get_by_right(&keyi).unwrap(); ds.remove(key, cc, id) } Self::Clear => ds.clear(cc, id), } } } impl ArbitraryDelta for OrMap<String, NoExtensionTypes> { type Delta = MapOp; fn arbitrary_delta( &self, cc: &CausalContext, id: Identifier, keys: &mut KeyTracker, g: &mut Gen, depth: usize, ) -> (Self::Delta, CausalDotStore<Self>) { let op = if self.0.is_empty() { g.choose(&["insert", "clear"]) } else { g.choose(&["insert", "update", "remove", "clear"]) }; let indent = " ".repeat(depth); match op.copied().unwrap() { "insert" => { let (key, keyi) = { if self.0.len() != keys.map_keys.len() && bool::arbitrary(g) { // generate an insert of the same key as another node just inserted (but // that we haven't observed yet). that is, a simultaneous same-key insert. 
let candidates: Vec<_> = keys .map_keys .iter() .filter(|&(k, _)| !self.0.has(k)) .collect(); let (key, keyi) = *g .choose(&candidates) .expect("if means we only get here if there's at least one candidate"); (key.clone(), *keyi) } else { // generate an insert with a key that not only doesn't exist in this node's // map, but also isn't used by any _other_ node. let mut tries = 0; let key = loop { let candidate = format!("key{}", u8::arbitrary(g)); if !keys.map_keys.contains_left(&candidate) { break candidate; } tries += 1; if tries >= 10 { panic!("could not generate a distinct map key for insert"); } }; let keyi = keys.add_map_key(key.clone()); (key, keyi) } }; eprintln!("{indent} -> insert #{keyi} ({key})"); let inner_keys = &mut keys.inner_keys[keyi]; let key = keys.map_keys.get_by_right(&keyi).unwrap().clone(); let mut value_delta = None; let crdt = self.apply( |old, cc, id| { assert!(old.is_bottom()); let kind = if g.size() <= 1 { "register" } else { g.choose(&["map", "array", "register"]).copied().unwrap() }; eprintln!("{indent} -> generating inner {kind} operation"); let (vd, value_crdt) = match kind { "map" => { let mut g = Gen::new(g.size() / 2); let g = &mut g; let (delta, crdt) = OrMap::arbitrary_delta( &<_>::default(), cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Map(delta), crdt.map_store(Value::Map)) } "array" => { let mut g = Gen::new(g.size() / 2); let g = &mut g; let (delta, crdt) = OrArray::arbitrary_delta( &<_>::default(), cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Array(delta), crdt.map_store(Value::Array)) } "register" => { let (delta, crdt) = MvReg::arbitrary_delta( &<_>::default(), cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Register(delta), crdt.map_store(Value::Register)) } kind => unreachable!("need match arm for '{kind}'"), }; value_delta = Some(vd); value_crdt }, key.clone(), cc, id, ); ( MapOp::Apply( keyi, Some(key), Box::new(value_delta.expect("insert closure is always called")), ), crdt, ) } "update" => { let mut 
keyset = self.0.keys(); let keyi = usize::arbitrary(g) % keyset.len(); let key = keyset .nth(keyi) .expect("this arm is only taken if non-empty, and n is % len") .clone(); let keyi = *keys.map_keys.get_by_left(&key).unwrap(); eprintln!("{indent} -> updating #{keyi} ({key})"); let inner_keys = &mut keys.inner_keys[keyi]; // NOTE: this _may_ change the type -- that is intentional! test thoroughly. let mut value_delta = None; let crdt = self.apply( |old, cc, id| { let kind = if g.size() <= 1 { "register" } else { g.choose(&["map", "array", "register"]).copied().unwrap() }; eprintln!("{indent} -> generating inner {kind} operation"); let (vd, value_crdt) = match kind { "map" => { let mut g = Gen::new(g.size() / 2); let g = &mut g; let (delta, crdt) = OrMap::arbitrary_delta( &old.map, cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Map(delta), crdt.map_store(Value::Map)) } "array" => { let mut g = Gen::new(g.size() / 2); let g = &mut g; let (delta, crdt) = OrArray::arbitrary_delta( &old.array, cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Array(delta), crdt.map_store(Value::Array)) } "register" => { let (delta, crdt) = MvReg::arbitrary_delta( &old.reg, cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Register(delta), crdt.map_store(Value::Register)) } kind => unreachable!("need match arm for '{kind}'"), }; value_delta = Some(vd); value_crdt }, key, cc, id, ); ( MapOp::Apply( keyi, None, Box::new(value_delta.expect("apply closure is always called")), ), crdt, ) } "remove" => { let mut keyset = self.0.keys(); let keyi = usize::arbitrary(g) % keyset.len(); let key = keyset .nth(keyi) .expect("this arm is only taken if non-empty, and n is % len") .clone(); let keyi = *keys.map_keys.get_by_left(&key).unwrap(); eprintln!("{indent} -> removing #{keyi} ({key})"); (MapOp::Remove(keyi), self.remove(&key, cc, id)) } "clear" => { eprintln!("{indent} -> clearing map"); (MapOp::Clear, self.clear(cc, id)) } op => unreachable!("need match arm for '{op}'"), } } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/test_util/arbitrary_delta_impls/orarray.rs
src/crdts/test_util/arbitrary_delta_impls/orarray.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use super::ValueDelta; use crate::{ CausalContext, CausalDotStore, DotStore, Identifier, MvReg, OrArray, OrMap, crdts::{ NoExtensionTypes, Value, orarray::Position, test_util::{ArbitraryDelta, Delta, KeyTracker}, }, }; use quickcheck::{Arbitrary, Gen}; use std::{fmt, ops::RangeBounds}; // NOTE: Box is needed here to allow arbitrary nesting, otherwise the type isn't Sized. // This is because `ValueDelta` itself contains `ArrayOp`. #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] #[derive(Debug, Clone)] pub(crate) enum ArrayOp { Insert(usize, Position, Box<ValueDelta>), Update(usize, Position, Box<ValueDelta>), Delete(usize), Move(usize, Position), Clear, } impl fmt::Display for ArrayOp { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { Self::Insert(keyi, _, _) => write!(f, "insert key #{keyi}"), Self::Update(keyi, _, _) => write!(f, "updates key #{keyi}"), Self::Delete(keyi) => write!(f, "deletes key #{keyi}"), Self::Move(keyi, _) => write!(f, "moves key #{keyi}"), Self::Clear => write!(f, "clears the map"), } } } impl Delta for ArrayOp { type DS = OrArray<NoExtensionTypes>; fn depends_on_keyi_in<R: RangeBounds<usize>>(&self, range: R) -> bool { match *self { Self::Insert(keyi, _, _) | Self::Update(keyi, _, _) | Self::Delete(keyi) | Self::Move(keyi, _) => range.contains(&keyi), Self::Clear => false, } } fn into_crdt( self, ds: &Self::DS, cc: &CausalContext, id: Identifier, keys: &mut KeyTracker, ) -> CausalDotStore<Self::DS> { match self { Self::Insert(expected_keyi, p, v) => { assert_eq!(expected_keyi, keys.len()); let keyi = expected_keyi; let cc = cc.clone(); let uid = cc.next_dot_for(id).into(); let mut inner_keys = KeyTracker::default(); let crdt = ds.insert( uid, |cc, id| match *v { ValueDelta::Map(m) => m .into_crdt(&<_>::default(), cc, id, &mut inner_keys) .map_store(Value::Map), ValueDelta::Array(a) => a .into_crdt(&<_>::default(), cc, id, &mut 
inner_keys) .map_store(Value::Array), ValueDelta::Register(r) => r .into_crdt(&<_>::default(), cc, id, &mut inner_keys) .map_store(Value::Register), }, p, &cc, id, ); keys.inner_keys.push(inner_keys); keys.array_keys.insert(uid, keyi); crdt } Self::Update(keyi, p, v) => { let inner_keys = &mut keys.inner_keys[keyi]; let uid = *keys.array_keys.get_by_right(&keyi).unwrap(); ds.apply( uid, |old, cc, id| match *v { ValueDelta::Map(m) => m .into_crdt(&old.map, cc, id, inner_keys) .map_store(Value::Map), ValueDelta::Array(a) => a .into_crdt(&old.array, cc, id, inner_keys) .map_store(Value::Array), ValueDelta::Register(r) => r .into_crdt(&old.reg, cc, id, inner_keys) .map_store(Value::Register), }, p, cc, id, ) } Self::Delete(keyi) => { let uid = *keys.array_keys.get_by_right(&keyi).unwrap(); ds.delete(uid, cc, id) } Self::Move(keyi, p) => { let uid = *keys.array_keys.get_by_right(&keyi).unwrap(); ds.mv(uid, p, cc, id) } Self::Clear => ds.clear(cc, id), } } } impl ArbitraryDelta for OrArray<NoExtensionTypes> { type Delta = ArrayOp; fn arbitrary_delta( &self, cc: &CausalContext, id: Identifier, keys: &mut KeyTracker, g: &mut Gen, depth: usize, ) -> (Self::Delta, CausalDotStore<Self>) { // NOTE: see the outer_remove_vs_inner_mv test for why we need this let valid_keys: Vec<_> = self .0 .iter() .filter_map(|(k, v)| (!v.value.is_bottom()).then_some(k)) .collect(); let op = if valid_keys.is_empty() && self.0.is_empty() { g.choose(&["insert", "clear"]) } else if valid_keys.is_empty() { g.choose(&["insert", "delete", "clear"]) } else { g.choose(&["insert", "update", "delete", "move", "clear"]) }; let indent = " ".repeat(depth); match op.copied().unwrap() { "insert" => { let uid = cc.next_dot_for(id).into(); let kind = if g.size() <= 1 { "register" } else { g.choose(&["map", "array", "register"]).copied().unwrap() }; let keyi = keys.add_array_key(uid); eprintln!("{indent} -> inserting #{keyi} ({uid:?})"); let inner_keys = &mut keys.inner_keys[keyi]; let p = 
Position::arbitrary(g); let mut value_delta = None; let crdt = self.insert( uid, |cc, id| { eprintln!("{indent} -> generating inner {kind} operation"); let (vd, value_crdt) = match kind { "map" => { let mut g = Gen::new(g.size() / 2); let g = &mut g; let (delta, crdt) = OrMap::arbitrary_delta( &<_>::default(), cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Map(delta), crdt.map_store(Value::Map)) } "array" => { let mut g = Gen::new(g.size() / 2); let g = &mut g; let (delta, crdt) = OrArray::arbitrary_delta( &<_>::default(), cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Array(delta), crdt.map_store(Value::Array)) } "register" => { let (delta, crdt) = MvReg::arbitrary_delta( &<_>::default(), cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Register(delta), crdt.map_store(Value::Register)) } kind => unreachable!("need match arm for '{kind}'"), }; value_delta = Some(vd); value_crdt }, p, cc, id, ); ( ArrayOp::Insert( keyi, p, Box::new(value_delta.expect("insert closure is always called")), ), crdt, ) } "update" => { let uid = **g .choose(&valid_keys) .expect("this arm is only taken if non-empty"); // TODO: how should this handle the case of concurrent inserts of the same // key, which will imply that a single key has _multiple_ keyi. let keyi = *keys.array_keys.get_by_left(&uid).unwrap(); eprintln!("{indent} -> updating #{keyi} ({uid:?})"); let inner_keys = &mut keys.inner_keys[keyi]; let p = Position::arbitrary(g); // NOTE: this _may_ change the type -- that is intentional! test thoroughly. 
let mut value_delta = None; let crdt = self.apply( uid, |old, cc, id| { let kind = if g.size() <= 1 { "register" } else { g.choose(&["map", "array", "register"]).copied().unwrap() }; eprintln!("{indent} -> generating inner {kind} operation"); let (vd, value_crdt) = match kind { "map" => { let mut g = Gen::new(g.size() / 2); let g = &mut g; let (delta, crdt) = OrMap::arbitrary_delta( &old.map, cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Map(delta), crdt.map_store(Value::Map)) } "array" => { let mut g = Gen::new(g.size() / 2); let g = &mut g; let (delta, crdt) = OrArray::arbitrary_delta( &old.array, cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Array(delta), crdt.map_store(Value::Array)) } "register" => { let (delta, crdt) = MvReg::arbitrary_delta( &old.reg, cc, id, inner_keys, g, depth + 1, ); (ValueDelta::Register(delta), crdt.map_store(Value::Register)) } kind => unreachable!("need match arm for '{kind}'"), }; value_delta = Some(vd); value_crdt }, p, cc, id, ); ( ArrayOp::Update( keyi, p, Box::new(value_delta.expect("apply closure is always called")), ), crdt, ) } "delete" => { // NOTE: we specifically use the whole range of keys here, not just // "valid_keys", since we want to test what happens if a bottom-value element is // deleted. 
let mut uids = self.0.keys(); let uidi = usize::arbitrary(g) % uids.len(); let uid = *uids .nth(uidi) .expect("this arm is only taken if non-empty, and n is % len"); let keyi = *keys.array_keys.get_by_left(&uid).unwrap(); eprintln!("{indent} -> deleting #{keyi} ({uid:?})"); (ArrayOp::Delete(keyi), self.delete(uid, cc, id)) } "move" => { let uid = **g .choose(&valid_keys) .expect("this arm is only taken if non-empty"); let keyi = *keys.array_keys.get_by_left(&uid).unwrap(); eprintln!("{indent} -> moving #{keyi} ({uid:?})"); let p = Position::arbitrary(g); (ArrayOp::Move(keyi, p), self.mv(uid, p, cc, id)) } "clear" => { eprintln!("{indent} -> clearing array"); (ArrayOp::Clear, self.clear(cc, id)) } op => unreachable!("need match arm for '{op}'"), } } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/crdts/orarray/position.rs
src/crdts/orarray/position.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use std::cmp::Ordering; // NOTE: the original implementation has an atoms field with node identifiers in them stored // inside each Position, but none of that is actually _used_ anywhere, so it's been left over. one // of the original DSON paper authors confirmed by email on 2023-08-25 that the atoms/nodeid bits // are leftover from an earlier algorithm they used. // // TODO: in the same email, the author suggests that even `f64` may not be an ideal choice // here since the algorithm assumes that between every two points there exist a third, which is // true for real numbers, but only kind of true for `f64`. One option is to use a SmallVec<u8; 8> // so that for the happy case (fewer than 64 pushes) we use no more space, and with more we // seamlessly transition to a bigger type. /// A position in an [`OrArray`](super::OrArray). /// /// This is a wrapper around an `f64` that represents a position in an ordered sequence. The /// positions are used to determine the order of elements in the array. // TODO: Consider replacing `Position(f64)` with an unbounded rational // identifier such as `Fraction`, which stores each coordinate as a growable // vector of 31-bit digits (base = 2^31). A 64-bit float yields only 2^52 // distinct values in our interval, so after roughly fifty “insert-the-average” // operations in the same gap the two neighbours become bit-identical and // `between()` can no longer create a fresh position, forcing an expensive // renumbering of the entire list. By contrast, a vector-based representation can // always append another digit to refine the interval, ensuring that a new // position can be generated. 
#[derive(Clone, Copy)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct Position(pub(in super::super) f64); impl std::fmt::Debug for Position { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.0) } } impl PartialEq for Position { fn eq(&self, other: &Self) -> bool { self.cmp(other) == Ordering::Equal } } impl Eq for Position {} impl PartialOrd for Position { fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> { Some(self.cmp(other)) } } impl Ord for Position { fn cmp(&self, other: &Self) -> std::cmp::Ordering { self.0.total_cmp(&other.0) } } impl Position { pub(crate) const LOWER: f64 = 0.0; pub(crate) const UPPER: f64 = 32767.0; /// Returns a new position between two existing positions. pub fn between(left: Option<Position>, right: Option<Position>) -> Self { // NOTE: the original implementation also takes a node id (ie, `Identifier`), but then // never does anything with it, so we leave it off here. Self( (left.map(|p| p.0).unwrap_or(Position::LOWER) + right.map(|p| p.0).unwrap_or(Position::UPPER)) / 2.0, ) } /// Creates a `Position` from a raw `f64` value. /// /// Returns `None` if the value is outside the valid range. pub fn from_raw(value: f64) -> Option<Position> { (Position::LOWER..=Position::UPPER) .contains(&value) .then_some(Self(value)) } /// Returns the raw `f64` value of the position. pub fn as_raw(&self) -> f64 { self.0 } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/transaction/conflicted.rs
src/transaction/conflicted.rs
use crate::crdts::TypeVariantValue; use crate::dotstores::DotStore; use crate::{ExtensionType, MvReg, OrArray, OrMap}; use std::{fmt, hash::Hash}; /// A value with concurrent type conflicts. /// /// When replicas concurrently write different types to the same key /// (e.g., one writes a map, another an array), DSON preserves both /// in a [`TypeVariantValue`]. This type exposes methods to inspect conflicts. /// /// # Example /// /// ```no_run /// # use dson::transaction::ConflictedValue; /// # use dson::crdts::NoExtensionTypes; /// # let conflicted: ConflictedValue<String, NoExtensionTypes> = todo!(); /// if conflicted.has_map() && conflicted.has_array() { /// println!("Map and array were written concurrently!"); /// // Application must decide how to resolve this /// } /// ``` pub struct ConflictedValue<'tx, K, C> where K: Hash + Eq, C: ExtensionType, { inner: &'tx TypeVariantValue<C>, // K appears in CrdtValue<'tx, K, C> but TypeVariantValue doesn't use it. // PhantomData maintains consistent type parameters across the API. _phantom: std::marker::PhantomData<K>, } impl<'tx, K, C> ConflictedValue<'tx, K, C> where K: Hash + Eq + fmt::Debug + Clone, C: ExtensionType, { pub(crate) fn new(value: &'tx TypeVariantValue<C>) -> Self { Self { inner: value, _phantom: std::marker::PhantomData, } } /// Returns true if a map value is present in the conflict. pub fn has_map(&self) -> bool { !self.inner.map.is_bottom() } /// Returns true if an array value is present in the conflict. pub fn has_array(&self) -> bool { !self.inner.array.is_bottom() } /// Returns true if a register value is present in the conflict. pub fn has_register(&self) -> bool { !self.inner.reg.is_bottom() } /// Returns a reference to the map value, if present. pub fn map(&self) -> Option<&OrMap<String, C>> { if self.has_map() { Some(&self.inner.map) } else { None } } /// Returns a reference to the array value, if present. 
pub fn array(&self) -> Option<&OrArray<C>> { if self.has_array() { Some(&self.inner.array) } else { None } } /// Returns a reference to the register value, if present. pub fn register(&self) -> Option<&MvReg> { if self.has_register() { Some(&self.inner.reg) } else { None } } /// Returns the number of different types present in this conflict. /// /// A value of 0 means the key exists but is empty (all types are bottom). /// A value of 1 means there's no actual type conflict. /// A value > 1 indicates a genuine type conflict. pub fn conflict_count(&self) -> usize { let mut count = 0; if self.has_map() { count += 1; } if self.has_array() { count += 1; } if self.has_register() { count += 1; } count } } impl<'tx, K, C> fmt::Debug for ConflictedValue<'tx, K, C> where K: Hash + Eq + fmt::Debug + Clone, C: ExtensionType + fmt::Debug, { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("ConflictedValue") .field("has_map", &self.has_map()) .field("has_array", &self.has_array()) .field("has_register", &self.has_register()) .finish() } } #[cfg(test)] mod tests { use super::*; use crate::crdts::NoExtensionTypes; use crate::{CausalDotStore, Identifier, OrMap}; #[test] fn conflicted_value_empty() { use crate::crdts::TypeVariantValue; let value = TypeVariantValue::<NoExtensionTypes>::default(); let conflicted = ConflictedValue::<String, _>::new(&value); assert_eq!(conflicted.conflict_count(), 0); } #[test] fn conflicted_value_single_type() { // Create a real map with a value using CRDT operations let store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); let delta = store.store.apply_to_register( |reg, ctx, id| reg.write("test".to_string().into(), ctx, id), "key".to_string(), &store.context, id, ); // The delta contains a non-bottom register let value = delta.store.get(&"key".to_string()).unwrap(); let conflicted = ConflictedValue::<String, _>::new(value); assert!(!conflicted.has_map()); assert!(!conflicted.has_array()); 
assert!(conflicted.has_register()); assert_eq!(conflicted.conflict_count(), 1); assert!(conflicted.register().is_some()); } #[test] fn conflicted_value_conflict_detection() { // Test that we can detect when there are multiple types // This is more of a structural test - we verify the logic works use crate::crdts::TypeVariantValue; let value = TypeVariantValue::<NoExtensionTypes>::default(); let conflicted = ConflictedValue::<String, _>::new(&value); assert_eq!(conflicted.conflict_count(), 0); assert!(!conflicted.has_map()); assert!(!conflicted.has_array()); assert!(!conflicted.has_register()); } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/transaction/delta.rs
src/transaction/delta.rs
/// Changes to a CRDT, not full state. /// /// Prevents accidental misuse through type safety. Access the inner /// `CausalDotStore` via the public field. /// /// # Example /// ``` /// use dson::{Delta, CausalDotStore, OrMap}; /// /// # fn example(delta: Delta<CausalDotStore<OrMap<String>>>) { /// // Access inner value /// let store = delta.0; /// # } /// ``` #[derive(Debug, Clone, PartialEq, Eq, Hash)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] #[must_use = "deltas should be sent to other replicas or applied to stores"] pub struct Delta<T>(pub T); impl<T> Delta<T> { /// Creates a new Delta wrapping the given value. pub fn new(value: T) -> Self { Self(value) } /// Unwraps the Delta, returning the inner value. pub fn into_inner(self) -> T { self.0 } } #[cfg(test)] mod tests { use super::*; use crate::{CausalDotStore, OrMap}; #[test] fn delta_new_and_into_inner() { let store = CausalDotStore::<OrMap<String>>::default(); let delta = Delta::new(store.clone()); assert_eq!(delta.into_inner(), store); } #[test] fn delta_access_inner_via_field() { let store = CausalDotStore::<OrMap<String>>::default(); let delta = Delta::new(store.clone()); assert_eq!(delta.0, store); } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/transaction/map_transaction.rs
src/transaction/map_transaction.rs
use super::{ArrayTransaction, CrdtValue, Delta}; use crate::crdts::mvreg::MvRegValue; use crate::dotstores::DotStoreJoin; use crate::sentinel::DummySentinel; use crate::{CausalDotStore, ExtensionType, Identifier, OrMap}; use std::{fmt, hash::Hash}; /// A transaction for making multiple mutations to a DSON store. /// /// Transactions provide an ergonomic API for mutations and automatically /// manage delta generation. Operations apply eagerly to a cloned store, /// and commit swaps the modified clone back to the original store. /// /// # Eager Application with Rollback /// /// Each operation (write, remove, clear) applies immediately to a cloned copy of the store. /// This means `get()` sees uncommitted changes from the current transaction. /// If the transaction is dropped without calling `commit()`, all changes are automatically /// rolled back by discarding the clone. /// /// # Borrowing /// /// A transaction exclusively borrows the underlying store, preventing other /// access until the transaction is committed or dropped. This follows Rust's /// standard borrowing rules and has zero runtime overhead. /// /// # Example /// /// ``` /// use dson::{CausalDotStore, Identifier, OrMap, transaction::MapTransaction}; /// /// let mut store = CausalDotStore::<OrMap<String>>::default(); /// let id = Identifier::new(0, 0); /// /// let mut tx = MapTransaction::new(&mut store, id); /// // Make mutations... /// let delta = tx.commit(); /// ``` pub struct MapTransaction<'a, K, C = crate::crdts::NoExtensionTypes> where K: Hash + Eq + fmt::Debug + Clone, C: ExtensionType, { original_store: &'a mut CausalDotStore<OrMap<K, C>>, working_store: CausalDotStore<OrMap<K, C>>, id: Identifier, // Accumulated deltas from mutations (will be joined on commit) changes: Vec<CausalDotStore<OrMap<K, C>>>, } impl<'a, K, C> MapTransaction<'a, K, C> where K: Hash + Eq + fmt::Debug + Clone, C: ExtensionType, { /// Creates a new transaction for the given store and replica identifier. 
/// /// The transaction clones the store and exclusively borrows the original until committed. /// Changes apply to the clone, enabling automatic rollback on drop. /// /// # Example /// /// ``` /// # use dson::{CausalDotStore, Identifier, OrMap, transaction::MapTransaction}; /// let mut store = CausalDotStore::<OrMap<String>>::default(); /// let id = Identifier::new(0, 0); /// let tx = MapTransaction::new(&mut store, id); /// ``` pub fn new(store: &'a mut CausalDotStore<OrMap<K, C>>, id: Identifier) -> Self where C: Clone, { let working_store = store.clone(); Self { working_store, original_store: store, id, changes: Vec::new(), } } /// Creates a nested transaction without cloning. /// /// Used internally for nested transactions (`in_map`, `in_array`). /// Nested transactions don't need rollback support since they commit /// automatically when the closure returns. The store is swapped back on commit. pub(crate) fn new_nested(store: &'a mut CausalDotStore<OrMap<K, C>>, id: Identifier) -> Self { // Take ownership of store contents via mem::take, leaving default in its place let working_store = std::mem::take(store); Self { working_store, original_store: store, id, changes: Vec::new(), } } /// Combines accumulated deltas, swaps the working store to the original, and returns the delta. /// /// All changes have been applied to the working store clone. This method swaps the /// working store back to the original store reference, making the changes permanent. /// The combined delta is returned for network transmission. /// /// # Example /// /// ``` /// # use dson::{CausalDotStore, Identifier, OrMap, transaction::MapTransaction}; /// # let mut store = CausalDotStore::<OrMap<String>>::default(); /// # let id = Identifier::new(0, 0); /// let tx = MapTransaction::new(&mut store, id); /// // Make changes... /// let delta = tx.commit(); /// // Send delta over network... 
/// ``` pub fn commit(mut self) -> Delta<CausalDotStore<OrMap<K, C>>> where C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq, { // Swap the working store back to the original to make changes permanent *self.original_store = self.working_store; if self.changes.is_empty() { // No changes, return empty delta return Delta::new(CausalDotStore::default()); } // Join all accumulated deltas into a single delta let mut combined = self.changes.remove(0); for delta in self.changes.drain(..) { combined = combined .join(delta, &mut DummySentinel) .expect("DummySentinel is infallible"); } Delta::new(combined) } /// Records a change and applies it immediately to the working store. /// /// This enables eager (read-uncommitted) semantics, allowing subsequent /// operations within the transaction to see uncommitted changes. fn record_change(&mut self, delta: CausalDotStore<OrMap<K, C>>) where C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq, { // Apply delta to working store immediately so subsequent operations see updated state self.working_store .join_or_replace_with(delta.store.clone(), &delta.context); self.changes.push(delta); } /// Reads a value from the map, requiring explicit type handling. /// /// This method returns a [`CrdtValue`] enum that forces the caller to /// handle type conflicts and different types explicitly. /// /// Returns `None` if the key doesn't exist in the map. /// /// # Isolation Semantics /// /// This reads the current state of the working store, which includes all changes /// made during this transaction. Map operations apply immediately to the working store, /// so `get` sees uncommitted changes (eager/read-uncommitted semantics). /// /// This matches the behavior of [`ArrayTransaction::get`](crate::transaction::ArrayTransaction::get) /// and enables consistent behavior in nested transactions. 
/// /// # Example /// /// ``` /// # use dson::{CausalDotStore, Identifier, OrMap, transaction::{MapTransaction, CrdtValue}}; /// # use dson::crdts::snapshot::ToValue; /// # let mut store = CausalDotStore::<OrMap<String>>::default(); /// # let id = Identifier::new(0, 0); /// let tx = store.transact(id); /// match tx.get(&"key".to_string()) { /// Some(CrdtValue::Register(reg)) => { /// // Read register value /// if let Ok(value) = reg.value() { /// println!("Value: {:?}", value); /// } /// } /// Some(CrdtValue::Conflicted(conflicts)) => { /// // Handle type conflict /// println!("Conflict count: {}", conflicts.conflict_count()); /// } /// None => { /// println!("Key doesn't exist"); /// } /// _ => { /// // Other types (map, array, empty) /// } /// } /// ``` pub fn get(&self, key: &K) -> Option<CrdtValue<'_, K, C>> { let value = self.working_store.store.get(key)?; Some(CrdtValue::from_type_variant(value)) } /// Writes a value to a register at the given key, overwriting any /// existing map, array, or conflicted value. /// /// Accumulates the delta for commit. 
/// /// # Example /// /// ``` /// # use dson::{CausalDotStore, Identifier, OrMap, transaction::MapTransaction}; /// # let mut store = CausalDotStore::<OrMap<String>>::default(); /// # let id = Identifier::new(0, 0); /// let mut tx = MapTransaction::new(&mut store, id); /// tx.write_register("count", 42u64.into()); /// tx.write_register("name", "Alice".to_string().into()); /// let delta = tx.commit(); /// ``` pub fn write_register(&mut self, key: impl Into<K>, value: MvRegValue) where C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq, { let key = key.into(); // Call OrMap::apply_to_register directly, passing a closure that calls MvReg::write let delta = self.working_store.store.apply_to_register( |reg, ctx, id| reg.write(value.clone(), ctx, id), key, &self.working_store.context, self.id, ); // Record the delta (which also updates working_store.context) self.record_change(delta); } /// Removes a key from the map. /// /// This creates a CRDT tombstone that marks the key as removed. The removal /// will be propagated to other replicas via the delta. /// /// # Example /// /// ``` /// # use dson::{CausalDotStore, Identifier, OrMap, transaction::MapTransaction}; /// # use dson::crdts::mvreg::MvRegValue; /// # let mut store = CausalDotStore::<OrMap<String>>::default(); /// # let id = Identifier::new(0, 0); /// # { /// # let mut tx = MapTransaction::new(&mut store, id); /// # tx.write_register("key", MvRegValue::String("value".to_string())); /// # tx.commit(); /// # } /// let mut tx = MapTransaction::new(&mut store, id); /// tx.remove("key"); /// let delta = tx.commit(); /// ``` pub fn remove(&mut self, key: impl Into<K>) where C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq, { let key = key.into(); // Call OrMap::remove directly let delta = self .working_store .store .remove(&key, &self.working_store.context, self.id); // Record the delta (which also updates working_store.context) self.record_change(delta); } /// Clears all keys from the map. 
/// /// This creates CRDT tombstones for all existing keys. The clear operation /// will be propagated to other replicas via the delta. /// /// # Example /// /// ``` /// # use dson::{CausalDotStore, Identifier, OrMap, transaction::MapTransaction}; /// # use dson::crdts::mvreg::MvRegValue; /// # let mut store = CausalDotStore::<OrMap<String>>::default(); /// # let id = Identifier::new(0, 0); /// # { /// # let mut tx = MapTransaction::new(&mut store, id); /// # tx.write_register("a", MvRegValue::U64(1)); /// # tx.write_register("b", MvRegValue::U64(2)); /// # tx.commit(); /// # } /// let mut tx = MapTransaction::new(&mut store, id); /// tx.clear(); /// let delta = tx.commit(); /// ``` pub fn clear(&mut self) where C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq, { // Call OrMap::clear directly let delta = self .working_store .store .clear(&self.working_store.context, self.id); // Record the delta (which also updates working_store.context) self.record_change(delta); } /// Creates a nested transaction for a map at the given key. /// /// The closure receives a mutable reference to a child `MapTransaction`. /// All operations on the child are accumulated and applied to the parent /// when the closure returns. 
/// /// # Example /// /// ```rust /// # use dson::{CausalDotStore, Identifier, OrMap, transaction::MapTransaction}; /// # use dson::crdts::mvreg::MvRegValue; /// # let mut store = CausalDotStore::<OrMap<String>>::default(); /// # let id = Identifier::new(0, 0); /// let mut tx = MapTransaction::new(&mut store, id); /// tx.in_map("user", |user_tx| { /// user_tx.write_register("email", MvRegValue::String("alice@example.com".to_string())); /// user_tx.write_register("age", MvRegValue::U64(30)); /// }); /// let delta = tx.commit(); /// ``` pub fn in_map<F>(&mut self, key: impl Into<K>, f: F) where F: FnOnce(&mut MapTransaction<'_, String, C>), C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq, { let key = key.into(); // Get current nested map (or create empty) let nested_map = self .working_store .store .get(&key) .map(|v| v.map.clone()) .unwrap_or_default(); // Create temporary store for child transaction let mut child_store = CausalDotStore { store: nested_map, context: self.working_store.context.clone(), }; // Create child transaction (no clone needed for nested transactions) let mut child_tx = MapTransaction::new_nested(&mut child_store, self.id); // User performs operations on child // If f panics, child_tx drops without commit, child_store retains Default value f(&mut child_tx); // Get delta from child let child_delta = child_tx.commit(); // If child made changes, wrap in parent's map operation if !child_delta.0.is_bottom() { let delta = self.working_store.store.apply_to_map( |_old_map, _ctx, _id| child_delta.0.clone(), key, &self.working_store.context, self.id, ); self.record_change(delta); } } /// Creates a nested transaction for an array at the given key. /// /// The closure receives a mutable reference to a child `ArrayTransaction`. /// All operations on the child are accumulated and applied to the parent /// when the closure returns. 
/// /// # Example /// /// ```rust /// # use dson::{CausalDotStore, Identifier, OrMap, transaction::MapTransaction}; /// # use dson::crdts::mvreg::MvRegValue; /// # let mut store = CausalDotStore::<OrMap<String>>::default(); /// # let id = Identifier::new(0, 0); /// let mut tx = MapTransaction::new(&mut store, id); /// tx.in_array("tags", |tags_tx| { /// tags_tx.insert_register(0, MvRegValue::String("rust".to_string())); /// tags_tx.insert_register(1, MvRegValue::String("crdt".to_string())); /// }); /// let delta = tx.commit(); /// ``` pub fn in_array<F>(&mut self, key: impl Into<K>, f: F) where F: FnOnce(&mut ArrayTransaction<'_, C>), C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq, { let key = key.into(); // Get current nested array (or create empty) let nested_array = self .working_store .store .get(&key) .map(|v| v.array.clone()) .unwrap_or_default(); // Create temporary store for child transaction let mut child_store = CausalDotStore { store: nested_array, context: self.working_store.context.clone(), }; // Create child transaction (no clone needed for nested transactions) let mut child_tx = ArrayTransaction::new_nested(&mut child_store, self.id); // User performs operations on child f(&mut child_tx); // Get delta from child let child_delta = child_tx.commit(); // If child made changes, wrap in parent's array operation if !child_delta.0.is_bottom() { let delta = self.working_store.store.apply_to_array( |_old_array, _ctx, _id| child_delta.0.clone(), key.clone(), &self.working_store.context, self.id, ); self.record_change(delta); } } } #[cfg(test)] mod tests { use super::*; use crate::{DotStore, crdts::NoExtensionTypes}; #[test] fn transaction_new() { let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); let _tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); } #[test] fn transaction_borrows_exclusively() { let mut store = CausalDotStore::<OrMap<String>>::default(); let id = 
Identifier::new(0, 0); let tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); // This should not compile (uncomment to verify): // let _ = &store; // Error: cannot borrow `store` as immutable drop(tx); // After dropping tx, we can borrow again let _ = &store; } #[test] fn transaction_get_nonexistent() { let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); let tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); assert!(tx.get(&"nonexistent".to_string()).is_none()); } #[test] fn transaction_get_returns_correct_type() { use crate::crdts::mvreg::MvRegValue; use crate::sentinel::DummySentinel; let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Create a register at "key" let delta1 = store.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::String("test".to_string()), ctx, id), "key".to_string(), &store.context, id, ); store = store.join(delta1, &mut DummySentinel).unwrap(); let tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); match tx.get(&"key".to_string()) { Some(CrdtValue::Register(_)) => { // Expected: we created a register } _ => panic!("Expected Register variant"), } } #[test] fn transaction_get_register() { use crate::crdts::mvreg::MvRegValue; use crate::crdts::snapshot::ToValue; use crate::sentinel::DummySentinel; let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Add a register value using proper CRDT operations let delta = store.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::String("test".to_string()), ctx, id), "key".to_string(), &store.context, id, ); store = store.join(delta, &mut DummySentinel).unwrap(); let tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); match tx.get(&"key".to_string()) { Some(CrdtValue::Register(reg)) => { assert_eq!( reg.value().unwrap(), &MvRegValue::String("test".to_string()) ); } _ => panic!("Expected 
Register variant"), } } #[test] fn transaction_write_register() { use crate::crdts::mvreg::MvRegValue; let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.write_register("count", MvRegValue::U64(42)); assert_eq!(tx.changes.len(), 1); } #[test] fn transaction_write_multiple_registers() { use crate::crdts::mvreg::MvRegValue; let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.write_register("count", MvRegValue::U64(42)); tx.write_register("name", MvRegValue::String("Alice".to_string())); tx.write_register("active", MvRegValue::Bool(true)); assert_eq!(tx.changes.len(), 3); } #[test] fn transaction_commit_applies_changes() { use crate::crdts::mvreg::MvRegValue; let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.write_register("count", MvRegValue::U64(42)); let _delta = tx.commit(); } // Verify the change was applied to the store let value = store.store.get(&"count".to_string()).unwrap(); assert!(!value.reg.is_bottom()); use crate::crdts::snapshot::ToValue; assert_eq!(value.reg.value().unwrap(), &MvRegValue::U64(42)); } #[test] fn transaction_commit_returns_delta() { use crate::crdts::mvreg::MvRegValue; let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); let delta = { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.write_register("count", MvRegValue::U64(42)); tx.commit() }; // Delta should contain the change let value = delta.0.store.get(&"count".to_string()).unwrap(); assert!(!value.reg.is_bottom()); } #[test] fn transaction_commit_multiple_changes() { use crate::crdts::mvreg::MvRegValue; let mut store = 
CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.write_register("a", MvRegValue::U64(1)); tx.write_register("b", MvRegValue::U64(2)); tx.write_register("c", MvRegValue::U64(3)); let _delta = tx.commit(); } // All changes should be applied assert!(store.store.get(&"a".to_string()).is_some()); assert!(store.store.get(&"b".to_string()).is_some()); assert!(store.store.get(&"c".to_string()).is_some()); } #[test] fn transaction_nested_map() { use crate::crdts::mvreg::MvRegValue; use crate::crdts::snapshot::ToValue; let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.in_map("user", |user_tx| { user_tx .write_register("email", MvRegValue::String("alice@example.com".to_string())); }); let _delta = tx.commit(); } // Verify nested structure let user_value = store.store.get(&"user".to_string()).unwrap(); assert!(!user_value.map.is_bottom()); let email_value = user_value.map.get(&"email".to_string()).unwrap(); assert!(!email_value.reg.is_bottom()); assert_eq!( email_value.reg.value().unwrap(), &MvRegValue::String("alice@example.com".to_string()) ); } #[test] fn transaction_nested_map_multiple_fields() { use crate::crdts::mvreg::MvRegValue; use crate::crdts::snapshot::ToValue; let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.in_map("user", |user_tx| { user_tx .write_register("email", MvRegValue::String("alice@example.com".to_string())); user_tx.write_register("age", MvRegValue::U64(30)); }); let _delta = tx.commit(); } // Verify both fields exist let user_value = store.store.get(&"user".to_string()).unwrap(); assert!(!user_value.map.is_bottom()); let email_value = user_value.map.get(&"email".to_string()).unwrap(); assert_eq!( 
email_value.reg.value().unwrap(), &MvRegValue::String("alice@example.com".to_string()) ); let age_value = user_value.map.get(&"age".to_string()).unwrap(); assert_eq!(age_value.reg.value().unwrap(), &MvRegValue::U64(30)); } #[test] fn transaction_nested_array() { use crate::crdts::mvreg::MvRegValue; let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.in_array("items", |items_tx| { items_tx.insert_register(0, MvRegValue::String("item1".to_string())); }); let _delta = tx.commit(); } // Verify array was created let items_value = store.store.get(&"items".to_string()).unwrap(); assert!(!items_value.array.is_bottom()); } #[test] fn transaction_remove() { use crate::crdts::mvreg::MvRegValue; let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // First, create a key { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.write_register("key", MvRegValue::String("value".to_string())); let _delta = tx.commit(); } // Verify it exists assert!(store.store.get(&"key".to_string()).is_some()); // Now remove it { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.remove("key"); let _delta = tx.commit(); } // Verify it's gone assert!(store.store.get(&"key".to_string()).is_none()); } #[test] fn transaction_clear() { use crate::crdts::mvreg::MvRegValue; let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Create multiple keys { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.write_register("a", MvRegValue::U64(1)); tx.write_register("b", MvRegValue::U64(2)); tx.write_register("c", MvRegValue::U64(3)); let _delta = tx.commit(); } // Verify they exist assert!(store.store.get(&"a".to_string()).is_some()); assert!(store.store.get(&"b".to_string()).is_some()); 
assert!(store.store.get(&"c".to_string()).is_some()); // Clear the map { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store, id); tx.clear(); let _delta = tx.commit(); } // Verify they're all gone assert!(store.store.get(&"a".to_string()).is_none()); assert!(store.store.get(&"b".to_string()).is_none()); assert!(store.store.get(&"c".to_string()).is_none()); } // Property-based test: transaction API should produce same results as direct CRDT calls #[test] fn property_transaction_equals_direct_crdt() { use crate::crdts::mvreg::MvRegValue; // Test that write_register via transaction produces same result as direct call let mut store_tx = CausalDotStore::<OrMap<String>>::default(); let mut store_direct = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Via transaction { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store_tx, id); tx.write_register("key", MvRegValue::String("value".to_string())); let _delta = tx.commit(); } // Direct CRDT call { let delta = store_direct.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::String("value".to_string()), ctx, id), "key".to_string(), &store_direct.context, id, ); store_direct.join_or_replace_with(delta.store, &delta.context); } // Should be equivalent assert_eq!(store_tx, store_direct); } #[test] fn property_concurrent_writes_converge() { use crate::crdts::mvreg::MvRegValue; use crate::crdts::snapshot::ToValue; // Two replicas make concurrent writes to different keys let id1 = Identifier::new(0, 0); let id2 = Identifier::new(1, 0); // Replica 1 writes "a" let mut store1 = CausalDotStore::<OrMap<String>>::default(); let delta1 = { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store1, id1); tx.write_register("a", MvRegValue::U64(1)); tx.commit() }; // Replica 2 writes "b" let mut store2 = CausalDotStore::<OrMap<String>>::default(); let delta2 = { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store2, id2); 
tx.write_register("b", MvRegValue::U64(2)); tx.commit() }; // Exchange deltas store1.join_or_replace_with(delta2.0.store.clone(), &delta2.0.context); store2.join_or_replace_with(delta1.0.store, &delta1.0.context); // Should converge to same state (CRDT property) assert_eq!(store1, store2); // Both should have both keys assert_eq!( store1 .store .get(&"a".to_string()) .unwrap() .reg .value() .unwrap(), &MvRegValue::U64(1) ); assert_eq!( store1 .store .get(&"b".to_string()) .unwrap() .reg .value() .unwrap(), &MvRegValue::U64(2) ); } #[test] fn property_remove_then_add_idempotent() { use crate::crdts::mvreg::MvRegValue; let id1 = Identifier::new(0, 0); let _id2 = Identifier::new(1, 0); // Replica 1: Add then remove let mut store1 = CausalDotStore::<OrMap<String>>::default(); let delta1 = { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store1, id1); tx.write_register("key", MvRegValue::String("value".to_string())); tx.commit() }; let delta2 = { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store1, id1); tx.remove("key"); tx.commit() }; // Replica 2: receives deltas in reverse order (remove, then add) let mut store2 = CausalDotStore::<OrMap<String>>::default(); store2.join_or_replace_with(delta2.0.store, &delta2.0.context); store2.join_or_replace_with(delta1.0.store, &delta1.0.context); // Should converge - remove should win due to causal context assert_eq!(store1, store2); assert!(store1.store.get(&"key".to_string()).is_none()); } #[test] fn test_concurrent_type_conflicts_detected() { use crate::crdts::mvreg::MvRegValue; // Two replicas concurrently write different types to the same key let id1 = Identifier::new(0, 0); let id2 = Identifier::new(1, 0); // Replica 1: writes a register at key "data" let mut store1 = CausalDotStore::<OrMap<String>>::default(); let delta1 = { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store1, id1); tx.write_register("data", MvRegValue::String("text".to_string())); tx.commit() }; 
// Replica 2: writes an array at key "data" let mut store2 = CausalDotStore::<OrMap<String>>::default(); let delta2 = { let mut tx = MapTransaction::<String, NoExtensionTypes>::new(&mut store2, id2); tx.in_array("data", |data_tx| { data_tx.insert_register(0, MvRegValue::U64(42)); }); tx.commit() }; // Both replicas exchange deltas - this creates a type conflict store1.join_or_replace_with(delta2.0.store, &delta2.0.context); store2.join_or_replace_with(delta1.0.store, &delta1.0.context); // Both should converge to same state assert_eq!(store1, store2); // Reading should detect the conflict via CrdtValue::Conflicted let tx1 = MapTransaction::<String, NoExtensionTypes>::new(&mut store1, id1); match tx1.get(&"data".to_string()) { Some(CrdtValue::Conflicted(conflicts)) => { // Verify both types are present assert!( conflicts.has_register(), "Should have register from replica 1" );
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
true
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/transaction/array_transaction.rs
src/transaction/array_transaction.rs
use crate::crdts::Value;
use crate::crdts::mvreg::MvRegValue;
use crate::dotstores::DotStoreJoin;
use crate::sentinel::DummySentinel;
use crate::transaction::{CrdtValue, Delta, MapTransaction};
use crate::{CausalDotStore, ExtensionType, Identifier, OrArray, OrMap};
use std::fmt;

/// Transaction for mutating an [`OrArray`].
///
/// Similar to [`MapTransaction`](crate::transaction::MapTransaction) but for arrays.
/// Operations apply eagerly to a cloned store, and `commit()` swaps the modified clone
/// back to the original store.
///
/// See the [module documentation](crate::transaction) for details on eager application
/// semantics with automatic rollback support.
///
/// # Example
///
/// ```rust
/// use dson::{CausalDotStore, Identifier, OrArray, transaction::ArrayTransaction};
///
/// let mut store = CausalDotStore::<OrArray>::default();
/// let id = Identifier::new(0, 0);
///
/// let mut tx = ArrayTransaction::new(&mut store, id);
/// // Array operations...
/// let delta = tx.commit();
/// ```
pub struct ArrayTransaction<'a, C = crate::crdts::NoExtensionTypes>
where
    C: ExtensionType,
{
    // Exclusive borrow of the caller's store; only written to in `commit()`,
    // so dropping the transaction without committing leaves it untouched
    // (top-level) or defaulted (nested, see `new_nested`).
    original_store: &'a mut CausalDotStore<OrArray<C>>,
    // The store all operations act on. For top-level transactions this is a
    // clone of `original_store`; for nested ones it is the taken-out original.
    working_store: CausalDotStore<OrArray<C>>,
    // Replica identifier used to mint dots for every mutation in this transaction.
    id: Identifier,
    // One delta per mutating operation, in application order; combined into a
    // single delta at commit time.
    changes: Vec<CausalDotStore<OrArray<C>>>,
}

impl<'a, C> ArrayTransaction<'a, C>
where
    C: ExtensionType,
{
    /// Creates a new transaction for the given array store.
    ///
    /// The transaction clones the store and exclusively borrows the original until committed.
    /// Changes apply to the clone, enabling automatic rollback on drop.
    pub fn new(store: &'a mut CausalDotStore<OrArray<C>>, id: Identifier) -> Self
    where
        C: Clone,
    {
        let working_store = store.clone();
        Self {
            working_store,
            original_store: store,
            id,
            changes: Vec::new(),
        }
    }

    /// Creates a nested transaction without cloning.
    ///
    /// Used internally for nested transactions (`insert_map`, `insert_array`).
    /// Nested transactions don't need rollback support since they commit
    /// automatically when the closure returns.
    /// The store is swapped back on commit.
    pub(crate) fn new_nested(store: &'a mut CausalDotStore<OrArray<C>>, id: Identifier) -> Self {
        // Take ownership of store contents via mem::take, leaving default in its place.
        // NOTE: if this nested transaction is dropped without commit (e.g. the user
        // closure panics), `store` is left holding the default value rather than the
        // original contents — acceptable here because callers discard it in that case.
        let working_store = std::mem::take(store);
        Self {
            working_store,
            original_store: store,
            id,
            changes: Vec::new(),
        }
    }

    /// Combines accumulated deltas, swaps the working store to the original, and returns the delta.
    ///
    /// All changes have been applied to the working store clone. This method swaps the
    /// working store back to the original store reference, making the changes permanent.
    /// The combined delta is returned for network transmission.
    pub fn commit(mut self) -> Delta<CausalDotStore<OrArray<C>>>
    where
        C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq,
    {
        // Swap the working store back to the original to make changes permanent.
        // This happens even when no changes were recorded; the working store is
        // then identical (top-level clone) so the swap is a no-op in effect.
        *self.original_store = self.working_store;
        if self.changes.is_empty() {
            return Delta::new(CausalDotStore::default());
        }
        // Fold all per-operation deltas into one. Seed with the first delta and
        // join the rest in recorded order.
        let mut combined = self.changes.remove(0);
        for delta in self.changes.drain(..) {
            combined = combined
                .join(delta, &mut DummySentinel)
                .expect("DummySentinel is infallible");
        }
        Delta::new(combined)
    }

    // Appends one operation's delta to the accumulated change list.
    fn record_change(&mut self, delta: CausalDotStore<OrArray<C>>) {
        self.changes.push(delta);
    }

    /// Inserts a register value at the given index.
    ///
    /// If `idx` is greater than the array length, the value is appended.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use dson::{CausalDotStore, Identifier, OrArray, transaction::ArrayTransaction};
    /// # use dson::crdts::mvreg::MvRegValue;
    /// # let mut store = CausalDotStore::<OrArray>::default();
    /// # let id = Identifier::new(0, 0);
    /// let mut tx = ArrayTransaction::new(&mut store, id);
    /// tx.insert_register(0, MvRegValue::String("first".to_string()));
    /// tx.insert_register(1, MvRegValue::U64(42));
    /// let delta = tx.commit();
    /// ```
    pub fn insert_register(&mut self, idx: usize, value: impl Into<MvRegValue>)
    where
        C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq,
    {
        // Index clamping/append behavior is implemented by `insert_idx_register`
        // (defined elsewhere) — presumed to clamp `idx` to the current length.
        let delta = self.working_store.store.insert_idx_register(
            idx,
            value.into(),
            &self.working_store.context,
            self.id,
        );
        // Apply delta to working store immediately so subsequent operations see updated state
        self.working_store
            .join_or_replace_with(delta.store.clone(), &delta.context);
        self.record_change(delta);
    }

    /// Removes the element at the given index.
    ///
    /// # Panics
    ///
    /// Panics if `idx >= len()`.
    pub fn remove(&mut self, idx: usize)
    where
        C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq,
    {
        let delta = self
            .working_store
            .store
            .remove(idx, &self.working_store.context, self.id);
        // Apply delta to working store immediately so subsequent operations see updated state
        self.working_store
            .join_or_replace_with(delta.store.clone(), &delta.context);
        self.record_change(delta);
    }

    /// Inserts a map at the given index.
    ///
    /// The closure receives a mutable reference to a `MapTransaction` for
    /// configuring the nested map.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use dson::{CausalDotStore, Identifier, OrArray, transaction::ArrayTransaction};
    /// # use dson::crdts::mvreg::MvRegValue;
    /// # let mut store = CausalDotStore::<OrArray>::default();
    /// # let id = Identifier::new(0, 0);
    /// let mut tx = ArrayTransaction::new(&mut store, id);
    /// tx.insert_map(0, |task_tx| {
    ///     task_tx.write_register("title", MvRegValue::String("Write docs".to_string()));
    ///     task_tx.write_register("done", MvRegValue::Bool(false));
    /// });
    /// let delta = tx.commit();
    /// ```
    pub fn insert_map<F>(&mut self, idx: usize, f: F)
    where
        F: FnOnce(&mut MapTransaction<'_, String, C>),
        C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq,
    {
        // Create empty map for child; share the parent's causal context so the
        // child's dots are minted consistently with this transaction.
        let mut child_store = CausalDotStore {
            store: OrMap::<String, C>::default(),
            context: self.working_store.context.clone(),
        };
        let mut child_tx = MapTransaction::new_nested(&mut child_store, self.id);
        // User populates the nested map; the child commits when `f` returns.
        f(&mut child_tx);
        let child_delta = child_tx.commit();
        // Insert the map into the array
        let delta = self.working_store.store.insert_idx_with(
            idx,
            |_ctx, _id| child_delta.0.map_store(Value::Map),
            &self.working_store.context,
            self.id,
        );
        // Apply delta to working store immediately so subsequent operations see updated state
        self.working_store
            .join_or_replace_with(delta.store.clone(), &delta.context);
        self.record_change(delta);
    }

    /// Inserts an array at the given index.
    ///
    /// The closure receives a mutable reference to an `ArrayTransaction` for
    /// configuring the nested array.
    pub fn insert_array<F>(&mut self, idx: usize, f: F)
    where
        F: FnOnce(&mut ArrayTransaction<'_, C>),
        C: DotStoreJoin<DummySentinel> + fmt::Debug + Clone + PartialEq,
    {
        // Create empty array for child; share the parent's causal context
        // (same scheme as `insert_map`).
        let mut child_store = CausalDotStore {
            store: OrArray::<C>::default(),
            context: self.working_store.context.clone(),
        };
        let mut child_tx = ArrayTransaction::new_nested(&mut child_store, self.id);
        f(&mut child_tx);
        let child_delta = child_tx.commit();
        // Insert the array into the parent array
        let delta = self.working_store.store.insert_idx_with(
            idx,
            |_ctx, _id| child_delta.0.map_store(Value::Array),
            &self.working_store.context,
            self.id,
        );
        // Apply delta to working store immediately so subsequent operations see updated state
        self.working_store
            .join_or_replace_with(delta.store.clone(), &delta.context);
        self.record_change(delta);
    }

    /// Gets the element at the given index.
    ///
    /// Returns `None` if the index is out of bounds.
    ///
    /// # Isolation Semantics
    ///
    /// This reads the current state of the array, which includes all changes
    /// made during this transaction. Array operations apply immediately to the store,
    /// so `get` sees uncommitted changes (eager/read-uncommitted semantics).
    ///
    /// This matches the behavior of [`MapTransaction::get`](crate::transaction::MapTransaction::get)
    /// and enables consistent behavior in nested transactions.
    ///
    /// # Example
    ///
    /// ```rust
    /// # use dson::{CausalDotStore, Identifier, OrArray, transaction::{ArrayTransaction, CrdtValue}};
    /// # use dson::crdts::mvreg::MvRegValue;
    /// # let mut store = CausalDotStore::<OrArray>::default();
    /// # let id = Identifier::new(0, 0);
    /// let mut tx = ArrayTransaction::new(&mut store, id);
    /// tx.insert_register(0, MvRegValue::String("first".to_string()));
    ///
    /// // Array tx sees uncommitted changes
    /// match tx.get(0) {
    ///     Some(CrdtValue::Register(reg)) => {
    ///         // Can read the value we just inserted
    ///     }
    ///     _ => {}
    /// }
    /// # let _ = tx.commit();
    /// ```
    pub fn get(&self, idx: usize) -> Option<CrdtValue<'_, usize, C>> {
        let value = self.working_store.store.get(idx)?;
        Some(CrdtValue::from_type_variant(value))
    }

    /// Returns the number of elements in the array.
    ///
    /// Reflects uncommitted changes made through this transaction.
    pub fn len(&self) -> usize {
        self.working_store.store.len()
    }

    /// Returns `true` if the array is empty.
    pub fn is_empty(&self) -> bool {
        self.working_store.store.is_empty()
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::crdts::NoExtensionTypes;
    use crate::{CausalDotStore, Identifier, OrArray};

    // Constructing a transaction must compile and not panic on an empty store.
    #[test]
    fn array_transaction_new() {
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        let _tx = ArrayTransaction::new(&mut store, id);
    }

    // Committing with no operations must yield an empty (bottom) delta.
    #[test]
    fn array_transaction_commit_empty() {
        use crate::crdts::NoExtensionTypes;
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        let tx = ArrayTransaction::new(&mut store, id);
        let delta = tx.commit();
        // Empty transaction should produce empty delta
        assert!(delta.0.store.is_empty());
    }

    // A committed insert must be visible in the underlying store.
    #[test]
    fn array_transaction_insert_register() {
        use crate::crdts::{NoExtensionTypes, mvreg::MvRegValue, snapshot::ToValue};
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        {
            let mut tx = ArrayTransaction::new(&mut store, id);
            tx.insert_register(0, MvRegValue::String("hello".to_string()));
            let _delta = tx.commit();
        }
        // Verify insertion
        let val = store.store.get(0).expect("item should exist");
        assert_eq!(
            val.reg.value().unwrap(),
            &MvRegValue::String("hello".to_string())
        );
    }

    // Removing an element shrinks the array after commit.
    #[test]
    fn array_transaction_remove() {
        use crate::crdts::{NoExtensionTypes, mvreg::MvRegValue};
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        // Insert two items
        {
            let mut tx = ArrayTransaction::new(&mut store, id);
            tx.insert_register(0, MvRegValue::U64(1));
            tx.insert_register(1, MvRegValue::U64(2));
            let _delta = tx.commit();
        }
        assert_eq!(store.store.len(), 2);
        // Remove first item
        {
            let mut tx = ArrayTransaction::new(&mut store, id);
            tx.remove(0);
            let _delta = tx.commit();
        }
        assert_eq!(store.store.len(), 1);
    }

    #[test]
    fn array_transaction_sequential_inserts_preserve_order() {
        // Regression test: ensure sequential inserts work correctly
        // Bug was that index clamping used stale array length (always 0)
        // causing all inserts to go to position 0
        use crate::crdts::{NoExtensionTypes, mvreg::MvRegValue, snapshot::ToValue};
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        {
            let mut tx = ArrayTransaction::new(&mut store, id);
            tx.insert_register(0, MvRegValue::String("first".to_string()));
            tx.insert_register(1, MvRegValue::String("second".to_string()));
            tx.insert_register(2, MvRegValue::String("third".to_string()));
            let _delta = tx.commit();
        }
        assert_eq!(store.store.len(), 3);
        // Verify order is deterministic and correct
        let item0 = store.store.get(0).expect("item 0 should exist");
        assert_eq!(
            item0.reg.value().unwrap(),
            &MvRegValue::String("first".to_string())
        );
        let item1 = store.store.get(1).expect("item 1 should exist");
        assert_eq!(
            item1.reg.value().unwrap(),
            &MvRegValue::String("second".to_string())
        );
        let item2 = store.store.get(2).expect("item 2 should exist");
        assert_eq!(
            item2.reg.value().unwrap(),
            &MvRegValue::String("third".to_string())
        );
    }

    // A nested map built via `insert_map` must be readable from the parent store.
    #[test]
    fn array_transaction_insert_map() {
        use crate::crdts::{NoExtensionTypes, mvreg::MvRegValue, snapshot::ToValue};
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        {
            let mut tx = ArrayTransaction::new(&mut store, id);
            tx.insert_map(0, |map_tx| {
                map_tx.write_register("name", MvRegValue::String("Task 1".to_string()));
            });
            let _delta = tx.commit();
        }
        // Verify map was inserted
        let item = store.store.get(0).unwrap();
        let name = item.map.get(&"name".to_string()).unwrap();
        assert_eq!(
            name.reg.value().unwrap(),
            &MvRegValue::String("Task 1".to_string())
        );
    }

    // A nested array built via `insert_array` must retain its elements.
    #[test]
    fn array_transaction_insert_array() {
        use crate::crdts::{NoExtensionTypes, mvreg::MvRegValue};
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        {
            let mut tx = ArrayTransaction::new(&mut store, id);
            tx.insert_array(0, |nested_tx| {
                nested_tx.insert_register(0, MvRegValue::U64(1));
                nested_tx.insert_register(1, MvRegValue::U64(2));
            });
            let _delta = tx.commit();
        }
        // Verify nested array was created
        let item = store.store.get(0).unwrap();
        assert_eq!(item.array.len(), 2);
    }

    // `get` on a fresh transaction returns the previously committed value.
    #[test]
    fn array_transaction_get() {
        use crate::crdts::{NoExtensionTypes, mvreg::MvRegValue, snapshot::ToValue};
        use crate::transaction::CrdtValue;
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        {
            let mut tx = ArrayTransaction::new(&mut store, id);
            tx.insert_register(0, MvRegValue::String("hello".to_string()));
            let _delta = tx.commit();
        }
        {
            let tx = ArrayTransaction::new(&mut store, id);
            let value = tx.get(0).expect("should have item at 0");
            match value {
                CrdtValue::Register(reg) => {
                    assert_eq!(
                        reg.value().unwrap(),
                        &MvRegValue::String("hello".to_string())
                    );
                }
                _ => panic!("Expected Register variant"),
            }
        }
    }

    // `get` wraps register values in the `CrdtValue::Register` variant.
    #[test]
    fn array_transaction_get_returns_crdt_value() {
        use crate::crdts::{NoExtensionTypes, mvreg::MvRegValue, snapshot::ToValue};
        use crate::transaction::CrdtValue;
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        {
            let mut tx = ArrayTransaction::new(&mut store, id);
            tx.insert_register(0, MvRegValue::String("hello".to_string()));
            let _delta = tx.commit();
        }
        {
            let tx = ArrayTransaction::new(&mut store, id);
            let value = tx.get(0).expect("should have item at 0");
            match value {
                CrdtValue::Register(reg) => {
                    assert_eq!(
                        reg.value().unwrap(),
                        &MvRegValue::String("hello".to_string())
                    );
                }
                _ => panic!("Expected Register variant, got {value:?}"),
            }
        }
    }

    // `get` wraps nested maps in the `CrdtValue::Map` variant.
    #[test]
    fn array_transaction_get_returns_map() {
        use crate::crdts::{NoExtensionTypes, mvreg::MvRegValue, snapshot::ToValue};
        use crate::transaction::CrdtValue;
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        {
            let mut tx = ArrayTransaction::new(&mut store, id);
            tx.insert_map(0, |map_tx| {
                map_tx.write_register("field", MvRegValue::U64(42));
            });
            let _delta = tx.commit();
        }
        {
            let tx = ArrayTransaction::new(&mut store, id);
            let value = tx.get(0).expect("should have item at 0");
            match value {
                CrdtValue::Map(map) => {
                    let field = map.get(&"field".to_string()).unwrap();
                    assert_eq!(field.reg.value().unwrap(), &MvRegValue::U64(42));
                }
                _ => panic!("Expected Map variant, got {value:?}"),
            }
        }
    }

    // `get` wraps nested arrays in the `CrdtValue::Array` variant.
    #[test]
    fn array_transaction_get_returns_array() {
        use crate::crdts::{NoExtensionTypes, mvreg::MvRegValue};
        use crate::transaction::CrdtValue;
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        {
            let mut tx = ArrayTransaction::new(&mut store, id);
            tx.insert_array(0, |nested_tx| {
                nested_tx.insert_register(0, MvRegValue::U64(1));
                nested_tx.insert_register(1, MvRegValue::U64(2));
            });
            let _delta = tx.commit();
        }
        {
            let tx = ArrayTransaction::new(&mut store, id);
            let value = tx.get(0).expect("should have item at 0");
            match value {
                CrdtValue::Array(array) => {
                    assert_eq!(array.len(), 2);
                }
                _ => panic!("Expected Array variant, got {value:?}"),
            }
        }
    }

    // Out-of-bounds reads return `None` rather than panicking.
    #[test]
    fn array_transaction_get_out_of_bounds() {
        use crate::crdts::NoExtensionTypes;
        let mut store = CausalDotStore::<OrArray<NoExtensionTypes>>::default();
        let id = Identifier::new(0, 0);
        let tx = ArrayTransaction::new(&mut store, id);
        assert!(tx.get(0).is_none());
        assert!(tx.get(100).is_none());
    }
}
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/transaction/mod.rs
src/transaction/mod.rs
//! Transaction-based API for ergonomic CRDT mutations. //! //! This module provides a transaction-based API for making changes to DSON stores. //! Unlike the callback-based `api` module, transactions provide: //! //! - **Method chaining** - No nested callbacks //! - **Explicit conflict handling** - Enums force handling of type conflicts //! - **Automatic rollback** - Changes drop unless you call `commit()` //! - **Automatic delta management** - Deltas accumulate and return on commit //! //! # Example //! //! ``` //! use dson::{CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::CrdtValue}; //! use dson::crdts::snapshot::ToValue; //! //! let mut store = CausalDotStore::<OrMap<String>>::default(); //! let id = Identifier::new(0, 0); //! //! // Create a transaction //! let mut tx = store.transact(id); //! //! // Write values //! tx.write_register("name", MvRegValue::String("Alice".to_string())); //! tx.write_register("age", MvRegValue::U64(30)); //! //! // IMPORTANT: You must call commit() or changes are lost //! let delta = tx.commit(); //! //! // Read with explicit type handling //! let tx = store.transact(id); //! match tx.get(&"name".to_string()) { //! Some(CrdtValue::Register(reg)) => { //! println!("Name: {:?}", reg.value().unwrap()); //! } //! Some(CrdtValue::Conflicted(conflicts)) => { //! println!("Type conflict!"); //! } //! None => { //! println!("Key not found"); //! } //! _ => {} //! } //! ``` //! //! # Transaction Semantics //! //! Both [`MapTransaction`] and [`ArrayTransaction`] clone the store and work on the copy. //! Changes apply immediately to the clone, enabling reads within the transaction to see //! uncommitted changes. Call `commit()` to apply changes permanently. Drop the transaction //! without committing to discard all changes (automatic rollback). //! //! ## How Transactions Work //! //! - **On creation**: The store is cloned //! - **During operations**: Changes apply to the cloned store //! 
- **On commit**: The clone swaps back into the original store //! - **On drop**: Changes discard automatically if not committed //! //! ## Why This Design //! //! This provides: //! - **Automatic rollback**: Drop the transaction to undo changes //! - **Isolation**: Reads see uncommitted changes within the same transaction //! - **Simplicity**: What you write is what you read //! //! ## Performance Tradeoff //! //! The transaction API trades performance for ergonomics. Top-level transactions clone the store //! on creation and apply each operation eagerly to the clone. This enables rollback support //! and ensures reads within the transaction see uncommitted changes. //! //! Benchmarks on an empty map show **2-2.5x overhead** compared to the raw API: //! //! | Operation | Raw API | Transaction | Overhead | //! |-----------|---------|-------------|----------| //! | Insert | 156 ns | 347 ns | 2.2x | //! | Update | 159 ns | 344 ns | 2.2x | //! | Remove | 50 ns | 69 ns | 1.4x | //! //! The overhead stems from the clone-and-swap implementation. Top-level transactions clone the //! store on creation and apply each operation eagerly to the clone. This ensures reads within //! the transaction see uncommitted changes and enables automatic rollback on drop. //! //! ### Nested Transaction Optimization //! //! Nested transactions (`in_map`, `in_array`, `insert_map`, `insert_array`) use `mem::take` //! instead of cloning the parent store. This moves nested structures without copying: //! //! - **Shallow nesting (1-2 levels)**: Minimal impact //! - **Deep nesting (3+ levels)**: Savings from avoided parent store clones //! - **Large nested collections**: Saves proportional to parent store size //! //! The ~200-300ns overhead per operation is acceptable for most applications. For //! latency-critical single-field updates, use [`api`](crate::api). For complex mutations //! where clarity and safety outweigh microseconds, use transactions //! //! # Type Conflict Handling //! //! 
DSON's unique feature is preserving type conflicts. When different replicas //! concurrently write different types to the same key, DSON preserves both. //! The transaction API exposes this through the [`CrdtValue`] enum: //! //! ```no_run //! # use dson::transaction::{MapTransaction, CrdtValue}; //! # let tx: MapTransaction<String> = todo!(); //! match tx.get(&"field".to_string()) { //! Some(CrdtValue::Map(map)) => { /* single type: map */ } //! Some(CrdtValue::Array(array)) => { /* single type: array */ } //! Some(CrdtValue::Register(reg)) => { /* single type: register */ } //! Some(CrdtValue::Conflicted(c)) => { //! // Type conflict! //! if c.has_map() && c.has_array() { //! // Application must resolve //! } //! } //! None => { /* key doesn't exist */ } //! Some(CrdtValue::Empty) => { /* key exists but is empty */ } //! } //! ``` //! //! # Nested Operations //! //! The transaction API provides uniform ergonomics at all nesting levels: //! //! ``` //! # use dson::{CausalDotStore, Identifier, OrMap}; //! # use dson::crdts::mvreg::MvRegValue; //! # let mut store = CausalDotStore::<OrMap<String>>::default(); //! # let id = Identifier::new(0, 0); //! let mut tx = store.transact(id); //! //! tx.in_map("user", |user_tx| { //! user_tx.write_register("email", MvRegValue::String("alice@example.com".to_string())); //! user_tx.write_register("age", MvRegValue::U64(30)); //! //! user_tx.in_array("tags", |tags_tx| { //! tags_tx.insert_register(0, MvRegValue::String("admin".to_string())); //! // Nested transaction commits automatically when closure returns //! }); //! // Nested transaction commits automatically when closure returns //! }); //! //! // Top-level transaction requires explicit commit //! let delta = tx.commit(); //! ``` //! //! **Important**: Nested transactions (`in_map`, `in_array`, `insert_map`, `insert_array`) //! commit automatically when their closure returns. Only the top-level transaction requires //! an explicit `commit()` call. //! //! 
Use [`MapTransaction::in_map`] and [`MapTransaction::in_array`] for nesting. //! Use [`ArrayTransaction::insert_map`] and [`ArrayTransaction::insert_array`] //! for arrays containing collections. mod array_transaction; mod conflicted; mod crdt_value; mod delta; mod map_transaction; pub use array_transaction::ArrayTransaction; pub use conflicted::ConflictedValue; pub use crdt_value::CrdtValue; pub use delta::Delta; pub use map_transaction::MapTransaction;
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/transaction/crdt_value.rs
src/transaction/crdt_value.rs
use super::ConflictedValue; use crate::crdts::TypeVariantValue; use crate::{ExtensionType, MvReg, OrArray, OrMap}; use std::{fmt, hash::Hash}; /// Result of reading a value from a transaction. /// /// DSON preserves type conflicts, so reads must handle multiple possibilities. /// This enum forces explicit handling of: /// - Map /// - Array /// - Register /// - Concurrent type conflicts /// - Missing key /// /// # Example /// /// ```no_run /// # use dson::transaction::{MapTransaction, CrdtValue}; /// # let tx: MapTransaction<String> = todo!(); /// match tx.get(&"user".to_string()) { /// Some(CrdtValue::Map(map)) => { /* work with map */ } /// Some(CrdtValue::Conflicted(conflicts)) => { /* resolve conflict */ } /// None => { /* key doesn't exist */ } /// _ => { /* other types */ } /// } /// ``` #[derive(Debug)] pub enum CrdtValue<'tx, K, C = crate::crdts::NoExtensionTypes> where K: Hash + Eq + fmt::Debug + Clone, C: ExtensionType, { /// The value is a map (no type conflict). Map(&'tx OrMap<String, C>), /// The value is an array (no type conflict). Array(&'tx OrArray<C>), /// The value is a register (no type conflict). Register(&'tx MvReg), /// The value has concurrent type conflicts. Conflicted(ConflictedValue<'tx, K, C>), /// The key exists but all types are empty (bottom). Empty, } impl<'tx, K, C> CrdtValue<'tx, K, C> where K: Hash + Eq + fmt::Debug + Clone, C: ExtensionType, { /// Creates a CrdtValue by classifying a TypeVariantValue. 
/// /// Inspects which CRDT types are non-empty (non-bottom) and returns /// the appropriate variant: /// - If multiple types are present: `Conflicted` /// - If only one type is present: the specific variant (Map/Array/Register) /// - If all types are empty: `Empty` pub fn from_type_variant(value: &'tx TypeVariantValue<C>) -> Self { use crate::dotstores::DotStore; // Check if there's a type conflict (multiple types are non-bottom) let has_multiple_types = { let mut count = 0; if !value.map.is_bottom() { count += 1; } if !value.array.is_bottom() { count += 1; } if !value.reg.is_bottom() { count += 1; } count > 1 }; if has_multiple_types { CrdtValue::Conflicted(ConflictedValue::new(value)) } else if !value.reg.is_bottom() { CrdtValue::Register(&value.reg) } else if !value.map.is_bottom() { CrdtValue::Map(&value.map) } else if !value.array.is_bottom() { CrdtValue::Array(&value.array) } else { CrdtValue::Empty } } } #[cfg(test)] mod tests { use super::*; use crate::crdts::mvreg::MvRegValue; use crate::crdts::{NoExtensionTypes, TypeVariantValue}; use crate::dotstores::DotStore; use crate::sentinel::DummySentinel; use crate::{CausalDotStore, Identifier, OrMap}; #[test] fn from_type_variant_register_only() { // Create a TypeVariantValue with only register populated let store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); let delta = store.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::U64(42), ctx, id), "key".to_string(), &store.context, id, ); let type_variant = delta.store.get(&"key".to_string()).unwrap(); // Test from_type_variant let value: CrdtValue<'_, String> = CrdtValue::from_type_variant(type_variant); match value { CrdtValue::Register(reg) => { use crate::crdts::snapshot::ToValue; assert_eq!(reg.value().unwrap(), &MvRegValue::U64(42)); } _ => panic!("Expected Register variant"), } } #[test] fn from_type_variant_empty() { // Empty TypeVariantValue (all fields are bottom) let type_variant = 
TypeVariantValue::<NoExtensionTypes>::default(); let value: CrdtValue<'_, String> = CrdtValue::from_type_variant(&type_variant); match value { CrdtValue::Empty => { /* expected */ } _ => panic!("Expected Empty variant"), } } #[test] fn from_type_variant_map_only() { // Create a TypeVariantValue with only map populated let store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Create nested map let delta = store.store.apply_to_map( |map, ctx, id| { map.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::String("test".to_string()), ctx, id), "field".to_string(), ctx, id, ) }, "key".to_string(), &store.context, id, ); let type_variant = delta.store.get(&"key".to_string()).unwrap(); let value: CrdtValue<'_, String> = CrdtValue::from_type_variant(type_variant); match value { CrdtValue::Map(map) => { assert!(!map.is_bottom()); } _ => panic!("Expected Map variant"), } } #[test] fn from_type_variant_array_only() { // Create a TypeVariantValue with only array populated let store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Create array with one element let delta = store.store.apply_to_array( |array, ctx, id| array.insert_idx_register(0, MvRegValue::U64(1), ctx, id), "key".to_string(), &store.context, id, ); let type_variant = delta.store.get(&"key".to_string()).unwrap(); let value: CrdtValue<'_, String> = CrdtValue::from_type_variant(type_variant); match value { CrdtValue::Array(array) => { assert_eq!(array.len(), 1); } _ => panic!("Expected Array variant"), } } #[test] fn from_type_variant_conflicted() { // Create a TypeVariantValue with multiple types (type conflict) let store = CausalDotStore::<OrMap<String>>::default(); let id1 = Identifier::new(0, 0); let id2 = Identifier::new(1, 0); // Replica 1 writes register let delta1 = store.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::U64(42), ctx, id), "key".to_string(), &store.context, id1, ); // Replica 2 writes array (concurrent with 
delta1) let delta2 = store.store.apply_to_array( |array, ctx, id| { array.insert_idx_register(0, MvRegValue::String("conflict".to_string()), ctx, id) }, "key".to_string(), &store.context, id2, ); // Join both deltas to create conflict let combined = delta1.join(delta2, &mut DummySentinel).unwrap(); let type_variant = combined.store.get(&"key".to_string()).unwrap(); let value: CrdtValue<'_, String> = CrdtValue::from_type_variant(type_variant); match value { CrdtValue::Conflicted(conflicts) => { assert!(conflicts.has_register()); assert!(conflicts.has_array()); assert_eq!(conflicts.conflict_count(), 2); } _ => panic!("Expected Conflicted variant, got {value:?}"), } } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/api/array.rs
src/api/array.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use crate::{ CausalContext, CausalDotStore, ExtensionType, Identifier, MvReg, OrArray, OrMap, crdts::{ TypeVariantValue, Value, orarray::{Position, Uid}, snapshot::{self, ToValue}, }, }; use std::{convert::Infallible, fmt}; /* /// insert(𝑖𝑑𝑥, 𝑜𝛿 𝑖 ) – given an index 𝑖𝑑𝑥 and a method 𝑜𝛿 /// 𝑖 from /// the API of some CRDT of type 𝑉 , The method assigns a /// unique id 𝑢𝑖𝑑, assigns a stable position identifier 𝑝 such that /// the new element in the sorted array appears at index 𝑖𝑑𝑥, /// and invokes apply(𝑢𝑖𝑑, 𝑜𝛿 /// 𝑖 , 𝑝). update(𝑖𝑑𝑥, 𝑜𝛿 𝑖 ) – given an index 𝑖𝑑𝑥 and a method 𝑜𝛿 𝑖 of some CRDT type 𝑉 , The method finds the 𝑢𝑖𝑑 corresponding to the element at index 𝑖𝑑𝑥, finds the position 𝑝, and invokes apply(𝑢𝑖𝑑, 𝑜𝛿 𝑖 , 𝑝). move(𝑜𝑙𝑑_𝑖𝑑𝑥, 𝑛𝑒𝑤_𝑖𝑑𝑥) – given two indexes, finds the ele- ment 𝑢𝑖𝑑 corresponding to the element at index 𝑜𝑙𝑑_𝑖𝑑𝑥, calculates the stable position identifier 𝑝 such that the el- ement in the sorted array will be at index 𝑛𝑒𝑤_𝑖𝑑𝑥, and invokes move(𝑢𝑖𝑑, 𝑝). delete(𝑖𝑑𝑥) – given an index 𝑖𝑑𝑥, finds the element 𝑢𝑖𝑑 corre- sponding to the element at index 𝑖𝑑𝑥, and invokes delete(𝑢𝑖𝑑). get(𝑖𝑑𝑥) – given an index 𝑖𝑑𝑥, finds the element 𝑢𝑖𝑑 corre- sponding to the element at index 𝑖𝑑𝑥, and invokes get(𝑢𝑖𝑑). */ /// Returns the values of this array without collapsing conflicts. pub fn values<C>(m: &OrArray<C>) -> snapshot::OrArray<snapshot::AllValues<'_, C::ValueRef<'_>>> where C: ExtensionType, { m.values() } /// Returns the values of this array assuming (and asserting) no conflicts on element values. // NOTE: A type alias won't help much here :melt:. #[allow(clippy::type_complexity)] pub fn value<C>( m: &OrArray<C>, ) -> Result< snapshot::OrArray<snapshot::CollapsedValue<'_, C::ValueRef<'_>>>, Box<snapshot::SingleValueError<<&OrArray<C> as ToValue>::LeafValue>>, > where C: ExtensionType, { m.value() } /// Creates a new array. 
pub fn create<C>() -> impl Fn(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> where C: ExtensionType + fmt::Debug + PartialEq, { move |m, cc, id| m.create(cc, id) } /// Inserts a new element at the given index. pub fn insert<O, C>( o: O, idx: usize, ) -> impl FnOnce(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> where O: FnOnce(&CausalContext, Identifier) -> CausalDotStore<Value<C>>, C: ExtensionType + fmt::Debug + PartialEq, { move |m, cc, id| { let uid = cc.next_dot_for(id).into(); let p = create_position_for_index(m, idx); m.insert(uid, o, p, cc, id) } } /// Inserts a new map at the given index. pub fn insert_map<O, C>( o: O, idx: usize, ) -> impl FnOnce(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> where O: FnOnce(&CausalContext, Identifier) -> CausalDotStore<OrMap<String, C>>, C: ExtensionType + fmt::Debug + PartialEq, { insert(move |cc, id| (o)(cc, id).map_store(Value::Map), idx) } /// Inserts a new array at the given index. pub fn insert_array<O, C>( o: O, idx: usize, ) -> impl FnOnce(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> where O: FnOnce(&CausalContext, Identifier) -> CausalDotStore<OrArray<C>>, C: ExtensionType + fmt::Debug + PartialEq, { insert(move |cc, id| (o)(cc, id).map_store(Value::Array), idx) } /// Inserts a new register at the given index. pub fn insert_register<O, C>( o: O, idx: usize, ) -> impl FnOnce(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> where O: FnOnce(&CausalContext, Identifier) -> CausalDotStore<MvReg>, C: ExtensionType + fmt::Debug + PartialEq, { insert(move |cc, id| (o)(cc, id).map_store(Value::Register), idx) } /// Applies a function to the element at the given index. 
pub fn apply<O, C>( o: O, idx: usize, ) -> impl FnOnce(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> where O: FnOnce(&TypeVariantValue<C>, &CausalContext, Identifier) -> CausalDotStore<Value<C>>, C: ExtensionType + fmt::Debug + PartialEq, { move |m, cc, id| { let uid = uid_from_index(m, idx); assert_ne!(idx, m.len(), "index out of bounds"); let p = create_position_for_index(m, idx); m.apply(uid, o, p, cc, id) } } /// Applies a function to the map at the given index. pub fn apply_to_map<O, C>( o: O, idx: usize, ) -> impl FnOnce(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> where O: FnOnce(&OrMap<String, C>, &CausalContext, Identifier) -> CausalDotStore<OrMap<String, C>>, C: ExtensionType + fmt::Debug + PartialEq, { apply( move |m, cc, id| (o)(&m.map, cc, id).map_store(Value::Map), idx, ) } /// Applies a function to the array at the given index. pub fn apply_to_array<O, C>( o: O, idx: usize, ) -> impl FnOnce(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> where O: FnOnce(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>>, C: ExtensionType + fmt::Debug + PartialEq, { apply( move |m, cc, id| (o)(&m.array, cc, id).map_store(Value::Array), idx, ) } /// Applies a function to the register at the given index. pub fn apply_to_register<O, C>( o: O, idx: usize, ) -> impl FnOnce(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> where O: FnOnce(&MvReg, &CausalContext, Identifier) -> CausalDotStore<MvReg>, C: ExtensionType + fmt::Debug + PartialEq, { apply( move |m, cc, id| (o)(&m.reg, cc, id).map_store(Value::Register), idx, ) } /// Moves an element from one index to another. 
pub fn mv<C>( from: usize, to: usize, ) -> impl Fn(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> where C: ExtensionType + fmt::Debug + PartialEq, { move |m, cc, id| { let uid = uid_from_index(m, from); let p = create_position_for_index(m, to); m.mv(uid, p, cc, id) } } /// Deletes an element at the given index. pub fn delete<'s, C>( idx: usize, ) -> impl Fn(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> + 's where C: ExtensionType + fmt::Debug + PartialEq, { move |m, cc, id| { let uid = uid_from_index(m, idx); m.delete(uid, cc, id) } } /// Clears the array. pub fn clear<C>() -> impl Fn(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>> where C: ExtensionType + fmt::Debug + PartialEq, { move |m, cc, id| m.clear(cc, id) } fn ids<C>(m: &OrArray<C>) -> Vec<((), Uid, Position)> { // TODO(https://github.com/rust-lang/rust/issues/61695): use into_ok m.with_list(|_, _, _| Ok::<_, Infallible>(Some(()))) .unwrap() } /// Computes the [`Position`] a new element should have to end up at `[idx]`. /// /// Inserting a new element with the given [`Position`] will end up shifting all later elements to /// the rigth by one. For example, inserting an element with position `create_position_for_index(_, /// 0)` will make the current `[0]` be at `[1]`, the current `[1]` at `[2]`, and so on. fn create_position_for_index<C>(m: &OrArray<C>, idx: usize) -> Position { // NOTE: the original code passes cc.id() to the Position::between calls here, but that // argument is ignored, so it's removed in our implementation; // we don't have to sort all the items to resolve the first/last position. // not doing the sort saves us from the `.collect` in `with_list`, which would result in a // `Vec` that gets pretty much immediately thrown away afterwards. // TODO: cache min/max Position inside OrArray maybe? 
if idx == 0 { let min_p = m.iter_as_is().map(|(_, _, p)| p).min(); return Position::between(None, min_p); } if idx == m.len() { let max_p = m.iter_as_is().map(|(_, _, p)| p).max(); return Position::between(max_p, None); } assert!( idx < m.len(), "index out of bounds ({idx} when length is {})", m.len() ); // NOTE: we know here that !m.is_empty(), otherwise we'd either hit idx == 0 or the asset. let ids = ids(m); let pos_at_index = ids.get(idx).map(|(_, _, p)| *p); let pos_at_previous_index = if idx == 0 { None } else { Some( ids.get(idx - 1) .expect("we check for out-of-bounds above") .2, ) }; Position::between(pos_at_previous_index, pos_at_index) } fn uid_from_index<C>(m: &OrArray<C>, idx: usize) -> Uid { ids(m)[idx].1 }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/api/register.rs
src/api/register.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use crate::{ CausalContext, CausalDotStore, Identifier, MvReg, crdts::{ mvreg::MvRegValue, snapshot::{SingleValueError, ToValue}, }, }; /// Returns the values of this register without collapsing conflicts. pub fn values(m: &MvReg) -> impl ExactSizeIterator<Item = &MvRegValue> { m.values().into_iter() } /// Returns the value of this register assuming (and asserting) no conflicts on element values. pub fn value(m: &MvReg) -> Result<&MvRegValue, Box<SingleValueError<MvRegValue>>> { m.value() } /// Writes a value to the register. pub fn write( v: MvRegValue, ) -> impl FnMut(&MvReg, &CausalContext, Identifier) -> CausalDotStore<MvReg> { move |m, cc, id| m.write(v.clone(), cc, id) } /// Clears the register. pub fn clear() -> impl Fn(&MvReg, &CausalContext, Identifier) -> CausalDotStore<MvReg> { move |m, _cc, _id| m.clear() }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/api/timestamp.rs
src/api/timestamp.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! This module provides a `Timestamp` type for efficient encoding of UTC datetimes. //! //! The `Timestamp` is represented as a 64-bit integer of milliseconds since the //! UNIX epoch, but is constrained to a range of years from 0 to 9999. //! This allows for compact and performant representation of datetimes. use std::fmt; #[cfg(feature = "chrono")] use crate::datetime; #[cfg(feature = "chrono")] use chrono::{DateTime, Datelike, Utc}; #[cfg(feature = "chrono")] use std::str::FromStr; /// Error returned when creating or parsing a `Timestamp`. #[derive(Debug, Clone, PartialEq, Eq)] pub enum TimestampError { /// The year is outside the supported range of `0` to `9999`. InvalidYear(i32), /// The string could not be parsed as a valid RFC 3339 datetime. Parse(String), } impl fmt::Display for TimestampError { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { match self { TimestampError::InvalidYear(year) => write!( f, "invalid year {year}, supported years are between 0 and 9999 included" ), TimestampError::Parse(s) => { write!(f, "failed to parse date {s} in rfc3339 format") } } } } impl std::error::Error for TimestampError {} /// Represents a UTC datetime with millisecond precision. /// /// `Timestamp` is stored as an `i64` representing the number of milliseconds since the /// UNIX epoch. /// /// The valid range for a `Timestamp` is from `0000-01-01T00:00:00.000Z` to /// `9999-12-31T23:59:59.999Z`. #[derive(Clone, Copy, PartialEq, Eq, PartialOrd, Ord)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub struct Timestamp(i64); impl Timestamp { /// Creates a new `Timestamp` from a `chrono::DateTime<Utc>`. /// /// The datetime is truncated to millisecond precision. /// /// # Errors /// /// Returns an error if the year is outside the supported range of `0` to `9999`. 
#[cfg(feature = "chrono")] pub fn new(datetime: DateTime<Utc>) -> Result<Timestamp, TimestampError> { let year = datetime.year(); // NOTE: This is arguably more clear. #[expect(clippy::manual_range_contains)] if year < 0 || year > 9999 { return Err(TimestampError::InvalidYear(year)); } let truncated_timestamp = datetime.timestamp_millis(); Ok(Timestamp(truncated_timestamp)) } #[cfg(not(feature = "chrono"))] /// Creates a new `Timestamp` from an i64. This operation always succeeds. pub fn new(val: i64) -> Result<Timestamp, TimestampError> { Ok(Self(val)) } /// Creates a `Timestamp` from a number of milliseconds since the UNIX epoch. /// /// Returns `None` if the number of milliseconds corresponds to a datetime outside /// the supported range. #[cfg(feature = "chrono")] pub fn from_millis(milliseconds: i64) -> Option<Self> { (Self::MIN.as_millis()..=Self::MAX.as_millis()) .contains(&milliseconds) .then_some(Self(milliseconds)) } #[cfg(not(feature = "chrono"))] /// Creates a `Timestamp` from a number of milliseconds since the UNIX epoch. /// This operation always succeeds. pub fn from_millis(milliseconds: i64) -> Option<Self> { Some(Self(milliseconds)) } /// Returns the number of milliseconds since the UNIX epoch as an `i64`. pub fn as_millis(&self) -> i64 { self.0 } /// Converts the `Timestamp` to a `chrono::DateTime<Utc>`. #[cfg(feature = "chrono")] pub(crate) fn as_datetime(&self) -> DateTime<Utc> { DateTime::from_timestamp_millis(self.0) .expect("roundtrips with `DateTime::timestamp_millis`") } /// The minimum supported `Timestamp`: `0000-01-01T00:00:00.000Z`. #[cfg(feature = "chrono")] pub const MIN: Timestamp = Timestamp(datetime!(0000-01-01 00:00:00 Z).timestamp_millis()); /// The maximum supported `Timestamp`: `9999-12-31T23:59:59.999Z`. 
#[cfg(feature = "chrono")] pub const MAX: Timestamp = Timestamp(datetime!(10000-01-01 00:00:00 Z).timestamp_millis() - 1); } #[cfg(feature = "chrono")] impl fmt::Display for Timestamp { // Formats the `Timestamp` as an RFC 3339 string. fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { self.as_datetime().fmt(f) } } #[cfg(not(feature = "chrono"))] impl fmt::Display for Timestamp { // Formats the `Timestamp` as an RFC 3339 string. fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{}", self.0) } } impl fmt::Debug for Timestamp { // Formats the `Timestamp` as an RFC 3339 string for debugging. fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { write!(f, "{self}") } } #[cfg(all(feature = "json", feature = "chrono"))] impl From<Timestamp> for serde_json::Value { // The string is formatted according to RFC 3339 with millisecond precision. fn from(value: Timestamp) -> Self { serde_json::Value::String( value .as_datetime() .to_rfc3339_opts(chrono::SecondsFormat::Millis, true) .to_string(), ) } } #[cfg(feature = "chrono")] impl FromStr for Timestamp { type Err = TimestampError; fn from_str(s: &str) -> Result<Self, Self::Err> { let datetime = DateTime::parse_from_rfc3339(s).map_err(|_| TimestampError::Parse(s.to_string()))?; Timestamp::new(datetime.to_utc()) } } #[cfg(all(test, feature = "chrono"))] mod tests { use super::*; use chrono::{DateTime, Utc}; #[test] fn new_timestamp_truncates_at_millisecond_precision() { assert_eq!( "1996-12-19T16:39:57.123555Z".parse::<Timestamp>().unwrap(), "1996-12-19T16:39:57.123Z".parse::<Timestamp>().unwrap() ) } #[test] fn constants_are_correctly_computed() { assert_eq!( "0000-01-01T00:00:00Z".parse::<Timestamp>().unwrap(), Timestamp::MIN ); assert_eq!( "9999-12-31T23:59:59.999Z".parse::<Timestamp>().unwrap(), Timestamp::MAX ); } #[test] fn timestamp_constructors() { let unparsable_timestamp: Result<Timestamp, _> = "0000-01-01T00:00:00ZTR".parse(); assert!(unparsable_timestamp.is_err()); let 
out_of_range_year = DateTime::<Utc>::UNIX_EPOCH.with_year(10_000).unwrap(); assert!(Timestamp::new(out_of_range_year).is_err()); let parseable_timestamp: Result<Timestamp, _> = "0000-01-01T00:00:00Z".parse(); assert!(parseable_timestamp.is_ok()) } #[test] fn parse_accepts_any_timezone() { assert_eq!( "0000-01-01T00:00:00Z".parse::<Timestamp>().unwrap(), "0000-01-01T01:00:00+01:00".parse::<Timestamp>().unwrap() ); } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/api/map.rs
src/api/map.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. use crate::{ CausalContext, CausalDotStore, ExtensionType, Identifier, MvReg, OrArray, OrMap, crdts::{ TypeVariantValue, Value, snapshot::{self, ToValue}, }, }; use std::{borrow::Borrow, fmt, hash::Hash}; /// Returns the values of this map without collapsing conflicts. pub fn values<K, C>( m: &OrMap<K, C>, ) -> snapshot::OrMap<'_, K, snapshot::AllValues<'_, C::ValueRef<'_>>> where K: Hash + Eq + fmt::Display, C: ExtensionType, { m.values() } /// Returns the values of this map assuming (and asserting) no conflicts on element values. // NOTE: A type alias won't help much here :melt:. #[allow(clippy::type_complexity)] pub fn value<K, C>( m: &OrMap<K, C>, ) -> Result< snapshot::OrMap<'_, K, snapshot::CollapsedValue<'_, C::ValueRef<'_>>>, Box<snapshot::SingleValueError<<&OrMap<K, C> as ToValue>::LeafValue>>, > where K: Hash + Eq + fmt::Debug + fmt::Display + Clone, C: ExtensionType, { m.value() } /// Creates a new map. pub fn create<K, C>() -> impl Fn(&OrMap<K, C>, &CausalContext, Identifier) -> CausalDotStore<OrMap<K, C>> where K: Hash + Eq + fmt::Debug + Clone, C: ExtensionType, { move |m, cc, id| m.create(cc, id) } /// Applies a function to the value at the given key. pub fn apply<K, C, O>( o: O, k: K, ) -> impl FnOnce(&OrMap<K, C>, &CausalContext, Identifier) -> CausalDotStore<OrMap<K, C>> where K: Hash + Eq + fmt::Debug + Clone, O: FnOnce(&TypeVariantValue<C>, &CausalContext, Identifier) -> CausalDotStore<Value<C>>, C: ExtensionType, { move |m, cc, id| m.apply(o, k.clone(), cc, id) } /// Applies a function to the map at the given key. 
pub fn apply_to_map<K, C, O>( o: O, k: K, ) -> impl FnOnce(&OrMap<K, C>, &CausalContext, Identifier) -> CausalDotStore<OrMap<K, C>> where K: Hash + Eq + fmt::Debug + Clone, O: FnOnce(&OrMap<String, C>, &CausalContext, Identifier) -> CausalDotStore<OrMap<String, C>>, C: ExtensionType, { move |m, cc, id| m.apply_to_map(o, k.clone(), cc, id) } /// Applies a function to the array at the given key. pub fn apply_to_array<K, C, O>( o: O, k: K, ) -> impl FnOnce(&OrMap<K, C>, &CausalContext, Identifier) -> CausalDotStore<OrMap<K, C>> where K: Hash + Eq + fmt::Debug + Clone, O: FnOnce(&OrArray<C>, &CausalContext, Identifier) -> CausalDotStore<OrArray<C>>, C: ExtensionType, { move |m, cc, id| m.apply_to_array(o, k.clone(), cc, id) } /// Applies a function to the register at the given key. pub fn apply_to_register<K, C, O>( o: O, k: K, ) -> impl FnOnce(&OrMap<K, C>, &CausalContext, Identifier) -> CausalDotStore<OrMap<K, C>> where K: Hash + Eq + fmt::Debug + Clone, O: FnOnce(&MvReg, &CausalContext, Identifier) -> CausalDotStore<MvReg>, C: ExtensionType, { move |m, cc, id| m.apply_to_register(o, k.clone(), cc, id) } /// Removes a key from the map. pub fn remove<Q, K, C>( k: &Q, ) -> impl Fn(&OrMap<K, C>, &CausalContext, Identifier) -> CausalDotStore<OrMap<K, C>> + '_ where K: Hash + Eq + fmt::Debug + Clone + Borrow<Q>, Q: Hash + Eq + ?Sized, C: ExtensionType, { move |m, cc, id| m.remove(k, cc, id) } /// Clears the map. pub fn clear<K, C>() -> impl Fn(&OrMap<K, C>, &CausalContext, Identifier) -> CausalDotStore<OrMap<K, C>> where K: Hash + Eq + fmt::Debug + Clone, C: ExtensionType, { move |m, cc, id| m.clear(cc, id) }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/dotstores/recording_sentinel.rs
src/dotstores/recording_sentinel.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! This module contains an implementation of Sentinel that simply records //! all calls in a human readable form. This is mostly useful for tests. use crate::{ crdts::ValueType, sentinel::{KeySentinel, Sentinel, TypeSentinel, ValueSentinel, Visit}, }; use std::{convert::Infallible, fmt::Debug}; /// A sentinel that records all calls. #[derive(Default)] pub struct RecordingSentinel { path: Vec<String>, /// A string-representation of each call that the sentinel has received. /// This is mostly useful for tests. pub changes_seen: Vec<String>, } impl RecordingSentinel { /// Create a new PeekingSentinel pub fn new() -> RecordingSentinel { RecordingSentinel { path: vec![], changes_seen: vec![], } } } impl Sentinel for RecordingSentinel { type Error = Infallible; } impl<K: Debug> Visit<K> for RecordingSentinel { fn enter(&mut self, key: &K) -> Result<(), Self::Error> { self.path.push(format!("{key:?}")); Ok(()) } fn exit(&mut self) -> Result<(), Self::Error> { self.path.pop(); Ok(()) } } impl KeySentinel for RecordingSentinel { fn create_key(&mut self) -> Result<(), Self::Error> { self.changes_seen .push(format!("create_key at {}", self.path.join("/"))); Ok(()) } fn delete_key(&mut self) -> Result<(), Self::Error> { self.changes_seen .push(format!("delete_key at {}", self.path.join("/"))); Ok(()) } } impl<V: Debug> ValueSentinel<V> for RecordingSentinel { fn set(&mut self, value: &V) -> Result<(), Self::Error> { self.changes_seen.push(format!("set {value:?}")); Ok(()) } fn unset(&mut self, value: V) -> Result<(), Self::Error> { self.changes_seen.push(format!("unset {:?}", &value)); Ok(()) } } impl<V: Debug> TypeSentinel<V> for RecordingSentinel { fn set_type(&mut self, value_type: ValueType<V>) -> Result<(), Self::Error> { self.changes_seen.push(format!("set_type {value_type:?}")); Ok(()) } fn unset_type(&mut self, value_type: ValueType<V>) -> Result<(), Self::Error> { self.changes_seen.push(format!("unset_type 
{value_type:?}")); Ok(()) } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/src/causal_context/interval.rs
src/causal_context/interval.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! # Interval and IntervalSet //! //! This module provides the `Interval` and `IntervalSet` data structures, which are //! used to efficiently represent sets of sequence numbers in a `CausalContext`. //! //! ## `Interval` //! //! An `Interval` represents a contiguous range of non-zero unsigned 64-bit integers. //! It is a space-efficient way to store a sequence of dots from a single actor. //! //! ## `IntervalSet` //! //! An `IntervalSet` is a collection of `Interval`s, which together represent the //! set of all dots observed from a single actor. It is implemented as a sorted //! vector of non-overlapping intervals, which allows for efficient storage and //! retrieval of sequence numbers. use std::{cmp::Ordering, num::NonZeroU64, ops::RangeInclusive}; /// Represents an interval of non-zero numbers. /// /// If end is unset, the interval contains only the starting point. /// /// We intentionally don't use an enum here so as to minimise space usage /// as much as possible (this is particularly important for serialization). 
#[derive(Clone, Copy, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub(super) struct Interval { /// Start of the interval (inclusive) start: NonZeroU64, #[cfg_attr( feature = "serde", serde(default, skip_serializing_if = "Option::is_none") )] /// End of the interval (inclusive) end: Option<NonZeroU64>, } impl std::fmt::Debug for Interval { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "{}", self.start)?; if let Some(end) = &self.end { write!(f, "..={end}")?; } Ok(()) } } impl PartialOrd for Interval { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { if self.end() < other.start() { Some(Ordering::Less) } else if self.start() > other.end() { Some(Ordering::Greater) } else if self.start() == other.start() && self.end() == other.end() { // total overlap Some(Ordering::Equal) } else { // partial overlap None } } } impl PartialEq<NonZeroU64> for Interval { fn eq(&self, other: &NonZeroU64) -> bool { *self == Self::point(*other) } } impl PartialOrd<NonZeroU64> for Interval { fn partial_cmp(&self, other: &NonZeroU64) -> Option<Ordering> { self.partial_cmp(&Self::point(*other)) } } impl From<NonZeroU64> for Interval { fn from(value: NonZeroU64) -> Self { Self::point(value) } } impl TryFrom<(NonZeroU64, Option<NonZeroU64>)> for Interval { type Error = IntervalError; fn try_from((start, end): (NonZeroU64, Option<NonZeroU64>)) -> Result<Self, Self::Error> { if let Some(end) = end { (end > start) .then_some(Self { start, end: Some(end), }) .ok_or(IntervalError("end must be > start")) } else { Ok(Self { start, end }) } } } #[derive(Debug)] pub struct IntervalError(&'static str); impl std::fmt::Display for IntervalError { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.0.fmt(f) } } impl std::error::Error for IntervalError {} impl TryFrom<u64> for Interval { type Error = IntervalError; fn try_from(value: u64) -> Result<Self, Self::Error> { Ok(Self::point( 
NonZeroU64::new(value).ok_or(IntervalError("value must be > 0"))?, )) } } impl TryFrom<RangeInclusive<u64>> for Interval { type Error = IntervalError; fn try_from(value: RangeInclusive<u64>) -> Result<Self, Self::Error> { assert!( value.start() < value.end(), "start must be < end: {value:?}" ); Ok(Self::span( NonZeroU64::new(*value.start()).ok_or(IntervalError("start must be > 0"))?, NonZeroU64::new(*value.end()).ok_or(IntervalError("end must be > 0"))?, )) } } #[derive(Debug)] pub enum IntervalDifference { Empty, Single(Interval), Split(Interval, Interval), } impl Interval { /// Creates a new [`Interval`] containing a single point. #[must_use] pub fn point(seq: NonZeroU64) -> Self { Self { start: seq, end: None, } } /// Creates a new [`Interval`] spanning more than one point. /// /// # Panics /// The given `start` must be strictly less than `end`, otherwise /// this function panics. If you must have `start` == `end`, use the /// [`Self::point`] constructor instead (or just pass `None` as the `end` /// argument). #[must_use] pub fn span(start: NonZeroU64, end: impl Into<Option<NonZeroU64>>) -> Self { let end = end.into(); if let Some(end) = end { assert!(start < end, "{start} < {end}"); } Self { start, end } } #[must_use] pub fn next_after(&self) -> NonZeroU64 { self.end.unwrap_or(self.start).saturating_add(1) } #[must_use] pub fn end(&self) -> NonZeroU64 { self.end.unwrap_or(self.start) } #[must_use] pub fn start(&self) -> NonZeroU64 { self.start } pub fn interval(&self) -> (NonZeroU64, Option<NonZeroU64>) { (self.start, self.end) } #[must_use] pub fn contains(&self, seq: NonZeroU64) -> bool { if let Some(end) = self.end { seq >= self.start && seq <= end } else { seq == self.start } } /// Returns whether `self` is a superset (A ⊇ B) of `other`. /// /// Note that if they are equal, this returns true. To test for proper /// superset (A ⊃ B) use [`Self::partial_set_cmp`] instead. 
#[must_use] pub fn is_superset(&self, other: &Self) -> bool { self.start() <= other.start() && other.end() <= self.end() } /// Iterator over all the sequence number that this interval holds pub fn seqs(&self) -> impl Iterator<Item = NonZeroU64> { // TODO: can simplify once https://github.com/rust-lang/rust/pull/127534 is stable (self.start.get()..=self.end.unwrap_or(self.start).get()) // SAFETY: start and end are non-zero, so all numbers in-between must be as well .map(|s| unsafe { NonZeroU64::new_unchecked(s) }) } /// Returns the partial ordering with respect to set comparison. /// /// - If `self` is a proper subset of `other` (A ⊂ B), the result is `Less`. /// - If `self` is a proper superset of `other` (A ⊃ B), the result is `Greater.` /// - If both intervals are the same (A = B), the result is `Equal`. /// /// Otherwise the result is `None`, indicating there isn't a well defined /// set hierarchy between them. This could mean there's no overlap, or that /// the overlap is partial. #[must_use] pub fn partial_set_cmp(&self, other: &Self) -> Option<Ordering> { if self.start() == other.start() && self.end() == other.end() { Some(Ordering::Equal) } else if self.start() <= other.start() && self.end() >= other.end() { Some(Ordering::Greater) } else if other.start() <= self.start() && other.end() >= self.end() { Some(Ordering::Less) } else { None } } /// Combines two intervals together, if they overlap or are adjacent. /// /// If the intervals overlap or are adjacent, the result is a single /// interval representing the union of all seqs in either one. /// /// If the intervals are disjoint (ie, have a gap in between them), this /// method returns `None`. #[must_use] pub fn merge(&self, other: &Self) -> Option<Self> { // this is fundamentally doing a traditional interval overlap check: if the later start is // before the earlier end, then the two intervals overlap. 
we only spice things up by adding // one to the earlier end so that we cover the case where they _just about_ touch together // (since our intervals are inclusive and our elements discrete integers, this means they // can also be merged in that case). if self.end().min(other.end()).saturating_add(1) >= self.start().max(other.start()) { let start = self.start().min(other.start()); let end = self.end().max(other.end()); if start == end { Some(Self::point(start)) } else { Some(Self::span(start, end)) } } else { None } } #[must_use] pub fn intersect(&self, other: &Self) -> Option<Self> { let start = self.start().max(other.start()); let end = self.end().min(other.end()); match start.cmp(&end) { Ordering::Less => Some(Self::span(start, end)), Ordering::Equal => Some(Self::point(start)), Ordering::Greater => None, } } #[must_use] pub fn difference(&self, other: &Self) -> IntervalDifference { // if two intervals overlap, these will indicate the start and end of the overlapping range. let later_start = self.start().max(other.start()); let earlier_end = self.end().min(other.end()); match later_start.cmp(&earlier_end) { // overlap Ordering::Less | Ordering::Equal => { let left = (self.start() < later_start).then(|| { // SAFETY: we know later_start - 1 > 0 because later_start >= self.start() and self.start() > 0 let new_end = unsafe { NonZeroU64::new_unchecked(later_start.get() - 1) }; if new_end == self.start() { Self::point(new_end) } else { Self::span(self.start(), new_end) } }); let right = (earlier_end < self.end()).then(|| { let new_start = earlier_end.saturating_add(1); if new_start == self.end() { Self::point(new_start) } else { Self::span(new_start, self.end()) } }); match (left, right) { // total overlap (None, None) => IntervalDifference::Empty, // start removed (None, Some(right)) => IntervalDifference::Single(right), // end removed (Some(left), None) => IntervalDifference::Single(left), // split in the middle (Some(left), Some(right)) => 
IntervalDifference::Split(left, right), } } // no overlap Ordering::Greater => IntervalDifference::Single(*self), } } #[cfg(test)] pub fn is_point(&self) -> bool { self.end.is_none() } #[cfg(test)] pub fn is_span(&self) -> bool { self.end.is_some() } /// The length of this interval. /// /// Alternatively, the number of individual integer values it contains. pub fn interval_length(&self) -> u64 { if let Some(end) = self.end { end.get() - self.start().get() + 1 } else { 1 } } } // TODO: would it be worth using a BTree here and a more traditional interval set? We chose a // vector-based interval set because we don't expect this to grow very large, but only benchmarks // will tell what is actually better. #[derive(Clone, PartialEq, Eq)] #[cfg_attr(feature = "serde", derive(::serde::Deserialize, ::serde::Serialize))] pub(super) struct IntervalSet(Vec<Interval>); impl std::fmt::Debug for IntervalSet { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { f.debug_set().entries(self.0.iter()).finish() } } impl IntervalSet { #[must_use] pub fn new() -> Self { Self(Vec::new()) } #[must_use] pub fn single(seq: NonZeroU64) -> Self { Self(Vec::from([Interval::point(seq)])) } #[must_use] pub fn with_capacity(n: usize) -> Self { Self(Vec::with_capacity(n)) } pub fn from_intervals( iter: impl IntoIterator<Item = (NonZeroU64, Option<NonZeroU64>)>, ) -> Result<Self, IntervalError> { Ok(Self( iter.into_iter() .map(Interval::try_from) .collect::<Result<Vec<_>, _>>()?, )) } #[must_use] pub fn is_empty(&self) -> bool { self.0.is_empty() } #[must_use] pub fn len(&self) -> usize { self.0.len() } #[must_use] pub fn first(&self) -> Option<Interval> { self.0.first().copied() } #[must_use] pub fn last(&self) -> Option<Interval> { self.0.last().copied() } /// The total length of all intervals, summed #[must_use] pub fn total_interval_length(&self) -> u64 { self.0.iter().map(|i| i.interval_length()).sum() } #[must_use] pub fn next_after(&self) -> NonZeroU64 { self.last() 
.map(|ival| ival.next_after()) .unwrap_or(NonZeroU64::MIN) } /// Iterator over all the sequence numbers of this set pub fn seqs(&self) -> impl Iterator<Item = NonZeroU64> + '_ { self.0.iter().flat_map(|ival| ival.seqs()) } /// Iterator over the raw interval ranges (start, end) of this set pub fn intervals( &self, ) -> impl ExactSizeIterator<Item = (NonZeroU64, Option<NonZeroU64>)> + '_ { self.0.iter().map(Interval::interval) } pub fn insert(&mut self, value: impl Into<Interval>) { let ival = value.into(); // find the first interval that does not strictly precede `ival`. this // means it could be adjacent to `ival`, overlap with it or be strictly // after. we cover each case below. let i = self .0 .partition_point(|s| s.end().saturating_add(1) < ival.start()); if i == self.0.len() { // all elements strictly before `ival` (not even adjacent), so // just add the new interval to the end self.0.push(ival); } else if let Some(merged) = self.0[i].merge(&ival) { // this means the intervals overlap or are adjacent self.0[i] = merged; // now the next interval could still overlap or be adjacent, so we // run compaction for the remaining vector - note this is "free", // because we'd need to shift things over anyway if we just inserted // the point without merging self.normalize_starting_at(i); } else { // this means there is a gap between d and the values before and after self.0.insert(i, ival); } } pub fn remove(&mut self, seq: NonZeroU64) -> bool { let p = self.0.partition_point(|s| *s < seq); let Some(ival) = self.0.get(p) else { // this happens if seq is _after_ end return false; }; if ival.contains(seq) { if let Some(end) = ival.end { let start = ival.start; if seq == start { let new_start = start.saturating_add(1); if new_start == end { self.0[p] = Interval::point(new_start); } else { self.0[p] = Interval::span(new_start, end); } } else { // SAFETY: we know end - 1 > 0 because end > start and start > 0 let new_end = unsafe { NonZeroU64::new_unchecked(seq.get() - 1) }; if 
new_end == start { self.0[p] = Interval::point(start); } else { self.0[p] = Interval::span(start, new_end); } if seq != end { if seq.saturating_add(1) == end { self.0.insert(p + 1, Interval::point(end)); } else { self.0 .insert(p + 1, Interval::span(seq.saturating_add(1), end)); } // no need to normalize now as only growth can lead to // intervals becoming mergeable, and we've only shrunk } } } else { self.0.remove(p); } true } else { false } } #[must_use] pub fn contains(&self, seq: NonZeroU64) -> bool { let p = self.0.partition_point(|s| *s < seq); if p == self.0.len() { false } else { self.0[p].contains(seq) } } /// Extends the last interval in this set by one seq. /// /// If the interval set is empty, it will be extended from 0, so // it results in {1}. pub fn extend_end_by_one(&mut self) { match self.0.last_mut() { Some(ival) => { if let Some(end) = ival.end { *ival = Interval::span(ival.start, end.saturating_add(1)); } else { *ival = Interval::span(ival.start, ival.start.saturating_add(1)); } } None => self.0.push(Interval::span(NonZeroU64::MIN, None)), } } fn normalize_starting_at(&mut self, i: usize) { let right_start = i + 1; for j in right_start..self.0.len() { if let Some(merged) = self.0[i].merge(&self.0[j]) { self.0[i] = merged; } else { if j != right_start { // this means there's a segment of merged intervals from // `i+1..j` of length n that needs to be shifted over to the // left. we achieve that shifting by "swapping" the range // from `j..` and `i+1..j` (via a rotation) and truncating. let n = j - i - 1; self.0[right_start..].rotate_left(n); self.0.truncate(self.0.len() - n); } else { // if the _first_ merge fails, there will be no chain reaction, // so we can just return. note that this assumes the function is // called when _only_ the starting interval has been modified. } return; } } // if we get here it means that everything collapsed into a single interval. nice! 
self.0.truncate(right_start); } // NOTE: doing this inplace turns out to be a bad idea, as it has a O(n^2) worst case due to // shifting elements when right < left. Benchmarks show degradation of up to 90%. We'll have to // live with the extra allocation (unless we switch to a different data struct maybe?). #[must_use] pub fn union(&self, other: &Self) -> Self { let mut ours = self.0.iter().peekable(); let mut theirs = other.0.iter().peekable(); let mut result = Self::with_capacity(self.0.len().max(other.0.len())); loop { let next = match (ours.peek(), theirs.peek()) { (Some(&o_ival), Some(&t_ival)) => match o_ival.partial_cmp(t_ival) { Some(Ordering::Less) => { ours.next(); *o_ival } Some(Ordering::Greater) => { theirs.next(); *t_ival } Some(Ordering::Equal) => { ours.next(); theirs.next(); *o_ival } None => match o_ival.merge(t_ival) { Some(merged) => { ours.next(); theirs.next(); merged } None => { unreachable!( "if there is no ordering defined, there must be an overlap" ) } }, }, (Some(&&next), None) => { ours.next(); next } (None, Some(&&next)) => { theirs.next(); next } (None, None) => break, }; if let Some(last) = result.0.last_mut() && let Some(merged) = last.merge(&next) { *last = merged; continue; } result.0.push(next); } result } #[must_use] pub fn intersects(&self, other: &Self) -> bool { let (mut lhs, mut rhs) = (self.0.iter().peekable(), other.0.iter().peekable()); while let (Some(&left), Some(&right)) = (lhs.peek(), rhs.peek()) { match left.partial_cmp(right) { Some(Ordering::Less) => { lhs.next(); } Some(Ordering::Greater) => { rhs.next(); } Some(Ordering::Equal) | None => return true, } } false } #[must_use] pub fn intersection(&self, other: &Self) -> Self { let mut intersection = Self::with_capacity(self.len().min(other.len())); let (mut lhs, mut rhs) = (self.0.iter().peekable(), other.0.iter().peekable()); while let (Some(&left), Some(&right)) = (lhs.peek(), rhs.peek()) { match left.partial_cmp(right) { Some(Ordering::Less) => { lhs.next(); } 
Some(Ordering::Greater) => { rhs.next(); } Some(Ordering::Equal) => { intersection.insert(*left); lhs.next(); rhs.next(); } None => { intersection.insert(left.intersect(right).expect("no ordering => overlaps")); // we advance only the interval that ends earlier, as the // next one might still overlap if left.end() > right.end() { rhs.next(); } else { lhs.next(); } } } } intersection } #[must_use] pub fn difference(&self, other: &Self) -> Self { let mut diff = Self::with_capacity(self.len()); let mut ours = self.0.iter().peekable(); let mut theirs = other.0.iter().peekable(); // copied once so we don't hold a borrow of the peeked reference, which is ephemeral // copied again so we can hold ownership of `second` below with the same variable let mut curr = ours.peek().copied().copied(); loop { match (curr, theirs.peek()) { (None, None) => break, (None, Some(_)) => { theirs.next(); } (Some(o_ival), None) => { diff.insert(o_ival); ours.next(); curr = ours.peek().copied().copied(); } (Some(o_ival), Some(&t_ival)) => match o_ival.partial_cmp(t_ival) { Some(Ordering::Greater) => { theirs.next(); } Some(Ordering::Less) => { diff.insert(o_ival); ours.next(); curr = ours.peek().copied().copied(); } Some(Ordering::Equal) => { ours.next(); theirs.next(); curr = ours.peek().copied().copied(); } // overlap None => { match o_ival.difference(t_ival) { // `t_ival` is a (proper) superset of `o_ival`, so nothing remains // of `o_ival`. `t_ival` may still overlap with the next from `ours`, // so we don't advance that side yet, just `ours`. IntervalDifference::Empty => { ours.next(); curr = ours.peek().copied().copied(); } // one interval remains - this could be the left side of `o_ival`, or // the right side. if it's the right side, we can't insert yet, as the // next interval from `theirs` may still overlap with it. since we don't // know, we just set `curr` and don't advance `ours`. 
IntervalDifference::Single(common) => { curr = Some(common); } // left gets split into two smaller intervals IntervalDifference::Split(first, second) => { // `first` interval is < `t_ival`, so we can insert now because we // know no other interval from `theirs` will overlap with it. diff.insert(first); // but `second` may still overlap with the next from `theirs`, so we // keep it as `curr` instead of advancing `ours`. curr = Some(second); // this also means it's safe to advance `theirs`, because `t_ival` // is known to end before `second` (that is, we know we'll hit the // Greater case in the next iteration, so may as well skip that // step) theirs.next(); } } // NOTE: may be tempting to advance `theirs` now, since we used the interval // to subtract, but it's possible the right interval extends past left, in // which case it may still overlap with the next left. the one exception is // in the split case above (see the comment). } }, } } diff } } impl Extend<NonZeroU64> for IntervalSet { fn extend<T: IntoIterator<Item = NonZeroU64>>(&mut self, iter: T) { for seq in iter { self.insert(seq); } } } impl FromIterator<NonZeroU64> for IntervalSet { fn from_iter<T: IntoIterator<Item = NonZeroU64>>(iter: T) -> Self { let mut new = Self::new(); new.extend(iter); new } } impl PartialOrd for IntervalSet { fn partial_cmp(&self, other: &Self) -> Option<Ordering> { let mut ours = self.0.iter().peekable(); let mut theirs = other.0.iter().peekable(); let (mut o_unique, mut t_unique) = (false, false); loop { // early exit if both sides have unique seqs if o_unique && t_unique { return None; } match (ours.peek(), theirs.peek()) { (None, None) => break, (None, Some(_)) => { t_unique = true; // from this point on, every iteration will hit this case, // until `theirs` is also exhausted, so the `o_unique` value // won't change anymore and it's safe to break early break; } (Some(_), None) => { // symmetrical of the arm above o_unique = true; break; } (Some(&o_ival), Some(&t_ival)) => { 
match o_ival.partial_cmp(t_ival) { Some(Ordering::Less) => { ours.next(); o_unique = true; } Some(Ordering::Greater) => { theirs.next(); t_unique = true; } Some(Ordering::Equal) => { ours.next(); theirs.next(); } None => { match o_ival.partial_set_cmp(t_ival) { Some(Ordering::Equal) => unreachable!( "covered by outer equal arm - equality definitions must match" ), // `t_ival` is a superset of `o_ival` // TODO: use a macro to ensure symmetry between Less and Greater? Some(Ordering::Less) => { if o_unique { // we know here that `t_ival` contains unique dots not in // `o_val` (because it's a superset), so if `o_unique` is // already true, it means both sides overlap partially, and // the ordering is undefined. since we know that already, we // can just return early. return None; } t_unique = true; ours.next(); // make sure we advance the lhs to the next non-overlapping item while let Some(&left) = ours.peek() { if !t_ival.is_superset(left) { break; } ours.next(); } theirs.next(); } // `o_ival` is a superset of `t_ival` Some(Ordering::Greater) => { if t_unique { // see `Less` branch for why we can return early here return None; } o_unique = true; theirs.next(); // make sure we advance the rhs to the next non-overlapping item while let Some(&right) = theirs.peek() { if !o_ival.is_superset(right) { break; } theirs.next(); } ours.next(); } None => return None, } } } } } } match (o_unique, t_unique) { (true, true) => None, (true, false) => Some(Ordering::Greater), (false, true) => Some(Ordering::Less), (false, false) => Some(Ordering::Equal), } } } #[cfg(test)] #[allow(clippy::undocumented_unsafe_blocks)] mod tests { use ahash::HashSet; use super::*; impl IntervalSet { fn count_points(&self) -> usize { self.0.iter().filter(|ival| ival.is_point()).count() } fn count_spans(&self) -> usize { self.0.iter().filter(|ival| ival.is_span()).count() } fn assert_normalized(&self) { let mut compacted = self.clone(); compacted.normalize_starting_at(0); assert_eq!(self, &compacted); } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
true
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/tests/transaction_rollback.rs
tests/transaction_rollback.rs
//! Tests for transaction rollback behavior. //! //! When a transaction is dropped without calling commit(), all changes //! should be rolled back, leaving the original store unchanged. use dson::{ CausalDotStore, Identifier, OrArray, OrMap, crdts::mvreg::MvRegValue, transaction::{ArrayTransaction, CrdtValue, MapTransaction}, }; #[test] fn map_transaction_rollback_register() { let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Create initial state { let mut tx = MapTransaction::new(&mut store, id); tx.write_register("name", MvRegValue::String("Alice".to_string())); let _delta = tx.commit(); } // Clone store to compare later let original_store = store.clone(); // Start transaction and make changes but DON'T commit { let mut tx = MapTransaction::new(&mut store, id); tx.write_register("name", MvRegValue::String("Bob".to_string())); tx.write_register("age", MvRegValue::U64(30)); // Drop tx without calling commit() - should rollback } // Store should be unchanged assert_eq!(store, original_store); // Verify original value still present let tx = MapTransaction::new(&mut store, id); match tx.get(&"name".to_string()) { Some(CrdtValue::Register(reg)) => { use dson::crdts::snapshot::ToValue; assert_eq!( reg.value().unwrap(), &MvRegValue::String("Alice".to_string()) ); } _ => panic!("Expected register with original value"), } // Verify new key was NOT added assert!(tx.get(&"age".to_string()).is_none()); } #[test] fn map_transaction_rollback_nested_map() { let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Create initial state with nested map { let mut tx = MapTransaction::new(&mut store, id); tx.in_map("config", |cfg_tx| { cfg_tx.write_register("version", MvRegValue::U64(1)); }); let _delta = tx.commit(); } let original_store = store.clone(); // Modify nested map but don't commit { let mut tx = MapTransaction::new(&mut store, id); tx.in_map("config", |cfg_tx| { 
cfg_tx.write_register("version", MvRegValue::U64(2)); cfg_tx.write_register("debug", MvRegValue::Bool(true)); }); // Drop without commit } // Store should be unchanged assert_eq!(store, original_store); // Verify original nested value use dson::crdts::snapshot::ToValue; let config = store.store.get(&"config".to_string()).unwrap(); let version = config.map.get(&"version".to_string()).unwrap(); assert_eq!(version.reg.value().unwrap(), &MvRegValue::U64(1)); assert!(config.map.get(&"debug".to_string()).is_none()); } #[test] fn map_transaction_rollback_array() { let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Create initial state with array { let mut tx = MapTransaction::new(&mut store, id); tx.in_array("items", |arr_tx| { arr_tx.insert_register(0, MvRegValue::String("first".to_string())); }); let _delta = tx.commit(); } let original_store = store.clone(); // Modify array but don't commit { let mut tx = MapTransaction::new(&mut store, id); tx.in_array("items", |arr_tx| { arr_tx.insert_register(1, MvRegValue::String("second".to_string())); arr_tx.insert_register(2, MvRegValue::String("third".to_string())); }); // Drop without commit } // Store should be unchanged assert_eq!(store, original_store); // Verify array still has only one element use dson::crdts::snapshot::ToValue; let items = store.store.get(&"items".to_string()).unwrap(); assert_eq!(items.array.len(), 1); assert_eq!( items.array.get(0).unwrap().reg.value().unwrap(), &MvRegValue::String("first".to_string()) ); } #[test] fn array_transaction_rollback_register() { let mut store = CausalDotStore::<OrArray>::default(); let id = Identifier::new(0, 0); // Create initial state { let mut tx = ArrayTransaction::new(&mut store, id); tx.insert_register(0, MvRegValue::U64(1)); tx.insert_register(1, MvRegValue::U64(2)); let _delta = tx.commit(); } let original_store = store.clone(); // Modify array but don't commit { let mut tx = ArrayTransaction::new(&mut store, id); 
tx.insert_register(2, MvRegValue::U64(3)); tx.insert_register(3, MvRegValue::U64(4)); // Drop without commit } // Store should be unchanged assert_eq!(store, original_store); // Verify array still has only 2 elements use dson::crdts::snapshot::ToValue; assert_eq!(store.store.len(), 2); assert_eq!( store.store.get(0).unwrap().reg.value().unwrap(), &MvRegValue::U64(1) ); assert_eq!( store.store.get(1).unwrap().reg.value().unwrap(), &MvRegValue::U64(2) ); } #[test] fn array_transaction_rollback_nested_array() { let mut store = CausalDotStore::<OrArray>::default(); let id = Identifier::new(0, 0); // Create initial state with nested array { let mut tx = ArrayTransaction::new(&mut store, id); tx.insert_array(0, |inner_tx| { inner_tx.insert_register(0, MvRegValue::U64(1)); }); let _delta = tx.commit(); } let original_store = store.clone(); // Modify nested array but don't commit { let mut tx = ArrayTransaction::new(&mut store, id); tx.insert_array(1, |inner_tx| { inner_tx.insert_register(0, MvRegValue::U64(2)); }); // Drop without commit } // Store should be unchanged assert_eq!(store, original_store); // Verify only original nested array exists use dson::crdts::snapshot::ToValue; assert_eq!(store.store.len(), 1); let nested = &store.store.get(0).unwrap().array; assert_eq!(nested.len(), 1); assert_eq!( nested.get(0).unwrap().reg.value().unwrap(), &MvRegValue::U64(1) ); } #[test] fn array_transaction_rollback_map() { let mut store = CausalDotStore::<OrArray>::default(); let id = Identifier::new(0, 0); // Create initial state with map { let mut tx = ArrayTransaction::new(&mut store, id); tx.insert_map(0, |map_tx| { map_tx.write_register("id", MvRegValue::U64(1)); }); let _delta = tx.commit(); } let original_store = store.clone(); // Modify map but don't commit { let mut tx = ArrayTransaction::new(&mut store, id); tx.insert_map(1, |map_tx| { map_tx.write_register("id", MvRegValue::U64(2)); }); // Drop without commit } // Store should be unchanged assert_eq!(store, 
original_store); // Verify only original map exists use dson::crdts::snapshot::ToValue; assert_eq!(store.store.len(), 1); let map = &store.store.get(0).unwrap().map; let id_val = map.get(&"id".to_string()).unwrap(); assert_eq!(id_val.reg.value().unwrap(), &MvRegValue::U64(1)); } #[test] fn map_transaction_commit_after_rollback() { // Ensure that after a rollback, a new transaction can still commit successfully let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // First transaction: commit { let mut tx = MapTransaction::new(&mut store, id); tx.write_register("count", MvRegValue::U64(1)); let _delta = tx.commit(); } // Second transaction: rollback { let mut tx = MapTransaction::new(&mut store, id); tx.write_register("count", MvRegValue::U64(999)); // Drop without commit } // Third transaction: commit { let mut tx = MapTransaction::new(&mut store, id); tx.write_register("count", MvRegValue::U64(2)); let _delta = tx.commit(); } // Verify final value is from third transaction let tx = MapTransaction::new(&mut store, id); match tx.get(&"count".to_string()) { Some(CrdtValue::Register(reg)) => { use dson::crdts::snapshot::ToValue; assert_eq!(reg.value().unwrap(), &MvRegValue::U64(2)); } _ => panic!("Expected register"), } } #[test] fn array_transaction_commit_after_rollback() { // Ensure that after a rollback, a new transaction can still commit successfully let mut store = CausalDotStore::<OrArray>::default(); let id = Identifier::new(0, 0); // First transaction: commit { let mut tx = ArrayTransaction::new(&mut store, id); tx.insert_register(0, MvRegValue::U64(1)); let _delta = tx.commit(); } // Second transaction: rollback { let mut tx = ArrayTransaction::new(&mut store, id); tx.insert_register(1, MvRegValue::U64(999)); // Drop without commit } // Third transaction: commit { let mut tx = ArrayTransaction::new(&mut store, id); tx.insert_register(1, MvRegValue::U64(2)); let _delta = tx.commit(); } // Verify array has both committed 
values use dson::crdts::snapshot::ToValue; assert_eq!(store.store.len(), 2); assert_eq!( store.store.get(0).unwrap().reg.value().unwrap(), &MvRegValue::U64(1) ); assert_eq!( store.store.get(1).unwrap().reg.value().unwrap(), &MvRegValue::U64(2) ); } #[test] fn nested_transaction_panic_safety() { use dson::{ CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::MapTransaction, }; // Verify that if a nested transaction panics, the parent transaction // is not corrupted and can still be rolled back cleanly let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Create initial state { let mut tx = MapTransaction::new(&mut store, id); tx.write_register("root", MvRegValue::U64(1)); let _delta = tx.commit(); } let original_store = store.clone(); // Transaction with nested panic let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(|| { let mut tx = MapTransaction::new(&mut store, id); tx.write_register("root", MvRegValue::U64(2)); tx.in_map("nested", |nested_tx| { nested_tx.write_register("field", MvRegValue::String("test".to_string())); panic!("Simulated panic in nested transaction"); }); #[allow(unreachable_code)] tx.commit() })); // Verify panic occurred assert!(result.is_err()); // Store should be unchanged - automatic rollback assert_eq!(store, original_store); // Verify original value still present use dson::crdts::snapshot::ToValue; let val = store.store.get(&"root".to_string()).unwrap(); assert_eq!(val.reg.value().unwrap(), &MvRegValue::U64(1)); // Verify nested map was not created assert!(store.store.get(&"nested".to_string()).is_none()); }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/tests/nested_transactions.rs
tests/nested_transactions.rs
//! Integration tests for nested transaction API. use dson::{ CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::MapTransaction, }; #[test] fn deeply_nested_map_array_map() { // Structure: map -> array -> map // Like: { "projects": [{ "name": "DSON", "tasks": [...] }] } let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); { let mut tx = MapTransaction::new(&mut store, id); tx.in_array("projects", |projects_tx| { projects_tx.insert_map(0, |project_tx| { project_tx.write_register("name", MvRegValue::String("DSON".to_string())); project_tx.write_register("priority", MvRegValue::U64(1)); project_tx.in_array("tasks", |tasks_tx| { tasks_tx .insert_register(0, MvRegValue::String("Implement nested TX".to_string())); tasks_tx.insert_register(1, MvRegValue::String("Write tests".to_string())); }); }); }); let _delta = tx.commit(); } // Verify structure was created use dson::crdts::snapshot::ToValue; let projects_val = store.store.get(&"projects".to_string()).unwrap(); assert_eq!(projects_val.array.len(), 1); let project = projects_val.array.get(0).unwrap(); let name = project.map.get(&"name".to_string()).unwrap(); assert_eq!( name.reg.value().unwrap(), &MvRegValue::String("DSON".to_string()) ); let tasks_val = project.map.get(&"tasks".to_string()).unwrap(); assert_eq!(tasks_val.array.len(), 2); } #[test] fn array_of_arrays() { // Test [[1, 2], [3, 4]] let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); { let mut tx = MapTransaction::new(&mut store, id); tx.in_array("matrix", |matrix_tx| { matrix_tx.insert_array(0, |row_tx| { row_tx.insert_register(0, MvRegValue::U64(1)); row_tx.insert_register(1, MvRegValue::U64(2)); }); matrix_tx.insert_array(1, |row_tx| { row_tx.insert_register(0, MvRegValue::U64(3)); row_tx.insert_register(1, MvRegValue::U64(4)); }); }); let _delta = tx.commit(); } // Verify 2x2 matrix use dson::crdts::snapshot::ToValue; let matrix = 
store.store.get(&"matrix".to_string()).unwrap(); assert_eq!(matrix.array.len(), 2); let row0 = matrix.array.get(0).unwrap(); assert_eq!(row0.array.len(), 2); assert_eq!( row0.array.get(0).unwrap().reg.value().unwrap(), &MvRegValue::U64(1) ); assert_eq!( row0.array.get(1).unwrap().reg.value().unwrap(), &MvRegValue::U64(2) ); let row1 = matrix.array.get(1).unwrap(); assert_eq!(row1.array.len(), 2); assert_eq!( row1.array.get(0).unwrap().reg.value().unwrap(), &MvRegValue::U64(3) ); assert_eq!( row1.array.get(1).unwrap().reg.value().unwrap(), &MvRegValue::U64(4) ); } #[test] fn concurrent_nested_modifications() { // Two replicas modify nested structures concurrently let id1 = Identifier::new(0, 0); let id2 = Identifier::new(1, 0); let mut replica1 = CausalDotStore::<OrMap<String>>::default(); let mut replica2 = CausalDotStore::<OrMap<String>>::default(); // Both create initial structure let init_delta = { let mut tx = MapTransaction::new(&mut replica1, id1); tx.in_map("config", |cfg_tx| { cfg_tx.write_register("version", MvRegValue::U64(1)); }); tx.commit() }; replica1.join_or_replace_with(init_delta.0.store.clone(), &init_delta.0.context); replica2.join_or_replace_with(init_delta.0.store, &init_delta.0.context); // Replica 1: adds array to config let delta1 = { let mut tx = MapTransaction::new(&mut replica1, id1); tx.in_map("config", |cfg_tx| { cfg_tx.in_array("features", |features_tx| { features_tx.insert_register(0, MvRegValue::String("fast".to_string())); }); }); tx.commit() }; // Replica 2: updates version concurrently let delta2 = { let mut tx = MapTransaction::new(&mut replica2, id2); tx.in_map("config", |cfg_tx| { cfg_tx.write_register("version", MvRegValue::U64(2)); }); tx.commit() }; // Exchange deltas replica1.join_or_replace_with(delta2.0.store, &delta2.0.context); replica2.join_or_replace_with(delta1.0.store, &delta1.0.context); // Both should converge assert_eq!(replica1, replica2); // Verify both changes present use dson::crdts::snapshot::ToValue; let 
config = replica1.store.get(&"config".to_string()).unwrap(); let version = config.map.get(&"version".to_string()).unwrap(); assert!(version.reg.value().unwrap() == &MvRegValue::U64(2)); let features = config.map.get(&"features".to_string()).unwrap(); assert_eq!(features.array.len(), 1); }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/tests/transaction_api.rs
tests/transaction_api.rs
use dson::{CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::CrdtValue}; #[test] fn simple_register_write_and_read() { let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Write using transaction { let mut tx = store.transact(id); tx.write_register("email", MvRegValue::String("alice@example.com".to_string())); let _delta = tx.commit(); } // Read using transaction { let tx = store.transact(id); match tx.get(&"email".to_string()) { Some(CrdtValue::Register(reg)) => { use dson::crdts::snapshot::ToValue; assert_eq!( reg.value().unwrap(), &MvRegValue::String("alice@example.com".to_string()) ); } _ => panic!("Expected register"), } } } #[test] fn two_replica_sync_with_transactions() { // Replica A let mut replica_a = CausalDotStore::<OrMap<String>>::default(); let id_a = Identifier::new(0, 0); // Replica B let mut replica_b = CausalDotStore::<OrMap<String>>::default(); let id_b = Identifier::new(1, 0); // A writes initial value let delta_a1 = { let mut tx = replica_a.transact(id_a); tx.write_register("count", MvRegValue::U64(0)); tx.commit() }; // B receives delta from A replica_b.join_or_replace_with(delta_a1.0.store, &delta_a1.0.context); // Both replicas should be in sync assert_eq!(replica_a, replica_b); // A and B concurrently increment let delta_a2 = { let mut tx = replica_a.transact(id_a); tx.write_register("count", MvRegValue::U64(1)); tx.commit() }; let delta_b1 = { let mut tx = replica_b.transact(id_b); tx.write_register("count", MvRegValue::U64(1)); tx.commit() }; // Exchange deltas replica_a.join_or_replace_with(delta_b1.0.store, &delta_b1.0.context); replica_b.join_or_replace_with(delta_a2.0.store, &delta_a2.0.context); // Both should converge assert_eq!(replica_a, replica_b); // Should have register with concurrent values let tx = replica_a.transact(id_a); match tx.get(&"count".to_string()) { Some(CrdtValue::Register(reg)) => { use dson::crdts::snapshot::ToValue; let values: Vec<_> = 
reg.values().into_iter().collect(); // Both concurrent writes are preserved assert_eq!(values.len(), 2); // Both values are U64(1), but from different replicas assert!(values.iter().all(|v| **v == MvRegValue::U64(1))); } _ => panic!("Expected register"), } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/benches/iai.rs
benches/iai.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. #![cfg_attr(not(target_os = "linux"), allow(dead_code, unused_imports))] use dson::{ CausalContext, CausalDotStore, Dot, Identifier, MvReg, OrArray, OrMap, api, crdts::{NoExtensionTypes, mvreg::MvRegValue}, sentinel::DummySentinel, }; use iai_callgrind::{library_benchmark, library_benchmark_group, main}; use std::hint::black_box; include!(concat!(env!("OUT_DIR"), "/random_dots.rs")); fn setup_array(n: usize) -> (Identifier, CausalDotStore<OrArray<NoExtensionTypes>>) { dson::enable_determinism(); let omni_id = Identifier::new(1, 0); let mut omni = CausalDotStore::<OrArray<NoExtensionTypes>>::default(); for i in 0..n { let add = api::array::insert_register( |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), i, )(&omni.store, &omni.context, omni_id); omni.consume(add, &mut DummySentinel).unwrap(); } (omni_id, omni) } #[library_benchmark] #[bench::medium(setup_array(255))] fn array_unshift((id, omni): (Identifier, CausalDotStore<OrArray<NoExtensionTypes>>)) { let omni = black_box(omni); let insert = api::array::insert_register( |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 0, )(&omni.store, &omni.context, id); black_box(insert); } #[library_benchmark] #[bench::medium(setup_array(255))] fn array_delete((id, omni): (Identifier, CausalDotStore<OrArray<NoExtensionTypes>>)) { let omni = black_box(omni); let delete = api::array::delete(128)(&omni.store, &omni.context, id); black_box(delete); } #[library_benchmark] #[bench::medium(setup_array(255))] fn array_update((id, omni): (Identifier, CausalDotStore<OrArray<NoExtensionTypes>>)) { let omni = black_box(omni); let update = api::array::apply_to_register( |old, cc, id| old.write(MvRegValue::Bool(false), cc, id), 128, )(&omni.store, &omni.context, id); black_box(update); } #[library_benchmark] #[bench::medium(setup_array(255))] fn array_insert((id, omni): (Identifier, CausalDotStore<OrArray<NoExtensionTypes>>)) { let omni = 
black_box(omni); let insert = api::array::insert_register( |cc, id| MvReg::default().write(MvRegValue::Bool(false), cc, id), 128, )(&omni.store, &omni.context, id); black_box(insert); } #[library_benchmark] #[bench::medium(setup_array(255))] fn array_push((id, omni): (Identifier, CausalDotStore<OrArray<NoExtensionTypes>>)) { let omni = black_box(omni); let insert = api::array::insert_register( |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), omni.store.len(), )(&omni.store, &omni.context, id); black_box(insert); } fn setup_map(n: usize) -> (Identifier, CausalDotStore<OrMap<String, NoExtensionTypes>>) { dson::enable_determinism(); let omni_id = Identifier::new(1, 0); let mut omni = CausalDotStore::<OrMap<String, NoExtensionTypes>>::default(); for i in 0..n { let add = api::map::apply_to_register( |_, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), i.to_string(), )(&omni.store, &omni.context, omni_id); omni.consume(add, &mut DummySentinel).unwrap(); } (omni_id, omni) } fn setup_direct_crdt_map( n: usize, ) -> (Identifier, CausalDotStore<OrMap<String, NoExtensionTypes>>) { dson::enable_determinism(); let omni_id = Identifier::new(1, 0); let mut omni = CausalDotStore::<OrMap<String, NoExtensionTypes>>::default(); for i in 0..n { let delta = omni.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), i.to_string(), &omni.context, omni_id, ); omni.consume(delta, &mut DummySentinel).unwrap(); } (omni_id, omni) } #[library_benchmark] #[bench::medium(setup_map(255))] fn map_insert((id, omni): (Identifier, CausalDotStore<OrMap<String, NoExtensionTypes>>)) { let omni = black_box(omni); let insert = api::map::apply_to_register( |_, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), "duck".into(), )(&omni.store, &omni.context, id); black_box(insert); } #[library_benchmark] #[bench::medium(setup_map(255))] fn map_remove((id, omni): (Identifier, CausalDotStore<OrMap<String, NoExtensionTypes>>)) { let omni = 
black_box(omni); let remove = api::map::remove("128")(&omni.store, &omni.context, id); black_box(remove); } #[library_benchmark] #[bench::medium(setup_map(255))] fn map_update((id, omni): (Identifier, CausalDotStore<OrMap<String, NoExtensionTypes>>)) { let omni = black_box(omni); let update = api::map::apply_to_register( |old, cc, id| old.write(MvRegValue::Bool(true), cc, id), "128".into(), )(&omni.store, &omni.context, id); black_box(update); } #[library_benchmark] #[bench::medium(setup_direct_crdt_map(255))] fn direct_crdt_map_insert( (id, omni): (Identifier, CausalDotStore<OrMap<String, NoExtensionTypes>>), ) { let omni = black_box(omni); let insert = omni.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), "duck".to_string(), &omni.context, id, ); black_box(insert); } #[library_benchmark] #[bench::medium(setup_direct_crdt_map(255))] fn direct_crdt_map_remove( (id, omni): (Identifier, CausalDotStore<OrMap<String, NoExtensionTypes>>), ) { let omni = black_box(omni); let remove = omni.store.remove(&"128".to_string(), &omni.context, id); black_box(remove); } #[library_benchmark] #[bench::medium(setup_direct_crdt_map(255))] fn direct_crdt_map_update( (id, omni): (Identifier, CausalDotStore<OrMap<String, NoExtensionTypes>>), ) { let omni = black_box(omni); let update = omni.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), "128".to_string(), &omni.context, id, ); black_box(update); } fn setup_register() -> (Identifier, CausalDotStore<MvReg>) { dson::enable_determinism(); let omni_id = Identifier::new(1, 0); let omni = CausalDotStore::<MvReg>::default(); let write = api::register::write(MvRegValue::Bool(false))(&omni.store, &omni.context, omni_id); (omni_id, omni.join(write, &mut DummySentinel).unwrap()) } #[library_benchmark] #[bench::bool(setup_register())] fn register_write((id, omni): (Identifier, CausalDotStore<MvReg>)) { let omni = black_box(omni); let write = 
api::register::write(MvRegValue::Bool(true))(&omni.store, &omni.context, id); black_box(write); } #[library_benchmark] #[bench::bool(setup_register())] fn register_clear((id, omni): (Identifier, CausalDotStore<MvReg>)) { let omni = black_box(omni); let clear = api::register::clear()(&omni.store, &omni.context, id); black_box(clear); } struct Ccs { big1: CausalContext, big2: CausalContext, small1: CausalContext, #[allow(dead_code)] small2: CausalContext, } fn setup_cc() -> Ccs { dson::enable_determinism(); let big1 = CausalContext::from_iter(BIG1.iter().copied()); let big2 = CausalContext::from_iter(BIG2.iter().copied()); let small1 = CausalContext::from_iter(SMALL1.iter().copied()); let small2 = CausalContext::from_iter(SMALL2.iter().copied()); Ccs { big1, big2, small1, small2, } } #[library_benchmark] #[bench::id(setup_cc())] fn cc_join_big_small(ccs: Ccs) { let mut ccs = black_box(ccs); ccs.big1.union(&ccs.small1); black_box(ccs); } #[library_benchmark] #[bench::id(setup_cc())] fn cc_join_big_big(ccs: Ccs) { let mut ccs = black_box(ccs); ccs.big1.union(&ccs.big2); black_box(ccs); } library_benchmark_group!( name = arrays; benchmarks = array_unshift, array_delete, array_update, array_insert ); library_benchmark_group!( name = maps; benchmarks = map_insert, map_remove, map_update ); library_benchmark_group!( name = direct_crdt_maps; benchmarks = direct_crdt_map_insert, direct_crdt_map_remove, direct_crdt_map_update ); library_benchmark_group!( name = registers; benchmarks = register_write, register_clear ); library_benchmark_group!( name = causal_contexts; benchmarks = cc_join_big_small, cc_join_big_big ); #[cfg(target_os = "linux")] main!( library_benchmark_groups = arrays, maps, direct_crdt_maps, registers, causal_contexts ); #[cfg(not(target_os = "linux"))] fn main() {}
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/benches/nested_transactions.rs
benches/nested_transactions.rs
use dson::{ CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::MapTransaction, }; use iai_callgrind::{library_benchmark, library_benchmark_group, main}; #[library_benchmark] fn nested_transaction_3_levels() { let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); let mut tx = MapTransaction::new(&mut store, id); tx.in_map("level1", |l1_tx| { l1_tx.in_map("level2", |l2_tx| { l2_tx.write_register("value", MvRegValue::U64(42)); }); }); let _delta = tx.commit(); } #[library_benchmark] fn direct_crdt_api_3_levels() { let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Direct CRDT operations without transaction API let delta = store.store.apply_to_map( |l1, ctx1, id1| { l1.apply_to_map( |l2, ctx2, id2| { l2.apply_to_register( |reg, ctx3, id3| reg.write(MvRegValue::U64(42), ctx3, id3), "value".to_string(), ctx2, id2, ) }, "level2".to_string(), ctx1, id1, ) }, "level1".to_string(), &store.context, id, ); store.join_or_replace_with(delta.store, &delta.context); } library_benchmark_group!( name = nested_transaction_benches; benchmarks = nested_transaction_3_levels, direct_crdt_api_3_levels ); #[cfg(target_os = "linux")] main!(library_benchmark_groups = nested_transaction_benches); #[cfg(not(target_os = "linux"))] fn main() {}
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/benches/tango.rs
benches/tango.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. // because we need this below to retain 'static on the borrows of omni #![allow(clippy::borrow_deref_ref)] use dson::{ CausalContext, CausalDotStore, Dot, Identifier, MvReg, OrArray, OrMap, api, crdts::{NoExtensionTypes, mvreg::MvRegValue}, sentinel::DummySentinel, }; use std::hint::black_box; use tango_bench::{IntoBenchmarks, benchmark_fn, tango_benchmarks, tango_main}; include!(concat!(env!("OUT_DIR"), "/random_dots.rs")); fn array_benchmarks() -> impl IntoBenchmarks { dson::enable_determinism(); let omni_id = Identifier::new(1, 0); let mut omni = CausalDotStore::<OrArray<NoExtensionTypes>>::default(); for i in 0..255 { let add = api::array::insert_register( |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), i, )(&omni.store, &omni.context, omni_id); omni.consume(add, &mut DummySentinel).unwrap(); } let omni: &'static _ = Box::leak(Box::new(omni)); [ benchmark_fn("array::unshift", move |b| { b.iter(move || { let omni = black_box(&*omni); api::array::insert_register( |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), 0, )(&omni.store, &omni.context, omni_id) }) }), benchmark_fn("array::delete", move |b| { b.iter(move || { let omni = black_box(&*omni); api::array::delete(128)(&omni.store, &omni.context, omni_id) }) }), benchmark_fn("array::update", move |b| { b.iter(move || { let omni = black_box(&*omni); api::array::apply_to_register( |old, cc, id| old.write(MvRegValue::Bool(false), cc, id), 128, )(&omni.store, &omni.context, omni_id) }) }), benchmark_fn("array::insert", move |b| { b.iter(move || { let omni = black_box(&*omni); api::array::insert_register( |cc, id| MvReg::default().write(MvRegValue::Bool(false), cc, id), 128, )(&omni.store, &omni.context, omni_id) }) }), benchmark_fn("array::push", move |b| { b.iter(move || { let omni = black_box(&*omni); api::array::insert_register( |cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), omni.store.len(), )(&omni.store, 
&omni.context, omni_id) }) }), ] } fn map_benchmarks() -> impl IntoBenchmarks { dson::enable_determinism(); let omni_id = Identifier::new(1, 0); let mut omni = CausalDotStore::<OrMap<String, NoExtensionTypes>>::default(); for i in 0..255 { let add = api::map::apply_to_register( |_, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), i.to_string(), )(&omni.store, &omni.context, omni_id); omni.consume(add, &mut DummySentinel).unwrap(); } let omni: &'static _ = Box::leak(Box::new(omni)); [ benchmark_fn("map::insert", move |b| { b.iter(move || { let omni = black_box(&*omni); api::map::apply_to_register( |_, cc, id| MvReg::default().write(MvRegValue::Bool(true), cc, id), "duck".into(), )(&omni.store, &omni.context, omni_id) }) }), benchmark_fn("map::remove", move |b| { b.iter(move || { let omni = black_box(&*omni); api::map::remove("128")(&omni.store, &omni.context, omni_id) }) }), benchmark_fn("map::update", move |b| { b.iter(move || { let omni = black_box(&*omni); api::map::apply_to_register( |old, cc, id| old.write(MvRegValue::Bool(true), cc, id), "128".into(), )(&omni.store, &omni.context, omni_id) }) }), ] } fn direct_crdt_map_benchmarks() -> impl IntoBenchmarks { dson::enable_determinism(); let omni_id = Identifier::new(1, 0); let mut omni = CausalDotStore::<OrMap<String, NoExtensionTypes>>::default(); for i in 0..255 { let delta = omni.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), i.to_string(), &omni.context, omni_id, ); omni.consume(delta, &mut DummySentinel).unwrap(); } let omni: &'static _ = Box::leak(Box::new(omni)); [ benchmark_fn("direct-crdt::map::insert", move |b| { b.iter(move || { let omni = black_box(&*omni); omni.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), "duck".to_string(), &omni.context, omni_id, ) }) }), benchmark_fn("direct-crdt::map::remove", move |b| { b.iter(move || { let omni = black_box(&*omni); omni.store .remove(&"128".to_string(), &omni.context, 
omni_id) }) }), benchmark_fn("direct-crdt::map::update", move |b| { b.iter(move || { let omni = black_box(&*omni); omni.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), "128".to_string(), &omni.context, omni_id, ) }) }), ] } fn register_benchmarks() -> impl IntoBenchmarks { dson::enable_determinism(); let omni_id = Identifier::new(1, 0); let mut omni = CausalDotStore::<MvReg>::default(); let write = api::register::write(MvRegValue::Bool(false))(&omni.store, &omni.context, omni_id); omni.consume(write, &mut DummySentinel).unwrap(); let omni: &'static _ = Box::leak(Box::new(omni)); [ benchmark_fn("register::write", move |b| { b.iter(move || { let omni = black_box(&*omni); api::register::write(MvRegValue::Bool(true))(&omni.store, &omni.context, omni_id) }) }), benchmark_fn("register::clear", move |b| { b.iter(move || { let omni = black_box(&*omni); api::register::clear()(&omni.store, &omni.context, omni_id) }) }), ] } fn transaction_map_benchmarks() -> impl IntoBenchmarks { dson::enable_determinism(); let omni_id = Identifier::new(1, 0); // Setup for single-op benchmarks (no pre-population, isolate transaction overhead) [ benchmark_fn("transaction::map::insert-empty", move |b| { b.iter(move || { let mut omni = CausalDotStore::<OrMap<String, NoExtensionTypes>>::default(); let mut tx = black_box(&mut omni).transact(omni_id); tx.write_register("duck".to_string(), MvRegValue::Bool(true)); black_box(tx.commit()) }) }), benchmark_fn("transaction::map::insert-with-setup", move |b| { b.iter(move || { let mut omni = CausalDotStore::<OrMap<String, NoExtensionTypes>>::default(); for i in 0..255 { let delta = omni.store.apply_to_register( |reg, ctx, id| reg.write(MvRegValue::Bool(true), ctx, id), i.to_string(), &omni.context, omni_id, ); omni.consume(delta, &mut DummySentinel).unwrap(); } let mut tx = black_box(&mut omni).transact(omni_id); tx.write_register("duck".to_string(), MvRegValue::Bool(true)); black_box(tx.commit()) }) }), 
benchmark_fn("transaction::map::remove-empty", move |b| { b.iter(move || { let mut omni = CausalDotStore::<OrMap<String, NoExtensionTypes>>::default(); let mut tx = black_box(&mut omni).transact(omni_id); tx.remove("128".to_string()); black_box(tx.commit()) }) }), benchmark_fn("transaction::map::update-empty", move |b| { b.iter(move || { let mut omni = CausalDotStore::<OrMap<String, NoExtensionTypes>>::default(); let mut tx = black_box(&mut omni).transact(omni_id); tx.write_register("128".to_string(), MvRegValue::Bool(true)); black_box(tx.commit()) }) }), ] } fn cc_benchmarks() -> impl IntoBenchmarks { dson::enable_determinism(); let big1 = CausalContext::from_iter(BIG1.iter().copied()); let big2 = CausalContext::from_iter(BIG2.iter().copied()); let small1 = CausalContext::from_iter(SMALL1.iter().copied()); let small2 = CausalContext::from_iter(SMALL2.iter().copied()); let big1: &'static _ = Box::leak(Box::new(big1)); let big2: &'static _ = Box::leak(Box::new(big2)); let small1: &'static _ = Box::leak(Box::new(small1)); let small2: &'static _ = Box::leak(Box::new(small2)); [ benchmark_fn("causal-context::join::both_same_small", move |b| { b.iter(|| { let mut left = black_box(small1.clone()); let right = black_box(&*small1); left.union(right) }) }), benchmark_fn("causal-context::join::both_small", move |b| { b.iter(|| { let mut left = black_box(small1.clone()); let right = black_box(&*small2); left.union(right) }) }), benchmark_fn("causal-context::join::left_big", move |b| { b.iter(|| { let mut left = black_box(big1.clone()); let right = black_box(&*small1); left.union(right) }) }), benchmark_fn("causal-context::join::right_big", move |b| { b.iter(|| { let mut left = black_box(small1.clone()); let right = black_box(&*big1); left.union(right) }) }), benchmark_fn("causal-context::join::both_big", move |b| { b.iter(|| { let mut left = black_box(big1.clone()); let right = black_box(&*big2); left.union(right) }) }), benchmark_fn("causal-context::join::both_same_big", 
move |b| { b.iter(|| { let mut left = black_box(big1.clone()); let right = black_box(&*big1); left.union(right) }) }), ] } tango_benchmarks!( array_benchmarks(), map_benchmarks(), direct_crdt_map_benchmarks(), transaction_map_benchmarks(), register_benchmarks(), cc_benchmarks() ); tango_main!();
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/examples/simple.rs
examples/simple.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! The example simulates a scenario where two replicas modify the same data and //! then synchronize their states, arriving at a consistent final result. use dson::{CausalDotStore, Identifier, OrMap, crdts::NoExtensionTypes}; use std::error::Error; fn main() -> Result<(), Box<dyn Error>> { // Create a unique identifier for replica A. let replica_a_id = Identifier::new(0, 0); // Initialize the state for replica A. The `CausalDotStore` holds the CRDT data // and its associated causal context. We use an `OrMap` (Observed-Remove Map) // with String values as our top-level CRDT. let mut replica_a_state = CausalDotStore::<OrMap<String>>::default(); // --- Replica A: Set email for "alice" --- // The following operation creates a delta that represents the change of setting // the email for the key "alice". This delta only contains the change set, not // the full state. let delta_from_a = dson::api::map::apply_to_map::<_, NoExtensionTypes, _>( |inner_map, ctx, id| { // Within the "alice" map, we apply a change to the "email" register. dson::api::map::apply_to_register( // The new value for the register. |reg, ctx, id| reg.write("alice@example.com".to_string().into(), ctx, id), "email".to_string(), )(inner_map, ctx, id) }, "alice".to_string(), )( // The operation is based on the current state of replica A. &replica_a_state.store, &replica_a_state.context, replica_a_id, ); // Apply the generated delta to replica A's own state. replica_a_state.join_or_replace_with(delta_from_a.store.clone(), &delta_from_a.context); // --- Synchronization: A -> B --- // In a real-world scenario, the `delta_from_a` would be sent over a network // to other replicas. Here, we simulate this by creating a second replica and // applying the delta to it. // Create a unique identifier for replica B. let replica_b_id = Identifier::new(1, 0); // Initialize the state for replica B. 
let mut replica_b_state = CausalDotStore::<OrMap<String>>::default(); // Apply the delta from replica A to replica B's state. replica_b_state.join_or_replace_with(delta_from_a.store.clone(), &delta_from_a.context); // After synchronization, the states of both replicas should be identical. assert_eq!(replica_a_state, replica_b_state); // --- Replica B: Update email for "alice" --- // Now, replica B makes a change to the same data. This will create a new // delta based on replica B's current state. let delta_from_b = dson::api::map::apply_to_map::<_, NoExtensionTypes, _>( |inner_map, ctx, id| { dson::api::map::apply_to_register( |reg, ctx, id| reg.write("bob@example.com".to_string().into(), ctx, id), "email".to_string(), )(inner_map, ctx, id) }, "alice".to_string(), )( &replica_b_state.store, &replica_b_state.context, replica_b_id, ); // Apply the new delta to replica B's own state. replica_b_state.join_or_replace_with(delta_from_b.store.clone(), &delta_from_b.context); // --- Synchronization: B -> A --- // Propagate the delta from replica B back to replica A. replica_a_state.join_or_replace_with(delta_from_b.store.clone(), &delta_from_b.context); // After this final synchronization, both replicas should once again have // identical states, reflecting the latest change made by replica B. assert_eq!(replica_a_state, replica_b_state); Ok(()) }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/examples/nested_transactions.rs
examples/nested_transactions.rs
use dson::{ CausalDotStore, Identifier, OrMap, crdts::mvreg::MvRegValue, transaction::MapTransaction, }; fn main() { println!("Nested Transaction API Demo\n"); let mut store = CausalDotStore::<OrMap<String>>::default(); let id = Identifier::new(0, 0); // Create deeply nested structure { let mut tx = MapTransaction::new(&mut store, id); // Simple register tx.write_register("app_name", MvRegValue::String("TaskManager".to_string())); // Nested map tx.in_map("settings", |settings_tx| { settings_tx.write_register("theme", MvRegValue::String("dark".to_string())); settings_tx.write_register("notifications", MvRegValue::Bool(true)); }); // Array of maps tx.in_array("users", |users_tx| { users_tx.insert_map(0, |user_tx| { user_tx.write_register("name", MvRegValue::String("Alice".to_string())); user_tx.write_register("role", MvRegValue::String("admin".to_string())); }); users_tx.insert_map(1, |user_tx| { user_tx.write_register("name", MvRegValue::String("Bob".to_string())); user_tx.write_register("role", MvRegValue::String("user".to_string())); }); }); // Deeply nested: map -> array -> map -> array tx.in_map("projects", |projects_tx| { projects_tx.in_array("active", |active_tx| { active_tx.insert_map(0, |project_tx| { project_tx .write_register("name", MvRegValue::String("Website Redesign".to_string())); project_tx.in_array("tasks", |tasks_tx| { tasks_tx .insert_register(0, MvRegValue::String("Design mockups".to_string())); tasks_tx.insert_register( 1, MvRegValue::String("Implement frontend".to_string()), ); tasks_tx.insert_register(2, MvRegValue::String("Deploy".to_string())); }); }); }); }); let _delta = tx.commit(); } println!("Created nested structure!"); println!(" - Simple register: app_name"); println!(" - Nested map: settings.theme, settings.notifications"); println!(" - Array of maps: users[0..1]"); println!(" - 4-level nesting: projects.active[0].tasks[0..2]"); // No callbacks, no manual context management. // The same simple API at every level. 
println!("\n✓ Nested transactions eliminate callback hell!"); }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/examples/transaction_conflicts.rs
examples/transaction_conflicts.rs
use dson::{ CausalDotStore, Identifier, OrMap, crdts::{mvreg::MvRegValue, snapshot::ToValue}, transaction::CrdtValue, }; fn main() { // Create two replicas let mut replica_a = CausalDotStore::<OrMap<String>>::default(); let mut replica_b = CausalDotStore::<OrMap<String>>::default(); let id_a = Identifier::new(0, 0); let id_b = Identifier::new(1, 0); // Replica A writes a string value let delta_a = { let mut tx = replica_a.transact(id_a); tx.write_register("data", MvRegValue::String("text value".to_string())); tx.commit() }; // Replica B concurrently writes a map at the same key let delta_b = { let mut tx = replica_b.transact(id_b); tx.in_map("data", |data_tx| { data_tx.write_register("count", MvRegValue::U64(42)); }); tx.commit() }; // Both replicas receive each other's deltas replica_a.join_or_replace_with(delta_b.0.store, &delta_b.0.context); replica_b.join_or_replace_with(delta_a.0.store, &delta_a.0.context); // Both replicas should converge to the same state assert_eq!(replica_a, replica_b); // Inspect the type conflict on replica A { let tx = replica_a.transact(id_a); match tx.get(&"data".to_string()) { Some(CrdtValue::Conflicted(conflicts)) => { let has_register = conflicts.has_register(); let has_map = conflicts.has_map(); let conflict_count = conflicts.conflict_count(); println!("Type conflict detected!"); println!(" Has register: {has_register}"); println!(" Has map: {has_map}"); println!(" Total conflicts: {conflict_count}"); // Application can access both values if let Some(reg) = conflicts.register() { if let Ok(MvRegValue::String(s)) = reg.value() { println!(" Register value: {s}"); } } if let Some(_map) = conflicts.map() { println!(" Map value is present"); } println!("\nThe transaction API makes conflicts explicit!"); println!("Your application can decide how to resolve them."); } _ => println!("Expected a type conflict"), } } }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false
helsing-ai/dson
https://github.com/helsing-ai/dson/blob/eef903f2120ed8415e5f9d1792d10c72c387f533/examples/conflicts.rs
examples/conflicts.rs
// (c) Copyright 2025 Helsing GmbH. All rights reserved. //! This example demonstrates how dson handles concurrent edits and resolves conflicts. //! We simulate two replicas of a user profile, make conflicting changes to the same fields, //! and then merge them to observe the final, converged state. use dson::{ CausalDotStore, Identifier, MvReg, OrMap, crdts::{ NoExtensionTypes, Value, mvreg::MvRegValue, snapshot::{AllValues, ToValue}, }, sentinel::DummySentinel, }; use std::error::Error; // The data model for our user profile is a map with string keys. // - "name": A Multi-Value Register (MvReg) for the user's name. Concurrent writes will be preserved as conflicts. // - "tags": An Observed-Remove Array (OrArray) of MvReg<String> for tags. // - "settings": A nested Observed-Remove Map (OrMap) for user settings. fn main() -> Result<(), Box<dyn Error>> { // SETUP: TWO REPLICAS // We create two replicas, A and B, each with a unique identifier. // Both start with an empty CausalDotStore, which will hold our OrMap-based user profile. let replica_a_id = Identifier::new(0, 0); let mut replica_a_state = CausalDotStore::<OrMap<String>>::default(); let replica_b_id = Identifier::new(1, 0); let mut replica_b_state = CausalDotStore::<OrMap<String>>::default(); // INITIAL STATE on Replica A println!("1. Replica A creates an initial user profile."); // We create a "user" map and set the "name" field to "Alice". // This operation generates a delta (`delta_a1`) representing the change. let delta_a1 = dson::api::map::apply_to_map::<_, NoExtensionTypes, _>( |map, ctx, id| { // Set name in the user map dson::api::map::apply_to_register( |reg, ctx, id| reg.write("Alice".to_string().into(), ctx, id), "name".to_string(), )(map, ctx, id) }, "user".to_string(), )( &replica_a_state.store, &replica_a_state.context, replica_a_id, ); // Apply the delta to Replica A's state. 
replica_a_state.join_or_replace_with(delta_a1.store.clone(), &delta_a1.context); // SYNC: REPLICA B GETS INITIAL STATE println!("2. Replica B syncs with Replica A."); // Replica B applies the delta from Replica A to get the initial state. // After this, both replicas are in sync. replica_b_state.join_or_replace_with(delta_a1.store, &delta_a1.context); assert_eq!(replica_a_state, replica_b_state); println!(" Initial state synced: {replica_a_state:?}"); // CONCURRENT EDITS println!("\n3. Replicas A and B make concurrent edits without syncing."); // On Replica A: Change the name to "Alice B." and add a "rust" tag. // These changes are based on the initial state. let delta_a2 = dson::api::map::apply_to_map::<_, NoExtensionTypes, _>( |map, ctx, id| { // 1. Change the name let map_after_name_change = dson::api::map::apply_to_register( |reg, ctx, id| reg.write("Alice B.".to_string().into(), ctx, id), "name".to_string(), )(map, ctx, id); // 2. Add a tag to the 'tags' array let map_after_tag_add = dson::api::map::apply_to_array( |array, ctx, id| { dson::api::array::insert( // Each element in the array is a register for the tag string |ctx, _id| { MvReg::default() .write("rust".to_string().into(), ctx, id) .map_store(Value::Register) }, array.len(), // Insert at the end )(array, ctx, id) }, "tags".to_string(), )( &map_after_name_change.store, &map_after_name_change.context, id, ); // Join the two operations into a single delta map_after_name_change .join(map_after_tag_add, &mut DummySentinel) .expect("DummySentinel is infallible") }, "user".to_string(), )( &replica_a_state.store, &replica_a_state.context, replica_a_id, ); // Apply the changes locally to Replica A. replica_a_state.join_or_replace_with(delta_a2.store.clone(), &delta_a2.context); println!(" Replica A: Changed name to 'Alice B.', added 'rust' tag."); // On Replica B: Change name to "Alice C." (a direct conflict with Replica A's change), // add a "crdt" tag, and add a new "dark_mode" setting. 
let delta_b1 = dson::api::map::apply_to_map::<_, NoExtensionTypes, _>( |map, ctx, id| { // 1. Change the name, creating a conflict with Replica A's edit. let map_after_name_change = dson::api::map::apply_to_register( |reg, ctx, id| reg.write("Alice C.".to_string().into(), ctx, id), "name".to_string(), )(map, ctx, id); // 2. Add a "crdt" tag. let map_after_tag_add = dson::api::map::apply_to_array( |array, ctx, id| { dson::api::array::insert( |ctx, id| { MvReg::default() .write("crdt".to_string().into(), ctx, id) .map_store(Value::Register) }, array.len(), )(array, ctx, id) }, "tags".to_string(), )( &map_after_name_change.store, &map_after_name_change.context, id, ); // Join the name and tag changes let delta_with_name_and_tag = map_after_name_change .join(map_after_tag_add, &mut DummySentinel) .expect("DummySentinel is infallible"); // 3. Add a "dark_mode" setting in a nested map. let delta_with_settings = dson::api::map::apply_to_map( |settings_map, ctx, id| { dson::api::map::apply_to_register( |reg, ctx, id| reg.write(true.into(), ctx, id), "dark_mode".to_string(), )(settings_map, ctx, id) }, "settings".to_string(), )( &delta_with_name_and_tag.store, &delta_with_name_and_tag.context, id, ); // Join all changes for Replica B into a final delta. delta_with_name_and_tag .join(delta_with_settings, &mut DummySentinel) .expect("DummySentinel is infallible") }, "user".to_string(), )( &replica_b_state.store, &replica_b_state.context, replica_b_id, ); // Apply the changes locally to Replica B. replica_b_state.join_or_replace_with(delta_b1.store.clone(), &delta_b1.context); println!(" Replica B: Changed name to 'Alice C.', added 'crdt' tag, enabled dark_mode."); // MERGE println!("\n4. Merging the concurrent changes."); // Replica A merges the delta from Replica B. replica_a_state.join_or_replace_with(delta_b1.store, &delta_b1.context); // Replica B merges the delta from Replica A. 
replica_b_state.join_or_replace_with(delta_a2.store, &delta_a2.context); // After merging, both replicas should have an identical state, demonstrating convergence. // VERIFICATION println!("\n5. Verifying the converged state."); assert_eq!(replica_a_state, replica_b_state); println!(" Replicas have converged to the same state."); println!(" Final state: {replica_a_state:?}"); // Now, let's inspect the converged data structure to see how conflicts were handled. let user_profile = replica_a_state .store .get("user") .expect("key 'user' should be present"); // --- Verify Name Conflict --- // The concurrent writes to the "name" field result in a conflict. // The MvReg preserves both values. The application can then decide how to resolve this. let name_values = user_profile .map .get("name") .unwrap() .reg .values() .into_iter() .cloned() .collect::<Vec<_>>(); assert_eq!( name_values.len(), 2, "Name should have two conflicting values" ); assert!(name_values.contains(&MvRegValue::String("Alice B.".to_string()))); assert!(name_values.contains(&MvRegValue::String("Alice C.".to_string()))); println!(" SUCCESS: Name field correctly shows conflicting values: {name_values:?}"); // --- Verify Tags Array --- // The 'tags' array should contain both "rust" and "crdt", as they were added concurrently. let tags = user_profile .map .get("tags") .expect("key 'tags' should be present"); let tag_values = tags .array .values() .iter() .map(|v| { let AllValues::Register(r) = v else { unreachable!() }; // No conflicts are expected within the tags themselves. 
assert_eq!(r.len(), 1); let MvRegValue::String(s) = r.get(0).unwrap() else { unreachable!() }; s.to_owned() }) .collect::<Vec<_>>(); assert_eq!(tag_values.len(), 2, "Tags array should have two elements"); assert!(tag_values.contains(&"rust".to_string())); assert!(tag_values.contains(&"crdt".to_string())); println!(" SUCCESS: Tags array correctly contains: {tag_values:?}"); // --- Verify Settings Map --- // The 'settings' map was only modified by Replica B, so it should exist with the 'dark_mode' key. let settings = user_profile .map .get("settings") .expect("key 'settings' should be present"); let dark_mode = settings .map .get("dark_mode") .expect("key 'dark_mode' should be present") .reg .value() // We expect a single value since there were no concurrent edits. .expect("should be no conflict in dark_mode setting"); assert_eq!(*dark_mode, MvRegValue::Bool(true)); println!(" SUCCESS: Settings map correctly contains: dark_mode -> {dark_mode:?}"); Ok(()) }
rust
Apache-2.0
eef903f2120ed8415e5f9d1792d10c72c387f533
2026-01-04T20:19:34.202571Z
false