repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/codegen/mod.rs | wincode/codegen/mod.rs | //! Codegen for the `wincode` crate.
use std::{
env,
fs::File,
io::{BufWriter, Error, Result},
path::{Path, PathBuf},
};
mod tuple;
/// Generate tuple implementations for `SchemaWrite` and `SchemaRead`.
fn generate_tuples(out_dir: &Path) -> Result<()> {
    // Emit the generated impls to `$OUT_DIR/tuples.rs`, buffered to avoid
    // a syscall per write.
    let path = out_dir.join("tuples.rs");
    let mut writer = BufWriter::new(File::create(path)?);
    // Generate impls for tuples up to arity 16.
    tuple::generate(16, &mut writer)
}
pub(crate) fn generate() -> Result<()> {
    // OUT_DIR is provided by cargo during the build; fail loudly if absent.
    let out_dir = env::var_os("OUT_DIR").ok_or_else(|| Error::other("OUT_DIR not set"))?;
    generate_tuples(&PathBuf::from(out_dir))
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/serde.rs | wincode/src/serde.rs | #[cfg(feature = "alloc")]
use alloc::vec::Vec;
use {
crate::{
error::{ReadResult, WriteResult},
io::{Reader, Writer},
schema::{SchemaRead, SchemaWrite},
SchemaReadOwned,
},
core::mem::MaybeUninit,
};
/// Helper over [`SchemaRead`] that automatically constructs a reader
/// and initializes a destination.
///
/// # Examples
///
/// Using containers (indirect deserialization):
/// ```
/// # #[cfg(feature = "alloc")] {
/// # use wincode::{Deserialize, containers};
/// let vec: Vec<u8> = vec![1, 2, 3];
/// let bytes = wincode::serialize(&vec).unwrap();
/// type Dst = containers::Vec<u8>;
/// let deserialized = Dst::deserialize(&bytes).unwrap();
/// assert_eq!(vec, deserialized);
/// # }
/// ```
///
/// Using direct deserialization (`T::Dst = T`):
/// ```
/// # #[cfg(feature = "alloc")] {
/// let vec: Vec<u8> = vec![1, 2, 3];
/// let bytes = wincode::serialize(&vec).unwrap();
/// let deserialized: Vec<u8> = wincode::deserialize(&bytes).unwrap();
/// assert_eq!(vec, deserialized);
/// # }
/// ```
pub trait Deserialize<'de>: SchemaRead<'de> {
    /// Deserialize `bytes` into a new `Self::Dst`.
    ///
    /// The byte slice itself acts as the reader, so deserialized values may
    /// borrow from `src` for the `'de` lifetime.
    #[inline(always)]
    fn deserialize(mut src: &'de [u8]) -> ReadResult<Self::Dst> {
        <Self as SchemaRead<'de>>::get(&mut src)
    }
    /// Deserialize `bytes` into `target`.
    ///
    /// Delegates to [`SchemaRead::read`], initializing `dst` in place
    /// rather than returning a new value.
    #[inline]
    fn deserialize_into(mut src: &'de [u8], dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        <Self as SchemaRead<'de>>::read(&mut src, dst)
    }
}
// Blanket impl: every `SchemaRead` type gets the `Deserialize` helpers for free.
impl<'de, T> Deserialize<'de> for T where T: SchemaRead<'de> {}
/// A variant of [`Deserialize`] for types that can be deserialized without borrowing from the reader.
pub trait DeserializeOwned: SchemaReadOwned {
    /// Deserialize from the given [`Reader`] into a new `Self::Dst`.
    ///
    /// Because `Self: SchemaReadOwned`, the result does not borrow from
    /// the reader, so any `'de` lifetime works here.
    #[inline(always)]
    fn deserialize_from<'de>(
        src: &mut impl Reader<'de>,
    ) -> ReadResult<<Self as SchemaRead<'de>>::Dst> {
        <Self as SchemaRead<'de>>::get(src)
    }
    /// Deserialize from the given [`Reader`] into `dst`.
    ///
    /// In-place variant of [`DeserializeOwned::deserialize_from`].
    #[inline]
    fn deserialize_from_into<'de>(
        src: &mut impl Reader<'de>,
        dst: &mut MaybeUninit<<Self as SchemaRead<'de>>::Dst>,
    ) -> ReadResult<()> {
        <Self as SchemaRead<'de>>::read(src, dst)
    }
}
// Blanket impl: every `SchemaReadOwned` type gets the `DeserializeOwned` helpers for free.
impl<T> DeserializeOwned for T where T: SchemaReadOwned {}
/// Helper over [`SchemaWrite`] that automatically constructs a writer
/// and serializes a source.
///
/// # Examples
///
/// Using containers (indirect serialization):
/// ```
/// # #[cfg(feature = "alloc")] {
/// # use wincode::{Serialize, containers};
/// let vec: Vec<u8> = vec![1, 2, 3];
/// type Src = containers::Vec<u8>;
/// let bytes = Src::serialize(&vec).unwrap();
/// let deserialized: Vec<u8> = wincode::deserialize(&bytes).unwrap();
/// assert_eq!(vec, deserialized);
/// # }
/// ```
///
/// Using direct serialization (`T::Src = T`):
/// ```
/// # #[cfg(feature = "alloc")] {
/// let vec: Vec<u8> = vec![1, 2, 3];
/// let bytes = wincode::serialize(&vec).unwrap();
/// let deserialized: Vec<u8> = wincode::deserialize(&bytes).unwrap();
/// assert_eq!(vec, deserialized);
/// # }
/// ```
pub trait Serialize: SchemaWrite {
    /// Serialize a serializable type into a `Vec` of bytes.
    ///
    /// Sizes the buffer up front via `size_of`, then writes directly into
    /// the vector's uninitialized spare capacity — no intermediate staging
    /// buffer is used.
    #[cfg(feature = "alloc")]
    fn serialize(src: &Self::Src) -> WriteResult<Vec<u8>> {
        let capacity = Self::size_of(src)?;
        let mut buffer = Vec::with_capacity(capacity);
        let mut writer = buffer.spare_capacity_mut();
        Self::serialize_into(&mut writer, src)?;
        // `writer` is the unconsumed tail of the spare capacity, so the
        // number of bytes actually written is `capacity - writer.len()`.
        let len = writer.len();
        // SAFETY: `serialize_into` initialized exactly `capacity - len`
        // bytes at the start of the spare capacity, and `len <= capacity`
        // since the writer only consumes from that spare capacity, so the
        // new length covers only initialized bytes.
        unsafe {
            #[allow(clippy::arithmetic_side_effects)]
            buffer.set_len(capacity - len);
        }
        Ok(buffer)
    }
    /// Serialize a serializable type into the given byte buffer.
    ///
    /// Writes the value, then calls [`Writer::finish`] to let the writer
    /// run any finalization it requires.
    #[inline]
    fn serialize_into(dst: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
        <Self as SchemaWrite>::write(dst, src)?;
        dst.finish()?;
        Ok(())
    }
    /// Get the size in bytes of the type when serialized.
    ///
    /// `usize -> u64` widening; lossless on supported targets.
    #[inline]
    fn serialized_size(src: &Self::Src) -> WriteResult<u64> {
        Self::size_of(src).map(|size| size as u64)
    }
}
// Blanket impl: every `SchemaWrite` type gets the `Serialize` helpers for free.
impl<T> Serialize for T where T: SchemaWrite + ?Sized {}
/// Deserialize a type from the given bytes.
///
/// This is a "simplified" version of [`Deserialize::deserialize`] that
/// requires the `T::Dst` to be `T`. In other words, a schema type
/// that deserializes to itself.
///
/// This helper exists to match the expected signature of `serde`'s
/// `Deserialize`, where types that implement `Deserialize` deserialize
/// into themselves. This will be true of a large number of schema types,
/// but won't be, for example, for specialized container structures.
///
/// # Examples
///
/// ```
/// # #[cfg(feature = "alloc")] {
/// let vec: Vec<u8> = vec![1, 2, 3];
/// let bytes = wincode::serialize(&vec).unwrap();
/// let deserialized: Vec<u8> = wincode::deserialize(&bytes).unwrap();
/// assert_eq!(vec, deserialized);
/// # }
/// ```
#[inline(always)]
pub fn deserialize<'de, T>(src: &'de [u8]) -> ReadResult<T>
where
T: SchemaRead<'de, Dst = T>,
{
T::deserialize(src)
}
/// Deserialize a type from the given bytes, with the ability
/// to form mutable references for types that are [`ZeroCopy`](crate::ZeroCopy).
/// This can allow mutating the serialized data in place.
///
/// # Examples
///
/// ## Zero-copy types
/// ```
/// # #[cfg(all(feature = "alloc", feature = "derive"))] {
/// # use wincode::{SchemaWrite, SchemaRead};
/// # #[derive(Debug, PartialEq, Eq)]
/// #[derive(SchemaWrite, SchemaRead)]
/// #[repr(C)]
/// struct Data {
/// bytes: [u8; 7],
/// the_answer: u8,
/// }
///
/// let data = Data { bytes: [0; 7], the_answer: 0 };
///
/// let mut serialized = wincode::serialize(&data).unwrap();
/// let data_mut: &mut Data = wincode::deserialize_mut(&mut serialized).unwrap();
/// data_mut.bytes = *b"wincode";
/// data_mut.the_answer = 42;
///
/// let deserialized: Data = wincode::deserialize(&serialized).unwrap();
/// assert_eq!(deserialized, Data { bytes: *b"wincode", the_answer: 42 });
/// # }
/// ```
///
/// ## Mutable zero-copy members
/// ```
/// # #[cfg(all(feature = "alloc", feature = "derive"))] {
/// # use wincode::{SchemaWrite, SchemaRead};
/// # #[derive(Debug, PartialEq, Eq)]
/// #[derive(SchemaWrite, SchemaRead)]
/// struct Data {
/// bytes: [u8; 7],
/// the_answer: u8,
/// }
/// # #[derive(Debug, PartialEq, Eq)]
/// #[derive(SchemaRead)]
/// struct DataMut<'a> {
/// bytes: &'a mut [u8; 7],
/// the_answer: u8,
/// }
///
/// let data = Data { bytes: [0; 7], the_answer: 42 };
///
/// let mut serialized = wincode::serialize(&data).unwrap();
/// let data_mut: DataMut<'_> = wincode::deserialize_mut(&mut serialized).unwrap();
/// *data_mut.bytes = *b"wincode";
///
/// let deserialized: Data = wincode::deserialize(&serialized).unwrap();
/// assert_eq!(deserialized, Data { bytes: *b"wincode", the_answer: 42 });
/// # }
/// ```
#[inline(always)]
pub fn deserialize_mut<'de, T>(mut src: &'de mut [u8]) -> ReadResult<T>
where
    T: SchemaRead<'de, Dst = T>,
{
    // A `&mut [u8]` reader lets zero-copy schemas hand out mutable
    // borrows into the serialized bytes.
    T::get(&mut src)
}
/// Deserialize a type from the given bytes into the given target.
///
/// Like [`deserialize`], but allows the caller to provide their own reader.
///
/// Because not all readers will support zero-copy deserialization, this function
/// requires [`SchemaReadOwned`] instead of [`SchemaRead`]. If you are deserializing
/// from raw bytes, always prefer [`deserialize`] for maximum flexibility.
#[inline]
pub fn deserialize_from<'de, T>(src: &mut impl Reader<'de>) -> ReadResult<T>
where
T: SchemaReadOwned<Dst = T>,
{
T::deserialize_from(src)
}
/// Serialize a type into a `Vec` of bytes.
///
/// This is a "simplified" version of [`Serialize::serialize`] that
/// requires the `T::Src` to be `T`. In other words, a schema type
/// that serializes to itself.
///
/// This helper exists to match the expected signature of `serde`'s
/// `Serialize`, where types that implement `Serialize` serialize
/// themselves. This will be true of a large number of schema types,
/// but won't be, for example, for specialized container structures.
///
/// # Examples
///
/// ```
/// let vec: Vec<u8> = vec![1, 2, 3];
/// let bytes = wincode::serialize(&vec).unwrap();
/// ```
#[inline(always)]
#[cfg(feature = "alloc")]
pub fn serialize<T>(src: &T) -> WriteResult<Vec<u8>>
where
    T: SchemaWrite<Src = T> + ?Sized,
{
    // Delegate to the blanket-implemented `Serialize` helper; the
    // `Src = T` bound guarantees the schema serializes itself.
    <T as Serialize>::serialize(src)
}
/// Serialize a type into the given writer.
///
/// Like [`serialize`], but allows the caller to provide their own writer.
#[inline]
pub fn serialize_into<T>(dst: &mut impl Writer, src: &T) -> WriteResult<()>
where
T: SchemaWrite<Src = T> + ?Sized,
{
T::serialize_into(dst, src)
}
/// Get the size in bytes of the type when serialized.
#[inline(always)]
pub fn serialized_size<T>(src: &T) -> WriteResult<u64>
where
T: SchemaWrite<Src = T> + ?Sized,
{
T::serialized_size(src)
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/lib.rs | wincode/src/lib.rs | //! wincode is a fast, bincode‑compatible serializer/deserializer focused on in‑place
//! initialization and direct memory writes.
//!
//! In short, `wincode` operates over traits that facilitate direct writes of memory
//! into final destinations (including heap-allocated buffers) without intermediate
//! staging buffers.
//!
//! # Quickstart
//!
//! `wincode` traits are implemented for many built-in types (like `Vec`, integers, etc.).
//!
//! You'll most likely want to start by using `wincode` on your own struct types, which can be
//! done easily with the derive macros.
//!
//! ```
//! # #[cfg(all(feature = "alloc", feature = "derive"))] {
//! # use serde::{Serialize, Deserialize};
//! # use wincode_derive::{SchemaWrite, SchemaRead};
//! # #[derive(Serialize, Deserialize, PartialEq, Eq, Debug)]
//! #
//! #[derive(SchemaWrite, SchemaRead)]
//! struct MyStruct {
//! data: Vec<u8>,
//! win: bool,
//! }
//!
//! let val = MyStruct { data: vec![1,2,3], win: true };
//! assert_eq!(wincode::serialize(&val).unwrap(), bincode::serialize(&val).unwrap());
//! # }
//! ```
//!
//! # Motivation
//!
//! Typical Rust API design employs a *construct-then-move* style of programming.
//! Common APIs like `Vec::push`, iterator adaptors, `Box::new` (and its `Rc`/`Arc`
//! variants), and even returning a fully-initialized struct from a function all
//! follow this pattern. While this style feels intuitive and ergonomic, it
//! inherently entails copying unless the compiler can perform elision -- which,
//! today, it generally cannot. To see why this is a consequence of the design,
//! consider the following code:
//! ```
//! # struct MyStruct;
//! # impl MyStruct {
//! # fn new() -> Self {
//! # MyStruct
//! # }
//! # }
//! Box::new(MyStruct::new());
//! ```
//! `MyStruct` must be constructed *before* it can be moved into `Box`'s allocation.
//! This is a classic code ordering problem: to avoid the copy, `Box::new` needs
//! to execute code before `MyStruct::new()` runs. `Vec::push`, iterator collection,
//! and similar APIs have this same problem.
//! (See these [design meeting notes](https://hackmd.io/XXuVXH46T8StJB_y0urnYg) or
//! the
//! [`placement-by-return` RFC](https://github.com/PoignardAzur/rust-rfcs/blob/placement-by-return/text/0000-placement-by-return.md)
//! for a more in-depth discussion on this topic.) The result of this is that even
//! performance conscious developers routinely introduce avoidable copying without
//! realizing it. `serde` inherits these issues since it neither attempts to
//! initialize in‑place nor exposes APIs to do so.
//!
//! These patterns are not inherent limitations of Rust, but are consequences of
//! conventions and APIs that do not consider in-place initialization as part of
//! their design. The tools for in-place construction *do* exist (see
//! [`MaybeUninit`](core::mem::MaybeUninit) and raw pointer APIs), but they are
//! rarely surfaced in libraries and can be cumbersome to use (see [`addr_of_mut!`](core::ptr::addr_of_mut)),
//! so programmers are often not even aware of them or avoid them.
//!
//! `wincode` makes in-place initialization a first class design goal, and fundamentally
//! operates on [traits](#traits) that facilitate direct writes of memory.
//!
//! # Adapting foreign types
//!
//! `wincode` can also be used to implement serialization/deserialization
//! on foreign types, where serialization/deserialization schemes on those types are unoptimized (and
//! out of your control as a foreign type). For example, consider the following struct,
//! defined outside of your crate:
//! ```
//! use serde::{Serialize, Deserialize};
//!
//! # #[derive(PartialEq, Eq, Debug)]
//! #[repr(transparent)]
//! #[derive(Clone, Copy, Serialize, Deserialize)]
//! struct Address([u8; 32]);
//!
//! # #[derive(PartialEq, Eq, Debug)]
//! #[repr(transparent)]
//! #[derive(Clone, Copy, Serialize, Deserialize)]
//! struct Hash([u8; 32]);
//!
//! #[derive(Serialize, Deserialize)]
//! pub struct A {
//! pub addresses: Vec<Address>,
//! pub hash: Hash,
//! }
//! ```
//!
//! `serde`'s default, naive, implementation will perform per-element visitation of all bytes
//! in `Vec<Address>` and `Hash`. Because these fields are "plain old data", ideally we would
//! avoid per-element visitation entirely and read / write these fields in a single pass.
//! The situation worsens if this struct needs to be written into a heap allocated data structure,
//! like a `Vec<A>` or `Box<[A]>`. As discussed in [motivation](#motivation), all
//! those bytes will be initialized on the stack before being copied into the heap allocation.
//!
//! `wincode` can solve this with the following:
//! ```
//! # #[cfg(all(feature = "alloc", feature = "derive"))] {
//! # use wincode::{Serialize as _, Deserialize as _, containers::{self, Pod}};
//! # use wincode_derive::{SchemaWrite, SchemaRead};
//! mod foreign_crate {
//! // Defined in some foreign crate...
//! use serde::{Serialize, Deserialize};
//!
//! # #[derive(PartialEq, Eq, Debug)]
//! #[repr(transparent)]
//! #[derive(Clone, Copy, Serialize, Deserialize)]
//! pub struct Address(pub [u8; 32]);
//!
//! # #[derive(PartialEq, Eq, Debug)]
//! #[repr(transparent)]
//! #[derive(Clone, Copy, Serialize, Deserialize)]
//! pub struct Hash(pub [u8; 32]);
//!
//! # #[derive(PartialEq, Eq, Debug)]
//! #[derive(Serialize, Deserialize)]
//! pub struct A {
//! pub addresses: Vec<Address>,
//! pub hash: Hash,
//! }
//! }
//!
//! #[derive(SchemaWrite, SchemaRead)]
//! #[wincode(from = "foreign_crate::A")]
//! pub struct MyA {
//! addresses: Vec<Pod<foreign_crate::Address>>,
//! hash: Pod<foreign_crate::Hash>,
//! }
//!
//! let val = foreign_crate::A {
//! addresses: vec![foreign_crate::Address([0; 32]), foreign_crate::Address([1; 32])],
//! hash: foreign_crate::Hash([0; 32]),
//! };
//! let bincode_serialize = bincode::serialize(&val).unwrap();
//! let wincode_serialize = MyA::serialize(&val).unwrap();
//! assert_eq!(bincode_serialize, wincode_serialize);
//!
//! let bincode_deserialize: foreign_crate::A = bincode::deserialize(&bincode_serialize).unwrap();
//! let wincode_deserialize = MyA::deserialize(&bincode_serialize).unwrap();
//! assert_eq!(val, bincode_deserialize);
//! assert_eq!(val, wincode_deserialize);
//! # }
//! ```
//!
//! Now, when deserializing `A`:
//! - All initialization is done in-place, including heap-allocated memory
//! (true of all supported contiguous heap-allocated structures in `wincode`).
//! - Byte fields are read and written in a single pass.
//!
//! # Compatibility
//!
//! - Produces the same bytes as `bincode` for the covered shapes when using bincode's
//! default configuration, provided your [`SchemaWrite`] and [`SchemaRead`] schemas and
//! [`containers`] match the layout implied by your `serde` types.
//! - Length encodings are pluggable via [`SeqLen`](len::SeqLen).
//!
//! # Zero-copy deserialization
//!
//! `wincode`'s zero-copy deserialization is built on the following primitives:
//! - [`u8`]
//! - [`i8`]
//!
//! In addition to the following on little endian targets:
//! - [`u16`], [`i16`], [`u32`], [`i32`], [`u64`], [`i64`], [`u128`], [`i128`], [`f32`], [`f64`]
//!
//! Types with alignment greater than 1 can force the compiler to insert padding into your structs.
//! Zero-copy requires padding-free layouts; if the layout has implicit padding, `wincode` will not
//! qualify the type as zero-copy.
//!
//! ---
//!
//! Within `wincode`, any type that is composed entirely of the above primitives is
//! eligible for zero-copy deserialization. This includes arrays, slices, and structs.
//!
//! Structs deriving [`SchemaRead`] are eligible for zero-copy deserialization
//! as long as they are composed entirely of the above zero-copy types, are annotated with
//! `#[repr(transparent)]` or `#[repr(C)]`, and have no implicit padding. Use appropriate
//! field ordering or add explicit padding fields if needed to eliminate implicit padding.
//!
//! Note that tuples are **not** eligible for zero-copy deserialization, as Rust does not
//! currently guarantee tuple layout.
//!
//! ## Field reordering
//! If your struct has implicit padding, you may be able to reorder fields to avoid it.
//!
//! ```
//! #[repr(C)]
//! struct HasPadding {
//! a: u8,
//! b: u32,
//! c: u16,
//! d: u8,
//! }
//!
//! #[repr(C)]
//! struct ZeroPadding {
//! b: u32,
//! c: u16,
//! a: u8,
//! d: u8,
//! }
//! ```
//!
//! ## Explicit padding
//! You may need to add an explicit padding field if reordering fields cannot yield
//! a padding-free layout.
//!
//! ```
//! #[repr(C)]
//! struct HasPadding {
//! a: u32,
//! b: u16,
//! _pad: [u8; 2],
//! }
//! ```
//!
//! ## Examples
//!
//! ### `&[u8]`
//! ```
//! # #[cfg(all(feature = "alloc", feature = "derive"))] {
//! use wincode::{SchemaWrite, SchemaRead};
//!
//! # #[derive(Debug, PartialEq, Eq)]
//! #[derive(SchemaWrite, SchemaRead)]
//! struct ByteRef<'a> {
//! bytes: &'a [u8],
//! }
//!
//! let bytes: Vec<u8> = vec![1, 2, 3, 4, 5];
//! let byte_ref = ByteRef { bytes: &bytes };
//! let serialized = wincode::serialize(&byte_ref).unwrap();
//! let deserialized: ByteRef<'_> = wincode::deserialize(&serialized).unwrap();
//! assert_eq!(byte_ref, deserialized);
//! # }
//! ```
//!
//! ### struct newtype
//! ```
//! # #[cfg(all(feature = "alloc", feature = "derive"))] {
//! # use rand::random;
//! # use std::array;
//! use wincode::{SchemaWrite, SchemaRead};
//!
//! # #[derive(Debug, PartialEq, Eq)]
//! #[derive(SchemaWrite, SchemaRead)]
//! #[repr(transparent)]
//! struct Signature([u8; 64]);
//!
//! # #[derive(Debug, PartialEq, Eq)]
//! #[derive(SchemaWrite, SchemaRead)]
//! struct Data<'a> {
//! signature: &'a Signature,
//! data: &'a [u8],
//! }
//!
//! let signature = Signature(array::from_fn(|_| random()));
//! let data = Data {
//! signature: &signature,
//! data: &[1, 2, 3, 4, 5],
//! };
//! let serialized = wincode::serialize(&data).unwrap();
//! let deserialized: Data<'_> = wincode::deserialize(&serialized).unwrap();
//! assert_eq!(data, deserialized);
//! # }
//! ```
//!
//! ### `&[u8; N]`
//! ```
//! # #[cfg(all(feature = "alloc", feature = "derive"))] {
//! use wincode::{SchemaWrite, SchemaRead};
//!
//! # #[derive(Debug, PartialEq, Eq)]
//! #[derive(SchemaWrite, SchemaRead)]
//! struct HeaderRef<'a> {
//! magic: &'a [u8; 7],
//! }
//!
//! let header = HeaderRef { magic: b"W1NC0D3" };
//! let serialized = wincode::serialize(&header).unwrap();
//! let deserialized: HeaderRef<'_> = wincode::deserialize(&serialized).unwrap();
//! assert_eq!(header, deserialized);
//! # }
//! ```
//!
//! ## In-place mutation
//!
//! wincode supports in-place mutation of zero-copy types.
//! See [`deserialize_mut`] or [`ZeroCopy::from_bytes_mut`] for more details.
//!
//! ## `ZeroCopy` methods
//!
//! The [`ZeroCopy`] trait provides some convenience methods for
//! working with zero-copy types.
//!
//! See [`ZeroCopy::from_bytes`] and [`ZeroCopy::from_bytes_mut`] for more details.
//!
//! # Derive attributes
//!
//! ## Top level
//! |Attribute|Type|Default|Description
//! |---|---|---|---|
//! |`from`|`Type`|`None`|Indicates that type is a mapping from another type (example in previous section)|
//! |`no_suppress_unused`|`bool`|`false`|Disable unused field lints suppression. Only usable on structs with `from`.|
//! |`struct_extensions`|`bool`|`false`|Generates placement initialization helpers on `SchemaRead` struct implementations|
//! |`tag_encoding`|`Type`|`None`|Specifies the encoding/decoding schema to use for the variant discriminant. Only usable on enums.|
//!
//! ### `no_suppress_unused`
//!
//! When creating a mapping type with `#[wincode(from = "AnotherType")]`, fields are typically
//! comprised of [`containers`] (of course not strictly always true). As a result, these structs
//! purely exist for the compiler to generate optimized implementations, and are never actually
//! constructed. As a result, unused field lints will be triggered, which can be annoying.
//! By default, when `from` is used, the derive macro will generate dummy function that references all
//! the struct fields, which suppresses those lints. This function will ultimately be compiled out of your
//! build, but you can disable this by setting `no_suppress_unused` to `true`. You can also avoid
//! these lint errors with visibility modifiers (e.g., `pub`).
//!
//! Note that this only works on structs, as it is not possible to construct an arbitrary enum variant.
//!
//! ### `tag_encoding`
//!
//! Allows specifying the encoding/decoding schema to use for the variant discriminant. Only usable on enums.
//!
//! <div class="warning">
//! There is no bincode analog to this attribute.
//! Specifying this attribute will make your enum incompatible with bincode's default enum encoding.
//! If you need strict bincode compatibility, you should implement a custom <code>Deserialize</code> and
//! <code>Serialize</code> impl for your enum on the serde / bincode side.
//! </div>
//!
//! Example:
//! ```
//! # #[cfg(all(feature = "derive", feature = "alloc"))] {
//! use wincode::{SchemaWrite, SchemaRead};
//!
//! # #[derive(Debug, PartialEq, Eq)]
//! #[derive(SchemaWrite, SchemaRead)]
//! #[wincode(tag_encoding = "u8")]
//! enum Enum {
//! A,
//! B,
//! C,
//! }
//!
//! assert_eq!(&wincode::serialize(&Enum::B).unwrap(), &1u8.to_le_bytes());
//! # }
//! ```
//!
//! ### `struct_extensions`
//!
//! You may have some exotic serialization logic that requires you to implement `SchemaRead` manually
//! for a type. In these scenarios, you'll likely want to leverage some additional helper methods
//! to reduce the amount of boilerplate that is typically required when dealing with uninitialized
//! fields.
//!
//! `#[wincode(struct_extensions)]` generates a corresponding uninit builder struct for the type.
//! The name of the builder struct is the name of the type with `UninitBuilder` appended.
//! E.g., `Header` -> `HeaderUninitBuilder`.
//!
//! The builder has automatic initialization tracking that does bookkeeping of which fields have been initialized.
//! Calling `write_<field_name>` or `read_<field_name>`, for example, will mark the field as
//! initialized so that it's properly dropped if the builder is dropped on error or panic.
//!
//! The builder struct has the following methods:
//! - `from_maybe_uninit_mut`
//! - Creates a new builder from a mutable `MaybeUninit` reference to the type.
//! - `into_assume_init_mut`
//! - Assumes the builder is fully initialized, drops it, and returns a mutable reference to the inner type.
//! - `finish`
//! - Forgets the builder, disabling the drop logic.
//! - `is_init`
//! - Checks if the builder is fully initialized by checking if all field initialization bits are set.
//!
//! For each field, the builder struct provides the following methods:
//! - `uninit_<field_name>_mut`
//! - Gets a mutable `MaybeUninit` projection to the `<field_name>` slot.
//! - `read_<field_name>`
//! - Reads into a `MaybeUninit`'s `<field_name>` slot from the given [`Reader`](io::Reader).
//! - `write_<field_name>`
//! - Writes a `MaybeUninit`'s `<field_name>` slot with the given value.
//! - `init_<field_name>_with`
//! - Initializes the `<field_name>` slot with a given initializer function.
//! - `assume_init_<field_name>`
//! - Marks the `<field_name>` slot as initialized.
//!
//! #### Safety
//!
//! Correct code will call `finish` or `into_assume_init_mut` once all fields have been initialized.
//! Failing to do so will result in the initialized fields being dropped when the builder is dropped, which
//! is undefined behavior if the `MaybeUninit` is later assumed to be initialized (e.g., on successful deserialization).
//!
//! #### Example
//!
//! ```
//! # #[cfg(all(feature = "alloc", feature = "derive"))] {
//! # use wincode::{SchemaRead, SchemaWrite, io::Reader, error::ReadResult};
//! # use serde::{Serialize, Deserialize};
//! # use core::mem::MaybeUninit;
//! # #[derive(Debug, PartialEq, Eq)]
//! #[derive(SchemaRead, SchemaWrite)]
//! #[wincode(struct_extensions)]
//! struct Header {
//! num_required_signatures: u8,
//! num_signed_accounts: u8,
//! num_unsigned_accounts: u8,
//! }
//!
//! # #[derive(Debug, PartialEq, Eq)]
//! #[derive(SchemaRead, SchemaWrite)]
//! #[wincode(struct_extensions)]
//! struct Payload {
//! header: Header,
//! data: Vec<u8>,
//! }
//!
//! # #[derive(Debug, PartialEq, Eq)]
//! #[derive(SchemaWrite)]
//! struct Message {
//! payload: Payload,
//! }
//!
//! // Assume for some reason we have to manually implement `SchemaRead` for `Message`.
//! impl<'de> SchemaRead<'de> for Message {
//! type Dst = Message;
//!
//! fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
//! // Normally we have to do a big ugly cast like this
//! // to get a mutable `MaybeUninit<Payload>`.
//! let payload = unsafe {
//! &mut *(&raw mut (*dst.as_mut_ptr()).payload).cast::<MaybeUninit<Payload>>()
//! };
//! // Note that the order matters here. Values are dropped in reverse
//! // declaration order, and we need to ensure `header_builder` is dropped
//! // before `payload_builder` in the event of an error or panic.
//! let mut payload_builder = PayloadUninitBuilder::from_maybe_uninit_mut(payload);
//! unsafe {
//! // payload.header will be marked as initialized if the function succeeds.
//! payload_builder.init_header_with(|header| {
//! // Read directly into the projected MaybeUninit<Header> slot.
//! let mut header_builder = HeaderUninitBuilder::from_maybe_uninit_mut(header);
//! header_builder.read_num_required_signatures(reader)?;
//! header_builder.read_num_signed_accounts(reader)?;
//! header_builder.read_num_unsigned_accounts(reader)?;
//! header_builder.finish();
//! Ok(())
//! })?;
//! }
//! // Alternatively, we could have done `payload_builder.read_header(reader)?;`
//! // rather than reading all the fields individually.
//! payload_builder.read_data(reader)?;
//! // Message is fully initialized, so we forget the builders
//! // to avoid dropping the initialized fields.
//! payload_builder.finish();
//! Ok(())
//! }
//! }
//!
//! let msg = Message {
//! payload: Payload {
//! header: Header {
//! num_required_signatures: 1,
//! num_signed_accounts: 2,
//! num_unsigned_accounts: 3
//! },
//! data: vec![4, 5, 6, 7, 8, 9]
//! }
//! };
//! let serialized = wincode::serialize(&msg).unwrap();
//! let deserialized = wincode::deserialize(&serialized).unwrap();
//! assert_eq!(msg, deserialized);
//! # }
//! ```
//!
//! ## Field level
//! |Attribute|Type|Default|Description
//! |---|---|---|---|
//! |`with`|`Type`|`None`|Overrides the default `SchemaRead` or `SchemaWrite` implementation for the field.|
//!
//! ## Variant level (enum variants)
//! |Attribute|Type|Default|Description
//! |---|---|---|---|
//! |`tag`|`Expr`|`None`|Specifies the discriminant expression for the variant. Only usable on enums.|
//!
//! ### `tag`
//!
//! Specifies the discriminant expression for the variant. Only usable on enums.
//!
//! <div class="warning">
//! There is no bincode analog to this attribute.
//! Specifying this attribute will make your enum incompatible with bincode's default enum encoding.
//! If you need strict bincode compatibility, you should implement a custom <code>Deserialize</code> and
//! <code>Serialize</code> impl for your enum on the serde / bincode side.
//! </div>
//!
//! Example:
//! ```
//! # #[cfg(all(feature = "derive", feature = "alloc"))] {
//! use wincode::{SchemaWrite, SchemaRead};
//!
//! #[derive(SchemaWrite, SchemaRead)]
//! enum Enum {
//! #[wincode(tag = 5)]
//! A,
//! #[wincode(tag = 8)]
//! B,
//! #[wincode(tag = 13)]
//! C,
//! }
//!
//! assert_eq!(&wincode::serialize(&Enum::A).unwrap(), &5u32.to_le_bytes());
//! # }
//! ```
#![cfg_attr(docsrs, feature(doc_cfg))]
#![cfg_attr(not(feature = "std"), no_std)]
#[cfg(feature = "alloc")]
extern crate alloc;
pub mod error;
pub use error::{Error, ReadError, ReadResult, Result, WriteError, WriteResult};
pub mod io;
pub mod len;
mod schema;
pub use schema::*;
mod serde;
pub use serde::*;
#[cfg(test)]
mod proptest_config;
#[cfg(feature = "derive")]
pub use wincode_derive::*;
// Include tuple impls.
include!(concat!(env!("OUT_DIR"), "/tuples.rs"));
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/error.rs | wincode/src/error.rs | //! Error types and helpers.
use {crate::io, core::str::Utf8Error, thiserror::Error};
/// Top-level error type, unifying the write (serialize) and
/// read (deserialize) error variants.
#[derive(Error, Debug)]
pub enum Error {
    /// An error produced during serialization.
    #[error(transparent)]
    WriteError(#[from] WriteError),
    /// An error produced during deserialization.
    #[error(transparent)]
    ReadError(#[from] ReadError),
}
/// Errors that can occur while serializing.
#[derive(Error, Debug)]
pub enum WriteError {
    /// Underlying writer I/O failure.
    #[error(transparent)]
    Io(#[from] io::WriteError),
    /// Input string data was not valid UTF-8.
    #[error(transparent)]
    InvalidUtf8Encoding(#[from] Utf8Error),
    /// The sequence length does not fit in the configured length encoding.
    #[error("Sequence length would overflow length encoding scheme: {0}")]
    LengthEncodingOverflow(&'static str),
    /// Catch-all for user/schema-defined failures.
    #[error("Custom error: {0}")]
    Custom(&'static str),
}
/// Errors that can occur while deserializing.
#[derive(Error, Debug)]
pub enum ReadError {
    /// Underlying reader I/O failure.
    #[error(transparent)]
    Io(#[from] io::ReadError),
    /// Encoded string data was not valid UTF-8.
    #[error(transparent)]
    InvalidUtf8Encoding(#[from] Utf8Error),
    /// An encoded integer could not be converted to `usize`.
    #[error("Could not cast integer type to pointer sized type")]
    PointerSizedReadError,
    /// Guard against maliciously large length prefixes causing OOM.
    #[error(
        "Encoded sequence length exceeded preallocation limit of {limit} bytes (needed {needed} \
         bytes)"
    )]
    PreallocationSizeLimit { needed: usize, limit: usize },
    /// Enum discriminant did not match any known variant.
    #[error("Invalid tag encoding: {0}")]
    InvalidTagEncoding(usize),
    /// A bool byte was neither of the expected encodings.
    #[error("Invalid bool encoding: {0}")]
    InvalidBoolEncoding(u8),
    /// The sequence length does not fit in the configured length encoding.
    #[error("Sequence length would overflow length encoding scheme: {0}")]
    LengthEncodingOverflow(&'static str),
    /// Invalid leading byte while decoding a `char`.
    #[error("Invalid char lead: {0}")]
    InvalidCharLead(u8),
    /// Catch-all for user/schema-defined failures.
    #[error("Custom error: {0}")]
    Custom(&'static str),
    /// A zero-copy cast was rejected due to misalignment.
    #[error("Zero-copy read would be unaligned")]
    UnalignedPointerRead,
}
/// Result alias for APIs that can fail with either read or write errors.
pub type Result<T> = core::result::Result<T, Error>;
/// Result alias for serialization paths.
pub type WriteResult<T> = core::result::Result<T, WriteError>;
/// Result alias for deserialization paths.
pub type ReadResult<T> = core::result::Result<T, ReadError>;
// The constructors below are `#[cold]` so the compiler keeps
// error-construction code out of the hot (success) path.
/// Construct a [`ReadError::UnalignedPointerRead`].
#[cold]
pub const fn unaligned_pointer_read() -> ReadError {
    ReadError::UnalignedPointerRead
}
/// Construct a [`ReadError::PreallocationSizeLimit`].
#[cold]
pub const fn preallocation_size_limit(needed: usize, limit: usize) -> ReadError {
    ReadError::PreallocationSizeLimit { needed, limit }
}
/// Construct a [`ReadError::LengthEncodingOverflow`].
#[cold]
pub const fn read_length_encoding_overflow(max_length: &'static str) -> ReadError {
    ReadError::LengthEncodingOverflow(max_length)
}
/// Construct a [`WriteError::LengthEncodingOverflow`].
#[cold]
pub const fn write_length_encoding_overflow(max_length: &'static str) -> WriteError {
    WriteError::LengthEncodingOverflow(max_length)
}
/// Construct a [`ReadError::PointerSizedReadError`].
#[cold]
pub const fn pointer_sized_decode_error() -> ReadError {
    ReadError::PointerSizedReadError
}
/// Construct a [`ReadError::InvalidBoolEncoding`].
#[cold]
pub const fn invalid_bool_encoding(byte: u8) -> ReadError {
    ReadError::InvalidBoolEncoding(byte)
}
/// Construct a [`ReadError::InvalidTagEncoding`].
#[cold]
pub const fn invalid_tag_encoding(tag: usize) -> ReadError {
    ReadError::InvalidTagEncoding(tag)
}
/// Construct a [`ReadError::InvalidUtf8Encoding`].
#[cold]
pub const fn invalid_utf8_encoding(error: Utf8Error) -> ReadError {
    ReadError::InvalidUtf8Encoding(error)
}
/// Construct a [`ReadError::InvalidCharLead`].
#[cold]
pub const fn invalid_char_lead(val: u8) -> ReadError {
    ReadError::InvalidCharLead(val)
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/proptest_config.rs | wincode/src/proptest_config.rs | use proptest::test_runner::Config;
/// Configuration for proptest tests.
///
/// Disable FS I/O in Miri.
pub(crate) fn proptest_cfg() -> Config {
#[cfg(miri)]
{
Config {
failure_persistence: None,
// Avoid excruciatingly long test times in Miri.
cases: 5,
..Config::default()
}
}
#[cfg(not(miri))]
{
Config::default()
}
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/len.rs | wincode/src/len.rs | //! Support for heterogenous sequence length encoding.
use crate::{
error::{pointer_sized_decode_error, preallocation_size_limit, ReadResult, WriteResult},
io::{Reader, Writer},
schema::{SchemaRead, SchemaWrite},
};
/// Behavior to support heterogenous sequence length encoding.
///
/// It is possible for sequences to have different length encoding schemes.
/// This trait abstracts over that possibility, allowing users to specify
/// the length encoding scheme for a sequence.
pub trait SeqLen {
/// Read the length of a sequence from the reader, where
/// `T` is the type of the sequence elements. This can be used to
/// enforce size constraints for preallocations.
///
/// May return an error if some length condition is not met
/// (e.g., size constraints, overflow, etc.).
fn read<'de, T>(reader: &mut impl Reader<'de>) -> ReadResult<usize>;
/// Write the length of a sequence to the writer.
fn write(writer: &mut impl Writer, len: usize) -> WriteResult<()>;
/// Calculate the number of bytes needed to write the given length.
///
/// Useful for variable length encoding schemes.
fn write_bytes_needed(len: usize) -> WriteResult<usize>;
}
const DEFAULT_BINCODE_LEN_MAX_SIZE: usize = 4 << 20; // 4 MiB
/// [`SeqLen`] implementation for bincode's default fixint encoding.
///
/// The `MAX_SIZE` constant is a limit on the maximum preallocation size
/// (in bytes) for heap allocated structures. This is a safety precaution
/// against malicious input causing OOM. The default is 4 MiB. Users are
/// free to override this limit by passing a different constant or by
/// implementing their own `SeqLen` implementation.
pub struct BincodeLen<const MAX_SIZE: usize = DEFAULT_BINCODE_LEN_MAX_SIZE>;
impl<const MAX_SIZE: usize> SeqLen for BincodeLen<MAX_SIZE> {
#[inline(always)]
fn read<'de, T>(reader: &mut impl Reader<'de>) -> ReadResult<usize> {
// Bincode's default fixint encoding writes lengths as `u64`.
let len = u64::get(reader)
.and_then(|len| usize::try_from(len).map_err(|_| pointer_sized_decode_error()))?;
let needed = len
.checked_mul(size_of::<T>())
.ok_or_else(|| preallocation_size_limit(usize::MAX, MAX_SIZE))?;
if needed > MAX_SIZE {
return Err(preallocation_size_limit(needed, MAX_SIZE));
}
Ok(len)
}
#[inline(always)]
fn write(writer: &mut impl Writer, len: usize) -> WriteResult<()> {
u64::write(writer, &(len as u64))
}
#[inline(always)]
fn write_bytes_needed(_len: usize) -> WriteResult<usize> {
Ok(size_of::<u64>())
}
}
#[cfg(feature = "solana-short-vec")]
pub mod short_vec {
use {
super::*,
crate::error::{read_length_encoding_overflow, write_length_encoding_overflow},
core::{
mem::{transmute, MaybeUninit},
ptr,
},
solana_short_vec::{decode_shortu16_len, ShortU16},
};
impl<'de> SchemaRead<'de> for ShortU16 {
type Dst = Self;
fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
let Ok((len, read)) = decode_shortu16_len(reader.fill_buf(3)?) else {
return Err(read_length_encoding_overflow("u16::MAX"));
};
// SAFETY: `read` is the number of bytes visited by `decode_shortu16_len` to decode the length,
// which implies the reader had at least `read` bytes available.
unsafe { reader.consume_unchecked(read) };
// SAFETY: `dst` is a valid pointer to a `MaybeUninit<ShortU16>`.
let slot = unsafe { &mut *(&raw mut (*dst.as_mut_ptr()).0).cast::<MaybeUninit<u16>>() };
// SAFETY: `len` is always a valid u16. `decode_shortu16_len` casts it to a usize before returning,
// so no risk of overflow.
slot.write(len as u16);
Ok(())
}
}
impl SchemaWrite for ShortU16 {
type Src = Self;
fn size_of(src: &Self::Src) -> WriteResult<usize> {
Ok(short_u16_bytes_needed(src.0))
}
fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
let val = src.0;
let needed = short_u16_bytes_needed(val);
let mut buf = [MaybeUninit::<u8>::uninit(); 3];
// SAFETY: short_u16 uses a maximum of 3 bytes, so the buffer is always large enough.
unsafe { encode_short_u16(buf.as_mut_ptr().cast::<u8>(), needed, val) };
// SAFETY: encode_short_u16 writes exactly `needed` bytes.
let buf =
unsafe { transmute::<&[MaybeUninit<u8>], &[u8]>(buf.get_unchecked(..needed)) };
writer.write(buf)?;
Ok(())
}
}
pub type ShortU16Len = ShortU16;
/// Branchless computation of the number of bytes needed to encode a short u16.
///
/// See [`solana_short_vec::ShortU16`] for more details.
#[inline(always)]
#[allow(clippy::arithmetic_side_effects)]
fn short_u16_bytes_needed(len: u16) -> usize {
1 + (len >= 0x80) as usize + (len >= 0x4000) as usize
}
#[inline(always)]
fn try_short_u16_bytes_needed<T: TryInto<u16>>(len: T) -> WriteResult<usize> {
match len.try_into() {
Ok(len) => Ok(short_u16_bytes_needed(len)),
Err(_) => Err(write_length_encoding_overflow("u16::MAX")),
}
}
/// Encode a short u16 into the given buffer.
///
/// See [`solana_short_vec::ShortU16`] for more details.
///
/// # Safety
///
/// - `dst` must be a valid for writes.
/// - `dst` must be valid for `needed` bytes.
#[inline(always)]
unsafe fn encode_short_u16(dst: *mut u8, needed: usize, len: u16) {
// From `solana_short_vec`:
//
// u16 serialized with 1 to 3 bytes. If the value is above
// 0x7f, the top bit is set and the remaining value is stored in the next
// bytes. Each byte follows the same pattern until the 3rd byte. The 3rd
// byte may only have the 2 least-significant bits set, otherwise the encoded
// value will overflow the u16.
match needed {
1 => ptr::write(dst, len as u8),
2 => {
ptr::write(dst, ((len & 0x7f) as u8) | 0x80);
ptr::write(dst.add(1), (len >> 7) as u8);
}
3 => {
ptr::write(dst, ((len & 0x7f) as u8) | 0x80);
ptr::write(dst.add(1), (((len >> 7) & 0x7f) as u8) | 0x80);
ptr::write(dst.add(2), (len >> 14) as u8);
}
_ => unreachable!(),
}
}
impl SeqLen for ShortU16Len {
#[inline(always)]
fn read<'de, T>(reader: &mut impl Reader<'de>) -> ReadResult<usize> {
let Ok((len, read)) = decode_shortu16_len(reader.fill_buf(3)?) else {
return Err(read_length_encoding_overflow("u16::MAX"));
};
unsafe { reader.consume_unchecked(read) };
Ok(len)
}
#[inline(always)]
fn write(writer: &mut impl Writer, len: usize) -> WriteResult<()> {
if len > u16::MAX as usize {
return Err(write_length_encoding_overflow("u16::MAX"));
}
<ShortU16 as SchemaWrite>::write(writer, &ShortU16(len as u16))
}
#[inline(always)]
fn write_bytes_needed(len: usize) -> WriteResult<usize> {
try_short_u16_bytes_needed(len)
}
}
#[cfg(all(test, feature = "alloc", feature = "derive"))]
mod tests {
use {
super::*,
crate::{
containers::{self, Pod},
proptest_config::proptest_cfg,
},
alloc::vec::Vec,
proptest::prelude::*,
solana_short_vec::ShortU16,
wincode_derive::{SchemaRead, SchemaWrite},
};
fn our_short_u16_encode(len: u16) -> Vec<u8> {
let needed = short_u16_bytes_needed(len);
let mut buf = Vec::with_capacity(needed);
unsafe {
encode_short_u16(buf.as_mut_ptr(), needed, len);
buf.set_len(needed);
}
buf
}
#[derive(
serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq, SchemaWrite, SchemaRead,
)]
#[wincode(internal)]
struct ShortVecStruct {
#[serde(with = "solana_short_vec")]
#[wincode(with = "containers::Vec<Pod<u8>, ShortU16Len>")]
bytes: Vec<u8>,
#[serde(with = "solana_short_vec")]
#[wincode(with = "containers::Vec<Pod<[u8; 32]>, ShortU16Len>")]
ar: Vec<[u8; 32]>,
}
#[derive(SchemaWrite, SchemaRead, serde::Serialize, serde::Deserialize)]
#[wincode(internal)]
struct ShortVecAsSchema {
short_u16: ShortU16,
}
fn strat_short_vec_struct() -> impl Strategy<Value = ShortVecStruct> {
(
proptest::collection::vec(any::<u8>(), 0..=100),
proptest::collection::vec(any::<[u8; 32]>(), 0..=16),
)
.prop_map(|(bytes, ar)| ShortVecStruct { bytes, ar })
}
proptest! {
#![proptest_config(proptest_cfg())]
#[test]
fn encode_u16_equivalence(len in 0..=u16::MAX) {
let our = our_short_u16_encode(len);
let bincode = bincode::serialize(&ShortU16(len)).unwrap();
prop_assert_eq!(our, bincode);
}
#[test]
fn test_short_vec_struct(short_vec_struct in strat_short_vec_struct()) {
let bincode_serialized = bincode::serialize(&short_vec_struct).unwrap();
let schema_serialized = crate::serialize(&short_vec_struct).unwrap();
prop_assert_eq!(&bincode_serialized, &schema_serialized);
let bincode_deserialized: ShortVecStruct = bincode::deserialize(&bincode_serialized).unwrap();
let schema_deserialized: ShortVecStruct = crate::deserialize(&schema_serialized).unwrap();
prop_assert_eq!(&short_vec_struct, &bincode_deserialized);
prop_assert_eq!(short_vec_struct, schema_deserialized);
}
#[test]
fn test_short_vec_as_schema(sv in any::<u16>()) {
let val = ShortVecAsSchema { short_u16: ShortU16(sv) };
let bincode_serialized = bincode::serialize(&val).unwrap();
let wincode_serialized = crate::serialize(&val).unwrap();
prop_assert_eq!(&bincode_serialized, &wincode_serialized);
let bincode_deserialized: ShortVecAsSchema = bincode::deserialize(&bincode_serialized).unwrap();
let wincode_deserialized: ShortVecAsSchema = crate::deserialize(&wincode_serialized).unwrap();
prop_assert_eq!(val.short_u16.0, bincode_deserialized.short_u16.0);
prop_assert_eq!(val.short_u16.0, wincode_deserialized.short_u16.0);
}
}
}
}
#[cfg(feature = "solana-short-vec")]
pub use short_vec::*;
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/io/slice.rs | wincode/src/io/slice.rs | use {super::*, core::marker::PhantomData};
/// Helpers for trusted slice operations.
pub(super) mod trusted_slice {
use super::*;
#[inline]
pub(super) fn fill_buf(bytes: &[u8], n_bytes: usize) -> &[u8] {
unsafe { bytes.get_unchecked(..n_bytes.min(bytes.len())) }
}
#[inline]
pub(super) fn fill_exact(bytes: &[u8], n_bytes: usize) -> &[u8] {
unsafe { bytes.get_unchecked(..n_bytes) }
}
#[inline]
pub(super) unsafe fn consume_unchecked(bytes: &mut &[u8], amt: usize) {
*bytes = unsafe { bytes.get_unchecked(amt..) };
}
#[inline]
pub(super) fn consume(bytes: &mut &[u8], amt: usize) {
unsafe { consume_unchecked(bytes, amt) };
}
/// Get a slice of `len` bytes for writing, advancing the writer by `len` bytes.
#[inline]
pub(super) fn get_slice_mut<'a>(
buffer: &mut &'a mut [MaybeUninit<u8>],
len: usize,
) -> &'a mut [MaybeUninit<u8>] {
let (dst, rest) = unsafe { mem::take(buffer).split_at_mut_unchecked(len) };
*buffer = rest;
dst
}
}
/// In-memory [`Reader`] that does not perform bounds checking, with zero-copy support.
///
/// Generally this should not be constructed directly, but rather by calling [`Reader::as_trusted_for`]
/// on a trusted [`Reader`]. This will ensure that the safety invariants are upheld.
///
/// # Safety
///
/// - The inner buffer must have sufficient capacity for all reads. It is UB if this is not upheld.
pub struct TrustedSliceReaderZeroCopy<'a> {
cursor: &'a [u8],
}
impl<'a> TrustedSliceReaderZeroCopy<'a> {
pub(super) const fn new(bytes: &'a [u8]) -> Self {
Self { cursor: bytes }
}
}
impl<'a> Reader<'a> for TrustedSliceReaderZeroCopy<'a> {
type Trusted<'b>
= Self
where
Self: 'b;
#[inline]
fn fill_buf(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
Ok(trusted_slice::fill_buf(self.cursor, n_bytes))
}
#[inline]
fn fill_exact(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
Ok(trusted_slice::fill_exact(self.cursor, n_bytes))
}
#[inline]
fn borrow_exact(&mut self, len: usize) -> ReadResult<&'a [u8]> {
let (src, rest) = unsafe { self.cursor.split_at_unchecked(len) };
self.cursor = rest;
Ok(src)
}
#[inline]
unsafe fn consume_unchecked(&mut self, amt: usize) {
trusted_slice::consume_unchecked(&mut self.cursor, amt);
}
#[inline]
fn consume(&mut self, amt: usize) -> ReadResult<()> {
trusted_slice::consume(&mut self.cursor, amt);
Ok(())
}
#[inline]
unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> ReadResult<Self::Trusted<'_>> {
Ok(TrustedSliceReaderZeroCopy::new(self.borrow_exact(n_bytes)?))
}
}
/// In-memory [`Reader`] for mutable slices that does not perform bounds checking,
/// with zero-copy support.
///
/// # Safety
///
/// - The inner buffer must have sufficient capacity for all reads. It is UB if this is not upheld.
pub struct TrustedSliceReaderZeroCopyMut<'a> {
cursor: &'a mut [u8],
}
impl<'a> TrustedSliceReaderZeroCopyMut<'a> {
pub(super) const fn new(bytes: &'a mut [u8]) -> Self {
Self { cursor: bytes }
}
}
impl<'a> Reader<'a> for TrustedSliceReaderZeroCopyMut<'a> {
type Trusted<'b>
= Self
where
Self: 'b;
#[inline]
fn fill_buf(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
Ok(trusted_slice::fill_buf(self.cursor, n_bytes))
}
#[inline]
fn fill_exact(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
Ok(trusted_slice::fill_exact(self.cursor, n_bytes))
}
#[inline]
fn borrow_exact_mut(&mut self, len: usize) -> ReadResult<&'a mut [u8]> {
let (src, rest) = unsafe { mem::take(&mut self.cursor).split_at_mut_unchecked(len) };
self.cursor = rest;
Ok(src)
}
#[inline]
unsafe fn consume_unchecked(&mut self, amt: usize) {
self.cursor = unsafe { mem::take(&mut self.cursor).get_unchecked_mut(amt..) };
}
#[inline]
fn consume(&mut self, amt: usize) -> ReadResult<()> {
unsafe { Self::consume_unchecked(self, amt) };
Ok(())
}
#[inline]
unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> ReadResult<Self::Trusted<'_>> {
Ok(TrustedSliceReaderZeroCopyMut::new(
self.borrow_exact_mut(n_bytes)?,
))
}
}
/// In-memory [`Reader`] that does not perform bounds checking.
///
/// Generally this should not be constructed directly, but rather by calling [`Reader::as_trusted_for`]
/// on a trusted [`Reader`]. This will ensure that the safety invariants are upheld.
///
/// Use [`TrustedSliceReaderZeroCopy`] for zero-copy support.
///
/// # Safety
///
/// - The inner buffer must have sufficient capacity for all reads. It is UB if this is not upheld.
pub struct TrustedSliceReader<'a, 'b> {
cursor: &'b [u8],
_marker: PhantomData<&'a ()>,
}
impl<'a, 'b> TrustedSliceReader<'a, 'b> {
pub(super) const fn new(bytes: &'b [u8]) -> Self {
Self {
cursor: bytes,
_marker: PhantomData,
}
}
}
impl<'a, 'b> Reader<'a> for TrustedSliceReader<'a, 'b> {
type Trusted<'c>
= Self
where
Self: 'c;
#[inline]
fn fill_buf(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
Ok(trusted_slice::fill_buf(self.cursor, n_bytes))
}
#[inline]
fn fill_exact(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
Ok(trusted_slice::fill_exact(self.cursor, n_bytes))
}
#[inline]
unsafe fn consume_unchecked(&mut self, amt: usize) {
trusted_slice::consume_unchecked(&mut self.cursor, amt);
}
#[inline]
fn consume(&mut self, amt: usize) -> ReadResult<()> {
trusted_slice::consume(&mut self.cursor, amt);
Ok(())
}
#[inline]
unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> ReadResult<Self::Trusted<'_>> {
let (src, rest) = unsafe { self.cursor.split_at_unchecked(n_bytes) };
self.cursor = rest;
Ok(TrustedSliceReader::new(src))
}
}
impl<'a> Reader<'a> for &'a [u8] {
type Trusted<'b>
= TrustedSliceReaderZeroCopy<'a>
where
Self: 'b;
#[inline]
fn fill_buf(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
// SAFETY: we clamp the end bound to the length of the slice.
Ok(unsafe { self.get_unchecked(..n_bytes.min(self.len())) })
}
#[inline]
fn fill_exact(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
let Some(src) = self.get(..n_bytes) else {
return Err(read_size_limit(n_bytes));
};
Ok(src)
}
#[inline]
fn borrow_exact(&mut self, len: usize) -> ReadResult<&'a [u8]> {
let Some((src, rest)) = self.split_at_checked(len) else {
return Err(read_size_limit(len));
};
*self = rest;
Ok(src)
}
#[inline]
unsafe fn consume_unchecked(&mut self, amt: usize) {
*self = unsafe { self.get_unchecked(amt..) };
}
#[inline]
fn consume(&mut self, amt: usize) -> ReadResult<()> {
if self.len() < amt {
return Err(read_size_limit(amt));
}
// SAFETY: we just checked that self.len() >= amt.
unsafe { self.consume_unchecked(amt) };
Ok(())
}
#[inline]
unsafe fn as_trusted_for(&mut self, n: usize) -> ReadResult<Self::Trusted<'_>> {
Ok(TrustedSliceReaderZeroCopy::new(self.borrow_exact(n)?))
}
}
impl<'a> Reader<'a> for &'a mut [u8] {
type Trusted<'b>
= TrustedSliceReaderZeroCopyMut<'a>
where
Self: 'b;
#[inline]
fn fill_buf(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
// SAFETY: we clamp the end bound to the length of the slice.
Ok(unsafe { self.get_unchecked(..n_bytes.min(self.len())) })
}
#[inline]
fn fill_exact(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
let Some(src) = self.get(..n_bytes) else {
return Err(read_size_limit(n_bytes));
};
Ok(src)
}
#[inline]
fn borrow_exact_mut(&mut self, len: usize) -> ReadResult<&'a mut [u8]> {
let Some((src, rest)) = mem::take(self).split_at_mut_checked(len) else {
return Err(read_size_limit(len));
};
*self = rest;
Ok(src)
}
#[inline]
unsafe fn consume_unchecked(&mut self, amt: usize) {
*self = unsafe { mem::take(self).get_unchecked_mut(amt..) };
}
#[inline]
fn consume(&mut self, amt: usize) -> ReadResult<()> {
if self.len() < amt {
return Err(read_size_limit(amt));
}
// SAFETY: we just checked that self.len() >= amt.
unsafe { self.consume_unchecked(amt) };
Ok(())
}
#[inline]
unsafe fn as_trusted_for(&mut self, n: usize) -> ReadResult<Self::Trusted<'_>> {
Ok(TrustedSliceReaderZeroCopyMut::new(
self.borrow_exact_mut(n)?,
))
}
}
/// In-memory [`Writer`] that does not perform bounds checking.
///
/// Generally this should not be constructed directly, but rather by calling [`Writer::as_trusted_for`]
/// on a trusted [`Writer`]. This will ensure that the safety invariants are upheld.
///
/// # Safety
///
/// - The inner buffer must have sufficient capacity for all writes. It is UB if this is not upheld.
pub struct TrustedSliceWriter<'a> {
buffer: &'a mut [MaybeUninit<u8>],
}
#[cfg(test)]
impl core::ops::Deref for TrustedSliceWriter<'_> {
type Target = [MaybeUninit<u8>];
fn deref(&self) -> &Self::Target {
self.buffer
}
}
impl<'a> TrustedSliceWriter<'a> {
#[inline(always)]
pub(super) const fn new(buffer: &'a mut [MaybeUninit<u8>]) -> Self {
Self { buffer }
}
}
impl<'a> Writer for TrustedSliceWriter<'a> {
type Trusted<'b>
= TrustedSliceWriter<'b>
where
Self: 'b;
#[inline]
fn write(&mut self, src: &[u8]) -> WriteResult<()> {
let dst = trusted_slice::get_slice_mut(&mut self.buffer, src.len());
unsafe { ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr().cast(), src.len()) };
Ok(())
}
#[inline]
unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
Ok(TrustedSliceWriter::new(trusted_slice::get_slice_mut(
&mut self.buffer,
n_bytes,
)))
}
}
/// Get a slice of `len` bytes for writing, advancing the writer by `len` bytes, or
/// returning an error if the input slice does not have at least `len` bytes remaining.
#[inline]
fn advance_slice_mut_checked<'a, T>(
input: &mut &'a mut [T],
len: usize,
) -> WriteResult<&'a mut [T]> {
let Some((dst, rest)) = mem::take(input).split_at_mut_checked(len) else {
return Err(write_size_limit(len));
};
*input = rest;
Ok(dst)
}
/// Get a slice of `len` bytes for writing returning an error if the input slice does not have
/// at least `len` bytes remaining.
#[inline]
fn get_slice_mut_checked<T>(input: &mut [T], len: usize) -> WriteResult<&'_ mut [T]> {
let Some((dst, _)) = input.split_at_mut_checked(len) else {
return Err(write_size_limit(len));
};
Ok(dst)
}
impl Writer for &mut [MaybeUninit<u8>] {
type Trusted<'b>
= TrustedSliceWriter<'b>
where
Self: 'b;
#[inline]
fn write(&mut self, src: &[u8]) -> WriteResult<()> {
let dst = advance_slice_mut_checked(self, src.len())?;
unsafe { ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr().cast(), src.len()) };
Ok(())
}
#[inline]
unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
Ok(TrustedSliceWriter::new(advance_slice_mut_checked(
self, n_bytes,
)?))
}
}
impl Writer for [MaybeUninit<u8>] {
type Trusted<'b>
= TrustedSliceWriter<'b>
where
Self: 'b;
#[inline]
fn write(&mut self, src: &[u8]) -> WriteResult<()> {
let dst = get_slice_mut_checked(self, src.len())?;
unsafe { ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr().cast(), src.len()) };
Ok(())
}
#[inline]
unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
Ok(TrustedSliceWriter::new(get_slice_mut_checked(
self, n_bytes,
)?))
}
}
impl Writer for &mut [u8] {
type Trusted<'b>
= TrustedSliceWriter<'b>
where
Self: 'b;
#[inline]
fn write(&mut self, src: &[u8]) -> WriteResult<()> {
let dst = advance_slice_mut_checked(self, src.len())?;
// Avoid the bounds check of `copy_from_slice` by using `copy_nonoverlapping`,
// since we already bounds check in `get_slice_mut`.
unsafe { ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len()) };
Ok(())
}
#[inline]
unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
let buf = advance_slice_mut_checked(self, n_bytes)?;
// SAFETY: we just created a slice of `n_bytes` initialized bytes, so casting to
// `&mut [MaybeUninit<u8>]` is safe.
let buf = unsafe { transmute::<&mut [u8], &mut [MaybeUninit<u8>]>(buf) };
Ok(TrustedSliceWriter::new(buf))
}
}
impl Writer for [u8] {
type Trusted<'b>
= TrustedSliceWriter<'b>
where
Self: 'b;
#[inline]
fn write(&mut self, src: &[u8]) -> WriteResult<()> {
let dst = get_slice_mut_checked(self, src.len())?;
// Avoid the bounds check of `copy_from_slice` by using `copy_nonoverlapping`,
// since we already bounds check in `get_slice_mut`.
unsafe { ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr(), src.len()) };
Ok(())
}
#[inline]
unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
let buf = get_slice_mut_checked(self, n_bytes)?;
// SAFETY: we just created a slice of `n_bytes` initialized bytes, so casting to
// `&mut [MaybeUninit<u8>]` is safe.
let buf = unsafe { transmute::<&mut [u8], &mut [MaybeUninit<u8>]>(buf) };
Ok(TrustedSliceWriter::new(buf))
}
}
#[cfg(all(test, feature = "alloc"))]
mod tests {
#![allow(clippy::arithmetic_side_effects)]
use {super::*, crate::proptest_config::proptest_cfg, alloc::vec::Vec, proptest::prelude::*};
#[test]
fn test_reader_peek() {
let mut reader = b"hello" as &[u8];
assert!(matches!(reader.peek(), Ok(&b'h')));
}
#[test]
fn test_reader_peek_empty() {
let mut reader = b"" as &[u8];
assert!(matches!(reader.peek(), Err(ReadError::ReadSizeLimit(1))));
}
/// Execute the given block with supported readers.
macro_rules! with_readers {
($bytes:expr, |$reader:ident| $body:block) => {{
{
let mut $reader = $bytes.as_slice();
$body
}
{
let mut $reader = TrustedSliceReaderZeroCopy::new($bytes);
$body
}
{
let mut $reader = Cursor::new($bytes);
$body
}
}};
}
/// Execute the given block with readers that will bounds check (and thus not panic).
macro_rules! with_untrusted_readers {
($bytes:expr, |$reader:ident| $body:block) => {{
{
let mut $reader = $bytes.as_slice();
$body
}
}};
}
/// Execute the given block with slice reference writer and trusted slice writer for the given buffer.
macro_rules! with_writers {
($buffer:expr, |$writer:ident| $body:block) => {{
{
let $writer = &mut $buffer.spare_capacity_mut();
$body
$buffer.clear();
}
{
let mut $writer = TrustedSliceWriter::new($buffer.spare_capacity_mut());
$body
$buffer.clear();
}
{
let _capacity = $buffer.capacity();
$buffer.resize(_capacity, 0);
let $writer = &mut $buffer.as_mut_slice();
$body
$buffer.clear();
}
}};
}
// Execute the given block with slice writer of the given preallocated buffer.
macro_rules! with_known_len_writers {
($buffer:expr, |$writer:ident| $body_write:block, $body_check:expr) => {{
let capacity = $buffer.capacity();
{
$buffer.resize(capacity, 0);
$buffer.fill(0);
let $writer = $buffer.as_mut_slice();
$body_write
$body_check;
$buffer.clear();
}
{
$buffer.fill(0);
$buffer.clear();
let $writer = $buffer.spare_capacity_mut();
$body_write
unsafe { $buffer.set_len(capacity) }
$body_check;
}
}};
}
proptest! {
#![proptest_config(proptest_cfg())]
#[test]
fn test_reader_copy_into_slice(bytes in any::<Vec<u8>>()) {
with_readers!(&bytes, |reader| {
let mut vec = Vec::with_capacity(bytes.len());
let half = bytes.len() / 2;
let dst = vec.spare_capacity_mut();
reader.copy_into_slice(&mut dst[..half]).unwrap();
unsafe { reader.as_trusted_for(bytes.len() - half) }
.unwrap()
.copy_into_slice(&mut dst[half..])
.unwrap();
unsafe { vec.set_len(bytes.len()) };
prop_assert_eq!(&vec, &bytes);
});
}
#[test]
fn test_reader_fill_exact(bytes in any::<Vec<u8>>()) {
with_readers!(&bytes, |reader| {
let read = reader.fill_exact(bytes.len()).unwrap();
prop_assert_eq!(&read, &bytes);
});
}
#[test]
fn slice_reader_fill_exact_input_too_large(bytes in any::<Vec<u8>>()) {
with_untrusted_readers!(&bytes, |reader| {
prop_assert!(matches!(reader.fill_exact(bytes.len() + 1), Err(ReadError::ReadSizeLimit(x)) if x == bytes.len() + 1));
});
}
#[test]
fn test_reader_copy_into_slice_input_too_large(bytes in any::<Vec<u8>>()) {
with_untrusted_readers!(&bytes, |reader| {
let mut vec = Vec::with_capacity(bytes.len() + 1);
let dst = vec.spare_capacity_mut();
prop_assert!(matches!(reader.copy_into_slice(dst), Err(ReadError::ReadSizeLimit(x)) if x == bytes.len() + 1));
});
}
#[test]
fn test_reader_consume(bytes in any::<Vec<u8>>()) {
with_readers!(&bytes, |reader| {
reader.consume(bytes.len()).unwrap();
prop_assert!(matches!(reader.fill_buf(1), Ok(&[])));
});
}
#[test]
fn test_reader_consume_input_too_large(bytes in any::<Vec<u8>>()) {
let mut reader = bytes.as_slice();
prop_assert!(matches!(reader.consume(bytes.len() + 1), Err(ReadError::ReadSizeLimit(x)) if x == bytes.len() + 1));
}
#[test]
fn test_reader_copy_into_t(ints in proptest::collection::vec(any::<u64>(), 0..=100)) {
let bytes = ints.iter().flat_map(|int| int.to_le_bytes()).collect::<Vec<u8>>();
with_readers!(&bytes, |reader| {
for int in &ints {
let mut val = MaybeUninit::<u64>::uninit();
unsafe { reader.copy_into_t(&mut val).unwrap() };
unsafe { prop_assert_eq!(val.assume_init(), *int) };
}
});
}
#[test]
fn test_reader_copy_into_slice_t(ints in proptest::collection::vec(any::<u64>(), 0..=100)) {
let bytes = ints.iter().flat_map(|int| int.to_le_bytes()).collect::<Vec<u8>>();
with_readers!(&bytes, |reader| {
let mut vals: Vec<u64> = Vec::with_capacity(ints.len());
let dst = vals.spare_capacity_mut();
unsafe { reader.copy_into_slice_t(dst).unwrap() };
unsafe { vals.set_len(ints.len()) };
prop_assert_eq!(&vals, &ints);
});
}
#[test]
fn test_writer_write(bytes in any::<Vec<u8>>()) {
let capacity = bytes.len();
let mut buffer = Vec::with_capacity(capacity);
with_writers!(&mut buffer, |writer| {
writer.write(&bytes).unwrap();
let written = capacity - writer.len();
unsafe { buffer.set_len(written) };
prop_assert_eq!(&buffer, &bytes);
});
with_known_len_writers!(&mut buffer, |writer| {
writer.write(&bytes).unwrap();
}, prop_assert_eq!(&buffer, &bytes));
}
#[test]
fn test_writer_write_input_too_large(bytes in proptest::collection::vec(any::<u8>(), 1..=100)) {
let mut buffer = Vec::with_capacity(bytes.len() - 1);
let writer = &mut buffer.spare_capacity_mut();
prop_assert!(matches!(writer.write(&bytes), Err(WriteError::WriteSizeLimit(x)) if x == bytes.len()));
let writer = buffer.spare_capacity_mut();
prop_assert!(matches!(writer.write(&bytes), Err(WriteError::WriteSizeLimit(x)) if x == bytes.len()));
}
#[test]
fn test_writer_write_t(int in any::<u64>()) {
let capacity = 8;
let mut buffer = Vec::with_capacity(capacity);
with_writers!(&mut buffer, |writer| {
unsafe { writer.write_t(&int).unwrap() };
let written = capacity - writer.len();
unsafe { buffer.set_len(written) };
prop_assert_eq!(&buffer, &int.to_le_bytes());
});
with_known_len_writers!(&mut buffer, |writer| {
unsafe { writer.write_t(&int).unwrap() };
}, prop_assert_eq!(&buffer, &int.to_le_bytes()));
}
#[test]
fn test_writer_write_slice_t(ints in proptest::collection::vec(any::<u64>(), 0..=100)) {
let bytes = ints.iter().flat_map(|int| int.to_le_bytes()).collect::<Vec<u8>>();
let capacity = bytes.len();
let mut buffer = Vec::with_capacity(capacity);
with_writers!(&mut buffer, |writer| {
unsafe { writer.write_slice_t(&ints).unwrap() };
let written = capacity - writer.len();
unsafe { buffer.set_len(written) };
prop_assert_eq!(&buffer, &bytes);
});
with_known_len_writers!(&mut buffer, |writer| {
unsafe { writer.write_slice_t(&ints).unwrap() };
}, prop_assert_eq!(&buffer, &bytes));
}
}
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/io/cursor.rs | wincode/src/io/cursor.rs | use super::*;
#[cfg(feature = "alloc")]
use {alloc::vec::Vec, core::slice::from_raw_parts_mut};
/// `Cursor` wraps an in-memory buffer, providing [`Reader`] and [`Writer`] functionality
/// for types implementing <code>[AsRef]<\[u8]></code>.
///
/// This can be especially useful for wrapping [`Reader`]s and [`Writer`]s that are consumed by
/// reading or writing like `&[u8]` or `&mut [MaybeUninit<u8>]`, making them reusable.
///
/// # Examples
///
/// Using `Cursor` to write to a `MaybeUninit<[u8; N]>`.
///
/// ```
/// # use rand::random;
/// # use core::mem::MaybeUninit;
/// use wincode::io::{Cursor, Reader, Writer};
///
/// fn rand_bytes() -> [u8; 8] {
/// random::<u64>().to_le_bytes()
/// }
///
/// let mut data = MaybeUninit::<[u8; 8]>::uninit();
///
/// let mut cursor = Cursor::new(&mut data);
/// let bytes = rand_bytes();
/// cursor.write(&bytes).unwrap();
/// assert_eq!(unsafe { data.assume_init() }, bytes);
///
/// // We can write over the same buffer multiple times with a new Cursor.
/// let mut cursor = Cursor::new(&mut data);
/// let bytes = rand_bytes();
/// cursor.write(&bytes).unwrap();
/// assert_eq!(unsafe { data.assume_init() }, bytes);
/// ```
///
/// Using `Cursor` to write to a `Vec`'s spare capacity.
///
/// ```
/// # #[cfg(feature = "alloc")] {
/// # use rand::random;
/// use wincode::io::{Cursor, Reader, Writer};
///
/// # fn rand_bytes() -> [u8; 8] {
/// # random::<u64>().to_le_bytes()
/// # }
/// let mut data = Vec::with_capacity(8);
///
/// let mut cursor = Cursor::new(&mut data);
/// let bytes = rand_bytes();
/// cursor.write(&bytes).unwrap();
/// assert_eq!(data, bytes);
///
/// // We can write over the same buffer multiple times with a new Cursor.
/// let mut cursor = Cursor::new(&mut data);
/// let bytes = rand_bytes();
/// cursor.write(&bytes).unwrap();
/// assert_eq!(data, bytes);
/// # }
/// ```
pub struct Cursor<T> {
inner: T,
pos: usize,
}
impl<T> Cursor<T> {
pub const fn new(inner: T) -> Self {
Self { inner, pos: 0 }
}
/// Creates a new cursor at the given position.
pub const fn new_at(inner: T, pos: usize) -> Self {
Self { inner, pos }
}
/// Sets the position of the cursor.
pub const fn set_position(&mut self, pos: usize) {
self.pos = pos;
}
/// Consumes the cursor and returns the inner value.
pub fn into_inner(self) -> T {
self.inner
}
/// Returns the current position of the cursor.
pub const fn position(&self) -> usize {
self.pos
}
}
impl<T> Cursor<T>
where
    T: AsRef<[u8]>,
{
    /// Returns a slice of the remaining bytes in the cursor.
    ///
    /// Returns an empty slice when `pos` is past the end of the buffer.
    #[inline]
    fn cur_slice(&self) -> &[u8] {
        let slice = self.inner.as_ref();
        // SAFETY: the start bound is clamped to `slice.len()`, so it is always in bounds.
        unsafe { slice.get_unchecked(self.pos.min(slice.len())..) }
    }
    /// Returns the number of bytes remaining in the cursor.
    ///
    /// Saturates to 0 when `pos` is past the end of the buffer.
    #[inline]
    fn cur_len(&self) -> usize {
        self.inner.as_ref().len().saturating_sub(self.pos)
    }
    /// Split the cursor at `mid` and consume the left slice.
    ///
    /// Errors with `ReadSizeLimit` if fewer than `mid` bytes remain.
    #[inline]
    fn consume_slice_checked(&mut self, mid: usize) -> ReadResult<&[u8]> {
        let slice = self.inner.as_ref();
        // SAFETY: the start bound is clamped to `slice.len()`, so it is always in bounds.
        let cur = unsafe { slice.get_unchecked(self.pos.min(slice.len())..) };
        let Some(left) = cur.get(..mid) else {
            return Err(read_size_limit(mid));
        };
        // SAFETY: We just created a slice of `pos..pos + mid` bytes from the cursor, so `pos + mid` is valid.
        self.pos = unsafe { self.pos.unchecked_add(mid) };
        Ok(left)
    }
}
impl<'a, T> Reader<'a> for Cursor<T>
where
    T: AsRef<[u8]>,
{
    type Trusted<'b>
        = TrustedSliceReader<'a, 'b>
    where
        Self: 'b;
    // Never advances; returns up to `n_bytes`, clamped at EOF.
    #[inline]
    fn fill_buf(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
        let src = self.cur_slice();
        // SAFETY: we clamp the end bound to the length of the slice.
        Ok(unsafe { src.get_unchecked(..n_bytes.min(src.len())) })
    }
    // Never advances; errors unless exactly `n_bytes` remain available.
    #[inline]
    fn fill_exact(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
        let Some(src) = self.cur_slice().get(..n_bytes) else {
            return Err(read_size_limit(n_bytes));
        };
        Ok(src)
    }
    #[inline]
    unsafe fn consume_unchecked(&mut self, amt: usize) {
        // SAFETY (caller contract): `amt` does not exceed the remaining bytes,
        // so this addition cannot overflow.
        self.pos = unsafe { self.pos.unchecked_add(amt) };
    }
    fn consume(&mut self, amt: usize) -> ReadResult<()> {
        if self.cur_len() < amt {
            return Err(read_size_limit(amt));
        }
        // SAFETY: We just checked that `cur_len() >= amt`.
        unsafe { self.consume_unchecked(amt) };
        Ok(())
    }
    // Consumes `n_bytes` from this cursor and hands exactly that window to a
    // bounds-check-eliding reader; errors if fewer than `n_bytes` remain.
    #[inline]
    unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> ReadResult<Self::Trusted<'_>> {
        Ok(TrustedSliceReader::new(
            self.consume_slice_checked(n_bytes)?,
        ))
    }
}
/// Helper functions for writing to `Cursor<&mut [MaybeUninit<u8>]>` and `Cursor<&mut MaybeUninit<[u8; N]>>`.
mod uninit_slice {
    use super::*;
    /// Get a mutable slice of the remaining bytes in the cursor.
    ///
    /// Returns an empty slice when `pos` is past the end of the buffer.
    #[inline]
    pub(super) fn cur_slice_mut(
        inner: &mut [MaybeUninit<u8>],
        pos: usize,
    ) -> &mut [MaybeUninit<u8>] {
        // SAFETY: the start bound is clamped to `inner.len()`, so it is always in bounds.
        unsafe { inner.get_unchecked_mut(pos.min(inner.len())..) }
    }
    /// Get a mutable slice of `len` bytes from the cursor at the current position,
    /// returning an error if the slice does not have at least `len` bytes remaining.
    #[inline]
    pub(super) fn get_slice_mut_checked(
        inner: &mut [MaybeUninit<u8>],
        pos: usize,
        len: usize,
    ) -> WriteResult<&mut [MaybeUninit<u8>]> {
        let Some(dst) = cur_slice_mut(inner, pos).get_mut(..len) else {
            return Err(write_size_limit(len));
        };
        Ok(dst)
    }
    /// Write `src` to the cursor at the current position and advance the position by `src.len()`.
    pub(super) fn write(
        inner: &mut [MaybeUninit<u8>],
        pos: &mut usize,
        src: &[u8],
    ) -> WriteResult<()> {
        let len = src.len();
        let dst = get_slice_mut_checked(inner, *pos, len)?;
        // SAFETY: dst is a valid slice of `len` bytes.
        unsafe { ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr().cast(), len) };
        // SAFETY: We just wrote `len` bytes to the slice of `pos..pos + len`, so `pos + len` is valid.
        *pos = unsafe { pos.unchecked_add(len) };
        Ok(())
    }
    /// Advance the position by `n_bytes` and return a writer that can elide bounds
    /// checking within that `n_bytes` window. Errors if fewer than `n_bytes` remain.
    #[inline]
    pub(super) fn as_trusted_for<'a>(
        inner: &'a mut [MaybeUninit<u8>],
        pos: &mut usize,
        n_bytes: usize,
    ) -> WriteResult<TrustedSliceWriter<'a>> {
        let dst = get_slice_mut_checked(inner, *pos, n_bytes)?;
        // SAFETY: We just created a slice of `pos..pos + n_bytes`, so `pos + n_bytes` is valid.
        *pos = unsafe { pos.unchecked_add(n_bytes) };
        Ok(TrustedSliceWriter::new(dst))
    }
}
// Bounds-checked writer over a caller-provided uninitialized byte slice;
// delegates to the `uninit_slice` helpers.
impl Writer for Cursor<&mut [MaybeUninit<u8>]> {
    type Trusted<'b>
        = TrustedSliceWriter<'b>
    where
        Self: 'b;
    #[inline]
    fn write(&mut self, src: &[u8]) -> WriteResult<()> {
        uninit_slice::write(self.inner, &mut self.pos, src)
    }
    #[inline]
    unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
        uninit_slice::as_trusted_for(self.inner, &mut self.pos, n_bytes)
    }
}
impl<const N: usize> Cursor<&mut MaybeUninit<[u8; N]>> {
    /// Reinterprets `&mut MaybeUninit<[u8; N]>` as `&mut [MaybeUninit<u8>; N]`
    /// so the `uninit_slice` helpers can be reused for array-backed cursors.
    #[inline(always)]
    // `core::mem::transpose` is not yet stabilized.
    pub(super) const fn transpose(inner: &mut MaybeUninit<[u8; N]>) -> &mut [MaybeUninit<u8>; N] {
        // SAFETY: MaybeUninit<[u8; N]> is equivalent to [MaybeUninit<u8>; N].
        unsafe { transmute::<&mut MaybeUninit<[u8; N]>, &mut [MaybeUninit<u8>; N]>(inner) }
    }
}
// Bounds-checked writer over an uninitialized fixed-size array; transposes the
// array into a `[MaybeUninit<u8>]` slice and reuses the `uninit_slice` helpers.
impl<const N: usize> Writer for Cursor<&mut MaybeUninit<[u8; N]>> {
    type Trusted<'b>
        = TrustedSliceWriter<'b>
    where
        Self: 'b;
    #[inline]
    fn write(&mut self, src: &[u8]) -> WriteResult<()> {
        uninit_slice::write(Self::transpose(self.inner), &mut self.pos, src)
    }
    #[inline]
    unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
        uninit_slice::as_trusted_for(Self::transpose(self.inner), &mut self.pos, n_bytes)
    }
}
/// Helper functions for writing to `Cursor<&mut Vec<u8>>` and `Cursor<Vec<u8>>`.
#[cfg(feature = "alloc")]
mod vec {
    use super::*;
    /// Grow the vector if necessary to accommodate the given `needed` bytes.
    ///
    /// Note this differs from [`Vec::reserve`] in that it reserves relative to the cursor's
    /// current position, rather than the initialized length of the vector. The `Cursor<Vec<u8>>`
    /// implementation overwrites existing elements of the vector, so growing relative to length
    /// would unnecessarily over-allocate memory.
    ///
    /// # Panics
    ///
    /// Panics if the new capacity exceeds `isize::MAX` _bytes_.
    #[inline]
    pub(super) fn maybe_grow(inner: &mut Vec<u8>, pos: usize, needed: usize) -> WriteResult<()> {
        let Some(required) = pos.checked_add(needed) else {
            return Err(write_size_limit(needed));
        };
        if required > inner.capacity() {
            grow(inner, required);
        }
        // Out-of-line and `#[cold]` so the common already-has-capacity path stays lean.
        #[cold]
        fn grow(inner: &mut Vec<u8>, required: usize) {
            // SAFETY: We just checked that `required > inner.capacity()` (which is greater than
            // or equal to `inner.len()`), so this will not underflow.
            let additional = unsafe { required.unchecked_sub(inner.len()) };
            inner.reserve(additional);
        }
        Ok(())
    }
    /// Add `len` to the cursor's position and update the length of the vector if necessary.
    ///
    /// # SAFETY:
    /// - Must be called after a successful write to the vector.
    pub(super) unsafe fn add_len(inner: &mut Vec<u8>, pos: &mut usize, len: usize) {
        // SAFETY: We just wrote `len` bytes to the vector, so `pos + len` is valid.
        let next_pos = unsafe { pos.unchecked_add(len) };
        // If pos exceeds the length of the vector, we just wrote to uninitialized capacity,
        // which is now initialized.
        if next_pos > inner.len() {
            unsafe {
                inner.set_len(next_pos);
            }
        }
        *pos = next_pos;
    }
    /// Write `src` to the vector at the current position and advance the position by `src.len()`.
    pub(super) fn write(inner: &mut Vec<u8>, pos: &mut usize, src: &[u8]) -> WriteResult<()> {
        maybe_grow(inner, *pos, src.len())?;
        // SAFETY: We just ensured at least `pos + src.len()` capacity is available.
        unsafe { ptr::copy_nonoverlapping(src.as_ptr(), inner.as_mut_ptr().add(*pos), src.len()) };
        // SAFETY: We just wrote `src.len()` bytes to the vector.
        unsafe { add_len(inner, pos, src.len()) };
        Ok(())
    }
    /// Advance the position by `n_bytes` and return a [`TrustedSliceWriter`] that can elide bounds
    /// checking within that `n_bytes` window.
    #[inline]
    pub(super) fn as_trusted_for<'a>(
        inner: &'a mut Vec<u8>,
        pos: &'a mut usize,
        n_bytes: usize,
    ) -> WriteResult<TrustedVecWriter<'a>> {
        maybe_grow(inner, *pos, n_bytes)?;
        // SAFETY: the pointer and `capacity` describe the vector's full allocation,
        // and `MaybeUninit<u8>` is valid for the uninitialized `len..capacity` tail.
        let buf = unsafe {
            from_raw_parts_mut(
                inner.as_mut_ptr().cast::<MaybeUninit<u8>>(),
                inner.capacity(),
            )
        };
        Ok(TrustedVecWriter::new(buf, pos))
    }
    /// Synchronize the vector's length with the cursor position after trusted
    /// writes, which advance `pos` without calling `set_len`.
    #[inline]
    pub(super) fn finish(inner: &mut Vec<u8>, pos: &mut usize) {
        if *pos > inner.len() {
            unsafe {
                inner.set_len(*pos);
            }
        }
    }
}
/// Trusted writer for `Cursor<&mut Vec<u8>>` or `Cursor<Vec<u8>>` that continues
/// overwriting the vector's memory.
///
/// Generally this should not be constructed directly, but rather by calling [`Writer::as_trusted_for`]
/// on a trusted [`Writer`]. This will ensure that the safety invariants are upheld.
///
/// Note that this does *not* update the length of the vector, as it only contains a reference to the
/// vector's memory via `&mut [MaybeUninit<u8>]`, but it will update the _position_ of the cursor.
/// Vec implementations will synchronize the length and position on subsequent writes or when the
/// writer is finished. Benchmarks showed a roughly 2x performance improvement using this method
/// rather than taking a `&mut Vec<u8>` directly.
///
/// # Safety
///
/// - This will _not_ grow the vector, as it assumes the caller has already reserved enough capacity.
///   The `inner` buffer must have sufficient capacity for all writes. It is UB if this is not upheld.
#[cfg(feature = "alloc")]
pub struct TrustedVecWriter<'a> {
    // Raw view over the vector's full allocation; bytes past the vector's
    // length may be uninitialized.
    inner: &'a mut [MaybeUninit<u8>],
    // Shared cursor position; advanced on every write so the owning cursor can
    // later synchronize the vector's length.
    pos: &'a mut usize,
}
#[cfg(feature = "alloc")]
impl<'a> TrustedVecWriter<'a> {
    /// Creates a writer over `inner` that advances `pos` as it writes.
    ///
    /// `const` for parity with the appending trusted vec writer's constructor.
    pub const fn new(inner: &'a mut [MaybeUninit<u8>], pos: &'a mut usize) -> Self {
        Self { inner, pos }
    }
}
#[cfg(feature = "alloc")]
impl<'a> Writer for TrustedVecWriter<'a> {
    type Trusted<'b>
        = TrustedVecWriter<'b>
    where
        Self: 'b;
    // Unchecked write: no capacity check and no `Vec::set_len`; only the shared
    // cursor position is advanced (synchronized later by the owning cursor).
    fn write(&mut self, src: &[u8]) -> WriteResult<()> {
        // SAFETY: Creator of this writer ensures we have sufficient capacity for all writes.
        unsafe {
            ptr::copy_nonoverlapping(
                src.as_ptr().cast(),
                self.inner.as_mut_ptr().add(*self.pos),
                src.len(),
            )
        };
        // SAFETY: position stays within the reserved window per the creator's contract,
        // so this addition cannot overflow.
        *self.pos = unsafe { self.pos.unchecked_add(src.len()) };
        Ok(())
    }
    // Already trusted: the window size is enforced by whoever created this
    // writer, so `_n_bytes` needs no further checking here.
    #[inline]
    unsafe fn as_trusted_for(&mut self, _n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
        Ok(TrustedVecWriter::new(self.inner, self.pos))
    }
}
/// Writer implementation for `&mut Vec<u8>` that overwrites the underlying vector's memory.
/// The vector will grow as needed.
///
/// # Examples
///
/// Overwriting an existing vector.
/// ```
/// # #[cfg(feature = "alloc")] {
/// # use wincode::io::{Cursor, Writer};
/// let mut vec = vec![0; 3];
/// let mut cursor = Cursor::new(&mut vec);
/// let bytes = [1, 2, 3, 4];
/// cursor.write(&bytes).unwrap();
/// assert_eq!(&vec, &[1, 2, 3, 4]);
/// # }
/// ```
///
/// Growing a vector.
/// ```
/// # #[cfg(feature = "alloc")] {
/// # use wincode::io::{Cursor, Writer};
/// let mut vec = vec![];
/// let mut cursor = Cursor::new(&mut vec);
/// let bytes = [1, 2, 3];
/// cursor.write(&bytes).unwrap();
/// assert_eq!(&vec, &[1, 2, 3]);
/// # }
/// ```
#[cfg(feature = "alloc")]
impl Writer for Cursor<&mut Vec<u8>> {
    type Trusted<'b>
        = TrustedVecWriter<'b>
    where
        Self: 'b;
    #[inline]
    fn write(&mut self, src: &[u8]) -> WriteResult<()> {
        vec::write(self.inner, &mut self.pos, src)
    }
    // Synchronizes the vector's length with the cursor position after trusted
    // writes that bypassed `set_len`.
    #[inline]
    fn finish(&mut self) -> WriteResult<()> {
        vec::finish(self.inner, &mut self.pos);
        Ok(())
    }
    #[inline]
    unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
        vec::as_trusted_for(self.inner, &mut self.pos, n_bytes)
    }
}
/// Writer implementation for `Vec<u8>` that overwrites the underlying vector's memory.
/// The vector will grow as needed.
///
/// # Examples
///
/// Overwriting an existing vector.
/// ```
/// # #[cfg(feature = "alloc")] {
/// # use wincode::io::{Cursor, Writer};
/// let mut cursor = Cursor::new(vec![0; 3]);
/// let bytes = [1, 2, 3, 4];
/// cursor.write(&bytes).unwrap();
/// assert_eq!(cursor.into_inner(), &[1, 2, 3, 4]);
/// # }
/// ```
///
/// Growing a vector.
/// ```
/// # #[cfg(feature = "alloc")] {
/// # use wincode::io::{Cursor, Writer};
/// let mut cursor = Cursor::new(vec![]);
/// let bytes = [1, 2, 3];
/// cursor.write(&bytes).unwrap();
/// assert_eq!(cursor.into_inner(), &[1, 2, 3]);
/// # }
/// ```
#[cfg(feature = "alloc")]
impl Writer for Cursor<Vec<u8>> {
    type Trusted<'b>
        = TrustedVecWriter<'b>
    where
        Self: 'b;
    #[inline]
    fn write(&mut self, src: &[u8]) -> WriteResult<()> {
        vec::write(&mut self.inner, &mut self.pos, src)
    }
    // Synchronizes the vector's length with the cursor position after trusted
    // writes that bypassed `set_len`.
    #[inline]
    fn finish(&mut self) -> WriteResult<()> {
        vec::finish(&mut self.inner, &mut self.pos);
        Ok(())
    }
    #[inline]
    unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
        vec::as_trusted_for(&mut self.inner, &mut self.pos, n_bytes)
    }
}
// Property tests covering cursor reads/writes: advancement semantics, bounds
// behavior at and past EOF, and the trusted (bounds-check-eliding) windows.
#[cfg(all(test, feature = "alloc"))]
mod tests {
    #![allow(clippy::arithmetic_side_effects)]
    use {super::*, crate::proptest_config::proptest_cfg, alloc::vec, proptest::prelude::*};
    proptest! {
        #![proptest_config(proptest_cfg())]
        #[test]
        fn cursor_read_no_panic_no_ub_check(bytes in any::<Vec<u8>>(), pos in any::<usize>()) {
            let mut cursor = Cursor::new_at(&bytes, pos);
            let buf = cursor.fill_buf(bytes.len()).unwrap();
            if pos > bytes.len() {
                // fill-buf should return an empty slice if the position
                // is greater than the length of the bytes.
                prop_assert_eq!(buf, &[]);
            } else {
                prop_assert_eq!(buf, &bytes[pos..]);
            }
            let res = cursor.fill_exact(bytes.len());
            if pos > bytes.len() && !bytes.is_empty() {
                prop_assert!(matches!(res, Err(ReadError::ReadSizeLimit(x)) if x == bytes.len()));
            } else {
                prop_assert_eq!(res.unwrap(), &bytes[pos.min(bytes.len())..]);
            }
        }
        #[test]
        fn cursor_zero_len_ops_ok(bytes in any::<Vec<u8>>(), pos in any::<usize>()) {
            let mut cursor = Cursor::new_at(&bytes, pos);
            let start = cursor.position();
            // fill_exact(0) is always Ok and does not advance.
            let fe = cursor.fill_exact(0).unwrap();
            prop_assert_eq!(fe.len(), 0);
            prop_assert_eq!(cursor.position(), start);
            // consume(0) is always Ok and does not advance.
            prop_assert!(cursor.consume(0).is_ok());
            prop_assert_eq!(cursor.position(), start);
            // as_trusted_for(0) is always Ok and does not advance.
            let start2 = cursor.position();
            let mut trusted = unsafe { <Cursor<_> as Reader>::as_trusted_for(&mut cursor, 0) }.unwrap();
            // Trusted reader on a 0-window should behave like EOF for >0, but allow zero-length reads.
            prop_assert_eq!(trusted.fill_buf(1).unwrap(), &[]);
            prop_assert_eq!(trusted.fill_exact(0).unwrap().len(), 0);
            prop_assert_eq!(cursor.position(), start2);
        }
        #[test]
        fn cursor_as_trusted_for_remaining_advances_to_len(bytes in any::<Vec<u8>>(), pos in any::<usize>()) {
            // Clamp pos to be within [0, len] so the request is valid.
            let len = bytes.len();
            let pos = if len == 0 { 0 } else { pos % (len + 1) };
            let mut cursor = Cursor::new_at(&bytes, pos);
            let remaining = len.saturating_sub(pos);
            let _trusted = unsafe { <Cursor<_> as Reader>::as_trusted_for(&mut cursor, remaining) }.unwrap();
            // After consuming the exact remaining, position should be exactly len.
            prop_assert_eq!(cursor.position(), len);
        }
        #[test]
        fn cursor_extremal_pos_max_zero_len_ok(bytes in any::<Vec<u8>>()) {
            let mut cursor = Cursor::new_at(&bytes, usize::MAX);
            // With extremal position, fill_buf should be empty and peek should error.
            prop_assert_eq!(cursor.fill_buf(1).unwrap(), &[]);
            prop_assert!(matches!(cursor.peek(), Err(ReadError::ReadSizeLimit(1))));
            // Zero-length ops still succeed and do not advance.
            let start = cursor.position();
            prop_assert!(cursor.fill_exact(0).is_ok());
            prop_assert!(cursor.consume(0).is_ok());
            let _trusted = unsafe { <Cursor<_> as Reader>::as_trusted_for(&mut cursor, 0) }.unwrap();
            prop_assert_eq!(cursor.position(), start);
        }
        #[test]
        fn uninit_slice_write_no_panic_no_ub_check(bytes in any::<Vec<u8>>(), pos in any::<usize>()) {
            let mut output: Vec<u8> = Vec::with_capacity(bytes.len());
            let mut cursor = Cursor::new_at(output.spare_capacity_mut(), pos);
            let res = cursor.write(&bytes);
            if pos > bytes.len() && !bytes.is_empty() {
                prop_assert!(matches!(res, Err(WriteError::WriteSizeLimit(x)) if x == bytes.len()));
            } else if pos == 0 {
                prop_assert_eq!(output, bytes);
            }
        }
        #[test]
        fn vec_write_no_panic_no_ub_check(bytes in any::<Vec<u8>>(), pos in any::<u16>()) {
            let pos = pos as usize;
            let mut output: Vec<u8> = Vec::new();
            let mut cursor = Cursor::new_at(&mut output, pos);
            // Vec impl grows, so it should be valid to write to any position within memory limits.
            cursor.write(&bytes).unwrap();
            prop_assert_eq!(&output[pos..], &bytes);
        }
        #[test]
        fn cursor_write_vec_new(bytes in any::<Vec<u8>>()) {
            let mut cursor = Cursor::new(Vec::new());
            cursor.write(&bytes).unwrap();
            prop_assert_eq!(&cursor.inner, &bytes);
            let mut vec = Vec::with_capacity(bytes.len());
            let mut cursor = Cursor::new(vec.spare_capacity_mut());
            cursor.write(&bytes).unwrap();
            unsafe { vec.set_len(bytes.len()) };
            prop_assert_eq!(&vec, &bytes);
        }
        #[test]
        fn cursor_write_existing_vec(bytes in any::<Vec<u8>>()) {
            let mut cursor = Cursor::new(vec![0; bytes.len()]);
            cursor.write(&bytes).unwrap();
            prop_assert_eq!(&cursor.inner, &bytes);
        }
        #[test]
        fn cursor_write_existing_grow_vec(bytes in any::<Vec<u8>>()) {
            let mut cursor = Cursor::new(vec![0; bytes.len() / 2]);
            cursor.write(&bytes).unwrap();
            prop_assert_eq!(&cursor.inner, &bytes);
        }
        #[test]
        fn cursor_write_partial_vec(bytes in any::<Vec<u8>>()) {
            let mut cursor = Cursor::new(vec![1; bytes.len()]);
            let half = bytes.len() - bytes.len() / 2;
            cursor.write(&bytes[..half]).unwrap();
            prop_assert_eq!(&cursor.inner[..half], &bytes[..half]);
            // Remaining bytes are untouched
            prop_assert_eq!(&cursor.inner[half..], &vec![1; bytes.len() - half]);
            cursor.write(&bytes[half..]).unwrap();
            prop_assert_eq!(&cursor.inner, &bytes);
        }
        #[test]
        fn cursor_write_trusted_vec(bytes in any::<Vec<u8>>()) {
            let mut cursor = Cursor::new(vec![1; bytes.len()]);
            let half = bytes.len() - bytes.len() / 2;
            cursor.write(&bytes[..half]).unwrap();
            unsafe { <Cursor<_> as Writer>::as_trusted_for(&mut cursor, bytes.len() - half) }
                .unwrap()
                .write(&bytes[half..])
                .unwrap();
            cursor.finish().unwrap();
            prop_assert_eq!(&cursor.inner, &bytes);
        }
        #[test]
        fn cursor_write_trusted_grow_vec(bytes in any::<Vec<u8>>()) {
            let mut cursor = Cursor::new(vec![1; bytes.len() / 2]);
            let half = bytes.len() - bytes.len() / 2;
            cursor.write(&bytes[..half]).unwrap();
            unsafe { <Cursor<_> as Writer>::as_trusted_for(&mut cursor, bytes.len() - half) }
                .unwrap()
                .write(&bytes[half..])
                .unwrap();
            cursor.finish().unwrap();
            prop_assert_eq!(&cursor.inner, &bytes);
        }
        #[test]
        fn cursor_write_trusted_oversized_vec(bytes in any::<Vec<u8>>()) {
            let mut cursor = Cursor::new(vec![1; bytes.len() * 2]);
            let half = bytes.len() - bytes.len() / 2;
            cursor.write(&bytes[..half]).unwrap();
            unsafe { <Cursor<_> as Writer>::as_trusted_for(&mut cursor, bytes.len() - half) }
                .unwrap()
                .write(&bytes[half..])
                .unwrap();
            cursor.finish().unwrap();
            prop_assert_eq!(&cursor.inner[..bytes.len()], &bytes);
            // Remaining bytes are untouched
            prop_assert_eq!(&cursor.inner[bytes.len()..], &vec![1; bytes.len()]);
        }
    }
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/io/vec.rs | wincode/src/io/vec.rs | use {super::*, alloc::vec::Vec};
/// Trusted writer for `Vec<u8>` that continues appending to the vector's spare capacity.
///
/// Generally this should not be constructed directly, but rather by calling [`Writer::as_trusted_for`]
/// on a [`Vec<u8>`]. This will ensure that the safety invariants are upheld.
///
/// # Safety
///
/// - This will _not_ grow the vector, and it will not bounds check writes, as it assumes the caller has
///   already reserved enough capacity. The `inner` Vec must have sufficient capacity for all writes.
///   It is UB if this is not upheld.
pub struct TrustedVecWriter<'a> {
    // The destination vector; writes land in its spare capacity and its length
    // is bumped after each write.
    inner: &'a mut Vec<u8>,
}
impl<'a> TrustedVecWriter<'a> {
    /// Wraps `inner`; the caller has already reserved the needed capacity.
    const fn new(inner: &'a mut Vec<u8>) -> Self {
        Self { inner }
    }
}
impl<'a> Writer for TrustedVecWriter<'a> {
    type Trusted<'b>
        = TrustedVecWriter<'b>
    where
        Self: 'b;
    // Unchecked append: copies into the spare capacity at `len`, then bumps
    // `len`, keeping the vector's initialized prefix in sync after each write.
    fn write(&mut self, src: &[u8]) -> WriteResult<()> {
        let spare = self.inner.spare_capacity_mut();
        // SAFETY: Creator of this writer ensures we have sufficient capacity for all writes.
        unsafe { ptr::copy_nonoverlapping(src.as_ptr(), spare.as_mut_ptr().cast(), src.len()) };
        // SAFETY: We just wrote `src.len()` bytes to the vector.
        unsafe {
            self.inner
                .set_len(self.inner.len().unchecked_add(src.len()))
        };
        Ok(())
    }
    // Already trusted: the window size is enforced by whoever created this
    // writer, so `_n_bytes` needs no further checking here.
    unsafe fn as_trusted_for(&mut self, _n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
        Ok(TrustedVecWriter::new(self.inner))
    }
}
/// Writer implementation for `Vec<u8>` that appends to the vector. The vector will grow as needed.
///
/// # Examples
///
/// Writing to a new vector.
/// ```
/// # #[cfg(feature = "alloc")] {
/// # use wincode::io::Writer;
/// let mut vec = Vec::new();
/// let bytes = [1, 2, 3];
/// vec.write(&bytes).unwrap();
/// assert_eq!(vec, &[1, 2, 3]);
/// # }
/// ```
///
/// Writing to an existing vector.
/// ```
/// # #[cfg(feature = "alloc")] {
/// # use wincode::io::Writer;
/// let mut vec = vec![1, 2, 3];
/// let bytes = [4, 5, 6];
/// vec.write(&bytes).unwrap();
/// assert_eq!(vec, &[1, 2, 3, 4, 5, 6]);
/// # }
/// ```
impl Writer for Vec<u8> {
    type Trusted<'b>
        = TrustedVecWriter<'b>
    where
        Self: 'b;
    #[inline]
    fn write(&mut self, src: &[u8]) -> WriteResult<()> {
        self.extend_from_slice(src);
        Ok(())
    }
    // Reserves the full window up front so the trusted writer can skip
    // per-write capacity checks.
    #[inline]
    unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> WriteResult<Self::Trusted<'_>> {
        self.reserve(n_bytes);
        // `TrustedVecWriter` will update the length of the vector as it writes.
        Ok(TrustedVecWriter::new(self))
    }
}
// Property tests for the appending `Vec<u8>` writer and its trusted variant.
#[cfg(all(test, feature = "alloc"))]
mod tests {
    #![allow(clippy::arithmetic_side_effects)]
    use {super::*, crate::proptest_config::proptest_cfg, alloc::vec, proptest::prelude::*};
    proptest! {
        #![proptest_config(proptest_cfg())]
        #[test]
        fn vec_writer_write_new(bytes in proptest::collection::vec(any::<u8>(), 0..=100)) {
            let mut vec = Vec::new();
            vec.write(&bytes).unwrap();
            prop_assert_eq!(vec, bytes);
        }
        #[test]
        fn vec_writer_write_existing(bytes in proptest::collection::vec(any::<u8>(), 0..=100)) {
            let mut vec = vec![0; 5];
            vec.write(&bytes).unwrap();
            prop_assert_eq!(&vec[..5], &[0; 5]);
            prop_assert_eq!(&vec[5..], bytes);
        }
        #[test]
        fn vec_writer_trusted(bytes in proptest::collection::vec(any::<u8>(), 0..=100)) {
            let mut vec = Vec::new();
            let half = bytes.len() / 2;
            let quarter = half / 2;
            vec.write(&bytes[..half]).unwrap();
            let mut t1 = unsafe { vec.as_trusted_for(bytes.len() - half) }.unwrap();
            t1
                .write(&bytes[half..half + quarter])
                .unwrap();
            let mut t2 = unsafe { t1.as_trusted_for(quarter) }.unwrap();
            t2.write(&bytes[half + quarter..]).unwrap();
            prop_assert_eq!(vec, bytes);
        }
        #[test]
        fn vec_writer_trusted_existing(bytes in proptest::collection::vec(any::<u8>(), 0..=100)) {
            let mut vec = vec![0; 5];
            let half = bytes.len() / 2;
            let quarter = half / 2;
            vec.write(&bytes[..half]).unwrap();
            let mut t1 = unsafe { vec.as_trusted_for(bytes.len() - half) }.unwrap();
            t1
                .write(&bytes[half..half + quarter])
                .unwrap();
            let mut t2 = unsafe { t1.as_trusted_for(quarter) }.unwrap();
            t2.write(&bytes[half + quarter..]).unwrap();
            prop_assert_eq!(&vec[..5], &[0; 5]);
            prop_assert_eq!(&vec[5..], bytes);
        }
        #[test]
        fn test_writer_write_from_t(int in any::<u64>()) {
            let mut writer = Vec::new();
            unsafe { writer.write_t(&int).unwrap() };
            prop_assert_eq!(writer, int.to_le_bytes());
        }
        #[test]
        fn test_writer_write_slice_t(ints in proptest::collection::vec(any::<u64>(), 0..=100)) {
            let bytes = ints.iter().flat_map(|int| int.to_le_bytes()).collect::<Vec<u8>>();
            let mut writer = Vec::new();
            unsafe { writer.write_slice_t(&ints).unwrap() };
            prop_assert_eq!(writer, bytes);
        }
    }
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/io/mod.rs | wincode/src/io/mod.rs | //! [`Reader`] and [`Writer`] implementations.
use {
core::{
mem::{self, transmute, MaybeUninit},
ptr,
slice::from_raw_parts,
},
thiserror::Error,
};
/// Errors that can occur while reading from a [`Reader`].
#[derive(Error, Debug)]
pub enum ReadError {
    /// The source could not provide the requested number of bytes.
    #[error("Attempting to read {0} bytes")]
    ReadSizeLimit(usize),
    /// The reader cannot hand out stable borrows into its backing storage.
    #[error(
        "Unsupported zero-copy operation: reader does not support deserializing zero-copy types"
    )]
    UnsupportedZeroCopy,
    /// An underlying I/O error (only available with the `std` feature).
    #[cfg(feature = "std")]
    #[error(transparent)]
    Io(#[from] std::io::Error),
}
/// Result alias for read operations.
pub type ReadResult<T> = core::result::Result<T, ReadError>;
// Construct a `ReadError::ReadSizeLimit`; `#[cold]` keeps the error
// construction out of the hot read paths.
#[cold]
const fn read_size_limit(len: usize) -> ReadError {
    ReadError::ReadSizeLimit(len)
}
/// Trait for structured reading of bytes from a source into potentially uninitialized memory.
///
/// # Advancement semantics
/// - `fill_*` methods never advance.
/// - `copy_into_*` and `borrow_*` methods advance by the number of bytes read.
/// - [`Reader::as_trusted_for`] advances the parent by the number of bytes requested.
///
/// # Zero-copy semantics
/// Only implement [`Reader::borrow_exact`] for sources where stable borrows into the backing storage are possible.
/// Callers should prefer [`Reader::fill_exact`] to remain compatible with readers that don’t support zero-copy.
/// Returns [`ReadError::UnsupportedZeroCopy`] for readers that do not support zero-copy.
pub trait Reader<'a> {
    /// A variant of the [`Reader`] that can elide bounds checking within a given window.
    ///
    /// Trusted variants of the [`Reader`] should generally not be constructed directly,
    /// but rather by calling [`Reader::as_trusted_for`] on a trusted [`Reader`].
    /// This will ensure that the safety invariants are upheld.
    type Trusted<'b>: Reader<'a>
    where
        Self: 'b;
    /// Return up to `n_bytes` from the internal buffer without advancing. Implementations may
    /// read more data internally to satisfy future requests. Returns fewer than `n_bytes` at EOF.
    ///
    /// This is _not_ required to return exactly `n_bytes`, it is required to return _up to_ `n_bytes`.
    /// Use [`Reader::fill_exact`] if you need exactly `n_bytes`.
    fn fill_buf(&mut self, n_bytes: usize) -> ReadResult<&[u8]>;
    /// Return exactly `n_bytes` without advancing.
    ///
    /// Errors if the source cannot provide enough bytes.
    fn fill_exact(&mut self, n_bytes: usize) -> ReadResult<&[u8]> {
        let src = self.fill_buf(n_bytes)?;
        // `fill_buf` only guarantees "up to" `n_bytes`; a short read here means EOF.
        if src.len() != n_bytes {
            return Err(read_size_limit(n_bytes));
        }
        Ok(src)
    }
    /// Return exactly `N` bytes as `&[u8; N]` without advancing.
    ///
    /// Errors if fewer than `N` bytes are available.
    fn fill_array<const N: usize>(&mut self) -> ReadResult<&[u8; N]> {
        let src = self.fill_exact(N)?;
        // SAFETY:
        // - `fill_exact` ensures we read N bytes.
        Ok(unsafe { &*src.as_ptr().cast::<[u8; N]>() })
    }
    /// Zero-copy: return a borrowed slice of exactly `len` bytes and advance by `len`.
    ///
    /// The returned slice is tied to `'a`. Prefer [`Reader::fill_exact`] unless you truly need zero-copy.
    /// Errors for readers that don't support zero-copy.
    #[inline]
    fn borrow_exact(&mut self, len: usize) -> ReadResult<&'a [u8]> {
        // Delegates to the mutable variant and downgrades the borrow.
        Self::borrow_exact_mut(self, len).map(|s| &*s)
    }
    /// Zero-copy: return a borrowed mutable slice of exactly `len` bytes and advance by `len`.
    ///
    /// Errors for readers that don't support zero-copy.
    #[expect(unused_variables)]
    fn borrow_exact_mut(&mut self, len: usize) -> ReadResult<&'a mut [u8]> {
        Err(ReadError::UnsupportedZeroCopy)
    }
    /// Advance by exactly `amt` bytes without bounds checks.
    ///
    /// May panic if fewer than `amt` bytes remain.
    ///
    /// # Safety
    ///
    /// - `amt` must be less than or equal to the number of bytes remaining in the reader.
    unsafe fn consume_unchecked(&mut self, amt: usize);
    /// Advance the reader exactly `amt` bytes, returning an error if the source does not have enough bytes.
    fn consume(&mut self, amt: usize) -> ReadResult<()>;
    /// Advance the parent by `n_bytes` and return a [`Reader`] that can elide bounds checks within
    /// that `n_bytes` window.
    ///
    /// Implementors must:
    /// - Ensure that either at least `n_bytes` bytes are available backing the
    ///   returned reader, or return an error.
    /// - Arrange that the returned `Trusted` reader's methods operate within
    ///   that `n_bytes` window (it may buffer or prefetch arbitrarily).
    ///
    /// Note:
    /// - `as_trusted_for` is intended for callers that know they will operate
    ///   within a fixed-size window and want to avoid intermediate bounds checks.
    /// - If you simply want to advance the parent by `n_bytes` without using
    ///   a trusted window, prefer `consume(n_bytes)` instead.
    ///
    /// # Safety
    ///
    /// The caller must ensure that, through the returned reader, they do not
    /// cause more than `n_bytes` bytes to be logically read or consumed
    /// without performing additional bounds checks.
    ///
    /// Concretely:
    /// - The total number of bytes accessed/consumed via the `Trusted` reader
    ///   (`fill_*`, `copy_into_*`, `consume`, etc.) must be **<= `n_bytes`**.
    ///
    /// Violating this is undefined behavior, because `Trusted` readers are
    /// permitted to elide bounds checks within the `n_bytes` window; reading past the
    /// `n_bytes` window may read past the end of the underlying buffer.
    unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> ReadResult<Self::Trusted<'_>>;
    /// Return a reference to the next byte without advancing.
    ///
    /// May buffer more bytes if necessary. Errors if no bytes remain.
    #[inline]
    fn peek(&mut self) -> ReadResult<&u8> {
        self.fill_buf(1)?.first().ok_or_else(|| read_size_limit(1))
    }
    /// Copy and consume exactly `dst.len()` bytes from the [`Reader`] into `dst`.
    ///
    /// # Safety
    ///
    /// - `dst` must not overlap with the internal buffer.
    #[inline]
    fn copy_into_slice(&mut self, dst: &mut [MaybeUninit<u8>]) -> ReadResult<()> {
        let src = self.fill_exact(dst.len())?;
        // SAFETY:
        // - `fill_exact` must do the appropriate bounds checking.
        unsafe {
            ptr::copy_nonoverlapping(src.as_ptr().cast(), dst.as_mut_ptr(), dst.len());
            self.consume_unchecked(dst.len());
        }
        Ok(())
    }
    /// Copy and consume exactly `N` bytes from the [`Reader`] into `dst`.
    ///
    /// # Safety
    ///
    /// - `dst` must not overlap with the internal buffer.
    #[inline]
    fn copy_into_array<const N: usize>(
        &mut self,
        dst: &mut MaybeUninit<[u8; N]>,
    ) -> ReadResult<()> {
        let src = self.fill_array::<N>()?;
        // SAFETY:
        // - `fill_array` must do the appropriate bounds checking.
        unsafe {
            // count = 1: copies a single `[u8; N]` element (N bytes).
            ptr::copy_nonoverlapping(src, dst.as_mut_ptr(), 1);
            self.consume_unchecked(N);
        }
        Ok(())
    }
    /// Copy and consume exactly `size_of::<T>()` bytes from the [`Reader`] into `dst`.
    ///
    /// # Safety
    ///
    /// - `T` must be initialized by reads of `size_of::<T>()` bytes.
    /// - `dst` must not overlap with the internal buffer.
    #[inline]
    unsafe fn copy_into_t<T>(&mut self, dst: &mut MaybeUninit<T>) -> ReadResult<()> {
        let src = self.fill_exact(size_of::<T>())?;
        // SAFETY:
        // - `fill_exact` must do the appropriate bounds checking.
        unsafe {
            ptr::copy_nonoverlapping(src.as_ptr(), dst.as_mut_ptr().cast(), size_of::<T>());
            self.consume_unchecked(size_of::<T>());
        }
        Ok(())
    }
    /// Copy and consume exactly `dst.len() * size_of::<T>()` bytes from the [`Reader`] into `dst`.
    ///
    /// # Safety
    ///
    /// - `T` must be initialized by reads of `size_of::<T>()` bytes.
    /// - `dst` must not overlap with the internal buffer.
    #[inline]
    unsafe fn copy_into_slice_t<T>(&mut self, dst: &mut [MaybeUninit<T>]) -> ReadResult<()> {
        let len = size_of_val(dst);
        let bytes = self.fill_exact(len)?;
        // SAFETY:
        // - `fill_exact` must do the appropriate bounds checking.
        unsafe {
            ptr::copy_nonoverlapping(bytes.as_ptr(), dst.as_mut_ptr().cast(), len);
            self.consume_unchecked(len);
        }
        Ok(())
    }
}
/// Errors that can occur while writing to a [`Writer`].
#[derive(Error, Debug)]
pub enum WriteError {
    /// The destination could not accept the requested number of bytes.
    #[error("Attempting to write {0} bytes")]
    WriteSizeLimit(usize),
    /// An underlying I/O error (only available with the `std` feature).
    #[cfg(feature = "std")]
    #[error(transparent)]
    Io(#[from] std::io::Error),
}
// Construct a `WriteError::WriteSizeLimit`; `#[cold]` keeps the error
// construction out of the hot write paths.
#[cold]
const fn write_size_limit(len: usize) -> WriteError {
    WriteError::WriteSizeLimit(len)
}
/// Result alias for write operations.
pub type WriteResult<T> = core::result::Result<T, WriteError>;
/// Trait for structured writing of bytes into a source of potentially uninitialized memory.
pub trait Writer {
/// A variant of the [`Writer`] that can elide bounds checking within a given window.
///
/// Trusted variants of the [`Writer`] should generally not be constructed directly,
/// but rather by calling [`Writer::as_trusted_for`] on a trusted [`Writer`].
/// This will ensure that the safety invariants are upheld.
type Trusted<'a>: Writer
where
Self: 'a;
/// Finalize the writer by performing any required cleanup or flushing.
///
/// # Regarding trusted writers
///
/// Trusted writers are not guaranteed to live as long as the parent [`Writer`] that
/// created them, and are typically short-lived. wincode will call `finish` after
/// trusted writers have completed their work, so they may rely on `finish` perform
/// local cleanup when needed. Importantly, trusted writers must not perform actions
/// that would invalidate the parent [`Writer`].
///
/// For example, a file writer may buffer internally and delegate to trusted
/// sub-writers with their own buffers. These trusted writers should not close
/// the underlying file descriptor or other parent-owned resources, as that would
/// invalidate the parent writer.
fn finish(&mut self) -> WriteResult<()> {
Ok(())
}
/// Write exactly `src.len()` bytes from the given `src` into the writer.
fn write(&mut self, src: &[u8]) -> WriteResult<()>;
/// Advance the parent by `n_bytes` and return a [`Writer`] that can elide bounds checks within
/// that `n_bytes` window.
///
/// Implementors must:
/// - Ensure that either at least `n_bytes` bytes are available backing the
/// returned writer, or return an error.
/// - Arrange that the returned `Trusted` writer's methods operate within
/// that `n_bytes` window (it may buffer or prefetch arbitrarily).
///
/// Note:
/// - `as_trusted_for` is intended for callers that know they will operate
/// within an exact-size window and want to avoid intermediate bounds checks.
///
/// # Safety
///
/// The caller must treat the returned writer as having exclusive access to
/// exactly `n_bytes` bytes of **uninitialized** output space in the parent,
/// and must:
///
/// - Ensure that no write performed through the `Trusted` writer can
/// address memory outside of that `n_bytes` window.
/// - Ensure that, before the `Trusted` writer is finished or the parent
/// writer is used again, **every byte** in that `n_bytes` window has
/// been initialized at least once via the `Trusted` writer.
/// - Call [`Writer::finish`] on the `Trusted` writer when writing is complete and
/// before the parent writer is used again.
///
/// Concretely:
/// - All writes performed via the `Trusted` writer (`write`, `write_t`,
/// `write_slice_t`, etc.) must stay within the `[0, n_bytes)` region of
/// the reserved space.
/// - It is permitted to overwrite the same bytes multiple times, but the
/// union of all bytes written must cover the entire `[0, n_bytes)` window.
///
/// Violating this is undefined behavior, because:
/// - `Trusted` writers are permitted to elide bounds checks within the
/// `n_bytes` window; writing past the window may write past the end of
/// the underlying destination.
/// - Failing to initialize all `n_bytes` may leave uninitialized memory in
/// the destination that later safe code assumes to be fully initialized.
unsafe fn as_trusted_for(&mut self, n_bytes: usize) -> WriteResult<Self::Trusted<'_>>;
/// Write `T` as bytes into the source.
///
/// # Safety
///
/// - `T` must be plain ol' data.
#[inline]
unsafe fn write_t<T: ?Sized>(&mut self, src: &T) -> WriteResult<()> {
let src = from_raw_parts((src as *const T).cast::<u8>(), size_of_val(src));
self.write(src)?;
Ok(())
}
/// Write `[T]` as bytes into the source.
///
/// # Safety
///
/// - `T` must be plain ol' data.
#[inline]
unsafe fn write_slice_t<T>(&mut self, src: &[T]) -> WriteResult<()> {
let len = size_of_val(src);
let src = from_raw_parts(src.as_ptr().cast::<u8>(), len);
self.write(src)?;
Ok(())
}
}
mod cursor;
mod slice;
#[cfg(feature = "alloc")]
mod vec;
pub use {cursor::Cursor, slice::*};
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/schema/mod.rs | wincode/src/schema/mod.rs | //! Schema traits.
//!
//! # Example
//!
//! ```
//! # #[cfg(all(feature = "solana-short-vec", feature = "alloc"))] {
//! # use rand::prelude::*;
//! # use wincode::{Serialize, Deserialize, len::{BincodeLen, ShortU16Len}, containers::{self, Pod}};
//! # use wincode_derive::{SchemaWrite, SchemaRead};
//! # use std::array;
//!
//! # #[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq)]
//! #[repr(transparent)]
//! #[derive(Clone, Copy)]
//! struct Signature([u8; 32]);
//! # #[derive(serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq)]
//! #[repr(transparent)]
//! #[derive(Clone, Copy)]
//! struct Address([u8; 32]);
//!
//! # #[derive(SchemaWrite, SchemaRead, serde::Serialize, serde::Deserialize, Debug, PartialEq, Eq)]
//! struct MyStruct {
//! #[wincode(with = "containers::Vec<Pod<_>, BincodeLen>")]
//! signature: Vec<Signature>,
//! #[serde(with = "solana_short_vec")]
//! #[wincode(with = "containers::Vec<Pod<_>, ShortU16Len>")]
//! address: Vec<Address>,
//! }
//!
//! let my_struct = MyStruct {
//! signature: (0..10).map(|_| Signature(array::from_fn(|_| random()))).collect(),
//! address: (0..10).map(|_| Address(array::from_fn(|_| random()))).collect(),
//! };
//! let bincode_serialized = bincode::serialize(&my_struct).unwrap();
//! let wincode_serialized = wincode::serialize(&my_struct).unwrap();
//! assert_eq!(bincode_serialized, wincode_serialized);
//!
//! let bincode_deserialized: MyStruct = bincode::deserialize(&bincode_serialized).unwrap();
//! let wincode_deserialized: MyStruct = wincode::deserialize(&wincode_serialized).unwrap();
//! assert_eq!(bincode_deserialized, wincode_deserialized);
//! # }
//! ```
use {
crate::{
error::{ReadResult, WriteResult},
io::*,
len::SeqLen,
},
core::mem::MaybeUninit,
};
pub mod containers;
mod impls;
/// Indicates what kind of assumptions can be made when encoding or decoding a type.
///
/// Readers and writers may use this to optimize their behavior.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum TypeMeta {
    /// The type has a statically known serialized size.
    ///
    /// Specifying this variant can have significant performance benefits, as it can allow
    /// writers to prefetch larger chunks of memory such that subsequent read/write operations
    /// in those chunks can be performed at once without intermediate bounds checks.
    ///
    /// Specifying this variant incorrectly will almost certainly result in a panic at runtime.
    ///
    /// Take care not to specify this on variable length types, like `Vec` or `String`, as their
    /// serialized size will vary based on their length.
    Static {
        /// The static serialized size of the type.
        size: usize,
        /// Whether the type is eligible for zero-copy encoding/decoding.
        ///
        /// This indicates that the type has no invalid bit patterns, no layout requirements, no endianness
        /// checks, etc. This is a very strong claim that should be used judiciously.
        ///
        /// Specifying this incorrectly may trigger UB.
        zero_copy: bool,
    },
    /// The type has a dynamic size, and no optimizations can be made.
    ///
    /// Sizes must be computed per value (e.g. length-prefixed sequences).
    Dynamic,
}
impl TypeMeta {
    /// Return the static serialized size, asserting the type is zero-copy.
    ///
    /// Panics when `self` is not `TypeMeta::Static { zero_copy: true, .. }`
    /// (in const contexts this surfaces as a compile-time error).
    #[inline(always)]
    pub(crate) const fn size_assert_zero_copy(self) -> usize {
        if let TypeMeta::Static {
            size,
            zero_copy: true,
        } = self
        {
            size
        } else {
            panic!("Type is not zero-copy")
        }
    }
}
/// Types that can be written (serialized) to a [`Writer`].
pub trait SchemaWrite {
    /// The in-memory type this schema serializes from.
    type Src: ?Sized;
    /// Size/zero-copy metadata writers may use for optimization.
    /// Defaults to [`TypeMeta::Dynamic`] (no assumptions).
    const TYPE_META: TypeMeta = TypeMeta::Dynamic;
    /// Get the serialized size of `Self::Src`.
    fn size_of(src: &Self::Src) -> WriteResult<usize>;
    /// Write `Self::Src` to `writer`.
    fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()>;
}
/// Types that can be read (deserialized) from a [`Reader`].
pub trait SchemaRead<'de> {
    /// The in-memory type this schema deserializes into.
    type Dst;
    /// Size/zero-copy metadata readers may use for optimization.
    /// Defaults to [`TypeMeta::Dynamic`] (no assumptions).
    const TYPE_META: TypeMeta = TypeMeta::Dynamic;
    /// Read into `dst` from `reader`.
    ///
    /// # Safety
    ///
    /// - Implementation must properly initialize the `Self::Dst`.
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()>;
    /// Read `Self::Dst` from `reader` into a new `Self::Dst`.
    #[inline(always)]
    fn get(reader: &mut impl Reader<'de>) -> ReadResult<Self::Dst> {
        let mut value = MaybeUninit::uninit();
        Self::read(reader, &mut value)?;
        // SAFETY: `read` must properly initialize the `Self::Dst`.
        Ok(unsafe { value.assume_init() })
    }
}
/// Marker trait for types that can be deserialized via direct borrows from a [`Reader`].
///
/// <div class="warning">
/// You should not manually implement this trait for your own type unless you absolutely
/// know what you're doing. The derive macros will automatically implement this trait for your type
/// if it is eligible for zero-copy deserialization.
/// </div>
///
/// # Safety
///
/// - The type must not have any invalid bit patterns, no layout requirements, no endianness checks, etc.
pub unsafe trait ZeroCopy: 'static {
    /// Get a reference to a type from the given bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(all(feature = "alloc", feature = "derive"))] {
    /// # use wincode::{SchemaWrite, SchemaRead, ZeroCopy};
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(SchemaWrite, SchemaRead)]
    /// #[repr(C)]
    /// struct Data {
    ///     bytes: [u8; 7],
    ///     the_answer: u8,
    /// }
    ///
    /// let data = Data { bytes: *b"wincode", the_answer: 42 };
    ///
    /// let serialized = wincode::serialize(&data).unwrap();
    /// let data_ref = Data::from_bytes(&serialized).unwrap();
    ///
    /// assert_eq!(data_ref, &data);
    /// # }
    /// ```
    #[inline(always)]
    fn from_bytes<'de>(mut bytes: &'de [u8]) -> ReadResult<&'de Self>
    where
        Self: SchemaRead<'de, Dst = Self> + Sized,
    {
        // The byte slice itself serves as the reader; the returned reference
        // borrows directly from `bytes`.
        <&Self as SchemaRead<'de>>::get(&mut bytes)
    }
    /// Get a mutable reference to a type from the given bytes.
    ///
    /// # Examples
    ///
    /// ```
    /// # #[cfg(all(feature = "alloc", feature = "derive"))] {
    /// # use wincode::{SchemaWrite, SchemaRead, ZeroCopy};
    /// # #[derive(Debug, PartialEq, Eq)]
    /// #[derive(SchemaWrite, SchemaRead)]
    /// #[repr(C)]
    /// struct Data {
    ///     bytes: [u8; 7],
    ///     the_answer: u8,
    /// }
    ///
    /// let data = Data { bytes: [0; 7], the_answer: 0 };
    ///
    /// let mut serialized = wincode::serialize(&data).unwrap();
    /// let data_mut = Data::from_bytes_mut(&mut serialized).unwrap();
    /// data_mut.bytes = *b"wincode";
    /// data_mut.the_answer = 42;
    ///
    /// let deserialized: Data = wincode::deserialize(&serialized).unwrap();
    /// assert_eq!(deserialized, Data { bytes: *b"wincode", the_answer: 42 });
    /// # }
    /// ```
    #[inline(always)]
    fn from_bytes_mut<'de>(mut bytes: &'de mut [u8]) -> ReadResult<&'de mut Self>
    where
        Self: SchemaRead<'de, Dst = Self> + Sized,
    {
        // Mutable variant: edits through the returned reference are edits to
        // the serialized buffer itself.
        <&mut Self as SchemaRead<'de>>::get(&mut bytes)
    }
}
/// A type that can be read (deserialized) from a [`Reader`] without borrowing from it.
///
/// Blanket-implemented for every type whose [`SchemaRead`] impl holds for all
/// deserializer lifetimes.
pub trait SchemaReadOwned: for<'de> SchemaRead<'de> {}
impl<T> SchemaReadOwned for T where T: for<'de> SchemaRead<'de> {}
#[inline(always)]
#[allow(clippy::arithmetic_side_effects)]
/// Compute the serialized size of a length-prefixed sequence of `T` elements:
/// the encoded-length prefix plus the sum of each element's serialized size.
/// Statically-sized element types take a multiply fast path.
fn size_of_elem_iter<'a, T, Len>(
    value: impl ExactSizeIterator<Item = &'a T::Src>,
) -> WriteResult<usize>
where
    Len: SeqLen,
    T: SchemaWrite + 'a,
{
    let count = value.len();
    let prefix = Len::write_bytes_needed(count)?;
    if let TypeMeta::Static { size, .. } = T::TYPE_META {
        return Ok(prefix + size * count);
    }
    // Extremely unlikely a type-in-memory's size will overflow usize::MAX.
    let mut total = prefix;
    for item in value {
        total += T::size_of(item)?;
    }
    Ok(total)
}
#[inline(always)]
#[allow(clippy::arithmetic_side_effects)]
/// Variant of [`size_of_elem_iter`] specialized for slices, which can opt into
/// an optimized implementation for bytes (`u8`s).
///
/// NOTE(review): currently this simply delegates to the iterator version; the
/// byte fast path exists only in [`write_elem_slice`] — confirm whether a
/// sizing specialization is still intended here.
fn size_of_elem_slice<T, Len>(value: &[T::Src]) -> WriteResult<usize>
where
    Len: SeqLen,
    T: SchemaWrite,
    T::Src: Sized,
{
    size_of_elem_iter::<T, Len>(value.iter())
}
#[inline(always)]
/// Write a length-prefixed sequence of `T` elements from an iterator.
///
/// When `T` has a statically known serialized size, the total output size is
/// known up front, so the entire sequence is written through one trusted
/// window, eliding per-element bounds checks.
fn write_elem_iter<'a, T, Len>(
    writer: &mut impl Writer,
    src: impl ExactSizeIterator<Item = &'a T::Src>,
) -> WriteResult<()>
where
    Len: SeqLen,
    T: SchemaWrite + 'a,
{
    if let TypeMeta::Static { size, .. } = T::TYPE_META {
        #[allow(clippy::arithmetic_side_effects)]
        let needed = Len::write_bytes_needed(src.len())? + size * src.len();
        // SAFETY: `needed` is the size of the encoded length plus the size of the items.
        // `Len::write` and len writes of `T::Src` will write `needed` bytes,
        // fully initializing the trusted window.
        let mut writer = unsafe { writer.as_trusted_for(needed) }?;
        Len::write(&mut writer, src.len())?;
        for item in src {
            T::write(&mut writer, item)?;
        }
        writer.finish()?;
        return Ok(());
    }
    // Dynamic element size: fall back to ordinary bounds-checked writes.
    Len::write(writer, src.len())?;
    for item in src {
        T::write(writer, item)?;
    }
    Ok(())
}
#[inline(always)]
#[allow(clippy::arithmetic_side_effects)]
/// Variant of [`write_elem_iter`] specialized for slices, which can opt into
/// an optimized implementation for bytes (`u8`s).
///
/// Zero-copy-eligible element types are written with a single bulk byte copy
/// of the whole slice instead of per-element writes.
fn write_elem_slice<T, Len>(writer: &mut impl Writer, src: &[T::Src]) -> WriteResult<()>
where
    Len: SeqLen,
    T: SchemaWrite,
    T::Src: Sized,
{
    if let TypeMeta::Static {
        size,
        zero_copy: true,
    } = T::TYPE_META
    {
        let needed = Len::write_bytes_needed(src.len())? + src.len() * size;
        // SAFETY: `needed` is the size of the encoded length plus the size of the slice (bytes).
        // `Len::write` and `writer.write(src)` will write `needed` bytes,
        // fully initializing the trusted window.
        let writer = &mut unsafe { writer.as_trusted_for(needed) }?;
        Len::write(writer, src.len())?;
        // SAFETY: `T::Src` is zero-copy eligible (no invalid bit patterns, no layout requirements, no endianness checks, etc.).
        unsafe { writer.write_slice_t(src)? };
        writer.finish()?;
        return Ok(());
    }
    // Not zero-copy: fall back to element-by-element writing.
    write_elem_iter::<T, Len>(writer, src.iter())
}
#[cfg(all(test, feature = "std", feature = "derive"))]
mod tests {
#![allow(clippy::arithmetic_side_effects, deprecated)]
use {
crate::{
containers::{self, Elem, Pod},
deserialize, deserialize_mut,
error::{self, invalid_tag_encoding},
io::{Reader, Writer},
proptest_config::proptest_cfg,
serialize, Deserialize, ReadResult, SchemaRead, SchemaWrite, Serialize, TypeMeta,
WriteResult, ZeroCopy,
},
core::{marker::PhantomData, ptr},
proptest::prelude::*,
std::{
cell::Cell,
collections::{BinaryHeap, VecDeque},
mem::MaybeUninit,
ops::{Deref, DerefMut},
rc::Rc,
result::Result,
sync::Arc,
},
};
// Fully zero-copy test struct (little-endian builds): every field is a
// fixed-width integer or byte array.
#[cfg(target_endian = "little")]
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    PartialEq,
    Eq,
    Ord,
    PartialOrd,
    SchemaWrite,
    SchemaRead,
    proptest_derive::Arbitrary,
    Hash,
    Clone,
    Copy,
)]
#[wincode(internal)]
#[repr(C)]
struct StructZeroCopy {
    a: u128,
    b: i128,
    c: u64,
    d: i64,
    e: u32,
    f: i32,
    ar1: [u8; 8],
    g: u16,
    h: i16,
    ar2: [u8; 12],
    i: u8,
    j: i8,
    ar3: [u8; 14],
}
// Big-endian variant: restricted to byte fields, since multi-byte integers
// require endianness conversion and are not zero-copy there.
#[cfg(not(target_endian = "little"))]
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    PartialEq,
    Eq,
    Ord,
    PartialOrd,
    SchemaWrite,
    SchemaRead,
    proptest_derive::Arbitrary,
    Hash,
    Clone,
    Copy,
)]
#[wincode(internal)]
#[repr(C)]
struct StructZeroCopy {
    byte: u8,
    ar: [u8; 32],
}
// Statically sized but not zero-copy: `bool` has invalid bit patterns and
// needs validation on read.
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    PartialEq,
    Eq,
    Ord,
    PartialOrd,
    SchemaWrite,
    SchemaRead,
    proptest_derive::Arbitrary,
    Hash,
)]
#[wincode(internal)]
struct StructStatic {
    a: u64,
    b: bool,
    e: [u8; 32],
}
// Dynamically sized: the `String` field makes the serialized size depend on
// the value.
#[derive(
    serde::Serialize,
    serde::Deserialize,
    Debug,
    PartialEq,
    Eq,
    Ord,
    PartialOrd,
    SchemaWrite,
    SchemaRead,
    proptest_derive::Arbitrary,
    Hash,
)]
#[wincode(internal)]
struct StructNonStatic {
    a: u64,
    b: bool,
    e: String,
}
// The zero-copy struct's static size equals the sum of its field sizes.
#[test]
fn struct_zero_copy_derive_size() {
    #[cfg(target_endian = "little")]
    let size = size_of::<u128>()
        + size_of::<i128>()
        + size_of::<u64>()
        + size_of::<i64>()
        + size_of::<u32>()
        + size_of::<i32>()
        + size_of::<[u8; 8]>()
        + size_of::<u16>()
        + size_of::<i16>()
        + size_of::<[u8; 12]>()
        + size_of::<u8>()
        + size_of::<i8>()
        + size_of::<[u8; 14]>();
    #[cfg(not(target_endian = "little"))]
    let size = size_of::<u8>() + size_of::<[u8; 32]>();
    let expected = TypeMeta::Static {
        size,
        zero_copy: true,
    };
    assert_eq!(<StructZeroCopy as SchemaWrite>::TYPE_META, expected);
    assert_eq!(<StructZeroCopy as SchemaRead<'_>>::TYPE_META, expected);
}
// `#[repr(transparent)]` newtypes should inherit the inner type's meta.
#[test]
fn struct_zero_copy_transparent_derive_size() {
    #[derive(SchemaWrite, SchemaRead)]
    #[wincode(internal)]
    #[repr(transparent)]
    struct Address([u8; 32]);
    let expected = TypeMeta::Static {
        size: size_of::<[u8; 32]>(),
        zero_copy: true,
    };
    assert_eq!(<Address as SchemaWrite>::TYPE_META, expected);
    assert_eq!(<Address as SchemaRead<'_>>::TYPE_META, expected);
}
// Static size, but the `bool` field disqualifies zero-copy.
#[test]
fn struct_static_derive_size() {
    let expected = TypeMeta::Static {
        size: size_of::<u64>() + size_of::<bool>() + size_of::<[u8; 32]>(),
        zero_copy: false,
    };
    assert_eq!(<StructStatic as SchemaWrite>::TYPE_META, expected);
    assert_eq!(<StructStatic as SchemaRead<'_>>::TYPE_META, expected);
}
// A `String` field forces `TypeMeta::Dynamic`.
#[test]
fn struct_non_static_derive_size() {
    let expected = TypeMeta::Dynamic;
    assert_eq!(<StructNonStatic as SchemaWrite>::TYPE_META, expected);
    assert_eq!(<StructNonStatic as SchemaRead<'_>>::TYPE_META, expected);
}
thread_local! {
    /// TL counter for tracking drops (or lack thereof -- a leak).
    static TL_DROP_COUNT: Cell<isize> = const { Cell::new(0) };
}
/// Read the current thread-local live-value count.
fn get_tl_drop_count() -> isize {
    TL_DROP_COUNT.with(|cell| cell.get())
}
/// Increment the counter (a tracked value was constructed or cloned).
fn tl_drop_count_inc() {
    TL_DROP_COUNT.with(|cell| cell.set(cell.get() + 1));
}
/// Decrement the counter (a tracked value was dropped).
fn tl_drop_count_dec() {
    TL_DROP_COUNT.with(|cell| cell.set(cell.get() - 1));
}
/// Reset the counter to zero (used by the guard so a failing test does not
/// poison subsequent tests on the same thread).
fn tl_drop_count_reset() {
    TL_DROP_COUNT.with(|cell| cell.set(0));
}
#[must_use]
#[derive(Debug)]
/// Guard for test set up that will ensure that the TL counter is 0 at the start and end of the test.
struct TLDropGuard;
impl TLDropGuard {
    fn new() -> Self {
        assert_eq!(
            get_tl_drop_count(),
            0,
            "TL counter drifted from zero -- another test may have leaked"
        );
        Self
    }
}
impl Drop for TLDropGuard {
    #[track_caller]
    fn drop(&mut self) {
        let v = get_tl_drop_count();
        // Avoid a double panic while unwinding; just reset for the next test.
        if !std::thread::panicking() {
            assert_eq!(
                v, 0,
                "TL counter drifted from zero -- this test might have leaked"
            );
        }
        tl_drop_count_reset();
    }
}
#[derive(Debug, PartialEq, Eq)]
/// A `SchemaWrite` and `SchemaRead` that will increment the TL counter when constructed.
struct DropCounted;
impl Arbitrary for DropCounted {
    type Parameters = ();
    type Strategy = Just<Self>;
    fn arbitrary_with(_args: Self::Parameters) -> Self::Strategy {
        Just(Self::new())
    }
}
impl DropCounted {
    // Serialized representation: a single tag byte, distinct from
    // `ErrorsOnRead::TAG_BYTE`.
    const TAG_BYTE: u8 = 0;
    fn new() -> Self {
        tl_drop_count_inc();
        Self
    }
}
impl Clone for DropCounted {
    fn clone(&self) -> Self {
        // A clone is a new live value; count it too.
        tl_drop_count_inc();
        Self
    }
}
impl Drop for DropCounted {
    fn drop(&mut self) {
        tl_drop_count_dec();
    }
}
impl SchemaWrite for DropCounted {
    type Src = Self;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: 1,
        zero_copy: false,
    };
    fn size_of(_src: &Self::Src) -> WriteResult<usize> {
        Ok(1)
    }
    fn write(writer: &mut impl Writer, _src: &Self::Src) -> WriteResult<()> {
        u8::write(writer, &Self::TAG_BYTE)?;
        Ok(())
    }
}
impl<'de> SchemaRead<'de> for DropCounted {
    type Dst = Self;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: 1,
        zero_copy: false,
    };
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        reader.consume(1)?;
        // This will increment the counter.
        dst.write(DropCounted::new());
        Ok(())
    }
}
/// A `SchemaRead` that will always error on read.
#[derive(Debug, Clone, Copy, PartialEq, Eq, proptest_derive::Arbitrary)]
struct ErrorsOnRead;
impl ErrorsOnRead {
    // Tag byte distinct from `DropCounted::TAG_BYTE` so reads can dispatch.
    const TAG_BYTE: u8 = 1;
}
impl SchemaWrite for ErrorsOnRead {
    type Src = Self;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: 1,
        zero_copy: false,
    };
    fn size_of(_src: &Self::Src) -> WriteResult<usize> {
        Ok(1)
    }
    fn write(writer: &mut impl Writer, _src: &Self::Src) -> WriteResult<()> {
        u8::write(writer, &Self::TAG_BYTE)
    }
}
impl<'de> SchemaRead<'de> for ErrorsOnRead {
    type Dst = Self;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: 1,
        zero_copy: false,
    };
    fn read(
        reader: &mut impl Reader<'de>,
        _dst: &mut MaybeUninit<Self::Dst>,
    ) -> ReadResult<()> {
        reader.consume(1)?;
        // Always fail, leaving `_dst` uninitialized.
        Err(error::ReadError::PointerSizedReadError)
    }
}
/// Sum type mixing a drop-counted element with one that always fails on read;
/// used to exercise partial-initialization drop handling in containers.
#[derive(Debug, Clone, PartialEq, Eq, proptest_derive::Arbitrary)]
enum DropCountedMaybeError {
    DropCounted(DropCounted),
    ErrorsOnRead(ErrorsOnRead),
}
impl SchemaWrite for DropCountedMaybeError {
    type Src = Self;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: 1,
        zero_copy: false,
    };
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        match src {
            DropCountedMaybeError::DropCounted(v) => DropCounted::size_of(v),
            DropCountedMaybeError::ErrorsOnRead(v) => ErrorsOnRead::size_of(v),
        }
    }
    fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
        match src {
            DropCountedMaybeError::DropCounted(v) => DropCounted::write(writer, v),
            DropCountedMaybeError::ErrorsOnRead(v) => ErrorsOnRead::write(writer, v),
        }
    }
}
impl<'de> SchemaRead<'de> for DropCountedMaybeError {
    type Dst = Self;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: 1,
        zero_copy: false,
    };
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        // Dispatch on the tag byte written by the matching `SchemaWrite`.
        let byte = u8::get(reader)?;
        match byte {
            DropCounted::TAG_BYTE => {
                dst.write(DropCountedMaybeError::DropCounted(DropCounted::new()));
                Ok(())
            }
            ErrorsOnRead::TAG_BYTE => Err(error::ReadError::PointerSizedReadError),
            _ => Err(invalid_tag_encoding(byte as usize)),
        }
    }
}
// Sanity check: deserializing N counted values leaves the counter at N
// (the serialized originals drop inside their block); the remaining values
// drop before the guard's final zero check.
#[test]
fn drop_count_sanity() {
    let _guard = TLDropGuard::new();
    // Ensure our incrementing counter works
    let serialized = { serialize(&[DropCounted::new(), DropCounted::new()]).unwrap() };
    let _deserialized: [DropCounted; 2] = deserialize(&serialized).unwrap();
    assert_eq!(get_tl_drop_count(), 2);
}
#[test]
fn drop_count_maybe_error_sanity() {
    let _guard = TLDropGuard::new();
    let serialized =
        { serialize(&[DropCountedMaybeError::DropCounted(DropCounted::new())]).unwrap() };
    let _deserialized: [DropCountedMaybeError; 1] = deserialize(&serialized).unwrap();
    assert_eq!(get_tl_drop_count(), 1);
    let serialized = {
        serialize(&[
            DropCountedMaybeError::DropCounted(DropCounted::new()),
            DropCountedMaybeError::ErrorsOnRead(ErrorsOnRead),
        ])
        .unwrap()
    };
    // Second element errors on read; the first element's already-read value
    // must still be dropped (verified by the guard at scope exit).
    let _deserialized: ReadResult<[DropCountedMaybeError; 2]> = deserialize(&serialized);
}
/// Test that the derive macro handles drops of initialized fields on partially initialized structs.
#[test]
fn test_struct_derive_handles_partial_drop() {
    /// Represents a struct that would leak if the derive macro didn't handle drops of initialized fields
    /// on error.
    #[derive(SchemaWrite, SchemaRead, proptest_derive::Arbitrary, Debug, PartialEq, Eq)]
    #[wincode(internal)]
    struct CouldLeak {
        data: DropCountedMaybeError,
        data2: DropCountedMaybeError,
        data3: DropCountedMaybeError,
    }
    let _guard = TLDropGuard::new();
    proptest!(proptest_cfg(), |(could_leak: CouldLeak)| {
        let serialized = serialize(&could_leak).unwrap();
        let deserialized = CouldLeak::deserialize(&serialized);
        if let Ok(deserialized) = deserialized {
            prop_assert_eq!(could_leak, deserialized);
        }
    });
}
// Odd use case, but it's technically valid so we test it.
// Deserializing `Vec<&T>` for a zero-copy `T` must yield references into the
// serialized input buffer, not copies.
#[test]
fn test_vec_of_references_borrows_from_input() {
    #[derive(
        SchemaWrite, SchemaRead, Debug, PartialEq, Eq, proptest_derive::Arbitrary, Clone, Copy,
    )]
    #[wincode(internal)]
    #[repr(transparent)]
    struct BigBytes([u8; 512]);
    proptest!(proptest_cfg(), |(vec in proptest::collection::vec(any::<BigBytes>(), 0..=8))| {
        // Serialize as owned bytes.
        let bytes = serialize(&vec).unwrap();
        let borrowed: Vec<&BigBytes> = deserialize(&bytes).unwrap();
        prop_assert_eq!(borrowed.len(), vec.len());
        let start = bytes.as_ptr().addr();
        let end = start + bytes.len();
        for (i, r) in borrowed.iter().enumerate() {
            // Values match
            prop_assert_eq!(**r, vec[i]);
            // References point into the input buffer
            let p = ptr::from_ref(*r).addr();
            prop_assert!(p >= start && p < end);
        }
    });
}
// Odd use case, but it's technically valid so we test it.
// Same borrow check as above, but for `Box<[&T]>`.
#[test]
fn test_boxed_slice_of_references_borrows_from_input() {
    #[derive(
        SchemaWrite, SchemaRead, Debug, PartialEq, Eq, proptest_derive::Arbitrary, Clone, Copy,
    )]
    #[wincode(internal)]
    #[repr(transparent)]
    struct BigBytes([u8; 512]);
    proptest!(proptest_cfg(), |(vec in proptest::collection::vec(any::<BigBytes>(), 0..=8))| {
        let boxed: Box<[BigBytes]> = vec.into_boxed_slice();
        let bytes = serialize(&boxed).unwrap();
        let borrowed: Box<[&BigBytes]> = deserialize(&bytes).unwrap();
        prop_assert_eq!(borrowed.len(), boxed.len());
        let start = bytes.as_ptr().addr();
        let end = start + bytes.len();
        for (i, &r) in borrowed.iter().enumerate() {
            prop_assert_eq!(*r, boxed[i]);
            let p = ptr::from_ref(r).addr();
            prop_assert!(p >= start && p < end);
        }
    });
}
/// Test that the derive macro handles drops of initialized fields on partially initialized enums.
#[test]
fn test_enum_derive_handles_partial_drop() {
    /// Represents an enum that would leak if the derive macro didn't handle drops of initialized fields
    /// on error.
    #[derive(SchemaWrite, SchemaRead, proptest_derive::Arbitrary, Debug, PartialEq, Eq)]
    #[wincode(internal)]
    enum CouldLeak {
        A {
            a: DropCountedMaybeError,
            b: DropCountedMaybeError,
        },
        B(
            DropCountedMaybeError,
            DropCountedMaybeError,
            DropCountedMaybeError,
        ),
        C(DropCountedMaybeError),
        D,
    }
    let _guard = TLDropGuard::new();
    proptest!(proptest_cfg(), |(could_leak: CouldLeak)| {
        let serialized = serialize(&could_leak).unwrap();
        let deserialized = CouldLeak::deserialize(&serialized);
        if let Ok(deserialized) = deserialized {
            prop_assert_eq!(could_leak, deserialized);
        }
    });
}
// Tuples must drop earlier elements when a later element fails to read.
#[test]
fn test_tuple_handles_partial_drop() {
    let _guard = TLDropGuard::new();
    let serialized =
        { serialize(&(DropCounted::new(), DropCounted::new(), ErrorsOnRead)).unwrap() };
    let deserialized = <(DropCounted, DropCounted, ErrorsOnRead)>::deserialize(&serialized);
    assert!(deserialized.is_err());
}
// `Vec` must drop already-read elements when a later element fails.
#[test]
fn test_vec_handles_partial_drop() {
    let _guard = TLDropGuard::new();
    proptest!(proptest_cfg(), |(vec in proptest::collection::vec(any::<DropCountedMaybeError>(), 0..100))| {
        let serialized = serialize(&vec).unwrap();
        let deserialized = <Vec<DropCountedMaybeError>>::deserialize(&serialized);
        if let Ok(deserialized) = deserialized {
            prop_assert_eq!(vec, deserialized);
        }
    });
}
// Same as above, for `VecDeque`.
#[test]
fn test_vec_deque_handles_partial_drop() {
    let _guard = TLDropGuard::new();
    proptest!(proptest_cfg(), |(vec in proptest::collection::vec_deque(any::<DropCountedMaybeError>(), 0..100))| {
        let serialized = serialize(&vec).unwrap();
        let deserialized = <VecDeque<DropCountedMaybeError>>::deserialize(&serialized);
        if let Ok(deserialized) = deserialized {
            prop_assert_eq!(vec, deserialized);
        }
    });
}
// Same as above, for `Box<[T]>`.
#[test]
fn test_boxed_slice_handles_partial_drop() {
    let _guard = TLDropGuard::new();
    proptest!(proptest_cfg(), |(slice in proptest::collection::vec(any::<DropCountedMaybeError>(), 0..100).prop_map(|vec| vec.into_boxed_slice()))| {
        let serialized = serialize(&slice).unwrap();
        let deserialized = <Box<[DropCountedMaybeError]>>::deserialize(&serialized);
        if let Ok(deserialized) = deserialized {
            prop_assert_eq!(slice, deserialized);
        }
    });
}
// Same as above, for `Rc<[T]>`.
#[test]
fn test_rc_slice_handles_partial_drop() {
    let _guard = TLDropGuard::new();
    proptest!(proptest_cfg(), |(slice in proptest::collection::vec(any::<DropCountedMaybeError>(), 0..100).prop_map(Rc::from))| {
        let serialized = serialize(&slice).unwrap();
        let deserialized = <Rc<[DropCountedMaybeError]>>::deserialize(&serialized);
        if let Ok(deserialized) = deserialized {
            prop_assert_eq!(slice, deserialized);
        }
    });
}
// Same as above, for `Arc<[T]>`.
#[test]
fn test_arc_slice_handles_partial_drop() {
    let _guard = TLDropGuard::new();
    proptest!(proptest_cfg(), |(slice in proptest::collection::vec(any::<DropCountedMaybeError>(), 0..100).prop_map(Arc::from))| {
        let serialized = serialize(&slice).unwrap();
        let deserialized = <Arc<[DropCountedMaybeError]>>::deserialize(&serialized);
        if let Ok(deserialized) = deserialized {
            prop_assert_eq!(slice, deserialized);
        }
    });
}
// `Arc<T>` drop handling on both success and error paths.
#[test]
fn test_arc_handles_drop() {
    let _guard = TLDropGuard::new();
    // Fix: this strategy previously mapped through `Rc::from`, making the
    // test an exact duplicate of `test_rc_handles_drop` — `Arc<T>` was never
    // exercised. Map through `Arc::from` so the round trip (and inferred
    // deserialize target) is actually `Arc<DropCountedMaybeError>`.
    proptest!(proptest_cfg(), |(data in any::<DropCountedMaybeError>().prop_map(Arc::from))| {
        let serialized = serialize(&data).unwrap();
        let deserialized = deserialize(&serialized);
        if let Ok(deserialized) = deserialized {
            prop_assert_eq!(data, deserialized);
        }
    });
}
// `Rc<T>` drop handling on both success and error paths.
#[test]
fn test_rc_handles_drop() {
    let _guard = TLDropGuard::new();
    proptest!(proptest_cfg(), |(data in any::<DropCountedMaybeError>().prop_map(Rc::from))| {
        let serialized = serialize(&data).unwrap();
        let deserialized = deserialize(&serialized);
        if let Ok(deserialized) = deserialized {
            prop_assert_eq!(data, deserialized);
        }
    });
}
// `Box<T>` drop handling on both success and error paths.
#[test]
fn test_box_handles_drop() {
    let _guard = TLDropGuard::new();
    proptest!(proptest_cfg(), |(data in any::<DropCountedMaybeError>().prop_map(Box::new))| {
        let serialized = serialize(&data).unwrap();
        let deserialized = deserialize(&serialized);
        if let Ok(deserialized) = deserialized {
            prop_assert_eq!(data, deserialized);
        }
    });
}
// Fixed-size arrays must drop elements read before a mid-array failure.
#[test]
fn test_array_handles_partial_drop() {
    let _guard = TLDropGuard::new();
    proptest!(proptest_cfg(), |(array in proptest::array::uniform32(any::<DropCountedMaybeError>()))| {
        let serialized = serialize(&array).unwrap();
        let deserialized = <[DropCountedMaybeError; 32]>::deserialize(&serialized);
        if let Ok(deserialized) = deserialized {
            prop_assert_eq!(array, deserialized);
        }
    });
}
// Exercise the generated `*UninitBuilder` API: reading only some fields and
// then abandoning the builder must drop the already-initialized fields.
#[test]
fn test_struct_extensions_builder_handles_partial_drop() {
    #[derive(SchemaWrite, SchemaRead, Debug, proptest_derive::Arbitrary)]
    #[wincode(internal, struct_extensions)]
    struct Test {
        a: DropCounted,
        b: DropCounted,
        c: DropCounted,
    }
    {
        let _guard = TLDropGuard::new();
        proptest!(proptest_cfg(), |(test: Test)| {
            let serialized = serialize(&test).unwrap();
            let mut test = MaybeUninit::<Test>::uninit();
            let reader = &mut serialized.as_slice();
            let mut builder = TestUninitBuilder::from_maybe_uninit_mut(&mut test);
            builder.read_a(reader)?.read_b(reader)?;
            prop_assert!(!builder.is_init());
            // Struct is not fully initialized, so the two initialized fields should be dropped.
        });
    }
    #[derive(SchemaWrite, SchemaRead, Debug, proptest_derive::Arbitrary)]
    #[wincode(internal, struct_extensions)]
    // Same test, but with a tuple struct.
    struct TestTuple(DropCounted, DropCounted);
    {
        let _guard = TLDropGuard::new();
        proptest!(proptest_cfg(), |(test: TestTuple)| {
            let serialized = serialize(&test).unwrap();
            let mut test = MaybeUninit::<TestTuple>::uninit();
            let reader = &mut serialized.as_slice();
            let mut builder = TestTupleUninitBuilder::from_maybe_uninit_mut(&mut test);
            builder.read_0(reader)?;
            prop_assert!(!builder.is_init());
            // Struct is not fully initialized, so the first initialized field should be dropped.
        });
    }
}
#[test]
fn test_struct_extensions_nested_builder_handles_partial_drop() {
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | true |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/schema/impls.rs | wincode/src/schema/impls.rs | //! Blanket implementations for std types.
//!
//! Because the blanket implementations must be entirely general (e.g., we
//! need to support `Vec<T>` for any `T`), we can't make any assumptions about
//! the "Plain Old Data" nature of `T`, so all sequences will treat constituent
//! elements of `T` as opaque. Of course users can use `std::vec::Vec<Pod<T>>`,
//! which will certainly speed things up for POD elements of sequences, but
//! the optimization will only be _per_ element.
//!
//! Additionally, we have to assume [`BincodeLen`] for all sequences, because
//! there is no way to specify a different length encoding without one of the
//! [`containers`].
#[cfg(feature = "std")]
use std::{
collections::{HashMap, HashSet},
hash::Hash,
};
use {
crate::{
containers::SliceDropGuard,
error::{
invalid_bool_encoding, invalid_char_lead, invalid_tag_encoding, invalid_utf8_encoding,
pointer_sized_decode_error, read_length_encoding_overflow, unaligned_pointer_read,
ReadResult, WriteResult,
},
io::{Reader, Writer},
len::{BincodeLen, SeqLen},
schema::{size_of_elem_slice, write_elem_slice, SchemaRead, SchemaWrite, ZeroCopy},
TypeMeta,
},
core::{
marker::PhantomData,
mem::{self, transmute, MaybeUninit},
},
};
#[cfg(feature = "alloc")]
use {
crate::{
containers::{self},
error::WriteError,
schema::{size_of_elem_iter, write_elem_iter},
},
alloc::{
boxed::Box,
collections::{BTreeMap, BTreeSet, BinaryHeap, LinkedList, VecDeque},
rc::Rc,
string::String,
sync::Arc,
vec::Vec,
},
};
/// Implement `SchemaWrite`/`SchemaRead` for a fixed-width numeric type.
///
/// Two arms:
/// - `($type, zero_copy: $flag)`: encode/decode via the type's little-endian
///   byte representation. On little-endian targets the wire format equals the
///   in-memory representation, so `zero_copy` is hard-coded `true` there; on
///   big-endian targets the caller-provided flag is used (only single-byte
///   types are endian-agnostic).
/// - `($type as $cast)`: encode `$type` by casting to `$cast` first (used for
///   `usize`/`isize`, which bincode encodes as `u64`/`i64`). Never zero-copy;
///   decoding errors if the value does not fit back into `$type`.
macro_rules! impl_int {
    ($type:ty, zero_copy: $zero_copy:expr) => {
        impl SchemaWrite for $type {
            type Src = $type;
            const TYPE_META: TypeMeta = TypeMeta::Static {
                size: size_of::<$type>(),
                #[cfg(target_endian = "little")]
                zero_copy: true,
                #[cfg(not(target_endian = "little"))]
                zero_copy: $zero_copy,
            };
            #[inline(always)]
            fn size_of(_src: &Self::Src) -> WriteResult<usize> {
                Ok(size_of::<$type>())
            }
            #[inline(always)]
            fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
                // bincode defaults to little endian encoding.
                Ok(writer.write(&src.to_le_bytes())?)
            }
        }
        impl<'de> SchemaRead<'de> for $type {
            type Dst = $type;
            const TYPE_META: TypeMeta = TypeMeta::Static {
                size: size_of::<$type>(),
                #[cfg(target_endian = "little")]
                zero_copy: true,
                #[cfg(not(target_endian = "little"))]
                zero_copy: $zero_copy,
            };
            #[inline(always)]
            fn read(
                reader: &mut impl Reader<'de>,
                dst: &mut MaybeUninit<Self::Dst>,
            ) -> ReadResult<()> {
                let bytes = reader.fill_array::<{ size_of::<$type>() }>()?;
                // bincode defaults to little endian encoding.
                dst.write(<$type>::from_le_bytes(*bytes));
                // SAFETY: `fill_array` succeeded above, so at least
                // `size_of::<$type>()` bytes are available to consume.
                unsafe { reader.consume_unchecked(size_of::<$type>()) };
                Ok(())
            }
        }
    };
    ($type:ty as $cast:ty) => {
        impl SchemaWrite for $type {
            type Src = $type;
            const TYPE_META: TypeMeta = TypeMeta::Static {
                size: size_of::<$cast>(),
                zero_copy: false,
            };
            #[inline]
            fn size_of(_src: &Self::Src) -> WriteResult<usize> {
                Ok(size_of::<$cast>())
            }
            #[inline]
            fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
                let src = *src as $cast;
                // bincode defaults to little endian encoding.
                // noop on LE machines.
                Ok(writer.write(&src.to_le_bytes())?)
            }
        }
        impl<'de> SchemaRead<'de> for $type {
            type Dst = $type;
            const TYPE_META: TypeMeta = TypeMeta::Static {
                size: size_of::<$cast>(),
                zero_copy: false,
            };
            #[inline]
            fn read(
                reader: &mut impl Reader<'de>,
                dst: &mut MaybeUninit<Self::Dst>,
            ) -> ReadResult<()> {
                let casted = <$cast>::get(reader)?;
                // Fallible narrowing: e.g. a decoded u64 may not fit in a
                // 32-bit usize.
                let val = casted
                    .try_into()
                    .map_err(|_| pointer_sized_decode_error())?;
                dst.write(val);
                Ok(())
            }
        }
    };
}
// SAFETY:
// - u8 is a canonical zero-copy type: no endianness, no layout, no validation.
unsafe impl ZeroCopy for u8 {}
// SAFETY:
// - i8 is similarly a canonical zero-copy type: no endianness, no layout, no validation.
unsafe impl ZeroCopy for i8 {}
/// Bulk-implement the `ZeroCopy` marker for primitive numeric types.
/// Each expansion is an `unsafe impl`; see the SAFETY comment at the call site.
macro_rules! impl_numeric_zero_copy {
    ($($ty:ty),+ $(,)?) => {
        $(
            unsafe impl ZeroCopy for $ty {}
        )+
    };
}
// SAFETY: Primitive numeric types with fixed size. Only valid on little endian
// platforms because Bincode specifies little endian integer encoding.
#[cfg(target_endian = "little")]
impl_numeric_zero_copy!(u16, i16, u32, i32, u64, i64, u128, i128, f32, f64);
// Single-byte integers are endianness-agnostic, so they are zero-copy everywhere.
impl_int!(u8, zero_copy: true);
impl_int!(i8, zero_copy: true);
// Multi-byte numerics are only zero-copy on little-endian targets (the macro
// handles that cfg); the explicit `false` here applies to other platforms.
impl_int!(u16, zero_copy: false);
impl_int!(i16, zero_copy: false);
impl_int!(u32, zero_copy: false);
impl_int!(i32, zero_copy: false);
impl_int!(u64, zero_copy: false);
impl_int!(i64, zero_copy: false);
impl_int!(u128, zero_copy: false);
impl_int!(i128, zero_copy: false);
impl_int!(f32, zero_copy: false);
impl_int!(f64, zero_copy: false);
// Pointer-sized integers are encoded as fixed 64-bit values for portability
// across machines with different pointer widths.
impl_int!(usize as u64);
impl_int!(isize as i64);
impl SchemaWrite for bool {
    type Src = bool;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: size_of::<bool>(),
        zero_copy: false,
    };
    #[inline]
    fn size_of(_src: &Self::Src) -> WriteResult<usize> {
        Ok(size_of::<u8>())
    }
    /// Encode the boolean as a single byte: 0 for `false`, 1 for `true`.
    #[inline]
    fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
        let byte = *src as u8;
        // SAFETY: u8 is plain ol' data.
        unsafe { writer.write_t(&byte)? };
        Ok(())
    }
}
impl<'de> SchemaRead<'de> for bool {
    type Dst = bool;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: size_of::<bool>(),
        zero_copy: false,
    };
    /// Decode a single-byte boolean; any byte other than 0 or 1 is rejected
    /// as a malformed encoding.
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        let value = match u8::get(reader)? {
            0 => false,
            1 => true,
            byte => return Err(invalid_bool_encoding(byte)),
        };
        dst.write(value);
        Ok(())
    }
}
impl SchemaWrite for char {
    type Src = char;
    /// A `char` serializes to its UTF-8 encoding: between 1 and 4 bytes.
    #[inline]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        Ok(src.len_utf8())
    }
    /// Encode the scalar value into a stack buffer and emit only the bytes
    /// actually used by the UTF-8 sequence.
    #[inline]
    fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
        let mut utf8 = [0u8; 4];
        let encoded = src.encode_utf8(&mut utf8);
        writer.write(encoded.as_bytes())?;
        Ok(())
    }
}
impl<'de> SchemaRead<'de> for char {
    type Dst = char;
    /// Decode a single UTF-8-encoded scalar value.
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        // Determine the sequence length from the UTF-8 lead byte. Bytes
        // outside these ranges (continuation bytes 0x80..=0xBF, the overlong
        // leads 0xC0/0xC1, and 0xF5..=0xFF) are rejected up front.
        let b0 = *reader.peek()?;
        let len = match b0 {
            0x00..=0x7F => 1,
            0xC2..=0xDF => 2,
            0xE0..=0xEF => 3,
            0xF0..=0xF4 => 4,
            _ => return Err(invalid_char_lead(b0)),
        };
        if len == 1 {
            // ASCII fast path: the byte is the scalar value itself.
            // SAFETY: `peek` succeeded, so at least one byte is available.
            unsafe { reader.consume_unchecked(1) };
            dst.write(b0 as char);
            return Ok(());
        }
        let buf = reader.fill_exact(len)?;
        // TODO: Could implement a manual decoder that avoids UTF-8 validate + chars()
        // and instead performs the UTF-8 validity checks and produces a `char` directly.
        // Some quick micro-benchmarking revealed a roughly 2x speedup is possible,
        // but this is on the order of a 1-2ns/byte delta.
        let str = core::str::from_utf8(buf).map_err(invalid_utf8_encoding)?;
        // `from_utf8` succeeded on a non-empty buffer, so a first char exists.
        let c = str.chars().next().unwrap();
        // SAFETY: `fill_exact` succeeded, so `len` bytes are available.
        unsafe { reader.consume_unchecked(len) };
        dst.write(c);
        Ok(())
    }
}
// `PhantomData` occupies zero bytes on the wire: writing emits nothing and
// reading consumes nothing.
impl<T> SchemaWrite for PhantomData<T> {
    type Src = PhantomData<T>;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: 0,
        zero_copy: true,
    };
    #[inline]
    fn size_of(_src: &Self::Src) -> WriteResult<usize> {
        Ok(0)
    }
    #[inline]
    fn write(_writer: &mut impl Writer, _src: &Self::Src) -> WriteResult<()> {
        Ok(())
    }
}
impl<'de, T> SchemaRead<'de> for PhantomData<T> {
    type Dst = PhantomData<T>;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: 0,
        zero_copy: true,
    };
    // `dst` is a zero-sized `MaybeUninit`; no initialization is required.
    #[inline]
    fn read(_reader: &mut impl Reader<'de>, _dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        Ok(())
    }
}
// The unit type likewise serializes to zero bytes.
impl SchemaWrite for () {
    type Src = ();
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: 0,
        zero_copy: true,
    };
    #[inline]
    fn size_of(_src: &Self::Src) -> WriteResult<usize> {
        Ok(0)
    }
    #[inline]
    fn write(_writer: &mut impl Writer, _src: &Self::Src) -> WriteResult<()> {
        Ok(())
    }
}
impl<'de> SchemaRead<'de> for () {
    type Dst = ();
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: 0,
        zero_copy: true,
    };
    #[inline]
    fn read(_reader: &mut impl Reader<'de>, _dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        Ok(())
    }
}
#[cfg(feature = "alloc")]
impl<T> SchemaWrite for Vec<T>
where
    T: SchemaWrite,
    T::Src: Sized,
{
    type Src = Vec<T::Src>;
    // Delegates to the length-parameterized container impl using bincode's
    // default length encoding (`BincodeLen`).
    #[inline]
    fn size_of(value: &Self::Src) -> WriteResult<usize> {
        <containers::Vec<T, BincodeLen>>::size_of(value)
    }
    #[inline]
    fn write(writer: &mut impl Writer, value: &Self::Src) -> WriteResult<()> {
        <containers::Vec<T, BincodeLen>>::write(writer, value)
    }
}
#[cfg(feature = "alloc")]
impl<'de, T> SchemaRead<'de> for Vec<T>
where
    T: SchemaRead<'de>,
{
    type Dst = Vec<T::Dst>;
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        <containers::Vec<T, BincodeLen>>::read(reader, dst)
    }
}
#[cfg(feature = "alloc")]
impl<T> SchemaWrite for VecDeque<T>
where
    T: SchemaWrite,
    T::Src: Sized,
{
    type Src = VecDeque<T::Src>;
    // Delegates to the length-parameterized container impl using bincode's
    // default length encoding (`BincodeLen`).
    #[inline]
    fn size_of(value: &Self::Src) -> WriteResult<usize> {
        <containers::VecDeque<T, BincodeLen>>::size_of(value)
    }
    #[inline]
    fn write(writer: &mut impl Writer, value: &Self::Src) -> WriteResult<()> {
        <containers::VecDeque<T, BincodeLen>>::write(writer, value)
    }
}
#[cfg(feature = "alloc")]
impl<'de, T> SchemaRead<'de> for VecDeque<T>
where
    T: SchemaRead<'de>,
{
    type Dst = VecDeque<T::Dst>;
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        <containers::VecDeque<T, BincodeLen>>::read(reader, dst)
    }
}
// Unsized slices implement only `SchemaWrite` here (deserialization into an
// owned sequence is provided by `Vec`/`Box<[T]>` and the container types).
impl<T> SchemaWrite for [T]
where
    T: SchemaWrite,
    T::Src: Sized,
{
    type Src = [T::Src];
    // Length prefix (`BincodeLen`) followed by each element in order.
    #[inline]
    fn size_of(value: &Self::Src) -> WriteResult<usize> {
        size_of_elem_slice::<T, BincodeLen>(value)
    }
    #[inline]
    fn write(writer: &mut impl Writer, value: &Self::Src) -> WriteResult<()> {
        write_elem_slice::<T, BincodeLen>(writer, value)
    }
}
// SAFETY:
// - [T; N] where T: ZeroCopy is trivially zero-copy. The length is constant,
//   so there is no length encoding.
unsafe impl<const N: usize, T> ZeroCopy for [T; N] where T: ZeroCopy {}
impl<'de, T, const N: usize> SchemaRead<'de> for [T; N]
where
    T: SchemaRead<'de>,
{
    type Dst = [T::Dst; N];
    // An array has a static size whenever its element does: `N * size`.
    const TYPE_META: TypeMeta = const {
        match T::TYPE_META {
            TypeMeta::Static { size, zero_copy } => TypeMeta::Static {
                size: N * size,
                zero_copy,
            },
            TypeMeta::Dynamic => TypeMeta::Dynamic,
        }
    };
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        // Fast path: zero-copy elements allow initializing the whole array
        // with a single copy from the input.
        if let TypeMeta::Static {
            zero_copy: true, ..
        } = T::TYPE_META
        {
            // SAFETY: `T::Dst` is zero-copy eligible (no invalid bit patterns, no layout requirements, no endianness checks, etc.).
            unsafe { reader.copy_into_t(dst)? };
            return Ok(());
        }
        // SAFETY: MaybeUninit<[T::Dst; N]> trivially converts to [MaybeUninit<T::Dst>; N].
        let dst =
            unsafe { transmute::<&mut MaybeUninit<Self::Dst>, &mut [MaybeUninit<T::Dst>; N]>(dst) };
        let base = dst.as_mut_ptr();
        // Drops the elements initialized so far if a later element read fails.
        let mut guard = SliceDropGuard::<T::Dst>::new(base);
        if let TypeMeta::Static { size, .. } = Self::TYPE_META {
            // SAFETY: `Self::TYPE_META` specifies a static size, which is `N * static_size_of(T)`.
            // `N` reads of `T` will consume `size` bytes, fully consuming the trusted window.
            let reader = &mut unsafe { reader.as_trusted_for(size) }?;
            for i in 0..N {
                let slot = unsafe { &mut *base.add(i) };
                T::read(reader, slot)?;
                guard.inc_len();
            }
        } else {
            for i in 0..N {
                let slot = unsafe { &mut *base.add(i) };
                T::read(reader, slot)?;
                guard.inc_len();
            }
        }
        // All N elements are initialized; disarm the guard.
        mem::forget(guard);
        Ok(())
    }
}
impl<T, const N: usize> SchemaWrite for [T; N]
where
    T: SchemaWrite,
    T::Src: Sized,
{
    type Src = [T::Src; N];
    // An array has a static size whenever its element does: `N * size`.
    const TYPE_META: TypeMeta = const {
        match T::TYPE_META {
            TypeMeta::Static { size, zero_copy } => TypeMeta::Static {
                size: N * size,
                zero_copy,
            },
            TypeMeta::Dynamic => TypeMeta::Dynamic,
        }
    };
    #[inline]
    #[allow(clippy::arithmetic_side_effects)]
    fn size_of(value: &Self::Src) -> WriteResult<usize> {
        // Statically sized arrays need no per-element inspection.
        if let TypeMeta::Static { size, .. } = Self::TYPE_META {
            return Ok(size);
        }
        // Extremely unlikely a type-in-memory's size will overflow usize::MAX.
        value
            .iter()
            .map(T::size_of)
            .try_fold(0usize, |acc, x| x.map(|x| acc + x))
    }
    #[inline]
    fn write(writer: &mut impl Writer, value: &Self::Src) -> WriteResult<()> {
        match Self::TYPE_META {
            // Fast path: zero-copy elements are emitted with a single copy.
            TypeMeta::Static {
                zero_copy: true, ..
            } => {
                // SAFETY: `T::Src` is zero-copy eligible (no invalid bit patterns, no layout requirements, no endianness checks, etc.).
                unsafe { writer.write_slice_t(value)? };
            }
            TypeMeta::Static {
                size,
                zero_copy: false,
            } => {
                // SAFETY: `Self::TYPE_META` specifies a static size, which is `N * static_size_of(T)`.
                // `N` writes of `T` will write `size` bytes, fully initializing the trusted window.
                let writer = &mut unsafe { writer.as_trusted_for(size) }?;
                for item in value {
                    T::write(writer, item)?;
                }
                writer.finish()?;
            }
            TypeMeta::Dynamic => {
                for item in value {
                    T::write(writer, item)?;
                }
            }
        }
        Ok(())
    }
}
impl<'de, T> SchemaRead<'de> for Option<T>
where
    T: SchemaRead<'de>,
{
    type Dst = Option<T::Dst>;
    /// Decode an `Option`: a single-byte tag (0 = `None`, 1 = `Some`)
    /// followed by the payload when present.
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        let tag = u8::get(reader)?;
        let value = match tag {
            0 => Option::None,
            1 => Option::Some(T::get(reader)?),
            other => return Err(invalid_tag_encoding(other as usize)),
        };
        dst.write(value);
        Ok(())
    }
}
impl<T> SchemaWrite for Option<T>
where
    T: SchemaWrite,
    T::Src: Sized,
{
    type Src = Option<T::Src>;
    /// One byte for the tag plus the payload size when present.
    #[inline]
    #[allow(clippy::arithmetic_side_effects)]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        // Extremely unlikely a type-in-memory's size will overflow usize::MAX.
        let payload = match src {
            Option::Some(value) => T::size_of(value)?,
            Option::None => 0,
        };
        Ok(1 + payload)
    }
    /// Emit the tag byte (0 = `None`, 1 = `Some`), then the payload if any.
    #[inline]
    fn write(writer: &mut impl Writer, value: &Self::Src) -> WriteResult<()> {
        if let Option::Some(inner) = value {
            u8::write(writer, &1)?;
            T::write(writer, inner)
        } else {
            u8::write(writer, &0)
        }
    }
}
impl<'de, T, E> SchemaRead<'de> for Result<T, E>
where
    T: SchemaRead<'de>,
    E: SchemaRead<'de>,
{
    type Dst = Result<T::Dst, E::Dst>;
    // Static only when both variants have the same static payload size; the
    // wire layout is then always tag (u32) + payload.
    const TYPE_META: TypeMeta = match (T::TYPE_META, E::TYPE_META) {
        (TypeMeta::Static { size: t_size, .. }, TypeMeta::Static { size: e_size, .. })
            if t_size == e_size =>
        {
            TypeMeta::Static {
                size: size_of::<u32>() + t_size,
                zero_copy: false,
            }
        }
        _ => TypeMeta::Dynamic,
    };
    /// Decode a `Result`: a u32 tag (0 = `Ok`, 1 = `Err`) followed by the
    /// corresponding variant payload.
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        let tag = u32::get(reader)?;
        let value = match tag {
            0 => Result::Ok(T::get(reader)?),
            1 => Result::Err(E::get(reader)?),
            other => return Err(invalid_tag_encoding(other as usize)),
        };
        dst.write(value);
        Ok(())
    }
}
impl<T, E> SchemaWrite for Result<T, E>
where
    T: SchemaWrite,
    E: SchemaWrite,
    T::Src: Sized,
    E::Src: Sized,
{
    type Src = Result<T::Src, E::Src>;
    // Static only when both variants have the same static payload size; the
    // wire layout is then always tag (u32) + payload.
    const TYPE_META: TypeMeta = match (T::TYPE_META, E::TYPE_META) {
        (TypeMeta::Static { size: t_size, .. }, TypeMeta::Static { size: e_size, .. })
            if t_size == e_size =>
        {
            TypeMeta::Static {
                size: size_of::<u32>() + t_size,
                zero_copy: false,
            }
        }
        _ => TypeMeta::Dynamic,
    };
    /// A u32 tag plus the size of whichever variant payload is present.
    #[inline]
    #[allow(clippy::arithmetic_side_effects)]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        // Extremely unlikely a type-in-memory's size will overflow usize::MAX.
        let payload = match src {
            Result::Ok(value) => T::size_of(value)?,
            Result::Err(error) => E::size_of(error)?,
        };
        Ok(size_of::<u32>() + payload)
    }
    /// Emit the tag (0 = `Ok`, 1 = `Err`), then the variant payload.
    #[inline]
    fn write(writer: &mut impl Writer, value: &Self::Src) -> WriteResult<()> {
        match value {
            Result::Ok(ok) => {
                u32::write(writer, &0)?;
                T::write(writer, ok)
            }
            Result::Err(err) => {
                u32::write(writer, &1)?;
                E::write(writer, err)
            }
        }
    }
}
// Blanket impl: a reference serializes exactly like its referent, so
// borrowed sources can be passed anywhere an owned schema is expected.
impl<'a, T> SchemaWrite for &'a T
where
    T: SchemaWrite,
    T: ?Sized,
{
    type Src = &'a T::Src;
    const TYPE_META: TypeMeta = T::TYPE_META;
    #[inline]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        T::size_of(*src)
    }
    #[inline]
    fn write(writer: &mut impl Writer, value: &Self::Src) -> WriteResult<()> {
        T::write(writer, *value)
    }
}
/// Implement `SchemaWrite`/`SchemaRead` for a smart pointer (`Box`, `Rc`,
/// `Arc`) by forwarding to the pointee's schema.
///
/// Reads deserialize directly into a freshly allocated, uninitialized
/// container rather than constructing the value on the stack first.
macro_rules! impl_heap_container {
    ($container:ident) => {
        #[cfg(feature = "alloc")]
        impl<T> SchemaWrite for $container<T>
        where
            T: SchemaWrite,
        {
            type Src = $container<T::Src>;
            // The wire size matches the pointee; the indirection itself is
            // never zero-copy.
            const TYPE_META: TypeMeta = const {
                match T::TYPE_META {
                    TypeMeta::Static { size, .. } => TypeMeta::Static {
                        size,
                        zero_copy: false,
                    },
                    TypeMeta::Dynamic => TypeMeta::Dynamic,
                }
            };
            #[inline]
            fn size_of(src: &Self::Src) -> WriteResult<usize> {
                T::size_of(src)
            }
            #[inline]
            fn write(writer: &mut impl Writer, value: &Self::Src) -> WriteResult<()> {
                T::write(writer, value)
            }
        }
        #[cfg(feature = "alloc")]
        impl<'de, T> SchemaRead<'de> for $container<T>
        where
            T: SchemaRead<'de>,
        {
            type Dst = $container<T::Dst>;
            const TYPE_META: TypeMeta = const {
                match T::TYPE_META {
                    TypeMeta::Static { size, .. } => TypeMeta::Static {
                        size,
                        zero_copy: false,
                    },
                    TypeMeta::Dynamic => TypeMeta::Dynamic,
                }
            };
            #[inline]
            fn read(
                reader: &mut impl Reader<'de>,
                dst: &mut MaybeUninit<Self::Dst>,
            ) -> ReadResult<()> {
                // Frees the allocation (without dropping a `T::Dst`, which is
                // not yet initialized) if `T::read` fails below.
                struct DropGuard<T>(*mut MaybeUninit<T>);
                impl<T> Drop for DropGuard<T> {
                    #[inline]
                    fn drop(&mut self) {
                        drop(unsafe { $container::from_raw(self.0) });
                    }
                }
                let mem = $container::<T::Dst>::new_uninit();
                let ptr = $container::into_raw(mem) as *mut _;
                let guard: DropGuard<T::Dst> = DropGuard(ptr);
                T::read(reader, unsafe { &mut *ptr })?;
                // Success: the guard must not reclaim the allocation.
                mem::forget(guard);
                unsafe {
                    // SAFETY: `T::read` must properly initialize the `T::Dst`.
                    dst.write($container::from_raw(ptr).assume_init());
                }
                Ok(())
            }
        }
    };
}
impl_heap_container!(Box);
impl_heap_container!(Rc);
impl_heap_container!(Arc);
// Boxed/ref-counted slices delegate to the length-parameterized container
// impls with bincode's default length encoding (`BincodeLen`).
#[cfg(feature = "alloc")]
impl<T> SchemaWrite for Box<[T]>
where
    T: SchemaWrite,
    T::Src: Sized,
{
    type Src = Box<[T::Src]>;
    #[inline]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        <containers::Box<[T], BincodeLen>>::size_of(src)
    }
    #[inline]
    fn write(writer: &mut impl Writer, value: &Self::Src) -> WriteResult<()> {
        <containers::Box<[T], BincodeLen>>::write(writer, value)
    }
}
#[cfg(feature = "alloc")]
impl<T> SchemaWrite for Rc<[T]>
where
    T: SchemaWrite,
    T::Src: Sized,
{
    type Src = Rc<[T::Src]>;
    #[inline]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        <containers::Rc<[T], BincodeLen>>::size_of(src)
    }
    #[inline]
    fn write(writer: &mut impl Writer, value: &Self::Src) -> WriteResult<()> {
        <containers::Rc<[T], BincodeLen>>::write(writer, value)
    }
}
#[cfg(feature = "alloc")]
impl<T> SchemaWrite for Arc<[T]>
where
    T: SchemaWrite,
    T::Src: Sized,
{
    type Src = Arc<[T::Src]>;
    #[inline]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        <containers::Arc<[T], BincodeLen>>::size_of(src)
    }
    #[inline]
    fn write(writer: &mut impl Writer, value: &Self::Src) -> WriteResult<()> {
        <containers::Arc<[T], BincodeLen>>::write(writer, value)
    }
}
// Read-side counterparts of the boxed/ref-counted slice impls above, again
// delegating to the container impls with `BincodeLen` length encoding.
#[cfg(feature = "alloc")]
impl<'de, T> SchemaRead<'de> for Box<[T]>
where
    T: SchemaRead<'de>,
{
    type Dst = Box<[T::Dst]>;
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        <containers::Box<[T], BincodeLen>>::read(reader, dst)
    }
}
#[cfg(feature = "alloc")]
impl<'de, T> SchemaRead<'de> for Rc<[T]>
where
    T: SchemaRead<'de>,
{
    type Dst = Rc<[T::Dst]>;
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        <containers::Rc<[T], BincodeLen>>::read(reader, dst)
    }
}
#[cfg(feature = "alloc")]
impl<'de, T> SchemaRead<'de> for Arc<[T]>
where
    T: SchemaRead<'de>,
{
    type Dst = Arc<[T::Dst]>;
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        <containers::Arc<[T], BincodeLen>>::read(reader, dst)
    }
}
// Strings serialize as a length prefix (`BincodeLen`) followed by the raw
// UTF-8 bytes.
impl SchemaWrite for str {
    type Src = str;
    #[inline]
    #[allow(clippy::arithmetic_side_effects)]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        // Extremely unlikely a type-in-memory's size will overflow usize::MAX.
        Ok(<BincodeLen>::write_bytes_needed(src.len())? + src.len())
    }
    #[inline]
    fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
        <BincodeLen>::write(writer, src.len())?;
        writer.write(src.as_bytes())?;
        Ok(())
    }
}
// `String` serializes identically to its `str` contents.
#[cfg(feature = "alloc")]
impl SchemaWrite for String {
    type Src = String;
    #[inline]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        <str>::size_of(src)
    }
    #[inline]
    fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
        <str>::write(writer, src)
    }
}
impl<'de> SchemaRead<'de> for &'de str {
type Dst = &'de str;
#[inline]
fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
let len = <BincodeLen>::read::<u8>(reader)?;
let bytes = reader.borrow_exact(len)?;
match core::str::from_utf8(bytes) {
Ok(s) => {
dst.write(s);
Ok(())
}
Err(e) => Err(invalid_utf8_encoding(e)),
}
}
}
#[cfg(feature = "alloc")]
impl<'de> SchemaRead<'de> for String {
    type Dst = String;
    /// Owned string decode: read the length prefix, copy the bytes out of the
    /// reader, then validate them as UTF-8.
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        let len = <BincodeLen>::read::<u8>(reader)?;
        let bytes = reader.fill_exact(len)?.to_vec();
        // SAFETY: `fill_exact` succeeded, so `len` bytes are available.
        unsafe { reader.consume_unchecked(len) };
        let s = String::from_utf8(bytes).map_err(|e| invalid_utf8_encoding(e.utf8_error()))?;
        dst.write(s);
        Ok(())
    }
}
/// Implement `SchemaWrite` and `SchemaRead` for types that may be iterated over sequentially.
///
/// Generally this should only be used on types for which we cannot provide an optimized implementation,
/// and where the most optimal implementation is simply iterating over the type to write or collecting
/// to read -- typically non-contiguous sequences like `HashMap` or `BTreeMap` (or their set variants).
macro_rules! impl_seq {
($feature: literal, $target: ident<$key: ident : $($constraint:path)|*, $value: ident>, $with_capacity: expr) => {
#[cfg(feature = $feature)]
impl<$key, $value> SchemaWrite for $target<$key, $value>
where
$key: SchemaWrite,
$key::Src: Sized,
$value: SchemaWrite,
$value::Src: Sized,
{
type Src = $target<$key::Src, $value::Src>;
#[inline]
#[allow(clippy::arithmetic_side_effects)]
fn size_of(src: &Self::Src) -> WriteResult<usize> {
if let (TypeMeta::Static { size: key_size, .. }, TypeMeta::Static { size: value_size, .. }) = ($key::TYPE_META, $value::TYPE_META) {
return Ok(<BincodeLen>::write_bytes_needed(src.len())? + (key_size + value_size) * src.len());
}
Ok(<BincodeLen>::write_bytes_needed(src.len())? +
src
.iter()
.try_fold(
0usize,
|acc, (k, v)|
Ok::<_, WriteError>(
acc
+ $key::size_of(k)?
+ $value::size_of(v)?
)
)?
)
}
#[inline]
fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
if let (TypeMeta::Static { size: key_size, .. }, TypeMeta::Static { size: value_size, .. }) = ($key::TYPE_META, $value::TYPE_META) {
let len = src.len();
#[allow(clippy::arithmetic_side_effects)]
let needed = <BincodeLen>::write_bytes_needed(len)? + (key_size + value_size) * len;
// SAFETY: `$key::TYPE_META` and `$value::TYPE_META` specify static sizes, so `len` writes of `($key::Src, $value::Src)`
// and `<BincodeLen>::write` will write `needed` bytes, fully initializing the trusted window.
let writer = &mut unsafe { writer.as_trusted_for(needed) }?;
<BincodeLen>::write(writer, len)?;
for (k, v) in src.iter() {
$key::write(writer, k)?;
$value::write(writer, v)?;
}
writer.finish()?;
return Ok(());
}
<BincodeLen>::write(writer, src.len())?;
for (k, v) in src.iter() {
$key::write(writer, k)?;
$value::write(writer, v)?;
}
Ok(())
}
}
#[cfg(feature = $feature)]
impl<'de, $key, $value> SchemaRead<'de> for $target<$key, $value>
where
$key: SchemaRead<'de>,
$value: SchemaRead<'de>
$(,$key::Dst: $constraint+)*,
{
type Dst = $target<$key::Dst, $value::Dst>;
#[inline]
fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
let len = <BincodeLen>::read::<($key::Dst, $value::Dst)>(reader)?;
let map = if let (TypeMeta::Static { size: key_size, .. }, TypeMeta::Static { size: value_size, .. }) = ($key::TYPE_META, $value::TYPE_META) {
#[allow(clippy::arithmetic_side_effects)]
// SAFETY: `$key::TYPE_META` and `$value::TYPE_META` specify static sizes, so `len` reads of `($key::Dst, $value::Dst)`
// will consume `(key_size + value_size) * len` bytes, fully consuming the trusted window.
let reader = &mut unsafe { reader.as_trusted_for((key_size + value_size) * len) }?;
let mut map = $with_capacity(len);
for _ in 0..len {
let k = $key::get(reader)?;
let v = $value::get(reader)?;
map.insert(k, v);
}
map
} else {
let mut map = $with_capacity(len);
for _ in 0..len {
let k = $key::get(reader)?;
let v = $value::get(reader)?;
map.insert(k, v);
}
map
};
dst.write(map);
Ok(())
}
}
};
($feature: literal, $target: ident <$key: ident : $($constraint:path)|*>, $with_capacity: expr, $insert: ident) => {
#[cfg(feature = $feature)]
impl<$key: SchemaWrite> SchemaWrite for $target<$key>
where
$key::Src: Sized,
{
type Src = $target<$key::Src>;
#[inline]
fn size_of(src: &Self::Src) -> WriteResult<usize> {
size_of_elem_iter::<$key, BincodeLen>(src.iter())
}
#[inline]
fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
write_elem_iter::<$key, BincodeLen>(writer, src.iter())
}
}
#[cfg(feature = $feature)]
impl<'de, $key> SchemaRead<'de> for $target<$key>
where
$key: SchemaRead<'de>
$(,$key::Dst: $constraint+)*,
{
type Dst = $target<$key::Dst>;
#[inline]
fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
let len = <BincodeLen>::read::<$key::Dst>(reader)?;
let map = match $key::TYPE_META {
TypeMeta::Static { size, .. } => {
#[allow(clippy::arithmetic_side_effects)]
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | true |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/src/schema/containers.rs | wincode/src/schema/containers.rs | //! This module provides specialized implementations of standard library collection types that
//! provide control over the length encoding (see [`SeqLen`](crate::len::SeqLen)), as well
//! as special case opt-in raw-copy overrides (see [`Pod`]).
//!
//! # Examples
//! Raw byte vec with solana short vec length encoding:
//!
//! ```
//! # #[cfg(all(feature = "solana-short-vec", feature = "alloc"))] {
//! # use wincode::{containers::self, len::ShortU16Len};
//! # use wincode_derive::SchemaWrite;
//! # use serde::Serialize;
//! # use solana_short_vec;
//! #[derive(Serialize, SchemaWrite)]
//! struct MyStruct {
//! #[serde(with = "solana_short_vec")]
//! #[wincode(with = "containers::Vec<_, ShortU16Len>")]
//! vec: Vec<u8>,
//! }
//!
//! let my_struct = MyStruct {
//! vec: vec![1, 2, 3],
//! };
//! let wincode_bytes = wincode::serialize(&my_struct).unwrap();
//! let bincode_bytes = bincode::serialize(&my_struct).unwrap();
//! assert_eq!(wincode_bytes, bincode_bytes);
//! # }
//! ```
//!
//! Vector with struct elements and custom length encoding:
//!
//! ```
//! # #[cfg(all(feature = "solana-short-vec", feature = "alloc", feature = "derive"))] {
//! # use wincode_derive::SchemaWrite;
//! # use wincode::{containers::self, len::ShortU16Len};
//! # use serde::Serialize;
//! # use solana_short_vec;
//! #[derive(Serialize, SchemaWrite)]
//! struct Point {
//! x: u64,
//! y: u64,
//! }
//!
//! #[derive(Serialize, SchemaWrite)]
//! struct MyStruct {
//! #[serde(with = "solana_short_vec")]
//! #[wincode(with = "containers::Vec<Point, ShortU16Len>")]
//! vec: Vec<Point>,
//! }
//!
//! let my_struct = MyStruct {
//! vec: vec![Point { x: 1, y: 2 }, Point { x: 3, y: 4 }],
//! };
//! let wincode_bytes = wincode::serialize(&my_struct).unwrap();
//! let bincode_bytes = bincode::serialize(&my_struct).unwrap();
//! assert_eq!(wincode_bytes, bincode_bytes);
//! # }
//! ```
use {
crate::{
error::{ReadResult, WriteResult},
io::{Reader, Writer},
schema::{SchemaRead, SchemaWrite},
TypeMeta, ZeroCopy,
},
core::{marker::PhantomData, mem::MaybeUninit, ptr},
};
#[cfg(feature = "alloc")]
use {
crate::{
len::{BincodeLen, SeqLen},
schema::{size_of_elem_iter, size_of_elem_slice, write_elem_iter, write_elem_slice},
},
alloc::{boxed::Box as AllocBox, collections, rc::Rc as AllocRc, sync::Arc as AllocArc, vec},
core::mem::{self, ManuallyDrop},
};
/// A [`Vec`](std::vec::Vec) with a customizable length encoding.
#[cfg(feature = "alloc")]
pub struct Vec<T, Len = BincodeLen>(PhantomData<Len>, PhantomData<T>);
/// A [`VecDeque`](std::collections::VecDeque) with a customizable length encoding.
#[cfg(feature = "alloc")]
pub struct VecDeque<T, Len = BincodeLen>(PhantomData<Len>, PhantomData<T>);
/// A [`Box<[T]>`](std::boxed::Box) with a customizable length encoding.
///
/// # Examples
///
/// ```
/// # #[cfg(all(feature = "alloc", feature = "derive", feature = "solana-short-vec"))] {
/// # use wincode::{containers, len::ShortU16Len};
/// # use wincode_derive::{SchemaWrite, SchemaRead};
/// # use serde::{Serialize, Deserialize};
/// # use std::array;
/// #[derive(Serialize, SchemaWrite, Clone, Copy)]
/// #[repr(transparent)]
/// struct Address([u8; 32]);
///
/// #[derive(Serialize, SchemaWrite)]
/// struct MyStruct {
///     #[serde(with = "solana_short_vec")]
///     #[wincode(with = "containers::Box<[Address], ShortU16Len>")]
///     address: Box<[Address]>
/// }
///
/// let my_struct = MyStruct {
///     address: vec![Address(array::from_fn(|i| i as u8)); 10].into_boxed_slice(),
/// };
/// let wincode_bytes = wincode::serialize(&my_struct).unwrap();
/// let bincode_bytes = bincode::serialize(&my_struct).unwrap();
/// assert_eq!(wincode_bytes, bincode_bytes);
/// # }
/// ```
#[cfg(feature = "alloc")]
pub struct Box<T: ?Sized, Len = BincodeLen>(PhantomData<T>, PhantomData<Len>);
#[cfg(feature = "alloc")]
/// Like [`Box`], for [`Rc`].
pub struct Rc<T: ?Sized, Len = BincodeLen>(PhantomData<T>, PhantomData<Len>);
#[cfg(feature = "alloc")]
/// Like [`Box`], for [`Arc`].
pub struct Arc<T: ?Sized, Len = BincodeLen>(PhantomData<T>, PhantomData<Len>);
/// Indicates that the type is an element of a sequence, composable with [`containers`](self).
///
/// Prefer [`Pod`] for types representable as raw bytes.
// Kept (deprecated) for backwards compatibility; its schema impls simply
// forward to `T`.
#[deprecated(
    since = "0.2.0",
    note = "Elem is no longer needed for container usage. Use `T` directly instead."
)]
pub struct Elem<T>(PhantomData<T>);
/// Indicates that the type is represented by raw bytes and does not have any invalid bit patterns.
///
/// By opting into `Pod`, you are telling wincode that it can serialize and deserialize a type
/// with a single memcpy -- it won't pay attention to things like struct layout, endianness, or anything
/// else that would require validity or bit pattern checks. This is a very strong claim to make,
/// so be sure that your type adheres to those requirements.
///
/// Composable with sequence [`containers`](self) or compound types (structs, tuples) for
/// an optimized read/write implementation.
///
///
/// This can be useful outside of sequences as well, for example on newtype structs
/// containing byte arrays with `#[repr(transparent)]`.
///
/// ---
/// 💡 **Note:** as of `wincode` `0.2.0`, `Pod` is no longer needed for types that wincode can determine
/// are "Pod-safe".
///
/// This includes:
/// - [`u8`]
/// - [`[u8; N]`](prim@array)
/// - structs comprised of the above, and;
///   - annotated with `#[derive(SchemaWrite)]` or `#[derive(SchemaRead)]`, and;
///   - annotated with `#[repr(transparent)]` or `#[repr(C)]`.
///
/// Similarly, using built-in std collections like `Vec<T>` or `Box<[T]>` where `T` is one of the
/// above will also be automatically optimized.
///
/// You'll really only need to reach for [`Pod`] when dealing with foreign types for which you cannot
/// derive `SchemaWrite` or `SchemaRead`. Or you're in a controlled scenario where you explicitly
/// want to avoid endianness or layout checks.
///
/// # Safety
///
/// - The type must allow any bit pattern (e.g., no `bool`s, no `char`s, etc.)
/// - If used on a compound type like a struct, all fields must also be `Pod`, its
///   layout must be guaranteed (via `#[repr(transparent)]` or `#[repr(C)]`), and the struct
///   must not have any padding.
/// - Must not contain references or pointers (includes types like `Vec` or `Box`).
///   - Note, you may use `Pod` *inside* types like `Vec` or `Box`, e.g., `Vec<Pod<T>>` or `Box<[Pod<T>]>`,
///     but specifying `Pod` on the outer type is invalid.
///
/// # Examples
///
/// A repr-transparent newtype struct containing a byte array where you cannot derive `SchemaWrite` or `SchemaRead`:
/// ```
/// # #[cfg(all(feature = "alloc", feature = "derive"))] {
/// # use wincode::{containers::{self, Pod}};
/// # use wincode_derive::{SchemaWrite, SchemaRead};
/// # use serde::{Serialize, Deserialize};
/// # use std::array;
/// #[derive(Serialize, Deserialize, Clone, Copy)]
/// #[repr(transparent)]
/// struct Address([u8; 32]);
///
/// #[derive(Serialize, Deserialize, SchemaWrite, SchemaRead)]
/// struct MyStruct {
///     #[wincode(with = "Pod<_>")]
///     address: Address
/// }
///
/// let my_struct = MyStruct {
///     address: Address(array::from_fn(|i| i as u8)),
/// };
/// let wincode_bytes = wincode::serialize(&my_struct).unwrap();
/// let bincode_bytes = bincode::serialize(&my_struct).unwrap();
/// assert_eq!(wincode_bytes, bincode_bytes);
/// # }
/// ```
pub struct Pod<T: Copy + 'static>(PhantomData<T>);
// SAFETY:
// - By using `Pod`, user asserts that the type is zero-copy, given the contract of Pod:
//   - The type's in‑memory representation is exactly its serialized bytes.
//   - It can be safely initialized by memcpy (no validation, no endianness/layout work).
//   - Does not contain references or pointers.
unsafe impl<T> ZeroCopy for Pod<T> where T: Copy + 'static {}
impl<T> SchemaWrite for Pod<T>
where
    T: Copy + 'static,
{
    type Src = T;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: size_of::<T>(),
        zero_copy: true,
    };
    #[inline]
    fn size_of(_src: &Self::Src) -> WriteResult<usize> {
        Ok(size_of::<T>())
    }
    /// Serialize `T` as its raw in-memory bytes in a single copy.
    #[inline]
    fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
        // SAFETY: `T` is plain ol' data.
        unsafe { Ok(writer.write_t(src)?) }
    }
}
impl<'de, T> SchemaRead<'de> for Pod<T>
where
    T: Copy + 'static,
{
    type Dst = T;
    const TYPE_META: TypeMeta = TypeMeta::Static {
        size: size_of::<T>(),
        zero_copy: true,
    };
    /// Deserialize `T` from its raw bytes in a single copy.
    // `#[inline]` added for consistency: the paired `SchemaWrite for Pod<T>`
    // impl and every other `read` implementation in this crate are inlined.
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        // SAFETY: `T` is plain ol' data.
        unsafe { Ok(reader.copy_into_t(dst)?) }
    }
}
// Provide `SchemaWrite` implementation for `Elem<T>` for backwards compatibility.
//
// Container impls use blanket implementations over `T` where `T` is `SchemaWrite`,
// so this preserves existing behavior, such that `Elem<T>` behaves exactly like `T`.
#[allow(deprecated)]
impl<T> SchemaWrite for Elem<T>
where
    T: SchemaWrite,
{
    type Src = T::Src;
    const TYPE_META: TypeMeta = T::TYPE_META;
    #[inline]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        T::size_of(src)
    }
    #[inline]
    fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
        T::write(writer, src)
    }
}
// Provide `SchemaRead` implementation for `Elem<T>` for backwards compatibility.
//
// Container impls use blanket implementations over `T` where `T` is `SchemaRead`,
// so this preserves existing behavior, such that `Elem<T>` behaves exactly like `T`.
#[allow(deprecated)]
impl<'de, T> SchemaRead<'de> for Elem<T>
where
    T: SchemaRead<'de>,
{
    type Dst = T::Dst;
    const TYPE_META: TypeMeta = T::TYPE_META;
    #[inline]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        T::read(reader, dst)
    }
}
// Writes a `Vec<T>` as a `Len`-encoded element count followed by each element,
// delegating to the shared slice helpers (which contain the zero-copy fast path).
#[cfg(feature = "alloc")]
impl<T, Len> SchemaWrite for Vec<T, Len>
where
    Len: SeqLen,
    T: SchemaWrite,
    T::Src: Sized,
{
    type Src = vec::Vec<T::Src>;
    #[inline(always)]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        size_of_elem_slice::<T, Len>(src)
    }
    #[inline(always)]
    fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
        write_elem_slice::<T, Len>(writer, src)
    }
}
// Reads a `Len`-encoded element count followed by the elements into a `Vec`.
//
// Three strategies, selected by `T::TYPE_META`:
// - zero-copy static: one bulk memcpy into the spare capacity;
// - non-zero-copy static: element-wise reads through a trusted window sized
//   up front as `size * len`;
// - dynamic: plain element-wise reads.
//
// In the element-wise paths `set_len` is bumped after each successful read so
// that, on an early error return, dropping `vec` drops exactly the elements
// initialized so far.
#[cfg(feature = "alloc")]
impl<'de, T, Len> SchemaRead<'de> for Vec<T, Len>
where
    Len: SeqLen,
    T: SchemaRead<'de>,
{
    type Dst = vec::Vec<T::Dst>;
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        let len = Len::read::<T::Dst>(reader)?;
        let mut vec: vec::Vec<T::Dst> = vec::Vec::with_capacity(len);
        match T::TYPE_META {
            TypeMeta::Static {
                zero_copy: true, ..
            } => {
                // Fix: `with_capacity(len)` guarantees *at least* `len` spare
                // slots, not exactly `len`. Slice to exactly `len` so an
                // over-allocating capacity cannot cause `copy_into_slice_t`
                // to consume extra elements from the reader.
                let spare_capacity = &mut vec.spare_capacity_mut()[..len];
                // SAFETY: T::Dst is zero-copy eligible (no invalid bit patterns, no layout requirements, no endianness checks, etc.).
                unsafe { reader.copy_into_slice_t(spare_capacity)? };
                // SAFETY: `copy_into_slice_t` fills all `len` slots or errors.
                unsafe { vec.set_len(len) };
            }
            TypeMeta::Static {
                size,
                zero_copy: false,
            } => {
                let mut ptr = vec.as_mut_ptr().cast::<MaybeUninit<T::Dst>>();
                #[allow(clippy::arithmetic_side_effects)]
                // SAFETY: `T::TYPE_META` specifies a static size, so `len` reads of `T::Dst`
                // will consume `size * len` bytes, fully consuming the trusted window.
                let mut reader = unsafe { reader.as_trusted_for(size * len) }?;
                for i in 0..len {
                    // SAFETY: `ptr` stays within the `len`-element allocation (i < len).
                    T::read(&mut reader, unsafe { &mut *ptr })?;
                    unsafe {
                        ptr = ptr.add(1);
                        #[allow(clippy::arithmetic_side_effects)]
                        // i <= len
                        vec.set_len(i + 1);
                    }
                }
            }
            TypeMeta::Dynamic => {
                let mut ptr = vec.as_mut_ptr().cast::<MaybeUninit<T::Dst>>();
                for i in 0..len {
                    // SAFETY: `ptr` stays within the `len`-element allocation (i < len).
                    T::read(reader, unsafe { &mut *ptr })?;
                    unsafe {
                        ptr = ptr.add(1);
                        #[allow(clippy::arithmetic_side_effects)]
                        // i <= len
                        vec.set_len(i + 1);
                    }
                }
            }
        }
        dst.write(vec);
        Ok(())
    }
}
/// Drop guard for a slice that is initialized element by element: tracks how
/// many leading elements are live so exactly those are dropped if
/// deserialization fails partway through.
///
/// NOTE(review): the guard drops elements but does not own or free the
/// allocation itself — the caller remains responsible for that.
pub(crate) struct SliceDropGuard<T> {
    // Base pointer of the destination slice.
    ptr: *mut MaybeUninit<T>,
    // Number of leading elements that are fully initialized.
    initialized_len: usize,
}
impl<T> SliceDropGuard<T> {
    /// Creates a guard over `ptr` with zero elements recorded as initialized.
    pub(crate) fn new(ptr: *mut MaybeUninit<T>) -> Self {
        Self {
            ptr,
            initialized_len: 0,
        }
    }
    /// Records one more leading element as initialized. Call after each
    /// successful element read.
    #[inline(always)]
    #[allow(clippy::arithmetic_side_effects)]
    pub(crate) fn inc_len(&mut self) {
        self.initialized_len += 1;
    }
}
impl<T> Drop for SliceDropGuard<T> {
    #[inline(always)]
    fn drop(&mut self) {
        // Drop only the prefix of elements recorded as initialized.
        // SAFETY (caller-provided invariant): `ptr` points to at least
        // `initialized_len` fully initialized `T`s when the guard drops.
        unsafe {
            ptr::drop_in_place(ptr::slice_from_raw_parts_mut(
                self.ptr.cast::<T>(),
                self.initialized_len,
            ));
        }
    }
}
/// Generates `SchemaWrite`/`SchemaRead` impls for heap-owned slices.
///
/// `$container` is the wincode marker type (`Box`/`Rc`/`Arc`); `$target` is the
/// actual alloc container it maps to. Reading allocates the slice up front via
/// `new_uninit_slice`, fills it in place through a raw pointer, and uses drop
/// guards so both the initialized elements and the allocation are reclaimed if
/// a read fails mid-way.
macro_rules! impl_heap_slice {
    ($container:ident => $target:ident) => {
        #[cfg(feature = "alloc")]
        impl<T, Len> SchemaWrite for $container<[T], Len>
        where
            Len: SeqLen,
            T: SchemaWrite,
            T::Src: Sized,
        {
            type Src = $target<[T::Src]>;
            #[inline(always)]
            fn size_of(src: &Self::Src) -> WriteResult<usize> {
                size_of_elem_slice::<T, Len>(src)
            }
            #[inline(always)]
            fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
                write_elem_slice::<T, Len>(writer, src)
            }
        }
        #[cfg(feature = "alloc")]
        impl<'de, T, Len> SchemaRead<'de> for $container<[T], Len>
        where
            Len: SeqLen,
            T: SchemaRead<'de>,
        {
            type Dst = $target<[T::Dst]>;
            #[inline(always)]
            fn read(
                reader: &mut impl Reader<'de>,
                dst: &mut MaybeUninit<Self::Dst>,
            ) -> ReadResult<()> {
                /// Drop guard for `TypeMeta::Static { zero_copy: true }` types.
                ///
                /// In this case we do not need to drop items individually, as
                /// the container will be initialized by a single memcpy.
                /// The guard only reclaims the allocation.
                struct DropGuardRawCopy<T>(*mut [MaybeUninit<T>]);
                impl<T> Drop for DropGuardRawCopy<T> {
                    #[inline]
                    fn drop(&mut self) {
                        // SAFETY:
                        // - `self.0` is a valid pointer to the container created
                        //   by `$target::into_raw`.
                        // - `drop` is only called in this drop guard, and the drop guard
                        //   is forgotten if reading succeeds.
                        let container = unsafe { $target::from_raw(self.0) };
                        drop(container);
                    }
                }
                /// Drop guard for `TypeMeta::Static { zero_copy: false } | TypeMeta::Dynamic` types.
                ///
                /// In this case we need to drop items individually, as
                /// the container will be initialized by a series of reads.
                /// `inner` drops the initialized prefix; `fat` reclaims the allocation.
                struct DropGuardElemCopy<T> {
                    inner: ManuallyDrop<SliceDropGuard<T>>,
                    fat: *mut [MaybeUninit<T>],
                }
                impl<T> DropGuardElemCopy<T> {
                    #[inline(always)]
                    fn new(fat: *mut [MaybeUninit<T>], raw: *mut MaybeUninit<T>) -> Self {
                        Self {
                            inner: ManuallyDrop::new(SliceDropGuard::new(raw)),
                            // We need to store the fat pointer to deallocate the container.
                            fat,
                        }
                    }
                }
                impl<T> Drop for DropGuardElemCopy<T> {
                    #[inline]
                    fn drop(&mut self) {
                        // SAFETY: `ManuallyDrop::drop` is only called in this drop guard.
                        unsafe {
                            // Drop the initialized elements first.
                            ManuallyDrop::drop(&mut self.inner);
                        }
                        // SAFETY:
                        // - `self.fat` is a valid pointer to the container created with `$target::into_raw`.
                        // - `drop` is only called in this drop guard, and the drop guard is forgotten if read succeeds.
                        // Dropping a `[MaybeUninit<T>]` container frees memory without
                        // dropping elements (already handled above).
                        let container = unsafe { $target::from_raw(self.fat) };
                        drop(container);
                    }
                }
                let len = Len::read::<T::Dst>(reader)?;
                // Exactly `len` uninitialized slots; ownership is temporarily
                // converted to a raw pointer so we can fill it in place.
                let mem = $target::<[T::Dst]>::new_uninit_slice(len);
                let fat = $target::into_raw(mem) as *mut [MaybeUninit<T::Dst>];
                match T::TYPE_META {
                    TypeMeta::Static {
                        zero_copy: true, ..
                    } => {
                        let guard = DropGuardRawCopy(fat);
                        // SAFETY: `fat` is a valid pointer to the container created with `$target::into_raw`.
                        let dst = unsafe { &mut *fat };
                        // SAFETY: T is zero-copy eligible (no invalid bit patterns, no layout requirements, no endianness checks, etc.).
                        unsafe { reader.copy_into_slice_t(dst)? };
                        mem::forget(guard);
                    }
                    TypeMeta::Static {
                        size,
                        zero_copy: false,
                    } => {
                        // SAFETY: `fat` is a valid pointer to the container created with `$target::into_raw`.
                        let raw_base = unsafe { (*fat).as_mut_ptr() };
                        let mut guard: DropGuardElemCopy<T::Dst> =
                            DropGuardElemCopy::new(fat, raw_base);
                        // SAFETY: `T::TYPE_META` specifies a static size, so `len` reads of `T::Dst`
                        // will consume `size * len` bytes, fully consuming the trusted window.
                        #[allow(clippy::arithmetic_side_effects)]
                        let reader = &mut unsafe { reader.as_trusted_for(size * len) }?;
                        for i in 0..len {
                            // SAFETY:
                            // - `raw_base` is a valid pointer to the container created with `$target::into_raw`.
                            // - The container is initialized with capacity for `len` elements, and `i` is guaranteed to be
                            //   less than `len`.
                            let slot = unsafe { &mut *raw_base.add(i) };
                            T::read(reader, slot)?;
                            guard.inner.inc_len();
                        }
                        mem::forget(guard);
                    }
                    TypeMeta::Dynamic => {
                        // SAFETY: `fat` is a valid pointer to the container created with `$target::into_raw`.
                        let raw_base = unsafe { (*fat).as_mut_ptr() };
                        let mut guard: DropGuardElemCopy<T::Dst> =
                            DropGuardElemCopy::new(fat, raw_base);
                        for i in 0..len {
                            // SAFETY:
                            // - `raw_base` is a valid pointer to the container created with `$target::into_raw`.
                            // - The container is initialized with capacity for `len` elements, and `i` is guaranteed to be
                            //   less than `len`.
                            let slot = unsafe { &mut *raw_base.add(i) };
                            T::read(reader, slot)?;
                            guard.inner.inc_len();
                        }
                        mem::forget(guard);
                    }
                }
                // SAFETY:
                // - `fat` is a valid pointer to the container created with `$target::into_raw`.
                // - the pointer memory is only deallocated in the drop guard, and the drop guard
                //   is forgotten if reading succeeds.
                let container = unsafe { $target::from_raw(fat) };
                // SAFETY: `container` is fully initialized if read succeeds.
                let container = unsafe { container.assume_init() };
                dst.write(container);
                Ok(())
            }
        }
    };
}
// Instantiate the heap-slice schema impls for each supported owning container.
// The `Alloc*` names are aliases for the actual `alloc` types, disambiguating
// them from the wincode marker types of the same name.
impl_heap_slice!(Box => AllocBox);
impl_heap_slice!(Rc => AllocRc);
impl_heap_slice!(Arc => AllocArc);
// Serializes a `VecDeque` as a `Len`-encoded count followed by the elements.
// The zero-copy fast path writes the deque's two internal contiguous slices
// directly through a trusted window, skipping per-element bookkeeping.
#[cfg(feature = "alloc")]
impl<T, Len> SchemaWrite for VecDeque<T, Len>
where
    Len: SeqLen,
    T: SchemaWrite,
    T::Src: Sized,
{
    type Src = collections::VecDeque<T::Src>;
    #[inline(always)]
    fn size_of(value: &Self::Src) -> WriteResult<usize> {
        size_of_elem_iter::<T, Len>(value.iter())
    }
    #[inline(always)]
    fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
        if let TypeMeta::Static {
            size,
            zero_copy: true,
        } = T::TYPE_META
        {
            // NOTE(review): `src.len() * size` uses unchecked arithmetic;
            // presumably serializable lengths are bounded elsewhere — confirm.
            #[allow(clippy::arithmetic_side_effects)]
            let needed = Len::write_bytes_needed(src.len())? + src.len() * size;
            // SAFETY: `needed` is the size of the encoded length plus the size of the items.
            // `Len::write` and `len` writes of `T::Src` will write `needed` bytes,
            // fully initializing the trusted window.
            let writer = &mut unsafe { writer.as_trusted_for(needed) }?;
            Len::write(writer, src.len())?;
            let (front, back) = src.as_slices();
            // SAFETY:
            // - `T` is zero-copy eligible (no invalid bit patterns, no layout requirements, no endianness checks, etc.).
            // - `front` and `back` are valid non-overlapping slices.
            unsafe {
                writer.write_slice_t(front)?;
                writer.write_slice_t(back)?;
            }
            writer.finish()?;
            return Ok(());
        }
        // Dynamic or non-zero-copy element types: element-by-element fallback.
        write_elem_iter::<T, Len>(writer, src.iter())
    }
}
/// Deserializes a `VecDeque` by delegating to the `Vec` schema and converting.
#[cfg(feature = "alloc")]
impl<'de, T, Len> SchemaRead<'de> for VecDeque<T, Len>
where
    Len: SeqLen,
    T: SchemaRead<'de>,
{
    type Dst = collections::VecDeque<T::Dst>;
    #[inline(always)]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        // Reuse the contiguous-read optimization of the `Vec` impl;
        // converting `Vec<T>` into a `VecDeque<T>` is essentially free.
        let elements = <Vec<T, Len>>::get(reader)?;
        dst.write(collections::VecDeque::from(elements));
        Ok(())
    }
}
#[cfg(feature = "alloc")]
/// A [`BinaryHeap`](alloc::collections::BinaryHeap) with a customizable length encoding.
///
/// Marker type only: never constructed at runtime; it exists to select the
/// `SchemaWrite`/`SchemaRead` impls below.
pub struct BinaryHeap<T, Len = BincodeLen>(PhantomData<Len>, PhantomData<T>);
// Serializes a `BinaryHeap` as its backing slice, i.e. in the heap's internal
// (arbitrary) order, not sorted order.
// NOTE(review): presumably this matches bincode/serde's iteration order for
// `BinaryHeap` — the `verify_serialize_into`-style round-trip tests should
// confirm this.
#[cfg(feature = "alloc")]
impl<T, Len> SchemaWrite for BinaryHeap<T, Len>
where
    Len: SeqLen,
    T: SchemaWrite,
    T::Src: Sized,
{
    type Src = collections::BinaryHeap<T::Src>;
    #[inline(always)]
    fn size_of(src: &Self::Src) -> WriteResult<usize> {
        size_of_elem_slice::<T, Len>(src.as_slice())
    }
    #[inline(always)]
    fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
        write_elem_slice::<T, Len>(writer, src.as_slice())
    }
}
/// Deserializes a `BinaryHeap` by delegating to the `Vec` schema, then
/// heapifying the result (`BinaryHeap::from(Vec)` is O(n)).
#[cfg(feature = "alloc")]
impl<'de, T, Len> SchemaRead<'de> for BinaryHeap<T, Len>
where
    Len: SeqLen,
    T: SchemaRead<'de>,
    T::Dst: Ord,
{
    type Dst = collections::BinaryHeap<T::Dst>;
    #[inline(always)]
    fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
        // Leverage the `Vec` impl's contiguous-read fast path.
        let elements = <Vec<T, Len>>::get(reader)?;
        dst.write(collections::BinaryHeap::from(elements));
        Ok(())
    }
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode/benches/benchmarks.rs | wincode/benches/benchmarks.rs | use {
criterion::{criterion_group, criterion_main, BenchmarkId, Criterion, Throughput},
serde::{Deserialize, Serialize},
std::{collections::HashMap, hint::black_box},
wincode::{deserialize, serialize, serialize_into, serialized_size, SchemaRead, SchemaWrite},
};
/// Small mixed-field struct exercising the common "plain struct" schema path
/// (two fixed-size integers plus a bool).
#[derive(Serialize, Deserialize, SchemaWrite, SchemaRead, Clone)]
struct SimpleStruct {
    id: u64,
    value: u64,
    flag: bool,
}
/// All-bytes `#[repr(C)]` struct (56 bytes, no padding) exercising the
/// zero-copy / memcpy fast paths.
#[repr(C)]
#[derive(Clone, Copy, SchemaWrite, SchemaRead, Serialize, Deserialize)]
struct PodStruct {
    a: [u8; 32],
    b: [u8; 16],
    c: [u8; 8],
}
/// Verification helper: asserts that wincode's output is byte-identical to
/// bincode's for `data` — via both the allocating path (`serialize`) and the
/// in-place path (`serialize_into`) — and returns the serialized bytes.
fn verify_serialize_into<T>(data: &T) -> Vec<u8>
where
    T: SchemaWrite<Src = T> + Serialize + ?Sized,
{
    let expected = bincode::serialize(data).unwrap();
    assert_eq!(serialize(data).unwrap(), expected);
    let mut scratch = vec![0u8; serialized_size(data).unwrap() as usize];
    serialize_into(&mut scratch.as_mut_slice(), data).unwrap();
    assert_eq!(&scratch[..], &expected[..]);
    expected
}
/// Preallocates an output buffer of exactly the serialized size of `data`.
/// The allocation happens outside the benchmark loop so that only
/// serialization itself is measured.
fn create_bench_buffer<T>(data: &T) -> Vec<u8>
where
    T: SchemaWrite<Src = T> + ?Sized,
{
    let len = serialized_size(data).unwrap() as usize;
    let mut buffer = Vec::new();
    buffer.resize(len, 0u8);
    buffer
}
/// wincode vs. bincode on a single `u64`: in-place serialize, allocating
/// serialize, size computation, and deserialize.
fn bench_primitives_comparison(c: &mut Criterion) {
    let mut group = c.benchmark_group("Primitives");
    group.throughput(Throughput::Elements(1));
    let data = 0xDEADBEEFCAFEBABEu64;
    // Also asserts wincode == bincode byte-for-byte before timing anything.
    let serialized = verify_serialize_into(&data);
    // In-place serialization (measures pure serialization, no allocation)
    group.bench_function("u64/wincode/serialize_into", |b| {
        let mut buffer = create_bench_buffer(&data);
        b.iter(|| serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(&data)).unwrap());
    });
    group.bench_function("u64/bincode/serialize_into", |b| {
        let mut buffer = create_bench_buffer(&data);
        b.iter(|| {
            bincode::serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(&data))
                .unwrap()
        });
    });
    group.bench_function("u64/wincode/serialize", |b| {
        b.iter(|| serialize(black_box(&data)).unwrap());
    });
    group.bench_function("u64/bincode/serialize", |b| {
        b.iter(|| bincode::serialize(black_box(&data)).unwrap());
    });
    group.bench_function("u64/wincode/serialized_size", |b| {
        b.iter(|| serialized_size(black_box(&data)).unwrap());
    });
    group.bench_function("u64/bincode/serialized_size", |b| {
        b.iter(|| bincode::serialized_size(black_box(&data)).unwrap());
    });
    group.bench_function("u64/wincode/deserialize", |b| {
        b.iter(|| deserialize::<u64>(black_box(&serialized)).unwrap());
    });
    group.bench_function("u64/bincode/deserialize", |b| {
        b.iter(|| bincode::deserialize::<u64>(black_box(&serialized)).unwrap());
    });
    group.finish();
}
/// wincode vs. bincode on `Vec<u64>` at several sizes; throughput is reported
/// in bytes of serialized payload.
fn bench_vec_comparison(c: &mut Criterion) {
    let mut group = c.benchmark_group("Vec<u64>");
    for size in [100, 1_000, 10_000] {
        let data: Vec<u64> = (0..size).map(|i| i as u64).collect();
        let data_size = serialized_size(&data).unwrap();
        // Set before registering benches so it applies to all of them.
        group.throughput(Throughput::Bytes(data_size));
        let serialized = verify_serialize_into(&data);
        group.bench_with_input(
            BenchmarkId::new("wincode/serialize_into", size),
            &data,
            |b, d| {
                let mut buffer = create_bench_buffer(d);
                b.iter(|| {
                    serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(d)).unwrap()
                })
            },
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialize_into", size),
            &data,
            |b, d| {
                let mut buffer = create_bench_buffer(d);
                b.iter(|| {
                    bincode::serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(d))
                        .unwrap()
                })
            },
        );
        // Allocating serialization
        group.bench_with_input(
            BenchmarkId::new("wincode/serialize", size),
            &data,
            |b, d| b.iter(|| serialize(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialize", size),
            &data,
            |b, d| b.iter(|| bincode::serialize(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("wincode/serialized_size", size),
            &data,
            |b, d| b.iter(|| serialized_size(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialized_size", size),
            &data,
            |b, d| b.iter(|| bincode::serialized_size(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("wincode/deserialize", size),
            &serialized,
            |b, s| b.iter(|| deserialize::<Vec<u64>>(black_box(s)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/deserialize", size),
            &serialized,
            |b, s| b.iter(|| bincode::deserialize::<Vec<u64>>(black_box(s)).unwrap()),
        );
    }
    group.finish();
}
/// wincode vs. bincode on a single `SimpleStruct` value.
fn bench_struct_comparison(c: &mut Criterion) {
    let mut group = c.benchmark_group("SimpleStruct");
    group.throughput(Throughput::Elements(1));
    let data = SimpleStruct {
        id: 12345,
        value: 0xDEADBEEF,
        flag: true,
    };
    let serialized = verify_serialize_into(&data);
    group.bench_function("wincode/serialize_into", |b| {
        let mut buffer = create_bench_buffer(&data);
        b.iter(|| serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(&data)).unwrap());
    });
    group.bench_function("bincode/serialize_into", |b| {
        let mut buffer = create_bench_buffer(&data);
        b.iter(|| {
            bincode::serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(&data))
                .unwrap()
        });
    });
    group.bench_function("wincode/serialize", |b| {
        b.iter(|| serialize(black_box(&data)).unwrap());
    });
    group.bench_function("bincode/serialize", |b| {
        b.iter(|| bincode::serialize(black_box(&data)).unwrap());
    });
    group.bench_function("wincode/serialized_size", |b| {
        b.iter(|| serialized_size(black_box(&data)).unwrap());
    });
    group.bench_function("bincode/serialized_size", |b| {
        b.iter(|| bincode::serialized_size(black_box(&data)).unwrap());
    });
    group.bench_function("wincode/deserialize", |b| {
        b.iter(|| deserialize::<SimpleStruct>(black_box(&serialized)).unwrap());
    });
    group.bench_function("bincode/deserialize", |b| {
        b.iter(|| bincode::deserialize::<SimpleStruct>(black_box(&serialized)).unwrap());
    });
    group.finish();
}
/// wincode vs. bincode on a single `PodStruct` (zero-copy fast path).
fn bench_pod_struct_single_comparison(c: &mut Criterion) {
    let mut group = c.benchmark_group("PodStruct");
    group.throughput(Throughput::Elements(1));
    let data = PodStruct {
        a: [42u8; 32],
        b: [17u8; 16],
        c: [99u8; 8],
    };
    let serialized = verify_serialize_into(&data);
    group.bench_function("wincode/serialize_into", |b| {
        let mut buffer = create_bench_buffer(&data);
        b.iter(|| serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(&data)).unwrap());
    });
    group.bench_function("bincode/serialize_into", |b| {
        let mut buffer = create_bench_buffer(&data);
        b.iter(|| {
            bincode::serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(&data))
                .unwrap()
        });
    });
    group.bench_function("wincode/serialize", |b| {
        b.iter(|| serialize(black_box(&data)).unwrap());
    });
    group.bench_function("bincode/serialize", |b| {
        b.iter(|| bincode::serialize(black_box(&data)).unwrap());
    });
    group.bench_function("wincode/serialized_size", |b| {
        b.iter(|| serialized_size(black_box(&data)).unwrap());
    });
    group.bench_function("bincode/serialized_size", |b| {
        b.iter(|| bincode::serialized_size(black_box(&data)).unwrap());
    });
    group.bench_function("wincode/deserialize", |b| {
        b.iter(|| deserialize::<PodStruct>(black_box(&serialized)).unwrap());
    });
    group.bench_function("bincode/deserialize", |b| {
        b.iter(|| bincode::deserialize::<PodStruct>(black_box(&serialized)).unwrap());
    });
    group.finish();
}
/// wincode vs. bincode on `HashMap<u64, u64>`; throughput is reported in
/// elements (map entries), unlike the byte-based Vec benches.
fn bench_hashmap_comparison(c: &mut Criterion) {
    let mut group = c.benchmark_group("HashMap<u64, u64>");
    for size in [100, 1_000] {
        let data: HashMap<u64, u64> = (0..size).map(|i: u64| (i, i.wrapping_mul(2))).collect();
        group.throughput(Throughput::Elements(size));
        let serialized = verify_serialize_into(&data);
        group.bench_with_input(
            BenchmarkId::new("wincode/serialize_into", size),
            &data,
            |b, d| {
                let mut buffer = create_bench_buffer(d);
                b.iter(|| {
                    serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(d)).unwrap()
                })
            },
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialize_into", size),
            &data,
            |b, d| {
                let mut buffer = create_bench_buffer(d);
                b.iter(|| {
                    bincode::serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(d))
                        .unwrap()
                })
            },
        );
        group.bench_with_input(
            BenchmarkId::new("wincode/serialize", size),
            &data,
            |b, d| b.iter(|| serialize(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialize", size),
            &data,
            |b, d| b.iter(|| bincode::serialize(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("wincode/serialized_size", size),
            &data,
            |b, d| b.iter(|| serialized_size(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialized_size", size),
            &data,
            |b, d| b.iter(|| bincode::serialized_size(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("wincode/deserialize", size),
            &serialized,
            |b, s| b.iter(|| deserialize::<HashMap<u64, u64>>(black_box(s)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/deserialize", size),
            &serialized,
            |b, s| b.iter(|| bincode::deserialize::<HashMap<u64, u64>>(black_box(s)).unwrap()),
        );
    }
    group.finish();
}
/// wincode vs. bincode on `HashMap<[u8; 16], PodStruct>` — map traversal with
/// zero-copy-eligible keys and values.
fn bench_hashmap_pod_comparison(c: &mut Criterion) {
    let mut group = c.benchmark_group("HashMap<[u8; 16], PodStruct>");
    for size in [100, 1_000] {
        let data: HashMap<[u8; 16], PodStruct> = (0..size)
            .map(|i| {
                // Key = little-endian-ish index in the first two bytes; rest zero.
                let mut key = [0u8; 16];
                key[0] = i as u8;
                key[1] = (i >> 8) as u8;
                (
                    key,
                    PodStruct {
                        a: [i as u8; 32],
                        b: [i as u8; 16],
                        c: [i as u8; 8],
                    },
                )
            })
            .collect();
        group.throughput(Throughput::Elements(size));
        let serialized = verify_serialize_into(&data);
        group.bench_with_input(
            BenchmarkId::new("wincode/serialize_into", size),
            &data,
            |b, d| {
                let mut buffer = create_bench_buffer(d);
                b.iter(|| {
                    serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(d)).unwrap()
                })
            },
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialize_into", size),
            &data,
            |b, d| {
                let mut buffer = create_bench_buffer(d);
                b.iter(|| {
                    bincode::serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(d))
                        .unwrap()
                })
            },
        );
        group.bench_with_input(
            BenchmarkId::new("wincode/serialize", size),
            &data,
            |b, d| b.iter(|| serialize(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialize", size),
            &data,
            |b, d| b.iter(|| bincode::serialize(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("wincode/serialized_size", size),
            &data,
            |b, d| b.iter(|| serialized_size(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialized_size", size),
            &data,
            |b, d| b.iter(|| bincode::serialized_size(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("wincode/deserialize", size),
            &serialized,
            |b, s| b.iter(|| deserialize::<HashMap<[u8; 16], PodStruct>>(black_box(s)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/deserialize", size),
            &serialized,
            |b, s| {
                b.iter(|| {
                    bincode::deserialize::<HashMap<[u8; 16], PodStruct>>(black_box(s)).unwrap()
                })
            },
        );
    }
    group.finish();
}
/// wincode vs. bincode on `Vec<PodStruct>` — the bulk memcpy (zero-copy slice)
/// fast path; throughput in bytes of serialized payload.
fn bench_pod_struct_comparison(c: &mut Criterion) {
    let mut group = c.benchmark_group("Vec<PodStruct>");
    for size in [1_000, 10_000] {
        let data: Vec<PodStruct> = (0..size)
            .map(|i| PodStruct {
                a: [i as u8; 32],
                b: [i as u8; 16],
                c: [i as u8; 8],
            })
            .collect();
        let data_size = serialized_size(&data).unwrap();
        group.throughput(Throughput::Bytes(data_size));
        let serialized = verify_serialize_into(&data);
        // In-place serialization
        group.bench_with_input(
            BenchmarkId::new("wincode/serialize_into", size),
            &data,
            |b, d| {
                let mut buffer = create_bench_buffer(d);
                b.iter(|| {
                    serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(d)).unwrap()
                })
            },
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialize_into", size),
            &data,
            |b, d| {
                let mut buffer = create_bench_buffer(d);
                b.iter(|| {
                    bincode::serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(d))
                        .unwrap()
                })
            },
        );
        group.bench_with_input(
            BenchmarkId::new("wincode/serialize", size),
            &data,
            |b, d| b.iter(|| serialize(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialize", size),
            &data,
            |b, d| b.iter(|| bincode::serialize(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("wincode/serialized_size", size),
            &data,
            |b, d| b.iter(|| serialized_size(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/serialized_size", size),
            &data,
            |b, d| b.iter(|| bincode::serialized_size(black_box(d)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("wincode/deserialize", size),
            &serialized,
            |b, s| b.iter(|| deserialize::<Vec<PodStruct>>(black_box(s)).unwrap()),
        );
        group.bench_with_input(
            BenchmarkId::new("bincode/deserialize", size),
            &serialized,
            |b, s| b.iter(|| bincode::deserialize::<Vec<PodStruct>>(black_box(s)).unwrap()),
        );
    }
    group.finish();
}
// Unit enum - only discriminant serialized, size known at compile time.
// Exercises the "static enum" fast path in the derive.
#[derive(Serialize, Deserialize, SchemaWrite, SchemaRead, Clone, Copy, PartialEq)]
enum UnitEnum {
    A,
    B,
    C,
    D,
}
// All variants same size (2x u64) - enables static size optimization.
// Field names differ per variant but each serializes to exactly 16 bytes + tag.
#[derive(Serialize, Deserialize, SchemaWrite, SchemaRead, Clone, PartialEq)]
enum SameSizedEnum {
    Transfer { amount: u64, fee: u64 },
    Stake { lamports: u64, rent: u64 },
    Withdraw { amount: u64, timestamp: u64 },
    Close { refund: u64, slot: u64 },
}
// Different sized variants - baseline for comparison (forces the dynamic
// size path: serialized size depends on the active variant).
#[derive(Serialize, Deserialize, SchemaWrite, SchemaRead, Clone, PartialEq)]
enum MixedSizedEnum {
    Small { flag: u8 },
    Medium { value: u64 },
    Large { x: u64, y: u64, z: u64 },
}
// Macro to reduce duplication across enum benchmarks.
//
// Expands to one benchmark fn (`$fn_name`) that compares wincode and bincode
// on a single `$type` value (`$data`) across serialize_into / serialize /
// serialized_size / deserialize, with byte throughput.
macro_rules! bench_enum {
    ($fn_name:ident, $group_name:literal, $type:ty, $data:expr) => {
        fn $fn_name(c: &mut Criterion) {
            let mut group = c.benchmark_group($group_name);
            let data: $type = $data;
            let data_size = serialized_size(&data).unwrap();
            group.throughput(Throughput::Bytes(data_size));
            let serialized = verify_serialize_into(&data);
            group.bench_function("wincode/serialize_into", |b| {
                let mut buffer = create_bench_buffer(&data);
                b.iter(|| {
                    serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(&data)).unwrap()
                });
            });
            group.bench_function("bincode/serialize_into", |b| {
                let mut buffer = create_bench_buffer(&data);
                b.iter(|| {
                    bincode::serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(&data))
                        .unwrap()
                });
            });
            group.bench_function("wincode/serialize", |b| {
                b.iter(|| serialize(black_box(&data)).unwrap());
            });
            group.bench_function("bincode/serialize", |b| {
                b.iter(|| bincode::serialize(black_box(&data)).unwrap());
            });
            group.bench_function("wincode/serialized_size", |b| {
                b.iter(|| serialized_size(black_box(&data)).unwrap());
            });
            group.bench_function("bincode/serialized_size", |b| {
                b.iter(|| bincode::serialized_size(black_box(&data)).unwrap());
            });
            group.bench_function("wincode/deserialize", |b| {
                b.iter(|| deserialize::<$type>(black_box(&serialized)).unwrap());
            });
            group.bench_function("bincode/deserialize", |b| {
                b.iter(|| bincode::deserialize::<$type>(black_box(&serialized)).unwrap());
            });
            group.finish();
        }
    };
}
// Macro to reduce duplication across Vec enum benchmarks.
//
// Expands to one benchmark fn comparing wincode and bincode on vectors of the
// enum type at several sizes.
//
// Fix vs. previous version: Criterion's `throughput` must be set on the group
// *before* a benchmark is registered — chaining `.throughput(..)` on the value
// returned by `bench_with_input` only affects benchmarks registered *after*
// it (and left the first bench of each size with no throughput at all). It is
// now set once per input size, matching `bench_vec_comparison`.
macro_rules! bench_vec_enum {
    ($fn_name:ident, $group_name:literal, $type:ty, $data_gen:expr) => {
        fn $fn_name(c: &mut Criterion) {
            let mut group = c.benchmark_group($group_name);
            for size in [100, 1_000, 10_000] {
                let data: Vec<$type> = $data_gen(size);
                let data_size = serialized_size(&data).unwrap();
                // Applies to every benchmark registered below for this size.
                group.throughput(Throughput::Bytes(data_size));
                // Also asserts wincode == bincode byte-for-byte up front.
                let serialized = verify_serialize_into(&data);
                group.bench_with_input(
                    BenchmarkId::new("wincode/serialize_into", size),
                    &data,
                    |b, d| {
                        let mut buffer = create_bench_buffer(d);
                        b.iter(|| {
                            serialize_into(black_box(&mut buffer.as_mut_slice()), black_box(d))
                                .unwrap()
                        })
                    },
                );
                group.bench_with_input(
                    BenchmarkId::new("bincode/serialize_into", size),
                    &data,
                    |b, d| {
                        let mut buffer = create_bench_buffer(d);
                        b.iter(|| {
                            bincode::serialize_into(
                                black_box(&mut buffer.as_mut_slice()),
                                black_box(d),
                            )
                            .unwrap()
                        })
                    },
                );
                group.bench_with_input(
                    BenchmarkId::new("wincode/serialize", size),
                    &data,
                    |b, d| b.iter(|| serialize(black_box(d)).unwrap()),
                );
                group.bench_with_input(
                    BenchmarkId::new("bincode/serialize", size),
                    &data,
                    |b, d| b.iter(|| bincode::serialize(black_box(d)).unwrap()),
                );
                group.bench_with_input(
                    BenchmarkId::new("wincode/serialized_size", size),
                    &data,
                    |b, d| b.iter(|| serialized_size(black_box(d)).unwrap()),
                );
                group.bench_with_input(
                    BenchmarkId::new("bincode/serialized_size", size),
                    &data,
                    |b, d| b.iter(|| bincode::serialized_size(black_box(d)).unwrap()),
                );
                group.bench_with_input(
                    BenchmarkId::new("wincode/deserialize", size),
                    &serialized,
                    |b, s| b.iter(|| deserialize::<Vec<$type>>(black_box(s)).unwrap()),
                );
                group.bench_with_input(
                    BenchmarkId::new("bincode/deserialize", size),
                    &serialized,
                    |b, s| b.iter(|| bincode::deserialize::<Vec<$type>>(black_box(s)).unwrap()),
                );
            }
            group.finish();
        }
    };
}
// Single-value enum benchmarks: one representative value per enum shape.
bench_enum!(
    bench_unit_enum_comparison,
    "UnitEnum",
    UnitEnum,
    UnitEnum::C
);
bench_enum!(
    bench_same_sized_enum_comparison,
    "SameSizedEnum",
    SameSizedEnum,
    SameSizedEnum::Transfer {
        amount: 1_000_000,
        fee: 5000
    }
);
bench_enum!(
    bench_mixed_sized_enum_comparison,
    "MixedSizedEnum",
    MixedSizedEnum,
    MixedSizedEnum::Large {
        x: 111,
        y: 222,
        z: 333
    }
);
// Vector enum benchmarks: each generator cycles through the enum's variants
// so the data mixes all discriminants.
bench_vec_enum!(
    bench_vec_unit_enum_comparison,
    "Vec<UnitEnum>",
    UnitEnum,
    |size| {
        (0..size)
            .map(|i| match i % 4 {
                0 => UnitEnum::A,
                1 => UnitEnum::B,
                2 => UnitEnum::C,
                _ => UnitEnum::D,
            })
            .collect()
    }
);
bench_vec_enum!(
    bench_vec_same_sized_enum_comparison,
    "Vec<SameSizedEnum>",
    SameSizedEnum,
    |size| {
        (0..size)
            .map(|i| match i % 4 {
                0 => SameSizedEnum::Transfer {
                    amount: i as u64,
                    fee: 5000,
                },
                1 => SameSizedEnum::Stake {
                    lamports: i as u64,
                    rent: 1000,
                },
                2 => SameSizedEnum::Withdraw {
                    amount: i as u64,
                    timestamp: i as u64,
                },
                _ => SameSizedEnum::Close {
                    refund: i as u64,
                    slot: i as u64,
                },
            })
            .collect()
    }
);
bench_vec_enum!(
    bench_vec_mixed_sized_enum_comparison,
    "Vec<MixedSizedEnum>",
    MixedSizedEnum,
    |size| {
        (0..size)
            .map(|i| match i % 3 {
                0 => MixedSizedEnum::Small { flag: i as u8 },
                1 => MixedSizedEnum::Medium { value: i as u64 },
                _ => MixedSizedEnum::Large {
                    x: i as u64,
                    y: i as u64,
                    z: i as u64,
                },
            })
            .collect()
    }
);
// Register all benchmarks with Criterion's default configuration.
criterion_group!(
    benches,
    bench_primitives_comparison,
    bench_vec_comparison,
    bench_struct_comparison,
    bench_pod_struct_single_comparison,
    bench_hashmap_comparison,
    bench_hashmap_pod_comparison,
    bench_pod_struct_comparison,
    bench_unit_enum_comparison,
    bench_same_sized_enum_comparison,
    bench_mixed_sized_enum_comparison,
    bench_vec_unit_enum_comparison,
    bench_vec_same_sized_enum_comparison,
    bench_vec_mixed_sized_enum_comparison,
);
criterion_main!(benches);
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode-derive/src/lib.rs | wincode-derive/src/lib.rs | //! Derive macros for `SchemaWrite` and `SchemaRead`.
//!
//! Note using this on packed structs is UB.
//!
//! Refer to the [`wincode`](https://docs.rs/wincode) crate for examples.
use {
proc_macro::TokenStream,
syn::{parse_macro_input, DeriveInput},
};
mod common;
mod schema_read;
mod schema_write;
/// Implement `SchemaWrite` for a struct or enum.
///
/// Parse failures panic via `parse_macro_input!`; codegen failures are
/// surfaced as `compile_error!` tokens so rustc reports them at the use site.
#[proc_macro_derive(SchemaWrite, attributes(wincode))]
pub fn derive_schema_write(input: TokenStream) -> TokenStream {
    let derive_input = parse_macro_input!(input as DeriveInput);
    schema_write::generate(derive_input)
        .map_or_else(|e| e.write_errors().into(), |tokens| tokens.into())
}
/// Implement `SchemaRead` for a struct or enum.
///
/// Parse failures panic via `parse_macro_input!`; codegen failures are
/// surfaced as `compile_error!` tokens so rustc reports them at the use site.
#[proc_macro_derive(SchemaRead, attributes(wincode))]
pub fn derive_schema_read(input: TokenStream) -> TokenStream {
    let derive_input = parse_macro_input!(input as DeriveInput);
    schema_read::generate(derive_input)
        .map_or_else(|e| e.write_errors().into(), |tokens| tokens.into())
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode-derive/src/schema_write.rs | wincode-derive/src/schema_write.rs | use {
crate::common::{
default_tag_encoding, extract_repr, get_crate_name, get_src_dst, suppress_unused_fields,
Field, FieldsExt, SchemaArgs, StructRepr, TraitImpl, Variant, VariantsExt,
},
darling::{
ast::{Data, Fields, Style},
Error, FromDeriveInput, Result,
},
proc_macro2::TokenStream,
quote::quote,
syn::{parse_quote, DeriveInput, Type},
};
/// Generates the three `SchemaWrite` code fragments for a struct:
/// the `size_of` body, the `write` body, and the `TYPE_META` expression.
///
/// NOTE(review): an empty struct yields `None` for the type-meta slot here,
/// while the enum path yields `TypeMeta::Dynamic` — presumably the caller
/// special-cases `None`; confirm in the shared codegen.
fn impl_struct(
    fields: &Fields<Field>,
    repr: &StructRepr,
) -> (TokenStream, TokenStream, TokenStream) {
    // Zero fields: nothing on the wire.
    if fields.is_empty() {
        return (quote! {Ok(0)}, quote! {Ok(())}, quote! {None});
    }
    let target = fields.iter().map(|field| field.target_resolved());
    let ident = fields.struct_member_ident_iter();
    // One `<Target as SchemaWrite>::write` statement per field, in declaration order.
    let writes = fields
        .iter()
        .enumerate()
        .map(|(i, field)| {
            let ident = field.struct_member_ident(i);
            let target = field.target_resolved();
            quote! { <#target as SchemaWrite>::write(writer, &src.#ident)?; }
        })
        .collect::<Vec<_>>();
    let type_meta_impl = fields.type_meta_impl(TraitImpl::SchemaWrite, repr);
    (
        quote! {
            // Statically sized structs short-circuit: the size is a constant.
            if let TypeMeta::Static { size, .. } = <Self as SchemaWrite>::TYPE_META {
                return Ok(size);
            }
            let mut total = 0usize;
            #(
                total += <#target as SchemaWrite>::size_of(&src.#ident)?;
            )*
            Ok(total)
        },
        quote! {
            match <Self as SchemaWrite>::TYPE_META {
                TypeMeta::Static { size, .. } => {
                    // SAFETY: `size` is the serialized size of the struct, which is the sum
                    // of the serialized sizes of the fields.
                    // Calling `write` on each field will write exactly `size` bytes,
                    // fully initializing the trusted window.
                    let writer = &mut unsafe { writer.as_trusted_for(size) }?;
                    #(#writes)*
                    writer.finish()?;
                }
                TypeMeta::Dynamic => {
                    #(#writes)*
                }
            }
            Ok(())
        },
        type_meta_impl,
    )
}
/// Generate the `size_of`, `write`, and `TYPE_META` bodies for an enum's
/// `SchemaWrite` implementation.
///
/// `enum_ident` is the type the generated `match` arms destructure (`Self`, or
/// the `from` mapping target), and `tag_encoding` overrides the discriminant
/// encoding (defaults to bincode's `u32` — see [`default_tag_encoding`]).
fn impl_enum(
    enum_ident: &Type,
    variants: &[Variant],
    tag_encoding: Option<&Type>,
) -> (TokenStream, TokenStream, TokenStream) {
    // An uninhabited enum can never be constructed, so there is nothing to
    // size or write.
    if variants.is_empty() {
        return (quote! {Ok(0)}, quote! {Ok(())}, quote! {TypeMeta::Dynamic});
    }
    let default_tag_encoding = default_tag_encoding();
    let tag_encoding = tag_encoding.unwrap_or(&default_tag_encoding);
    let mut size_of_impl = Vec::with_capacity(variants.len());
    let mut write_impl = Vec::with_capacity(variants.len());
    let type_meta_impl = variants.type_meta_impl(TraitImpl::SchemaWrite, tag_encoding);
    // Build one match arm (for both `size_of` and `write`) per variant.
    for (i, variant) in variants.iter().enumerate() {
        let variant_ident = &variant.ident;
        let fields = &variant.fields;
        let discriminant = variant.discriminant(i);
        // Bincode always encodes the discriminant using the index of the field order.
        let size_of_discriminant = quote! {
            #tag_encoding::size_of(&#discriminant)?
        };
        let write_discriminant = quote! {
            #tag_encoding::write(writer, &#discriminant)?;
        };
        // Data-carrying variants need per-field size/write code; unit variants
        // only encode the discriminant.
        let (size, write) = match fields.style {
            style @ (Style::Struct | Style::Tuple) => {
                let target = fields.iter().map(|field| field.target_resolved());
                let ident = fields.enum_member_ident_iter(None);
                let write = fields
                    .iter()
                    .zip(ident.clone())
                    .map(|(field, ident)| {
                        let target = field.target_resolved();
                        quote! {
                            <#target as SchemaWrite>::write(writer, #ident)?;
                        }
                    })
                    .collect::<Vec<_>>();
                let ident_destructure = ident.clone();
                // Destructure the variant's fields in the match pattern so the
                // generated body can reference them by name.
                let match_case = if style.is_struct() {
                    quote! {
                        #enum_ident::#variant_ident{#(#ident_destructure),*}
                    }
                } else {
                    quote! {
                        #enum_ident::#variant_ident(#(#ident_destructure),*)
                    }
                };
                // Prefix disambiguation needed, as our match statement will destructure enum variant identifiers.
                let static_anon_idents = fields
                    .member_anon_ident_iter(Some("__"))
                    .collect::<Vec<_>>();
                let static_targets = fields
                    .iter()
                    .map(|field| {
                        let target = field.target_resolved();
                        quote! {<#target as SchemaWrite>::TYPE_META}
                    })
                    .collect::<Vec<_>>();
                (
                    quote! {
                        #match_case => {
                            // Fast path: if the discriminant and every field are
                            // statically sized, the variant's size is a constant sum.
                            if let (TypeMeta::Static { size: disc_size, .. } #(,TypeMeta::Static { size: #static_anon_idents, .. })*) = (<#tag_encoding as SchemaWrite>::TYPE_META #(,#static_targets)*) {
                                return Ok(disc_size + #(#static_anon_idents)+*);
                            }
                            let mut total = #size_of_discriminant;
                            #(
                                total += <#target as SchemaWrite>::size_of(#ident)?;
                            )*
                            Ok(total)
                        }
                    },
                    quote! {
                        #match_case => {
                            if let (TypeMeta::Static { size: disc_size, .. } #(,TypeMeta::Static { size: #static_anon_idents, .. })*) = (<#tag_encoding as SchemaWrite>::TYPE_META #(,#static_targets)*) {
                                let summed_sizes = disc_size + #(#static_anon_idents)+*;
                                // SAFETY: `summed_sizes` is the sum of the static sizes of the fields + the discriminant size,
                                // which is the serialized size of the variant.
                                // Writing the discriminant and then calling `write` on each field will write
                                // exactly `summed_sizes` bytes, fully initializing the trusted window.
                                let writer = &mut unsafe { writer.as_trusted_for(summed_sizes) }?;
                                #write_discriminant;
                                #(#write)*
                                writer.finish()?;
                                return Ok(());
                            }
                            #write_discriminant;
                            #(#write)*
                            Ok(())
                        }
                    },
                )
            }
            Style::Unit => (
                quote! {
                    #enum_ident::#variant_ident => {
                        Ok(#size_of_discriminant)
                    }
                },
                quote! {
                    #enum_ident::#variant_ident => {
                        #write_discriminant;
                        Ok(())
                    }
                },
            ),
        };
        size_of_impl.push(size);
        write_impl.push(write);
    }
    (
        quote! {
            match src {
                #(#size_of_impl)*
            }
        },
        quote! {
            match src {
                #(#write_impl)*
            }
        },
        quote! {
            #type_meta_impl
        },
    )
}
/// Expand the `#[derive(SchemaWrite)]` input into the trait implementation.
///
/// Dispatches to [`impl_struct`] or [`impl_enum`] based on the parsed body,
/// then assembles the final `impl SchemaWrite` inside an anonymous `const`
/// block so the `use` items it needs do not leak into the caller's namespace.
pub(crate) fn generate(input: DeriveInput) -> Result<TokenStream> {
    let repr = extract_repr(&input, TraitImpl::SchemaWrite)?;
    let args = SchemaArgs::from_derive_input(&input)?;
    let (impl_generics, ty_generics, where_clause) = args.generics.split_for_impl();
    let ident = &args.ident;
    let crate_name = get_crate_name(&args);
    let src_dst = get_src_dst(&args);
    let field_suppress = suppress_unused_fields(&args);
    let (size_of_impl, write_impl, type_meta_impl) = match &args.data {
        Data::Struct(fields) => {
            if args.tag_encoding.is_some() {
                return Err(Error::custom("`tag_encoding` is only supported for enums"));
            }
            // Only structs are eligible being marked zero-copy, so only the struct
            // impl needs the repr.
            impl_struct(fields, &repr)
        }
        Data::Enum(v) => {
            // When mapping onto a foreign type (`from = "..."`), the generated
            // match arms must destructure that type, not `Self`.
            let enum_ident = match &args.from {
                Some(from) => from,
                None => &parse_quote!(Self),
            };
            impl_enum(enum_ident, v, args.tag_encoding.as_ref())
        }
    };
    Ok(quote! {
        const _: () = {
            use #crate_name::{SchemaWrite, WriteResult, io::Writer, TypeMeta};
            impl #impl_generics #crate_name::SchemaWrite for #ident #ty_generics #where_clause {
                type Src = #src_dst;
                #[allow(clippy::arithmetic_side_effects)]
                const TYPE_META: TypeMeta = #type_meta_impl;
                #[inline]
                fn size_of(src: &Self::Src) -> WriteResult<usize> {
                    #size_of_impl
                }
                #[inline]
                fn write(writer: &mut impl Writer, src: &Self::Src) -> WriteResult<()> {
                    #write_impl
                }
            }
        };
        #field_suppress
    })
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode-derive/src/common.rs | wincode-derive/src/common.rs | use {
darling::{
ast::{Data, Fields, Style},
FromDeriveInput, FromField, FromVariant, Result,
},
proc_macro2::{Span, TokenStream},
quote::quote,
std::{
borrow::Cow,
collections::VecDeque,
fmt::{self, Display},
},
syn::{
parse_quote,
spanned::Spanned,
visit::{self, Visit},
visit_mut::{self, VisitMut},
DeriveInput, Expr, ExprLit, GenericArgument, Generics, Ident, Lifetime, Lit, LitInt,
Member, Path, Type, TypeImplTrait, TypeParamBound, TypeReference, TypeTraitObject,
Visibility,
},
};
#[derive(FromField)]
#[darling(attributes(wincode), forward_attrs)]
pub(crate) struct Field {
    /// The field's name; `None` for tuple-struct / tuple-variant fields.
    pub(crate) ident: Option<Ident>,
    /// The field's declared type.
    pub(crate) ty: Type,
    /// Per-field `SchemaRead` and `SchemaWrite` override.
    ///
    /// This is how users can opt in to optimized `SchemaRead` and `SchemaWrite` implementations
    /// for a particular field.
    ///
    /// For example:
    /// ```ignore
    /// struct Foo {
    ///     #[wincode(with = "Pod<_>")]
    ///     x: [u8; u64],
    /// }
    /// ```
    #[darling(default)]
    pub(crate) with: Option<Type>,
}
/// Extension helpers on [`syn::Type`] used throughout the derive codegen.
pub(crate) trait TypeExt {
    /// Replace any lifetimes on this type with the given lifetime.
    ///
    /// `ident` is the lifetime name without the leading apostrophe (e.g. `"de"`).
    ///
    /// For example, we can transform:
    /// ```ignore
    /// &'a str -> &'de str
    /// ```
    fn with_lifetime(&self, ident: &str) -> Type;
    /// Replace any inference tokens on this type with the fully qualified generic arguments
    /// of the given `infer` type.
    ///
    /// For example, we can transform:
    /// ```ignore
    /// let target = parse_quote!(Pod<_>);
    /// let actual = parse_quote!([u8; u64]);
    /// assert_eq!(target.with_infer(actual), parse_quote!(Pod<[u8; u64]>));
    /// ```
    fn with_infer(&self, infer: &Type) -> Type;
    /// Gather all the lifetimes on this type.
    ///
    /// Only lifetimes written in the source are collected; elided lifetimes
    /// have no AST node and so are not visited.
    fn lifetimes(&self) -> Vec<&Lifetime>;
}
impl TypeExt for Type {
    fn with_lifetime(&self, ident: &str) -> Type {
        // Clone, then rewrite every lifetime in place via the visitor.
        let mut this = self.clone();
        ReplaceLifetimes(ident).visit_type_mut(&mut this);
        this
    }
    fn with_infer(&self, infer: &Type) -> Type {
        let mut this = self.clone();
        // First, collect the generic arguments of the `infer` type.
        let mut stack = GenericStack::new();
        stack.visit_type(infer);
        // If there are no generic arguments on self, infer the given `infer` type itself.
        if stack.0.is_empty() {
            stack.0.push_back(infer);
        }
        // Perform the replacement.
        let mut infer = InferGeneric::from(stack);
        infer.visit_type_mut(&mut this);
        this
    }
    fn lifetimes(&self) -> Vec<&Lifetime> {
        // Collect every explicit lifetime node, in visit (source) order.
        let mut lifetimes = Vec::new();
        GatherLifetimes(&mut lifetimes).visit_type(self);
        lifetimes
    }
}
/// Which schema trait the derive is currently generating code for.
///
/// Used to select between `SchemaRead`/`SchemaWrite` token expansions and to
/// produce readable error messages.
#[derive(Debug, Clone, Copy)]
pub(crate) enum TraitImpl {
    SchemaRead,
    SchemaWrite,
}
impl Display for TraitImpl {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        // The variant names double as the user-facing trait names, matching
        // the derived `Debug` output.
        f.write_str(match self {
            Self::SchemaRead => "SchemaRead",
            Self::SchemaWrite => "SchemaWrite",
        })
    }
}
impl Field {
/// Get the target type for a field.
///
/// If the field has a `with` attribute, return it.
/// Otherwise, return the type.
pub(crate) fn target(&self) -> &Type {
if let Some(with) = &self.with {
with
} else {
&self.ty
}
}
/// Get the target type for a field with any inference tokens resolved.
///
/// Users may annotate a field using `with` attributes that contain inference tokens,
/// such as `Pod<_>`. This method will resolve those inference tokens to the actual type.
///
/// The following will resolve to `Pod<[u8; u64]>` for `x`:
///
/// ```ignore
/// struct Foo {
/// #[wincode(with = "Pod<_>")]
/// x: [u8; u64],
/// }
/// ```
pub(crate) fn target_resolved(&self) -> Type {
self.target().with_infer(&self.ty)
}
/// Get the identifier for a struct member.
///
/// If the field has a named identifier, return it.
/// Otherwise (tuple struct), return an anonymous identifier with the given index.
pub(crate) fn struct_member_ident(&self, index: usize) -> Member {
if let Some(ident) = &self.ident {
ident.clone().into()
} else {
index.into()
}
}
/// Like [`Self::struct_member_ident`], but return a `String`.
pub(crate) fn struct_member_ident_to_string(&self, index: usize) -> String {
if let Some(ident) = &self.ident {
ident.to_string()
} else {
index.to_string()
}
}
}
/// Extension helpers over a parsed set of [`Field`]s.
pub(crate) trait FieldsExt {
    /// Generate the `TYPE_META` const expression for this set of fields.
    ///
    /// `repr` gates zero-copy eligibility (see [`StructRepr::is_zero_copy_eligible`]).
    fn type_meta_impl(&self, trait_impl: TraitImpl, repr: &StructRepr) -> TokenStream;
    /// Get an iterator over the identifiers for the struct members.
    ///
    /// If the field has a named identifier, return it.
    /// Otherwise (tuple struct), return an anonymous identifier.
    fn struct_member_ident_iter(&self) -> impl Iterator<Item = Member>;
    /// Get an iterator over type members as anonymous identifiers.
    ///
    /// If `prefix` is provided, the identifiers will be prefixed with the given str.
    ///
    /// Useful for tuple destructuring where using an index of a tuple struct as an identifier would
    /// incorrectly match a literal integer.
    ///
    /// E.g., given the struct:
    /// ```
    /// struct Foo(u8, u16);
    /// ```
    /// Iterating over the identifiers would yield [0, 1].
    ///
    /// Using these integer identifiers in a match statement when determining static size, for example, is incorrect:
    /// ```ignore
    /// if let (TypeMeta::Static { size: 0, .. }) = (<field as SchemaWrite>::TYPE_META) {
    /// ```
    ///
    /// You actually want an anonymous identifier, like `a`, `b`, etc.
    fn member_anon_ident_iter(&self, prefix: Option<&str>) -> impl Iterator<Item = Ident>;
    /// Get an iterator over the identifiers for the enum members.
    ///
    /// If the field has a named identifier, return it.
    /// Otherwise (tuple enum), return an anonymous identifier.
    ///
    /// Note this is unnecessary for unit enums, as they will not have fields.
    fn enum_member_ident_iter(
        &self,
        prefix: Option<&str>,
    ) -> impl Iterator<Item = Cow<'_, Ident>> + Clone;
}
impl FieldsExt for Fields<Field> {
    /// Generate the `TYPE_META` implementation for a struct.
    fn type_meta_impl(&self, trait_impl: TraitImpl, repr: &StructRepr) -> TokenStream {
        // Tuple of each field's `TYPE_META`, qualified for the trait we are
        // currently generating (read targets get the `'de` lifetime).
        let tuple_expansion = match trait_impl {
            TraitImpl::SchemaRead => {
                let items = self.iter().map(|field| {
                    let target = field.target_resolved().with_lifetime("de");
                    quote! { <#target as SchemaRead<'de>>::TYPE_META }
                });
                quote! { #(#items),* }
            }
            TraitImpl::SchemaWrite => {
                let items = self.iter().map(|field| {
                    let target = field.target_resolved();
                    quote! { <#target as SchemaWrite>::TYPE_META }
                });
                quote! { #(#items),* }
            }
        };
        // No need to prefix, as this is only used in a struct context, where the static size is
        // known at compile time.
        let anon_idents = self.member_anon_ident_iter(None).collect::<Vec<_>>();
        let zero_copy_idents = self.member_anon_ident_iter(Some("zc_")).collect::<Vec<_>>();
        let is_zero_copy_eligible = repr.is_zero_copy_eligible();
        // Extract sizes and zero-copy flags from the TYPE_META implementations of the fields of the struct.
        // We can use this in aggregate to determine the static size and zero-copy eligibility of the struct.
        //
        // - The static size of a struct is the sum of the static sizes of its fields.
        // - The zero-copy eligibility of a struct is the logical AND of the zero-copy eligibility flags of its fields
        //   and the zero-copy eligibility the struct representation (e.g., `#[repr(transparent)]` or `#[repr(C)]`).
        quote! {
            // This will simultaneously only match if all fields are `TypeMeta::Static`, and extract the sizes and zero-copy flags
            // for each field.
            // If any field is not `TypeMeta::Static`, the entire match will fail, and we will fall through to the `Dynamic` case.
            if let (#(TypeMeta::Static { size: #anon_idents, zero_copy: #zero_copy_idents }),*) = (#tuple_expansion) {
                let serialized_size = #(#anon_idents)+*;
                // Bincode never serializes padding, so for types to qualify for zero-copy, the summed serialized size of
                // the fields must be equal to the in-memory size of the type. This is because zero-copy types
                // may be read/written directly using their in-memory representation; padding disqualifies a type
                // from this kind of optimization.
                let no_padding = serialized_size == core::mem::size_of::<Self>();
                TypeMeta::Static { size: serialized_size, zero_copy: no_padding && #is_zero_copy_eligible && #(#zero_copy_idents)&&* }
            } else {
                TypeMeta::Dynamic
            }
        }
    }
    fn struct_member_ident_iter(&self) -> impl Iterator<Item = Member> {
        self.iter()
            .enumerate()
            .map(|(i, f)| f.struct_member_ident(i))
    }
    fn member_anon_ident_iter(&self, prefix: Option<&str>) -> impl Iterator<Item = Ident> {
        anon_ident_iter(prefix).take(self.len())
    }
    fn enum_member_ident_iter(
        &self,
        prefix: Option<&str>,
    ) -> impl Iterator<Item = Cow<'_, Ident>> + Clone {
        // Named fields are borrowed as-is; unnamed fields draw fresh
        // anonymous identifiers from the shared alphabet iterator.
        let mut alpha = anon_ident_iter(prefix);
        self.iter().map(move |field| {
            if let Some(ident) = &field.ident {
                Cow::Borrowed(ident)
            } else {
                Cow::Owned(
                    alpha
                        .next()
                        .expect("alpha iterator should never be exhausted"),
                )
            }
        })
    }
}
/// Infinite iterator of anonymous identifiers: `a..z`, then `a0..z0`, `a1..`.
///
/// An optional `prefix` is prepended to every identifier.
fn anon_ident_iter(prefix: Option<&str>) -> impl Iterator<Item = Ident> + Clone + use<'_> {
    let prefix = prefix.unwrap_or_default();
    (0usize..).map(move |i| {
        // Letters cycle through the alphabet; every full pass appends a
        // numeric suffix (`a`..`z`, `a0`..`z0`, `a1`..).
        let letter = char::from(b'a' + (i % 26) as u8);
        let name = match i / 26 {
            0 => format!("{}{}", prefix, letter),
            wrap => format!("{}{}{}", prefix, letter, wrap - 1),
        };
        Ident::new(&name, Span::call_site())
    })
}
#[derive(FromVariant)]
#[darling(attributes(wincode), forward_attrs)]
pub(crate) struct Variant {
    /// The variant's name.
    pub(crate) ident: Ident,
    /// The variant's fields (empty for unit variants).
    pub(crate) fields: Fields<Field>,
    /// Optional explicit discriminant expression via `#[wincode(tag = ...)]`;
    /// when absent, the variant's positional index is used (bincode default).
    #[darling(default)]
    pub(crate) tag: Option<Expr>,
}
impl Variant {
    /// Discriminant expression for this variant.
    ///
    /// An explicit `#[wincode(tag = ...)]` attribute wins; otherwise fall back
    /// to the variant's positional index, which is bincode's default encoding.
    pub(crate) fn discriminant(&self, field_index: usize) -> Cow<'_, Expr> {
        match &self.tag {
            Some(tag) => Cow::Borrowed(tag),
            None => {
                let lit = LitInt::new(&field_index.to_string(), Span::call_site());
                Cow::Owned(Expr::Lit(ExprLit {
                    attrs: vec![],
                    lit: Lit::Int(lit),
                }))
            }
        }
    }
}
/// Extension helpers over a slice of enum [`Variant`]s.
pub(crate) trait VariantsExt {
    /// Generate the `TYPE_META` implementation for an enum.
    fn type_meta_impl(&self, trait_impl: TraitImpl, tag_encoding: &Type) -> TokenStream;
}
impl VariantsExt for &[Variant] {
    fn type_meta_impl(&self, trait_impl: TraitImpl, tag_encoding: &Type) -> TokenStream {
        // An uninhabited enum serializes to nothing.
        if self.is_empty() {
            return quote! { TypeMeta::Static { size: 0, zero_copy: false } };
        }
        // Enums have a statically known size in a very specific case: all variants have the same serialized size.
        // This holds trivially for enums where all variants are unit enums (the size is just the size of the discriminant).
        // In other cases, we need to compute the size of each variant and check if they are all equal.
        // Otherwise, the enum is dynamic.
        //
        // Enums are never zero-copy, as the discriminant may have invalid bit patterns.
        let idents = anon_ident_iter(Some("variant_"))
            .take(self.len())
            .collect::<Vec<_>>();
        // `TYPE_META` of the discriminant encoding, qualified for the trait
        // currently being generated.
        let tag_expr = match trait_impl {
            TraitImpl::SchemaRead => quote! { <#tag_encoding as SchemaRead<'de>>::TYPE_META },
            TraitImpl::SchemaWrite => quote! { <#tag_encoding as SchemaWrite>::TYPE_META },
        };
        let variant_type_metas = self
            .iter()
            .zip(&idents)
            .map(|(variant, ident)| match variant.fields.style {
                Style::Struct | Style::Tuple => {
                    // Gather the `TYPE_META` implementations for each field of the variant.
                    let fields_type_meta_expansion = match trait_impl {
                        TraitImpl::SchemaRead => {
                            let items = variant.fields.iter().map(|field| {
                                let target = field.target_resolved().with_lifetime("de");
                                quote! { <#target as SchemaRead<'de>>::TYPE_META }
                            });
                            quote! { #(#items),* }
                        },
                        TraitImpl::SchemaWrite => {
                            let items = variant.fields.iter().map(|field| {
                                let target = field.target_resolved();
                                quote! { <#target as SchemaWrite>::TYPE_META }
                            });
                            quote! { #(#items),* }
                        },
                    };
                    let anon_idents = variant.fields.member_anon_ident_iter(None).collect::<Vec<_>>();
                    // Assign the `TYPE_META` to a local variant identifier (`#ident`).
                    quote! {
                        // Extract the discriminant size and the sizes of the fields.
                        //
                        // If all the fields are `TypeMeta::Static`, the variant is static.
                        // Otherwise, the variant is dynamic.
                        let #ident = if let (TypeMeta::Static { size: disc_size, .. }, #(TypeMeta::Static { size: #anon_idents, .. }),*) = (#tag_expr, #fields_type_meta_expansion) {
                            // Sum the discriminant size and the sizes of the fields.
                            TypeMeta::Static { size: disc_size + #(#anon_idents)+*, zero_copy: false }
                        } else {
                            TypeMeta::Dynamic
                        };
                    }
                }
                Style::Unit => {
                    // For unit enums, the `TypeMeta` is just the `TypeMeta` of the discriminant.
                    //
                    // We always override the zero-copy flag to `false`, due to discriminants having potentially
                    // invalid bit patterns.
                    quote! {
                        let #ident = match #tag_expr {
                            TypeMeta::Static { size, .. } => {
                                TypeMeta::Static { size, zero_copy: false }
                            }
                            TypeMeta::Dynamic => TypeMeta::Dynamic,
                        };
                    }
                }
            });
        quote! {
            const {
                // Declare the `TypeMeta` implementations for each variant.
                #(#variant_type_metas)*
                // Place the local bindings for the variant identifiers in an array for iteration.
                let variant_sizes = [#(#idents),*];
                /// Iterate over all the variant `TypeMeta`s and check if they are all `TypeMeta::Static`
                /// and have the same size.
                ///
                /// This logic is broken into a function so that we can use `return`.
                const fn choose(variant_sizes: &[TypeMeta]) -> TypeMeta {
                    // If there is only one variant, it's safe to use that variant's `TypeMeta`.
                    //
                    // Note we check if there are 0 variants at the top of this function and exit early.
                    if variant_sizes.len() == 1 {
                        return variant_sizes[0];
                    }
                    let mut i = 1;
                    // Can't use a `for` loop in a const context.
                    while i < variant_sizes.len() {
                        match (variant_sizes[i], variant_sizes[0]) {
                            // Iff every variant is `TypeMeta::Static` and has the same size, we can assume the type is static.
                            (TypeMeta::Static { size: s1, .. }, TypeMeta::Static { size: s2, .. }) if s1 == s2 => {
                                // Check the next variant.
                                i += 1;
                            }
                            _ => {
                                // If any variant is not `TypeMeta::Static` or has a different size, the enum is dynamic.
                                return TypeMeta::Dynamic;
                            }
                        }
                    }
                    // If we made it here, all variants are `TypeMeta::Static` and have the same size,
                    // so we can return the first one.
                    variant_sizes[0]
                }
                choose(&variant_sizes)
            }
        }
    }
}
pub(crate) type ImplBody = Data<Variant, Field>;
/// Generate code to suppress unused field lints.
///
/// If `from` is specified, the user is creating a mapping type, in which case those struct/enum
/// fields will almost certainly be unused, as they exist purely to describe the mapping. This will
/// trigger unused field lints.
///
/// Create a private, never-called item that references the fields to avoid unused field lints.
/// Users can disable this by setting `no_suppress_unused`.
pub(crate) fn suppress_unused_fields(args: &SchemaArgs) -> TokenStream {
    // Nothing to do for plain (non-mapping) derives, or when explicitly opted out.
    if args.from.is_none() || args.no_suppress_unused {
        return quote! {};
    }
    match &args.data {
        Data::Struct(fields) if !fields.is_empty() => {
            let idents = fields.struct_member_ident_iter();
            let ident = &args.ident;
            let (impl_generics, ty_generics, where_clause) = args.generics.split_for_impl();
            quote! {
                const _: () = {
                    #[allow(dead_code, unused_variables)]
                    fn __wincode_use_fields #impl_generics (value: &#ident #ty_generics) #where_clause {
                        let _ = ( #( &value.#idents ),* );
                    }
                };
            }
        }
        // We can't suppress the lint on enum variants, as that would require being able to
        // construct an arbitrary enum variant, which we can't do. Users will have to manually
        // add a `#[allow(unused)]` / `#[allow(dead_code)]` attribute to the enum variant if they want to
        // suppress the lint, or make it public.
        _ => {
            quote! {}
        }
    }
}
/// Path used to reference `wincode` items in generated code.
///
/// Derives inside the `wincode` crate itself (`internal = true`) use `crate`;
/// everything else uses the absolute `::wincode` path.
pub(crate) fn get_crate_name(args: &SchemaArgs) -> Path {
    match args.internal {
        true => parse_quote!(crate),
        false => parse_quote!(::wincode),
    }
}
/// Target `Src`/`Dst` type for a `SchemaRead`/`SchemaWrite` implementation.
///
/// When `from` is specified the implementation maps onto that foreign type;
/// otherwise the target is `Self`.
pub(crate) fn get_src_dst(args: &SchemaArgs) -> Cow<'_, Type> {
    match args.from.as_ref() {
        Some(from) => Cow::Borrowed(from),
        None => Cow::Owned(parse_quote!(Self)),
    }
}
/// Fully qualified target `Src`/`Dst` type for a schema implementation.
///
/// Like [`get_src_dst`], but for a local type it spells out the type name with
/// its generics (`Foo<T>`) instead of `Self`, so it can be used in positions
/// where `Self` is not available.
pub(crate) fn get_src_dst_fully_qualified(args: &SchemaArgs) -> Cow<'_, Type> {
    match args.from.as_ref() {
        Some(from) => Cow::Borrowed(from),
        None => {
            let ident = &args.ident;
            let (_, ty_generics, _) = args.generics.split_for_impl();
            Cow::Owned(parse_quote!(#ident #ty_generics))
        }
    }
}
#[derive(FromDeriveInput)]
#[darling(attributes(wincode), forward_attrs)]
pub(crate) struct SchemaArgs {
    /// Name of the type deriving the schema trait.
    pub(crate) ident: Ident,
    /// Generics declared on the deriving type.
    pub(crate) generics: Generics,
    /// Parsed body: struct fields or enum variants.
    pub(crate) data: ImplBody,
    /// Visibility of the deriving type.
    pub(crate) vis: Visibility,
    /// Used to determine the `wincode` path.
    ///
    /// If `internal` is `true`, the generated code will use the `crate::` path.
    /// Otherwise, it will use the `wincode` path.
    #[darling(default)]
    pub(crate) internal: bool,
    /// Specifies whether the type's implementations should map to another type.
    ///
    /// Useful for implementing `SchemaRead` and `SchemaWrite` on foreign types.
    #[darling(default)]
    pub(crate) from: Option<Type>,
    /// Specifies whether to suppress unused field lints on structs.
    ///
    /// Only applicable if `from` is specified.
    #[darling(default)]
    pub(crate) no_suppress_unused: bool,
    /// Specifies whether to generate placement initialization struct helpers on `SchemaRead` implementations.
    #[darling(default)]
    pub(crate) struct_extensions: bool,
    /// Specifies the encoding to use for enum discriminants.
    ///
    /// If specified, the enum discriminants will be encoded using the given type's `SchemaWrite`
    /// and `SchemaRead` implementations.
    /// Otherwise, the enum discriminants will be encoded using the default encoding (`u32`).
    #[darling(default)]
    pub(crate) tag_encoding: Option<Type>,
}
/// The default encoding to use for enum discriminants.
///
/// Bincode's default discriminant encoding is `u32`.
///
/// Note in the public APIs we refer to `tag` to mean the discriminant encoding
/// for friendlier naming.
///
/// Returns a freshly parsed `u32` type token for splicing into generated code.
#[inline]
pub(crate) fn default_tag_encoding() -> Type {
    parse_quote!(u32)
}
/// Metadata about the `#[repr]` attribute on a struct.
#[derive(Default)]
pub(crate) struct StructRepr {
    layout: Layout,
}
/// The memory-layout class declared via `#[repr(...)]`.
#[derive(Default)]
pub(crate) enum Layout {
    /// No explicit layout attribute (the default Rust layout).
    #[default]
    Rust,
    /// `#[repr(transparent)]`.
    Transparent,
    /// `#[repr(C)]`.
    C,
}
impl StructRepr {
    /// Check if this `#[repr]` attribute is eligible for zero-copy deserialization.
    ///
    /// Only `#[repr(transparent)]` and `#[repr(C)]` structs have a layout
    /// stable enough for zero-copy.
    pub(crate) fn is_zero_copy_eligible(&self) -> bool {
        match self.layout {
            Layout::Transparent | Layout::C => true,
            Layout::Rust => false,
        }
    }
}
/// Extract the `#[repr]` attribute from the derive input, returning an error if the type is packed (not supported).
pub(crate) fn extract_repr(input: &DeriveInput, trait_impl: TraitImpl) -> Result<StructRepr> {
    let mut struct_repr = StructRepr::default();
    for attr in &input.attrs {
        if !attr.path().is_ident("repr") {
            continue;
        }
        // The closure is invoked once per comma-separated item in `#[repr(...)]`.
        attr.parse_nested_meta(|meta| {
            if meta.path.is_ident("packed") {
                return Err(meta.error(format!(
                    "`{trait_impl}` cannot be derived for types annotated with `#[repr(packed)]` \
                     or `#[repr(packed(n))]`"
                )));
            }
            // Rust will reject a struct with both `#[repr(transparent)]` and `#[repr(C)]`, so we
            // don't need to check for conflicts here.
            if meta.path.is_ident("C") {
                struct_repr.layout = Layout::C;
                return Ok(());
            }
            if meta.path.is_ident("transparent") {
                struct_repr.layout = Layout::Transparent;
                return Ok(());
            }
            // Parse left over input.
            // (Consumes any argument tokens of unrecognized hints — e.g. `align(8)` —
            // so nested-meta parsing can continue cleanly past them.)
            _ = meta.input.parse::<TokenStream>();
            Ok(())
        })?;
    }
    Ok(struct_repr)
}
/// Visitor to recursively collect the generic arguments of a type.
///
/// The collected types are consumed, in order, by [`InferGeneric`] when
/// substituting `_` inference tokens.
struct GenericStack<'ast>(VecDeque<&'ast Type>);
impl<'ast> GenericStack<'ast> {
    fn new() -> Self {
        Self(VecDeque::new())
    }
}
impl<'ast> Visit<'ast> for GenericStack<'ast> {
    fn visit_generic_argument(&mut self, ga: &'ast GenericArgument) {
        if let GenericArgument::Type(t) = ga {
            match t {
                // For slices/arrays, the element type is what a `_` stands in for.
                Type::Slice(slice) => {
                    self.0.push_back(&slice.elem);
                    return;
                }
                Type::Array(array) => {
                    self.0.push_back(&array.elem);
                    return;
                }
                Type::Path(tp)
                    if tp.path.segments.iter().any(|seg| {
                        matches!(seg.arguments, syn::PathArguments::AngleBracketed(_))
                    }) =>
                {
                    // Has generics, recurse.
                }
                _ => self.0.push_back(t),
            }
        }
        // Not a type argument, recurse as normal.
        visit::visit_generic_argument(self, ga);
    }
}
/// Visitor to recursively replace inference tokens with the collected generic arguments.
struct InferGeneric<'ast>(VecDeque<&'ast Type>);
impl<'ast> From<GenericStack<'ast>> for InferGeneric<'ast> {
    fn from(stack: GenericStack<'ast>) -> Self {
        Self(stack.0)
    }
}
impl<'ast> VisitMut for InferGeneric<'ast> {
    fn visit_generic_argument_mut(&mut self, ga: &mut GenericArgument) {
        // A bare `_` in generic-argument position: substitute the next
        // collected type, in order.
        if let GenericArgument::Type(Type::Infer(_)) = ga {
            let ty = self
                .0
                .pop_front()
                .expect("wincode-derive: inference mismatch: not enough collected types for `_`")
                .clone();
            *ga = GenericArgument::Type(ty);
        }
        visit_mut::visit_generic_argument_mut(self, ga);
    }
    fn visit_type_array_mut(&mut self, array: &mut syn::TypeArray) {
        // `[_; N]` element position is not a `GenericArgument`, so it needs
        // its own hook.
        if let Type::Infer(_) = &*array.elem {
            let ty = self
                .0
                .pop_front()
                .expect("wincode-derive: inference mismatch: not enough collected types for `_`")
                .clone();
            *array.elem = ty;
        }
        visit_mut::visit_type_array_mut(self, array);
    }
}
/// Visitor to recursively replace a given type's lifetimes with the given lifetime name.
struct ReplaceLifetimes<'a>(&'a str);
impl ReplaceLifetimes<'_> {
    /// Replace the lifetime's name with the configured name (e.g. `de`), preserving the span.
    fn replace(&self, t: &mut Lifetime) {
        t.ident = Ident::new(self.0, t.ident.span());
    }
    /// Attach an explicit lifetime (with the configured name) to a reference
    /// whose lifetime was elided, spanned at the `&` token.
    fn new_from_reference(&self, t: &mut TypeReference) {
        t.lifetime = Some(Lifetime {
            apostrophe: t.and_token.span(),
            ident: Ident::new(self.0, t.and_token.span()),
        })
    }
}
impl VisitMut for ReplaceLifetimes<'_> {
    fn visit_type_reference_mut(&mut self, t: &mut TypeReference) {
        match &mut t.lifetime {
            Some(l) => self.replace(l),
            // Lifetime may be elided. Prefer being explicit, as the implicit lifetime
            // may refer to a lifetime that is not `'de` (e.g., 'a on some type `Foo<'a>`).
            None => {
                self.new_from_reference(t);
            }
        }
        visit_mut::visit_type_reference_mut(self, t);
    }
    fn visit_generic_argument_mut(&mut self, ga: &mut GenericArgument) {
        if let GenericArgument::Lifetime(l) = ga {
            self.replace(l);
        }
        visit_mut::visit_generic_argument_mut(self, ga);
    }
    fn visit_type_trait_object_mut(&mut self, t: &mut TypeTraitObject) {
        // Lifetime bounds on `dyn Trait + 'x` objects.
        for bd in &mut t.bounds {
            if let TypeParamBound::Lifetime(l) = bd {
                self.replace(l);
            }
        }
        visit_mut::visit_type_trait_object_mut(self, t);
    }
    fn visit_type_impl_trait_mut(&mut self, t: &mut TypeImplTrait) {
        // Lifetime bounds on `impl Trait + 'x` types.
        for bd in &mut t.bounds {
            if let TypeParamBound::Lifetime(l) = bd {
                self.replace(l);
            }
        }
        visit_mut::visit_type_impl_trait_mut(self, t);
    }
}
/// Visitor that appends every explicit lifetime node it encounters to the
/// backing `Vec`, in visit (source) order.
struct GatherLifetimes<'a, 'ast>(&'a mut Vec<&'ast Lifetime>);
impl<'ast> Visit<'ast> for GatherLifetimes<'_, 'ast> {
    fn visit_lifetime(&mut self, l: &'ast Lifetime) {
        self.0.push(l);
    }
}
#[cfg(test)]
mod tests {
    // Unit tests for the type-rewriting helpers (`with_infer`, `with_lifetime`,
    // `lifetimes`, and the anonymous-identifier generator).
    use super::*;
    #[test]
    fn test_infer_generic() {
        let src: Type = parse_quote!(Foo<_>);
        let infer = parse_quote!(Bar<u8>);
        assert_eq!(src.with_infer(&infer), parse_quote!(Foo<u8>));
        let src: Type = parse_quote!(Foo<_, _>);
        let infer = parse_quote!(Bar<u8, u16>);
        assert_eq!(src.with_infer(&infer), parse_quote!(Foo<u8, u16>));
        let src: Type = parse_quote!(Pod<_>);
        let infer = parse_quote!([u8; u64]);
        assert_eq!(src.with_infer(&infer), parse_quote!(Pod<[u8; u64]>));
        let src: Type = parse_quote!(containers::Vec<containers::Pod<_>>);
        let infer = parse_quote!(Vec<u8>);
        assert_eq!(
            src.with_infer(&infer),
            parse_quote!(containers::Vec<containers::Pod<u8>>)
        );
        let src: Type = parse_quote!(containers::Box<[Pod<_>]>);
        let infer = parse_quote!(Box<[u8]>);
        assert_eq!(
            src.with_infer(&infer),
            parse_quote!(containers::Box<[Pod<u8>]>)
        );
        let src: Type = parse_quote!(containers::Box<[Pod<[_; 32]>]>);
        let infer = parse_quote!(Box<[u8; 32]>);
        assert_eq!(
            src.with_infer(&infer),
            parse_quote!(containers::Box<[Pod<[u8; 32]>]>)
        );
        // Not an actual use-case, but added for robustness.
        let src: Type = parse_quote!(containers::Vec<containers::Box<[containers::Pod<_>]>>);
        let infer = parse_quote!(Vec<Box<[u8]>>);
        assert_eq!(
            src.with_infer(&infer),
            parse_quote!(containers::Vec<containers::Box<[containers::Pod<u8>]>>)
        );
        // Similarly, not an actual use-case.
        let src: Type =
            parse_quote!(Pair<containers::Box<[containers::Pod<_>]>, containers::Pod<_>>);
        let infer: Type = parse_quote!(Pair<Box<[Foo<Bar<u8>>]>, u16>);
        assert_eq!(
            src.with_infer(&infer),
            parse_quote!(
                Pair<containers::Box<[containers::Pod<Foo<Bar<u8>>>]>, containers::Pod<u16>>
            )
        )
    }
    #[test]
    fn test_override_ref_lifetime() {
        let target: Type = parse_quote!(Foo<'a>);
        assert_eq!(target.with_lifetime("de"), parse_quote!(Foo<'de>));
        let target: Type = parse_quote!(&'a str);
        assert_eq!(target.with_lifetime("de"), parse_quote!(&'de str));
    }
    #[test]
    fn test_anon_ident_iter() {
        let mut iter = anon_ident_iter(None);
        assert_eq!(iter.next().unwrap().to_string(), "a");
        assert_eq!(iter.nth(25).unwrap().to_string(), "a0");
        assert_eq!(iter.next().unwrap().to_string(), "b0");
        assert_eq!(iter.nth(24).unwrap().to_string(), "a1");
    }
    #[test]
    fn test_gather_lifetimes() {
        let ty: Type = parse_quote!(&'a Foo);
        let lt: Lifetime = parse_quote!('a);
        // Fixed: this comparison was garbled (`vec![<]`) — the unused `lt`
        // binding above makes the intent clear.
        assert_eq!(ty.lifetimes(), vec![&lt]);
        let ty: Type = parse_quote!(&'a Foo<'b, 'c>);
        let (a, b, c) = (parse_quote!('a), parse_quote!('b), parse_quote!('c));
        assert_eq!(ty.lifetimes(), vec![&a, &b, &c]);
    }
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
anza-xyz/wincode | https://github.com/anza-xyz/wincode/blob/9f0ffa346d95c31b94486b7bfea724b73330c42f/wincode-derive/src/schema_read.rs | wincode-derive/src/schema_read.rs | use {
crate::common::{
default_tag_encoding, extract_repr, get_crate_name, get_src_dst,
get_src_dst_fully_qualified, suppress_unused_fields, Field, FieldsExt, SchemaArgs,
StructRepr, TraitImpl, TypeExt, Variant, VariantsExt,
},
darling::{
ast::{Data, Fields, Style},
Error, FromDeriveInput, Result,
},
proc_macro2::{Span, TokenStream},
quote::{format_ident, quote},
syn::{
parse_quote, punctuated::Punctuated, DeriveInput, GenericParam, Generics, LitInt, LitStr,
Path, PredicateType, Type, WhereClause, WherePredicate,
},
};
/// Generate the `SchemaRead::read` body and `TYPE_META` expression for a struct.
///
/// Returns `(read_impl, type_meta_impl)`: the statements that deserialize each
/// field, and the `TypeMeta` constant expression. Fields are read in declaration
/// order directly into the destination via placement initialization
/// (`&raw mut (*dst_ptr).field` casts to `MaybeUninit`), guarded by a generated
/// `DropGuard` that drops the already-initialized fields (in reverse order) if a
/// later field's read fails.
fn impl_struct(
    args: &SchemaArgs,
    fields: &Fields<Field>,
    repr: &StructRepr,
) -> (TokenStream, TokenStream) {
    // A fieldless struct has nothing to read; emit an empty body.
    if fields.is_empty() {
        return (quote! {}, quote! {TypeMeta::Dynamic});
    }
    let num_fields = fields.len();
    let read_impl = fields
        .iter()
        .enumerate()
        .map(|(i, field)| {
            let ident = field.struct_member_ident(i);
            let target = field.target_resolved().with_lifetime("de");
            let hint = if field.with.is_some() {
                // Fields annotated with `with` may need help determining the pointer cast.
                //
                // This allows correct inference in `with` attributes, for example:
                // ```
                // struct Foo {
                //     #[wincode(with = "Pod<_>")]
                //     x: [u8; u64],
                // }
                // ```
                let ty = field.ty.with_lifetime("de");
                quote! { MaybeUninit<#ty> }
            } else {
                quote! { MaybeUninit<_> }
            };
            // The last field's read is the final fallible step, so no increment
            // is emitted after it — `init_count` never needs to reach `num_fields`.
            let init_count = if i == num_fields - 1 {
                quote! {}
            } else {
                quote! { *init_count += 1; }
            };
            quote! {
                <#target as SchemaRead<'de>>::read(
                    reader,
                    unsafe { &mut *(&raw mut (*dst_ptr).#ident).cast::<#hint>() }
                )?;
                #init_count
            }
        })
        .collect::<Vec<_>>();
    let type_meta_impl = fields.type_meta_impl(TraitImpl::SchemaRead, repr);
    // One match arm per possible `init_count` value (0 ..= num_fields - 1).
    let drop_guard = (0..fields.len()).map(|i| {
        // Generate code to drop already initialized fields in reverse order.
        let drop = fields.fields[..i]
            .iter()
            .rev()
            .enumerate()
            .map(|(j, field)| {
                let ident = field.struct_member_ident(i - 1 - j);
                quote! {
                    ptr::drop_in_place(&raw mut (*dst_ptr).#ident);
                }
            });
        let cnt = i as u8;
        if i == 0 {
            // No field initialized yet: nothing to drop.
            quote! {
                0 => {}
            }
        } else {
            quote! {
                #cnt => {
                    unsafe { #(#drop)* }
                }
            }
        }
    });
    let dst = get_src_dst_fully_qualified(args);
    let (impl_generics, ty_generics, _) = args.generics.split_for_impl();
    // Shared prologue: construct the guard and expose `init_count` for the
    // per-field increments generated above.
    let init_guard = quote! {
        let dst_ptr = dst.as_mut_ptr();
        let mut guard = DropGuard {
            init_count: 0,
            dst_ptr,
        };
        let init_count = &mut guard.init_count;
    };
    (
        quote! {
            struct DropGuard #impl_generics {
                init_count: u8,
                dst_ptr: *mut #dst,
            }
            impl #impl_generics Drop for DropGuard #ty_generics {
                #[cold]
                fn drop(&mut self) {
                    let dst_ptr = self.dst_ptr;
                    let init_count = self.init_count;
                    match init_count {
                        #(#drop_guard)*
                        // Impossible, given the `init_count` is bounded by the number of fields.
                        _ => { debug_assert!(false, "init_count out of bounds"); },
                    }
                }
            }
            match <Self as SchemaRead<'de>>::TYPE_META {
                TypeMeta::Static { size, .. } => {
                    // SAFETY: `size` is the serialized size of the struct, which is the sum
                    // of the serialized sizes of the fields.
                    // Calling `read` on each field will consume exactly `size` bytes,
                    // fully consuming the trusted window.
                    let reader = &mut unsafe { reader.as_trusted_for(size) }?;
                    #init_guard
                    #(#read_impl)*
                    mem::forget(guard);
                }
                TypeMeta::Dynamic => {
                    #init_guard
                    #(#read_impl)*
                    mem::forget(guard);
                }
            }
        },
        quote! {
            #type_meta_impl
        },
    )
}
/// Include placement initialization helpers for structs.
///
/// This adds some convenience methods to structs that can avoid a lot of boilerplate when
/// implementing custom `SchemaRead` implementations. In particular, provide methods that
/// deal with projecting subfields of structs into `MaybeUninit`s. Without this,
/// users have to write a litany of `&mut *(&raw mut (*dst_ptr).field).cast()` to
/// access MaybeUninit struct fields.
///
/// For example:
/// ```ignore
/// #[derive(SchemaRead)]
/// struct Header {
/// num_required_signatures: u8,
/// num_signed_accounts: u8,
/// num_unsigned_accounts: u8,
/// }
///
/// #[derive(SchemaRead)]
/// struct Body {
/// header: Header,
/// }
///
/// struct Message {
/// body: Body,
/// }
///
/// impl<'de> SchemaRead<'de> for Message {
/// type Dst = Message;
///
/// fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
/// // Some more complicated logic not capturable by the macro...
/// let mut body = MaybeUninit::<Body>::uninit();
/// // Project a mutable MaybeUninit<Header> from the MaybeUninit<Body>.
/// let header = Body::get_uninit_header_mut(&mut body);
/// // ...
/// }
/// }
/// ```
///
/// We cannot do this for enums, given the lack of facilities for placement initialization.
fn impl_struct_extensions(args: &SchemaArgs, crate_name: &Path) -> Result<TokenStream> {
    // Opt-in only.
    if !args.struct_extensions {
        return Ok(quote! {});
    }
    let Data::Struct(fields) = &args.data else {
        return Err(Error::custom(
            "`struct_extensions` is only supported for structs",
        ));
    };
    // No fields means there is nothing to project into.
    if fields.is_empty() {
        return Ok(quote! {});
    }
    let struct_ident = &args.ident;
    let vis = &args.vis;
    let dst = get_src_dst(args);
    let impl_generics = append_de_lifetime(&args.generics);
    let (_, ty_generics, where_clause) = args.generics.split_for_impl();
    let builder_ident = format_ident!("{struct_ident}UninitBuilder");
    // Legacy (deprecated) per-field helpers implemented directly on the struct;
    // superseded by the `#builder_ident` builder generated below.
    let helpers = fields.iter().enumerate().map(|(i, field)| {
        let ty = field.ty.with_lifetime("de");
        let target = field.target_resolved().with_lifetime("de");
        let ident = field.struct_member_ident(i);
        let ident_string = field.struct_member_ident_to_string(i);
        let uninit_mut_ident = format_ident!("uninit_{}_mut", ident_string);
        let read_field_ident = format_ident!("read_{}", ident_string);
        let write_uninit_field_ident = format_ident!("write_uninit_{}", ident_string);
        let deprecated_note = LitStr::new(
            &format!("Use `{builder_ident}` builder methods instead"),
            Span::call_site(),
        );
        let field_projection_type = if args.from.is_some() {
            // If the user is defining a mapping type, we need the type system to resolve the
            // projection destination.
            quote! { <#ty as SchemaRead<'de>>::Dst }
        } else {
            // Otherwise we can use the type directly.
            // This makes the generated type more scrutable.
            quote! { #ty }
        };
        quote! {
            #[inline(always)]
            #[deprecated(since = "0.2.2", note = #deprecated_note)]
            #vis fn #uninit_mut_ident(dst: &mut MaybeUninit<#dst>) -> &mut MaybeUninit<#field_projection_type> {
                unsafe { &mut *(&raw mut (*dst.as_mut_ptr()).#ident).cast() }
            }
            #[inline(always)]
            #[deprecated(since = "0.2.2", note = #deprecated_note)]
            #vis fn #read_field_ident(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<#dst>) -> ReadResult<()> {
                <#target as SchemaRead<'de>>::read(reader, Self::#uninit_mut_ident(dst))
            }
            #[inline(always)]
            #[deprecated(since = "0.2.2", note = #deprecated_note)]
            #vis fn #write_uninit_field_ident(val: #field_projection_type, dst: &mut MaybeUninit<#dst>) {
                Self::#uninit_mut_ident(dst).write(val);
            }
        }
    });
    // We modify the generics to add a lifetime parameter for the inner `MaybeUninit` struct.
    let mut builder_generics = args.generics.clone();
    // Add the lifetime for the inner `&mut MaybeUninit` struct.
    builder_generics
        .params
        .push(GenericParam::Lifetime(parse_quote!('_wincode_inner)));
    let builder_dst = get_src_dst_fully_qualified(args);
    let (builder_impl_generics, builder_ty_generics, builder_where_clause) =
        builder_generics.split_for_impl();
    // Determine how many bits are needed to track the initialization state of the fields.
    let (builder_bit_set_ty, builder_bit_set_bits): (Type, u32) = match fields.len() {
        len if len <= 8 => (parse_quote!(u8), u8::BITS),
        len if len <= 16 => (parse_quote!(u16), u16::BITS),
        len if len <= 32 => (parse_quote!(u32), u32::BITS),
        len if len <= 64 => (parse_quote!(u64), u64::BITS),
        len if len <= 128 => (parse_quote!(u128), u128::BITS),
        _ => {
            return Err(Error::custom(
                "`struct_extensions` is only supported for structs with up to 128 fields",
            ))
        }
    };
    let builder_struct_decl = {
        // `split_for_impl` will strip default type and const parameters, so we collect them manually
        // to preserve the declarations on the original struct.
        let generic_type_params = builder_generics.type_params();
        let generic_lifetimes = builder_generics.lifetimes();
        let generic_const = builder_generics.const_params();
        let where_clause = builder_generics.where_clause.as_ref();
        quote! {
            /// A helper struct that provides convenience methods for reading and writing to a `MaybeUninit` struct
            /// with a bit-set tracking the initialization state of the fields.
            ///
            /// The builder will drop all initialized fields in reverse order on drop. When the struct is fully initialized,
            /// you **must** call `finish` or `into_assume_init_mut` to forget the builder. Otherwise, all the
            /// initialized fields will be dropped when the builder is dropped.
            #[must_use]
            #vis struct #builder_ident < #(#generic_lifetimes,)* #(#generic_const,)* #(#generic_type_params,)* > #where_clause {
                inner: &'_wincode_inner mut core::mem::MaybeUninit<#builder_dst>,
                init_set: #builder_bit_set_ty,
            }
        }
    };
    let builder_drop_impl = {
        // Drop all initialized fields in reverse order.
        let drops = fields.iter().rev().enumerate().map(|(index, field)| {
            // Compute the actual index relative to the reversed iterator.
            let real_index = fields.len() - 1 - index;
            let field_ident = field.struct_member_ident(real_index);
            // The corresponding bit for the field.
            let bit_set_index = LitInt::new(&(1u128 << real_index).to_string(), Span::call_site());
            quote! {
                if self.init_set & #bit_set_index != 0 {
                    // SAFETY: We are dropping an initialized field.
                    unsafe {
                        ptr::drop_in_place(&raw mut (*dst_ptr).#field_ident);
                    }
                }
            }
        });
        quote! {
            impl #builder_impl_generics Drop for #builder_ident #builder_ty_generics #builder_where_clause {
                fn drop(&mut self) {
                    let dst_ptr = self.inner.as_mut_ptr();
                    #(#drops)*
                }
            }
        }
    };
    let builder_impl = {
        // Mask with one bit set per field; avoid the overflowing `1 << BITS`
        // shift when the field count exactly fills the bit-set type.
        let is_fully_init_mask = if fields.len() as u32 == builder_bit_set_bits {
            quote!(#builder_bit_set_ty::MAX)
        } else {
            let field_bits = LitInt::new(&fields.len().to_string(), Span::call_site());
            quote!(((1 as #builder_bit_set_ty) << #field_bits) - 1)
        };
        quote! {
            impl #builder_impl_generics #builder_ident #builder_ty_generics #builder_where_clause {
                #vis const fn from_maybe_uninit_mut(inner: &'_wincode_inner mut MaybeUninit<#builder_dst>) -> Self {
                    Self {
                        inner,
                        init_set: 0,
                    }
                }
                /// Check if the builder is fully initialized.
                ///
                /// This will check if all field initialization bits are set.
                #[inline]
                #vis const fn is_init(&self) -> bool {
                    self.init_set == #is_fully_init_mask
                }
                /// Assume the builder is fully initialized, and return a mutable reference to the inner `MaybeUninit` struct.
                ///
                /// The builder will be forgotten, so the drop logic will no longer run.
                ///
                /// # Safety
                ///
                /// Calling this when the content is not yet fully initialized causes undefined behavior: it is up to the caller
                /// to guarantee that the `MaybeUninit<T>` really is in an initialized state.
                #[inline]
                #vis unsafe fn into_assume_init_mut(self) -> &'_wincode_inner mut #builder_dst {
                    let mut this = ManuallyDrop::new(self);
                    // SAFETY: reference lives beyond the scope of the builder, and builder is forgotten.
                    let inner = unsafe { ptr::read(&mut this.inner) };
                    // SAFETY: Caller asserts the `MaybeUninit<T>` is in an initialized state.
                    unsafe {
                        inner.assume_init_mut()
                    }
                }
                /// Forget the builder, disabling the drop logic.
                #[inline]
                #vis const fn finish(self) {
                    mem::forget(self);
                }
            }
        }
    };
    // Generate the helper methods for the builder.
    let builder_helpers = fields.iter().enumerate().map(|(i, field)| {
        let ty = &field.ty;
        let target_reader_bound = field.target_resolved().with_lifetime("de");
        let ident = field.struct_member_ident(i);
        let ident_string = field.struct_member_ident_to_string(i);
        let uninit_mut_ident = format_ident!("uninit_{ident_string}_mut");
        let read_field_ident = format_ident!("read_{ident_string}");
        let write_uninit_field_ident = format_ident!("write_{ident_string}");
        let assume_init_field_ident = format_ident!("assume_init_{ident_string}");
        let init_with_field_ident = format_ident!("init_{ident_string}_with");
        let lifetimes = ty.lifetimes();
        // We must always extract the `Dst` from the type because `SchemaRead` implementations need
        // not necessarily write to `Self` -- they write to `Self::Dst`, which isn't necessarily `Self`
        // (e.g., in the case of container types).
        let field_projection_type = if lifetimes.is_empty() {
            quote!(<#ty as SchemaRead<'_>>::Dst)
        } else {
            let lt = lifetimes[0];
            // Even though a type may have multiple distinct lifetimes, we force them to be uniform
            // for a `SchemaRead` cast because an implementation of `SchemaRead` must bind all lifetimes
            // to the lifetime of the reader (and will not be implemented over multiple distinct lifetimes).
            //
            // Fix: this expression was garbled in the source (`<.ident` — an
            // HTML-mangled `&lt.ident`); restore the reference to `lt`'s ident.
            let ty = ty.with_lifetime(&lt.ident.to_string());
            quote!(<#ty as SchemaRead<#lt>>::Dst)
        };
        // The bit index for the field.
        let index_bit = LitInt::new(&(1u128 << i).to_string(), Span::call_site());
        let set_index_bit = quote! {
            self.init_set |= #index_bit;
        };
        quote! {
            /// Get a mutable reference to the maybe uninitialized field.
            #[inline]
            #vis const fn #uninit_mut_ident(&mut self) -> &mut MaybeUninit<#field_projection_type> {
                // SAFETY:
                // - `self.inner` is a valid reference to a `MaybeUninit<#builder_dst>`.
                // - We return the field as `&mut MaybeUninit<#target>`, so
                //   the field is never exposed as initialized.
                unsafe { &mut *(&raw mut (*self.inner.as_mut_ptr()).#ident).cast() }
            }
            /// Write a value to the maybe uninitialized field.
            #[inline]
            #vis const fn #write_uninit_field_ident(&mut self, val: #field_projection_type) -> &mut Self {
                self.#uninit_mut_ident().write(val);
                #set_index_bit
                self
            }
            /// Read a value from the reader into the maybe uninitialized field.
            #[inline]
            #vis fn #read_field_ident <'de>(&mut self, reader: &mut impl Reader<'de>) -> ReadResult<&mut Self> {
                // SAFETY:
                // - `self.inner` is a valid reference to a `MaybeUninit<#builder_dst>`.
                // - We return the field as `&mut MaybeUninit<#target>`, so
                //   the field is never exposed as initialized.
                let proj = unsafe { &mut *(&raw mut (*self.inner.as_mut_ptr()).#ident).cast() };
                <#target_reader_bound as SchemaRead<'de>>::read(reader, proj)?;
                #set_index_bit
                Ok(self)
            }
            /// Initialize the field with a given initializer function.
            ///
            /// # Safety
            ///
            /// The caller must guarantee that the initializer function fully initializes the field.
            #[inline]
            #vis unsafe fn #init_with_field_ident(&mut self, mut initializer: impl FnMut(&mut MaybeUninit<#field_projection_type>) -> ReadResult<()>) -> ReadResult<&mut Self> {
                initializer(self.#uninit_mut_ident())?;
                #set_index_bit
                Ok(self)
            }
            /// Mark the field as initialized.
            ///
            /// # Safety
            ///
            /// Caller must guarantee the field has been fully initialized prior to calling this.
            #[inline]
            #vis const unsafe fn #assume_init_field_ident(&mut self) -> &mut Self {
                #set_index_bit
                self
            }
        }
    });
    Ok(quote! {
        const _: () = {
            use {
                core::{mem::{MaybeUninit, ManuallyDrop, self}, ptr, marker::PhantomData},
                #crate_name::{SchemaRead, ReadResult, TypeMeta, io::Reader, error,},
            };
            impl #impl_generics #struct_ident #ty_generics #where_clause {
                #(#helpers)*
            }
            #builder_drop_impl
            #builder_impl
            impl #builder_impl_generics #builder_ident #builder_ty_generics #builder_where_clause {
                #(#builder_helpers)*
            }
        };
        #builder_struct_decl
    })
}
/// Generate the `SchemaRead::read` body and `TYPE_META` expression for an enum.
///
/// Emits a `match` over the decoded discriminant with one arm per variant.
/// Variant fields are read into local temporaries and the fully-built variant
/// is then written into `dst` — placement initialization is not available for
/// enums (see the inline comment below).
fn impl_enum(
    enum_ident: &Type,
    variants: &[Variant],
    tag_encoding: Option<&Type>,
) -> (TokenStream, TokenStream) {
    // An uninhabited enum can never be constructed; emit a no-op body.
    if variants.is_empty() {
        return (quote! {Ok(())}, quote! {TypeMeta::Dynamic});
    }
    let default_tag_encoding = default_tag_encoding();
    let tag_encoding = tag_encoding.unwrap_or(&default_tag_encoding);
    let type_meta_impl = variants.type_meta_impl(TraitImpl::SchemaRead, tag_encoding);
    let read_impl = variants.iter().enumerate().map(|(i, variant)| {
        let variant_ident = &variant.ident;
        let fields = &variant.fields;
        let discriminant = variant.discriminant(i);
        match fields.style {
            style @ (Style::Struct | Style::Tuple) => {
                // No prefix disambiguation needed, as we are matching on a discriminant integer.
                let idents = fields.enum_member_ident_iter(None).collect::<Vec<_>>();
                let read = fields
                    .iter()
                    .zip(&idents)
                    .map(|(field, ident)| {
                        let target = field.target_resolved().with_lifetime("de");
                        // Unfortunately we can't avoid temporaries for arbitrary enums, as Rust does not provide
                        // facilities for placement initialization on enums.
                        //
                        // In the future, we may be able to support an attribute that allows users to opt into
                        // a macro-generated shadowed enum that wraps all variant fields with `MaybeUninit`, which
                        // could be used to facilitate direct reads. The user would have to guarantee layout on
                        // their type (a la `#[repr(C)]`), or roll the dice on non-guaranteed layout -- so it would need to be opt-in.
                        quote! {
                            let #ident = <#target as SchemaRead<'de>>::get(reader)?;
                        }
                    })
                    .collect::<Vec<_>>();
                // No prefix disambiguation needed, as we are matching on a discriminant integer.
                let static_anon_idents = fields.member_anon_ident_iter(None).collect::<Vec<_>>();
                let static_targets = fields.iter().map(|field| {
                    let target = field.target_resolved().with_lifetime("de");
                    quote! {<#target as SchemaRead<'de>>::TYPE_META}
                });
                // Struct-like variants use brace construction, tuple-like use parens.
                let constructor = if style.is_struct() {
                    quote! {
                        #enum_ident::#variant_ident{#(#idents),*}
                    }
                } else {
                    quote! {
                        #enum_ident::#variant_ident(#(#idents),*)
                    }
                };
                quote! {
                    #discriminant => {
                        // When every field's TYPE_META is Static, read through a
                        // trusted window sized to the summed field sizes.
                        if let (#(TypeMeta::Static { size: #static_anon_idents, .. }),*) = (#(#static_targets),*) {
                            let summed_sizes = #(#static_anon_idents)+*;
                            // SAFETY: `summed_sizes` is the sum of the static sizes of the fields,
                            // which is the serialized size of the variant.
                            // Calling `read` on each field will consume exactly `summed_sizes` bytes,
                            // fully consuming the trusted window.
                            let reader = &mut unsafe { reader.as_trusted_for(summed_sizes) }?;
                            #(#read)*
                            dst.write(#constructor);
                        } else {
                            #(#read)*
                            dst.write(#constructor);
                        }
                    }
                }
            }
            // Unit variants carry no payload: just write the variant.
            Style::Unit => quote! {
                #discriminant => {
                    dst.write(#enum_ident::#variant_ident);
                }
            },
        }
    });
    (
        quote! {
            let discriminant = #tag_encoding::get(reader)?;
            match discriminant {
                #(#read_impl)*
                // Unknown tag: reject the input rather than construct garbage.
                _ => return Err(error::invalid_tag_encoding(discriminant as usize)),
            }
        },
        quote! {
            #type_meta_impl
        },
    )
}
/// Extend the `'de` lifetime to all lifetime parameters in the generics.
///
/// This enforces that the `SchemaRead` lifetime (`'de`) and thus its
/// `Reader<'de>` (the source bytes) extends to all lifetime parameters
/// in the derived type.
///
/// For example, given the following type:
/// ```
/// struct Foo<'a> {
/// x: &'a str,
/// }
/// ```
///
/// We must ensure `'de` outlives all other lifetimes in the generics.
fn append_de_lifetime(generics: &Generics) -> Generics {
let mut generics = generics.clone();
if generics.lifetimes().next().is_none() {
generics
.params
.push(GenericParam::Lifetime(parse_quote!('de)));
return generics;
}
let lifetimes = generics.lifetimes();
// Ensure `'de` outlives other lifetimes in the generics.
generics
.params
.push(GenericParam::Lifetime(parse_quote!('de: #(#lifetimes)+*)));
generics
}
/// Entry point for the `SchemaRead` derive.
///
/// Builds the `SchemaRead<'de>` impl for the input type (struct or enum body
/// plus the `TYPE_META` constant), a conditional `ZeroCopy` impl for eligible
/// `repr` structs, and the opt-in `struct_extensions` helper API.
pub(crate) fn generate(input: DeriveInput) -> Result<TokenStream> {
    let repr = extract_repr(&input, TraitImpl::SchemaRead)?;
    let args = SchemaArgs::from_derive_input(&input)?;
    // The impl generics carry the extra `'de` lifetime; the type generics and
    // where-clause come from the original (un-appended) generics.
    let appended_generics = append_de_lifetime(&args.generics);
    let (impl_generics, _, _) = appended_generics.split_for_impl();
    let (_, ty_generics, where_clause) = args.generics.split_for_impl();
    let ident = &args.ident;
    let crate_name = get_crate_name(&args);
    let src_dst = get_src_dst(&args);
    let field_suppress = suppress_unused_fields(&args);
    let struct_extensions = impl_struct_extensions(&args, &crate_name)?;
    let (read_impl, type_meta_impl) = match &args.data {
        Data::Struct(fields) => {
            // `tag_encoding` describes an enum discriminant; reject it on structs.
            if args.tag_encoding.is_some() {
                return Err(Error::custom("`tag_encoding` is only supported for enums"));
            }
            // Only structs are eligible being marked zero-copy, so only the struct
            // impl needs the repr.
            impl_struct(&args, fields, &repr)
        }
        Data::Enum(v) => {
            // `from` designates a mapping type to construct instead of `Self`.
            let enum_ident = match &args.from {
                Some(from) => from,
                None => &parse_quote!(Self),
            };
            impl_enum(enum_ident, v, args.tag_encoding.as_ref())
        }
    };
    // Provide a `ZeroCopy` impl for the type if its `repr` is eligible and all its fields are zero-copy.
    let zero_copy_impl = match &args.data {
        Data::Struct(_)
            if repr.is_zero_copy_eligible()
                // Generics will trigger "cannot use type generics in const context".
                // Unfortunate, but generics in a zero-copy context are presumably a more niche use-case,
                // so we'll deal with it for now.
                && args.generics.type_params().next().is_none()
                // Types containing references are not zero-copy eligible.
                && args.generics.lifetimes().next().is_none() =>
        {
            let mut bounds = Punctuated::new();
            bounds.push(parse_quote!(IsTrue));
            // Const-assert `TYPE_META.zero_copy` at impl time via a
            // `where Assert<{ ... }>: IsTrue` predicate.
            let zero_copy_predicate = WherePredicate::Type(PredicateType {
                // Workaround for https://github.com/rust-lang/rust/issues/48214.
                lifetimes: Some(parse_quote!(for<'_wincode_internal>)),
                // Piggyback on the existing TypeMeta zero-copy predicate.
                // The type itself will only be zero-copy if its TypeMeta is Static and its zero_copy flag is true,
                // which entails all its fields being zero-copy and the struct not having any padding.
                bounded_ty: parse_quote!(
                    Assert<
                        {
                            matches!(<#ident as SchemaRead<'_>>::TYPE_META, TypeMeta::Static { zero_copy: true, .. })
                        },
                    >
                ),
                colon_token: parse_quote![:],
                bounds,
            });
            let (impl_generics, ty_generics, where_clause) = args.generics.split_for_impl();
            // Append the predicate to an existing where-clause, or synthesize one.
            let mut where_clause = where_clause.cloned();
            match &mut where_clause {
                Some(where_clause) => {
                    where_clause.predicates.push(zero_copy_predicate);
                }
                None => {
                    where_clause = Some(WhereClause {
                        where_token: parse_quote!(where),
                        predicates: Punctuated::from_iter([zero_copy_predicate]),
                    });
                }
            }
            quote! {
                // Ugly, but functional.
                struct Assert<const B: bool>;
                trait IsTrue {}
                impl IsTrue for Assert<true> {}
                unsafe impl #impl_generics ZeroCopy for #ident #ty_generics #where_clause {}
            }
        }
        _ => quote!(),
    };
    Ok(quote! {
        // `const _` scopes the imports and helper items away from user code.
        const _: () = {
            use core::{ptr, mem::{self, MaybeUninit}};
            use #crate_name::{SchemaRead, ReadResult, TypeMeta, io::Reader, error, ZeroCopy};
            #zero_copy_impl
            impl #impl_generics SchemaRead<'de> for #ident #ty_generics #where_clause {
                type Dst = #src_dst;
                #[allow(clippy::arithmetic_side_effects)]
                const TYPE_META: TypeMeta = #type_meta_impl;
                #[inline]
                fn read(reader: &mut impl Reader<'de>, dst: &mut MaybeUninit<Self::Dst>) -> ReadResult<()> {
                    #read_impl
                    Ok(())
                }
            }
        };
        #struct_extensions
        #field_suppress
    })
}
| rust | Apache-2.0 | 9f0ffa346d95c31b94486b7bfea724b73330c42f | 2026-01-04T20:24:02.028790Z | false |
rust-lang/libz-sys | https://github.com/rust-lang/libz-sys/blob/fc9f6504e415a3d0787d9c84db9a043ad3f050d4/build.rs | build.rs | use std::env;
use std::fs;
use std::path::PathBuf;
/// Cargo build-script entry point: decide how to provide libz.
///
/// Strategy, in order:
/// 1. Build zlib-ng when a zlib-ng feature is enabled (non-wasm targets).
/// 2. Link the system library directly on platforms that always ship it
///    (Android, Haiku, OpenHarmony).
/// 3. Probe pkg-config (skipped for static linking, MSVC, and same-host
///    FreeBSD/DragonFly — see inline comments).
/// 4. On Windows, try vcpkg.
/// 5. Build the vendored zlib (always on MSVC/MinGW or when static linking is
///    requested); otherwise link a detected system zlib, falling back to a
///    source build.
fn main() {
    println!("cargo:rerun-if-env-changed=LIBZ_SYS_STATIC");
    println!("cargo:rerun-if-changed=build.rs");
    println!("cargo:rerun-if-changed=zng/cmake.rs");
    println!("cargo:rerun-if-changed=zng/cc.rs");
    let host = env::var("HOST").unwrap();
    let target = env::var("TARGET").unwrap();
    // True only when the substring appears in *both* triples, i.e. we are not
    // cross-compiling across that platform boundary.
    let host_and_target_contain = |s| host.contains(s) && target.contains(s);
    // zlib-ng is requested unless `stock-zlib` explicitly overrides it.
    let want_ng = cfg!(any(
        feature = "zlib-ng",
        feature = "zlib-ng-no-cmake-experimental-community-maintained"
    )) && !cfg!(feature = "stock-zlib");
    if want_ng && target != "wasm32-unknown-unknown" {
        return build_zlib_ng(&target, true);
    }
    // All android compilers should come with libz by default, so let's just use
    // the one already there. Likewise, Haiku and OpenHarmony always ship with libz,
    // so we can link to it even when cross-compiling.
    if target.contains("android") || target.contains("haiku") || target.ends_with("-ohos") {
        println!("cargo:rustc-link-lib=z");
        return;
    }
    let want_static = should_link_static();
    // Don't run pkg-config if we're linking statically (we'll build below) and
    // also don't run pkg-config on FreeBSD/DragonFly. That'll end up printing
    // `-L /usr/lib` which wreaks havoc with linking to an OpenSSL in /usr/local/lib
    // (Ports, etc.)
    if !want_static &&
        !target.contains("msvc") && // pkg-config just never works here
        !(host_and_target_contain("freebsd") ||
        host_and_target_contain("dragonfly"))
    {
        // Don't print system lib dirs to cargo since this interferes with other
        // packages adding non-system search paths to link against libraries
        // that are also found in a system-wide lib dir.
        let zlib = pkg_config::Config::new()
            .cargo_metadata(true)
            .print_system_libs(false)
            .probe("zlib");
        // NOTE(review): a successful probe emits link metadata here but does not
        // return early — control continues below and may also link/build zlib.
        // Presumably harmless duplication; confirm intended.
        match zlib {
            Ok(zlib) => {
                if !zlib.include_paths.is_empty() {
                    let paths = zlib
                        .include_paths
                        .iter()
                        .map(|s| s.display().to_string())
                        .collect::<Vec<_>>();
                    println!("cargo:include={}", paths.join(","));
                }
            }
            Err(e) => {
                println!("cargo:warning=Could not find zlib include paths via pkg-config: {}", e)
            }
        }
    }
    if target.contains("windows") {
        if try_vcpkg() {
            return;
        }
    }
    let mut cfg = cc::Build::new();
    // Situations where we build unconditionally.
    //
    // - MSVC basically never has zlib preinstalled
    // - MinGW picks up a bunch of weird paths we don't like
    // - Explicit opt-in via `want_static`
    if target.contains("msvc")
        || target.contains("pc-windows-gnu")
        || want_static
    {
        return build_zlib(&mut cfg, &target);
    }
    // If we've gotten this far we're probably a pretty standard platform.
    // Almost all platforms here ship libz by default, but some don't have
    // pkg-config files that we would find above.
    //
    // In any case test if zlib is actually installed and if so we link to it,
    // otherwise continue below to build things.
    if zlib_installed(&mut cfg) {
        println!("cargo:rustc-link-lib=z");
        return;
    }
    // For convenience fallback to building zlib if attempting to link zlib failed
    build_zlib(&mut cfg, &target)
}
/// Compile the vendored stock zlib from `src/zlib` into `$OUT_DIR/lib`.
///
/// Also stages the public headers under `$OUT_DIR/include`, generates a
/// `zlib.pc` pkg-config file from the vendored template (with the version
/// scraped out of `zlib.h`), and emits the cargo link/include metadata.
/// Panics (via `unwrap`) on any I/O failure — acceptable in a build script.
fn build_zlib(cfg: &mut cc::Build, target: &str) {
    let dst = PathBuf::from(env::var_os("OUT_DIR").unwrap());
    let lib = dst.join("lib");
    cfg.warnings(false).out_dir(&lib).include("src/zlib");
    // Core compression/decompression translation units.
    cfg.file("src/zlib/adler32.c")
        .file("src/zlib/compress.c")
        .file("src/zlib/crc32.c")
        .file("src/zlib/deflate.c")
        .file("src/zlib/infback.c")
        .file("src/zlib/inffast.c")
        .file("src/zlib/inflate.c")
        .file("src/zlib/inftrees.c")
        .file("src/zlib/trees.c")
        .file("src/zlib/uncompr.c")
        .file("src/zlib/zutil.c")ittest;
    if !cfg!(feature = "libc") || target.starts_with("wasm32") {
        // Z_SOLO strips the gz* file I/O (needs a libc we don't have here).
        cfg.define("Z_SOLO", None);
    } else {
        // gz* file I/O support, only when a libc is available.
        cfg.file("src/zlib/gzclose.c")
            .file("src/zlib/gzlib.c")
            .file("src/zlib/gzread.c")
            .file("src/zlib/gzwrite.c");
    }
    if !target.contains("windows") {
        cfg.define("STDC", None);
        cfg.define("_LARGEFILE64_SOURCE", None);
        cfg.define("_POSIX_SOURCE", None);
        // Keep non-API symbols out of the static archive's export surface.
        cfg.flag("-fvisibility=hidden");
    }
    if target.contains("apple") {
        cfg.define("_C99_SOURCE", None);
    }
    if target.contains("solaris") {
        cfg.define("_XOPEN_SOURCE", "700");
    }
    cfg.compile("z");
    // Stage headers so dependents can `cargo:include` against OUT_DIR.
    fs::create_dir_all(dst.join("include")).unwrap();
    fs::copy("src/zlib/zlib.h", dst.join("include/zlib.h")).unwrap();
    fs::copy("src/zlib/zconf.h", dst.join("include/zconf.h")).unwrap();
    fs::create_dir_all(lib.join("pkgconfig")).unwrap();
    // Scrape the quoted version string from the ZLIB_VERSION define in zlib.h.
    let zlib_h = fs::read_to_string(dst.join("include/zlib.h")).unwrap();
    let version = zlib_h
        .lines()
        .find(|l| l.contains("ZLIB_VERSION"))
        .unwrap()
        .split("\"")
        .nth(1)
        .unwrap();
    // Instantiate the pkg-config template with our OUT_DIR prefix.
    fs::write(
        lib.join("pkgconfig/zlib.pc"),
        fs::read_to_string("src/zlib/zlib.pc.in")
            .unwrap()
            .replace("@prefix@", dst.to_str().unwrap())
            .replace("@includedir@", "${prefix}/include")
            .replace("@libdir@", "${prefix}/lib")
            .replace("@VERSION@", version),
    )
    .unwrap();
    println!("cargo:root={}", dst.to_str().unwrap());
    println!("cargo:rustc-link-search=native={}", lib.to_str().unwrap());
    println!("cargo:include={}/include", dst.to_str().unwrap());
}
// The zlib-ng build implementation, only compiled when a zlib-ng feature is on.
#[cfg(any(
    feature = "zlib-ng",
    feature = "zlib-ng-no-cmake-experimental-community-maintained"
))]
mod zng {
    // Select the build strategy at compile time: the cmake-based build when the
    // `zlib-ng` feature is enabled, otherwise the cc-only fallback. `zlib-ng`
    // wins when both features are set.
    #[cfg_attr(feature = "zlib-ng", path = "cmake.rs")]
    #[cfg_attr(
        all(
            feature = "zlib-ng-no-cmake-experimental-community-maintained",
            not(feature = "zlib-ng")
        ),
        path = "cc.rs"
    )]
    mod build_zng;
    pub(super) use build_zng::build_zlib_ng;
}
/// Dispatch to the feature-gated zlib-ng build.
///
/// Thin forwarder so `main` can call this unconditionally: the body compiles
/// to a no-op when neither zlib-ng feature is enabled (parameters are
/// underscore-prefixed because they are unused in that configuration).
fn build_zlib_ng(_target: &str, _compat: bool) {
    #[cfg(any(
        feature = "zlib-ng",
        feature = "zlib-ng-no-cmake-experimental-community-maintained"
    ))]
    zng::build_zlib_ng(_target, _compat);
}
/// Look for a vcpkg tree with zlib installed (Windows only caller).
///
/// On success vcpkg emits the cargo link metadata itself; we additionally
/// forward any include paths via `cargo:include`. Returns `true` iff zlib
/// was found; failure is non-fatal (the caller falls back to other options).
fn try_vcpkg() -> bool {
    let probe = vcpkg::Config::new()
        .emit_includes(true)
        .find_package("zlib");
    let zlib = match probe {
        Ok(lib) => lib,
        Err(err) => {
            // Informational only — not a cargo warning.
            println!("note, vcpkg did not find zlib: {}", err);
            return false;
        }
    };
    if !zlib.include_paths.is_empty() {
        // Expose include dirs to dependent build scripts as a comma list.
        let joined = zlib
            .include_paths
            .iter()
            .map(|p| p.display().to_string())
            .collect::<Vec<_>>()
            .join(",");
        println!("cargo:include={}", joined);
    }
    true
}
/// Smoke-test whether the configured C compiler can link against `-lz`.
///
/// Compiles `src/smoke.c` straight to `/dev/null` with `-lz`; a successful
/// exit status means a usable system zlib is present. Any failure to spawn
/// the compiler or a non-zero exit yields `false`.
fn zlib_installed(cfg: &mut cc::Build) -> bool {
    let mut probe = cfg.get_compiler().to_command();
    probe
        .arg("src/smoke.c")
        .arg("-g0")
        .arg("-o")
        .arg("/dev/null")
        .arg("-lz");
    println!("running {:?}", probe);
    probe
        .status()
        .map(|status| status.success())
        .unwrap_or(false)
}
/// The environment variable `LIBZ_SYS_STATIC` is first checked for a value of `0` (false) or `1` (true),
/// before considering the `static` feature when no explicit ENV value was detected.
/// When `libz-sys` is a transitive dependency from a crate that forces static linking via the `static` feature,
/// this enables the build environment to revert that preference via `LIBZ_SYS_STATIC=0`.
/// The default is otherwise `false`.
fn should_link_static() -> bool {
    // Read the variable at build-script *runtime* rather than via `option_env!`,
    // which bakes the value in when the build script itself is compiled. The
    // runtime read matches `cargo:rerun-if-env-changed=LIBZ_SYS_STATIC` in
    // `main`, which guarantees a re-*run* of the script on change — not
    // necessarily a recompile — so a compile-time read could serve a stale value.
    let has_static_env = env::var("LIBZ_SYS_STATIC").ok();
    let has_static_cfg = cfg!(feature = "static");
    has_static_env
        .and_then(|s| s.parse::<u8>().ok())
        .and_then(|b| match b {
            0 => Some(false),
            1 => Some(true),
            // Any other value is ignored; fall back to the feature flag.
            _ => None,
        })
        .unwrap_or(has_static_cfg)
}
| rust | Apache-2.0 | fc9f6504e415a3d0787d9c84db9a043ad3f050d4 | 2026-01-04T20:23:59.805655Z | false |
rust-lang/libz-sys | https://github.com/rust-lang/libz-sys/blob/fc9f6504e415a3d0787d9c84db9a043ad3f050d4/src/lib.rs | src/lib.rs | #![allow(non_camel_case_types)]
#![allow(non_snake_case)]
use std::os::raw::{c_char, c_int, c_long, c_uchar, c_uint, c_ulong, c_void};
// Macro for variances between zlib-ng in native mode and either zlib or zlib-ng in zlib compat
// mode. Note in particular that zlib-ng in compat mode does *not* use the zng case.
//
// Usage: `if_zng!(<tokens when cfg(zng)>, <tokens otherwise>)`. Exactly one of
// the two cfg-gated definitions below is compiled, so the selection costs
// nothing at runtime.
#[cfg(not(zng))]
macro_rules! if_zng {
    ($_zng:tt, $not_zng:tt) => {
        $not_zng
    };
}
#[cfg(zng)]
macro_rules! if_zng {
    ($zng:tt, $_not_zng:tt) => {
        $zng
    };
}
// zlib uses unsigned long for various sizes; zlib-ng uses size_t.
type z_size = if_zng!(usize, c_ulong);
// zlib stores Adler-32 and CRC-32 checksums in unsigned long; zlib-ng uses uint32_t.
type z_checksum = if_zng!(u32, c_ulong);
// Allocation callbacks installed into `z_stream.zalloc` / `z_stream.zfree`.
pub type alloc_func = unsafe extern "C" fn(voidpf, uInt, uInt) -> voidpf;
pub type Bytef = u8;
pub type free_func = unsafe extern "C" fn(voidpf, voidpf);
// Opaque handle for gz* file I/O; only present when those functions are built.
#[cfg(any(zng, feature = "libc"))]
pub type gzFile = *mut gzFile_s;
// Pull/push callbacks used by `inflateBack`.
pub type in_func = unsafe extern "C" fn(*mut c_void, *mut *const c_uchar) -> c_uint;
pub type out_func = unsafe extern "C" fn(*mut c_void, *mut c_uchar, c_uint) -> c_int;
pub type uInt = c_uint;
pub type uLong = c_ulong;
pub type uLongf = c_ulong;
pub type voidp = *mut c_void;
pub type voidpc = *const c_void;
pub type voidpf = *mut c_void;
// Opaque C types: declared as uninhabited enums so they can only ever be used
// behind raw pointers.
#[cfg(any(zng, feature = "libc"))]
pub enum gzFile_s {}
pub enum internal_state {}
// `z_off_t` mirrors the C definition, which varies by flavor and platform:
// stock zlib with libc uses libc's off_t (except wasm32-unknown-unknown, which
// has no libc and falls back to c_long); zlib-ng on non-GNU Windows is 64-bit.
#[cfg(all(
    not(zng),
    feature = "libc",
    not(all(target_family = "wasm", target_os = "unknown"))
))]
pub type z_off_t = libc::off_t;
#[cfg(all(
    not(zng),
    feature = "libc",
    all(target_family = "wasm", target_os = "unknown")
))]
pub type z_off_t = c_long;
#[cfg(all(zng, windows, not(target_env = "gnu")))]
pub type z_off_t = i64;
#[cfg(all(zng, not(all(windows, not(target_env = "gnu")))))]
pub type z_off_t = libc::off_t;
/// FFI mirror of zlib's `gz_header` struct (gzip header metadata used by
/// `deflateSetHeader` / `inflateGetHeader`).
///
/// Layout must match the C definition exactly: `#[repr(C)]`, identical field
/// order and types. Do not reorder, add, or resize fields.
#[repr(C)]
#[derive(Copy, Clone)]
pub struct gz_header {
    pub text: c_int,
    pub time: uLong,
    pub xflags: c_int,
    pub os: c_int,
    // Buffer-pointer fields are paired with their length (`*_len`) and
    // capacity (`*_max`) counterparts, as in the C API.
    pub extra: *mut Bytef,
    pub extra_len: uInt,
    pub extra_max: uInt,
    pub name: *mut Bytef,
    pub name_max: uInt,
    pub comment: *mut Bytef,
    pub comm_max: uInt,
    pub hcrc: c_int,
    pub done: c_int,
}
/// Pointer alias matching zlib's `gz_headerp` typedef.
pub type gz_headerp = *mut gz_header;
/// FFI mirror of zlib's `z_stream` struct — the central state object passed to
/// the deflate/inflate functions.
///
/// Layout must match the C definition exactly: `#[repr(C)]`, identical field
/// order and types. Note that `total_in`/`total_out` are `z_size` and `adler`
/// is `z_checksum`, whose underlying C types differ between zlib and native
/// zlib-ng (see the `if_zng!` aliases near the top of this file).
#[repr(C)]
#[derive(Copy, Clone)]
pub struct z_stream {
    pub next_in: *mut Bytef,
    pub avail_in: uInt,
    pub total_in: z_size,
    pub next_out: *mut Bytef,
    pub avail_out: uInt,
    pub total_out: z_size,
    pub msg: *mut c_char,
    // Opaque internal state owned by zlib.
    pub state: *mut internal_state,
    // Custom allocator hooks; `opaque` is passed through to them.
    pub zalloc: alloc_func,
    pub zfree: free_func,
    pub opaque: voidpf,
    pub data_type: c_int,
    pub adler: z_checksum,
    pub reserved: uLong,
}
/// Pointer alias matching zlib's `z_streamp` typedef.
pub type z_streamp = *mut z_stream;
// Ideally, this should instead use a macro that parses the whole block of externs, and generates
// the appropriate link_name attributes, without duplicating the function names. However, ctest2
// can't parse that.
//
// `zng_prefix!(name)` expands to the link-time symbol name as a string literal:
// `"name"` for zlib / zlib-ng compat builds, `"zng_name"` for native zlib-ng
// builds (whose exported symbols carry the `zng_` prefix).
#[cfg(not(zng))]
macro_rules! zng_prefix {
    ($name:expr) => {
        stringify!($name)
    };
}
#[cfg(zng)]
macro_rules! zng_prefix {
    ($name:expr) => {
        concat!("zng_", stringify!($name))
    };
}
// Core checksum, deflate and inflate entry points, available with both stock
// zlib and zlib-ng; `zng_prefix!` resolves the correct linker symbol.
extern "C" {
    #[link_name = zng_prefix!(adler32)]
    pub fn adler32(adler: z_checksum, buf: *const Bytef, len: uInt) -> z_checksum;
    #[link_name = zng_prefix!(crc32)]
    pub fn crc32(crc: z_checksum, buf: *const Bytef, len: uInt) -> z_checksum;
    #[link_name = zng_prefix!(deflate)]
    pub fn deflate(strm: z_streamp, flush: c_int) -> c_int;
    #[link_name = zng_prefix!(deflateBound)]
    pub fn deflateBound(strm: z_streamp, sourceLen: uLong) -> uLong;
    #[link_name = zng_prefix!(deflateCopy)]
    pub fn deflateCopy(dest: z_streamp, source: z_streamp) -> c_int;
    #[link_name = zng_prefix!(deflateEnd)]
    pub fn deflateEnd(strm: z_streamp) -> c_int;
    #[link_name = zng_prefix!(deflateParams)]
    pub fn deflateParams(strm: z_streamp, level: c_int, strategy: c_int) -> c_int;
    #[link_name = zng_prefix!(deflatePrime)]
    pub fn deflatePrime(strm: z_streamp, bits: c_int, value: c_int) -> c_int;
    #[link_name = zng_prefix!(deflateReset)]
    pub fn deflateReset(strm: z_streamp) -> c_int;
    #[link_name = zng_prefix!(deflateSetDictionary)]
    pub fn deflateSetDictionary(
        strm: z_streamp,
        dictionary: *const Bytef,
        dictLength: uInt,
    ) -> c_int;
    #[link_name = zng_prefix!(deflateSetHeader)]
    pub fn deflateSetHeader(strm: z_streamp, head: gz_headerp) -> c_int;
    #[link_name = zng_prefix!(deflateTune)]
    pub fn deflateTune(
        strm: z_streamp,
        good_length: c_int,
        max_lazy: c_int,
        nice_length: c_int,
        max_chain: c_int,
    ) -> c_int;
    #[link_name = zng_prefix!(inflate)]
    pub fn inflate(strm: z_streamp, flush: c_int) -> c_int;
    #[link_name = zng_prefix!(inflateBack)]
    pub fn inflateBack(
        strm: z_streamp,
        _in: in_func,
        in_desc: *mut c_void,
        out: out_func,
        out_desc: *mut c_void,
    ) -> c_int;
    #[link_name = zng_prefix!(inflateBackEnd)]
    pub fn inflateBackEnd(strm: z_streamp) -> c_int;
    #[link_name = zng_prefix!(inflateCopy)]
    pub fn inflateCopy(dest: z_streamp, source: z_streamp) -> c_int;
    #[link_name = zng_prefix!(inflateEnd)]
    pub fn inflateEnd(strm: z_streamp) -> c_int;
    #[link_name = zng_prefix!(inflateGetHeader)]
    pub fn inflateGetHeader(strm: z_streamp, head: gz_headerp) -> c_int;
    #[link_name = zng_prefix!(inflateMark)]
    pub fn inflateMark(strm: z_streamp) -> c_long;
    #[link_name = zng_prefix!(inflatePrime)]
    pub fn inflatePrime(strm: z_streamp, bits: c_int, value: c_int) -> c_int;
    #[link_name = zng_prefix!(inflateReset)]
    pub fn inflateReset(strm: z_streamp) -> c_int;
    #[link_name = zng_prefix!(inflateReset2)]
    pub fn inflateReset2(strm: z_streamp, windowBits: c_int) -> c_int;
    #[link_name = zng_prefix!(inflateSetDictionary)]
    pub fn inflateSetDictionary(
        strm: z_streamp,
        dictionary: *const Bytef,
        dictLength: uInt,
    ) -> c_int;
    #[link_name = zng_prefix!(inflateSync)]
    pub fn inflateSync(strm: z_streamp) -> c_int;
    #[link_name = zng_prefix!(zlibCompileFlags)]
    pub fn zlibCompileFlags() -> uLong;
    // The above set of functions currently target 1.2.3.4 (what's present on Ubuntu
    // 12.04, but there's some other APIs that were added later. Should figure out
    // how to expose them...
    //
    // Added in 1.2.5.1
    //
    // pub fn deflatePending(strm: z_streamp,
    //                       pending: *mut c_uint,
    //                       bits: *mut c_int) -> c_int;
    //
    // Added in 1.2.7.1
    // pub fn inflateGetDictionary(strm: z_streamp,
    //                             dictionary: *mut Bytef,
    //                             dictLength: *mut uInt) -> c_int;
    //
    // Added in 1.2.3.5
    // pub fn gzbuffer(file: gzFile, size: c_uint) -> c_int;
    // pub fn gzclose_r(file: gzFile) -> c_int;
    // pub fn gzclose_w(file: gzFile) -> c_int;
    // pub fn gzoffset(file: gzFile) -> z_off_t;
}
// zlib-ng renames the version query entirely (no `zng_` prefix), so it is
// mapped with `if_zng!` instead of `zng_prefix!`.
extern "C" {
    #[link_name = if_zng!("zlibng_version", "zlibVersion")]
    pub fn zlibVersion() -> *const c_char;
}
// Classic zlib stream-initialization entry points. The trailing
// `version`/`stream_size` arguments let the library verify that the caller
// was compiled against a compatible zlib version and struct layout.
#[cfg(not(zng))]
extern "C" {
    pub fn deflateInit_(
        strm: z_streamp,
        level: c_int,
        version: *const c_char,
        stream_size: c_int,
    ) -> c_int;
    pub fn deflateInit2_(
        strm: z_streamp,
        level: c_int,
        method: c_int,
        windowBits: c_int,
        memLevel: c_int,
        strategy: c_int,
        version: *const c_char,
        stream_size: c_int,
    ) -> c_int;
    pub fn inflateBackInit_(
        strm: z_streamp,
        windowBits: c_int,
        window: *mut c_uchar,
        version: *const c_char,
        stream_size: c_int,
    ) -> c_int;
    pub fn inflateInit_(strm: z_streamp, version: *const c_char, stream_size: c_int) -> c_int;
    pub fn inflateInit2_(
        strm: z_streamp,
        windowBits: c_int,
        version: *const c_char,
        stream_size: c_int,
    ) -> c_int;
}
// zlib-ng's native stream-initialization entry points: unlike classic zlib
// they take no `version`/`stream_size` arguments (the ABI check was dropped),
// hence the compatibility shims defined below.
#[cfg(zng)]
extern "C" {
    pub fn zng_deflateInit(strm: z_streamp, level: c_int) -> c_int;
    pub fn zng_deflateInit2(
        strm: z_streamp,
        level: c_int,
        method: c_int,
        windowBits: c_int,
        memLevel: c_int,
        strategy: c_int,
    ) -> c_int;
    pub fn zng_inflateBackInit(strm: z_streamp, windowBits: c_int, window: *mut c_uchar) -> c_int;
    pub fn zng_inflateInit(strm: z_streamp) -> c_int;
    pub fn zng_inflateInit2(strm: z_streamp, windowBits: c_int) -> c_int;
}
// These methods are required to keep BC with original zlib API since zlib-ng 2.1 that changed API
/// Compatibility shim: forwards to `zng_inflateInit2`, discarding the
/// `version`/`stream_size` arguments that classic zlib uses for ABI checks.
#[cfg(zng)]
#[inline(always)]
pub unsafe fn inflateInit2_(
    strm: z_streamp,
    windowBits: c_int,
    _version: *const c_char,
    _stream_size: c_int,
) -> c_int {
    zng_inflateInit2(strm, windowBits)
}
/// Compatibility shim: forwards to `zng_inflateInit`, discarding the
/// `version`/`stream_size` arguments that classic zlib uses for ABI checks.
#[cfg(zng)]
#[inline(always)]
pub unsafe fn inflateInit_(strm: z_streamp, _version: *const c_char, _stream_size: c_int) -> c_int {
    zng_inflateInit(strm)
}
/// Compatibility shim: forwards to `zng_inflateBackInit`, discarding the
/// `version`/`stream_size` arguments that classic zlib uses for ABI checks.
#[cfg(zng)]
#[inline(always)]
pub unsafe fn inflateBackInit_(
    strm: z_streamp,
    windowBits: c_int,
    window: *mut c_uchar,
    _version: *const c_char,
    _stream_size: c_int,
) -> c_int {
    zng_inflateBackInit(strm, windowBits, window)
}
/// Compatibility shim: forwards to `zng_deflateInit2`, discarding the
/// `version`/`stream_size` arguments that classic zlib uses for ABI checks.
#[cfg(zng)]
#[inline(always)]
pub unsafe fn deflateInit2_(
    strm: z_streamp,
    level: c_int,
    method: c_int,
    windowBits: c_int,
    memLevel: c_int,
    strategy: c_int,
    _version: *const c_char,
    _stream_size: c_int,
) -> c_int {
    zng_deflateInit2(strm, level, method, windowBits, memLevel, strategy)
}
/// Compatibility shim: forwards to `zng_deflateInit`, discarding the
/// `version`/`stream_size` arguments that classic zlib uses for ABI checks.
// `#[inline(always)]` for consistency with the four sibling shims above,
// which all force inlining of these trivial forwarding calls.
#[cfg(zng)]
#[inline(always)]
pub unsafe fn deflateInit_(
    strm: z_streamp,
    level: c_int,
    _version: *const c_char,
    _stream_size: c_int,
) -> c_int {
    zng_deflateInit(strm, level)
}
// gzFile convenience API (gzopen/gzread/gzwrite and friends). Only exposed
// when a gzFile type exists, i.e. with zlib-ng or when libc provides the
// needed pieces for stock zlib.
#[cfg(any(zng, feature = "libc"))]
extern "C" {
    #[link_name = zng_prefix!(adler32_combine)]
    pub fn adler32_combine(adler1: z_checksum, adler2: z_checksum, len2: z_off_t) -> z_checksum;
    #[link_name = zng_prefix!(compress)]
    pub fn compress(
        dest: *mut Bytef,
        destLen: *mut z_size,
        source: *const Bytef,
        sourceLen: z_size,
    ) -> c_int;
    #[link_name = zng_prefix!(compress2)]
    pub fn compress2(
        dest: *mut Bytef,
        destLen: *mut z_size,
        source: *const Bytef,
        sourceLen: z_size,
        level: c_int,
    ) -> c_int;
    #[link_name = zng_prefix!(compressBound)]
    pub fn compressBound(sourceLen: z_size) -> z_size;
    #[link_name = zng_prefix!(crc32_combine)]
    pub fn crc32_combine(crc1: z_checksum, crc2: z_checksum, len2: z_off_t) -> z_checksum;
    #[link_name = zng_prefix!(gzdirect)]
    pub fn gzdirect(file: gzFile) -> c_int;
    #[link_name = zng_prefix!(gzdopen)]
    pub fn gzdopen(fd: c_int, mode: *const c_char) -> gzFile;
    #[link_name = zng_prefix!(gzclearerr)]
    pub fn gzclearerr(file: gzFile);
    #[link_name = zng_prefix!(gzclose)]
    pub fn gzclose(file: gzFile) -> c_int;
    #[link_name = zng_prefix!(gzeof)]
    pub fn gzeof(file: gzFile) -> c_int;
    #[link_name = zng_prefix!(gzerror)]
    pub fn gzerror(file: gzFile, errnum: *mut c_int) -> *const c_char;
    #[link_name = zng_prefix!(gzflush)]
    pub fn gzflush(file: gzFile, flush: c_int) -> c_int;
    #[link_name = zng_prefix!(gzgetc)]
    pub fn gzgetc(file: gzFile) -> c_int;
    #[link_name = zng_prefix!(gzgets)]
    pub fn gzgets(file: gzFile, buf: *mut c_char, len: c_int) -> *mut c_char;
    #[link_name = zng_prefix!(gzopen)]
    pub fn gzopen(path: *const c_char, mode: *const c_char) -> gzFile;
    #[link_name = zng_prefix!(gzputc)]
    pub fn gzputc(file: gzFile, c: c_int) -> c_int;
    #[link_name = zng_prefix!(gzputs)]
    pub fn gzputs(file: gzFile, s: *const c_char) -> c_int;
    #[link_name = zng_prefix!(gzread)]
    pub fn gzread(file: gzFile, buf: voidp, len: c_uint) -> c_int;
    #[link_name = zng_prefix!(gzrewind)]
    pub fn gzrewind(file: gzFile) -> c_int;
    #[link_name = zng_prefix!(gzseek)]
    pub fn gzseek(file: gzFile, offset: z_off_t, whence: c_int) -> z_off_t;
    #[link_name = zng_prefix!(gzsetparams)]
    pub fn gzsetparams(file: gzFile, level: c_int, strategy: c_int) -> c_int;
    #[link_name = zng_prefix!(gztell)]
    pub fn gztell(file: gzFile) -> z_off_t;
    #[link_name = zng_prefix!(gzungetc)]
    pub fn gzungetc(c: c_int, file: gzFile) -> c_int;
    #[link_name = zng_prefix!(gzwrite)]
    pub fn gzwrite(file: gzFile, buf: voidpc, len: c_uint) -> c_int;
    #[link_name = zng_prefix!(uncompress)]
    pub fn uncompress(
        dest: *mut Bytef,
        destLen: *mut z_size,
        source: *const Bytef,
        sourceLen: z_size,
    ) -> c_int;
}
// Flush values accepted by `deflate`/`inflate`.
pub const Z_NO_FLUSH: c_int = 0;
pub const Z_PARTIAL_FLUSH: c_int = 1;
pub const Z_SYNC_FLUSH: c_int = 2;
pub const Z_FULL_FLUSH: c_int = 3;
pub const Z_FINISH: c_int = 4;
pub const Z_BLOCK: c_int = 5;
pub const Z_TREES: c_int = 6;
// Return codes: negative values are errors, positive are informational.
pub const Z_OK: c_int = 0;
pub const Z_STREAM_END: c_int = 1;
pub const Z_NEED_DICT: c_int = 2;
pub const Z_ERRNO: c_int = -1;
pub const Z_STREAM_ERROR: c_int = -2;
pub const Z_DATA_ERROR: c_int = -3;
pub const Z_MEM_ERROR: c_int = -4;
pub const Z_BUF_ERROR: c_int = -5;
pub const Z_VERSION_ERROR: c_int = -6;
// Compression levels (1..=9 plus "no compression" and the library default).
pub const Z_NO_COMPRESSION: c_int = 0;
pub const Z_BEST_SPEED: c_int = 1;
pub const Z_BEST_COMPRESSION: c_int = 9;
pub const Z_DEFAULT_COMPRESSION: c_int = -1;
// Compression strategies for `deflateInit2`/`deflateParams`.
pub const Z_FILTERED: c_int = 1;
pub const Z_HUFFMAN_ONLY: c_int = 2;
pub const Z_RLE: c_int = 3;
pub const Z_FIXED: c_int = 4;
pub const Z_DEFAULT_STRATEGY: c_int = 0;
// Possible values of `z_stream.data_type`.
pub const Z_BINARY: c_int = 0;
pub const Z_TEXT: c_int = 1;
pub const Z_ASCII: c_int = Z_TEXT;
pub const Z_UNKNOWN: c_int = 2;
// The only compression method supported by the deflate format.
pub const Z_DEFLATED: c_int = 8;
| rust | Apache-2.0 | fc9f6504e415a3d0787d9c84db9a043ad3f050d4 | 2026-01-04T20:23:59.805655Z | false |
rust-lang/libz-sys | https://github.com/rust-lang/libz-sys/blob/fc9f6504e415a3d0787d9c84db9a043ad3f050d4/systest/build.rs | systest/build.rs | use std::env;
// Build script for the ABI test crate: configures ctest2 to compare the Rust
// declarations in ../src/lib.rs against the C header of the selected backend.
fn main() {
    // Distinguish the zlib-ng test crate from the stock zlib one by package name.
    let zng = env::var("CARGO_PKG_NAME").unwrap() == "systest-zng";
    let mut cfg = ctest2::TestGenerator::new();
    cfg.define("WITH_GZFILEOP", Some("ON"));
    let (header, dep_include) = if zng {
        ("zlib-ng.h", "DEP_Z_NG_INCLUDE")
    } else {
        ("zlib.h", "DEP_Z_INCLUDE")
    };
    cfg.header(header);
    // Use the include dir exported by the corresponding -sys build script, if any.
    if let Some(s) = env::var_os(dep_include) {
        cfg.include(s);
    }
    if zng {
        println!("cargo:rustc-cfg=zng");
        // The link_name argument does not seem to get populated.
        // Map each Rust-side function name to its `zng_`-prefixed C symbol,
        // with zlibVersion as the lone special case.
        cfg.fn_cname(|rust, _| {
            if rust == "zlibVersion" {
                return "zlibng_version".to_string();
            }
            if rust.starts_with("zng_") {
                rust.to_string()
            } else {
                format!("zng_{}", rust)
            }
        });
        cfg.cfg("zng", None);
    }
    // Translate Rust type names to the C type names used by the chosen header.
    cfg.type_name(move |n, _, _| {
        if zng {
            if n == "gz_header" || n == "gz_headerp" {
                return format!("zng_{}", n);
            } else if n == "z_stream" {
                return "zng_stream".to_string();
            } else if n == "z_streamp" {
                return "zng_streamp".to_string();
            } else if n == "z_size" {
                return "size_t".to_string();
            } else if n == "z_checksum" {
                return "uint32_t".to_string();
            } else if n == "z_off_t" {
                return "z_off64_t".to_string();
            }
        } else {
            if n == "z_size" {
                return "unsigned long".to_string();
            } else if n == "z_checksum" {
                return "unsigned long".to_string();
            }
        }
        if n == "internal_state" {
            format!("struct {}", n)
        } else {
            n.to_string()
        }
    });
    // Pointer and function-pointer types have no meaningful signedness.
    cfg.skip_signededness(|ty| match ty {
        "gz_headerp" | "voidpf" | "voidcf" | "voidp" | "out_func" | "voidpc" | "gzFile"
        | "in_func" | "free_func" | "alloc_func" | "z_streamp" => true,
        _ => false,
    });
    // NOTE(review): these two field types are skipped from checking —
    // presumably because constness/mutability differs between the Rust and C
    // declarations; confirm against the header if this is ever revisited.
    cfg.skip_field_type(|s, field| s == "z_stream" && (field == "next_in" || field == "msg"));
    cfg.generate("../src/lib.rs", "all.rs");
}
| rust | Apache-2.0 | fc9f6504e415a3d0787d9c84db9a043ad3f050d4 | 2026-01-04T20:23:59.805655Z | false |
rust-lang/libz-sys | https://github.com/rust-lang/libz-sys/blob/fc9f6504e415a3d0787d9c84db9a043ad3f050d4/systest/src/main.rs | systest/src/main.rs | #![allow(bad_style, improper_ctypes)]
use libc::*;
#[cfg(not(zng))]
use libz_sys::*;
#[cfg(zng)]
use libz_ng_sys::*;
// Pull in the ctest2-generated ABI comparison tests produced by build.rs.
include!(concat!(env!("OUT_DIR"), "/all.rs"));
| rust | Apache-2.0 | fc9f6504e415a3d0787d9c84db9a043ad3f050d4 | 2026-01-04T20:23:59.805655Z | false |
rust-lang/libz-sys | https://github.com/rust-lang/libz-sys/blob/fc9f6504e415a3d0787d9c84db9a043ad3f050d4/zng/cc.rs | zng/cc.rs | use std::{
env, fs,
io::Write as _,
path::{Path, PathBuf},
};
/// Thin wrapper around `cc::Build` that records whether the detected
/// compiler is MSVC-like, so flags can be chosen per compiler family.
struct Build {
    cfg: cc::Build,
    is_msvc: bool,
}
impl Build {
    /// Wrap a `cc::Build`, probing the compiler once to detect MSVC.
    ///
    /// Panics if no C compiler can be detected.
    fn new(cfg: cc::Build) -> Self {
        let is_msvc = cfg.try_get_compiler().unwrap().is_like_msvc();
        Self { cfg, is_msvc }
    }
    /// Add `src/zlib-ng/{root}{file}.c` for every listed file stem.
    /// `root` is a subdirectory of the vendored zlib-ng sources and must not
    /// carry a trailing slash (it is appended here).
    fn append(&mut self, root: Option<&str>, files: &[&str]) {
        let root = root.map_or(String::new(), |s| {
            assert!(!s.ends_with('/'), "remove trailing slash");
            format!("{s}/")
        });
        // `.iter()` rather than `.into_iter()`: iterating a `&[&str]` by
        // reference is the same thing, and clippy flags `into_iter_on_ref`.
        self.cfg.files(
            files
                .iter()
                .map(|fname| format!("src/zlib-ng/{root}{fname}.c")),
        );
    }
    /// Add a compiler flag, choosing the MSVC or non-MSVC spelling for the
    /// detected compiler family. Passing `None` for the active family is a
    /// no-op (some flags only exist on one side).
    fn mflag(
        &mut self,
        non_msvc: impl Into<Option<&'static str>>,
        msvc: impl Into<Option<&'static str>>,
    ) {
        let Some(flag) = (if self.is_msvc {
            msvc.into()
        } else {
            non_msvc.into()
        }) else {
            return;
        };
        self.cfg.flag(flag);
    }
}
// Deref to the inner `cc::Build` so its configuration methods can be called
// directly on `Build` without hand-written forwarding shims.
impl std::ops::Deref for Build {
    type Target = cc::Build;
    fn deref(&self) -> &Self::Target {
        &self.cfg
    }
}
impl std::ops::DerefMut for Build {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.cfg
    }
}
/// Replicate the behavior of cmake/make/configure of stripping out the
/// @ZLIB_SYMBOL_PREFIX@ since we don't want or need it.
///
/// Copies `input` to `output` line by line, removing the first occurrence of
/// the `@ZLIB_SYMBOL_PREFIX@` placeholder on each line. When `get_version`
/// is true, also scans for the `#define ZLIBNG_VERSION "x.y.z"` line and
/// returns the quoted version string; otherwise returns an empty string.
///
/// Panics if the input cannot be read, the output cannot be written, or
/// (with `get_version`) no version define is found.
fn strip_symbol_prefix(input: &Path, output: &Path, get_version: bool) -> String {
    let contents = fs::read_to_string(input)
        .map_err(|err| format!("failed to read {input:?}: {err}"))
        .unwrap();
    let mut h =
        std::io::BufWriter::new(fs::File::create(output).expect("failed to create zlib include"));
    let mut version = None;
    for line in contents.lines() {
        // Drop the placeholder but keep the text on either side of it.
        if let Some((begin, end)) = line.split_once("@ZLIB_SYMBOL_PREFIX@") {
            h.write_all(begin.as_bytes()).unwrap();
            h.write_all(end.as_bytes()).unwrap();
        } else {
            h.write_all(line.as_bytes()).unwrap();
        }
        h.write_all(b"\n").unwrap();
        if get_version && line.contains("ZLIBNG_VERSION") && line.contains("#define") {
            version = Some(line.split('"').nth(1).unwrap().to_owned());
        }
    }
    // BufWriter only best-effort flushes on Drop and silently discards any
    // error there; flush explicitly so write failures are not ignored.
    h.flush().expect("failed to write stripped zlib include");
    if get_version {
        version.expect("failed to detect ZLIBNG_VERSION")
    } else {
        String::new()
    }
}
/// Compile the vendored zlib-ng sources with `cc`, enabling per-architecture
/// SIMD implementations, and emit the cargo metadata (link-search path,
/// include dir, pkg-config file, `zng` cfg) that dependents rely on.
///
/// `target` is the cargo target triple; `compat` builds the zlib-compatible
/// API (`ZLIB_COMPAT`) instead of the native `zng_`-prefixed API.
pub fn build_zlib_ng(target: &str, compat: bool) {
    let mut cfg = cc::Build::new();
    let dst = PathBuf::from(env::var_os("OUT_DIR").unwrap());
    let lib = dst.join("lib");
    cfg.warnings(false).out_dir(&lib);
    let mut cfg = Build::new(cfg);
    // Architecture-independent core sources.
    cfg.append(
        None,
        &[
            "adler32",
            "compress",
            "cpu_features",
            "crc32",
            "crc32_braid_comb",
            "deflate",
            "deflate_fast",
            "deflate_huff",
            "deflate_medium",
            "deflate_quick",
            "deflate_rle",
            "deflate_slow",
            "deflate_stored",
            "functable",
            // GZFILEOP
            "gzlib",
            "gzwrite",
            "infback",
            "inflate",
            "inftrees",
            "insert_string",
            "insert_string_roll",
            "trees",
            "uncompr",
            "zutil",
        ],
    );
    // Portable C fallbacks for the SIMD-accelerated routines.
    cfg.append(
        Some("arch/generic"),
        &[
            "adler32_c",
            "adler32_fold_c",
            "chunkset_c",
            "compare256_c",
            "crc32_braid_c",
            "crc32_fold_c",
            "slide_hash_c",
        ],
    );
    if compat {
        cfg.define("ZLIB_COMPAT", None);
    }
    cfg.define("WITH_GZFILEOP", None);
    // gzread.c is generated from a template with the symbol-prefix
    // placeholder stripped; place the result under OUT_DIR.
    {
        let mut build = dst.join("build");
        fs::create_dir_all(&build).unwrap();
        build.push("gzread.c");
        strip_symbol_prefix(Path::new("src/zlib-ng/gzread.c.in"), &build, false);
        cfg.file(build);
    }
    let msvc = target.ends_with("pc-windows-msvc");
    cfg.std("c11");
    // This can be made configurable if it is an issue but most of these would
    // only fail if the user was on a decade old+ libc impl
    if !msvc {
        cfg.define("HAVE_ALIGNED_ALLOC", None)
            .define("HAVE_ATTRIBUTE_ALIGNED", None)
            .define("HAVE_BUILTIN_CTZ", None)
            .define("HAVE_BUILTIN_CTZLL", None)
            .define("HAVE_THREAD_LOCAL", None)
            .define("HAVE_VISIBILITY_HIDDEN", None)
            .define("HAVE_VISIBILITY_INTERNAL", None)
            .define("_LARGEFILE64_SOURCE", "1")
            .define("__USE_LARGEFILE64", None);
        // Turn implicit functions into errors, this would indicate eg. a
        // define is not set
        cfg.flag("-Werror-implicit-function-declaration");
    }
    if !target.contains("windows") {
        cfg.define("STDC", None)
            .define("_POSIX_SOURCE", None)
            .define("HAVE_POSIX_MEMALIGN", None)
            .flag("-fvisibility=hidden");
    }
    let is_apple = target.contains("apple");
    if is_apple {
        cfg.define("_C99_SOURCE", None);
    } else if target.contains("solaris") {
        cfg.define("_XOPEN_SOURCE", "700");
    }
    let target_os = env::var("CARGO_CFG_TARGET_OS").unwrap();
    let arch = env::var("CARGO_CFG_TARGET_ARCH").expect("failed to retrieve target arch");
    let features = env::var("CARGO_CFG_TARGET_FEATURE").unwrap();
    let is_linux_or_android = matches!(target_os.as_str(), "linux" | "android");
    if is_linux_or_android {
        cfg.define("HAVE_SYS_AUXV_H", None);
    }
    // Per-architecture SIMD sources and the compiler flags they need. The
    // compiled code still dispatches at runtime via zlib-ng's functable.
    match arch.as_str() {
        "x86_64" | "i686" => {
            cfg.define("X86_FEATURES", None);
            cfg.file("src/zlib-ng/arch/x86/x86_features.c");
            let is_64 = arch.as_str() == "x86_64";
            // AVX2
            cfg.define("X86_AVX2", None);
            cfg.append(
                Some("arch/x86"),
                &[
                    "chunkset_avx2",
                    "compare256_avx2",
                    "adler32_avx2",
                    "slide_hash_avx2",
                ],
            );
            cfg.mflag("-mavx2", "/arch:AVX2");
            // SSE2
            cfg.define("X86_SSE2", None);
            cfg.append(
                Some("arch/x86"),
                &["chunkset_sse2", "compare256_sse2", "slide_hash_sse2"],
            );
            cfg.mflag("-msse2", (!is_64).then_some("/arch:SSE2"));
            // SSE3
            cfg.define("X86_SSSE3", None);
            cfg.append(Some("arch/x86"), &["adler32_ssse3", "chunkset_ssse3"]);
            cfg.mflag("-msse3", "/arch:SSE3");
            // SSE4.2
            cfg.define("X86_SSE42", None);
            cfg.append(Some("arch/x86"), &["adler32_sse42"]);
            cfg.mflag("-msse4.2", "/arch:SSE4.2");
            // AVX-512
            {
                for def in &[
                    "X86_AVX512",
                    "X86_MASK_INTRIN",
                    "X86_AVX512VNNI",
                    "X86_VPCLMULQDQ_CRC",
                ] {
                    cfg.define(def, None);
                }
                cfg.append(
                    Some("arch/x86"),
                    &["adler32_avx512", "adler32_avx512_vnni", "crc32_vpclmulqdq"],
                );
                if cfg.is_msvc {
                    cfg.flag("/arch:AVX512");
                } else {
                    // The zlib-ng cmake scripts to check target features claim that GCC doesn't
                    // generate good code unless mtune is set, not sure if this is still the
                    // case, but we faithfully replicate it just in case
                    for flag in &[
                        "-mavx512f",
                        "-mavx512dq",
                        "-mavx512bw",
                        "-mavx512vl",
                        "-mavx512vnni",
                        "-mvpclmulqdq",
                        "-mtune=cascadelake",
                    ] {
                        cfg.flag(flag);
                    }
                }
            }
            // Misc
            cfg.define("X86_PCLMULQDQ_CRC", None);
            cfg.append(Some("arch/x86"), &["crc32_pclmulqdq"]);
            cfg.mflag("-mpclmul", None);
            cfg.mflag("-mxsave", None);
        }
        "aarch64" | "arm" => {
            let is_aarch64 = arch == "aarch64";
            cfg.define("ARM_FEATURES", None);
            cfg.file("src/zlib-ng/arch/arm/arm_features.c");
            // Support runtime detection on linux/android
            if is_linux_or_android {
                cfg.define("ARM_AUXV_HAS_CRC32", None);
                if !is_aarch64 {
                    cfg.define("ARM_AUXV_HAS_NEON", None);
                }
            }
            // According to the cmake macro, MSVC is missing the crc32 intrinsic
            // for arm, don't know if that is still true though
            if !cfg.is_msvc || is_aarch64 {
                cfg.define("ARM_ACLE", None).define("HAVE_ARM_ACLE_H", None);
                cfg.append(Some("arch/arm"), &["crc32_acle"]);
                // When targeting aarch64 we already need to specify +simd, so
                // we do that once later in this block
                if !is_aarch64 {
                    cfg.mflag("-march=armv8-a+crc", None);
                    cfg.define("ARM_ASM_HWCAP", None);
                }
            }
            // neon
            // Fix armv7-unknown-linux-musleabi and arm-unknown-linux-musleabi by only
            // passing in ARM_NEON if that target is enabled.
            // Disable for apple targets due to https://github.com/rust-lang/libz-sys/issues/230
            if !is_apple && features.split(",").any(|name| name == "neon") {
                cfg.define("ARM_NEON", None);
            }
            // NOTE: These intrinsics were only added in gcc 9.4, which is _relatively_
            // recent, and if the define is not set zlib-ng just provides its
            // own implements, so maybe in a couple of years this can be toggled on
            // if building with cc is merged it makes sense to put compiler intrinsic/header
            // probing in a separate crate that can then be used here to enable
            // those intrinsics if the compiler supports them
            // * vld1q_u16_x4
            // * vld1q_u8_x4
            // * vst1q_u16_x4
            // cfg.define("ARM_NEON_HASLD4", None)
            if cfg.is_msvc {
                cfg.define("__ARM_NEON__", None);
            }
            cfg.append(
                Some("arch/arm"),
                &[
                    "adler32_neon",
                    "chunkset_neon",
                    "compare256_neon",
                    "slide_hash_neon",
                ],
            );
            cfg.mflag(
                if is_aarch64 {
                    "-march=armv8-a+crc+simd"
                } else {
                    "-mfpu=neon"
                },
                None,
            );
        }
        "s390x" => {
            for def in &[
                "S390_FEATURES",
                "S390_DFLTCC_DEFLATE",
                "S390_DFLTCC_INFLATE",
                "S390_CRC32_VX",
            ] {
                cfg.define(def, None);
            }
            cfg.flag("-DDFLTCC_LEVEL_MASK=0x7e");
            cfg.append(
                Some("arch/s390"),
                &[
                    "crc32-vx",
                    "dfltcc_common",
                    "dfltcc_deflate",
                    "dfltcc_inflate",
                    "s390_features",
                ],
            );
        }
        _ => {
            // NOTE: PowerPC and Riscv
            // zlib-ng can use intrinsics for both of these targets, however neither
            // of them are currently checked in CI, they will still work without
            // using the intrinsics, they will just be slower
            // PowerPC - <github issue here>
            // Riscv - <github issue here>
        }
    }
    // Generate the public headers (with the symbol-prefix placeholder
    // stripped) into OUT_DIR/include.
    let include = dst.join("include");
    fs::create_dir_all(&include).unwrap();
    let (zconf_h, zlib_h, mangle) = if compat {
        ("zconf.h", "zlib.h", "zlib_name_mangling.h")
    } else {
        fs::copy("src/zlib-ng/zconf-ng.h.in", include.join("zconf-ng.h")).unwrap();
        ("zconf-ng.h", "zlib-ng.h", "zlib_name_mangling-ng.h")
    };
    if msvc {
        fs::copy(format!("src/zlib-ng/{zconf_h}.in"), include.join(zconf_h)).unwrap();
    } else {
        // If we don't do this then _some_ 32-bit targets will have an incorrect
        // size for off_t if they don't _also_ define `HAVE_UNISTD_H`, so we
        // copy configure/cmake here
        let new_zconf = fs::read_to_string(format!("src/zlib-ng/{zconf_h}.in"))
            .expect("failed to read zconf.h.in")
            .replace(
                "#ifdef HAVE_UNISTD_H /* may be set to #if 1 by configure/cmake/etc */",
                &format!(
                    "#if 1 /* was set to #if 1 by {}:{}:{} */",
                    file!(),
                    line!(),
                    column!()
                ),
            );
        fs::write(include.join(zconf_h), new_zconf).unwrap();
    }
    fs::copy(
        "src/zlib-ng/zlib_name_mangling.h.empty",
        include.join(mangle),
    )
    .unwrap();
    let version = strip_symbol_prefix(
        Path::new(&format!("src/zlib-ng/{zlib_h}.in")),
        &include.join(zlib_h),
        true,
    );
    cfg.include(&include).include("src/zlib-ng");
    // On failure, report the compiler version to make bug reports actionable.
    if let Err(err) = cfg.try_compile("z") {
        let version = if !cfg.is_msvc {
            match std::process::Command::new(cfg.get_compiler().path())
                .arg("--version")
                .output()
            {
                Ok(output) => String::from_utf8_lossy(&output.stdout).into_owned(),
                Err(_err) => "unknown".into(),
            }
        } else {
            "msvc".into()
        };
        eprintln!("{err}");
        panic!(
            "failed to compile zlib-ng with cc: detected compiler version as \n---\n{}---",
            version
        );
    }
    // Emit a pkg-config file so downstream non-cargo consumers can locate us.
    fs::create_dir_all(lib.join("pkgconfig")).unwrap();
    fs::write(
        lib.join("pkgconfig/zlib.pc"),
        fs::read_to_string("src/zlib-ng/zlib.pc.in")
            .unwrap()
            .replace("@prefix@", dst.to_str().unwrap())
            .replace("@includedir@", "${prefix}/include")
            .replace("@libdir@", "${prefix}/lib")
            .replace("@VERSION@", &version),
    )
    .unwrap();
    println!("cargo:root={}", dst.display());
    println!("cargo:rustc-link-search=native={}", lib.display());
    println!("cargo:include={}", include.display());
    if !compat {
        println!("cargo:rustc-cfg=zng");
    }
}
#[allow(dead_code)]
fn main() {
    // Build zlib-ng with its native (non zlib-compat) API. Cargo always
    // sets TARGET for build scripts, so the unwrap cannot fail under cargo.
    build_zlib_ng(&env::var("TARGET").unwrap(), false);
}
| rust | Apache-2.0 | fc9f6504e415a3d0787d9c84db9a043ad3f050d4 | 2026-01-04T20:23:59.805655Z | false |
rust-lang/libz-sys | https://github.com/rust-lang/libz-sys/blob/fc9f6504e415a3d0787d9c84db9a043ad3f050d4/zng/cmake.rs | zng/cmake.rs | use std::env;
/// Build the vendored zlib-ng sources with CMake and emit the cargo metadata
/// (link-search path, static-lib name, include dir, `zng` cfg) for dependents.
///
/// `target` is the cargo target triple; `compat` builds the zlib-compatible
/// API (`ZLIB_COMPAT=ON`) instead of the native `zng_`-prefixed API.
pub fn build_zlib_ng(target: &str, compat: bool) {
    let mut cmake = cmake::Config::new("src/zlib-ng");
    cmake
        .define("BUILD_SHARED_LIBS", "OFF")
        .define("ZLIB_COMPAT", if compat { "ON" } else { "OFF" })
        .define("ZLIB_ENABLE_TESTS", "OFF")
        .define("WITH_GZFILEOP", "ON");
    if target.contains("s390x") {
        // Enable hardware compression on s390x.
        cmake
            .define("WITH_DFLTCC_DEFLATE", "1")
            .define("WITH_DFLTCC_INFLATE", "1")
            .cflag("-DDFLTCC_LEVEL_MASK=0x7e");
    }
    if target.contains("riscv") {
        // Check if we should pass on an explicit boolean value of the WITH_RVV build option.
        // See: https://github.com/zlib-ng/zlib-ng?tab=readme-ov-file#advanced-build-options
        if let Ok(value) = env::var("RISCV_WITH_RVV") {
            match value.trim().to_uppercase().as_str() {
                "OFF" | "NO" | "FALSE" | "0" => {
                    // Force RVV off. This turns off RVV entirely, as well as the runtime check for it.
                    // This is not usually necessary, but can be useful for building binaries portable
                    // to systems that do not support RVV but where auto-detection fails to identify
                    // this (as in https://github.com/zlib-ng/zlib-ng/issues/1705).
                    cmake.define("WITH_RVV", "OFF");
                }
                "ON" | "YES" | "TRUE" | "1" => {
                    // Try to use RVV, but still don't do so if a runtime check finds it unavailable.
                    // This has the same effect as omitting WITH_RVV, unless it has already been set.
                    cmake.define("WITH_RVV", "ON");
                }
                _ => {}
            }
        }
    }
    if target == "i686-pc-windows-msvc" {
        cmake.define("CMAKE_GENERATOR_PLATFORM", "Win32");
    }
    // libz-ng uses the GNUInstallDirs convention, so we can use the following
    // to ensure libraries are placed in a consistent place in the
    // installation dir.
    cmake.define("CMAKE_INSTALL_LIBDIR", "lib");
    let install_dir = cmake.build();
    let includedir = install_dir.join("include");
    let libdir = install_dir.join("lib");
    println!(
        "cargo:rustc-link-search=native={}",
        libdir.to_str().unwrap()
    );
    // MSVC static builds are named `zlibstatic[d].lib` (with a debug suffix
    // in unoptimized builds); everything else produces `libz[-ng].a`.
    let mut debug_suffix = "";
    let libname = if target.contains("windows") && target.contains("msvc") {
        if env::var("OPT_LEVEL").unwrap() == "0" {
            debug_suffix = "d";
        }
        "zlibstatic"
    } else {
        "z"
    };
    println!(
        "cargo:rustc-link-lib=static={}{}{}",
        libname,
        if compat { "" } else { "-ng" },
        debug_suffix,
    );
    println!("cargo:root={}", install_dir.to_str().unwrap());
    println!("cargo:include={}", includedir.to_str().unwrap());
    if !compat {
        println!("cargo:rustc-cfg=zng");
    }
}
#[allow(dead_code)]
fn main() {
    // Build zlib-ng with its native (non zlib-compat) API. Cargo always
    // sets TARGET for build scripts, so the unwrap cannot fail under cargo.
    build_zlib_ng(&env::var("TARGET").unwrap(), false);
}
| rust | Apache-2.0 | fc9f6504e415a3d0787d9c84db9a043ad3f050d4 | 2026-01-04T20:23:59.805655Z | false |
brycx/checkpwn | https://github.com/brycx/checkpwn/blob/0fde0d153d61b8d9f34685372c4faa1dc91ed015/src/config.rs | src/config.rs | use dirs_next::config_dir;
use serde::{Deserialize, Serialize};
use std::{fs, io::Write, path::PathBuf};
// Configuration file name and its directory inside the platform-specific
// configuration root (e.g. `~/.config/checkpwn/checkpwn.yml` on Linux).
const CHECKPWN_CONFIG_FILE_NAME: &str = "checkpwn.yml";
const CHECKPWN_CONFIG_DIR: &str = "checkpwn";
/// Client configuration persisted as YAML; currently just the API key.
#[derive(Serialize, Deserialize, Debug)]
pub struct Config {
    pub api_key: String,
}
/// Resolved filesystem location of the configuration file.
#[derive(Debug)]
pub struct ConfigPaths {
    pub config_file_path: PathBuf,
}
impl Config {
    /// Create a configuration with an empty API key.
    pub fn new() -> Config {
        Config {
            api_key: String::new(),
        }
    }
    /// Path to `checkpwn.yml` inside the platform configuration directory,
    /// or `None` if that directory cannot be determined.
    pub fn get_config_path(&self) -> Option<ConfigPaths> {
        config_dir().map(|mut dir| {
            dir.push(CHECKPWN_CONFIG_DIR);
            dir.push(CHECKPWN_CONFIG_FILE_NAME);
            ConfigPaths {
                config_file_path: dir,
            }
        })
    }
    /// Ensure the directory that will hold the configuration file exists.
    fn build_path(&self) -> Result<(), Box<dyn std::error::Error>> {
        let mut path = self
            .get_config_path()
            .expect("Failed to determine configuration file path.");
        path.config_file_path.pop(); //remove the filename so we don't accidentally create it as a directory
        fs::create_dir_all(&path.config_file_path)?;
        Ok(())
    }
    /// Read and parse the on-disk YAML configuration into `self.api_key`.
    /// Shared by both the debug and release variants of `load_config`.
    fn load_config_from_file(&mut self) -> Result<(), Box<dyn std::error::Error>> {
        let path = self
            .get_config_path()
            .expect("Failed to determine configuration file path.");
        let config_string = fs::read_to_string(&path.config_file_path)?;
        let config_yml: Config = serde_yaml::from_str(&config_string)?;
        self.api_key = config_yml.api_key;
        Ok(())
    }
    /// Load the API key. In debug builds, the `API_KEY` environment variable
    /// (used by CI) takes precedence over the configuration file.
    #[cfg(debug_assertions)]
    pub fn load_config(&mut self) -> Result<(), Box<dyn std::error::Error>> {
        // If in CI, the key is in env. Local tests use the config file.
        match std::env::var("API_KEY") {
            Ok(api_key) => {
                self.api_key = api_key;
                Ok(())
            }
            Err(std::env::VarError::NotPresent) => self.load_config_from_file(),
            _ => panic!("CI API KEY WAS NOT UTF8"),
        }
    }
    /// Load the API key from the configuration file.
    #[cfg(not(debug_assertions))]
    pub fn load_config(&mut self) -> Result<(), Box<dyn std::error::Error>> {
        self.load_config_from_file()
    }
    /// Persist `api_key` to the configuration file, creating the enclosing
    /// directory first if needed.
    pub fn save_config(&self, api_key: &str) -> Result<(), Box<dyn std::error::Error>> {
        let path: ConfigPaths = self
            .get_config_path()
            .expect("Failed to determine configuration file path.");
        self.build_path()?;
        let new_config = Config {
            api_key: api_key.to_string(),
        };
        let config_to_write = serde_yaml::to_string(&new_config)?;
        let mut config_file = fs::File::create(&path.config_file_path)?;
        config_file.write_all(config_to_write.as_bytes())?;
        Ok(())
    }
}
| rust | MIT | 0fde0d153d61b8d9f34685372c4faa1dc91ed015 | 2026-01-04T20:24:03.194273Z | false |
brycx/checkpwn | https://github.com/brycx/checkpwn/blob/0fde0d153d61b8d9f34685372c4faa1dc91ed015/src/errors.rs | src/errors.rs | // MIT License
// Copyright (c) 2018-2022 brycx
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
/// All the different error messages for checkpwn;
/// errors that are meant to be internal or unreachable print this usage line.
pub const USAGE_ERROR: &str =
    "Usage: checkpwn { pass | acc (<username> | <email> | <filename>.ls) | register <apikey> }";
// Messages for local-file handling failures and for a missing API key.
pub const READ_FILE_ERROR: &str = "Error reading local file";
pub const BUFREADER_ERROR: &str = "Failed to read file in to BufReader";
pub const READLINE_ERROR: &str = "Failed to read line from file";
pub const MISSING_API_KEY: &str = "Failed to read or parse the configuration file 'checkpwn.yml'. You need to register an API key to be able to check accounts";
/// Set panic hook, to have .unwrap(), etc, return the custom panic message.
///
/// Replaces the process-wide panic hook with one that prints `$x` (plus a
/// bug-report hint) instead of the default panic output; the actual panic
/// payload is discarded. Callers must have `std::panic` in scope.
macro_rules! set_checkpwn_panic {
    ($x:expr) => {
        // Set new hook with custom message
        panic::set_hook(Box::new(|_| {
            println!(
                "\nThe following error was encountered: {:?}\n\
                \nIf you think this is a bug, please report it in the project repository.",
                $x
            );
        }));
    };
}
| rust | MIT | 0fde0d153d61b8d9f34685372c4faa1dc91ed015 | 2026-01-04T20:24:03.194273Z | false |
brycx/checkpwn | https://github.com/brycx/checkpwn/blob/0fde0d153d61b8d9f34685372c4faa1dc91ed015/src/main.rs | src/main.rs | // MIT License
// Copyright (c) 2018-2022 brycx
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
#![forbid(unsafe_code)]
#![deny(clippy::mem_forget)]
#![warn(
rust_2018_idioms,
trivial_casts,
unused_qualifications,
overflowing_literals
)]
mod config;
#[macro_use]
mod errors;
use anyhow::Result;
use checkpwn_lib::Password;
use colored::Colorize;
use std::fs::File;
use std::io::{BufReader, Error};
#[cfg(test)]
use assert_cmd::prelude::*;
use std::env;
use std::io::{stdin, BufRead};
use std::panic;
#[cfg(test)]
use std::process::Command;
use zeroize::Zeroize;
/// CLI entry point.
///
/// Dispatches on the first argument:
/// - `acc <account|file.ls>`: check an account (or a local list file) against HIBP.
/// - `pass`: prompt for a password (no echo) and check it against HIBP.
/// - `register <api-key>`: save (or, after confirmation, overwrite) the API key config.
///
/// Argument-count violations and unknown subcommands panic; the custom panic
/// hook turns that into the `USAGE_ERROR` message.
fn main() -> Result<()> {
    // Set custom usage panic message
    set_checkpwn_panic!(errors::USAGE_ERROR);
    // A subcommand is required and at most one extra argument is accepted.
    assert!(env::args().len() >= 2);
    assert!(env::args().len() < 4);

    let mut argvs: Vec<String> = env::args().collect();

    match argvs[1].to_lowercase().as_str() {
        "acc" => {
            assert_eq!(argvs.len(), 3);
            acc_check(&argvs[2])?;
        }
        "pass" => {
            assert_eq!(argvs.len(), 2);
            // Password is read without echo; hashing is handled by checkpwn_lib.
            let hashed_password = Password::new(&rpassword::prompt_password("Password: ")?)?;
            let is_breached = checkpwn_lib::check_password(&hashed_password)?;
            breach_report(is_breached, "", true);
        }
        "register" => {
            assert_eq!(argvs.len(), 3);
            let configuration = config::Config::new();
            let config_path = configuration
                .get_config_path()
                .expect("Failed to determine configuration file path.");
            if !config_path.config_file_path.exists() {
                match configuration.save_config(&argvs[2]) {
                    Ok(()) => println!("Successfully saved client configuration."),
                    Err(e) => panic!("Encountered error saving client configuration: {}", e),
                }
            } else {
                // A config already exists: ask before clobbering it.
                println!(
                    "A configuration file already exists. Do you want to overwrite it? [y/n]: "
                );
                let mut overwrite_choice = String::new();
                stdin().read_line(&mut overwrite_choice)?;
                overwrite_choice = overwrite_choice.to_lowercase();
                match overwrite_choice.trim() {
                    "y" => match configuration.save_config(&argvs[2]) {
                        Ok(()) => println!("Successfully saved new client configuration."),
                        Err(e) => panic!("Encountered error saving client configuration: {}", e),
                    },
                    "n" => println!("Configuration unchanged. Exiting client."),
                    _ => panic!("Invalid choice. Please enter 'y' for 'yes' or 'n' for 'no'."),
                }
            }
        }
        // Unknown subcommand: the panic hook prints the usage message.
        _ => panic!(),
    };
    // Zero out the collected arguments, in case the user accidentally inputs sensitive info
    argvs.iter_mut().zeroize();

    Ok(())
}
/// Print a breach report for the given search term to the terminal.
///
/// When `is_password` is set the term is masked so the password never
/// appears on screen.
fn breach_report(breached: bool, searchterm: &str, is_password: bool) {
    // Do not display password in terminal
    let shown_term = if is_password { "********" } else { searchterm };
    let verdict = if breached {
        "BREACH FOUND".red()
    } else {
        "NO BREACH FOUND".green()
    };
    println!("Breach status for {}: {}", shown_term.cyan(), verdict);
}
/// Read file into buffer.
///
/// The custom panic hook is installed first so a failed `File::open`
/// surfaces the `READ_FILE_ERROR` message rather than a raw panic; the
/// `unwrap()` below is therefore deliberate.
fn read_file(path: &str) -> Result<BufReader<File>, Error> {
    set_checkpwn_panic!(errors::READ_FILE_ERROR);
    let file_path = File::open(path).unwrap();
    Ok(BufReader::new(file_path))
}
/// Strip all newlines, spaces, tabs and single quotes from a given string.
///
/// Single pass over the characters instead of four chained `replace` calls,
/// which each allocated an intermediate `String`. The set of removed
/// characters (`\n`, ` `, `'`, `\t`) is unchanged.
fn strip(string: &str) -> String {
    string
        .chars()
        .filter(|c| !matches!(c, '\n' | ' ' | '\'' | '\t'))
        .collect()
}
/// HIBP breach request used for `acc` arguments.
///
/// Queries HIBP for `searchterm` with the given `api_key` and prints the
/// verdict via `breach_report` (never masked, since this is an account).
fn acc_breach_request(searchterm: &str, api_key: &str) -> Result<(), checkpwn_lib::CheckpwnError> {
    let is_breached = checkpwn_lib::check_account(searchterm, api_key)?;
    breach_report(is_breached, searchterm, false);
    Ok(())
}
/// Check a single account for breaches — or, if `data_search` ends with
/// `.ls`, every non-empty (post-`strip`) line of that local file.
///
/// Requires a registered API key; a missing/unreadable config panics with
/// the `MISSING_API_KEY` message via the custom hook.
fn acc_check(data_search: &str) -> Result<(), checkpwn_lib::CheckpwnError> {
    // NOTE: checkpwn_lib handles any sleeping so we don't exceed the rate limit.
    set_checkpwn_panic!(errors::MISSING_API_KEY);
    let mut config = config::Config::new();
    // Deliberate unwrap: the hook above provides the user-facing error.
    config.load_config().unwrap();
    // Check if user wants to check a local list
    if data_search.ends_with(".ls") {
        set_checkpwn_panic!(errors::BUFREADER_ERROR);
        let file = read_file(data_search).unwrap();
        for line_iter in file.lines() {
            set_checkpwn_panic!(errors::READLINE_ERROR);
            // Normalize each entry before querying (drops whitespace/quotes).
            let line = strip(&line_iter.unwrap());
            if line.is_empty() {
                continue;
            }
            acc_breach_request(&line, &config.api_key)?;
        }
    } else {
        acc_breach_request(data_search, &config.api_key)?;
    }
    Ok(())
}
/// `strip` removes newlines and spaces while keeping all other characters.
#[test]
fn test_strip_white_new() {
    let cases = [
        ("fkljjsdjlksfdklj dfiwj wefwefwfe", "fkljjsdjlksfdkljdfiwjwefwefwfe"),
        ("derbrererer\n", "derbrererer"),
        ("dee\nwfweww rb tte rererer\n", "deewfwewwrbtterererer"),
    ];
    for (input, expected) in cases {
        assert_eq!(&strip(input), expected);
    }
}
/// `acc` run against a known-breached account reports a breach.
/// NOTE(review): spawns `cargo run` and hits the live HIBP API — requires
/// network access and a registered API key to pass.
#[test]
fn test_cli_acc_breach() {
    let res = Command::new("cargo")
        .args(&["run", "acc", "test@example.com"])
        .unwrap();
    assert!(String::from_utf8_lossy(&res.stdout).contains("BREACH FOUND"));
}
/// `acc` run against a randomly generated e-mail reports no breach.
/// NOTE(review): network-dependent like the other CLI tests.
#[test]
fn test_cli_acc_no_breach() {
    use rand::prelude::*;
    let mut rng = thread_rng();
    let mut email_user: [char; 8] = ['a'; 8];
    let mut email_domain: [char; 8] = ['a'; 8];
    rng.fill(&mut email_user);
    rng.fill(&mut email_domain);
    // NOTE(review): `{:?}` wraps each part in literal quote characters, so the
    // generated address contains `"` — presumably still a never-breached term.
    let rnd_email = format!(
        "{:?}@{:?}.com",
        email_user.iter().collect::<String>(),
        email_domain.iter().collect::<String>()
    );
    let res = Command::new("cargo")
        .args(&["run", "acc", &rnd_email])
        .unwrap();
    assert!(
        String::from_utf8_lossy(&res.stdout).contains("NO BREACH FOUND"),
        "Found breach for {:?}",
        rnd_email
    );
}
/// An unknown subcommand must fail.
/// NOTE(review): the panic presumably comes from assert_cmd's `.unwrap()` on
/// the unsuccessful child process — confirm against assert_cmd semantics.
#[test]
#[should_panic]
fn test_cli_arg_fail() {
    Command::new("cargo")
        .args(&["run", "wrong", "test@example.com"])
        .unwrap()
        .assert()
        .failure();
}
/// Running with no subcommand at all must fail (usage requires >= 2 args).
#[test]
#[should_panic]
fn test_cli_arg_fail_2() {
    Command::new("cargo")
        .args(&["run"])
        .unwrap()
        .assert()
        .failure();
}
/// Too many arguments (4 total) must fail (usage requires < 4 args).
#[test]
#[should_panic]
fn test_cli_arg_fail_3() {
    Command::new("cargo")
        .args(&["run", "wrong", "test@example.com", "too much"])
        .unwrap()
        .assert()
        .failure();
}
/// A well-formed `acc` invocation exits successfully.
/// NOTE(review): network-dependent like the other CLI tests.
#[test]
fn test_cli_arg_ok() {
    Command::new("cargo")
        .args(&["run", "acc", "test@example.com"])
        .unwrap()
        .assert()
        .success();
}
| rust | MIT | 0fde0d153d61b8d9f34685372c4faa1dc91ed015 | 2026-01-04T20:24:03.194273Z | false |
luqmana/wireguard-uwp-rs | https://github.com/luqmana/wireguard-uwp-rs/blob/328e622fb613d611bb022874a6535e2846ac6640/plugin/src/config.rs | plugin/src/config.rs | //! Config parsing.
use std::net::IpAddr;
use boringtun::crypto::x25519::{X25519PublicKey, X25519SecretKey};
use ipnetwork::IpNetwork;
use serde::Deserialize;
use serde_with::{serde_as, DisplayFromStr};
/// A fully-parsed config
///
/// Deserialized (via serde) from the XML stored in the VPN profile's custom
/// field; see [`WireGuardConfig::from_str`]. Element names are PascalCase.
#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct WireGuardConfig {
    /// Local interface configuration
    pub interface: InterfaceConfig,
    /// Remote peer configuration
    pub peer: PeerConfig,
}
impl WireGuardConfig {
    /// Parse the config from the given string or return an error.
    ///
    /// The input is expected to be XML; parsing is delegated to `quick_xml`.
    pub fn from_str(s: &str) -> Result<WireGuardConfig, quick_xml::DeError> {
        quick_xml::de::from_str(s)
    }
}
/// Local VPN interface specific configuration
#[serde_as]
#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct InterfaceConfig {
    /// Our local private key
    // Parsed from its textual representation via `FromStr` (`DisplayFromStr`).
    #[serde_as(as = "DisplayFromStr")]
    pub private_key: X25519SecretKey,
    /// Addresses to assign to local VPN interface
    pub address: Vec<IpNetwork>,
    /// DNS servers
    // Optional in the XML; defaults to an empty list.
    #[serde(default)]
    #[serde(rename = "DNS")]
    pub dns_servers: Vec<IpAddr>,
    /// DNS Search Domains
    // Optional in the XML; defaults to an empty list.
    #[serde(default)]
    #[serde(rename = "DNSSearch")]
    pub search_domains: Vec<String>,
}
/// Remote peer specific configuration
#[serde_as]
#[derive(Deserialize)]
#[serde(rename_all = "PascalCase")]
pub struct PeerConfig {
    /// The remote endpoint's public key
    // Parsed from its textual representation via `FromStr` (`DisplayFromStr`).
    #[serde_as(as = "DisplayFromStr")]
    pub public_key: X25519PublicKey,
    /// The port the remote endpoint is listening
    pub port: u16,
    /// The list of addresses that will get routed to the remote endpoint
    #[serde(rename = "AllowedIPs")]
    pub allowed_ips: Vec<IpNetwork>,
    /// The list of addresses that won't get routed to the remote endpoint
    #[serde(default)]
    #[serde(rename = "ExcludedIPs")]
    pub excluded_ips: Vec<IpNetwork>,
    /// The interval at which to send KeepAlive packets.
    pub persistent_keepalive: Option<u16>,
    /// An optional pre-shared key to enable an additional layer of security
    // Decoded from base64 into exactly 32 raw bytes by `from_base64`.
    #[serde(default)]
    #[serde(deserialize_with = "from_base64")]
    pub preshared_key: Option<[u8; 32]>,
}
/// Try to parse the base64 encoded pre-shared key from the config
/// into the raw bytes it represents.
fn from_base64<'de, D>(deserializer: D) -> Result<Option<[u8; 32]>, D::Error>
where
D: serde::Deserializer<'de>,
{
use serde::de::Error;
match Option::<String>::deserialize(deserializer) {
Ok(s) => match s {
Some(s) => match base64::decode(&s) {
Ok(b) => match b.try_into() {
Ok(b) => Ok(Some(b)),
Err(_) => Err(Error::custom("invalid pre-shared key")),
},
Err(e) => Err(Error::custom(e.to_string())),
},
None => Ok(None),
},
Err(e) => Err(e),
}
}
| rust | Apache-2.0 | 328e622fb613d611bb022874a6535e2846ac6640 | 2026-01-04T20:24:02.148210Z | false |
luqmana/wireguard-uwp-rs | https://github.com/luqmana/wireguard-uwp-rs/blob/328e622fb613d611bb022874a6535e2846ac6640/plugin/src/lib.rs | plugin/src/lib.rs | //! This crate contains the `IVpnPlugIn` implementation for our UWP VPN plugin app.
#![windows_subsystem = "windows"]
#![allow(non_snake_case)] // Windows naming conventions
mod background;
mod config;
mod logging;
mod plugin;
mod utils;
| rust | Apache-2.0 | 328e622fb613d611bb022874a6535e2846ac6640 | 2026-01-04T20:24:02.148210Z | false |
luqmana/wireguard-uwp-rs | https://github.com/luqmana/wireguard-uwp-rs/blob/328e622fb613d611bb022874a6535e2846ac6640/plugin/src/utils.rs | plugin/src/utils.rs | //! Utilities and helper types that don't quite fit anywhere else.
use std::sync::atomic::{AtomicU32, Ordering};
use windows::{
self as Windows,
core::*,
Foundation::Collections::{IIterable, IIterator, IVector, IVectorView},
Networking::Vpn::VpnPacketBuffer,
Win32::Foundation::{E_BOUNDS, E_NOTIMPL},
Win32::System::WinRT::IBufferByteAccess,
};
/// A simple wrapper around `Vec` which implements the `IVector`, `IVectorView` and
/// `IIterable` interfaces.
///
/// Only the read side is supported: every mutating `IVector` method returns
/// `E_NOTIMPL`.
#[implement(
    Windows::Foundation::Collections::IIterable<T>,
    Windows::Foundation::Collections::IVector<T>,
    Windows::Foundation::Collections::IVectorView<T>
)]
pub struct Vector<T: RuntimeType + 'static>(Vec<T::DefaultType>);

impl<T: RuntimeType + 'static> Vector<T> {
    /// Wrap an existing `Vec` without copying.
    pub fn new(v: Vec<T::DefaultType>) -> Vector<T> {
        Vector(v)
    }

    /// `IIterable::First`: return an iterator positioned at index 0.
    fn First(&self) -> Result<IIterator<T>> {
        Ok(VectorIterator::<T> {
            it: self.cast()?,
            curr: AtomicU32::new(0),
        }
        .into())
    }

    /// `IVector::GetView`: the data is immutable, so the "view" is just
    /// another interface on this same object.
    fn GetView(&self) -> Result<IVectorView<T>> {
        Ok(self.cast()?)
    }

    /// Return the element at `index`, or `E_BOUNDS` when out of range.
    fn GetAt(&self, index: u32) -> Result<T> {
        self.0
            .get(index as usize)
            // SAFETY: `DefaultType` is a super trait of `RuntimeType`.
            .map(|el| unsafe { DefaultType::from_default(el) })
            .transpose()?
            .ok_or(Error::from(E_BOUNDS))
    }

    /// Number of elements; `E_BOUNDS` if the length does not fit in `u32`.
    fn Size(&self) -> Result<u32> {
        u32::try_from(self.0.len()).map_err(|_| Error::from(E_BOUNDS))
    }

    /// Linear search for `value`; on success writes its position to `index`
    /// and returns `true`.
    fn IndexOf(&self, value: &T::DefaultType, index: &mut u32) -> Result<bool> {
        if let Some(idx) = self.0.iter().position(|el| el == value) {
            *index = u32::try_from(idx).map_err(|_| Error::from(E_BOUNDS))?;
            Ok(true)
        } else {
            Ok(false)
        }
    }

    /// Copy elements starting at `start` into `items`; returns how many were
    /// copied (limited by both `items.len()` and the remaining elements).
    fn GetMany(&self, start: u32, items: &mut [T::DefaultType]) -> Result<u32> {
        let sz = u32::try_from(self.0.len()).map_err(|_| Error::from(E_BOUNDS))?;
        if start >= sz {
            return Err(Error::from(E_BOUNDS));
        }
        let mut count = 0;
        for (item, el) in items.into_iter().zip(self.0[start as usize..].iter()) {
            *item = el.clone();
            count += 1;
        }
        Ok(count)
    }

    // The remaining `IVector` methods would mutate the collection; this
    // wrapper is read-only, so they all report `E_NOTIMPL`.

    fn SetAt(&self, _index: u32, _value: &T::DefaultType) -> Result<()> {
        Err(E_NOTIMPL.into())
    }

    fn InsertAt(&self, _index: u32, _value: &T::DefaultType) -> Result<()> {
        Err(E_NOTIMPL.into())
    }

    fn RemoveAt(&self, _index: u32) -> Result<()> {
        Err(E_NOTIMPL.into())
    }

    fn Append(&self, _value: &T::DefaultType) -> Result<()> {
        Err(E_NOTIMPL.into())
    }

    fn RemoveAtEnd(&self) -> Result<()> {
        Err(E_NOTIMPL.into())
    }

    fn Clear(&self) -> Result<()> {
        Err(E_NOTIMPL.into())
    }

    fn ReplaceAll(&self, _values: &[T::DefaultType]) -> Result<()> {
        Err(E_NOTIMPL.into())
    }
}
// Allow passing an owned `Vector<T>` directly where the generated WinRT APIs
// expect an `IVectorView<T>` parameter.
impl<'a, T: RuntimeType + 'static> IntoParam<'a, IVectorView<T>> for Vector<T> {
    fn into_param(self) -> Param<'a, IVectorView<T>> {
        Param::Owned(self.into())
    }
}

// Likewise for `IVector<T>` parameters.
impl<'a, T: RuntimeType + 'static> IntoParam<'a, IVector<T>> for Vector<T> {
    fn into_param(self) -> Param<'a, IVector<T>> {
        Param::Owned(self.into())
    }
}
/// `IIterator` wrapper for `Vector`
#[implement(Windows::Foundation::Collections::IIterator<T>)]
struct VectorIterator<T: RuntimeType + 'static> {
    /// The underlying object we're iteratoring over
    it: IIterable<T>,
    /// The current position of the iterator
    curr: AtomicU32,
}

impl<T: RuntimeType + 'static> VectorIterator<T> {
    /// Return the element at the current position (`E_BOUNDS` once exhausted).
    fn Current(&self) -> Result<T> {
        // SAFETY: We know this must be our `Vector` type
        let vec = unsafe { Vector::to_impl(&self.it) };
        vec.GetAt(self.curr.load(Ordering::Relaxed))
    }

    /// Whether the current position still refers to a valid element.
    fn HasCurrent(&self) -> Result<bool> {
        // SAFETY: We know this must be our `Vector` type
        let vec = unsafe { Vector::to_impl(&self.it) };
        Ok(vec.0.len() > self.curr.load(Ordering::Relaxed) as usize)
    }

    /// Advance the iterator; returns whether a valid element remains.
    /// NOTE(review): the counter increments even when already past the end, so
    /// repeated calls on an exhausted iterator would eventually wrap the u32.
    fn MoveNext(&self) -> Result<bool> {
        // SAFETY: We know this must be our `Vector` type
        let vec = unsafe { Vector::to_impl(&self.it) };
        let old = self.curr.fetch_add(1, Ordering::Relaxed) as usize;
        Ok(vec.0.len() > old + 1)
    }

    /// Copy elements into `items`.
    /// NOTE(review): delegates with start = 0, ignoring `curr` — confirm
    /// whether WinRT expects copying from the iterator's current position.
    fn GetMany(&self, items: &mut [T::DefaultType]) -> Result<u32> {
        // SAFETY: We know this must be our `Vector` type
        let vec = unsafe { Vector::to_impl(&self.it) };
        vec.GetMany(0, items)
    }
}
/// Convenience accessors for the raw bytes behind a WinRT buffer.
pub trait IBufferExt {
    /// Get a slice to an `IBuffer`'s underlying buffer.
    ///
    /// NOTE: This returns a slice with the length set to the IBuffer's Length and not Capacity.
    fn get_buf(&self) -> Result<&[u8]>;
    /// Get a mutable slice to an `IBuffer`'s underlying buffer.
    ///
    /// NOTE: This returns a slice with the length set to the IBuffer's Capacity and not Length.
    ///
    /// TODO: Is this safe?
    /// For `VpnPacketBuffer` at least, the buffer should be initialized & zeroed.
    fn get_buf_mut(&mut self) -> Result<&mut [u8]>;
}

impl IBufferExt for VpnPacketBuffer {
    fn get_buf(&self) -> Result<&[u8]> {
        let buffer = self.Buffer()?;
        // Valid-data length, not allocation size.
        let len = buffer.Length()?;
        let rawBuffer = buffer.cast::<IBufferByteAccess>()?;
        Ok(unsafe {
            // SAFETY: Any type that implements `IBuffer` must also implement `IBufferByteAccess`
            // to return the buffer as an array of bytes.
            std::slice::from_raw_parts(rawBuffer.Buffer()?, len as usize)
        })
    }

    fn get_buf_mut(&mut self) -> Result<&mut [u8]> {
        let buffer = self.Buffer()?;
        // Full allocation size, so callers can write past the current Length.
        let cap = buffer.Capacity()?;
        let rawBuffer = buffer.cast::<IBufferByteAccess>()?;
        Ok(unsafe {
            // SAFETY: Any type that implements `IBuffer` must also implement `IBufferByteAccess`
            // to return the buffer as an array of bytes.
            std::slice::from_raw_parts_mut(rawBuffer.Buffer()?, cap as usize)
        })
    }
}
macro_rules! debug_log {
($fmt:tt) => {
unsafe {
use windows::Win32::Foundation::PSTR;
use windows::Win32::System::Diagnostics::Debug::OutputDebugStringA;
let mut msg = format!(concat!($fmt, "\n\0"));
OutputDebugStringA(PSTR(msg.as_mut_ptr()));
}
};
($fmt:tt, $($arg:tt)*) => {
unsafe {
use windows::Win32::Foundation::PSTR;
use windows::Win32::System::Diagnostics::Debug::OutputDebugStringA;
let mut msg = format!(concat!($fmt, "\n\0"), $($arg)*);
OutputDebugStringA(PSTR(msg.as_mut_ptr()));
}
};
}
pub(crate) use debug_log;
| rust | Apache-2.0 | 328e622fb613d611bb022874a6535e2846ac6640 | 2026-01-04T20:24:02.148210Z | false |
luqmana/wireguard-uwp-rs | https://github.com/luqmana/wireguard-uwp-rs/blob/328e622fb613d611bb022874a6535e2846ac6640/plugin/src/logging.rs | plugin/src/logging.rs | //! Logging primitives along with our ETW Trace Provider.
use win_etw_macros::trace_logging_provider;
/// The collection of ETW events our plugin emits.
///
/// `#[trace_logging_provider]` generates the provider implementation from
/// this trait; the plugin constructs it via `WireGuardUWPEvents::new()`.
#[allow(non_snake_case)]
#[trace_logging_provider(guid = "c4522a55-401f-4b81-93f9-aa0d1db734c4")]
pub trait WireGuardUWPEvents {
    /// `Connect` event emitted once we've successfully connected
    #[event(level = "info")]
    fn connected(remote_host: &str, remote_port: u16);

    /// Event emitted if we've failed during `Connect`
    #[event(level = "error")]
    fn connect_fail(code: u32, msg: &str);

    /// Event emitted for `Disconnect`
    #[event(level = "warn")]
    fn disconnect(code: u32, msg: &str);

    // Noisy packet encap/decap events

    /// Packet encap begin event.
    /// Indicates how many outgoing packets are ready to be encapsulated.
    #[event(level = "verbose")]
    fn encapsulate_begin(packets: u32);

    /// Packet encap end event.
    /// Indicates how many frames we sent to the remote endpoint.
    #[event(level = "verbose")]
    fn encapsulate_end(frames: u32);

    /// Frame decap begin event.
    /// Indicates the size of the frame received from the remote endpoint.
    #[event(level = "verbose")]
    fn decapsulate_begin(frame_sz: u32);

    /// Frame decap end event.
    /// Indicates how many packets were decapsulated and how many frames sent to the remote.
    #[event(level = "verbose")]
    fn decapsulate_end(packets: u32, control_frames: u32);

    /// KeepAlive packet event.
    /// Indicates how many bytes destined for remote.
    #[event(level = "info")]
    fn keepalive(packet_sz: u32);
}
| rust | Apache-2.0 | 328e622fb613d611bb022874a6535e2846ac6640 | 2026-01-04T20:24:02.148210Z | false |
luqmana/wireguard-uwp-rs | https://github.com/luqmana/wireguard-uwp-rs/blob/328e622fb613d611bb022874a6535e2846ac6640/plugin/src/background.rs | plugin/src/background.rs | //! The entrypoint for the background task where our actual VPN plugin runs.
use std::mem::ManuallyDrop;
use windows::{
self as Windows,
core::*,
ApplicationModel::Background::IBackgroundTaskInstance,
ApplicationModel::Core::CoreApplication,
Networking::Vpn::{IVpnPlugIn, VpnChannel},
Win32::Foundation::{E_INVALIDARG, E_NOINTERFACE, E_UNEXPECTED, S_OK},
Win32::System::WinRT::IActivationFactory,
};
/// The WinRT Activatable Class which acts as the entrypoint for the background tasks
/// which get invoked to handle the actual VPN tunnel.
#[implement(Windows::ApplicationModel::Background::IBackgroundTask)]
pub struct VpnBackgroundTask;

impl VpnBackgroundTask {
    /// Entry point invoked by the background-task infrastructure.
    ///
    /// Reuses a single `VpnPlugin` across activations by caching it in the
    /// `CoreApplication` property bag, then hands it to the VPN platform via
    /// `VpnChannel::ProcessEventAsync`.
    fn Run(&self, task: &Option<IBackgroundTaskInstance>) -> Result<()> {
        let task = task.as_ref().ok_or(Error::from(E_UNEXPECTED))?;
        // Keep the task alive until we explicitly complete the deferral below.
        let deferral = task.GetDeferral()?;

        // Grab existing plugin instance from in-memory app properties or create a new one
        let app_props = CoreApplication::Properties()?;
        let plugin = if app_props.HasKey("plugin")? {
            app_props.Lookup("plugin")?.cast()?
        } else {
            let plugin: IVpnPlugIn = super::plugin::VpnPlugin::new().into();
            app_props.Insert("plugin", plugin.clone())?;
            plugin
        };

        // Call into VPN platform with the plugin object
        VpnChannel::ProcessEventAsync(plugin, task.TriggerDetails()?)?;

        deferral.Complete()?;
        Ok(())
    }
}
/// A factory object to generate `VpnBackgroundTask`.
///
/// Returned by `DllGetActivationFactory` when the system attempts to get an
/// instance of `VpnBackgroundTask`.
#[implement(Windows::Win32::System::WinRT::IActivationFactory)]
struct VpnBackgroundTaskFactory;

impl VpnBackgroundTaskFactory {
    /// Creates and returns a new instance of `VpnBackgroundTask`.
    // `VpnBackgroundTask` is stateless, so activation is a trivial conversion.
    fn ActivateInstance(&self) -> Result<IInspectable> {
        Ok(VpnBackgroundTask.into())
    }
}
/// Called by any consumers of this library attempting to get instances of any activatable
/// Windows Runtime classes we support.
///
/// When the system is ready to launch our VPN background task, it needs to get a reference
/// to our `VpnBackgroundTask` object. It can do so because as part of our `AppxManifest.xml`
/// we list out which Activatable Classes (VpnBackgroundTask) we want registered during App
/// installation. Furthermore, we specify that the component is hosted in our DLL. From there,
/// it knows to query us via the `DllGetActivationFactory` function we export to get some
/// object implementing `IActivationFactory` which knows how to create new instances of the
/// target WinRT runtime class.
///
/// Since `activatableClassId` is an _In_ parameter, the caller is responsible for freeing it.
/// But the HSTRING wrapper from the windows crate has a `Drop` impl which will attempt to free
/// it once it goes out of scope. Unfortunately, that would be a double-free once we've returned
/// back to the caller who would also attempt to free it. Hence, we transparently wrap the HSTRING
/// with ManuallyDrop to skip any free'ing on the Rust side.
#[no_mangle]
pub unsafe extern "system" fn DllGetActivationFactory(
    activatableClassId: ManuallyDrop<HSTRING>,
    factory: *mut Option<IActivationFactory>,
) -> HRESULT {
    if activatableClassId.is_empty() || factory.is_null() {
        return E_INVALIDARG;
    }

    // Return the appropriate factory based on which class was requested.
    // SAFETY: `factory` was null-checked above; the caller supplies valid,
    // writable storage for this out-parameter.
    if *activatableClassId == "WireGuard-UWP.VpnBackgroundTask" {
        *factory = Some(VpnBackgroundTaskFactory.into());
    } else {
        *factory = None;
        return E_NOINTERFACE;
    }

    S_OK
}
| rust | Apache-2.0 | 328e622fb613d611bb022874a6535e2846ac6640 | 2026-01-04T20:24:02.148210Z | false |
luqmana/wireguard-uwp-rs | https://github.com/luqmana/wireguard-uwp-rs/blob/328e622fb613d611bb022874a6535e2846ac6640/plugin/src/plugin.rs | plugin/src/plugin.rs | //! Our implementation of `IVpnPlugIn` which is the bulk of the UWP VPN plugin.
use std::sync::{Arc, RwLock};
use std::time::Duration;
use boringtun::noise::{Tunn, TunnResult};
use ipnetwork::IpNetwork;
use windows::{
self as Windows,
core::*,
Networking::Sockets::*,
Networking::Vpn::*,
Networking::*,
Win32::Foundation::{E_BOUNDS, E_INVALIDARG, E_UNEXPECTED},
};
use crate::config::WireGuardConfig;
use crate::logging::WireGuardUWPEvents;
use crate::utils::{debug_log, IBufferExt, Vector};
/// Mutable plugin state guarded by the `RwLock` in `VpnPlugin`.
struct Inner {
    /// The boringtun WireGuard tunnel state; `None` until `Connect` creates
    /// it, and reset to `None` on `Disconnect`.
    tunn: Option<Box<Tunn>>,
}

impl Inner {
    /// Create the initial (no tunnel) state.
    fn new() -> Self {
        Self { tunn: None }
    }
}
/// The VPN plugin object which provides the hooks that the UWP VPN platform will call into.
#[implement(Windows::Networking::Vpn::IVpnPlugIn)]
pub struct VpnPlugin {
    /// Tunnel state shared between the plugin callbacks.
    inner: RwLock<Inner>,
    /// ETW provider used to emit diagnostic events.
    etw_logger: WireGuardUWPEvents,
}
impl VpnPlugin {
/// Construct a plugin with no active tunnel and a fresh ETW logger.
pub fn new() -> Self {
    Self {
        inner: RwLock::new(Inner::new()),
        etw_logger: WireGuardUWPEvents::new(),
    }
}
/// Called by the platform so that we may connect and setup the VPN tunnel.
fn Connect(&self, channel: &Option<VpnChannel>) -> Result<()> {
    // Call out to separate method so that we can capture any errors
    if let Err(err) = self.connect_inner(channel) {
        // Emit the failure (HRESULT + message) to ETW before propagating.
        self.etw_logger
            .connect_fail(None, err.code().0, &err.to_string());
        Err(err)
    } else {
        Ok(())
    }
}
/// Internal `Connect` implementation.
///
/// Parses the WireGuard config from the profile's custom field, programs
/// addresses/routes/DNS into the VPN platform, creates the boringtun tunnel
/// state, connects the UDP transport socket, and starts the channel.
fn connect_inner(&self, channel: &Option<VpnChannel>) -> Result<()> {
    let channel = channel.as_ref().ok_or(Error::from(E_UNEXPECTED))?;
    let mut inner = self.inner.write().unwrap();

    let config = channel.Configuration()?;

    // Grab custom config field from VPN profile and try to parse the config
    // In theory this would totally be fine to deal with as INI to match
    // most other wireguard config, but it's a bit of pain since a number of
    // places assume this will be XML...
    let wg_config = match WireGuardConfig::from_str(&config.CustomField()?.to_string()) {
        Ok(conf) => conf,
        Err(err) => {
            // Surface the parse failure to the platform, then fail Connect.
            channel.SetErrorMessage(format!("failed to parse config: {}", err))?;
            return Err(Error::from(E_INVALIDARG));
        }
    };

    let static_private = Arc::new(wg_config.interface.private_key);
    let peer_static_public = Arc::new(wg_config.peer.public_key);
    let persistent_keepalive = wg_config.peer.persistent_keepalive;
    let preshared_key = wg_config.peer.preshared_key;

    // Grab interface addresses
    let iface_addrs = wg_config.interface.address;
    // Now massage em into the right form: split by family, convert to
    // `HostName`, and wrap as Option-element vectors for WinRT.
    let (ipv4, ipv6) = iface_addrs
        .into_iter()
        .partition::<Vec<_>, _>(IpNetwork::is_ipv4);
    let ipv4_addrs = ipv4
        .into_iter()
        .map(|ip| HostName::CreateHostName(ip.ip().to_string()))
        .collect::<Result<Vec<_>>>()?
        .into_iter()
        .map(Some)
        .collect::<Vec<_>>();
    let ipv4_addrs = if ipv4_addrs.is_empty() {
        None
    } else {
        Some(Vector::new(ipv4_addrs).into())
    };
    let ipv6_addrs = ipv6
        .into_iter()
        .map(|ip| HostName::CreateHostName(ip.ip().to_string()))
        .collect::<Result<Vec<_>>>()?
        .into_iter()
        .map(Some)
        .collect::<Vec<_>>();
    let ipv6_addrs = if ipv6_addrs.is_empty() {
        None
    } else {
        Some(Vector::new(ipv6_addrs).into())
    };

    // Split a mixed list of networks into (IPv4, IPv6) `VpnRoute` lists.
    let build_routes = |routes: Vec<IpNetwork>| -> Result<_> {
        let mut ipv4 = vec![];
        let mut ipv6 = vec![];
        for ip in routes {
            let route = VpnRoute::CreateVpnRoute(
                HostName::CreateHostName(ip.network().to_string())?,
                ip.prefix(),
            )?;
            if ip.is_ipv4() {
                ipv4.push(Some(route));
            } else {
                ipv6.push(Some(route));
            }
        }
        Ok((ipv4, ipv6))
    };

    let routes = VpnRouteAssignment::new()?;

    // Grab AllowedIPs and build routes from it
    let (allowed_ipv4, allowed_ipv6) = build_routes(wg_config.peer.allowed_ips)?;
    if !allowed_ipv4.is_empty() {
        routes.SetIpv4InclusionRoutes(Vector::new(allowed_ipv4))?;
    }
    if !allowed_ipv6.is_empty() {
        routes.SetIpv6InclusionRoutes(Vector::new(allowed_ipv6))?;
    }

    // Grab ExcludedIPs to determine exclusion routes
    let (excluded_ipv4, excluded_ipv6) = build_routes(wg_config.peer.excluded_ips)?;
    if !excluded_ipv4.is_empty() {
        routes.SetIpv4ExclusionRoutes(Vector::new(excluded_ipv4))?;
    }
    if !excluded_ipv6.is_empty() {
        routes.SetIpv6ExclusionRoutes(Vector::new(excluded_ipv6))?;
    }

    // Setup DNS
    let namespace_assignment = VpnNamespaceAssignment::new()?;
    let dns_servers = wg_config
        .interface
        .dns_servers
        .into_iter()
        .map(|server| HostName::CreateHostName(server.to_string()))
        .collect::<Result<Vec<_>>>()?
        .into_iter()
        .map(Some)
        .collect::<Vec<_>>();
    let search_domains = wg_config.interface.search_domains;
    // One namespace per search domain, plus a catch-all if any DNS server was given.
    let namespace_count = search_domains.len() + !dns_servers.is_empty() as usize;
    let mut namespaces = Vec::with_capacity(namespace_count);

    // Add the search domains as suffix NRPT rules so that
    // they get added to the virtual interface's
    // Connection-Specific DNS Suffix Search List.
    for mut search_domain in search_domains {
        // Prefix with . to make it a suffix rule
        search_domain.insert(0, '.');
        let dns_servers = Vector::new(dns_servers.clone());
        let namespace =
            VpnNamespaceInfo::CreateVpnNamespaceInfo(search_domain, dns_servers, None)?;
        namespaces.push(Some(namespace));
    }
    if !dns_servers.is_empty() {
        // We set the namespace name to '.' so it applies to everything instead of
        // a specific set of domains (see NRPT)
        let dns_servers = Vector::new(dns_servers);
        let namespace = VpnNamespaceInfo::CreateVpnNamespaceInfo(".", dns_servers, None)?;
        namespaces.push(Some(namespace));
    }
    namespace_assignment.SetNamespaceList(Vector::new(namespaces))?;

    // Create WG tunnel object
    let tunn = Tunn::new(
        static_private,
        peer_static_public,
        preshared_key,
        persistent_keepalive,
        0,    // Peer index. we only have one peer
        None, // TODO: No rate limiter
    )
    // TODO: is E_UNEXPECTED the right error here?
    .map_err(|e| Error::new(E_UNEXPECTED, e.into()))?;

    // Stuff it into our inner state
    // Just forget the previous tunn state and start over (if one exists at all)
    if let Some(_) = std::mem::replace(&mut inner.tunn, Some(tunn)) {
        debug_log!("Replacing leftover tunn state.");
    }

    // Create socket and register with VPN platform
    let sock = DatagramSocket::new()?;
    channel.AddAndAssociateTransport(&sock, None)?;

    // Just use the first server listed to connect to remote endpoint
    let server = config.ServerHostNameList()?.GetAt(0)?;
    let port = wg_config.peer.port;
    debug_log!("Server: {} Port: {}", server.ToString()?.to_string(), port);

    // We "block" here with the call to `.get()` but given this is a UDP socket
    // connect isn't actually something that will hang (DNS aside perhaps?).
    sock.ConnectAsync(&server, port.to_string())?.get()?;

    // Kick off the VPN setup
    channel.Start(
        ipv4_addrs,
        ipv6_addrs,
        None, // Interface ID portion of IPv6 address for VPN tunnel
        routes,
        namespace_assignment,
        1500,  // MTU size of VPN tunnel interface
        1600,  // Max frame size of incoming buffers from remote endpoint
        false, // Disable low cost network monitoring
        sock,  // Pass in the socket to the remote endpoint
        None,  // No secondary socket used.
    )?;

    // Log successful connection
    self.etw_logger
        .connected(None, &server.ToString()?.to_string(), port);

    Ok(())
}
/// Called by the platform to indicate we should disconnect and cleanup the VPN tunnel.
fn Disconnect(&self, channel: &Option<VpnChannel>) -> Result<()> {
    // Call out to separate method so that we can capture any errors
    if let Err(err) = self.disconnect_inner(channel) {
        self.etw_logger
            .disconnect(None, err.code().0, &err.to_string());
        Err(err)
    } else {
        // Also log the success case (code 0) for completeness.
        self.etw_logger.disconnect(None, 0, "Operation successful.");
        Ok(())
    }
}
/// Internal `Disconnect` implementation.
///
/// Drops all tunnel state and asks the platform to stop the channel.
fn disconnect_inner(&self, channel: &Option<VpnChannel>) -> Result<()> {
    let channel = channel.as_ref().ok_or(Error::from(E_UNEXPECTED))?;
    let mut inner = self.inner.write().unwrap();
    // Dropping the boxed Tunn discards the session/handshake state.
    inner.tunn = None;
    channel.Stop()?;
    Ok(())
}
/// Called by the platform to indicate there are outgoing packets ready to be encapsulated.
///
/// `packets` contains outgoing L3 IP packets that we should encapsulate in whatever protocol
/// dependant manner before placing them in `encapsulatedPackets` so that they may be sent to
/// the remote endpoint.
fn Encapsulate(
    &self,
    channel: &Option<VpnChannel>,
    packets: &Option<VpnPacketBufferList>,
    encapsulatedPackets: &Option<VpnPacketBufferList>,
) -> Result<()> {
    let channel = channel.as_ref().ok_or(Error::from(E_UNEXPECTED))?;
    let packets = packets.as_ref().ok_or(Error::from(E_UNEXPECTED))?;
    let encapsulatedPackets = encapsulatedPackets
        .as_ref()
        .ok_or(Error::from(E_UNEXPECTED))?;
    let inner = self.inner.read().unwrap();
    let tunn = if let Some(tunn) = &inner.tunn {
        &**tunn
    } else {
        // We haven't initalized tunn yet, just return
        return Ok(());
    };

    // Buffers requested from the channel but not sent; they must still be
    // handed back to the platform (see the comment at the bottom of the loop).
    let mut ret_buffers = vec![];
    // First encapsulation error seen; reported only after every buffer has
    // been returned to the platform.
    let mut encap_err = None;

    // Usually this would be called in the background by some periodic timer
    // but a UWP VPN plugin will get suspended if there's no traffic and that
    // includes any background threads or such we could create.
    // So we may find ourselves with a stale session and need to do a new
    // handshake. Thus, we just call this opportunistically here before
    // trying to encapsulate.
    if tunn.time_since_last_handshake() >= Some(Duration::from_millis(250)) {
        const HANDSHAKE_INIT_SZ: usize = 148;
        let mut handshake_buf = [0u8; HANDSHAKE_INIT_SZ];
        match tunn.update_timers(&mut handshake_buf) {
            // Session still valid, nothing more to do.
            TunnResult::Done => (),
            // Encountered an error, bail out
            TunnResult::Err(err) => {
                return Err(Error::new(
                    E_UNEXPECTED,
                    format!("update_timers error: {:?}", err).into(),
                ));
            }
            // Looks like we need to get things updated
            TunnResult::WriteToNetwork(packet) => {
                // Request a new buffer
                let mut handshake_buffer = channel.GetVpnSendPacketBuffer()?;
                // Copy data over and update length on WinRT buffer
                handshake_buffer.get_buf_mut()?[..packet.len()].copy_from_slice(packet);
                let new_len = u32::try_from(packet.len()).map_err(|_| Error::from(E_BOUNDS))?;
                handshake_buffer.Buffer()?.SetLength(new_len)?;
                // Now queue it up to be sent
                encapsulatedPackets.Append(handshake_buffer)?;
            }
            // Impossible cases for update_timers
            TunnResult::WriteToTunnelV4(_, _) | TunnResult::WriteToTunnelV6(_, _) => {
                panic!("unexpected result from update_timers");
            }
        }
    }

    let packets_sz = packets.Size()?;
    self.etw_logger.encapsulate_begin(None, packets_sz);

    // Process outgoing packets from VPN tunnel.
    // TODO: Not using the simpler `for packet in packets` because
    // `packets.First()?` fails with E_NOINTERFACE for some reason.
    for _ in 0..packets_sz {
        let packet = packets.RemoveAtBegin()?;
        let src = packet.get_buf()?;

        // Grab a destination buffer for the encapsulated packet
        let mut encapPacket = channel.GetVpnSendPacketBuffer()?;
        let dst = encapPacket.get_buf_mut()?;

        // Try to encapsulate packet
        let res = tunn.encapsulate(src, dst);
        if let TunnResult::WriteToNetwork(packet) = res {
            // Packet was encap'd successfully, make sure to update length on the WinRT side
            let new_len = u32::try_from(packet.len()).map_err(|_| Error::from(E_BOUNDS))?;
            // End the borrow of `encapPacket` (via `dst`) before touching it again.
            drop(packet);
            encapPacket.Buffer()?.SetLength(new_len)?;
            // Now, tack it onto `encapsulatedPackets` to send to remote endpoint
            encapsulatedPackets.Append(encapPacket)?;
        } else {
            match res {
                // Handled above
                TunnResult::WriteToNetwork(_) => {}
                // Packet was queued while we complete the handshake
                TunnResult::Done => {}
                // Encountered an error while trying to encapsulate
                TunnResult::Err(err) => {
                    // Remember only the first error; keep looping so every
                    // buffer is still returned to the platform.
                    if encap_err.is_none() {
                        encap_err = Some(Error::new(
                            E_UNEXPECTED,
                            format!("encap error: {:?}", err).into(),
                        ));
                    }
                }
                // Impossible cases for encapsulate
                TunnResult::WriteToTunnelV4(_, _) | TunnResult::WriteToTunnelV6(_, _) => {
                    panic!("unexpected result from encapsulate")
                }
            }
            // We must return the `encapPacket` we requested
            ret_buffers.push(encapPacket);
        }
        // Note: this loop does not consume the items in packets which is important
        // as ANY `VpnPacketBuffer` we get (whether as some argument to a `IVpnPlugIn`
        // method or via methods on `VpnChannel`) we are expected to return to the
        // platform. Since we're not en/decapsulating in-place, it works out to leave
        // the buffers in `packets` so that the platform may clean them up.
        packets.Append(packet)?;
    }

    self.etw_logger
        .encapsulate_end(None, encapsulatedPackets.Size()?);

    // Just stick the unneeded buffers onto `packets` so the platform can clean them up
    for packet in ret_buffers {
        packets.Append(packet)?;
    }

    // If we encountered an error, return it
    if let Some(err) = encap_err {
        Err(err)
    } else {
        Ok(())
    }
}
/// Called by the platform to indicate we've received a frame from the remote endpoint.
///
/// `buffer` will contain whatever data we received from the remote endpoint which may
/// either contain control or data payloads. For data payloads, we will decapsulate into
/// 1 (or more) L3 IP packet(s) before returning them to the platform by placing them in
/// `decapsulatedPackets`, making them ready to be injected into the virtual tunnel. If
/// we need to send back control payloads or otherwise back to the remote endpoint, we
/// may place such frames into `controlPackets`.
fn Decapsulate(
&self,
channel: &Option<VpnChannel>,
buffer: &Option<VpnPacketBuffer>,
decapsulatedPackets: &Option<VpnPacketBufferList>,
controlPackets: &Option<VpnPacketBufferList>,
) -> Result<()> {
let channel = channel.as_ref().ok_or(Error::from(E_UNEXPECTED))?;
let buffer = buffer.as_ref().ok_or(Error::from(E_UNEXPECTED))?;
let decapsulatedPackets = decapsulatedPackets
.as_ref()
.ok_or(Error::from(E_UNEXPECTED))?;
let controlPackets = controlPackets.as_ref().ok_or(Error::from(E_UNEXPECTED))?;
let inner = self.inner.read().unwrap();
let tunn = if let Some(tunn) = &inner.tunn {
&**tunn
} else {
// We haven't initalized tunn yet, just return
return Ok(());
};
self.etw_logger
.decapsulate_begin(None, buffer.Buffer()?.Length()?);
// Allocate a buffer for the decapsulate packet
let mut decapPacket = channel.GetVpnReceivePacketBuffer()?;
let dst = decapPacket.get_buf_mut()?;
// Get a slice to the datagram we just received from the remote endpoint and try to decap
let datagram = buffer.get_buf()?;
let res = tunn.decapsulate(None, datagram, dst);
match res {
// Nothing to do with this decap result
TunnResult::Done => {
// TODO: Return unused `decapPacket` buffer
}
// Encountered an error while trying to decapsulate
TunnResult::Err(err) => {
// TODO: Return unused `decapPacket` buffer
return Err(Error::new(
E_UNEXPECTED,
format!("encap error: {:?}", err).into(),
));
}
// We need to send response back to remote endpoint
TunnResult::WriteToNetwork(packet) => {
// Make sure to update length on WinRT buffer
let new_len = u32::try_from(packet.len()).map_err(|_| Error::from(E_BOUNDS))?;
drop(packet);
// TODO: technically, we really should've used `GetVpnSendPacketBuffer` for this
// buffer but boringtun doesn't really have a way to know in advance if it'll
// be giving back control packets instead of data packets.
// We could just use temp buffers and copy as appropriate?
let controlPacket = decapPacket;
controlPacket.Buffer()?.SetLength(new_len)?;
// Tack onto `controlPackets` so that they get sent to remote endpoint
controlPackets.Append(controlPacket)?;
// We need to probe for any more packets queued to send
loop {
// Allocate a buffer for control packet
let mut controlPacket = channel.GetVpnSendPacketBuffer()?;
let dst = controlPacket.get_buf_mut()?;
let res = tunn.decapsulate(None, &[], dst);
if let TunnResult::WriteToNetwork(packet) = res {
// Make sure to update length on WinRT buffer
let new_len =
u32::try_from(packet.len()).map_err(|_| Error::from(E_BOUNDS))?;
drop(packet);
controlPacket.Buffer()?.SetLength(new_len)?;
controlPackets.Append(controlPacket)?;
} else {
// TODO: Return unused `controlPacket` buffer
// Nothing more to do
break;
}
}
}
// Successfully decapsulated data packet
TunnResult::WriteToTunnelV4(packet, _) | TunnResult::WriteToTunnelV6(packet, _) => {
// Make sure to update length on WinRT buffer
let new_len = u32::try_from(packet.len()).map_err(|_| Error::from(E_BOUNDS))?;
drop(packet);
decapPacket.Buffer()?.SetLength(new_len)?;
// Tack onto `decapsulatedPackets` to inject into VPN interface
decapsulatedPackets.Append(decapPacket)?;
}
}
self.etw_logger
.decapsulate_end(None, decapsulatedPackets.Size()?, controlPackets.Size()?);
Ok(())
}
/// Called by the platform from time to time so that we may send some keepalive payload.
///
/// If we decide we want to send any keepalive payload, we place it in `keepAlivePacket`.
fn GetKeepAlivePayload(
&self,
channel: &Option<VpnChannel>,
keepAlivePacket: &mut Option<VpnPacketBuffer>,
) -> Result<()> {
let channel = channel.as_ref().ok_or(Error::from(E_UNEXPECTED))?;
let inner = self.inner.read().unwrap();
let tunn = if let Some(tunn) = &inner.tunn {
&**tunn
} else {
// We haven't initalized tunn yet, just return
return Ok(());
};
*keepAlivePacket = None;
// Grab a buffer for the keepalive packet
let mut kaPacket = channel.GetVpnSendPacketBuffer()?;
let dst = kaPacket.get_buf_mut()?;
// Any packets we need to send out?
match tunn.update_timers(dst) {
// Nothing to do right now
TunnResult::Done => {
// TODO: Return unused `kaPacket` buffer
}
// Encountered an error, bail out
TunnResult::Err(err) => {
// TODO: Return unused `kaPacket` buffer
return Err(Error::new(
// TODO: Better error than `E_UNEXPECTED`?
E_UNEXPECTED,
format!("update_timers error: {:?}", err).into(),
));
}
// We got something to send to the remote
TunnResult::WriteToNetwork(packet) => {
// Make sure to update length on WinRT buffer
let new_len = u32::try_from(packet.len()).map_err(|_| Error::from(E_BOUNDS))?;
drop(packet);
kaPacket.Buffer()?.SetLength(new_len)?;
self.etw_logger.keepalive(None, new_len);
// Place the packet in the out param to send to remote
*keepAlivePacket = Some(kaPacket);
}
// Impossible cases for update_timers
TunnResult::WriteToTunnelV4(_, _) | TunnResult::WriteToTunnelV6(_, _) => {
panic!("unexpected result from update_timers")
}
}
Ok(())
}
}
| rust | Apache-2.0 | 328e622fb613d611bb022874a6535e2846ac6640 | 2026-01-04T20:24:02.148210Z | false |
luqmana/wireguard-uwp-rs | https://github.com/luqmana/wireguard-uwp-rs/blob/328e622fb613d611bb022874a6535e2846ac6640/app/src/main.rs | app/src/main.rs | //! This crate contains the foreground portion of our UWP VPN plugin app.
//!
//! We use XAML programmatically to generate the UI.
#![windows_subsystem = "windows"]
#![allow(non_snake_case)] // Windows naming conventions
use windows::{
self as Windows,
core::*,
ApplicationModel::Activation::LaunchActivatedEventArgs,
Foundation::Uri,
Win32::System::Com::{CoInitializeEx, COINIT_MULTITHREADED},
UI::Xaml::{Application, ApplicationInitializationCallback},
};
/// Encapsulates our app and overrides the relevant lifecycle management methods.
#[implement(
extend Windows::UI::Xaml::Application,
override OnLaunched
)]
struct App;
impl App {
/// This method get invoked when the app is initially launched.
fn OnLaunched(&self, _args: &Option<LaunchActivatedEventArgs>) -> Result<()> {
use Windows::{
UI::Xaml::Controls::{Grid, Page, TextBlock},
UI::Xaml::Documents::{Hyperlink, LineBreak, Run},
UI::Xaml::Media::SolidColorBrush,
UI::Xaml::Thickness,
UI::Xaml::Window,
};
// Create the initial UI
let content = TextBlock::new()?;
let inline_content = content.Inlines()?;
inline_content.Append({
let run = Run::new()?;
run.SetFontSize(32.)?;
let color = SolidColorBrush::new()?;
color.SetColor(Windows::UI::Color {
A: 0xFF,
R: 0xFC,
G: 51,
B: 0x85,
})?;
run.SetForeground(color)?;
run.SetText("WireGuard + UWP + Rust")?;
run
})?;
inline_content.Append(LineBreak::new()?)?;
inline_content.Append(LineBreak::new()?)?;
inline_content.Append({
let run = Run::new()?;
run.SetText("No profiles found ")?;
run
})?;
inline_content.Append({
let add_link = Hyperlink::new()?;
add_link.Inlines()?.Append({
let run = Run::new()?;
run.SetText("add one")?;
run
})?;
add_link.SetNavigateUri(Uri::CreateUri("ms-settings:network-vpn")?)?;
add_link
})?;
inline_content.Append({
let run = Run::new()?;
run.SetText("!")?;
run
})?;
let root = Page::new()?;
root.SetContent({
let grid = Grid::new()?;
grid.SetPadding(Thickness {
Left: 40.,
Top: 40.,
Right: 40.,
Bottom: 40.,
})?;
grid.Children()?.Append(content)?;
grid
})?;
// Grab the ambient Window created for our UWP app and set the content
let window = Window::Current()?;
window.SetContent(root)?;
window.Activate()
}
}
fn main() -> Result<()> {
// We must initialize a COM MTA before initializing the rest of the App
unsafe {
CoInitializeEx(std::ptr::null_mut(), COINIT_MULTITHREADED)?;
}
// Go ahead with the XAML application initialization.
// `Windows::UI::Xaml::Application` (which `App` derives from) is responsible for setting up
// the CoreWindow and Dispatcher for us before calling our overridden OnLaunched/OnActivated.
Application::Start(ApplicationInitializationCallback::new(|_| {
App.new().map(|_| ())
}))
}
| rust | Apache-2.0 | 328e622fb613d611bb022874a6535e2846ac6640 | 2026-01-04T20:24:02.148210Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/torus.rs | src/torus.rs | use std::f32::consts::PI;
use super::generators::{IndexedPolygon, SharedVertex};
use super::{MapVertex, Quad, Vertex};
use crate::math::Vector3;
///
#[derive(Clone, Copy)]
pub struct Torus {
idx: usize,
radius: f32,
tubular_radius: f32,
radial_segments: usize,
tubular_segments: usize,
}
impl Torus {
/// Creates a new torus.
///
/// # Arguments
///
/// - `radius` is the radius from the center [0, 0, 0] to the center of the tubular radius
/// - `tubular_radius` is the radius to the surface from the toridal
/// - `tubular_segments` is the number of segments that wrap around the tube, it must be at least 3
/// - `radial_segments` is the number of tube segments requested to generate, it must be at least 3
///
/// # Panics
///
/// This function panics if `tubular_segments` or `radial_segments` is less than 3.
pub fn new(
radius: f32,
tubular_radius: f32,
radial_segments: usize,
tubular_segments: usize,
) -> Self {
assert!(tubular_segments > 2 && radial_segments > 2);
Torus {
idx: 0,
radius,
tubular_radius,
radial_segments,
tubular_segments,
}
}
}
impl Iterator for Torus {
type Item = Quad<Vertex>;
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
fn next(&mut self) -> Option<Self::Item> {
if self.idx < self.indexed_polygon_count() {
let idx = self.idx;
self.idx += 1;
Some(
self.indexed_polygon(idx)
.map_vertex(|i| self.shared_vertex(i)),
)
} else {
None
}
}
}
impl ExactSizeIterator for Torus {
fn len(&self) -> usize {
self.indexed_polygon_count() - self.idx
}
}
impl SharedVertex<Vertex> for Torus {
fn shared_vertex(&self, idx: usize) -> Vertex {
let (h, u) = (
(idx / self.tubular_segments) as f32,
(idx % self.tubular_segments) as f32,
);
let alpha = u * 2. * PI / self.tubular_segments as f32;
let beta = h * 2. * PI / self.radial_segments as f32;
let gamma = self.radius + self.tubular_radius * alpha.cos();
Vertex {
pos: [
gamma * beta.cos(),
self.tubular_radius * alpha.sin(),
-gamma * beta.sin(),
]
.into(),
normal: Vector3::new(
alpha.cos() * beta.cos(),
alpha.sin(),
-alpha.cos() * beta.sin(),
)
.normalized()
.into(),
}
}
fn shared_vertex_count(&self) -> usize {
self.tubular_segments * self.radial_segments + 1
}
}
impl IndexedPolygon<Quad<usize>> for Torus {
fn indexed_polygon(&self, idx: usize) -> Quad<usize> {
// check for wrap around the end end
let ncol = if self.indexed_polygon_count() - idx > self.tubular_segments {
self.tubular_segments as isize
} else {
-((self.indexed_polygon_count() - self.tubular_segments) as isize)
};
// check for wrap around the end end
let nrow = if idx % self.tubular_segments != self.tubular_segments - 1 {
1isize
} else {
1isize - (self.tubular_segments as isize)
};
let idx = idx as isize;
Quad::new(idx, idx + ncol, idx + nrow + ncol, idx + nrow).map_vertex(|x| x as usize)
}
fn indexed_polygon_count(&self) -> usize {
self.tubular_segments * self.radial_segments
}
}
#[test]
fn test_torus_len() {
let mut torus = Torus::new(2.0, 2.0, 6, 5);
assert_eq!(30, torus.len());
torus.next();
assert_eq!(29, torus.len());
assert_eq!(29, torus.count());
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/triangulate.rs | src/triangulate.rs | use std::collections::VecDeque;
use crate::Polygon::{self, PolyQuad, PolyTri};
use crate::{Quad, Triangle};
/// Provides a way to convert a polygon down to triangles.
pub trait EmitTriangles {
/// The content of each point in the triangle.
type Vertex;
/// Convert a polygon to one or more triangles, each triangle
/// is returned by calling `emit`.
fn emit_triangles<F>(&self, emit: F)
where
F: FnMut(Triangle<Self::Vertex>);
}
impl<T: Clone> EmitTriangles for Quad<T> {
type Vertex = T;
fn emit_triangles<F>(&self, mut emit: F)
where
F: FnMut(Triangle<T>),
{
let &Quad {
ref x,
ref y,
ref z,
ref w,
} = self;
emit(Triangle::new(x.clone(), y.clone(), z.clone()));
emit(Triangle::new(z.clone(), w.clone(), x.clone()));
}
}
impl<T: Clone> EmitTriangles for Triangle<T> {
type Vertex = T;
fn emit_triangles<F>(&self, mut emit: F)
where
F: FnMut(Triangle<T>),
{
emit(self.clone());
}
}
impl<T: Clone> EmitTriangles for Polygon<T> {
type Vertex = T;
fn emit_triangles<F>(&self, emit: F)
where
F: FnMut(Triangle<T>),
{
match self {
PolyTri(t) => t.emit_triangles(emit),
PolyQuad(q) => q.emit_triangles(emit),
}
}
}
/// A trait to easily convert any polygon [`Iterator`] into a triangle [`Iterator`].
/// This is useful since Quads and other geometry are not supported by modern graphics pipelines like OpenGL.
pub trait Triangulate<T, V> {
/// Convert an [`Iterator`] of polygons into an [`Iterator`] of triangles.
fn triangulate(self) -> TriangulateIterator<T, V>;
}
impl<V, P: EmitTriangles<Vertex = V>, T: Iterator<Item = P>> Triangulate<T, V> for T {
fn triangulate(self) -> TriangulateIterator<T, V> {
TriangulateIterator::new(self)
}
}
/// An [`Iterator`] that turns an [`Iterator`] of polygons into an [`Iterator`] of triangles.
///
/// This `struct` is created by the [`triangulate`] method on [`Triangulate`].
///
/// [`triangulate`]: trait.Triangulate.html#method.triangulate
/// [`Triangulate`]: trait.Triangulate.html
pub struct TriangulateIterator<SRC, V> {
source: SRC,
buffer: VecDeque<Triangle<V>>,
}
impl<V, U: EmitTriangles<Vertex = V>, SRC: Iterator<Item = U>> TriangulateIterator<SRC, V> {
fn new(src: SRC) -> TriangulateIterator<SRC, V> {
TriangulateIterator {
source: src,
buffer: VecDeque::new(),
}
}
}
impl<V, U: EmitTriangles<Vertex = V>, SRC: Iterator<Item = U>> Iterator
for TriangulateIterator<SRC, V>
{
type Item = Triangle<V>;
fn size_hint(&self) -> (usize, Option<usize>) {
let (n, _) = self.source.size_hint();
(n, None)
}
fn next(&mut self) -> Option<Self::Item> {
loop {
if let v @ Some(_) = self.buffer.pop_front() {
break v;
}
self.source
.next()?
.emit_triangles(|v| self.buffer.push_back(v));
}
}
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/lib.rs | src/lib.rs | //! `Genmesh`'s is a library that offers ways to generate and manipulate vertex streams.
//!
//! The core problem that this library solves is to find a nice way to build meshes that
//! does not just result in throwing all the vertices and indices into a `Vec<T>` and
//! calling it done. While doing so is simple from a library writers point of view, the
//! consumer will often have to translate that buffer to the format that they need before
//! it can be used. This produces needless buffering that can be avoided.
//!
//! `Genmesh`'s solution is to utilize the `Iterator` trait to build a vertex processing
//! pipeline. The `Iterator` trait has a number of useful functions like `zip`, `map` and
//! `collect` that are useful in themselves. `Genmesh` includes a number of traits that
//! can be used with the built in `Iterator` traits to build the meshes that your engine
//! needs.
#![deny(missing_docs)]
#![allow(clippy::many_single_char_names)]
pub use poly::{
EmitLines, EmitVertices, Line, Lines, LinesIterator, MapToVertices, MapToVerticesIter,
MapVertex, Polygon, Quad, Triangle, Vertices, VerticesIterator,
};
pub use triangulate::{EmitTriangles, Triangulate, TriangulateIterator};
pub use indexer::{Indexer, LruIndexer};
pub use neighbors::Neighbors;
mod math;
mod generator;
mod indexer;
mod neighbors;
mod poly;
mod triangulate;
mod circle;
mod cone;
mod cube;
mod cylinder;
mod icosphere;
mod plane;
mod sphere;
mod torus;
/// A collection of utilties that can be used to build
/// meshes programmatically.
pub mod generators {
pub use super::circle::Circle;
pub use super::cone::Cone;
pub use super::cube::Cube;
pub use super::cylinder::Cylinder;
pub use super::generator::{
IndexedPolygon, IndexedPolygonIterator, SharedVertex, SharedVertexIterator,
};
pub use super::icosphere::IcoSphere;
pub use super::plane::Plane;
pub use super::sphere::SphereUv;
pub use super::torus::Torus;
}
/// Common vertex position type.
pub type Position = mint::Vector3<f32>;
/// Common vertex normal type.
pub type Normal = mint::Vector3<f32>;
/// Common vertex type.
#[derive(Clone, Copy, Debug, PartialEq)]
pub struct Vertex {
/// Vertex position
pub pos: Position,
/// Vertex normal
pub normal: Normal,
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/math.rs | src/math.rs | use std::ops;
#[derive(Copy, Clone, Debug)]
pub struct Vector3 {
x: f32,
y: f32,
z: f32,
}
impl Vector3 {
#[inline]
pub fn new(x: f32, y: f32, z: f32) -> Self {
Vector3 { x, y, z }
}
#[inline]
pub fn magnitude_squared(self) -> f32 {
self.x * self.x + self.y * self.y + self.z * self.z
}
#[inline]
pub fn magnitude(self) -> f32 {
self.magnitude_squared().sqrt()
}
#[inline]
pub fn normalized(self) -> Vector3 {
let mag = self.magnitude();
Vector3::new(self.x / mag, self.y / mag, self.z / mag)
}
#[inline]
pub fn cross(self, Vector3 { x, y, z }: Vector3) -> Vector3 {
Vector3::new(
self.x.mul_add(z, -self.z * y),
self.y.mul_add(x, -self.x * z),
self.z.mul_add(y, -self.y * x),
)
}
}
impl From<Vector3> for [f32; 3] {
#[inline]
fn from(Vector3 { x, y, z }: Vector3) -> Self {
[x, y, z]
}
}
impl From<mint::Vector3<f32>> for Vector3 {
#[inline]
fn from(mint::Vector3 { x, y, z }: mint::Vector3<f32>) -> Self {
Vector3 { x, y, z }
}
}
impl From<Vector3> for mint::Vector3<f32> {
#[inline]
fn from(Vector3 { x, y, z }: Vector3) -> Self {
mint::Vector3 { x, y, z }
}
}
impl ops::Sub for Vector3 {
type Output = Self;
#[inline]
fn sub(self, Vector3 { x, y, z }: Self) -> Self::Output {
Vector3::new(self.x - x, self.y - y, self.z - z)
}
}
impl ops::AddAssign for Vector3 {
fn add_assign(&mut self, Vector3 { x, y, z }: Self) {
self.x += x;
self.y += y;
self.z += z;
}
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/cone.rs | src/cone.rs | use std::f32::consts::{self, FRAC_1_SQRT_2};
use super::generators::{IndexedPolygon, SharedVertex};
use super::{MapVertex, Triangle, Vertex};
const TWO_PI: f32 = consts::PI * 2.;
#[derive(Debug)]
enum VertexSection {
Tip(usize),
TopRadius(usize),
BottomRadius(usize),
BottomCenter,
}
/// The `Cone` mesh will create a mesh that goes from 1 to -1.
/// The bottom will be a circle around [0, 0, -1] with a radius
/// of 1, all coords on the bottom will follow the plane equation `-z-1=0`.
/// The tip of the cone will always be at coord [0, 0, 1].
pub struct Cone {
u: usize,
sub_u: usize,
}
impl Cone {
/// Creates a new cone.
///
/// # Arguments
///
/// - `u` is the number of subdivisions around the radius of the cone,
/// it must be at least 2
///
/// # Panics
///
/// This function panics if `u` is less than 2.
pub fn new(u: usize) -> Self {
assert!(u >= 2);
Cone { u: 0, sub_u: u }
}
fn vertex(&self, sec: VertexSection) -> Vertex {
let divisions = TWO_PI / self.sub_u as f32;
match sec {
VertexSection::Tip(i) => {
// the normal is in the middle of the two divisions
// so we add half a subdivision
let pos = divisions * i as f32 + divisions / 2.;
Vertex {
pos: [0., 0., 1.].into(),
normal: [
pos.cos() * FRAC_1_SQRT_2,
pos.sin() * FRAC_1_SQRT_2,
-FRAC_1_SQRT_2,
]
.into(),
}
}
VertexSection::TopRadius(i) => {
let pos = divisions * i as f32;
Vertex {
pos: [pos.cos(), pos.sin(), -1.].into(),
normal: [
pos.cos() * FRAC_1_SQRT_2,
pos.sin() * FRAC_1_SQRT_2,
-FRAC_1_SQRT_2,
]
.into(),
}
}
VertexSection::BottomRadius(i) => {
let pos = divisions * i as f32;
Vertex {
pos: [pos.cos(), pos.sin(), -1.].into(),
normal: [0., 0., -1.].into(),
}
}
VertexSection::BottomCenter => Vertex {
pos: [0., 0., -1.].into(),
normal: [0., 0., -1.].into(),
},
}
}
fn index(&self, sec: VertexSection) -> usize {
match sec {
VertexSection::Tip(i) => i,
VertexSection::TopRadius(i) => i + self.sub_u,
VertexSection::BottomRadius(i) => i + self.sub_u * 2,
VertexSection::BottomCenter => self.sub_u * 3,
}
}
fn rev_index(&self, idx: usize) -> VertexSection {
if idx < self.sub_u {
VertexSection::Tip(idx)
} else if idx < self.sub_u * 2 {
VertexSection::TopRadius(idx - self.sub_u)
} else if idx < self.sub_u * 3 {
VertexSection::BottomRadius(idx - self.sub_u * 2)
} else {
VertexSection::BottomCenter
}
}
}
impl Iterator for Cone {
type Item = Triangle<Vertex>;
fn size_hint(&self) -> (usize, Option<usize>) {
(self.len(), Some(self.len()))
}
fn next(&mut self) -> Option<Self::Item> {
if self.u < self.sub_u * 2 {
let idx = self.u;
self.u += 1;
Some(
self.indexed_polygon(idx)
.map_vertex(|i| self.shared_vertex(i)),
)
} else {
None
}
}
}
impl ExactSizeIterator for Cone {
fn len(&self) -> usize {
self.sub_u * 2 - self.u
}
}
impl SharedVertex<Vertex> for Cone {
fn shared_vertex(&self, idx: usize) -> Vertex {
self.vertex(self.rev_index(idx))
}
fn shared_vertex_count(&self) -> usize {
// a unique vertex for every subdivide at the top
// a unique vertex for every radius, top
// a unique vertex for every radius, bottom
// one for the bottom most vertex
self.sub_u * 3 + 1
}
}
impl IndexedPolygon<Triangle<usize>> for Cone {
fn indexed_polygon(&self, idx: usize) -> Triangle<usize> {
// top
if idx < self.sub_u {
let next = if idx != self.sub_u - 1 { idx + 1 } else { 0 };
Triangle::new(
self.index(VertexSection::Tip(idx)),
self.index(VertexSection::TopRadius(idx)),
self.index(VertexSection::TopRadius(next)),
)
// bottom
} else {
let idx = idx - self.sub_u;
let next = if idx != self.sub_u - 1 { idx + 1 } else { 0 };
Triangle::new(
self.index(VertexSection::BottomCenter),
self.index(VertexSection::BottomRadius(next)),
self.index(VertexSection::BottomRadius(idx)),
)
}
}
fn indexed_polygon_count(&self) -> usize {
// a face for every subdivide on the top, and one for every
// subdivide around the bottom circle.
self.sub_u * 2
}
}
#[test]
fn test_cone_len() {
let mut cone = Cone::new(5);
assert_eq!(10, cone.len());
cone.next();
assert_eq!(9, cone.len());
assert_eq!(9, cone.count());
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/poly.rs | src/poly.rs | use std::collections::VecDeque;
use std::marker::PhantomData;
/// A polygon with 4 points. Maps to `GL_QUADS`.
#[derive(Clone, Debug, PartialEq, Eq, Copy)]
pub struct Quad<T> {
/// The first point of the quad
pub x: T,
/// The second point of the quad
pub y: T,
/// The third point of the quad
pub z: T,
/// The fourth point of the quad
pub w: T,
}
impl<T> Quad<T> {
/// Create a new `Quad` with the supplied vertices.
pub fn new(v0: T, v1: T, v2: T, v3: T) -> Self {
Quad {
x: v0,
y: v1,
z: v2,
w: v3,
}
}
}
/// A polygon with 3 points. Maps to `GL_TRIANGLE`.
#[derive(Clone, Debug, PartialEq, Eq, Copy)]
pub struct Triangle<T> {
/// the first point of the triangle
pub x: T,
/// the second point of the triangle
pub y: T,
/// the third point of the triangle
pub z: T,
}
impl<T> Triangle<T> {
/// Create a new `Triangle` with the supplied vertices.
pub fn new(v0: T, v1: T, v2: T) -> Self {
Triangle {
x: v0,
y: v1,
z: v2,
}
}
}
/// This is All-the-types container. This exists since some generators
/// produce both [`Triangles`] and [`Quads`].
///
/// [`Triangles`]: struct.Triangle.html
/// [`Quads`]: struct.Quad.html
#[derive(Debug, Clone, PartialEq, Copy)]
pub enum Polygon<T> {
/// A wrapped triangle
PolyTri(Triangle<T>),
/// A wrapped quad
PolyQuad(Quad<T>),
}
/// The core mechanism of the [`Vertices`] trait. This is a mechanism for unwrapping
/// a polygon extracting all of the vertices that it bound together.
///
/// [`Vertices`]: trait.Vertices.html
pub trait EmitVertices<T> {
/// Consume a [`Polygon`], each vertex is emitted to the parent function by
/// calling the supplied lambda function.
///
/// [`Polygon`]: enum.Polygon.html
fn emit_vertices<F>(self, f: F)
where
F: FnMut(T);
}
impl<T> EmitVertices<T> for Line<T> {
fn emit_vertices<F>(self, mut emit: F)
where
F: FnMut(T),
{
let Line { x, y } = self;
emit(x);
emit(y);
}
}
impl<T> EmitVertices<T> for Triangle<T> {
fn emit_vertices<F>(self, mut emit: F)
where
F: FnMut(T),
{
let Triangle { x, y, z } = self;
emit(x);
emit(y);
emit(z);
}
}
impl<T> EmitVertices<T> for Quad<T> {
fn emit_vertices<F>(self, mut emit: F)
where
F: FnMut(T),
{
let Quad { x, y, z, w } = self;
emit(x);
emit(y);
emit(z);
emit(w);
}
}
impl<T> EmitVertices<T> for Polygon<T> {
fn emit_vertices<F>(self, emit: F)
where
F: FnMut(T),
{
use self::Polygon::{PolyQuad, PolyTri};
match self {
PolyTri(p) => p.emit_vertices(emit),
PolyQuad(p) => p.emit_vertices(emit),
}
}
}
/// Supplies a way to convert an [`Iterator`] of [`polygons`] to an [`Iterator`]
/// of vertices. Useful for when you need to write the vertices into
/// a graphics pipeline.
///
/// [`polygons`]: enum.Polygon.html
pub trait Vertices<SRC, V> {
/// Convert a polygon [`Iterator`] to a vertices [`Iterator`].
fn vertices(self) -> VerticesIterator<SRC, V>;
}
impl<V, P: EmitVertices<V>, T: Iterator<Item = P>> Vertices<T, V> for T {
fn vertices(self) -> VerticesIterator<T, V> {
VerticesIterator {
source: self,
buffer: VecDeque::new(),
}
}
}
/// An [`Iterator`] that breaks a [`Polygon`] down into its individual
/// vertices.
///
/// This `struct` is created by the [`vertices`] method on [`Vertices`].
///
/// [`Polygon`]: enum.Polygon.html
/// [`Vertices`]: trait.Vertices.html
/// [`vertices`]: trait.Vertices.html#method.vertices
pub struct VerticesIterator<SRC, V> {
source: SRC,
buffer: VecDeque<V>,
}
impl<V, U: EmitVertices<V>, SRC: Iterator<Item = U>> Iterator for VerticesIterator<SRC, V> {
type Item = V;
fn next(&mut self) -> Option<V> {
loop {
if let v @ Some(_) = self.buffer.pop_front() {
break v;
}
self.source
.next()?
.emit_vertices(|v| self.buffer.push_back(v));
}
}
}
/// Equivalent of `map` but per-vertex.
pub trait MapVertex<T, U> {
/// `Output` should be a container of the same shape of the type.
/// It's internal values should reflect any transformation the map did.
type Output;
/// Map a function to each vertex in a [`Polygon`] creating a new [`Polygon`].
///
/// [`Polygon`]: enum.Polygon.html
fn map_vertex<F>(self, f: F) -> Self::Output
where
F: FnMut(T) -> U;
}
impl<T: Clone, U> MapVertex<T, U> for Line<T> {
type Output = Line<U>;
fn map_vertex<F>(self, mut map: F) -> Line<U>
where
F: FnMut(T) -> U,
{
let Line { x, y } = self;
Line {
x: map(x),
y: map(y),
}
}
}
impl<T: Clone, U> MapVertex<T, U> for Triangle<T> {
type Output = Triangle<U>;
fn map_vertex<F>(self, mut map: F) -> Triangle<U>
where
F: FnMut(T) -> U,
{
let Triangle { x, y, z } = self;
Triangle {
x: map(x),
y: map(y),
z: map(z),
}
}
}
impl<T: Clone, U> MapVertex<T, U> for Quad<T> {
type Output = Quad<U>;
fn map_vertex<F>(self, mut map: F) -> Quad<U>
where
F: FnMut(T) -> U,
{
let Quad { x, y, z, w } = self;
Quad {
x: map(x),
y: map(y),
z: map(z),
w: map(w),
}
}
}
impl<T: Clone, U> MapVertex<T, U> for Polygon<T> {
type Output = Polygon<U>;
fn map_vertex<F>(self, map: F) -> Polygon<U>
where
F: FnMut(T) -> U,
{
use self::Polygon::{PolyQuad, PolyTri};
match self {
PolyTri(p) => PolyTri(p.map_vertex(map)),
PolyQuad(p) => PolyQuad(p.map_vertex(map)),
}
}
}
/// This acts very similar to a vertex shader. It gives a way to manipulate
/// and modify the vertices in a [`Polygon`]. This is useful if you need to
/// scale the mesh using a matrix multiply, or just for modifying the type of
/// each vertex.
///
/// [`Polygon`]: enum.Polygon.html
pub trait MapToVertices<T, U>: Sized {
/// `Output` should be a a container of the same shape of the type.
/// It's internal values should reflect any transformation the map did.
type Output;
/// Produces an [`Iterator`] of mapped polygons from an [`Iterator`] of polygons.
/// Each vertex in the process is modified with the supplied function.
fn vertex<F>(self, map: F) -> MapToVerticesIter<Self, T, U, F>
where
F: FnMut(T) -> U;
}
impl<VIn, VOut, P, POut: MapVertex<VIn, VOut, Output = P>, T: Iterator<Item = POut>>
MapToVertices<VIn, VOut> for T
{
type Output = P;
fn vertex<F>(self, map: F) -> MapToVerticesIter<T, VIn, VOut, F>
where
F: FnMut(VIn) -> VOut,
{
MapToVerticesIter {
src: self,
f: map,
phantom: PhantomData,
}
}
}
/// An [`Iterator`] that maps vertices with a given function.
///
/// This `struct` is created by the [`vertex`] method on [`MapToVertices`].
///
/// [`vertex`]: trait.MapToVertices.html#method.vertex
/// [`MapToVertices`]: trait.MapToVertices.html
pub struct MapToVerticesIter<SRC, T, U, F: FnMut(T) -> U> {
src: SRC,
f: F,
phantom: PhantomData<(T, U)>,
}
impl<
'a,
P,
POut: MapVertex<T, U, Output = P>,
SRC: Iterator<Item = POut>,
T,
U,
F: FnMut(T) -> U,
> Iterator for MapToVerticesIter<SRC, T, U, F>
{
type Item = P;
fn size_hint(&self) -> (usize, Option<usize>) {
self.src.size_hint()
}
fn next(&mut self) -> Option<P> {
self.src.next().map(|x| x.map_vertex(|x| (self.f)(x)))
}
}
/// Represents a line.
#[derive(Clone, Debug, PartialEq, Eq, Copy, Hash)]
pub struct Line<T> {
/// The first point
pub x: T,
/// The second point
pub y: T,
}
impl<T> Line<T> {
/// Create a new line using point x and y.
pub fn new(x: T, y: T) -> Self {
Line { x, y }
}
}
/// Convert a [`Polygon`] into it's fragments.
///
/// [`Polygon`]: enum.Polygon.html
pub trait EmitLines {
/// The Vertex defines the corners of a [`Polygon`].
///
/// [`Polygon`]: enum.Polygon.html
type Vertex;
/// Convert a polygon into lines, each [`Line`] is emitted via
/// calling of the callback of `emit`. This allows for
/// a variable amount of lines to be returned.
///
/// [`Polygon`]: enum.Polygon.html
/// [`Line`]: struct.Line.html
fn emit_lines<E>(self, emit: E)
where
E: FnMut(Line<Self::Vertex>);
}
impl<T: Clone> EmitLines for Triangle<T> {
type Vertex = T;
fn emit_lines<E>(self, mut emit: E)
where
E: FnMut(Line<T>),
{
emit(Line::new(self.x.clone(), self.y.clone()));
emit(Line::new(self.y, self.z.clone()));
emit(Line::new(self.z, self.x));
}
}
impl<T: Clone> EmitLines for Quad<T> {
type Vertex = T;
fn emit_lines<E>(self, mut emit: E)
where
E: FnMut(Line<T>),
{
emit(Line::new(self.x.clone(), self.y.clone()));
emit(Line::new(self.y, self.z.clone()));
emit(Line::new(self.z, self.w.clone()));
emit(Line::new(self.w, self.x));
}
}
impl<T: Clone> EmitLines for Polygon<T> {
type Vertex = T;
fn emit_lines<E>(self, emit: E)
where
E: FnMut(Line<T>),
{
match self {
Polygon::PolyTri(x) => x.emit_lines(emit),
Polygon::PolyQuad(x) => x.emit_lines(emit),
}
}
}
/// Supplies a way to convert an [`Iterator`] of [`polygons`] into an
/// [`Iterator`] over their lines.
///
/// [`polygons`]: enum.Polygon.html
pub trait Lines: Sized {
    /// The type of each point in the lines.
    type Vertex;
    /// Convert the [`Iterator`] into a [`LinesIterator`].
    ///
    /// [`LinesIterator`]: struct.LinesIterator.html
    fn lines(self) -> LinesIterator<Self, Self::Vertex>;
}
// Blanket impl: any iterator over polygons (anything implementing
// `EmitLines`) gains the `.lines()` adaptor.
impl<T, P, V> Lines for T
where
    T: Iterator<Item = P>,
    P: EmitLines<Vertex = V>,
{
    type Vertex = V;
    fn lines(self) -> LinesIterator<T, V> {
        LinesIterator {
            source: self,
            buffer: VecDeque::new(),
        }
    }
}
/// An [`Iterator`] that turns polygons into an [`Iterator`] of lines.
///
/// This `struct` is created by the [`lines`] method on [`Lines`].
///
/// [`lines`]: trait.Lines.html#method.lines
/// [`Lines`]: trait.Lines.html
pub struct LinesIterator<I, V> {
    // Upstream polygon iterator.
    source: I,
    // Lines emitted by the most recent polygon, not yet yielded.
    buffer: VecDeque<Line<V>>,
}
impl<I, P, V> Iterator for LinesIterator<I, V>
where
    I: Iterator<Item = P>,
    P: EmitLines<Vertex = V>,
{
    type Item = Line<V>;
    fn next(&mut self) -> Option<Line<V>> {
        loop {
            // Drain previously buffered lines first.
            if let Some(line) = self.buffer.pop_front() {
                return Some(line);
            }
            // Refill from the next polygon; `?` ends iteration once the
            // source runs dry.
            self.source.next()?.emit_lines(|l| self.buffer.push_back(l));
        }
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        // Each remaining polygon yields at least one line, but the exact
        // count per polygon is unknown up front.
        let (lower, _) = self.source.size_hint();
        (lower, None)
    }
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/icosphere.rs | src/icosphere.rs | //! Icosahedral sphere
use std::collections::HashMap;
use crate::generators::{IndexedPolygon, SharedVertex};
use crate::{math::Vector3, Triangle, Vertex};
/// Icosahedral sphere with radius 1, centered at (0., 0., 0.).
#[derive(Clone, Debug)]
pub struct IcoSphere {
    // Iteration cursor: index of the next face to yield.
    i: usize,
    // Unit-length vertex positions (position doubles as normal).
    vertices: Vec<[f32; 3]>,
    // Triangles as triples of indices into `vertices`.
    faces: Vec<[usize; 3]>,
}
// The vertices of a regular icosahedron can be visualised as lying at the corner points of 3
// orthogonal rectangles in 3D space.
// https://en.wikipedia.org/wiki/Regular_icosahedron#/media/File:Icosahedron-golden-rectangles.svg
// for a visualisation of this.
// These rectangles are all the same size, and are golden rectangles, which means their sides have
// the golden ratio: 1 : (1 + sqrt(5)) / 2. If we take those values directly however, we will not
// get a unit sphere, therefore we need to normalize the vector (0, 1, (1 + sqrt(5)) / 2). This
// gives us the values below. These values are the half dimensions of the orthogonal rectangles
// from which we get the corner points that define a unit icosahedral sphere.
// Long half-side of the golden rectangles: phi / sqrt(1 + phi^2).
const T: f32 = 0.85065080835204;
// Short half-side of the golden rectangles: 1 / sqrt(1 + phi^2).
const X: f32 = 0.5257311121191336;
// The 12 corner points of the three orthogonal golden rectangles, i.e. the
// vertices of a unit icosahedron.
const VERTICES: [[f32; 3]; 12] = [
    // corners of the rectangle in the XY plane
    [-X, T, 0.],
    [X, T, 0.],
    [-X, -T, 0.],
    [X, -T, 0.],
    // corners of the rectangle in the YZ plane
    [0., -X, T],
    [0., X, T],
    [0., -X, -T],
    [0., X, -T],
    // corners of the rectangle in the XZ plane
    [T, 0., -X],
    [T, 0., X],
    [-T, 0., -X],
    [-T, 0., X],
];
// The 20 triangular faces of the icosahedron, as indices into `VERTICES`.
const FACES: [[usize; 3]; 20] = [
    // 5 faces around point 0
    [0, 11, 5],
    [0, 5, 1],
    [0, 1, 7],
    [0, 7, 10],
    [0, 10, 11],
    // 5 faces adjacent to the faces around point 0
    [1, 5, 9],
    [5, 11, 4],
    [11, 10, 2],
    [10, 7, 6],
    [7, 1, 8],
    // 5 faces around point 3
    [3, 9, 4],
    [3, 4, 2],
    [3, 2, 6],
    [3, 6, 8],
    [3, 8, 9],
    // 5 faces adjacent to the faces around point 3
    [4, 9, 5],
    [2, 4, 11],
    [6, 2, 10],
    [8, 6, 7],
    [9, 8, 1],
];
impl IcoSphere {
    /// Creates a unit sphere with 20 faces and 12 vertices.
    pub fn new() -> Self {
        Self {
            i: 0,
            vertices: VERTICES.to_vec(),
            faces: FACES.to_vec(),
        }
    }
    /// Create a unit sphere with subdivision, resulting in 20 * 4^N faces,
    /// where N is the number of subdivisions.
    ///
    /// # Arguments
    ///
    /// - `subdivides` is the number of subdivisions to perform
    pub fn subdivide(subdivides: usize) -> Self {
        // Repeatedly split every triangle into four, re-projecting the new
        // edge midpoints back onto the unit sphere.
        let (vertices, faces) = (0..subdivides).fold(
            (VERTICES.to_vec(), FACES.to_vec()),
            |(vertices, faces), _| subdivide_impl(vertices, faces),
        );
        Self { i: 0, vertices, faces }
    }
    /// Vertex at `index`; on the unit sphere position and normal coincide.
    fn vert(&self, index: usize) -> Vertex {
        let p = self.vertices[index];
        Vertex {
            pos: p.into(),
            normal: p.into(),
        }
    }
}
impl Default for IcoSphere {
    /// Equivalent to [`IcoSphere::new`]: an unsubdivided icosahedron.
    fn default() -> Self {
        Self::new()
    }
}
/// Split every triangle in `faces` into four, creating (or reusing) one new
/// unit-length vertex at the midpoint of each edge.
fn subdivide_impl(
    mut vertices: Vec<[f32; 3]>,
    faces: Vec<[usize; 3]>,
) -> (Vec<[f32; 3]>, Vec<[usize; 3]>) {
    // Maps an edge (ordered vertex pair) to the index of its midpoint vertex,
    // so adjacent faces share the same midpoint.
    let mut lookup = HashMap::<(usize, usize), usize>::default();
    let mut new_faces = Vec::<[usize; 3]>::default();
    for face in &faces {
        let mut mid = [0usize; 3];
        for (i, m) in mid.iter_mut().enumerate() {
            let pair = (face[i], face[(i + 1) % 3]);
            // Reuse the midpoint if this edge was already split, otherwise
            // create it and register both edge directions.
            *m = match lookup.get(&pair) {
                Some(&existing) => existing,
                None => {
                    let idx = vertices.len();
                    lookup.insert(pair, idx);
                    lookup.insert((pair.1, pair.0), idx);
                    let p = new_point(vertices[pair.0], vertices[pair.1]);
                    vertices.push(p);
                    idx
                }
            };
        }
        // One corner triangle per original vertex, plus the central triangle.
        new_faces.push([face[0], mid[0], mid[2]]);
        new_faces.push([face[1], mid[1], mid[0]]);
        new_faces.push([face[2], mid[2], mid[1]]);
        new_faces.push([mid[0], mid[1], mid[2]]);
    }
    (vertices, new_faces)
}
// Midpoint of `start` and `end`, pushed out to the unit sphere: normalizing
// the component-wise sum is equivalent to normalizing the average.
fn new_point(start: [f32; 3], end: [f32; 3]) -> [f32; 3] {
    Vector3::new(start[0] + end[0], start[1] + end[1], start[2] + end[2])
        .normalized()
        .into()
}
impl Iterator for IcoSphere {
    type Item = Triangle<Vertex>;
    fn next(&mut self) -> Option<Self::Item> {
        // `get` returns `None` once every face has been yielded; the cursor
        // is only advanced when a face is actually produced.
        let face = *self.faces.get(self.i)?;
        self.i += 1;
        Some(Triangle::new(
            self.vert(face[0]),
            self.vert(face[1]),
            self.vert(face[2]),
        ))
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.len(), Some(self.len()))
    }
}
impl ExactSizeIterator for IcoSphere {
    // `i` never exceeds `faces.len()`, so this subtraction cannot underflow.
    fn len(&self) -> usize {
        self.faces.len() - self.i
    }
}
impl SharedVertex<Vertex> for IcoSphere {
    fn shared_vertex_count(&self) -> usize {
        self.vertices.len()
    }
    // Vertices are genuinely shared here (smooth normals), unlike e.g. `Cube`.
    fn shared_vertex(&self, idx: usize) -> Vertex {
        self.vert(idx)
    }
}
impl IndexedPolygon<Triangle<usize>> for IcoSphere {
    fn indexed_polygon_count(&self) -> usize {
        self.faces.len()
    }
    /// Face `idx` as a triangle of indices into the shared vertex list.
    fn indexed_polygon(&self, idx: usize) -> Triangle<usize> {
        let [a, b, c] = self.faces[idx];
        Triangle::new(a, b, c)
    }
}
#[test]
fn test_icosphere_len() {
    // `len` must track consumption: 20 faces, one consumed leaves 19.
    let mut ico = IcoSphere::new();
    assert_eq!(20, ico.len());
    ico.next();
    assert_eq!(19, ico.len());
    assert_eq!(19, ico.count());
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/neighbors.rs | src/neighbors.rs | //! This is a utility to search out and work in the mesh as a whole rather
//! then polygon by polygon.
use std::collections::{HashMap, HashSet};
use crate::poly::{EmitLines, Line, Triangle};
use crate::{math::Vector3, Normal};
/// Neighbors search accelerating structure.
pub struct Neighbors<T> {
    /// Mesh vertices.
    pub vertices: Vec<T>,
    /// Mesh polygons.
    pub polygons: Vec<Triangle<usize>>,
    // Directed edge (as emitted by `EmitLines`) -> indices of polygons that
    // emitted it.
    shares_edge: HashMap<Line<usize>, Vec<usize>>,
    // Vertex index -> indices of polygons touching it (with repeats; see `new`).
    shares_vertex: HashMap<usize, Vec<usize>>,
}
impl<T> Neighbors<T> {
    /// Builds a Neighbors search based on the supplied vertices
    /// and supplied triangle list.
    pub fn new(vertices: Vec<T>, polygons: Vec<Triangle<usize>>) -> Self {
        let mut shares_edge = HashMap::new();
        let mut shares_vertex = HashMap::new();
        for (i, p) in polygons.iter().enumerate() {
            // Each vertex of a triangle occurs in two of its emitted edges
            // (once as `x`, once as `y`), so each polygon index is pushed
            // twice per vertex into `shares_vertex`.
            p.clone().emit_lines(|line| {
                shares_vertex.entry(line.x).or_insert_with(Vec::new).push(i);
                shares_vertex.entry(line.y).or_insert_with(Vec::new).push(i);
                shares_edge.entry(line).or_insert_with(Vec::new).push(i);
            });
        }
        Neighbors {
            vertices,
            polygons,
            shares_edge,
            shares_vertex,
        }
    }
    /// Returns the vector and triangle list used to create the Neighbors.
    pub fn split(self) -> (Vec<T>, Vec<Triangle<usize>>) {
        (self.vertices, self.polygons)
    }
    /// Looks up the index of every polygon that contains
    /// vertex `t`, this can be used to calculate new faces.
    ///
    /// NOTE(review): the returned slice contains each adjacent polygon index
    /// twice (see `new`); dedup if uniqueness matters — confirm intended.
    pub fn vertex_neighbors(&self, t: &usize) -> Option<&[usize]> {
        self.shares_vertex.get(t).map(|x| &x[..])
    }
    /// Looks up the index of every [`Polygon`] that is a neighbor of the
    /// [`Polygon`] at index `i`. This can be used to prep data for a Geometry
    /// shader (eg triangle_adjacency).
    ///
    /// [`Polygon`]: enum.Polygon.html
    pub fn polygon_neighbors(&self, i: usize) -> Option<HashSet<usize>> {
        self.polygons.get(i).map(|x| {
            let mut v = HashSet::new();
            // NOTE(review): the lookup uses each edge exactly as emitted
            // (directed); with consistent winding a shared edge appears
            // reversed in the neighboring face — confirm both directions are
            // present for the meshes of interest.
            x.clone().emit_lines(|line| {
                if let Some(x) = self.shares_edge.get(&line) {
                    for &i in x {
                        v.insert(i);
                    }
                }
            });
            // A polygon is not its own neighbor.
            v.remove(&i);
            v
        })
    }
    /// Calculate the normal for a face. This is a `flat` shading.
    ///
    /// You must supply a function that can be used to lookup
    /// the position which is needed to calculate the normal.
    pub fn normal_for_face<F>(&self, i: usize, mut f: F) -> Normal
    where
        F: FnMut(&T) -> Normal,
    {
        let Triangle { x, y, z } = self.polygons[i];
        let x = Vector3::from(f(&self.vertices[x]));
        let y = Vector3::from(f(&self.vertices[y]));
        let z = Vector3::from(f(&self.vertices[z]));
        // Normal is the normalized cross product of two edge vectors.
        let a = z - x;
        let b = z - y;
        a.cross(b).normalized().into()
    }
    /// Calculate the normal for a vertex based on the average
    /// of its neighbors. This is a `smooth` shading.
    ///
    /// You must supply a function that can be used to lookup
    /// the position which is needed to calculate the normal.
    ///
    /// Panics if vertex `i` has no recorded adjacent faces.
    pub fn normal_for_vertex<F>(&self, i: usize, mut f: F) -> Normal
    where
        F: FnMut(&T) -> Normal,
    {
        let mut normal = Vector3::new(0f32, 0., 0.);
        // Each adjacent face is accumulated twice (see `new`); the duplication
        // only scales the sum and is cancelled by the final normalization.
        for &face in &self.shares_vertex[&i] {
            normal += Vector3::from(self.normal_for_face(face, &mut f));
        }
        normal.normalized().into()
    }
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/plane.rs | src/plane.rs | use super::generators::{IndexedPolygon, SharedVertex};
use super::{Quad, Vertex};
/// Represents a 2D plane with origin of (0, 0), from 1 to -1.
#[derive(Clone, Copy)]
pub struct Plane {
    // Number of cells along the x axis.
    subdivide_x: usize,
    // Number of cells along the y axis.
    subdivide_y: usize,
    // Iteration cursor: current cell column.
    x: usize,
    // Iteration cursor: current cell row.
    y: usize,
}
impl Plane {
    /// Creates a new plane.
    pub fn new() -> Plane {
        Plane {
            subdivide_x: 1,
            subdivide_y: 1,
            x: 0,
            y: 0,
        }
    }
    /// Creates a subdivided plane. This can be used to build
    /// a grid of points.
    ///
    /// # Arguments
    ///
    /// - `x` is the number of subdivisions in the x axis, must be at least 1
    /// - `y` is the number of subdivisions in the y axis, must be at least 1
    ///
    /// # Panics
    ///
    /// This function panics if either `x` or `y` is zero.
    pub fn subdivide(x: usize, y: usize) -> Plane {
        assert!(x > 0 && y > 0);
        Plane {
            subdivide_x: x,
            subdivide_y: y,
            x: 0,
            y: 0,
        }
    }
    // Grid corner (x, y) mapped linearly into [-1, 1]^2; the plane lies in
    // z = 0 with a +z normal.
    fn vert(&self, x: usize, y: usize) -> Vertex {
        let sx = self.subdivide_x as f32;
        let sy = self.subdivide_y as f32;
        let x = (2. / sx) * x as f32 - 1.;
        let y = (2. / sy) * y as f32 - 1.;
        Vertex {
            pos: [x, y, 0.0].into(),
            normal: [0., 0., 1.].into(),
        }
    }
}
impl Default for Plane {
    /// Equivalent to [`Plane::new`]: a single 2×2 quad.
    fn default() -> Self {
        Self::new()
    }
}
impl Iterator for Plane {
    type Item = Quad<Vertex>;
    fn next(&mut self) -> Option<Quad<Vertex>> {
        // Row-major traversal: when the current row is finished, move to the
        // next row, or stop once every row has been produced.
        if self.x == self.subdivide_x {
            self.y += 1;
            if self.y >= self.subdivide_y {
                return None;
            }
            self.x = 0;
        }
        // The four corners of the current grid cell:
        // (x, y), (x+1, y), (x+1, y+1), (x, y+1).
        let x = self.vert(self.x, self.y);
        let y = self.vert(self.x + 1, self.y);
        let z = self.vert(self.x + 1, self.y + 1);
        let w = self.vert(self.x, self.y + 1);
        self.x += 1;
        Some(Quad::new(x, y, z, w))
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.len(), Some(self.len()))
    }
}
impl ExactSizeIterator for Plane {
    /// Number of quads remaining in the iterator.
    fn len(&self) -> usize {
        // After exhaustion, `next` keeps incrementing `y` to (and past)
        // `subdivide_y`, so the unguarded formula `subdivide_y - y - 1` would
        // underflow `usize` (a panic in debug builds). Report 0 instead.
        if self.y >= self.subdivide_y {
            return 0;
        }
        // Full rows still to come, plus the remainder of the current row.
        (self.subdivide_y - self.y - 1) * self.subdivide_x + (self.subdivide_x - self.x)
    }
}
impl SharedVertex<Vertex> for Plane {
    // Row-major over the (subdivide_x + 1) × (subdivide_y + 1) grid corners.
    fn shared_vertex(&self, idx: usize) -> Vertex {
        let y = idx / (self.subdivide_x + 1);
        let x = idx % (self.subdivide_x + 1);
        self.vert(x, y)
    }
    fn shared_vertex_count(&self) -> usize {
        (self.subdivide_x + 1) * (self.subdivide_y + 1)
    }
}
impl IndexedPolygon<Quad<usize>> for Plane {
    fn indexed_polygon(&self, idx: usize) -> Quad<usize> {
        // Cell coordinates; vertex rows are one wider than cell rows, hence
        // the `subdivide_x + 1` stride.
        let y = idx / self.subdivide_x;
        let x = idx % self.subdivide_x;
        let base = y * (self.subdivide_x + 1) + x;
        // Corners in the same order `Iterator::next` produces them.
        Quad::new(
            base,
            base + 1,
            base + self.subdivide_x + 2,
            base + self.subdivide_x + 1,
        )
    }
    fn indexed_polygon_count(&self) -> usize {
        self.subdivide_x * self.subdivide_y
    }
}
#[test]
fn test_shared_vertex_count() {
    // An n×n subdivision has (n + 1)^2 corners and n^2 quads.
    let plane = Plane::new();
    assert_eq!(plane.shared_vertex_count(), 4);
    assert_eq!(plane.indexed_polygon_count(), 1);
    let plane = Plane::subdivide(2, 2);
    assert_eq!(plane.shared_vertex_count(), 9);
    assert_eq!(plane.indexed_polygon_count(), 4);
    let plane = Plane::subdivide(4, 4);
    assert_eq!(plane.shared_vertex_count(), 25);
    assert_eq!(plane.indexed_polygon_count(), 16);
}
#[test]
fn test_plane_len() {
    // `len` must track consumption.
    let mut plane = Plane::subdivide(2, 2);
    assert_eq!(4, plane.len());
    plane.next();
    assert_eq!(3, plane.len());
    assert_eq!(3, plane.count());
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/circle.rs | src/circle.rs | use std::f32::consts::PI;
use crate::generators::{IndexedPolygon, SharedVertex};
use crate::Polygon::{self, PolyTri};
use crate::{Triangle, Vertex};
/// Represents a circle in the XY plane with radius of 1, centered at (0, 0, 0)
#[derive(Clone, Copy)]
pub struct Circle {
    // Iteration cursor over fan segments; starts at 1 because vertex index 0
    // is reserved for the center point (see `vert`).
    u: usize,
    // Number of points (segments) around the rim.
    sub_u: usize,
}
impl Circle {
    /// Creates a new circle.
    ///
    /// # Arguments
    ///
    /// - `u` is the number of points around the circle, it must be at least 4
    ///
    /// # Panics
    ///
    /// This function panics if `u` is less than 4.
    pub fn new(u: usize) -> Self {
        assert!(u > 3);
        Circle { u: 1, sub_u: u }
    }
    // Vertex `u` of the fan: index 0 is the center; indices 1..=sub_u lie on
    // the rim at angle 2π·(u - 1)/sub_u. All normals point +z.
    fn vert(&self, u: usize) -> Vertex {
        if u == 0 {
            Vertex {
                pos: [0., 0., 0.].into(),
                normal: [0., 0., 1.].into(),
            }
        } else {
            let u = ((u - 1) as f32 / self.sub_u as f32) * PI * 2.;
            let p = [u.cos(), u.sin(), 0.];
            Vertex {
                pos: p.into(),
                normal: [0., 0., 1.].into(),
            }
        }
    }
}
impl Iterator for Circle {
    type Item = Polygon<Vertex>;
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.len(), Some(self.len()))
    }
    // Emits a triangle fan around the center vertex.
    fn next(&mut self) -> Option<Self::Item> {
        use std::cmp::Ordering;
        match self.u.cmp(&self.sub_u) {
            Ordering::Less => {
                self.u += 1;
                Some(PolyTri(Triangle::new(
                    self.vert(0),
                    self.vert(self.u - 1),
                    self.vert(self.u),
                )))
            }
            Ordering::Equal => {
                // Last segment: close the fan by wrapping back to rim vertex 1.
                self.u += 1;
                Some(PolyTri(Triangle::new(
                    self.vert(0),
                    self.vert(self.u - 1),
                    self.vert(1),
                )))
            }
            Ordering::Greater => None,
        }
    }
}
impl ExactSizeIterator for Circle {
    // `u` runs from 1 (fresh) to `sub_u + 1` (exhausted, never incremented
    // further), so this counts down from `sub_u` to 0 without underflowing.
    fn len(&self) -> usize {
        self.sub_u - self.u + 1
    }
}
impl SharedVertex<Vertex> for Circle {
    fn shared_vertex(&self, idx: usize) -> Vertex {
        self.vert(idx)
    }
    // Center point plus `sub_u` rim points.
    fn shared_vertex_count(&self) -> usize {
        self.sub_u + 1
    }
}
impl IndexedPolygon<Polygon<usize>> for Circle {
    fn indexed_polygon(&self, idx: usize) -> Polygon<usize> {
        if idx == self.sub_u - 1 {
            // Final fan segment wraps back to rim vertex 1.
            PolyTri(Triangle::new(0, self.sub_u, 1))
        } else {
            // In this branch idx + 2 <= sub_u, so the modulo never actually
            // wraps; it only guards the arithmetic.
            PolyTri(Triangle::new(
                0,
                (idx + 1) % (self.sub_u + 1),
                (idx + 2) % (self.sub_u + 1),
            ))
        }
    }
    fn indexed_polygon_count(&self) -> usize {
        self.sub_u
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    // Checks counts, the last rim vertex, and the fan's first/last triangles.
    #[test]
    #[allow(clippy::approx_constant)]
    fn test_circle() {
        let circle = Circle::new(8);
        assert_eq!((8, Some(8)), circle.size_hint());
        assert_eq!(9, circle.shared_vertex_count());
        assert_eq!(8, circle.indexed_polygon_count());
        assert_eq!(
            Some(&Vertex {
                pos: [0.707107, -0.70710653, 0.0].into(),
                normal: [0., 0., 1.].into()
            }),
            circle.shared_vertex_iter().collect::<Vec<_>>().last()
        );
        let polys = circle.indexed_polygon_iter().collect::<Vec<_>>();
        assert_eq!(PolyTri(Triangle { x: 0, y: 1, z: 2 }), polys[0]);
        assert_eq!(Some(&PolyTri(Triangle { x: 0, y: 8, z: 1 })), polys.last());
    }
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/generator.rs | src/generator.rs | use std::marker::PhantomData;
use std::ops::Range;
/// The `SharedVertex` trait is meant to be used with the [`IndexedPolygon`] trait.
/// This trait is meant as a way to calculate the shared vertices that are
/// required to build the implementor's mesh.
///
/// [`IndexedPolygon`]: trait.IndexedPolygon.html
pub trait SharedVertex<V>: Sized {
    /// Returns the shared vertex at offset `i`.
    fn shared_vertex(&self, i: usize) -> V;
    /// Returns the number of shared vertices required to represent the mesh.
    fn shared_vertex_count(&self) -> usize;
    /// Create an [`Iterator`] that returns each shared vertex that is required to
    /// build the mesh.
    fn shared_vertex_iter(&self) -> SharedVertexIterator<Self, V> {
        SharedVertexIterator {
            base: self,
            idx: 0..self.shared_vertex_count(),
            phantom_v: PhantomData,
        }
    }
}
/// An [`Iterator`] that yields the shared vertices of the mesh.
///
/// This `struct` is created by the [`shared_vertex_iter`] method on [`SharedVertex`].
///
/// [`shared_vertex_iter`]: trait.SharedVertex.html#method.shared_vertex_iter
/// [`SharedVertex`]: trait.SharedVertex.html
pub struct SharedVertexIterator<'a, T: 'a, V> {
    // Generator the vertices are pulled from.
    base: &'a T,
    // Remaining vertex indices.
    idx: Range<usize>,
    // Marks the vertex type `V` without storing one.
    phantom_v: PhantomData<V>,
}
impl<'a, T: SharedVertex<V>, V> Iterator for SharedVertexIterator<'a, T, V> {
    type Item = V;
    fn next(&mut self) -> Option<V> {
        // Cursor management is delegated to the index range.
        let i = self.idx.next()?;
        Some(self.base.shared_vertex(i))
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.idx.size_hint()
    }
}
impl<'a, T: SharedVertex<V>, V> ExactSizeIterator for SharedVertexIterator<'a, T, V> {
    // Exact length comes straight from the remaining index range.
    fn len(&self) -> usize {
        self.idx.len()
    }
}
/// The `IndexedPolygon` trait is used with the [`SharedVertex`] trait in order to build
/// a mesh. `IndexedPolygon` calculates each polygon face required to build an implementor's mesh.
/// Each face is always returned in indexed form that points to the correct vertices supplied
/// by the [`SharedVertex`] trait.
///
/// [`SharedVertex`]: trait.SharedVertex.html
pub trait IndexedPolygon<V>: Sized {
    /// Returns a polygon with indices to the shared vertex.
    fn indexed_polygon(&self, i: usize) -> V;
    /// Returns the number of polygons that are needed to represent this mesh.
    fn indexed_polygon_count(&self) -> usize;
    /// Creates an [`Iterator`] that will return a polygon for each face in the source mesh.
    fn indexed_polygon_iter(&self) -> IndexedPolygonIterator<Self, V> {
        IndexedPolygonIterator {
            base: self,
            idx: 0..self.indexed_polygon_count(),
            phantom_v: PhantomData,
        }
    }
}
/// An [`Iterator`] that yields the indices of the mesh
///
/// This `struct` is created by the [`indexed_polygon_iter`] method on [`IndexedPolygon`].
///
/// [`indexed_polygon_iter`]: trait.IndexedPolygon.html#method.indexed_polygon_iter
/// [`IndexedPolygon`]: trait.IndexedPolygon.html
pub struct IndexedPolygonIterator<'a, T: 'a, V> {
    // Generator the polygons are pulled from.
    base: &'a T,
    // Remaining polygon indices.
    idx: Range<usize>,
    // Marks the polygon type `V` without storing one.
    phantom_v: PhantomData<V>,
}
impl<'a, T: IndexedPolygon<V>, V> Iterator for IndexedPolygonIterator<'a, T, V> {
    type Item = V;
    fn next(&mut self) -> Option<V> {
        // Cursor management is delegated to the index range.
        let i = self.idx.next()?;
        Some(self.base.indexed_polygon(i))
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.idx.size_hint()
    }
}
impl<'a, T: IndexedPolygon<V>, V> ExactSizeIterator for IndexedPolygonIterator<'a, T, V> {
    // Exact length comes straight from the remaining index range.
    fn len(&self) -> usize {
        self.idx.len()
    }
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/cylinder.rs | src/cylinder.rs | use crate::generators::{IndexedPolygon, SharedVertex};
use crate::{Normal, Polygon, Position, Quad, Triangle, Vertex};
use std::f32::consts::PI;
/// Represents a cylinder with radius of 1, height of 2,
/// and centered at (0, 0, 0) pointing up (to 0, 0, 1).
#[derive(Clone, Copy)]
pub struct Cylinder {
    // Iteration cursor around the circumference.
    u: usize,
    // Iteration cursor over height bands: -1 is the bottom cap band,
    // 0..sub_h the side bands, sub_h the top cap band.
    h: isize,
    // Number of points around the circumference.
    sub_u: usize,
    // Number of height segments.
    sub_h: isize,
}
// Apex vertex at the center of the top cap (normal +z).
const TOP: Vertex = Vertex {
    pos: Position {
        x: 0.,
        y: 0.,
        z: 1.,
    },
    normal: Normal {
        x: 0.,
        y: 0.,
        z: 1.,
    },
};
// Apex vertex at the center of the bottom cap (normal -z).
const BOT: Vertex = Vertex {
    pos: Position {
        x: 0.,
        y: 0.,
        z: -1.,
    },
    normal: Normal {
        x: 0.,
        y: 0.,
        z: -1.,
    },
};
impl Cylinder {
    /// Creates a new cylinder.
    ///
    /// # Arguments
    ///
    /// - `u` is the number of points across the radius, it must be at least 2
    ///
    /// # Panics
    ///
    /// This function panics if `u` is less than 2.
    pub fn new(u: usize) -> Self {
        assert!(u > 1);
        Cylinder {
            u: 0,
            h: -1,
            sub_u: u,
            sub_h: 1,
        }
    }
    /// Creates a new subdivided cylinder.
    ///
    /// # Arguments
    ///
    /// - `u` is the number of points across the radius, it must be at least 2
    /// - `h` is the number of segments across the height, it must be non-zero
    ///
    /// # Panics
    ///
    /// This function panics if `u` is less than 2 or if `h` is 0.
    pub fn subdivide(u: usize, h: usize) -> Self {
        assert!(u > 1 && h > 0);
        Cylinder {
            u: 0,
            h: -1,
            sub_u: u,
            sub_h: h as isize,
        }
    }
    // Vertex at ring position `u` and height index `h`: `h == -1` duplicates
    // the bottom ring with a downward cap normal, `h == sub_h + 1` duplicates
    // the top ring with an upward cap normal, anything in between uses the
    // radial direction `n` as the normal.
    fn vert(&self, u: usize, h: isize) -> Vertex {
        debug_assert!(u <= self.sub_u);
        let a = (u as f32 / self.sub_u as f32) * PI * 2.;
        let n = [a.cos(), a.sin(), 0.];
        let (hc, normal) = if h < 0 {
            debug_assert_eq!(h, -1);
            (0, [0., 0., -1.])
        } else if h > self.sub_h {
            debug_assert_eq!(h, self.sub_h + 1);
            (self.sub_h, [0., 0., 1.])
        } else {
            (h, n)
        };
        // Height index mapped linearly into [-1, 1].
        let z = (hc as f32 / self.sub_h as f32) * 2. - 1.;
        Vertex {
            pos: [n[0], n[1], z].into(),
            normal: normal.into(),
        }
    }
}
impl Iterator for Cylinder {
    type Item = Polygon<Vertex>;
    fn next(&mut self) -> Option<Self::Item> {
        // Advance to the next band once the current ring is complete.
        if self.u == self.sub_u {
            if self.h >= self.sub_h {
                return None;
            }
            self.u = 0;
            self.h += 1;
        }
        let u = self.u;
        self.u += 1;
        // mathematically, reaching `u + 1 == sub_u` should trivially resolve,
        // because sin(2pi) == sin(0), but rounding errors go in the way.
        let u1 = self.u % self.sub_u;
        // Band h == -1: bottom cap triangles; band h == sub_h: top cap
        // triangles; everything in between: side quads.
        Some(if self.h < 0 {
            let x = self.vert(u, self.h);
            let y = self.vert(u1, self.h);
            Polygon::PolyTri(Triangle::new(x, BOT, y))
        } else if self.h == self.sub_h {
            let x = self.vert(u, self.h + 1);
            let y = self.vert(u1, self.h + 1);
            Polygon::PolyTri(Triangle::new(x, y, TOP))
        } else {
            let x = self.vert(u, self.h);
            let y = self.vert(u1, self.h);
            let z = self.vert(u1, self.h + 1);
            let w = self.vert(u, self.h + 1);
            Polygon::PolyQuad(Quad::new(x, y, z, w))
        })
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.len(), Some(self.len()))
    }
}
impl ExactSizeIterator for Cylinder {
    // `h` runs from -1 to `sub_h` inclusive, so `1 + sub_h - h` is the number
    // of bands not yet finished; subtract the polygons already produced in
    // the current band. Reaches exactly 0 when exhausted.
    fn len(&self) -> usize {
        self.sub_u * (1 + self.sub_h - self.h) as usize - self.u
    }
}
impl SharedVertex<Vertex> for Cylinder {
    // Layout: index 0 is the bottom apex, then `sub_h + 3` rings of `sub_u`
    // vertices (bottom cap ring, side rings h = 0..=sub_h, top cap ring),
    // and finally the top apex.
    fn shared_vertex(&self, idx: usize) -> Vertex {
        if idx == 0 {
            BOT
        } else if idx == self.shared_vertex_count() - 1 {
            TOP
        } else {
            // skip the bottom center
            let idx = idx - 1;
            let u = idx % self.sub_u;
            let h = (idx / self.sub_u) as isize - 1;
            self.vert(u, h)
        }
    }
    fn shared_vertex_count(&self) -> usize {
        (3 + self.sub_h) as usize * self.sub_u + 2
    }
}
impl IndexedPolygon<Polygon<usize>> for Cylinder {
    fn indexed_polygon(&self, idx: usize) -> Polygon<usize> {
        // Position within the band and the band's height index (-1 = bottom
        // cap band, sub_h = top cap band).
        let u = idx % self.sub_u;
        let u1 = (idx + 1) % self.sub_u;
        let h = (idx / self.sub_u) as isize - 1;
        // First shared-vertex index of this band's ring (apex at index 0).
        let base = 1 + idx - u;
        if h < 0 {
            let start = 0;
            Polygon::PolyTri(Triangle::new(base + u, start, base + u1))
        } else if h == self.sub_h {
            // We need to select the next vertex loop over, which
            // has the correct (upward cap) normals.
            let base = base + self.sub_u;
            let end = self.shared_vertex_count() - 1;
            Polygon::PolyTri(Triangle::new(base + u, base + u1, end))
        } else {
            Polygon::PolyQuad(Quad::new(
                base + u,
                base + u1,
                base + u1 + self.sub_u,
                base + u + self.sub_u,
            ))
        }
    }
    // One band of `sub_u` polygons per cap plus one per height segment.
    fn indexed_polygon_count(&self) -> usize {
        (2 + self.sub_h) as usize * self.sub_u
    }
}
#[test]
fn test_cylinder_len() {
    // 5 points, 1 height segment: 5 + 5 side + 5 = 15 polygons.
    let mut cylinder = Cylinder::new(5);
    assert_eq!(15, cylinder.len());
    cylinder.next();
    assert_eq!(14, cylinder.len());
    assert_eq!(14, cylinder.count());
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/cube.rs | src/cube.rs | use std::ops::Range;
use crate::generators::{IndexedPolygon, SharedVertex};
use crate::{MapVertex, Normal, Position, Quad, Vertex};
/// A perfect cube, centered at (0, 0, 0) with each face starting at 1/-1 away from the origin
#[derive(Clone)]
pub struct Cube {
    // Remaining face indices to yield (0..6).
    range: Range<usize>,
}
impl Cube {
    /// Creates a new cube.
    pub fn new() -> Self {
        Cube { range: 0..6 }
    }
    // Corner position for bit-coded index `idx` (0..8): bits 2/1/0 select
    // +1/-1 along x/y/z respectively.
    fn vert(&self, idx: usize) -> Position {
        let x = if idx & 4 == 4 { 1. } else { -1. };
        let y = if idx & 2 == 2 { 1. } else { -1. };
        let z = if idx & 1 == 1 { 1. } else { -1. };
        [x, y, z].into()
    }
    // Face normal and the four corner bit codes for face `idx` (0..6).
    //
    // Panics if `idx > 5`. (Message fixed: the old text
    // "{} face is higher then 6" was ungrammatical and wrong for idx == 6.)
    fn face_indexed(&self, idx: usize) -> (Normal, Quad<usize>) {
        match idx {
            0 => ([1., 0., 0.].into(), Quad::new(0b110, 0b111, 0b101, 0b100)),
            1 => ([-1., 0., 0.].into(), Quad::new(0b000, 0b001, 0b011, 0b010)),
            2 => ([0., 1., 0.].into(), Quad::new(0b011, 0b111, 0b110, 0b010)),
            3 => ([0., -1., 0.].into(), Quad::new(0b100, 0b101, 0b001, 0b000)),
            4 => ([0., 0., 1.].into(), Quad::new(0b101, 0b111, 0b011, 0b001)),
            5 => ([0., 0., -1.].into(), Quad::new(0b000, 0b010, 0b110, 0b100)),
            idx => panic!("face index {} is out of range (0..6)", idx),
        }
    }
    // Face `idx` with corner positions and the shared flat normal filled in.
    fn face(&self, idx: usize) -> Quad<Vertex> {
        let (no, quad) = self.face_indexed(idx);
        quad.map_vertex(|i| Vertex {
            pos: self.vert(i),
            normal: no,
        })
    }
}
impl Default for Cube {
    /// Equivalent to [`Cube::new`].
    fn default() -> Self {
        Self::new()
    }
}
impl Iterator for Cube {
    type Item = Quad<Vertex>;
    fn next(&mut self) -> Option<Quad<Vertex>> {
        // One quad per face; the range tracks which faces remain.
        let idx = self.range.next()?;
        Some(self.face(idx))
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.range.size_hint()
    }
}
impl ExactSizeIterator for Cube {
    // Remaining faces, straight from the index range.
    fn len(&self) -> usize {
        self.range.len()
    }
}
impl SharedVertex<Vertex> for Cube {
    // Four vertices per face; corners are deliberately duplicated across
    // faces so each copy can carry its face's flat normal.
    fn shared_vertex(&self, idx: usize) -> Vertex {
        let (no, quad) = self.face_indexed(idx / 4);
        let vid = match idx % 4 {
            0 => quad.x,
            1 => quad.y,
            2 => quad.z,
            3 => quad.w,
            _ => unreachable!(),
        };
        Vertex {
            pos: self.vert(vid),
            normal: no,
        }
    }
    // 6 faces × 4 corners.
    fn shared_vertex_count(&self) -> usize {
        24
    }
}
impl IndexedPolygon<Quad<usize>> for Cube {
    /// Face `idx` as four consecutive indices into the 24 shared vertices.
    fn indexed_polygon(&self, idx: usize) -> Quad<usize> {
        let base = idx * 4;
        Quad::new(base, base + 1, base + 2, base + 3)
    }
    fn indexed_polygon_count(&self) -> usize {
        6
    }
}
#[test]
fn test_cube_len() {
    // Six faces; `len` must track consumption.
    let mut cube = Cube::new();
    assert_eq!(6, cube.len());
    cube.next();
    assert_eq!(5, cube.len());
    assert_eq!(5, cube.count());
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/indexer.rs | src/indexer.rs | /// A trait defining how to define an Indexer. An indexer is an object
/// that collects vertices and emits an index for each one. The intent is that
/// an Indexer can recognize redundant vertices and deduplicate them
/// by returning aliased indices.
pub trait Indexer<T> {
    /// Converts a vertex into an index.
    fn index(&mut self, v: T) -> usize;
}
/// An `LruIndexer` deduplicates a vertex stream against a small window of the
/// most recently used vertices. A vertex found in the window reuses its
/// existing index; otherwise a fresh index is assigned, `emit` announces the
/// new (index, vertex) pair, and the least recently used cached vertex is
/// evicted once the window is full.
pub struct LruIndexer<T, F: FnMut(usize, T)> {
    // Next fresh index to hand out.
    index: usize,
    // Maximum number of cached vertices.
    max: usize,
    // LRU window, least recently used first; entries are (vertex, index).
    cache: Vec<(T, usize)>,
    // Callback invoked for every newly assigned index.
    emit: F,
}
impl<T, F: FnMut(usize, T)> LruIndexer<T, F> {
    /// Creates a new `LruIndexer` whose window holds at most `size` vertices.
    /// Lookup is linear in the window size, so it is recommended to keep it
    /// small.
    ///
    /// `emit` is called once for each vertex that was not found in the
    /// window, receiving the newly assigned index and the vertex.
    pub fn new(size: usize, emit: F) -> LruIndexer<T, F> {
        LruIndexer {
            index: 0,
            max: size,
            cache: Vec::new(),
            emit,
        }
    }
}
impl<T: PartialEq + Clone, F: FnMut(usize, T)> Indexer<T> for LruIndexer<T, F> {
    fn index(&mut self, new: T) -> usize {
        // Hit: move the entry to the back (most recently used slot) and
        // reuse its index.
        if let Some(pos) = self.cache.iter().position(|(v, _)| *v == new) {
            let entry = self.cache.remove(pos);
            let found = entry.1;
            self.cache.push(entry);
            return found;
        }
        // Miss: evict the least recently used entry if the window is full,
        // then assign and announce a fresh index.
        if self.cache.len() >= self.max {
            self.cache.remove(0);
        }
        let fresh = self.index;
        self.index += 1;
        self.cache.push((new.clone(), fresh));
        (self.emit)(fresh, new);
        fresh
    }
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/src/sphere.rs | src/sphere.rs | use std::f32::consts::PI;
use crate::generators::{IndexedPolygon, SharedVertex};
use crate::Polygon::{self, PolyQuad, PolyTri};
use crate::{Quad, Triangle, Vertex};
/// Represents a sphere with radius of 1, centered at (0, 0, 0).
#[derive(Clone, Copy)]
pub struct SphereUv {
    // Iteration cursor around the equator (azimuth).
    u: usize,
    // Iteration cursor from pole to pole.
    v: usize,
    // Number of points around the equator.
    sub_u: usize,
    // Number of points from pole to pole.
    sub_v: usize,
}
impl SphereUv {
    /// Creates a new sphere.
    ///
    /// # Arguments
    ///
    /// - `u` is the number of points across the equator of the sphere, must be at least 2
    /// - `v` is the number of points from pole to pole, must be at least 2
    ///
    /// # Panics
    ///
    /// This function panics if `u` or `v` are less than 2 respectively.
    pub fn new(u: usize, v: usize) -> Self {
        assert!(u > 1 && v > 1);
        SphereUv {
            u: 0,
            v: 0,
            sub_u: u,
            sub_v: v,
        }
    }
    // Vertex at grid position (u, v): `u` maps to the azimuth in [0, 2π),
    // `v` to the polar angle in [0, π] measured from the +z pole. On the
    // unit sphere position and normal coincide.
    fn vert(&self, u: usize, v: usize) -> Vertex {
        let u = (u as f32 / self.sub_u as f32) * PI * 2.;
        let v = (v as f32 / self.sub_v as f32) * PI;
        let p = [u.cos() * v.sin(), u.sin() * v.sin(), v.cos()];
        Vertex {
            pos: p.into(),
            normal: p.into(),
        }
    }
}
impl Iterator for SphereUv {
    type Item = Polygon<Vertex>;
    fn next(&mut self) -> Option<Self::Item> {
        // Advance to the next latitude band once the current one is complete.
        // NOTE(review): after returning `None`, `u` has been reset to 0, so
        // further `next` calls fall through this guard and yield out-of-range
        // polygons; do not poll past exhaustion (or wrap in `.fuse()`).
        if self.u == self.sub_u {
            self.u = 0;
            self.v += 1;
            if self.v == self.sub_v {
                return None;
            }
        }
        // mathematically, reaching `u + 1 == sub_u` should trivially resolve,
        // because sin(2pi) == sin(0), but rounding errors go in the way.
        let u1 = (self.u + 1) % self.sub_u;
        let x = self.vert(self.u, self.v);
        let y = self.vert(self.u, self.v + 1);
        let z = self.vert(u1, self.v + 1);
        let w = self.vert(u1, self.v);
        let v = self.v;
        self.u += 1;
        // Pole bands collapse to triangles; everything in between is a quad.
        Some(if v == 0 {
            PolyTri(Triangle::new(x, y, z))
        } else if v == self.sub_v - 1 {
            // overriding z to force u == 0 for consistency
            let z = self.vert(0, self.sub_v);
            PolyTri(Triangle::new(z, w, x))
        } else {
            PolyQuad(Quad::new(x, y, z, w))
        })
    }
    fn size_hint(&self) -> (usize, Option<usize>) {
        (self.len(), Some(self.len()))
    }
}
impl ExactSizeIterator for SphereUv {
    /// Number of polygons remaining in the iterator.
    fn len(&self) -> usize {
        // Once exhausted, `next` leaves `v == sub_v` (with `u` reset to 0);
        // the unguarded formula `sub_v - v - 1` would then underflow `usize`
        // (a panic in debug builds) and the `sub_u - u` term would report a
        // phantom band. Report 0 instead.
        if self.v >= self.sub_v {
            return 0;
        }
        // Full bands still to come, plus the remainder of the current band.
        (self.sub_v - self.v - 1) * self.sub_u + (self.sub_u - self.u)
    }
}
impl SharedVertex<Vertex> for SphereUv {
    // Layout: index 0 is the +z pole, the last index is the -z pole, and the
    // `sub_v - 1` interior rings of `sub_u` vertices sit in between.
    fn shared_vertex(&self, idx: usize) -> Vertex {
        if idx == 0 {
            self.vert(0, 0)
        } else if idx == self.shared_vertex_count() - 1 {
            self.vert(0, self.sub_v)
        } else {
            // since the bottom verts all map to the same
            // we jump over them in index space
            let idx = idx - 1;
            let u = idx % self.sub_u;
            let v = idx / self.sub_u;
            self.vert(u, v + 1)
        }
    }
    fn shared_vertex_count(&self) -> usize {
        (self.sub_v - 1) * (self.sub_u) + 2
    }
}
impl IndexedPolygon<Polygon<usize>> for SphereUv {
    fn indexed_polygon(&self, idx: usize) -> Polygon<usize> {
        // Maps grid coordinates to shared-vertex indices: both poles collapse
        // to a single index each, interior rings wrap around in `u`.
        let f = |u: usize, v: usize| {
            if v == 0 {
                0
            } else if self.sub_v == v {
                (self.sub_v - 1) * (self.sub_u) + 1
            } else {
                (v - 1) * self.sub_u + (u % self.sub_u) + 1
            }
        };
        let u = idx % self.sub_u;
        let v = idx / self.sub_u;
        // Pole bands are triangles, the rest are quads — mirroring `next`.
        if v == 0 {
            PolyTri(Triangle::new(f(u, v), f(u, v + 1), f(u + 1, v + 1)))
        } else if self.sub_v - 1 == v {
            PolyTri(Triangle::new(f(u + 1, v + 1), f(u + 1, v), f(u, v)))
        } else {
            PolyQuad(Quad::new(
                f(u, v),
                f(u, v + 1),
                f(u + 1, v + 1),
                f(u + 1, v),
            ))
        }
    }
    fn indexed_polygon_count(&self) -> usize {
        self.sub_v * self.sub_u
    }
}
#[test]
fn test_sphere_len() {
    // 5×5 grid: 25 polygons; `len` must track consumption.
    let mut sphere = SphereUv::new(5, 5);
    assert_eq!(25, sphere.len());
    sphere.next();
    assert_eq!(24, sphere.len());
    assert_eq!(24, sphere.count());
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/tests/test.rs | tests/test.rs | extern crate genmesh;
use genmesh::{
EmitTriangles, Indexer, LruIndexer, MapToVertices, Quad, Triangle, Triangulate, Vertex,
Vertices,
};
use genmesh::generators::Plane;
// `vertex` maps a function over every corner of each quad.
#[test]
fn quad_vertex() {
    let input = &[Quad::new(0usize, 1, 2, 3), Quad::new(1usize, 2, 3, 4)];
    let output = &[
        Quad::new(false, true, false, true),
        Quad::new(true, false, true, false),
    ];
    let transformed = input.iter().cloned().vertex(|v| v % 2 != 0);
    for (x, y) in transformed.zip(output.iter().cloned()) {
        assert_eq!(x, y);
    }
}
// Two chained `vertex` stages compose, including a type change in between.
#[test]
fn quad_vertex_two_stages() {
    let input = &[Quad::new(0usize, 1, 2, 3), Quad::new(1usize, 2, 3, 4)];
    let output = &[
        Quad::new(false, true, false, true),
        Quad::new(true, false, true, false),
    ];
    let transformed = input
        .iter()
        .cloned()
        .vertex(|v| v as u8)
        .vertex(|v| v % 2 != 0);
    for (x, y) in transformed.zip(output.iter().cloned()) {
        assert_eq!(x, y);
    }
}
// Plain `Iterator::map` over whole quads still works alongside the adaptors.
#[test]
fn quad_poly_simple() {
    let input = &[Quad::new(0usize, 1, 2, 3), Quad::new(1usize, 2, 3, 4)];
    let output = &[Quad::new(0isize, 1, 2, 0), Quad::new(0isize, 2, 3, 0)];
    let transformed = input
        .iter()
        .cloned()
        .map(|v| Quad::new(0isize, v.y as isize, v.z as isize, 0));
    for (x, y) in transformed.zip(output.iter().cloned()) {
        assert_eq!(x, y);
    }
}
#[test]
fn triangle_vertex() {
let input = &[Triangle::new(0usize, 1, 2), Triangle::new(1usize, 2, 3)];
let output = &[
Triangle::new(false, true, false),
Triangle::new(true, false, true),
];
let transformed = input.iter().cloned().vertex(|v| v % 2 != 0);
for (x, y) in transformed.zip(output.iter().cloned()) {
assert_eq!(x, y);
}
}
#[test]
fn triangle_vertex_two_stages() {
let input = &[Triangle::new(0usize, 1, 2), Triangle::new(1usize, 2, 3)];
let output = &[
Triangle::new(false, true, false),
Triangle::new(true, false, true),
];
let transformed = input
.iter()
.cloned()
.vertex(|v| v as u8)
.vertex(|v| v % 2 != 0);
for (x, y) in transformed.zip(output.iter().cloned()) {
assert_eq!(x, y);
}
}
#[test]
fn triangle_poly_simple() {
let input = &[Triangle::new(0usize, 1, 2), Triangle::new(1usize, 2, 3)];
let output = &[Triangle::new(0isize, 1, 2), Triangle::new(0isize, 2, 3)];
let transformed = input
.iter()
.cloned()
.map(|v| Triangle::new(0isize, v.y as isize, v.z as isize));
for (x, y) in transformed.zip(output.iter().cloned()) {
assert_eq!(x, y);
}
}
#[test]
fn to_triangles() {
let q = Quad::new(0usize, 1, 2, 3);
let mut result = Vec::new();
q.emit_triangles(|v| result.push(v));
assert_eq!(
result,
vec![Triangle::new(0usize, 1, 2), Triangle::new(2usize, 3, 0)]
);
let t = Triangle::new(0usize, 1, 2);
let mut result = Vec::new();
t.emit_triangles(|v| result.push(v));
assert_eq!(result, vec![Triangle::new(0usize, 1, 2)]);
}
#[test]
fn plane() {
let mut plane = Plane::new();
let a = plane.next().unwrap();
assert_eq!(a.x.pos, [-1f32, -1., 0.].into());
assert_eq!(a.y.pos, [1f32, -1., 0.].into());
assert_eq!(a.z.pos, [1f32, 1., 0.].into());
assert_eq!(a.w.pos, [-1f32, 1., 0.].into());
}
//TODO: LRU tests changed once the normals got introduced to the `Cube`.
// these tests may need to be revised now.
#[test]
fn lru_indexer() {
let mut vectices: Vec<Vertex> = Vec::new();
let indexes: Vec<usize> = {
let mut indexer = LruIndexer::new(8, |_, v| vectices.push(v));
Plane::subdivide(1, 3)
.vertex(|v| indexer.index(v))
.vertices()
.collect()
};
assert_eq!(8, vectices.len());
assert_eq!(3 * 4, indexes.len());
let mut vectices: Vec<Vertex> = Vec::new();
let indexes: Vec<usize> = {
let mut indexer = LruIndexer::new(4, |_, v| vectices.push(v));
Plane::subdivide(1, 3)
.triangulate()
.vertex(|v| indexer.index(v))
.vertices()
.collect()
};
assert_eq!(8, vectices.len());
assert_eq!(3 * 3 * 2, indexes.len());
}
#[test]
fn emit_lines() {
use genmesh::{EmitLines, Line, Lines};
let mut lines = Vec::new();
let triangle = Triangle::new(0i8, 1, 2);
triangle.emit_lines(|x| lines.push(x));
assert_eq!(3, lines.len());
assert_eq!(Line::new(0, 1), lines[0]);
assert_eq!(Line::new(1, 2), lines[1]);
assert_eq!(Line::new(2, 0), lines[2]);
let mut lines = Vec::new();
let quad = Quad::new(0i8, 1, 2, 3);
quad.emit_lines(|x| lines.push(x));
assert_eq!(4, lines.len());
assert_eq!(Line::new(0, 1), lines[0]);
assert_eq!(Line::new(1, 2), lines[1]);
assert_eq!(Line::new(2, 3), lines[2]);
assert_eq!(Line::new(3, 0), lines[3]);
let quads = [Quad::new(0i8, 1, 2, 3), Quad::new(4i8, 5, 6, 7)];
let lines: Vec<Line<i8>> = quads.iter().copied().lines().collect();
assert_eq!(8, lines.len());
assert_eq!(Line::new(0, 1), lines[0]);
assert_eq!(Line::new(1, 2), lines[1]);
assert_eq!(Line::new(2, 3), lines[2]);
assert_eq!(Line::new(3, 0), lines[3]);
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/tests/winding.rs | tests/winding.rs | use std::collections::HashSet;
use cgmath::InnerSpace;
use genmesh::{generators, EmitLines, Line, Lines, MapToVertices, Vertex};
#[derive(Debug)]
struct Edge {
dir: cgmath::Vector3<f32>,
mid: cgmath::Vector3<f32>,
nor: cgmath::Vector3<f32>,
}
impl Edge {
fn new(line: Line<Vertex>) -> Self {
let Line {
x: Vertex { pos: x, normal: nx },
y: Vertex { pos: y, normal: ny },
} = line;
Edge {
dir: cgmath::vec3(y.x - x.x, y.y - x.y, y.z - x.z),
mid: cgmath::vec3(y.x + x.x, y.y + x.y, y.z + x.z) * 0.5,
nor: cgmath::vec3(nx.x + ny.x, nx.y + ny.y, nx.z + ny.z),
}
}
/// Check that the corner `(self, e)` has outward winding order
/// (thus, it's normal is in the same hemisphere as it's offset).
fn check_to(&self, e: &Edge) {
let normal = self.dir.cross(e.dir);
let mid = (self.mid + e.mid) * 0.5;
assert!(normal.dot(mid) > 0.0 && e.nor.dot(mid) > 0.0);
}
}
/// Make sure that all the polygons in the `poly_iter` have the outward
/// winding order relative to the origin of the coordinate system.
/// This is a simplified (and incomplete) convex shape test.
fn test_outward<P, I>(poly_iter: I)
where
P: EmitLines<Vertex = Vertex> + ::std::fmt::Debug,
I: Iterator<Item = P>,
{
let mut edges = Vec::new();
for poly in poly_iter {
edges.clear();
poly.emit_lines(|l| edges.push(Edge::new(l)));
// check last-first corner first, since it wraps
edges.last().unwrap().check_to(&edges[0]);
// check all the non-wrapping corners
for (a, b) in edges.iter().zip(edges[1..].iter()) {
a.check_to(b);
}
}
}
// this will check that a primitive is closed, and that every edge of
// every polygon making up the primitive has a paired neighbor. This
// does not test that a polygon has the correcting winding direction
// just that the winding directions are consistent for the polygon.
//
// This is based on ftp://ftp.sgi.com/opengl/contrib/blythe/advanced99/notes/node16.html
//
fn test_closed<P, I>(poly_iter: I)
where
P: EmitLines<Vertex = Vertex> + ::std::fmt::Debug,
I: Iterator<Item = P>,
{
// convert the vertex to something that we can use to find approximate
// polygons. This is mostly to get past the fact that f32 is a cursed
// type in rust and can not be used as a key.
fn to_checkable(vertex: Vertex) -> [i32; 3] {
[
(vertex.pos.x * 1000000.) as i32,
(vertex.pos.y * 1000000.) as i32,
(vertex.pos.z * 1000000.) as i32,
]
}
let mut lines = HashSet::new();
for line in poly_iter.lines().vertex(to_checkable) {
// if the line was in the set we found the matching pair
// which is one less pair that we are looking for
if !lines.remove(&line) {
// if didn't find the pair, we flip the line around and put it into
// the search table.
lines.insert(Line {
x: line.y,
y: line.x,
});
}
}
// if we found all the pairs, we should be valid and a closed geometry
// this means that there is no polygon who's neighbor switches winding
// direction, but it does not mean that the polygon is correct. They
// all could be backwards. So this still requires a secondary inspection.
assert_eq!(lines.len(), 0);
}
#[test]
fn wind_plane() {
// the plane is not closed, so no point in testing for a closed
// shape.
test_outward(generators::Plane::new().vertex(|mut v| {
v.pos.z = 1.;
v
}));
test_outward(generators::Plane::subdivide(3, 4).vertex(|mut v| {
v.pos.z = 1.;
v
}));
}
#[test]
fn gen_cube() {
test_outward(generators::Cube::new());
test_closed(generators::Cube::new());
}
#[test]
fn gen_cylinder() {
test_outward(generators::Cylinder::new(5));
test_closed(generators::Cylinder::new(5));
test_outward(generators::Cylinder::subdivide(3, 4));
test_closed(generators::Cylinder::subdivide(3, 4));
}
#[test]
fn gen_sphere_uv() {
test_outward(generators::SphereUv::new(4, 3));
test_closed(generators::SphereUv::new(4, 3));
}
#[test]
fn gen_ico_sphere() {
test_outward(generators::IcoSphere::new());
test_closed(generators::IcoSphere::new());
test_outward(generators::IcoSphere::subdivide(3));
test_closed(generators::IcoSphere::subdivide(3));
}
#[test]
fn gen_cone() {
test_outward(generators::Cone::new(8));
test_closed(generators::Cone::new(8));
}
#[test]
fn gen_torus() {
// we don't do an outward test because the primitive is not
// convex and will fail this test.
test_closed(generators::Torus::new(10.0, 5.0, 8, 8));
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/tests/generate.rs | tests/generate.rs | use genmesh::{generators, EmitTriangles, MapVertex, Triangulate};
use std::fmt::Debug;
/// Test a generator by comparing two triangular meshes:
/// 1) by using the `Iterator` implementation of the given generator
/// 2) by producing shared vertices and sampling them with the
/// produced indexed polygons.
fn test<F, P, G>(generator: G)
where
F: EmitTriangles,
F::Vertex: Clone + Copy + Debug + PartialEq,
P: EmitTriangles<Vertex = usize>,
G: generators::SharedVertex<F::Vertex> + generators::IndexedPolygon<P> + Iterator<Item = F>,
{
let vertices: Vec<_> = generator.shared_vertex_iter().collect();
let f1: Vec<_> = generator
.indexed_polygon_iter()
.triangulate()
.map(|f| f.map_vertex(|u| vertices[u]))
.collect();
let f0: Vec<_> = generator.triangulate().collect();
assert_eq!(f0.len(), f1.len());
for (i, (p0, p1)) in f0.iter().zip(f1.iter()).enumerate() {
assert_eq!(p0, p1, "Mismatched polygon[{}]", i);
}
}
#[test]
fn gen_plane() {
test(generators::Plane::new());
test(generators::Plane::subdivide(3, 4));
}
#[test]
fn gen_cube() {
test(generators::Cube::new());
}
#[test]
fn gen_cylinder() {
test(generators::Cylinder::new(5));
test(generators::Cylinder::subdivide(3, 4));
}
#[test]
fn gen_sphere_uv() {
test(generators::SphereUv::new(4, 3));
}
#[test]
fn gen_ico_sphere() {
test(generators::IcoSphere::new());
test(generators::IcoSphere::subdivide(3));
}
#[test]
fn gen_cone() {
test(generators::Cone::new(8));
}
#[test]
fn gen_torus() {
test(generators::Torus::new(1., 0.5, 8, 8));
}
#[test]
fn gen_circle() {
test(generators::Circle::new(4))
}
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
gfx-rs/genmesh | https://github.com/gfx-rs/genmesh/blob/94443dff35d348f63f4f75638166f3e1ef3f3d16/benches/bench.rs | benches/bench.rs | // Copyright GFX Developers 2014-2017
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use criterion::{black_box, criterion_group, criterion_main, Bencher, Criterion};
extern crate genmesh;
use genmesh::generators::{IndexedPolygon, SharedVertex};
use genmesh::generators::{Plane, SphereUv};
use genmesh::*;
fn plane(bench: &mut Bencher) {
bench.iter(|| {
let plane = Plane::new();
for i in plane.shared_vertex_iter() {
black_box(i);
}
for i in plane.indexed_polygon_iter() {
black_box(i);
}
});
}
fn plane_16x16_index(bench: &mut Bencher) {
bench.iter(|| {
let plane = Plane::subdivide(16, 16);
for i in plane.indexed_polygon_iter() {
black_box(i);
}
});
}
fn plane_256x256_index(bench: &mut Bencher) {
bench.iter(|| {
let plane = Plane::subdivide(256, 256);
for i in plane.indexed_polygon_iter() {
black_box(i);
}
});
}
fn plane_16x16_vertex(bench: &mut Bencher) {
bench.iter(|| {
let plane = Plane::subdivide(16, 16);
for i in plane.shared_vertex_iter() {
black_box(i);
}
});
}
fn plane_256x256_vertex(bench: &mut Bencher) {
bench.iter(|| {
let plane = Plane::subdivide(256, 256);
for i in plane.shared_vertex_iter() {
black_box(i);
}
});
}
fn plane_16x16_index_triangulate(bench: &mut Bencher) {
bench.iter(|| {
let plane = Plane::subdivide(16, 16);
for i in plane.indexed_polygon_iter().triangulate() {
black_box(i);
}
});
}
fn plane_256x256_index_triangulate(bench: &mut Bencher) {
bench.iter(|| {
let plane = Plane::subdivide(256, 256);
for i in plane.indexed_polygon_iter().triangulate() {
black_box(i);
}
});
}
fn sphere_16x16_index(bench: &mut Bencher) {
bench.iter(|| {
let plane = SphereUv::new(16, 16);
for i in plane.indexed_polygon_iter() {
black_box(i);
}
});
}
fn sphere_256x256_index(bench: &mut Bencher) {
bench.iter(|| {
let plane = SphereUv::new(256, 256);
for i in plane.indexed_polygon_iter() {
black_box(i);
}
});
}
fn sphere_16x16_vertex(bench: &mut Bencher) {
bench.iter(|| {
let plane = SphereUv::new(16, 16);
for i in plane.shared_vertex_iter() {
black_box(i);
}
});
}
fn sphere_256x256_vertex(bench: &mut Bencher) {
bench.iter(|| {
let plane = SphereUv::new(256, 256);
for i in plane.shared_vertex_iter() {
black_box(i);
}
});
}
fn sphere_16x16_index_triangulate(bench: &mut Bencher) {
bench.iter(|| {
let plane = SphereUv::new(16, 16);
for i in plane.indexed_polygon_iter().triangulate() {
black_box(i);
}
});
}
fn sphere_256x256_index_triangulate(bench: &mut Bencher) {
bench.iter(|| {
let plane = SphereUv::new(256, 256);
for i in plane.indexed_polygon_iter().triangulate() {
black_box(i);
}
});
}
fn criterion_benchmark(c: &mut Criterion) {
c.bench_function("plane", plane);
c.bench_function("plane_16x16_index", plane_16x16_index);
c.bench_function("plane_16x16_vertex", plane_16x16_vertex);
c.bench_function(
"plane_16x16_index_triangulate",
plane_16x16_index_triangulate,
);
c.bench_function("plane_256x256_index", plane_256x256_index);
c.bench_function("plane_256x256_vertex", plane_256x256_vertex);
c.bench_function(
"plane_256x256_index_triangulate",
plane_256x256_index_triangulate,
);
c.bench_function("sphere_16x16_index", sphere_16x16_index);
c.bench_function("sphere_16x16_vertex", sphere_16x16_vertex);
c.bench_function(
"sphere_16x16_index_triangulate",
sphere_16x16_index_triangulate,
);
c.bench_function("sphere_256x256_index", sphere_256x256_index);
c.bench_function("sphere_256x256_vertex", sphere_256x256_vertex);
c.bench_function(
"sphere_256x256_index_triangulate",
sphere_256x256_index_triangulate,
);
}
criterion_group!(benches, criterion_benchmark);
criterion_main!(benches);
| rust | Apache-2.0 | 94443dff35d348f63f4f75638166f3e1ef3f3d16 | 2026-01-04T20:23:55.648184Z | false |
frol/flatc-rust | https://github.com/frol/flatc-rust/blob/16934d819a9abb7c84378e44add06b558aaebb13/src/lib.rs | src/lib.rs | //! This crate provides a programmatical way to invoke `flatc` command (e.g. from `build.rs`) to
//! generate Rust (or, in fact, any other language) helpers to work with FlatBuffers.
//!
//! NOTE: You will still need
//! [`flatc` utility](https://google.github.io/flatbuffers/flatbuffers_guide_using_schema_compiler.html)
//! version [1.10.0+](https://github.com/google/flatbuffers/releases/tag/v1.10.0) installed (there
//! are [windows binary releases](https://github.com/google/flatbuffers/releases), `flatbuffers`
//! packages for [conda](https://anaconda.org/conda-forge/flatbuffers) [Windows, Linux, MacOS],
//! [Arch Linux](https://www.archlinux.org/packages/community/x86_64/flatbuffers/)).
//!
//! # Examples
//!
//! ## Minimal useful example
//!
//! Let's assume you have `input.fbs` specification file in `flatbuffers` folder, and you want to
//! generate Rust helpers into `flatbuffers-helpers-for-rust` folder:
//!
//! ```
//! use std::path::Path;
//!
//! use flatc_rust;
//!
//! # fn try_main() -> flatc_rust::Result<()> {
//! #
//! flatc_rust::run(flatc_rust::Args {
//! lang: "rust", // `rust` is the default, but let's be explicit
//! inputs: &[Path::new("./flatbuffers/input.fbs")],
//! out_dir: Path::new("./flatbuffers-helpers-for-rust/"),
//! ..Default::default()
//! })?;
//! #
//! # Ok(())
//! # }
//! # try_main().ok();
//! ```
//!
//! ## Build scripts (`build.rs`) integration
//!
//! It is common to have FlatBuffers specifications as a single source of truth, and thus, it is
//! wise to build up-to-date helpers when you build your project. There is a built-in support for
//! [build scripts in Cargo], so you don't need to sacrifice the usual workflow (`cargo build /
//! cargo run`) in order to generate the helpers.
//!
//! 1. Create `build.rs` in the root of your project (along side with `Cargo.toml`) or follow the
//! official documentation about build scripts.
//! 2. Adapt the following example to fit your needs and put it into `build.rs`:
//!
//! ```no_run
//! extern crate flatc_rust; // or just `use flatc_rust;` with Rust 2018 edition.
//!
//! use std::path::Path;
//!
//! fn main() {
//! println!("cargo:rerun-if-changed=src/message.fbs");
//! flatc_rust::run(flatc_rust::Args {
//! inputs: &[Path::new("src/message.fbs")],
//! out_dir: Path::new("target/flatbuffers/"),
//! ..Default::default()
//! }).expect("flatc");
//! }
//! ```
//! 3. Add `flatc-rust` into `[build-dependencies]` section in `Cargo.toml`:
//!
//! ```toml
//! [build-dependencies]
//! flatc-rust = "*"
//! ```
//! 4. Add `flatbuffers` into `[dependencies]` section in `Cargo.toml`:
//!
//! ```toml
//! [dependencies]
//! flatbuffers = "0.5"
//! ```
//! 5. Include the generated helpers in your `main.rs` or `lib.rs`:
//!
//! ```ignore
//! #[allow(non_snake_case)]
//! #[path = "../target/flatbuffers/message_generated.rs"]
//! pub mod message_flatbuffers;
//! ```
//! 5. Use the helpers like any regular Rust module ([example projects])
//!
//! [build scripts in Cargo]: https://doc.rust-lang.org/cargo/reference/build-scripts.html
//! [example projects]: https://github.com/frol/flatc-rust/tree/master/examples
//!
//! ## Usage in external projects
//!
//! There is [a benchmark of FlatBuffers vs other serialization
//! frameworks](https://github.com/erickt/rust-serialization-benchmarks/pull/7), which is based on
//! `flatc-rust` integration.
#![deny(missing_docs)]
#![deny(unsafe_code)]
use std::ffi::OsString;
use std::io;
use std::path::{Path, PathBuf};
use std::process;
use log::info;
/// The default Error type of the crate
pub type Error = io::Error;
/// The default Result type of the crate
pub type Result<T> = io::Result<T>;
fn err_other<E>(error: E) -> Error
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Error::new(io::ErrorKind::Other, error)
}
/// This structure represents the arguments passed to `flatc`
///
/// # Example
///
/// ```
/// use std::path::Path;
///
/// let flatc_args = flatc_rust::Args {
/// lang: "rust",
/// inputs: &[Path::new("./src/input.fbs")],
/// out_dir: Path::new("./flatbuffers-helpers-for-rust/"),
/// ..Default::default()
/// };
/// ```
#[derive(Debug, Clone, Copy)]
pub struct Args<'a> {
/// Specify the programming language (`rust` is the default)
pub lang: &'a str,
/// List of `.fbs` files to compile [required to be non-empty]
pub inputs: &'a [&'a Path],
/// Output path for the generated helpers (`-o PATH` parameter) [required]
pub out_dir: &'a Path,
/// Search for includes in the specified paths (`-I PATH` parameter)
pub includes: &'a [&'a Path],
/// Set the flatc '--binary' flag
pub binary: bool,
/// Set the flatc '--schema' flag
pub schema: bool,
/// Set the flatc '--json' flag
pub json: bool,
/// Extra args to pass to flatc
pub extra: &'a [&'a str],
}
impl Default for Args<'_> {
fn default() -> Self {
Self {
lang: "rust",
out_dir: Path::new(""),
includes: &[],
inputs: &[],
binary: false,
schema: false,
json: false,
extra: &[],
}
}
}
/// Programmatic interface (API) for `flatc` command.
///
/// NOTE: You may only need a small helper function [`run`].
///
/// [`run`]: fn.run.html
pub struct Flatc {
exec: PathBuf,
}
impl Flatc {
/// New `flatc` command from `$PATH`
pub fn from_env_path() -> Flatc {
Flatc {
exec: PathBuf::from("flatc"),
}
}
/// New `flatc` command from specified path
pub fn from_path<P: std::convert::Into<PathBuf>>(path: P) -> Flatc {
Flatc { exec: path.into() }
}
/// Check `flatc` command found and valid
pub fn check(&self) -> Result<()> {
self.version().map(|_| ())
}
fn spawn(&self, cmd: &mut process::Command) -> io::Result<process::Child> {
info!("spawning command {:?}", cmd);
cmd.spawn()
.map_err(|e| Error::new(e.kind(), format!("failed to spawn `{:?}`: {}", cmd, e)))
}
/// Obtain `flatc` version
pub fn version(&self) -> Result<Version> {
let child = self.spawn(
process::Command::new(&self.exec)
.stdin(process::Stdio::null())
.stdout(process::Stdio::piped())
.stderr(process::Stdio::piped())
.args(["--version"]),
)?;
let output = child.wait_with_output()?;
if !output.status.success() {
return Err(err_other("flatc failed with error"));
}
let output = String::from_utf8(output.stdout).map_err(err_other)?;
let output = output
.lines()
.next()
.ok_or_else(|| err_other("output is empty"))?;
let prefix = "flatc version ";
if !output.starts_with(prefix) {
return Err(err_other("output does not start with prefix"));
}
let output = &output[prefix.len()..];
let first_char = output
.chars()
.next()
.ok_or_else(|| err_other("version is empty"))?;
if !first_char.is_ascii_digit() {
return Err(err_other("version does not start with digit"));
}
Ok(Version {
version: output.to_owned(),
})
}
/// Execute `flatc` command with given args, check it completed correctly.
fn run_with_args(&self, args: Vec<OsString>) -> Result<()> {
let mut cmd = process::Command::new(&self.exec);
cmd.stdin(process::Stdio::null());
cmd.args(args);
let mut child = self.spawn(&mut cmd)?;
if !child.wait()?.success() {
return Err(err_other(format!(
"flatc ({:?}) exited with non-zero exit code",
cmd
)));
}
Ok(())
}
/// Execute configured `flatc` with given args
pub fn run(&self, args: Args) -> Result<()> {
let mut cmd_args: Vec<OsString> = Vec::new();
if args.out_dir.as_os_str().is_empty() {
return Err(err_other("out_dir is empty"));
}
cmd_args.push({
let mut arg = OsString::with_capacity(args.lang.len() + 3);
arg.push("--");
arg.push(args.lang);
arg
});
if args.binary {
cmd_args.push("--binary".into());
}
if args.schema {
cmd_args.push("--schema".into());
}
if args.json {
cmd_args.push("--json".into());
}
for extra_arg in args.extra {
cmd_args.push(extra_arg.into());
}
if args.lang.is_empty() {
return Err(err_other("lang is empty"));
}
for include in args.includes.iter() {
cmd_args.push("-I".into());
cmd_args.push(include.into());
}
cmd_args.push("-o".into());
cmd_args.push(
args.out_dir
.to_str()
.ok_or_else(|| {
Error::new(
io::ErrorKind::Other,
"only UTF-8 convertable paths are supported",
)
})?
.into(),
);
if args.inputs.is_empty() {
return Err(err_other("input is empty"));
}
cmd_args.extend(args.inputs.iter().map(|input| input.into()));
self.run_with_args(cmd_args)
}
}
/// Execute `flatc` found in `$PATH` with given args
///
/// # Examples
///
/// Please, refer to [the root crate documentation](index.html#examples).
pub fn run(args: Args) -> Result<()> {
let flatc = Flatc::from_env_path();
// First check with have good `flatc`
flatc.check()?;
flatc.run(args)
}
/// FlatBuffers (flatc) version.
pub struct Version {
version: String,
}
impl Version {
/// Version getter
pub fn version(&self) -> &str {
&self.version
}
}
#[cfg(test)]
mod test {
use tempfile;
use super::*;
#[test]
fn version() {
Flatc::from_env_path().version().expect("version");
}
#[test]
fn run_can_produce_output() -> io::Result<()> {
let temp_dir = tempfile::Builder::new().prefix("flatc-rust").tempdir()?;
let input_path = temp_dir.path().join("test.fbs");
std::fs::write(&input_path, "table Test { text: string; } root_type Test;")
.expect("test input fbs file could not be written");
run(Args {
lang: "rust",
inputs: &[&input_path],
out_dir: temp_dir.path(),
..Default::default()
})
.expect("run");
let output_path = input_path.with_file_name("test_generated.rs");
assert!(output_path.exists());
assert_ne!(output_path.metadata().unwrap().len(), 0);
Ok(())
}
}
| rust | Apache-2.0 | 16934d819a9abb7c84378e44add06b558aaebb13 | 2026-01-04T20:24:00.919195Z | false |
frol/flatc-rust | https://github.com/frol/flatc-rust/blob/16934d819a9abb7c84378e44add06b558aaebb13/examples/tutorial/build.rs | examples/tutorial/build.rs | extern crate flatc_rust; // or just `use flatc_rust;` with Rust 2018 edition.
use std::path::Path;
fn main() {
println!("cargo:rerun-if-changed=flatbuffers/monster.fbs");
flatc_rust::run(flatc_rust::Args {
inputs: &[Path::new("flatbuffers/monster.fbs")],
out_dir: Path::new("target/flatbuffers/"),
..Default::default()
})
.expect("flatc");
}
| rust | Apache-2.0 | 16934d819a9abb7c84378e44add06b558aaebb13 | 2026-01-04T20:24:00.919195Z | false |
frol/flatc-rust | https://github.com/frol/flatc-rust/blob/16934d819a9abb7c84378e44add06b558aaebb13/examples/tutorial/src/main.rs | examples/tutorial/src/main.rs | /*
* Copyright 2018 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#[allow(dead_code, unused_imports)]
#[path = "../target/flatbuffers/monster_generated.rs"]
mod monster_flatbuffers;
use self::monster_flatbuffers::my_game::sample::{
root_as_monster, Color, Equipment, Monster, MonsterArgs, Vec3, Weapon, WeaponArgs,
};
fn main() {
// Build up a serialized buffer algorithmically.
// Initialize it with a capacity of 1024 bytes.
let mut builder = flatbuffers::FlatBufferBuilder::with_capacity(1024);
// Serialize some weapons for the Monster: A 'sword' and an 'axe'.
let weapon_one_name = builder.create_string("Sword");
let weapon_two_name = builder.create_string("Axe");
// Use the `Weapon::create` shortcut to create Weapons with named field
// arguments.
let sword = Weapon::create(
&mut builder,
&WeaponArgs {
name: Some(weapon_one_name),
damage: 3,
},
);
let axe = Weapon::create(
&mut builder,
&WeaponArgs {
name: Some(weapon_two_name),
damage: 5,
},
);
// Name of the Monster.
let name = builder.create_string("Orc");
// Inventory.
let inventory = builder.create_vector(&[0u8, 1, 2, 3, 4, 5, 6, 7, 8, 9]);
// Create a FlatBuffer `vector` that contains offsets to the sword and axe
// we created above.
let weapons = builder.create_vector(&[sword, axe]);
// Create the path vector of Vec3 objects:
//let x = Vec3::new(1.0, 2.0, 3.0);
//let y = Vec3::new(4.0, 5.0, 6.0);
//let path = builder.create_vector(&[x, y]);
// Note that, for convenience, it is also valid to create a vector of
// references to structs, like this:
// let path = builder.create_vector(&[&x, &y]);
// Create the monster using the `Monster::create` helper function. This
// function accepts a `MonsterArgs` struct, which supplies all of the data
// needed to build a `Monster`. To supply empty/default fields, just use the
// Rust built-in `Default::default()` function, as demononstrated below.
let orc = Monster::create(
&mut builder,
&MonsterArgs {
pos: Some(&Vec3::new(1.0f32, 2.0f32, 3.0f32)),
mana: 150,
hp: 80,
name: Some(name),
inventory: Some(inventory),
color: Color::Red,
weapons: Some(weapons),
equipped_type: Equipment::Weapon,
equipped: Some(axe.as_union_value()),
//path: Some(path),
..Default::default()
},
);
// Serialize the root of the object, without providing a file identifier.
builder.finish(orc, None);
// We now have a FlatBuffer we can store on disk or send over a network.
// ** file/network code goes here :) **
// Instead, we're going to access it right away (as if we just received it).
// This must be called after `finish()`.
let buf = builder.finished_data(); // Of type `&[u8]`
// Get access to the root:
let monster = root_as_monster(buf).unwrap();
// Get and test some scalar types from the FlatBuffer.
let hp = monster.hp();
let mana = monster.mana();
let name = monster.name();
assert_eq!(hp, 80);
assert_eq!(mana, 150); // default
assert_eq!(name, Some("Orc"));
// Get and test a field of the FlatBuffer's `struct`.
assert!(monster.pos().is_some());
let pos = monster.pos().unwrap();
let x = pos.x();
let y = pos.y();
let z = pos.z();
assert_eq!(x, 1.0f32);
assert_eq!(y, 2.0f32);
assert_eq!(z, 3.0f32);
// Get an element from the `inventory` FlatBuffer's `vector`.
assert!(monster.inventory().is_some());
let inv = monster.inventory().unwrap();
// Note that this vector is returned as a slice, because direct access for
// this type, a u8 vector, is safe on all platforms:
let third_item = inv.get(2);
assert_eq!(third_item, 2);
// Get and test the `weapons` FlatBuffers's `vector`.
assert!(monster.weapons().is_some());
let weps = monster.weapons().unwrap();
//let weps_len = weps.len();
let wep2 = weps.get(1);
let second_weapon_name = wep2.name();
let second_weapon_damage = wep2.damage();
assert_eq!(second_weapon_name, Some("Axe"));
assert_eq!(second_weapon_damage, 5);
// Get and test the `Equipment` union (`equipped` field).
assert_eq!(monster.equipped_type(), Equipment::Weapon);
let equipped = monster.equipped_as_weapon().unwrap();
let weapon_name = equipped.name();
let weapon_damage = equipped.damage();
assert_eq!(weapon_name, Some("Axe"));
assert_eq!(weapon_damage, 5);
// Get and test the `path` FlatBuffers's `vector`.
//assert_eq!(monster.path().unwrap().len(), 2);
//assert_eq!(monster.path().unwrap()[0].x(), 1.0);
//assert_eq!(monster.path().unwrap()[1].x(), 4.0);
println!("The FlatBuffer was successfully created and accessed!");
}
| rust | Apache-2.0 | 16934d819a9abb7c84378e44add06b558aaebb13 | 2026-01-04T20:24:00.919195Z | false |
sile/patricia_tree | https://github.com/sile/patricia_tree/blob/3191dba6c7fe30f502006e4c195a0d4c577a53e7/src/node.rs | src/node.rs | //! A node which represents a subtree of a patricia tree.
use crate::{BorrowedBytes, Bytes};
use alloc::alloc::{Layout, alloc, dealloc, handle_alloc_error};
use alloc::vec::Vec;
use core::marker::PhantomData;
use core::{mem, ptr, slice};
macro_rules! assert_some {
($expr:expr) => {
if let Some(value) = $expr {
value
} else {
panic!("`{}` must be `Some(..)`", stringify!($expr));
}
};
}
#[derive(Debug, Clone, Copy)]
pub(crate) struct Flags(u8);
impl Flags {
pub(crate) const VALUE_ALLOCATED: Flags = Flags(0b0000_0001);
pub(crate) const VALUE_INITIALIZED: Flags = Flags(0b0000_0010);
pub(crate) const CHILD_ALLOCATED: Flags = Flags(0b0000_0100);
pub(crate) const CHILD_INITIALIZED: Flags = Flags(0b0000_1000);
pub(crate) const SIBLING_ALLOCATED: Flags = Flags(0b0001_0000);
pub(crate) const SIBLING_INITIALIZED: Flags = Flags(0b0010_0000);
const VALID_BITS_MASK: u8 = 0b0011_1111; // Mask of all valid flag bits.
const fn empty() -> Self {
Flags(0)
}
pub(crate) const fn from_bits_truncate(bits: u8) -> Self {
Flags(bits & Self::VALID_BITS_MASK)
}
pub(crate) const fn bits(self) -> u8 {
self.0
}
pub(crate) const fn contains(self, other: Flags) -> bool {
(self.0 & other.0) == other.0
}
const fn intersects(self, other: Flags) -> bool {
(self.0 & other.0) != 0
}
fn insert(&mut self, other: Flags) {
self.0 |= other.0;
}
fn set(&mut self, other: Flags, value: bool) {
if value {
self.0 |= other.0;
} else {
self.0 &= !other.0;
}
}
}
impl core::ops::BitOr for Flags {
type Output = Self;
fn bitor(self, other: Self) -> Self {
Flags(self.0 | other.0)
}
}
const FLAGS_OFFSET: isize = 0;
const LABEL_LEN_OFFSET: isize = 1;
const LABEL_OFFSET: isize = 2;
const MAX_LABEL_LEN: usize = 255;
/// A node which represents a subtree of a patricia tree.
///
/// Note that this is a low level building block.
/// Usually it is recommended to use more high level data structures (e.g., `PatriciaTree`).
#[derive(Debug)]
pub struct Node<V> {
    // layout:
    // - flags: u8
    // - label_len: u8
    // - label: [u8; label_len]
    // - value: Option<V>
    // - child: Option<Node<V>>
    // - sibling: Option<Node<V>>
    //
    // Everything lives in one manually managed heap allocation behind `ptr`.
    // Which optional tail slots exist (and which are live) is recorded in the
    // leading flags byte; slot offsets are recomputed on demand from the
    // flags and label length (see `value_offset`/`child_offset`/`sibling_offset`).
    ptr: *mut u8,
    // Marks that `Node<V>` logically owns `V` values (drop check / variance).
    _value: PhantomData<V>,
}
// SAFETY(review): `Node` exclusively owns the allocation behind `ptr` and
// exposes no shared interior mutability, so transferring it across threads
// looks sound whenever `V: Send` — assumption based on the code visible in
// this file; confirm no aliasing of `ptr` escapes elsewhere.
unsafe impl<V: Send> Send for Node<V> {}
// SAFETY(review): `&Node` only hands out shared references to the contents,
// so sharing across threads looks sound whenever `V: Sync` — same caveat.
unsafe impl<V: Sync> Sync for Node<V> {}
impl<V> Node<V> {
    /// Makes a new node which represents an empty tree.
    pub fn root() -> Self {
        Node::new(b"", None, None, None)
    }
    /// Makes a new node.
    ///
    /// The header, label, and the optional `value` / `child` / `sibling`
    /// slots are packed into one heap allocation whose layout is built with
    /// `Layout::extend`, matching the offsets later recomputed by
    /// `value_offset` / `child_offset` / `sibling_offset`.
    ///
    /// A label longer than `MAX_LABEL_LEN` (the capacity of the one-byte
    /// length field) is split: this node keeps the first `MAX_LABEL_LEN`
    /// bytes and the remainder — together with `value` and `child` — moves
    /// into a freshly created child node (recursively for very long labels).
    pub fn new(
        mut label: &[u8],
        mut value: Option<V>,
        mut child: Option<Self>,
        sibling: Option<Self>,
    ) -> Self {
        if label.len() > MAX_LABEL_LEN {
            child = Some(Node::new(&label[MAX_LABEL_LEN..], value, child, None));
            label = &label[..MAX_LABEL_LEN];
            value = None;
        }
        let mut flags = Flags::empty();
        let mut layout = Self::initial_layout(label.len());
        // For each slot that is present, set both the ALLOCATED and
        // INITIALIZED bits and remember the slot's byte offset.
        let value = value.map(|value| {
            flags.insert(Flags::VALUE_ALLOCATED | Flags::VALUE_INITIALIZED);
            let (new_layout, offset) = layout.extend(Layout::new::<V>()).expect("unreachable");
            layout = new_layout;
            (value, offset)
        });
        let child = child.map(|child| {
            flags.insert(Flags::CHILD_ALLOCATED | Flags::CHILD_INITIALIZED);
            let (new_layout, offset) = layout.extend(Layout::new::<Self>()).expect("unreachable");
            layout = new_layout;
            (child, offset)
        });
        let sibling = sibling.map(|sibling| {
            flags.insert(Flags::SIBLING_ALLOCATED | Flags::SIBLING_INITIALIZED);
            let (new_layout, offset) = layout.extend(Layout::new::<Self>()).expect("unreachable");
            layout = new_layout;
            (sibling, offset)
        });
        unsafe {
            let ptr = alloc(layout.pad_to_align());
            if ptr.is_null() {
                handle_alloc_error(layout)
            }
            // Write the fixed header (flags byte, label-length byte, label
            // bytes), then move each present slot to its computed offset.
            ptr::write(ptr.offset(FLAGS_OFFSET), flags.bits());
            ptr::write(ptr.offset(LABEL_LEN_OFFSET), label.len() as u8);
            ptr::copy_nonoverlapping(label.as_ptr(), ptr.offset(LABEL_OFFSET), label.len());
            if let Some((value, offset)) = value {
                ptr::write(ptr.add(offset) as _, value);
            }
            if let Some((child, offset)) = child {
                ptr::write(ptr.add(offset) as _, child);
            }
            if let Some((sibling, offset)) = sibling {
                ptr::write(ptr.add(offset) as _, sibling);
            }
            Node {
                ptr,
                _value: PhantomData,
            }
        }
    }
#[cfg(feature = "serde")]
pub(crate) fn new_for_decoding(flags: Flags, label_len: u8) -> Self {
let mut init_flags = Flags::empty();
let mut layout = Self::initial_layout(label_len as usize);
if flags.contains(Flags::VALUE_INITIALIZED) {
init_flags.insert(Flags::VALUE_ALLOCATED);
layout = layout.extend(Layout::new::<V>()).expect("unreachable").0;
}
if flags.contains(Flags::CHILD_INITIALIZED) {
init_flags.insert(Flags::CHILD_ALLOCATED);
layout = layout.extend(Layout::new::<Self>()).expect("unreachable").0;
}
if flags.contains(Flags::SIBLING_INITIALIZED) {
init_flags.insert(Flags::SIBLING_ALLOCATED);
layout = layout.extend(Layout::new::<Self>()).expect("unreachable").0;
}
let ptr = unsafe { alloc(layout.pad_to_align()) };
assert_ne!(ptr, ptr::null_mut());
unsafe {
ptr::write(ptr.offset(FLAGS_OFFSET), init_flags.bits());
ptr::write(ptr.offset(LABEL_LEN_OFFSET), label_len);
}
Node {
ptr,
_value: PhantomData,
}
}
/// Returns the label of this node.
pub fn label(&self) -> &[u8] {
unsafe {
let label_len = *self.ptr.offset(LABEL_LEN_OFFSET) as usize;
slice::from_raw_parts(self.ptr.offset(LABEL_OFFSET), label_len)
}
}
#[cfg(feature = "serde")]
pub(crate) fn label_mut(&mut self) -> &mut [u8] {
unsafe {
let label_len = *self.ptr.offset(LABEL_LEN_OFFSET) as usize;
slice::from_raw_parts_mut(self.ptr.offset(LABEL_OFFSET), label_len)
}
}
/// Returns the reference to the value of this node.
pub fn value(&self) -> Option<&V> {
if let Some(offset) = self.value_offset() {
if self.flags().contains(Flags::VALUE_INITIALIZED) {
unsafe {
let value = self.ptr.offset(offset) as *const V;
return Some(&*value);
}
}
}
None
}
/// Returns the mutable reference to the value of this node.
pub fn value_mut(&mut self) -> Option<&mut V> {
if let Some(offset) = self.value_offset() {
if self.flags().contains(Flags::VALUE_INITIALIZED) {
unsafe {
let value = self.ptr.offset(offset) as *mut V;
return Some(&mut *value);
}
}
}
None
}
/// Returns the reference to the child of this node.
pub fn child(&self) -> Option<&Self> {
if let Some(offset) = self.child_offset() {
if self.flags().contains(Flags::CHILD_INITIALIZED) {
unsafe {
let child = self.ptr.offset(offset) as *const Self;
return Some(&*child);
}
}
}
None
}
/// Returns the mutable reference to the child of this node.
pub fn child_mut(&mut self) -> Option<&mut Self> {
if let Some(offset) = self.child_offset() {
if self.flags().contains(Flags::CHILD_INITIALIZED) {
unsafe {
let child = self.ptr.offset(offset) as *mut Self;
return Some(&mut *child);
}
}
}
None
}
/// Returns the reference to the sibling of this node.
pub fn sibling(&self) -> Option<&Self> {
if let Some(offset) = self.sibling_offset() {
if self.flags().contains(Flags::SIBLING_INITIALIZED) {
unsafe {
let sibling = self.ptr.offset(offset) as *const Self;
return Some(&*sibling);
}
}
}
None
}
/// Returns the mutable reference to the sibling of this node.
pub fn sibling_mut(&mut self) -> Option<&mut Self> {
if let Some(offset) = self.sibling_offset() {
if self.flags().contains(Flags::SIBLING_INITIALIZED) {
unsafe {
let sibling = self.ptr.offset(offset) as *mut Self;
return Some(&mut *sibling);
}
}
}
None
}
/// Returns mutable references to the node itself with its sibling and child
pub fn as_mut(&mut self) -> NodeMut<'_, V> {
let mut sibling_result = None;
let mut child_result = None;
let mut value_result = None;
if let Some(offset) = self.child_offset() {
if self.flags().contains(Flags::CHILD_INITIALIZED) {
unsafe {
let child = self.ptr.offset(offset) as *mut Self;
child_result.replace(&mut *child);
}
}
}
if let Some(offset) = self.sibling_offset() {
if self.flags().contains(Flags::SIBLING_INITIALIZED) {
unsafe {
let sibling = self.ptr.offset(offset) as *mut Self;
sibling_result.replace(&mut *sibling);
}
}
}
if let Some(offset) = self.value_offset() {
if self.flags().contains(Flags::VALUE_INITIALIZED) {
unsafe {
let value = self.ptr.offset(offset) as *mut V;
value_result.replace(&mut *value);
}
}
}
NodeMut {
label: self.label(),
sibling: sibling_result,
child: child_result,
value: value_result,
}
}
/// Takes the value out of this node.
pub fn take_value(&mut self) -> Option<V> {
if let Some(offset) = self.value_offset() {
if self.flags().contains(Flags::VALUE_INITIALIZED) {
self.set_flags(Flags::VALUE_INITIALIZED, false);
unsafe {
let value = self.ptr.offset(offset) as *const V;
return Some(ptr::read(value));
}
}
}
None
}
/// Takes the child out of this node.
pub fn take_child(&mut self) -> Option<Self> {
if let Some(offset) = self.child_offset() {
if self.flags().contains(Flags::CHILD_INITIALIZED) {
self.set_flags(Flags::CHILD_INITIALIZED, false);
unsafe {
let child = self.ptr.offset(offset) as *mut Self;
return Some(ptr::read(child));
}
}
}
None
}
/// Takes the sibling out of this node.
pub fn take_sibling(&mut self) -> Option<Self> {
if let Some(offset) = self.sibling_offset() {
if self.flags().contains(Flags::SIBLING_INITIALIZED) {
self.set_flags(Flags::SIBLING_INITIALIZED, false);
unsafe {
let sibling = self.ptr.offset(offset) as *mut Self;
return Some(ptr::read(sibling));
}
}
}
None
}
    /// Sets the value of this node.
    ///
    /// If the node's buffer already has a value slot allocated, the value is
    /// written in place (any previous value is dropped first via
    /// `take_value`). Otherwise the node is rebuilt in a fresh allocation
    /// that includes a value slot, carrying over label, child and sibling.
    pub fn set_value(&mut self, value: V) {
        self.take_value();
        if let Some(offset) = self.value_offset() {
            self.set_flags(Flags::VALUE_INITIALIZED, true);
            unsafe { ptr::write(self.ptr.offset(offset) as _, value) };
        } else {
            // No value slot allocated: reallocate via `Node::new`.
            let child = self.take_child();
            let sibling = self.take_sibling();
            let node = Node::new(self.label(), Some(value), child, sibling);
            *self = node;
        }
    }
    /// Sets the child of this node.
    ///
    /// In place when a child slot is already allocated; otherwise the node
    /// is rebuilt with one (value and sibling are carried over).
    pub fn set_child(&mut self, child: Self) {
        self.take_child();
        if let Some(offset) = self.child_offset() {
            self.set_flags(Flags::CHILD_INITIALIZED, true);
            unsafe { ptr::write(self.ptr.offset(offset) as _, child) };
        } else {
            let value = self.take_value();
            let sibling = self.take_sibling();
            let node = Node::new(self.label(), value, Some(child), sibling);
            *self = node;
        }
    }
    /// Sets the sibling of this node.
    ///
    /// In place when a sibling slot is already allocated; otherwise the node
    /// is rebuilt with one (value and child are carried over).
    pub fn set_sibling(&mut self, sibling: Self) {
        self.take_sibling();
        if let Some(offset) = self.sibling_offset() {
            self.set_flags(Flags::SIBLING_INITIALIZED, true);
            unsafe { ptr::write(self.ptr.offset(offset) as _, sibling) };
        } else {
            let value = self.take_value();
            let child = self.take_child();
            let node = Node::new(self.label(), value, child, Some(sibling));
            *self = node;
        }
    }
/// Gets an iterator which traverses the nodes in this tree, in depth first order.
pub fn iter(&self) -> Iter<'_, V> {
Iter {
stack: vec![(0, self)],
}
}
/// Gets a mutable iterator which traverses the nodes in this tree, in depth first order.
pub fn iter_mut(&mut self) -> IterMut<'_, V> {
IterMut {
stack: vec![(0, self)],
}
}
pub(crate) fn iter_descendant(&self) -> Iter<'_, V> {
Iter {
stack: vec![(0, self)],
}
}
pub(crate) fn iter_descendant_mut(&mut self) -> IterMut<'_, V> {
IterMut {
stack: vec![(0, self)],
}
}
pub(crate) fn common_prefixes<'a, 'b, K>(
&'a self,
key: &'b K,
) -> CommonPrefixesIter<'a, 'b, K, V>
where
K: ?Sized + BorrowedBytes,
{
CommonPrefixesIter {
key,
stack: vec![(0, self)],
}
}
pub(crate) fn common_prefixes_owned<K: Bytes>(
&self,
key: K,
) -> CommonPrefixesIterOwned<'_, K, V> {
CommonPrefixesIterOwned {
key,
stack: vec![(0, self)],
}
}
pub(crate) fn get<K: ?Sized + BorrowedBytes>(&self, key: &K) -> Option<&V> {
let (next, common_prefix_len) = key.strip_common_prefix_and_len(self.label());
if common_prefix_len == self.label().len() {
if next.is_empty() {
self.value()
} else {
self.child().and_then(|child| child.get(next))
}
} else if common_prefix_len == 0 && key.cmp_first_item(self.label()).is_ge() {
self.sibling().and_then(|sibling| sibling.get(next))
} else {
None
}
}
pub(crate) fn get_mut<K: ?Sized + BorrowedBytes>(&mut self, key: &K) -> Option<&mut V> {
let (next, common_prefix_len) = key.strip_common_prefix_and_len(self.label());
if common_prefix_len == self.label().len() {
if next.is_empty() {
self.value_mut()
} else {
self.child_mut().and_then(|child| child.get_mut(next))
}
} else if common_prefix_len == 0 && key.cmp_first_item(self.label()).is_ge() {
self.sibling_mut().and_then(|sibling| sibling.get_mut(next))
} else {
None
}
}
pub(crate) fn longest_common_prefix_len<K: ?Sized + BorrowedBytes>(
&self,
key: &K,
offset: usize,
) -> usize {
let (next, common_prefix_len) = key.strip_common_prefix_and_len(self.label());
let next_offset = offset + common_prefix_len;
if common_prefix_len == self.label().len() {
if next.is_empty() {
next_offset
} else {
self.child()
.map(|child| child.longest_common_prefix_len(next, next_offset))
.unwrap_or(next_offset)
}
} else if common_prefix_len == 0 && key.cmp_first_item(self.label()).is_ge() {
self.sibling()
.map(|sibling| sibling.longest_common_prefix_len(next, offset))
.unwrap_or(next_offset)
} else {
next_offset
}
}
pub(crate) fn get_longest_common_prefix<K: ?Sized + BorrowedBytes>(
&self,
key: &K,
offset: usize,
) -> Option<(usize, &V)> {
let (next, common_prefix_len) = key.strip_common_prefix_and_len(self.label());
if common_prefix_len == self.label().len() {
let offset = offset + common_prefix_len;
if next.is_empty() {
self.value().map(|v| (offset, v))
} else {
self.child()
.and_then(|child| child.get_longest_common_prefix(next, offset))
.or_else(|| self.value().map(|v| (offset, v)))
}
} else if common_prefix_len == 0 && key.cmp_first_item(self.label()).is_ge() {
self.sibling()
.and_then(|sibling| sibling.get_longest_common_prefix(next, offset))
} else {
None
}
}
pub(crate) fn get_longest_common_prefix_mut<K: ?Sized + BorrowedBytes>(
&mut self,
key: &K,
offset: usize,
) -> Option<(usize, &mut V)> {
let (next, common_prefix_len) = key.strip_common_prefix_and_len(self.label());
if common_prefix_len == self.label().len() {
let offset = offset + common_prefix_len;
if next.is_empty() {
self.value_mut().map(|v| (offset, v))
} else {
let this = self.as_mut();
this.child
.and_then(|child| child.get_longest_common_prefix_mut(next, offset))
.or_else(|| this.value.map(|v| (offset, v)))
}
} else if common_prefix_len == 0 && key.cmp_first_item(self.label()).is_ge() {
self.sibling_mut()
.and_then(|sibling| sibling.get_longest_common_prefix_mut(next, offset))
} else {
None
}
}
pub(crate) fn get_prefix_node<K: ?Sized + BorrowedBytes>(
&self,
key: &K,
) -> Option<(usize, &Self)> {
let (next, common_prefix_len) = key.strip_common_prefix_and_len(self.label());
if next.is_empty() {
Some((common_prefix_len, self))
} else if common_prefix_len == self.label().len() {
self.child().and_then(|child| child.get_prefix_node(next))
} else if common_prefix_len == 0 && key.cmp_first_item(self.label()).is_ge() {
self.sibling()
.and_then(|sibling| sibling.get_prefix_node(next))
} else {
None
}
}
pub(crate) fn get_prefix_node_mut<K: ?Sized + BorrowedBytes>(
&mut self,
key: &K,
) -> Option<(usize, &mut Self)> {
let (next, common_prefix_len) = key.strip_common_prefix_and_len(self.label());
if next.is_empty() {
Some((common_prefix_len, self))
} else if common_prefix_len == self.label().len() {
self.child_mut()
.and_then(|child| child.get_prefix_node_mut(next))
} else if common_prefix_len == 0 && key.cmp_first_item(self.label()).is_ge() {
self.sibling_mut()
.and_then(|sibling| sibling.get_prefix_node_mut(next))
} else {
None
}
}
pub(crate) fn split_by_prefix<K: ?Sized + BorrowedBytes>(
&mut self,
prefix: &K,
level: usize,
) -> Option<Self> {
let (next, common_prefix_len) = prefix.strip_common_prefix_and_len(self.label());
if common_prefix_len == prefix.as_bytes().len() {
let value = self.take_value();
let child = self.take_child();
let node = Node::new(&self.label()[common_prefix_len..], value, child, None);
if let Some(sibling) = self.take_sibling() {
*self = sibling;
}
Some(node)
} else if common_prefix_len == self.label().len() {
self.child_mut()
.and_then(|child| child.split_by_prefix(next, level + 1))
.inspect(|_old| {
self.try_reclaim_child();
self.try_merge_with_child(level);
})
} else if common_prefix_len == 0 && prefix.cmp_first_item(self.label()).is_ge() {
self.sibling_mut()
.and_then(|sibling| sibling.split_by_prefix(next, level))
.inspect(|_old| {
self.try_reclaim_sibling();
})
} else {
None
}
}
pub(crate) fn remove<K: ?Sized + BorrowedBytes>(&mut self, key: &K, level: usize) -> Option<V> {
let (next, common_prefix_len) = key.strip_common_prefix_and_len(self.label());
if common_prefix_len == self.label().len() {
if next.is_empty() {
self.take_value().inspect(|_old| {
self.try_merge_with_child(level);
})
} else {
self.child_mut()
.and_then(|child| child.remove(next, level + 1))
.inspect(|_old| {
self.try_reclaim_child();
self.try_merge_with_child(level);
})
}
} else if common_prefix_len == 0 && key.cmp_first_item(self.label()).is_ge() {
self.sibling_mut()
.and_then(|sibling| sibling.remove(next, level))
.inspect(|_old| {
self.try_reclaim_sibling();
})
} else {
None
}
}
    /// Inserts `key`/`value` into the subtree rooted at this node, returning
    /// the previous value if the key was already present.
    ///
    /// Siblings are kept ordered by their first label item; children hang
    /// off a fully matched prefix.
    pub(crate) fn insert<K: ?Sized + BorrowedBytes>(&mut self, key: &K, value: V) -> Option<V> {
        if key.cmp_first_item(self.label()).is_lt() {
            // The key sorts before this node: a new node takes this node's
            // place in the sibling chain, with the old node as its sibling.
            // `self.ptr` is moved into `this`; `mem::forget(node)` then
            // prevents a double free, since `self` has taken over `node.ptr`.
            let this = Node {
                ptr: self.ptr,
                _value: PhantomData,
            };
            let node = Node::new(key.as_bytes(), Some(value), None, Some(this));
            self.ptr = node.ptr;
            mem::forget(node);
            return None;
        }
        let (next, common_prefix_len) = key.strip_common_prefix_and_len(self.label());
        let is_label_matched = common_prefix_len == self.label().len();
        if next.as_bytes().is_empty() {
            if is_label_matched {
                // Exact match: replace the stored value.
                let old = self.take_value();
                self.set_value(value);
                old
            } else {
                // Key is a strict prefix of the label: split and store here.
                self.split_at(common_prefix_len);
                self.set_value(value);
                None
            }
        } else if is_label_matched {
            // Whole label consumed: descend into (or create) the child.
            if let Some(child) = self.child_mut() {
                return child.insert(next, value);
            }
            let child = Node::new(next.as_bytes(), Some(value), None, None);
            self.set_child(child);
            None
        } else if common_prefix_len == 0 {
            // No shared prefix and the key sorts after this label: continue
            // along (or extend) the sibling chain.
            if let Some(sibling) = self.sibling_mut() {
                return sibling.insert(next, value);
            }
            let sibling = Node::new(next.as_bytes(), Some(value), None, None);
            self.set_sibling(sibling);
            None
        } else {
            // Partial overlap: split the label and insert below the split.
            self.split_at(common_prefix_len);
            assert_some!(self.child_mut()).insert(next, value);
            None
        }
    }
pub(crate) fn flags(&self) -> Flags {
Flags::from_bits_truncate(unsafe { *self.ptr })
}
fn set_flags(&mut self, other: Flags, value: bool) {
let mut flags = self.flags();
flags.set(other, value);
unsafe { ptr::write(self.ptr, flags.bits()) };
}
fn label_len(&self) -> usize {
unsafe { *self.ptr.offset(LABEL_LEN_OFFSET) as usize }
}
fn value_offset(&self) -> Option<isize> {
let flags = self.flags();
if flags.contains(Flags::VALUE_ALLOCATED) {
let layout = Self::initial_layout(self.label_len());
let offset = layout.extend(Layout::new::<V>()).expect("unreachable").1;
Some(offset as isize)
} else {
None
}
}
fn child_offset(&self) -> Option<isize> {
let flags = self.flags();
if flags.contains(Flags::CHILD_ALLOCATED) {
let mut layout = Self::initial_layout(self.label_len());
if flags.contains(Flags::VALUE_ALLOCATED) {
layout = layout.extend(Layout::new::<V>()).expect("unreachable").0;
}
let offset = layout.extend(Layout::new::<Self>()).expect("unreachable").1;
Some(offset as isize)
} else {
None
}
}
fn sibling_offset(&self) -> Option<isize> {
let flags = self.flags();
if flags.contains(Flags::SIBLING_ALLOCATED) {
let mut layout = Self::initial_layout(self.label_len());
if flags.contains(Flags::VALUE_ALLOCATED) {
layout = layout.extend(Layout::new::<V>()).expect("unreachable").0;
}
if flags.contains(Flags::CHILD_ALLOCATED) {
layout = layout.extend(Layout::new::<Self>()).expect("unreachable").0;
}
let offset = layout.extend(Layout::new::<Self>()).expect("unreachable").1;
Some(offset as isize)
} else {
None
}
}
    /// Splits this node's label at `position`: the node keeps the label
    /// prefix (with no value) and a new child receives the suffix together
    /// with the original value and child; the sibling stays on the parent.
    fn split_at(&mut self, position: usize) {
        debug_assert!(position < self.label_len());
        let value = self.take_value();
        let child = self.take_child();
        let sibling = self.take_sibling();
        let child = Node::new(&self.label()[position..], value, child, None);
        let parent = Node::new(&self.label()[..position], None, Some(child), sibling);
        *self = parent;
    }
    /// Unlinks the sibling if it carries neither a value nor a child,
    /// splicing the sibling's own sibling (if any) into its place.
    /// Panics if there is no sibling.
    fn try_reclaim_sibling(&mut self) {
        let flags = assert_some!(self.sibling()).flags();
        if flags.intersects(Flags::VALUE_INITIALIZED | Flags::CHILD_INITIALIZED) {
            return;
        }
        if let Some(sibling) = self.take_sibling().and_then(|mut n| n.take_sibling()) {
            self.set_sibling(sibling);
        }
    }
    /// Unlinks the child if it carries neither a value nor a child of its
    /// own, splicing the child's sibling (if any) into its place.
    /// Panics if there is no child.
    fn try_reclaim_child(&mut self) {
        let flags = assert_some!(self.child()).flags();
        if flags.intersects(Flags::VALUE_INITIALIZED | Flags::CHILD_INITIALIZED) {
            return;
        }
        if let Some(child) = self.take_child().and_then(|mut n| n.take_sibling()) {
            self.set_child(child);
        }
    }
    /// Merges this node with its child when that is lossless: this node has
    /// no value, the child is its only descendant at that level (no sibling),
    /// and the concatenated label still fits in `MAX_LABEL_LEN`.
    /// Never applied at the root (`level == 0`).
    pub(crate) fn try_merge_with_child(&mut self, level: usize) {
        if level == 0 {
            return;
        }
        if self.flags().contains(Flags::VALUE_INITIALIZED)
            || !self.flags().contains(Flags::CHILD_INITIALIZED)
        {
            return;
        }
        let flags = assert_some!(self.child()).flags();
        if !flags.contains(Flags::SIBLING_INITIALIZED)
            && (self.label_len() + assert_some!(self.child()).label_len()) <= MAX_LABEL_LEN
        {
            let mut child = assert_some!(self.take_child());
            let sibling = self.take_sibling();
            let value = child.take_value();
            let grandchild = child.take_child();
            let mut label = Vec::with_capacity(self.label_len() + child.label_len());
            label.extend(self.label());
            label.extend(child.label());
            let node = Self::new(&label, value, grandchild, sibling);
            *self = node;
        }
    }
#[inline]
fn initial_layout(label_len: usize) -> Layout {
Layout::from_size_align(LABEL_OFFSET as usize + label_len, 1).expect("unreachable")
}
}
impl<V> Drop for Node<V> {
    /// Drops the live slot contents (value/child/sibling) via the `take_*`
    /// methods, then frees the node's buffer.
    ///
    /// The layout is recomputed from the `*_ALLOCATED` flags exactly as it
    /// was built in `Node::new` / `new_for_decoding`, so the deallocation
    /// size matches the allocation. Dropping the child/sibling here runs
    /// this `drop` recursively, one stack frame per tree level.
    fn drop(&mut self) {
        let _ = self.take_value();
        let _ = self.take_child();
        let _ = self.take_sibling();
        let mut layout = Self::initial_layout(self.label_len());
        if self.flags().contains(Flags::VALUE_ALLOCATED) {
            layout = layout.extend(Layout::new::<V>()).expect("unreachable").0;
        }
        if self.flags().contains(Flags::CHILD_ALLOCATED) {
            layout = layout.extend(Layout::new::<Self>()).expect("unreachable").0;
        }
        if self.flags().contains(Flags::SIBLING_ALLOCATED) {
            layout = layout.extend(Layout::new::<Self>()).expect("unreachable").0;
        }
        unsafe { dealloc(self.ptr, layout.pad_to_align()) }
    }
}
impl<V: Clone> Clone for Node<V> {
    /// Deep-copies the node: the label bytes go into a fresh allocation and
    /// the value, child and sibling (when present) are cloned recursively.
    fn clone(&self) -> Self {
        Node::new(
            self.label(),
            self.value().cloned(),
            self.child().cloned(),
            self.sibling().cloned(),
        )
    }
}
impl<V> IntoIterator for Node<V> {
type Item = (usize, Node<V>);
type IntoIter = IntoIter<V>;
fn into_iter(self) -> Self::IntoIter {
IntoIter {
stack: vec![(0, self)],
}
}
}
/// An iterator which traverses the nodes in a tree, in depth first order.
///
/// The first element of an item is the level of the traversing node.
#[derive(Debug)]
pub struct Iter<'a, V: 'a> {
stack: Vec<(usize, &'a Node<V>)>,
}
impl<'a, V: 'a> Iterator for Iter<'a, V> {
    type Item = (usize, &'a Node<V>);
    fn next(&mut self) -> Option<Self::Item> {
        let (level, node) = self.stack.pop()?;
        // Push the sibling (same level) before the child (level + 1) so the
        // child is popped first, yielding depth-first order. A level-0
        // node's sibling is not pushed.
        if level != 0 {
            if let Some(sibling) = node.sibling() {
                self.stack.push((level, sibling));
            }
        }
        if let Some(child) = node.child() {
            self.stack.push((level + 1, child));
        }
        Some((level, node))
    }
}
/// A mutable iterator which traverses the nodes in a tree, in depth first order.
///
/// The first element of an item is the level of the traversing node.
#[derive(Debug)]
pub struct IterMut<'a, V: 'a> {
stack: Vec<(usize, &'a mut Node<V>)>,
}
/// A reference to an immediate node (without child or sibling) with its
/// label and a mutable reference to its value, if present.
pub struct NodeMut<'a, V: 'a> {
label: &'a [u8],
value: Option<&'a mut V>,
sibling: Option<&'a mut Node<V>>,
child: Option<&'a mut Node<V>>,
}
impl<'a, V: 'a> NodeMut<'a, V> {
/// Returns the label of the node.
pub fn label(&self) -> &'a [u8] {
self.label
}
/// Converts into a mutable reference to the value.
pub fn into_value_mut(self) -> Option<&'a mut V> {
self.value
}
}
impl<'a, V: 'a> Iterator for IterMut<'a, V> {
type Item = (usize, NodeMut<'a, V>);
fn next(&mut self) -> Option<Self::Item> {
if let Some((level, node)) = self.stack.pop() {
let mut node = node.as_mut();
if level != 0 {
if let Some(sibling) = node.sibling.take() {
self.stack.push((level, sibling));
}
}
if let Some(child) = node.child.take() {
self.stack.push((level + 1, child));
}
Some((level, node))
} else {
None
}
}
}
/// An iterator over entries in that collects all values up to
/// until the key stops matching.
#[derive(Debug)]
pub(crate) struct CommonPrefixesIter<'a, 'b, K: ?Sized, V> {
key: &'b K,
stack: Vec<(usize, &'a Node<V>)>,
}
impl<'a, K, V> Iterator for CommonPrefixesIter<'a, '_, K, V>
where
K: ?Sized + BorrowedBytes,
{
type Item = (usize, &'a Node<V>);
fn next(&mut self) -> Option<Self::Item> {
while let Some((offset, node)) = self.stack.pop() {
let key = self.key.strip_n_prefix(offset);
let (_next, common_prefix_len) = key.strip_common_prefix_and_len(node.label());
if common_prefix_len == 0 && key.cmp_first_item(node.label()).is_ge() {
if let Some(sibling) = node.sibling() {
self.stack.push((offset, sibling));
}
}
if common_prefix_len == node.label().len() {
let prefix_len = offset + common_prefix_len;
if let Some(child) = node.child() {
| rust | MIT | 3191dba6c7fe30f502006e4c195a0d4c577a53e7 | 2026-01-04T20:24:05.347227Z | true |
sile/patricia_tree | https://github.com/sile/patricia_tree/blob/3191dba6c7fe30f502006e4c195a0d4c577a53e7/src/lib.rs | src/lib.rs | //! Memory-efficient data structures based on patricia tree (a.k.a, radix tree).
//!
//! A common prefixes of the keys in a patricia tree are represented by a shared path.
//! So if the prefixes of the key set is highly redundant,
//! the memory usage of the resulting patricia tree will be drastically less than
//! more generic data structures (e.g., `BTreeMap`).
//!
//! See [Radix tree](https://en.wikipedia.org/wiki/Radix_tree) for more details.
//!
//! # Examples
//!
//! ```
//! use patricia_tree::PatriciaMap;
//!
//! let mut map = PatriciaMap::new();
//! map.insert("foo", 1);
//! map.insert("bar", 2);
//! map.insert("baz", 3);
//! assert_eq!(map.len(), 3);
//!
//! assert_eq!(map.get("foo"), Some(&1));
//! assert_eq!(map.get("bar"), Some(&2));
//! assert_eq!(map.get("baz"), Some(&3));
//! ```
#![warn(missing_docs)]
#![expect(clippy::cast_ptr_alignment)]
#![cfg_attr(not(feature = "std"), no_std)]
#[macro_use]
extern crate alloc;
use alloc::borrow::ToOwned;
use alloc::string::String;
use alloc::vec::Vec;
use core::cmp::Ordering;
pub use map::{GenericPatriciaMap, PatriciaMap, StringPatriciaMap};
pub use set::{GenericPatriciaSet, PatriciaSet, StringPatriciaSet};
pub mod map;
pub mod set;
mod node;
#[cfg(feature = "serde")]
mod serialization;
mod tree;
/// This trait represents a bytes type that can be used as the key type of patricia trees.
pub trait Bytes {
    /// Borrowed type of this type.
    type Borrowed: ?Sized + BorrowedBytes + ToOwned<Owned = Self>;
}
impl Bytes for Vec<u8> {
    type Borrowed = [u8];
}
impl Bytes for String {
    type Borrowed = str;
}
/// Borrowed type of [`Bytes`].
pub trait BorrowedBytes {
    /// Returns the byte representation of this instance.
    fn as_bytes(&self) -> &[u8];
    /// Returns `true` if the given bytes is a valid representation of this type, otherwise `false`.
    fn is_valid_bytes(bytes: &[u8]) -> bool;
    /// Converts the given bytes to an instance of this type.
    ///
    /// Caller can assume that `is_valid_bytes(bytes)` is `true`.
    fn from_bytes(bytes: &[u8]) -> &Self;
    /// Returns a suffix of this instance not containing the common prefix with the given bytes.
    fn strip_common_prefix(&self, bytes: &[u8]) -> &Self;
    /// Same as [`strip_common_prefix()`], but also returns the length of the common prefix.
    fn strip_common_prefix_and_len(&self, bytes: &[u8]) -> (&Self, usize) {
        let suffix = self.strip_common_prefix(bytes);
        let prefix_len = self.as_bytes().len() - suffix.as_bytes().len();
        (suffix, prefix_len)
    }
    /// Compares the first item of this instance with the first item represented in the given bytes.
    fn cmp_first_item(&self, bytes: &[u8]) -> Ordering;
    /// Returns `true` if this instance is empty, otherwise `false`.
    fn is_empty(&self) -> bool {
        self.as_bytes().is_empty()
    }
    /// Returns a suffix of this instance not containing the first `n` bytes.
    fn strip_n_prefix(&self, n: usize) -> &Self;
}
impl BorrowedBytes for [u8] {
    fn as_bytes(&self) -> &[u8] {
        self
    }
    fn is_valid_bytes(_bytes: &[u8]) -> bool {
        true
    }
    fn from_bytes(bytes: &[u8]) -> &Self {
        bytes
    }
    fn strip_common_prefix(&self, bytes: &[u8]) -> &Self {
        // The common prefix ends at the first mismatching byte, or at the
        // length of the shorter of the two inputs.
        let common = self
            .iter()
            .zip(bytes)
            .position(|(a, b)| a != b)
            .unwrap_or_else(|| self.len().min(bytes.len()));
        &self[common..]
    }
    fn cmp_first_item(&self, bytes: &[u8]) -> Ordering {
        Ord::cmp(&self.first(), &bytes.first())
    }
    fn strip_n_prefix(&self, n: usize) -> &Self {
        self.split_at(n).1
    }
}
impl BorrowedBytes for str {
    fn as_bytes(&self) -> &[u8] {
        str::as_bytes(self)
    }
    fn is_valid_bytes(bytes: &[u8]) -> bool {
        core::str::from_utf8(bytes).is_ok()
    }
    fn from_bytes(bytes: &[u8]) -> &Self {
        core::str::from_utf8(bytes).expect("unreachable")
    }
    fn strip_common_prefix(&self, bytes: &[u8]) -> &Self {
        // Advance one whole character at a time so the returned suffix is
        // always a valid UTF-8 boundary, comparing each character's bytes
        // against the corresponding range of `bytes`.
        let this = str::as_bytes(self);
        let mut start = 0;
        for c in self.chars() {
            let end = start + c.len_utf8();
            if end > bytes.len() || this[start..end] != bytes[start..end] {
                return &self[start..];
            }
            start = end;
        }
        ""
    }
    fn cmp_first_item(&self, bytes: &[u8]) -> Ordering {
        let mine = self.chars().next();
        let theirs = Self::from_bytes(bytes).chars().next();
        mine.cmp(&theirs)
    }
    fn strip_n_prefix(&self, n: usize) -> &Self {
        self.split_at(n).1
    }
}
| rust | MIT | 3191dba6c7fe30f502006e4c195a0d4c577a53e7 | 2026-01-04T20:24:05.347227Z | false |
sile/patricia_tree | https://github.com/sile/patricia_tree/blob/3191dba6c7fe30f502006e4c195a0d4c577a53e7/src/serialization.rs | src/serialization.rs | use crate::node::{Flags, Node};
use crate::{BorrowedBytes, GenericPatriciaMap, GenericPatriciaSet};
use alloc::borrow::{Cow, ToOwned};
use alloc::vec::Vec;
use core::borrow::Borrow;
use core::marker::PhantomData;
use serde::de::{Error, Visitor};
use serde::{Deserialize, Deserializer, Serialize, Serializer};
impl<T> Serialize for GenericPatriciaSet<T> {
/// In order to serialize a [PatriciaSet], make sure you installed the crate
/// with the feature `serde`.
///
/// For example, in your `Cargo.toml`:
/// ```toml
/// [dependencies]
/// patricia_tree = { version = "*", features = ["serde"] }
/// ```
///
/// Read more about serialization / deserialization at the [serde] crate.
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.as_node().serialize(serializer)
}
}
impl<K, V: Serialize> Serialize for GenericPatriciaMap<K, V> {
/// In order to serialize a [PatriciaMap], make sure you installed the crate
/// with the feature `serde`.
///
/// For example, in your `Cargo.toml`:
/// ```toml
/// [dependencies]
/// patricia_tree = { version = "*", features = ["serde"] }
/// ```
///
/// Read more about serialization / deserialization at the [serde] crate.
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: Serializer,
{
self.as_node().serialize(serializer)
}
}
impl<T: Serialize> Serialize for Node<T> {
    /// Serializes the tree as a `(tree_bytes, values)` pair.
    ///
    /// `tree_bytes` is a flat depth-first encoding with one record per node:
    /// a flags byte, a label-length byte, the node's level as a big-endian
    /// `u16` (two bytes), then the label bytes. Values are not inlined: they
    /// are collected in traversal order into the separate `values` sequence
    /// and re-associated during decoding via the `VALUE_INITIALIZED` flag.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let mut tree_bytes = Vec::new();
        let mut values = Vec::new();
        let mut stack = vec![(0u16, self)];
        while let Some((level, node)) = stack.pop() {
            tree_bytes.push(node.flags().bits());
            tree_bytes.push(node.label().len() as u8);
            // Level is written big-endian: high byte first.
            tree_bytes.push((level >> 8) as u8);
            tree_bytes.push(level as u8);
            tree_bytes.extend(node.label());
            if let Some(value) = node.value() {
                values.push(value);
            }
            // The sibling is pushed before the child so the child is popped
            // (and therefore encoded) first.
            if let Some(sibling) = node.sibling() {
                stack.push((level, sibling));
            }
            if let Some(child) = node.child() {
                stack.push((level + 1, child));
            }
        }
        (Bytes(Cow::Owned(tree_bytes)), values).serialize(serializer)
    }
}
impl<'de, T: crate::Bytes> Deserialize<'de> for GenericPatriciaSet<T> {
/// In order to deserialize a [PatriciaSet], make sure you installed the crate
/// with the feature `serde`.
///
/// For example, in your `Cargo.toml`:
/// ```toml
/// [dependencies]
/// patricia_tree = { version = "*", features = ["serde"] }
/// ```
///
/// Read more about serialization / deserialization at the [serde] crate.
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
KeyAndNode::<T, ()>::deserialize(deserializer)
.map(|x| GenericPatriciaSet::from_node(x.node))
}
}
impl<'de, K: crate::Bytes, V: Deserialize<'de>> Deserialize<'de> for GenericPatriciaMap<K, V> {
/// In order to serialize a [PatriciaMap], make sure you installed the crate
/// with the feature `serde`.
///
/// For example, in your `Cargo.toml`:
/// ```toml
/// [dependencies]
/// patricia_tree = { version = "*", features = ["serde"] }
/// ```
///
/// Read more about serialization / deserialization at the [serde] crate.
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: Deserializer<'de>,
{
KeyAndNode::<K, V>::deserialize(deserializer).map(|x| GenericPatriciaMap::from_node(x.node))
}
}
#[derive(Debug)]
struct KeyAndNode<K, V> {
node: Node<V>,
_key: PhantomData<K>,
}
impl<'de, K: crate::Bytes, V: Deserialize<'de>> Deserialize<'de> for KeyAndNode<K, V> {
    /// Rebuilds a tree from the `(tree_bytes, values)` pair produced by
    /// `Node::serialize`.
    ///
    /// Node records arrive in depth-first order. Each record is decoded into
    /// a node with pre-allocated slots (`new_for_decoding`), and completed
    /// nodes are linked up via a stack: a node that no longer awaits a child
    /// or sibling attaches to the previous stack entry as a sibling (same
    /// level) or a child (level + 1). Malformed input yields a serde error
    /// rather than a panic.
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let (tree_bytes, mut values): (Bytes<'de>, Vec<V>) =
            Deserialize::deserialize(deserializer)?;
        // Values were collected in traversal order; reverse so `pop` yields
        // them front-to-back.
        values.reverse();
        let mut tree_bytes = tree_bytes.0.as_ref();
        let mut stack = Vec::new();
        while !tree_bytes.is_empty() {
            // Each record starts with a 4-byte header:
            // flags, label_len, level (big-endian u16).
            if tree_bytes.len() < 4 {
                return Err(D::Error::custom("unexpected EOS"));
            }
            let flags = Flags::from_bits_truncate(tree_bytes[0]);
            let label_len = usize::from(tree_bytes[1]);
            let level = (u16::from(tree_bytes[2]) << 8) | u16::from(tree_bytes[3]);
            tree_bytes = &tree_bytes[4..];
            if tree_bytes.len() < label_len {
                return Err(D::Error::custom("unexpected EOS"));
            }
            let mut node = Node::<V>::new_for_decoding(flags, label_len as u8);
            node.label_mut().copy_from_slice(&tree_bytes[..label_len]);
            // Enforce the key type's invariant (e.g. UTF-8 for `str` keys).
            if !K::Borrowed::is_valid_bytes(node.label()) {
                return Err(D::Error::custom(format!(
                    "malformed label bytes: {:?}",
                    node.label()
                )));
            }
            tree_bytes = &tree_bytes[label_len..];
            if flags.contains(Flags::VALUE_INITIALIZED) {
                let value = values
                    .pop()
                    .ok_or_else(|| D::Error::custom("too few values"))?;
                node.set_value(value);
            }
            stack.push((level, node));
            // Repeatedly pop nodes whose subtrees are complete and attach
            // them to their parent or preceding sibling on the stack.
            while let Some((level, node)) = stack.pop() {
                let flags = node.flags();
                let has_child_or_sibling = (flags.contains(Flags::CHILD_ALLOCATED)
                    && !flags.contains(Flags::CHILD_INITIALIZED))
                    || (flags.contains(Flags::SIBLING_ALLOCATED)
                        && !flags.contains(Flags::SIBLING_INITIALIZED));
                if has_child_or_sibling {
                    // Still awaiting a child or sibling record: keep it on
                    // the stack and read the next record.
                    stack.push((level, node));
                    break;
                }
                if let Some((last_level, last_node)) = stack.last_mut() {
                    if level == *last_level {
                        last_node.set_sibling(node);
                    } else if level == *last_level + 1 {
                        last_node.set_child(node);
                    } else {
                        return Err(D::Error::custom("invalid data"));
                    }
                } else if level == 0 {
                    // The root is complete and nothing is pending: done.
                    return Ok(KeyAndNode {
                        node,
                        _key: PhantomData,
                    });
                } else {
                    return Err(D::Error::custom("invalid data"));
                }
            }
        }
        Err(D::Error::custom("invalid data"))
    }
}
/// Byte-string wrapper: serializes via `serialize_bytes` and borrows from
/// the input on deserialization when the format allows zero-copy.
struct Bytes<'a>(Cow<'a, [u8]>);
impl Serialize for Bytes<'_> {
    /// Emits the wrapped bytes as a serde byte string.
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        let raw: &[u8] = self.0.borrow();
        serializer.serialize_bytes(raw)
    }
}
impl<'de> Deserialize<'de> for Bytes<'de> {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        // Delegate to the visitor, which prefers borrowing over copying.
        deserializer.deserialize_bytes(BytesVisitor)
    }
}
/// Serde visitor producing [`Bytes`] from byte strings or integer sequences.
struct BytesVisitor;
impl<'de> Visitor<'de> for BytesVisitor {
    type Value = Bytes<'de>;
    fn expecting(&self, formatter: &mut core::fmt::Formatter) -> core::fmt::Result {
        write!(formatter, "a byte string")
    }
    /// Zero-copy path: the input outlives `'de`, so borrow directly.
    fn visit_borrowed_bytes<E>(self, v: &'de [u8]) -> Result<Self::Value, E>
    where
        E: Error,
    {
        Ok(Bytes(Cow::Borrowed(v)))
    }
    /// Transient bytes must be copied to outlive the visitor call.
    fn visit_bytes<E>(self, v: &[u8]) -> Result<Self::Value, E>
    where
        E: Error,
    {
        Ok(Bytes(Cow::Owned(v.to_owned())))
    }
    /// Some formats (e.g. JSON) encode byte strings as sequences of
    /// integers, so accept that representation as well.
    fn visit_seq<A>(self, mut seq: A) -> Result<Self::Value, A::Error>
    where
        A: serde::de::SeqAccess<'de>,
    {
        // Pre-allocate from the format's size hint to avoid repeated
        // reallocation; the hint is untrusted input, so cap it to keep a
        // malicious length from forcing a huge up-front allocation.
        let mut bytes = Vec::with_capacity(seq.size_hint().unwrap_or(0).min(4096));
        while let Some(byte) = seq.next_element()? {
            bytes.push(byte);
        }
        Ok(Bytes(Cow::Owned(bytes)))
    }
}
#[cfg(test)]
mod tests {
    use crate::PatriciaMap;

    #[test]
    fn serde_works() {
        // Round-trip a small map through serde_json.
        let mut input = vec![
            (Vec::from("foo"), 1u32),
            ("bar".into(), 2),
            ("baz".into(), 3),
        ];
        input.sort();
        let map: PatriciaMap<u32> = input.iter().cloned().collect();
        let serialized = serde_json::to_vec(&map).unwrap();
        let decoded: PatriciaMap<u32> = serde_json::from_slice(&serialized).unwrap();
        assert_eq!(decoded.len(), 3);
        assert_eq!(decoded.into_iter().collect::<Vec<_>>(), input);
    }

    #[test]
    fn large_serde_works() {
        // Round-trip a large map to exercise deep and wide tree encodings.
        let mut input = (0..10000u32)
            .map(|i| (i.to_string().into_bytes(), i))
            .collect::<Vec<_>>();
        input.sort();
        let map: PatriciaMap<u32> = input.iter().cloned().collect();
        let serialized = serde_json::to_vec(&map).unwrap();
        let decoded: PatriciaMap<u32> = serde_json::from_slice(&serialized).unwrap();
        assert_eq!(decoded.len(), 10000);
        assert_eq!(decoded.into_iter().collect::<Vec<_>>(), input);
    }
}
| rust | MIT | 3191dba6c7fe30f502006e4c195a0d4c577a53e7 | 2026-01-04T20:24:05.347227Z | false |
sile/patricia_tree | https://github.com/sile/patricia_tree/blob/3191dba6c7fe30f502006e4c195a0d4c577a53e7/src/tree.rs | src/tree.rs | use alloc::vec::Vec;
use crate::{
BorrowedBytes, Bytes,
node::{self, Node, NodeMut},
};
/// Low-level patricia tree shared by the map and set front-ends.
#[derive(Debug, Clone)]
pub struct PatriciaTree<V> {
    // Root node of the tree; entries are stored beneath it.
    root: Node<V>,
    // Cached count of stored values, maintained by `insert` / `remove`.
    len: usize,
}
impl<V> PatriciaTree<V> {
    /// Makes a new, empty tree.
    pub fn new() -> Self {
        PatriciaTree {
            root: Node::root(),
            len: 0,
        }
    }
    /// Returns a reference to the root node (used by tests and serde).
    #[cfg(any(test, feature = "serde"))]
    pub fn root(&self) -> &Node<V> {
        &self.root
    }
    /// Consumes the tree, returning its root node.
    #[cfg(test)]
    pub fn into_root(self) -> Node<V> {
        self.root
    }
    /// Inserts a key-value pair, returning the previously stored value if
    /// the key was already present.
    pub fn insert<K: ?Sized + BorrowedBytes>(&mut self, key: &K, value: V) -> Option<V> {
        if let Some(old) = self.root.insert(key, value) {
            Some(old)
        } else {
            // A fresh key was added, so the cached length grows.
            self.len += 1;
            None
        }
    }
    /// Returns a reference to the value stored for `key`, if any.
    pub fn get<K: ?Sized + BorrowedBytes>(&self, key: &K) -> Option<&V> {
        self.root.get(key)
    }
    /// Returns a mutable reference to the value stored for `key`, if any.
    pub fn get_mut<K: ?Sized + BorrowedBytes>(&mut self, key: &K) -> Option<&mut V> {
        self.root.get_mut(key)
    }
    /// Returns the length of the longest common prefix between `key` and
    /// the keys stored in this tree.
    pub fn longest_common_prefix_len<K: ?Sized + BorrowedBytes>(&self, key: &K) -> usize {
        self.root.longest_common_prefix_len(key, 0)
    }
    /// Returns the entry whose key is the longest stored prefix of `key`,
    /// if any, as a `(matched key bytes, value)` pair.
    pub fn get_longest_common_prefix<'a, K: ?Sized + BorrowedBytes>(
        &self,
        key: &'a K,
    ) -> Option<(&'a [u8], &V)> {
        self.root
            .get_longest_common_prefix(key, 0)
            .map(|(n, v)| (&key.as_bytes()[..n], v))
    }
    /// Mutable variant of [`PatriciaTree::get_longest_common_prefix`].
    pub fn get_longest_common_prefix_mut<'a, K: ?Sized + BorrowedBytes>(
        &mut self,
        key: &'a K,
    ) -> Option<(&'a [u8], &mut V)> {
        self.root
            .get_longest_common_prefix_mut(key, 0)
            .map(|(n, v)| (&key.as_bytes()[..n], v))
    }
    /// Returns an iterator over the descendants of the node matching
    /// `prefix`, together with the number of `prefix` bytes that were not
    /// consumed while locating that node.
    pub fn iter_prefix<K: ?Sized + BorrowedBytes>(
        &self,
        prefix: &K,
    ) -> Option<(usize, Nodes<'_, V>)> {
        if let Some((common_prefix_len, node)) = self.root.get_prefix_node(prefix) {
            let nodes = Nodes {
                nodes: node.iter_descendant(),
                label_lens: Vec::new(),
            };
            Some((prefix.as_bytes().len() - common_prefix_len, nodes))
        } else {
            None
        }
    }
    /// Mutable variant of [`PatriciaTree::iter_prefix`].
    pub fn iter_prefix_mut<K: ?Sized + BorrowedBytes>(
        &mut self,
        prefix: &K,
    ) -> Option<(usize, NodesMut<'_, V>)> {
        if let Some((common_prefix_len, node)) = self.root.get_prefix_node_mut(prefix) {
            let nodes = NodesMut {
                nodes: node.iter_descendant_mut(),
                label_lens: Vec::new(),
            };
            Some((prefix.as_bytes().len() - common_prefix_len, nodes))
        } else {
            None
        }
    }
    /// Iterates over the nodes whose keys are prefixes of `key`.
    pub(crate) fn common_prefixes<'a, 'b, K>(
        &'a self,
        key: &'b K,
    ) -> node::CommonPrefixesIter<'a, 'b, K, V>
    where
        K: ?Sized + BorrowedBytes,
    {
        self.root.common_prefixes(key)
    }
    /// Owned-key variant of [`PatriciaTree::common_prefixes`].
    pub(crate) fn common_prefixes_owned<K>(&self, key: K) -> node::CommonPrefixesIterOwned<'_, K, V>
    where
        K: Bytes + AsRef<K::Borrowed>,
    {
        self.root.common_prefixes_owned(key)
    }
    /// Removes `key` from the tree, returning its value if it was present.
    pub fn remove<K: ?Sized + BorrowedBytes>(&mut self, key: &K) -> Option<V> {
        if let Some(old) = self.root.remove(key, 0) {
            self.len -= 1;
            Some(old)
        } else {
            None
        }
    }
    /// Splits off and returns the subtree containing every entry whose key
    /// starts with `prefix`; the remaining entries stay in `self`.
    pub fn split_by_prefix<K: ?Sized + BorrowedBytes>(&mut self, prefix: &K) -> Self {
        if let Some(splitted_root) = self.root.split_by_prefix(prefix, 0) {
            // Re-root the split-off subtree under `prefix` so its entries
            // keep their full keys.
            let mut splitted_root = Node::new(prefix.as_bytes(), None, Some(splitted_root), None);
            splitted_root.try_merge_with_child(1);
            let splitted = Self::from(Node::new(b"", None, Some(splitted_root), None));
            self.len -= splitted.len();
            splitted
        } else {
            Self::new()
        }
    }
    /// Removes all entries.
    pub fn clear(&mut self) {
        self.root = Node::root();
        self.len = 0;
    }
    /// Returns the number of values stored in this tree.
    pub fn len(&self) -> usize {
        self.len
    }
    /// Returns `true` if this tree stores no values.
    ///
    /// Companion to [`PatriciaTree::len`] (see Clippy's
    /// `len_without_is_empty` lint).
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
    /// Iterates over all nodes, yielding each with its parent key length.
    pub fn nodes(&self) -> Nodes<'_, V> {
        Nodes {
            nodes: self.root.iter(),
            label_lens: Vec::new(),
        }
    }
    /// Mutable variant of [`PatriciaTree::nodes`].
    pub fn nodes_mut(&mut self) -> NodesMut<'_, V> {
        NodesMut {
            nodes: self.root.iter_mut(),
            label_lens: Vec::new(),
        }
    }
    /// Owning variant of [`PatriciaTree::nodes`].
    pub fn into_nodes(self) -> IntoNodes<V> {
        IntoNodes {
            nodes: self.root.into_iter(),
            label_lens: Vec::new(),
        }
    }
}
impl<V> Default for PatriciaTree<V> {
fn default() -> Self {
Self::new()
}
}
impl<V> From<Node<V>> for PatriciaTree<V> {
    fn from(f: Node<V>) -> Self {
        let mut this = PatriciaTree { root: f, len: 0 };
        // The entry count is not carried by the node itself, so recount
        // the value-bearing nodes to restore `len`.
        let value_count = this
            .nodes()
            .filter(|(_, node)| node.value().is_some())
            .count();
        this.len = value_count;
        this
    }
}
/// Borrowing node iterator yielding `(parent key length, node)` pairs.
#[derive(Debug)]
pub struct Nodes<'a, V: 'a> {
    // Depth-first traversal yielding `(level, node)` pairs.
    nodes: node::Iter<'a, V>,
    // `label_lens[d]` holds the label length of the most recent node seen
    // at depth `d`; summing the first `level` entries gives the length of
    // the current node's parent key.
    label_lens: Vec<usize>,
}
impl<'a, V: 'a> Iterator for Nodes<'a, V> {
    type Item = (usize, &'a Node<V>);
    fn next(&mut self) -> Option<Self::Item> {
        let (level, node) = self.nodes.next()?;
        // Record this node's label length at its depth, then derive the
        // parent key length from the ancestors' label lengths.
        self.label_lens.resize(level + 1, 0);
        self.label_lens[level] = node.label().len();
        let parent_key_len = self.label_lens.iter().take(level).sum();
        Some((parent_key_len, node))
    }
}
/// Mutable counterpart of [`Nodes`], yielding `(parent key length, node)`.
#[derive(Debug)]
pub struct NodesMut<'a, V: 'a> {
    // Depth-first mutable traversal yielding `(level, node)` pairs.
    nodes: node::IterMut<'a, V>,
    // Same bookkeeping as `Nodes::label_lens`.
    label_lens: Vec<usize>,
}
impl<'a, V: 'a> Iterator for NodesMut<'a, V> {
    type Item = (usize, NodeMut<'a, V>);
    fn next(&mut self) -> Option<Self::Item> {
        let (level, node) = self.nodes.next()?;
        // Same bookkeeping as `Nodes::next`: track label lengths per depth
        // so the parent key length can be summed from the ancestors.
        self.label_lens.resize(level + 1, 0);
        self.label_lens[level] = node.label().len();
        let parent_key_len = self.label_lens.iter().take(level).sum();
        Some((parent_key_len, node))
    }
}
/// Owning counterpart of [`Nodes`], yielding `(parent key length, node)`.
#[derive(Debug)]
pub struct IntoNodes<V> {
    // Depth-first owning traversal yielding `(level, node)` pairs.
    nodes: node::IntoIter<V>,
    // Same bookkeeping as `Nodes::label_lens`.
    label_lens: Vec<usize>,
}
impl<V> Iterator for IntoNodes<V> {
    type Item = (usize, Node<V>);
    fn next(&mut self) -> Option<Self::Item> {
        let (level, node) = self.nodes.next()?;
        // Same bookkeeping as `Nodes::next`: track label lengths per depth
        // so the parent key length can be summed from the ancestors.
        self.label_lens.resize(level + 1, 0);
        self.label_lens[level] = node.label().len();
        let parent_key_len = self.label_lens.iter().take(level).sum();
        Some((parent_key_len, node))
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn it_works() {
        let mut tree = PatriciaTree::new();

        // First insert of a key returns `None`; re-inserting returns the
        // previously stored value.
        let insertions = [
            ("", 1, None),
            ("", 2, Some(1)),
            ("foo", 3, None),
            ("foo", 4, Some(3)),
            ("foobar", 5, None),
            ("bar", 6, None),
            ("baz", 7, None),
            ("bar", 7, Some(6)),
            ("baz", 8, Some(7)),
        ];
        for (key, value, previous) in insertions {
            assert_eq!(tree.insert(key.as_bytes(), value), previous);
        }

        let expected = [("", 2), ("foo", 4), ("foobar", 5), ("bar", 7), ("baz", 8)];
        for (key, value) in expected {
            assert_eq!(tree.get(key.as_bytes()), Some(&value));
        }
        assert_eq!(tree.get("qux".as_bytes()), None);

        // A clone observes the same contents.
        let tree2 = tree.clone();
        for (key, value) in expected {
            assert_eq!(tree2.get(key.as_bytes()), Some(&value));
        }

        // Removing yields the stored value once, then `None`.
        for (key, value) in expected {
            assert_eq!(tree.remove(key.as_bytes()), Some(value));
        }
        assert_eq!(tree.remove("qux".as_bytes()), None);
        for (key, _) in expected {
            assert_eq!(tree.get(key.as_bytes()), None);
        }
        assert_eq!(tree.get("qux".as_bytes()), None);

        // The clone is unaffected by mutations of the original.
        for (key, value) in expected {
            assert_eq!(tree2.get(key.as_bytes()), Some(&value));
        }
    }
}
| rust | MIT | 3191dba6c7fe30f502006e4c195a0d4c577a53e7 | 2026-01-04T20:24:05.347227Z | false |
sile/patricia_tree | https://github.com/sile/patricia_tree/blob/3191dba6c7fe30f502006e4c195a0d4c577a53e7/src/map.rs | src/map.rs | //! A map based on a patricia tree.
use crate::node;
#[cfg(any(test, feature = "serde"))]
use crate::node::Node;
use crate::tree::{self, PatriciaTree};
use crate::{BorrowedBytes, Bytes};
use alloc::borrow::ToOwned;
use alloc::string::String;
use alloc::vec::Vec;
use core::fmt;
use core::iter::FromIterator;
use core::marker::PhantomData;
/// Patricia tree based map with [`Vec<u8>`] as key (arbitrary byte-string keys).
pub type PatriciaMap<V> = GenericPatriciaMap<Vec<u8>, V>;
/// Patricia tree based map with [`String`] as key (UTF-8 string keys).
pub type StringPatriciaMap<V> = GenericPatriciaMap<String, V>;
/// Patricia tree based map.
pub struct GenericPatriciaMap<K, V> {
    // The underlying tree; keys are stored as raw bytes.
    tree: PatriciaTree<V>,
    // Ties the map to its logical key type `K` without storing any `K`.
    _key: PhantomData<K>,
}
impl<K, V> GenericPatriciaMap<K, V> {
    /// Makes a new empty `PatriciaMap` instance.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// assert!(map.is_empty());
    ///
    /// map.insert("foo", 10);
    /// assert_eq!(map.len(), 1);
    /// assert_eq!(map.get("foo"), Some(&10));
    ///
    /// map.remove("foo");
    /// assert_eq!(map.get("foo"), None);
    /// ```
    pub fn new() -> Self {
        GenericPatriciaMap {
            tree: PatriciaTree::new(),
            _key: PhantomData,
        }
    }
    /// Clears this map, removing all values.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// map.insert("foo", 1);
    /// map.clear();
    /// assert!(map.is_empty());
    /// ```
    pub fn clear(&mut self) {
        self.tree.clear();
    }
    /// Returns the number of elements in this map.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// map.insert("foo", 1);
    /// map.insert("bar", 2);
    /// assert_eq!(map.len(), 2);
    /// ```
    pub fn len(&self) -> usize {
        self.tree.len()
    }
    /// Returns `true` if this map contains no elements.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// assert!(map.is_empty());
    ///
    /// map.insert("foo", 1);
    /// assert!(!map.is_empty());
    ///
    /// map.clear();
    /// assert!(map.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Builds a map directly from an already-decoded root node
    /// (serde deserialization support).
    #[cfg(feature = "serde")]
    pub(crate) fn from_node(node: Node<V>) -> Self {
        Self {
            tree: node.into(),
            _key: PhantomData,
        }
    }
    /// Returns the root node of the underlying tree
    /// (used by tests and serde serialization).
    #[cfg(any(test, feature = "serde"))]
    pub(crate) fn as_node(&self) -> &Node<V> {
        self.tree.root()
    }
    /// Consumes the map, returning the root node of the underlying tree.
    #[cfg(test)]
    pub(crate) fn into_node(self) -> Node<V> {
        self.tree.into_root()
    }
}
impl<K: Bytes, V> GenericPatriciaMap<K, V> {
    /// Returns `true` if this map contains a value for the specified key.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// map.insert("foo", 1);
    /// assert!(map.contains_key("foo"));
    /// assert!(!map.contains_key("bar"));
    /// ```
    pub fn contains_key<Q: AsRef<K::Borrowed>>(&self, key: Q) -> bool {
        self.tree.get(key.as_ref()).is_some()
    }
    /// Returns a reference to the value corresponding to the key.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// map.insert("foo", 1);
    /// assert_eq!(map.get("foo"), Some(&1));
    /// assert_eq!(map.get("bar"), None);
    /// ```
    pub fn get<Q: AsRef<K::Borrowed>>(&self, key: Q) -> Option<&V> {
        self.tree.get(key.as_ref())
    }
    /// Returns a mutable reference to the value corresponding to the key.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// map.insert("foo", 1);
    /// map.get_mut("foo").map(|v| *v = 2);
    /// assert_eq!(map.get("foo"), Some(&2));
    /// ```
    pub fn get_mut<Q: AsRef<K::Borrowed>>(&mut self, key: Q) -> Option<&mut V> {
        self.tree.get_mut(key.as_ref())
    }
    /// Finds the longest common prefix of `key` and the keys in this map,
    /// and returns a reference to the entry whose key matches the prefix.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// map.insert("foo", 1);
    /// map.insert("foobar", 2);
    /// assert_eq!(map.get_longest_common_prefix("fo"), None);
    /// assert_eq!(map.get_longest_common_prefix("foo"), Some(("foo".as_bytes(), &1)));
    /// assert_eq!(map.get_longest_common_prefix("fooba"), Some(("foo".as_bytes(), &1)));
    /// assert_eq!(map.get_longest_common_prefix("foobar"), Some(("foobar".as_bytes(), &2)));
    /// assert_eq!(map.get_longest_common_prefix("foobarbaz"), Some(("foobar".as_bytes(), &2)));
    /// ```
    pub fn get_longest_common_prefix<'a, Q>(&self, key: &'a Q) -> Option<(&'a K::Borrowed, &V)>
    where
        Q: ?Sized + AsRef<K::Borrowed>,
    {
        let (key, value) = self.tree.get_longest_common_prefix(key.as_ref())?;
        Some((K::Borrowed::from_bytes(key), value))
    }
    /// Finds the longest common prefix of `key` and the keys in this map,
    /// and returns a mutable reference to the entry whose key matches the prefix.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// map.insert("foo", 1);
    /// map.insert("foobar", 2);
    /// assert_eq!(map.get_longest_common_prefix_mut("fo"), None);
    /// assert_eq!(map.get_longest_common_prefix_mut("foo"), Some(("foo".as_bytes(), &mut 1)));
    /// *map.get_longest_common_prefix_mut("foo").unwrap().1 = 3;
    /// assert_eq!(map.get_longest_common_prefix_mut("fooba"), Some(("foo".as_bytes(), &mut 3)));
    /// assert_eq!(map.get_longest_common_prefix_mut("foobar"), Some(("foobar".as_bytes(), &mut 2)));
    /// *map.get_longest_common_prefix_mut("foobar").unwrap().1 = 4;
    /// assert_eq!(map.get_longest_common_prefix_mut("foobarbaz"), Some(("foobar".as_bytes(), &mut 4)));
    /// ```
    pub fn get_longest_common_prefix_mut<'a, Q>(
        &mut self,
        key: &'a Q,
    ) -> Option<(&'a K::Borrowed, &mut V)>
    where
        Q: ?Sized + AsRef<K::Borrowed>,
    {
        let (key, value) = self.tree.get_longest_common_prefix_mut(key.as_ref())?;
        Some((K::Borrowed::from_bytes(key), value))
    }
    /// Returns the longest common prefix length of `key` and the keys in this map.
    ///
    /// Unlike `get_longest_common_prefix()`, this method does not check if there is a key that matches the prefix in this map.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// map.insert("foo", 1);
    /// map.insert("foobar", 2);
    /// assert_eq!(map.longest_common_prefix_len("fo"), 2);
    /// assert_eq!(map.longest_common_prefix_len("foo"), 3);
    /// assert_eq!(map.longest_common_prefix_len("fooba"), 5);
    /// assert_eq!(map.longest_common_prefix_len("foobar"), 6);
    /// assert_eq!(map.longest_common_prefix_len("foobarbaz"), 6);
    /// assert_eq!(map.longest_common_prefix_len("foba"), 2);
    /// ```
    pub fn longest_common_prefix_len<Q>(&self, key: &Q) -> usize
    where
        Q: ?Sized + AsRef<K::Borrowed>,
    {
        self.tree.longest_common_prefix_len(key.as_ref())
    }
    /// Inserts a key-value pair into this map.
    ///
    /// If the map did not have this key present, `None` is returned.
    /// If the map did have this key present, the value is updated, and the old value is returned.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// assert_eq!(map.insert("foo", 1), None);
    /// assert_eq!(map.get("foo"), Some(&1));
    /// assert_eq!(map.insert("foo", 2), Some(1));
    /// assert_eq!(map.get("foo"), Some(&2));
    /// ```
    pub fn insert<Q: AsRef<K::Borrowed>>(&mut self, key: Q, value: V) -> Option<V> {
        self.tree.insert(key.as_ref(), value)
    }
    /// Removes a key from this map, returning the value at the key if the key was previously in it.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map = PatriciaMap::new();
    /// map.insert("foo", 1);
    /// assert_eq!(map.remove("foo"), Some(1));
    /// assert_eq!(map.remove("foo"), None);
    /// ```
    pub fn remove<Q: AsRef<K::Borrowed>>(&mut self, key: Q) -> Option<V> {
        self.tree.remove(key.as_ref())
    }
    /// Returns an iterator that collects all entries in the map up to a certain key.
    ///
    /// # Example
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut t = PatriciaMap::new();
    /// t.insert("a", vec!["a"]);
    /// t.insert("x", vec!["x"]);
    /// t.insert("ab", vec!["b"]);
    /// t.insert("abc", vec!["c"]);
    /// t.insert("abcd", vec!["d"]);
    /// t.insert("abcdf", vec!["f"]);
    /// assert!(t
    ///     .common_prefixes(b"abcde")
    ///     .map(|(_, v)| v)
    ///     .flatten()
    ///     .eq(vec![&"a", &"b", &"c", &"d"].into_iter()));
    /// ```
    pub fn common_prefixes<'a, 'b, Q>(
        &'a self,
        key: &'b Q,
    ) -> CommonPrefixesIter<'a, 'b, K::Borrowed, V>
    where
        Q: ?Sized + AsRef<K::Borrowed>,
    {
        CommonPrefixesIter {
            key_bytes: key.as_ref().as_bytes(),
            iterator: self.tree.common_prefixes(key.as_ref()),
        }
    }
    /// Returns an iterator that collects all values of entries in the map up to a certain key.
    ///
    /// # Example
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    /// let mut t = PatriciaMap::new();
    /// t.insert("a", vec!["a"]);
    /// t.insert("x", vec!["x"]);
    /// t.insert("ab", vec!["b"]);
    /// t.insert("abc", vec!["c"]);
    /// t.insert("abcd", vec!["d"]);
    /// t.insert("abcdf", vec!["f"]);
    /// assert!(t
    ///     .common_prefix_values(b"abcde")
    ///     .flatten()
    ///     .eq(vec![&"a", &"b", &"c", &"d"].into_iter()));
    /// ```
    pub fn common_prefix_values<'a, 'b, Q>(&'a self, key: &'b Q) -> impl Iterator<Item = &'a V>
    where
        Q: ?Sized + AsRef<K::Borrowed>,
        <K as Bytes>::Borrowed: 'b,
    {
        self.tree
            .common_prefixes(key.as_ref())
            .filter_map(|(_, n)| n.value())
    }
    /// Returns an iterator that collects all values of entries in the map up to a certain key.
    /// Takes owned key value so that iterator is not tied to key lifetime
    ///
    /// # Example
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    /// let mut t = PatriciaMap::new();
    /// t.insert("a", vec!["a"]);
    /// t.insert("x", vec!["x"]);
    /// t.insert("ab", vec!["b"]);
    /// t.insert("abc", vec!["c"]);
    /// t.insert("abcd", vec!["d"]);
    /// t.insert("abcdf", vec!["f"]);
    /// assert!(t
    ///     .common_prefix_values_owned(b"abcde".to_vec())
    ///     .flatten()
    ///     .eq(vec![&"a", &"b", &"c", &"d"].into_iter()));
    /// ```
    pub fn common_prefix_values_owned(&self, key: K) -> impl Iterator<Item = &V>
    where
        K: AsRef<K::Borrowed>,
    {
        self.tree
            .common_prefixes_owned(key)
            .filter_map(|(_, n)| n.value())
    }
    /// Splits the map into two at the given prefix.
    ///
    /// The returned map contains all the entries of which keys are prefixed by `prefix`.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut a = PatriciaMap::new();
    /// a.insert("rust", 1);
    /// a.insert("ruby", 2);
    /// a.insert("bash", 3);
    /// a.insert("erlang", 4);
    /// a.insert("elixir", 5);
    ///
    /// let b = a.split_by_prefix("e");
    /// assert_eq!(a.len(), 3);
    /// assert_eq!(b.len(), 2);
    ///
    /// assert_eq!(a.keys().collect::<Vec<_>>(), [b"bash", b"ruby", b"rust"]);
    /// assert_eq!(b.keys().collect::<Vec<_>>(), [b"elixir", b"erlang"]);
    /// ```
    pub fn split_by_prefix<Q: AsRef<K::Borrowed>>(&mut self, prefix: Q) -> Self {
        let subtree = self.tree.split_by_prefix(prefix.as_ref());
        GenericPatriciaMap {
            tree: subtree,
            _key: PhantomData,
        }
    }
    /// Gets an iterator over the entries of this map, sorted by key.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let map: PatriciaMap<_> =
    ///     vec![("foo", 1), ("bar", 2), ("baz", 3)].into_iter().collect();
    /// assert_eq!(vec![(Vec::from("bar"), &2), ("baz".into(), &3), ("foo".into(), &1)],
    ///            map.iter().collect::<Vec<_>>());
    /// ```
    pub fn iter(&self) -> Iter<'_, K, V> {
        Iter::new(self.tree.nodes(), Vec::new())
    }
    /// Gets a mutable iterator over the entries of this map, sorted by key.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map: PatriciaMap<_> =
    ///     vec![("foo", 1), ("bar", 2), ("baz", 3)].into_iter().collect();
    /// for (_, v) in map.iter_mut() {
    ///     *v += 10;
    /// }
    /// assert_eq!(map.get("bar"), Some(&12));
    /// ```
    pub fn iter_mut(&mut self) -> IterMut<'_, K, V> {
        IterMut::new(self.tree.nodes_mut(), Vec::new())
    }
    /// Gets an iterator over the keys of this map, in sorted order.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let map: PatriciaMap<_> =
    ///     vec![("foo", 1), ("bar", 2), ("baz", 3)].into_iter().collect();
    /// assert_eq!(vec![Vec::from("bar"), "baz".into(), "foo".into()],
    ///            map.keys().collect::<Vec<_>>());
    /// ```
    pub fn keys(&self) -> Keys<'_, K, V> {
        Keys(self.iter())
    }
    /// Gets an iterator over the values of this map, in order by key.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let map: PatriciaMap<_> =
    ///     vec![("foo", 1), ("bar", 2), ("baz", 3)].into_iter().collect();
    /// assert_eq!(vec![2, 3, 1],
    ///            map.values().cloned().collect::<Vec<_>>());
    /// ```
    pub fn values(&self) -> Values<'_, V> {
        Values {
            nodes: self.tree.nodes(),
        }
    }
    /// Gets a mutable iterator over the values of this map, in order by key.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map: PatriciaMap<_> =
    ///     vec![("foo", 1), ("bar", 2), ("baz", 3)].into_iter().collect();
    /// for v in map.values_mut() {
    ///     *v += 10;
    /// }
    /// assert_eq!(vec![12, 13, 11],
    ///            map.values().cloned().collect::<Vec<_>>());
    /// ```
    pub fn values_mut(&mut self) -> ValuesMut<'_, V> {
        ValuesMut {
            nodes: self.tree.nodes_mut(),
        }
    }
}
impl<K: Bytes, V> GenericPatriciaMap<K, V> {
    /// Gets an iterator over the entries having the given prefix of this map, sorted by key.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let map: PatriciaMap<_> =
    ///     vec![("foo", 1), ("bar", 2), ("baz", 3)].into_iter().collect();
    /// assert_eq!(vec![(Vec::from("bar"), &2), ("baz".into(), &3)],
    ///            map.iter_prefix(b"ba").collect::<Vec<_>>());
    /// ```
    pub fn iter_prefix<'a>(&'a self, prefix: &K::Borrowed) -> impl Iterator<Item = (K, &'a V)> {
        self.tree
            .iter_prefix(prefix)
            .into_iter()
            .flat_map(move |(prefix_len, nodes)| {
                // Seed the iterator's key buffer with the leading part of
                // `prefix` that is not covered by the subtree's own labels,
                // so yielded keys are complete.
                Iter::<K, V>::new(nodes, Vec::from(&prefix.as_bytes()[..prefix_len]))
            })
    }
    /// Gets a mutable iterator over the entries having the given prefix of this map, sorted by key.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaMap;
    ///
    /// let mut map: PatriciaMap<_> =
    ///     vec![("foo", 1), ("bar", 2), ("baz", 3)].into_iter().collect();
    /// assert_eq!(vec![(Vec::from("bar"), &mut 2), ("baz".into(), &mut 3)],
    ///            map.iter_prefix_mut(b"ba").collect::<Vec<_>>());
    /// ```
    pub fn iter_prefix_mut<'a>(
        &'a mut self,
        prefix: &K::Borrowed,
    ) -> impl Iterator<Item = (K, &'a mut V)> {
        self.tree
            .iter_prefix_mut(prefix)
            .into_iter()
            .flat_map(move |(prefix_len, nodes)| {
                // Same prefix seeding as `iter_prefix`.
                IterMut::<K, V>::new(nodes, Vec::from(&prefix.as_bytes()[..prefix_len]))
            })
    }
}
impl<K: Bytes + fmt::Debug, V: fmt::Debug> fmt::Debug for GenericPatriciaMap<K, V> {
    /// Renders the map as `{key: value, …}`, in key order.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_map();
        builder.entries(self.iter());
        builder.finish()
    }
}
impl<K, V: Clone> Clone for GenericPatriciaMap<K, V> {
fn clone(&self) -> Self {
Self {
tree: self.tree.clone(),
_key: PhantomData,
}
}
}
impl<K, V> Default for GenericPatriciaMap<K, V> {
fn default() -> Self {
Self::new()
}
}
impl<K: Bytes, V> IntoIterator for GenericPatriciaMap<K, V> {
    type Item = (K, V);
    type IntoIter = IntoIter<K, V>;
    /// Converts the map into an owning `(key, value)` iterator in key order.
    fn into_iter(self) -> Self::IntoIter {
        let nodes = self.tree.into_nodes();
        IntoIter {
            nodes,
            key_bytes: Vec::new(),
            _key: PhantomData,
        }
    }
}
impl<K, Q, V> FromIterator<(Q, V)> for GenericPatriciaMap<K, V>
where
    K: Bytes,
    Q: AsRef<K::Borrowed>,
{
    /// Builds a map by inserting every pair; later duplicates overwrite
    /// earlier values, as with `insert`.
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = (Q, V)>,
    {
        let mut map = Self::new();
        for (key, value) in iter {
            map.insert(key, value);
        }
        map
    }
}
impl<K, Q, V> Extend<(Q, V)> for GenericPatriciaMap<K, V>
where
    K: Bytes,
    Q: AsRef<K::Borrowed>,
{
    /// Inserts every pair; later duplicates overwrite earlier values.
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = (Q, V)>,
    {
        iter.into_iter().for_each(|(key, value)| {
            self.insert(key, value);
        });
    }
}
/// An iterator over a `PatriciaMap`'s entries.
#[derive(Debug)]
pub struct Iter<'a, K, V: 'a> {
    // Depth-first traversal over the underlying tree nodes.
    nodes: tree::Nodes<'a, V>,
    // Scratch buffer holding the full key bytes of the current position.
    key_bytes: Vec<u8>,
    // Length of the fixed key prefix seeded at construction (used by
    // prefix iteration); bytes before this offset are never truncated.
    key_offset: usize,
    _key: PhantomData<K>,
}
impl<'a, K, V: 'a> Iter<'a, K, V> {
    /// Creates an entry iterator seeded with `key` as a fixed key prefix.
    fn new(nodes: tree::Nodes<'a, V>, key: Vec<u8>) -> Self {
        Self {
            nodes,
            key_offset: key.len(),
            key_bytes: key,
            _key: PhantomData,
        }
    }
}
impl<'a, K: Bytes, V: 'a> Iterator for Iter<'a, K, V> {
    type Item = (K, &'a V);
    fn next(&mut self) -> Option<Self::Item> {
        // Walk nodes until one carrying a value is found, keeping the key
        // buffer in sync with the current tree position along the way.
        while let Some((key_len, node)) = self.nodes.next() {
            self.key_bytes.truncate(self.key_offset + key_len);
            self.key_bytes.extend(node.label());
            if let Some(value) = node.value() {
                let key = K::Borrowed::from_bytes(&self.key_bytes).to_owned();
                return Some((key, value));
            }
        }
        None
    }
}
/// An owning iterator over a `PatriciaMap`'s entries.
#[derive(Debug)]
pub struct IntoIter<K, V> {
    // Owning depth-first traversal over the tree nodes.
    nodes: tree::IntoNodes<V>,
    // Scratch buffer holding the full key bytes of the current position.
    key_bytes: Vec<u8>,
    _key: PhantomData<K>,
}
impl<K: Bytes, V> Iterator for IntoIter<K, V> {
    type Item = (K, V);
    fn next(&mut self) -> Option<Self::Item> {
        // Walk nodes until one carrying a value is found, keeping the key
        // buffer in sync with the current tree position along the way.
        while let Some((key_len, mut node)) = self.nodes.next() {
            self.key_bytes.truncate(key_len);
            self.key_bytes.extend(node.label());
            if let Some(value) = node.take_value() {
                let key = K::Borrowed::from_bytes(&self.key_bytes).to_owned();
                return Some((key, value));
            }
        }
        None
    }
}
/// A mutable iterator over a `PatriciaMap`'s entries.
#[derive(Debug)]
pub struct IterMut<'a, K, V: 'a> {
    // Mutable depth-first traversal over the tree nodes.
    nodes: tree::NodesMut<'a, V>,
    // Scratch buffer holding the full key bytes of the current position.
    key_bytes: Vec<u8>,
    // Length of the fixed key prefix seeded at construction; bytes before
    // this offset are never truncated.
    key_offset: usize,
    _key: PhantomData<K>,
}
impl<'a, K, V: 'a> IterMut<'a, K, V> {
    /// Creates a mutable entry iterator seeded with `key` as a fixed prefix.
    fn new(nodes: tree::NodesMut<'a, V>, key: Vec<u8>) -> Self {
        Self {
            nodes,
            key_offset: key.len(),
            key_bytes: key,
            _key: PhantomData,
        }
    }
}
impl<'a, K: Bytes, V: 'a> Iterator for IterMut<'a, K, V> {
    type Item = (K, &'a mut V);
    fn next(&mut self) -> Option<Self::Item> {
        // Walk nodes until one carrying a value is found, keeping the key
        // buffer in sync with the current tree position along the way.
        while let Some((key_len, node)) = self.nodes.next() {
            self.key_bytes.truncate(self.key_offset + key_len);
            self.key_bytes.extend(node.label());
            if let Some(value) = node.into_value_mut() {
                let key = K::Borrowed::from_bytes(&self.key_bytes).to_owned();
                return Some((key, value));
            }
        }
        None
    }
}
/// An iterator over a `PatriciaMap`'s keys.
#[derive(Debug)]
// Thin wrapper over [`Iter`] that discards the values.
pub struct Keys<'a, K, V: 'a>(Iter<'a, K, V>);
impl<'a, K: Bytes, V: 'a> Iterator for Keys<'a, K, V> {
    type Item = K;
    fn next(&mut self) -> Option<Self::Item> {
        // Drop the value half of the underlying entry iterator.
        let (key, _) = self.0.next()?;
        Some(key)
    }
}
/// An iterator over a `PatriciaMap`'s values.
#[derive(Debug)]
pub struct Values<'a, V: 'a> {
    // Depth-first node traversal; nodes without values are skipped.
    nodes: tree::Nodes<'a, V>,
}
impl<'a, V: 'a> Iterator for Values<'a, V> {
    type Item = &'a V;
    fn next(&mut self) -> Option<Self::Item> {
        // Skip internal nodes that carry no value.
        self.nodes.find_map(|(_, node)| node.value())
    }
}
/// A mutable iterator over a `PatriciaMap`'s values.
#[derive(Debug)]
pub struct ValuesMut<'a, V: 'a> {
    // Mutable depth-first node traversal; nodes without values are skipped.
    nodes: tree::NodesMut<'a, V>,
}
impl<'a, V: 'a> Iterator for ValuesMut<'a, V> {
    type Item = &'a mut V;
    fn next(&mut self) -> Option<Self::Item> {
        // Skip internal nodes that carry no value.
        self.nodes.find_map(|(_, node)| node.into_value_mut())
    }
}
/// An iterator over entries in a `PatriciaMap` that share a common prefix with
/// a given key.
#[derive(Debug)]
pub struct CommonPrefixesIter<'a, 'b, K: ?Sized, V> {
    // The query key's raw bytes; yielded keys are slices of this buffer.
    key_bytes: &'b [u8],
    // Underlying node-level prefix walk, yielding `(prefix length, node)`.
    iterator: node::CommonPrefixesIter<'a, 'b, K, V>,
}
impl<'a, 'b, K, V> Iterator for CommonPrefixesIter<'a, 'b, K, V>
where
    K: 'b + ?Sized + BorrowedBytes,
{
    type Item = (&'b K, &'a V);
    fn next(&mut self) -> Option<Self::Item> {
        // Yield only prefix nodes that actually store a value, pairing each
        // with the matching prefix slice of the original query key.
        while let Some((prefix_len, node)) = self.iterator.next() {
            if let Some(value) = node.value() {
                return Some((K::from_bytes(&self.key_bytes[..prefix_len]), value));
            }
        }
        None
    }
}
#[cfg(test)]
mod tests {
use super::*;
use rand::seq::SliceRandom;
#[test]
fn it_works() {
let input = [
("7", 7),
("43", 43),
("92", 92),
("37", 37),
("31", 31),
("21", 21),
("0", 0),
("35", 35),
("47", 47),
("82", 82),
("61", 61),
("9", 9),
];
let mut map = PatriciaMap::new();
for &(ref k, v) in input.iter() {
assert_eq!(map.insert(k, v), None);
assert_eq!(map.get(k), Some(&v));
}
}
#[test]
fn debug_works() {
let map: PatriciaMap<_> = vec![("foo", 1), ("bar", 2), ("baz", 3)]
.into_iter()
.collect();
assert_eq!(
format!("{map:?}"),
"{[98, 97, 114]: 2, [98, 97, 122]: 3, [102, 111, 111]: 1}"
);
}
#[test]
fn clear_works() {
let mut map = PatriciaMap::new();
assert!(map.is_empty());
map.insert("foo", 1);
assert!(!map.is_empty());
map.clear();
assert!(map.is_empty());
}
#[test]
fn into_iter_works() {
let map: PatriciaMap<_> = vec![("foo", 1), ("bar", 2), ("baz", 3)]
.into_iter()
.collect();
assert_eq!(
map.into_iter().collect::<Vec<_>>(),
[(Vec::from("bar"), 2), ("baz".into(), 3), ("foo".into(), 1)]
);
}
#[test]
fn iter_mut_works() {
let mut map: PatriciaMap<_> = vec![("foo", 1), ("bar", 2), ("baz", 3)]
.into_iter()
.collect();
for (_key, x) in map.iter_mut() {
(*x) *= 2;
}
assert_eq!(
map.into_iter().collect::<Vec<_>>(),
[(Vec::from("bar"), 4), ("baz".into(), 6), ("foo".into(), 2)]
);
}
#[test]
#[cfg_attr(miri, ignore)]
fn large_map_works() {
let mut input = (0..10000).map(|i| (i.to_string(), i)).collect::<Vec<_>>();
input.shuffle(&mut rand::rng());
// Insert
let mut map = input.iter().cloned().collect::<PatriciaMap<_>>();
assert_eq!(map.len(), input.len());
// Get
for &(ref k, v) in input.iter() {
assert_eq!(map.get(k), Some(&v));
}
// Remove
for &(ref k, v) in input.iter().take(input.len() / 2) {
assert_eq!(map.remove(k), Some(v));
assert_eq!(map.remove(k), None);
}
for (k, _) in input.iter().take(input.len() / 2) {
assert_eq!(map.get(k), None);
}
for &(ref k, v) in input.iter().skip(input.len() / 2) {
assert_eq!(map.get(k), Some(&v));
}
// Insert
for &(ref k, v) in input.iter().take(input.len() / 2) {
assert_eq!(map.insert(k, v), None);
}
for &(ref k, v) in input.iter().skip(input.len() / 2) {
assert_eq!(map.insert(k, v), Some(v));
}
// Get
for &(ref k, v) in input.iter() {
assert_eq!(map.get(k), Some(&v));
}
}
#[test]
fn test_common_word_prefixes() {
let mut t = PatriciaMap::new();
t.insert(".com.foo.", vec!["b"]);
t.insert(".", vec!["a"]);
t.insert(".com.foo.bar.", vec!["c"]);
t.insert("..", vec!["e"]);
t.insert("x", vec!["d"]);
let results = t
.common_prefixes(b".com.foo.bar.baz.")
.flat_map(|(_, v)| v)
.cloned()
.collect::<Vec<_>>();
assert!(results.iter().eq(vec![&"a", &"b", &"c"].into_iter()));
}
#[test]
fn test_letter_prefixes() {
let mut t = PatriciaMap::new();
t.insert("x", vec!["x"]);
t.insert("a", vec!["a"]);
t.insert("ab", vec!["b"]);
t.insert("abc", vec!["c"]);
t.insert("abcd", vec!["d"]);
t.insert("abcdf", vec!["f"]);
let results = t
.common_prefixes(b"abcde")
.flat_map(|(_, v)| v)
.cloned()
.collect::<Vec<_>>();
assert!(results.iter().eq(vec![&"a", &"b", &"c", &"d"].into_iter()));
}
#[test]
fn test_common_prefixes() {
let mut t = PatriciaMap::new();
t.insert("b", vec!["b"]);
t.insert("a", vec!["a"]);
t.insert("c", vec!["c"]);
t.insert("..", vec!["e"]);
t.insert("x", vec!["d"]);
let results = t
.common_prefixes(b"abc")
.flat_map(|(k, v)| {
unsafe {
println!("{:?}", std::str::from_utf8_unchecked(k));
}
v
})
.cloned()
.collect::<Vec<_>>();
dbg!(&results);
assert!(results.iter().eq(vec![&"a"].into_iter()));
let mut t = PatriciaMap::new();
t.insert("ab", vec!["b"]);
t.insert("a", vec!["a"]);
t.insert("abc", vec!["c"]);
t.insert("..", vec!["e"]);
t.insert("x", vec!["d"]);
let results = t
.common_prefixes(b"abcd")
.flat_map(|(_, v)| v)
.cloned()
.collect::<Vec<_>>();
assert!(results.iter().eq(vec![&"a", &"b", &"c"].into_iter()));
let mut list = PatriciaMap::new();
list.insert(b".com.foocatnetworks.".as_ref(), vec![0_u16]);
list.insert(b".com.foocatnetworks.foo.".as_ref(), vec![1]);
list.insert(b".com.foocatnetworks.foo.baz.".as_ref(), vec![2]);
list.insert(b".com.google.".as_ref(), vec![0]);
list.insert(b".com.cisco.".as_ref(), vec![0]);
list.insert(b".org.wikipedia.".as_ref(), vec![0]);
let results = list
.common_prefixes(b".com.foocatnetworks.foo.baz.")
.flat_map(|(_, v)| v)
.cloned()
.collect::<Vec<_>>();
assert!(vec![0_u16, 1, 2].into_iter().eq(results.into_iter()));
}
#[test]
fn string_patricia_map_works() {
// Insert as bytes.
let mut t = PatriciaMap::new();
t.insert("🌏🗻", ()); // [240,159,140,143,240,159,151,187]
t.insert("🌏🍔", ()); // [240,159,140,143,240,159,141,148]
let first_label = t.as_node().child().unwrap().label();
assert!(std::str::from_utf8(first_label).is_err());
assert_eq!(first_label, [240, 159, 140, 143, 240, 159]);
// Insert as string.
let mut t = StringPatriciaMap::new();
t.insert("🌏🗻", ());
t.insert("🌏🍔", ());
let first_label = t.as_node().child().unwrap().label();
assert_eq!(std::str::from_utf8(first_label).ok(), Some("🌏"));
}
#[test]
fn issue21() {
let mut map = PatriciaMap::new();
map.insert("1", 0);
map.insert("2", 0);
map.remove("2");
map.insert("2", 0);
assert_eq!(map.len(), map.iter().count());
assert_eq!(map.len(), map.iter_mut().count());
}
#[test]
fn issue35() {
let mut map = StringPatriciaMap::<u8>::new();
map.insert("インターポール", 1);
map.insert("インターポル", 2);
map.insert("インターリーブ", 3);
map.insert("インターン", 4);
assert_eq!(map.get("インターポール"), Some(&1));
assert_eq!(map.get("インターポル"), Some(&2));
}
#[test]
fn issue42_iter_prefix() {
let mut map = StringPatriciaMap::new();
map.insert("a0/b0", 0);
map.insert("a1/b1", 0);
let items: Vec<_> = {
let prefix = "a0".to_owned();
map.iter_prefix(&prefix).collect()
};
assert_eq!(items, vec![("a0/b0".to_owned(), &0)])
}
#[test]
fn issue42_iter_prefix_mut() {
let mut map = StringPatriciaMap::new();
map.insert("a0/b0", 0);
map.insert("a1/b1", 0);
let items: Vec<_> = {
let prefix = "a0".to_owned();
map.iter_prefix_mut(&prefix).collect()
};
assert_eq!(items, vec![("a0/b0".to_owned(), &mut 0)])
}
#[test]
fn issue42_common_prefix_values() {
let mut map = StringPatriciaMap::new();
map.insert("a0/b0", 0);
map.insert("a1/b1", 0);
let items: Vec<_> = {
let prefix = "a0/b0/c0".to_owned();
map.common_prefix_values(&prefix).collect()
};
assert_eq!(items, vec![&0])
}
#[test]
fn test_owned_impl_iter() {
struct TestTrie<T> {
map: GenericPatriciaMap<Vec<u8>, T>,
}
impl<T> TestTrie<T> {
#[expect(dead_code)]
fn common_prefix_test(&self, domain: &[u8]) -> impl Iterator<Item = &T> {
let domain = domain.to_vec();
self.map.common_prefix_values_owned(domain)
}
}
}
}
| rust | MIT | 3191dba6c7fe30f502006e4c195a0d4c577a53e7 | 2026-01-04T20:24:05.347227Z | false |
sile/patricia_tree | https://github.com/sile/patricia_tree/blob/3191dba6c7fe30f502006e4c195a0d4c577a53e7/src/set.rs | src/set.rs | //! A set based on a patricia tree.
use crate::Bytes;
use crate::map::{self, GenericPatriciaMap};
#[cfg(any(feature = "serde", test))]
use crate::node::Node;
use alloc::string::String;
use alloc::vec::Vec;
use core::fmt;
use core::iter::FromIterator;
/// Patricia tree based set with [`Vec<u8>`] as key.
pub type PatriciaSet = GenericPatriciaSet<Vec<u8>>;
/// Patricia tree based set with [`String`] as key.
pub type StringPatriciaSet = GenericPatriciaSet<String>;
/// Patricia tree based set.
pub struct GenericPatriciaSet<T> {
    // A set is represented as a map whose values are all the unit type.
    map: GenericPatriciaMap<T, ()>,
}
impl<T> GenericPatriciaSet<T> {
    /// Makes a new empty [`GenericPatriciaSet`] instance.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let set = PatriciaSet::new();
    /// assert!(set.is_empty());
    /// ```
    pub fn new() -> Self {
        GenericPatriciaSet {
            map: GenericPatriciaMap::new(),
        }
    }
    /// Returns the number of elements in this set.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let mut set = PatriciaSet::new();
    /// set.insert("foo");
    /// set.insert("bar");
    /// assert_eq!(set.len(), 2);
    /// ```
    pub fn len(&self) -> usize {
        self.map.len()
    }
    /// Returns true if this set contains no elements.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let mut set = PatriciaSet::new();
    /// assert!(set.is_empty());
    ///
    /// set.insert("foo");
    /// assert!(!set.is_empty());
    ///
    /// set.clear();
    /// assert!(set.is_empty());
    /// ```
    pub fn is_empty(&self) -> bool {
        self.len() == 0
    }
    /// Clears this set, removing all values.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let mut set = PatriciaSet::new();
    /// set.insert("foo");
    /// set.clear();
    /// assert!(set.is_empty());
    /// ```
    pub fn clear(&mut self) {
        self.map.clear();
    }
    // Reconstructs a set around an existing trie root; only compiled for
    // the `serde` feature (used when deserializing).
    #[cfg(feature = "serde")]
    pub(crate) fn from_node(node: Node<()>) -> Self {
        Self {
            map: GenericPatriciaMap::from_node(node),
        }
    }
    // Read access to the underlying trie root for tests and serialization.
    #[cfg(any(test, feature = "serde"))]
    pub(crate) fn as_node(&self) -> &Node<()> {
        self.map.as_node()
    }
    // Consumes the set, returning the underlying trie root (test-only).
    #[cfg(test)]
    pub(crate) fn into_node(self) -> Node<()> {
        self.map.into_node()
    }
}
impl<T: Bytes> GenericPatriciaSet<T> {
    /// Returns `true` if this set contains a value.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let mut set = PatriciaSet::new();
    /// set.insert("foo");
    /// assert!(set.contains("foo"));
    /// assert!(!set.contains("bar"));
    /// ```
    pub fn contains<U: AsRef<T::Borrowed>>(&self, value: U) -> bool {
        // Membership in the set is presence of the key in the backing map.
        self.map.get(value).is_some()
    }
    /// Finds the longest common prefix of `value` and the elements in this set.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let mut set = PatriciaSet::new();
    ///
    /// set.insert("foo");
    /// set.insert("foobar");
    /// assert_eq!(set.get_longest_common_prefix("fo"), None);
    /// assert_eq!(set.get_longest_common_prefix("foo"), Some("foo".as_bytes()));
    /// assert_eq!(set.get_longest_common_prefix("fooba"), Some("foo".as_bytes()));
    /// assert_eq!(set.get_longest_common_prefix("foobar"), Some("foobar".as_bytes()));
    /// assert_eq!(set.get_longest_common_prefix("foobarbaz"), Some("foobar".as_bytes()));
    /// ```
    pub fn get_longest_common_prefix<'a, U>(&self, value: &'a U) -> Option<&'a T::Borrowed>
    where
        U: ?Sized + AsRef<T::Borrowed>,
    {
        // The map returns (prefix, value); the unit value is dropped.
        self.map.get_longest_common_prefix(value).map(|x| x.0)
    }
    /// Returns the longest common prefix length of `value` and the elements in this set.
    ///
    /// Unlike `get_longest_common_prefix()`, this method does not check if there is an element that matches the prefix in this set.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let mut set = PatriciaSet::new();
    /// set.insert("foo");
    /// set.insert("foobar");
    /// assert_eq!(set.longest_common_prefix_len("fo"), 2);
    /// assert_eq!(set.longest_common_prefix_len("foo"), 3);
    /// assert_eq!(set.longest_common_prefix_len("fooba"), 5);
    /// assert_eq!(set.longest_common_prefix_len("foobar"), 6);
    /// assert_eq!(set.longest_common_prefix_len("foobarbaz"), 6);
    /// assert_eq!(set.longest_common_prefix_len("foba"), 2);
    /// ```
    pub fn longest_common_prefix_len<U>(&self, value: &U) -> usize
    where
        U: ?Sized + AsRef<T::Borrowed>,
    {
        self.map.longest_common_prefix_len(value)
    }
    /// Adds a value to this set.
    ///
    /// If the set did not have this value present, `true` is returned.
    /// If the set did have this value present, `false` is returned, and the entry is not updated.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let mut set = PatriciaSet::new();
    /// assert!(set.insert("foo"));
    /// assert!(!set.insert("foo"));
    /// assert_eq!(set.len(), 1);
    /// ```
    pub fn insert<U: AsRef<T::Borrowed>>(&mut self, value: U) -> bool {
        // `map.insert` returns the previous value; `None` means newly added.
        self.map.insert(value, ()).is_none()
    }
    /// Removes a value from the set. Returns `true` if the value was present in this set.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let mut set = PatriciaSet::new();
    /// set.insert("foo");
    /// assert_eq!(set.remove("foo"), true);
    /// assert_eq!(set.remove("foo"), false);
    /// ```
    pub fn remove<U: AsRef<T::Borrowed>>(&mut self, value: U) -> bool {
        self.map.remove(value).is_some()
    }
    /// Splits the set into two at the given prefix.
    ///
    /// The returned set contains all the entries that prefixed by `prefix`.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let mut a = PatriciaSet::new();
    /// a.insert("rust");
    /// a.insert("ruby");
    /// a.insert("python");
    /// a.insert("erlang");
    ///
    /// let b = a.split_by_prefix("ru");
    ///
    /// assert_eq!(a.iter().collect::<Vec<_>>(), [b"erlang", b"python"]);
    /// assert_eq!(b.iter().collect::<Vec<_>>(), [b"ruby", b"rust"]);
    /// ```
    pub fn split_by_prefix<U: AsRef<T::Borrowed>>(&mut self, prefix: U) -> Self {
        GenericPatriciaSet {
            map: self.map.split_by_prefix(prefix),
        }
    }
    /// Gets an iterator over the contents of this set, in sorted order.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let mut set = PatriciaSet::new();
    /// set.insert("foo");
    /// set.insert("bar");
    /// set.insert("baz");
    ///
    /// assert_eq!(set.iter().collect::<Vec<_>>(), [Vec::from("bar"), "baz".into(), "foo".into()]);
    /// ```
    pub fn iter(&self) -> Iter<'_, T> {
        Iter(self.map.keys())
    }
}
impl<T: Bytes> GenericPatriciaSet<T> {
    /// Gets an iterator over the contents having the given prefix of this set, in sorted order.
    ///
    /// # Examples
    ///
    /// ```
    /// use patricia_tree::PatriciaSet;
    ///
    /// let mut set = PatriciaSet::new();
    /// set.insert("foo");
    /// set.insert("bar");
    /// set.insert("baz");
    ///
    /// assert_eq!(set.iter_prefix(b"ba").collect::<Vec<_>>(), [Vec::from("bar"), "baz".into()]);
    /// ```
    // `'b: 'a` lets the returned iterator (which borrows `self` for `'a`)
    // also hold the `prefix` borrow; see the issue42 regression tests.
    pub fn iter_prefix<'a, 'b>(&'a self, prefix: &'b T::Borrowed) -> impl 'a + Iterator<Item = T>
    where
        'b: 'a,
    {
        self.map.iter_prefix(prefix).map(|(k, _)| k)
    }
}
impl<T: Bytes + fmt::Debug> fmt::Debug for GenericPatriciaSet<T> {
    /// Formats the set like a standard-library set: `{key, key, ...}`.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let mut builder = f.debug_set();
        for key in self.iter() {
            builder.entry(&key);
        }
        builder.finish()
    }
}
impl<T> Clone for GenericPatriciaSet<T> {
fn clone(&self) -> Self {
GenericPatriciaSet {
map: self.map.clone(),
}
}
}
impl<T> Default for GenericPatriciaSet<T> {
fn default() -> Self {
GenericPatriciaSet::new()
}
}
impl<T: Bytes> IntoIterator for GenericPatriciaSet<T> {
    type Item = T;
    type IntoIter = IntoIter<T>;

    /// Consumes the set, yielding its elements in sorted order.
    fn into_iter(self) -> Self::IntoIter {
        let inner = self.map.into_iter();
        IntoIter(inner)
    }
}
impl<T: Bytes, U: AsRef<T::Borrowed>> FromIterator<U> for GenericPatriciaSet<T> {
    /// Builds a set from every item `iter` produces; duplicates collapse
    /// into a single entry.
    fn from_iter<I>(iter: I) -> Self
    where
        I: IntoIterator<Item = U>,
    {
        let mut set = Self::new();
        set.extend(iter);
        set
    }
}
impl<T: Bytes, U: AsRef<T::Borrowed>> Extend<U> for GenericPatriciaSet<T> {
    /// Inserts every item produced by `iter`; values already present are kept.
    fn extend<I>(&mut self, iter: I)
    where
        I: IntoIterator<Item = U>,
    {
        iter.into_iter().for_each(|item| {
            self.insert(item);
        });
    }
}
/// An Iterator over a `PatriciaSet`'s items.
#[derive(Debug)]
pub struct Iter<'a, T>(map::Keys<'a, T, ()>); // thin wrapper over the map's key iterator
impl<T: Bytes> Iterator for Iter<'_, T> {
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        self.0.next()
    }

    /// Forward the inner iterator's size hint so `collect` can pre-allocate.
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }
}
/// An owning iterator over a `PatriciaSet`'s items.
#[derive(Debug)]
pub struct IntoIter<T>(map::IntoIter<T, ()>); // wraps the map's owning iterator; unit values are discarded
impl<T: Bytes> Iterator for IntoIter<T> {
    type Item = T;

    fn next(&mut self) -> Option<Self::Item> {
        // Drop the unit value; only the key is yielded.
        self.0.next().map(|(k, _)| k)
    }

    /// Forward the inner iterator's size hint so `collect` can pre-allocate.
    fn size_hint(&self) -> (usize, Option<usize>) {
        self.0.size_hint()
    }
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn debug_works() {
let set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
assert_eq!(
format!("{set:?}"),
"{[98, 97, 114], [98, 97, 122], [102, 111, 111]}"
);
}
#[test]
fn clear_works() {
let mut set = PatriciaSet::new();
set.insert("foo");
assert!(!set.is_empty());
set.clear();
assert!(set.is_empty());
}
#[test]
fn into_iter_works() {
let set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
assert_eq!(
set.into_iter().collect::<Vec<_>>(),
[Vec::from("bar"), "baz".into(), "foo".into()]
);
}
#[test]
fn split_by_prefix_works() {
let mut set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let splitted_set = set.split_by_prefix("");
assert!(set.is_empty());
assert_eq!(
splitted_set.iter().collect::<Vec<_>>(),
[b"bar", b"baz", b"foo"]
);
let mut set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let splitted_set = set.split_by_prefix("f");
assert_eq!(set.iter().collect::<Vec<_>>(), [b"bar", b"baz"]);
assert_eq!(splitted_set.iter().collect::<Vec<_>>(), [b"foo"]);
let mut set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let splitted_set = set.split_by_prefix("fo");
assert_eq!(set.iter().collect::<Vec<_>>(), [b"bar", b"baz"]);
assert_eq!(splitted_set.iter().collect::<Vec<_>>(), [b"foo"]);
let mut set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let splitted_set = set.split_by_prefix("foo");
assert_eq!(set.iter().collect::<Vec<_>>(), [b"bar", b"baz"]);
assert_eq!(splitted_set.iter().collect::<Vec<_>>(), [b"foo"]);
let mut set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let splitted_set = set.split_by_prefix("b");
assert_eq!(set.iter().collect::<Vec<_>>(), [b"foo"]);
assert_eq!(splitted_set.iter().collect::<Vec<_>>(), [b"bar", b"baz"]);
let mut set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let splitted_set = set.split_by_prefix("ba");
assert_eq!(set.iter().collect::<Vec<_>>(), [b"foo"]);
assert_eq!(splitted_set.iter().collect::<Vec<_>>(), [b"bar", b"baz"]);
let mut set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let splitted_set = set.split_by_prefix("bar");
assert_eq!(set.iter().collect::<Vec<_>>(), [b"baz", b"foo"]);
assert_eq!(splitted_set.iter().collect::<Vec<_>>(), [b"bar"]);
let mut set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let mut splitted_set = set.split_by_prefix("baz");
assert_eq!(set.iter().collect::<Vec<_>>(), [b"bar", b"foo"]);
assert_eq!(splitted_set.iter().collect::<Vec<_>>(), [b"baz"]);
splitted_set.insert("aaa");
assert_eq!(splitted_set.iter().collect::<Vec<_>>(), [b"aaa", b"baz"]);
let mut set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let splitted_set = set.split_by_prefix("bazz");
assert_eq!(set.iter().collect::<Vec<_>>(), [b"bar", b"baz", b"foo"]);
assert!(splitted_set.is_empty());
let mut set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let splitted_set = set.split_by_prefix("for");
assert_eq!(set.iter().collect::<Vec<_>>(), [b"bar", b"baz", b"foo"]);
assert!(splitted_set.is_empty());
let mut set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let splitted_set = set.split_by_prefix("qux");
assert_eq!(set.iter().collect::<Vec<_>>(), [b"bar", b"baz", b"foo"]);
assert!(splitted_set.is_empty());
}
#[test]
fn iter_prefix_works() {
fn assert_iter_prefix(set: &PatriciaSet, prefix: &str) {
let actual = set.iter_prefix(prefix.as_bytes()).collect::<Vec<_>>();
let expected = set
.iter()
.filter(|key| key.starts_with(prefix.as_bytes()))
.collect::<Vec<_>>();
assert_eq!(actual, expected);
}
let set: PatriciaSet = vec!["foo", "bar", "baz"].into_iter().collect();
let prefixes = [
"", "a", "b", "ba", "bar", "baz", "bax", "c", "f", "fo", "foo",
];
for prefix in &prefixes {
assert_iter_prefix(&set, prefix);
}
let set: PatriciaSet = vec![
"JavaScript",
"Python",
"Java",
"C++",
"Swift",
"TypeScript",
"Go",
"SQL",
"Ruby",
"R",
"PHP",
"Perl",
"Kotlin",
"C#",
"Rust",
"Scheme",
"Erlang",
"Scala",
"Elixir",
"Haskell",
]
.into_iter()
.collect();
let prefixes = [
"", "P", "Py", "J", "Jav", "Java", "JavaS", "Rusti", "E", "El", "H", "S", "Sc",
];
for prefix in &prefixes {
assert_iter_prefix(&set, prefix);
}
}
}
| rust | MIT | 3191dba6c7fe30f502006e4c195a0d4c577a53e7 | 2026-01-04T20:24:05.347227Z | false |
sile/patricia_tree | https://github.com/sile/patricia_tree/blob/3191dba6c7fe30f502006e4c195a0d4c577a53e7/benches/bench.rs | benches/bench.rs | use criterion::{BatchSize, Criterion, criterion_group, criterion_main};
use patricia_tree::PatriciaSet;
use rand::{Rng, seq::IndexedRandom};
use std::{
collections::{BTreeSet, HashSet},
hint::black_box,
};
/// Compares raw insert throughput of the three set implementations.
///
/// Each bencher keeps one growing set across iterations, so all three share
/// the same "the structure gets larger over time" bias.
fn bench_insertion(c: &mut Criterion) {
    let mut group = c.benchmark_group("insertion");
    group.bench_function("PatriciaSet", |bencher| {
        let mut rng = rand::rng();
        let mut target = PatriciaSet::new();
        bencher.iter(|| {
            target.insert(black_box(rng.random::<u64>().to_string()));
        })
    });
    group.bench_function("HashSet", |bencher| {
        let mut rng = rand::rng();
        let mut target = HashSet::new();
        bencher.iter(|| {
            target.insert(black_box(rng.random::<u64>().to_string()));
        })
    });
    group.bench_function("BTreeSet", |bencher| {
        let mut rng = rand::rng();
        let mut target = BTreeSet::new();
        bencher.iter(|| {
            target.insert(black_box(rng.random::<u64>().to_string()));
        })
    });
    group.finish();
}
/// Benchmarks lookups against sets pre-populated with ~MAX/2 random keys
/// drawn from 0..MAX (so roughly half of the probes hit).
///
/// Note: the `.to_string()` key allocation happens inside the timed closure
/// for all three variants, so the comparison between them stays fair even
/// though absolute numbers include the allocation cost.
fn bench_retrieval(c: &mut Criterion) {
    const MAX: u64 = 1_000_000;
    let mut group = c.benchmark_group("retrieval");
    let mut set = PatriciaSet::new();
    let mut rng = rand::rng();
    // Pre-populate the set
    for _ in 0..MAX / 2 {
        set.insert((rng.random::<u64>() % MAX).to_string());
    }
    group.bench_function("PatriciaSet", |b| {
        b.iter(|| {
            set.contains(black_box((rng.random::<u64>() % MAX).to_string()));
        })
    });
    let mut hash_set = HashSet::new();
    for _ in 0..MAX / 2 {
        hash_set.insert((rng.random::<u64>() % MAX).to_string());
    }
    group.bench_function("HashSet", |b| {
        b.iter(|| {
            hash_set.contains(black_box(&(rng.random::<u64>() % MAX).to_string()));
        })
    });
    let mut btree_set = BTreeSet::new();
    for _ in 0..MAX / 2 {
        btree_set.insert((rng.random::<u64>() % MAX).to_string());
    }
    group.bench_function("BTreeSet", |b| {
        b.iter(|| {
            btree_set.contains(black_box(&(rng.random::<u64>() % MAX).to_string()));
        })
    });
    group.finish();
}
/// Benchmarks removing a random existing element.
///
/// `iter_batched_ref` clones the pre-built set in the (untimed) setup phase
/// for every measured batch, so only the removal itself is timed.
fn bench_removal(c: &mut Criterion) {
    let mut group = c.benchmark_group("removal");
    const MAX: u64 = 100_000;

    // Keys "0".."99999"; each container is built once up front.
    let values: Vec<String> = (0..MAX).map(|i| i.to_string()).collect();
    let patricia_set: PatriciaSet = values.iter().cloned().collect();
    let hashset: HashSet<String> = values.iter().cloned().collect();
    let btreeset: BTreeSet<String> = values.iter().cloned().collect();

    group.bench_function("PatriciaSet", |b| {
        b.iter_batched_ref(
            // setup (untimed): fresh set clone plus a random victim key
            || {
                let val = values.choose(&mut rand::rng()).unwrap().clone();
                (patricia_set.clone(), val)
            },
            // timed: the removal itself
            |(set, val)| {
                set.remove(black_box(&*val));
            },
            BatchSize::SmallInput,
        )
    });
    group.bench_function("HashSet", |b| {
        b.iter_batched_ref(
            || {
                let val = values.choose(&mut rand::rng()).unwrap().clone();
                (hashset.clone(), val)
            },
            |(set, val)| {
                set.remove(black_box(&*val));
            },
            BatchSize::SmallInput,
        )
    });
    group.bench_function("BTreeSet", |b| {
        b.iter_batched_ref(
            || {
                let val = values.choose(&mut rand::rng()).unwrap().clone();
                (btreeset.clone(), val)
            },
            |(set, val)| {
                set.remove(black_box(&*val));
            },
            BatchSize::SmallInput,
        )
    });
    group.finish();
}
// Wire the benchmark functions into criterion's generated `main`.
criterion_group!(benches, bench_insertion, bench_retrieval, bench_removal);
criterion_main!(benches);
| rust | MIT | 3191dba6c7fe30f502006e4c195a0d4c577a53e7 | 2026-01-04T20:24:05.347227Z | false |
sile/patricia_tree | https://github.com/sile/patricia_tree/blob/3191dba6c7fe30f502006e4c195a0d4c577a53e7/examples/insert_lines.rs | examples/insert_lines.rs | use patricia_tree::PatriciaSet;
use std::collections::{BTreeSet, HashSet};
use std::io::BufRead;
/// Reads lines from standard input into the selected container and prints
/// how many entries it ends up holding (`count` just counts all lines).
fn main() -> noargs::Result<()> {
    let mut args = noargs::raw_args();
    noargs::HELP_FLAG.take_help(&mut args);
    // Which container to exercise; defaults to the patricia tree.
    let kind = noargs::opt("kind")
        .doc("Data structure kind")
        .ty("patricia | hash | btree | count")
        .default("patricia")
        .take(&mut args)
        .then(|a| {
            let value = a.value();
            match value {
                "patricia" | "hash" | "btree" | "count" => Ok(value.to_string()),
                _ => Err("must be one of: patricia, hash, btree, count"),
            }
        })?;
    if let Some(help) = args.finish()? {
        print!("{help}");
        return Ok(());
    }
    match kind.as_str() {
        "patricia" => {
            let mut set = PatriciaSet::new();
            each_line(|line| {
                set.insert(line);
            });
            println!("# LINES: {}", set.len());
        }
        "hash" => {
            let mut set = HashSet::new();
            each_line(|line| {
                set.insert(line);
            });
            println!("# LINES: {}", set.len());
        }
        "btree" => {
            let mut set = BTreeSet::new();
            each_line(|line| {
                set.insert(line);
            });
            println!("# LINES: {}", set.len());
        }
        "count" => {
            // Baseline that only counts lines without storing them.
            let mut count = 0;
            each_line(|_| {
                count += 1;
            });
            println!("# LINES: {count}");
        }
        // `kind` was validated by the `.then(...)` check above.
        _ => unreachable!(),
    }
    Ok(())
}
/// Calls `f` with every line read from standard input.
///
/// # Panics
/// Panics if a line cannot be read (e.g. an I/O error or invalid UTF-8);
/// this example binary has no error channel, so the failure is surfaced
/// with an explanatory message instead of a bare `unwrap`.
fn each_line<F>(mut f: F)
where
    F: FnMut(String),
{
    let stdin = std::io::stdin();
    for line in stdin.lock().lines() {
        f(line.expect("failed to read a line from stdin"));
    }
}
| rust | MIT | 3191dba6c7fe30f502006e4c195a0d4c577a53e7 | 2026-01-04T20:24:05.347227Z | false |
hipstermojo/paperoni | https://github.com/hipstermojo/paperoni/blob/796a34a34c365bc06191bf634918b71eaee7bd5d/src/errors.rs | src/errors.rs | use std::fmt::{Debug, Display};
use flexi_logger::FlexiLoggerError;
use thiserror::Error;
/// Broad categories of failures raised while fetching, extracting, or
/// exporting an article. Each variant carries a human-readable message
/// which `thiserror` prefixes with the category tag on display.
#[derive(Error, Debug)]
pub enum ErrorKind {
    /// Failure while building or writing an EPUB document.
    #[error("[EpubError]: {0}")]
    EpubError(String),
    /// Network-level failure; also used for URL parsing problems
    /// (see the `From<url::ParseError>` impls below).
    #[error("[HTTPError]: {0}")]
    HTTPError(String),
    /// Filesystem / I/O failure.
    #[error("[IOError]: {0}")]
    IOError(String),
    /// Bytes that were expected to be valid UTF-8 but were not.
    #[error("[UTF8Error]: {0}")]
    UTF8Error(String),
    /// Failure reported by the readability-based content extraction.
    #[error("[ReadabilityError]: {0}")]
    ReadabilityError(String),
}
#[derive(Error, Debug)]
#[error("{kind}")]
/// Used to represent errors from downloading images. Errors from here are used solely for debugging
/// as they are considered recoverable.
pub struct ImgError {
    // The underlying error category and message; also the Display output.
    kind: ErrorKind,
    // URL of the image that failed, when known (recorded via `set_url`).
    url: Option<String>,
}
impl ImgError {
pub fn with_kind(kind: ErrorKind) -> Self {
ImgError { url: None, kind }
}
pub fn set_url(&mut self, url: &str) {
self.url = Some(url.to_string());
}
pub fn url(&self) -> &Option<String> {
&self.url
}
}
// Conversions from lower-level error types, each tagged with the
// appropriate `ErrorKind` category so `?` works in image-download code.
impl From<ErrorKind> for ImgError {
    fn from(kind: ErrorKind) -> Self {
        Self::with_kind(kind)
    }
}
impl From<surf::Error> for ImgError {
    fn from(err: surf::Error) -> Self {
        Self::with_kind(ErrorKind::HTTPError(err.to_string()))
    }
}
impl From<url::ParseError> for ImgError {
    fn from(err: url::ParseError) -> Self {
        // A malformed image URL is reported as an HTTP-level failure.
        Self::with_kind(ErrorKind::HTTPError(err.to_string()))
    }
}
impl From<std::io::Error> for ImgError {
    fn from(err: std::io::Error) -> Self {
        Self::with_kind(ErrorKind::IOError(err.to_string()))
    }
}
#[derive(Error, Debug)]
#[error("{kind}")]
/// Top-level error type for article download, extraction, and export,
/// optionally tagged with the source of the article that failed.
pub struct PaperoniError {
    // Identifier (e.g. URL) of the article this error relates to, if known.
    article_source: Option<String>,
    // The underlying error category and message; also the Display output.
    kind: ErrorKind,
}
impl PaperoniError {
pub fn with_kind(kind: ErrorKind) -> Self {
PaperoniError {
article_source: None,
kind,
}
}
pub fn kind(&self) -> &ErrorKind {
&self.kind
}
pub fn article_source(&self) -> &Option<String> {
&self.article_source
}
pub fn set_article_source(&mut self, article_source: &str) {
self.article_source = Some(article_source.to_owned());
}
}
// Conversions from lower-level error types, each tagged with the
// appropriate `ErrorKind` category so `?` can be used throughout.
impl From<ErrorKind> for PaperoniError {
    fn from(kind: ErrorKind) -> Self {
        Self::with_kind(kind)
    }
}
impl From<epub_builder::Error> for PaperoniError {
    fn from(err: epub_builder::Error) -> Self {
        Self::with_kind(ErrorKind::EpubError(err.description().to_owned()))
    }
}
impl From<surf::Error> for PaperoniError {
    fn from(err: surf::Error) -> Self {
        Self::with_kind(ErrorKind::HTTPError(err.to_string()))
    }
}
impl From<url::ParseError> for PaperoniError {
    fn from(err: url::ParseError) -> Self {
        // A malformed URL is reported as an HTTP-level failure.
        Self::with_kind(ErrorKind::HTTPError(err.to_string()))
    }
}
impl From<std::io::Error> for PaperoniError {
    fn from(err: std::io::Error) -> Self {
        Self::with_kind(ErrorKind::IOError(err.to_string()))
    }
}
impl From<std::str::Utf8Error> for PaperoniError {
    fn from(err: std::str::Utf8Error) -> Self {
        Self::with_kind(ErrorKind::UTF8Error(err.to_string()))
    }
}
/// Failures that can occur while setting up logging.
#[derive(Debug, Error)]
pub enum LogError {
    /// Wrapped error from the `flexi_logger` crate.
    #[error(transparent)]
    FlexiError(#[from] FlexiLoggerError),
    /// The platform's standard user directories could not be resolved.
    #[error("Unable to get user directories for logging purposes")]
    UserDirectoriesError,
    /// The directory meant to hold log files could not be created.
    #[error("Can't create log directory: {0}")]
    CreateLogDirectoryError(#[from] std::io::Error),
}
// dumb hack to allow for comparing errors in testing.
// derive macros cannot be used because underlying errors like io::Error do not derive PartialEq
impl PartialEq for LogError {
    fn eq(&self, other: &Self) -> bool {
        let lhs = format!("{:?}", self);
        let rhs = format!("{:?}", other);
        lhs == rhs
    }
}
/// Errors produced while parsing and validating command-line arguments.
///
/// Generic over the CLI framework's builder error type, which only needs
/// to be printable.
#[derive(Debug, Error)]
pub enum CliError<BuilderError: Debug + Display> {
    /// The file of URLs given on the command line could not be opened/read.
    #[error("Failed to open file with urls: {0}")]
    UrlFileError(#[from] std::io::Error),
    /// The max-connections option's value was not a valid integer.
    #[error("Failed to parse max connection value: {0}")]
    InvalidMaxConnectionCount(#[from] std::num::ParseIntError),
    /// Neither the arguments nor the URL file yielded any URLs to fetch.
    #[error("No urls were provided")]
    NoUrls,
    /// The underlying CLI framework failed to build the app definition.
    #[error("Failed to build cli application: {0}")]
    AppBuildError(BuilderError),
    #[error("Invalid output path name for merged epubs: {0}")]
    InvalidOutputPath(String),
    #[error("Wrong output directory")]
    WrongOutputDirectory,
    #[error("Output directory does not exist")]
    OutputDirectoryNotExists,
    /// Logger initialization failed; wraps [`LogError`].
    #[error("Unable to start logger!\n{0}")]
    LogError(#[from] LogError),
    /// `--inline-toc` was combined with a non-epub export format.
    #[error("The --inline-toc flag can only be used when exporting to epub")]
    WrongExportInliningToC,
    /// `--inline-images` was combined with a non-html export format.
    #[error("The --inline-images flag can only be used when exporting to html")]
    WrongExportInliningImages,
}
// dumb hack to allow for comparing errors in testing.
// derive macros cannot be used because underlying errors like io::Error do not derive PartialEq
impl<T: Debug + Display> PartialEq for CliError<T> {
    fn eq(&self, other: &Self) -> bool {
        let lhs = format!("{:?}", self);
        let rhs = format!("{:?}", other);
        lhs == rhs
    }
}
| rust | MIT | 796a34a34c365bc06191bf634918b71eaee7bd5d | 2026-01-04T20:24:03.677608Z | false |
hipstermojo/paperoni | https://github.com/hipstermojo/paperoni/blob/796a34a34c365bc06191bf634918b71eaee7bd5d/src/extractor.rs | src/extractor.rs | use itertools::Itertools;
use kuchiki::{traits::*, NodeRef};
use crate::errors::PaperoniError;
use crate::moz_readability::{MetaData, Readability};
/// A tuple of the url and an Option of the resource's MIME type
pub type ResourceInfo = (String, Option<String>);
/// An article fetched from `url`, wrapping the readability engine that
/// extracts its main content.
pub struct Article {
    // Root of the extracted document; `None` until `extract_content` succeeds.
    node_ref_opt: Option<NodeRef>,
    // (url, MIME type) pairs collected by `extract_img_urls`.
    pub img_urls: Vec<ResourceInfo>,
    // Readability engine that performs the actual content extraction.
    readability: Readability,
    // The article's source URL.
    pub url: String,
}
impl Article {
    /// Create a new instance of an HTML extractor given an HTML string
    pub fn from_html(html_str: &str, url: &str) -> Self {
        Self {
            node_ref_opt: None,
            img_urls: Vec::new(),
            readability: Readability::new(html_str),
            url: url.to_string(),
        }
    }
    /// Locates and extracts the HTML in a document which is determined to be
    /// the source of the content.
    ///
    /// On success the extracted article is re-hosted inside a minimal HTML
    /// skeleton (linking `stylesheet.css`) and stored in `node_ref_opt`.
    /// If readability finds no article node, `node_ref_opt` stays `None`
    /// but `Ok(())` is still returned.
    pub fn extract_content(&mut self) -> Result<(), PaperoniError> {
        self.readability.parse(&self.url)?;
        if let Some(article_node_ref) = &self.readability.article_node {
            let template = r#"
            <!DOCTYPE html>
            <html>
            <head>
                <link rel="stylesheet" href="stylesheet.css" type="text/css"></link>
            </head>
            <body>
            </body>
            </html>
            "#;
            let doc = kuchiki::parse_html().one(template);
            let body = doc.select_first("body").unwrap();
            // Graft the extracted article into the skeleton's <body>.
            body.as_node().append(article_node_ref.clone());
            self.node_ref_opt = Some(doc);
        }
        Ok(())
    }
    /// Traverses the DOM tree of the content and retrieves the IMG URLs.
    ///
    /// Skips empty `src` attributes and inline `data:image` URIs, and
    /// de-duplicates repeated URLs. Each collected entry is paired with
    /// `None` for the MIME type.
    pub fn extract_img_urls(&mut self) {
        if let Some(content_ref) = &self.node_ref_opt {
            self.img_urls = content_ref
                .select("img")
                .unwrap()
                .filter_map(|img_ref| {
                    let attrs = img_ref.attributes.borrow();
                    attrs
                        .get("src")
                        .filter(|val| !(val.is_empty() || val.starts_with("data:image")))
                        .map(ToString::to_string)
                })
                .unique()
                .map(|val| (val, None))
                .collect();
        }
    }
    /// Returns the extracted article [NodeRef]. It should only be called *AFTER* calling parse
    ///
    /// # Panics
    /// Panics if `extract_content` has not populated the article node.
    pub fn node_ref(&self) -> &NodeRef {
        self.node_ref_opt.as_ref().expect(
            "Article node doesn't exist. This may be because the document has not been parsed",
        )
    }
    /// Article metadata gathered by the readability parser.
    pub fn metadata(&self) -> &MetaData {
        &self.readability.metadata
    }
}
#[cfg(test)]
mod test {
    use super::*;
    // Fixture with one relative-src image and one data-URI image; only the
    // former should be collected by `extract_img_urls`.
    const TEST_HTML: &'static str = r#"
        <!doctype html>
        <html lang="en">
            <head>
                <meta charset="utf-8">
                <meta name="description" content="A sample document">
                <meta name="keywords" content="test,Rust">
                <meta name="author" content="Paperoni">
                <title>Testing Paperoni</title>
            </head>
            <body>
                <header>
                <!-- Unimportant information -->
                    <h1>Testing Paperoni</h1>
                </header>
                <article>
                    <h1>Starting out</h1>
                    <p>Some Lorem Ipsum text here</p>
                    <p>Observe this picture</p>
                    <img src="./img.jpg" alt="Random image">
                    <img src="data:image/png;base64,lJGWEIUQOIQWIDYVIVEDYFOUYQFWD">
                </article>
                <footer>
                    <p>Made in HTML</p>
                </footer>
            </body>
        </html>
        "#;
    #[test]
    fn test_extract_img_urls() {
        let mut article = Article::from_html(TEST_HTML, "http://example.com/");
        article
            .extract_content()
            .expect("Article extraction failed unexpectedly");
        article.extract_img_urls();
        assert!(article.img_urls.len() > 0);
        // The relative "./img.jpg" appears absolutized against the article
        // URL (presumably by the readability parse step — not shown here),
        // and the base64 data URI is filtered out.
        assert_eq!(
            vec![("http://example.com/img.jpg".to_string(), None)],
            article.img_urls
        );
    }
}
| rust | MIT | 796a34a34c365bc06191bf634918b71eaee7bd5d | 2026-01-04T20:24:03.677608Z | false |
hipstermojo/paperoni | https://github.com/hipstermojo/paperoni/blob/796a34a34c365bc06191bf634918b71eaee7bd5d/src/html.rs | src/html.rs | use std::{
collections::{BTreeMap, HashSet},
fs::{self, File},
path::Path,
};
use base64::encode;
use comfy_table::{Attribute, Cell, CellAlignment, Color, ContentArrangement, Table};
use html5ever::{LocalName, Namespace, QualName};
use indicatif::{ProgressBar, ProgressStyle};
use kuchiki::{traits::*, NodeRef};
use log::{debug, error, info};
use crate::{
cli::{self, AppConfig, CSSConfig},
errors::PaperoniError,
extractor::Article,
moz_readability::MetaData,
};
// Error message used when the <head> element cannot be located while
// inlining CSS into the exported document.
const HEAD_ELEM_NOT_FOUND: &str =
    "Unable to get <head> element to inline css. Ensure that the root node is the HTML document.";
// Skeleton document that each exported article body is appended into.
const BASE_HTML_TEMPLATE: &str = r#"<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
</head>
<body></body>
</html>"#;
/// Exports the extracted articles as HTML files.
///
/// When `app_config.merged` is `Some(name)`, every article is appended into a
/// single HTML document written to `name`; otherwise one file is written per
/// article under the configured output directory (duplicate titles get a
/// numeric suffix). Images are either inlined as base64 data URIs or copied
/// into an images directory, depending on `app_config.is_inlining_images`.
/// Titles of processed articles are appended to `successful_articles_table`
/// and progress is shown on a progress bar unless disabled.
///
/// Returns `Err` with all accumulated [`PaperoniError`]s if anything failed.
/// Most per-article failures are collected and processing continues, but a
/// few early failures (creating the images directory, serializing the merged
/// file) abort immediately.
pub fn generate_html_exports(
    articles: Vec<Article>,
    app_config: &AppConfig,
    successful_articles_table: &mut Table,
) -> Result<(), Vec<PaperoniError>> {
    if articles.is_empty() {
        return Ok(());
    }
    // A hidden bar lets the rest of the function call `inc`/`finish_with_message`
    // unconditionally.
    let bar = if app_config.can_disable_progress_bar {
        ProgressBar::hidden()
    } else {
        let enabled_bar = ProgressBar::new(articles.len() as u64);
        let style = ProgressStyle::default_bar().template(
            "{spinner:.cyan} [{elapsed_precise}] {bar:40.white} {:>8} html {pos}/{len:7} {msg:.green}",
        );
        enabled_bar.set_style(style);
        if !articles.is_empty() {
            enabled_bar.set_message("Generating html files");
        }
        enabled_bar
    };
    let mut errors: Vec<PaperoniError> = Vec::new();
    match app_config.merged {
        // Merged mode: all articles go into one HTML document.
        Some(ref name) => {
            successful_articles_table.set_header(vec![Cell::new("Table of Contents")
                .add_attribute(Attribute::Bold)
                .set_alignment(CellAlignment::Center)
                .fg(Color::Green)]);
            debug!("Creating {:?}", name);
            let base_html_elem = kuchiki::parse_html().one(BASE_HTML_TEMPLATE);
            let body_elem = base_html_elem.select_first("body").unwrap();
            let base_path = Path::new(app_config.output_directory.as_deref().unwrap_or("."));
            let img_dirs_path_name = name.trim_end_matches(".html");
            let imgs_dir_path = base_path.join(img_dirs_path_name);
            // The images directory is only needed when images are copied
            // (not inlined) and it does not already exist.
            if !(app_config.is_inlining_images || imgs_dir_path.exists()) {
                info!("Creating imgs dir in {:?} for {}", imgs_dir_path, name);
                if let Err(e) = std::fs::create_dir(&imgs_dir_path) {
                    error!("Unable to create imgs dir for HTML file");
                    let err: PaperoniError = e.into();
                    errors.push(err);
                    // Without the images directory nothing below can succeed.
                    return Err(errors);
                };
            }
            for (idx, article) in articles.iter().enumerate() {
                // Panics if readability did not produce its wrapper div;
                // NOTE(review): assumed to always exist after extraction — confirm.
                let article_elem = article
                    .node_ref()
                    .select_first("div[id=\"readability-page-1\"]")
                    .unwrap();
                let title = article.metadata().title();
                // Re-number the wrapper div so ids stay unique in the merged file.
                let mut elem_attr = article_elem.attributes.borrow_mut();
                if let Some(id_attr) = elem_attr.get_mut("id") {
                    *id_attr = format!("readability-page-{}", idx);
                }
                if app_config.is_inlining_images {
                    info!("Inlining images for {}", title);
                    let result = update_imgs_base64(article);
                    if let Err(e) = result {
                        let mut err: PaperoniError = e.into();
                        err.set_article_source(title);
                        error!("Unable to copy images to imgs dir for {}", title);
                        errors.push(err);
                    }
                    info!("Completed inlining images for {}", title);
                } else {
                    info!("Copying images to imgs dir for {}", title);
                    let result = update_img_urls(article, &imgs_dir_path).map_err(|e| {
                        let mut err: PaperoniError = e.into();
                        err.set_article_source(title);
                        err
                    });
                    if let Err(e) = result {
                        error!("Unable to copy images to imgs dir for {}", title);
                        errors.push(e);
                    } else {
                        info!("Successfully copied images to imgs dir for {}", title);
                    }
                }
                bar.inc(1);
                // The article is appended even if its images failed above.
                successful_articles_table.add_row(vec![title]);
                body_elem.as_node().append(article_elem.as_node().clone());
                debug!("Added {} to the export HTML file", title);
            }
            insert_title_elem(&base_html_elem, name);
            insert_appendix(
                &base_html_elem,
                articles
                    .iter()
                    .map(|article| (article.metadata(), article.url.as_str()))
                    .collect(),
            );
            inline_css(&base_html_elem, &app_config.css_config);
            remove_existing_stylesheet_link(&base_html_elem);
            info!("Added title, footer and inlined styles for {}", name);
            info!("Creating export HTML file: {}", name);
            // NOTE(review): the output file is created at `name` as given,
            // not joined with `output_directory` like the images dir above —
            // confirm whether callers always pass a full path here.
            if let Err(mut err) = File::create(name)
                .and_then(|mut out_file| base_html_elem.serialize(&mut out_file))
                .map_err(|e| -> PaperoniError { e.into() })
            {
                error!("Failed to serialize articles to file: {}", name);
                err.set_article_source(&name);
                errors.push(err);
                bar.finish_with_message("html generation failed");
                return Err(errors);
            };
            bar.finish_with_message("Generated html file\n");
            debug!("Created {:?}", name);
            println!("Created {:?}", name);
        }
        // Per-article mode: one HTML file per article.
        None => {
            successful_articles_table
                .set_header(vec![Cell::new("Downloaded articles")
                    .add_attribute(Attribute::Bold)
                    .set_alignment(CellAlignment::Center)
                    .fg(Color::Green)])
                .set_content_arrangement(ContentArrangement::Dynamic);
            // Tracks file names already used so duplicate titles are suffixed.
            let mut file_names: HashSet<String> = HashSet::new();
            for article in &articles {
                // Slashes in titles would create nested paths; flatten them.
                let mut file_name = format!(
                    "{}/{}.html",
                    app_config.output_directory.as_deref().unwrap_or("."),
                    article
                        .metadata()
                        .title()
                        .replace("/", " ")
                        .replace("\\", " ")
                );
                if file_names.contains(&file_name) {
                    info!("Article name {:?} already exists", file_name);
                    file_name = format!(
                        "{}/{}_{}.html",
                        app_config.output_directory.as_deref().unwrap_or("."),
                        article
                            .metadata()
                            .title()
                            .replace("/", " ")
                            .replace("\\", " "),
                        file_names.len()
                    );
                    info!("Renamed to {:?}", file_name);
                }
                file_names.insert(file_name.clone());
                debug!("Creating {:?}", file_name);
                // Closure so `?` can collect any failure into one error per article.
                let export_article = || -> Result<(), PaperoniError> {
                    let mut out_file = File::create(&file_name)?;
                    if app_config.is_inlining_images {
                        update_imgs_base64(article)?;
                    } else {
                        let base_path =
                            Path::new(app_config.output_directory.as_deref().unwrap_or("."));
                        let imgs_dir_name = article.metadata().title();
                        if !base_path.join(imgs_dir_name).exists() {
                            std::fs::create_dir(base_path.join(imgs_dir_name))?;
                        }
                        let imgs_dir_path = base_path.join(imgs_dir_name);
                        update_img_urls(article, &imgs_dir_path)?;
                    }
                    // Ensure the document declares UTF-8 before serializing.
                    let utf8_encoding =
                        NodeRef::new_element(create_qualname("meta"), BTreeMap::new());
                    if let Some(elem_node) = utf8_encoding.as_element() {
                        let mut elem_attrs = elem_node.attributes.borrow_mut();
                        elem_attrs.insert("charset", "UTF-8".into());
                    }
                    if let Ok(head_elem) = article.node_ref().select_first("head") {
                        let head_elem_node = head_elem.as_node();
                        head_elem_node.append(utf8_encoding);
                    };
                    insert_title_elem(article.node_ref(), article.metadata().title());
                    insert_appendix(article.node_ref(), vec![(article.metadata(), &article.url)]);
                    inline_css(article.node_ref(), &app_config.css_config);
                    remove_existing_stylesheet_link(article.node_ref());
                    article.node_ref().serialize(&mut out_file)?;
                    Ok(())
                };
                if let Err(mut err) = export_article() {
                    err.set_article_source(&article.url);
                    errors.push(err);
                }
                debug!("Created {:?}", file_name);
                bar.inc(1);
                successful_articles_table.add_row(vec![article.metadata().title()]);
            }
            bar.finish_with_message("Generated HTML files\n");
        }
    }
    if errors.is_empty() {
        Ok(())
    } else {
        Err(errors)
    }
}
/// Builds a [`QualName`] in the XHTML namespace for the given local tag name.
fn create_qualname(name: &str) -> QualName {
    let namespace = Namespace::from("http://www.w3.org/1999/xhtml");
    let local_name = LocalName::from(name);
    QualName::new(None, namespace, local_name)
}
/// Updates the src attribute of `<img>` elements with a base64 encoded string of the image data
/// Updates the src attribute of `<img>` elements with a base64 encoded string of the image data.
///
/// Image bytes are read back from the temp dir (where the downloader stored
/// them) and embedded so the exported HTML file is self-contained.
///
/// # Errors
/// Returns an `std::io::Error` if an image file cannot be read from the
/// temp directory.
fn update_imgs_base64(article: &Article) -> Result<(), std::io::Error> {
    let temp_dir = std::env::temp_dir();
    for (img_url, mime_type) in &article.img_urls {
        let img_path = temp_dir.join(img_url);
        let img_bytes = std::fs::read(img_path)?;
        // Data URI syntax is `data:<mediatype>;base64,<data>` (RFC 2397).
        // `mime_type` already carries the full media type (e.g. "image/png"),
        // so the previous prefix `data:image:<mime>;base64,` was invalid.
        let img_base64_str = format!(
            "data:{};base64,{}",
            mime_type.as_deref().unwrap_or("image/*"),
            encode(img_bytes)
        );
        // Rewrite every <img> whose src still points at the original URL.
        let img_elems = article
            .node_ref()
            .select(&format!("img[src=\"{}\"]", img_url))
            .unwrap();
        for img_elem in img_elems {
            let mut img_attr = img_elem.attributes.borrow_mut();
            if let Some(src_attr) = img_attr.get_mut("src") {
                *src_attr = img_base64_str.clone();
            }
        }
    }
    Ok(())
}
/// Updates the src attribute of `<img>` elements to the new `imgs_dir_path` and copies the image to the new file location
/// Copies each downloaded image from the temp dir into `imgs_dir_path` and
/// rewrites the matching `<img>` src attributes to the new location.
fn update_img_urls(article: &Article, imgs_dir_path: &Path) -> Result<(), std::io::Error> {
    let temp_dir = std::env::temp_dir();
    for (img_url, _) in &article.img_urls {
        let src = temp_dir.join(img_url);
        let dest = imgs_dir_path.join(img_url);
        info!("Copying {:?} to {:?}", src, dest);
        fs::copy(&src, &dest)?;
        // Point every <img> that referenced the original URL at the copy.
        let matching_imgs = article
            .node_ref()
            .select(&format!("img[src=\"{}\"]", img_url))
            .unwrap();
        for img_node in matching_imgs {
            let mut attrs = img_node.attributes.borrow_mut();
            if let Some(src_attr) = attrs.get_mut("src") {
                *src_attr = dest.to_str().unwrap().into();
            }
        }
    }
    Ok(())
}
/// Creates a `<title>` element in an HTML document with the value set to the article's title
/// Inserts a `<title>` element containing `title` into the document's
/// `<head>`, creating the `<head>` element itself when it is missing.
fn insert_title_elem(root_node: &NodeRef, title: &str) {
    let title_elem = NodeRef::new_element(create_qualname("title"), BTreeMap::new());
    title_elem.append(NodeRef::new_text(title));
    if let Ok(head_elem) = root_node.select_first("head") {
        head_elem.as_node().append(title_elem);
    } else {
        // No <head>: synthesize one carrying the title and prepend it to <html>.
        debug!("{}", HEAD_ELEM_NOT_FOUND);
        let head_elem = NodeRef::new_element(create_qualname("head"), BTreeMap::new());
        head_elem.append(title_elem);
        let html_elem = root_node.select_first("html").unwrap();
        html_elem.as_node().prepend(head_elem);
    }
}
/// Creates the appendix in an HTML document where article sources are added in a `<footer>` element
/// Appends a `<footer>` appendix listing every article's source link to the
/// end of the document. Links fall back to the URL when a title is empty.
fn insert_appendix(root_node: &NodeRef, article_links: Vec<(&MetaData, &str)>) {
    let mut link_tags = String::new();
    for (meta_data, url) in article_links {
        let article_name = if meta_data.title().is_empty() {
            url
        } else {
            meta_data.title()
        };
        link_tags.push_str(&format!("<a href=\"{}\">{}</a><br></br>", url, article_name));
    }
    let footer_inner_html = format!(
        "<footer><h2>Appendix</h2><h3>Article sources</h3>{}</footer>",
        link_tags
    );
    // Parse the markup in a fragment, then graft the footer onto the document.
    let footer_container =
        kuchiki::parse_fragment(create_qualname("div"), Vec::new()).one(footer_inner_html);
    let footer_elem = footer_container.select_first("footer").unwrap();
    root_node.append(footer_elem.as_node().clone());
}
/// Inlines the CSS stylesheets into the HTML article node
/// Prepends a `<style>` element with the bundled stylesheets to the
/// document's `<head>`, according to the user's CSS configuration.
/// Does nothing for `CSSConfig::None`; panics if `<head>` is missing.
fn inline_css(root_node: &NodeRef, css_config: &CSSConfig) {
    let body_stylesheet = include_str!("./assets/body.min.css");
    let header_stylesheet = include_str!("./assets/headers.min.css");
    let css_str = match css_config {
        cli::CSSConfig::None => return,
        cli::CSSConfig::NoHeaders => body_stylesheet.to_string(),
        cli::CSSConfig::All => format!("{}{}", body_stylesheet, header_stylesheet),
    };
    let css_html_str = format!("<style>{}</style>", css_str);
    let style_container =
        kuchiki::parse_fragment(create_qualname("div"), Vec::new()).one(css_html_str);
    let style_elem = style_container.select_first("style").unwrap();
    let head_elem = root_node.select_first("head").expect(HEAD_ELEM_NOT_FOUND);
    head_elem.as_node().prepend(style_elem.as_node().to_owned());
}
/// Removes the <link> of the stylesheet. This is used when inlining styles
/// Detaches the `<link href="stylesheet.css">` element, if present, since the
/// styles are inlined instead of linked.
fn remove_existing_stylesheet_link(root_node: &NodeRef) {
    match root_node.select_first("link[href=\"stylesheet.css\"]") {
        Ok(style_link_elem) => style_link_elem.as_node().detach(),
        Err(_) => (),
    }
}
// Unit tests for the HTML export helpers above.
#[cfg(test)]
mod test {
    use super::*;
    // `insert_title_elem` adds exactly one <title> carrying the given text.
    #[test]
    fn test_insert_title_elem() {
        let title = "Sample title";
        let html_str = r#"<html><head><meta charset="UTF-8"/></head><body></body></html>"#;
        let doc = kuchiki::parse_html().one(html_str);
        assert_eq!(0, doc.select("title").unwrap().count());
        insert_title_elem(&doc, title);
        assert_eq!(1, doc.select("title").unwrap().count());
        assert_eq!(title, doc.select_first("title").unwrap().text_contents());
    }
    // `create_qualname` always targets the XHTML namespace with no prefix.
    #[test]
    fn test_create_qualname() {
        let name = "div";
        assert_eq!(
            create_qualname(name),
            QualName::new(
                None,
                Namespace::from("http://www.w3.org/1999/xhtml"),
                LocalName::from(name)
            )
        );
    }
    // `inline_css` injects zero or one <style> element whose text matches the
    // bundled stylesheets selected by the CSSConfig variant.
    #[test]
    fn test_inline_css() {
        let html_str = r#"<html>
    <head><meta charset="UTF-8"/></head>
    <body>
        <p>Lorem ipsum sample text goes here.</p>
    </body>
</html>"#;
        let doc = kuchiki::parse_html().one(html_str);
        let body_stylesheet = include_str!("./assets/body.min.css");
        let header_stylesheet = include_str!("./assets/headers.min.css");
        assert_eq!(0, doc.select("style").unwrap().count());
        // None: document left untouched.
        inline_css(&doc, &CSSConfig::None);
        assert_eq!(0, doc.select("style").unwrap().count());
        // NoHeaders: only the body stylesheet is inlined.
        inline_css(&doc, &CSSConfig::NoHeaders);
        assert_eq!(1, doc.select("style").unwrap().count());
        let style_elem = doc.select_first("style").unwrap();
        assert_eq!(body_stylesheet, style_elem.text_contents());
        // All: body followed by header stylesheet (fresh document to avoid
        // the style element added above).
        let doc = kuchiki::parse_html().one(html_str);
        inline_css(&doc, &CSSConfig::All);
        assert_eq!(1, doc.select("style").unwrap().count());
        let style_elem = doc.select_first("style").unwrap();
        assert_eq!(
            format!("{}{}", body_stylesheet, header_stylesheet),
            style_elem.text_contents()
        );
    }
    // The stylesheet <link> is removed once styles are inlined.
    #[test]
    fn test_remove_existing_stylesheet_link() {
        let html_str = r#"<html>
    <head><link href="stylesheet.css"></link></head>
    <body><p>Lorem ipsum sample text goes here.</p></body></html>"#;
        let doc = kuchiki::parse_html().one(html_str);
        assert_eq!(1, doc.select("link").unwrap().count());
        remove_existing_stylesheet_link(&doc);
        assert_eq!(0, doc.select("link").unwrap().count());
    }
    // The appendix footer contains the headings plus one anchor per source,
    // falling back to the URL as link text when the title is empty.
    #[test]
    fn test_insert_appendix() {
        let html_str = r#"<html>
    <head><meta charset="UTF-8"/></head>
    <body>
        <p>Lorem ipsum sample text goes here.</p>
    </body>
</html>"#;
        let doc = kuchiki::parse_html().one(html_str);
        let meta_data = MetaData::new();
        assert_eq!(0, doc.select("footer").unwrap().count());
        insert_appendix(&doc, vec![(&meta_data, "http://example.org")]);
        assert_eq!(1, doc.select("footer").unwrap().count());
        assert_eq!(1, doc.select("footer > h2").unwrap().count());
        assert_eq!(
            "Appendix",
            doc.select_first("footer > h2").unwrap().text_contents()
        );
        assert_eq!(1, doc.select("footer > h3").unwrap().count());
        assert_eq!(
            "Article sources",
            doc.select_first("footer > h3").unwrap().text_contents()
        );
        assert_eq!(1, doc.select("a").unwrap().count());
        let anchor_elem = doc.select_first("a").unwrap();
        assert_eq!("http://example.org", anchor_elem.text_contents());
        let anchor_attrs = anchor_elem.attributes.borrow();
        assert_eq!(Some("http://example.org"), anchor_attrs.get("href"));
    }
}
| rust | MIT | 796a34a34c365bc06191bf634918b71eaee7bd5d | 2026-01-04T20:24:03.677608Z | false |
hipstermojo/paperoni | https://github.com/hipstermojo/paperoni/blob/796a34a34c365bc06191bf634918b71eaee7bd5d/src/epub.rs | src/epub.rs | use std::collections::HashMap;
use std::fs::File;
use comfy_table::{Attribute, Cell, CellAlignment, Color, ContentArrangement, Table};
use epub_builder::{EpubBuilder, EpubContent, TocElement, ZipLibrary};
use html5ever::tendril::fmt::Slice;
use indicatif::{ProgressBar, ProgressStyle};
use kuchiki::NodeRef;
use log::{debug, error, info};
use crate::{cli::AppConfig, errors::PaperoniError, extractor::Article};
lazy_static! {
    // Captures any single character that must be XML-escaped in text or
    // attribute values; used by `serialize_to_xhtml`.
    static ref ESC_SEQ_REGEX: regex::Regex = regex::Regex::new(r#"(&|<|>|'|")"#).unwrap();
    // Character class of allowed attribute/id characters.
    // NOTE(review): `is_match` with this regex only checks that *some*
    // character is valid, not that *every* character is — confirm this is the
    // intended check at the call sites in `generate_header_ids` and
    // `serialize_to_xhtml`.
    static ref VALID_ATTR_CHARS_REGEX: regex::Regex = regex::Regex::new(r#"[a-z0-9\-_:]"#).unwrap();
}
/// Exports the extracted articles as EPUB files.
///
/// When `app_config.merged` is `Some(name)` a single epub named `name` is
/// produced with one chapter per article plus an appendix of sources;
/// otherwise one epub per article is written under the configured output
/// directory. Per-article failures are accumulated into the returned error
/// vector; failures that make the whole export impossible (zip/builder
/// creation, stylesheet registration, final generation) abort immediately.
pub fn generate_epubs(
    articles: Vec<Article>,
    app_config: &AppConfig,
    successful_articles_table: &mut Table,
) -> Result<(), Vec<PaperoniError>> {
    if articles.is_empty() {
        return Ok(());
    }
    // A hidden bar lets the rest of the function call `inc`/`finish_with_message`
    // unconditionally.
    let bar = if app_config.can_disable_progress_bar {
        ProgressBar::hidden()
    } else {
        let enabled_bar = ProgressBar::new(articles.len() as u64);
        let style = ProgressStyle::default_bar().template(
            "{spinner:.cyan} [{elapsed_precise}] {bar:40.white} {:>8} epub {pos}/{len:7} {msg:.green}",
        );
        enabled_bar.set_style(style);
        if !articles.is_empty() {
            enabled_bar.set_message("Generating epubs");
        }
        enabled_bar
    };
    let mut errors: Vec<PaperoniError> = Vec::new();
    match app_config.merged {
        // Merged mode: one epub containing every article as a chapter.
        Some(ref name) => {
            successful_articles_table.set_header(vec![Cell::new("Table of Contents")
                .add_attribute(Attribute::Bold)
                .set_alignment(CellAlignment::Center)
                .fg(Color::Green)]);
            // Both the zip backend and the builder construction can fail;
            // either failure aborts the merged export.
            let mut epub = match EpubBuilder::new(match ZipLibrary::new() {
                Ok(zip_library) => zip_library,
                Err(err) => {
                    let mut paperoni_err: PaperoniError = err.into();
                    paperoni_err.set_article_source(name);
                    errors.push(paperoni_err);
                    return Err(errors);
                }
            }) {
                Ok(epub) => epub,
                Err(err) => {
                    let mut paperoni_err: PaperoniError = err.into();
                    paperoni_err.set_article_source(name);
                    errors.push(paperoni_err);
                    return Err(errors);
                }
            };
            debug!("Creating {:?}", name);
            if app_config.inline_toc {
                epub.inline_toc();
            }
            match add_stylesheets(&mut epub, app_config) {
                Ok(_) => (),
                Err(e) => {
                    error!("Unable to add stylesheets to epub file");
                    let mut paperoni_err: PaperoniError = e.into();
                    paperoni_err.set_article_source(name);
                    errors.push(paperoni_err);
                    return Err(errors);
                }
            }
            // `fold` threads the `&mut epub` through each iteration so the
            // inner closure can borrow it; it is effectively a for-each.
            articles
                .iter()
                .enumerate()
                .fold(&mut epub, |epub, (idx, article)| {
                    // Closure so `?` collects any failure into one error per article.
                    let mut article_result = || -> Result<(), PaperoniError> {
                        let content_url = format!("article_{}.xhtml", idx);
                        let mut xhtml_buf = Vec::new();
                        let header_level_tocs =
                            get_header_level_toc_vec(&content_url, article.node_ref());
                        serialize_to_xhtml(article.node_ref(), &mut xhtml_buf)?;
                        let xhtml_str = std::str::from_utf8(&xhtml_buf)?;
                        let section_name = article.metadata().title();
                        let mut content = EpubContent::new(&content_url, xhtml_str.as_bytes())
                            .title(replace_escaped_characters(section_name));
                        for toc_element in header_level_tocs {
                            content = content.child(toc_element);
                        }
                        // NOTE(review): the book title is (re)set on every
                        // iteration with the same value — harmless but redundant.
                        epub.metadata("title", replace_escaped_characters(name))?;
                        epub.add_content(content)?;
                        info!("Adding images for {:?}", name);
                        article.img_urls.iter().for_each(|img| {
                            // TODO: Add error handling and return errors as a vec
                            let mut file_path = std::env::temp_dir();
                            file_path.push(&img.0);
                            let img_buf = File::open(&file_path).expect("Can't read file");
                            epub.add_resource(
                                file_path.file_name().unwrap(),
                                img_buf,
                                img.1.as_ref().unwrap(),
                            )
                            .unwrap();
                        });
                        info!("Added images for {:?}", name);
                        Ok(())
                    };
                    if let Err(mut error) = article_result() {
                        error.set_article_source(&article.url);
                        errors.push(error);
                    }
                    bar.inc(1);
                    successful_articles_table.add_row(vec![article.metadata().title()]);
                    epub
                });
            let appendix = generate_appendix(articles.iter().collect());
            if let Err(err) = epub.add_content(
                EpubContent::new("appendix.xhtml", appendix.as_bytes())
                    .title(replace_escaped_characters("Article Sources")),
            ) {
                let mut paperoni_err: PaperoniError = err.into();
                paperoni_err.set_article_source(&name);
                errors.push(paperoni_err);
                return Err(errors);
            }
            let mut out_file = File::create(&name).unwrap();
            match epub.generate(&mut out_file) {
                Ok(_) => (),
                Err(err) => {
                    let mut paperoni_err: PaperoniError = err.into();
                    paperoni_err.set_article_source(&name);
                    errors.push(paperoni_err);
                    error!("Failed to generate epub: {}", name);
                    bar.finish_with_message("epub generation failed\n");
                    return Err(errors);
                }
            }
            bar.finish_with_message("Generated epub\n");
            debug!("Created {:?}", name);
            println!("Created {:?}", name);
        }
        // Per-article mode: one epub per article.
        None => {
            successful_articles_table
                .set_header(vec![Cell::new("Downloaded articles")
                    .add_attribute(Attribute::Bold)
                    .set_alignment(CellAlignment::Center)
                    .fg(Color::Green)])
                .set_content_arrangement(ContentArrangement::Dynamic);
            for article in &articles {
                // Closure so `?` collects any failure into one error per article.
                let mut result = || -> Result<(), PaperoniError> {
                    let mut epub = EpubBuilder::new(ZipLibrary::new()?)?;
                    // Slashes in titles would create nested paths; flatten them.
                    let file_name = format!(
                        "{}/{}.epub",
                        app_config.output_directory.as_deref().unwrap_or("."),
                        article
                            .metadata()
                            .title()
                            .replace("/", " ")
                            .replace("\\", " ")
                    );
                    debug!("Creating {:?}", file_name);
                    let mut out_file = File::create(&file_name).unwrap();
                    let mut xhtml_buf = Vec::new();
                    let header_level_tocs =
                        get_header_level_toc_vec("index.xhtml", article.node_ref());
                    serialize_to_xhtml(article.node_ref(), &mut xhtml_buf)
                        .expect("Unable to serialize to xhtml");
                    let xhtml_str = std::str::from_utf8(&xhtml_buf).unwrap();
                    if let Some(author) = article.metadata().byline() {
                        epub.metadata("author", replace_escaped_characters(author))?;
                    }
                    add_stylesheets(&mut epub, app_config)?;
                    let title = replace_escaped_characters(article.metadata().title());
                    epub.metadata("title", &title)?;
                    let mut content =
                        EpubContent::new("index.xhtml", xhtml_str.as_bytes()).title(title);
                    for toc_element in header_level_tocs {
                        content = content.child(toc_element);
                    }
                    epub.add_content(content)?;
                    // Images were downloaded to the temp dir by the fetch step.
                    for img in &article.img_urls {
                        let mut file_path = std::env::temp_dir();
                        file_path.push(&img.0);
                        let img_buf = File::open(&file_path).expect("Can't read image file");
                        epub.add_resource(
                            file_path.file_name().unwrap(),
                            img_buf,
                            img.1.as_ref().unwrap(),
                        )?;
                    }
                    let appendix = generate_appendix(vec![&article]);
                    epub.add_content(
                        EpubContent::new("appendix.xhtml", appendix.as_bytes())
                            .title(replace_escaped_characters("Article Source")),
                    )?;
                    epub.generate(&mut out_file)?;
                    bar.inc(1);
                    successful_articles_table.add_row(vec![article.metadata().title()]);
                    debug!("Created {:?}", file_name);
                    Ok(())
                };
                if let Err(mut error) = result() {
                    error.set_article_source(&article.url);
                    errors.push(error);
                }
            }
            bar.finish_with_message("Generated epubs\n");
        }
    }
    if errors.is_empty() {
        Ok(())
    } else {
        Err(errors)
    }
}
/// Replaces characters that have to be escaped before adding to the epub's metadata.
///
/// `&` is replaced first so the ampersands introduced by the `&lt;`/`&gt;`
/// replacements are not themselves double-escaped. (The previous body
/// replaced each character with itself — an HTML-entity-decoding corruption
/// that made the function a no-op and produced invalid XML metadata.)
fn replace_escaped_characters(value: &str) -> String {
    value
        .replace("&", "&amp;")
        .replace("<", "&lt;")
        .replace(">", "&gt;")
}
/// Registers the bundled stylesheets with the epub according to the user's
/// CSS configuration: `All` (headers + body), `NoHeaders` (body only), or
/// nothing for any other variant.
///
/// NOTE(review): `.as_bytes()` below is called on byte slices/vectors, not
/// strings — it appears to resolve through the imported
/// `html5ever::tendril::fmt::Slice` trait; confirm before refactoring.
fn add_stylesheets<T: epub_builder::Zip>(
    epub: &mut EpubBuilder<T>,
    app_config: &AppConfig,
) -> Result<(), epub_builder::Error> {
    let body_stylesheet: &[u8] = include_bytes!("./assets/body.min.css");
    let header_stylesheet: &[u8] = include_bytes!("./assets/headers.min.css");
    match app_config.css_config {
        crate::cli::CSSConfig::All => {
            epub.stylesheet([header_stylesheet, body_stylesheet].concat().as_bytes())?;
            Ok(())
        }
        crate::cli::CSSConfig::NoHeaders => {
            epub.stylesheet(body_stylesheet.as_bytes())?;
            Ok(())
        }
        _ => Ok(()),
    }
}
//TODO: The type signature of the argument should change as it requires that merged articles create an entirely new Vec of references
/// Renders the XHTML appendix page listing each article's source as a link.
/// Falls back to the URL as the link text when the article title is empty;
/// both pieces of text are escaped for XML.
fn generate_appendix(articles: Vec<&Article>) -> String {
    let link_tags: String = articles
        .iter()
        .map(|article| {
            let article_name = if !article.metadata().title().is_empty() {
                article.metadata().title()
            } else {
                &article.url
            };
            format!(
                "<a href=\"{}\">{}</a><br></br>",
                replace_escaped_characters(&article.url),
                replace_escaped_characters(article_name)
            )
        })
        .collect();
    let template = format!(
        r#"<html xmlns="http://www.w3.org/1999/xhtml" xmlns:epub="http://www.idpf.org/2007/ops">
    <head>
        <link rel="stylesheet" href="stylesheet.css" type="text/css"></link>
    </head>
    <body>
        <h2>Appendix</h2><h3>Article sources</h3>
        {}
    </body>
</html>"#,
        link_tags
    );
    template
}
/// Adds an id attribute to header elements and assigns a value based on
/// the hash of the text content. Headers with id attributes are not modified.
/// The headers here are known to have text because the grabbed article from
/// readability removes headers with no text.
fn generate_header_ids(root_node: &NodeRef) {
    let headers = root_node
        .select("h1, h2, h3, h4")
        .expect("Unable to create selector for headings");
    // A header is (re)assigned an id when it has none, or when its existing
    // id fails the regex check.
    // NOTE(review): `is_match` only tests whether *some* character is in the
    // allowed class, so an id is replaced only when it contains *no* valid
    // character at all — confirm that is the intended validity rule.
    let headers_no_id = headers.filter(|node_data_ref| {
        let attrs = node_data_ref.attributes.borrow();
        !attrs.contains("id")
            || attrs
                .get("id")
                .map(|val| !VALID_ATTR_CHARS_REGEX.is_match(&val))
                .unwrap()
    });
    for header in headers_no_id {
        let mut attrs = header.attributes.borrow_mut();
        let text = header.text_contents();
        // The value of the id begins with an underscore because the hexadecimal
        // digest might start with a number which would make it an invalid id
        // when querying with selectors
        let value = format!("_{:x}", md5::compute(text));
        attrs.insert("id", value);
    }
}
/// Returns a vector of `TocElement` from a NodeRef used for adding to the Table of Contents for navigation
///
/// Walks all h1–h4 headings in document order and nests each heading under
/// the closest preceding heading of a higher level, producing a tree of
/// `TocElement`s whose urls are `{content_url}#{header-id}` anchors.
/// `generate_header_ids` is run first so every heading has an id.
fn get_header_level_toc_vec(content_url: &str, article: &NodeRef) -> Vec<TocElement> {
    // Depth starts from 1
    const HEADER_LEVEL_MAX_DEPTH: usize = 4;
    let mut headers_vec: Vec<TocElement> = Vec::new();
    // Maps tag name -> heading depth (1-based).
    let mut header_levels = HashMap::with_capacity(HEADER_LEVEL_MAX_DEPTH);
    header_levels.insert("h1", 1);
    header_levels.insert("h2", 2);
    header_levels.insert("h3", 3);
    header_levels.insert("h4", 4);
    generate_header_ids(article);
    let headings = article
        .select("h1, h2, h3, h4")
        .expect("Unable to create selector for headings");
    // The header list will be generated using some sort of backtracking algorithm
    // There will be a stack of maximum size 4 (since it only goes to h4 now)
    // stack[level - 1] holds the most recent, still-open heading of that level.
    let mut stack: Vec<Option<TocElement>> = std::iter::repeat(None)
        .take(HEADER_LEVEL_MAX_DEPTH)
        .collect::<_>();
    for heading in headings {
        let elem_name: &str = &heading.name.local;
        let attrs = heading.attributes.borrow();
        // The id was just assigned by `generate_header_ids`, so it must exist.
        let id = attrs
            .get("id")
            .map(ToOwned::to_owned)
            .expect("Unable to get id value in get_header_level_toc_vec");
        let url = format!("{}#{}", content_url, id);
        let level = header_levels[elem_name];
        let index = level - 1;
        if let Some(mut existing_toc) = stack.get_mut(index).take().cloned().flatten() {
            // If a toc element already exists at that header level, consume all the toc elements
            // of a lower hierarchy e.g if the existing toc is a h2, then the h3 and h4 in the stack
            // will be consumed.
            // We collapse the children by folding from the right to the left of the stack.
            let descendants_levels = HEADER_LEVEL_MAX_DEPTH - level;
            let folded_descendants = stack
                .iter_mut()
                .rev()
                .take(descendants_levels)
                .map(|toc_elem| toc_elem.take())
                .filter(|toc_elem| toc_elem.is_some())
                .map(|toc_elem| toc_elem.unwrap())
                .reduce(|child, parent| parent.child(child));
            if let Some(child) = folded_descendants {
                existing_toc = existing_toc.child(child);
            };
            // Find the nearest ancestor to embed into.
            // If this toc_elem was a h1, then just add it to the headers_vec
            if index == 0 {
                headers_vec.push(existing_toc);
            } else {
                // Otherwise, find the nearest ancestor to add it to. If none exists, add it to the headers_vec
                let first_ancestor = stack
                    .iter_mut()
                    .take(level - 1)
                    .map(|toc_elem| toc_elem.as_mut())
                    .rfind(|toc_elem| toc_elem.is_some())
                    .flatten();
                match first_ancestor {
                    Some(ancestor_toc_elem) => {
                        *ancestor_toc_elem = ancestor_toc_elem.clone().child(existing_toc);
                    }
                    None => {
                        headers_vec.push(existing_toc);
                    }
                }
            }
        }
        // Open a fresh toc element for the current heading at its level.
        if let Some(toc_elem) = stack.get_mut(index) {
            *toc_elem = Some(TocElement::new(
                url,
                replace_escaped_characters(&heading.text_contents()),
            ));
        }
    }
    // Flush whatever is still open on the stack, nesting lower levels under
    // higher ones, and append the result as the final top-level entry.
    let folded_stack = stack
        .into_iter()
        .rev()
        .filter(|toc_elem| toc_elem.is_some())
        .map(|opt_toc_elem| opt_toc_elem.unwrap())
        .reduce(|child, parent| parent.child(child));
    if let Some(toc_elem) = folded_stack {
        headers_vec.push(toc_elem)
    }
    headers_vec
}
/// Serializes a NodeRef to a string that is XHTML compatible
/// The only DOM nodes serialized are Text and Element nodes
fn serialize_to_xhtml<W: std::io::Write>(
node_ref: &NodeRef,
mut w: &mut W,
) -> Result<(), PaperoniError> {
{
// Add XHTML attributes
let html_elem = node_ref
.select_first("html")
.expect("Unable to get <html> element in article");
let mut html_attrs = html_elem.attributes.borrow_mut();
html_attrs.insert("xmlns", "http://www.w3.org/1999/xhtml".into());
html_attrs.insert("xmlns:epub", "http://www.idpf.org/2007/ops".into());
}
let mut escape_map = HashMap::new();
escape_map.insert("<", "<");
escape_map.insert(">", ">");
escape_map.insert("&", "&");
escape_map.insert("\"", """);
escape_map.insert("'", "'");
for edge in node_ref.traverse_inclusive() {
match edge {
kuchiki::iter::NodeEdge::Start(n) => match n.data() {
kuchiki::NodeData::Text(rc_text) => {
let text = rc_text.borrow();
let esc_text = ESC_SEQ_REGEX
.replace_all(&text, |captures: ®ex::Captures| escape_map[&captures[1]]);
write!(&mut w, "{}", esc_text)?;
}
kuchiki::NodeData::Element(elem_data) => {
let attrs = elem_data.attributes.borrow();
let attrs_str = attrs
.map
.iter()
.filter(|(k, _)| {
let attr_key: &str = &k.local;
attr_key.is_ascii() && VALID_ATTR_CHARS_REGEX.is_match(attr_key)
})
.map(|(k, v)| {
format!(
"{}=\"{}\"",
k.local,
ESC_SEQ_REGEX
.replace_all(&v.value, |captures: ®ex::Captures| {
escape_map[&captures[1]]
})
)
})
.fold("".to_string(), |acc, val| acc + " " + &val);
write!(&mut w, "<{}{}>", &elem_data.name.local, attrs_str)?;
}
_ => (),
},
kuchiki::iter::NodeEdge::End(n) => match n.data() {
kuchiki::NodeData::Element(elem_data) => {
write!(&mut w, "</{}>", &elem_data.name.local)?;
}
_ => (),
},
}
}
Ok(())
}
// Unit tests for the epub helpers. The expected values in
// `test_replace_escaped_characters` are restored to their entity-escaped
// forms (`&gt;`, `&lt;`) to match the function's documented escaping — the
// unescaped values were an HTML-entity-decoding corruption that would have
// asserted the function is a no-op.
#[cfg(test)]
mod test {
    use kuchiki::traits::*;
    use super::{generate_header_ids, get_header_level_toc_vec, replace_escaped_characters};
    // `&`, `<` and `>` are escaped; other text passes through unchanged.
    #[test]
    fn test_replace_escaped_characters() {
        let mut value = "Lorem ipsum";
        assert_eq!(replace_escaped_characters(value), "Lorem ipsum");
        value = "Memory safe > memory unsafe";
        assert_eq!(
            replace_escaped_characters(value),
            "Memory safe &gt; memory unsafe"
        );
        value = "Author Name <author@mail.example>";
        assert_eq!(
            replace_escaped_characters(value),
            "Author Name &lt;author@mail.example&gt;"
        );
    }
    // Every heading gets a non-empty id; existing valid ids are kept.
    #[test]
    fn test_generate_header_ids() {
        let html_str = r#"
        <!DOCTYPE html>
        <html>
            <body>
                <h1>Heading 1</h1>
                <h2 id="heading-2">Heading 2</h2>
                <h2 id="heading-2-again">Heading 2 again</h2>
                <h4>Heading 4</h4>
                <h1>Heading 1 again</h1>
                <h3 class="heading">Heading 3</h3>
            </body>
        </html>
        "#;
        let doc = kuchiki::parse_html().one(html_str);
        generate_header_ids(&doc);
        let mut headers = doc.select("h1, h2, h3, h4").unwrap();
        let all_headers_have_ids = headers.all(|node_data_ref| {
            let attrs = node_data_ref.attributes.borrow();
            if let Some(id) = attrs.get("id") {
                !id.trim().is_empty()
            } else {
                false
            }
        });
        assert_eq!(true, all_headers_have_ids);
        // Generated ids are `_` + md5 of the heading text.
        let selector = format!("h1#_{:x}", md5::compute("Heading 1"));
        assert_eq!(true, doc.select_first(&selector).is_ok());
        let selector = format!("h1#_{:x}", md5::compute("Heading 1 again"));
        assert_eq!(true, doc.select_first(&selector).is_ok());
        let selector = "h2#heading-2-again";
        assert_eq!(true, doc.select_first(selector).is_ok());
    }
    // Heading hierarchy is reflected in the nesting of the returned TocElements.
    #[test]
    fn test_get_header_level_toc_vec() {
        // NOTE: Due to `TocElement` not implementing PartialEq, the tests here
        // will need to be manually written to cover for this
        let html_str = r#"
        <!DOCTYPE html>
        <html>
            <body>
                <p>Lorem ipsum</p>
            </body>
        </html>
        "#;
        let doc = kuchiki::parse_html().one(html_str);
        let toc_vec = get_header_level_toc_vec("index.xhtml", &doc);
        assert_eq!(0, toc_vec.len());
        let html_str = r#"
        <!DOCTYPE html>
        <html>
            <body>
                <h1 id="heading-1">Heading 1</h1>
                <p>Lorem ipsum</p>
                <div>
                    <h2 id="heading-2">Heading 2</h2>
                    <p>Lorem ipsum</p>
                    <p>Lorem ipsum</p>
                </div>
                <h3 id="subheading-3">Subheading 3</h2>
                <p>Lorem ipsum</p>
                <h1 id="heading-2">Second Heading 1</h2>
                <p>Lorem ipsum</p>
            </body>
        </html>
        "#;
        let doc = kuchiki::parse_html().one(html_str);
        let toc_vec = get_header_level_toc_vec("index.xhtml", &doc);
        assert_eq!(2, toc_vec.len());
        let first_h1_toc = toc_vec.first().unwrap();
        assert_eq!("Heading 1", first_h1_toc.title);
        assert_eq!(1, first_h1_toc.children.len());
        let h2_toc = first_h1_toc.children.first().unwrap();
        assert_eq!("Heading 2", h2_toc.title);
        assert_eq!(1, h2_toc.children.len());
        let h3_toc = h2_toc.children.first().unwrap();
        assert_eq!("Subheading 3", h3_toc.title);
        assert_eq!(0, h3_toc.children.len());
        let last_h1_toc = toc_vec.last().unwrap();
        assert_eq!("Second Heading 1", last_h1_toc.title);
        assert_eq!(0, last_h1_toc.children.len());
        let html_str = r#"
        <!DOCTYPE html>
        <html>
            <body>
                <h1 id="heading-1">Heading 1</h1>
                <p>Lorem ipsum</p>
                <div>
                    <h2 id="heading-2">Heading 2</h2>
                    <p>Lorem ipsum</p>
                    <p>Lorem ipsum</p>
                    <h3 id="subheading-3">Subheading 3</h2>
                    <p>Lorem ipsum</p>
                </div>
                <h2 id="heading-2">Heading 2</h2>
                <p>Lorem ipsum</p>
                <h4 id="subheading-4">Subheading 4</h4>
                <h2 id="conclusion">Conclusion</h2>
            </body>
        </html>
        "#;
        let doc = kuchiki::parse_html().one(html_str);
        let toc_vec = get_header_level_toc_vec("index.xhtml", &doc);
        assert_eq!(1, toc_vec.len());
        let h1_toc = toc_vec.first().unwrap();
        assert_eq!("Heading 1", h1_toc.title);
        assert_eq!(3, h1_toc.children.len());
        let first_h2_toc = h1_toc.children.first().unwrap();
        assert_eq!("Heading 2", first_h2_toc.title);
        assert_eq!(1, first_h2_toc.children.len());
        let h3_toc = first_h2_toc.children.first().unwrap();
        assert_eq!("Subheading 3", h3_toc.title);
        assert_eq!(0, h3_toc.children.len());
    }
}
| rust | MIT | 796a34a34c365bc06191bf634918b71eaee7bd5d | 2026-01-04T20:24:03.677608Z | false |
hipstermojo/paperoni | https://github.com/hipstermojo/paperoni/blob/796a34a34c365bc06191bf634918b71eaee7bd5d/src/http.rs | src/http.rs | use async_std::io::prelude::*;
use async_std::task;
use async_std::{fs::File, stream};
use futures::StreamExt;
use indicatif::ProgressBar;
use log::warn;
use log::{debug, info};
use url::Url;
use crate::cli::AppConfig;
use crate::errors::{ErrorKind, ImgError, PaperoniError};
use crate::extractor::Article;
/// A fetched page as `(final_url, html_body)` — the URL reflects any
/// redirects followed by `fetch_html`.
type HTMLResource = (String, String);
/// Fetches all configured URLs concurrently and extracts an [`Article`] from
/// each successful response.
///
/// Runs the async pipeline to completion with `task::block_on`; at most
/// `app_config.max_conn` fetches are in flight at once. Extraction errors are
/// pushed into `errors`; articles whose images partly failed to download are
/// recorded in `partial_downloads` but still returned. The progress bar is
/// advanced once per URL regardless of outcome.
pub fn download(
    app_config: &AppConfig,
    bar: &ProgressBar,
    partial_downloads: &mut Vec<PartialDownload>,
    errors: &mut Vec<PaperoniError>,
) -> Vec<Article> {
    task::block_on(async {
        // `buffered` drives up to `max_conn` fetch futures concurrently while
        // yielding results in input order.
        let urls_iter = app_config.urls.iter().map(|url| fetch_html(url));
        let mut responses = stream::from_iter(urls_iter).buffered(app_config.max_conn);
        let mut articles = Vec::new();
        while let Some(fetch_result) = responses.next().await {
            match fetch_result {
                Ok((url, html)) => {
                    debug!("Extracting {}", &url);
                    let mut extractor = Article::from_html(&html, &url);
                    bar.set_message("Extracting...");
                    match extractor.extract_content() {
                        Ok(_) => {
                            extractor.extract_img_urls();
                            // Image failures downgrade the article to a
                            // partial download instead of discarding it.
                            if let Err(img_errors) =
                                download_images(&mut extractor, &Url::parse(&url).unwrap(), &bar)
                                    .await
                            {
                                partial_downloads
                                    .push(PartialDownload::new(&url, extractor.metadata().title()));
                                warn!(
                                    "{} image{} failed to download for {}",
                                    img_errors.len(),
                                    if img_errors.len() > 1 { "s" } else { "" },
                                    url
                                );
                                for img_error in img_errors {
                                    warn!(
                                        "{}\n\t\tReason {}",
                                        img_error.url().as_ref().unwrap(),
                                        img_error
                                    );
                                }
                            }
                            articles.push(extractor);
                        }
                        Err(mut e) => {
                            e.set_article_source(&url);
                            errors.push(e);
                        }
                    }
                }
                Err(e) => errors.push(e),
            }
            bar.inc(1);
        }
        articles
    })
}
/// Fetches `url` and returns the final `(url, html_body)` pair.
///
/// Follows up to 5 HTTP redirects manually, resolving relative `Location`
/// headers against the original URL. Only responses whose Content-Type
/// essence is `text/html` are accepted; anything else (or exhausting the
/// redirect budget) yields an `ErrorKind::HTTPError`. Any error is tagged
/// with the requested URL as its article source before being returned.
pub async fn fetch_html(url: &str) -> Result<HTMLResource, PaperoniError> {
    let client = surf::Client::new();
    debug!("Fetching {}", url);
    let process_request = async {
        let mut redirect_count: u8 = 0;
        let base_url = Url::parse(&url)?;
        let mut url = base_url.clone();
        // Follow at most 5 redirects by hand.
        while redirect_count < 5 {
            redirect_count += 1;
            let req = surf::get(&url);
            let mut res = client.send(req).await?;
            if res.status().is_redirection() {
                if let Some(location) = res.header(surf::http::headers::LOCATION) {
                    match Url::parse(location.last().as_str()) {
                        Ok(valid_url) => {
                            info!("Redirecting {} to {}", url, valid_url);
                            url = valid_url
                        }
                        Err(e) => match e {
                            // A relative Location header is resolved against
                            // the original request URL.
                            url::ParseError::RelativeUrlWithoutBase => {
                                match base_url.join(location.last().as_str()) {
                                    Ok(joined_url) => {
                                        info!("Redirecting {} to {}", url, joined_url);
                                        url = joined_url;
                                    }
                                    Err(e) => return Err(e.into()),
                                }
                            }
                            e => return Err(e.into()),
                        },
                    };
                }
            } else if res.status().is_success() {
                if let Some(mime) = res.content_type() {
                    if mime.essence() == "text/html" {
                        debug!("Successfully fetched {}", url);
                        return Ok((url.to_string(), res.body_string().await?));
                    } else {
                        let msg = format!(
                            "Invalid HTTP response. Received {} instead of text/html",
                            mime.essence()
                        );
                        return Err(ErrorKind::HTTPError(msg).into());
                    }
                } else {
                    return Err(ErrorKind::HTTPError("Unknown HTTP response".to_owned()).into());
                }
            } else {
                let msg = format!("Request failed: HTTP {}", res.status());
                return Err(ErrorKind::HTTPError(msg).into());
            }
        }
        // Redirect budget exhausted without a success response.
        Err(ErrorKind::HTTPError("Unable to fetch HTML".to_owned()).into())
    };
    process_request.await.map_err(|mut error: PaperoniError| {
        error.set_article_source(url);
        error
    })
}
/// (original image URL, local temp-file name of the downloaded image, optional MIME essence).
type ImgItem<'a> = (&'a str, String, Option<String>);
/// Validates an image HTTP response and spools its body to a uniquely-named
/// file in the system temp directory.
///
/// On success returns `(original_url, temp_file_name, mime_essence)`.
///
/// # Errors
/// Fails on a non-success status, a declared MIME type that is not
/// `image/*`, a missing `Content-Type` header, or any I/O error while
/// writing the temp file.
async fn process_img_response<'a>(
    img_response: &mut surf::Response,
    url: &'a str,
) -> Result<ImgItem<'a>, ImgError> {
    if !img_response.status().is_success() {
        let kind = ErrorKind::HTTPError(format!(
            "Non-success HTTP status code ({})",
            img_response.status()
        ));
        return Err(ImgError::with_kind(kind));
    }
    // `?` relies on the existing From<_> conversions into ImgError.
    let img_content: Vec<u8> = img_response.body_bytes().await?;
    // Read the Content-Type header once and derive both the MIME essence and
    // the file extension from it (previously the header was parsed twice).
    let content_type = img_response.content_type();
    let img_mime = content_type
        .as_ref()
        .map(|mime| mime.essence().to_string());
    if let Some(mime_str) = &img_mime {
        if !mime_str.starts_with("image/") {
            return Err(ErrorKind::HTTPError(format!(
                "Invalid image MIME type: {} for {}",
                mime_str, url
            ))
            .into());
        }
    }
    let img_ext = match content_type
        .as_ref()
        .map(|mime| map_mime_subtype_to_ext(mime.subtype()).to_string())
    {
        Some(ext) => ext,
        None => return Err(ErrorKind::HTTPError("Image has no Content-Type".to_owned()).into()),
    };
    // The URL hash gives each image a unique, collision-free temp file name.
    let mut img_path = std::env::temp_dir();
    img_path.push(format!("{}.{}", hash_url(url), &img_ext));
    let mut img_file = File::create(&img_path).await?;
    img_file.write_all(&img_content).await?;
    Ok((
        url,
        img_path
            .file_name()
            .map(|os_str_name| {
                os_str_name
                    .to_str()
                    .expect("Unable to get image file name")
                    .to_string()
            })
            .unwrap(),
        img_mime,
    ))
}
/// Downloads every image referenced in `extractor.img_urls` (resolved against
/// `article_origin`), rewrites the corresponding `<img src>` attributes to
/// point at the local temp files, and replaces `extractor.img_urls` with the
/// successfully downloaded (path, mime) pairs.
///
/// Returns `Err` with the per-image errors if any download failed; the
/// successful images are still swapped into the document either way.
pub async fn download_images(
    extractor: &mut Article,
    article_origin: &Url,
    bar: &ProgressBar,
) -> Result<(), Vec<ImgError>> {
    if extractor.img_urls.len() > 0 {
        debug!(
            "Downloading {} images for {}",
            extractor.img_urls.len(),
            article_origin
        );
    }
    let img_count = extractor.img_urls.len();
    // Build one request future per image; redirects are handled by surf's
    // Redirect middleware here (unlike fetch_html's manual loop).
    let imgs_req_iter = extractor
        .img_urls
        .iter()
        .map(|(url, _)| {
            (
                url,
                surf::Client::new()
                    .with(surf::middleware::Redirect::default())
                    .get(get_absolute_url(&url, article_origin)),
            )
        })
        .enumerate()
        .map(|(img_idx, (url, req))| async move {
            bar.set_message(format!(
                "Downloading images [{}/{}]",
                img_idx + 1,
                img_count
            ));
            match req.await {
                Ok(mut img_response) => {
                    let process_response =
                        process_img_response(&mut img_response, url.as_ref()).await;
                    // Tag the failing image URL onto the error for reporting.
                    process_response.map_err(|mut e: ImgError| {
                        e.set_url(url);
                        e
                    })
                }
                Err(e) => {
                    let mut img_err: ImgError = e.into();
                    img_err.set_url(url);
                    Err(img_err)
                }
            }
        });
    // Utility closure that rewrites an <img> node's src to the downloaded
    // local file once its download succeeds.
    let replace_existing_img_src = |img_item: ImgItem| -> (String, Option<String>) {
        let (img_url, img_path, img_mime) = img_item;
        let img_ref = extractor
            .node_ref()
            .select_first(&format!("img[src='{}']", img_url))
            .expect("Image node does not exist");
        let mut img_node = img_ref.attributes.borrow_mut();
        *img_node.get_mut("src").unwrap() = img_path.clone();
        // srcset is removed because readers such as Foliate then fail to display
        // the image already downloaded and stored in src
        img_node.remove("srcset");
        (img_path, img_mime)
    };
    // Run at most 10 image downloads concurrently (fixed, independent of
    // the --max-conn article limit).
    let imgs_req_iter = stream::from_iter(imgs_req_iter)
        .buffered(10)
        .collect::<Vec<Result<_, ImgError>>>()
        .await;
    let mut errors = Vec::new();
    let mut replaced_imgs = Vec::new();
    for img_req_result in imgs_req_iter {
        match img_req_result {
            Ok(img_req) => replaced_imgs.push(replace_existing_img_src(img_req)),
            Err(e) => errors.push(e),
        }
    }
    extractor.img_urls = replaced_imgs;
    if errors.is_empty() {
        Ok(())
    } else {
        Err(errors)
    }
}
/// A download that fetched the article HTML but failed to retrieve one or
/// more of its images.
#[derive(Debug, Clone)]
pub struct PartialDownload {
    /// Source URL of the article.
    pub link: String,
    /// Extracted article title.
    pub title: String,
}
impl PartialDownload {
    /// Builds a record from borrowed link/title strings, taking owned copies.
    pub fn new(link: &str, title: &str) -> Self {
        Self {
            link: link.into(),
            title: title.into(),
        }
    }
}
/// Maps a MIME subtype to the file extension used when saving the image.
///
/// Most image subtypes ("png", "gif", ...) double as their extension; only
/// "svg+xml" and "x-icon" need translating.
fn map_mime_subtype_to_ext(subtype: &str) -> &str {
    match subtype {
        "svg+xml" => "svg",
        "x-icon" => "ico",
        _ => subtype,
    }
}
/// Utility for hashing URLs. This is used to help store files locally with unique values.
///
/// Returns the lowercase hex MD5 digest of the URL bytes; uniqueness here is
/// for file naming only, not security.
fn hash_url(url: &str) -> String {
    format!("{:x}", md5::compute(url.as_bytes()))
}
/// Resolves a possibly-relative image `url` against the article's
/// `request_url`, returning an absolute URL string.
fn get_absolute_url(url: &str, request_url: &Url) -> String {
    if Url::parse(url).is_ok() {
        // Already an absolute URL; use it verbatim.
        url.to_owned()
    } else {
        // `Url::join` performs RFC 3986 reference resolution, which covers
        // root-relative ("/img.png"), protocol-relative ("//host/img.png")
        // and plain relative ("img.png") references in one call. Unlike the
        // previous hand-rolled "{scheme}://{host}" rebuild, it also preserves
        // the base URL's port and userinfo and cannot panic on host-less
        // bases via `host_str().unwrap()`.
        //
        // Panics if the reference cannot be joined (same contract as before).
        request_url.join(url).unwrap().into()
    }
}
#[cfg(test)]
mod test {
    use super::*;
    /// Every known image MIME subtype must map to its expected extension.
    #[test]
    fn test_map_mime_type_to_ext() {
        let cases = [
            ("apng", "apng"),
            ("bmp", "bmp"),
            ("gif", "gif"),
            ("x-icon", "ico"),
            ("jpeg", "jpeg"),
            ("png", "png"),
            ("svg+xml", "svg"),
            ("tiff", "tiff"),
            ("webp", "webp"),
        ];
        for &(subtype, expected) in cases.iter() {
            assert_eq!(expected, map_mime_subtype_to_ext(subtype));
        }
    }
}
| rust | MIT | 796a34a34c365bc06191bf634918b71eaee7bd5d | 2026-01-04T20:24:03.677608Z | false |
hipstermojo/paperoni | https://github.com/hipstermojo/paperoni/blob/796a34a34c365bc06191bf634918b71eaee7bd5d/src/cli.rs | src/cli.rs | use std::{fs, num::NonZeroUsize, path::Path};
use chrono::{DateTime, Local};
use clap::{load_yaml, App, ArgMatches};
use flexi_logger::LevelFilter as LogLevel;
use itertools::Itertools;
// Module-local error alias: CLI errors wrapping builder validation failures.
type Error = crate::errors::CliError<AppConfigBuilderError>;
// Default number of concurrent article downloads (--max-conn).
const DEFAULT_MAX_CONN: usize = 8;
/// Fully-validated runtime configuration assembled from the CLI arguments.
#[derive(derive_builder::Builder, Debug)]
pub struct AppConfig {
    /// Article urls
    pub urls: Vec<String>,
    /// Maximum number of concurrent HTTP connections for article downloads.
    pub max_conn: usize,
    /// Path to file of multiple articles into a single article
    pub merged: Option<String>,
    // TODO: Change type to Path
    pub output_directory: Option<String>,
    /// Log verbosity derived from repeated -v flags / --log-to-file.
    pub log_level: LogLevel,
    /// True when the progress bar should be hidden (verbose, non-file logging).
    pub can_disable_progress_bar: bool,
    /// Process start time; used to name per-run log files.
    pub start_time: DateTime<Local>,
    pub is_logging_to_file: bool,
    /// Whether to inline a table of contents (EPUB-only, requires --merge).
    pub inline_toc: bool,
    pub css_config: CSSConfig,
    pub export_type: ExportType,
    /// Whether images are embedded inline (HTML export only).
    pub is_inlining_images: bool,
}
impl AppConfig {
pub fn init_with_cli() -> Result<AppConfig, Error> {
let yaml_config = load_yaml!("cli_config.yml");
let app = App::from_yaml(yaml_config).version(clap::crate_version!());
Self::try_from(app.get_matches())
}
fn init_merge_file(self) -> Result<Self, Error> {
self.merged
.as_deref()
.map(fs::File::create)
.transpose()
.err()
.map(|err| Err(Error::InvalidOutputPath(err.to_string())))
.unwrap_or(Ok(self))
}
fn init_logger(self) -> Result<Self, Error> {
use crate::logs;
logs::init_logger(self.log_level, &self.start_time, self.is_logging_to_file)
.map(|_| self)
.map_err(Error::LogError)
}
}
use std::convert::TryFrom;
/// Translates parsed clap matches into an [`AppConfig`], performing all
/// cross-flag validation (export-type restrictions, output directory checks,
/// URL collection/deduplication) via the generated builder.
impl<'a> TryFrom<ArgMatches<'a>> for AppConfig {
    type Error = Error;
    fn try_from(arg_matches: ArgMatches<'a>) -> Result<Self, Self::Error> {
        AppConfigBuilder::default()
            .urls({
                // Trim each candidate URL; blank entries make the whole
                // collection None (collect::<Option<Vec<_>>> short-circuits).
                let url_filter = |url: &str| {
                    let url = url.trim();
                    if !url.is_empty() {
                        Some(url.to_owned())
                    } else {
                        None
                    }
                };
                let direct_urls = arg_matches
                    .values_of("urls")
                    .and_then(|urls| urls.map(url_filter).collect::<Option<Vec<_>>>())
                    .unwrap_or(Vec::new());
                // URLs may also come one-per-line from --file.
                let file_urls = arg_matches
                    .value_of("file")
                    .map(fs::read_to_string)
                    .transpose()?
                    .and_then(|content| content.lines().map(url_filter).collect::<Option<Vec<_>>>())
                    .unwrap_or(Vec::new());
                // Merge both sources, dropping duplicates but keeping order.
                let urls = [direct_urls, file_urls]
                    .concat()
                    .into_iter()
                    .unique()
                    .collect_vec();
                if !urls.is_empty() {
                    Ok(urls)
                } else {
                    Err(Error::NoUrls)
                }
            }?)
            // NonZeroUsize rejects 0; absent flag falls back to the default.
            .max_conn(match arg_matches.value_of("max-conn") {
                Some(max_conn) => max_conn.parse::<NonZeroUsize>()?.get(),
                None => DEFAULT_MAX_CONN,
            })
            // Ensure the merged output name carries the export extension.
            .merged(arg_matches.value_of("output-name").map(|name| {
                let file_ext = format!(".{}", arg_matches.value_of("export").unwrap_or("epub"));
                if name.ends_with(&file_ext) {
                    name.to_owned()
                } else {
                    name.to_string() + &file_ext
                }
            }))
            // Progress bar is hidden when verbose output goes to the console.
            .can_disable_progress_bar(
                arg_matches.is_present("verbosity") && !arg_matches.is_present("log-to-file"),
            )
            // -v count maps to log level; --log-to-file alone implies Debug.
            .log_level(match arg_matches.occurrences_of("verbosity") {
                0 => {
                    if !arg_matches.is_present("log-to-file") {
                        LogLevel::Off
                    } else {
                        LogLevel::Debug
                    }
                }
                1 => LogLevel::Error,
                2 => LogLevel::Warn,
                3 => LogLevel::Info,
                4..=u64::MAX => LogLevel::Debug,
            })
            .is_logging_to_file(arg_matches.is_present("log-to-file"))
            // Inline ToC is only meaningful for EPUB exports.
            .inline_toc(
                (if arg_matches.is_present("inline-toc") {
                    if arg_matches.value_of("export") == Some("epub") {
                        Ok(true)
                    } else {
                        Err(Error::WrongExportInliningToC)
                    }
                } else {
                    Ok(false)
                })?,
            )
            // The output directory must already exist and be a directory.
            .output_directory(
                arg_matches
                    .value_of("output-directory")
                    .map(|output_directory| {
                        let path = Path::new(output_directory);
                        if !path.exists() {
                            // TODO: Create the directory
                            Err(Error::OutputDirectoryNotExists)
                        } else if !path.is_dir() {
                            Err(Error::WrongOutputDirectory)
                        } else {
                            Ok(output_directory.to_owned())
                        }
                    })
                    .transpose()?,
            )
            .start_time(Local::now())
            // --no-css wins over --no-header-css (clap forbids both anyway).
            .css_config(
                match (
                    arg_matches.is_present("no-css"),
                    arg_matches.is_present("no-header-css"),
                ) {
                    (true, _) => CSSConfig::None,
                    (_, true) => CSSConfig::NoHeaders,
                    _ => CSSConfig::All,
                },
            )
            .export_type({
                let export_type = arg_matches.value_of("export").unwrap_or("epub");
                if export_type == "html" {
                    ExportType::HTML
                } else {
                    ExportType::EPUB
                }
            })
            // Inlining images is only meaningful for HTML exports.
            .is_inlining_images(
                (if arg_matches.is_present("inline-images") {
                    if arg_matches.value_of("export") == Some("html") {
                        Ok(true)
                    } else {
                        Err(Error::WrongExportInliningImages)
                    }
                } else {
                    Ok(false)
                })?,
            )
            .try_init()
    }
}
impl AppConfigBuilder {
pub fn try_init(&self) -> Result<AppConfig, Error> {
self.build()
.map_err(Error::AppBuildError)?
.init_logger()?
.init_merge_file()
}
}
/// Which parts of the bundled CSS to include in the generated export.
#[derive(Clone, Debug)]
pub enum CSSConfig {
    /// Include all styles (default).
    All,
    /// Include styles except header styling (`--no-header-css`).
    NoHeaders,
    /// Ship no CSS at all (`--no-css`).
    None,
}
/// Output format selected with `--export` (defaults to EPUB).
#[derive(Clone, Debug)]
pub enum ExportType {
    HTML,
    EPUB,
}
// Tests for the clap argument declarations and the ArgMatches -> AppConfig
// conversion. The first test exercises constraints enforced by clap itself;
// the second exercises validation done in TryFrom.
#[cfg(test)]
mod test {
    use super::*;
    #[test]
    fn test_clap_config_errors() {
        let yaml_config = load_yaml!("cli_config.yml");
        let app = App::from_yaml(yaml_config);
        // It returns Ok when only a url is passed
        let result = app
            .clone()
            .get_matches_from_safe(vec!["paperoni", "http://example.org"]);
        assert!(result.is_ok());
        // It returns an error when no args are passed
        let result = app.clone().get_matches_from_safe(vec!["paperoni"]);
        assert!(result.is_err());
        assert_eq!(
            clap::ErrorKind::MissingArgumentOrSubcommand,
            result.unwrap_err().kind
        );
        // It returns an error when both output-dir and merge are used
        let result = app.clone().get_matches_from_safe(vec![
            "paperoni",
            "http://example.org",
            "--merge",
            "foo",
            "--output-dir",
            "~",
        ]);
        assert!(result.is_err());
        assert_eq!(clap::ErrorKind::ArgumentConflict, result.unwrap_err().kind);
        // It returns an error when both no-css and no-header-css are used
        let result = app.clone().get_matches_from_safe(vec![
            "paperoni",
            "http://example.org",
            "--no-css",
            "--no-header-css",
        ]);
        assert!(result.is_err());
        assert_eq!(clap::ErrorKind::ArgumentConflict, result.unwrap_err().kind);
        // It returns an error when inline-toc is used without merge
        let result = app.clone().get_matches_from_safe(vec![
            "paperoni",
            "http://example.org",
            "--inline-toc",
        ]);
        assert!(result.is_err());
        assert_eq!(
            clap::ErrorKind::MissingRequiredArgument,
            result.unwrap_err().kind
        );
        // It returns an error when inline-images is used without export
        let result = app.clone().get_matches_from_safe(vec![
            "paperoni",
            "http://example.org",
            "--inline-images",
        ]);
        assert!(result.is_err());
        assert_eq!(
            clap::ErrorKind::MissingRequiredArgument,
            result.unwrap_err().kind
        );
        // It returns an error when export is given an invalid value
        let result = app.clone().get_matches_from_safe(vec![
            "paperoni",
            "http://example.org",
            "--export",
            "pdf",
        ]);
        assert!(result.is_err());
        assert_eq!(clap::ErrorKind::InvalidValue, result.unwrap_err().kind);
        // It returns an error when a max-conn is given a negative number.
        let result = app.clone().get_matches_from_safe(vec![
            "paperoni",
            "http://example.org",
            "--max-conn",
            "-1",
        ]);
        assert!(result.is_err());
        // The cli is configured not to accept negative numbers so passing "-1" would have it be read as a flag called 1
        assert_eq!(clap::ErrorKind::UnknownArgument, result.unwrap_err().kind);
    }
    #[test]
    fn test_init_with_cli() {
        let yaml_config = load_yaml!("cli_config.yml");
        let app = App::from_yaml(yaml_config);
        // It returns an error when the urls passed are whitespace
        let matches = app.clone().get_matches_from(vec!["paperoni", ""]);
        let app_config = AppConfig::try_from(matches);
        assert!(app_config.is_err());
        assert_eq!(Error::NoUrls, app_config.unwrap_err());
        // It returns an error when inline-toc is used when exporting to HTML
        let matches = app.clone().get_matches_from(vec![
            "paperoni",
            "http://example.org",
            "--merge",
            "foo",
            "--export",
            "html",
            "--inline-toc",
        ]);
        let app_config = AppConfig::try_from(matches);
        assert!(app_config.is_err());
        assert_eq!(Error::WrongExportInliningToC, app_config.unwrap_err());
        // It returns an Ok when inline-toc is used when exporting to epub
        // NOTE: this path also creates "foo.epub" in the working directory
        // via init_merge_file.
        let matches = app.clone().get_matches_from(vec![
            "paperoni",
            "http://example.org",
            "--merge",
            "foo",
            "--export",
            "epub",
            "--inline-toc",
        ]);
        assert!(AppConfig::try_from(matches).is_ok());
        // It returns an error when inline-images is used when exporting to epub
    }
}
| rust | MIT | 796a34a34c365bc06191bf634918b71eaee7bd5d | 2026-01-04T20:24:03.677608Z | false |
hipstermojo/paperoni | https://github.com/hipstermojo/paperoni/blob/796a34a34c365bc06191bf634918b71eaee7bd5d/src/main.rs | src/main.rs | #[macro_use]
extern crate lazy_static;
use std::process::exit;
use colored::Colorize;
use comfy_table::presets::{UTF8_FULL, UTF8_HORIZONTAL_BORDERS_ONLY};
use comfy_table::{ContentArrangement, Table};
use http::download;
use indicatif::{ProgressBar, ProgressStyle};
mod cli;
mod epub;
mod errors;
mod extractor;
mod html;
/// This module is responsible for async HTTP calls for downloading
/// the HTML content and images
mod http;
mod logs;
mod moz_readability;
use cli::AppConfig;
use epub::generate_epubs;
use html::generate_html_exports;
use logs::display_summary;
/// Entry point: parse and validate the CLI arguments, then hand off to
/// [`run`]. Exits with status 1 (and a red "ERROR" line on stderr) when
/// argument parsing or validation fails.
fn main() {
    let app_config = cli::AppConfig::init_with_cli().unwrap_or_else(|err| {
        eprintln!("{}: {}", "ERROR".bold().bright_red(), err);
        exit(1);
    });
    // Nothing to do when no URLs survived validation.
    if !app_config.urls.is_empty() {
        run(app_config);
    }
}
/// Drives the whole pipeline: download all articles, export them in the
/// selected format, print the summary tables, and exit with status 1 when
/// any download or export failed (fully or partially).
fn run(app_config: AppConfig) {
    let mut errors = Vec::new();
    let mut partial_downloads = Vec::new();
    if let Some(dir_name) = &app_config.output_directory {
        let noun = if app_config.urls.len() > 1 {
            "articles"
        } else {
            "article"
        };
        println!("Downloading {} to {}", noun, dir_name);
    }
    // Hide the bar entirely when verbose console logging is on, so log lines
    // and bar redraws don't interleave.
    let bar = if app_config.can_disable_progress_bar {
        ProgressBar::hidden()
    } else {
        let enabled_bar = ProgressBar::new(app_config.urls.len() as u64);
        let style = ProgressStyle::default_bar().template(
            "{spinner:.cyan} [{elapsed_precise}] {bar:40.white} {:>8} link {pos}/{len:7} {msg:.yellow/white}",
        );
        enabled_bar.set_style(style);
        enabled_bar.enable_steady_tick(500);
        enabled_bar
    };
    // `download` pushes into `partial_downloads`/`errors` as it goes and
    // returns the successfully extracted articles.
    let articles = download(&app_config, &bar, &mut partial_downloads, &mut errors);
    bar.finish_with_message("Downloaded articles");
    let mut successful_articles_table = Table::new();
    successful_articles_table
        .load_preset(UTF8_FULL)
        .load_preset(UTF8_HORIZONTAL_BORDERS_ONLY)
        .set_content_arrangement(ContentArrangement::Dynamic);
    // Export in the chosen format; export errors join the download errors.
    match app_config.export_type {
        cli::ExportType::EPUB => {
            match generate_epubs(articles, &app_config, &mut successful_articles_table) {
                Ok(_) => (),
                Err(gen_epub_errors) => {
                    errors.extend(gen_epub_errors);
                }
            };
        }
        cli::ExportType::HTML => {
            match generate_html_exports(articles, &app_config, &mut successful_articles_table) {
                Ok(_) => (),
                Err(gen_html_errors) => errors.extend(gen_html_errors),
            }
        }
    }
    let has_errors = !errors.is_empty() || !partial_downloads.is_empty();
    display_summary(
        app_config.urls.len(),
        successful_articles_table,
        partial_downloads,
        errors,
    );
    if app_config.is_logging_to_file {
        println!(
            "Log written to paperoni_{}.log\n",
            app_config.start_time.format("%Y-%m-%d_%H-%M-%S")
        );
    } else if has_errors && !app_config.is_logging_to_file {
        println!("\nRun paperoni with the --log-to-file flag to create a log file");
    }
    // Non-zero exit signals that at least one article failed in some way.
    if has_errors {
        std::process::exit(1);
    }
}
| rust | MIT | 796a34a34c365bc06191bf634918b71eaee7bd5d | 2026-01-04T20:24:03.677608Z | false |
hipstermojo/paperoni | https://github.com/hipstermojo/paperoni/blob/796a34a34c365bc06191bf634918b71eaee7bd5d/src/logs.rs | src/logs.rs | use std::fs;
use chrono::{DateTime, Local};
use colored::*;
use comfy_table::presets::UTF8_HORIZONTAL_BORDERS_ONLY;
use comfy_table::{Cell, CellAlignment, ContentArrangement, Table};
use flexi_logger::{FileSpec, LevelFilter};
use log::error;
use crate::errors::PaperoniError;
/// Prints the end-of-run report: a one-line colored summary, then up to three
/// tables (successful articles, partial downloads, failed downloads).
/// Failed downloads are additionally written to the error log.
pub fn display_summary(
    initial_article_count: usize,
    successful_articles_table: Table,
    partial_downloads: Vec<PartialDownload>,
    errors: Vec<PaperoniError>,
) {
    let partial_downloads_count = partial_downloads.len();
    // Successes are inferred: everything that neither partially nor fully failed.
    let successfully_downloaded_count =
        initial_article_count - partial_downloads_count - errors.len();
    println!(
        "{}",
        short_summary(DownloadCount::new(
            initial_article_count,
            successfully_downloaded_count,
            partial_downloads_count,
            errors.len()
        ))
        .bold()
    );
    if successfully_downloaded_count > 0 {
        println!("{}", successful_articles_table);
    }
    if partial_downloads_count > 0 {
        println!("\n{}", "Partially failed downloads".yellow().bold());
        let mut table_partial = Table::new();
        table_partial
            .load_preset(UTF8_HORIZONTAL_BORDERS_ONLY)
            .set_header(vec![
                Cell::new("Link").set_alignment(CellAlignment::Center),
                Cell::new("Title").set_alignment(CellAlignment::Center),
            ])
            .set_content_arrangement(ContentArrangement::Dynamic);
        for partial in partial_downloads {
            table_partial.add_row(vec![&partial.link, &partial.title]);
        }
        println!("{}", table_partial);
    }
    if !errors.is_empty() {
        println!("\n{}", "Failed article downloads".bright_red().bold());
        let mut table_failed = Table::new();
        table_failed
            .load_preset(UTF8_HORIZONTAL_BORDERS_ONLY)
            .set_header(vec![
                Cell::new("Link").set_alignment(CellAlignment::Center),
                Cell::new("Reason").set_alignment(CellAlignment::Center),
            ])
            .set_content_arrangement(ContentArrangement::Dynamic);
        for error in errors {
            // Errors without a recorded source URL still get a table row.
            let error_source = error
                .article_source()
                .clone()
                .unwrap_or_else(|| "<unknown link>".to_string());
            table_failed.add_row(vec![&error_source, &format!("{}", error.kind())]);
            error!("{}\n - {}", error, error_source);
        }
        println!("{}", table_failed);
    }
}
/// Returns a one-line colored summary of successful, partial and failed
/// article downloads, with singular/plural and "All …" phrasing handled.
///
/// # Panics
/// Panics when `total != successful + partial + failed`.
fn short_summary(download_count: DownloadCount) -> String {
    if download_count.total
        != download_count.successful + download_count.partial + download_count.failed
    {
        panic!("initial_count must be equal to the sum of failed and successful count")
    }
    let get_noun = |count: usize| if count == 1 { "article" } else { "articles" };
    // Renders one colored clause, or "" when the count is zero so it can be
    // skipped when joining clauses below.
    let get_summary = |count, label, color: Color| {
        if count == 0 {
            return "".to_string();
        };
        {
            // "Article …" / "All articles …" / "N articles …"
            if count == 1 && count == download_count.total {
                "Article".to_string() + label
            } else if count == download_count.total {
                "All ".to_string() + get_noun(count) + label
            } else {
                count.to_string() + " " + get_noun(count) + label
            }
        }
        .color(color)
        .to_string()
    };
    let mut summary = get_summary(
        download_count.successful,
        " downloaded successfully",
        Color::BrightGreen,
    );
    let partial_summary = get_summary(
        download_count.partial,
        " partially failed to download",
        Color::Yellow,
    );
    // Comma-join only when both sides are non-empty.
    if !summary.is_empty() && !partial_summary.is_empty() {
        summary = summary + ", " + &partial_summary;
    } else {
        summary = summary + &partial_summary;
    }
    let failed_summary = get_summary(download_count.failed, " failed to download", Color::Red);
    if !summary.is_empty() && !failed_summary.is_empty() {
        summary = summary + ", " + &failed_summary;
    } else {
        summary = summary + &failed_summary;
    }
    summary
}
/// Tallies of how the requested articles fared; invariant (checked by
/// `short_summary`): `total == successful + partial + failed`.
struct DownloadCount {
    total: usize,
    successful: usize,
    partial: usize,
    failed: usize,
}
impl DownloadCount {
    /// Bundles the four counters; no invariant checking happens here.
    fn new(total: usize, successful: usize, partial: usize, failed: usize) -> Self {
        Self {
            total,
            successful,
            partial,
            failed,
        }
    }
}
use crate::errors::LogError as Error;
use crate::http::PartialDownload;
/// Configures the global `flexi_logger` for the `paperoni` module at the
/// given level. When `is_logging_to_file` is set, logs go to
/// `~/.paperoni/logs/` in a per-run file named with `start_time` (the
/// directory is created on demand); otherwise they go to the console.
pub fn init_logger(
    log_level: LevelFilter,
    start_time: &DateTime<Local>,
    is_logging_to_file: bool,
) -> Result<(), Error> {
    use directories::UserDirs;
    use flexi_logger::LogSpecBuilder;
    match UserDirs::new() {
        Some(user_dirs) => {
            let home_dir = user_dirs.home_dir();
            let paperoni_dir = home_dir.join(".paperoni");
            let log_dir = paperoni_dir.join("logs");
            // Only the paperoni module is filtered; other crates stay silent.
            let log_spec = LogSpecBuilder::new().module("paperoni", log_level).build();
            let formatted_timestamp = start_time.format("%Y-%m-%d_%H-%M-%S");
            let mut logger = flexi_logger::Logger::with(log_spec);
            if is_logging_to_file {
                if !paperoni_dir.is_dir() || !log_dir.is_dir() {
                    fs::create_dir_all(&log_dir)?;
                }
                // The timestamp discriminant makes each run's log file unique.
                logger = logger.log_to_file(
                    FileSpec::default()
                        .directory(log_dir)
                        .discriminant(formatted_timestamp.to_string())
                        .suppress_timestamp(),
                );
            }
            logger.start()?;
            Ok(())
        }
        // No resolvable home directory: cannot place the log folder.
        None => Err(Error::UserDirectoriesError),
    }
}
// Exhaustive phrasing checks for `short_summary` across success / partial /
// failure mixes, plus the invariant panic.
#[cfg(test)]
mod tests {
    use super::{short_summary, DownloadCount};
    use colored::*;
    #[test]
    fn test_short_summary() {
        assert_eq!(
            short_summary(DownloadCount::new(1, 1, 0, 0)),
            "Article downloaded successfully".bright_green().to_string()
        );
        assert_eq!(
            short_summary(DownloadCount::new(1, 0, 0, 1)),
            "Article failed to download".red().to_string()
        );
        assert_eq!(
            short_summary(DownloadCount::new(10, 10, 0, 0)),
            "All articles downloaded successfully"
                .bright_green()
                .to_string()
        );
        assert_eq!(
            short_summary(DownloadCount::new(10, 0, 0, 10)),
            "All articles failed to download".red().to_string()
        );
        assert_eq!(
            short_summary(DownloadCount::new(10, 8, 0, 2)),
            format!(
                "{}, {}",
                "8 articles downloaded successfully".bright_green(),
                "2 articles failed to download".red()
            )
        );
        assert_eq!(
            short_summary(DownloadCount::new(10, 1, 0, 9)),
            format!(
                "{}, {}",
                "1 article downloaded successfully".bright_green(),
                "9 articles failed to download".red()
            )
        );
        assert_eq!(
            short_summary(DownloadCount::new(7, 6, 0, 1)),
            format!(
                "{}, {}",
                "6 articles downloaded successfully".bright_green(),
                "1 article failed to download".red()
            )
        );
        // Three-way mix: success, partial and failed clauses all present.
        assert_eq!(
            short_summary(DownloadCount::new(7, 4, 2, 1)),
            format!(
                "{}, {}, {}",
                "4 articles downloaded successfully".bright_green(),
                "2 articles partially failed to download".yellow(),
                "1 article failed to download".red()
            )
        );
        assert_eq!(
            short_summary(DownloadCount::new(12, 6, 6, 0)),
            format!(
                "{}, {}",
                "6 articles downloaded successfully".bright_green(),
                "6 articles partially failed to download".yellow()
            )
        );
        assert_eq!(
            short_summary(DownloadCount::new(5, 0, 4, 1)),
            format!(
                "{}, {}",
                "4 articles partially failed to download".yellow(),
                "1 article failed to download".red()
            )
        );
        assert_eq!(
            short_summary(DownloadCount::new(4, 0, 4, 0)),
            "All articles partially failed to download"
                .yellow()
                .to_string()
        );
    }
    #[test]
    #[should_panic(
        expected = "initial_count must be equal to the sum of failed and successful count"
    )]
    fn test_short_summary_panics_on_invalid_input() {
        short_summary(DownloadCount::new(0, 12, 0, 43));
    }
}
| rust | MIT | 796a34a34c365bc06191bf634918b71eaee7bd5d | 2026-01-04T20:24:03.677608Z | false |
hipstermojo/paperoni | https://github.com/hipstermojo/paperoni/blob/796a34a34c365bc06191bf634918b71eaee7bd5d/src/moz_readability/regexes.rs | src/moz_readability/regexes.rs | /// This module contains regular expressions frequently used by moz_readability
/// All regexes that only test if a `&str` matches the regex are preceded by the
/// word "is_match". All other regexes are publicly accessible.
use regex::Regex;
/// True when the string looks like a byline/author marker.
pub fn is_match_byline(match_str: &str) -> bool {
    lazy_static! {
        static ref BYLINE_REGEX: Regex =
            Regex::new(r"(?i)byline|author|dateline|writtenby|p-author").unwrap();
    }
    BYLINE_REGEX.is_match(match_str)
}
/// True for class/id tokens that suggest main article content.
pub fn is_match_positive(match_str: &str) -> bool {
    lazy_static! {
        static ref POSITIVE_REGEX: Regex = Regex::new(r"(?i)article|body|content|entry|hentry|h-entry|main|page|pagination|post|text|blog|story").unwrap();
    }
    POSITIVE_REGEX.is_match(match_str)
}
/// True for class/id tokens that suggest boilerplate (footers, ads, widgets, …).
pub fn is_match_negative(match_str: &str) -> bool {
    lazy_static! {
        static ref NEGATIVE_REGEX: Regex = Regex::new(r"(?i)hidden|^hid$| hid$| hid |^hid |banner|combx|comment|com-|contact|foot|footer|footnote|gdpr|masthead|media|meta|outbrain|promo|related|scroll|share|shoutbox|sidebar|skyscraper|sponsor|shopping|tags|tool|widget").unwrap();
    }
    NEGATIVE_REGEX.is_match(match_str)
}
/// True when the URL belongs to a known embedded-video host.
pub fn is_match_videos(match_str: &str) -> bool {
    lazy_static! {
        static ref VIDEOS_REGEX: Regex = Regex::new(r"(?i)//(www\.)?((dailymotion|youtube|youtube-nocookie|player\.vimeo|v\.qq)\.com|(archive|upload\.wikimedia)\.org|player\.twitch\.tv)").unwrap();
    }
    VIDEOS_REGEX.is_match(match_str)
}
/// True for class/id tokens of nodes unlikely to hold article content.
pub fn is_match_unlikely(match_str: &str) -> bool {
    lazy_static! {
        static ref UNLIKELY_REGEX: Regex = Regex::new(r"(?i)-ad-|ai2html|banner|breadcrumbs|combx|comment|community|cover-wrap|disqus|extra|footer|gdpr|header|legends|menu|related|remark|replies|rss|shoutbox|sidebar|skyscraper|social|sponsor|supplemental|ad-break|agegate|pagination|pager|popup|yom-remote").unwrap();
    }
    UNLIKELY_REGEX.is_match(match_str)
}
/// True for tokens that rescue a node otherwise matched as "unlikely".
pub fn is_match_ok_maybe(match_str: &str) -> bool {
    lazy_static! {
        static ref OK_MAYBE_REGEX: Regex =
            Regex::new(r"(?i)and|article|body|column|content|main|shadow").unwrap();
    }
    OK_MAYBE_REGEX.is_match(match_str)
}
/// True when text contains a sentence-ending period (". " or "." at end).
pub fn is_match_node_content(match_str: &str) -> bool {
    lazy_static! {
        static ref NODE_CONTENT_REGEX: Regex = Regex::new(r"\.( |$)").unwrap();
    }
    NODE_CONTENT_REGEX.is_match(match_str)
}
/// True for class/id tokens of social "share" widgets.
pub fn is_match_share_elems(match_str: &str) -> bool {
    lazy_static! {
        static ref SHARE_ELEMS_REGEX: Regex =
            Regex::new(r"(?i)(\b|_)(share|sharedaddy)(\b|_)").unwrap();
    }
    SHARE_ELEMS_REGEX.is_match(match_str)
}
/// True when the string ends in a non-whitespace character.
pub fn is_match_has_content(match_str: &str) -> bool {
    lazy_static! {
        static ref HAS_CONTENT_REGEX: Regex = Regex::new(r"\S$").unwrap();
    }
    HAS_CONTENT_REGEX.is_match(match_str)
}
/// True when the string contains a common raster-image file extension.
pub fn is_match_img_ext(match_str: &str) -> bool {
    lazy_static! {
        static ref IMG_EXT_REGEX: Regex = Regex::new(r"(?i)\.(jpg|jpeg|png|webp)").unwrap();
    }
    IMG_EXT_REGEX.is_match(match_str)
}
/// True when the string looks like a srcset entry (image URL + descriptor).
pub fn is_match_srcset(match_str: &str) -> bool {
    lazy_static! {
        static ref SRCSET_REGEX: Regex = Regex::new(r"\.(jpg|jpeg|png|webp)\s+\d").unwrap();
    }
    SRCSET_REGEX.is_match(match_str)
}
/// True when the whole string is a single image URL/path.
pub fn is_match_src_regex(match_str: &str) -> bool {
    lazy_static! {
        static ref SRC_REGEX: Regex = Regex::new(r"^\s*\S+\.(jpg|jpeg|png|webp)\S*\s*$").unwrap();
    }
    SRC_REGEX.is_match(match_str)
}
/// True for meta-tag name attributes that carry article metadata
/// (author, title, description, site_name — optionally namespaced).
pub fn is_match_name_pattern(match_str: &str) -> bool {
    lazy_static! {
        static ref NAME_PATTERN_REGEX: Regex = Regex::new(r"(?i)\s*(?:(dc|dcterm|og|twitter|weibo:(article|webpage))\s*[\.:]\s*)?(author|creator|description|title|site_name)\s*$").unwrap();
    }
    NAME_PATTERN_REGEX.is_match(match_str)
}
/// True when a page title contains a separator like " | ", " - " or " » ".
pub fn is_match_title_separator(match_str: &str) -> bool {
    lazy_static! {
        static ref TITLE_SEPARATOR_REGEX: Regex = Regex::new(r" [\|\-\\/>»] ").unwrap();
    }
    TITLE_SEPARATOR_REGEX.is_match(match_str)
}
/// True for the subset of separators that split site name from title.
pub fn is_match_has_title_separator(match_str: &str) -> bool {
    lazy_static! {
        static ref HAS_TITLE_SEPARATOR_REGEX: Regex = Regex::new(r" [\\/>»] ").unwrap();
    }
    HAS_TITLE_SEPARATOR_REGEX.is_match(match_str)
}
// Shared, publicly accessible regexes (compiled once) used for capturing and
// replacing rather than plain matching.
lazy_static! {
    // Collapses runs of whitespace to normalize extracted text.
    pub static ref NORMALIZE_REGEX: Regex = Regex::new(r"\s{2,}").unwrap();
    // Matches the header of a base64 data: URL, capturing its MIME type.
    pub static ref B64_DATA_URL_REGEX: Regex =
        Regex::new(r"(?i)^data:\s*([^\s;,]+)\s*;\s*base64\s*").unwrap();
    pub static ref BASE64_REGEX: Regex = Regex::new(r"(?i)base64\s*").unwrap();
    // Meta-tag `property` attributes carrying article metadata.
    pub static ref PROPERTY_REGEX: Regex = Regex::new(
        r"(?i)\s*(dc|dcterm|og|twitter)\s*:\s*(author|creator|description|title|site_name)\s*"
    )
    .unwrap();
    // Splits srcset entries into (url, size descriptor, trailing separator).
    pub static ref SRCSET_CAPTURE_REGEX: Regex =
        Regex::new(r"(\S+)(\s+[\d.]+[xw])?(\s*(?:,|$))").unwrap();
    pub static ref REPLACE_WHITESPACE_REGEX: Regex = Regex::new(r"\s").unwrap();
    pub static ref REPLACE_DOT_REGEX: Regex = Regex::new(r"\.").unwrap();
    // Named HTML entities that get unescaped in titles.
    pub static ref REPLACE_HTML_ESCAPE_REGEX: Regex =
        Regex::new("&(quot|amp|apos|lt|gt);").unwrap();
    // Numeric character references, hex (group 1) or decimal (group 2).
    pub static ref REPLACE_HEX_REGEX: Regex =
        Regex::new(r"(?i)&#(?:x([0-9a-z]{1,4})|([0-9]{1,4}));").unwrap();
    // Capture text before / after the first title separator.
    pub static ref REPLACE_START_SEPARATOR_REGEX: Regex =
        Regex::new(r"(?i)(?P<start>.*)[\|\-\\/>»] .*").unwrap();
    pub static ref REPLACE_END_SEPARATOR_REGEX: Regex =
        Regex::new(r"(?i)[^\|\-\\/>»]*[\|\-\\/>»](?P<end>.*)").unwrap();
    pub static ref REPLACE_MULTI_SEPARATOR_REGEX: Regex = Regex::new(r"[\|\-\\/>»]+").unwrap();
}
| rust | MIT | 796a34a34c365bc06191bf634918b71eaee7bd5d | 2026-01-04T20:24:03.677608Z | false |
hipstermojo/paperoni | https://github.com/hipstermojo/paperoni/blob/796a34a34c365bc06191bf634918b71eaee7bd5d/src/moz_readability/mod.rs | src/moz_readability/mod.rs | use std::collections::{BTreeMap, HashMap, HashSet};
use std::str::FromStr;
use html5ever::{LocalName, Namespace, QualName};
use kuchiki::{
iter::{Descendants, Elements, Select},
traits::*,
NodeData, NodeRef,
};
use log::info;
use url::Url;
use crate::errors::{ErrorKind, PaperoniError};
// Minimum number of characters an extracted article must have.
const DEFAULT_CHAR_THRESHOLD: usize = 500;
// Bit flags controlling which cleanup passes run (see Readability::flags).
const FLAG_STRIP_UNLIKELYS: u32 = 0x1;
const FLAG_WEIGHT_CLASSES: u32 = 0x2;
const FLAG_CLEAN_CONDITIONALLY: u32 = 0x4;
// Attribute name used to cache a node's readability score on the DOM.
const READABILITY_SCORE: &'static str = "readability-score";
const HTML_NS: &'static str = "http://www.w3.org/1999/xhtml";
// TODO: Change to HashSet
// Elements considered phrasing content (inline-level) by the algorithm.
const PHRASING_ELEMS: [&str; 39] = [
    "abbr", "audio", "b", "bdo", "br", "button", "cite", "code", "data", "datalist", "dfn", "em",
    "embed", "i", "img", "input", "kbd", "label", "mark", "math", "meter", "noscript", "object",
    "output", "progress", "q", "ruby", "samp", "script", "select", "small", "span", "strong",
    "sub", "sup", "textarea", "time", "var", "wbr",
];
// TODO: Change to HashSet
// Elements whose text contributes to the content score by default.
const DEFAULT_TAGS_TO_SCORE: [&str; 9] =
    ["section", "h2", "h3", "h4", "h5", "h6", "p", "td", "pre"];
// TODO: Change to HashSet
const ALTER_TO_DIV_EXCEPTIONS: [&str; 4] = ["div", "article", "section", "p"];
// Presentational attributes stripped during cleanup.
const PRESENTATIONAL_ATTRIBUTES: [&str; 12] = [
    "align",
    "background",
    "bgcolor",
    "border",
    "cellpadding",
    "cellspacing",
    "frame",
    "hspace",
    "rules",
    "style",
    "valign",
    "vspace",
];
// Descendants whose presence marks a <table> as a real data table.
const DATA_TABLE_DESCENDANTS: [&str; 5] = ["col", "colgroup", "tfoot", "thead", "th"];
// TODO: Change to HashSet
const DEPRECATED_SIZE_ATTRIBUTE_ELEMS: [&str; 5] = ["table", "th", "td", "hr", "pre"];
pub mod regexes;
/// Port of Mozilla's Readability article-extraction algorithm operating on a
/// kuchiki DOM tree.
pub struct Readability {
    /// Root of the parsed HTML document.
    root_node: NodeRef,
    /// Author byline, when one was detected.
    byline: Option<String>,
    /// Title as extracted during metadata parsing.
    article_title: String,
    /// Extracted article content, populated by a successful `parse`.
    pub article_node: Option<NodeRef>,
    /// Text direction (`dir` attribute) of the article, when detected.
    article_dir: Option<String>,
    /// Bitmask of FLAG_* values selecting cleanup passes.
    flags: u32,
    pub metadata: MetaData,
}
/// Row/column dimensions of a table, used when deciding whether a <table>
/// holds data or layout.
#[derive(Debug, PartialEq)]
struct SizeInfo {
    rows: usize,
    columns: usize,
}
impl Readability {
    /// Parses `html_str` into a DOM and prepares a parser with all cleanup
    /// flags enabled; call [`Self::parse`] afterwards to extract the article.
    pub fn new(html_str: &str) -> Self {
        Self {
            root_node: kuchiki::parse_html().one(html_str),
            byline: None,
            article_title: "".into(),
            article_node: None,
            article_dir: None,
            // All cleanup passes enabled by default.
            flags: FLAG_STRIP_UNLIKELYS | FLAG_WEIGHT_CLASSES | FLAG_CLEAN_CONDITIONALLY,
            metadata: MetaData::new(),
        }
    }
    /// Runs the full extraction pipeline on the parsed document: noscript
    /// unwrapping, script removal, document prep, metadata extraction,
    /// article grabbing and post-processing (URL fixups use `url`).
    ///
    /// On success, results are left in `self.article_node` / `self.metadata`.
    pub fn parse(&mut self, url: &str) -> Result<(), PaperoniError> {
        self.unwrap_no_script_tags();
        self.remove_scripts();
        self.prep_document();
        self.metadata = self.get_article_metadata();
        self.article_title = self.metadata.title.clone();
        self.grab_article()?;
        self.post_process_content(url);
        Ok(())
    }
    /// Recursively check if node is image, or if node contains exactly only one image
    /// whether as a direct child or as its descendants.
    ///
    /// Whitespace-only text nodes are ignored when counting children; any
    /// non-empty text content disqualifies the node.
    fn is_single_image(node_ref: &NodeRef) -> bool {
        if let Some(element) = node_ref.as_element() {
            if &element.name.local == "img" {
                return true;
            }
        }

        // Must have exactly one meaningful child and no visible text of its own.
        if node_ref.children().filter(Self::has_content).count() != 1
            || !node_ref.text_contents().trim().is_empty()
        {
            return false;
        }

        // Recurse into the single meaningful child.
        return Readability::is_single_image(
            &node_ref
                .children()
                .filter(Self::has_content)
                .next()
                .expect("Unable to get first child which should exist"),
        );
    }
    /// True for every node except whitespace-only text nodes.
    fn has_content(node_ref: &NodeRef) -> bool {
        match node_ref.data() {
            NodeData::Text(text) => !text.borrow().trim().is_empty(),
            _ => true,
        }
    }
    /// Find all <noscript> that are located after <img> nodes, and which contain only one <img> element.
    /// Replace the first image with the image from inside the <noscript> tag, and remove the <noscript> tag.
    /// This improves the quality of the images we use on some sites (e.g. Medium).
    fn unwrap_no_script_tags(&mut self) {
        // Pass 1: drop <img> elements that have no usable source at all (no
        // src/srcset/data-src/data-srcset and no image-extension value in any
        // attribute) — placeholders that a <noscript> copy will replace.
        if let Ok(imgs) = self.root_node.select("img") {
            let mut nodes = imgs.filter(|img_node_ref| {
                let img_attrs = img_node_ref.attributes.borrow();
                !img_attrs.map.iter().any(|(name, attr)| {
                    &name.local == "src"
                        || &name.local == "srcset"
                        || &name.local == "data-src"
                        || &name.local == "data-srcset"
                        || regexes::is_match_img_ext(&attr.value)
                })
            });
            // Advance the iterator before detaching so it is never left
            // standing on a node that was just removed from the tree.
            let mut node_ref = nodes.next();
            while let Some(img_ref) = node_ref {
                node_ref = nodes.next();
                img_ref.as_node().detach();
            }
        }
        // Pass 2: for each <noscript> whose content is exactly one image,
        // swap that image in for the (typically lazy-loaded) image that
        // precedes the <noscript>.
        if let Ok(noscripts) = self.root_node.select("noscript") {
            for noscript in noscripts {
                // <noscript> content is raw text in the parsed DOM; re-parse
                // it in a <div> context so its elements can be inspected.
                let inner_node_ref = kuchiki::parse_fragment(
                    QualName::new(None, Namespace::from(HTML_NS), LocalName::from("div")),
                    Vec::new(),
                )
                .one(noscript.text_contents());
                if !Self::is_single_image(&inner_node_ref) {
                    continue;
                }
                if let Some(mut prev_elem) = noscript.as_node().previous_sibling() {
                    // TODO: Fix this to have a better way of extracting nodes that are elements
                    // Walk back over text/comment siblings to the nearest element.
                    while prev_elem.as_element().is_none() {
                        match prev_elem.previous_sibling() {
                            Some(new_prev) => prev_elem = new_prev,
                            None => break,
                        };
                    }
                    if Self::is_single_image(&prev_elem) && prev_elem.as_element().is_some() {
                        // The preceding "single image" may be the <img> itself
                        // or a wrapper containing one; dig out the <img>.
                        let prev_img = if &prev_elem.as_element().unwrap().name.local != "img" {
                            prev_elem.select_first("img").unwrap().as_node().clone()
                        } else {
                            prev_elem.clone()
                        };
                        let new_img = inner_node_ref.select_first("img").unwrap();
                        let prev_attrs = prev_img.as_element().unwrap().attributes.borrow();
                        // Only non-empty, source-bearing attributes are worth
                        // carrying over onto the replacement image.
                        let prev_attrs = prev_attrs.map.iter().filter(|(attr, val)| {
                            !val.value.trim().is_empty()
                                && (&attr.local == "src"
                                    || &attr.local == "srcset"
                                    || regexes::is_match_img_ext(&val.value))
                        });
                        for (prev_attr, prev_value) in prev_attrs {
                            // Skip attributes the new image already holds with
                            // the same value.
                            match new_img.attributes.borrow().get(&prev_attr.local) {
                                Some(value) => {
                                    if value == prev_value.value {
                                        continue;
                                    }
                                }
                                None => (),
                            }
                            let attr_name: &str = &prev_attr.local;
                            let mut attr_name = attr_name.to_owned();
                            // Preserve the replacement's own attribute by
                            // stashing the old value under a data-old-* name.
                            if new_img.attributes.borrow().contains(attr_name.clone()) {
                                let new_name = format!("data-old-{}", &attr_name);
                                attr_name = new_name;
                            }
                            new_img
                                .attributes
                                .borrow_mut()
                                .insert(attr_name, prev_value.value.clone());
                        }
                        // Replace the original image with the <noscript> copy.
                        prev_elem.insert_after(new_img.as_node().clone());
                        prev_elem.detach();
                    }
                }
            }
        }
    }
/// Removes script tags from the document.
fn remove_scripts(&mut self) {
match self.root_node.select("script") {
Ok(mut script_elems) => {
let mut next_script = script_elems.next();
while let Some(next_script_ref) = next_script {
next_script = script_elems.next();
next_script_ref.as_node().detach();
}
}
Err(_) => (),
}
match self.root_node.select("noscript") {
Ok(mut noscript_elems) => {
let mut next_noscript = noscript_elems.next();
while let Some(noscript_ref) = next_noscript {
next_noscript = noscript_elems.next();
noscript_ref.as_node().detach();
}
}
Err(_) => (),
}
}
/// Prepare the HTML document for readability to scrape it. This includes things like stripping
/// CSS, and handling terrible markup.
fn prep_document(&mut self) {
match self.root_node.select("style") {
Ok(mut style_elems) => {
let mut style_elem = style_elems.next();
while let Some(style_ref) = style_elem {
style_elem = style_elems.next();
style_ref.as_node().detach();
}
}
Err(_) => (),
}
self.replace_brs();
match self.root_node.select("font") {
Ok(nodes_iter) => Self::replace_node_tags(nodes_iter, "span"),
Err(_) => (),
}
}
    /// Replaces 2 or more successive <br> elements with a single <p>.
    /// Whitespace between <br> elements are ignored. For example:
    /// <div>foo<br>bar<br> <br><br>abc</div>
    /// will become:
    /// <div>foo<br>bar<p>abc</p></div>
    fn replace_brs(&mut self) {
        if let Ok(mut br_tags) = self.root_node.select("br") {
            // The uses of `next_element` here are safe as it explicitly ensures the next element is an element node
            while let Some(br_tag) = br_tags.next() {
                // Skip forward over any additional <br>s (ignoring whitespace)
                // and drop them; `replaced` records whether this was a run of
                // two or more that needs the <p> treatment.
                let mut next = Self::next_element(br_tag.as_node().next_sibling(), false);
                let mut replaced = false;
                while let Some(next_elem) = next {
                    if next_elem.as_element().is_some()
                        && &next_elem.as_element().as_ref().unwrap().name.local == "br"
                    {
                        replaced = true;
                        // Grab the successor before detaching the current <br>.
                        let br_sibling = next_elem.next_sibling();
                        next = Self::next_element(br_sibling, false);
                        next_elem.detach();
                    } else {
                        break;
                    }
                }
                if replaced {
                    // Swap the surviving <br> for an empty <p>, then pull the
                    // following phrasing content into it.
                    let p = NodeRef::new_element(
                        QualName::new(None, Namespace::from(HTML_NS), LocalName::from("p")),
                        BTreeMap::new(),
                    );
                    br_tag.as_node().insert_before(p);
                    // Re-fetch the inserted <p> as it sits in the tree.
                    let p = br_tag.as_node().previous_sibling().unwrap();
                    br_tag.as_node().detach();
                    next = p.next_sibling();
                    while next.is_some() {
                        let next_sibling = next.unwrap();
                        // A fresh double-<br> run ends this paragraph.
                        if let Some(next_elem) = next_sibling.as_element() {
                            if &next_elem.name.local == "br" {
                                if let Some(second_sibling) = next_sibling.next_sibling() {
                                    if second_sibling.as_element().is_some()
                                        && "br" == &second_sibling.as_element().unwrap().name.local
                                    {
                                        break;
                                    }
                                }
                            }
                        }
                        // Non-phrasing content also terminates the paragraph.
                        if !Self::is_phrasing_content(&next_sibling) {
                            break;
                        }
                        // Move the sibling into the <p>, remembering its
                        // successor first since `append` re-parents it.
                        let sibling = next_sibling.next_sibling();
                        p.append(next_sibling);
                        next = sibling;
                    }
                    // Trim leading and trailing whitespace nodes from the new <p>.
                    while let Some(first_child) = p.first_child() {
                        if Self::is_whitespace(&first_child) {
                            first_child.detach();
                        } else {
                            break;
                        }
                    }
                    while let Some(last_child) = p.last_child() {
                        if Self::is_whitespace(&last_child) {
                            last_child.detach();
                        } else {
                            break;
                        }
                    }
                    // <p> cannot nest; demote a parent <p> to <div>.
                    if let Some(parent) = p.parent() {
                        if &parent.as_element().as_ref().unwrap().name.local == "p" {
                            Self::set_node_tag(&parent, "div");
                        }
                    }
                }
            }
        }
    }
/// Iterates over a Select, and calls set_node_tag for each node.
fn replace_node_tags(nodes: Select<Elements<Descendants>>, name: &str) {
for node in nodes {
Self::set_node_tag(node.as_node(), name);
}
}
/// Replaces the specified NodeRef by replacing its name. This works by copying over its
/// children and its attributes.
fn set_node_tag(node_ref: &NodeRef, name: &str) -> NodeRef {
match node_ref.as_element() {
Some(elem) => {
let attributes = elem.attributes.borrow().clone().map.into_iter();
let replacement = NodeRef::new_element(
QualName::new(None, Namespace::from(HTML_NS), LocalName::from(name)),
attributes,
);
for child in node_ref.children() {
replacement.append(child);
}
node_ref.insert_before(replacement);
let new_node = node_ref.previous_sibling().unwrap();
node_ref.detach();
return new_node;
}
None => (),
}
node_ref.clone()
}
fn is_whitespace(node_ref: &NodeRef) -> bool {
match node_ref.data() {
NodeData::Element(elem_data) => &elem_data.name.local == "br",
NodeData::Text(text_ref) => text_ref.borrow().trim().len() == 0,
_ => false,
}
}
/// Finds the next element, starting from the given node, and ignoring
/// whitespace in between. If the given node is an element, the same node is
/// returned.
/// The must_be_element argument ensure the next element is actually an element node.
/// This is likely to factored out into a new function.
fn next_element(node_ref: Option<NodeRef>, must_be_element: bool) -> Option<NodeRef> {
// TODO: Could probably be refactored to use the elements method
let mut node_ref = node_ref;
while node_ref.is_some() {
match node_ref.as_ref().unwrap().data() {
NodeData::Element(_) => break,
_ => {
if node_ref.as_ref().unwrap().text_contents().trim().is_empty() {
node_ref = node_ref.as_ref().unwrap().next_sibling();
} else if must_be_element
&& !node_ref.as_ref().unwrap().text_contents().trim().is_empty()
{
node_ref = node_ref.as_ref().unwrap().next_sibling();
} else {
break;
}
}
}
}
node_ref
}
/// Determine if a node qualifies as phrasing content.
/// https://developer.mozilla.org/en-US/docs/Web/Guide/HTML/Content_categories#Phrasing_content
fn is_phrasing_content(node_ref: &NodeRef) -> bool {
node_ref.as_text().is_some()
|| match node_ref.as_element() {
Some(elem) => {
let name: &str = &elem.name.local;
PHRASING_ELEMS.contains(&name)
|| ((name == "a" || name == "del" || name == "ins")
&& node_ref
.children()
.all(|child_ref| Self::is_phrasing_content(&child_ref)))
}
None => false,
}
}
    ///Attempts to get excerpt and byline metadata for the article. @return Object with optional "excerpt" and "byline" properties
    ///
    /// Harvests <meta> tags into a normalized key/value map, then resolves
    /// title, byline, excerpt and site name from a priority-ordered list of
    /// known keys, falling back to `get_article_title` for the title.
    fn get_article_metadata(&self) -> MetaData {
        // Raw key/value pairs from <meta> tags, keyed by the normalized
        // (lowercased, whitespace-stripped) property/name.
        let mut values: HashMap<String, String> = HashMap::new();
        let mut meta_data = MetaData::new();
        if let Ok(meta_elems) = self.root_node.select("meta") {
            meta_elems
                .filter(|node_ref| {
                    // Only tags that actually carry a content value matter.
                    let node_attr = node_ref.attributes.borrow();
                    node_attr.get("content").is_some()
                })
                .for_each(|node_ref| {
                    let node_attr = node_ref.attributes.borrow();
                    let content = node_attr.get("content").unwrap();
                    let name_attr = node_attr.get("name");
                    let mut matches = None;
                    // Prefer the `property` attribute (OpenGraph-style keys)…
                    if let Some(property) = node_attr.get("property") {
                        matches = regexes::PROPERTY_REGEX.captures(property);
                        if let Some(captures) = &matches {
                            for capture in captures.iter() {
                                let mut name = capture.unwrap().as_str().to_lowercase();
                                name = regexes::REPLACE_WHITESPACE_REGEX
                                    .replace_all(&name, "")
                                    .to_string();
                                values.insert(name, content.trim().to_string());
                            }
                        }
                    }
                    // …falling back to the `name` attribute when `property`
                    // did not match (dots are normalized to colons).
                    if matches.is_none() && name_attr.is_some() {
                        let name_val = name_attr.unwrap();
                        if regexes::is_match_name_pattern(name_val) {
                            let name = name_val.to_lowercase();
                            let name = regexes::REPLACE_WHITESPACE_REGEX.replace_all(&name, "");
                            let name = regexes::REPLACE_DOT_REGEX.replace_all(&name, ":");
                            values.insert(name.to_string(), content.trim().to_string());
                        }
                    }
                });
        }
        // Title: first key present wins; fall back to the <title>/<h1>
        // heuristics when absent or empty.
        let meta_title_keys = [
            "dc:title",
            "dcterm:title",
            "og:title",
            "weibo:article:title",
            "weibo:webpage:title",
            "title",
            "twitter:title",
        ];
        meta_data.title = if let Some(key) = meta_title_keys
            .iter()
            .find(|key| values.contains_key(**key))
        {
            let title = values.get(*key).map(|title| title.to_owned()).unwrap();
            if title.is_empty() {
                self.get_article_title()
            } else {
                title
            }
        } else {
            self.get_article_title()
        };
        // Byline: first matching key, if any.
        let meta_byline_keys = ["dc:creator", "dcterm:creator", "author"];
        meta_data.byline = {
            let possible_key = meta_byline_keys
                .iter()
                .find(|key| values.contains_key(**key));
            if let Some(actual_key) = possible_key {
                values.get(*actual_key).map(|byline| byline.to_owned())
            } else {
                None
            }
        };
        // Excerpt: first matching description key, if any.
        let meta_excerpt_keys = [
            "dc:description",
            "dcterm:description",
            "og:description",
            "weibo:article:description",
            "weibo:webpage:description",
            "description",
            "twitter:description",
        ];
        meta_data.excerpt = {
            let possible_key = meta_excerpt_keys
                .iter()
                .find(|key| values.contains_key(**key));
            if let Some(actual_key) = possible_key {
                values.get(*actual_key).map(|excerpt| excerpt.to_owned())
            } else {
                None
            }
        };
        meta_data.site_name = values
            .get("og:site_name")
            .map(|site_name| site_name.to_owned());
        // HTML entities in metadata text are decoded in place.
        Self::unescape_html_entities(&mut meta_data.title);
        if meta_data.byline.is_some() {
            Self::unescape_html_entities(&mut meta_data.byline.as_mut().unwrap());
        }
        if meta_data.excerpt.is_some() {
            Self::unescape_html_entities(&mut meta_data.excerpt.as_mut().unwrap());
        }
        if meta_data.site_name.is_some() {
            Self::unescape_html_entities(&mut meta_data.site_name.as_mut().unwrap());
        }
        meta_data
    }
/// Converts some of the common HTML entities in string to their corresponding characters.
fn unescape_html_entities(value: &mut String) {
if !value.is_empty() {
// TODO: Extract this
let mut html_escape_map: HashMap<&str, &str> = HashMap::new();
html_escape_map.insert("lt", "<");
html_escape_map.insert("gt", ">");
html_escape_map.insert("amp", "&");
html_escape_map.insert("quot", "\"");
html_escape_map.insert("apos", "'");
let mut new_value = regexes::REPLACE_HTML_ESCAPE_REGEX
.replace_all(&value, |captures: ®ex::Captures| {
html_escape_map[&captures[1]].to_string()
})
.to_string();
new_value = regexes::REPLACE_HEX_REGEX
.replace_all(&new_value, |captures: ®ex::Captures| {
let num = if let Some(hex_capture) = captures.get(1) {
u16::from_str_radix(hex_capture.as_str(), 16)
} else if let Some(dec_capture) = captures.get(2) {
u16::from_str(dec_capture.as_str())
} else {
unreachable!("Unable to match any of the captures");
};
String::from_utf16_lossy(&[num.unwrap()])
})
.to_string();
*value = new_value;
}
}
    /// Get the article title as an H1.
    ///
    /// Strips site-name decorations around common separators, handles
    /// "Site: Title" colon forms, and falls back to a lone <h1> when the
    /// <title> looks implausibly short or long. If stripping removed too
    /// much, the original <title> text is restored.
    fn get_article_title(&self) -> String {
        let mut cur_title = self
            .root_node
            .select_first("title")
            .map(|title| title.text_contents().trim().to_string())
            .unwrap_or("".to_string());
        let orig_title = cur_title.clone();
        let mut title_had_hierarchical_separators = false;
        let word_count = |s: &str| -> usize { s.split_whitespace().count() };
        if regexes::is_match_title_separator(&cur_title) {
            // "Title | Site" style: keep the part before the separator,
            // falling back to the part after it when too few words remain.
            title_had_hierarchical_separators = regexes::is_match_has_title_separator(&cur_title);
            cur_title = regexes::REPLACE_START_SEPARATOR_REGEX
                .replace_all(&orig_title, "$start")
                .to_string();
            if word_count(&cur_title) < 3 {
                cur_title = regexes::REPLACE_END_SEPARATOR_REGEX
                    .replace_all(&orig_title, "$end")
                    .to_string();
            }
        } else if cur_title.contains(": ") {
            // "Site: Title" style — only trim when no <h1>/<h2> heading
            // carries the full title verbatim.
            let trimmed_title = cur_title.trim();
            let is_match_heading = self
                .root_node
                .select("h1, h2")
                .unwrap()
                .any(|heading| heading.text_contents().trim() == trimmed_title);
            if !is_match_heading {
                // Prefer the text after the *last* colon, then after the
                // first, unless the pre-colon part is long enough to be the
                // title itself.
                let mut idx = orig_title.rfind(":").unwrap() + 1;
                let mut new_title = &orig_title[idx..];
                if word_count(new_title) < 3 {
                    idx = orig_title.find(":").unwrap() + 1;
                    new_title = &orig_title[idx..];
                } else if word_count(&orig_title[0..orig_title.find(":").unwrap()]) > 5 {
                    new_title = &orig_title;
                }
                cur_title = new_title.to_string();
            }
        } else if cur_title.len() > 150 || cur_title.len() < 15 {
            // Implausible <title> length: use the document's lone <h1>, if
            // there is exactly one.
            let mut h1_nodes = self.root_node.select("h1").unwrap();
            let h1_count = self.root_node.select("h1").unwrap().count();
            if h1_count == 1 {
                cur_title = Self::get_inner_text(h1_nodes.next().unwrap().as_node(), None);
            }
        }
        // Collapse internal whitespace runs.
        cur_title = regexes::NORMALIZE_REGEX
            .replace_all(cur_title.trim(), " ")
            .to_string();
        // If stripping left four words or fewer (and the separators were not
        // purely hierarchical), assume over-trimming and restore the original.
        let cur_word_count = word_count(&cur_title);
        if cur_word_count <= 4
            && (!title_had_hierarchical_separators
                || cur_word_count
                    != word_count(
                        &regexes::REPLACE_MULTI_SEPARATOR_REGEX.replace_all(&orig_title, ""),
                    ) - 1)
        {
            cur_title = orig_title;
        }
        cur_title
    }
    /// Removes the class="" attribute from every element in the given subtree, except those that
    /// match CLASSES_TO_PRESERVE and the classesToPreserve array from the options object.
    fn clean_classes(&mut self) {
        // TODO: This should accessed from Self
        // NOTE(review): the preserve set is currently always empty, so every
        // class attribute in the article subtree ends up removed.
        let classes_to_preserve: HashSet<&str> = HashSet::new();
        if let Some(article_node) = &mut self.article_node {
            for elem in article_node.inclusive_descendants().elements() {
                let mut elem_attrs = elem.attributes.borrow_mut();
                if let Some(class_list) = elem_attrs.get_mut("class") {
                    // Keep only whitelisted classes. The fold leaves a single
                    // leading space when any survive (harmless in HTML).
                    let filtered_class: String = class_list
                        .split_whitespace()
                        .filter(|class| classes_to_preserve.contains(class))
                        .fold("".to_string(), |acc, x| acc + " " + x);
                    if filtered_class.is_empty() {
                        elem_attrs.remove("class");
                    } else {
                        *class_list = filtered_class;
                    }
                }
            }
        }
    }
    /// Converts each <a> and <img> uri in the given element to an absolute URI, ignoring #ref URIs.
    ///
    /// Also rewrites media `src`/`poster`/`srcset` attributes and replaces
    /// `javascript:` links with their plain content.
    fn fix_relative_uris(&mut self, document_uri: &str) {
        if let Some(article_node) = &mut self.article_node {
            let document_uri =
                Url::parse(document_uri).expect("Unable to parse the document's URI");
            // Resolution base: the first <base href> in the document (itself
            // resolved against the document URI when relative), else the
            // document URI itself.
            let base_uri = self
                .root_node
                .select("base")
                .unwrap()
                .filter(|node_ref| {
                    let node_attrs = node_ref.attributes.borrow();
                    node_attrs.contains("href")
                })
                .map(|node_ref| {
                    let node_attrs = node_ref.attributes.borrow();
                    let href = node_attrs.get("href").unwrap();
                    match Url::parse(href) {
                        Ok(url) => url,
                        Err(e) => match e {
                            url::ParseError::RelativeUrlWithoutBase => {
                                match document_uri.join(href) {
                                    Ok(joined_url) => joined_url,
                                    Err(e) => panic!(
                                        "{:} unable to parse url {:?} on element {}",
                                        e, href, &node_ref.name.local
                                    ),
                                }
                            }
                            e => panic!(
                                "{:} unable to parse url {:?} on element {}",
                                e, href, &node_ref.name.local
                            ),
                        },
                    }
                })
                .next()
                .unwrap_or(document_uri.clone());
            // In-document fragment links stay relative when no distinct
            // <base> is in effect; already-absolute URLs pass through.
            let to_absolute_uri = |uri_str: &str| -> String {
                if base_uri == document_uri && uri_str.starts_with("#") {
                    return uri_str.to_string();
                }
                if let Ok(new_uri) = Url::parse(uri_str) {
                    if new_uri.has_host() {
                        return new_uri.to_string();
                    }
                } else if let Ok(joined_uri) = base_uri.join(uri_str) {
                    return joined_uri.to_string();
                }
                uri_str.to_string()
            };
            let mut links = article_node.select("a").unwrap().filter(|a_ref| {
                let link_attrs = a_ref.attributes.borrow();
                link_attrs.contains("href")
            });
            // Pre-fetch the next link before possibly detaching the current
            // one so the iterator is never positioned on a removed node.
            let mut link = links.next();
            while let Some(link_ref) = link {
                link = links.next();
                let mut link_attrs = link_ref.attributes.borrow_mut();
                let href = link_attrs.get("href").map(|val| val.to_string()).unwrap();
                if href.starts_with("javascript:") {
                    // javascript: links are replaced by their content — a bare
                    // text node for text-only links, a <span> wrapper otherwise.
                    let link_node = link_ref.as_node();
                    if link_node.children().count() == 1
                        && link_node
                            .first_child()
                            .map(|node_ref| node_ref.as_text().is_some())
                            .unwrap()
                    {
                        let text_node = NodeRef::new_text(link_node.text_contents());
                        link_node.insert_before(text_node);
                        link_node.detach();
                    } else {
                        let container = NodeRef::new_element(
                            QualName::new(None, Namespace::from(HTML_NS), LocalName::from("span")),
                            BTreeMap::new(),
                        );
                        let mut children = link_node.children();
                        let mut child = children.next();
                        while let Some(child_ref) = child {
                            child = children.next();
                            container.append(child_ref);
                        }
                        link_node.insert_before(container);
                        link_node.detach();
                    }
                } else {
                    link_attrs.insert("href", to_absolute_uri(&href));
                }
            }
            // Absolutize src/poster/srcset on every embedded-media element.
            let media_nodes = article_node
                .select("img, picture, figure, video, audio, source")
                .unwrap();
            for media_node in media_nodes {
                let mut media_attrs = media_node.attributes.borrow_mut();
                if let Some(src) = media_attrs.get_mut("src") {
                    *src = to_absolute_uri(&src);
                }
                if let Some(poster) = media_attrs.get_mut("poster") {
                    *poster = to_absolute_uri(&poster);
                }
                if let Some(srcset) = media_attrs.get_mut("srcset") {
                    // Rewrite each URL in the srcset list, preserving the
                    // descriptor/separator capture groups.
                    let new_srcset = regexes::SRCSET_CAPTURE_REGEX.replace_all(
                        &srcset,
                        |captures: &regex::Captures| {
                            to_absolute_uri(&captures[1])
                                + &captures.get(2).map(|cap| cap.as_str()).unwrap_or("")
                                + &captures[3]
                        },
                    );
                    *srcset = new_srcset.to_string();
                }
            }
        }
    }
/// Removes readability attributes from DOM nodes as they are not needed in the final article
fn clean_readability_attrs(&mut self) {
if let Some(article_node) = &mut self.article_node {
for node in article_node.inclusive_descendants().elements() {
let mut node_attrs = node.attributes.borrow_mut();
node_attrs.remove(READABILITY_SCORE);
node_attrs.remove("readability-data-table");
}
}
}
    /// Run any post-process modifications to article content as necessary.
    ///
    /// Currently: absolutize link/media URLs against `url`, strip class
    /// attributes, and remove the internal readability bookkeeping attrs.
    fn post_process_content(&mut self, url: &str) {
        self.fix_relative_uris(url);
        // TODO: Add flag check
        self.clean_classes();
        self.clean_readability_attrs();
    }
/// Converts an inline CSS string to a [HashMap] of property and value(s)
fn inline_css_str_to_map(css_str: &str) -> HashMap<String, String> {
enum State {
ReadProp,
ReadVal,
ReadQuot,
ReadDquot,
}
| rust | MIT | 796a34a34c365bc06191bf634918b71eaee7bd5d | 2026-01-04T20:24:03.677608Z | true |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_bundle/build.rs | crates/dc_bundle/build.rs | // Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::error::Error;
use std::fs;
use std::path::{Path, PathBuf};
/// Build script: compile every `.proto` under `src/proto` into Rust sources
/// in `OUT_DIR` using the pure-Rust protobuf code generator.
fn main() -> Result<(), Box<dyn Error>> {
    // Cargo provides OUT_DIR to build scripts; generated code goes there.
    let out_dir = PathBuf::from(std::env::var_os("OUT_DIR").unwrap());
    // All .proto sources live under <crate>/src/proto.
    let proto_root = Path::new(env!("CARGO_MANIFEST_DIR")).join("src").join("proto");
    // Gather every .proto file (recursively) as UTF-8 path strings.
    let inputs: Vec<String> = collect_proto_files(&proto_root)?
        .into_iter()
        .map(|p| p.to_str().unwrap().to_string())
        .collect();
    // Run the code generator over the collected inputs.
    let mut codegen = protobuf_codegen::Codegen::new();
    codegen
        .pure()
        .customize(protobuf_codegen::Customize::default())
        .out_dir(&out_dir)
        .include(&proto_root)
        .inputs(&inputs)
        .run_from_script();
    // Re-run this script whenever anything under the proto tree changes.
    println!("cargo:rerun-if-changed={}", proto_root.to_str().unwrap());
    Ok(())
}
/// Recursively collect every `.proto` file under `proto_path`.
///
/// Returns an empty list when `proto_path` is not a directory. Subfolders
/// are searched depth-first; files without a `.proto` extension are ignored.
fn collect_proto_files(proto_path: &Path) -> Result<Vec<PathBuf>, Box<dyn Error>> {
    let mut proto_files = Vec::new();
    if proto_path.is_dir() {
        for entry in fs::read_dir(proto_path)? {
            let path = entry?.path();
            if path.is_dir() {
                // Recursively search in subfolders.
                proto_files.extend(collect_proto_files(&path)?);
            } else if path.extension().map_or(false, |ext| ext == "proto") {
                proto_files.push(path);
            }
        }
    }
    Ok(proto_files)
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_bundle/src/lib.rs | crates/dc_bundle/src/lib.rs | // Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use thiserror::Error;
pub mod definition;
pub mod definition_file;
// Include the generated proto module.
include!(concat!(env!("OUT_DIR"), "/mod.rs"));
/// Errors produced while building, encoding, or decoding a DesignCompose
/// definition bundle.
#[derive(Error, Debug)]
pub enum Error {
    /// A required field was absent from a message.
    #[error("Missing field {field}")]
    MissingFieldError { field: String },
    /// An enum carried an unrecognized variant value.
    #[error("Unknown enum variant for {enum_name}")]
    UnknownEnumVariant { enum_name: String },
    /// A serialized `NodeQuery` string could not be parsed back.
    #[error("Attempted to parse unknown NodeQuery string {query}")]
    InvalidNodeQuery { query: String },
    /// Underlying filesystem or stream failure.
    #[error("IO Error")]
    IoError(#[from] std::io::Error),
    /// Low-level protobuf wire-format failure (no detail retained).
    #[error("Proto Decode error")]
    DecodeError(),
    /// Failure while parsing a definition or its header (with detail).
    #[error("DesignComposeDefinition Load Error")]
    DCDLoadError(String),
    /// Failure while serializing protobuf messages (with detail).
    #[error("Protobuf Write Error")]
    ProtobufWriteError(String),
}
/// Crate-local result alias defaulting the error type to [`Error`].
pub type Result<T> = std::result::Result<T, Error>;
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_bundle/src/definition_file.rs | crates/dc_bundle/src/definition_file.rs | /*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::design_compose_definition::{DesignComposeDefinition, DesignComposeDefinitionHeader};
use crate::Error;
use protobuf::{CodedInputStream, Message};
use std::fs::File;
use std::io::{Read, Write};
use std::path::Path;
/// Serialize `header` then `doc` as two consecutive length-delimited
/// protobuf messages into a single buffer — the inverse of
/// `decode_dcd_with_header`.
pub fn encode_dcd_with_header(
    header: &DesignComposeDefinitionHeader,
    doc: &DesignComposeDefinition,
) -> Result<Vec<u8>, Error> {
    // Header goes first so a reader can check the version before decoding
    // the (much larger) definition payload.
    let mut encoded = header
        .write_length_delimited_to_bytes()
        .map_err(|e| Error::ProtobufWriteError(format!("Failed to write header: {}", e)))?;
    encoded.append(
        &mut doc
            .write_length_delimited_to_bytes()
            .map_err(|e| Error::ProtobufWriteError(format!("Failed to write definition: {}", e)))?,
    );
    Ok(encoded)
}
/// Decode a buffer produced by `encode_dcd_with_header`: a length-delimited
/// header followed by a length-delimited definition.
pub fn decode_dcd_with_header(
    data: &[u8],
) -> Result<(DesignComposeDefinitionHeader, DesignComposeDefinition), Error> {
    let mut cis = CodedInputStream::from_bytes(data);
    // Each message is preceded by a varint length; push_limit confines the
    // parser to exactly that many bytes.
    let header_len = cis.read_raw_varint32().map_err(|_| Error::DecodeError())?;
    let header_limit = cis.push_limit(header_len as u64).map_err(|_| Error::DecodeError())?;
    let header = DesignComposeDefinitionHeader::parse_from(&mut cis)
        .map_err(|e| Error::DCDLoadError(format!("Failed to parse header: {}", e)))?;
    cis.pop_limit(header_limit);
    // Ensure the version of the document matches this version of automotive design compose.
    // NOTE(review): a mismatch is only logged, not treated as an error —
    // confirm this leniency is intentional.
    if header.dc_version != DesignComposeDefinitionHeader::current_version() {
        println!(
            "DesignComposeDefinition old version found. Expected {} Found: {}",
            DesignComposeDefinitionHeader::current_version(),
            header.dc_version
        );
    }
    let dcd_length = cis.read_raw_varint32().map_err(|_| Error::DecodeError())?;
    let dcd_limit = cis.push_limit(dcd_length as u64).map_err(|_| Error::DecodeError())?;
    // NOTE(review): debug print left in from development.
    println!("DCD length = {:?}", dcd_length);
    let dcd = DesignComposeDefinition::parse_from(&mut cis)
        .map_err(|e| Error::DCDLoadError(format!("Failed to parse DCD: {}", e)))?;
    cis.pop_limit(dcd_limit);
    Ok((header, dcd))
}
/// A helper method to save serialized figma design docs.
///
/// The definition is encoded *before* the destination file is created, so a
/// serialization failure can no longer truncate/clobber an existing file at
/// `save_path` (the original called `File::create` first, which truncates).
pub fn save_design_def<P>(
    save_path: P,
    header: &DesignComposeDefinitionHeader,
    doc: &DesignComposeDefinition,
) -> Result<(), Error>
where
    P: AsRef<Path>,
{
    let bytes = encode_dcd_with_header(header, doc)?;
    let mut output = File::create(save_path)?;
    output.write_all(&bytes)?;
    Ok(())
}
/// A helper method to load a DesignCompose Definition from a file.
///
/// Reads the whole file in one call and then decodes the length-delimited
/// header and definition messages.
pub fn load_design_def<P>(
    load_path: P,
) -> Result<(DesignComposeDefinitionHeader, DesignComposeDefinition), Error>
where
    P: AsRef<Path>,
{
    // `fs::read` pre-sizes the buffer from file metadata, replacing the
    // manual File::open + read_to_end sequence; I/O errors convert to
    // Error::IoError via `#[from]`.
    let buf = std::fs::read(load_path)?;
    decode_dcd_with_header(&buf)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::background::{background, Background};
    use crate::color::Color;
    use crate::positioning::ScrollInfo;
    use crate::variable::ColorOrVar;
    use crate::view::view::RenderMethod;
    use crate::view::View;
    use crate::view_shape::ViewShape;
    use crate::view_style::ViewStyle;
    use std::collections::HashMap;
    use tempfile::NamedTempFile;
    /// Round-trip test: a header plus a one-view definition survives
    /// `save_design_def` -> `load_design_def` unchanged.
    #[test]
    fn test_save_load_design_def() {
        // Header with arbitrary, easily recognizable field values.
        let mut header = DesignComposeDefinitionHeader::new();
        header.dc_version = 123;
        header.id = "doc_id".to_string();
        header.name = "doc_name".to_string();
        header.last_modified = "yesterday".to_string();
        header.response_version = "v1".to_string();
        // Definition containing a single red rectangle view.
        let mut doc = DesignComposeDefinition::new();
        let mut style = ViewStyle::new_default();
        let color = Color::red();
        let solid_bg = background::Background_type::Solid(ColorOrVar::new_color(color));
        style.node_style_mut().backgrounds.push(Background::new_with_background(solid_bg));
        let view_name = "test_view".to_string();
        let view = View::new_rect(
            &"test_id".to_string(),
            &view_name,
            ViewShape::default(),
            style,
            None,
            None,
            ScrollInfo::new_default(),
            None,
            None,
            RenderMethod::RENDER_METHOD_NONE,
            HashMap::new(),
        );
        doc.views.insert(view_name, view);
        // Save to a temp file and load it back; both halves must compare equal.
        let temp_file = NamedTempFile::new().unwrap();
        let temp_path = temp_file.path();
        save_design_def(temp_path, &header, &doc).unwrap();
        let (loaded_header, loaded_doc) = load_design_def(temp_path).unwrap();
        assert_eq!(header, loaded_header);
        assert_eq!(doc, loaded_doc);
    }
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_bundle/src/definition.rs | crates/dc_bundle/src/definition.rs | use crate::variable::VariableMap;
/*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::design_compose_definition::{DesignComposeDefinition, DesignComposeDefinitionHeader};
use crate::view::View;
use crate::Error;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt;
use std::hash::Hash;
use std::sync::Arc;
pub mod element;
pub mod layout;
pub mod modifier;
pub mod view;
// LINT.IfChange
pub static CURRENT_VERSION: u32 = 28;
// Lint.ThenChange(common/src/main/java/com/android/designcompose/common/DCDVersion.kt)
impl DesignComposeDefinitionHeader {
    /// Build a header stamped with this build's `CURRENT_VERSION` and the
    /// supplied document identification fields.
    pub fn current(
        last_modified: String,
        name: String,
        response_version: String,
        id: String,
    ) -> DesignComposeDefinitionHeader {
        DesignComposeDefinitionHeader {
            dc_version: CURRENT_VERSION,
            last_modified,
            name,
            response_version,
            id,
            // Remaining proto fields keep their defaults.
            ..Default::default()
        }
    }
    /// The serialization version this build of the crate writes and expects.
    pub fn current_version() -> u32 {
        CURRENT_VERSION
    }
}
impl fmt::Display for DesignComposeDefinitionHeader {
    /// Multi-line, human-readable summary of the header fields.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // NOTE: Using `write!` here instead of typical `format!`
        // to keep newlines.
        write!(
            f,
            "DC Version: {}\nDoc ID: {}\nName: {}\nLast Modified: {}\nResponse Version: {}",
            &self.dc_version, &self.id, &self.name, &self.last_modified, &self.response_version
        )
    }
}
impl DesignComposeDefinition {
    /// Assemble a definition from its parts. `NodeQuery` keys are stored in
    /// their string-encoded form (see `NodeQuery::encode`) because the proto
    /// map is keyed by strings.
    pub fn new_with_details(
        views: HashMap<NodeQuery, View>,
        images: EncodedImageMap,
        component_sets: HashMap<String, String>,
        variable_map: VariableMap,
    ) -> DesignComposeDefinition {
        DesignComposeDefinition {
            views: views.iter().map(|(k, v)| (k.encode(), v.to_owned())).collect(),
            images: images.into(),
            component_sets,
            variable_map: Some(variable_map).into(),
            // Remaining proto fields keep their defaults.
            ..Default::default()
        }
    }
    /// Decode the stored string view keys back into `NodeQuery` values;
    /// fails if any stored key is not a valid encoding.
    pub fn views(&self) -> Result<HashMap<NodeQuery, View>, Error> {
        self.views
            .iter()
            .map(|(k, v)| NodeQuery::decode(k).map(|query| (query, v.clone())))
            .collect::<Result<HashMap<NodeQuery, View>, Error>>()
    }
}
impl fmt::Display for DesignComposeDefinition {
    /// Human-readable summary: counts of stored views and component sets.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // NOTE: Using `write!` here instead of typical `format!`
        // to keep newlines.
        write!(
            f,
            "Views: {}\nComponent Sets: {}",
            // `len()` is O(1); the original iterated all keys with
            // `keys().count()` just to count them.
            self.views.len(),
            self.component_sets.len()
        )
    }
}
/// Key used to look views up in a definition; persisted in string-encoded
/// form via `NodeQuery::encode`.
#[derive(Clone, PartialEq, Eq, PartialOrd, Ord, Debug, Hash, Serialize, Deserialize)]
pub enum NodeQuery {
    /// Find the node by ID
    NodeId(String),
    /// Find the node by name
    NodeName(String),
    /// Node by name that is a variant, so the name may have multiple properties
    /// The first string is the node name and the second is its parent's name
    NodeVariant(String, String),
    /// Find the component-set node by name.
    NodeComponentSet(String),
}
impl NodeQuery {
    /// Construct a NodeQuery::NodeId from the given ID string.
    pub fn id(id: impl ToString) -> NodeQuery {
        NodeQuery::NodeId(id.to_string())
    }

    /// Construct a NodeQuery::NodeName from the given node name
    pub fn name(name: impl ToString) -> NodeQuery {
        NodeQuery::NodeName(name.to_string())
    }

    /// Construct a NodeQuery::NodeVariant from the given variant name and parent name
    pub fn variant(name: impl ToString, parent: impl ToString) -> NodeQuery {
        NodeQuery::NodeVariant(name.to_string(), parent.to_string())
    }

    /// Construct a NodeQuery::NodeComponentSet from the given node component set name
    pub fn component_set(name: impl ToString) -> NodeQuery {
        NodeQuery::NodeComponentSet(name.to_string())
    }

    /// Serialize this query into the `<type>:<value>` string form understood
    /// by [`NodeQuery::decode`]. Variant queries join the name and parent with
    /// the ASCII unit separator (`\x1f`).
    ///
    /// # Panics
    ///
    /// Panics if a variant name or parent contains `\x1f`, because the
    /// resulting string could not be decoded unambiguously.
    pub fn encode(&self) -> String {
        match self {
            NodeQuery::NodeId(id) => format!("id:{}", id),
            NodeQuery::NodeName(name) => format!("name:{}", name),
            NodeQuery::NodeVariant(name, parent) => {
                assert!(!name.contains('\x1f'));
                // Fix: previously only `name` was validated. A separator in
                // `parent` would silently produce a string that `decode`
                // rejects (three parts), so validate it as well.
                assert!(!parent.contains('\x1f'));
                format!("variant:{}\x1f{}", name, parent)
            }
            NodeQuery::NodeComponentSet(name) => format!("component_set:{}", name),
        }
    }

    /// Parse a string produced by [`NodeQuery::encode`].
    ///
    /// Only the first `:` is treated as the type/value separator, so values
    /// may themselves contain colons (e.g. `name:name:deadbeef`).
    ///
    /// # Errors
    ///
    /// Returns [`Error::InvalidNodeQuery`] when there is no `:` separator,
    /// the query type is unknown, or a variant payload does not have exactly
    /// two `\x1f`-separated parts.
    pub fn decode(s: &str) -> Result<NodeQuery, Error> {
        let (query_type, query_value) =
            s.split_once(':').ok_or_else(|| Error::InvalidNodeQuery { query: s.to_string() })?;
        match query_type {
            "id" => Ok(NodeQuery::NodeId(query_value.to_string())),
            "name" => Ok(NodeQuery::NodeName(query_value.to_string())),
            "variant" => {
                let variant_parts: Vec<&str> = query_value.split('\x1f').collect();
                if variant_parts.len() != 2 {
                    return Err(Error::InvalidNodeQuery { query: s.to_string() });
                }
                Ok(NodeQuery::NodeVariant(
                    variant_parts[0].to_string(),
                    variant_parts[1].to_string(),
                ))
            }
            "component_set" => Ok(NodeQuery::NodeComponentSet(query_value.to_string())),
            _ => Err(Error::InvalidNodeQuery { query: s.to_string() }),
        }
    }
}
// Unit tests for the definition header, definition container, NodeQuery
// encode/decode round-trips, and the encoded image map.
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_header_current() {
        let header = DesignComposeDefinitionHeader::current(
            "2024-01-01".to_string(),
            "Test".to_string(),
            "v1".to_string(),
            "doc1".to_string(),
        );
        // `current` stamps the compiled-in schema version next to the
        // caller-provided metadata.
        assert_eq!(header.dc_version, CURRENT_VERSION);
        assert_eq!(header.last_modified, "2024-01-01");
        assert_eq!(header.name, "Test");
        assert_eq!(header.response_version, "v1");
        assert_eq!(header.id, "doc1");
    }

    #[test]
    fn test_definition_new() {
        let mut views = HashMap::new();
        views
            .insert(NodeQuery::id("view1"), View { id: "view1".to_string(), ..Default::default() });
        let images = EncodedImageMap(HashMap::new());
        let component_sets = HashMap::new();
        let variable_map = VariableMap::new();
        let def =
            DesignComposeDefinition::new_with_details(views, images, component_sets, variable_map);
        assert_eq!(def.views.len(), 1);
    }

    // The next four tests round-trip each NodeQuery variant through
    // encode/decode.
    #[test]
    fn test_node_query_id() {
        let query = NodeQuery::id("test_id");
        assert_eq!(query, NodeQuery::NodeId("test_id".to_string()));
        assert_eq!(query.encode(), "id:test_id");
        assert_eq!(NodeQuery::decode("id:test_id").unwrap(), query);
    }

    #[test]
    fn test_node_query_name() {
        let query = NodeQuery::name("test_name");
        assert_eq!(query, NodeQuery::NodeName("test_name".to_string()));
        assert_eq!(query.encode(), "name:test_name");
        assert_eq!(NodeQuery::decode("name:test_name").unwrap(), query);
    }

    #[test]
    fn test_node_query_variant() {
        let query = NodeQuery::variant("variant_name", "parent_name");
        assert_eq!(
            query,
            NodeQuery::NodeVariant("variant_name".to_string(), "parent_name".to_string())
        );
        assert_eq!(query.encode(), "variant:variant_name\x1fparent_name");
        assert_eq!(NodeQuery::decode("variant:variant_name\x1fparent_name").unwrap(), query);
    }

    // A node name containing ':' must survive the round trip, because decode
    // splits on the first ':' only.
    #[test]
    fn test_node_query_name_uses_name() {
        let query = NodeQuery::name("name:deadbeef");
        assert_eq!(query, NodeQuery::NodeName("name:deadbeef".to_string()));
        assert_eq!(query.encode(), "name:name:deadbeef");
        assert_eq!(NodeQuery::decode("name:name:deadbeef").unwrap(), query);
    }

    #[test]
    fn test_node_query_component_set() {
        let query = NodeQuery::component_set("component_set_name");
        assert_eq!(query, NodeQuery::NodeComponentSet("component_set_name".to_string()));
        assert_eq!(query.encode(), "component_set:component_set_name");
        assert_eq!(NodeQuery::decode("component_set:component_set_name").unwrap(), query);
    }

    #[test]
    fn test_node_query_from_string_invalid() {
        assert!(NodeQuery::decode("invalid_query").is_err());
        assert!(NodeQuery::decode("id").is_err());
        assert!(NodeQuery::decode("variant:name").is_err()); // Missing parent
        assert!(NodeQuery::decode("variant:name\x1fparent\x1fextra").is_err()); // Extra part
        assert!(NodeQuery::decode("unknown:value").is_err()); // Unknown type
    }

    // Encoding a variant whose name contains the unit separator must panic —
    // the encoded form could not be decoded unambiguously.
    #[test]
    #[should_panic]
    fn test_node_query_variant_with_unit_separator() {
        let name_with_separator = "name\x1fwith\x1fseparator";
        let parent = "parent";
        let query = NodeQuery::variant(name_with_separator, parent);
        query.encode();
    }

    #[test]
    fn test_definition_views() {
        let mut views_map = HashMap::new();
        let query = NodeQuery::id("view1");
        let view = View { id: "view1".to_string(), ..Default::default() };
        views_map.insert(query.clone(), view.clone());
        let images = EncodedImageMap(HashMap::new());
        let component_sets = HashMap::new();
        let variable_map = VariableMap::new();
        let def = DesignComposeDefinition::new_with_details(
            views_map,
            images,
            component_sets,
            variable_map,
        );
        let decoded_views = def.views().unwrap();
        assert_eq!(decoded_views.len(), 1);
        assert_eq!(decoded_views.get(&query), Some(&view));
    }

    #[test]
    fn test_encoded_image_map_map() {
        let mut image_data = HashMap::new();
        let image_bytes = serde_bytes::ByteBuf::from(vec![1, 2, 3]);
        image_data.insert("image1".to_string(), Arc::new(image_bytes));
        let encoded_map = EncodedImageMap(image_data.clone());
        let mapped_data = encoded_map.map();
        assert_eq!(image_data, mapped_data);
    }
}
/// EncodedImageMap contains a mapping from ImageKey to network bytes. It can create an
/// ImageMap and is intended to be used when we want to use Figma-defined components but do
/// not want to communicate with the Figma service.
///
/// Values are `Arc`-shared, so cloning the map duplicates refcounts and keys
/// but not the image bytes themselves.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub struct EncodedImageMap(pub HashMap<String, Arc<serde_bytes::ByteBuf>>);
impl EncodedImageMap {
    /// Return a copy of the underlying map. The copy is shallow for the image
    /// bytes: values are `Arc`s, so only refcounts and key strings are
    /// duplicated.
    pub fn map(&self) -> HashMap<String, Arc<serde_bytes::ByteBuf>> {
        self.0.clone()
    }
}
/// Flatten into a plain byte map, copying the image bytes out of their `Arc`
/// wrappers.
///
/// Implemented as `From` (rather than hand-written `Into`) per Rust
/// convention; the standard blanket impl still provides
/// `Into<HashMap<String, Vec<u8>>>` for existing `.into()` call sites.
impl From<EncodedImageMap> for HashMap<String, Vec<u8>> {
    fn from(images: EncodedImageMap) -> Self {
        // Consume the map so keys move instead of being cloned; only the
        // bytes behind each `Arc` need copying.
        images.0.into_iter().map(|(k, v)| (k, v.to_vec())).collect()
    }
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_bundle/src/definition/element.rs | crates/dc_bundle/src/definition/element.rs | /*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use protobuf::well_known_types::empty::Empty;
use protobuf::MessageField;
use crate::background::{background, Background};
use crate::color::{Color, FloatColor};
use crate::font::{FontFeature, FontStretch, FontStyle, FontWeight, TextDecoration};
use crate::geometry::dimension_proto::Dimension;
use crate::geometry::{DimensionProto, DimensionRect};
use crate::path::path::WindingRule;
use crate::path::Path;
use crate::variable::num_or_var::NumOrVarType;
use crate::variable::{color_or_var, ColorOrVar, NumOrVar};
use crate::view_shape::{view_shape, ViewShape};
use crate::Error;
use std::fmt;
use std::fmt::{Debug, Display, Formatter};
use std::hash::{Hash, Hasher};
/// Render a `FontStyle` for users by delegating to its Debug form, so
/// variants print by name.
impl Display for FontStyle {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}
impl FontFeature {
pub fn new_with_tag(tag: String) -> Self {
FontFeature { tag, enabled: true, ..Default::default() }
}
}
/// Render a `TextDecoration` for users by delegating to its Debug form, so
/// variants print by name.
impl Display for TextDecoration {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        write!(f, "{:?}", self)
    }
}
/// Compare two finite f32 values
/// This is not meant for situations dealing with NAN and INFINITY
#[inline]
fn f32_eq(a: &f32, b: &f32) -> bool {
    assert!(a.is_finite());
    assert!(b.is_finite());
    let delta = (a - b).abs();
    delta < f32::EPSILON
}
impl Color {
    /// Build a color directly from 8-bit channel values.
    pub fn from_u8s(r: u8, g: u8, b: u8, a: u8) -> Color {
        Color { r: r as u32, g: g as u32, b: b as u32, a: a as u32, ..Default::default() }
    }

    /// Tuple convenience wrapper; channel order is (r, g, b, a).
    pub fn from_u8_tuple(color: (u8, u8, u8, u8)) -> Color {
        Color {
            r: color.0 as u32,
            g: color.1 as u32,
            b: color.2 as u32,
            a: color.3 as u32,
            ..Default::default()
        }
    }

    /// Quantize floating-point channels (expected in [0.0, 1.0]) to 8-bit.
    /// The cast truncates rather than rounds, so 0.5 maps to 127 — existing
    /// unit tests pin this.
    pub fn from_f32s(r: f32, g: f32, b: f32, a: f32) -> Color {
        let tou32 = |c| (c * 255.0) as u32;
        Color { r: tou32(r), g: tou32(g), b: tou32(b), a: tou32(a), ..Default::default() }
    }

    /// 0xAARRGGBB
    pub fn from_u32(c: u32) -> Color {
        Color {
            r: ((c & 0x00FF_0000u32) >> 16) as u32,
            g: ((c & 0x0000_FF00u32) >> 8) as u32,
            b: (c & 0x0000_00FFu32) as u32,
            a: ((c & 0xFF00_0000u32) >> 24) as u32,
            ..Default::default()
        }
    }

    pub fn from_f32_tuple(color: (f32, f32, f32, f32)) -> Color {
        Color::from_f32s(color.0, color.1, color.2, color.3)
    }

    /// Returns the H,S,V (Hue, Saturation, Value) representation
    /// of the (Red, Green, Blue) color argument.
    ///
    /// # Arguments
    ///
    /// * `color` - a tuple of r,g,b values between 0.0 and 1.0
    ///
    pub fn hsv_from_u8(color: (u8, u8, u8)) -> (f32, f32, f32) {
        // After: https://math.stackexchange.com/questions/556341/rgb-to-hsv-color-conversion-algorithm
        let r = color.0;
        let g = color.1;
        let b = color.2;
        // Normalize channels to [0.0, 1.0] (bindings are shadowed on purpose).
        let r = r as f32 / 255.0f32;
        let g = g as f32 / 255.0f32;
        let b = b as f32 / 255.0f32;
        let maxc = f32::max(f32::max(r, g), b);
        let minc = f32::min(f32::min(r, g), b);
        let v = maxc;
        // Achromatic (gray) case: hue and saturation are zero.
        if f32_eq(&minc, &maxc) {
            return (0.0, 0.0, v);
        }
        let s = (maxc - minc) / maxc;
        let rc = (maxc - r) / (maxc - minc);
        let gc = (maxc - g) / (maxc - minc);
        let bc = (maxc - b) / (maxc - minc);
        // Pick the hue sector based on which channel dominates.
        let h = if f32_eq(&r, &maxc) {
            bc - gc
        } else if f32_eq(&g, &maxc) {
            2.0 + rc - bc
        } else {
            4.0 + gc - rc
        };
        let h = (h / 6.0) % 1.0;
        // Hue is returned in radians, not degrees.
        let mut h = h * 2.0 * std::f32::consts::PI;
        // make sure h is positive and within [0:2_PI)
        if h < 0.0 {
            h += 2.0 * std::f32::consts::PI;
        } else if h >= 2.0 * std::f32::consts::PI {
            h -= 2.0 * std::f32::consts::PI;
        }
        (h, s, v)
    }

    /// Like [`Color::hsv_from_u8`] but takes u32 channels; values are
    /// truncated to their low byte.
    pub fn hsv_from_u32(color: (u32, u32, u32)) -> (f32, f32, f32) {
        Color::hsv_from_u8((color.0 as u8, color.1 as u8, color.2 as u8))
    }

    /// Returns the H,S,V (Hue, Saturation, Value) representation
    /// of color.
    ///
    pub fn as_hsv(&self) -> (f32, f32, f32) {
        Color::hsv_from_u32((self.r, self.g, self.b))
    }

    // Channel values are stored as u32 but represent 8-bit data; the casts
    // below truncate to the low byte.
    pub fn as_u8_tuple(&self) -> (u8, u8, u8, u8) {
        (self.r as u8, self.g as u8, self.b as u8, self.a as u8)
    }

    pub fn as_f32_tuple(&self) -> (f32, f32, f32, f32) {
        let tof32 = |c| c as f32 / 255.0;
        (tof32(self.r), tof32(self.g), tof32(self.b), tof32(self.a))
    }

    pub fn as_f32_array(&self) -> [f32; 4] {
        let c = self.as_f32_tuple();
        [c.0, c.1, c.2, c.3]
    }

    /// Packs channels as 0xRRGGBBAA.
    /// NOTE(review): this is NOT the inverse of [`Color::from_u32`], which
    /// reads 0xAARRGGBB. The existing unit test pins this asymmetry, so it is
    /// documented here rather than changed — confirm with callers before
    /// unifying the layouts.
    pub fn as_u32(&self) -> u32 {
        (self.r << 24) | (self.g << 16) | (self.b << 8) | self.a
    }

    // 8-bit channel setters/getters; storage stays u32.
    pub fn set_red(&mut self, r: u8) {
        self.r = r as u32;
    }
    pub fn set_green(&mut self, g: u8) {
        self.g = g as u32;
    }
    pub fn set_blue(&mut self, b: u8) {
        self.b = b as u32;
    }
    pub fn set_alpha(&mut self, a: u8) {
        self.a = a as u32;
    }
    pub fn r(&self) -> u8 {
        self.r as u8
    }
    pub fn g(&self) -> u8 {
        self.g as u8
    }
    pub fn b(&self) -> u8 {
        self.b as u8
    }
    pub fn a(&self) -> u8 {
        self.a as u8
    }

    // Named color presets, all fully opaque (alpha 255).
    pub fn white() -> Color {
        Color { r: 255, g: 255, b: 255, a: 255, ..Default::default() }
    }
    pub fn black() -> Color {
        Color { r: 0, g: 0, b: 0, a: 255, ..Default::default() }
    }
    pub fn red() -> Color {
        Color { r: 255, g: 0, b: 0, a: 255, ..Default::default() }
    }
    pub fn green() -> Color {
        Color { r: 0, g: 255, b: 0, a: 255, ..Default::default() }
    }
    pub fn blue() -> Color {
        Color { r: 0, g: 0, b: 255, a: 255, ..Default::default() }
    }
    pub fn yellow() -> Color {
        Color { r: 255, g: 255, b: 0, a: 255, ..Default::default() }
    }
    pub fn magenta() -> Color {
        Color { r: 255, g: 0, b: 255, a: 255, ..Default::default() }
    }
    pub fn cyan() -> Color {
        Color { r: 0, g: 255, b: 255, a: 255, ..Default::default() }
    }
    pub fn gray() -> Color {
        Color { r: 128, g: 128, b: 128, a: 255, ..Default::default() }
    }
    pub fn hot_pink() -> Color {
        Color { r: 255, g: 105, b: 180, a: 255, ..Default::default() }
    }
}
/// Quantize a floating-point color (channels expected in [0.0, 1.0]) to 8-bit
/// channels.
///
/// Implemented as `From` (rather than hand-written `Into`) per Rust
/// convention; the standard blanket impl still provides `Into<Color>` for
/// existing `.into()` call sites.
impl From<&FloatColor> for Color {
    fn from(fc: &FloatColor) -> Color {
        Color::from_f32s(fc.r, fc.g, fc.b, fc.a)
    }
}
impl DimensionProto {
pub fn new_auto() -> MessageField<Self> {
Some(DimensionProto { Dimension: Some(Dimension::Auto(().into())), ..Default::default() })
.into()
}
pub fn new_points(value: f32) -> MessageField<Self> {
Some(DimensionProto { Dimension: Some(Dimension::Points(value)), ..Default::default() })
.into()
}
pub fn new_percent(value: f32) -> MessageField<Self> {
Some(DimensionProto { Dimension: Some(Dimension::Percent(value)), ..Default::default() })
.into()
}
pub fn new_undefined() -> MessageField<Self> {
Some(DimensionProto {
Dimension: Some(Dimension::Undefined(().into())),
..Default::default()
})
.into()
}
}
/// Extension helpers for optional [`DimensionProto`] values.
pub trait DimensionExt {
    /// Returns `Ok(true)` if the dimension is a `Points` value, `Ok(false)`
    /// for any other variant, and an error when the proto field is absent.
    fn is_points(&self) -> Result<bool, Error>;
}
impl DimensionExt for Option<DimensionProto> {
fn is_points(&self) -> Result<bool, Error> {
match self {
Some(DimensionProto { Dimension: Some(Dimension::Points(_)), .. }) => Ok(true),
Some(_) => Ok(false), // Other Dimension variants are not Points
None => Err(Error::MissingFieldError { field: "DimensionProto".to_string() }),
}
}
}
impl DimensionRect {
pub fn new_with_default_value() -> Self {
DimensionRect {
start: DimensionProto::new_undefined(),
end: DimensionProto::new_undefined(),
top: DimensionProto::new_undefined(),
bottom: DimensionProto::new_undefined(),
..Default::default()
}
}
// Sets the value of start to the given DimensionView value
pub fn set_start(&mut self, start: Dimension) {
self.start = Some(DimensionProto { Dimension: Some(start), ..Default::default() }).into();
}
// Sets the value of end to the given DimensionView value
pub fn set_end(&mut self, end: Dimension) {
self.end = Some(DimensionProto { Dimension: Some(end), ..Default::default() }).into();
}
// Sets the value of top to the given DimensionView value
pub fn set_top(&mut self, top: Dimension) {
self.top = Some(DimensionProto { Dimension: Some(top), ..Default::default() }).into();
}
// Sets the value of bottom to the given DimensionView value
pub fn set_bottom(&mut self, bottom: Dimension) {
self.bottom = Some(DimensionProto { Dimension: Some(bottom), ..Default::default() }).into();
}
}
/// Extension trait for setting the sides of an `Option<DimensionRect>`
/// without first unwrapping it; each setter fails with a `MissingFieldError`
/// when the rect is `None`.
pub trait DimensionRectExt {
    fn set_start(&mut self, start: Dimension) -> Result<(), Error>;
    fn set_end(&mut self, end: Dimension) -> Result<(), Error>;
    fn set_top(&mut self, top: Dimension) -> Result<(), Error>;
    fn set_bottom(&mut self, bottom: Dimension) -> Result<(), Error>;
}
// Implement the extension trait for Option<DimensionRect>
impl DimensionRectExt for Option<DimensionRect> {
fn set_start(&mut self, start: Dimension) -> Result<(), Error> {
if let Some(rect) = self.as_mut() {
rect.set_start(start);
Ok(())
} else {
Err(Error::MissingFieldError { field: "DimensionRect->start".to_string() })
}
}
fn set_end(&mut self, end: Dimension) -> Result<(), Error> {
if let Some(rect) = self.as_mut() {
rect.set_end(end);
Ok(())
} else {
Err(Error::MissingFieldError { field: "DimensionRect->end".to_string() })
}
}
fn set_top(&mut self, top: Dimension) -> Result<(), Error> {
if let Some(rect) = self.as_mut() {
rect.set_top(top);
Ok(())
} else {
Err(Error::MissingFieldError { field: "DimensionRect->top".to_string() })
}
}
fn set_bottom(&mut self, bottom: Dimension) -> Result<(), Error> {
if let Some(rect) = self.as_mut() {
rect.set_bottom(bottom);
Ok(())
} else {
Err(Error::MissingFieldError { field: "DimensionRect->bottom".to_string() })
}
}
}
/// Drawing opcodes stored as raw bytes in [`Path::commands`]; each command
/// consumes the noted number of (x, y) points from [`Path::data`].
#[repr(u8)]
#[derive(Clone, Copy, PartialEq, Debug)]
pub enum PathCommand {
    MoveTo = 0,  // 1 Point
    LineTo = 1,  // 1 Point
    CubicTo = 2, // 3 Points
    QuadTo = 3,  // 2 Points
    Close = 4,   // 0 Points
}
impl TryFrom<u8> for PathCommand {
    type Error = &'static str;

    /// Decode a raw command byte back into its `PathCommand` variant; bytes
    /// outside 0..=4 are rejected.
    fn try_from(value: u8) -> Result<Self, Self::Error> {
        let command = match value {
            0 => PathCommand::MoveTo,
            1 => PathCommand::LineTo,
            2 => PathCommand::CubicTo,
            3 => PathCommand::QuadTo,
            4 => PathCommand::Close,
            _ => return Err("PathCommand out of range"),
        };
        Ok(command)
    }
}
impl Path {
pub fn new_default() -> Path {
Path {
commands: Vec::new(),
data: Vec::new(),
winding_rule: WindingRule::WINDING_RULE_NON_ZERO.into(),
..Default::default()
}
}
pub fn with_winding_rule(&mut self, winding_rule: WindingRule) -> &mut Path {
self.winding_rule = winding_rule.into();
self
}
pub fn move_to(&mut self, x: f32, y: f32) -> &mut Path {
self.commands.push(PathCommand::MoveTo as u8);
self.data.push(x);
self.data.push(y);
self
}
pub fn line_to(&mut self, x: f32, y: f32) -> &mut Path {
self.commands.push(PathCommand::LineTo as u8);
self.data.push(x);
self.data.push(y);
self
}
pub fn cubic_to(
&mut self,
c1_x: f32,
c1_y: f32,
c2_x: f32,
c2_y: f32,
x: f32,
y: f32,
) -> &mut Path {
self.commands.push(PathCommand::CubicTo as u8);
self.data.push(c1_x);
self.data.push(c1_y);
self.data.push(c2_x);
self.data.push(c2_y);
self.data.push(x);
self.data.push(y);
self
}
pub fn quad_to(&mut self, c1_x: f32, c1_y: f32, x: f32, y: f32) -> &mut Path {
self.commands.push(PathCommand::QuadTo as u8);
self.data.push(c1_x);
self.data.push(c1_y);
self.data.push(x);
self.data.push(y);
self
}
pub fn close(&mut self) -> &mut Path {
self.commands.push(PathCommand::Close as u8);
self
}
}
impl Background {
    /// Wrap a concrete background variant in a `Background` message.
    pub fn new_with_background(bg_type: background::Background_type) -> Self {
        Background { background_type: Some(bg_type), ..Default::default() }
    }

    /// A background explicitly set to the `None` variant.
    pub fn new_none() -> Self {
        Self::new_with_background(background::Background_type::None(Empty::new()))
    }

    /// True when a background variant other than `None` is present; both an
    /// unset field and the explicit `None` variant count as "no background".
    pub fn is_some(&self) -> bool {
        !matches!(
            &self.background_type,
            None | Some(background::Background_type::None(_))
        )
    }
}
impl ColorOrVar {
    /// Build a literal color value.
    pub fn new_color(color: Color) -> Self {
        let variant = color_or_var::ColorOrVarType::Color(color);
        ColorOrVar { ColorOrVarType: Some(variant), ..Default::default() }
    }

    /// Build a variable reference with an optional fallback color used when
    /// the variable cannot be resolved.
    pub fn new_var(id: String, fallback: Option<Color>) -> Self {
        let var = color_or_var::ColorVar { id, fallback: fallback.into(), ..Default::default() };
        ColorOrVar {
            ColorOrVarType: Some(color_or_var::ColorOrVarType::Var(var)),
            ..Default::default()
        }
    }
}
impl ViewShape {
    /// Wrap a concrete shape variant in a `ViewShape` message. Shared by the
    /// public constructors below.
    fn from_shape(shape: view_shape::Shape) -> Self {
        ViewShape { shape: Some(shape), ..Default::default() }
    }

    pub fn new_rect(bx: view_shape::Box) -> Self {
        Self::from_shape(view_shape::Shape::Rect(bx))
    }

    pub fn new_round_rect(rect: view_shape::RoundRect) -> Self {
        Self::from_shape(view_shape::Shape::RoundRect(rect))
    }

    pub fn new_path(path: view_shape::VectorPath) -> Self {
        Self::from_shape(view_shape::Shape::Path(path))
    }

    pub fn new_arc(arc: view_shape::VectorArc) -> Self {
        Self::from_shape(view_shape::Shape::Arc(arc))
    }

    pub fn new_vector_rect(rect: view_shape::VectorRect) -> Self {
        Self::from_shape(view_shape::Shape::VectorRect(rect))
    }
}
impl FontStretch {
    /// Internal constructor shared by the named width presets.
    fn of(value: f32) -> Self {
        FontStretch { value, ..Default::default() }
    }

    /// Ultra-condensed width (50%), the narrowest possible.
    pub fn ultra_condensed() -> Self {
        Self::of(0.5)
    }
    /// Extra-condensed width (62.5%).
    pub fn extra_condensed() -> Self {
        Self::of(0.625)
    }
    /// Condensed width (75%).
    pub fn condensed() -> Self {
        Self::of(0.75)
    }
    /// Semi-condensed width (87.5%).
    pub fn semi_condensed() -> Self {
        Self::of(0.875)
    }
    /// Normal width (100%).
    pub fn normal() -> Self {
        Self::of(1.0)
    }
    /// Semi-expanded width (112.5%).
    pub fn semi_expanded() -> Self {
        Self::of(1.125)
    }
    /// Expanded width (125%).
    pub fn expanded() -> Self {
        Self::of(1.25)
    }
    /// Extra-expanded width (150%).
    pub fn extra_expanded() -> Self {
        Self::of(1.5)
    }
    /// Ultra-expanded width (200%), the widest possible.
    pub fn ultra_expanded() -> Self {
        Self::of(2.0)
    }
}
impl Hash for FontStretch {
    // `f32` is not `Hash`; quantize to hundredths so stretch values hash
    // deterministically. NOTE(review): values differing by less than 0.01
    // collide by design, which is consistent with equal values hashing
    // equally.
    fn hash<H: Hasher>(&self, state: &mut H) {
        let x = (self.value * 100.0) as i32;
        x.hash(state);
    }
}
impl NumOrVar {
    /// Build a literal (non-variable) numeric value.
    pub fn from_num(num: f32) -> Self {
        let value = NumOrVarType::Num(num);
        NumOrVar { NumOrVarType: Some(value), ..Default::default() }
    }
}
impl FontWeight {
pub fn new_with_num_or_var_type(num_or_var_type: NumOrVarType) -> Self {
FontWeight {
weight: Some(NumOrVar { NumOrVarType: Some(num_or_var_type), ..Default::default() })
.into(),
..Default::default()
}
}
pub fn from_num(weight: f32) -> Self {
FontWeight { weight: Some(NumOrVar::from_num(weight)).into(), ..Default::default() }
}
/// Thin weight (100), the thinnest value.
pub fn thin() -> Self {
FontWeight::from_num(100.0)
}
/// Extra light weight (200).
pub fn extra_light() -> Self {
FontWeight::from_num(200.0)
}
/// Light weight (300).
pub fn light() -> Self {
FontWeight::from_num(300.0)
}
/// Normal (400).
pub fn normal() -> Self {
FontWeight::from_num(400.0)
}
/// Medium weight (500, higher than normal).
pub fn medium() -> Self {
FontWeight::from_num(500.0)
}
/// Semibold weight (600).
pub fn semibold() -> Self {
FontWeight::from_num(600.0)
}
/// Bold weight (700).
pub fn bold() -> Self {
FontWeight::from_num(700.0)
}
/// Extra-bold weight (800).
pub fn extra_bold() -> Self {
FontWeight::from_num(800.0)
}
/// Black weight (900), the thickest value.
pub fn black() -> Self {
FontWeight::from_num(900.0)
}
}
// Unit tests for the element helpers: colors, dimensions, paths, backgrounds,
// shapes, and font attributes.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::background::background::Background_type::Solid;

    #[test]
    fn test_font_feature_new_with_tag() {
        let feature = FontFeature::new_with_tag("test".to_string());
        assert_eq!(feature.tag, "test");
        assert!(feature.enabled);
    }

    // Covers every `from_*` constructor; note the truncation (not rounding)
    // expectation for the f32 paths.
    #[test]
    fn test_color_from() {
        let c1 = Color::from_u8s(10, 20, 30, 40);
        assert_eq!(c1.r, 10);
        assert_eq!(c1.g, 20);
        assert_eq!(c1.b, 30);
        assert_eq!(c1.a, 40);
        let c2 = Color::from_u8_tuple((10, 20, 30, 40));
        assert_eq!(c1, c2);
        // 0.5 * 255 = 127.5 truncates to 127.
        let c3 = Color::from_f32s(0.5, 0.5, 0.5, 0.5);
        assert_eq!(c3.r, 127);
        assert_eq!(c3.g, 127);
        assert_eq!(c3.b, 127);
        assert_eq!(c3.a, 127);
        let c4 = Color::from_f32_tuple((0.5, 0.5, 0.5, 0.5));
        assert_eq!(c3, c4);
        // `from_u32` input layout is 0xAARRGGBB.
        let c5 = Color::from_u32(0x40102030);
        assert_eq!(c5.r, 0x10);
        assert_eq!(c5.g, 0x20);
        assert_eq!(c5.b, 0x30);
        assert_eq!(c5.a, 0x40);
    }

    #[test]
    fn test_color_setters() {
        let mut c = Color::black();
        c.set_red(10);
        c.set_green(20);
        c.set_blue(30);
        c.set_alpha(40);
        assert_eq!(c.r(), 10);
        assert_eq!(c.g(), 20);
        assert_eq!(c.b(), 30);
        assert_eq!(c.a(), 40);
    }

    #[test]
    fn test_color_static_colors() {
        assert_eq!(Color::white(), Color::from_u8s(255, 255, 255, 255));
        assert_eq!(Color::black(), Color::from_u8s(0, 0, 0, 255));
        assert_eq!(Color::red(), Color::from_u8s(255, 0, 0, 255));
    }

    // NOTE: pins `as_u32` to the 0xRRGGBBAA layout, which is intentionally
    // different from the 0xAARRGGBB layout read by `from_u32`.
    #[test]
    fn test_color_as() {
        let c = Color::from_u8s(10, 20, 30, 40);
        assert_eq!(c.as_u8_tuple(), (10, 20, 30, 40));
        let f = c.as_f32_tuple();
        assert!((f.0 - 10.0 / 255.0).abs() < f32::EPSILON);
        assert!((f.1 - 20.0 / 255.0).abs() < f32::EPSILON);
        assert!((f.2 - 30.0 / 255.0).abs() < f32::EPSILON);
        assert!((f.3 - 40.0 / 255.0).abs() < f32::EPSILON);
        let a = c.as_f32_array();
        assert_eq!(a[0], f.0);
        assert_eq!(a[1], f.1);
        assert_eq!(a[2], f.2);
        assert_eq!(a[3], f.3);
        assert_eq!(c.as_u32(), (10 << 24) | (20 << 16) | (30 << 8) | 40);
    }

    // Pure red: hue 0 (radians), full saturation and value.
    #[test]
    fn test_color_hsv() {
        let c = Color::from_u8s(255, 0, 0, 255);
        let (h, s, v) = c.as_hsv();
        assert!((h - 0.0).abs() < f32::EPSILON);
        assert!((s - 1.0).abs() < f32::EPSILON);
        assert!((v - 1.0).abs() < f32::EPSILON);
    }

    #[test]
    fn test_float_color_into() {
        let float_color = FloatColor { r: 0.5, g: 0.5, b: 0.5, a: 0.5, ..Default::default() };
        let color: Color = (&float_color).into();
        assert_eq!(color, Color::from_f32s(0.5, 0.5, 0.5, 0.5));
    }

    #[test]
    fn test_dimension_proto() {
        let auto = DimensionProto::new_auto();
        assert!(matches!(auto.unwrap().Dimension, Some(Dimension::Auto(_))));
        let points = DimensionProto::new_points(10.0);
        assert!(matches!(points.unwrap().Dimension, Some(Dimension::Points(v)) if v == 10.0));
        let percent = DimensionProto::new_percent(50.0);
        assert!(matches!(percent.unwrap().Dimension, Some(Dimension::Percent(v)) if v == 50.0));
        let undefined = DimensionProto::new_undefined();
        assert!(matches!(undefined.unwrap().Dimension, Some(Dimension::Undefined(_))));
    }

    #[test]
    fn test_dimension_ext() {
        let points = DimensionProto::new_points(10.0);
        assert!(points.as_ref().cloned().is_points().unwrap());
        let auto = DimensionProto::new_auto();
        assert!(!auto.as_ref().cloned().is_points().unwrap());
        // A missing proto field is an error, not `false`.
        let none: Option<DimensionProto> = None;
        assert!(none.as_ref().cloned().is_points().is_err());
    }

    #[test]
    fn test_dimension_rect() {
        let mut rect = DimensionRect::new_with_default_value();
        rect.set_start(Dimension::Points(10.0));
        assert!(matches!(rect.start.unwrap().Dimension, Some(Dimension::Points(v)) if v == 10.0));
    }

    #[test]
    fn test_dimension_rect_ext() {
        let mut rect = Some(DimensionRect::new_with_default_value());
        assert!(rect.set_start(Dimension::Points(10.0)).is_ok());
        assert!(rect.set_end(Dimension::Points(20.0)).is_ok());
        assert!(rect.set_top(Dimension::Points(30.0)).is_ok());
        assert!(rect.set_bottom(Dimension::Points(40.0)).is_ok());
        // Setting through a `None` rect must fail rather than silently no-op.
        let mut none_rect: Option<DimensionRect> = None;
        assert!(none_rect.set_start(Dimension::Points(10.0)).is_err());
    }

    #[test]
    fn test_path_command_try_from() {
        assert_eq!(PathCommand::try_from(0).unwrap(), PathCommand::MoveTo);
        assert_eq!(PathCommand::try_from(1).unwrap(), PathCommand::LineTo);
        assert_eq!(PathCommand::try_from(2).unwrap(), PathCommand::CubicTo);
        assert_eq!(PathCommand::try_from(3).unwrap(), PathCommand::QuadTo);
        assert_eq!(PathCommand::try_from(4).unwrap(), PathCommand::Close);
        assert!(PathCommand::try_from(5).is_err());
    }

    // Commands and coordinate data are stored in parallel flat buffers.
    #[test]
    fn test_path() {
        let mut path = Path::new_default();
        path.move_to(1.0, 2.0).line_to(3.0, 4.0).close();
        assert_eq!(
            path.commands,
            vec![PathCommand::MoveTo as u8, PathCommand::LineTo as u8, PathCommand::Close as u8]
        );
        assert_eq!(path.data, vec![1.0, 2.0, 3.0, 4.0]);
    }

    #[test]
    fn test_background() {
        let solid_bg = Solid(ColorOrVar::new_color(Color::red()));
        let bg = Background::new_with_background(solid_bg);
        assert!(bg.is_some());
        // The explicit `None` background variant reports as not-set.
        let none_bg = Background::new_none();
        assert!(!none_bg.is_some());
    }

    #[test]
    fn test_color_or_var() {
        let color = Color::red();
        let color_or_var = ColorOrVar::new_color(color.clone());
        if let Some(color_or_var::ColorOrVarType::Color(c)) = color_or_var.ColorOrVarType {
            assert_eq!(c, color);
        } else {
            panic!("Wrong type");
        }
        let color_or_var_var = ColorOrVar::new_var("my_var".to_string(), Some(Color::blue()));
        if let Some(color_or_var::ColorOrVarType::Var(v)) = color_or_var_var.ColorOrVarType {
            assert_eq!(v.id, "my_var");
            assert_eq!(v.fallback.unwrap(), Color::blue());
        } else {
            panic!("Wrong type");
        }
    }

    #[test]
    fn test_view_shape() {
        let rect = ViewShape::new_rect(view_shape::Box::default());
        assert!(matches!(rect.shape, Some(view_shape::Shape::Rect(_))));
        let round_rect = ViewShape::new_round_rect(view_shape::RoundRect::default());
        assert!(matches!(round_rect.shape, Some(view_shape::Shape::RoundRect(_))));
    }

    #[test]
    fn test_font_stretch() {
        assert_eq!(FontStretch::normal().value, 1.0);
        assert_eq!(FontStretch::condensed().value, 0.75);
    }

    #[test]
    fn test_font_weight() {
        assert!(
            matches!(FontWeight::bold().weight.unwrap().NumOrVarType, Some(NumOrVarType::Num(v)) if v == 700.0)
        );
        assert!(
            matches!(FontWeight::normal().weight.unwrap().NumOrVarType, Some(NumOrVarType::Num(v)) if v == 400.0)
        );
    }

    #[test]
    fn test_path_cubic_to() {
        let mut path = Path::new_default();
        path.cubic_to(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
        assert_eq!(path.commands, vec![PathCommand::CubicTo as u8]);
        assert_eq!(path.data, vec![1.0, 2.0, 3.0, 4.0, 5.0, 6.0]);
    }

    #[test]
    fn test_path_quad_to() {
        let mut path = Path::new_default();
        path.quad_to(1.0, 2.0, 3.0, 4.0);
        assert_eq!(path.commands, vec![PathCommand::QuadTo as u8]);
        assert_eq!(path.data, vec![1.0, 2.0, 3.0, 4.0]);
    }

    // Checks both the literal (`Num`) and variable (`Var`) payload paths.
    #[test]
    fn test_font_weight_new_with_num_or_var_type() {
        use crate::variable::num_or_var::NumVar;
        let num_or_var_type = NumOrVarType::Num(700.0);
        let font_weight = FontWeight::new_with_num_or_var_type(num_or_var_type);
        if let Some(num_or_var) = font_weight.weight.as_ref() {
            if let Some(NumOrVarType::Num(v)) = num_or_var.NumOrVarType {
                assert!((v - 700.0).abs() < f32::EPSILON);
            } else {
                panic!("Wrong type");
            }
        } else {
            panic!("Weight not set");
        }
        let num_var = NumVar { id: "my_var".to_string(), fallback: 400.0, ..Default::default() };
        let num_or_var_type_var = NumOrVarType::Var(num_var);
        let font_weight_var = FontWeight::new_with_num_or_var_type(num_or_var_type_var);
        if let Some(num_or_var) = font_weight_var.weight.as_ref() {
            if let Some(NumOrVarType::Var(v)) = &num_or_var.NumOrVarType {
                assert_eq!(v.id, "my_var");
                assert!((v.fallback - 400.0).abs() < f32::EPSILON);
            } else {
                panic!("Wrong type");
            }
        } else {
            panic!("Weight not set");
        }
    }
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_bundle/src/definition/view.rs | crates/dc_bundle/src/definition/view.rs | /*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use std::collections::HashMap;
use std::sync::atomic::AtomicU16;
use crate::background::{background, Background};
use crate::blend::BlendMode;
use crate::font::{FontStretch, FontStyle, FontWeight, TextDecoration};
use crate::frame_extras::FrameExtras;
use crate::geometry::{Rectangle, Size};
use crate::layout_style::LayoutStyle;
use crate::node_style::{Display, NodeStyle};
use crate::path::line_height::Line_height_type;
use crate::path::{LineHeight, Stroke};
use crate::pointer::PointerEvents;
use crate::positioning::{FlexWrap, LayoutSizing, Overflow, OverflowDirection, ScrollInfo};
use crate::reaction::Reaction;
use crate::text::{TextAlign, TextAlignVertical, TextOverflow};
use crate::text_style::StyledTextRun;
use crate::variable::NumOrVar;
use crate::view::view::RenderMethod;
use crate::view::view_data::{Container, StyledTextRuns, Text, View_data_type};
use crate::view::{ComponentInfo, View, ViewData};
use crate::view_shape::ViewShape;
use crate::view_style::ViewStyle;
impl NodeStyle {
    /// The baseline style applied to nodes before any document-derived values
    /// are merged in. Many fields are spelled out explicitly — even where they
    /// coincide with the proto default — so the intended baseline is visible
    /// in one place.
    pub(crate) fn new_default() -> NodeStyle {
        NodeStyle {
            // Text defaults: no font color, 18pt normal-weight upright text,
            // left/top aligned, clipped overflow, 100% line height.
            font_color: Some(Background::new_with_background(background::Background_type::None(
                ().into(),
            )))
            .into(),
            font_size: Some(NumOrVar::from_num(18.0)).into(),
            font_family: None,
            font_weight: Some(FontWeight::normal()).into(),
            font_style: FontStyle::FONT_STYLE_NORMAL.into(),
            text_decoration: TextDecoration::TEXT_DECORATION_NONE.into(),
            letter_spacing: None,
            font_stretch: Some(FontStretch::normal()).into(),
            // Paint defaults: nothing drawn, default stroke, no transforms.
            backgrounds: Vec::new(),
            box_shadows: Vec::new(),
            stroke: Some(Stroke::default()).into(),
            opacity: None,
            transform: None.into(),
            relative_transform: None.into(),
            text_align: TextAlign::TEXT_ALIGN_LEFT.into(),
            text_align_vertical: TextAlignVertical::TEXT_ALIGN_VERTICAL_TOP.into(),
            text_overflow: TextOverflow::TEXT_OVERFLOW_CLIP.into(),
            text_shadow: None.into(),
            node_size: Some(Size { width: 0.0, height: 0.0, ..Default::default() }).into(),
            line_height: Some(LineHeight {
                line_height_type: Some(Line_height_type::Percent(1.0)),
                ..Default::default()
            })
            .into(),
            line_count: None,
            font_features: Vec::new(),
            filters: Vec::new(),
            backdrop_filters: Vec::new(),
            blend_mode: BlendMode::BLEND_MODE_PASS_THROUGH.into(),
            // Layout defaults: flex display, no wrapping, no grid, visible
            // overflow, fixed sizing on both axes.
            display_type: Display::DISPLAY_FLEX.into(),
            flex_wrap: FlexWrap::FLEX_WRAP_NO_WRAP.into(),
            grid_layout_type: None,
            grid_columns_rows: 0,
            grid_adaptive_min_size: 1,
            grid_span_contents: vec![],
            overflow: Overflow::OVERFLOW_VISIBLE.into(),
            max_children: None,
            overflow_node_id: None,
            overflow_node_name: None,
            cross_axis_item_spacing: 0.0,
            horizontal_sizing: LayoutSizing::LAYOUT_SIZING_FIXED.into(),
            vertical_sizing: LayoutSizing::LAYOUT_SIZING_FIXED.into(),
            aspect_ratio: None,
            // Interaction and auxiliary data: inherit pointer events, no
            // meter/hyperlink/shader/scaling payloads.
            pointer_events: PointerEvents::POINTER_EVENTS_INHERIT.into(),
            meter_data: None.into(),
            hyperlink: None.into(),
            shader_data: None.into(),
            scalable_data: None.into(),
            ..Default::default()
        }
    }
}
impl ViewStyle {
    /// A `ViewStyle` with both its layout style and node style populated with
    /// library defaults. The accessors below rely on this invariant.
    pub fn new_default() -> Self {
        Self {
            layout_style: Some(LayoutStyle::new_default()).into(),
            node_style: Some(NodeStyle::new_default()).into(),
            ..Default::default()
        }
    }

    /// Borrow the node style.
    ///
    /// # Panics
    ///
    /// Panics if the proto field was never populated; styles built through
    /// `new_default` always have it set.
    pub fn node_style(&self) -> &NodeStyle {
        self.node_style.as_ref().expect("NodeStyle is required.")
    }

    /// Mutably borrow the node style.
    ///
    /// # Panics
    ///
    /// Panics if the proto field was never populated.
    pub fn node_style_mut(&mut self) -> &mut NodeStyle {
        self.node_style.as_mut().expect("NodeStyle is required.")
    }

    /// Borrow the layout style.
    ///
    /// # Panics
    ///
    /// Panics if the proto field was never populated.
    pub fn layout_style(&self) -> &LayoutStyle {
        self.layout_style.as_ref().expect("LayoutStyle is required.")
    }

    /// Mutably borrow the layout style.
    ///
    /// # Panics
    ///
    /// Panics if the proto field was never populated.
    pub fn layout_style_mut(&mut self) -> &mut LayoutStyle {
        self.layout_style.as_mut().expect("LayoutStyle is required.")
    }
}
impl ViewStyle {
    /// Compute the difference between this style and the given style, returning a style
    /// that can be applied to this style to make it equal the given style using
    /// `apply_non_default`.
    ///
    /// Fields that compare equal keep the `new_default()` value in the returned
    /// delta, which the apply side treats as "no change".
    pub fn difference(&self, other: &ViewStyle) -> ViewStyle {
        let mut delta = ViewStyle::new_default();
        // For every listed field whose value differs between `self` and `other`,
        // copy `other`'s value into `delta`. `.clone()` is a cheap copy for the
        // `Copy` fields in the list, so one macro covers both kinds of field.
        macro_rules! diff_fields {
            ($get:ident, $get_mut:ident; $( $field:ident ),+ $(,)?) => {
                $(
                    if self.$get().$field != other.$get().$field {
                        delta.$get_mut().$field = other.$get().$field.clone();
                    }
                )+
            };
        }
        diff_fields!(
            node_style, node_style_mut;
            font_color, font_size, font_family, font_weight, font_style,
            text_decoration, letter_spacing, font_stretch, backgrounds, box_shadows,
            stroke, opacity, transform, relative_transform, text_align,
            text_align_vertical, text_overflow, text_shadow, node_size, line_height,
            line_count, font_features, filters, backdrop_filters, blend_mode,
            hyperlink, display_type, flex_wrap, grid_layout_type, grid_columns_rows,
            grid_adaptive_min_size, grid_span_contents, overflow, max_children,
            overflow_node_id, overflow_node_name, shader_data,
            cross_axis_item_spacing, aspect_ratio, pointer_events, meter_data,
        );
        diff_fields!(
            layout_style, layout_style_mut;
            position_type, flex_direction, align_items, align_content,
            justify_content, top, left, bottom, right, margin, padding,
            item_spacing, flex_grow, flex_shrink, flex_basis, width, height,
            max_width, max_height, min_width, min_height, bounding_box,
        );
        // NOTE(review): some NodeStyle fields set by `new_default` (e.g.
        // `scalable_data`) are not part of the diff — confirm this is intentional.
        delta
    }
}
impl ViewData {
    /// Compute the difference between this view data and the given view data.
    /// Right now only computes the text overrides.
    ///
    /// Returns `Some(other.clone())` when this view carries text (`Text` or
    /// `StyledText`) and the two view datas differ; `None` otherwise.
    pub fn difference(&self, other: &ViewData) -> Option<ViewData> {
        // Only text-bearing views produce overrides; any inequality yields a
        // full clone of `other` rather than a field-by-field delta.
        if matches!(
            self.view_data_type,
            Some(View_data_type::Text { .. }) | Some(View_data_type::StyledText { .. })
        ) && self != other
        {
            return Some(other.clone());
        }
        None
    }
}
impl ScrollInfo {
pub fn new_default() -> Self {
ScrollInfo {
overflow: OverflowDirection::OVERFLOW_DIRECTION_NONE.into(),
paged_scrolling: false,
..Default::default()
}
}
}
impl View {
    /// Hand out a process-wide, monotonically increasing id (wraps at `u16::MAX`).
    fn next_unique_id() -> u16 {
        static COUNTER: AtomicU16 = AtomicU16::new(0);
        COUNTER.fetch_add(1, std::sync::atomic::Ordering::Relaxed)
    }
    /// Construct a container ("rect") view with an empty child list.
    pub fn new_rect(
        id: &String,
        name: &String,
        shape: ViewShape,
        style: ViewStyle,
        component_info: Option<ComponentInfo>,
        reactions: Option<Vec<Reaction>>,
        scroll_info: ScrollInfo,
        frame_extras: Option<FrameExtras>,
        design_absolute_bounding_box: Option<Rectangle>,
        render_method: RenderMethod,
        explicit_variable_modes: HashMap<String, String>,
    ) -> View {
        View {
            unique_id: View::next_unique_id() as u32,
            id: id.clone(),
            name: name.clone(),
            component_info: component_info.into(),
            reactions: reactions.unwrap_or_default(),
            style: Some(style).into(),
            frame_extras: frame_extras.into(),
            scroll_info: Some(scroll_info).into(),
            data: Some(ViewData {
                view_data_type: Some(View_data_type::Container(Container {
                    shape: Some(shape).into(),
                    children: vec![],
                    ..Default::default()
                })),
                ..Default::default()
            })
            .into(),
            design_absolute_bounding_box: design_absolute_bounding_box.into(),
            render_method: render_method.into(),
            explicit_variable_modes,
            ..Default::default()
        }
    }
    /// Construct a plain-text view. `text_res_name` is the optional resource
    /// name backing the text content.
    pub fn new_text(
        id: &String,
        name: &String,
        style: ViewStyle,
        component_info: Option<ComponentInfo>,
        reactions: Option<Vec<Reaction>>,
        text: &str,
        text_res_name: Option<String>,
        design_absolute_bounding_box: Option<Rectangle>,
        render_method: RenderMethod,
        explicit_variable_modes: HashMap<String, String>,
    ) -> View {
        View {
            unique_id: View::next_unique_id() as u32,
            id: id.clone(),
            name: name.clone(),
            component_info: component_info.into(),
            reactions: reactions.unwrap_or_default(),
            style: Some(style).into(),
            frame_extras: None.into(),
            scroll_info: Some(ScrollInfo::new_default()).into(),
            data: Some(ViewData {
                view_data_type: Some(View_data_type::Text(Text {
                    content: text.into(),
                    res_name: text_res_name,
                    ..Default::default()
                })),
                ..Default::default()
            })
            .into(),
            design_absolute_bounding_box: design_absolute_bounding_box.into(),
            render_method: render_method.into(),
            explicit_variable_modes,
            ..Default::default()
        }
    }
    /// Construct a styled-text view from a list of styled runs.
    ///
    /// NOTE(review): unlike `new_rect`/`new_text` there is no
    /// `explicit_variable_modes` parameter; it is always empty here — confirm
    /// this asymmetry is intentional.
    pub fn new_styled_text(
        id: &String,
        name: &String,
        style: ViewStyle,
        component_info: Option<ComponentInfo>,
        reactions: Option<Vec<Reaction>>,
        text: Vec<StyledTextRun>,
        text_res_name: Option<String>,
        design_absolute_bounding_box: Option<Rectangle>,
        render_method: RenderMethod,
    ) -> View {
        View {
            unique_id: View::next_unique_id() as u32,
            id: id.clone(),
            name: name.clone(),
            style: Some(style).into(),
            component_info: component_info.into(),
            reactions: reactions.unwrap_or_default(),
            frame_extras: None.into(),
            scroll_info: Some(ScrollInfo::new_default()).into(),
            data: Some(ViewData {
                view_data_type: Some(View_data_type::StyledText(StyledTextRuns {
                    styled_texts: text,
                    res_name: text_res_name,
                    ..Default::default()
                })),
                ..Default::default()
            })
            .into(),
            design_absolute_bounding_box: design_absolute_bounding_box.into(),
            render_method: render_method.into(),
            explicit_variable_modes: HashMap::new(),
            ..Default::default()
        }
    }
    /// Append `child` to this view's children; no-op unless this view's data
    /// is a `Container`.
    pub fn add_child(&mut self, child: View) {
        if let Some(data) = self.data.as_mut() {
            if let Some(View_data_type::Container(Container { children, .. })) =
                data.view_data_type.as_mut()
            {
                children.push(child);
            }
        }
    }
    /// Borrow the style; panics if it was never populated.
    pub fn style(&self) -> &ViewStyle {
        self.style.as_ref().expect("ViewStyle is required.")
    }
    /// Mutably borrow the style; panics if it was never populated.
    pub fn style_mut(&mut self) -> &mut ViewStyle {
        self.style.as_mut().expect("ViewStyle is required.")
    }
    /** This function is now only called by a view that is a COMPONENT. */
    pub fn find_view_by_id(&self, view_id: &String) -> Option<&View> {
        if view_id.as_str() == self.id {
            return Some(self);
        }
        // If this is a descendent node of an instance, the last section is the node id
        // of the view in the component. Example: I70:17;29:15
        if let Some(id) = view_id.split(';').last() {
            // Compare the &str directly — no temporary String allocation.
            if self.id == id {
                return Some(self);
            }
        }
        if let Some(data) = self.data.as_ref() {
            if let Some(View_data_type::Container(Container { children, .. })) =
                &data.view_data_type
            {
                // Depth-first search through container children.
                return children.iter().find_map(|child| child.find_view_by_id(view_id));
            }
        }
        None
    }
}
#[cfg(test)]
mod tests {
use super::*;
use crate::path::stroke_weight;
use crate::path::StrokeWeight;
use crate::variable::num_or_var::NumOrVarType;
#[test]
fn test_node_style_new_default() {
let style = NodeStyle::new_default();
assert!(style.font_color.is_some());
assert_eq!(style.font_size.unwrap().NumOrVarType, Some(NumOrVarType::Num(18.0)));
assert_eq!(style.font_weight.unwrap(), FontWeight::normal());
assert_eq!(style.font_style.enum_value().unwrap(), FontStyle::FONT_STYLE_NORMAL);
}
#[test]
fn test_view_style_new_default() {
let style = ViewStyle::new_default();
assert!(style.layout_style.is_some());
assert!(style.node_style.is_some());
}
#[test]
fn test_view_style_difference() {
let mut style1 = ViewStyle::new_default();
let mut style2 = ViewStyle::new_default();
// Test a few properties
style2.node_style_mut().opacity = Some(0.5);
style2.node_style_mut().letter_spacing = Some(1.2);
style2.layout_style_mut().flex_grow = 1.0;
let diff = style1.difference(&style2);
assert_eq!(diff.node_style().opacity, Some(0.5));
assert_eq!(diff.node_style().letter_spacing, Some(1.2));
assert_eq!(diff.layout_style().flex_grow, 1.0);
// Test no difference
style1.node_style_mut().opacity = Some(0.5);
style1.node_style_mut().letter_spacing = Some(1.2);
style1.layout_style_mut().flex_grow = 1.0;
let diff2 = style1.difference(&style2);
assert_eq!(diff2.node_style().opacity, None);
assert_eq!(diff2.node_style().letter_spacing, None);
assert_eq!(diff2.layout_style().flex_grow, 0.0);
// Test all properties
let mut style3 = ViewStyle::new_default();
let mut style4 = ViewStyle::new_default();
style4.node_style_mut().font_color =
Some(Background::new_with_background(background::Background_type::Solid(
crate::variable::ColorOrVar::new_color(crate::color::Color::red()),
)))
.into();
style4.node_style_mut().font_size = Some(NumOrVar::from_num(24.0)).into();
style4.node_style_mut().font_family = Some("Roboto".to_string());
style4.node_style_mut().font_weight = Some(FontWeight::bold()).into();
style4.node_style_mut().font_style = FontStyle::FONT_STYLE_ITALIC.into();
style4.node_style_mut().text_decoration = TextDecoration::TEXT_DECORATION_UNDERLINE.into();
style4.node_style_mut().letter_spacing = Some(2.0);
style4.node_style_mut().font_stretch = Some(FontStretch::expanded()).into();
style4.node_style_mut().backgrounds.push(Background::new_with_background(
background::Background_type::Solid(crate::variable::ColorOrVar::new_color(
crate::color::Color::blue(),
)),
));
style4.node_style_mut().stroke = Some(Stroke {
stroke_weight: Some(StrokeWeight {
stroke_weight_type: Some(stroke_weight::Stroke_weight_type::Uniform(1.0)),
..Default::default()
})
.into(),
..Default::default()
})
.into();
style4.layout_style_mut().flex_direction =
crate::positioning::FlexDirection::FLEX_DIRECTION_COLUMN.into();
style4.layout_style_mut().align_items =
crate::positioning::AlignItems::ALIGN_ITEMS_CENTER.into();
style4.layout_style_mut().margin = Some(crate::geometry::DimensionRect {
start: crate::geometry::DimensionProto::new_points(10.0),
..Default::default()
})
.into();
let diff3 = style3.difference(&style4);
assert_eq!(diff3.node_style().font_color, style4.node_style().font_color.clone());
assert_eq!(diff3.node_style().font_size, style4.node_style().font_size.clone());
assert_eq!(diff3.node_style().font_family, style4.node_style().font_family.clone());
assert_eq!(diff3.node_style().font_weight, style4.node_style().font_weight.clone());
assert_eq!(diff3.node_style().font_style, style4.node_style().font_style);
assert_eq!(diff3.node_style().text_decoration, style4.node_style().text_decoration);
assert_eq!(diff3.node_style().letter_spacing, style4.node_style().letter_spacing);
assert_eq!(diff3.node_style().font_stretch, style4.node_style().font_stretch.clone());
assert_eq!(diff3.node_style().backgrounds, style4.node_style().backgrounds.clone());
assert_eq!(diff3.node_style().stroke, style4.node_style().stroke.clone());
assert_eq!(diff3.layout_style().flex_direction, style4.layout_style().flex_direction);
assert_eq!(diff3.layout_style().align_items, style4.layout_style().align_items);
assert_eq!(diff3.layout_style().margin, style4.layout_style().margin.clone());
// Test no difference with all properties set
style3 = style4.clone();
let diff4 = style3.difference(&style4);
assert_eq!(diff4.node_style().font_color, ViewStyle::new_default().node_style().font_color);
assert_eq!(diff4.node_style().font_size, ViewStyle::new_default().node_style().font_size);
assert!(diff4.node_style().font_family.is_none());
assert_eq!(
diff4.node_style().font_weight,
ViewStyle::new_default().node_style().font_weight
);
assert_eq!(
diff4.node_style().font_style.enum_value().unwrap(),
FontStyle::FONT_STYLE_NORMAL
);
assert_eq!(
diff4.node_style().text_decoration.enum_value().unwrap(),
TextDecoration::TEXT_DECORATION_NONE
);
assert!(diff4.node_style().letter_spacing.is_none());
assert_eq!(
diff4.node_style().font_stretch,
ViewStyle::new_default().node_style().font_stretch
);
assert!(diff4.node_style().backgrounds.is_empty());
assert_eq!(diff4.node_style().stroke, ViewStyle::new_default().node_style().stroke);
assert_eq!(
diff4.layout_style().flex_direction.enum_value().unwrap(),
crate::positioning::FlexDirection::FLEX_DIRECTION_ROW
);
assert_eq!(
diff4.layout_style().align_items.enum_value().unwrap(),
crate::positioning::AlignItems::ALIGN_ITEMS_STRETCH
);
assert_eq!(diff4.layout_style().margin, ViewStyle::new_default().layout_style().margin);
}
#[test]
fn test_view_new_rect() {
let view = View::new_rect(
&"rect1".to_string(),
&"Rect View".to_string(),
ViewShape::default(),
ViewStyle::new_default(),
None,
None,
ScrollInfo::new_default(),
None,
None,
RenderMethod::RENDER_METHOD_NONE,
HashMap::new(),
);
assert_eq!(view.id, "rect1");
assert_eq!(view.name, "Rect View");
assert!(matches!(
view.data.unwrap().view_data_type,
Some(View_data_type::Container { .. })
));
}
#[test]
fn test_view_new_text() {
let view = View::new_text(
&"text1".to_string(),
&"Text View".to_string(),
ViewStyle::new_default(),
None,
None,
"Hello",
None,
None,
RenderMethod::RENDER_METHOD_NONE,
HashMap::new(),
);
assert_eq!(view.id, "text1");
assert_eq!(view.name, "Text View");
assert!(matches!(view.data.unwrap().view_data_type, Some(View_data_type::Text { .. })));
}
#[test]
fn test_view_add_child() {
let mut parent = View::new_rect(
&"parent".to_string(),
&"Parent".to_string(),
ViewShape::default(),
ViewStyle::new_default(),
None,
None,
ScrollInfo::new_default(),
None,
None,
RenderMethod::RENDER_METHOD_NONE,
HashMap::new(),
);
let child = View::new_rect(
&"child".to_string(),
&"Child".to_string(),
ViewShape::default(),
ViewStyle::new_default(),
None,
None,
ScrollInfo::new_default(),
None,
None,
RenderMethod::RENDER_METHOD_NONE,
HashMap::new(),
);
parent.add_child(child);
if let Some(View_data_type::Container { 0: Container { children, .. } }) =
parent.data.unwrap().view_data_type
{
assert_eq!(children.len(), 1);
assert_eq!(children[0].id, "child");
} else {
panic!("Wrong data type");
}
}
#[test]
fn test_find_view_by_id() {
let child = View::new_rect(
&"child".to_string(),
&"Child".to_string(),
ViewShape::default(),
ViewStyle::new_default(),
None,
None,
ScrollInfo::new_default(),
None,
None,
RenderMethod::RENDER_METHOD_NONE,
HashMap::new(),
);
let mut parent = View::new_rect(
&"parent".to_string(),
&"Parent".to_string(),
ViewShape::default(),
ViewStyle::new_default(),
None,
None,
ScrollInfo::new_default(),
None,
None,
RenderMethod::RENDER_METHOD_NONE,
HashMap::new(),
);
parent.add_child(child);
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | true |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_bundle/src/definition/modifier.rs | crates/dc_bundle/src/definition/modifier.rs | /*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::{
filter::{filter_op, FilterOp},
shadow::{box_shadow, BoxShadow, ShadowBox},
variable::ColorOrVar,
};
pub mod affine_transform;
pub mod layout_transform;
impl FilterOp {
pub fn new_with_op(op_type: filter_op::FilterOpType) -> Self {
FilterOp { FilterOpType: Some(op_type), ..Default::default() }
}
}
impl BoxShadow {
    /// Build the inner `Shadow` payload shared by the outset and inset
    /// constructors (border-box shadow with the given geometry and color).
    fn make_shadow(
        blur_radius: f32,
        spread_radius: f32,
        color: ColorOrVar,
        offset: (f32, f32),
    ) -> box_shadow::Shadow {
        box_shadow::Shadow {
            blur_radius,
            spread_radius,
            color: Some(color).into(),
            offset_x: offset.0,
            offset_y: offset.1,
            shadow_box: ShadowBox::SHADOW_BOX_BORDER_BOX.into(),
            ..Default::default()
        }
    }
    /// Create an outset box shadow.
    pub fn new_with_outset(
        blur_radius: f32,
        spread_radius: f32,
        color: ColorOrVar,
        offset: (f32, f32),
    ) -> BoxShadow {
        BoxShadow {
            shadow_box: Some(box_shadow::Shadow_box::Outset(Self::make_shadow(
                blur_radius,
                spread_radius,
                color,
                offset,
            ))),
            ..Default::default()
        }
    }
    /// Create an inset shadow.
    pub fn new_with_inset(
        blur_radius: f32,
        spread_radius: f32,
        color: ColorOrVar,
        offset: (f32, f32),
    ) -> BoxShadow {
        BoxShadow {
            shadow_box: Some(box_shadow::Shadow_box::Inset(Self::make_shadow(
                blur_radius,
                spread_radius,
                color,
                offset,
            ))),
            ..Default::default()
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::color::Color;
    // Constructors should place the payload in the expected oneof variant.
    #[test]
    fn test_filter_op_new_with_op() {
        let blur = FilterOp::new_with_op(filter_op::FilterOpType::Blur(10.0));
        match blur.FilterOpType {
            Some(filter_op::FilterOpType::Blur(radius)) => assert_eq!(radius, 10.0),
            _ => panic!("Wrong filter op type"),
        }
    }
    #[test]
    fn test_box_shadow_new_with_outset() {
        let red = ColorOrVar::new_color(Color::red());
        let shadow = BoxShadow::new_with_outset(5.0, 2.0, red.clone(), (1.0, 1.0));
        match shadow.shadow_box {
            Some(box_shadow::Shadow_box::Outset(inner)) => {
                assert_eq!(inner.blur_radius, 5.0);
                assert_eq!(inner.spread_radius, 2.0);
                assert_eq!(inner.color.unwrap(), red);
                assert_eq!((inner.offset_x, inner.offset_y), (1.0, 1.0));
            }
            _ => panic!("Wrong shadow type"),
        }
    }
    #[test]
    fn test_box_shadow_new_with_inset() {
        let blue = ColorOrVar::new_color(Color::blue());
        let shadow = BoxShadow::new_with_inset(10.0, 4.0, blue.clone(), (2.0, 2.0));
        match shadow.shadow_box {
            Some(box_shadow::Shadow_box::Inset(inner)) => {
                assert_eq!(inner.blur_radius, 10.0);
                assert_eq!(inner.spread_radius, 4.0);
                assert_eq!(inner.color.unwrap(), blue);
                assert_eq!((inner.offset_x, inner.offset_y), (2.0, 2.0));
            }
            _ => panic!("Wrong shadow type"),
        }
    }
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_bundle/src/definition/layout.rs | crates/dc_bundle/src/definition/layout.rs | /*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::geometry::{DimensionProto, DimensionRect, Size};
use crate::layout_style::LayoutStyle;
use crate::positioning::item_spacing::{self, ItemSpacingType};
use crate::positioning::{
AlignContent, AlignItems, AlignSelf, FlexDirection, ItemSpacing, JustifyContent, PositionType,
};
use crate::Error;
use crate::Error::MissingFieldError;
impl ItemSpacing {
pub fn new_default() -> Option<Self> {
Some(Self { ItemSpacingType: Some(ItemSpacingType::Fixed(0)), ..Default::default() })
}
}
impl LayoutStyle {
    /// Borrow the bounding box, or report which field was missing.
    pub fn bounding_box(&self) -> Result<&Size, Error> {
        // `ok_or_else` defers building the error (and its String) to the
        // failure path instead of allocating on every call.
        self.bounding_box
            .as_ref()
            .ok_or_else(|| MissingFieldError { field: "bounding_box".to_string() })
    }
    /// Baseline layout style: undefined dimensions, zero flex factors, and
    /// row-oriented, stretch-aligned flex defaults.
    pub(crate) fn new_default() -> LayoutStyle {
        LayoutStyle {
            margin: Some(DimensionRect::new_with_default_value()).into(),
            padding: Some(DimensionRect::new_with_default_value()).into(),
            item_spacing: ItemSpacing::new_default().into(),
            top: DimensionProto::new_undefined(),
            left: DimensionProto::new_undefined(),
            bottom: DimensionProto::new_undefined(),
            right: DimensionProto::new_undefined(),
            width: DimensionProto::new_undefined(),
            height: DimensionProto::new_undefined(),
            min_width: DimensionProto::new_undefined(),
            max_width: DimensionProto::new_undefined(),
            min_height: DimensionProto::new_undefined(),
            max_height: DimensionProto::new_undefined(),
            bounding_box: Some(Size::default()).into(),
            flex_grow: 0.0,
            flex_shrink: 0.0,
            flex_basis: DimensionProto::new_undefined(),
            align_self: AlignSelf::ALIGN_SELF_AUTO.into(),
            align_content: AlignContent::ALIGN_CONTENT_STRETCH.into(),
            align_items: AlignItems::ALIGN_ITEMS_STRETCH.into(),
            flex_direction: FlexDirection::FLEX_DIRECTION_ROW.into(),
            justify_content: JustifyContent::JUSTIFY_CONTENT_FLEX_START.into(),
            position_type: PositionType::POSITION_TYPE_RELATIVE.into(),
            ..Default::default()
        }
    }
}
impl ItemSpacing {
    /// Fixed spacing of `value` between items.
    pub fn new_fixed(value: i32) -> Self {
        let mut spacing = Self::default();
        spacing.ItemSpacingType = Some(item_spacing::ItemSpacingType::Fixed(value));
        spacing
    }
    /// Auto spacing with the given item width and height.
    pub fn new_auto(width: i32, height: i32) -> Self {
        let auto = item_spacing::Auto { width, height, ..Default::default() };
        Self {
            ItemSpacingType: Some(item_spacing::ItemSpacingType::Auto(auto)),
            ..Default::default()
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::geometry::dimension_proto::Dimension;
    #[test]
    fn test_item_spacing_new_default() {
        let spacing = ItemSpacing::new_default().unwrap();
        // Default must be a fixed spacing of exactly 0.
        assert!(matches!(spacing.ItemSpacingType, Some(ItemSpacingType::Fixed(0))));
    }
    #[test]
    fn test_item_spacing_constructors() {
        let fixed = ItemSpacing::new_fixed(10);
        assert!(matches!(fixed.ItemSpacingType, Some(ItemSpacingType::Fixed(10))));
        let auto = ItemSpacing::new_auto(20, 30);
        match auto.ItemSpacingType {
            Some(ItemSpacingType::Auto(a)) => {
                assert_eq!(a.width, 20);
                assert_eq!(a.height, 30);
            }
            _ => panic!("expected auto item spacing"),
        }
    }
    #[test]
    fn test_layout_style_new_default() {
        let style = LayoutStyle::new_default();
        // Box-model fields are populated; dimensions start undefined.
        assert!(style.margin.is_some());
        assert!(style.padding.is_some());
        assert!(style.item_spacing.is_some());
        assert!(matches!(style.width.unwrap().Dimension, Some(Dimension::Undefined(_))));
        assert!(matches!(style.height.unwrap().Dimension, Some(Dimension::Undefined(_))));
        assert_eq!((style.flex_grow, style.flex_shrink), (0.0, 0.0));
        assert_eq!(style.align_self.enum_value().unwrap(), AlignSelf::ALIGN_SELF_AUTO);
    }
    #[test]
    fn test_layout_style_bounding_box() {
        let mut layout = LayoutStyle::new_default();
        assert!(layout.bounding_box().is_ok());
        // Clearing the field must surface the missing-field error.
        layout.bounding_box = None.into();
        assert!(layout.bounding_box().is_err());
    }
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_bundle/src/definition/modifier/affine_transform.rs | crates/dc_bundle/src/definition/modifier/affine_transform.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the same directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::matrix_transform::AffineTransform;
/// Implementations are forked from euclid Transform2D.
impl AffineTransform {
/// Create a transform specifying its matrix elements in row-major order.
///
/// Beware: This library is written with the assumption that row vectors
/// are being used. If your matrices use column vectors (i.e. transforming a vector
/// is `T * v`), then please use `column_major`
pub fn row_major(m11: f32, m12: f32, m21: f32, m22: f32, m31: f32, m32: f32) -> Self {
AffineTransform { m11, m12, m21, m22, m31, m32, ..Default::default() }
}
}
#[cfg(test)]
mod tests {
    use super::*;
    #[test]
    fn test_row_major() {
        // Elements must land in the matching matrix slots, row by row.
        let t = AffineTransform::row_major(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
        assert_eq!(
            (t.m11, t.m12, t.m21, t.m22, t.m31, t.m32),
            (1.0, 2.0, 3.0, 4.0, 5.0, 6.0)
        );
    }
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_bundle/src/definition/modifier/layout_transform.rs | crates/dc_bundle/src/definition/modifier/layout_transform.rs | // Copyright 2013 The Servo Project Developers. See the COPYRIGHT
// file at the same directory of this distribution.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.
use crate::matrix_transform::{AffineTransform, LayoutTransform};
/// Implementations are forked from euclid Transform3D.
impl LayoutTransform {
    /// Create a transform specifying its components in row-major order.
    ///
    /// For example, the translation terms m41, m42, m43 on the last row with the
    /// row-major convention) are the 13rd, 14th and 15th parameters.
    ///
    /// Beware: This library is written with the assumption that row vectors
    /// are being used. If your matrices use column vectors (i.e. transforming a vector
    /// is `T * v`), then please use `column_major`
    pub fn row_major(
        m11: f32, m12: f32, m13: f32, m14: f32,
        m21: f32, m22: f32, m23: f32, m24: f32,
        m31: f32, m32: f32, m33: f32, m34: f32,
        m41: f32, m42: f32, m43: f32, m44: f32,
    ) -> Self {
        // Any non-matrix fields of LayoutTransform keep their Default values.
        LayoutTransform {
            m11, m12, m13, m14,
            m21, m22, m23, m24,
            m31, m32, m33, m34,
            m41, m42, m43, m44,
            ..Default::default()
        }
    }

    /// Create a 4 by 4 transform representing a 2d transformation, specifying its components
    /// in row-major order:
    ///
    /// ```text
    /// m11 m12 0 0
    /// m21 m22 0 0
    /// 0   0   1 0
    /// m41 m42 0 1
    /// ```
    #[inline]
    pub fn row_major_2d(m11: f32, m12: f32, m21: f32, m22: f32, m41: f32, m42: f32) -> Self {
        Self::row_major(
            m11, m12, 0f32, 0f32, // row1
            m21, m22, 0f32, 0f32, // row2
            0f32, 0f32, 1f32, 0f32, //row3
            m41, m42, 0f32, 1f32, //row4
        )
    }

    /// Create a 3d translation transform:
    ///
    /// ```text
    /// 1 0 0 0
    /// 0 1 0 0
    /// 0 0 1 0
    /// x y z 1
    /// ```
    #[inline]
    pub fn create_translation(x: f32, y: f32, z: f32) -> Self {
        Self::row_major(
            1f32, 0f32, 0f32, 0f32, // row1
            0f32, 1f32, 0f32, 0f32, // row2
            0f32, 0f32, 1f32, 0f32, // row3
            x, y, z, 1f32, // row4
        )
    }

    /// Creates an identity matrix:
    ///
    /// ```text
    /// 1 0 0 0
    /// 0 1 0 0
    /// 0 0 1 0
    /// 0 0 0 1
    /// ```
    #[inline]
    pub fn identity() -> Self {
        // A zero translation is the identity.
        Self::create_translation(0f32, 0f32, 0f32)
    }

    /// Create a 3d rotation transform from an angle / axis.
    /// The supplied axis must be normalized.
    pub fn create_rotation(x: f32, y: f32, z: f32, theta: f32) -> Self {
        let xx = x * x;
        let yy = y * y;
        let zz = z * z;
        // Quaternion-style half-angle terms: sc = sin(θ/2)cos(θ/2), sq = sin²(θ/2).
        let half_theta = theta / 2f32;
        let sc = half_theta.sin() * half_theta.cos();
        let sq = half_theta.sin() * half_theta.sin();
        Self::row_major(
            1f32 - 2f32 * (yy + zz) * sq,
            2f32 * (x * y * sq - z * sc),
            2f32 * (x * z * sq + y * sc),
            0f32,
            2f32 * (x * y * sq + z * sc),
            1f32 - 2f32 * (xx + zz) * sq,
            2f32 * (y * z * sq - x * sc),
            0f32,
            2f32 * (x * z * sq - y * sc),
            2f32 * (y * z * sq + x * sc),
            1f32 - 2f32 * (xx + yy) * sq,
            0f32,
            0f32,
            0f32,
            0f32,
            1f32,
        )
    }

    /// Returns a transform with a rotation applied before self's transformation.
    #[must_use]
    pub fn pre_rotate(&self, x: f32, y: f32, z: f32, theta: f32) -> Self {
        self.pre_transform(&Self::create_rotation(x, y, z, theta))
    }

    /// Returns the multiplication of the two matrices such that mat's transformation
    /// applies before self's transformation.
    ///
    /// Assuming row vectors, this is equivalent to mat * self
    #[inline]
    #[must_use]
    pub fn pre_transform(&self, mat: &LayoutTransform) -> LayoutTransform {
        // pre- is just post- with the operands swapped.
        mat.post_transform(self)
    }

    /// Returns the multiplication of the two matrices such that mat's transformation
    /// applies after self's transformation.
    ///
    /// Assuming row vectors, this is equivalent to self * mat
    #[must_use]
    pub fn post_transform(&self, mat: &LayoutTransform) -> Self {
        // Standard 4x4 row-major product: out[i][j] = Σ_k self[i][k] * mat[k][j].
        Self::row_major(
            self.m11 * mat.m11 + self.m12 * mat.m21 + self.m13 * mat.m31 + self.m14 * mat.m41,
            self.m11 * mat.m12 + self.m12 * mat.m22 + self.m13 * mat.m32 + self.m14 * mat.m42,
            self.m11 * mat.m13 + self.m12 * mat.m23 + self.m13 * mat.m33 + self.m14 * mat.m43,
            self.m11 * mat.m14 + self.m12 * mat.m24 + self.m13 * mat.m34 + self.m14 * mat.m44,
            self.m21 * mat.m11 + self.m22 * mat.m21 + self.m23 * mat.m31 + self.m24 * mat.m41,
            self.m21 * mat.m12 + self.m22 * mat.m22 + self.m23 * mat.m32 + self.m24 * mat.m42,
            self.m21 * mat.m13 + self.m22 * mat.m23 + self.m23 * mat.m33 + self.m24 * mat.m43,
            self.m21 * mat.m14 + self.m22 * mat.m24 + self.m23 * mat.m34 + self.m24 * mat.m44,
            self.m31 * mat.m11 + self.m32 * mat.m21 + self.m33 * mat.m31 + self.m34 * mat.m41,
            self.m31 * mat.m12 + self.m32 * mat.m22 + self.m33 * mat.m32 + self.m34 * mat.m42,
            self.m31 * mat.m13 + self.m32 * mat.m23 + self.m33 * mat.m33 + self.m34 * mat.m43,
            self.m31 * mat.m14 + self.m32 * mat.m24 + self.m33 * mat.m34 + self.m34 * mat.m44,
            self.m41 * mat.m11 + self.m42 * mat.m21 + self.m43 * mat.m31 + self.m44 * mat.m41,
            self.m41 * mat.m12 + self.m42 * mat.m22 + self.m43 * mat.m32 + self.m44 * mat.m42,
            self.m41 * mat.m13 + self.m42 * mat.m23 + self.m43 * mat.m33 + self.m44 * mat.m43,
            self.m41 * mat.m14 + self.m42 * mat.m24 + self.m43 * mat.m34 + self.m44 * mat.m44,
        )
    }

    /// Returns a transform with a translation applied after self's transformation.
    #[must_use]
    pub fn post_translate(&self, x: f32, y: f32, z: f32) -> Self {
        self.post_transform(&Self::create_translation(x, y, z))
    }

    /// Compute the determinant of the transform.
    pub fn determinant(&self) -> f32 {
        // Fully expanded 4x4 determinant (24 signed products).
        self.m14 * self.m23 * self.m32 * self.m41 - self.m13 * self.m24 * self.m32 * self.m41
            - self.m14 * self.m22 * self.m33 * self.m41 + self.m12 * self.m24 * self.m33 * self.m41
            + self.m13 * self.m22 * self.m34 * self.m41 - self.m12 * self.m23 * self.m34 * self.m41
            - self.m14 * self.m23 * self.m31 * self.m42 + self.m13 * self.m24 * self.m31 * self.m42
            + self.m14 * self.m21 * self.m33 * self.m42 - self.m11 * self.m24 * self.m33 * self.m42
            - self.m13 * self.m21 * self.m34 * self.m42 + self.m11 * self.m23 * self.m34 * self.m42
            + self.m14 * self.m22 * self.m31 * self.m43 - self.m12 * self.m24 * self.m31 * self.m43
            - self.m14 * self.m21 * self.m32 * self.m43 + self.m11 * self.m24 * self.m32 * self.m43
            + self.m12 * self.m21 * self.m34 * self.m43 - self.m11 * self.m22 * self.m34 * self.m43
            - self.m13 * self.m22 * self.m31 * self.m44 + self.m12 * self.m23 * self.m31 * self.m44
            + self.m13 * self.m21 * self.m32 * self.m44 - self.m11 * self.m23 * self.m32 * self.m44
            - self.m12 * self.m21 * self.m33 * self.m44 + self.m11 * self.m22 * self.m33 * self.m44
    }

    /// Multiplies all of the transform's component by a scalar and returns the result.
    #[must_use]
    pub fn mul_s(&self, x: f32) -> Self {
        Self::row_major(
            self.m11 * x, self.m12 * x, self.m13 * x, self.m14 * x,
            self.m21 * x, self.m22 * x, self.m23 * x, self.m24 * x,
            self.m31 * x, self.m32 * x, self.m33 * x, self.m34 * x,
            self.m41 * x, self.m42 * x, self.m43 * x, self.m44 * x,
        )
    }

    /// Returns the inverse transform if possible.
    pub fn inverse(&self) -> Option<LayoutTransform> {
        let det = self.determinant();
        // NOTE(review): exact-zero test, as in the euclid original; a
        // near-singular matrix still "inverts" with very large components.
        if det == 0f32 {
            return None;
        }
        // Adjugate (transposed cofactor) matrix; scaled by 1/det below.
        let m = Self::row_major(
            self.m23 * self.m34 * self.m42 - self.m24 * self.m33 * self.m42
                + self.m24 * self.m32 * self.m43 - self.m22 * self.m34 * self.m43
                - self.m23 * self.m32 * self.m44 + self.m22 * self.m33 * self.m44,
            self.m14 * self.m33 * self.m42 - self.m13 * self.m34 * self.m42
                - self.m14 * self.m32 * self.m43 + self.m12 * self.m34 * self.m43
                + self.m13 * self.m32 * self.m44 - self.m12 * self.m33 * self.m44,
            self.m13 * self.m24 * self.m42 - self.m14 * self.m23 * self.m42
                + self.m14 * self.m22 * self.m43 - self.m12 * self.m24 * self.m43
                - self.m13 * self.m22 * self.m44 + self.m12 * self.m23 * self.m44,
            self.m14 * self.m23 * self.m32 - self.m13 * self.m24 * self.m32
                - self.m14 * self.m22 * self.m33 + self.m12 * self.m24 * self.m33
                + self.m13 * self.m22 * self.m34 - self.m12 * self.m23 * self.m34,
            self.m24 * self.m33 * self.m41 - self.m23 * self.m34 * self.m41
                - self.m24 * self.m31 * self.m43 + self.m21 * self.m34 * self.m43
                + self.m23 * self.m31 * self.m44 - self.m21 * self.m33 * self.m44,
            self.m13 * self.m34 * self.m41 - self.m14 * self.m33 * self.m41
                + self.m14 * self.m31 * self.m43 - self.m11 * self.m34 * self.m43
                - self.m13 * self.m31 * self.m44 + self.m11 * self.m33 * self.m44,
            self.m14 * self.m23 * self.m41 - self.m13 * self.m24 * self.m41
                - self.m14 * self.m21 * self.m43 + self.m11 * self.m24 * self.m43
                + self.m13 * self.m21 * self.m44 - self.m11 * self.m23 * self.m44,
            self.m13 * self.m24 * self.m31 - self.m14 * self.m23 * self.m31
                + self.m14 * self.m21 * self.m33 - self.m11 * self.m24 * self.m33
                - self.m13 * self.m21 * self.m34 + self.m11 * self.m23 * self.m34,
            self.m22 * self.m34 * self.m41 - self.m24 * self.m32 * self.m41
                + self.m24 * self.m31 * self.m42 - self.m21 * self.m34 * self.m42
                - self.m22 * self.m31 * self.m44 + self.m21 * self.m32 * self.m44,
            self.m14 * self.m32 * self.m41 - self.m12 * self.m34 * self.m41
                - self.m14 * self.m31 * self.m42 + self.m11 * self.m34 * self.m42
                + self.m12 * self.m31 * self.m44 - self.m11 * self.m32 * self.m44,
            self.m12 * self.m24 * self.m41 - self.m14 * self.m22 * self.m41
                + self.m14 * self.m21 * self.m42 - self.m11 * self.m24 * self.m42
                - self.m12 * self.m21 * self.m44 + self.m11 * self.m22 * self.m44,
            self.m14 * self.m22 * self.m31 - self.m12 * self.m24 * self.m31
                - self.m14 * self.m21 * self.m32 + self.m11 * self.m24 * self.m32
                + self.m12 * self.m21 * self.m34 - self.m11 * self.m22 * self.m34,
            self.m23 * self.m32 * self.m41 - self.m22 * self.m33 * self.m41
                - self.m23 * self.m31 * self.m42 + self.m21 * self.m33 * self.m42
                + self.m22 * self.m31 * self.m43 - self.m21 * self.m32 * self.m43,
            self.m12 * self.m33 * self.m41 - self.m13 * self.m32 * self.m41
                + self.m13 * self.m31 * self.m42 - self.m11 * self.m33 * self.m42
                - self.m12 * self.m31 * self.m43 + self.m11 * self.m32 * self.m43,
            self.m13 * self.m22 * self.m41 - self.m12 * self.m23 * self.m41
                - self.m13 * self.m21 * self.m42 + self.m11 * self.m23 * self.m42
                + self.m12 * self.m21 * self.m43 - self.m11 * self.m22 * self.m43,
            self.m12 * self.m23 * self.m31 - self.m13 * self.m22 * self.m31
                + self.m13 * self.m21 * self.m32 - self.m11 * self.m23 * self.m32
                - self.m12 * self.m21 * self.m33 + self.m11 * self.m22 * self.m33,
        );
        Some(m.mul_s(1f32 / det))
    }

    /// Create a 3d scale transform:
    ///
    /// ```text
    /// x 0 0 0
    /// 0 y 0 0
    /// 0 0 z 0
    /// 0 0 0 1
    /// ```
    #[inline]
    pub fn create_scale(x: f32, y: f32, z: f32) -> Self {
        Self::row_major(
            x, 0f32, 0f32, 0f32, // row1
            0f32, y, 0f32, 0f32, // row2
            0f32, 0f32, z, 0f32, //row3
            0f32, 0f32, 0f32, 1f32, //row4
        )
    }

    /// Returns a transform with a scale applied after self's transformation.
    #[must_use]
    pub fn post_scale(&self, x: f32, y: f32, z: f32) -> Self {
        self.post_transform(&Self::create_scale(x, y, z))
    }

    /// Create a 2D transform picking the relevant terms from this transform.
    ///
    /// This method assumes that self represents a 2d transformation, callers
    /// should check that [`self.is_2d()`] returns `true` beforehand.
    ///
    /// [`self.is_2d()`]: #method.is_2d
    pub fn to_2d(&self) -> AffineTransform {
        // Drops the z-related rows/columns; translation comes from row 4.
        AffineTransform::row_major(self.m11, self.m12, self.m21, self.m22, self.m41, self.m42)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::matrix_transform::AffineTransform;
    use std::f32::consts::FRAC_PI_2;

    const EPSILON: f32 = 1e-6;

    /// Assert element-wise approximate equality of two 4x4 transforms.
    fn assert_matrix_eq(a: &LayoutTransform, b: &LayoutTransform) {
        let lhs = [
            a.m11, a.m12, a.m13, a.m14, a.m21, a.m22, a.m23, a.m24, a.m31, a.m32, a.m33, a.m34,
            a.m41, a.m42, a.m43, a.m44,
        ];
        let rhs = [
            b.m11, b.m12, b.m13, b.m14, b.m21, b.m22, b.m23, b.m24, b.m31, b.m32, b.m33, b.m34,
            b.m41, b.m42, b.m43, b.m44,
        ];
        for (x, y) in lhs.iter().zip(rhs.iter()) {
            assert!((x - y).abs() < EPSILON, "matrix mismatch: {} vs {}", x, y);
        }
    }

    #[test]
    fn test_row_major() {
        let transform = LayoutTransform::row_major(
            1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0, 13.0, 14.0, 15.0, 16.0,
        );
        let expected = [
            transform.m11,
            transform.m12,
            transform.m13,
            transform.m14,
            transform.m21,
            transform.m22,
            transform.m23,
            transform.m24,
            transform.m31,
            transform.m32,
            transform.m33,
            transform.m34,
            transform.m41,
            transform.m42,
            transform.m43,
            transform.m44,
        ];
        for (i, v) in expected.iter().enumerate() {
            assert_eq!(*v, (i + 1) as f32);
        }
    }

    #[test]
    fn test_row_major_2d() {
        let transform = LayoutTransform::row_major_2d(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
        let expected = LayoutTransform::row_major(
            1.0, 2.0, 0.0, 0.0, //
            3.0, 4.0, 0.0, 0.0, //
            0.0, 0.0, 1.0, 0.0, //
            5.0, 6.0, 0.0, 1.0,
        );
        assert_matrix_eq(&transform, &expected);
    }

    #[test]
    fn test_create_translation() {
        let transform = LayoutTransform::create_translation(1.0, 2.0, 3.0);
        let expected = LayoutTransform::row_major(
            1.0, 0.0, 0.0, 0.0, //
            0.0, 1.0, 0.0, 0.0, //
            0.0, 0.0, 1.0, 0.0, //
            1.0, 2.0, 3.0, 1.0,
        );
        assert_matrix_eq(&transform, &expected);
    }

    #[test]
    fn test_identity() {
        let transform = LayoutTransform::identity();
        let expected = LayoutTransform::row_major(
            1.0, 0.0, 0.0, 0.0, //
            0.0, 1.0, 0.0, 0.0, //
            0.0, 0.0, 1.0, 0.0, //
            0.0, 0.0, 0.0, 1.0,
        );
        assert_matrix_eq(&transform, &expected);
    }

    #[test]
    fn test_create_rotation() {
        let angle = FRAC_PI_2;
        let transform = LayoutTransform::create_rotation(0.0, 0.0, 1.0, angle);
        let c = (angle / 2.0).cos();
        let s = (angle / 2.0).sin();
        #[rustfmt::skip]
        let expected = LayoutTransform::row_major(
            c * c - s * s, -2.0 * c * s, 0.0, 0.0, //
            2.0 * c * s, c * c - s * s, 0.0, 0.0, //
            0.0, 0.0, 1.0, 0.0, //
            0.0, 0.0, 0.0, 1.0,
        );
        assert_matrix_eq(&transform, &expected);
    }

    #[test]
    fn test_pre_rotate() {
        let t = LayoutTransform::create_translation(10.0, 0.0, 0.0);
        let angle = FRAC_PI_2;
        let result = t.pre_rotate(0.0, 0.0, 1.0, angle);
        let c = (angle / 2.0).cos();
        let s = (angle / 2.0).sin();
        #[rustfmt::skip]
        let r = LayoutTransform::row_major(
            c * c - s * s, -2.0 * c * s, 0.0, 0.0, //
            2.0 * c * s, c * c - s * s, 0.0, 0.0, //
            0.0, 0.0, 1.0, 0.0, //
            0.0, 0.0, 0.0, 1.0,
        );
        let expected = r.post_transform(&t);
        assert_matrix_eq(&result, &expected);
    }

    #[test]
    fn test_pre_transform() {
        let t = LayoutTransform::create_translation(10.0, 20.0, 30.0);
        let s = LayoutTransform::create_scale(2.0, 3.0, 4.0);
        let result = t.pre_transform(&s);
        let expected = s.post_transform(&t);
        assert_matrix_eq(&result, &expected);
    }

    #[test]
    fn test_post_translate() {
        let t = LayoutTransform::create_scale(2.0, 3.0, 4.0);
        let result = t.post_translate(10.0, 20.0, 30.0);
        let trans = LayoutTransform::create_translation(10.0, 20.0, 30.0);
        let expected = t.post_transform(&trans);
        assert_matrix_eq(&result, &expected);
    }

    #[test]
    fn test_post_transform() {
        // FIX: the previous version compared `t.post_transform(&s)` with an
        // identically-computed value, so it could never fail. Compare against
        // an explicitly computed expected matrix instead: translate then scale
        // (row-vector convention, T * S) scales both the basis and the
        // translation row.
        let t = LayoutTransform::create_translation(10.0, 20.0, 30.0);
        let s = LayoutTransform::create_scale(2.0, 3.0, 4.0);
        let result = t.post_transform(&s);
        let expected = LayoutTransform::row_major(
            2.0, 0.0, 0.0, 0.0, //
            0.0, 3.0, 0.0, 0.0, //
            0.0, 0.0, 4.0, 0.0, //
            20.0, 60.0, 120.0, 1.0,
        );
        assert_matrix_eq(&result, &expected);
    }

    #[test]
    fn test_inverse() {
        let transform =
            LayoutTransform::create_translation(10.0, 20.0, 30.0).post_scale(2.0, 3.0, 1.0);
        let inverse = transform.inverse().unwrap();
        let identity = transform.post_transform(&inverse);
        assert_matrix_eq(&identity, &LayoutTransform::identity());
        // A matrix with a zero determinant has no inverse.
        let singular = LayoutTransform::create_scale(1.0, 1.0, 0.0);
        assert!(singular.inverse().is_none());
    }

    #[test]
    fn test_to_2d() {
        let transform_3d = LayoutTransform::row_major_2d(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
        let transform_2d = transform_3d.to_2d();
        let expected = AffineTransform::row_major(1.0, 2.0, 3.0, 4.0, 5.0, 6.0);
        assert_eq!(transform_2d.m11, expected.m11);
        assert_eq!(transform_2d.m12, expected.m12);
        assert_eq!(transform_2d.m21, expected.m21);
        assert_eq!(transform_2d.m22, expected.m22);
        assert_eq!(transform_2d.m31, expected.m31);
        assert_eq!(transform_2d.m32, expected.m32);
    }
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_jni/src/lib.rs | crates/dc_jni/src/lib.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Android logcat-backed logging for the native layer.
extern crate android_logger;
extern crate log;

// Module layout of the dc_jni crate (native side of the DesignCompose JNI bridge).
mod android_interface; // Figma document fetch / convert request handling.
mod error; // Crate-local `Error` type and Java-exception helper.
mod error_map; // Maps Rust errors to Java exceptions (see `map_err_to_exception`).
mod jni; // `JNI_OnLoad`, native method registration, fetch entry points.
mod layout_manager; // Layout-computation JNI entry points.
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_jni/src/layout_manager.rs | crates/dc_jni/src/layout_manager.rs | // Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use std::sync::atomic::{AtomicI32, Ordering};
use std::sync::{Arc, Mutex, MutexGuard};
use dc_bundle::jni_layout::{LayoutChangedResponse, LayoutNodeList, LayoutParentChildren};
use dc_layout::LayoutManager;
use jni::objects::{JByteArray, JClass, JObject, JValue, JValueGen};
use jni::sys::{jboolean, jint};
use jni::JNIEnv;
use lazy_static::lazy_static;
use log::{error, info};
use crate::error::{throw_basic_exception, Error, Error::GenericError};
use crate::jni::javavm;
use protobuf::Message;
lazy_static! {
    // Registry of live layout managers, keyed by the integer handle returned
    // to the Java side by `create_layout_manager`.
    static ref LAYOUT_MANAGERS: Mutex<HashMap<i32, Arc<Mutex<LayoutManager>>>> =
        Mutex::new(HashMap::new());
}
// Monotonically increasing source of manager handles.
static LAYOUT_MANAGER_ID: AtomicI32 = AtomicI32::new(0);
/// Allocate a fresh `LayoutManager` (wired to the Java text-measure callback),
/// register it in the global table, and return its handle id.
fn create_layout_manager() -> i32 {
    let id = LAYOUT_MANAGER_ID.fetch_add(1, Ordering::SeqCst);
    LAYOUT_MANAGERS
        .lock()
        .unwrap()
        .insert(id, Arc::new(Mutex::new(LayoutManager::new(java_jni_measure_text))));
    id
}
/// Look up a layout manager by the handle returned from `create_layout_manager`.
///
/// Returns a cloned `Arc` so the registry lock is released before the caller
/// locks the manager itself. `None` when the id is unknown.
fn manager(id: i32) -> Option<Arc<Mutex<LayoutManager>>> {
    // `.cloned()` replaces the previous `map(|m| m.clone())` — same behavior,
    // idiomatic Option combinator.
    LAYOUT_MANAGERS.lock().unwrap().get(&id).cloned()
}
/// Serialize a `LayoutChangedResponse` into a length-delimited protobuf byte
/// array for the Java side.
///
/// On serialization or JNI failure a Java exception is thrown on `env` and a
/// null array reference is returned.
fn layout_response_to_bytearray(
    mut env: JNIEnv,
    layout_response: LayoutChangedResponse,
) -> JByteArray {
    let mut bytes: Vec<u8> = vec![];
    let result = layout_response.write_length_delimited_to_vec(&mut bytes);
    match result {
        Err(err) => {
            throw_basic_exception(&mut env, &err);
            JObject::null().into()
        }
        // Serialization succeeded; copy the bytes into a Java byte[].
        _ => match env.byte_array_from_slice(bytes.as_slice()) {
            Ok(it) => it,
            Err(err) => {
                throw_basic_exception(&mut env, &err);
                JObject::null().into()
            }
        },
    }
}
/// JNI entry point (`Jni.jniCreateLayoutManager`): create a new layout manager
/// and return its handle id to Java.
pub(crate) fn jni_create_layout_manager(_env: JNIEnv, _class: JClass) -> i32 {
    create_layout_manager()
}
/// JNI entry point (`Jni.jniSetNodeSize`): set a node's size and recompute the
/// layout rooted at `root_layout_id`.
///
/// Returns the serialized `LayoutChangedResponse`; on an unknown `manager_id`
/// a Java exception is thrown and a null array is returned.
pub(crate) fn jni_set_node_size<'local>(
    mut env: JNIEnv<'local>,
    _class: JClass,
    manager_id: jint,
    layout_id: jint,
    root_layout_id: jint,
    width: jint,
    height: jint,
) -> JByteArray<'local> {
    let manager = manager(manager_id);
    if let Some(manager_ref) = manager {
        let mut mgr = manager_ref.lock().unwrap();
        let layout_response =
            mgr.set_node_size(layout_id, root_layout_id, width as u32, height as u32);
        layout_response_to_bytearray(env, layout_response)
    } else {
        throw_basic_exception(
            &mut env,
            &GenericError(format!("No manager with id {}", manager_id)),
        );
        JObject::null().into()
    }
}
/// JNI entry point (`Jni.jniAddNodes`): decode a protobuf `LayoutNodeList`
/// from Java, register all of its styles and parent/child relationships with
/// the manager, then compute and return the layout for `root_layout_id`.
///
/// On any failure a Java exception is thrown and a null array is returned.
pub(crate) fn jni_add_nodes<'local>(
    mut env: JNIEnv<'local>,
    _class: JClass,
    manager_id: jint,
    root_layout_id: jint,
    serialized_views: JByteArray,
) -> JByteArray<'local> {
    let manager_ref = match manager(manager_id) {
        Some(it) => it,
        None => {
            throw_basic_exception(
                &mut env,
                &GenericError(format!("No manager with id {}", manager_id)),
            );
            return JObject::null().into();
        }
    };
    let mut manager = manager_ref.lock().unwrap();
    // Local helper: copy the Java byte[] out and parse it as a LayoutNodeList.
    fn deprotolize_layout_node_list(
        env: &mut JNIEnv,
        serialized_views: JByteArray,
    ) -> Result<LayoutNodeList, Error> {
        let bytes_views: Vec<u8> = env.convert_byte_array(serialized_views)?;
        LayoutNodeList::parse_from_bytes(bytes_views.as_slice()).map_err(Error::from)
    }
    match deprotolize_layout_node_list(&mut env, serialized_views) {
        Ok(node_list) => {
            if let Err(e) = handle_layout_node_list(node_list, &mut manager) {
                info!("jni_add_nodes: Error handling layout node list: {:?}", e);
                throw_basic_exception(&mut env, &e);
                return JObject::null().into();
            }
            // All nodes registered; compute and serialize the resulting layout.
            let layout_response = manager.compute_node_layout(root_layout_id);
            layout_response_to_bytearray(env, layout_response)
        }
        Err(e) => {
            info!("jni_add_nodes: failed with error {:?}", e);
            throw_basic_exception(&mut env, &e);
            JObject::null().into()
        }
    }
}
fn handle_layout_node_list(
node_list: LayoutNodeList,
manager: &mut MutexGuard<LayoutManager>,
) -> Result<(), Error> {
for node in node_list.layout_nodes.into_iter() {
manager.add_style(
node.layout_id,
node.parent_layout_id,
node.child_index,
node.style.into_option().expect("Malformed Data, style is required"),
node.name,
node.use_measure_func,
if node.use_measure_func { None } else { node.fixed_width },
if node.use_measure_func { None } else { node.fixed_height },
)?;
}
for LayoutParentChildren { parent_layout_id, child_layout_ids, .. } in
&node_list.parent_children
{
manager.update_children(*parent_layout_id, child_layout_ids)
}
Ok(())
}
/// JNI entry point (`Jni.jniRemoveNode`): remove a view from the layout tree,
/// optionally recomputing layout (`compute_layout != 0` converts the jboolean).
///
/// Returns the serialized `LayoutChangedResponse`; throws and returns null for
/// an unknown `manager_id`.
pub(crate) fn jni_remove_node<'local>(
    mut env: JNIEnv<'local>,
    _class: JClass,
    manager_id: jint,
    layout_id: jint,
    root_layout_id: jint,
    compute_layout: jboolean,
) -> JByteArray<'local> {
    let manager = manager(manager_id);
    if let Some(manager_ref) = manager {
        let mut manager = manager_ref.lock().unwrap();
        let layout_response = manager.remove_view(layout_id, root_layout_id, compute_layout != 0);
        layout_response_to_bytearray(env, layout_response)
    } else {
        throw_basic_exception(
            &mut env,
            &GenericError(format!("No manager with id {}", manager_id)),
        );
        JObject::null().into()
    }
}
/// JNI entry point (`Jni.jniMarkDirty`): mark a node dirty so its layout is
/// recomputed on the next pass. Throws a Java exception for an unknown
/// `manager_id`.
pub(crate) fn jni_mark_dirty<'local>(
    mut env: JNIEnv<'local>,
    _class: JClass,
    manager_id: jint,
    layout_id: jint,
) {
    if let Some(manager_ref) = manager(manager_id) {
        let mut manager = manager_ref.lock().unwrap();
        manager.mark_dirty(layout_id);
    } else {
        throw_basic_exception(
            &mut env,
            &GenericError(format!("No manager with id {}", manager_id)),
        );
    }
}
/// Read the `width` and `height` float fields out of a Java `TextSize` object.
fn get_text_size(env: &mut JNIEnv, input: &JObject) -> Result<(f32, f32), jni::errors::Error> {
    Ok((
        env.get_field(input, "width", "F")?.f()?,
        env.get_field(input, "height", "F")?.f()?,
    ))
}
/// Text-measurement callback passed to `LayoutManager::new`: calls back into
/// Java (`DesignTextMeasure.measureTextSize`) to measure the text node
/// identified by `layout_id`.
///
/// Every failure path (no cached JavaVM, class lookup, method call, field
/// read) is logged and falls through to returning `(0.0, 0.0)`.
pub(crate) fn java_jni_measure_text(
    layout_id: i32,
    width: f32,
    height: f32,
    available_width: f32,
    available_height: f32,
) -> (f32, f32) {
    if let Some(vm) = javavm() {
        let mut env: JNIEnv<'_> = vm.get_env().expect("Cannot get reference to the JNIEnv");
        let class_result = env.find_class("com/android/designcompose/DesignTextMeasure");
        match class_result {
            Ok(jclass) => {
                // Signature: (IFFFF) -> TextSize.
                let call_result = env.call_static_method(
                    jclass,
                    "measureTextSize",
                    "(IFFFF)Lcom/android/designcompose/TextSize;",
                    &[
                        JValue::from(layout_id),
                        JValue::from(width),
                        JValue::from(height),
                        JValue::from(available_width),
                        JValue::from(available_height),
                    ],
                );
                match &call_result {
                    Ok(text_size_object) => {
                        if let JValueGen::Object(o) = text_size_object {
                            // Pull width/height floats out of the returned TextSize.
                            let text_size_result = get_text_size(&mut env, o);
                            match text_size_result {
                                Ok(text_size) => return text_size,
                                Err(e) => {
                                    error!("JNI get_text_size failed: {}", e);
                                }
                            }
                        }
                    }
                    Err(e) => {
                        error!("Java JNI measureTextSize() error: {:?}", e);
                    }
                }
            }
            Err(e) => {
                error!("Java JNI failed to find class DesignTextMeasure: {}", e);
            }
        }
    } else {
        error!("Java JNI failed to get JavaVM");
    }
    // Fallback when any step above failed.
    (0.0, 0.0)
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_jni/src/error.rs | crates/dc_jni/src/error.rs | // Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use jni::JNIEnv;
use log::error;
use thiserror::Error;
/// Error type for the dc_jni crate; each variant's `#[error]` string becomes
/// the Java exception message via `throw_basic_exception`.
#[derive(Error, Debug)]
pub enum Error {
    /// Free-form error message produced inside this crate.
    #[error("Error: {0}")]
    GenericError(String),
    /// Failed to decode a protobuf message.
    #[error("Protobuf Error: {0:#?}")]
    ProtobufDecodeError(#[from] protobuf::Error),
    /// A required field was missing during dc_bundle conversion.
    #[error("Protobuf ConversionError: {0}")]
    MissingFieldError(#[from] dc_bundle::Error),
    /// JSON (de)serialization failure.
    #[error("Json Serialization Error")]
    JsonError(#[from] serde_json::Error),
    /// Error surfaced by the Figma import crate.
    #[error("DC_figma_import Error")]
    FigmaImportError(#[from] dc_figma_import::Error),
    /// Error from the jni crate itself.
    #[error("JNI Error")]
    JNIError(#[from] jni::errors::Error),
    /// Failed to serialize a protobuf message.
    #[error("Protobuf Write Error")]
    ProtobufWriteError(String),
}
#[allow(unused_qualifications)] // Linter thinks `std:` isn't needed but removing it fails the build
/// Throw `err`'s `Display` message as a Java exception on `env`.
///
/// # Panics
/// Panics if the throw itself fails — see the comment below.
pub fn throw_basic_exception(env: &mut JNIEnv, err: &dyn std::error::Error) {
    // An error occurring while trying to throw an exception shouldn't happen,
    // but let's at least panic with a decent error message
    env.throw(err.to_string()).expect("Error while trying to throw Exception");
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_jni/src/android_interface.rs | crates/dc_jni/src/android_interface.rs | /*
* Copyright 2024 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
// Figma document fetch/convert request handling (see `fetch_doc`).
pub mod convert_request;
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_jni/src/jni.rs | crates/dc_jni/src/jni.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::ffi::c_void;
use std::sync::{Arc, Mutex};
use crate::android_interface::convert_request::fetch_doc;
use crate::error::{throw_basic_exception, Error};
use crate::error_map::map_err_to_exception;
use crate::layout_manager::{
jni_add_nodes, jni_create_layout_manager, jni_mark_dirty, jni_remove_node, jni_set_node_size,
};
use android_logger::Config;
use dc_figma_import::ProxyConfig;
use jni::objects::{JByteArray, JClass, JObject, JString};
use jni::sys::{jint, JNI_VERSION_1_6};
use jni::{JNIEnv, JavaVM};
use dc_bundle::android_interface::{ConvertRequest, ConvertResponse};
use lazy_static::lazy_static;
use log::{error, info, LevelFilter};
use protobuf::Message;
lazy_static! {
    // JavaVM captured in `JNI_OnLoad`, so native code (e.g. the text-measure
    // callback in layout_manager) can obtain a JNIEnv and call back into Java.
    static ref JAVA_VM: Mutex<Option<Arc<JavaVM>>> = Mutex::new(None);
}
/// Return a handle to the cached `JavaVM`, if `JNI_OnLoad` has stored one.
pub fn javavm() -> Option<Arc<JavaVM>> {
    JAVA_VM.lock().unwrap().as_ref().map(Arc::clone)
}
/// Store the `JavaVM` captured in `JNI_OnLoad` for later use via `javavm()`.
fn set_javavm(vm: JavaVM) {
    *JAVA_VM.lock().unwrap() = Some(Arc::new(vm))
}
/// Copy a Java string into a Rust `String`.
///
/// Returns `None` when `obj` is null or not a `java.lang.String`.
fn get_string(env: &mut JNIEnv, obj: &JObject) -> Option<String> {
    env.get_string(obj.into()).ok().map(|s| s.into())
}
/// Read the `proxySpec` string out of the Kotlin proxy-config object
/// (`input.httpProxyConfig.proxySpec`). A null/absent spec maps to
/// `ProxyConfig::None`.
fn get_proxy_config(env: &mut JNIEnv, input: &JObject) -> Result<ProxyConfig, jni::errors::Error> {
    let http_proxy_config = env
        .get_field(input, "httpProxyConfig", "Lcom/android/designcompose/HttpProxyConfig;")?
        .l()?;
    let proxy_spec_field =
        env.get_field(http_proxy_config, "proxySpec", "Ljava/lang/String;")?.l()?;
    Ok(match get_string(env, &proxy_spec_field) {
        Some(x) => ProxyConfig::HttpProxyConfig(x),
        None => ProxyConfig::None,
    })
}
fn jni_fetch_doc<'local>(
mut env: JNIEnv<'local>,
_class: JClass,
jdoc_id: JString,
jversion_id: JString,
jrequest: JByteArray,
jproxy_config: JObject,
) -> JByteArray<'local> {
let doc_id: String = match env.get_string(&jdoc_id) {
Ok(it) => it.into(),
Err(err) => {
throw_basic_exception(&mut env, &err);
return JObject::null().into();
}
};
let version_id: String = match env.get_string(&jversion_id) {
Ok(it) => it.into(),
Err(err) => {
throw_basic_exception(&mut env, &err);
return JObject::null().into();
}
};
let request_bytes: Vec<u8> = match env.convert_byte_array(&jrequest) {
Ok(it) => it.into(),
Err(err) => {
throw_basic_exception(&mut env, &err);
return JObject::null().into();
}
};
let mut request: ConvertRequest = ConvertRequest::new();
match request.merge_from_bytes(&request_bytes).map_err(Error::from) {
Err(err) => {
throw_basic_exception(&mut env, &err);
}
_ => {}
};
let proxy_config: ProxyConfig = match get_proxy_config(&mut env, &jproxy_config) {
Ok(it) => it,
Err(_) => ProxyConfig::None,
};
let ser_result = match jni_fetch_doc_impl(&mut env, doc_id, version_id, request, &proxy_config)
{
Ok(it) => it,
Err(_err) => {
return JObject::null().into();
}
};
match env.byte_array_from_slice(&ser_result) {
Ok(it) => it,
Err(err) => {
throw_basic_exception(&mut env, &err);
JObject::null().into()
}
}
}
/// Fetch `doc_id` at `version_id` from Figma and serialize the resulting
/// `ConvertResponse` as a length-delimited protobuf message.
///
/// On fetch failure a reproduction hint for the local `fetch` tool is logged,
/// a Java exception is thrown (via `map_err_to_exception`), and the error is
/// returned so the caller can bail out.
fn jni_fetch_doc_impl(
    env: &mut JNIEnv,
    doc_id: String,
    version_id: String,
    request: ConvertRequest,
    proxy_config: &ProxyConfig,
) -> Result<Vec<u8>, Error> {
    let convert_result: ConvertResponse =
        match fetch_doc(&doc_id, &version_id, &request, proxy_config).map_err(Error::from) {
            Ok(it) => it,
            Err(err) => {
                // Build the --nodes arguments for the logged fetch command line.
                let queries_string = request
                    .queries
                    .iter()
                    .map(|q| format!("--nodes=\"{}\" ", q))
                    .collect::<Vec<String>>()
                    .join(" ");
                info!("Failed to fetch {}, Try fetching locally", doc_id);
                info!("fetch --doc-id={} --version-id={} {} ", doc_id, version_id, queries_string);
                map_err_to_exception(env, &err, doc_id).expect("Failed to throw exception");
                return Err(err);
            }
        };
    Ok(convert_result
        .write_length_delimited_to_bytes()
        .map_err(|e| Error::ProtobufWriteError(format!("Failed to write convert_result: {}", e)))?)
}
/// Library load hook: initializes logging, registers all native methods on
/// `com.android.designcompose.Jni`, and caches the `JavaVM` for later
/// callbacks into Java. Returns the JNI version this library targets.
#[allow(non_snake_case)]
#[no_mangle]
pub extern "system" fn JNI_OnLoad(vm: JavaVM, _: *mut c_void) -> jint {
    // Enable the logger, limit the log level to info to reduce spam
    android_logger::init_once(Config::default().with_tag("Jni").with_max_level(LevelFilter::Info));
    let mut env: JNIEnv<'_> = vm.get_env().expect("Cannot get reference to the JNIEnv");
    let cls = env
        .find_class("com/android/designcompose/Jni")
        .expect("Unable to find com.android.designcompose.Jni class");
    // Each entry binds a Java `native` method name/signature to its Rust fn.
    if env
        .register_native_methods(
            cls,
            &[
                jni::NativeMethod {
                    name: "jniFetchDoc".into(),
                    sig: "(Ljava/lang/String;Ljava/lang/String;[BLcom/android/designcompose/ProxyConfig;)[B".into(),
                    fn_ptr: jni_fetch_doc as *mut c_void,
                },
                jni::NativeMethod {
                    name: "jniCreateLayoutManager".into(),
                    sig: "()I".into(),
                    fn_ptr: jni_create_layout_manager as *mut c_void,
                },
                jni::NativeMethod {
                    name: "jniSetNodeSize".into(),
                    sig: "(IIIII)[B".into(),
                    fn_ptr: jni_set_node_size as *mut c_void,
                },
                jni::NativeMethod {
                    name: "jniAddNodes".into(),
                    sig: "(II[B)[B".into(),
                    fn_ptr: jni_add_nodes as *mut c_void,
                },
                jni::NativeMethod {
                    name: "jniRemoveNode".into(),
                    sig: "(IIIZ)[B".into(),
                    fn_ptr: jni_remove_node as *mut c_void,
                },
                jni::NativeMethod {
                    name: "jniMarkDirty".into(),
                    sig: "(II)V".into(),
                    fn_ptr: jni_mark_dirty as *mut c_void,
                }
            ],
        )
        .is_err()
    {
        error!("Unable to register native methods");
    }
    // Save the Java VM so we can call back into Android
    set_javavm(vm);
    JNI_VERSION_1_6
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_jni/src/error_map.rs | crates/dc_jni/src/error_map.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use dc_figma_import::Error::NetworkError;
use jni::JNIEnv;
use log::error;
use std::error::Error;
pub fn map_err_to_exception(
env: &mut JNIEnv,
err: &crate::error::Error,
doc_id: String,
) -> Result<(), jni::errors::Error> {
match err {
crate::error::Error::FigmaImportError(NetworkError(network_error)) => {
error!("Network Error: {}, {}", err, err.source().unwrap().to_string());
if let Some(status) = network_error.status() {
match status.as_u16() {
400 => env.throw_new(
"com/android/designcompose/FetchException",
format!("Bad request: {}", status),
)?,
403 => env.throw_new(
"com/android/designcompose/AccessDeniedException",
"Invalid Authentication Token",
)?,
404 => env.throw_new(
"com/android/designcompose/FigmaFileNotFoundException",
doc_id,
)?,
429 => env.throw_new(
"com/android/designcompose/RateLimitedException",
"Figma Rate Limit Exceeded",
)?,
500 => env.throw_new(
"com/android/designcompose/InternalFigmaErrorException",
"Figma.com internal error",
)?,
code => env.throw_new(
"com/android/designcompose/FetchException",
format!("Unhandled response from server: {}", code),
)?,
}
} else if network_error.is_connect() || network_error.is_timeout() {
env.throw_new(
"java/net/ConnectException",
format!("Network error: {}", network_error),
)?
} else {
env.throw_new(
"java/net/SocketException",
format!("Network error: {}", network_error),
)?
}
}
_ => {
error!("Unhandled: {}, {}", err, err.source().unwrap().to_string());
env.throw(format!("{}", err))?;
}
};
Ok(())
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_jni/src/android_interface/convert_request.rs | crates/dc_jni/src/android_interface/convert_request.rs | // Copyright 2025 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::error::Error;
use dc_bundle::android_interface::{convert_response, ConvertRequest, ConvertResponse};
use dc_bundle::definition::NodeQuery;
use dc_bundle::design_compose_definition::{
DesignComposeDefinition, DesignComposeDefinitionHeader,
};
use dc_bundle::figma_doc::ServerFigmaDoc;
use dc_figma_import::HiddenNodePolicy;
use dc_figma_import::ImageContextSession;
use dc_figma_import::ProxyConfig;
/// Fetch document `id` at `requested_version_id` from Figma and convert the
/// requested nodes into a `ConvertResponse`.
///
/// If the server copy still matches the `last_modified`/`version` the client
/// already holds, an `Unmodified` response is returned instead of a full
/// document.
pub fn fetch_doc(
    id: &str,
    requested_version_id: &str,
    rq: &ConvertRequest,
    proxy_config: &ProxyConfig,
) -> Result<ConvertResponse, Error> {
    // A missing or unparseable session blob just means we start a fresh image
    // session (combinator chain replaces the nested match-on-Option).
    let image_session: Option<ImageContextSession> =
        rq.image_session_json.as_ref().and_then(|json| serde_json::from_slice(json).ok());
    if let Some(mut doc) = dc_figma_import::Document::new_if_changed(
        &rq.figma_api_key,
        id.into(),
        requested_version_id.into(),
        proxy_config,
        rq.last_modified.clone().unwrap_or_default(),
        rq.version.clone().unwrap_or_default(),
        image_session,
    )? {
        // The document has changed since the version the client has, so we should fetch
        // a new copy.
        let mut error_list: Vec<String> = vec![];
        let views = doc.nodes(
            &rq.queries.iter().map(NodeQuery::name).collect(),
            &rq.ignored_images
                .iter()
                .map(|imgref| (NodeQuery::name(imgref.node.clone()), imgref.images.clone()))
                .collect(),
            &mut error_list,
            HiddenNodePolicy::Skip, // skip hidden nodes
        )?;
        let variable_map = doc.build_variable_map();
        let figma_doc = DesignComposeDefinition::new_with_details(
            views,
            doc.encoded_image_map(),
            doc.component_sets().clone(),
            variable_map,
        );
        let header = DesignComposeDefinitionHeader::current(
            doc.last_modified().clone(),
            doc.get_name(),
            doc.get_version(),
            doc.get_document_id(),
        );
        let server_doc = ServerFigmaDoc {
            figma_doc: Some(figma_doc).into(),
            errors: error_list,
            branches: doc.branches.clone(),
            project_files: vec![],
            ..Default::default()
        };
        Ok(ConvertResponse {
            convert_response_type: Some(convert_response::Convert_response_type::Document(
                convert_response::Document {
                    header: Some(header).into(),
                    server_doc: Some(server_doc).into(),
                    // Return the image session as a JSON blob; we might want to encode this differently so we
                    // can be more robust if there's corruption.
                    image_session_json: serde_json::to_vec(&doc.image_session())?,
                    ..Default::default()
                },
            )),
            ..Default::default()
        })
    } else {
        Ok(ConvertResponse {
            convert_response_type: Some(convert_response::Convert_response_type::Unmodified(
                "x".into(),
            )),
            ..Default::default()
        })
    }
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_figma_import/src/scalableui_schema.rs | crates/dc_figma_import/src/scalableui_schema.rs | /*
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use dc_bundle::scalable;
use dc_bundle::scalable::ScalableUIComponentSet;
use serde::{Deserialize, Serialize};
//
// Schema data for component sets
//
/// JSON schema for a state-transition event authored in the ScalableUI plugin.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
struct Event {
    event_name: String,
    event_tokens: String,
    from_variant_id: String,
    from_variant_name: String,
    to_variant_id: String,
    to_variant_name: String,
}
// Implement `From` rather than `Into` (clippy: from_over_into); the standard
// blanket impl derives `Into` from it, so existing `.into()` call sites keep
// working.
impl From<&Event> for scalable::Event {
    fn from(e: &Event) -> Self {
        scalable::Event {
            event_name: e.event_name.clone(),
            event_tokens: e.event_tokens.clone(),
            from_variant_id: e.from_variant_id.clone(),
            from_variant_name: e.from_variant_name.clone(),
            to_variant_id: e.to_variant_id.clone(),
            to_variant_name: e.to_variant_name.clone(),
            ..Default::default()
        }
    }
}
/// JSON schema for a single keyframe within a keyframe-animated variant.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
struct Keyframe {
    frame: i32,
    variant_name: String,
}
// `From` instead of `Into` (clippy: from_over_into); the blanket impl keeps
// `.into()` call sites working.
impl From<&Keyframe> for scalable::Keyframe {
    fn from(kf: &Keyframe) -> Self {
        scalable::Keyframe {
            frame: kf.frame,
            variant_name: kf.variant_name.clone(),
            ..Default::default()
        }
    }
}
/// JSON schema for a variant animated by a list of keyframes.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
struct KeyframeVariant {
    name: String,
    keyframes: Vec<Keyframe>,
}
// `From` instead of `Into` (clippy: from_over_into); the blanket impl keeps
// `.into()` call sites working.
impl From<&KeyframeVariant> for scalable::KeyframeVariant {
    fn from(kfv: &KeyframeVariant) -> Self {
        scalable::KeyframeVariant {
            name: kfv.name.clone(),
            keyframes: kfv.keyframes.iter().map(|kf| kf.into()).collect(),
            ..Default::default()
        }
    }
}
/// JSON schema for the ScalableUI data attached to a component set node.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) struct ComponentSetDataJson {
    id: String,
    name: String,
    role: String,
    default_variant_id: String,
    default_variant_name: String,
    event_list: Vec<Event>,
    keyframe_variants: Vec<KeyframeVariant>,
}
// `From` instead of `Into` (clippy: from_over_into); the blanket impl keeps
// `.into()` call sites working.
impl From<ComponentSetDataJson> for ScalableUIComponentSet {
    fn from(json: ComponentSetDataJson) -> Self {
        ScalableUIComponentSet {
            id: json.id,
            name: json.name,
            role: json.role,
            default_variant_id: json.default_variant_id,
            default_variant_name: json.default_variant_name,
            events: json.event_list.iter().map(|e| e.into()).collect(),
            keyframe_variants: json.keyframe_variants.iter().map(|kfv| kfv.into()).collect(),
            // Starts empty here; presumably filled in later during import —
            // TODO(review): confirm against the caller.
            variant_ids: vec![],
            ..Default::default()
        }
    }
}
//
// Schema data for variants
//
/// JSON schema for the ScalableUI data attached to a single variant node.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
#[serde(rename_all = "camelCase")]
pub(crate) struct VariantDataJson {
    id: String,
    name: String,
    is_default: bool,
    layer: i32,
}
// `From` instead of `Into` (clippy: from_over_into); the blanket impl keeps
// `.into()` call sites working.
impl From<VariantDataJson> for scalable::ScalableUiVariant {
    fn from(json: VariantDataJson) -> Self {
        scalable::ScalableUiVariant {
            id: json.id,
            name: json.name,
            is_default: json.is_default,
            // Imported variants start visible, unpositioned, and fully opaque.
            is_visible: true,
            bounds: None.into(),
            alpha: 1.0,
            layer: json.layer,
            ..Default::default()
        }
    }
}
//
// ScalableUiDataJson represents the schema for any node that has scalable ui data
//
/// ScalableUiDataJson represents the schema for any node that has scalable ui
/// data. `untagged` lets serde pick whichever variant the JSON shape matches.
#[derive(Deserialize, Serialize, Debug, Clone)]
#[serde(untagged)]
pub(crate) enum ScalableUiDataJson {
    Set(ComponentSetDataJson),
    Variant(VariantDataJson),
}
// `From` instead of `Into` (clippy: from_over_into); the blanket impl keeps
// `.into()` call sites working.
impl From<ScalableUiDataJson> for scalable::ScalableUIData {
    fn from(json: ScalableUiDataJson) -> Self {
        scalable::ScalableUIData {
            data: Some(match json {
                ScalableUiDataJson::Set(set) => scalable::scalable_uidata::Data::Set(set.into()),
                ScalableUiDataJson::Variant(var) => {
                    scalable::scalable_uidata::Data::Variant(var.into())
                }
            }),
            ..Default::default()
        }
    }
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_figma_import/src/proxy_config.rs | crates/dc_figma_import/src/proxy_config.rs | // Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Proxy configuration.
/// Selects how outbound HTTP requests to Figma are routed.
#[derive(Debug, Clone)]
pub enum ProxyConfig {
    // HTTP Proxy Config in <host>:<port> format
    HttpProxyConfig(String),
    /// No proxy configured; connect directly.
    None,
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_figma_import/src/shader_schema.rs | crates/dc_figma_import/src/shader_schema.rs | /*
* Copyright 2025 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
use crate::figma_schema::FigmaColor;
use crate::image_context::ImageContext;
use dc_bundle::color::Color;
use dc_bundle::shader::shader_uniform_value::Value_type::{
FloatColorValue, FloatValue, FloatVecValue, ImageRefValue, IntValue, IntVecValue,
};
use dc_bundle::shader::shader_uniform_value::{FloatVec, ImageRef, IntVec};
use dc_bundle::shader::{ShaderData, ShaderUniform, ShaderUniformValue};
use log::error;
use protobuf::MessageField;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
/// JSON schema for shader plugin data attached to a node.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct ShaderDataJson {
    // Shader source; `None` means the node has no shader at all.
    pub shader: Option<String>,
    // Color drawn instead of the shader where shaders are unsupported.
    #[serde(rename = "shaderFallbackColor")]
    pub shader_fallback_color: Option<FigmaColor>,
    #[serde(rename = "shaderUniforms")]
    pub shader_uniforms: Vec<ShaderUniformJson>,
}
impl ShaderDataJson {
    /// Convert this JSON blob into a `ShaderData` proto, resolving any image
    /// uniforms through `images`. Returns `None` when there is no shader
    /// source.
    pub fn into_shader_data(self, images: &mut ImageContext) -> Option<ShaderData> {
        // No shader source means no shader data; `?` replaces the previous
        // `return if let ... else { None };` construction (clippy:
        // question_mark / needless_return).
        let shader = self.shader?;
        // Shader fallback color is the color used when shader isn't supported on lower sdks.
        let shader_fallback_color: MessageField<Color> =
            self.shader_fallback_color.as_ref().map(|figma_color| figma_color.into()).into();
        // Shader uniforms: float, float array, color and color with alpha
        let shader_uniforms: HashMap<String, ShaderUniform> = self
            .shader_uniforms
            .into_iter()
            .map(|json| json.into_shader_uniform(images))
            .collect();
        Some(ShaderData { shader, shader_fallback_color, shader_uniforms, ..Default::default() })
    }
}
/// JSON schema for a single shader uniform authored in the plugin.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct ShaderUniformJson {
    #[serde(rename = "uniformName")]
    pub uniform_name: String,
    // Type tag (e.g. "float", "float4", "color3", "int", "shader") that
    // drives how `uniform_value` is interpreted.
    #[serde(rename = "uniformType")]
    pub uniform_type: String,
    // Untyped JSON value; parsed according to `uniform_type`.
    #[serde(rename = "uniformValue")]
    pub uniform_value: serde_json::Value,
    pub extras: Option<ShaderExtrasJson>,
}
impl ShaderUniformJson {
    /// Convert this JSON uniform into a `(name, ShaderUniform)` map entry.
    ///
    /// The `uniform_type` string selects how `uniform_value` is parsed. A
    /// value that fails to parse, has the wrong element count, or an unknown
    /// type yields a `ShaderUniform` whose `value` is empty (errors are logged
    /// but not propagated). Image ("shader") uniforms are resolved through
    /// `images`.
    pub fn into_shader_uniform(self, images: &mut ImageContext) -> (String, ShaderUniform) {
        let uniform_value = match self.uniform_type.as_str() {
            // Scalar floats; `iTime` is treated as a float-valued built-in.
            "float" | "half" | "iTime" => {
                if let Some(float_val) = self.uniform_value.as_f64() {
                    Some(ShaderUniformValue {
                        value_type: Some(FloatValue(float_val as f32)),
                        ..Default::default()
                    })
                } else {
                    error!("Error parsing float for shader float uniform {}", self.uniform_name);
                    None
                }
            }
            // Float vectors and matrices. The element count must match the
            // declared type (e.g. 2 for float2, 4 for mat2, 9 for mat3,
            // 16 for mat4); otherwise the value is dropped.
            "float2" | "float3" | "float4" | "half2" | "half3" | "half4" | "mat2" | "mat3"
            | "mat4" | "half2x2" | "half3x3" | "half4x4" => {
                if let Some(uniform_array) = self.uniform_value.as_array() {
                    // Non-numeric elements are silently skipped here, so a
                    // malformed array also fails the length check below.
                    let float_array: Vec<f32> = uniform_array
                        .iter()
                        .filter_map(|value| value.as_f64().map(|v| v as f32))
                        .collect();
                    match float_array.len() {
                        2 if self.uniform_type == "float2" || self.uniform_type == "half2" => {
                            Some(float_array)
                        }
                        3 if self.uniform_type == "float3" || self.uniform_type == "half3" => {
                            Some(float_array)
                        }
                        4 if self.uniform_type == "float4"
                            || self.uniform_type == "half4"
                            || self.uniform_type == "mat2"
                            || self.uniform_type == "half2x2" =>
                        {
                            Some(float_array)
                        }
                        9 if self.uniform_type == "mat3" || self.uniform_type == "half3x3" => {
                            Some(float_array)
                        }
                        16 if self.uniform_type == "mat4" || self.uniform_type == "half4x4" => {
                            Some(float_array)
                        }
                        _ => None,
                    }
                    .map(|float_vec| ShaderUniformValue {
                        value_type: Some(FloatVecValue(FloatVec {
                            floats: float_vec,
                            ..Default::default()
                        })),
                        ..Default::default()
                    })
                } else {
                    error!(
                        "Error parsing float array for shader {} uniform {}",
                        self.uniform_type, self.uniform_name
                    );
                    None
                }
            }
            // Colors arrive as a FigmaColor JSON object and are re-parsed as
            // such before being converted to the proto color type.
            "color3" | "color4" => {
                serde_json::from_str::<FigmaColor>(self.uniform_value.to_string().as_str())
                    .ok()
                    .map(|figma_color| (&figma_color).into())
                    .map(|parsed_color| ShaderUniformValue {
                        value_type: Some(FloatColorValue(parsed_color)),
                        ..Default::default()
                    })
            }
            // Scalar integers.
            "int" => {
                if let Some(int_val) = self.uniform_value.as_i64() {
                    Some(ShaderUniformValue {
                        value_type: Some(IntValue(int_val as i32)),
                        ..Default::default()
                    })
                } else {
                    error!("Error parsing integer for shader int uniform {}", self.uniform_name);
                    None
                }
            }
            // Integer vectors; same length-must-match-type rule as floats.
            "int2" | "int3" | "int4" => {
                if let Some(uniform_array) = self.uniform_value.as_array() {
                    let int_array: Vec<i32> = uniform_array
                        .iter()
                        .filter_map(|value| value.as_i64().map(|v| v as i32))
                        .collect();
                    match int_array.len() {
                        2 if self.uniform_type == "int2" => Some(int_array),
                        3 if self.uniform_type == "int3" => Some(int_array),
                        4 if self.uniform_type == "int4" => Some(int_array),
                        _ => None,
                    }
                    .map(|int_vec| ShaderUniformValue {
                        value_type: Some(IntVecValue(IntVec {
                            ints: int_vec,
                            ..Default::default()
                        })),
                        ..Default::default()
                    })
                } else {
                    error!(
                        "Error parsing int array for shader {} uniform {}",
                        self.uniform_type, self.uniform_name
                    );
                    None
                }
            }
            // An image sampled by the shader, referenced by image key and
            // resolved through the image context.
            "shader" => {
                if let Some(str_val) = self.uniform_value.as_str() {
                    // We use an empty string as the node name to skip the ignore check.
                    if let Some(fill) = images.image_fill(str_val, &"".to_string()) {
                        Some(ShaderUniformValue {
                            value_type: Some(ImageRefValue(ImageRef {
                                key: fill,
                                res_name: images.image_res(str_val),
                                ..Default::default()
                            })),
                            ..Default::default()
                        })
                    } else {
                        error!(
                            "No image found for image ref {} for shader {}",
                            str_val, self.uniform_name
                        );
                        None
                    }
                } else {
                    error!(
                        "Error parsing image key for shader image uniform {}",
                        self.uniform_name
                    );
                    None
                }
            }
            // Unknown uniform type: keep the entry but with no value.
            _ => None,
        };
        (
            self.uniform_name.clone(),
            ShaderUniform {
                name: self.uniform_name.clone(),
                type_: self.uniform_type,
                value: uniform_value.into(),
                ignore: self.extras.unwrap_or_default().ignore.unwrap_or(false),
                ..Default::default()
            },
        )
    }
}
/// Optional per-uniform extras authored in the plugin.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct ShaderExtrasJson {
    // When true, the uniform is marked as ignored on the ShaderUniform proto.
    pub ignore: Option<bool>,
}
impl Default for ShaderExtrasJson {
    // Manual impl is deliberate: the default is `Some(false)`, not the
    // derived `None`.
    fn default() -> Self {
        ShaderExtrasJson { ignore: Some(false) }
    }
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_figma_import/src/lib.rs | crates/dc_figma_import/src/lib.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! `dc_figma_import` fetches a document from Figma and converts nodes from the document
//! to toolkit_schema Views, which can then be further customized (changing text or style)
//! and presented in other components that implement logic.
//!
//! The goal of this crate is to perform the mapping from Figma to the toolkit; it does
//! not provide any kind of UI logic mapping.
mod component_context;
mod design_definition;
mod document;
mod error;
mod extended_layout_schema;
mod figma_schema;
mod image_context;
pub mod meter_schema;
mod proxy_config;
pub mod reaction_schema;
pub mod scalableui_schema;
pub mod shader_schema;
pub mod tools;
mod transform_flexbox;
mod variable_utils;
// Exports for library users
pub use dc_bundle::design_compose_definition::DesignComposeDefinition;
pub use dc_bundle::design_compose_definition::DesignComposeDefinitionHeader;
pub use dc_bundle::geometry::Rectangle;
pub use document::Document;
pub use document::HiddenNodePolicy;
pub use error::Error;
pub use image_context::ImageContextSession;
pub use proxy_config::ProxyConfig;
// Internal convenience
pub use dc_bundle::color::Color;
pub use dc_bundle::definition::NodeQuery;
pub use dc_bundle::definition_file::load_design_def;
pub use dc_bundle::definition_file::save_design_def;
pub use dc_bundle::view::{View, ViewData};
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_figma_import/src/svg.rs | crates/dc_figma_import/src/svg.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use image::{DynamicImage, RgbaImage};
use usvg::{NodeExt, Rect};
use crate::Error;
/* This file contains code for extracting SVG trees, as well as doing custom parsing.
 * In the future, any SVG-related methods should go here.
 */
/// Calculate the bounding box of a node in an SVG tree, including child transforms
/// and filter bounds. This bounding box should represent the entire area that will
/// have pixels set when rendering.
///
/// `ts` is the accumulated transform from the root down to (and including)
/// `node`. Returns `None` for node kinds with no renderable geometry.
fn calc_node_bbox(
    tree: &usvg::Tree,
    node: &usvg::Node,
    ts: usvg::Transform,
) -> Option<usvg::PathBbox> {
    match *node.borrow() {
        usvg::NodeKind::Path(ref path) => path.data.bbox_with_transform(ts, path.stroke.as_ref()),
        usvg::NodeKind::Image(ref img) => {
            let path = usvg::PathData::from_rect(img.view_box.rect);
            path.bbox_with_transform(ts, None)
        }
        usvg::NodeKind::Svg(_) => {
            // Union the bounds of all children under their own transforms.
            let mut bbox = usvg::PathBbox::new_bbox();
            for child in node.children() {
                let mut child_tx = ts.clone();
                child_tx.append(&child.transform());
                if let Some(c_bbox) = calc_node_bbox(tree, &child, child_tx) {
                    bbox = bbox.expand(c_bbox);
                }
            }
            Some(bbox)
        }
        usvg::NodeKind::Group(usvg::Group { ref filter, .. }) => {
            let mut bbox = usvg::PathBbox::new_bbox();
            for child in node.children() {
                let mut child_tx = ts.clone();
                child_tx.append(&child.transform());
                if let Some(c_bbox) = calc_node_bbox(tree, &child, child_tx) {
                    bbox = bbox.expand(c_bbox);
                }
            }
            // Offset/outset the bounds based on the filters.
            for f in filter {
                if let Some(def) = tree.defs_by_id(f.as_str()) {
                    // Only Filter defs contribute bounds here (replaces a
                    // single-arm match with a catch-all, clippy: single_match).
                    if let usvg::NodeKind::Filter(ref x) = *def.borrow() {
                        bbox = bbox.expand(x.rect.to_path_bbox());
                    }
                }
            }
            Some(bbox)
        }
        _ => None,
    }
}
fn rasterize(tree: &usvg::Tree, bbox: &Rect, sf: f32) -> Result<(DynamicImage, Vec<u8>), Error> {
let mut img = tiny_skia::Pixmap::new(
(bbox.width() as f32 * sf) as u32,
(bbox.height() as f32 * sf) as u32,
)
.ok_or(usvg::Error::InvalidSize)?;
resvg::render(
&tree,
usvg::FitTo::Original,
tiny_skia::Transform::from_translate(-bbox.x() as f32, -bbox.y() as f32).post_scale(sf, sf),
img.as_mut(),
);
// We should import `png` directly so we can map errors appropriately.
let encoded_bytes = img.encode_png().ok().ok_or(usvg::Error::InvalidSize)?;
let raw_img = RgbaImage::from_raw(img.width(), img.height(), img.data().to_vec())
.ok_or(usvg::Error::InvalidSize)?;
Ok((DynamicImage::ImageRgba8(raw_img), encoded_bytes))
}
/// The result of rasterizing an SVG: PNG encodings at several scale factors
/// plus the geometry needed to place the image.
pub struct RasterizedVector {
    // Painted-content bounds, which may extend beyond the SVG's viewBox.
    pub content_box: Rect,
    // We want to make serialized docs that work on multiple different kinds of device,
    // so we rasterize vectors to several different scale factors.
    pub encoded_bytes_1x: Arc<serde_bytes::ByteBuf>,
    pub encoded_bytes_2x: Arc<serde_bytes::ByteBuf>,
    pub encoded_bytes_3x: Arc<serde_bytes::ByteBuf>,
    // Pixel dimensions of the 1x rasterization.
    pub width: u32,
    pub height: u32,
}
/// Render an SVG without applying the root-level "viewBox" clip (but honoring clips
/// set further down in the tree). This function parses and walks the SVG document to
/// find the largest paint rect, and then rasterizes the entire document. The rasterized
/// image is returned as an encoded PNG and as a `ToolkitImage` instance. A `content_box`
/// which contains a translation from the clipped SVG to the unclipped image is also
/// returned.
pub fn render_svg_without_clip(svg_content: &str) -> Result<RasterizedVector, Error> {
    let tree = usvg::Tree::from_str(svg_content, &usvg::Options { ..Default::default() }.to_ref())?;
    let svg_node = tree.svg_node();
    // Take the bounding box of the vector content (including filter bounds) and union it with
    // the reported bounding box from Figma.
    let bbox = {
        // Compute the union'd bounding box.
        let bbox = calc_node_bbox(&tree, &tree.root(), usvg::Transform::default())
            .ok_or(usvg::Error::InvalidSize)?
            .expand(
                usvg::PathBbox::new(0.0, 0.0, svg_node.size.width(), svg_node.size.height())
                    .ok_or(usvg::Error::InvalidSize)?,
            );
        // Various layout implementations that we work with only operate on integral quantities
        // (even though we use floats). So let's round out the bounding box to the smallest integer
        // box that fully covers the computed box, and avoid rounding errors at layout time.
        let x = bbox.x().floor();
        let y = bbox.y().floor();
        let w = bbox.right().ceil() - x;
        let h = bbox.bottom().ceil() - y;
        Rect::new(x, y, w, h).ok_or(usvg::Error::InvalidSize)?
    };
    // Rasterize at 1x/2x/3x; only the 1x image is decoded to report dimensions.
    let (img, encoded_bytes_1x) = rasterize(&tree, &bbox, 1.0)?;
    let (_, encoded_bytes_2x) = rasterize(&tree, &bbox, 2.0)?;
    let (_, encoded_bytes_3x) = rasterize(&tree, &bbox, 3.0)?;
    Ok(RasterizedVector {
        content_box: bbox,
        encoded_bytes_1x: Arc::new(serde_bytes::ByteBuf::from(encoded_bytes_1x)),
        encoded_bytes_2x: Arc::new(serde_bytes::ByteBuf::from(encoded_bytes_2x)),
        encoded_bytes_3x: Arc::new(serde_bytes::ByteBuf::from(encoded_bytes_3x)),
        width: img.width(),
        height: img.height(),
    })
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_figma_import/src/transform_flexbox.rs | crates/dc_figma_import/src/transform_flexbox.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Flexbox definitions derived from `stretch` 0.3.2 licensed MIT.
//! https://github.com/vislyhq/stretch
use std::collections::HashMap;
use std::f32::consts::PI;
use crate::meter_schema::MeterJson;
use crate::Error;
use crate::{
component_context::ComponentContext,
document::HiddenNodePolicy,
extended_layout_schema::{ExtendedAutoLayout, LayoutType, SizePolicy},
figma_schema,
image_context::ImageContext,
variable_utils::{bound_variables_color, FromFigmaVar},
};
use dc_bundle::font::{FontFeature, FontStyle, FontWeight, Hyperlink, TextDecoration};
use dc_bundle::geometry::dimension_proto::Dimension;
use dc_bundle::geometry::{DimensionProto, DimensionRect, Size};
use dc_bundle::path::LineHeight;
use dc_bundle::path::Path;
use dc_bundle::variable::num_or_var::NumOrVarType;
use dc_bundle::variable::NumOrVar;
use dc_bundle::view_shape;
use dc_bundle::view_shape::ViewShape;
use crate::figma_schema::LayoutPositioning;
use crate::reaction_schema::{FrameExtrasJson, ReactionJson};
use dc_bundle::background::background;
use dc_bundle::background::{background::Background_type, Background};
use dc_bundle::blend::BlendMode;
use dc_bundle::filter::{filter_op, FilterOp};
use dc_bundle::frame_extras::FrameExtras;
use dc_bundle::grid::{GridLayoutType, GridSpan};
use dc_bundle::matrix_transform::LayoutTransform;
use dc_bundle::meter_data::{
meter_data::Meter_data_type, ArcMeterData, MeterData, ProgressBarMeterData,
ProgressMarkerMeterData, ProgressVectorMeterData, RotationMeterData,
};
use dc_bundle::node_style::Display;
use dc_bundle::path::{stroke_weight, StrokeAlign, StrokeWeight};
use dc_bundle::positioning::{
item_spacing, AlignContent, AlignItems, AlignSelf, FlexDirection, FlexWrap, ItemSpacing,
JustifyContent, Overflow, OverflowDirection, PositionType, ScrollInfo,
};
use dc_bundle::reaction::Reaction;
use dc_bundle::shadow::{BoxShadow, TextShadow};
use dc_bundle::text::{TextAlign, TextAlignVertical, TextOverflow};
use dc_bundle::view_shape::view_shape::RoundRect;
use log::error;
use crate::scalableui_schema::ScalableUiDataJson;
use crate::shader_schema::ShaderDataJson;
use dc_bundle::path::line_height::Line_height_type;
use dc_bundle::scalable::scalable_uidata;
use dc_bundle::text_style::{StyledTextRun, TextStyle};
use dc_bundle::view::view::RenderMethod;
use dc_bundle::view::{ComponentInfo, View};
use dc_bundle::view_style::ViewStyle;
use unicode_segmentation::UnicodeSegmentation;
/// If an Auto content preview widget child specifies a "Hug contents" sizing
/// policy, this overrides a fixed-size sizing policy on its parent to allow it
/// to grow to fit all of its child nodes. Returns the widget's layout type
/// when that override applies.
fn check_child_size_override(node: &figma_schema::Node) -> Option<LayoutType> {
    for child in node.children.iter().filter(|c| c.is_widget()) {
        // Walk plugin-data -> layout JSON -> parsed struct; any missing or
        // unparseable step simply skips this child.
        let extended_auto_layout: Option<ExtendedAutoLayout> = child
            .shared_plugin_data
            .get("designcompose")
            .and_then(|vsw_data| vsw_data.get("vsw-extended-auto-layout"))
            .and_then(|text_layout| serde_json::from_str(text_layout.as_str()).ok());
        if let Some(layout_data) = extended_auto_layout {
            if layout_data.auto_layout_data.size_policy == SizePolicy::Hug {
                return Some(layout_data.layout);
            }
        }
    }
    None
}
/// Look up the Android string resource name recorded for `node` by the
/// DesignCompose plugin, if any. (`?` replaces the manual if-let/return-None
/// chain, clippy: question_mark.)
fn check_text_node_string_res(node: &figma_schema::Node) -> Option<String> {
    node.shared_plugin_data.get("designcompose")?.get("vsw-string-res").cloned()
}
// Map Figma's new flexbox-based Auto Layout properties to our own flexbox-based layout
// properties.
/// Map a Figma node's layout properties onto a DesignCompose [`ViewStyle`].
///
/// Children of Auto Layout frames are expressed as flexbox; nodes outside an
/// Auto Layout parent fall back to absolute positioning driven by Figma's
/// constraint system. Also handles widget overrides, vector sizing, and text
/// auto-resize.
///
/// * `node` - the node to compute a style for.
/// * `parent` - the node's parent, if any; used to decide between flexbox
///   layout and absolute positioning, and to resolve constraint offsets.
fn compute_layout(
    node: &figma_schema::Node,
    parent: Option<&figma_schema::Node>,
) -> Result<ViewStyle, Error> {
    let mut style = ViewStyle::new_default();
    // Invisible nodes still get a style; they are hidden via display:none.
    style.node_style_mut().display_type =
        if node.visible { Display::DISPLAY_FLEX.into() } else { Display::DISPLAY_NONE.into() };
    // Determine if the parent is using Auto Layout (and thus is a Flexbox parent) or if it isn't.
    let parent_frame = parent.and_then(|p| p.frame());
    let parent_bounding_box = parent.and_then(|p| p.absolute_bounding_box);
    //let parent_is_root = parent.is_none();
    let parent_is_flexbox = if let Some(frame) = parent_frame {
        !frame.layout_mode.is_none()
    } else {
        // The root container is from our toolkit, and uses flexbox.
        //parent_is_root
        false
    };
    // Set when this node "hugs" its contents on an axis; a hugging axis must
    // not be given a fixed min size further below.
    let mut hug_width = false;
    let mut hug_height = false;
    if let Some(bounds) = node.absolute_bounding_box {
        style.layout_style_mut().bounding_box =
            Some(Size { width: bounds.width(), height: bounds.height(), ..Default::default() })
                .into()
    }
    // Explicit author-specified min/max sizes always take effect.
    if let Some(max_width) = node.max_width {
        style.layout_style_mut().max_width = DimensionProto::new_points(max_width);
    }
    if let Some(max_height) = node.max_height {
        style.layout_style_mut().max_height = DimensionProto::new_points(max_height);
    }
    if let Some(min_width) = node.min_width {
        style.layout_style_mut().min_width = DimensionProto::new_points(min_width);
    }
    if let Some(min_height) = node.min_height {
        style.layout_style_mut().min_height = DimensionProto::new_points(min_height);
    }
    // Frames can implement Auto Layout on their children.
    if let Some(frame) = node.frame() {
        style.layout_style_mut().position_type = match frame.layout_positioning {
            LayoutPositioning::Absolute => PositionType::POSITION_TYPE_ABSOLUTE.into(),
            LayoutPositioning::Auto => PositionType::POSITION_TYPE_RELATIVE.into(),
        };
        style.layout_style_mut().width = DimensionProto::new_auto();
        style.layout_style_mut().height = DimensionProto::new_auto();
        style.layout_style_mut().flex_grow = frame.layout_grow;
        style.layout_style_mut().flex_basis = if frame.layout_grow == 1.0 {
            // When layout_grow is 1, it means the node's width/height is set to FILL.
            // Figma doesn't explicitly provide this info, but we need flex_basis = 0
            // for it to grow from zero to fill the container.
            DimensionProto::new_points(0.0)
        } else {
            DimensionProto::new_undefined()
        };
        style.layout_style_mut().flex_shrink = 0.0;
        style.node_style_mut().horizontal_sizing = frame.layout_sizing_horizontal.into_proto_val();
        style.node_style_mut().vertical_sizing = frame.layout_sizing_vertical.into_proto_val();
        // Check for a flex direction override, which can happen if this node has a child widget
        let flex_direction_override = check_child_size_override(node);
        if let Some(dir) = flex_direction_override {
            style.layout_style_mut().flex_direction = match dir {
                LayoutType::Horizontal => FlexDirection::FLEX_DIRECTION_ROW.into(),
                LayoutType::Vertical => FlexDirection::FLEX_DIRECTION_COLUMN.into(),
                _ => FlexDirection::FLEX_DIRECTION_NONE.into(),
            };
        } else {
            style.layout_style_mut().flex_direction = match frame.layout_mode {
                figma_schema::LayoutMode::Horizontal => FlexDirection::FLEX_DIRECTION_ROW.into(),
                figma_schema::LayoutMode::Vertical => FlexDirection::FLEX_DIRECTION_COLUMN.into(),
                figma_schema::LayoutMode::None => FlexDirection::FLEX_DIRECTION_NONE.into(),
            };
        }
        style.layout_style_mut().padding = Some(DimensionRect {
            start: DimensionProto::new_points(frame.padding_left),
            end: DimensionProto::new_points(frame.padding_right),
            top: DimensionProto::new_points(frame.padding_top),
            bottom: DimensionProto::new_points(frame.padding_bottom),
            ..Default::default()
        })
        .into();
        // NOTE(review): item spacing is truncated to whole pixels here (f32 -> i32 cast).
        style.layout_style_mut().item_spacing = Some(ItemSpacing {
            ItemSpacingType: Some(item_spacing::ItemSpacingType::Fixed(frame.item_spacing as i32)),
            ..Default::default()
        })
        .into();
        match frame.layout_align {
            // Counter axis stretch
            Some(figma_schema::LayoutAlign::Stretch) => {
                style.layout_style_mut().align_self = AlignSelf::ALIGN_SELF_STRETCH.into();
            }
            _ => (),
        };
        style.layout_style_mut().align_items = match frame.counter_axis_align_items {
            figma_schema::LayoutAlignItems::Center => AlignItems::ALIGN_ITEMS_CENTER.into(),
            figma_schema::LayoutAlignItems::Max => AlignItems::ALIGN_ITEMS_FLEX_END.into(),
            figma_schema::LayoutAlignItems::Min => AlignItems::ALIGN_ITEMS_FLEX_START.into(),
            figma_schema::LayoutAlignItems::SpaceBetween => {
                AlignItems::ALIGN_ITEMS_FLEX_START.into()
            } // XXX
            figma_schema::LayoutAlignItems::Baseline => AlignItems::ALIGN_ITEMS_FLEX_START.into(),
        };
        style.layout_style_mut().justify_content = match frame.primary_axis_align_items {
            figma_schema::LayoutAlignItems::Center => JustifyContent::JUSTIFY_CONTENT_CENTER.into(),
            figma_schema::LayoutAlignItems::Max => JustifyContent::JUSTIFY_CONTENT_FLEX_END.into(),
            figma_schema::LayoutAlignItems::Min => {
                JustifyContent::JUSTIFY_CONTENT_FLEX_START.into()
            }
            figma_schema::LayoutAlignItems::SpaceBetween => {
                JustifyContent::JUSTIFY_CONTENT_SPACE_BETWEEN.into()
            }
            figma_schema::LayoutAlignItems::Baseline => {
                JustifyContent::JUSTIFY_CONTENT_FLEX_START.into()
            }
        };
        // The toolkit picks "Stretch" as a sensible default, but we don't
        // want that for Figma elements.
        style.layout_style_mut().align_content = AlignContent::ALIGN_CONTENT_FLEX_START.into();
        let align_self_stretch =
            style.layout_style_mut().align_self == AlignSelf::ALIGN_SELF_STRETCH.into();
        if let Some(bounds) = node.absolute_bounding_box {
            // If align_self is set to stretch, we want width/height to be Auto, even if the
            // frame's primary or counter axis sizing mode is set to Fixed.
            let dim_points_or_auto = |points| {
                if align_self_stretch {
                    //|| parent_is_root {
                    DimensionProto::new_auto()
                } else {
                    DimensionProto::new_points(points)
                }
            };
            match frame.layout_mode {
                figma_schema::LayoutMode::Horizontal => {
                    hug_width =
                        frame.primary_axis_sizing_mode == figma_schema::LayoutSizingMode::Auto;
                    hug_height =
                        frame.counter_axis_sizing_mode == figma_schema::LayoutSizingMode::Auto;
                    style.layout_style_mut().width = match frame.primary_axis_sizing_mode {
                        figma_schema::LayoutSizingMode::Fixed => {
                            dim_points_or_auto(bounds.width().ceil()).into()
                        }
                        figma_schema::LayoutSizingMode::Auto => DimensionProto::new_auto(),
                    };
                    style.layout_style_mut().height = match frame.counter_axis_sizing_mode {
                        figma_schema::LayoutSizingMode::Fixed => {
                            dim_points_or_auto(bounds.height().ceil()).into()
                        }
                        figma_schema::LayoutSizingMode::Auto => DimensionProto::new_auto(),
                    };
                    if hug_width && node.min_width.is_none() {
                        style.layout_style_mut().min_width = DimensionProto::new_auto();
                    }
                }
                figma_schema::LayoutMode::Vertical => {
                    hug_width =
                        frame.counter_axis_sizing_mode == figma_schema::LayoutSizingMode::Auto;
                    hug_height =
                        frame.primary_axis_sizing_mode == figma_schema::LayoutSizingMode::Auto;
                    style.layout_style_mut().width = match frame.counter_axis_sizing_mode {
                        figma_schema::LayoutSizingMode::Fixed => {
                            dim_points_or_auto(bounds.width().ceil()).into()
                        }
                        figma_schema::LayoutSizingMode::Auto => DimensionProto::new_auto(),
                    };
                    style.layout_style_mut().height = match frame.primary_axis_sizing_mode {
                        figma_schema::LayoutSizingMode::Fixed => {
                            dim_points_or_auto(bounds.height().ceil()).into()
                        }
                        figma_schema::LayoutSizingMode::Auto => DimensionProto::new_auto(),
                    };
                    if hug_height && node.min_height.is_none() {
                        style.layout_style_mut().min_height = DimensionProto::new_auto();
                    }
                }
                _ => {
                    // Frame is not autolayout, so use the layout sizing mode
                    // to determine size. If we have a node size specified, use
                    // that instead of the bounds since the node size is the
                    // size without rotation and scale.
                    let (width, height) = if let Some(size) = &node.size {
                        (size.x(), size.y())
                    } else {
                        (bounds.width().ceil(), bounds.height().ceil())
                    };
                    if frame.layout_sizing_horizontal == figma_schema::LayoutSizing::Fill {
                        style.layout_style_mut().width = DimensionProto::new_auto();
                    } else {
                        style.layout_style_mut().width = DimensionProto::new_points(width);
                    }
                    if frame.layout_sizing_vertical == figma_schema::LayoutSizing::Fill {
                        style.layout_style_mut().height = DimensionProto::new_auto();
                    } else {
                        style.layout_style_mut().height = DimensionProto::new_points(height);
                    }
                }
            }
            // For Auto Layout frames, the newer layoutSizingHorizontal/Vertical
            // properties are authoritative and override the per-axis sizing
            // computed above.
            if frame.layout_mode != figma_schema::LayoutMode::None {
                let width_points = bounds.width().ceil();
                let height_points = bounds.height().ceil();
                style.layout_style_mut().width = match frame.layout_sizing_horizontal {
                    figma_schema::LayoutSizing::Fixed => DimensionProto::new_points(width_points),
                    figma_schema::LayoutSizing::Fill => DimensionProto::new_auto(),
                    figma_schema::LayoutSizing::Hug => DimensionProto::new_auto(),
                };
                style.layout_style_mut().height = match frame.layout_sizing_vertical {
                    figma_schema::LayoutSizing::Fixed => DimensionProto::new_points(height_points),
                    figma_schema::LayoutSizing::Fill => DimensionProto::new_auto(),
                    figma_schema::LayoutSizing::Hug => DimensionProto::new_auto(),
                };
            }
        }
    }
    // Setup widget size to expand to its parent
    let mut is_widget_or_parent_widget = node.is_widget();
    if let Some(parent) = parent {
        if !is_widget_or_parent_widget {
            is_widget_or_parent_widget = parent.is_widget();
        }
    }
    if is_widget_or_parent_widget {
        style.layout_style_mut().position_type = PositionType::POSITION_TYPE_ABSOLUTE.into();
        style.layout_style_mut().width = DimensionProto::new_auto();
        style.layout_style_mut().height = DimensionProto::new_auto();
        style.layout_style_mut().left = DimensionProto::new_points(0.0);
        style.layout_style_mut().right = DimensionProto::new_points(0.0);
        style.layout_style_mut().top = DimensionProto::new_points(0.0);
        style.layout_style_mut().bottom = DimensionProto::new_points(0.0);
    }
    // Vector layers have some layout properties for themselves, but not for their children.
    if let Some(vector) = node.vector() {
        match vector.layout_align {
            // Counter axis stretch
            Some(figma_schema::LayoutAlign::Stretch) => {
                style.layout_style_mut().align_self = AlignSelf::ALIGN_SELF_STRETCH.into();
            }
            _ => (),
        };
        style.layout_style_mut().position_type = match vector.layout_positioning {
            LayoutPositioning::Absolute => PositionType::POSITION_TYPE_ABSOLUTE.into(),
            LayoutPositioning::Auto => PositionType::POSITION_TYPE_RELATIVE.into(),
        };
        style.layout_style_mut().width = DimensionProto::new_auto();
        style.layout_style_mut().height = DimensionProto::new_auto();
    }
    // Non-hugging axes default their min size to the node's bounds, unless an
    // explicit min size was authored.
    if let Some(bounds) = node.absolute_bounding_box {
        if !hug_width && node.min_width.is_none() {
            style.layout_style_mut().min_width = DimensionProto::new_points(bounds.width().ceil());
        }
        if !hug_height && node.min_height.is_none() {
            style.layout_style_mut().min_height =
                DimensionProto::new_points(bounds.height().ceil());
        }
    }
    // Prefer the node's own size (un-rotated, un-scaled) over the bounding box
    // when one is present and valid.
    if let Some(size) = &node.size {
        if size.is_valid() {
            if !hug_width && node.min_width.is_none() {
                style.layout_style_mut().min_width = DimensionProto::new_points(size.x());
            }
            if !hug_height && node.min_height.is_none() {
                style.layout_style_mut().min_height = DimensionProto::new_points(size.y());
            }
            // Set fixed vector size
            // TODO need to change to support scale?
            if node.vector().is_some() {
                style.layout_style_mut().width = DimensionProto::new_points(size.x());
                style.layout_style_mut().height = DimensionProto::new_points(size.y());
            }
            style.node_style_mut().node_size.as_mut().map(|s| {
                s.width = size.x();
                s.height = size.y();
            });
        }
    }
    // For text we want to force the width.
    if let figma_schema::NodeData::Text { vector, style: text_style, .. } = &node.data {
        style.layout_style_mut().flex_grow = vector.layout_grow;
        if vector.layout_grow == 1.0 {
            // When the value of layout_grow is 1, it is because the node has
            // its width or height set to FILL. Figma's node properties don't
            // specify this, but flex_basis needs to be set to 0 so that it
            // starts out small and grows to fill the container
            style.layout_style_mut().flex_basis = DimensionProto::new_points(0.0);
        }
        style.node_style_mut().horizontal_sizing = vector.layout_sizing_horizontal.into_proto_val();
        style.node_style_mut().vertical_sizing = vector.layout_sizing_vertical.into_proto_val();
        // The text style also contains some layout information. We previously exposed
        // auto-width text in our plugin.
        match text_style.text_auto_resize {
            figma_schema::TextAutoResize::Height => {
                if vector.layout_sizing_horizontal == figma_schema::LayoutSizing::Fill {
                    style.layout_style_mut().width = DimensionProto::new_auto();
                } else {
                    style.layout_style_mut().width = style.layout_style().min_width.clone();
                }
                style.layout_style_mut().height = DimensionProto::new_auto();
            }
            figma_schema::TextAutoResize::WidthAndHeight => {
                if node.min_width.is_none() {
                    style.layout_style_mut().min_width = DimensionProto::new_auto();
                }
                style.layout_style_mut().width = DimensionProto::new_auto();
                style.layout_style_mut().height = DimensionProto::new_auto();
            }
            // TextAutoResize::Truncate is deprecated
            // Use fixed width and height
            _ => {
                style.layout_style_mut().width = style.layout_style().min_width.clone();
                style.layout_style_mut().height = style.layout_style().min_height.clone();
            }
        }
    }
    // Nodes that aren't inside an Auto Layout parent (or that opted into
    // absolute positioning) are placed using Figma's constraint system,
    // expressed here as absolute positioning with margins and percentages.
    if !parent_is_flexbox
        || style.layout_style_mut().position_type == PositionType::POSITION_TYPE_ABSOLUTE.into()
    {
        match (node.absolute_bounding_box, parent_bounding_box) {
            (Some(bounds), Some(parent_bounds)) => {
                style.layout_style_mut().position_type =
                    PositionType::POSITION_TYPE_ABSOLUTE.into();
                // Figure out all the values we might need when calculating the layout constraints.
                let (width, height) = if let Some(size) = &node.size {
                    (size.x(), size.y())
                } else {
                    (bounds.width().ceil(), bounds.height().ceil())
                };
                // Edge offsets of this node relative to its parent, in px.
                let left = (bounds.x() - parent_bounds.x()).round();
                let right = parent_bounds.width().ceil() - (left + bounds.width().ceil()); // px from our right edge to parent's right edge
                let top = (bounds.y() - parent_bounds.y()).round();
                let bottom = parent_bounds.height().ceil() - (top + bounds.height().ceil());
                match node.constraints().map(|c| c.horizontal) {
                    Some(figma_schema::HorizontalLayoutConstraintValue::Left) | None => {
                        style.layout_style_mut().left = DimensionProto::new_percent(0.0);
                        style.layout_style_mut().right = DimensionProto::new_auto();
                        style
                            .layout_style_mut()
                            .margin
                            .as_mut()
                            .map(|m| m.set_start(Dimension::Points(left)));
                        if !hug_width && !node.is_text() {
                            style.layout_style_mut().width = DimensionProto::new_points(width);
                        }
                    }
                    Some(figma_schema::HorizontalLayoutConstraintValue::Right) => {
                        style.layout_style_mut().left = DimensionProto::new_auto();
                        style.layout_style_mut().right = DimensionProto::new_percent(0.0);
                        style
                            .layout_style_mut()
                            .margin
                            .as_mut()
                            .map(|m| m.set_end(Dimension::Points(right)));
                        if !hug_width && !node.is_text() {
                            style.layout_style_mut().width = DimensionProto::new_points(width);
                        }
                    }
                    Some(figma_schema::HorizontalLayoutConstraintValue::LeftRight) => {
                        style.layout_style_mut().left = DimensionProto::new_percent(0.0);
                        style.layout_style_mut().right = DimensionProto::new_percent(0.0);
                        style
                            .layout_style_mut()
                            .margin
                            .as_mut()
                            .map(|m| m.set_start(Dimension::Points(left)));
                        style
                            .layout_style_mut()
                            .margin
                            .as_mut()
                            .map(|m| m.set_end(Dimension::Points(right)));
                        style.layout_style_mut().width = DimensionProto::new_auto();
                    }
                    Some(figma_schema::HorizontalLayoutConstraintValue::Center) => {
                        // Centering with absolute positioning isn't directly possible; instead we
                        // give our style a left/top margin of 50%, which centers the left/top edge
                        // within the parent, then we apply the delta to move it to the correct
                        // location using the left/top property. All of this adds up to position the
                        // component where it was in Figma, but anchored to the horizontal/vertical
                        // centerpoint.
                        style.layout_style_mut().left = DimensionProto::new_percent(0.5);
                        style.layout_style_mut().right = DimensionProto::new_auto();
                        style.layout_style_mut().margin.as_mut().map(|m| {
                            m.set_start(Dimension::Points(
                                left - parent_bounds.width().ceil() / 2.0,
                            ))
                        });
                        if !hug_width && !node.is_text() {
                            style.layout_style_mut().width = DimensionProto::new_points(width);
                        }
                    }
                    Some(figma_schema::HorizontalLayoutConstraintValue::Scale) => {
                        // Scale constraints become pure percentages; guard
                        // against a zero-width parent to avoid dividing by 0.
                        let is_zero: bool = parent_bounds.width() == 0.0;
                        style.layout_style_mut().left = DimensionProto::new_percent(if is_zero {
                            0.0
                        } else {
                            left / parent_bounds.width().ceil()
                        });
                        style.layout_style_mut().right = DimensionProto::new_percent(if is_zero {
                            0.0
                        } else {
                            right / parent_bounds.width().ceil()
                        });
                        style.layout_style_mut().width = DimensionProto::new_auto();
                        if node.min_width.is_none() {
                            style.layout_style_mut().min_width = DimensionProto::new_auto();
                        }
                    }
                }
                match node.constraints().map(|c| c.vertical) {
                    Some(figma_schema::VerticalLayoutConstraintValue::Top) | None => {
                        style.layout_style_mut().top = DimensionProto::new_percent(0.0);
                        style.layout_style_mut().bottom = DimensionProto::new_auto();
                        style
                            .layout_style_mut()
                            .margin
                            .as_mut()
                            .map(|m| m.set_top(Dimension::Points(top)));
                        if !hug_height && !node.is_text() {
                            style.layout_style_mut().height = DimensionProto::new_points(height);
                        }
                    }
                    Some(figma_schema::VerticalLayoutConstraintValue::Bottom) => {
                        style.layout_style_mut().top = DimensionProto::new_auto();
                        style.layout_style_mut().bottom = DimensionProto::new_percent(0.0);
                        style
                            .layout_style_mut()
                            .margin
                            .as_mut()
                            .map(|m| m.set_bottom(Dimension::Points(bottom)));
                        if !hug_height && !node.is_text() {
                            style.layout_style_mut().height = DimensionProto::new_points(height);
                        }
                    }
                    Some(figma_schema::VerticalLayoutConstraintValue::TopBottom) => {
                        style.layout_style_mut().top = DimensionProto::new_percent(0.0);
                        style.layout_style_mut().bottom = DimensionProto::new_percent(0.0);
                        style
                            .layout_style_mut()
                            .margin
                            .as_mut()
                            .map(|m| m.set_top(Dimension::Points(top)));
                        style
                            .layout_style_mut()
                            .margin
                            .as_mut()
                            .map(|m| m.set_bottom(Dimension::Points(bottom)));
                        style.layout_style_mut().height = DimensionProto::new_auto();
                    }
                    Some(figma_schema::VerticalLayoutConstraintValue::Center) => {
                        // See the horizontal Center arm above for how percent
                        // margin + offset emulates center anchoring.
                        style.layout_style_mut().top = DimensionProto::new_percent(0.5);
                        style.layout_style_mut().bottom = DimensionProto::new_auto();
                        style.layout_style_mut().margin.as_mut().map(|m| {
                            m.set_top(Dimension::Points(top - parent_bounds.height().ceil() / 2.0))
                        });
                        if !hug_height && !node.is_text() {
                            style.layout_style_mut().height = DimensionProto::new_points(height);
                        }
                    }
                    Some(figma_schema::VerticalLayoutConstraintValue::Scale) => {
                        let is_zero: bool = parent_bounds.height() == 0.0;
                        let top = if is_zero { 0.0 } else { top / parent_bounds.height().ceil() };
                        let bottom =
                            if is_zero { 0.0 } else { bottom / parent_bounds.height().ceil() };
                        style.layout_style_mut().top = DimensionProto::new_percent(top);
                        style.layout_style_mut().bottom = DimensionProto::new_percent(bottom);
                        style.layout_style_mut().height = DimensionProto::new_auto();
                        if node.min_height.is_none() {
                            style.layout_style_mut().min_height = DimensionProto::new_auto();
                        }
                    }
                }
            }
            _ => {}
        }
    }
    Ok(style)
}
/// Convert a Figma 2x3 affine transform into a `LayoutTransform`.
/// Missing entries fall back to the identity transform.
fn convert_transform(transform: &figma_schema::Transform) -> LayoutTransform {
    let entry = |row: usize, col: usize, identity: f32| transform[row][col].unwrap_or(identity);
    let (m11, m12) = (entry(0, 0, 1.0), entry(1, 0, 0.0));
    let (m21, m22) = (entry(0, 1, 0.0), entry(1, 1, 1.0));
    let (m41, m42) = (entry(0, 2, 0.0), entry(1, 2, 0.0));
    LayoutTransform::row_major_2d(m11, m12, m21, m22, m41, m42)
}
fn convert_blend_mode(blend_mode: Option<figma_schema::BlendMode>) -> BlendMode {
match blend_mode {
Some(figma_schema::BlendMode::PassThrough) | None => BlendMode::BLEND_MODE_PASS_THROUGH,
Some(figma_schema::BlendMode::Normal) => BlendMode::BLEND_MODE_NORMAL,
Some(figma_schema::BlendMode::Darken) => BlendMode::BLEND_MODE_DARKEN,
Some(figma_schema::BlendMode::Multiply) => BlendMode::BLEND_MODE_MULTIPLY,
Some(figma_schema::BlendMode::LinearBurn) => BlendMode::BLEND_MODE_LINEAR_BURN,
Some(figma_schema::BlendMode::ColorBurn) => BlendMode::BLEND_MODE_COLOR_BURN,
Some(figma_schema::BlendMode::Lighten) => BlendMode::BLEND_MODE_LIGHTEN,
Some(figma_schema::BlendMode::Screen) => BlendMode::BLEND_MODE_SCREEN,
Some(figma_schema::BlendMode::LinearDodge) => BlendMode::BLEND_MODE_LINEAR_DODGE,
Some(figma_schema::BlendMode::ColorDodge) => BlendMode::BLEND_MODE_COLOR_DODGE,
Some(figma_schema::BlendMode::Overlay) => BlendMode::BLEND_MODE_OVERLAY,
Some(figma_schema::BlendMode::SoftLight) => BlendMode::BLEND_MODE_SOFT_LIGHT,
Some(figma_schema::BlendMode::HardLight) => BlendMode::BLEND_MODE_HARD_LIGHT,
Some(figma_schema::BlendMode::Difference) => BlendMode::BLEND_MODE_DIFFERENCE,
Some(figma_schema::BlendMode::Exclusion) => BlendMode::BLEND_MODE_EXCLUSION,
Some(figma_schema::BlendMode::Hue) => BlendMode::BLEND_MODE_HUE,
Some(figma_schema::BlendMode::Saturation) => BlendMode::BLEND_MODE_SATURATION,
Some(figma_schema::BlendMode::Color) => BlendMode::BLEND_MODE_COLOR,
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | true |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_figma_import/src/reaction_schema.rs | crates/dc_figma_import/src/reaction_schema.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::figma_schema::FigmaColor;
use dc_bundle::{
color::FloatColor,
frame_extras::{
FrameExtras, OverlayBackground, OverlayBackgroundInteraction, OverlayPositionType,
},
geometry::Vector,
reaction::{
action::{self, node::Navigation, ActionUrl, Action_type},
trigger::{KeyDown, MouseDown, MouseEnter, MouseLeave, MouseUp, Timeout, Trigger_type},
Action, Reaction, Trigger,
},
transition::{
easing::{Bezier, Easing_type, Spring},
transition::{
Dissolve, MoveIn, MoveOut, Push, ScrollAnimate, SlideIn, SlideOut, SmartAnimate,
TransitionDirection, Transition_type,
},
Easing, Transition,
},
};
use serde::{Deserialize, Serialize};
// This module can deserialize Figma's "reactions" struct, which is used to define the
// interactivity of interactive components. It's in a separate module from `figma_schema`
// because it's not yet part of Figma's REST API. We get access to it via a custom plugin
// which copies the reactions array into plugin storage in the node which we then fetch
// via the REST API.
//
// Once Figma returns reactions to the REST API we'll move these definitions into
// `figma_schema` under the `Node` type (because reactions can be added to most nodes).
//
// The Figma documentation that these definitions correspond to is here:
// https://www.figma.com/plugin-docs/api/Reaction/
/// JSON form of a cubic-bezier easing curve as emitted by the Figma plugin.
///
/// `(x1, y1)` and `(x2, y2)` are the two control points; the endpoints are
/// implicitly (0, 0) and (1, 1), as in CSS `cubic-bezier()`.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct BezierJson {
    pub x1: f32,
    pub y1: f32,
    pub x2: f32,
    pub y2: f32,
}
// Implemented as `From` rather than `Into` (Clippy `from_over_into`): the
// standard blanket impl derives `Into<Bezier> for BezierJson` automatically,
// so existing `.into()` call sites keep working.
impl From<BezierJson> for Bezier {
    /// Convert the plugin's JSON bezier control points into the bundle's
    /// `Bezier` message.
    fn from(json: BezierJson) -> Self {
        Bezier { x1: json.x1, y1: json.y1, x2: json.x2, y2: json.y2, ..Default::default() }
    }
}
/// JSON form of a physical-spring easing as emitted by the Figma plugin.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct SpringJson {
    // Mass of the simulated object.
    pub mass: f32,
    // Spring stiffness constant.
    pub stiffness: f32,
    // Damping applied to the oscillation.
    pub damping: f32,
}
// `From` instead of `Into` (Clippy `from_over_into`); `Into<Spring>` is
// provided by the standard blanket impl, so callers are unaffected.
impl From<SpringJson> for Spring {
    /// Convert the plugin's JSON spring parameters into the bundle's
    /// `Spring` message.
    fn from(json: SpringJson) -> Self {
        Spring {
            mass: json.mass,
            stiffness: json.stiffness,
            damping: json.damping,
            ..Default::default()
        }
    }
}
/// The type of easing to perform in a transition.
///
/// Deserialized from the Figma plugin's JSON, where the variant is chosen by
/// a SCREAMING_SNAKE_CASE `type` tag (e.g. `"EASE_IN"`). Named easings are
/// flattened to concrete bezier or spring parameters by the conversion to
/// `Easing_type` below.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum EasingJson {
    // Cubic beziers
    EaseIn,
    EaseOut,
    EaseInAndOut,
    Linear,
    EaseInBack,
    EaseOutBack,
    EaseInAndOutBack,
    // Manually specified cubic bezier
    CustomCubicBezier {
        #[serde(rename = "easingFunctionCubicBezier")]
        bezier: BezierJson,
    },
    // Springs
    Gentle,
    Quick,
    Bouncy,
    Slow,
    // Manually specified spring
    CustomSpring {
        #[serde(rename = "easingFunctionSpring")]
        spring: SpringJson,
    },
}
// We flatten the Easing type to a bezier for the toolkit. These values were taken from
// https://easings.net/ and verified against Figma optically.
//
// Implemented as `From` rather than `Into` (Clippy `from_over_into`); the
// standard blanket impl still provides `Into<Easing_type>` to callers.
impl From<EasingJson> for Easing_type {
    /// Flatten a named or custom easing into concrete bezier/spring parameters.
    fn from(json: EasingJson) -> Self {
        match json {
            EasingJson::EaseIn => Easing_type::Bezier(Bezier {
                x1: 0.12,
                y1: 0.0,
                x2: 0.39,
                y2: 0.0,
                ..Default::default()
            }),
            EasingJson::EaseOut => Easing_type::Bezier(Bezier {
                x1: 0.61,
                y1: 1.0,
                x2: 0.88,
                y2: 1.0,
                ..Default::default()
            }),
            EasingJson::EaseInAndOut => Easing_type::Bezier(Bezier {
                x1: 0.37,
                y1: 0.0,
                x2: 0.63,
                y2: 1.0,
                ..Default::default()
            }),
            EasingJson::Linear => Easing_type::Bezier(Bezier {
                x1: 0.0,
                y1: 0.0,
                x2: 1.0,
                y2: 1.0,
                ..Default::default()
            }),
            EasingJson::EaseInBack => Easing_type::Bezier(Bezier {
                x1: 0.36,
                y1: 0.0,
                x2: 0.66,
                y2: -0.56,
                ..Default::default()
            }),
            EasingJson::EaseOutBack => Easing_type::Bezier(Bezier {
                x1: 0.34,
                y1: 1.56,
                x2: 0.64,
                y2: 1.0,
                ..Default::default()
            }),
            EasingJson::EaseInAndOutBack => Easing_type::Bezier(Bezier {
                x1: 0.68,
                y1: -0.6,
                x2: 0.32,
                y2: 1.6,
                ..Default::default()
            }),
            EasingJson::CustomCubicBezier { bezier } => Easing_type::Bezier(bezier.into()),
            EasingJson::Gentle => Easing_type::Spring(Spring {
                mass: 1.0,
                damping: 15.0,
                stiffness: 100.0,
                ..Default::default()
            }),
            EasingJson::Quick => Easing_type::Spring(Spring {
                mass: 1.0,
                damping: 20.0,
                stiffness: 300.0,
                ..Default::default()
            }),
            EasingJson::Bouncy => Easing_type::Spring(Spring {
                mass: 1.0,
                damping: 15.0,
                stiffness: 600.0,
                ..Default::default()
            }),
            EasingJson::Slow => Easing_type::Spring(Spring {
                mass: 1.0,
                damping: 20.0,
                stiffness: 80.0,
                ..Default::default()
            }),
            EasingJson::CustomSpring { spring } => Easing_type::Spring(spring.into()),
        }
    }
}
// `From` instead of `Into` (Clippy `from_over_into`); `Into<Easing>` comes
// from the standard blanket impl.
impl From<EasingJson> for Easing {
    /// Wrap the flattened easing parameters in the bundle's `Easing` message.
    fn from(json: EasingJson) -> Self {
        Easing { easing_type: Some(json.into()), ..Default::default() }
    }
}
/// Direction of a directional transition (move/push/slide), mirroring the
/// plugin's SCREAMING_SNAKE_CASE JSON values.
#[derive(Clone, Copy, PartialEq, Eq, Debug, Hash, Serialize, Deserialize)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum TransitionDirectionJson {
    Unspecified = 0,
    Left = 1,
    Right = 2,
    Top = 3,
    Bottom = 4,
}
// `From` instead of `Into` (Clippy `from_over_into`); callers still get
// `Into<TransitionDirection>` via the standard blanket impl.
impl From<TransitionDirectionJson> for TransitionDirection {
    /// Map the plugin's direction enum onto the bundle's protobuf enum.
    fn from(json: TransitionDirectionJson) -> Self {
        match json {
            TransitionDirectionJson::Unspecified => {
                TransitionDirection::TRANSITION_DIRECTION_UNSPECIFIED
            }
            TransitionDirectionJson::Left => TransitionDirection::TRANSITION_DIRECTION_LEFT,
            TransitionDirectionJson::Right => TransitionDirection::TRANSITION_DIRECTION_RIGHT,
            TransitionDirectionJson::Top => TransitionDirection::TRANSITION_DIRECTION_TOP,
            TransitionDirectionJson::Bottom => TransitionDirection::TRANSITION_DIRECTION_BOTTOM,
        }
    }
}
/// This represents the Figma "Transition" type.
/// https://www.figma.com/plugin-docs/api/Transition/
///
/// Each variant carries an easing curve and a duration; the directional
/// variants additionally carry a direction and Figma's "match layers" flag.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum TransitionJson {
    Dissolve {
        easing: EasingJson,
        duration: f32,
    },
    SmartAnimate {
        easing: EasingJson,
        duration: f32,
    },
    ScrollAnimate {
        easing: EasingJson,
        duration: f32,
    },
    MoveIn {
        easing: EasingJson,
        duration: f32,
        direction: TransitionDirectionJson,
        #[serde(rename = "matchLayers")]
        match_layers: bool,
    },
    MoveOut {
        easing: EasingJson,
        duration: f32,
        direction: TransitionDirectionJson,
        #[serde(rename = "matchLayers")]
        match_layers: bool,
    },
    Push {
        easing: EasingJson,
        duration: f32,
        direction: TransitionDirectionJson,
        #[serde(rename = "matchLayers")]
        match_layers: bool,
    },
    SlideIn {
        easing: EasingJson,
        duration: f32,
        direction: TransitionDirectionJson,
        #[serde(rename = "matchLayers")]
        match_layers: bool,
    },
    SlideOut {
        easing: EasingJson,
        duration: f32,
        direction: TransitionDirectionJson,
        #[serde(rename = "matchLayers")]
        match_layers: bool,
    },
}
// `From` instead of `Into` (Clippy `from_over_into`); the standard blanket
// impl keeps `transition.into()` working at existing call sites.
impl From<TransitionJson> for Transition_type {
    /// Convert the plugin's transition JSON into the bundle's
    /// `Transition_type` oneof, converting easing and direction along the way.
    fn from(json: TransitionJson) -> Self {
        match json {
            TransitionJson::Dissolve { easing, duration } => Transition_type::Dissolve(Dissolve {
                easing: Some(easing.into()).into(),
                duration,
                ..Default::default()
            }),
            TransitionJson::SmartAnimate { easing, duration } => {
                Transition_type::SmartAnimate(SmartAnimate {
                    easing: Some(easing.into()).into(),
                    duration,
                    ..Default::default()
                })
            }
            TransitionJson::ScrollAnimate { easing, duration } => {
                Transition_type::ScrollAnimate(ScrollAnimate {
                    easing: Some(easing.into()).into(),
                    duration,
                    ..Default::default()
                })
            }
            TransitionJson::MoveIn { easing, duration, direction, match_layers } => {
                Transition_type::MoveIn(MoveIn {
                    easing: Some(easing.into()).into(),
                    duration,
                    direction: TransitionDirection::from(direction.into()).into(),
                    match_layers,
                    ..Default::default()
                })
            }
            TransitionJson::MoveOut { easing, duration, direction, match_layers } => {
                Transition_type::MoveOut(MoveOut {
                    easing: Some(easing.into()).into(),
                    duration,
                    direction: TransitionDirection::from(direction.into()).into(),
                    match_layers,
                    ..Default::default()
                })
            }
            TransitionJson::Push { easing, duration, direction, match_layers } => {
                Transition_type::Push(Push {
                    easing: Some(easing.into()).into(),
                    duration,
                    direction: TransitionDirection::from(direction.into()).into(),
                    match_layers,
                    ..Default::default()
                })
            }
            TransitionJson::SlideIn { easing, duration, direction, match_layers } => {
                Transition_type::SlideIn(SlideIn {
                    easing: Some(easing.into()).into(),
                    duration,
                    direction: TransitionDirection::from(direction.into()).into(),
                    match_layers,
                    ..Default::default()
                })
            }
            TransitionJson::SlideOut { easing, duration, direction, match_layers } => {
                Transition_type::SlideOut(SlideOut {
                    easing: Some(easing.into()).into(),
                    duration,
                    direction: TransitionDirection::from(direction.into()).into(),
                    match_layers,
                    ..Default::default()
                })
            }
        }
    }
}
/// The kind of navigation a node action performs, mirroring the plugin's
/// `Navigation` values.
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum NavigationJson {
    Unspecified,
    Navigate,
    Swap,
    Overlay,
    ScrollTo,
    ChangeTo,
}
// `From` instead of `Into` (Clippy `from_over_into`); `Into<Navigation>` is
// still available via the standard blanket impl.
impl From<NavigationJson> for Navigation {
    /// Map the plugin's navigation enum onto the bundle's protobuf enum.
    fn from(json: NavigationJson) -> Self {
        match json {
            NavigationJson::Unspecified => Navigation::NAVIGATION_UNSPECIFIED,
            NavigationJson::Navigate => Navigation::NAVIGATION_NAVIGATE,
            NavigationJson::Swap => Navigation::NAVIGATION_SWAP,
            NavigationJson::Overlay => Navigation::NAVIGATION_OVERLAY,
            NavigationJson::ScrollTo => Navigation::NAVIGATION_SCROLL_TO,
            NavigationJson::ChangeTo => Navigation::NAVIGATION_CHANGE_TO,
        }
    }
}
/// A 2D point/offset as serialized by the plugin.
#[derive(Serialize, Deserialize, PartialEq, Clone, Debug)]
pub struct VectorJson {
    pub x: f32,
    pub y: f32,
}
// `From` instead of `Into` (Clippy `from_over_into`); the standard blanket
// impl provides `Into<Vector>` for existing call sites.
impl From<VectorJson> for Vector {
    /// Convert the plugin's 2D point into the bundle's `Vector` message.
    fn from(json: VectorJson) -> Self {
        Vector { x: json.x, y: json.y, ..Default::default() }
    }
}
/// What a reaction does, mirroring Figma's `Action` plugin type; the JSON
/// variant is chosen by a SCREAMING_SNAKE_CASE `type` tag.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum ActionJson {
    /// Navigate the top-level frame back.
    Back,
    /// Close the top-most overlay.
    Close,
    /// Open a URL.
    Url { url: String },
    /// Do something with a destination node.
    Node {
        /// Node that we should navigate to, change to, open or swap as an overlay, or
        /// scroll to reveal.
        #[serde(rename = "destinationId")]
        destination_id: Option<String>,
        /// The kind of navigation (really the kind of action) to perform with the destination
        /// node (if it's not null).
        navigation: NavigationJson,
        /// The transition to perform for this animation, if any.
        transition: Option<TransitionJson>,
        /// For "Navigate", should we open the new node with the current frame's scroll position?
        #[serde(rename = "preserveScrollPosition", default)]
        preserve_scroll_position: bool,
        /// For overlays that have been manually positioned.
        #[serde(rename = "overlayRelativePosition", default)]
        overlay_relative_position: Option<VectorJson>,
    },
}
// `From` instead of `Into` (Clippy `from_over_into`); `Into<Action_type>`
// remains available through the standard blanket impl.
impl From<ActionJson> for Action_type {
    /// Convert the plugin's action JSON into the bundle's `Action_type` oneof,
    /// converting navigation, transition and overlay position along the way.
    fn from(json: ActionJson) -> Self {
        match json {
            ActionJson::Back => Action_type::Back(().into()),
            ActionJson::Close => Action_type::Close(().into()),
            ActionJson::Url { url } => Action_type::Url(ActionUrl { url, ..Default::default() }),
            ActionJson::Node {
                destination_id,
                navigation,
                transition,
                preserve_scroll_position,
                overlay_relative_position,
            } => {
                let nav: Navigation = navigation.into();
                let pos: Option<Vector> = overlay_relative_position.map(|v| v.into());
                Action_type::Node(action::Node {
                    destination_id,
                    navigation: nav.into(),
                    transition: transition
                        .map(|t| Transition {
                            transition_type: Some(t.into()),
                            ..Default::default()
                        })
                        .into(),
                    preserve_scroll_position,
                    overlay_relative_position: pos.into(),
                    ..Default::default()
                })
            }
        }
    }
}
/// Trigger describes the input needed to make a Reaction happen.
///
/// Mirrors Figma's `Trigger` plugin type; the JSON variant is chosen by a
/// SCREAMING_SNAKE_CASE `type` tag.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
#[serde(tag = "type", rename_all = "SCREAMING_SNAKE_CASE")]
pub enum TriggerJson {
    OnClick,
    /// OnHover reverts Navigate and ChangeTo actions when hovering ends.
    OnHover,
    /// OnPress reverts Navigate and ChangeTo actions when hovering ends.
    OnPress,
    OnDrag,
    /// OnKeyDown has a list of JavaScript key codes. Multiple key codes are
    /// interpreted as all the keys pressed at the same time. An empty vector
    /// is interpreted as any key can trigger the action.
    OnKeyDown {
        #[serde(rename = "keyCodes")]
        key_codes: Vec<u8>,
    },
    AfterTimeout {
        timeout: f32,
    },
    MouseEnter {
        delay: f32,
    },
    MouseLeave {
        delay: f32,
    },
    MouseUp {
        delay: f32,
    },
    MouseDown {
        delay: f32,
    },
}
/// Convert the serde-deserialized trigger into its proto representation.
///
/// Implemented as `From` (rather than a hand-written `Into`) per Rust
/// convention; the blanket impl keeps every existing `.into()` call site
/// working unchanged.
impl From<TriggerJson> for Trigger_type {
    fn from(json: TriggerJson) -> Self {
        match json {
            TriggerJson::OnClick => Trigger_type::Click(().into()),
            TriggerJson::OnHover => Trigger_type::Hover(().into()),
            TriggerJson::OnPress => Trigger_type::Press(().into()),
            TriggerJson::OnDrag => Trigger_type::Drag(().into()),
            TriggerJson::OnKeyDown { key_codes } => {
                Trigger_type::KeyDown(KeyDown { key_codes, ..Default::default() })
            }
            TriggerJson::AfterTimeout { timeout } => {
                Trigger_type::AfterTimeout(Timeout { timeout, ..Default::default() })
            }
            TriggerJson::MouseEnter { delay } => {
                Trigger_type::MouseEnter(MouseEnter { delay, ..Default::default() })
            }
            TriggerJson::MouseLeave { delay } => {
                Trigger_type::MouseLeave(MouseLeave { delay, ..Default::default() })
            }
            TriggerJson::MouseUp { delay } => {
                Trigger_type::MouseUp(MouseUp { delay, ..Default::default() })
            }
            TriggerJson::MouseDown { delay } => {
                Trigger_type::MouseDown(MouseDown { delay, ..Default::default() })
            }
        }
    }
}
/// Reaction describes interactivity for a node. It's a pair of Action ("what happens?") and
/// Trigger ("how do you make it happen?")
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct ReactionJson {
    /// What happens when the trigger fires. A reaction without an action
    /// converts to `None` (see the `Into<Option<Reaction>>` impl below).
    pub action: Option<ActionJson>,
    /// The input event that makes the action happen.
    pub trigger: TriggerJson,
}
impl Into<Option<Reaction>> for ReactionJson {
fn into(self) -> Option<Reaction> {
if let Some(action) = self.action {
Some(Reaction {
action: Some(Action { action_type: Some(action.into()), ..Default::default() })
.into(),
trigger: Some(Trigger {
trigger_type: Some(self.trigger.into()),
..Default::default()
})
.into(),
..Default::default()
})
} else {
None
}
}
}
/// Where an overlay is anchored within the frame that presents it.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Copy)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum OverlayPositionJson {
    Center,
    TopLeft,
    TopCenter,
    TopRight,
    BottomLeft,
    BottomCenter,
    BottomRight,
    /// Manually positioned; the offset comes from the Action's
    /// `overlayRelativePosition`.
    Manual, // then we look at the Action
}
/// Map the serde overlay-position enum onto the generated proto enum,
/// variant by variant (a pure 1:1 rename).
impl Into<OverlayPositionType> for OverlayPositionJson {
    fn into(self) -> OverlayPositionType {
        match self {
            OverlayPositionJson::Center => OverlayPositionType::OVERLAY_POSITION_TYPE_CENTER,
            OverlayPositionJson::TopLeft => OverlayPositionType::OVERLAY_POSITION_TYPE_TOP_LEFT,
            OverlayPositionJson::TopCenter => OverlayPositionType::OVERLAY_POSITION_TYPE_TOP_CENTER,
            OverlayPositionJson::TopRight => OverlayPositionType::OVERLAY_POSITION_TYPE_TOP_RIGHT,
            OverlayPositionJson::BottomLeft => {
                OverlayPositionType::OVERLAY_POSITION_TYPE_BOTTOM_LEFT
            }
            OverlayPositionJson::BottomCenter => {
                OverlayPositionType::OVERLAY_POSITION_TYPE_BOTTOM_CENTER
            }
            OverlayPositionJson::BottomRight => {
                OverlayPositionType::OVERLAY_POSITION_TYPE_BOTTOM_RIGHT
            }
            OverlayPositionJson::Manual => OverlayPositionType::OVERLAY_POSITION_TYPE_MANUAL,
        }
    }
}
/// Background drawn behind an overlay. `color` is absent when the background
/// type is `"NONE"` in the plugin JSON (see `parse_frame_extras`).
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq)]
pub struct OverlayBackgroundJson {
    /// Scrim color when the background is a solid color.
    pub color: Option<FigmaColor>,
}
/// Build the proto `OverlayBackground`, converting the optional scrim color.
///
/// Implemented as `From` (rather than a hand-written `Into`) per Rust
/// convention; the blanket impl keeps `.into()` call sites working unchanged.
impl From<OverlayBackgroundJson> for OverlayBackground {
    fn from(json: OverlayBackgroundJson) -> Self {
        let float_color: Option<FloatColor> = json.color.map(|c| (&c).into());
        OverlayBackground { color: float_color.into(), ..Default::default() }
    }
}
/// Whether clicking outside an overlay dismisses it.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Copy)]
#[serde(rename_all = "SCREAMING_SNAKE_CASE")]
pub enum OverlayBackgroundInteractionJson {
    /// Clicks outside the overlay do nothing.
    None,
    /// Clicking outside the overlay closes it.
    CloseOnClickOutside,
}
/// Map the serde background-interaction enum onto the generated proto enum
/// (a pure 1:1 rename).
impl Into<OverlayBackgroundInteraction> for OverlayBackgroundInteractionJson {
    fn into(self) -> OverlayBackgroundInteraction {
        match self {
            OverlayBackgroundInteractionJson::None => {
                OverlayBackgroundInteraction::OVERLAY_BACKGROUND_INTERACTION_NONE
            }
            OverlayBackgroundInteractionJson::CloseOnClickOutside => {
                OverlayBackgroundInteraction::OVERLAY_BACKGROUND_INTERACTION_CLOSE_ON_CLICK_OUTSIDE
            }
        }
    }
}
/// Some frame properties are only available through the plugin API and are needed to
/// implement Reactions properly. They're included in this FrameExtras struct.
#[derive(PartialEq, Debug, Clone, Deserialize, Serialize)]
#[serde(rename_all = "camelCase")]
pub struct FrameExtrasJson {
    /// Count of children pinned in place while the rest of the frame scrolls.
    pub number_of_fixed_children: usize,
    /// Where this frame is anchored when shown as an overlay.
    pub overlay_position_type: OverlayPositionJson,
    /// Scrim drawn behind the frame when shown as an overlay.
    pub overlay_background: OverlayBackgroundJson,
    /// Whether clicking the scrim dismisses the overlay.
    pub overlay_background_interaction: OverlayBackgroundInteractionJson,
}
impl Into<FrameExtras> for FrameExtrasJson {
fn into(self) -> FrameExtras {
let bg: OverlayBackground = self.overlay_background.into();
FrameExtras {
fixed_children: self.number_of_fixed_children as u32,
overlay_position_type: OverlayPositionType::from(self.overlay_position_type.into())
.into(), //It's confusing but it works? Need to convert one
overlay_background: Some(bg).into(),
overlay_background_interaction: OverlayBackgroundInteraction::from(
self.overlay_background_interaction.into(),
)
.into(),
..Default::default()
}
}
}
// Verifies that FrameExtrasJson deserializes both the all-defaults shape and a
// click-to-close overlay with a solid scrim color.
#[test]
fn parse_frame_extras() {
    use serde_json;
    // Frame with all overlay settings at their defaults (no scrim color).
    let def = r#"{
        "numberOfFixedChildren": 0,
        "overlayPositionType": "CENTER",
        "overlayBackground": {
          "type": "NONE"
        },
        "overlayBackgroundInteraction": "NONE",
        "overflowDirection": "NONE"
      }"#;
    // Bottom-anchored overlay with a 25%-alpha black scrim that dismisses on
    // outside click. (overflowDirection is present in the payload but not
    // modeled by FrameExtrasJson.)
    let click_to_close = r#"{
        "numberOfFixedChildren": 0,
        "overlayPositionType": "BOTTOM_CENTER",
        "overlayBackground": {
          "type": "SOLID_COLOR",
          "color": {
            "r": 0,
            "g": 0,
            "b": 0,
            "a": 0.25
          }
        },
        "overlayBackgroundInteraction": "CLOSE_ON_CLICK_OUTSIDE",
        "overflowDirection": "HORIZONTAL_AND_VERTICAL_SCROLLING"
      }"#;
    let def: FrameExtrasJson = serde_json::from_str(def).unwrap();
    let click_to_close: FrameExtrasJson = serde_json::from_str(click_to_close).unwrap();
    assert_eq!(def.number_of_fixed_children, 0);
    assert_eq!(def.overlay_position_type, OverlayPositionJson::Center);
    // A "NONE"-typed background parses to a missing color.
    assert_eq!(def.overlay_background, OverlayBackgroundJson { color: None });
    assert_eq!(def.overlay_background_interaction, OverlayBackgroundInteractionJson::None);
    assert_eq!(click_to_close.number_of_fixed_children, 0);
    assert_eq!(click_to_close.overlay_position_type, OverlayPositionJson::BottomCenter);
    assert_eq!(
        click_to_close.overlay_background,
        OverlayBackgroundJson { color: Some(FigmaColor { r: 0.0, g: 0.0, b: 0.0, a: 0.25 }) }
    );
    assert_eq!(
        click_to_close.overlay_background_interaction,
        OverlayBackgroundInteractionJson::CloseOnClickOutside
    );
}
#[test]
fn parse_reactions() {
use serde_json;
use serde_json::Result;
let multiple_json_text = r#"[{"action":{"type":"NODE","destinationId":"13:1","navigation":"NAVIGATE","transition":{"type":"SMART_ANIMATE","easing":{"type":"EASE_IN_AND_OUT"},"duration":0.6000000238418579},"preserveScrollPosition":false},"trigger":{"type":"ON_CLICK"}},{"action":{"type":"NODE","destinationId":"13:1","navigation":"OVERLAY","transition":{"type":"MOVE_IN","direction":"RIGHT","matchLayers":false,"easing":{"type":"EASE_OUT"},"duration":0.30000001192092896},"preserveScrollPosition":false},"trigger":{"type":"ON_DRAG"}},{"action":{"type":"NODE","destinationId":"13:1","navigation":"SWAP","transition":{"type":"SMART_ANIMATE","easing":{"type":"EASE_OUT"},"duration":0.30000001192092896},"preserveScrollPosition":false},"trigger":{"type":"ON_KEY_DOWN","keyCodes":[60]}}]"#;
let scroll_json_text = r#"[{"action":{"type":"NODE","destinationId":"241:2","navigation":"SCROLL_TO","transition":{"type":"SCROLL_ANIMATE","easing":{"type":"EASE_OUT"},"duration":0.30000001192092896},"preserveScrollPosition":false},"trigger":{"type":"ON_HOVER"}}]"#;
let overlay_json_text = r#"[{"action":{"type":"NODE","destinationId":"222:27","navigation":"OVERLAY","transition":{"type":"MOVE_IN","direction":"TOP","matchLayers":false,"easing":{"type":"EASE_IN_AND_OUT"},"duration":0.30000001192092896},"preserveScrollPosition":false},"trigger":{"type":"ON_CLICK","keyCodes":[]}}]"#;
let maybe_multiple: Result<Vec<ReactionJson>> = serde_json::from_str(multiple_json_text);
let maybe_scroll: Result<Vec<ReactionJson>> = serde_json::from_str(scroll_json_text);
let maybe_overlay: Result<Vec<ReactionJson>> = serde_json::from_str(overlay_json_text);
let mut multiple_json = maybe_multiple.unwrap();
let mut scroll_json = maybe_scroll.unwrap();
let mut overlay_json = maybe_overlay.unwrap();
// We should check that `into` did what we expected it to do here.
let multiple: Vec<Reaction> =
multiple_json.drain(..).map(|json| Into::<Option<Reaction>>::into(json).unwrap()).collect();
let scroll: Vec<Reaction> =
scroll_json.drain(..).map(|json| Into::<Option<Reaction>>::into(json).unwrap()).collect();
let overlay: Vec<Reaction> =
overlay_json.drain(..).map(|json| Into::<Option<Reaction>>::into(json).unwrap()).collect();
// assert if the parsed objects have the correct properties
assert_eq!(multiple.len(), 3);
assert_eq!(scroll.len(), 1);
assert_eq!(overlay.len(), 1);
// Multiple assertions
assert_eq!(
multiple[0].action.as_ref().unwrap().action_type.as_ref().unwrap(),
&Action_type::Node(action::Node {
destination_id: Some("13:1".to_string()),
navigation: Navigation::NAVIGATION_NAVIGATE.into(),
transition: Some(Transition {
transition_type: Some(Transition_type::SmartAnimate(SmartAnimate {
easing: Some(Easing {
easing_type: Some(Easing_type::Bezier(Bezier {
x1: 0.37,
y1: 0.0,
x2: 0.63,
y2: 1.0,
..Default::default()
})),
..Default::default()
})
.into(),
duration: 0.6000000238418579,
..Default::default()
})),
..Default::default()
})
.into(),
preserve_scroll_position: false,
overlay_relative_position: None.into(),
..Default::default()
})
);
assert_eq!(
multiple[0].trigger.as_ref().unwrap().trigger_type.as_ref().unwrap(),
&Trigger_type::Click(().into())
);
assert_eq!(
multiple[1].action.as_ref().unwrap().action_type.as_ref().unwrap(),
&Action_type::Node(action::Node {
destination_id: Some("13:1".to_string()),
navigation: Navigation::NAVIGATION_OVERLAY.into(),
transition: Some(Transition {
transition_type: Some(Transition_type::MoveIn(MoveIn {
easing: Some(Easing {
easing_type: Some(Easing_type::Bezier(Bezier {
x1: 0.61,
y1: 1.0,
x2: 0.88,
y2: 1.0,
..Default::default()
})),
..Default::default()
})
.into(),
duration: 0.30000001192092896,
direction: TransitionDirection::TRANSITION_DIRECTION_RIGHT.into(),
match_layers: false,
..Default::default()
})),
..Default::default()
})
.into(),
preserve_scroll_position: false,
overlay_relative_position: None.into(),
..Default::default()
})
);
assert_eq!(
multiple[1].trigger.as_ref().unwrap().trigger_type.as_ref().unwrap(),
&Trigger_type::Drag(().into())
);
assert_eq!(
multiple[2].action.as_ref().unwrap().action_type.as_ref().unwrap(),
&Action_type::Node(action::Node {
destination_id: Some("13:1".to_string()),
navigation: Navigation::NAVIGATION_SWAP.into(),
transition: Some(Transition {
transition_type: Some(Transition_type::SmartAnimate(SmartAnimate {
easing: Some(Easing {
easing_type: Some(Easing_type::Bezier(Bezier {
x1: 0.61,
y1: 1.0,
x2: 0.88,
y2: 1.0,
..Default::default()
})),
..Default::default()
})
.into(),
duration: 0.30000001192092896,
..Default::default()
})),
..Default::default()
})
.into(),
preserve_scroll_position: false,
overlay_relative_position: None.into(),
..Default::default()
})
);
assert_eq!(
multiple[2].trigger.as_ref().unwrap().trigger_type.as_ref().unwrap(),
&Trigger_type::KeyDown(KeyDown { key_codes: vec![60], ..Default::default() })
);
// Scroll assertions
assert_eq!(
scroll[0].action.as_ref().unwrap().action_type.as_ref().unwrap(),
&Action_type::Node(action::Node {
destination_id: Some("241:2".to_string()),
navigation: Navigation::NAVIGATION_SCROLL_TO.into(),
transition: Some(Transition {
transition_type: Some(Transition_type::ScrollAnimate(ScrollAnimate {
easing: Some(Easing {
easing_type: Some(Easing_type::Bezier(Bezier {
x1: 0.61,
y1: 1.0,
x2: 0.88,
y2: 1.0,
..Default::default()
})),
..Default::default()
})
.into(),
duration: 0.30000001192092896,
..Default::default()
})),
..Default::default()
})
.into(),
preserve_scroll_position: false,
overlay_relative_position: None.into(),
..Default::default()
})
);
assert_eq!(
scroll[0].trigger.as_ref().unwrap().trigger_type.as_ref().unwrap(),
&Trigger_type::Hover(().into())
);
// Overlay assertions
assert_eq!(
overlay[0].action.as_ref().unwrap().action_type.as_ref().unwrap(),
&Action_type::Node(action::Node {
destination_id: Some("222:27".to_string()),
navigation: Navigation::NAVIGATION_OVERLAY.into(),
transition: Some(Transition {
transition_type: Some(Transition_type::MoveIn(MoveIn {
easing: Some(Easing {
easing_type: Some(Easing_type::Bezier(Bezier {
x1: 0.37,
y1: 0.0,
x2: 0.63,
y2: 1.0,
..Default::default()
})),
..Default::default()
})
.into(),
duration: 0.30000001192092896,
direction: TransitionDirection::TRANSITION_DIRECTION_TOP.into(),
match_layers: false,
..Default::default()
})),
..Default::default()
})
.into(),
preserve_scroll_position: false,
overlay_relative_position: None.into(),
..Default::default()
})
);
assert_eq!(
overlay[0].trigger.as_ref().unwrap().trigger_type.as_ref().unwrap(),
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | true |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_figma_import/src/document.rs | crates/dc_figma_import/src/document.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
component_context::ComponentContext,
error::Error,
extended_layout_schema::ExtendedAutoLayout,
figma_schema,
image_context::{ImageContext, ImageContextSession},
proxy_config::ProxyConfig,
transform_flexbox::create_component_flexbox,
variable_utils::create_variable,
};
use dc_bundle::definition::EncodedImageMap;
use dc_bundle::definition::NodeQuery;
use dc_bundle::figma_doc::FigmaDocInfo;
use dc_bundle::variable::variable_map::NameIdMap;
use dc_bundle::variable::{Collection, Mode, Variable, VariableMap};
use dc_bundle::view::view_data::{Container, View_data_type};
use dc_bundle::view::{ComponentInfo, ComponentOverrides, View, ViewData};
use dc_bundle::view_style::ViewStyle;
use log::error;
use protobuf::MessageField;
use std::time::Duration;
use std::{
collections::{HashMap, HashSet},
iter::FromIterator,
};
/// HTTP header carrying the Figma personal access token (see `http_fetch`).
const FIGMA_TOKEN_HEADER: &str = "X-Figma-Token";
/// Base endpoint for a file and its related resources (nodes, images, variables).
const BASE_FILE_URL: &str = "https://api.figma.com/v1/files/";
/// Base endpoint for resolving a published component by its key.
const BASE_COMPONENT_URL: &str = "https://api.figma.com/v1/components/";
/// Base endpoint for project queries (not referenced in this part of the file).
const BASE_PROJECT_URL: &str = "https://api.figma.com/v1/projects/";
/// Controls whether nodes marked invisible in Figma are traversed or ignored
/// while walking the document tree (see `fetch_component_variants`).
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum HiddenNodePolicy {
    /// Skip hidden nodes (and their subtrees) entirely.
    Skip,
    /// Process hidden nodes like visible ones.
    Keep,
}
/// Perform a blocking GET against the Figma API, authenticating with the given
/// API key and honoring an optional HTTP proxy. Returns the response body as
/// text; network failures and non-2xx statuses surface as errors.
fn http_fetch(api_key: &str, url: String, proxy_config: &ProxyConfig) -> Result<String, Error> {
    let mut builder = reqwest::blocking::Client::builder().timeout(Duration::from_secs(90));
    // Only HttpProxyConfig is supported; other variants fall through unproxied.
    if let ProxyConfig::HttpProxyConfig(spec) = proxy_config {
        builder = builder.proxy(reqwest::Proxy::all(spec)?);
    }
    let client = builder.build()?;
    let response = client
        .get(url.as_str())
        .header(FIGMA_TOKEN_HEADER, api_key)
        .send()?
        .error_for_status()?;
    Ok(response.text()?)
}
/// Document update requests return this value to indicate if an update was
/// made or not.
#[derive(Copy, Clone, PartialEq, Eq, Debug)]
pub enum UpdateStatus {
    /// The remote document changed and the local copy was refreshed.
    Updated,
    /// The remote document matched the local copy; nothing was fetched.
    NotUpdated,
}
/// Branches always return head of file, i.e. no version returned.
/// Collects every branch entry that carries both a "key" and a "name" into
/// `FigmaDocInfo` records; malformed entries are silently dropped.
fn get_branches(document_root: &figma_schema::FileResponse) -> Vec<FigmaDocInfo> {
    document_root
        .branches
        .iter()
        .flatten()
        .filter_map(|entry| match (entry.get("key"), entry.get("name")) {
            (Some(Some(id)), Some(Some(name))) => Some(FigmaDocInfo {
                name: name.clone(),
                id: id.clone(),
                ..Default::default()
            }),
            _ => None,
        })
        .collect()
}
/// Read the `image_hash_to_res` mapping that the DesignCompose plugin stores in
/// the document root's shared plugin data. Returns an empty map when the plugin
/// entry, the key, or a parseable JSON payload is absent.
///
/// The original did a `contains_key` followed by `get` (two lookups) inside
/// nested ifs with a trailing `return`; this chain performs a single lookup and
/// preserves the best-effort behavior (malformed JSON degrades to "no mapping").
fn load_image_hash_to_res_map(
    document_root: &figma_schema::FileResponse,
) -> HashMap<String, String> {
    document_root
        .document
        .shared_plugin_data
        .get("designcompose")
        .and_then(|vsw_data| vsw_data.get("image_hash_to_res"))
        .and_then(|data| serde_json::from_str::<HashMap<String, String>>(data.as_str()).ok())
        .unwrap_or_default()
}
/// Document is used to access and maintain an entire Figma document, including
/// components and image resources. It can be updated if the source document
/// has changed since this structure was created.
pub struct Document {
    /// Figma personal access token sent with every API request.
    api_key: String,
    /// File key of this document on api.figma.com.
    document_id: String,
    /// Specific file version to fetch; empty string means the head version.
    version_id: String,
    /// Proxy settings applied to all HTTP requests.
    proxy_config: ProxyConfig,
    /// Parsed response for the entire file.
    document_root: figma_schema::FileResponse,
    /// Variables responses keyed by document id: this document plus any remote
    /// documents that component variants were fetched from.
    variables_responses: HashMap<String, figma_schema::VariablesResponse>,
    /// Tracks image fills and downloaded image data for the document.
    image_context: ImageContext,
    /// Component variant nodes fetched from remote documents, keyed by node id.
    variant_nodes: HashMap<String, figma_schema::Node>,
    // NOTE(review): usage is outside this chunk — presumably maps component ids
    // to their component-set name; confirm before relying on it.
    component_sets: HashMap<String, String>,
    /// Branches of this document (branches always track the head version).
    pub branches: Vec<FigmaDocInfo>,
    // NOTE(review): usage is outside this chunk — presumably maps component
    // keys to node ids; confirm before relying on it.
    key_to_global_id_map: HashMap<String, String>,
    /// Cache of node responses fetched from remote documents, keyed by
    /// (document id, node id) to avoid repeated network requests.
    remote_node_responses: HashMap<(String, String), figma_schema::NodesResponse>,
}
impl Document {
    /// Borrow the root node of the parsed Figma document tree.
    pub fn root_node(&self) -> &figma_schema::Node {
        &self.document_root.document
    }
    /// Fetch a document from Figma and return a Document instance that can be used
    /// to extract toolkit nodes.
    ///
    /// * `api_key` - Figma personal access token (sent in the `X-Figma-Token` header).
    /// * `document_id` - the Figma file key to download.
    /// * `version_id` - specific version to fetch; empty string means head.
    /// * `proxy_config` - optional HTTP proxy used for all requests.
    /// * `image_session` - previously saved image-cache state to resume, if any.
    ///
    /// Returns an error if the document or image-fill request fails or its JSON
    /// cannot be decoded; a variables-fetch failure is only logged.
    pub fn new(
        api_key: &str,
        document_id: String,
        version_id: String,
        proxy_config: &ProxyConfig,
        image_session: Option<ImageContextSession>,
    ) -> Result<Document, Error> {
        // Fetch the document...
        let mut document_url = format!(
            "{}{}?plugin_data=shared&geometry=paths&branch_data=true",
            BASE_FILE_URL, document_id,
        );
        if !version_id.is_empty() {
            document_url.push_str("&version=");
            document_url.push_str(&version_id);
        }
        let document_root: figma_schema::FileResponse =
            serde_json::from_str(http_fetch(api_key, document_url, proxy_config)?.as_str())?;
        // ...and the mapping from imageRef to URL. It returns images from all versions.
        let image_ref_url = format!("{}{}/images", BASE_FILE_URL, document_id);
        let image_refs: figma_schema::ImageFillResponse =
            serde_json::from_str(http_fetch(api_key, image_ref_url, proxy_config)?.as_str())?;
        let image_hash_to_res_map = load_image_hash_to_res_map(&document_root);
        let mut image_context =
            ImageContext::new(image_refs.meta.images, image_hash_to_res_map, proxy_config);
        if let Some(session) = image_session {
            image_context.add_session_info(session);
        }
        let branches = get_branches(&document_root);
        let mut variables_responses = HashMap::new();
        // Variables are best-effort: a failure is logged, not propagated.
        match Self::fetch_variables(api_key, &document_id, proxy_config).map_err(Error::from) {
            Ok(it) => {
                variables_responses.insert(document_id.clone(), it);
            }
            Err(err) => {
                error!("Failed to fetch variables for doc {} {:?}", document_id, err);
            }
        };
        Ok(Document {
            api_key: api_key.to_string(),
            document_id,
            version_id,
            proxy_config: proxy_config.clone(),
            document_root,
            variables_responses,
            image_context,
            variant_nodes: HashMap::new(),
            component_sets: HashMap::new(),
            branches,
            key_to_global_id_map: HashMap::new(),
            remote_node_responses: HashMap::new(),
        })
    }
// Fetch and store all the variables, collections, and modes from the Figma document.
fn fetch_variables(
api_key: &str,
document_id: &String,
proxy_config: &ProxyConfig,
) -> Result<figma_schema::VariablesResponse, Error> {
let variables_url = format!("{}{}/variables/local", BASE_FILE_URL, document_id);
let var_fetch = http_fetch(api_key, variables_url, proxy_config)?;
let var_response: figma_schema::VariablesResponse =
serde_json::from_str(var_fetch.as_str())?;
Ok(var_response)
}
/// Fetch a document from Figma only if it has changed since the given last
/// modified time.
pub fn new_if_changed(
api_key: &str,
document_id: String,
requested_version_id: String,
proxy_config: &ProxyConfig,
last_modified: String,
last_version: String,
image_session: Option<ImageContextSession>,
) -> Result<Option<Document>, Error> {
let mut document_head_url = format!("{}{}?depth=1", BASE_FILE_URL, document_id);
if !requested_version_id.is_empty() {
document_head_url.push_str("&version=");
document_head_url.push_str(&requested_version_id);
}
let document_head: figma_schema::FileHeadResponse =
serde_json::from_str(http_fetch(api_key, document_head_url, proxy_config)?.as_str())?;
if document_head.last_modified == last_modified && document_head.version == last_version {
return Ok(None);
}
Document::new(api_key, document_id, requested_version_id, proxy_config, image_session)
.map(Some)
}
    /// Ask Figma if an updated document is available, and then fetch the updated document
    /// if so.
    ///
    /// Returns `UpdateStatus::NotUpdated` when both the last-modified time and
    /// version match the cached document; otherwise replaces the cached document
    /// root, branches, and image mappings and returns `UpdateStatus::Updated`.
    pub fn update(&mut self, proxy_config: &ProxyConfig) -> Result<UpdateStatus, Error> {
        self.proxy_config = proxy_config.clone();
        // Fetch just the top level of the document. (depth=0 causes an internal server error).
        let mut document_head_url = format!("{}{}?depth=1", BASE_FILE_URL, self.document_id);
        if !self.version_id.is_empty() {
            document_head_url.push_str("&version=");
            document_head_url.push_str(&self.version_id);
        }
        let document_head: figma_schema::FileHeadResponse = serde_json::from_str(
            http_fetch(self.api_key.as_str(), document_head_url, &self.proxy_config)?.as_str(),
        )?;
        // Now compare the version and modification times and bail out if they're the same.
        // Figma docs include a "version" field, but that doesn't always change when the document
        // changes (but the mtime always seems to change). The version does change (and mtime does
        // not) when a branch is created.
        if document_head.last_modified == self.document_root.last_modified
            && document_head.version == self.document_root.version
        {
            return Ok(UpdateStatus::NotUpdated);
        }
        // Fetch the updated document in its entirety and replace our document root...
        let mut document_url = format!(
            "{}{}?plugin_data=shared&geometry=paths&branch_data=true",
            BASE_FILE_URL, self.document_id,
        );
        if !self.version_id.is_empty() {
            document_url.push_str("&version=");
            document_url.push_str(&self.version_id);
        }
        let document_root: figma_schema::FileResponse = serde_json::from_str(
            http_fetch(self.api_key.as_str(), document_url, &self.proxy_config)?.as_str(),
        )?;
        // ...and the mapping from imageRef to URL. It returns images from all versions.
        let image_ref_url = format!("{}{}/images", BASE_FILE_URL, self.document_id);
        let image_refs: figma_schema::ImageFillResponse = serde_json::from_str(
            http_fetch(self.api_key.as_str(), image_ref_url, &self.proxy_config)?.as_str(),
        )?;
        self.branches = get_branches(&document_root);
        self.document_root = document_root;
        self.image_context.update_images(image_refs.meta.images);
        Ok(UpdateStatus::Updated)
    }
    /// Return the last modified time of this document. This seems to update whenever the doc
    /// is changed (but the version number does not). Compared against the head
    /// response in `update` to decide whether to re-download.
    pub fn last_modified(&self) -> &String {
        &self.document_root.last_modified
    }
/// Find all nodes whose data is of type NodeData::Instance, which means could be a
/// component with variants. Find its parent, and if it is of type NodeData::ComponentSet,
/// then add all of its children to self.variant_nodes. Also fill out node_doc_hash,
/// which hashes node ids to the document id they come from.
fn fetch_component_variants(
&mut self,
node: &figma_schema::Node,
node_doc_hash: &mut HashMap<String, String>,
variant_nodes: &mut HashMap<String, figma_schema::Node>,
id_index: &HashMap<String, &figma_schema::Node>,
component_hash: &HashMap<String, figma_schema::Component>,
parent_tree: &mut Vec<String>,
error_list: &mut Vec<String>,
error_hash: &mut HashSet<String>,
completed_hash: &mut HashSet<String>,
hidden_node_policy: HiddenNodePolicy,
) -> Result<(), Error> {
// Ignore hidden nodes
if !node.visible && hidden_node_policy == HiddenNodePolicy::Skip {
return Ok(());
}
fn add_node_doc_hash(
node: &figma_schema::Node,
node_doc_hash: &mut HashMap<String, String>,
doc_id: &String,
) {
// Add the node id, doc id to the hash and recurse on all children
node_doc_hash.insert(node.id.clone(), doc_id.clone());
for child in &node.children {
add_node_doc_hash(child, node_doc_hash, doc_id);
}
}
if let figma_schema::NodeData::Instance { frame: _, component_id } = &node.data {
// If the component_id is in id_index, we know it's in this document so we don't
// need to do anything. If it isn't, it's in a different doc, so proceed to
// download data for it
if !id_index.contains_key(component_id) {
// Find the component info for the component_id
let component = component_hash.get(component_id);
if let Some(component) = component {
// Fetch the component from the figma api given its key
let file_key = component.key.clone();
// If we already retrieved this component instance but got an error, don't try again
if error_hash.contains(&file_key) {
return Ok(());
}
// If we already completed this node, skip
if completed_hash.contains(&file_key) {
return Ok(());
}
let component_url = format!("{}{}", BASE_COMPONENT_URL, file_key);
let component_http_response = match http_fetch(
self.api_key.as_str(),
component_url.clone(),
&self.proxy_config,
) {
Ok(str) => {
completed_hash.insert(file_key);
str
}
Err(e) => {
let fetch_error = if let Error::NetworkError(reqwest_error) = &e {
if let Some(code) = reqwest_error.status() {
format!("HTTP {} at {}", code, component_url)
} else {
reqwest_error.to_string()
}
} else {
e.to_string()
};
let error_string = format!(
"Fetch component error {}: {} -> {}",
fetch_error,
parent_tree.join(" -> "),
node.name
);
error_hash.insert(file_key);
error_list.push(error_string);
return Ok(());
}
};
// Deserialize into a ComponentKeyResponse
let component_key_response: figma_schema::ComponentKeyResponse =
serde_json::from_str(component_http_response.as_str())?;
// If this variant points to a file_key different than this document, fetch it
let maybe_parent_node_id = component_key_response.parent_id();
if let Some(parent_node_id) = maybe_parent_node_id {
let variant_document_id = component_key_response.meta.file_key;
if variant_document_id != self.document_id {
if let Err(e) = Self::fetch_variables(
&self.api_key,
&variant_document_id,
&self.proxy_config,
) {
error!(
"Failed to fetch variables for remote document {}: {:?}",
variant_document_id, e
);
}
if !self.variables_responses.contains_key(&variant_document_id) {
match Self::fetch_variables(
self.api_key.as_str(),
&variant_document_id,
&self.proxy_config,
) {
Ok(vars) => {
self.variables_responses
.insert(variant_document_id.clone(), vars);
}
Err(e) => {
error!(
"Failed to fetch variables for doc {}: {:?}",
variant_document_id, e
);
}
}
}
let nodes_response = if let Some(response) = self
.remote_node_responses
.get(&(variant_document_id.clone(), parent_node_id.clone()))
{
response.clone()
} else {
let nodes_url = format!(
"{}{}/nodes?ids={}",
BASE_FILE_URL, variant_document_id, parent_node_id
);
let http_str = http_fetch(
self.api_key.as_str(),
nodes_url,
&self.proxy_config,
)?;
let response: figma_schema::NodesResponse =
serde_json::from_str(http_str.as_str())?;
self.remote_node_responses.insert(
(variant_document_id.clone(), parent_node_id.clone()),
response.clone(),
);
response
};
// The response is a list of nodes, but we only requested one so this loop
// should only go through one time
for (node_id, node_response_data) in nodes_response.nodes {
if node_id != parent_node_id {
continue; // We only care about parent_node_id
}
// If the parent is a COMPONENT_SET, then we want to get the parent's children
// and add them to our list of nodes
if let figma_schema::NodeData::ComponentSet { frame: _ } =
node_response_data.document.data
{
for node in node_response_data.document.children {
add_node_doc_hash(
&node,
node_doc_hash,
&variant_document_id,
);
// Recurse on all children
for child in &node.children {
parent_tree.push(node.name.clone());
self.fetch_component_variants(
child,
node_doc_hash,
variant_nodes,
id_index,
&node_response_data.components,
parent_tree,
error_list,
error_hash,
completed_hash,
hidden_node_policy,
)?;
parent_tree.pop();
}
variant_nodes.insert(node.id.clone(), node);
}
}
}
}
} else {
let error_string = format!(
"Fetch component unable to find component parent for: {} -> {}",
parent_tree.join(" -> "),
node.name
);
error_list.push(error_string);
return Ok(());
}
}
}
}
// Recurse on all children
for child in &node.children {
parent_tree.push(node.name.clone());
self.fetch_component_variants(
child,
node_doc_hash,
variant_nodes,
id_index,
component_hash,
parent_tree,
error_list,
error_hash,
completed_hash,
hidden_node_policy,
)?;
parent_tree.pop();
}
Ok(())
}
    /// Find all of the Component Instance views and see which style and text properties are
    /// overridden in the instance compared to the reference component. If we then render a
    /// different variant of the component (due to an interaction) we can apply these delta
    /// styles to get the correct output.
    fn compute_component_overrides(&self, nodes: &mut HashMap<NodeQuery, View>) {
        // XXX: Would be nice to avoid cloning here. Do we need to? We need to mutate the
        // instance views in place. And we can't hold a ref and a mutable ref to nodes
        // at the same time.
        let reference_components = nodes.clone();
        // This function finds all of the Component Instances (views with a populated
        // component_info field) in the given view tree, and looks up which component
        // they are an instance of. If the component is found, then the "action" function
        // is run with a mutable reference to the Component Instance view and a reference
        // to the component.
        //
        // These two pieces of information (the instance and the component) can then be
        // used to figure out which properties of the component have been customized in
        // the instance. Then we can be sure to apply those customized properties to other
        // variants (where Figma just gives us the variant definition, but not filled out
        // instance with overrides applied).
        fn for_each_component_instance(
            reference_components: &HashMap<NodeQuery, View>,
            view: &mut View,
            parent_component_info: Option<&mut ComponentInfo>,
            parent_reference_component: Option<&View>,
            // Invoked with (style, data, view id, overrides-table key,
            // component info, reference component, is_component_root) for each
            // view that belongs to a component instance.
            action: &impl Fn(
                MessageField<ViewStyle>,
                MessageField<ViewData>,
                String,
                String,
                &mut ComponentInfo,
                Option<&View>,
                bool,
            ),
        ) {
            match (view.component_info.as_mut(), parent_component_info) {
                (Some(info), _) => {
                    // This is the root node of a component instance.
                    // Compute its style and data overrides and write to its component info whose
                    // key is the component_set_name.
                    // See if we can find the target component. If not then don't look up
                    // references. Try searching by id, name, and variant
                    let queries = [
                        NodeQuery::NodeId(info.id.clone()),
                        NodeQuery::NodeName(info.name.clone()),
                        NodeQuery::NodeVariant(info.name.clone(), info.component_set_name.clone()),
                    ];
                    let reference_component_option =
                        queries.iter().find_map(|query| reference_components.get(query));
                    if reference_component_option.is_some() {
                        action(
                            view.style.clone(),
                            view.data.clone(),
                            view.id.clone(),
                            info.component_set_name.clone(),
                            info,
                            reference_component_option,
                            true,
                        );
                    }
                    // Recurse into container children, now carrying this
                    // instance's info as the parent context.
                    if let Some(data) = view.data.as_mut() {
                        if let Some(View_data_type::Container { 0: Container { children, .. } }) =
                            data.view_data_type.as_mut()
                        {
                            for child in children {
                                for_each_component_instance(
                                    reference_components,
                                    child,
                                    Some(info),
                                    reference_component_option,
                                    action,
                                );
                            }
                        }
                    }
                }
                (None, Some(parent_info)) => {
                    // This matches a descendent view of a component instance.
                    // The style and data overrides are written to hash map keyed by the view name
                    // in the component info of the instance.
                    action(
                        view.style.clone(),
                        view.data.clone(),
                        view.id.clone(),
                        view.name.clone(),
                        parent_info,
                        parent_reference_component,
                        false,
                    );
                    if let Some(data) = view.data.as_mut() {
                        if let Some(View_data_type::Container { 0: Container { children, .. } }) =
                            data.view_data_type.as_mut()
                        {
                            for child in children {
                                for_each_component_instance(
                                    reference_components,
                                    child,
                                    Some(parent_info),
                                    parent_reference_component,
                                    action,
                                );
                            }
                        }
                    }
                }
                (None, None) => {
                    // This matches the nodes from the root node of the view tree until it
                    // meets a component instance.
                    if let Some(data) = view.data.as_mut() {
                        if let Some(View_data_type::Container { 0: Container { children, .. } }) =
                            data.view_data_type.as_mut()
                        {
                            for child in children {
                                for_each_component_instance(
                                    reference_components,
                                    child,
                                    None,
                                    None,
                                    action,
                                );
                            }
                        }
                    }
                }
            }
        }
        for view in nodes.values_mut() {
            for_each_component_instance(
                &reference_components,
                view,
                None,
                None,
                &|view_style,
                  view_data,
                  view_id,
                  // overrides_table_key will either be the component_set_name or view_name.
                  // This only works if the view name is identical.
                  overrides_table_key,
                  component_info,
                  component,
                  is_component_root| {
                    if let Some(reference_component) = component {
                        // The template is the component root itself, or the
                        // matching descendant view looked up by id.
                        let template_view_option = if is_component_root {
                            Some(reference_component)
                        } else {
                            reference_component.find_view_by_id(&view_id)
                        };
                        if let Some(template_view) = template_view_option {
                            // Only store a style delta when the instance differs
                            // from the template.
                            let override_view_style = if view_style == template_view.style {
                                MessageField::none()
                            } else if let Some(view_style_ref) = view_style.as_ref() {
                                let diff: Option<ViewStyle> =
                                    Some(template_view.style().difference(view_style_ref));
                                diff.into()
                            } else {
                                error!("ViewStyle is required.");
                                MessageField::none()
                            };
                            let override_view_data =
                                if let Some(reference_view_data) = template_view.data.as_ref() {
                                    if let Some(data) = view_data.as_ref() {
                                        reference_view_data.difference(data).into()
                                    } else {
                                        MessageField::none()
                                    }
                                } else {
                                    MessageField::none()
                                };
                            if override_view_style.is_some() || override_view_data.is_some() {
                                component_info.overrides_table.insert(
                                    overrides_table_key,
                                    ComponentOverrides {
                                        style: override_view_style,
                                        view_data: override_view_data,
                                        ..Default::default()
                                    },
                                );
                            }
                        }
                    }
                },
            );
        }
    }
/// Convert the nodes with the given names to a structure that's closer to a toolkit
/// View. This method doesn't use the toolkit itself.
pub fn nodes(
&mut self,
node_names: &Vec<NodeQuery>,
ignored_images: &Vec<(NodeQuery, Vec<String>)>,
error_list: &mut Vec<String>,
hidden_node_policy: HiddenNodePolicy,
) -> Result<HashMap<NodeQuery, View>, Error> {
        // First we gather all of the nodes that we're going to convert and find all of the
// child nodes that can't be rendered. Then we ask Figma to do a batch render on
// them. Finally we convert and return the set of toolkit nodes.
fn index_node<'a>(
node: &'a figma_schema::Node,
parent_node: Option<&'a figma_schema::Node>,
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | true |
google/automotive-design-compose | https://github.com/google/automotive-design-compose/blob/4caea40f7dfc29cafb17c0cc981d1a5607ef0aad/crates/dc_figma_import/src/extended_layout_schema.rs | crates/dc_figma_import/src/extended_layout_schema.rs | // Copyright 2023 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::{Deserialize, Serialize};
// This module deserializes our "vsw Extended Layout" plugin output. The plugin writes one of two
// values: "vsw-extended-text-layout" and "vsw-extended-auto-layout". We define a struct for each
// so we don't have to deal with serde/bincode tagged enum issues.
/// ExtendedTextLayout is an extra set of fields set by our plugin that help us to deal with
/// dynamic text content. We need to know if it should wrap, or be truncated or ellipsized,
/// and Figma doesn't have any properties for those details because Figma only deals with
/// static text.
///
/// Deserialized from the plugin's "vsw-extended-text-layout" value (see module comment).
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct ExtendedTextLayout {
    /// The number of lines to let the text wrap to, or zero for an unlimited number of lines.
    pub line_count: u32,
    /// Should the text be ellipsized (true) when it runs out of space, or just truncated (false)?
    pub ellipsize: bool,
}
/// Per-item span configuration for grid layouts: how many rows or columns the
/// named node occupies when placed in a grid (see `GridLayoutData::span_content`).
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct SpanData {
    /// The node name of an item that goes into a grid layout
    #[serde(default)]
    pub node_name: String,
    /// The number of rows or columns this item occupies in the grid layout
    #[serde(default)]
    pub span: u32,
    /// If true, spans all columns or rows in the grid layout
    #[serde(default)]
    pub max_span: bool,
}
/// The kind of extended layout the plugin applied to a frame: none, a grid
/// (fixed or adaptive rows/columns — see `is_grid`), or a linear row/column
/// layout (see `is_row_or_column`).
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub enum LayoutType {
    /// No extended layout.
    #[default]
    None,
    /// Grid with a fixed number of columns.
    FixedColumns,
    /// Grid with a fixed number of rows.
    FixedRows,
    /// Grid with adaptive columns.
    AutoColumns,
    /// Grid with adaptive rows.
    AutoRows,
    /// Linear horizontal layout.
    Horizontal,
    /// Linear vertical layout.
    Vertical,
}
impl LayoutType {
    /// Returns true if this layout arranges children in a grid, i.e. any of
    /// the fixed or adaptive row/column variants.
    pub(crate) fn is_grid(&self) -> bool {
        // `matches!` keeps the same semantics as the previous match-with-catch-all
        // but is the idiomatic form (clippy: match_like_matches_macro).
        matches!(
            self,
            LayoutType::FixedColumns
                | LayoutType::FixedRows
                | LayoutType::AutoColumns
                | LayoutType::AutoRows
        )
    }

    /// Returns true if this layout is a simple linear row or column layout.
    pub(crate) fn is_row_or_column(&self) -> bool {
        matches!(self, LayoutType::Horizontal | LayoutType::Vertical)
    }
}
/// Alignment of children along an axis: packed to the start, centered, or
/// packed to the end.
#[derive(Deserialize, Serialize, Debug, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub enum Alignment {
    #[default]
    Start,
    Center,
    End,
}
/// Layout parameters shared between grid and auto (row/column) layouts:
/// per-side margins and whether scrolling is enabled.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct CommonLayoutData {
    /// Left margin of widget layout
    #[serde(default)]
    pub margin_left: f32,
    /// Right margin of widget layout
    #[serde(default)]
    pub margin_right: f32,
    /// Top margin of widget layout
    #[serde(default)]
    pub margin_top: f32,
    /// Bottom margin of widget layout
    #[serde(default)]
    pub margin_bottom: f32,
    /// Scrolling enabled
    #[serde(default)]
    pub scrolling: bool,
}
impl Default for CommonLayoutData {
fn default() -> CommonLayoutData {
CommonLayoutData {
margin_left: 0.0,
margin_right: 0.0,
margin_top: 0.0,
margin_bottom: 0.0,
scrolling: false,
}
}
}
/// How a widget sizes itself: hug its contents, or keep a fixed size.
#[derive(Deserialize, Serialize, Debug, Clone, PartialEq, Default)]
#[serde(rename_all = "camelCase")]
pub enum SizePolicy {
    /// Size to fit the contents.
    Hug,
    /// Keep the authored fixed size.
    #[default]
    Fixed,
}
/// Parameters for horizontal/vertical (auto) layouts, set by the preview widget:
/// item spacing, per-axis alignment, space-between mode, and size policy.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct AutoLayoutData {
    /// Spacing in pixels between items
    #[serde(default)]
    pub item_spacing: f32,
    /// Horizontal alignment
    #[serde(default)]
    pub horizontal_alignment: Alignment,
    /// Vertical alignment
    #[serde(default)]
    pub vertical_alignment: Alignment,
    /// Space between instead of fixed item spacing
    #[serde(default)]
    pub space_between: bool,
    /// Size policy: hug contents or fixed
    #[serde(default)]
    pub size_policy: SizePolicy,
}
impl Default for AutoLayoutData {
    /// No spacing, start-aligned on both axes, fixed spacing mode, fixed sizing.
    fn default() -> Self {
        Self {
            item_spacing: 0.0,
            horizontal_alignment: Alignment::Start,
            vertical_alignment: Alignment::Start,
            space_between: false,
            size_policy: SizePolicy::Fixed,
        }
    }
}
/// Parameters specific to grid layouts (fixed or adaptive rows/columns).
///
/// NOTE(review): the per-field `#[serde(default)]` attributes fall back to each
/// *field type's* default when a field is missing from the plugin JSON (e.g. 0
/// for `columns_rows`), not to the values in this struct's manual `Default`
/// impl (2, 100, 1, ...). Confirm that asymmetry is intentional.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct GridLayoutData {
    /// The number of fixed columns if horizontal is true, or rows if horizontal is false
    #[serde(default)]
    pub columns_rows: u32,
    /// The minimum width or height of a column or row when using adaptive columns/rows
    #[serde(default)]
    pub adaptive_min_size: u32,
    /// Automatic column/row edge-to-edge spacing
    #[serde(default)]
    pub auto_spacing: bool,
    /// Item size for auto spacing
    #[serde(default)]
    pub auto_spacing_item_size: i32,
    /// Vertical spacing in pixels between items
    #[serde(default)]
    pub vertical_spacing: i32,
    /// Horizontal spacing in pixels between items
    #[serde(default)]
    pub horizontal_spacing: i32,
    /// The number of columns or rows that each type of item that can go into this grid occupies
    #[serde(default)]
    pub span_content: Vec<SpanData>,
}
impl Default for GridLayoutData {
fn default() -> GridLayoutData {
GridLayoutData {
columns_rows: 2,
adaptive_min_size: 100,
auto_spacing: false,
auto_spacing_item_size: 1,
vertical_spacing: 0,
horizontal_spacing: 0,
span_content: vec![],
}
}
}
/// Parameters for capping the number of children in a frame and naming the
/// overflow node that replaces the last child when the cap is exceeded.
#[derive(Serialize, Deserialize, Debug, Default, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct LimitContentData {
    /// Maximum number of children a frame can have.
    #[serde(default)]
    pub max_num_items: u32,
    /// Name of the overflow node used as the last child when there are more
    /// children than `max_num_items`.
    #[serde(default)]
    pub overflow_node_name: String,
    /// Id of the overflow node used as the last child when there are more
    /// children than `max_num_items`.
    #[serde(default)]
    pub overflow_node_id: String,
}
/// The plugin's "vsw-extended-auto-layout" value (see module comment): selects a
/// `LayoutType` and carries the grid / auto-layout / limit-content parameters
/// that apply to it.
#[derive(Serialize, Deserialize, Debug, PartialEq)]
#[serde(rename_all = "camelCase")]
pub struct ExtendedAutoLayout {
    /// Should the layout wrap when its children overflow the available space? This
    /// corresponds to the "flex-wrap" property and is useful for making grids.
    #[serde(default)]
    pub wrap: bool,
    /// For frames with scrolling enabled, this is true if scrolling is page-based,
    /// meaning the width/height of the frame is considered a page and scrolling snaps
    /// to the next page
    #[serde(default)]
    pub paged_scrolling: bool,
    /// Layout type: Grid (column or row, fixed or adaptive) or Autolayout (horizontal or vertical)
    #[serde(default)]
    pub layout: LayoutType,
    /// Various parameters shared between grid and auto layouts
    #[serde(default)]
    pub common_data: CommonLayoutData,
    /// Various parameters for grid layouts
    #[serde(default)]
    pub grid_layout_data: GridLayoutData,
    /// Various parameters for horizontal/vertical layouts from the preview widget
    #[serde(default)]
    pub auto_layout_data: AutoLayoutData,
    /// For frames with autolayout, limit_content specifies that the maximum number of
    /// child items under the frame is max_num_items.
    #[serde(default)]
    pub limit_content: bool,
    /// Parameters for limiting the number of items and using an overflow node
    #[serde(default)]
    pub limit_content_data: LimitContentData,
}
| rust | Apache-2.0 | 4caea40f7dfc29cafb17c0cc981d1a5607ef0aad | 2026-01-04T19:58:26.365701Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.