repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/ast.rs | rust/src/ast.rs | use crate::{real::Real, vars::VarSet};
use inari::{const_dec_interval, Decoration};
use std::{
collections::hash_map::DefaultHasher,
fmt,
hash::{Hash, Hasher},
ops::Range,
};
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum ExplicitRelOp {
Eq,
Ge,
Gt,
Le,
Lt,
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum UnaryOp {
Abs,
Acos,
Acosh,
AiryAi,
AiryAiPrime,
AiryBi,
AiryBiPrime,
Arg,
Asin,
Asinh,
Atan,
Atanh,
Boole, // Iverson bracket
BooleEqZero,
BooleLeZero,
BooleLtZero,
Ceil,
Chi,
Ci,
Conj,
Cos,
Cosh,
Digamma,
Ei,
EllipticE,
EllipticK,
Erf,
Erfc,
Erfi,
Exp,
Floor,
FresnelC,
FresnelS,
Gamma,
Im,
InverseErf,
InverseErfc,
Li,
Ln,
LnGamma,
Neg,
Not,
Re,
Recip,
Shi,
Si,
Sign,
Sin,
Sinc,
Sinh,
Sqr,
Sqrt,
Tan,
Tanh,
UndefAt0,
Zeta,
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum BinaryOp {
Add,
And,
Atan2,
BesselI,
BesselJ,
BesselK,
BesselY,
Complex,
Div,
Eq,
ExplicitRel(ExplicitRelOp),
GammaInc,
Gcd,
Ge,
Gt,
ImSinc,
ImUndefAt0,
ImZeta,
LambertW,
Lcm,
Le,
Log,
Lt,
Max,
Min,
Mod,
Mul,
Or,
Pow,
PowRational,
RankedMax,
RankedMin,
ReSignNonnegative,
ReSinc,
ReUndefAt0,
ReZeta,
Sub,
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum TernaryOp {
IfThenElse,
MulAdd,
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum NaryOp {
AndN,
List,
MaxN,
MinN,
OrN,
Plus,
Times,
}
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub enum ExprKind {
BoolConstant(bool),
Constant(Box<Real>),
Var(String),
Unary(UnaryOp, Box<Expr>),
Binary(BinaryOp, Box<Expr>, Box<Expr>),
Ternary(TernaryOp, Box<Expr>, Box<Expr>, Box<Expr>),
Nary(NaryOp, Vec<Expr>),
Pown(Box<Expr>, i32),
Rootn(Box<Expr>, u32),
Error,
Uninit,
}
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum ValueType {
Boolean,
Complex,
Real,
RealVector,
Unknown,
}
impl fmt::Display for ValueType {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
use ValueType::*;
match self {
Boolean => write!(f, "boolean"),
Complex => write!(f, "complex"),
Real => write!(f, "real"),
RealVector => write!(f, "real vector"),
Unknown => write!(f, "unknown"),
}
}
}
/// An AST node for an expression.
#[derive(Clone, Debug)]
pub struct Expr {
pub kind: ExprKind,
pub source_range: Range<usize>,
pub totally_defined: bool,
pub ty: ValueType,
pub vars: VarSet,
internal_hash: u64,
}
/// Makes a pattern that matches an [`Expr`] of kind [`ExprKind::Binary`].
#[macro_export]
macro_rules! binary {
($op:pat, $x:pat, $y:pat) => {
$crate::ast::Expr {
kind: $crate::ast::ExprKind::Binary($op, box $x, box $y),
..
}
};
}
/// Makes a pattern that matches an [`Expr`] of kind [`ExprKind::BoolConstant`].
#[macro_export]
macro_rules! bool_constant {
($a:pat) => {
$crate::ast::Expr {
kind: $crate::ast::ExprKind::BoolConstant($a),
..
}
};
}
/// Makes a pattern that matches an [`Expr`] of kind [`ExprKind::Constant`].
#[macro_export]
macro_rules! constant {
($a:pat) => {
$crate::ast::Expr {
kind: $crate::ast::ExprKind::Constant(box $a),
..
}
};
}
/// Makes a pattern that matches an [`Expr`] of kind [`ExprKind::Error`].
#[macro_export]
macro_rules! error {
() => {
$crate::ast::Expr {
kind: $crate::ast::ExprKind::Error,
..
}
};
}
/// Makes a pattern that matches an [`Expr`] of kind [`ExprKind::Nary`].
#[macro_export]
macro_rules! nary {
($op:pat, $xs:pat) => {
$crate::ast::Expr {
kind: $crate::ast::ExprKind::Nary($op, $xs),
..
}
};
}
/// Makes a pattern that matches an [`Expr`] of kind [`ExprKind::Pown`].
#[macro_export]
macro_rules! pown {
($x:pat, $n:pat) => {
$crate::ast::Expr {
kind: $crate::ast::ExprKind::Pown(box $x, $n),
..
}
};
}
/// Makes a pattern that matches an [`Expr`] of kind [`ExprKind::Rootn`].
#[macro_export]
macro_rules! rootn {
($x:pat, $n:pat) => {
$crate::ast::Expr {
kind: $crate::ast::ExprKind::Rootn(box $x, $n),
..
}
};
}
/// Makes a pattern that matches an [`Expr`] of kind [`ExprKind::Ternary`].
#[macro_export]
macro_rules! ternary {
($op:pat, $x:pat, $y:pat, $z:pat) => {
$crate::ast::Expr {
kind: $crate::ast::ExprKind::Ternary($op, box $x, box $y, box $z),
..
}
};
}
/// Makes a pattern that matches an [`Expr`] of kind [`ExprKind::Unary`].
#[macro_export]
macro_rules! unary {
($op:pat, $x:pat) => {
$crate::ast::Expr {
kind: $crate::ast::ExprKind::Unary($op, box $x),
..
}
};
}
/// Makes a pattern that matches an [`Expr`] of kind [`ExprKind::Uninit`].
#[macro_export]
macro_rules! uninit {
() => {
$crate::ast::Expr {
kind: $crate::ast::ExprKind::Uninit,
..
}
};
}
/// Makes a pattern that matches an [`Expr`] of kind [`ExprKind::Var`].
#[macro_export]
macro_rules! var {
($name:pat) => {
$crate::ast::Expr {
kind: $crate::ast::ExprKind::Var($name),
..
}
};
}
impl Expr {
/// Creates a new expression.
pub fn new(kind: ExprKind) -> Self {
Self {
kind,
source_range: 0..0,
totally_defined: false,
ty: ValueType::Unknown,
vars: VarSet::EMPTY,
internal_hash: 0,
}
}
/// Creates a new expression of kind [`ExprKind::Binary`].
pub fn binary(op: BinaryOp, x: Expr, y: Expr) -> Self {
Self::new(ExprKind::Binary(op, Box::new(x), Box::new(y)))
}
/// Creates a new expression of kind [`ExprKind::BoolConstant`].
pub fn bool_constant(x: bool) -> Self {
Self::new(ExprKind::BoolConstant(x))
}
/// Creates a new expression of kind [`ExprKind::Constant`].
pub fn constant(x: Real) -> Self {
Self::new(ExprKind::Constant(Box::new(x)))
}
/// Creates a new expression of kind [`ExprKind::Error`].
pub fn error() -> Self {
Self::new(ExprKind::Error)
}
/// Creates a constant node with value -1.
pub fn minus_one() -> Self {
Self::constant(const_dec_interval!(-1.0, -1.0).into())
}
/// Creates a constant node with value -1/2.
pub fn minus_one_half() -> Self {
Self::constant(const_dec_interval!(-0.5, -0.5).into())
}
/// Creates a new expression of kind [`ExprKind::Nary`].
pub fn nary(op: NaryOp, xs: Vec<Expr>) -> Self {
Self::new(ExprKind::Nary(op, xs))
}
/// Creates a constant node with value 1.
pub fn one() -> Self {
Self::constant(Real::one())
}
/// Creates a constant node with value 1/2.
pub fn one_half() -> Self {
Self::constant(const_dec_interval!(0.5, 0.5).into())
}
/// Creates a constant node with value π.
pub fn pi() -> Self {
Self::constant(Real::pi())
}
/// Creates a new expression of kind [`ExprKind::Pown`].
pub fn pown(x: Expr, n: i32) -> Self {
Self::new(ExprKind::Pown(Box::new(x), n))
}
/// Creates a new expression of kind [`ExprKind::Rootn`].
pub fn rootn(x: Expr, n: u32) -> Self {
Self::new(ExprKind::Rootn(Box::new(x), n))
}
/// Creates a constant node with value 2π.
pub fn tau() -> Self {
Self::constant(Real::tau())
}
/// Creates a new expression of kind [`ExprKind::Ternary`].
pub fn ternary(op: TernaryOp, x: Expr, y: Expr, z: Expr) -> Self {
Self::new(ExprKind::Ternary(op, Box::new(x), Box::new(y), Box::new(z)))
}
/// Creates a constant node with value 2.
pub fn two() -> Self {
Self::constant(const_dec_interval!(2.0, 2.0).into())
}
/// Creates a new expression of kind [`ExprKind::Unary`].
pub fn unary(op: UnaryOp, x: Expr) -> Self {
Self::new(ExprKind::Unary(op, Box::new(x)))
}
/// Creates a constant node with an undefined value.
pub fn undefined() -> Self {
Self::constant(Real::undefined())
}
/// Creates a new expression of kind [`ExprKind::Var`].
pub fn var(name: &str) -> Self {
Self::new(ExprKind::Var(name.into()))
}
/// Creates a constant node with value 0.
pub fn zero() -> Self {
Self::constant(Real::zero())
}
/// Formats the AST in a fashion similar to an S-expression.
///
/// A real constant is formatted either as a decimal number
/// or as a set of hexadecimal interval literals, so it is not necessarily human-readable.
///
/// The representation can change across versions;
/// thus, you _must not_ use the output for serialization, etc.
pub fn dump_full(&self) -> impl fmt::Display + '_ {
self.dump(DumpKind::Full)
}
/// Formats the AST in a fashion similar to an S-expression.
///
/// When a real constant is convertible to a [`f64`] number with [`TupperIntervalSet::to_f64`],
/// it is formatted as a decimal number; otherwise, as an `@`.
///
/// [`TupperIntervalSet::to_f64`]: crate::interval_set::TupperIntervalSet::to_f64
pub fn dump_short(&self) -> impl fmt::Display + '_ {
self.dump(DumpKind::Short)
}
/// Evaluates `self` if it is a real-valued constant expression.
///
/// Returns [`None`] if the expression is not both real-valued and constant,
/// or constant evaluation is not implemented for the operation.
pub fn eval(&self) -> Option<Real> {
use {BinaryOp::*, NaryOp::*, TernaryOp::*, UnaryOp::*};
match self {
bool_constant!(_) => None,
constant!(x) => Some(x.clone()),
var!(_) => None,
unary!(Abs, x) => Some(x.eval()?.abs()),
unary!(Acos, x) => Some(x.eval()?.acos()),
unary!(Acosh, x) => Some(x.eval()?.acosh()),
unary!(AiryAi, x) => Some(x.eval()?.airy_ai()),
unary!(AiryAiPrime, x) => Some(x.eval()?.airy_ai_prime()),
unary!(AiryBi, x) => Some(x.eval()?.airy_bi()),
unary!(AiryBiPrime, x) => Some(x.eval()?.airy_bi_prime()),
unary!(Asin, x) => Some(x.eval()?.asin()),
unary!(Asinh, x) => Some(x.eval()?.asinh()),
unary!(Atan, x) => Some(x.eval()?.atan()),
unary!(Atanh, x) => Some(x.eval()?.atanh()),
unary!(BooleEqZero, x) => Some(x.eval()?.boole_eq_zero()),
unary!(BooleLeZero, x) => Some(x.eval()?.boole_le_zero()),
unary!(BooleLtZero, x) => Some(x.eval()?.boole_lt_zero()),
unary!(Ceil, x) => Some(x.eval()?.ceil()),
unary!(Chi, x) => Some(x.eval()?.chi()),
unary!(Ci, x) => Some(x.eval()?.ci()),
unary!(Cos, x) => Some(x.eval()?.cos()),
unary!(Cosh, x) => Some(x.eval()?.cosh()),
unary!(Digamma, x) => Some(x.eval()?.digamma()),
unary!(Ei, x) => Some(x.eval()?.ei()),
unary!(EllipticE, x) => Some(x.eval()?.elliptic_e()),
unary!(EllipticK, x) => Some(x.eval()?.elliptic_k()),
unary!(Erf, x) => Some(x.eval()?.erf()),
unary!(Erfc, x) => Some(x.eval()?.erfc()),
unary!(Erfi, x) => Some(x.eval()?.erfi()),
unary!(Exp, x) => Some(x.eval()?.exp()),
unary!(Floor, x) => Some(x.eval()?.floor()),
unary!(FresnelC, x) => Some(x.eval()?.fresnel_c()),
unary!(FresnelS, x) => Some(x.eval()?.fresnel_s()),
unary!(Gamma, x) => Some(x.eval()?.gamma()),
unary!(InverseErf, x) => Some(x.eval()?.inverse_erf()),
unary!(InverseErfc, x) => Some(x.eval()?.inverse_erfc()),
unary!(Li, x) => Some(x.eval()?.li()),
unary!(Ln, x) => Some(x.eval()?.ln()),
unary!(LnGamma, x) => Some(x.eval()?.ln_gamma()),
unary!(Shi, x) => Some(x.eval()?.shi()),
unary!(Si, x) => Some(x.eval()?.si()),
unary!(Sin, x) => Some(x.eval()?.sin()),
unary!(Sinc, x) => Some(x.eval()?.sinc()),
unary!(Sinh, x) => Some(x.eval()?.sinh()),
unary!(Tan, x) => Some(x.eval()?.tan()),
unary!(Tanh, x) => Some(x.eval()?.tanh()),
unary!(UndefAt0, x) => Some(x.eval()?.undef_at_0()),
unary!(Zeta, x) => Some(x.eval()?.zeta()),
unary!(
Arg | Boole | Conj | Im | Neg | Not | Re | Recip | Sign | Sqr | Sqrt,
_
) => None,
binary!(Add, x, y) => Some(x.eval()? + y.eval()?),
binary!(Atan2, y, x) => Some(y.eval()?.atan2(x.eval()?)),
binary!(BesselI, n, x) => Some(n.eval()?.bessel_i(x.eval()?)),
binary!(BesselJ, n, x) => Some(n.eval()?.bessel_j(x.eval()?)),
binary!(BesselK, n, x) => Some(n.eval()?.bessel_k(x.eval()?)),
binary!(BesselY, n, x) => Some(n.eval()?.bessel_y(x.eval()?)),
binary!(GammaInc, a, x) => Some(a.eval()?.gamma_inc(x.eval()?)),
binary!(Gcd, x, y) => Some(x.eval()?.gcd(y.eval()?)),
binary!(ImSinc, re_x, im_x) => Some(re_x.eval()?.im_sinc(im_x.eval()?)),
binary!(ImUndefAt0, re_x, im_x) => Some(re_x.eval()?.im_undef_at_0(im_x.eval()?)),
binary!(ImZeta, re_x, im_x) => Some(re_x.eval()?.im_zeta(im_x.eval()?)),
binary!(LambertW, k, x) => Some(k.eval()?.lambert_w(x.eval()?)),
binary!(Lcm, x, y) => Some(x.eval()?.lcm(y.eval()?)),
// Beware the order of arguments.
binary!(Log, b, x) => Some(x.eval()?.log(b.eval()?)),
binary!(Max, x, y) => Some(x.eval()?.max(y.eval()?)),
binary!(Min, x, y) => Some(x.eval()?.min(y.eval()?)),
binary!(Mod, x, y) => Some(x.eval()?.modulo(y.eval()?)),
binary!(Mul, x, y) => Some(x.eval()? * y.eval()?),
binary!(Pow, x, y) => Some(x.eval()?.pow(y.eval()?)),
binary!(PowRational, x, y) => Some(x.eval()?.pow_rational(y.eval()?)),
binary!(RankedMax, nary!(List, xs), n) => {
let xs = xs.iter().map(|x| x.eval()).collect::<Option<Vec<_>>>()?;
Some(Real::ranked_max(xs, n.eval()?))
}
binary!(RankedMin, nary!(List, xs), n) => {
let xs = xs.iter().map(|x| x.eval()).collect::<Option<Vec<_>>>()?;
Some(Real::ranked_min(xs, n.eval()?))
}
binary!(ReSignNonnegative, x, y) => Some(x.eval()?.re_sign_nonnegative(y.eval()?)),
binary!(ReSinc, re_x, im_x) => Some(re_x.eval()?.re_sinc(im_x.eval()?)),
binary!(ReUndefAt0, re_x, im_x) => Some(re_x.eval()?.re_undef_at_0(im_x.eval()?)),
binary!(ReZeta, re_x, im_x) => Some(re_x.eval()?.re_zeta(im_x.eval()?)),
binary!(
And | Complex
| Div
| Eq
| ExplicitRel(_)
| Ge
| Gt
| Le
| Lt
| Or
| RankedMax
| RankedMin
| Sub,
_,
_
) => None,
ternary!(IfThenElse, cond, t, f) => {
Some(cond.eval()?.if_then_else(t.eval()?, f.eval()?))
}
ternary!(MulAdd, _, _, _) => None,
nary!(AndN | List | MaxN | MinN | OrN | Plus | Times, _) => None,
pown!(_, _) => None,
rootn!(_, _) => None,
error!() => None,
uninit!() => panic!(),
}
}
/// Updates the fields [`Expr::totally_defined`], [`Expr::ty`], [`Expr::vars`],
/// and [`Expr::internal_hash`] of the expression.
///
/// Precondition: the function is called on all sub-expressions
/// and they have not been modified since then.
pub fn update_metadata(&mut self) {
self.ty = self.value_type();
self.totally_defined = self.totally_defined(); // Requires `self.ty`.
self.vars = self.variables();
self.internal_hash = {
// Use `DefaultHasher::new` so that the value of `internal_hash` will be deterministic.
let mut hasher = DefaultHasher::new();
self.kind.hash(&mut hasher);
hasher.finish()
}
}
pub fn with_source_range(mut self, range: Range<usize>) -> Self {
self.source_range = range;
self
}
fn dump(&self, kind: DumpKind) -> impl fmt::Display + '_ {
Dump { e: self, kind }
}
/// Returns `true` if the expression is real-valued and is defined on the entire domain.
///
/// Preconditions:
///
/// - [`Expr::totally_defined`] is correctly assigned for all sub-expressions.
/// - [`Expr::ty`] is correctly assigned for `self`.
fn totally_defined(&self) -> bool {
use {BinaryOp::*, NaryOp::*, TernaryOp::*, UnaryOp::*};
// NOTE: Mathematica's `FunctionDomain` would be useful when the same definition is used.
match self {
constant!(a) if a.interval().decoration() >= Decoration::Def => true,
var!(_) => self.totally_defined,
unary!(
Abs | AiryAi
| AiryAiPrime
| AiryBi
| AiryBiPrime
| Asinh
| Atan
| Ceil
| Conj
| Cos
| Cosh
| Erf
| Erfc
| Erfi
| Exp
| Floor
| FresnelC
| FresnelS
| Im
| Neg
| Re
| Shi
| Si
| Sign
| Sin
| Sinc
| Sinh
| Sqr
| Tanh,
x
) => x.totally_defined,
binary!(
Add | ImSinc | Max | Min | Mul | ReSignNonnegative | ReSinc | Sub,
x,
y
) => x.totally_defined && y.totally_defined,
binary!(Pow, x, constant!(y)) => {
x.totally_defined
&& matches!(y.rational(), Some(q) if *q >= 0 && q.denom().is_odd())
}
ternary!(IfThenElse, _, t, f) => t.totally_defined && f.totally_defined,
ternary!(MulAdd, x, y, z) => {
x.totally_defined && y.totally_defined && z.totally_defined
}
nary!(MaxN | MinN | Plus | Times, xs) => xs.iter().all(|x| x.totally_defined),
pown!(x, n) => x.totally_defined && *n >= 0,
rootn!(x, n) => x.totally_defined && n % 2 == 1,
uninit!() => panic!(),
_ => false,
}
}
/// Returns the value type of the expression.
///
/// Precondition: [`Expr::ty`] is correctly assigned for `self`.
fn value_type(&self) -> ValueType {
use {
BinaryOp::{Complex, *},
NaryOp::*,
TernaryOp::*,
UnaryOp::*,
ValueType::{Complex as ComplexT, *},
};
fn boolean(e: &Expr) -> bool {
e.ty == Boolean
}
fn complex(e: &Expr) -> bool {
e.ty == ComplexT
}
fn real(e: &Expr) -> bool {
e.ty == Real
}
fn real_or_complex(e: &Expr) -> bool {
real(e) || complex(e)
}
fn real_vector(e: &Expr) -> bool {
e.ty == RealVector
}
match self {
// Boolean
bool_constant!(_) => Boolean,
unary!(Not, x) if boolean(x) => Boolean,
binary!(And | Or, x, y) if boolean(x) && boolean(y) => Boolean,
binary!(Eq, x, y) if real_or_complex(x) && real_or_complex(y) => Boolean,
binary!(ExplicitRel(_) | Ge | Gt | Le | Lt, x, y) if real(x) && real(y) => Boolean,
nary!(AndN | OrN, xs) if xs.iter().all(boolean) => Boolean,
// Complex
unary!(
Acos | Acosh
| Asin
| Asinh
| Atan
| Atanh
| Conj
| Cos
| Cosh
| Exp
| Ln
| Neg
| Recip
| Sign
| Sin
| Sinc
| Sinh
| Sqr
| Sqrt
| Tan
| Tanh
| Zeta,
x
) if complex(x) => ComplexT,
binary!(Complex, x, y) if real(x) && real(y) => ComplexT,
binary!(Add | Div | Log | Mul | Pow | Sub, x, y)
if real_or_complex(x) && real_or_complex(y) && (complex(x) || complex(y)) =>
{
ComplexT
}
ternary!(IfThenElse, cond, t, f)
if real(cond)
&& real_or_complex(t)
&& real_or_complex(f)
&& (complex(t) || complex(f)) =>
{
ComplexT
}
nary!(Plus | Times, xs) if xs.iter().all(real_or_complex) && xs.iter().any(complex) => {
ComplexT
}
// Real
constant!(_) => Real,
unary!(Boole, x) if boolean(x) => Real,
unary!(Abs | Arg | Im | Re, x) if complex(x) => Real,
unary!(
Abs | Acos
| Acosh
| AiryAi
| AiryAiPrime
| AiryBi
| AiryBiPrime
| Arg
| Asin
| Asinh
| Atan
| Atanh
| BooleEqZero
| BooleLeZero
| BooleLtZero
| Ceil
| Chi
| Ci
| Conj
| Cos
| Cosh
| Digamma
| Ei
| EllipticE
| EllipticK
| Erf
| Erfc
| Erfi
| Exp
| Floor
| FresnelC
| FresnelS
| Gamma
| Im
| InverseErf
| InverseErfc
| Li
| Ln
| LnGamma
| Neg
| Re
| Recip
| Shi
| Si
| Sign
| Sin
| Sinc
| Sinh
| Sqr
| Sqrt
| Tan
| Tanh
| UndefAt0
| Zeta,
x
) if real(x) => Real,
binary!(
Add | Atan2
| BesselI
| BesselJ
| BesselK
| BesselY
| Div
| GammaInc
| Gcd
| ImSinc
| ImUndefAt0
| ImZeta
| LambertW
| Lcm
| Log
| Max
| Min
| Mod
| Mul
| Pow
| PowRational
| ReSignNonnegative
| ReSinc
| ReUndefAt0
| ReZeta
| Sub,
x,
y
) if real(x) && real(y) => Real,
binary!(RankedMax | RankedMin, x, y) if real_vector(x) && real(y) => Real,
ternary!(IfThenElse, cond, t, f) if real(cond) && real(t) && real(f) => Real,
ternary!(MulAdd, x, y, z) if real(x) && real(y) && real(z) => Real,
nary!(MaxN | MinN | Plus | Times, xs) if xs.iter().all(real) => Real,
pown!(x, _) | rootn!(x, _) if real(x) => Real,
// RealVector
nary!(List, xs) if xs.iter().all(real) => RealVector,
// Others
var!(_) => self.ty,
uninit!() => panic!(),
_ => Unknown,
}
}
/// Returns the set of free variables in the expression.
///
/// Precondition: [`Expr::vars`] is correctly assigned for `self`.
fn variables(&self) -> VarSet {
match self {
bool_constant!(_) | constant!(_) => VarSet::EMPTY,
var!(_) => self.vars,
unary!(_, x) | pown!(x, _) | rootn!(x, _) => x.vars,
binary!(_, x, y) => x.vars | y.vars,
ternary!(_, x, y, z) => x.vars | y.vars | z.vars,
nary!(_, xs) => xs.iter().fold(VarSet::EMPTY, |vs, x| vs | x.vars),
error!() => VarSet::EMPTY,
uninit!() => panic!(),
}
}
}
impl Default for Expr {
fn default() -> Self {
Self::new(ExprKind::Uninit)
}
}
impl PartialEq for Expr {
fn eq(&self, rhs: &Self) -> bool {
self.kind == rhs.kind
}
}
impl Eq for Expr {}
impl Hash for Expr {
fn hash<H: Hasher>(&self, state: &mut H) {
self.internal_hash.hash(state);
}
}
#[derive(Clone, Copy, Debug)]
enum DumpKind {
Short,
Full,
}
struct Dump<'a> {
e: &'a Expr,
kind: DumpKind,
}
impl<'a> fmt::Display for Dump<'a> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let k = self.kind;
match self.e {
bool_constant!(false) => {
write!(f, "False")
}
bool_constant!(true) => {
write!(f, "True")
}
constant!(a) => {
if let Some(a) = a.to_f64() {
write!(f, "{}", a)
} else {
match self.kind {
DumpKind::Short => write!(f, "@"),
DumpKind::Full => {
write!(f, "@{{")?;
for x in a.interval() {
write!(f, "{:x},", x.dec_interval())?
}
write!(f, "}}")
}
}
}
}
var!(name) => write!(f, "{}", name),
unary!(op, x) => write!(f, "({:?} {})", op, x.dump(k)),
binary!(op, x, y) => write!(f, "({:?} {} {})", op, x.dump(k), y.dump(k)),
ternary!(op, x, y, z) => {
write!(f, "({:?} {} {} {})", op, x.dump(k), y.dump(k), z.dump(k))
}
nary!(op, xs) => {
write!(
f,
"({:?} {})",
op,
xs.iter()
.map(|x| format!("{}", x.dump(k)))
.collect::<Vec<_>>()
.join(" ")
)
}
pown!(x, n) => write!(f, "(Pown {} {})", x.dump(k), n),
rootn!(x, n) => write!(f, "(Rootn {} {})", x.dump(k), n),
error!() => write!(f, "Error"),
uninit!() => panic!(),
}
}
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/fftw.rs | rust/src/fftw.rs | use graphest_fftw_sys::*;
use std::ffi::c_void;
use std::ops::{Index, IndexMut};
use std::slice::{from_raw_parts, from_raw_parts_mut};
// From https://www.fftw.org/fftw3_doc/Introduction.html
//
// On the other hand, if you need a single transform of a given size,
// the one-time cost of the planner becomes significant. For this case,
// FFTW provides fast planners based on heuristics or on previously computed plans.
//
// The better choice would be to utilize FFTW wisdom.
const FFTW_ESTIMATE: u32 = 1 << 6;
pub struct FftImage {
ptr: *mut f32,
len: usize,
stride: usize,
plan_r2c: fftwf_plan,
plan_c2r: fftwf_plan,
}
impl FftImage {
pub fn new(width: u32, height: u32) -> Self {
// For the stride, see
// https://www.fftw.org/fftw3_doc/Multi_002dDimensional-DFTs-of-Real-Data.html
// https://www.fftw.org/fftw3_doc/Real_002ddata-DFT-Array-Format.html
let stride = 2 * (width as usize / 2 + 1);
let len = height as usize * stride;
let ptr = unsafe { fftwf_alloc_real(len) };
// Unless `FFTW_ESTIMATE` is set, which we currently do, the arrays are destroyed during planning.
// https://www.fftw.org/fftw3_doc/Planner-Flags.html
let plan_r2c = unsafe {
fftwf_plan_dft_r2c_2d(
height as i32,
width as i32,
ptr,
ptr as *mut [f32; 2],
FFTW_ESTIMATE,
)
};
let plan_c2r = unsafe {
fftwf_plan_dft_c2r_2d(
height as i32,
width as i32,
ptr as *mut [f32; 2],
ptr,
FFTW_ESTIMATE,
)
};
unsafe { from_raw_parts_mut(ptr, len).fill(0.0) };
Self {
ptr,
len,
stride,
plan_r2c,
plan_c2r,
}
}
pub fn complexes(&self) -> &[[f32; 2]] {
unsafe { from_raw_parts(self.ptr as *const [f32; 2], self.len / 2) }
}
pub fn complexes_mut(&mut self) -> &mut [[f32; 2]] {
unsafe { from_raw_parts_mut(self.ptr as *mut [f32; 2], self.len / 2) }
}
pub fn fft(&mut self) {
unsafe { fftwf_execute(self.plan_r2c) };
}
pub fn ifft(&mut self) {
unsafe { fftwf_execute(self.plan_c2r) };
}
}
impl Drop for FftImage {
fn drop(&mut self) {
unsafe {
fftwf_free(self.ptr as *mut c_void);
fftwf_destroy_plan(self.plan_r2c);
fftwf_destroy_plan(self.plan_c2r);
};
}
}
impl Index<usize> for FftImage {
type Output = [f32];
fn index(&self, index: usize) -> &Self::Output {
let stride = self.stride;
let slice = unsafe { from_raw_parts(self.ptr, self.len) };
&slice[stride * index..stride * (index + 1)]
}
}
impl IndexMut<usize> for FftImage {
fn index_mut(&mut self, index: usize) -> &mut Self::Output {
let stride = self.stride;
let slice_mut = unsafe { from_raw_parts_mut(self.ptr, self.len) };
&mut slice_mut[stride * index..stride * (index + 1)]
}
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/lib.rs | rust/src/lib.rs | #![allow(clippy::float_cmp)]
#![feature(box_patterns)]
pub use crate::{
fftw::FftImage,
geom::Box2D,
graph::{
explicit::Explicit, implicit::Implicit, parametric::Parametric, Graph, GraphingStatistics,
Padding,
},
image::{Image, PixelIndex},
relation::{Relation, RelationType},
ternary::Ternary,
};
#[cfg(feature = "arb")]
mod arb;
#[cfg(feature = "arb")]
mod arb_interval_set_ops;
mod ast;
mod block;
mod context;
mod eval_cache;
mod eval_result;
mod fftw;
mod geom;
mod graph;
mod image;
mod interval_set;
mod interval_set_ops;
mod ops;
mod parse;
mod rational_ops;
mod real;
mod region;
mod relation;
mod ternary;
mod traits;
mod vars;
mod visit;
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/vars.rs | rust/src/vars.rs | use bitflags::*;
bitflags! {
/// A set of free variables.
#[derive(Clone, Copy, Debug, Default, Eq, Hash, PartialEq, PartialOrd, Ord)]
pub struct VarSet: u8 {
const EMPTY = 0;
const M = 1;
const N = 2;
const N_THETA = 4;
const T = 8;
const X = 16;
const Y = 32;
}
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum VarType {
Integer,
Real,
}
impl VarSet {
pub fn len(&self) -> usize {
self.bits().count_ones() as usize
}
pub fn var_type(&self) -> VarType {
match *self {
VarSet::M | VarSet::N | VarSet::N_THETA => VarType::Integer,
VarSet::T | VarSet::X | VarSet::Y => VarType::Real,
_ => panic!(),
}
}
}
/// Creates a set of variables in a const context.
///
/// See <https://github.com/bitflags/bitflags/issues/180>
#[macro_export]
macro_rules! vars {
($($var:path)|*) => {
$crate::vars::VarSet::from_bits_truncate($($var.bits())|*)
};
}
pub type VarIndex = u8;
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/interval_set_ops.rs | rust/src/interval_set_ops.rs | use crate::{
interval_set::{
Branch, BranchMap, DecSignSet, SignSet, Site, TupperInterval, TupperIntervalSet,
},
Ternary,
};
use gmp_mpfr_sys::mpfr;
use inari::{
const_dec_interval, const_interval, dec_interval, interval, DecInterval, Decoration, Interval,
};
use itertools::Itertools;
use rug::Float;
use smallvec::{smallvec, SmallVec};
use std::{
convert::From,
ops::{Add, Mul, Neg, Sub},
};
impl Neg for &TupperIntervalSet {
type Output = TupperIntervalSet;
fn neg(self) -> Self::Output {
let mut rs = Self::Output::new();
for x in self {
rs.insert(TupperInterval::new(-x.dec_interval(), x.g));
}
rs // Skip normalization since negation does not produce new overlapping intervals.
}
}
macro_rules! impl_arith_op {
($Op:ident, $op:ident) => {
impl $Op for &TupperIntervalSet {
type Output = TupperIntervalSet;
fn $op(self, rhs: &TupperIntervalSet) -> Self::Output {
let mut rs = Self::Output::new();
for x in self {
for y in rhs {
if let Some(g) = x.g.union(y.g) {
rs.insert(TupperInterval::new(
x.dec_interval().$op(y.dec_interval()),
g,
));
}
}
}
rs.normalize(false);
rs
}
}
};
}
impl_arith_op!(Add, add);
impl_arith_op!(Sub, sub);
impl_arith_op!(Mul, mul);
/// The parity of a function.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
enum Parity {
None,
Even,
Odd,
}
macro_rules! impl_op {
($op:ident($x:ident $(,$p:ident: $pt:ty)*), $result:expr) => {
pub fn $op(&self, $($p: $pt,)*) -> Self {
let mut rs = Self::new();
for x in self {
let $x = x.dec_interval();
rs.insert(TupperInterval::new($result, x.g));
}
rs.normalize(false);
rs
}
};
($op:ident($x:ident, $y:ident), $result:expr) => {
pub fn $op(&self, rhs: &Self) -> Self {
let mut rs = Self::new();
for x in self {
for y in rhs {
if let Some(g) = x.g.union(y.g) {
let $x = x.dec_interval();
let $y = y.dec_interval();
rs.insert(TupperInterval::new($result, g));
}
}
}
rs.normalize(false);
rs
}
};
}
fn insert_intervals(
rs: &mut TupperIntervalSet,
r: (DecInterval, Option<DecInterval>),
g: BranchMap,
site: Option<Site>,
) {
match r {
(r0, None) => {
rs.insert(TupperInterval::new(r0, g));
}
(r0, Some(r1)) => {
if !r0.disjoint(r1) {
rs.insert(TupperInterval::new(
DecInterval::set_dec(
r0.interval().unwrap().convex_hull(r1.interval().unwrap()),
r0.decoration().min(r1.decoration()),
),
g,
));
} else {
rs.insert(TupperInterval::new(
r0,
match site {
Some(site) => g.inserted(site, Branch::new(0)),
_ => g,
},
));
rs.insert(TupperInterval::new(
r1,
match site {
Some(site) => g.inserted(site, Branch::new(1)),
_ => g,
},
));
}
}
}
}
macro_rules! impl_op_cut {
($op:ident($x:ident $(,$p:ident: $pt:ty)*), $result:expr) => {
pub fn $op(&self, $($p: $pt,)* site: Option<Site>) -> Self {
let mut rs = Self::new();
for x in self {
let $x = x.dec_interval();
insert_intervals(&mut rs, $result, x.g, site);
}
rs.normalize(false);
rs
}
};
($(#[$meta:meta])* $op:ident($x:ident, $y:ident), $result:expr) => {
$(#[$meta])*
pub fn $op(&self, rhs: &Self, site: Option<Site>) -> Self {
let mut rs = Self::new();
for x in self {
for y in rhs {
if let Some(g) = x.g.union(y.g) {
let $x = x.dec_interval();
let $y = y.dec_interval();
insert_intervals(&mut rs, $result, g, site);
}
}
}
rs.normalize(false);
rs
}
};
}
const I_ZERO: Interval = const_interval!(0.0, 0.0);
const I_ONE: Interval = const_interval!(1.0, 1.0);
const DI_ZERO: DecInterval = const_dec_interval!(0.0, 0.0);
const DI_ONE: DecInterval = const_dec_interval!(1.0, 1.0);
/// Returns the parity of the function f(x) = x^y.
///
/// Precondition: `y` is finite.
fn exponentiation_parity(y: f64) -> Parity {
if y == y.trunc() {
// y ∈ ℤ.
if y == 2.0 * (y / 2.0).trunc() {
// y ∈ 2ℤ.
Parity::Even
} else {
// y ∈ 2ℤ + 1.
Parity::Odd
}
} else {
// y is a rational number of the form odd / even.
Parity::None
}
}
fn ternary_to_intervals(t: Ternary) -> (DecInterval, Option<DecInterval>) {
match t {
Ternary::False => (DI_ZERO, None),
Ternary::True => (DI_ONE, None),
_ => (
DecInterval::set_dec(I_ZERO, Decoration::Def),
Some(DecInterval::set_dec(I_ONE, Decoration::Def)),
),
}
}
impl TupperIntervalSet {
impl_op!(abs(x), x.abs());
#[cfg(not(feature = "arb"))]
impl_op!(acos(x), x.acos());
#[cfg(not(feature = "arb"))]
impl_op!(acosh(x), x.acosh());
#[cfg(not(feature = "arb"))]
impl_op!(asin(x), x.asin());
#[cfg(not(feature = "arb"))]
impl_op!(asinh(x), x.asinh());
#[cfg(not(feature = "arb"))]
impl_op!(atan(x), x.atan());
#[cfg(not(feature = "arb"))]
pub fn atan2(&self, rhs: &Self, site: Option<Site>) -> Self {
self.atan2_impl(rhs, site)
}
impl_op_cut!(atan2_impl(y, x), {
let a = x.inf();
let b = x.sup();
let c = y.inf();
let d = y.sup();
if a == 0.0 && b == 0.0 && c < 0.0 && d > 0.0 {
let dec = Decoration::Trv;
(
DecInterval::set_dec(-Interval::FRAC_PI_2, dec),
Some(DecInterval::set_dec(Interval::FRAC_PI_2, dec)),
)
} else if a < 0.0 && b > 0.0 && c == 0.0 && d == 0.0 {
let dec = Decoration::Trv;
(
DecInterval::set_dec(I_ZERO, dec),
Some(DecInterval::set_dec(Interval::PI, dec)),
)
} else if a < 0.0 && b <= 0.0 && c < 0.0 && d >= 0.0 {
let dec = if b == 0.0 {
Decoration::Trv
} else {
Decoration::Def.min(x.decoration()).min(y.decoration())
};
// y < 0 (thus z < 0) part.
let z0 = if c == f64::NEG_INFINITY {
interval!(-Interval::PI.sup(), -Interval::FRAC_PI_2.inf()).unwrap()
} else {
let x0 = interval!(b, b).unwrap();
let y0 = interval!(c, c).unwrap();
interval!(-Interval::PI.sup(), y0.atan2(x0).sup()).unwrap()
};
// y ≥ 0 (thus z > 0) part.
let x1 = interval!(a, b).unwrap();
let y1 = interval!(0.0, d).unwrap();
let z1 = y1.atan2(x1);
(
DecInterval::set_dec(z0, dec),
Some(DecInterval::set_dec(z1, dec)),
)
} else {
// a = b = c = d = 0 goes here.
(y.atan2(x), None)
}
});
#[cfg(not(feature = "arb"))]
impl_op!(atanh(x), x.atanh());
pub fn boole_eq_zero(&self, site: Option<Site>) -> Self {
if self.is_empty() {
Self::from(DI_ZERO)
} else {
let mut rs = self.boole_eq_zero_nonempty(site);
rs.normalize_boole();
rs
}
}
impl_op_cut!(boole_eq_zero_nonempty(x), {
ternary_to_intervals(Ternary::from((
x == DI_ZERO && x.decoration() >= Decoration::Def,
x.contains(0.0),
)))
});
pub fn boole_le_zero(&self, site: Option<Site>) -> Self {
if self.is_empty() {
Self::from(DI_ZERO)
} else {
let mut rs = self.boole_le_zero_nonempty(site);
rs.normalize_boole();
rs
}
}
impl_op_cut!(boole_le_zero_nonempty(x), {
ternary_to_intervals(Ternary::from((
x.sup() <= 0.0 && x.decoration() >= Decoration::Def,
x.inf() <= 0.0,
)))
});
/// Evaluates the `BooleLtZero` operation on this interval set.
///
/// An empty set evaluates to the zero interval (`DI_ZERO`).
pub fn boole_lt_zero(&self, site: Option<Site>) -> Self {
    if self.is_empty() {
        return Self::from(DI_ZERO);
    }
    let mut result = self.boole_lt_zero_nonempty(site);
    result.normalize_boole();
    result
}
// Three-valued evaluation of x < 0 for a nonempty interval:
// certainly true iff sup x < 0 and x is defined (decoration ≥ Def);
// possibly true iff inf x < 0.
impl_op_cut!(boole_lt_zero_nonempty(x), {
    ternary_to_intervals(Ternary::from((
        x.sup() < 0.0 && x.decoration() >= Decoration::Def,
        x.inf() < 0.0,
    )))
});
/// Collapses a Boolean-valued interval set to a single member when all of its
/// members agree on the same value ({0} or {1}); otherwise leaves it as is.
fn normalize_boole(&mut self) {
    let mut has_zero = false;
    let mut has_one = false;
    for x in self.iter() {
        has_zero |= x.x == I_ZERO;
        has_one |= x.x == I_ONE;
    }
    if has_zero && !has_one {
        *self = TupperIntervalSet::from(DI_ZERO);
    } else if has_one && !has_zero {
        *self = TupperIntervalSet::from(DI_ONE);
    }
}
// The following implementations are compiled only when the `arb` feature is
// disabled.
#[cfg(not(feature = "arb"))]
impl_op!(cos(x), x.cos());
#[cfg(not(feature = "arb"))]
impl_op!(cosh(x), x.cosh());
#[cfg(not(feature = "arb"))]
pub fn digamma(&self, site: Option<Site>) -> Self {
    self.digamma_impl(site)
}
// Interval extension of the digamma function ψ(x), whose poles are at
// S = {0, -1, -2, …}.
impl_op_cut!(digamma_impl(x), {
    let a = x.inf();
    let b = x.sup();
    let ia = a.ceil();
    let ib = b.floor();
    if ia == ib && a <= 0.0 {
        // ∃i ∈ S : x ∩ S = {i}, where S = {0, -1, …}.
        // Cut the interval at the single pole it contains.
        let dec = Decoration::Trv;
        let x0 = interval!(a, ia).unwrap();
        let x1 = interval!(ia, b).unwrap();
        (
            DecInterval::set_dec(digamma(x0), dec),
            Some(DecInterval::set_dec(digamma(x1), dec)),
        )
    } else {
        let dec = if ia < ib && a <= 0.0 {
            // x ∩ S ≠ ∅.
            Decoration::Trv
        } else {
            Decoration::Com.min(x.decoration())
        };
        let x = x.interval().unwrap();
        (DecInterval::set_dec(digamma(x), dec), None)
    }
});
// Interval division that cuts the divisor at zero so that each part of the
// quotient stays tight.
impl_op_cut!(div(x, y), {
    let c = y.inf();
    let d = y.sup();
    if c < 0.0 && d > 0.0 {
        let y0 = DecInterval::set_dec(interval!(c, 0.0).unwrap(), y.decoration());
        let y1 = DecInterval::set_dec(interval!(0.0, d).unwrap(), y.decoration());
        (x / y0, Some(x / y1))
    } else {
        (x / y, None)
    }
});
#[cfg(not(feature = "arb"))]
impl_op!(erf(x), {
    DecInterval::set_dec(erf(x.interval().unwrap()), x.decoration())
});
#[cfg(not(feature = "arb"))]
impl_op!(erfc(x), {
    DecInterval::set_dec(erfc(x.interval().unwrap()), x.decoration())
});
#[cfg(not(feature = "arb"))]
impl_op!(exp(x), x.exp());
#[cfg(not(feature = "arb"))]
pub fn gamma(&self, site: Option<Site>) -> Self {
    self.gamma_impl(site)
}
/// Interval extension of the gamma function Γ(x).
///
/// Nonpositive arguments are handled via the reflection formula, and members
/// that straddle 0 are split there (with branch indices recorded at `site`).
pub fn gamma_impl(&self, site: Option<Site>) -> Self {
    // argmin_{x > 0} Γ(x), rounded down/up.
    const ARGMIN_RD: f64 = 1.4616321449683622;
    const ARGMIN_RU: f64 = 1.4616321449683625;
    // min_{x > 0} Γ(x), rounded down.
    const MIN_RD: f64 = 0.8856031944108886;
    let mut rs = Self::new();
    for x in self {
        let a = x.x.inf();
        let b = x.x.sup();
        if b <= 0.0 {
            // b ≤ 0.
            if a == b && a == a.floor() {
                // x is a singleton nonpositive integer — a pole of Γ.
                // Empty.
            } else {
                // Γ(x) = π / (sin(π x) Γ(1 - x)).
                let one = Self::from(DI_ONE);
                let pi = Self::from(DecInterval::PI);
                let mut xs = Self::new();
                xs.insert(*x);
                let mut sin = (&pi * &xs).sin();
                // When x spans at most one integer, the sign of sin(π x) is
                // known; clamp it to tighten the result.
                // `a.floor() + 1.0` can be inexact when the first condition is not met.
                if x.x.wid() <= 1.0 && b <= a.floor() + 1.0 {
                    let zero = Self::from(DI_ZERO);
                    sin = if a.floor() % 2.0 == 0.0 {
                        // ∃k ∈ ℤ : 2k ≤ x ≤ 2k + 1 ⟹ sin(π x) ≥ 0.
                        sin.max(&zero)
                    } else {
                        // ∃k ∈ ℤ : 2k - 1 ≤ x ≤ 2k ⟹ sin(π x) ≤ 0.
                        sin.min(&zero)
                    };
                }
                let gamma = pi.div(&(&sin * &(&one - &xs).gamma(None)), site);
                rs.extend(gamma);
            }
        } else if a < 0.0 {
            // a < 0 < b. Split at 0 and recurse on the two halves.
            let mut xs = Self::new();
            let dec = Decoration::Trv;
            // We cannot use `insert_intervals` here as it merges overlapping intervals.
            xs.insert(TupperInterval::new(
                DecInterval::set_dec(interval!(a, 0.0).unwrap(), dec),
                match site {
                    Some(site) => x.g.inserted(site, Branch::new(0)),
                    _ => x.g,
                },
            ));
            xs.insert(TupperInterval::new(
                DecInterval::set_dec(interval!(0.0, b).unwrap(), dec),
                match site {
                    Some(site) => x.g.inserted(site, Branch::new(1)),
                    _ => x.g,
                },
            ));
            rs.extend(xs.gamma(None));
        } else {
            // 0 ≤ a. Γ is evaluated directly with directed rounding.
            let dec = if a == 0.0 { Decoration::Trv } else { x.d };
            // gamma_rd/ru(±0.0) returns ±∞.
            let a = if a == 0.0 { 0.0 } else { a };
            let y = if b <= ARGMIN_RD {
                // b < x0, where x0 = argmin_{x > 0} Γ(x). Γ is decreasing here.
                interval!(gamma_rd(b), gamma_ru(a)).unwrap()
            } else if a >= ARGMIN_RU {
                // x0 < a. Γ is increasing here.
                interval!(gamma_rd(a), gamma_ru(b)).unwrap()
            } else {
                // a < x0 < b. The minimum of Γ on (0, ∞) is attained inside.
                interval!(MIN_RD, gamma_ru(a).max(gamma_ru(b))).unwrap()
            };
            rs.insert(TupperInterval::new(DecInterval::set_dec(y, dec), x.g));
        }
    }
    rs.normalize(false);
    rs
}
// For any x, y ∈ ℚ, the GCD (greatest common divisor) of x and y is defined as an extension
// to the integer GCD as:
//
//   gcd(x, y) := gcd(q x, q y) / q,
//
// where q is any positive integer that satisfies q x, q y ∈ ℤ (the trivial one is the product
// of the denominators of |x| and |y|). We leave the function undefined for irrational numbers.
// The Euclidean algorithm can be applied to compute the GCD of rational numbers:
//
//   gcd(x, y) = | |x|              if y = 0,
//               | gcd(y, x mod y)  otherwise,
//
// which can be seen from a simple observation:
//
//   gcd(q x, q y) / q = | |q x| / q                      if y = 0,
//                       | gcd(q y, (q x) mod (q y)) / q  otherwise,
//                         (the Euclidean algorithm for the integer GCD)
//                     = | |x|                             if y = 0,
//                       | gcd(y, ((q x) mod (q y)) / q)   otherwise.
//                                 ^^^^^^^^^^^^^^^^^^^^^ = x mod y
//
// We construct an interval extension of the function as follows:
//
//   R_0(X, Y) := X,
//   R_1(X, Y) := Y,
//   R_k(X, Y) := R_{k-2}(X, Y) mod R_{k-1}(X, Y)  for any k ∈ ℕ_{≥2},
//
//   Z_0(X, Y) := ∅,
//   Z_k(X, Y) := | Z_{k-1}(X, Y) ∪ |R_{k-1}(X, Y)|  if 0 ∈ R_k(X, Y),
//                | Z_{k-1}(X, Y)                    otherwise,
//                for any k ∈ ℕ_{≥1},
//
//   gcd(X, Y) := ⋃ Z_k(X, Y).
//                k ∈ ℕ_{≥0}
//
// We will denote R_k(X, Y) and Z_k(X, Y) just by R_k and Z_k, respectively.
//
// Proposition. gcd(X, Y) is an interval extension of gcd(x, y).
//
// Proof. Let X and Y be any intervals. There are two possibilities:
//
//   (1): X ∩ ℚ = ∅ ∨ Y ∩ ℚ = ∅,
//   (2): X ∩ ℚ ≠ ∅ ∧ Y ∩ ℚ ≠ ∅.
//
// Suppose (1). Then, gcd[X, Y] = ∅ ⊆ gcd(X, Y).
// Suppose (2). Let x ∈ X ∩ ℚ, y ∈ Y ∩ ℚ.
// Let r_0 := x, r_1 := y, r_k := r_{k-2} mod r_{k-1} for k ≥ 2.
// Let P(k) :⟺ r_k ∈ R_k. We show that P(k) holds for every k ≥ 0 by induction on k.
// Base cases: From r_0 = x ∈ X = R_0 and r_1 = y ∈ Y = R_1, P(0) and P(1) holds.
// Inductive step: Let k ≥ 0. Suppose P(k), P(k + 1).
// Since X mod Y is an interval extension of x mod y, the following holds:
//
//   r_{k+2} = r_k mod r_{k+1} ∈ R_k mod R_{k+1} = R_{k+2}.
//
// Thus, P(k + 2) holds. Therefore, P(k) holds for every k ∈ ℕ_{≥0}.
// Since the Euclidean algorithm halts on any input, there exists n ≥ 1 such that
// r_n = 0 ∧ ∀k ∈ {2, …, n-1} : r_k ≠ 0, which leads to |r_{n-1}| = gcd(x, y).
// Let n be such a number. Then from r_n = 0 and r_n ∈ R_n, 0 ∈ R_n. Therefore:
//
//   gcd(x, y) = |r_{n-1}| ∈ |R_{n-1}| ⊆ Z_n ⊆ gcd(X, Y).
//
// Therefore, for any intervals X and Y, gcd[X, Y] ⊆ gcd(X, Y). ∎
//
// Proposition. For any intervals X and Y, and any k ≥ 2,
// ∃i ∈ {1, …, k - 1} : R_{i-1} = R_{k-1} ∧ R_i = R_k ∧ Z_{i-1} = Z_{k-1} ⟹ gcd(X, Y) = Z_{k-1}.
// The statement may look a bit awkward, but it makes the implementation easier.
//
// Proof. For any j ≥ 1, Z_j can be written in the form:
//
//   Z_j = f(Z_{j-1}, R_{j-1}, R_j),
//
// where f is common for every j.
// Suppose ∃i ∈ {1, …, k - 1} : R_{i-1} = R{k-1} ∧ R_i = R_k ∧ Z_{i-1} = Z_{k-1}.
// Let i be such a number. Let n := k - i. Then:
//
//   Z_{i+n} = f(Z_{i+n-1}, R_{i+n-1}, R_{i+n})
//           = f(Z_{i-1}, R_{i-1}, R_i)
//           = Z_i.
//
// By repeating the process, we get ∀m ∈ ℕ_{≥0} : Z_{i+mn} = Z_i.
// Therefore, ∀j ∈ ℕ_{≥i} : Z_j = Z_i.
// Therefore, gcd(X, Y) = Z_i = Z_{k-1}. ∎
pub fn gcd(&self, rhs: &Self, site: Option<Site>) -> Self {
    let mut rs = Self::new();
    // {gcd(x, y) ∣ x ∈ X, y ∈ Y}
    //   = {gcd(x, y) ∣ x ∈ |X|, y ∈ |Y|}
    //   = {gcd(max(x, y), min(x, y)) ∣ x ∈ |X|, y ∈ |Y|}
    //   ⊆ {gcd(x, y) ∣ x ∈ max(|X|, |Y|), y ∈ min(|X|, |Y|)}.
    let xs = &self.abs();
    let ys = &rhs.abs();
    for x in xs {
        for y in ys {
            if let Some(g) = x.g.union(y.g) {
                let dec = if x.x.is_singleton() && y.x.is_singleton() {
                    Decoration::Dac.min(x.d).min(y.d)
                } else {
                    // Non-singleton intervals contain irrational points,
                    // where the function is undefined.
                    Decoration::Trv
                };
                let x = DecInterval::set_dec(x.x, dec);
                let y = DecInterval::set_dec(y.x, dec);
                let mut zs = TupperIntervalSet::new();
                let mut zs_prev = zs.clone();
                let mut rems: SmallVec<[_; 4]> = smallvec![
                    Self::from(TupperInterval::new(x.max(y), g)),
                    Self::from(TupperInterval::new(x.min(y), g))
                ];
                'outer: loop {
                    // The iteration starts with k = 1.
                    let xs = &rems[rems.len() - 2];
                    let ys = &rems[rems.len() - 1];
                    // R_{k-1} = `xs`, R_k = `ys`, Z_{k-1} = `zs_prev`.
                    for i_prime in 0..rems.len() - 2 {
                        // `i_prime` is i with some offset subtracted.
                        // We have (1): 1 ≤ i < k, (2): Z_{i-1} = Z_i = … = Z_{k-1}.
                        if &rems[i_prime] == xs && &rems[i_prime + 1] == ys {
                            // We have R_{i-1} = R_{k-1} ∧ R_i = R_k.
                            // Therefore, gcd(X, Y) = Z_{k-1}.
                            break 'outer;
                        }
                    }
                    // (used later) R_{k+1} = `rem`.
                    let mut rem = xs.modulo(ys, None);
                    rem.normalize(true);
                    if ys.iter().any(|y| y.x.contains(0.0)) {
                        // 0 ∈ R_k, so Z_k gains |R_{k-1}|.
                        zs.extend(xs);
                        zs.normalize(true);
                        // Z_k = `zs`.
                        if zs != zs_prev {
                            // Z_k ≠ Z_{k-1}.
                            // Retain only R_k so that both (1) and (2) will hold
                            // in subsequent iterations.
                            rems = rems[rems.len() - 1..].into();
                            zs_prev = zs.clone();
                        }
                    }
                    rems.push(rem); // […, R_k, R_{k+1}]
                }
                rs.extend(zs_prev);
            }
        }
    }
    rs.normalize(true);
    if let Some(site) = site {
        if rs.len() == 2 {
            // Assign branches.
            rs = rs
                .into_iter()
                .enumerate()
                .map(|(i, x)| {
                    TupperInterval::new(
                        x.dec_interval(),
                        x.g.inserted(site, Branch::new(i as u8)),
                    )
                })
                .collect();
        }
    }
    rs
}
/// Combines `t` over the part of `self` that contains 1 with `f` over the
/// part that contains 0.
///
/// Panics if the decoration of `self` is below [`Decoration::Def`].
pub fn if_then_else(&self, t: &Self, f: &Self) -> Self {
    assert!(self.decoration() >= Decoration::Def);
    let mut rs = Self::new();
    for cond in self {
        // `cond.x` can be [0, 1] if many conjunctions and/or disjunctions are involved.
        for (a, xs) in [(0.0, f), (1.0, t)] {
            if cond.x.contains(a) {
                if xs.is_empty() {
                    // The selected branch is empty; record an explicit empty member.
                    rs.insert(TupperInterval::from(DecInterval::EMPTY));
                } else {
                    for x in xs {
                        if let Some(g) = cond.g.union(x.g) {
                            rs.insert(TupperInterval::new(
                                DecInterval::set_dec(x.x, cond.d.min(x.d)),
                                g,
                            ))
                        }
                    }
                }
            }
        }
    }
    rs.normalize(false);
    rs
}
// For x, y ∈ ℚ, the LCM (least common multiple) of x and y is defined as:
//
//   lcm(x, y) = | 0                  if x = y = 0,
//               | |x y| / gcd(x, y)  otherwise.
//
// We leave the function undefined for irrational numbers.
// Here is an interval extension of the function:
//
//   lcm(X, Y) := | {0}                if X = Y = {0},
//                | |X Y| / gcd(X, Y)  otherwise.
//
// Proposition. lcm(X, Y) is an interval extension of lcm(x, y).
//
// Proof. Let X and Y be any intervals. There are five possibilities:
//
//   (1): X ∩ ℚ = ∅ ∨ Y ∩ ℚ = ∅,
//   (2): X = Y = {0},
//   (3): X = {0} ∧ Y ∩ ℚ∖{0} ≠ ∅,
//   (4): X ∩ ℚ∖{0} ≠ ∅ ∧ Y = {0},
//   (5): X ∩ ℚ∖{0} ≠ ∅ ∧ Y ∩ ℚ∖{0} ≠ ∅.
//
// Suppose (1). Then, lcm[X, Y] = ∅ ⊆ lcm(X, Y).
// Suppose (2). Then, lcm[X, Y] = lcm(X, Y) = {0}.
// Suppose (3). As Y ≠ {0}, lcm(X, Y) = |X Y| / gcd(X, Y).
// Therefore, from 0 ∈ |X Y| and ∃y ∈ Y ∩ ℚ∖{0} : |y| ∈ gcd(X, Y), 0 ∈ lcm(X, Y).
// Therefore, lcm[X, Y] = {0} ⊆ lcm(X, Y).
// Suppose (4). In the same manner, lcm[X, Y] ⊆ lcm(X, Y).
// Suppose (5). Let x ∈ X ∩ ℚ∖{0}, y ∈ Y ∩ ℚ∖{0} ≠ ∅.
// Then, |x y| / gcd(x, y) ∈ lcm(X, Y) = |X Y| / gcd(X, Y).
// Therefore, lcm[X, Y] ⊆ lcm(X, Y).
//
// Hence, the result. ∎
pub fn lcm(&self, rhs: &Self, site: Option<Site>) -> Self {
    let mut rs = TupperIntervalSet::new();
    for x in self {
        for y in rhs {
            if let Some(g) = x.g.union(y.g) {
                if x.x == I_ZERO && y.x == I_ZERO {
                    // X = Y = {0}.
                    let dec = Decoration::Dac.min(x.d).min(y.d);
                    rs.insert(TupperInterval::new(DecInterval::set_dec(I_ZERO, dec), g));
                } else {
                    // |X Y| / gcd(X, Y).
                    let xs = &TupperIntervalSet::from(*x);
                    let ys = &TupperIntervalSet::from(*y);
                    rs.extend((xs * ys).abs().div(&xs.gcd(ys, site), None).into_iter());
                }
            }
        }
    }
    rs.normalize(false);
    rs
}
// Compiled only when the `arb` feature is disabled.
#[cfg(not(feature = "arb"))]
impl_op!(ln(x), x.ln());
/// Evaluates the logarithm as ln(self) / ln(rhs).
///
/// The division may record a branch cut at the given `site`.
pub fn log(&self, rhs: &Self, site: Option<Site>) -> Self {
    let numerator = self.ln();
    let denominator = rhs.ln();
    numerator.div(&denominator, site)
}
impl_op!(max(x, y), x.max(y));
impl_op!(min(x, y), x.min(y));
// f(x, y) = x - y ⌊x / y⌋.
impl_op_cut!(modulo(x, y), {
    let q = (x / y).floor();
    let qa = q.inf();
    let qb = q.sup();
    // The result is intersected with the hull of {0} and y to tighten it.
    let range = y.interval().unwrap().convex_hull(I_ZERO);
    if qb - qa == 1.0 {
        // The quotient spans exactly two consecutive integers;
        // cut the result at the jump of ⌊x / y⌋.
        let q0 = DecInterval::set_dec(interval!(qa, qa).unwrap(), q.decoration());
        let q1 = DecInterval::set_dec(interval!(qb, qb).unwrap(), q.decoration());
        let z0 = (-y).mul_add(q0, x);
        let z1 = (-y).mul_add(q1, x);
        let z0 =
            DecInterval::set_dec(z0.interval().unwrap().intersection(range), z0.decoration());
        let z1 =
            DecInterval::set_dec(z1.interval().unwrap().intersection(range), z1.decoration());
        (z0, Some(z1))
    } else {
        let z = (-y).mul_add(q, x);
        let z = DecInterval::set_dec(z.interval().unwrap().intersection(range), z.decoration());
        (z, None)
    }
});
/// Evaluates `self * rhs + addend` with a single fused `mul_add` over every
/// combination of member intervals whose branch information (`g`) can be
/// merged.
pub fn mul_add(&self, rhs: &Self, addend: &Self) -> Self {
    let mut rs = Self::new();
    for x in self {
        for y in rhs {
            if let Some(g) = x.g.union(y.g) {
                for z in addend {
                    if let Some(g) = g.union(z.g) {
                        rs.insert(TupperInterval::new(
                            x.dec_interval().mul_add(y.dec_interval(), z.dec_interval()),
                            g,
                        ));
                    }
                }
            }
        }
    }
    rs.normalize(false);
    rs
}
#[cfg(not(feature = "arb"))]
pub fn pow(&self, rhs: &Self, site: Option<Site>) -> Self {
    self.pow_impl(rhs, site)
}
// For any integer n,
//
//   x^n = | x × ⋯ × x (n copies)  if n > 0,
//         | 1                     if n = 0 ∧ x ≠ 0,
//         | 1 / x^-n              if n < 0,
//
// and for any non-integer y,
//
//   x^y = | 1             if x = 0 ∧ y > 0,
//         | exp(y ln(x))  otherwise.
//
// 0^0 is left undefined.
impl_op_cut!(pow_impl(x, y), {
    let a = x.inf();
    let b = x.sup();
    let c = y.inf();
    let d = y.sup();
    if y.is_singleton() {
        Self::pow_singleton(x, y)
    } else if a < 0.0 {
        // a < 0. For x < 0, x^y is only defined at integer exponents,
        // so enumerate/bound the integers contained in y.
        let dec = Decoration::Trv;
        let nc = c.ceil();
        let nd = d.floor();
        let x_neg = x.min(DI_ZERO);
        let (z0, z1) = if nc > nd {
            // y contains no integer.
            (DecInterval::EMPTY, None)
        } else if nc == nd {
            // y contains exactly one integer.
            let y = dec_interval!(nc, nc).unwrap();
            let z = Self::pow_singleton(x_neg, y).0;
            let z = DecInterval::set_dec(z.interval().unwrap(), dec);
            (z, None)
        } else if nd - nc == 1.0 {
            // y contains exactly two integers; keep the two results separate.
            let y0 = dec_interval!(nc, nc).unwrap();
            let y1 = dec_interval!(nd, nd).unwrap();
            let z0 = Self::pow_singleton(x_neg, y0).0;
            let z1 = Self::pow_singleton(x_neg, y1).0;
            let z0 = DecInterval::set_dec(z0.interval().unwrap(), dec);
            let z1 = DecInterval::set_dec(z1.interval().unwrap(), dec);
            (z0, Some(z1))
        } else {
            // y contains three or more integers; bound |x^y| by its value at
            // the extreme exponents on each side of x = -1.
            //
            //   |x^y|
            //       ⋮ |  ⋮      ↑
            //   y = 1 | -1      |
            //       0 |  0 -+-  1
            //      -1 |  1      |
            //       ⋮ |  ⋮      |
            //  ---------+---------+- 0 -→
            //        -1         0  x
            let z0 = if nd == f64::INFINITY {
                DecInterval::ENTIRE
            } else {
                let x = x.intersection(const_dec_interval!(f64::NEG_INFINITY, -1.0));
                if x.is_empty() {
                    DecInterval::EMPTY
                } else {
                    let y = dec_interval!(nd, nd).unwrap();
                    let z = Self::pow_singleton(x, y).0;
                    z.convex_hull(-z)
                }
            };
            let z1 = if nc == f64::NEG_INFINITY {
                DecInterval::ENTIRE
            } else {
                let x = x.intersection(const_dec_interval!(-1.0, 0.0));
                if x.is_empty() {
                    DecInterval::EMPTY
                } else {
                    let y = dec_interval!(nc, nc).unwrap();
                    let z = Self::pow_singleton(x, y).0;
                    z.convex_hull(-z)
                }
            };
            let z = z0.convex_hull(z1);
            (z, None)
        };
        if b < 0.0 {
            (z0, z1)
        } else {
            // x also reaches [0, ∞); merge with the ordinary power there.
            let z = z0
                .convex_hull(z1.unwrap_or(DecInterval::EMPTY))
                .convex_hull(x.pow(y));
            (z, None)
        }
    } else {
        // a ≥ 0.
        (x.pow(y), None)
    }
});
#[cfg(not(feature = "arb"))]
pub fn pow_rational(&self, rhs: &Self, site: Option<Site>) -> Self {
    self.pow_rational_impl(rhs, site)
}
// For any rational number y = p/q where p and q (> 0) are coprime integers,
//
// x^y = surd(x, q)^p.
//
// surd(x, q) is the real-valued qth root of x for odd q,
// and is the principal qth root of x ≥ 0 for even q. Therefore, for x < 0,
//
// | (-x)^y if y = (even)/(odd)
// | (x^y is an even function of x),
// x^y = | -(-x)^y if y = (odd)/(odd)
// | (x^y is an odd function of x),
// | undefined otherwise (y = (odd)/(even) or irrational).
//
// And for any irrational number y,
//
// x^y = | 1 if x = 0 ∧ y > 0,
// | exp(y ln(x)) otherwise.
//
// 0^0 is left undefined.
impl_op_cut!(pow_rational_impl(x, y), {
let a = x.inf();
if y.is_singleton() {
Self::pow_singleton(x, y)
} else if a < 0.0 {
// a < 0.
let dec = Decoration::Trv;
let x = x.interval().unwrap();
let y = y.interval().unwrap();
// x^y < 0 part, which comes from
// x < 0, y = (odd)/(odd) (x^y is an odd function of x).
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | true |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/parse.rs | rust/src/parse.rs | use crate::{
ast::{Expr, NaryOp},
context::{Context, InputWithContext},
real::Real,
};
use inari::dec_interval;
use nom::{
branch::alt,
bytes::complete::{tag, take, take_while},
character::complete::{char, digit0, digit1, one_of, satisfy, space0},
combinator::{
all_consuming, consumed, cut, map, map_opt, not, opt, peek, recognize, value, verify,
},
error::{ErrorKind as NomErrorKind, ParseError},
multi::{fold_many0, many0_count},
sequence::{delimited, pair, preceded, separated_pair, terminated},
Err, Finish, IResult, Input, Mode, OutputMode, PResult, Parser,
};
use rug::{Integer, Rational};
use std::ops::Range;
/// The kind of a parse error reported to the user.
#[derive(Clone, Debug)]
enum ErrorKind<'a> {
    /// A specific character was expected.
    ExpectedChar(char),
    /// The end of input was expected.
    ExpectedEof,
    /// An expression was expected.
    ExpectedExpr,
    /// An identifier that is not defined in any context was found.
    UnknownIdentifier(&'a str),
    /// Errors reported by nom's combinators that should not be exposed.
    OtherNomError,
}
/// A parse error together with the input position where it occurred.
#[derive(Clone, Debug)]
struct Error<'a, I> {
    input: I,
    kind: ErrorKind<'a>,
}
impl<'a, I> Error<'a, I> {
    /// Creates an [`ErrorKind::ExpectedExpr`] error at `input`.
    fn expected_expr(input: I) -> Self {
        Self {
            input,
            kind: ErrorKind::ExpectedExpr,
        }
    }
    /// Creates an [`ErrorKind::UnknownIdentifier`] error for `name` at `input`.
    fn unknown_identifier(input: I, name: &'a str) -> Self {
        Self {
            input,
            kind: ErrorKind::UnknownIdentifier(name),
        }
    }
}
impl<'a, I> ParseError<I> for Error<'a, I> {
    fn from_error_kind(input: I, kind: NomErrorKind) -> Self {
        Self {
            input,
            kind: match kind {
                NomErrorKind::Eof => ErrorKind::ExpectedEof,
                _ => ErrorKind::OtherNomError,
            },
        }
    }
    fn append(_: I, _: NomErrorKind, other: Self) -> Self {
        // Only keep the first error.
        other
    }
    fn from_char(input: I, c: char) -> Self {
        Self {
            input,
            kind: ErrorKind::ExpectedChar(c),
        }
    }
}
/// The result type shared by all parsers in this module.
type ParseResult<'a, O> = IResult<InputWithContext<'a>, O, Error<'a, InputWithContext<'a>>>;
// Based on `inari::parse::parse_dec_float`.
/// Parses a decimal literal such as "12", "12." or "12.3" into an exact
/// rational number.
fn parse_decimal(mant: &str) -> Option<Rational> {
    /// Returns base^exp as an exact rational; `exp` may be negative.
    fn pow(base: u32, exp: i32) -> Rational {
        let magnitude = Integer::from(Integer::u_pow_u(base, exp.unsigned_abs()));
        let mut result = Rational::from(magnitude);
        if exp < 0 {
            result.recip_mut();
        }
        result
    }
    let (int_part, frac_part) = match mant.split_once('.') {
        Some(parts) => parts,
        None => (mant, ""),
    };
    // "123.456" is read as 123456 × 10^-3 (ulp = 1e-3).
    let ulp = pow(10, -(frac_part.len() as i32));
    let digits = [int_part, frac_part].concat();
    let int = Integer::parse_radix(digits, 10).unwrap();
    Some(Rational::from(int) * ulp)
}
/// Parses a decimal literal and returns its source text.
fn decimal_literal(i: InputWithContext<'_>) -> ParseResult<'_, &str> {
    map(
        alt((
            // "12", "12." or "12.3"
            recognize(pair(digit1, opt(pair(char('.'), digit0)))),
            // ".3"
            recognize(pair(char('.'), digit1)),
        )),
        |i: InputWithContext| i.source,
    )
    .parse(i)
}
/// Parses a decimal literal into a constant expression, exactly as a rational
/// when possible, and as an enclosing interval otherwise.
fn decimal_constant(i: InputWithContext) -> ParseResult<Expr> {
    map(decimal_literal, |s| {
        let x = if let Some(x_q) = parse_decimal(s) {
            Real::from(x_q)
        } else {
            // Fall back to letting inari parse "[<literal>]" as an interval.
            let interval_literal = ["[", s, "]"].concat();
            Real::from(dec_interval!(&interval_literal).unwrap())
        };
        Expr::constant(x)
    })
    .parse(i)
}
/// Parses the first character of an identifier (a letter).
fn identifier_head(i: InputWithContext) -> ParseResult<char> {
    satisfy(|c| c.is_alphabetic())(i)
}
/// Parses the rest of an identifier (letters, digits and primes).
fn identifier_tail(i: InputWithContext<'_>) -> ParseResult<'_, &str> {
    map(
        recognize(many0_count(satisfy(|c| c.is_alphanumeric() || c == '\''))),
        |i: InputWithContext| i.source,
    )
    .parse(i)
}
/// Parses an identifier and returns its source text.
fn identifier(i: InputWithContext<'_>) -> ParseResult<'_, &str> {
    map(recognize(pair(identifier_head, identifier_tail)), |i| {
        i.source
    })
    .parse(i)
}
/// Parses an identifier defined in some context, returning the innermost
/// context that defines it along with the name.
fn name_in_context(i: InputWithContext<'_>) -> ParseResult<'_, (&Context, &str)> {
    let context_stack = i.context_stack;
    map_opt(identifier, move |name| {
        context_stack
            .iter()
            .rfind(|c| c.has(name))
            .map(|&c| (c, name))
    })
    .parse(i)
}
/// Parses a named constant such as `pi`.
fn named_constant(i: InputWithContext) -> ParseResult<Expr> {
    map_opt(name_in_context, |(ctx, name)| ctx.get_constant(name)).parse(i)
}
/// Parses the name of a function defined in some context.
fn function_name(i: InputWithContext<'_>) -> ParseResult<'_, (&Context, &str)> {
    verify(name_in_context, |(ctx, name)| ctx.is_function(name)).parse(i)
}
/// Nonempty, comma-separated list of expressions.
fn expr_list(i: InputWithContext) -> ParseResult<Vec<Expr>> {
    let (i, mut x) = expr(i)?;
    fold_many0(
        preceded(delimited(space0, char(','), space0), cut(expr)),
        move || vec![std::mem::take(&mut x)],
        |mut xs, x| {
            xs.push(x);
            xs
        },
    )
    .parse(i)
}
/// Parses a function application such as `sin(x)`.
fn function_application(i: InputWithContext) -> ParseResult<Expr> {
    map(
        pair(
            function_name,
            delimited(
                delimited(space0, cut(char('(')), space0),
                cut(expr_list),
                preceded(space0, cut(char(')'))),
            ),
        ),
        |((ctx, name), args)| ctx.apply(name, args),
    )
    .parse(i)
}
/// If an identifier is found, [`cut`]s with [`ErrorKind::UnknownIdentifier`]
/// (the position where the identifier is found is reported);
/// otherwise, fails in the same manner as [`identifier`].
fn fail_unknown_identifier(i: InputWithContext) -> ParseResult<Expr> {
    let (i, name) = peek(identifier).parse(i)?;
    Err(Err::Failure(Error::unknown_identifier(i, name)))
}
/// Fails with [`ErrorKind::ExpectedExpr`].
fn fail_expr(i: InputWithContext) -> ParseResult<Expr> {
    Err(Err::Error(Error::expected_expr(i)))
}
/// Parses the expression between absolute-value bars (the closing bar may be
/// preceded by whitespace).
///
/// Bars are ambiguous: `|` opens/closes an absolute value, while `||` is also
/// the OR operator. The parser tries successively longer prefixes of the input
/// that end just before a `'|'`, accepting the first prefix (containing an
/// even number of bars) that parses as a complete non-Boolean expression.
fn expr_within_bars_terminated_with_space0(i: InputWithContext) -> ParseResult<Expr> {
    use crate::{
        ast::{
            BinaryOp::{And, Eq, Ge, Gt, Le, Lt, Or},
            UnaryOp::Not,
        },
        binary, bool_constant, unary,
    };
    let mut o = recognize(take_while(|c| c != '|')).parse(i.clone())?;
    // Whether `taken` currently contains an even number of bars;
    // each extension below appends exactly one more bar.
    let mut even_bars_taken = true;
    loop {
        let (rest, taken) = o;
        if even_bars_taken {
            if let Ok((_, x)) = all_consuming(terminated(expr, space0)).parse(taken.clone()) {
                match x {
                    bool_constant!(_)
                    | unary!(Not, _)
                    | binary!(And | Eq | Ge | Gt | Le | Lt | Or, _, _) => {
                        // Not an absolute value.
                    }
                    _ => return Ok((rest, x)),
                }
            }
        }
        if rest.input_len() == 0 {
            // Reached the end of input. All we can do is return a meaningful error.
            return expr(taken);
        }
        // Extend `taken` past the next bar up to the one after it.
        // Do not use `taken.input_len()`, which returns the number of bytes instead of chars.
        o = recognize(pair(
            take(taken.source.chars().count() + 1),
            take_while(|c| c != '|'),
        ))
        .parse(i.clone())?;
        even_bars_taken = !even_bars_taken;
    }
}
/// The inverse operation of [`cut`]; converts [`Err::Failure`] back to [`Err::Error`].
pub fn decut<I, F>(parser: F) -> impl Parser<I, Output = F::Output, Error = F::Error>
where
    F: Parser<I>,
{
    Decut { parser }
}
/// The parser returned by [`decut`].
pub struct Decut<F> {
    parser: F,
}
impl<I, F> Parser<I> for Decut<F>
where
    F: Parser<I>,
{
    type Output = F::Output;
    type Error = F::Error;
    #[inline(always)]
    fn process<OM: OutputMode>(&mut self, input: I) -> PResult<OM, I, Self::Output, Self::Error> {
        match self.parser.process::<OM>(input) {
            Err(Err::Error(e)) => Err(Err::Error(e)),
            // Demote an unrecoverable failure to a recoverable error.
            Err(Err::Failure(e)) => Err(Err::Error(OM::Error::bind(|| e))),
            Err(Err::Incomplete(i)) => Err(Err::Incomplete(i)),
            Ok((i, o)) => Ok((i, o)),
        }
    }
}
/// Parses a primary expression: a constant, a function application, or a
/// parenthesized, bracketed, bar- or corner-bracket-delimited expression.
fn primary_expr(i: InputWithContext) -> ParseResult<Expr> {
    let builtin = i.context_stack.first().unwrap();
    map(
        consumed(alt((
            decimal_constant,
            named_constant,
            function_application,
            fail_unknown_identifier,
            delimited(
                terminated(char('('), space0),
                cut(expr),
                preceded(space0, cut(char(')'))),
            ),
            map(
                delimited(
                    terminated(char('['), space0),
                    cut(expr_list),
                    preceded(space0, cut(char(']'))),
                ),
                |xs| Expr::nary(NaryOp::List, xs),
            ),
            map(
                delimited(
                    delimited(char('|'), peek(not(char('|'))), space0),
                    // Not an OR expression (unless it's called from the case below).
                    // So we can cut when no expression is found.
                    cut(expr_within_bars_terminated_with_space0),
                    cut(char('|')),
                ),
                move |x| builtin.apply("abs", vec![x]),
            ),
            map(
                delimited(
                    terminated(char('|'), space0),
                    // Possibly an OR expression. We must not cut when no expression is found.
                    // The above case is called recursively, so we also need to cancel cut.
                    decut(expr_within_bars_terminated_with_space0),
                    cut(char('|')),
                ),
                move |x| builtin.apply("abs", vec![x]),
            ),
            map(
                delimited(
                    terminated(char('⌈'), space0),
                    cut(expr),
                    preceded(space0, cut(char('⌉'))),
                ),
                move |x| builtin.apply("ceil", vec![x]),
            ),
            map(
                delimited(
                    terminated(char('⌊'), space0),
                    cut(expr),
                    preceded(space0, cut(char('⌋'))),
                ),
                move |x| builtin.apply("floor", vec![x]),
            ),
            fail_expr,
        ))),
        // Attach the consumed source range to the resulting expression.
        |(i, x)| x.with_source_range(i.source_range),
    )
    .parse(i)
}
// ^ is right-associative; x^y^z is equivalent to x^(y^z).
fn power_expr(i: InputWithContext) -> ParseResult<Expr> {
    let builtin = i.context_stack.first().unwrap();
    map(
        pair(
            primary_expr,
            opt(pair(
                delimited(
                    space0,
                    // "^^" is tried before its prefix "^".
                    alt((value("^^", tag("^^")), value("^", char('^')))),
                    space0,
                ),
                // The exponent is a unary expression, allowing, e.g., "x^-2".
                cut(unary_expr),
            )),
        ),
        move |(x, op_y)| match op_y {
            Some((op, y)) => {
                let range = x.source_range.start..y.source_range.end;
                builtin.apply(op, vec![x, y]).with_source_range(range)
            }
            _ => x,
        },
    )
    .parse(i)
}
/// Parses a unary expression: `+x`, `~x`, `-x`, `!x` or a power expression.
fn unary_expr(i: InputWithContext) -> ParseResult<Expr> {
    let builtin = i.context_stack.first().unwrap();
    alt((
        // Unary plus has no corresponding operation.
        preceded(pair(char('+'), space0), cut(unary_expr)),
        map(
            consumed(separated_pair(
                alt((
                    value("~", char('~')),
                    value("-", one_of("-−")), // a hyphen-minus or a minus sign
                    value("!", one_of("!¬")),
                )),
                space0,
                cut(unary_expr),
            )),
            move |(i, (op, x))| builtin.apply(op, vec![x]).with_source_range(i.source_range),
        ),
        power_expr,
    ))
    .parse(i)
}
/// Parses a multiplicative expression, including implicit multiplication
/// such as `2x` and `x y`.
fn multiplicative_expr(i: InputWithContext) -> ParseResult<Expr> {
    let builtin = i.context_stack.first().unwrap();
    let (i, mut x) = unary_expr(i)?;
    fold_many0(
        alt((
            // x * y
            // x / y
            pair(
                delimited(
                    space0,
                    alt((value("*", char('*')), value("/", char('/')))),
                    space0,
                ),
                cut(unary_expr),
            ),
            // 2x
            // x y
            pair(value("*", space0), power_expr),
        )),
        move || std::mem::take(&mut x),
        move |xs, (op, y)| {
            let range = xs.source_range.start..y.source_range.end;
            builtin.apply(op, vec![xs, y]).with_source_range(range)
        },
    )
    .parse(i)
}
/// Parses an additive expression: `x + y` or `x - y`.
fn additive_expr(i: InputWithContext) -> ParseResult<Expr> {
    let builtin = i.context_stack.first().unwrap();
    let (i, mut x) = multiplicative_expr(i)?;
    fold_many0(
        pair(
            delimited(
                space0,
                alt((
                    value("+", char('+')),
                    value("-", one_of("-−")), // a hyphen-minus or a minus sign
                )),
                space0,
            ),
            cut(multiplicative_expr),
        ),
        move || std::mem::take(&mut x),
        move |xs, (op, y)| {
            let range = xs.source_range.start..y.source_range.end;
            builtin.apply(op, vec![xs, y]).with_source_range(range)
        },
    )
    .parse(i)
}
// Relational operators can be chained: x op1 y op2 z is equivalent to x op1 y ∧ y op2 z.
fn relational_expr(i: InputWithContext) -> ParseResult<Expr> {
    let builtin = i.context_stack.first().unwrap();
    let (i, mut side) = additive_expr(i)?;
    map(
        fold_many0(
            pair(
                delimited(
                    space0,
                    alt((
                        value("=", char('=')),
                        // Two-character operators are tried before their prefixes.
                        value(">=", alt((tag(">="), tag("≥")))),
                        value(">", char('>')),
                        value("<=", alt((tag("<="), tag("≤")))),
                        value("<", char('<')),
                    )),
                    space0,
                ),
                cut(additive_expr),
            ),
            // Collect operators and operands: "x op1 y op2 z" gives
            // (ops, sides) = ([op1, op2], [x, y, z]).
            move || (vec![], vec![std::mem::take(&mut side)]),
            |(mut ops, mut sides), (op, side)| {
                ops.push(op);
                sides.push(side);
                (ops, sides)
            },
        ),
        move |(ops, sides)| {
            assert_eq!(sides.len(), ops.len() + 1);
            if sides.len() == 1 {
                sides.into_iter().next().unwrap()
            } else {
                // Build each adjacent relation, then conjoin them with "&&".
                ops.iter()
                    .zip(sides.windows(2))
                    .map(|(op, sides)| {
                        let range = sides[0].source_range.start..sides[1].source_range.end;
                        builtin.apply(op, sides.to_vec()).with_source_range(range)
                    })
                    .reduce(|xs, y| {
                        let range = xs.source_range.start..y.source_range.end;
                        builtin.apply("&&", vec![xs, y]).with_source_range(range)
                    })
                    .unwrap()
            }
        },
    )
    .parse(i)
}
/// Parses a conjunction: `x && y` or `x ∧ y`.
fn and_expr(i: InputWithContext) -> ParseResult<Expr> {
    let builtin = i.context_stack.first().unwrap();
    let (i, mut x) = relational_expr(i)?;
    fold_many0(
        preceded(
            delimited(space0, alt((tag("&&"), tag("∧"))), space0),
            cut(relational_expr),
        ),
        move || std::mem::take(&mut x),
        move |xs, y| {
            let range = xs.source_range.start..y.source_range.end;
            builtin.apply("&&", vec![xs, y]).with_source_range(range)
        },
    )
    .parse(i)
}
/// Parses a disjunction: `x || y` or `x ∨ y`.
fn or_expr(i: InputWithContext) -> ParseResult<Expr> {
    let builtin = i.context_stack.first().unwrap();
    let (i, mut x) = and_expr(i)?;
    fold_many0(
        preceded(
            delimited(space0, alt((tag("||"), tag("∨"))), space0),
            cut(and_expr),
        ),
        move || std::mem::take(&mut x),
        move |xs, y| {
            let range = xs.source_range.start..y.source_range.end;
            builtin.apply("||", vec![xs, y]).with_source_range(range)
        },
    )
    .parse(i)
}
/// Parses an expression (the lowest-precedence rule).
fn expr(i: InputWithContext) -> ParseResult<Expr> {
    or_expr(i)
}
/// Parses an expression.
pub fn parse_expr(source: &str, context_stack: &[&Context]) -> Result<Expr, String> {
    let i = InputWithContext::new(source, context_stack);
    match all_consuming(delimited(space0, expr, space0))
        .parse(i.clone())
        .finish()
    {
        Ok((_, x)) => Ok(x),
        Err(e) => Err(convert_error(i, e)),
    }
}
/// Renders an error `message` about the byte span `range` of `source`,
/// showing the affected line with the span underlined.
///
/// Panics unless `range.start <= range.end <= source.len()`.
pub fn format_error(source: &str, range: Range<usize>, message: &str) -> String {
    assert!(range.start <= range.end && range.end <= source.len());
    // Byte offset of a substring of `source` (same computation as `nom::Offset`).
    let offset = |substr: &str| substr.as_ptr() as usize - source.as_ptr() as usize;
    // Locate the last line starting at or before `range.start`.
    // `.lines()` is not used as it would ignore a final line ending.
    // The first segment has offset 0, so both variables are always assigned.
    let mut line = 0;
    let mut source_line = "";
    for (i, l) in source.split('\n').enumerate() {
        if offset(l) > range.start {
            break;
        }
        line = i;
        source_line = l;
    }
    let start_in_line = range.start - offset(source_line);
    let end_in_line = (range.end - offset(source_line)).min(source_line.len());
    let col = source_line[..start_in_line].chars().count();
    let n_cols = source_line[start_in_line..end_in_line].chars().count();
    // A caret for an empty span, tildes otherwise.
    let decoration = if n_cols == 0 {
        "^".to_owned()
    } else {
        "~".repeat(n_cols)
    };
    format!(
        r"
input:{}:{}: error: {}
{}
{:col$}{}
",
        line + 1,
        col + 1,
        message,
        source_line,
        "",
        decoration
    )
}
/// Converts a parse error into a human-readable message with the offending
/// source span underlined.
fn convert_error(input: InputWithContext, e: Error<InputWithContext>) -> String {
    use nom::Offset;
    let source = input.source;
    let offset = source.offset(e.input.source);
    // Only an unknown identifier has a span; other errors point at a position.
    let len = match e.kind {
        ErrorKind::UnknownIdentifier(name) => name.len(),
        _ => 0,
    };
    let message = match e.kind {
        ErrorKind::ExpectedChar(c) => format!("expected '{}'", c),
        ErrorKind::ExpectedEof => "unexpected input".to_owned(),
        ErrorKind::ExpectedExpr => "expected expression".to_owned(),
        ErrorKind::UnknownIdentifier(name) => format!("'{}' is not defined", name),
        // `OtherNomError` must not reach the user.
        _ => panic!("unexpected error kind"),
    };
    format_error(source, offset..offset + len, &message)
}
// Parser round-trip tests: each input string is parsed and the resulting AST
// is compared against its short S-expression dump.
#[cfg(test)]
mod tests {
    use super::*;
    use crate::context::{Def, VarProps};

    #[test]
    fn parse_expr() {
        // A context that declares the free variables used by the inputs below,
        // layered on top of the built-in context inside `test`.
        let ctx = Context::new()
            .def("a", Def::var("a", VarProps::default()))
            .def("b", Def::var("b", VarProps::default()))
            .def("k", Def::var("k", VarProps::default()))
            .def("n", Def::var("n", VarProps::default()))
            .def("z", Def::var("z", VarProps::default()));
        // Parses `input` and asserts that its AST dumps exactly to `expected`.
        let test = |input, expected| {
            let f = super::parse_expr(input, &[Context::builtin(), &ctx]).unwrap();
            assert_eq!(format!("{}", f.dump_short()), expected);
        };
        // Constants. (`@` is how numeric constants appear in the short dump.)
        test("false", "False");
        test("true", "True");
        test("e", "@");
        test("gamma", "@");
        test("γ", "@");
        test("pi", "@");
        test("π", "@");
        test("i", "(Complex 0 1)");
        test("[x, y, z]", "(List x y z)");
        // Absolute-value bars, including the ambiguous cases where `||` could be
        // read either as two nested bars or as the logical Or operator.
        test("|x|", "(Abs x)");
        test("||x| + y|", "(Abs (Add (Abs x) y))");
        test("|x + |y||", "(Abs (Add x (Abs y)))");
        test(
            "y = ||x|| || |||x|| + |||y|||| = y",
            "(Or (Eq y (Abs (Abs x))) (Eq (Abs (Add (Abs (Abs x)) (Abs (Abs (Abs y))))) y))",
        );
        test("y = x || x || x", "(Eq y (Mul (Mul x (Abs (Abs x))) x))");
        test(
            "y = x || x || x = y",
            "(And (Eq y (Mul (Mul x (Abs (Abs x))) x)) (Eq (Mul (Mul x (Abs (Abs x))) x) y))",
        );
        test(
            // Parsed as "y = x ||x| x| = y" instead of "y = x || x |x| = y".
            "y = x || x | x | = y",
            "(And (Eq y (Mul x (Abs (Mul (Abs x) x)))) (Eq (Mul x (Abs (Mul (Abs x) x))) y))",
        );
        test("true || true || true", "(Or (Or True True) True)");
        test("⌈x⌉", "(Ceil x)");
        test("⌊x⌋", "(Floor x)");
        // Unary functions and their aliases (ASCII and Unicode spellings).
        test("abs(x)", "(Abs x)");
        test("acos(x)", "(Acos x)");
        test("acosh(x)", "(Acosh x)");
        test("Ai(x)", "(AiryAi x)");
        test("Ai'(x)", "(AiryAiPrime x)");
        test("Bi(x)", "(AiryBi x)");
        test("Bi'(x)", "(AiryBiPrime x)");
        test("arg(x)", "(Arg x)");
        test("asin(x)", "(Asin x)");
        test("asinh(x)", "(Asinh x)");
        test("atan(x)", "(Atan x)");
        test("atanh(x)", "(Atanh x)");
        test("ceil(x)", "(Ceil x)");
        test("Chi(x)", "(Chi x)");
        test("Ci(x)", "(Ci x)");
        test("~x", "(Conj x)");
        test("cos(x)", "(Cos x)");
        test("cosh(x)", "(Cosh x)");
        test("psi(x)", "(Digamma x)");
        test("ψ(x)", "(Digamma x)");
        test("Ei(x)", "(Ei x)");
        test("E(x)", "(EllipticE x)");
        test("K(x)", "(EllipticK x)");
        test("erf(x)", "(Erf x)");
        test("erfc(x)", "(Erfc x)");
        test("erfi(x)", "(Erfi x)");
        test("exp(x)", "(Exp x)");
        test("floor(x)", "(Floor x)");
        test("C(x)", "(FresnelC x)");
        test("S(x)", "(FresnelS x)");
        test("Gamma(x)", "(Gamma x)");
        test("Γ(x)", "(Gamma x)");
        test("Im(x)", "(Im x)");
        test("erfinv(x)", "(InverseErf x)");
        test("erfcinv(x)", "(InverseErfc x)");
        test("li(x)", "(Li x)");
        test("ln(x)", "(Ln x)");
        test("lnGamma(x)", "(LnGamma x)");
        test("lnΓ(x)", "(LnGamma x)");
        test("-x", "(Neg x)"); // hyphen-minus
        test("−x", "(Neg x)"); // minus sign
        test("Re(x)", "(Re x)");
        test("Shi(x)", "(Shi x)");
        test("Si(x)", "(Si x)");
        test("sgn(x)", "(Sign x)");
        test("sin(x)", "(Sin x)");
        test("sinc(x)", "(Sinc x)");
        test("sinh(x)", "(Sinh x)");
        test("sqrt(x)", "(Sqrt x)");
        test("tan(x)", "(Tan x)");
        test("tanh(x)", "(Tanh x)");
        test("zeta(x)", "(Zeta x)");
        test("ζ(x)", "(Zeta x)");
        // Binary and n-ary functions. Note that `K`, `Gamma`/`Γ` and `W` are
        // overloaded by arity, and `gcd`/`lcm`/`max`/`min` left-fold their arguments.
        test("atan2(y, x)", "(Atan2 y x)");
        test("I(n, x)", "(BesselI n x)");
        test("J(n, x)", "(BesselJ n x)");
        test("K(n, x)", "(BesselK n x)");
        test("Y(n, x)", "(BesselY n x)");
        test("Gamma(a, x)", "(GammaInc a x)");
        test("Γ(a, x)", "(GammaInc a x)");
        test("W(x)", "(LambertW 0 x)");
        test("W(k, x)", "(LambertW k x)");
        test("log(b, x)", "(Log b x)");
        test("mod(x, y)", "(Mod x y)");
        test("gcd(x, y, z)", "(Gcd (Gcd x y) z)");
        test("lcm(x, y, z)", "(Lcm (Lcm x y) z)");
        test("max(x, y, z)", "(Max (Max x y) z)");
        test("min(x, y, z)", "(Min (Min x y) z)");
        test("if(x = 0, y, z)", "(IfThenElse (Boole (Eq x 0)) y z)");
        test("rankedMax([x, y, z], k)", "(RankedMax (List x y z) k)");
        test("rankedMin([x, y, z], k)", "(RankedMin (List x y z) k)");
        // Arithmetic operators: exponentiation is right-associative and binds
        // tighter than unary minus; the other operators are left-associative.
        test("x ^ y ^ z", "(Pow x (Pow y z))");
        test("-x ^ -y", "(Neg (Pow x (Neg y)))");
        test("x ^^ y ^^ z", "(PowRational x (PowRational y z))");
        test("-x ^^ -y", "(Neg (PowRational x (Neg y)))");
        test("+x", "x");
        test("2x", "(Mul 2 x)");
        test("x y z", "(Mul (Mul x y) z)");
        test("x * y * z", "(Mul (Mul x y) z)");
        test("x / y / z", "(Div (Div x y) z)");
        test("x + y + z", "(Add (Add x y) z)");
        test("x - y - z", "(Sub (Sub x y) z)"); // hyphen-minus
        test("x − y − z", "(Sub (Sub x y) z)"); // minus sign
        test("x + y z", "(Add x (Mul y z))");
        test("(x + y) z", "(Mul (Add x y) z)");
        // Relational operators; chained relations expand to conjunctions.
        test("x = y", "(Eq x y)");
        test("x >= y", "(Ge x y)");
        test("x ≥ y", "(Ge x y)");
        test("x > y", "(Gt x y)");
        test("x <= y", "(Le x y)");
        test("x ≤ y", "(Le x y)");
        test("x < y", "(Lt x y)");
        test("x = y = z", "(And (Eq x y) (Eq y z))");
        // Boolean operators: And binds tighter than Or.
        test("!x", "(Not x)");
        test("¬x", "(Not x)");
        test("x && y", "(And x y)");
        test("x ∧ y", "(And x y)");
        test("x || y", "(Or x y)");
        test("x ∨ y", "(Or x y)");
        test("x = y && y = z", "(And (Eq x y) (Eq y z))");
        test("x = y || y = z", "(Or (Eq x y) (Eq y z))");
        test(
            "x = y || y = z && z = x",
            "(Or (Eq x y) (And (Eq y z) (Eq z x)))",
        );
        test(
            "(x = y || y = z) && z = x",
            "(And (Or (Eq x y) (Eq y z)) (Eq z x))",
        );
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/region.rs | rust/src/region.rs | use inari::Interval;
/// The Cartesian product of two [`Interval`]s.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Region(Interval, Interval);

impl Region {
    /// The empty region.
    pub const EMPTY: Self = Self(Interval::EMPTY, Interval::EMPTY);

    /// Creates a new [`Region`] from its x and y components.
    ///
    /// If either component is empty, the result collapses to [`Region::EMPTY`]
    /// so that the emptiness of a region can be tested on one component alone.
    pub fn new(x: Interval, y: Interval) -> Self {
        if x.is_empty() || y.is_empty() {
            return Self::EMPTY;
        }
        Self(x, y)
    }

    /// Returns the convex hull of the regions, taken componentwise.
    pub fn convex_hull(&self, other: &Self) -> Self {
        Self::new(self.0.convex_hull(other.0), self.1.convex_hull(other.1))
    }

    /// Returns the intersection of the regions, taken componentwise.
    pub fn intersection(&self, other: &Self) -> Self {
        Self::new(self.0.intersection(other.0), self.1.intersection(other.1))
    }

    /// Returns `true` if the region is empty.
    pub fn is_empty(&self) -> bool {
        // `new` guarantees both components are empty together.
        self.0.is_empty()
    }

    /// Returns `true` if `self` is a subset of `other`.
    pub fn subset(&self, other: &Self) -> bool {
        self.0.subset(other.0) && self.1.subset(other.1)
    }

    /// Returns the x component (the first factor) of the region, i.e., `{x ∣ (x, y) ∈ R}`.
    pub fn x(&self) -> Interval {
        self.0
    }

    /// Returns the y component (the second factor) of the region, i.e., `{y ∣ (x, y) ∈ R}`.
    pub fn y(&self) -> Interval {
        self.1
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use inari::const_interval;

    #[test]
    fn region() {
        // A region with any empty component collapses to the empty region.
        let r = Region::new(Interval::EMPTY, Interval::EMPTY);
        assert_eq!(r, Region::EMPTY);
        assert!(r.is_empty());
        assert!(r.subset(&r));
        let r = Region::new(Interval::EMPTY, const_interval!(2.0, 4.0));
        assert_eq!(r, Region::EMPTY);
        assert!(r.is_empty());
        let r = Region::new(const_interval!(2.0, 4.0), Interval::EMPTY);
        assert_eq!(r, Region::EMPTY);
        assert!(r.is_empty());
        // `subset` is reflexive; the empty region is a subset of any region.
        let r = Region::new(const_interval!(1.0, 4.0), const_interval!(2.0, 5.0));
        let s = Region::new(const_interval!(2.0, 3.0), const_interval!(3.0, 4.0));
        assert!(r.subset(&r));
        assert!(Region::EMPTY.subset(&r));
        assert!(!r.subset(&s));
        assert!(s.subset(&r));
        // Overlapping regions: hull and intersection are taken componentwise.
        let r = Region::new(const_interval!(1.0, 3.0), const_interval!(2.0, 4.0));
        let s = Region::new(const_interval!(2.0, 4.0), const_interval!(3.0, 5.0));
        assert_eq!(
            r.convex_hull(&s),
            Region::new(const_interval!(1.0, 4.0), const_interval!(2.0, 5.0))
        );
        assert_eq!(
            r.intersection(&s),
            Region::new(const_interval!(2.0, 3.0), const_interval!(3.0, 4.0))
        );
        assert!(!r.subset(&s));
        assert!(!s.subset(&r));
        // Disjoint regions: the hull still spans both; the intersection is empty.
        let r = Region::new(const_interval!(1.0, 2.0), const_interval!(2.0, 3.0));
        let s = Region::new(const_interval!(3.0, 4.0), const_interval!(4.0, 5.0));
        assert_eq!(
            r.convex_hull(&s),
            Region::new(const_interval!(1.0, 4.0), const_interval!(2.0, 5.0))
        );
        assert!(r.intersection(&s).is_empty());
        assert!(!r.subset(&s));
        assert!(!s.subset(&r));
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/image.rs | rust/src/image.rs | use crate::traits::BytesAllocated;
use std::{
ops::{Index, IndexMut},
slice::{Iter, IterMut},
};
/// A two-dimensional image with a generic pixel type.
///
/// Pixels are stored contiguously in row-major (lexicographical `(y, x)`) order.
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct Image<T: Clone + Copy + Default> {
    // Width in pixels; in `1..=Image::MAX_WIDTH` (checked in `new`).
    width: u32,
    // Height in pixels; in `1..=Image::MAX_WIDTH` (checked in `new`).
    height: u32,
    // Flattened pixel data of length `width * height`.
    data: Vec<T>,
}
impl<T: Clone + Copy + Default> Image<T> {
/// The maximum limit of the width/height of an [`Image`] in pixels.
pub const MAX_WIDTH: u32 = 32768;
/// Creates a new [`Image`] with all pixels set to the default value of the type.
pub fn new(width: u32, height: u32) -> Self {
assert!(width > 0 && width <= Self::MAX_WIDTH && height > 0 && height <= Self::MAX_WIDTH);
Self {
width,
height,
data: vec![T::default(); height as usize * width as usize],
}
}
/// Returns the height of the image in pixels.
pub fn height(&self) -> u32 {
self.height
}
/// Returns an iterator over the references to the pixels of the image
/// in the lexicographical order of `(y, x)`.
pub fn pixels(&self) -> Iter<'_, T> {
self.data.iter()
}
/// Returns an iterator over the mutable references to the pixels of the image
/// in the lexicographical order of `(y, x)`.
pub fn pixels_mut(&mut self) -> IterMut<'_, T> {
self.data.iter_mut()
}
/// Returns the width of the image in pixels.
pub fn width(&self) -> u32 {
self.width
}
/// Returns the flattened index of the pixel.
fn index(&self, p: PixelIndex) -> usize {
p.y as usize * self.width as usize + p.x as usize
}
}
// Allows reading a pixel with `im[p]`, where `p` is a `PixelIndex`.
// Panics if the index is out of bounds (via the slice indexing below).
impl<T: Clone + Copy + Default> Index<PixelIndex> for Image<T> {
    type Output = T;

    fn index(&self, index: PixelIndex) -> &Self::Output {
        &self.data[self.index(index)]
    }
}

// Allows writing a pixel with `im[p] = value`.
impl<T: Clone + Copy + Default> IndexMut<PixelIndex> for Image<T> {
    fn index_mut(&mut self, index: PixelIndex) -> &mut Self::Output {
        // Compute the flat index first to end the immutable borrow of `self`.
        let i = self.index(index);
        &mut self.data[i]
    }
}

// Reports the heap memory held by the pixel buffer (excluding the struct itself).
impl<T: Clone + Copy + Default> BytesAllocated for Image<T> {
    fn bytes_allocated(&self) -> usize {
        self.data.bytes_allocated()
    }
}
/// The index of a pixel of an [`Image`].
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct PixelIndex {
    /// The horizontal index of the pixel.
    pub x: u32,
    /// The vertical index of the pixel.
    pub y: u32,
}

impl PixelIndex {
    /// Creates a new [`PixelIndex`].
    pub fn new(x: u32, y: u32) -> Self {
        PixelIndex { x, y }
    }
}

/// A rectangular region of an [`Image`].
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct PixelRange {
    begin: PixelIndex,
    end: PixelIndex,
}

impl PixelRange {
    pub const EMPTY: Self = Self {
        begin: PixelIndex { x: 0, y: 0 },
        end: PixelIndex { x: 0, y: 0 },
    };

    /// Creates a new [`PixelRange`] that spans pixels within
    /// `begin.x ≤ x < end.x` and `begin.y ≤ y < end.y`.
    ///
    /// Panics unless `begin.x ≤ end.x` and `begin.y ≤ end.y`.
    pub fn new(begin: PixelIndex, end: PixelIndex) -> Self {
        assert!(begin.x <= end.x && begin.y <= end.y);
        // A zero-width or zero-height range contains no pixels,
        // so normalize it to the canonical empty range.
        if begin.x == end.x || begin.y == end.y {
            return Self::EMPTY;
        }
        Self { begin, end }
    }

    /// Returns an iterator over the pixels in the region.
    pub fn iter(&self) -> PixelIter<'_> {
        self.into_iter()
    }
}

impl<'a> IntoIterator for &'a PixelRange {
    type Item = PixelIndex;
    type IntoIter = PixelIter<'a>;

    fn into_iter(self) -> Self::IntoIter {
        // The cursor starts at the top-left pixel of the range.
        PixelIter {
            region: self,
            p: self.begin,
        }
    }
}

/// An iterator that iterates over the pixels of an [`Image`]
/// in row-major (lexicographical `(y, x)`) order.
pub struct PixelIter<'a> {
    region: &'a PixelRange,
    p: PixelIndex,
}

impl<'a> Iterator for PixelIter<'a> {
    type Item = PixelIndex;

    fn next(&mut self) -> Option<Self::Item> {
        let current = self.p;
        // Once the cursor's row reaches `end.y`, every pixel has been yielded.
        if current.y == self.region.end.y {
            return None;
        }
        // Advance one column, wrapping to the start of the next row if needed.
        self.p = if current.x + 1 == self.region.end.x {
            PixelIndex::new(self.region.begin.x, current.y + 1)
        } else {
            PixelIndex::new(current.x + 1, current.y)
        };
        Some(current)
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn image() {
        let mut im = Image::<i32>::new(34, 45);
        let p = PixelIndex::new(12, 23);
        assert_eq!(im.width(), 34);
        assert_eq!(im.height(), 45);
        // Pixels are default-initialized and writable through `IndexMut`.
        assert_eq!(im[p], 0);
        im[p] = 123456;
        assert_eq!(im[p], 123456);
        // `pixels` yields in row-major order, so the flat offset is `y * width + x`.
        assert_eq!(
            im.pixels().copied().nth((p.y * im.width() + p.x) as usize),
            Some(123456)
        );
    }

    #[test]
    fn pixel_range() {
        // Degenerate ranges (zero width and/or height) yield no pixels.
        let r = PixelRange::new(PixelIndex::new(1, 2), PixelIndex::new(1, 2));
        let mut iter = r.iter();
        assert_eq!(iter.next(), None);
        let r = PixelRange::new(PixelIndex::new(1, 2), PixelIndex::new(4, 2));
        let mut iter = r.iter();
        assert_eq!(iter.next(), None);
        let r = PixelRange::new(PixelIndex::new(1, 2), PixelIndex::new(1, 8));
        let mut iter = r.iter();
        assert_eq!(iter.next(), None);
        // A proper range is traversed row by row, left to right.
        let r = PixelRange::new(PixelIndex::new(1, 2), PixelIndex::new(4, 8));
        let mut iter = r.iter();
        for y in 2..8 {
            for x in 1..4 {
                assert_eq!(iter.next(), Some(PixelIndex::new(x, y)));
            }
        }
        assert_eq!(iter.next(), None);
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/interval_set.rs | rust/src/interval_set.rs | use crate::{
geom::{Transform, TransformInPlace, Transformation1D},
traits::BytesAllocated,
};
use bitflags::*;
use inari::{DecInterval, Decoration, Interval};
use smallvec::SmallVec;
use std::{
convert::From,
hash::{Hash, Hasher},
iter::{Extend, FromIterator},
mem::transmute,
slice::Iter,
};
/// A branch cut site.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct Site(u8);

impl Site {
    /// The largest valid site index.
    pub const MAX: u8 = 31;

    /// Creates a new [`Site`].
    ///
    /// Panics if `site > Self::MAX`.
    pub fn new(site: u8) -> Self {
        assert!(site <= Self::MAX);
        Site(site)
    }
}

/// A branch index.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct Branch(u8);

impl Branch {
    /// The largest valid branch index.
    pub const MAX: u8 = 1;

    /// Creates a new [`Branch`].
    ///
    /// Panics if `branch > Self::MAX`.
    pub fn new(branch: u8) -> Self {
        assert!(branch <= Self::MAX);
        Branch(branch)
    }
}

/// A partial function from the set of branch cut sites to the set of branch indices.
///
/// For example, `BranchMap { cut: 0b00101110, chosen: 0b00001010 }`
/// represents a function `{1 ↦ 1, 2 ↦ 0, 3 ↦ 1, 5 ↦ 0}`.
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub struct BranchMap {
    /// Bit i is set iff a branch cut has been performed at site i during evaluation.
    cut: u32,
    /// Bit i holds the branch (0 or 1) chosen at site i;
    /// it is meaningful only when the corresponding bit of `cut` is set.
    chosen: u32,
}

impl BranchMap {
    /// Creates an empty [`BranchMap`].
    pub fn new() -> Self {
        BranchMap { cut: 0, chosen: 0 }
    }

    /// Creates a [`BranchMap`] defined by `self ∪ {site ↦ branch}`.
    ///
    /// Panics if `site ∈ dom(self)`.
    pub fn inserted(self, site: Site, branch: Branch) -> Self {
        assert!(self.cut & (1 << site.0) == 0);
        BranchMap {
            cut: self.cut | (1 << site.0),
            chosen: self.chosen | (u32::from(branch.0) << site.0),
        }
    }

    /// Returns `self ∪ rhs` if `self` and `rhs` are compatible, i.e., they satisfy
    /// `∀x ∈ dom(self) ∩ dom(rhs) : self(x) = rhs(x)`; otherwise, [`None`].
    pub fn union(self, rhs: Self) -> Option<Self> {
        let common = self.cut & rhs.cut;
        if self.chosen & common != rhs.chosen & common {
            // The maps disagree at some site they both cover.
            return None;
        }
        Some(BranchMap {
            cut: self.cut | rhs.cut,
            chosen: self.chosen | rhs.chosen,
        })
    }
}

impl Default for BranchMap {
    fn default() -> Self {
        Self::new()
    }
}
// Used for type punning. The layout must be exactly the same as `DecInterval`.
// NOTE(review): this assumes inari lays out `DecInterval` as an `Interval`
// followed by a `Decoration` — confirm this still holds when upgrading inari.
#[repr(C)]
struct _DecInterval {
    x: Interval,
    d: Decoration,
}

/// An interval augmented with properties that are required by Tupper interval arithmetic.
///
/// The decoration system is used instead of the Tupper IA's interval properties: `def` and `cont`.
/// For a nonempty interval, the relationship between them is:
///
/// | Decoration   | `def`          | `cont`                 |
/// | ------------ | -------------- | ---------------------- |
/// | `Com`, `Dac` | [T, T]         | [T, T]                 |
/// | `Def`        | [T, T]         | [F, F], [F, T]         |
/// | `Trv`        | [F, F], [F, T] | [F, F], [F, T], [T, T] |
///
/// Tupper IA primarily works with sets of intervals.
/// The empty set is represented by the empty set of intervals, instead of the empty interval.
///
/// The interval and the decoration are stored directly rather than through [`DecInterval`]
/// to reduce the size of the struct to 32 bytes from 48, which is due to the alignment.
#[derive(Clone, Copy, Debug)]
pub struct TupperInterval {
    pub x: Interval,
    pub d: Decoration,
    pub g: BranchMap,
}

impl TupperInterval {
    /// Creates a new [`TupperInterval`] with the given [`DecInterval`] and [`BranchMap`].
    ///
    /// Panics if the interval is NaI.
    pub fn new(x: DecInterval, g: BranchMap) -> Self {
        assert!(!x.is_nai());
        // SAFETY: `_DecInterval` mirrors the layout of `DecInterval` (see the note
        // on `_DecInterval`); the transmute splits the interval and its decoration
        // into separate fields without going through accessors.
        let x = unsafe { transmute::<inari::DecInterval, _DecInterval>(x) };
        Self { x: x.x, d: x.d, g }
    }

    /// Returns the [`DecInterval`] part of the interval.
    pub fn dec_interval(self) -> DecInterval {
        // SAFETY: inverse of the transmute in `new`; see the layout note on `_DecInterval`.
        unsafe {
            transmute(_DecInterval {
                x: self.x,
                d: self.d,
            })
        }
    }
}

/// Wraps a [`DecInterval`] with an empty branch map.
impl From<DecInterval> for TupperInterval {
    fn from(x: DecInterval) -> Self {
        Self::new(x, BranchMap::new())
    }
}
// Up to two intervals are stored inline; larger sets spill to the heap.
type TupperIntervalVecBackingArray = [TupperInterval; 2];
type TupperIntervalVec = SmallVec<TupperIntervalVecBackingArray>;

/// A set of [`TupperInterval`]s.
///
/// Notes on the traits [`PartialEq`], [`Eq`] and [`Hash`]:
///
/// - Unlike [`DecInterval`], the traits distinguish interval sets with different decorations.
///
/// - The traits are sensitive to the order by which the intervals have been inserted.
///   To compare interval sets, you first need to call `normalize(true)` on them.
#[derive(Clone, Debug)]
pub struct TupperIntervalSet {
    // The member intervals; empty intervals are never stored (see `insert`).
    xs: TupperIntervalVec,
    /// The decoration of the interval set.
    ///
    /// The same decoration is also stored in each interval.
    /// However, this is the only place where we can keep track of the decoration [`Decoration::Trv`]
    /// if the first intervals being inserted are empty, since they will not be stored in `xs`.
    d: Decoration,
}
impl TupperIntervalSet {
    /// The maximum number of intervals left after normalization.
    ///
    /// See [`Self::normalize`].
    pub const MAX_INTERVALS: usize = 16;

    /// Creates an empty [`TupperIntervalSet`].
    pub fn new() -> Self {
        Self {
            xs: TupperIntervalVec::new(),
            d: Decoration::Com,
        }
    }

    /// Returns the decoration of the interval set.
    ///
    /// An empty set is always [`Decoration::Trv`], regardless of `self.d`.
    pub fn decoration(&self) -> Decoration {
        if self.is_empty() {
            Decoration::Trv
        } else {
            self.d
        }
    }

    /// Inserts an interval to the set and weakens the decoration of the intervals
    /// if the interval being inserted has a weaker decoration than that.
    ///
    /// Empty intervals are not stored, but they still weaken the decoration.
    pub fn insert(&mut self, mut x: TupperInterval) {
        // Invariant maintained by this impl (and by `transform_in_place`):
        // every interval stored in `xs` carries the decoration `self.d`.
        let d = self.d.min(x.d);
        x.d = d;
        if !x.x.is_empty() {
            self.xs.push(x);
        }
        if d < self.d {
            // The new interval weakened the set's decoration;
            // re-propagate it to the previously stored intervals.
            // (Skipped entirely when the decoration is unchanged, avoiding an
            // O(n) sweep on every insertion.)
            self.d = d;
            for x in self.xs.iter_mut() {
                x.d = d;
            }
        }
    }

    /// Returns `true` if the set is empty.
    pub fn is_empty(&self) -> bool {
        self.xs.is_empty()
    }

    /// Returns an iterator over the intervals.
    pub fn iter(&self) -> Iter<'_, TupperInterval> {
        self.xs.iter()
    }

    /// Returns the number of intervals in the set.
    pub fn len(&self) -> usize {
        self.xs.len()
    }

    /// Sorts the intervals in a consistent order, and merges overlapping ones
    /// which share the same branch map. If there are more than [`Self::MAX_INTERVALS`] intervals,
    /// merges all of them regardless of the branch maps by taking the convex hull,
    /// leaving exactly one interval.
    ///
    /// It does nothing when the set is small enough and `force` is `false`.
    pub fn normalize(&mut self, force: bool) {
        let xs = &mut self.xs;
        // A non-spilled `SmallVec` holds at most two intervals (the inline
        // backing array), which is cheap enough to leave as-is unless forced.
        if !force && !xs.spilled() || xs.is_empty() {
            return;
        }
        // Group intervals by branch map and order each group by lower bound
        // so that mergeable intervals become adjacent.
        xs.sort_by(|x, y| {
            (x.g.cut.cmp(&y.g.cut))
                .then(x.g.chosen.cmp(&y.g.chosen))
                .then(x.x.inf().partial_cmp(&y.x.inf()).unwrap())
        });
        // Merge adjacent overlapping intervals in place with read/write cursors.
        let mut hull = Interval::EMPTY;
        let mut g = BranchMap::new();
        let mut write: usize = 0;
        for read in 0..xs.len() {
            let x = xs[read];
            if x.g == g && !x.x.disjoint(hull) {
                hull = hull.convex_hull(x.x);
            } else {
                if !hull.is_empty() {
                    xs[write] = TupperInterval::new(DecInterval::set_dec(hull, self.d), g);
                    write += 1;
                }
                hull = x.x;
                g = x.g;
            }
        }
        // Flush the last pending hull.
        if !hull.is_empty() {
            xs[write] = TupperInterval::new(DecInterval::set_dec(hull, self.d), g);
            write += 1;
        }
        xs.truncate(write);
        // Cap the cardinality by collapsing everything into a single hull.
        if xs.len() > Self::MAX_INTERVALS {
            let hull = xs
                .drain(..)
                .map(|x| x.x)
                .reduce(|acc, x| acc.convex_hull(x))
                .unwrap();
            xs.push(DecInterval::set_dec(hull, self.d).into());
        }
        xs.shrink_to_fit();
    }

    /// Returns the only [`f64`] number in the set if `self` contains exactly one interval
    /// which is a singleton and has a decoration ≥ [`Decoration::Def`]; otherwise, [`None`].
    /// Zero is returned as `+0.0`.
    ///
    /// If a [`f64`] number is obtained, that is the exact value of the evaluated expression.
    pub fn to_f64(&self) -> Option<f64> {
        if self.len() != 1 {
            return None;
        }
        let x = self.xs[0].x;
        if x.is_singleton() && self.d >= Decoration::Def {
            // Use `sup` instead of `inf` to return +0.0, which is more suitable for formatting.
            Some(x.sup())
        } else {
            None
        }
    }
}
impl PartialEq for TupperIntervalSet {
    /// Two sets are equal iff they have the same decoration and contain the
    /// same intervals with the same branch maps, in the same order.
    fn eq(&self, rhs: &Self) -> bool {
        if self.len() != rhs.len() || self.decoration() != rhs.decoration() {
            return false;
        }
        self.iter()
            .zip(rhs.iter())
            .all(|(a, b)| a.x == b.x && a.g == b.g)
    }
}

impl Eq for TupperIntervalSet {}
// Hashes exactly the data that `PartialEq` compares: each interval with its
// branch map, followed by the decoration — keeping `Hash` consistent with `Eq`.
impl Hash for TupperIntervalSet {
    fn hash<H: Hasher>(&self, state: &mut H) {
        for x in self.iter() {
            x.x.hash(state);
            x.g.hash(state);
        }
        self.decoration().hash(state);
    }
}

// The default value is the empty set.
impl Default for TupperIntervalSet {
    fn default() -> Self {
        Self::new()
    }
}

// Inserts the yielded intervals one by one; see `TupperIntervalSet::insert`.
impl Extend<TupperInterval> for TupperIntervalSet {
    fn extend<T: IntoIterator<Item = TupperInterval>>(&mut self, iter: T) {
        for x in iter {
            self.insert(x);
        }
    }
}

// Borrowed variant of the above; `TupperInterval` is `Copy`, so each item is copied.
impl<'a> Extend<&'a TupperInterval> for TupperIntervalSet {
    fn extend<T: IntoIterator<Item = &'a TupperInterval>>(&mut self, iter: T) {
        for x in iter {
            self.insert(*x);
        }
    }
}
// A singleton set (or the empty set, if `x` is empty) from a decorated interval.
impl From<DecInterval> for TupperIntervalSet {
    fn from(x: DecInterval) -> Self {
        let mut xs = Self::new();
        xs.insert(x.into());
        xs
    }
}

// A singleton set (or the empty set, if `x` is empty) from a Tupper interval.
impl From<TupperInterval> for TupperIntervalSet {
    fn from(x: TupperInterval) -> Self {
        let mut xs = Self::new();
        xs.insert(x);
        xs
    }
}

impl FromIterator<TupperInterval> for TupperIntervalSet {
    fn from_iter<T: IntoIterator<Item = TupperInterval>>(iter: T) -> Self {
        let mut xs = Self::new();
        xs.extend(iter);
        xs
    }
}

impl IntoIterator for TupperIntervalSet {
    type Item = TupperInterval;
    type IntoIter = smallvec::IntoIter<TupperIntervalVecBackingArray>;

    fn into_iter(self) -> Self::IntoIter {
        self.xs.into_iter()
    }
}

impl<'a> IntoIterator for &'a TupperIntervalSet {
    type Item = &'a TupperInterval;
    type IntoIter = Iter<'a, TupperInterval>;

    fn into_iter(self) -> Self::IntoIter {
        self.xs.iter()
    }
}

// Reports heap memory held by the backing vector (zero while it is inline).
impl BytesAllocated for TupperIntervalSet {
    fn bytes_allocated(&self) -> usize {
        self.xs.bytes_allocated()
    }
}

// Applies a one-dimensional transformation to every member interval.
impl TransformInPlace<Transformation1D> for TupperIntervalSet {
    fn transform_in_place(&mut self, t: &Transformation1D) {
        // The decoration is capped at `Dac` — NOTE(review): presumably because
        // the transformed value is no longer the exact (`Com`) result; confirm.
        self.d = self.d.min(Decoration::Dac);
        for x in &mut self.xs {
            x.x = x.x.transform(t);
            x.d = self.d;
        }
    }
}
// Each sign is a distinct bit so that any subset of {−, 0, +} is representable.
bitflags! {
    /// A set of signs; a subset of {−, 0, +}.
    #[derive(Clone, Copy, Debug, Eq, PartialEq)]
    pub struct SignSet: u8 {
        const NEG = 1;
        const ZERO = 2;
        const POS = 4;
    }
}

/// A pair of [`SignSet`] and [`Decoration`].
///
/// It is used as a compact version of [`DecInterval`] when only the sign of the interval
/// is of interest.
#[derive(Clone, Copy, Debug)]
pub struct DecSignSet(pub SignSet, pub Decoration);
#[cfg(test)]
mod tests {
    use super::*;
    use inari::{const_dec_interval, const_interval};

    #[test]
    fn decoration() {
        use Decoration::*;
        // The decoration is the minimum over the inserted intervals,
        // and an empty set is always `Trv`.
        let mut xs = TupperIntervalSet::new();
        assert_eq!(xs.decoration(), Trv);
        xs.insert(TupperInterval::from(const_dec_interval!(0.0, 0.0)));
        assert_eq!(xs.decoration(), Com);
        xs.insert(TupperInterval::from(DecInterval::set_dec(
            const_interval!(0.0, 0.0),
            Def,
        )));
        assert_eq!(xs.decoration(), Def);
        // An inserted empty interval is not stored but still weakens
        // the decoration to `Trv`.
        let mut xs = TupperIntervalSet::new();
        assert_eq!(xs.decoration(), Trv);
        xs.insert(TupperInterval::from(DecInterval::EMPTY));
        assert_eq!(xs.decoration(), Trv);
        xs.insert(TupperInterval::from(const_dec_interval!(0.0, 0.0)));
        assert_eq!(xs.decoration(), Trv);
    }

    #[test]
    fn normalize() {
        // Normalizes `input` (forced) and compares it against `output`.
        fn test(input: Vec<TupperInterval>, output: Vec<TupperInterval>) {
            let mut input = input.into_iter().collect::<TupperIntervalSet>();
            let output = output.into_iter().collect::<TupperIntervalSet>();
            input.normalize(true);
            assert_eq!(input, output);
        }
        // Shorthand for a `const_dec_interval!`.
        macro_rules! i {
            ($a:expr, $b:expr) => {
                const_dec_interval!($a, $b)
            };
        }
        let g = BranchMap::new().inserted(Site::new(0), Branch::new(0));
        let g2 = BranchMap::new().inserted(Site::new(0), Branch::new(1));
        test(vec![], vec![]);
        // Overlapping intervals with the same branch map are merged,
        // regardless of insertion order.
        test(
            vec![
                TupperInterval::new(i!(0.0, 2.0), g),
                TupperInterval::new(i!(1.0, 3.0), g),
            ],
            vec![TupperInterval::new(i!(0.0, 3.0), g)],
        );
        test(
            vec![
                TupperInterval::new(i!(1.0, 3.0), g),
                TupperInterval::new(i!(0.0, 2.0), g),
            ],
            vec![TupperInterval::new(i!(0.0, 3.0), g)],
        );
        // Non-overlapping intervals are not merged.
        test(
            vec![
                TupperInterval::new(i!(0.0, 1.0), g),
                TupperInterval::new(i!(2.0, 3.0), g),
            ],
            vec![
                TupperInterval::new(i!(0.0, 1.0), g),
                TupperInterval::new(i!(2.0, 3.0), g),
            ],
        );
        // Intervals with different branch maps are not merged.
        test(
            vec![
                TupperInterval::new(i!(0.0, 2.0), g),
                TupperInterval::new(i!(1.0, 3.0), g2),
            ],
            vec![
                TupperInterval::new(i!(0.0, 2.0), g),
                TupperInterval::new(i!(1.0, 3.0), g2),
            ],
        );
        test(
            vec![
                TupperInterval::new(i!(0.0, 2.0), g),
                TupperInterval::new(i!(1.0, 3.0), g2),
                TupperInterval::new(i!(2.0, 4.0), g),
            ],
            vec![
                TupperInterval::new(i!(0.0, 4.0), g),
                TupperInterval::new(i!(1.0, 3.0), g2),
            ],
        );
    }

    #[test]
    fn struct_size() {
        // Guards the memory-layout assumptions documented on `TupperInterval`.
        assert_eq!(size_of::<TupperIntervalSet>(), 112);
        assert_eq!(size_of::<Option<TupperIntervalSet>>(), 112);
    }

    #[test]
    fn to_f64() {
        let xs = TupperIntervalSet::new();
        assert_eq!(xs.to_f64(), None);
        // A singleton with decoration ≥ `Def` yields its exact value.
        for d in [Decoration::Com, Decoration::Dac, Decoration::Def] {
            let mut xs = TupperIntervalSet::new();
            xs.insert(TupperInterval::from(DecInterval::set_dec(
                const_interval!(0.1, 0.1),
                d,
            )));
            assert_eq!(xs.to_f64(), Some(0.1));
        }
        // `Trv` decoration or a non-singleton interval yields `None`.
        let mut xs = TupperIntervalSet::new();
        xs.insert(TupperInterval::from(DecInterval::set_dec(
            const_interval!(0.1, 0.1),
            Decoration::Trv,
        )));
        assert_eq!(xs.to_f64(), None);
        let mut xs = TupperIntervalSet::new();
        xs.insert(TupperInterval::from(DecInterval::PI));
        assert_eq!(xs.to_f64(), None);
        // The sign bit of 0.0 is positive.
        let mut xs = TupperIntervalSet::new();
        xs.insert(TupperInterval::from(const_dec_interval!(0.0, 0.0)));
        assert_eq!(xs.to_f64(), Some(0.0));
        if let Some(zero) = xs.to_f64() {
            assert!(zero.is_sign_positive());
        }
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/relation.rs | rust/src/relation.rs | use crate::{
ast::{BinaryOp, ExplicitRelOp, Expr, NaryOp, TernaryOp, UnaryOp, ValueType},
binary, bool_constant, constant,
context::Context,
error,
eval_cache::{EvalExplicitCache, EvalImplicitCache, EvalParametricCache, MaximalTermCache},
eval_result::{EvalArgs, EvalExplicitResult, EvalParametricResult, EvalResult},
geom::{TransformInPlace, Transformation1D},
interval_set::TupperIntervalSet,
nary,
ops::{OptionalValueStore, StaticForm, StaticFormKind, StaticTerm, StaticTermKind, StoreIndex},
parse::{format_error, parse_expr},
pown, rational_ops,
real::{Real, RealUnit},
rootn, ternary, unary, uninit, var, vars,
vars::{VarIndex, VarSet, VarType},
visit::*,
};
use inari::{const_dec_interval, const_interval, interval, DecInterval, Decoration, Interval};
use itertools::Itertools;
use rug::{Integer, Rational};
use std::{collections::HashMap, mem::take, str::FromStr, vec};
/// The type of a [`Relation`], which decides the graphing algorithm to be used.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum RelationType {
    /// The relation is of the form y op f(x) ∧ P(x), where P(x) is an optional constraint on x.
    /// The carried [`ExplicitRelOp`] is the relational operator `op`.
    ExplicitFunctionOfX(ExplicitRelOp),
    /// The relation is of the form x op f(y) ∧ P(y), where P(y) is an optional constraint on y.
    /// The carried [`ExplicitRelOp`] is the relational operator `op`.
    ExplicitFunctionOfY(ExplicitRelOp),
    /// The relation is of a general form.
    Implicit,
    /// The relation is of the form x = f(m, n, t) ∧ y = g(m, n, t) ∧ P(m, n, t),
    /// where P(m, n, t) is an optional constraint on the parameters.
    Parametric,
}
/// Indices of the well-known variables within [`EvalArgs`],
/// each present only when the variable occurs in the relation.
#[derive(Clone, Debug)]
pub struct VarIndices {
    pub m: Option<VarIndex>,
    pub n: Option<VarIndex>,
    pub n_theta: Option<VarIndex>,
    pub t: Option<VarIndex>,
    pub x: Option<VarIndex>,
    pub y: Option<VarIndex>,
}
/// A mathematical relation whose graph is to be plotted.
#[derive(Clone, Debug)]
pub struct Relation {
    // The processed (normalized/simplified) AST of the relation.
    ast: Expr,
    // Compiled terms evaluated by `eval`; values are stored in `ts` by store index.
    terms: Vec<StaticTerm>,
    // Compiled formulae; only the first `n_atom_forms` entries are evaluated in `eval`.
    forms: Vec<StaticForm>,
    n_atom_forms: usize,
    // Store of evaluated term values, indexed by `StaticTerm::store_index`.
    ts: OptionalValueStore<TupperIntervalSet>,
    // Number of `eval_*` calls made so far; see `eval_count`.
    eval_count: usize,
    // Store indices of f(y) and f(x) for explicit/parametric relations;
    // see `eval_explicit` and `eval_parametric`.
    x_explicit: Option<StoreIndex>,
    y_explicit: Option<StoreIndex>,
    // Per-store-index flags of the terms that `eval` must (re)evaluate.
    term_to_eval: Vec<bool>,
    // Parameter ranges to cover; see the `m_range`/`n_range`/`n_theta_range`/`t_range` getters.
    m_range: Interval,
    n_range: Interval,
    n_theta_range: Interval,
    t_range: Interval,
    relation_type: RelationType,
    // Variables of the relation (x/y excluded for explicit and parametric relations).
    vars: VarSet,
    // NOTE(review): presumably the evaluation order of the variables, defining
    // their indices within `EvalArgs` — confirm against `create_args`/`var_indices`.
    vars_ordered: Vec<VarSet>,
    var_indices: VarIndices,
}
impl Relation {
/// Returns the processed AST of the relation.
pub fn ast(&self) -> &Expr {
&self.ast
}
/// Creates a new [`Vec<Interval>`] with all elements initialized to [`Interval::ENTIRE`].
pub fn create_args(&self) -> Vec<Interval> {
vec![Interval::ENTIRE; self.vars_ordered.len()]
}
/// Returns the total number of times either of the functions [`Self::eval_implicit`],
/// [`Self::eval_explicit`] or [`Self::eval_parametric`] is called for `self`.
pub fn eval_count(&self) -> usize {
self.eval_count
}
/// Evaluates the explicit relation y = f(x) ∧ P(x) (or x = f(y) ∧ P(y))
/// and returns f'(x) (or f'(y)), where f'(x) is the value of the conditional expression
/// if(P(x), f(x), ¿) transformed by `ty`.
///
/// If P(x) is absent, it is assumed to be always true.
///
/// f'(x) is forced normalized.
///
/// Precondition: `cache` has never been passed to other relations.
pub fn eval_explicit<'a>(
&mut self,
args: &EvalArgs,
ty: &Transformation1D,
cache: &'a mut EvalExplicitCache,
) -> &'a EvalExplicitResult {
assert!(matches!(
self.relation_type,
RelationType::ExplicitFunctionOfX(_) | RelationType::ExplicitFunctionOfY(_)
));
self.eval_count += 1;
cache.setup(&self.terms, &self.vars_ordered);
cache.full.get_or_insert_with(args, || {
self.eval(args, &mut cache.univariate);
let mut ys = match self.relation_type {
RelationType::ExplicitFunctionOfX(_) => {
self.ts.get(self.y_explicit.unwrap()).unwrap().clone()
}
RelationType::ExplicitFunctionOfY(_) => {
self.ts.get(self.x_explicit.unwrap()).unwrap().clone()
}
_ => unreachable!(),
};
ys.normalize(true);
ys.transform_in_place(ty);
ys
})
}
/// Evaluates the implicit relation.
///
/// Precondition: `cache` has never been passed to other relations.
pub fn eval_implicit<'a>(
&mut self,
args: &EvalArgs,
cache: &'a mut EvalImplicitCache,
) -> &'a EvalResult {
assert_eq!(self.relation_type, RelationType::Implicit);
self.eval_count += 1;
cache.setup(&self.terms, &self.vars_ordered);
cache
.full
.get_or_insert_with(args, || self.eval(args, &mut cache.univariate))
}
/// Evaluates the parametric relation x = f(m, n, t) ∧ y = g(m, n, t) ∧ P(m, n, t)
/// and returns (f'(…), g'(…)), where f'(…) and g'(…) are the values of the conditional expressions
/// if(P(…), f(…), ¿) and if(P(…), g(…), ¿) transformed by `tx` and `ty`, respectively.
///
/// If P(…) is absent, it is assumed to be always true.
///
/// f'(…) and g'(…) are forced normalized.
///
/// Precondition: `cache` has never been passed to other relations.
pub fn eval_parametric<'a>(
&mut self,
args: &EvalArgs,
tx: &Transformation1D,
ty: &Transformation1D,
cache: &'a mut EvalParametricCache,
) -> &'a EvalParametricResult {
assert_eq!(self.relation_type, RelationType::Parametric);
self.eval_count += 1;
cache.setup(&self.terms, &self.vars_ordered);
cache.full.get_or_insert_with(args, || {
self.eval(args, &mut cache.univariate);
let mut xs = self.ts.get(self.x_explicit.unwrap()).unwrap().clone();
let mut ys = self.ts.get(self.y_explicit.unwrap()).unwrap().clone();
xs.normalize(true);
ys.normalize(true);
xs.transform_in_place(tx);
ys.transform_in_place(ty);
(xs, ys)
})
}
pub fn forms(&self) -> &Vec<StaticForm> {
&self.forms
}
/// Returns the range of the parameter m that needs to be covered to plot the graph of the relation.
///
/// Each of the bounds is either an integer or ±∞.
pub fn m_range(&self) -> Interval {
self.m_range
}
/// Returns the range of the parameter n that needs to be covered to plot the graph of the relation.
///
/// Each of the bounds is either an integer or ±∞.
pub fn n_range(&self) -> Interval {
self.n_range
}
/// Returns the range of the parameter n_θ that needs to be covered to plot the graph of the relation.
///
/// Each of the bounds is either an integer or ±∞.
pub fn n_theta_range(&self) -> Interval {
self.n_theta_range
}
/// Returns the type of the relation.
pub fn relation_type(&self) -> RelationType {
self.relation_type
}
/// Returns the range of the parameter t that needs to be covered to plot the graph of the relation.
pub fn t_range(&self) -> Interval {
self.t_range
}
/// Returns the set of variables in the relation.
/// For an explicit or parametric relation, the variables x and y are excluded from the set.
pub fn vars(&self) -> VarSet {
self.vars
}
/// Returns the indices of the variables in the relation
/// that is used for indexing in [`EvalArgs`].
pub fn var_indices(&self) -> &VarIndices {
&self.var_indices
}
/// Evaluates the atomic formulas of the relation at `args` and returns
/// one result per atomic form.
///
/// Constant terms keep the values assigned by `initialize`; values cached
/// in `univariate_caches` are restored before evaluation and the freshly
/// computed ones are stored back afterwards.
fn eval(
    &mut self,
    args: &EvalArgs,
    univariate_caches: &mut [MaximalTermCache<1>],
) -> EvalResult {
    let ts = &mut self.ts;
    // Invalidate every non-constant term left over from the previous evaluation.
    for t in &self.terms {
        if !t.vars.is_empty() {
            ts.remove(t.store_index);
        }
    }
    // Re-populate terms whose values were cached for these arguments.
    for cache in univariate_caches.iter_mut() {
        cache.restore(args, ts);
    }
    for t in &self.terms {
        let to_eval = self.term_to_eval[t.store_index.get()];
        match t.kind {
            StaticTermKind::Var(i, ty) => {
                let x = args[i as usize];
                // A non-singleton interval for an integer variable cannot
                // represent a continuous restriction, so demote the
                // decoration from `Com` to `Def`.
                let d = if ty == VarType::Integer && !x.is_singleton() {
                    Decoration::Def
                } else {
                    Decoration::Com
                };
                t.put(ts, DecInterval::set_dec(x, d).into());
            }
            // Only terms actually referenced by an atomic form or an
            // explicit part (see `term_to_eval`) are evaluated.
            _ if to_eval => t.put_eval(&self.terms[..], ts),
            _ => (),
        }
    }
    let r = EvalResult(
        self.forms[..self.n_atom_forms]
            .iter()
            .map(|f| f.eval(ts))
            .collect(),
    );
    // Persist newly computed values for future calls with the same args.
    for cache in univariate_caches.iter_mut() {
        cache.store(args, ts);
    }
    r
}
/// Evaluates all constant terms once and stores their values in `self.ts`,
/// so that subsequent calls to [`Self::eval`] only need to evaluate terms
/// that contain variables.
fn initialize(&mut self) {
    for t in &self.terms {
        self.ts.remove(t.store_index);
        // This condition is different from `let StaticTermKind::Constant(_) = t.kind`,
        // as not all constant expressions are folded. See the comment on [`FoldConstant`].
        if t.vars.is_empty() {
            t.put_eval(&self.terms[..], &mut self.ts);
        }
    }
}
}
impl FromStr for Relation {
    type Err = String;

    /// Parses a relation from its textual representation, runs all AST
    /// transformation passes, and lowers it into the static terms/forms
    /// used by the evaluator.
    fn from_str(s: &str) -> Result<Self, String> {
        // Parse with the built-in context and type-check the AST.
        let mut e = parse_expr(s, &[Context::builtin()])?;
        UpdateMetadata.visit_expr_mut(&mut e);
        if let Some(e) = find_unknown_type_expr(&e) {
            return Err(format_error(
                s,
                e.source_range.clone(),
                "cannot interpret the expression",
            ));
        }
        if e.ty != ValueType::Boolean {
            return Err(format_error(
                s,
                e.source_range.clone(),
                &format!(
                    "relation must be of type `{}` but not `{}`",
                    ValueType::Boolean,
                    e.ty
                ),
            ));
        }
        // Normalization/simplification passes that put the AST into a
        // canonical form before any analysis below.
        NormalizeNotExprs.visit_expr_mut(&mut e);
        PreTransform.visit_expr_mut(&mut e);
        expand_complex_functions(&mut e);
        simplify(&mut e);
        // Parameter ranges explicitly constrained inside the relation
        // (e.g. conjuncts of the form `0 ≤ t`).
        let mut param_ranges = ParamRanges::new();
        param_ranges.refine_with(&e);
        let relation_type = relation_type(&mut e);
        NormalizeRelationalExprs.visit_expr_mut(&mut e);
        ExpandBoole.visit_expr_mut(&mut e);
        simplify(&mut e);
        ModEqTransform.visit_expr_mut(&mut e);
        simplify(&mut e);
        // If the relation is periodic in an integer parameter with period p,
        // covering [0, p − 1] suffices.
        let m_range = {
            let period = function_period(&e, VarSet::M);
            if let Some(period) = &period {
                if let Some(q) = period.rational() {
                    if q.is_zero() {
                        // Period 0 ⇒ the relation does not depend on m.
                        const_interval!(0.0, 0.0)
                    } else {
                        interval!(&format!("[0,{}]", Integer::from(q.numer() - 1))).unwrap()
                    }
                } else {
                    Interval::ENTIRE
                }
            } else {
                Interval::ENTIRE
            }
        }
        .intersection(param_ranges.m_range);
        let n_range = {
            let period = function_period(&e, VarSet::N);
            if let Some(period) = &period {
                if let Some(q) = period.rational() {
                    if q.is_zero() {
                        const_interval!(0.0, 0.0)
                    } else {
                        interval!(&format!("[0,{}]", Integer::from(q.numer() - 1))).unwrap()
                    }
                } else {
                    Interval::ENTIRE
                }
            } else {
                Interval::ENTIRE
            }
        }
        .intersection(param_ranges.n_range);
        let n_theta_range = {
            let period = function_period(&e, VarSet::N_THETA);
            if let Some(period) = &period {
                let (q, unit) = period.rational_unit().unwrap();
                if q.is_zero() {
                    const_interval!(0.0, 0.0)
                } else {
                    match unit {
                        RealUnit::One => Interval::ENTIRE,
                        // A period of qπ in θ corresponds to q/2 turns.
                        RealUnit::Pi => interval!(&format!(
                            "[0,{}]",
                            Integer::from(Rational::from(q / 2).numer() - 1)
                        ))
                        .unwrap(),
                    }
                }
            } else {
                Interval::ENTIRE
            }
        };
        assert_eq!(n_theta_range.trunc(), n_theta_range);
        let t_range = {
            let period = function_period(&e, VarSet::T);
            if let Some(period) = &period {
                let (q, unit) = period.rational_unit().unwrap();
                match unit {
                    RealUnit::One => interval!(&format!("[0,{}]", q)).unwrap(),
                    RealUnit::Pi => interval!(&format!("[0,{}]", q)).unwrap() * Interval::PI,
                }
            } else {
                Interval::ENTIRE
            }
        }
        .intersection(param_ranges.t_range);
        // r and θ are eliminated here; n_θ may be introduced in their place.
        expand_polar_coords(&mut e);
        simplify(&mut e);
        SubDivTransform.visit_expr_mut(&mut e);
        simplify(&mut e);
        PostTransform.visit_expr_mut(&mut e);
        FuseMulAdd.visit_expr_mut(&mut e);
        UpdateMetadata.visit_expr_mut(&mut e);
        assert_eq!(e.ty, ValueType::Boolean);
        // For explicit/parametric relations, the dependent variable(s) are
        // not treated as free variables.
        let vars = match relation_type {
            RelationType::ExplicitFunctionOfX(_) => e.vars.difference(VarSet::Y),
            RelationType::ExplicitFunctionOfY(_) => e.vars.difference(VarSet::X),
            RelationType::Parametric => e.vars.difference(VarSet::X | VarSet::Y),
            _ => e.vars,
        };
        // Canonical ordering of the variables used throughout evaluation.
        let vars_ordered = [
            VarSet::M,
            VarSet::N,
            VarSet::N_THETA,
            VarSet::T,
            VarSet::X,
            VarSet::Y,
        ]
        .into_iter()
        .filter(|&v| vars.contains(v))
        .collect::<Vec<_>>();
        let var_index = vars_ordered
            .iter()
            .enumerate()
            .map(|(i, &v)| (v, i as VarIndex))
            .collect::<HashMap<VarSet, VarIndex>>();
        // Lower the AST into static terms and forms.
        let mut v = AssignSite::new();
        v.visit_expr_mut(&mut e);
        let mut collect_real_exprs = CollectRealExprs::new(vars);
        collect_real_exprs.visit_expr_mut(&mut e);
        let collector = CollectStatic::new(v, collect_real_exprs, &var_index);
        let terms = collector.terms.clone();
        let forms = collector.forms.clone();
        let n_terms = terms.len();
        let n_atom_forms = forms
            .iter()
            .filter(|f| matches!(f.kind, StaticFormKind::Atomic(_, _)))
            .count();
        // Locate the terms that define x/y explicitly, if any.
        let mut v = FindExplicitRelation::new(&collector, VarSet::X);
        v.visit_expr(&e);
        let x_explicit = v.get();
        let mut v = FindExplicitRelation::new(&collector, VarSet::Y);
        v.visit_expr(&e);
        let y_explicit = v.get();
        // Mark the terms that must be (re)evaluated on every `eval` call.
        let mut term_to_eval = vec![false; n_terms];
        for f in &forms {
            if let StaticFormKind::Atomic(_, i) = &f.kind {
                term_to_eval[i.get()] = true;
            }
        }
        if let Some(i) = x_explicit {
            term_to_eval[i.get()] = true;
        }
        if let Some(i) = y_explicit {
            term_to_eval[i.get()] = true;
        }
        let mut slf = Self {
            ast: e,
            terms,
            forms,
            n_atom_forms,
            ts: OptionalValueStore::new(n_terms),
            eval_count: 0,
            x_explicit,
            y_explicit,
            term_to_eval,
            m_range,
            n_range,
            n_theta_range,
            t_range,
            relation_type,
            vars,
            vars_ordered,
            var_indices: VarIndices {
                m: var_index.get(&VarSet::M).copied(),
                n: var_index.get(&VarSet::N).copied(),
                n_theta: var_index.get(&VarSet::N_THETA).copied(),
                t: var_index.get(&VarSet::T).copied(),
                x: var_index.get(&VarSet::X).copied(),
                y: var_index.get(&VarSet::Y).copied(),
            },
        };
        // Pre-evaluate constant terms (see `initialize`).
        slf.initialize();
        Ok(slf)
    }
}
/// Transforms an expression that contains r or θ into the equivalent expression
/// that contains only x, y and n_θ. When the result contains n_θ,
/// it actually represents a disjunction of expressions indexed by n_θ.
///
/// The four disjuncts produced below cover the two sign choices of
/// r = ±sqrt(x² + y²) combined with the matching branch of θ.
///
/// Precondition: `e` has been pre-transformed and simplified.
fn expand_polar_coords(e: &mut Expr) {
    use {BinaryOp::*, NaryOp::*, TernaryOp::*, UnaryOp::*};
    let ctx = Context::builtin();
    let x = ctx.get_constant("x").unwrap();
    let y = ctx.get_constant("y").unwrap();
    let minus_x = Expr::nary(Times, vec![Expr::minus_one(), x.clone()]);
    let minus_y = Expr::nary(Times, vec![Expr::minus_one(), y.clone()]);
    // hypot = (x² + y²)^(1/2), i.e. the nonnegative branch of r.
    let hypot = Expr::binary(
        Pow,
        Expr::nary(
            Plus,
            vec![
                Expr::binary(Pow, x.clone(), Expr::two()),
                Expr::binary(Pow, y.clone(), Expr::two()),
            ],
        ),
        Expr::one_half(),
    );
    let neg_hypot = Expr::nary(Times, vec![Expr::minus_one(), hypot.clone()]);
    let atan2 = Expr::ternary(
        IfThenElse,
        // Restrict the domain to x > -ε |y| to reduce the computational cost.
        Expr::unary(
            BooleLtZero,
            Expr::nary(
                Plus,
                vec![
                    minus_x.clone(),
                    Expr::nary(
                        Times,
                        vec![
                            // ε = 2⁻¹⁴ (the literal is exactly -2⁻¹⁴).
                            Expr::constant(
                                const_dec_interval!(-6.103515625e-05, -6.103515625e-05).into(),
                            ),
                            Expr::unary(Abs, y.clone()),
                        ],
                    ),
                ],
            ),
        ),
        Expr::binary(Atan2, y.clone(), x.clone()),
        Expr::undefined(),
    );
    // Same as `atan2` but for the antipodal point (-x, -y).
    let anti_atan2 = Expr::ternary(
        IfThenElse,
        // Restrict the domain to x < ε |y| to reduce the computational cost.
        Expr::unary(
            BooleLtZero,
            Expr::nary(
                Plus,
                vec![
                    x.clone(),
                    Expr::nary(
                        Times,
                        vec![
                            Expr::constant(
                                const_dec_interval!(-6.103515625e-05, -6.103515625e-05).into(),
                            ),
                            Expr::unary(Abs, y.clone()),
                        ],
                    ),
                ],
            ),
        ),
        Expr::binary(Atan2, minus_y.clone(), minus_x.clone()),
        Expr::undefined(),
    );
    let two_pi_n_theta = Expr::nary(
        Times,
        vec![Expr::tau(), ctx.get_constant("<n-theta>").unwrap()],
    );
    // e11 = e /. {r → sqrt(x^2 + y^2), θ → 2π n_θ + atan2(y, x)}.
    let e11 = {
        let mut e = e.clone();
        let mut v = ReplaceAll::new(|e| match e {
            var!(x) if x == "r" => Some(hypot.clone()),
            var!(x) if x == "theta" => Some(Expr::nary(
                Plus,
                vec![two_pi_n_theta.clone(), atan2.clone()],
            )),
            _ => None,
        });
        v.visit_expr_mut(&mut e);
        if !v.modified {
            // `e` contains neither r nor θ.
            return;
        }
        e
    };
    // e12 = e /. {r → sqrt(x^2 + y^2), θ → π + 2π n_θ + atan2(-y, -x)}.
    let e12 = {
        let mut e = e.clone();
        let mut v = ReplaceAll::new(|e| match e {
            var!(x) if x == "r" => Some(hypot.clone()),
            var!(x) if x == "theta" => Some(Expr::nary(
                Plus,
                vec![Expr::pi(), two_pi_n_theta.clone(), anti_atan2.clone()],
            )),
            _ => None,
        });
        v.visit_expr_mut(&mut e);
        e
    };
    // e21 = e /. {r → -sqrt(x^2 + y^2), θ → 2π n_θ + atan2(-y, -x)}.
    let e21 = {
        let mut e = e.clone();
        let mut v = ReplaceAll::new(|e| match e {
            var!(x) if x == "r" => Some(neg_hypot.clone()),
            var!(x) if x == "theta" => Some(Expr::nary(
                Plus,
                vec![two_pi_n_theta.clone(), anti_atan2.clone()],
            )),
            _ => None,
        });
        v.visit_expr_mut(&mut e);
        e
    };
    // e22 = e /. {r → -sqrt(x^2 + y^2), θ → π + 2π n_θ + atan2(y, x)}.
    let e22 = {
        let mut e = e.clone();
        let mut v = ReplaceAll::new(|e| match e {
            var!(x) if x == "r" => Some(neg_hypot.clone()),
            var!(x) if x == "theta" => Some(Expr::nary(
                Plus,
                vec![Expr::pi(), two_pi_n_theta.clone(), atan2.clone()],
            )),
            _ => None,
        });
        v.visit_expr_mut(&mut e);
        e
    };
    // The relation holds iff at least one of the four substitutions holds.
    *e = Expr::nary(OrN, vec![e11, e12, e21, e22]);
}
/// Returns the period of a function of a variable t,
/// i.e., a real number p that satisfies (e /. t → t + p) = e.
/// If the period is 0, the expression is independent of the variable.
///
/// Precondition: `e` has been pre-transformed and simplified.
fn function_period(e: &Expr, variable: VarSet) -> Option<Real> {
    use {BinaryOp::*, NaryOp::*, UnaryOp::*};

    /// Returns a common period of two subexpressions with periods `xp`/`yp`.
    /// A period of 0 means independence, so the other period wins;
    /// otherwise the lcm of the rational parts is taken when the units agree.
    fn common_period(xp: Real, yp: Real) -> Option<Real> {
        match (xp.rational_unit(), yp.rational_unit()) {
            (Some((q, _)), _) if q.is_zero() => Some(yp),
            (_, Some((r, _))) if r.is_zero() => Some(xp),
            (Some((q, q_unit)), Some((r, r_unit))) if q_unit == r_unit => Some(Real::from((
                rational_ops::lcm(q.clone(), r.clone()).unwrap(),
                q_unit,
            ))),
            _ => None,
        }
    }

    /// Structural recursion: a composite expression is periodic if all of
    /// its children are, with the common period of the children.
    fn generic_function_period(e: &Expr, variable: VarSet) -> Option<Real> {
        match e {
            bool_constant!(_) | constant!(_) => Some(Real::zero()),
            // A bare occurrence of the variable itself is aperiodic.
            x @ var!(_) if x.vars.contains(variable) => None,
            var!(_) => Some(Real::zero()),
            unary!(_, x) => function_period(x, variable),
            binary!(_, x, y) => {
                let xp = function_period(x, variable)?;
                let yp = function_period(y, variable)?;
                common_period(xp, yp)
            }
            ternary!(_, x, y, z) => {
                let xp = function_period(x, variable)?;
                let yp = function_period(y, variable)?;
                let zp = function_period(z, variable)?;
                common_period(common_period(xp, yp)?, zp)
            }
            nary!(_, xs) => xs
                .iter()
                .map(|x| function_period(x, variable))
                .collect::<Vec<_>>()
                .into_iter()
                .try_fold(Real::zero(), |x, y| common_period(x, y?)),
            pown!(_, _) | rootn!(_, _) | error!() | uninit!() => {
                panic!("unexpected kind of expression")
            }
        }
    }

    // Special cases that are genuinely periodic in the variable:
    // trigonometric functions and `mod`. Everything else falls through to
    // the structural recursion above.
    match e {
        unary!(op @ (Cos | Sin | Tan), x) if x.vars.contains(variable) => match x {
            var!(_) => {
                // op(t)
                match op {
                    Tan => Some(Real::pi()),
                    _ => Some(Real::tau()),
                }
            }
            nary!(Plus, xs) => match xs
                .iter()
                .filter(|x| x.vars.contains(variable))
                .exactly_one()
            {
                Ok(var!(_)) => {
                    // op(… + t + …)
                    match op {
                        Tan => Some(Real::pi()),
                        _ => Some(Real::tau()),
                    }
                }
                Ok(nary!(Times, xs)) => match &xs[..] {
                    [constant!(a), var!(_)] => {
                        // op(… + a t + …)
                        if let Some((q, unit)) = a.rational_unit() {
                            // Multiplying by `a` divides the period by `a`,
                            // swapping the unit between 1 and π.
                            let unit = match unit {
                                RealUnit::One => RealUnit::Pi,
                                RealUnit::Pi => RealUnit::One,
                            };
                            match op {
                                Tan => Some(Real::from((q.clone().recip(), unit))),
                                _ => Some(Real::from((2 * q.clone().recip(), unit))),
                            }
                        } else {
                            None
                        }
                    }
                    _ => generic_function_period(e, variable),
                },
                _ => generic_function_period(e, variable),
            },
            nary!(Times, xs) => match &xs[..] {
                [constant!(a), var!(_)] => {
                    // op(a t)
                    if let Some((q, unit)) = a.rational_unit() {
                        let unit = match unit {
                            RealUnit::One => RealUnit::Pi,
                            RealUnit::Pi => RealUnit::One,
                        };
                        match op {
                            Tan => Some(Real::from((q.clone().recip(), unit))),
                            _ => Some(Real::from((2 * q.clone().recip(), unit))),
                        }
                    } else {
                        None
                    }
                }
                _ => generic_function_period(e, variable),
            },
            _ => generic_function_period(e, variable),
        },
        binary!(Mod, x, constant!(y))
            if x.vars.contains(variable) && y.rational_unit().is_some() =>
        {
            // mod(…, y) is periodic with period |y|.
            let p = y.clone().abs();
            match x {
                var!(_) => {
                    // mod(t, y)
                    Some(p)
                }
                nary!(Plus, xs) => match xs
                    .iter()
                    .filter(|x| x.vars.contains(variable))
                    .exactly_one()
                {
                    Ok(var!(_)) => {
                        // mod(… + t + …, y)
                        Some(p)
                    }
                    Ok(nary!(Times, xs)) => match &xs[..] {
                        [constant!(a), var!(_)] => {
                            // mod(… + a t + …, y)
                            match p / a.clone().abs() {
                                p if p.rational_unit().is_some() => Some(p),
                                _ => None,
                            }
                        }
                        _ => generic_function_period(e, variable),
                    },
                    _ => generic_function_period(e, variable),
                },
                nary!(Times, xs) => match &xs[..] {
                    [constant!(a), var!(_)] => {
                        // mod(a t, y)
                        match p / a.clone().abs() {
                            p if p.rational_unit().is_some() => Some(p),
                            _ => None,
                        }
                    }
                    _ => generic_function_period(e, variable),
                },
                _ => generic_function_period(x, variable),
            }
        }
        _ => generic_function_period(e, variable),
    }
}
/// Over-approximations of the ranges of the parameters m, n and t, inferred
/// from simple relational constraints (e.g. `t ≥ 0`) found in the relation.
struct ParamRanges {
    m_range: Interval,
    n_range: Interval,
    t_range: Interval,
}

impl ParamRanges {
    /// Creates ranges with no constraints (all entire).
    fn new() -> Self {
        Self {
            m_range: Interval::ENTIRE,
            n_range: Interval::ENTIRE,
            t_range: Interval::ENTIRE,
        }
    }

    /// Narrows the ranges using constraints of the forms `v ⋛ a`, `a ⋛ v`,
    /// `v = a` and conjunctions thereof, where `v` is m, n or t and `a` is
    /// a constant. Strict inequalities are conservatively treated as
    /// non-strict (the result is only an over-approximation).
    fn refine_with(&mut self, e: &Expr) {
        use {BinaryOp::*, NaryOp::*};
        match e {
            // v ≥ a (or equivalently a ≤ v): lower bound.
            binary!(Ge | Gt, var!(x), constant!(a)) | binary!(Le | Lt, constant!(a), var!(x)) => {
                if let Some(r) = get_mut(self, x) {
                    let inf = a
                        .interval()
                        .iter()
                        .fold(f64::INFINITY, |acc, x| acc.min(x.x.inf()));
                    *r = r.intersection(interval!(inf, f64::INFINITY).unwrap_or(Interval::EMPTY));
                }
            }
            // v ≤ a (or equivalently a ≥ v): upper bound.
            binary!(Le | Lt, var!(x), constant!(a)) | binary!(Ge | Gt, constant!(a), var!(x)) => {
                if let Some(r) = get_mut(self, x) {
                    let sup = a
                        .interval()
                        .iter()
                        .fold(f64::NEG_INFINITY, |acc, x| acc.max(x.x.sup()));
                    *r = r
                        .intersection(interval!(f64::NEG_INFINITY, sup).unwrap_or(Interval::EMPTY));
                }
            }
            // v = a: intersect with the hull of the constant's enclosure.
            binary!(Eq, var!(x), constant!(a)) | binary!(Eq, constant!(a), var!(x)) => {
                if let Some(r) = get_mut(self, x) {
                    *r = r.intersection(
                        a.interval()
                            .iter()
                            .fold(Interval::EMPTY, |acc, x| acc.convex_hull(x.x)),
                    );
                }
            }
            // Every conjunct of a conjunction constrains the ranges.
            nary!(AndN, es) => {
                for e in es {
                    self.refine_with(e);
                }
            }
            _ => return,
        }
        // m and n are integer parameters; keep the bounds integral.
        self.m_range = self.m_range.trunc();
        self.n_range = self.n_range.trunc();
        // Empty ranges are not supported.
        const I_ZERO: Interval = const_interval!(0.0, 0.0);
        if self.m_range.is_empty() {
            self.m_range = I_ZERO;
        }
        if self.n_range.is_empty() {
            self.n_range = I_ZERO;
        }
        if self.t_range.is_empty() {
            self.t_range = I_ZERO;
        }

        /// Maps a variable name to the range it constrains, if any.
        fn get_mut<'a>(slf: &'a mut ParamRanges, param_name: &str) -> Option<&'a mut Interval> {
            match param_name {
                "m" => Some(&mut slf.m_range),
                "n" => Some(&mut slf.n_range),
                "t" => Some(&mut slf.t_range),
                _ => None,
            }
        }
    }
}
/// Pieces of a relation of the form `y op f(x) ∧ P(x) ∧ …` collected while
/// trying to recognize it as an explicit relation.
struct ExplicitRelationParts {
    op: ExplicitRelOp,
    y: Option<Expr>, // y op f(x)
    px: Vec<Expr>,   // P(x)
}

/// Tries to identify `e` as an explicit relation.
///
/// On success, `e` is replaced by `ExplicitRel(op)(y, f'(x))`, where `f'`
/// embeds the side conditions `P(x)` via `if-then-else`, and the relational
/// operator is returned. On failure, `e` is left unchanged and [`None`] is
/// returned.
fn normalize_explicit_relation(
    e: &mut Expr,
    y_var: VarSet,
    x_var: VarSet,
) -> Option<ExplicitRelOp> {
    use {NaryOp::*, TernaryOp::*, UnaryOp::*};
    let mut parts = ExplicitRelationParts {
        op: ExplicitRelOp::Eq,
        y: None,
        px: vec![],
    };
    // Work on a clone first so `e` is untouched when recognition fails.
    if !normalize_explicit_relation_impl(&mut e.clone(), &mut parts, y_var, x_var) {
        return None;
    }
    if let Some(y) = &mut parts.y {
        if !parts.px.is_empty() {
            // Attach the side conditions: f(x) is only defined where ⋀P(x).
            let cond = Expr::unary(Boole, Expr::nary(AndN, parts.px));
            if let binary!(_, _, f) = y {
                *f = Expr::ternary(IfThenElse, cond, take(f), Expr::undefined());
            } else {
                unreachable!();
            }
        }
        *e = take(y);
        Some(parts.op)
    } else {
        None
    }
}
fn normalize_explicit_relation_impl(
e: &mut Expr,
parts: &mut ExplicitRelationParts,
y_var: VarSet,
x_var: VarSet,
) -> bool {
use {BinaryOp::*, NaryOp::*};
macro_rules! explicit_rel_op {
() => {
Eq | Ge | Gt | Le | Lt
};
}
match e {
binary!(op @ explicit_rel_op!(), y @ var!(_), e)
if y.vars == y_var && x_var.contains(e.vars) =>
{
parts.y.is_none() && {
parts.op = match op {
Eq => ExplicitRelOp::Eq,
Ge => ExplicitRelOp::Ge,
Gt => ExplicitRelOp::Gt,
Le => ExplicitRelOp::Le,
Lt => ExplicitRelOp::Lt,
_ => unreachable!(),
};
parts.y = Some(Expr::binary(ExplicitRel(parts.op), take(y), take(e)));
true
}
}
binary!(op @ explicit_rel_op!(), e, y @ var!(_))
if y.vars == y_var && x_var.contains(e.vars) =>
{
parts.y.is_none() && {
parts.op = match op {
Eq => ExplicitRelOp::Eq,
Ge => ExplicitRelOp::Le,
Gt => ExplicitRelOp::Lt,
Le => ExplicitRelOp::Ge,
Lt => ExplicitRelOp::Gt,
_ => unreachable!(),
};
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | true |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/ternary.rs | rust/src/ternary.rs | use std::ops::{BitAnd, BitOr, Not};
use Ternary::*;

/// A ternary value which could be either [`False`], [`Uncertain`], or [`True`].
///
/// The values are ordered as: [`False`] < [`Uncertain`] < [`True`].
///
/// The default value is [`Uncertain`].
#[derive(Clone, Copy, Debug, Default, Eq, Ord, PartialEq, PartialOrd)]
pub enum Ternary {
    False,
    #[default]
    Uncertain,
    True,
}

impl Ternary {
    /// Returns `true` if `self` is [`False`].
    pub fn certainly_false(self) -> bool {
        matches!(self, False)
    }

    /// Returns `true` if `self` is [`True`].
    pub fn certainly_true(self) -> bool {
        matches!(self, True)
    }

    /// Returns `true` if `self` is either [`False`] or [`Uncertain`].
    pub fn possibly_false(self) -> bool {
        self != True
    }

    /// Returns `true` if `self` is either [`True`] or [`Uncertain`].
    pub fn possibly_true(self) -> bool {
        self != False
    }
}

/// Ternary conjunction: the weaker (smaller) of the two operands.
impl BitAnd for Ternary {
    type Output = Self;

    fn bitand(self, rhs: Self) -> Self::Output {
        std::cmp::min(self, rhs)
    }
}

/// Ternary disjunction: the stronger (larger) of the two operands.
impl BitOr for Ternary {
    type Output = Self;

    fn bitor(self, rhs: Self) -> Self::Output {
        std::cmp::max(self, rhs)
    }
}

impl From<bool> for Ternary {
    fn from(x: bool) -> Self {
        match x {
            false => False,
            true => True,
        }
    }
}

/// Converts a pair of flags (holds certainly, holds possibly) into a
/// [`Ternary`]. Panics on the contradictory pair `(true, false)`.
impl From<(bool, bool)> for Ternary {
    fn from(x: (bool, bool)) -> Self {
        match x {
            (false, false) => False,
            (false, true) => Uncertain,
            (true, true) => True,
            _ => panic!(),
        }
    }
}

/// Ternary negation; [`Uncertain`] is its own negation.
impl Not for Ternary {
    type Output = Ternary;

    fn not(self) -> Self::Output {
        match self {
            False => True,
            Uncertain => Uncertain,
            True => False,
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Exhaustively checks conversions, predicates, ordering, and the
    /// `&`, `|`, `!` truth tables of [`Ternary`].
    #[test]
    fn ternary() {
        assert_eq!(Ternary::default(), Uncertain);
        assert_eq!(Ternary::from(false), False);
        assert_eq!(Ternary::from(true), True);
        assert_eq!(Ternary::from((false, false)), False);
        assert_eq!(Ternary::from((false, true)), Uncertain);
        assert_eq!(Ternary::from((true, true)), True);
        assert!(False.certainly_false());
        assert!(!False.certainly_true());
        assert!(False.possibly_false());
        assert!(!False.possibly_true());
        assert!(!Uncertain.certainly_false());
        assert!(!Uncertain.certainly_true());
        assert!(Uncertain.possibly_false());
        assert!(Uncertain.possibly_true());
        assert!(!True.certainly_false());
        assert!(True.certainly_true());
        assert!(!True.possibly_false());
        assert!(True.possibly_true());
        assert!(False < Uncertain);
        assert!(Uncertain < True);
        assert_eq!(False & False, False);
        assert_eq!(False & Uncertain, False);
        assert_eq!(False & True, False);
        assert_eq!(Uncertain & Uncertain, Uncertain);
        assert_eq!(Uncertain & True, Uncertain);
        assert_eq!(True & True, True);
        assert_eq!(False | False, False);
        assert_eq!(False | Uncertain, Uncertain);
        assert_eq!(False | True, True);
        assert_eq!(Uncertain | Uncertain, Uncertain);
        assert_eq!(Uncertain | True, True);
        assert_eq!(True | True, True);
        assert_eq!(!False, True);
        assert_eq!(!Uncertain, Uncertain);
        assert_eq!(!True, False);
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/arb_interval_set_ops.rs | rust/src/arb_interval_set_ops.rs | use crate::{
interval_set::{Site, TupperInterval, TupperIntervalSet},
interval_set_ops, Ternary,
};
use inari::{const_interval, interval, DecInterval, Decoration, Interval};
use itertools::Itertools;
// Comparison macros that return a `Ternary`: the pair passed to
// `Ternary::from` is (the relation holds for every point of `$x`,
// the relation holds for some point of `$x`).
// The `const _: () = assert!(…)` forces `$y` to be a finite constant,
// so the endpoint comparisons are exact.
macro_rules! ge {
    ($x:expr, $y:expr) => {{
        const _: () = assert!(f64::NEG_INFINITY < $y && $y < f64::INFINITY);
        Ternary::from(($x.inf() >= $y, $x.sup() >= $y))
    }};
}

macro_rules! gt {
    ($x:expr, $y:expr) => {{
        const _: () = assert!(f64::NEG_INFINITY < $y && $y < f64::INFINITY);
        Ternary::from(($x.inf() > $y, $x.sup() > $y))
    }};
}

// For ≤/<, the roles of the endpoints are swapped relative to ≥/>.
macro_rules! le {
    ($x:expr, $y:expr) => {{
        const _: () = assert!(f64::NEG_INFINITY < $y && $y < f64::INFINITY);
        Ternary::from(($x.sup() <= $y, $x.inf() <= $y))
    }};
}

macro_rules! lt {
    ($x:expr, $y:expr) => {{
        const _: () = assert!(f64::NEG_INFINITY < $y && $y < f64::INFINITY);
        Ternary::from(($x.sup() < $y, $x.inf() < $y))
    }};
}

// Certainly ≠ iff `$y` is not contained in `$x`;
// possibly ≠ iff `$x` is not the singleton [$y, $y].
macro_rules! ne {
    ($x:expr, $y:expr) => {{
        const _: () = assert!(f64::NEG_INFINITY < $y && $y < f64::INFINITY);
        Ternary::from((!$x.contains($y), $x != const_interval!($y, $y)))
    }};
}
/// Implements a unary or binary interval function on `TupperIntervalSet`.
///
/// * `$result` — an expression computing an interval enclosure of the image
///   of `$x` (and `$y`).
/// * `$def` — a [`Ternary`] telling whether the function is defined on
///   every/some point of the input; defaults to [`Ternary::True`].
///
/// Inputs that are certainly out of the domain are dropped. The result is
/// decorated at most `Dac` (continuity on the input is assumed), or `Trv`
/// when the input is only possibly in the domain.
macro_rules! impl_arb_op {
    ($op:ident($x:ident), $result:expr) => {
        impl_arb_op!($op($x), $result, Ternary::True);
    };

    ($op:ident($x:ident), $result:expr, $def:expr) => {
        pub fn $op(&self) -> Self {
            let mut rs = Self::new();
            for x in self {
                let $x = x.x;
                let def = $def;
                if def.possibly_true() {
                    let dec = if def.certainly_true() {
                        // Assuming the restriction of f to x is continuous.
                        Decoration::Dac.min(x.d)
                    } else {
                        Decoration::Trv
                    };
                    rs.insert(TupperInterval::new(DecInterval::set_dec($result, dec), x.g));
                }
            }
            rs.normalize(false);
            rs
        }
    };

    ($op:ident($x:ident, $y:ident), $result:expr, $def:expr) => {
        pub fn $op(&self, rhs: &Self) -> Self {
            let mut rs = Self::new();
            for x in self {
                for y in rhs {
                    // Only combine intervals with compatible branch-cut graphs.
                    if let Some(g) = x.g.union(y.g) {
                        let $x = x.x;
                        let $y = y.x;
                        let def = $def;
                        if def.possibly_true() {
                            let dec = if def.certainly_true() {
                                // Assuming the restriction of f to x × y is continuous.
                                Decoration::Dac.min(x.d).min(y.d)
                            } else {
                                Decoration::Trv
                            };
                            rs.insert(TupperInterval::new(DecInterval::set_dec($result, dec), g));
                        }
                    }
                }
            }
            rs.normalize(false);
            rs
        }
    };
}
/// Converts `x` into the singleton interval [x, x].
///
/// NOTE(review): `interval!(x, x)` fails (and this unwraps) for NaN/±∞;
/// callers appear to pass only finite endpoints — confirm before reusing.
fn i(x: f64) -> Interval {
    interval!(x, x).unwrap()
}

// Arb's sin/cos return relatively wide intervals around extrema.
// In that case, we should use the MPFR's functions instead.
const USE_MPFR_SIN_COS_ABOVE: f64 = 0.999999;

// Frequently used constant intervals.
const M_ONE_TO_ONE: Interval = const_interval!(-1.0, 1.0);
const N_INF_TO_ZERO: Interval = const_interval!(f64::NEG_INFINITY, 0.0);
const ONE_HALF: Interval = const_interval!(0.5, 0.5);
const ONE_TO_INF: Interval = const_interval!(1.0, f64::INFINITY);
const ZERO: Interval = const_interval!(0.0, 0.0);
const ZERO_TO_INF: Interval = const_interval!(0.0, f64::INFINITY);
const ZERO_TO_ONE: Interval = const_interval!(0.0, 1.0);
impl TupperIntervalSet {
// Mid-rad IA, which is used by Arb, cannot represent half-bounded intervals.
// So we need to handle such inputs and unbounded functions explicitly.
// Inverse trig/hyperbolic functions: Arb is used on interiors; inputs that
// touch a domain endpoint or are unbounded fall back to the inari (MPFR)
// implementation, since mid-rad IA cannot represent them (see above).
impl_arb_op!(
    acos(x),
    if x.interior(M_ONE_TO_ONE) {
        arb_acos(x)
    } else {
        x.acos()
    },
    ge!(x, -1.0) & le!(x, 1.0)
);

impl_arb_op!(
    acosh(x),
    if x.inf() > 1.0 && x.sup() < f64::INFINITY {
        arb_acosh(x)
    } else {
        x.acosh()
    },
    ge!(x, 1.0)
);

// Airy functions: handle [a, +∞] with a ≥ 0 via monotone endpoint bounds;
// elsewhere, intersect Arb's enclosure with an analytic envelope where one
// is available.
impl_arb_op!(airy_ai(x), {
    let a = x.inf();
    let b = x.sup();
    if a >= 0.0 && b == f64::INFINITY {
        // [0, Ai(a)]
        interval!(0.0, arb_airy_ai_ru(i(a))).unwrap()
    } else {
        arb_airy_ai(x).intersection(airy_envelope(x))
    }
});

impl_arb_op!(airy_ai_prime(x), {
    let a = x.inf();
    let b = x.sup();
    if a >= 0.0 && b == f64::INFINITY {
        // [Ai'(a), 0]
        interval!(arb_airy_ai_prime_rd(i(a)), 0.0).unwrap()
    } else {
        arb_airy_ai_prime(x)
    }
});

impl_arb_op!(airy_bi(x), {
    let a = x.inf();
    let b = x.sup();
    if a >= 0.0 && b == f64::INFINITY {
        // [Bi(a), +∞]
        interval!(arb_airy_bi_rd(i(a)), f64::INFINITY).unwrap()
    } else {
        arb_airy_bi(x).intersection(airy_envelope(x))
    }
});

impl_arb_op!(airy_bi_prime(x), {
    let a = x.inf();
    let b = x.sup();
    if a >= 0.0 && b == f64::INFINITY {
        // [Bi'(a), +∞]
        interval!(arb_airy_bi_prime_rd(i(a)), f64::INFINITY).unwrap()
    } else {
        arb_airy_bi_prime(x)
    }
});

impl_arb_op!(
    asin(x),
    if x.interior(M_ONE_TO_ONE) {
        arb_asin(x)
    } else {
        x.asin()
    },
    ge!(x, -1.0) & le!(x, 1.0)
);

impl_arb_op!(
    asinh(x),
    if x.is_common_interval() {
        arb_asinh(x)
    } else {
        x.asinh()
    }
);

impl_arb_op!(
    atan(x),
    if x.is_common_interval() {
        arb_atan(x)
    } else {
        x.atan()
    }
);
/// Evaluates atan2(y, x), where `self` holds y and `rhs` holds x.
///
/// The fast Arb path requires every input interval to be bounded and
/// every combinable pair to satisfy x > 0 or 0 ∉ y; otherwise the
/// general implementation `atan2_impl` is used.
pub fn atan2(&self, rhs: &Self, site: Option<Site>) -> Self {
    if self.iter().all(|x| x.x.is_common_interval())
        && rhs.iter().all(|x| x.x.is_common_interval())
        && self
            .iter()
            .cartesian_product(rhs.iter())
            .filter(|(x, y)| x.g.union(y.g).is_some())
            .all(|(y, x)| x.x.inf() > 0.0 || !y.x.contains(0.0))
    {
        let mut rs = Self::new();
        for x in self {
            for y in rhs {
                if let Some(g) = x.g.union(y.g) {
                    // Rebind so that `y` is the first argument of atan2(y, x):
                    // `self` (bound to `x` by the loop) is the y argument.
                    let (y, x) = (x, y);
                    let dec = Decoration::Com.min(x.d).min(y.d);
                    let z = arb_atan2(y.x, x.x);
                    rs.insert(TupperInterval::new(DecInterval::set_dec(z, dec), g));
                }
            }
        }
        rs.normalize(false);
        rs
    } else {
        self.atan2_impl(rhs, site)
    }
}
impl_arb_op!(
    atanh(x),
    if x.interior(M_ONE_TO_ONE) {
        arb_atanh(x)
    } else {
        x.atanh()
    },
    gt!(x, -1.0) & lt!(x, 1.0)
);

// Bessel functions: the order `n` must be an integer or half-integer
// singleton (asserted in `$def`). Monotone pieces are enclosed by
// evaluating at the endpoints with directed rounding (`_rd`/`_ru`);
// oscillating regions are intersected with an analytic envelope.
impl_arb_op!(
    bessel_i(n, x),
    {
        if n.inf() % 2.0 == 0.0 {
            // n ∈ 2ℤ
            let x = x.abs();
            let a = x.inf();
            let b = x.sup();
            let inf = arb_bessel_i_rd(n, i(a));
            let sup = if b == f64::INFINITY {
                b
            } else {
                arb_bessel_i_ru(n, i(b))
            };
            interval!(inf, sup).unwrap()
        } else if n.inf() % 1.0 == 0.0 {
            // n ∈ 2ℤ + 1
            let a = x.inf();
            let b = x.sup();
            let inf = if a == f64::NEG_INFINITY {
                a
            } else {
                arb_bessel_i_rd(n, i(a))
            };
            let sup = if b == f64::INFINITY {
                b
            } else {
                arb_bessel_i_ru(n, i(b))
            };
            interval!(inf, sup).unwrap()
        } else if n.inf() > 0.0 {
            // n ∈ (0, +∞) ∖ ℤ
            let x = x.intersection(ZERO_TO_INF);
            if x.is_empty() || x == ZERO {
                Interval::EMPTY
            } else {
                let a = x.inf();
                let b = x.sup();
                let inf = if a == 0.0 {
                    0.0
                } else {
                    arb_bessel_i_rd(n, i(a))
                };
                let sup = if b == f64::INFINITY {
                    b
                } else {
                    arb_bessel_i_ru(n, i(b))
                };
                interval!(inf, sup).unwrap()
            }
        } else if n.inf() % 2.0 > -1.0 {
            // n ∈ (-1, 0) ∪ (-3, -2) ∪ …
            let y0 = {
                let x = x.intersection(const_interval!(0.0, 0.5));
                if x.is_empty() || x == ZERO {
                    Interval::EMPTY
                } else {
                    let a = x.inf();
                    let b = x.sup();
                    interval!(arb_bessel_i_rd(n, i(b)), arb_bessel_i_ru(n, i(a))).unwrap()
                }
            };
            let y1 = {
                let x = x.intersection(const_interval!(0.5, f64::INFINITY));
                if x.is_empty() {
                    Interval::EMPTY
                } else {
                    arb_bessel_i(n, x)
                }
            };
            y0.convex_hull(y1)
        } else {
            // n ∈ (-2, -1) ∪ (-4, -3) ∪ …
            let x = x.intersection(ZERO_TO_INF);
            if x.is_empty() || x == ZERO {
                Interval::EMPTY
            } else {
                let a = x.inf();
                let b = x.sup();
                let inf = arb_bessel_i_rd(n, i(a));
                let sup = if b == f64::INFINITY {
                    b
                } else {
                    arb_bessel_i_ru(n, i(b))
                };
                interval!(inf, sup).unwrap()
            }
        }
    },
    {
        assert!(
            n.is_singleton() && n.inf() % 0.5 == 0.0,
            "`n` must be an integer or a half-integer in `I(n, x)`"
        );
        if n.inf() % 1.0 == 0.0 {
            Ternary::True
        } else {
            gt!(x, 0.0)
        }
    }
);

impl_arb_op!(
    bessel_j(n, x),
    {
        if n.inf() % 1.0 == 0.0 {
            // n ∈ ℤ
            arb_bessel_j(n, x).intersection(bessel_envelope(n, x))
        } else {
            // Bisection at 1 is only valid for integer/half-integer orders.
            // The first extremum point can get arbitrarily close to the origin in a general order.
            // The same note applies to `bessel_y` and `bessel_i`.
            let y0 = {
                let x = x.intersection(ZERO_TO_ONE);
                if x.is_empty() || x == ZERO {
                    Interval::EMPTY
                } else {
                    let a = x.inf();
                    let b = x.sup();
                    if n.inf() > 0.0 {
                        // n ∈ (0, +∞) ∖ ℤ
                        let inf = if a == 0.0 {
                            0.0
                        } else {
                            arb_bessel_j_rd(n, i(a))
                        };
                        interval!(inf, arb_bessel_j_ru(n, i(b))).unwrap()
                    } else if n.inf() % 2.0 > -1.0 {
                        // n ∈ (-1, 0) ∪ (-3, -2) ∪ …
                        interval!(arb_bessel_j_rd(n, i(b)), arb_bessel_j_ru(n, i(a))).unwrap()
                    } else {
                        // n = (-2, -1) ∪ (-4, -3) ∪ …
                        interval!(arb_bessel_j_rd(n, i(a)), arb_bessel_j_ru(n, i(b))).unwrap()
                    }
                }
            };
            let y1 = {
                let x = x.intersection(ONE_TO_INF);
                if x.is_empty() {
                    Interval::EMPTY
                } else {
                    arb_bessel_j(n, x).intersection(bessel_envelope(n, x))
                }
            };
            y0.convex_hull(y1)
        }
    },
    {
        assert!(
            n.is_singleton() && n.inf() % 0.5 == 0.0,
            "`n` must be an integer or a half-integer in `J(n, x)`"
        );
        if n.inf() % 1.0 == 0.0 {
            Ternary::True
        } else {
            gt!(x, 0.0)
        }
    }
);

impl_arb_op!(
    bessel_k(n, x),
    {
        // K(n, x) is positive and decreasing on (0, +∞), so the image is
        // [K(n, b), K(n, a)] with outward rounding.
        let x = x.intersection(ZERO_TO_INF);
        let a = x.inf();
        let b = x.sup();
        let inf = if b == f64::INFINITY {
            0.0
        } else {
            arb_bessel_k_rd(n, i(b))
        };
        let sup = arb_bessel_k_ru(n, i(a));
        interval!(inf, sup).unwrap()
    },
    {
        assert!(
            n.is_singleton() && n.inf() % 0.5 == 0.0,
            "`n` must be an integer or a half-integer in `K(n, x)`"
        );
        gt!(x, 0.0)
    }
);

impl_arb_op!(
    bessel_y(n, x),
    {
        let y0 = {
            let x = x.intersection(ZERO_TO_ONE);
            if x.is_empty() || x == ZERO {
                Interval::EMPTY
            } else {
                let a = x.inf();
                let b = x.sup();
                let n_rem_2 = n.inf() % 2.0;
                if n_rem_2 == -0.5 {
                    // n = -1/2, -5/2, …
                    let inf = if a == 0.0 {
                        0.0
                    } else {
                        arb_bessel_y_rd(n, i(a))
                    };
                    interval!(inf, arb_bessel_y_ru(n, i(b))).unwrap()
                } else if n_rem_2 == -1.5 {
                    // n = -3/2, -7/2, …
                    let sup = if a == 0.0 {
                        0.0
                    } else {
                        arb_bessel_y_ru(n, i(a))
                    };
                    interval!(arb_bessel_y_rd(n, i(b)), sup).unwrap()
                } else if n_rem_2 > -1.5 && n_rem_2 < -0.5 {
                    // n ∈ (-3/2, -1/2) ∪ (-7/2, -5/2) ∪ …
                    interval!(arb_bessel_y_rd(n, i(b)), arb_bessel_y_ru(n, i(a))).unwrap()
                } else {
                    // n ∈ (-1/2, +∞) ∪ (-5/2, -3/2) ∪ (-9/2, -7/2) ∪ …
                    interval!(arb_bessel_y_rd(n, i(a)), arb_bessel_y_ru(n, i(b))).unwrap()
                }
            }
        };
        let y1 = {
            let x = x.intersection(ONE_TO_INF);
            if x.is_empty() {
                Interval::EMPTY
            } else {
                arb_bessel_y(n, x).intersection(bessel_envelope(n, x))
            }
        };
        y0.convex_hull(y1)
    },
    {
        assert!(
            n.is_singleton() && n.inf() % 0.5 == 0.0,
            "`n` must be an integer or a half-integer in `Y(n, x)`"
        );
        gt!(x, 0.0)
    }
);

impl_arb_op!(
    chi(x),
    {
        let x = x.intersection(ZERO_TO_INF);
        let a = x.inf();
        let b = x.sup();
        if x == ZERO_TO_INF {
            Interval::ENTIRE
        } else if a == 0.0 {
            // [-∞, Chi(b)]
            interval!(f64::NEG_INFINITY, arb_chi_ru(i(b))).unwrap()
        } else if b == f64::INFINITY {
            // [Chi(a), +∞]
            interval!(arb_chi_rd(i(a)), f64::INFINITY).unwrap()
        } else {
            arb_chi(x)
        }
    },
    gt!(x, 0.0)
);

impl_arb_op!(
    ci(x),
    {
        let x = x.intersection(ZERO_TO_INF);
        let a = x.inf();
        let b = x.sup();
        if a == 0.0 && b <= Interval::FRAC_PI_2.inf() {
            // [-∞, Ci(b)]
            interval!(f64::NEG_INFINITY, arb_ci_ru(i(b))).unwrap()
        } else {
            arb_ci(x).intersection(ci_envelope(x))
        }
    },
    gt!(x, 0.0)
);

impl_arb_op!(cos(x), {
    let y = arb_cos(x);
    // See `USE_MPFR_SIN_COS_ABOVE`: near extrema, MPFR is tighter.
    if y.abs().inf() > USE_MPFR_SIN_COS_ABOVE {
        x.cos()
    } else {
        y
    }
});

impl_arb_op!(
    cosh(x),
    if x.is_common_interval() {
        arb_cosh(x)
    } else {
        x.cosh()
    }
);
/// Evaluates the digamma function ψ(x).
///
/// The fast path applies when every interval avoids the poles
/// 0, −1, −2, …: either b < 0 with no integer inside
/// (`a.ceil() > b.floor()`), or 0 < a ≤ b < +∞. ψ is increasing on
/// every pole-free interval of its real domain, so the image is
/// [ψ(a), ψ(b)] with outward rounding. Otherwise `digamma_impl`
/// (which handles the poles and decorations) is used.
pub fn digamma(&self, site: Option<Site>) -> Self {
    if self.iter().all(|x| {
        let a = x.x.inf();
        let b = x.x.sup();
        b < 0.0 && a.ceil() > b.floor() || a > 0.0 && b < f64::INFINITY
    }) {
        let mut rs = Self::new();
        for x in self {
            let a = x.x.inf();
            let b = x.x.sup();
            let dec = Decoration::Com.min(x.d);
            let a = interval!(a, a).unwrap();
            let b = interval!(b, b).unwrap();
            let y = interval!(arb_digamma_rd(a), arb_digamma_ru(b)).unwrap();
            rs.insert(TupperInterval::new(DecInterval::set_dec(y, dec), x.g));
        }
        rs.normalize(false);
        rs
    } else {
        self.digamma_impl(site)
    }
}
impl_arb_op!(
    ei(x),
    {
        // Ei is increasing on each of (-∞, 0) and (0, +∞), with
        // Ei(x) → -∞ as x → 0 from either side.
        let a = x.inf();
        let b = x.sup();
        if b <= 0.0 {
            // [Ei(b), Ei(a)]
            // When b = 0, inf(arb_ei([b, b])) = inf([-∞, +∞]) = -∞.
            let inf = arb_ei_rd(i(b));
            let sup = if a == f64::NEG_INFINITY {
                0.0
            } else {
                arb_ei_ru(i(a))
            };
            interval!(inf, sup).unwrap()
        } else if a >= 0.0 {
            // [Ei(a), Ei(b)]
            let inf = arb_ei_rd(i(a));
            let sup = if b == f64::INFINITY {
                f64::INFINITY
            } else {
                arb_ei_ru(i(b))
            };
            interval!(inf, sup).unwrap()
        } else {
            // [-∞, max(Ei(a), Ei(b))]
            let sup0 = if a == f64::NEG_INFINITY {
                0.0
            } else {
                arb_ei_ru(i(a))
            };
            let sup1 = if b == f64::INFINITY {
                f64::INFINITY
            } else {
                arb_ei_ru(i(b))
            };
            interval!(f64::NEG_INFINITY, sup0.max(sup1)).unwrap()
        }
    },
    ne!(x, 0.0)
);

// Complete elliptic integrals, defined for x ≤ 1 (E) / x < 1 (K);
// both are monotone, so unbounded inputs use endpoint bounds.
impl_arb_op!(
    elliptic_e(x),
    {
        let a = x.inf();
        let b = x.sup();
        if a == f64::NEG_INFINITY && b >= 1.0 {
            const_interval!(1.0, f64::INFINITY)
        } else if a == f64::NEG_INFINITY {
            interval!(arb_elliptic_e_rd(i(b)), f64::INFINITY).unwrap()
        } else if b >= 1.0 {
            interval!(1.0, arb_elliptic_e_ru(i(a))).unwrap()
        } else {
            arb_elliptic_e(x)
        }
    },
    le!(x, 1.0)
);

impl_arb_op!(
    elliptic_k(x),
    {
        let a = x.inf();
        let b = x.sup();
        if a == f64::NEG_INFINITY && b >= 1.0 {
            const_interval!(0.0, f64::INFINITY)
        } else if a == f64::NEG_INFINITY {
            interval!(0.0, arb_elliptic_k_ru(i(b))).unwrap()
        } else if b >= 1.0 {
            interval!(arb_elliptic_k_rd(i(a)), f64::INFINITY).unwrap()
        } else {
            arb_elliptic_k(x)
        }
    },
    lt!(x, 1.0)
);

impl_arb_op!(
    erf(x),
    if x.is_common_interval() {
        arb_erf(x)
    } else {
        interval_set_ops::erf(x)
    }
);

impl_arb_op!(
    erfc(x),
    if x.is_common_interval() {
        arb_erfc(x)
    } else {
        interval_set_ops::erfc(x)
    }
);

impl_arb_op!(erfi(x), {
    // erfi is increasing and unbounded; handle half-bounded inputs manually.
    let a = x.inf();
    let b = x.sup();
    if x.is_entire() {
        x
    } else if a == f64::NEG_INFINITY {
        // [-∞, erfi(b)]
        interval!(f64::NEG_INFINITY, arb_erfi(i(b)).sup()).unwrap()
    } else if b == f64::INFINITY {
        // [erfi(a), +∞]
        interval!(arb_erfi(i(a)).inf(), f64::INFINITY).unwrap()
    } else {
        arb_erfi(x)
    }
});

impl_arb_op!(
    exp(x),
    if x.is_common_interval() {
        arb_exp(x)
    } else {
        x.exp()
    }
);

// Fresnel integrals: tighten Arb's enclosure with the envelope around the
// appropriate limit (±1/2) on sign-definite inputs.
impl_arb_op!(fresnel_c(x), {
    let a = x.inf();
    let b = x.sup();
    if b <= 0.0 {
        arb_fresnel_c(x).intersection(fresnel_envelope_centered(-x) - ONE_HALF)
    } else if a >= 0.0 {
        arb_fresnel_c(x).intersection(fresnel_envelope_centered(x) + ONE_HALF)
    } else {
        arb_fresnel_c(x)
    }
});

impl_arb_op!(fresnel_s(x), {
    let a = x.inf();
    let b = x.sup();
    if b <= 0.0 {
        arb_fresnel_s(x).intersection(fresnel_envelope_centered(-x) - ONE_HALF)
    } else if a >= 0.0 {
        arb_fresnel_s(x).intersection(fresnel_envelope_centered(x) + ONE_HALF)
    } else {
        arb_fresnel_s(x)
    }
});
/// Evaluates the gamma function over each constituent interval.
///
/// The fast Arb path is taken only when every interval lies strictly inside a
/// pole-free region of Γ and cannot overflow `f64`; otherwise the general
/// implementation (`gamma_impl`) is used.
pub fn gamma(&self, site: Option<Site>) -> Self {
    // NSolve[{Gamma[x] == $MaxMachineNumber, 0 < x < 180}, x]
    const X_LIMIT: f64 = 171.0;
    // Either the interval is between two consecutive negative-axis poles
    // (no integer in [a, b]) or it is positive and below the overflow limit.
    let use_arb = self.iter().all(|t| {
        let (inf, sup) = (t.x.inf(), t.x.sup());
        (sup < 0.0 && inf.ceil() > sup.floor()) || (inf > 0.0 && sup < X_LIMIT)
    });
    if !use_arb {
        return self.gamma_impl(site);
    }
    let mut result = Self::new();
    for t in self {
        let y = arb_gamma(t.x);
        let dec = Decoration::Com.min(t.d);
        result.insert(TupperInterval::new(DecInterval::set_dec(y, dec), t.g));
    }
    result.normalize(false);
    result
}
// Upper incomplete gamma function Γ(s, x) for an exact (singleton) `s`.
impl_arb_op!(
    gamma_inc(s, x),
    if s.inf() % 2.0 == 1.0 {
        // s = 1, 3, 5, …: Γ(s, x) is treated as decreasing in x on all of ℝ,
        // so the lower bound comes from b and the upper bound from a.
        let a = x.inf();
        let b = x.sup();
        let inf = if b == f64::INFINITY {
            0.0
        } else {
            arb_gamma_inc(s, i(b)).inf()
        };
        let sup = if a == f64::NEG_INFINITY {
            f64::INFINITY
        } else {
            arb_gamma_inc(s, i(a)).sup()
        };
        interval!(inf, sup).unwrap()
    } else {
        // s ≠ 1, 3, 5, …: split the input at x = 0 and hull the two images.
        let y0 = if s.inf() > 0.0 && s.inf() % 2.0 == 0.0 {
            // s = 2, 4, 6, …: the negative half-line is also in the domain.
            let x = x.intersection(N_INF_TO_ZERO);
            if x.is_empty() {
                Interval::EMPTY
            } else {
                let a = x.inf();
                let b = x.sup();
                let inf = if a == f64::NEG_INFINITY {
                    a
                } else {
                    arb_gamma_inc(s, i(a)).inf()
                };
                interval!(inf, arb_gamma_inc(s, i(b)).sup()).unwrap()
            }
        } else {
            // s ≠ 1, 2, 3, …: nothing is contributed by x ≤ 0.
            Interval::EMPTY
        };
        let y1 = {
            let x = x.intersection(ZERO_TO_INF);
            // For s ≤ 0 the point x = 0 is excluded from the domain.
            if x.is_empty() || s.inf() <= 0.0 && x == ZERO {
                Interval::EMPTY
            } else {
                let a = x.inf();
                let b = x.sup();
                let inf = if b == f64::INFINITY {
                    0.0
                } else {
                    arb_gamma_inc(s, i(b)).inf()
                };
                interval!(inf, arb_gamma_inc(s, i(a)).sup()).unwrap()
            }
        };
        y0.convex_hull(y1)
    },
    {
        assert!(
            s.is_singleton(),
            "`a` must be an exact number in `Gamma(a, x)`"
        );
        let s = s.inf();
        if s > 0.0 && s % 1.0 == 0.0 {
            // Positive integer s: defined for every x.
            Ternary::True
        } else {
            gt!(x, 0.0)
        }
    }
);

// Inverse error function; increasing on its domain [-1, 1], so the image of
// the clamped input [a, b] is [erf⁻¹(a), erf⁻¹(b)].
impl_arb_op!(
    inverse_erf(x),
    {
        let x = x.intersection(const_interval!(-1.0, 1.0));
        if x.is_empty() {
            x
        } else {
            let a = x.inf();
            let b = x.sup();
            interval!(arb_erfinv(i(a)).inf(), arb_erfinv(i(b)).sup()).unwrap()
        }
    },
    ge!(x, -1.0) & le!(x, 1.0)
);

// Inverse complementary error function; decreasing on its domain [0, 2], so
// the endpoint roles are swapped relative to `inverse_erf`.
impl_arb_op!(
    inverse_erfc(x),
    {
        let x = x.intersection(const_interval!(0.0, 2.0));
        if x.is_empty() {
            x
        } else {
            let a = x.inf();
            let b = x.sup();
            interval!(arb_erfcinv(i(b)).inf(), arb_erfcinv(i(a)).sup()).unwrap()
        }
    },
    ge!(x, 0.0) & le!(x, 2.0)
);
/// Evaluates the Lambert W function `W(k, x)` on the branch selected by `self`.
///
/// `self` must be a singleton equal to either `0` (principal branch, domain
/// `[-1/e, +∞)`) or `-1` (branch W₋₁, domain `[-1/e, 0)`); any other value
/// panics. The decoration is `Com` only when the input lies strictly inside
/// the branch's domain; otherwise it is demoted to `Trv`.
pub fn lambert_w(&self, rhs: &Self) -> Self {
    // -1/e rounded down and up, respectively, bracketing the branch point.
    const FRAC_M_ONE_E_RD: f64 = -0.36787944117144233;
    const FRAC_M_ONE_E_RU: f64 = -0.3678794411714423;
    let mut rs = Self::new();
    if self.to_f64() == Some(0.0) {
        // Principal branch W₀: increasing on [-1/e, +∞).
        for k in self {
            for x in rhs {
                if let Some(g) = k.g.union(x.g) {
                    let a = x.x.inf();
                    // Decoration survives only if the input is provably
                    // at or above the (rounded-up) branch point.
                    let dec = if a >= FRAC_M_ONE_E_RU {
                        Decoration::Com.min(k.d).min(x.d)
                    } else {
                        Decoration::Trv
                    };
                    let x =
                        x.x.intersection(const_interval!(FRAC_M_ONE_E_RD, f64::INFINITY));
                    let a = x.inf();
                    let b = x.sup();
                    let y = if x.is_empty() {
                        Interval::EMPTY
                    } else {
                        let inf = {
                            let a = interval!(a, a).unwrap();
                            arb_lambert_w_0_rd(a)
                        };
                        let sup = if b == f64::INFINITY {
                            f64::INFINITY
                        } else {
                            let b = interval!(b, b).unwrap();
                            arb_lambert_w_0_ru(b)
                        };
                        interval!(inf, sup).unwrap()
                    };
                    rs.insert(TupperInterval::new(DecInterval::set_dec(y, dec), g));
                }
            }
        }
    } else if self.to_f64() == Some(-1.0) {
        // Branch W₋₁: decreasing on [-1/e, 0), with W₋₁(x) → -∞ as x → 0⁻.
        for k in self {
            for x in rhs {
                if let Some(g) = k.g.union(x.g) {
                    let a = x.x.inf();
                    let b = x.x.sup();
                    let dec = if a >= FRAC_M_ONE_E_RU && b < 0.0 {
                        Decoration::Com.min(k.d).min(x.d)
                    } else {
                        Decoration::Trv
                    };
                    let x = x.x.intersection(const_interval!(FRAC_M_ONE_E_RD, 0.0));
                    let a = x.inf();
                    let b = x.sup();
                    // `a == 0.0` means the clamped input is the single point
                    // {0}, which is outside the open domain.
                    let y = if a == 0.0 || x.is_empty() {
                        Interval::EMPTY
                    } else {
                        let inf = if b == 0.0 {
                            f64::NEG_INFINITY
                        } else {
                            let b = interval!(b, b).unwrap();
                            arb_lambert_w_m1_rd(b)
                        };
                        let sup = {
                            let a = interval!(a, a).unwrap();
                            arb_lambert_w_m1_ru(a)
                        };
                        interval!(inf, sup).unwrap()
                    };
                    rs.insert(TupperInterval::new(DecInterval::set_dec(y, dec), g));
                }
            }
        }
    } else {
        panic!("`k` must be either 0 or -1 in `W(k, x)`");
    }
    rs.normalize(false);
    rs
}
// Logarithmic integral li(x), defined for x ≥ 0 with a singularity at x = 1
// (li → -∞ from both sides).
impl_arb_op!(
    li(x),
    {
        let x = x.intersection(ZERO_TO_INF);
        let a = x.inf();
        let b = x.sup();
        if b <= 1.0 {
            // li is treated as decreasing on [0, 1]: [li(b), li(a)].
            interval!(arb_li_rd(i(b)), arb_li_ru(i(a))).unwrap()
        } else if a >= 1.0 {
            // li is treated as increasing on [1, +∞): [li(a), li(b)].
            let inf = arb_li_rd(i(a));
            let sup = if b == f64::INFINITY {
                f64::INFINITY
            } else {
                arb_li_ru(i(b))
            };
            interval!(inf, sup).unwrap()
        } else {
            // The input straddles the singularity at 1: [-∞, max(li(a), li(b))].
            let sup0 = arb_li_ru(i(a));
            let sup1 = if b == f64::INFINITY {
                f64::INFINITY
            } else {
                arb_li_ru(i(b))
            };
            interval!(f64::NEG_INFINITY, sup0.max(sup1)).unwrap()
        }
    },
    ge!(x, 0.0) & ne!(x, 1.0)
);

// Natural logarithm: Arb only for bounded positive inputs, otherwise the
// interval library's own ln.
impl_arb_op!(
    ln(x),
    if x.inf() > 0.0 && x.sup() < f64::INFINITY {
        arb_ln(x)
    } else {
        x.ln()
    },
    gt!(x, 0.0)
);

// ln Γ(x). The enclosure exploits that ln Γ attains its minimum on (0, +∞)
// near x ≈ 1.4616…, bracketed below by ARGMIN_RD/ARGMIN_RU.
impl_arb_op!(
    ln_gamma(x),
    {
        const ARGMIN_RD: f64 = 1.4616321449683622;
        const ARGMIN_RU: f64 = 1.4616321449683625;
        let a = x.inf();
        let b = x.sup();
        if a <= 0.0 && b <= ARGMIN_RD {
            // Left of the minimum (and possibly through the poles at x ≤ 0):
            // decreasing tail, unbounded above.
            interval!(arb_ln_gamma_rd(i(b)), f64::INFINITY).unwrap()
        } else if a >= ARGMIN_RU && b == f64::INFINITY {
            // Right of the minimum with unbounded input: increasing tail.
            interval!(arb_ln_gamma_rd(i(a)), f64::INFINITY).unwrap()
        } else {
            arb_ln_gamma(x)
        }
    },
    gt!(x, 0.0)
);
/// Evaluates `self ^ rhs`.
///
/// Uses the shared Arb fast path (`pow_common`) when every base interval is
/// strictly positive and bounded and every exponent interval is common;
/// otherwise falls back to the general implementation.
pub fn pow(&self, rhs: &Self, site: Option<Site>) -> Self {
    let base_ok = self.iter().all(|x| {
        let (a, b) = (x.x.inf(), x.x.sup());
        a > 0.0 && b < f64::INFINITY
    });
    if base_ok && rhs.iter().all(|y| y.x.is_common_interval()) {
        self.pow_common(rhs)
    } else {
        self.pow_impl(rhs, site)
    }
}
/// Evaluates `self ^ rhs` with rational-exponent semantics.
///
/// Shares the fast path with [`Self::pow`]: on strictly positive, bounded
/// bases and common exponents both definitions coincide, so `pow_common`
/// is used; otherwise the dedicated implementation handles the rest.
pub fn pow_rational(&self, rhs: &Self, site: Option<Site>) -> Self {
    let base_ok = self.iter().all(|x| {
        let (a, b) = (x.x.inf(), x.x.sup());
        a > 0.0 && b < f64::INFINITY
    });
    if base_ok && rhs.iter().all(|y| y.x.is_common_interval()) {
        self.pow_common(rhs)
    } else {
        self.pow_rational_impl(rhs, site)
    }
}
/// The Arb fast path shared by [`Self::pow`] and [`Self::pow_rational`].
///
/// Takes the pairwise product of the two interval sets, combining only pairs
/// whose branch tags are compatible (`g.union` succeeds).
fn pow_common(&self, rhs: &Self) -> Self {
    let mut result = Self::new();
    for base in self {
        for exp in rhs {
            if let Some(g) = base.g.union(exp.g) {
                let z = arb_pow(base.x, exp.x);
                let dec = Decoration::Com.min(base.d).min(exp.d);
                result.insert(TupperInterval::new(DecInterval::set_dec(z, dec), g));
            }
        }
    }
    result.normalize(false);
    result
}
// Hyperbolic sine integral Shi(x); treated as increasing on ℝ, so unbounded
// endpoints pin the corresponding bound at ±∞.
impl_arb_op!(shi(x), {
    let a = x.inf();
    let b = x.sup();
    if x.is_entire() {
        x
    } else if a == f64::NEG_INFINITY {
        // [-∞, Shi(b)]
        interval!(f64::NEG_INFINITY, arb_shi_ru(i(b))).unwrap()
    } else if b == f64::INFINITY {
        // [Shi(a), +∞]
        interval!(arb_shi_rd(i(a)), f64::INFINITY).unwrap()
    } else {
        arb_shi(x)
    }
});
impl_arb_op!(si(x), {
let a = x.inf();
let b = x.sup();
if b <= 0.0 {
arb_si(x).intersection(ci_envelope(-x) - Interval::FRAC_PI_2)
} else if a >= 0.0 {
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | true |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/geom.rs | rust/src/geom.rs | use crate::region::Region;
use inari::{interval, Interval};
/// A one-dimensional geometric region that represents a line segment.
///
/// Conceptually, it is a pair of two [`Interval`]s `inner` and `outer`
/// that satisfy `inner ⊆ outer`. `inner` can be empty, while `outer` cannot.
#[derive(Clone, Debug)]
pub struct Box1D {
    l: Interval,
    r: Interval,
}

impl Box1D {
    /// Creates a new [`Box1D`] with the given bounds.
    ///
    /// Panics unless `l.inf() <= r.sup()` (i.e., unless the outer region is nonempty).
    pub fn new(l: Interval, r: Interval) -> Self {
        assert!(l.inf() <= r.sup());
        Self { l, r }
    }

    /// Returns the inner region, which is empty when the bound intervals overlap.
    pub fn inner(&self) -> Interval {
        let lo = self.l.sup();
        let hi = self.r.inf();
        if lo > hi {
            Interval::EMPTY
        } else {
            interval!(lo, hi).unwrap()
        }
    }

    /// Returns the left bound of the region.
    pub fn left(&self) -> Interval {
        self.l
    }

    /// Returns the outer region, which is always nonempty.
    pub fn outer(&self) -> Interval {
        interval!(self.l.inf(), self.r.sup()).unwrap()
    }

    /// Returns the right bound of the region.
    pub fn right(&self) -> Interval {
        self.r
    }
}
/// A two-dimensional geometric region that represents an axis-aligned rectangle.
///
/// Conceptually, it is a pair of two [`Region`]s `inner` and `outer`
/// that satisfy `inner ⊆ outer`. `inner` can be empty, while `outer` cannot.
/// Internally it is a pair of [`Box1D`]s: `.0` for the x axis, `.1` for the y axis.
#[derive(Clone, Debug)]
pub struct Box2D(Box1D, Box1D);

impl Box2D {
    /// Creates a new [`Box2D`] with the given bounds
    /// (left/right for the x axis, bottom/top for the y axis).
    pub fn new(l: Interval, r: Interval, b: Interval, t: Interval) -> Self {
        Self(Box1D::new(l, r), Box1D::new(b, t))
    }

    /// Returns the bottom bound of the region.
    pub fn bottom(&self) -> Interval {
        self.1.left()
    }

    /// Returns the inner region.
    pub fn inner(&self) -> Region {
        Region::new(self.0.inner(), self.1.inner())
    }

    /// Returns the left bound of the region.
    pub fn left(&self) -> Interval {
        self.0.left()
    }

    /// Returns the outer region.
    pub fn outer(&self) -> Region {
        Region::new(self.0.outer(), self.1.outer())
    }

    /// Returns the right bound of the region.
    pub fn right(&self) -> Interval {
        self.0.right()
    }

    /// Returns the top bound of the region.
    pub fn top(&self) -> Interval {
        self.1.right()
    }

    /// Swaps the axes of the region.
    pub fn transpose(&self) -> Self {
        Self(self.1.clone(), self.0.clone())
    }
}
/// The type of the formula that should be used for performing geometric transformations.
///
/// See [`Transformation1D`] for the two concrete formulas.
#[derive(Clone, Copy, Debug)]
pub enum TransformationMode {
    /// Suitable for transformation from image coordinates to real coordinates,
    /// which usually involves exact divisions (division by image dimensions).
    Fast,
    /// Suitable for transformation from real coordinates to image coordinates,
    /// which usually involves inexact divisions (division by lengths of the plot range).
    Precise,
}
/// A one-dimensional affine geometric transformation that consists of only scaling and translation.
#[derive(Clone, Debug)]
pub enum Transformation1D {
    /// Precomputed form `x ↦ s·x + t`; built by [`TransformationMode::Fast`].
    Fast {
        s: Interval,
        t: Interval,
    },
    /// Deferred form `x ↦ ((x − a0) / a01)·x01 + x0`, which divides at
    /// application time; built by [`TransformationMode::Precise`].
    Precise {
        a0: Interval,
        a01: Interval,
        x0: Interval,
        x01: Interval,
    },
}
impl Transformation1D {
    /// Creates a transformation that maps each source point (`from_points[i]`)
    /// to the corresponding destination point (`to_points[i]`).
    ///
    /// With [`TransformationMode::Fast`], the scale and offset are precomputed
    /// here; with [`TransformationMode::Precise`], the division is deferred to
    /// application time (see `Transform<Transformation1D> for Interval`).
    pub fn new(
        from_points: [Interval; 2],
        to_points: [Interval; 2],
        mode: TransformationMode,
    ) -> Self {
        let [a0, a1] = from_points;
        let [x0, x1] = to_points;
        match mode {
            TransformationMode::Fast => {
                // Hoist the slope; the original computed (x1 - x0) / (a1 - a0)
                // twice. Interval arithmetic is deterministic, so the result
                // is bit-identical.
                let s = (x1 - x0) / (a1 - a0);
                Self::Fast {
                    s,
                    t: (-a0).mul_add(s, x0),
                }
            }
            TransformationMode::Precise => Self::Precise {
                a0,
                a01: a1 - a0,
                x0,
                x01: x1 - x0,
            },
        }
    }
}
/// A two-dimensional affine geometric transformation that consists of only scaling and translation.
///
/// Stored as one independent [`Transformation1D`] per axis (`.0` = x, `.1` = y).
#[derive(Clone, Debug)]
pub struct Transformation2D(Transformation1D, Transformation1D);

impl Transformation2D {
    /// Creates a transformation that maps each source point to the corresponding destination point.
    pub fn new(from_points: [Region; 2], to_points: [Region; 2], mode: TransformationMode) -> Self {
        let [f0, f1] = &from_points;
        let [t0, t1] = &to_points;
        let x = Transformation1D::new([f0.x(), f1.x()], [t0.x(), t1.x()], mode);
        let y = Transformation1D::new([f0.y(), f1.y()], [t0.y(), t1.y()], mode);
        Self(x, y)
    }
}
pub trait Transform<T> {
    /// Returns an enclosure of the geometric object transformed by `t`.
    ///
    /// "Enclosure" because the underlying interval arithmetic may widen the
    /// exact image.
    fn transform(&self, t: &T) -> Self;
}
impl Transform<Transformation1D> for Box1D {
    /// Transforms both bound intervals independently.
    fn transform(&self, t: &Transformation1D) -> Self {
        let l = self.l.transform(t);
        let r = self.r.transform(t);
        Self { l, r }
    }
}
impl Transform<Transformation2D> for Box2D {
fn transform(&self, t: &Transformation2D) -> Self {
Self(self.0.transform(&t.0), self.1.transform(&t.1))
}
}
impl Transform<Transformation1D> for Interval {
    fn transform(&self, t: &Transformation1D) -> Self {
        match *t {
            // x ↦ s·x + t, with a fused multiply-add for a tighter enclosure.
            Transformation1D::Fast { s, t } => self.mul_add(s, t),
            // x ↦ ((x − a0) / a01)·x01 + x0; the division happens here,
            // at application time.
            Transformation1D::Precise { a0, a01, x0, x01 } => ((*self - a0) / a01).mul_add(x01, x0),
        }
    }
}
pub trait TransformInPlace<T> {
    /// The in-place version of [`Transform`]: mutates `self` instead of
    /// returning a transformed copy.
    fn transform_in_place(&mut self, t: &T);
}
#[cfg(test)]
mod tests {
    use super::*;
    use inari::const_interval;

    // Checks `inner`/`outer` both when the bound intervals are disjoint
    // (nonempty inner) and when they overlap (empty inner).
    #[test]
    fn box2d() {
        let u = Box2D::new(
            const_interval!(0.33, 0.34),
            const_interval!(0.66, 0.67),
            const_interval!(1.33, 1.34),
            const_interval!(1.66, 1.67),
        );

        assert_eq!(
            u.inner(),
            Region::new(const_interval!(0.34, 0.66), const_interval!(1.34, 1.66))
        );

        assert_eq!(
            u.outer(),
            Region::new(const_interval!(0.33, 0.67), const_interval!(1.33, 1.67))
        );

        let u = Box2D::new(
            const_interval!(0.33, 0.66),
            const_interval!(0.34, 0.67),
            const_interval!(1.33, 1.66),
            const_interval!(1.34, 1.67),
        );

        assert_eq!(u.inner(), Region::EMPTY);

        assert_eq!(
            u.outer(),
            Region::new(const_interval!(0.33, 0.67), const_interval!(1.33, 1.67))
        );
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/block.rs | rust/src/block.rs | use crate::{traits::BytesAllocated, vars::VarSet};
use inari::{interval, Interval};
use itertools::Itertools;
use smallvec::SmallVec;
use std::{collections::VecDeque, ptr::copy_nonoverlapping};
/// A component of a [`Block`] that corresponds to the horizontal or vertical axis of an [`Image`].
///
/// A [`Coordinate`] with index `i` and level `k` represents the interval `[i 2^k, (i + 1) 2^k]`,
/// where the endpoints are in pixel coordinates.
///
/// [`Image`]: crate::image::Image
#[derive(Clone, Copy, Debug, Default, Eq, PartialEq)]
pub struct Coordinate {
    // Block index; the block spans `[i 2^k, (i + 1) 2^k]` in pixel coordinates.
    i: u64,
    // Block level; the block width is `2^k` pixels.
    k: i8,
}

impl Coordinate {
    /// The smallest level of blocks.
    ///
    /// A smaller value can be used, as long as the following condition is met:
    ///
    /// - [`Image::MAX_WIDTH`]` / 2^`[`Self::MIN_LEVEL`]` ≤ 2^53`,
    ///
    /// which is required for keeping `block_to_region` operations exact.
    ///
    /// [`Image::MAX_WIDTH`]: crate::image::Image::MAX_WIDTH
    pub const MIN_LEVEL: i8 = -32;

    /// Creates a new [`Coordinate`].
    ///
    /// Panics if `level` is less than [`Self::MIN_LEVEL`].
    pub fn new(index: u64, level: i8) -> Self {
        assert!(level >= Self::MIN_LEVEL);
        Self { i: index, k: level }
    }

    /// Returns the index of the block in multiples of the block width.
    pub fn index(&self) -> u64 {
        self.i
    }

    /// Returns `true` if `self.level() < 0`, i.e., the block width is smaller than the pixel width.
    pub fn is_subpixel(&self) -> bool {
        self.k < 0
    }

    /// Returns `true` if `self.level() > 0`, i.e., the block width is larger than the pixel width.
    pub fn is_superpixel(&self) -> bool {
        self.k > 0
    }

    /// Returns `true` if the block can be subdivided.
    pub fn is_subdivisible(&self) -> bool {
        self.k > Self::MIN_LEVEL
    }

    /// Returns the level of the block.
    #[allow(dead_code)]
    pub fn level(&self) -> i8 {
        self.k
    }

    /// Returns the pixel-level block that contains the given block.
    /// If the block spans multiple pixels, the one with the least index is returned.
    pub fn pixel(&self) -> Self {
        Self {
            i: self.pixel_index() as u64,
            k: 0,
        }
    }

    /// Returns the pixel width divided by the block width.
    ///
    /// Panics if `self.level() > 0`.
    pub fn pixel_align(&self) -> u64 {
        assert!(self.k <= 0);
        1u64 << -self.k
    }

    /// Returns the index of the pixel that contains the block.
    /// If the block spans multiple pixels, the least index is returned.
    pub fn pixel_index(&self) -> u32 {
        // Scale the block index by 2^k (shift left for superpixel blocks,
        // right for subpixel blocks) to get the pixel index.
        if self.k >= 0 {
            (self.i << self.k) as u32
        } else {
            (self.i >> -self.k) as u32
        }
    }

    /// Returns the subdivided blocks.
    ///
    /// Two blocks are returned.
    ///
    /// Precondition: [`Self::is_subdivisible()`] is `true`.
    pub fn subdivide(&self) -> [Self; 2] {
        // Halving the block doubles the index space and decrements the level.
        let i0 = 2 * self.i;
        let i1 = i0 + 1;
        let k = self.k - 1;
        [Self { i: i0, k }, Self { i: i1, k }]
    }

    /// Returns the block width in pixels.
    ///
    /// Panics if [`Self::level`] is less than 0.
    pub fn width(&self) -> u32 {
        assert!(self.k >= 0);
        1u32 << self.k
    }

    /// Returns the block width in pixels, as an `f64` (exact for any valid level).
    pub fn widthf(&self) -> f64 {
        Self::exp2(self.k)
    }

    /// Returns `2^k`.
    fn exp2(k: i8) -> f64 {
        // Builds the IEEE 754 double directly from its biased exponent
        // (1023 + k) with a zero mantissa; exact for the level range in use.
        f64::from_bits(((1023 + k as i32) as u64) << 52)
    }
}
fn subdivision_point(x: Interval, integer: bool) -> f64 {
let a = x.inf();
let b = x.sup();
if a == f64::NEG_INFINITY {
if b < 0.0 {
(2.0 * b).max(f64::MIN)
} else if b == 0.0 {
-1.0
} else {
0.0
}
} else if b == f64::INFINITY {
if a < 0.0 {
0.0
} else if a == 0.0 {
1.0
} else {
(2.0 * a).min(f64::MAX)
}
} else if integer {
x.mid().round()
} else {
x.mid()
}
}
/// A component of a [`Block`] that corresponds to an integer parameter.
///
/// The wrapped interval always has integer (or infinite) endpoints.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct IntegerParameter(Interval);

impl IntegerParameter {
    /// Creates a new [`IntegerParameter`].
    ///
    /// Panics if `x` is empty or an endpoint of `x` is a finite non-integer number.
    pub fn new(x: Interval) -> Self {
        assert!(!x.is_empty() && x == x.trunc());
        Self(x)
    }

    /// Returns the interval that the block spans.
    pub fn interval(&self) -> Interval {
        self.0
    }

    /// Returns `true` if the block can be subdivided.
    pub fn is_subdivisible(&self) -> bool {
        Self::is_subdivisible_impl(self.0)
    }

    // True iff the rounded midpoint is strictly between the endpoints,
    // i.e., splitting there actually shrinks the interval.
    fn is_subdivisible_impl(x: Interval) -> bool {
        let mid = x.mid().round();
        x.inf() != mid && x.sup() != mid
    }

    /// Returns subdivided blocks. This version must be called for the initial subdivision.
    /// Use [`Self::subdivide1`] for subsequent subdivisions.
    ///
    /// Three blocks are returned at most.
    pub fn subdivide0(&self) -> Vec<Self> {
        let x = self.0;
        let a = x.inf();
        let b = x.sup();
        // Split off both endpoints as point intervals, keeping the original
        // interval in the middle.
        [interval!(a, a), interval!(a, b), interval!(b, b)]
            .into_iter()
            .filter_map(|x| x.ok()) // Remove invalid constructions, namely, [-∞, -∞] and [+∞, +∞].
            .filter(|x| x.wid() != 1.0)
            .dedup()
            .map(Self)
            .collect()
    }

    /// Returns subdivided blocks. This version must be called for subsequent subdivisions.
    /// Use [`Self::subdivide0`] for the initial subdivision.
    ///
    /// Three blocks are returned at most.
    ///
    /// Precondition: [`Self::is_subdivisible()`] is `true`.
    pub fn subdivide1(&self) -> SmallVec<[Self; 3]> {
        let x = self.0;
        let a = x.inf();
        let b = x.sup();
        let mid = subdivision_point(x, true);
        [
            interval!(a, mid).unwrap(),
            interval!(mid, mid).unwrap(),
            interval!(mid, b).unwrap(),
        ]
        .into_iter()
        .filter_map(|x| {
            let w = x.wid();
            if w == 1.0 {
                // Discard the interval since both of the endpoints are already taken
                // as point intervals and there is no integer between them.
                None
            } else if w == 2.0 && Self::is_subdivisible_impl(x) {
                // Exactly one integer lies strictly inside; return it as a
                // point interval.
                let m = x.mid();
                Some(Self(interval!(m, m).unwrap()))
            } else {
                Some(Self(x))
            }
        })
        .collect()
    }
}
impl Default for IntegerParameter {
    // The default parameter spans all integers: [-∞, +∞].
    fn default() -> Self {
        Self(Interval::ENTIRE)
    }
}
/// A component of a [`Block`] that corresponds to a real parameter.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct RealParameter(Interval);

impl RealParameter {
    /// Creates a new [`RealParameter`].
    ///
    /// Panics if `x` is empty.
    pub fn new(x: Interval) -> Self {
        assert!(!x.is_empty());
        Self(x)
    }

    /// Returns the interval that the block spans.
    pub fn interval(&self) -> Interval {
        self.0
    }

    /// Returns `true` if the block can be subdivided, i.e., the midpoint is
    /// strictly between the endpoints.
    pub fn is_subdivisible(&self) -> bool {
        let x = self.0;
        let mid = x.mid();
        x.inf() != mid && x.sup() != mid
    }

    /// (To be used for parametric relations.)
    /// Returns subdivided blocks.
    ///
    /// Two blocks are returned at most.
    ///
    /// Precondition: [`Self::is_subdivisible()`] is `true`.
    pub fn subdivide(&self) -> SmallVec<[Self; 2]> {
        let x = self.0;
        let a = x.inf();
        let b = x.sup();
        let mid = subdivision_point(x, false);
        [interval!(a, mid).unwrap(), interval!(mid, b).unwrap()]
            .into_iter()
            .filter(|x| !x.is_singleton())
            .map(Self)
            .collect()
    }

    /// (To be used for implicit relations.)
    /// Returns subdivided blocks. This version must be called for the initial subdivision.
    /// Use [`Self::subdivide1`] for subsequent subdivisions.
    ///
    /// Three blocks are returned at most.
    pub fn subdivide0(&self) -> SmallVec<[Self; 2]> {
        let x = self.0;
        let a = x.inf();
        let b = x.sup();
        // Split off both endpoints as point intervals, keeping the original
        // interval in the middle.
        [interval!(a, a), interval!(a, b), interval!(b, b)]
            .into_iter()
            .filter_map(|x| x.ok()) // Remove invalid constructions, namely, [-∞, -∞] and [+∞, +∞].
            .map(Self)
            .collect()
    }

    /// (To be used for implicit relations.)
    /// Returns subdivided blocks. This version must be called for subsequent subdivisions.
    /// Use [`Self::subdivide0`] for the initial subdivision.
    ///
    /// Three blocks are returned at most.
    ///
    /// Precondition: [`Self::is_subdivisible()`] is `true`.
    pub fn subdivide1(&self) -> SmallVec<[Self; 2]> {
        let x = self.0;
        let a = x.inf();
        let b = x.sup();
        let mid = subdivision_point(x, false);
        [
            interval!(a, mid).unwrap(),
            interval!(mid, mid).unwrap(),
            interval!(mid, b).unwrap(),
        ]
        .into_iter()
        .map(Self)
        .collect()
    }
}
impl Default for RealParameter {
    // The default parameter spans the whole real line: [-∞, +∞].
    fn default() -> Self {
        Self(Interval::ENTIRE)
    }
}
/// A subset of the domain of a relation.
///
/// Only the components listed in a [`BlockQueue`]'s `store_vars` are
/// round-tripped through the queue; the rest keep their defaults.
#[derive(Clone, Debug, Default, Eq, PartialEq)]
pub struct Block {
    /// The horizontal coordinate x.
    pub x: Coordinate,
    /// The vertical coordinate y.
    pub y: Coordinate,
    /// The integer parameter m.
    pub m: IntegerParameter,
    /// The integer parameter n.
    pub n: IntegerParameter,
    /// The integer parameter used for selecting the branch of the angular coordinate θ.
    pub n_theta: IntegerParameter,
    /// The real parameter t.
    pub t: RealParameter,
    /// The index of the subdivision direction, defined by the graphing algorithm,
    /// that should be chosen to subdivide this block.
    pub next_dir_index: u8,
}
// A `const` copy of `Block::default()`, used by `BlockQueue::pop_front`
// (per the comment there, constructing from this is faster than
// `Block::default()`). Must stay in sync with the `Default` impls above.
const DEFAULT_BLOCK: Block = Block {
    x: Coordinate { i: 0, k: 0 },
    y: Coordinate { i: 0, k: 0 },
    m: IntegerParameter(Interval::ENTIRE),
    n: IntegerParameter(Interval::ENTIRE),
    n_theta: IntegerParameter(Interval::ENTIRE),
    t: RealParameter(Interval::ENTIRE),
    next_dir_index: 0,
};
/// A queue that stores [`Block`]s.
///
/// Internally, [`Block`]s are entropy-coded so that if consecutive blocks have closer indices,
/// which is likely if they are sorted according to the Morton ordering, less memory is consumed.
pub struct BlockQueue {
    // The encoded byte stream that backs the queue.
    seq: VecDeque<u8>,
    // The set of variables whose components are (de)serialized per block.
    store_vars: VarSet,
    // Monotonic indices of the first and one-past-the-last block (see
    // `begin_index`/`end_index`).
    begin_index: usize,
    end_index: usize,
    // Delta-coding state: `*_front` is the last value decoded at the front,
    // `*_back` the last value encoded at the back, per variable.
    x_front: u64,
    x_back: u64,
    y_front: u64,
    y_back: u64,
    m_front: Interval,
    m_back: Interval,
    n_front: Interval,
    n_back: Interval,
    n_theta_front: Interval,
    n_theta_back: Interval,
    t_front: Interval,
    t_back: Interval,
}
impl BlockQueue {
    /// Creates an empty queue.
    ///
    /// Only the components named in `store_vars` are stored by
    /// [`Self::push_back`] and restored by [`Self::pop_front`]; the other
    /// components of a popped [`Block`] are left at their defaults.
    pub fn new(store_vars: VarSet) -> Self {
        Self {
            seq: VecDeque::new(),
            store_vars,
            begin_index: 0,
            end_index: 0,
            x_front: 0,
            x_back: 0,
            y_front: 0,
            y_back: 0,
            m_front: Interval::ENTIRE,
            m_back: Interval::ENTIRE,
            n_front: Interval::ENTIRE,
            n_back: Interval::ENTIRE,
            n_theta_front: Interval::ENTIRE,
            n_theta_back: Interval::ENTIRE,
            t_front: Interval::ENTIRE,
            t_back: Interval::ENTIRE,
        }
    }

    /// Returns the index of the first block in the queue.
    ///
    /// Initially, the index is zero, and is incremented by and only by calling to [`Self::pop_front`].
    /// Therefore, the index is tied to a block in the queue and never reused for another block.
    ///
    /// You can obtain the index of the block right **after** it is returned by [`Self::pop_front`],
    /// by `queue.begin_index() - 1`. Beware the off-by-one error.
    pub fn begin_index(&self) -> usize {
        self.begin_index
    }

    /// Returns the index of one past the last block in the queue.
    ///
    /// Initially, the index is zero, and is incremented by and only by calling to [`Self::push_back`].
    ///
    /// You can obtain the index of the block right **after** it is passed to [`Self::push_back`],
    /// by `queue.end_index() - 1`. Beware the off-by-one error.
    ///
    /// See also [`Self::begin_index`].
    pub fn end_index(&self) -> usize {
        self.end_index
    }

    /// Returns `true` if the queue is empty.
    pub fn is_empty(&self) -> bool {
        self.seq.is_empty()
    }

    /// Removes the first block from the queue and returns it.
    /// [`None`] is returned if the queue is empty.
    ///
    /// Decoding must mirror the field order used by [`Self::push_back`].
    pub fn pop_front(&mut self) -> Option<Block> {
        // This is (somehow) faster than Block::default().
        let mut b = DEFAULT_BLOCK;
        if self.store_vars.contains(VarSet::X) {
            // Coordinate indices are XOR-delta-coded against the previously
            // decoded front value.
            self.x_front ^= self.pop_small_u64()?;
            b.x = Coordinate {
                i: self.x_front,
                k: self.pop_i8()?,
            };
        }
        if self.store_vars.contains(VarSet::Y) {
            self.y_front ^= self.pop_small_u64()?;
            b.y = Coordinate {
                i: self.y_front,
                k: self.pop_i8()?,
            };
        }
        if self.store_vars.contains(VarSet::M) {
            self.m_front = self.pop_interval(self.m_front)?;
            b.m = IntegerParameter(self.m_front)
        }
        if self.store_vars.contains(VarSet::N) {
            self.n_front = self.pop_interval(self.n_front)?;
            b.n = IntegerParameter(self.n_front);
        }
        if self.store_vars.contains(VarSet::N_THETA) {
            self.n_theta_front = self.pop_interval(self.n_theta_front)?;
            b.n_theta = IntegerParameter(self.n_theta_front);
        }
        if self.store_vars.contains(VarSet::T) {
            self.t_front = self.pop_interval(self.t_front)?;
            b.t = RealParameter(self.t_front);
        }
        b.next_dir_index = self.pop_u8()?;
        self.begin_index += 1;
        Some(b)
    }

    /// Appends the block to the back of the queue.
    pub fn push_back(&mut self, b: Block) {
        if self.store_vars.contains(VarSet::X) {
            // XOR against the previous index so that nearby indices encode
            // to small values (which `push_small_u64` stores compactly).
            self.push_small_u64(b.x.i ^ self.x_back);
            self.push_i8(b.x.k);
            self.x_back = b.x.i;
        }
        if self.store_vars.contains(VarSet::Y) {
            self.push_small_u64(b.y.i ^ self.y_back);
            self.push_i8(b.y.k);
            self.y_back = b.y.i;
        }
        if self.store_vars.contains(VarSet::M) {
            let m = b.m.interval();
            self.push_interval(m, self.m_back);
            self.m_back = m;
        }
        if self.store_vars.contains(VarSet::N) {
            let n = b.n.interval();
            self.push_interval(n, self.n_back);
            self.n_back = n;
        }
        if self.store_vars.contains(VarSet::N_THETA) {
            let n_theta = b.n_theta.interval();
            self.push_interval(n_theta, self.n_theta_back);
            self.n_theta_back = n_theta;
        }
        if self.store_vars.contains(VarSet::T) {
            let t = b.t.interval();
            self.push_interval(t, self.t_back);
            self.t_back = t;
        }
        self.push_u8(b.next_dir_index);
        self.end_index += 1;
    }

    fn pop_i8(&mut self) -> Option<i8> {
        Some(self.seq.pop_front()? as i8)
    }

    // Decodes an interval written by `push_interval`. `front` is the
    // previously decoded value, returned when the "repeat" marker is found.
    //
    // NOTE(review): `drain(..2)` assumes at least 2 bytes are queued whenever
    // `seq` is nonempty here and would panic otherwise; `push_interval`
    // always writes at least 2 bytes, which upholds this — confirm no other
    // writer touches `seq`.
    fn pop_interval(&mut self, front: Interval) -> Option<Interval> {
        if self.seq.is_empty() {
            return None;
        }
        let mut bytes = [0u8; 16];
        for (src, dst) in self.seq.drain(..2).zip(bytes.iter_mut()) {
            *dst = src;
        }
        if (bytes[0], bytes[1]) != (0xff, 0xff) {
            // A full 16-byte big-endian interval: read the remaining 14 bytes.
            for (src, dst) in self.seq.drain(..14).zip(bytes.iter_mut().skip(2)) {
                *dst = src;
            }
            Some(Interval::try_from_be_bytes(bytes).unwrap())
        } else {
            // The 0xffff marker means "same as the previous interval".
            Some(front)
        }
    }

    // `u64` values up to 2^56 - 1 are encoded with PrefixVarint[1,2] so that smaller numbers take less space:
    //
    //    Range  `zeros`  Encoded bytes in `seq`
    //   ------  -------  ------------------------------------------------------------
    //                       6     0 -- Bit place in the original number
    //    < 2^7        0  [0bxxxxxxx1]
    //                       5    0   13      6
    //   < 2^14        1  [0bxxxxxx10, 0byyyyyyyy]
    //                       4   0   12      5   20     13
    //   < 2^21        2  [0bxxxxx100, 0byyyyyyyy, 0byyyyyyyy]
    //                       3  0   11      4   19     12   27     20
    //   < 2^28        3  [0bxxxx1000, 0byyyyyyyy, 0byyyyyyyy, 0byyyyyyyy]
    //                       2 0   10      3   18     11   26     19   34     27
    //   < 2^35        4  [0bxxx10000, 0byyyyyyyy, 0byyyyyyyy, 0byyyyyyyy, 0byyyyyyyy]
    //                       1┐┌0   9      2   17     10                  41     34
    //   < 2^42        5  [0bxx100000, 0byyyyyyyy, 0byyyyyyyy, «2 bytes», 0byyyyyyyy]
    //                        0   8      1   16      9                  48     41
    //   < 2^49        6  [0bx1000000, 0byyyyyyyy, 0byyyyyyyy, «3 bytes», 0byyyyyyyy]
    //                           7      0   15      8                  55     48
    //   < 2^56        7  [0b10000000, 0byyyyyyyy, 0byyyyyyyy, «4 bytes», 0byyyyyyyy]
    //                     |  -----------------------v----------------------
    //                     |  Padded zeros to the right, these bytes can be
    //                     |  interpreted as a `u64` value in little endian.
    //                     The number of trailing zeros in the first byte.
    //
    // [1]: https://github.com/stoklund/varint#prefixvarint
    // [2]: https://news.ycombinator.com/item?id=11263667
    fn pop_small_u64(&mut self) -> Option<u64> {
        let head = self.seq.pop_front()?;
        let zeros = head.trailing_zeros();
        let tail_len = zeros as usize;
        // The tail may straddle the deque's two internal slices.
        let (tail1, tail2) = {
            let (mut t1, mut t2) = self.seq.as_slices();
            t1 = &t1[..tail_len.min(t1.len())];
            t2 = &t2[..(tail_len - t1.len())];
            (t1, t2)
        };
        // Shift twice to avoid overflow by `head >> 8`.
        let x = ((head >> zeros) >> 1) as u64;
        let y = {
            let mut y = 0u64;
            let y_ptr = &mut y as *mut u64 as *mut u8;
            // SAFETY: `tail1.len() + tail2.len() == tail_len <= 7` for any
            // head byte produced by `push_small_u64` (which always sets a
            // marker bit), so at most 7 bytes are copied into the 8-byte `y`,
            // and the source slices are valid for their lengths.
            unsafe {
                copy_nonoverlapping(tail1.as_ptr(), y_ptr, tail1.len());
                copy_nonoverlapping(tail2.as_ptr(), y_ptr.add(tail1.len()), tail2.len());
            }
            y = u64::from_le(y);
            y << (7 - zeros)
        };
        self.seq.drain(..tail_len);
        Some(x | y)
    }

    fn pop_u8(&mut self) -> Option<u8> {
        self.seq.pop_front()
    }

    fn push_i8(&mut self, x: i8) {
        self.seq.push_back(x as u8);
    }

    // Encodes an interval; an unchanged value is stored as the 2-byte marker
    // 0xffff instead of the full 16 bytes.
    fn push_interval(&mut self, x: Interval, back: Interval) {
        if x != back {
            self.seq.extend(x.to_be_bytes());
        } else {
            // A `f64` datum that starts with 0xffff is NaN, which never appears in interval bounds.
            self.seq.extend([0xff, 0xff]);
        }
    }

    // Encodes `x` with the PrefixVarint scheme documented above
    // `pop_small_u64`. Panics if `x >= 2^56`.
    fn push_small_u64(&mut self, x: u64) {
        assert!(x <= 0xffffffffffffff);
        let zeros = match x {
            0..=0x7f => 0,
            0x80..=0x3fff => 1,
            0x4000..=0x1fffff => 2,
            0x200000..=0xfffffff => 3,
            0x10000000..=0x7ffffffff => 4,
            0x800000000..=0x3ffffffffff => 5,
            0x40000000000..=0x1ffffffffffff => 6,
            0x2000000000000..=0xffffffffffffff => 7,
            _ => unreachable!(),
        };
        self.seq.push_back((((x << 1) | 0x1) << zeros) as u8);
        let y = x >> (7 - zeros);
        let tail_len = zeros;
        self.seq.extend(y.to_le_bytes()[..tail_len].iter());
    }

    fn push_u8(&mut self, x: u8) {
        self.seq.push_back(x)
    }
}
impl Extend<Block> for BlockQueue {
    /// Appends every block yielded by `iter` to the back of the queue.
    fn extend<T: IntoIterator<Item = Block>>(&mut self, iter: T) {
        iter.into_iter().for_each(|block| self.push_back(block));
    }
}
impl BytesAllocated for BlockQueue {
    // Delegates to the backing byte deque; the per-variable scalar fields
    // are inline and allocate nothing.
    fn bytes_allocated(&self) -> usize {
        self.seq.bytes_allocated()
    }
}
#[cfg(test)]
mod tests {
use super::*;
use inari::const_interval;
#[test]
fn coordinate() {
let x = Coordinate::new(42, 3);
assert!(!x.is_subpixel());
assert!(x.is_superpixel());
assert_eq!(x.pixel(), Coordinate::new(336, 0));
assert_eq!(x.pixel_index(), 336);
assert_eq!(
x.subdivide(),
[Coordinate::new(84, 2), Coordinate::new(85, 2)]
);
assert_eq!(x.width(), 8);
assert_eq!(x.widthf(), 8.0);
let x = Coordinate::new(42, 0);
assert!(!x.is_subpixel());
assert!(!x.is_superpixel());
assert_eq!(x.pixel(), x);
assert_eq!(x.pixel_align(), 1);
assert_eq!(x.pixel_index(), 42);
assert_eq!(
x.subdivide(),
[Coordinate::new(84, -1), Coordinate::new(85, -1)]
);
assert_eq!(x.width(), 1);
assert_eq!(x.widthf(), 1.0);
let x = Coordinate::new(42, -3);
assert!(x.is_subpixel());
assert!(!x.is_superpixel());
assert_eq!(x.pixel(), Coordinate::new(5, 0));
assert_eq!(x.pixel_align(), 8);
assert_eq!(x.pixel_index(), 5);
assert_eq!(
x.subdivide(),
[Coordinate::new(84, -4), Coordinate::new(85, -4)]
);
assert_eq!(x.widthf(), 0.125);
}
#[test]
fn integer_parameter() {
fn test(x: Interval, ys: Vec<Interval>) {
let n = IntegerParameter::new(x);
assert_eq!(
n.subdivide1(),
ys.iter()
.copied()
.map(IntegerParameter::new)
.collect::<SmallVec<[_; 2]>>()
);
let n = IntegerParameter::new(-x);
assert_eq!(
n.subdivide1(),
ys.into_iter()
.map(|y| IntegerParameter::new(-y))
.rev()
.collect::<SmallVec<[_; 2]>>()
);
}
test(
Interval::ENTIRE,
vec![
const_interval!(f64::NEG_INFINITY, 0.0),
const_interval!(0.0, 0.0),
const_interval!(0.0, f64::INFINITY),
],
);
test(
const_interval!(0.0, f64::INFINITY),
vec![
const_interval!(1.0, 1.0),
const_interval!(1.0, f64::INFINITY),
],
);
test(
const_interval!(1.0, f64::INFINITY),
vec![
const_interval!(2.0, 2.0),
const_interval!(2.0, f64::INFINITY),
],
);
test(
const_interval!(2.0, f64::INFINITY),
vec![
const_interval!(3.0, 3.0),
const_interval!(4.0, 4.0),
const_interval!(4.0, f64::INFINITY),
],
);
test(
const_interval!(4.0, f64::INFINITY),
vec![
const_interval!(4.0, 8.0),
const_interval!(8.0, 8.0),
const_interval!(8.0, f64::INFINITY),
],
);
test(const_interval!(0.0, 2.0), vec![const_interval!(1.0, 1.0)]);
test(
const_interval!(0.0, 3.0),
vec![const_interval!(1.0, 1.0), const_interval!(2.0, 2.0)],
);
test(
const_interval!(0.0, 4.0),
vec![
const_interval!(1.0, 1.0),
const_interval!(2.0, 2.0),
const_interval!(3.0, 3.0),
],
);
}
#[test]
fn real_parameter() {
fn test(x: Interval, ys: Vec<Interval>) {
let n = RealParameter::new(x);
assert_eq!(
n.subdivide(),
ys.iter()
.copied()
.map(RealParameter::new)
.collect::<SmallVec<[_; 3]>>()
);
let n = RealParameter::new(-x);
assert_eq!(
n.subdivide(),
ys.into_iter()
.map(|y| RealParameter::new(-y))
.rev()
.collect::<SmallVec<[_; 3]>>()
);
}
test(
Interval::ENTIRE,
vec![
const_interval!(f64::NEG_INFINITY, 0.0),
const_interval!(0.0, f64::INFINITY),
],
);
test(
const_interval!(0.0, f64::INFINITY),
vec![
const_interval!(0.0, 1.0),
const_interval!(1.0, f64::INFINITY),
],
);
test(
const_interval!(2.0, f64::INFINITY),
vec![
const_interval!(2.0, 4.0),
const_interval!(4.0, f64::INFINITY),
],
);
test(
const_interval!(2.0, 3.0),
vec![const_interval!(2.0, 2.5), const_interval!(2.5, 3.0)],
);
}
#[test]
fn block_queue() {
let mut queue = BlockQueue::new(VarSet::X | VarSet::Y);
let blocks = [
Block {
x: Coordinate::new(0, -32),
y: Coordinate::new(0xffffffffffffff, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x7f, 64),
y: Coordinate::new(0x2000000000000, 127),
..Default::default()
},
Block {
x: Coordinate::new(0x80, 0),
y: Coordinate::new(0x1ffffffffffff, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x3fff, 0),
y: Coordinate::new(0x40000000000, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x4000, 0),
y: Coordinate::new(0x3ffffffffff, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x1fffff, 0),
y: Coordinate::new(0x800000000, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x200000, 0),
y: Coordinate::new(0x7ffffffff, 0),
..Default::default()
},
Block {
x: Coordinate::new(0xfffffff, 0),
y: Coordinate::new(0x10000000, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x10000000, 0),
y: Coordinate::new(0xfffffff, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x7ffffffff, 0),
y: Coordinate::new(0x200000, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x800000000, 0),
y: Coordinate::new(0x1fffff, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x3ffffffffff, 0),
y: Coordinate::new(0x4000, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x40000000000, 0),
y: Coordinate::new(0x3fff, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x1ffffffffffff, 0),
y: Coordinate::new(0x80, 0),
..Default::default()
},
Block {
x: Coordinate::new(0x2000000000000, 0),
y: Coordinate::new(0x7f, 0),
..Default::default()
},
Block {
x: Coordinate::new(0xffffffffffffff, 0),
y: Coordinate::new(0, 0),
..Default::default()
},
];
assert_eq!(queue.begin_index(), 0);
assert_eq!(queue.end_index(), 0);
for (i, b) in blocks.iter().cloned().enumerate() {
queue.push_back(b);
assert_eq!(queue.begin_index(), 0);
assert_eq!(queue.end_index(), i + 1);
}
for (i, b) in blocks.iter().cloned().enumerate() {
assert_eq!(queue.pop_front(), Some(b));
assert_eq!(queue.begin_index(), i + 1);
assert_eq!(queue.end_index(), blocks.len());
}
assert_eq!(queue.pop_front(), None);
assert_eq!(queue.begin_index(), blocks.len());
assert_eq!(queue.end_index(), blocks.len());
fn test(mut queue: BlockQueue, b: Block) {
queue.push_back(b.clone());
queue.push_back(b.clone());
assert_eq!(queue.pop_front(), Some(b.clone()));
assert_eq!(queue.pop_front(), Some(b));
}
test(
BlockQueue::new(VarSet::M),
Block {
m: IntegerParameter::new(const_interval!(-2.0, 3.0)),
..Default::default()
},
);
test(
BlockQueue::new(VarSet::N),
Block {
n: IntegerParameter::new(const_interval!(-2.0, 3.0)),
..Default::default()
},
);
test(
BlockQueue::new(VarSet::N_THETA),
Block {
n_theta: IntegerParameter::new(const_interval!(-2.0, 3.0)),
..Default::default()
},
);
test(
BlockQueue::new(VarSet::T),
Block {
t: RealParameter::new(const_interval!(-2.0, 3.0)),
..Default::default()
},
);
}
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/eval_cache.rs | rust/src/eval_cache.rs | use crate::{
eval_result::{EvalArgs, EvalExplicitResult, EvalParametricResult, EvalResult},
interval_set::TupperIntervalSet,
ops::{OptionalValueStore, StaticTerm, StaticTermKind, StoreIndex},
traits::BytesAllocated,
vars::VarSet,
};
use inari::Interval;
use std::{collections::HashMap, hash::Hash};
/// A hash map whose key is a fixed-length tuple of `K`s, with the length
/// (0 to 6) chosen at runtime.
enum MultiKeyHashMap<K, V> {
    /// Zero keys: degenerates into a single optional value.
    Zero(Option<V>),
    One(HashMap<[K; 1], V>),
    Two(HashMap<[K; 2], V>),
    Three(HashMap<[K; 3], V>),
    Four(HashMap<[K; 4], V>),
    Five(HashMap<[K; 5], V>),
    Six(HashMap<[K; 6], V>),
}
impl<K, V> MultiKeyHashMap<K, V>
where
    K: Copy + Eq + Hash,
{
    /// Creates an empty map keyed by `n` values.
    ///
    /// Panics if `n > 6`.
    fn new(n: usize) -> Self {
        match n {
            0 => MultiKeyHashMap::Zero(None),
            1 => MultiKeyHashMap::One(HashMap::new()),
            2 => MultiKeyHashMap::Two(HashMap::new()),
            3 => MultiKeyHashMap::Three(HashMap::new()),
            4 => MultiKeyHashMap::Four(HashMap::new()),
            5 => MultiKeyHashMap::Five(HashMap::new()),
            6 => MultiKeyHashMap::Six(HashMap::new()),
            _ => panic!(),
        }
    }

    /// Returns the value for the key `k`, first inserting the result of `f`
    /// if the key is not present.
    ///
    /// Panics if `k.len()` differs from the arity chosen at construction
    /// (the slice-to-array conversion fails).
    fn get_or_insert_with<F>(&mut self, k: &[K], f: F) -> &V
    where
        F: FnOnce() -> V,
    {
        match self {
            // With zero keys there is a single slot; `k` is ignored.
            MultiKeyHashMap::Zero(m) => m.get_or_insert_with(f),
            MultiKeyHashMap::One(m) => m.entry(k.try_into().unwrap()).or_insert_with(f),
            MultiKeyHashMap::Two(m) => m.entry(k.try_into().unwrap()).or_insert_with(f),
            MultiKeyHashMap::Three(m) => m.entry(k.try_into().unwrap()).or_insert_with(f),
            MultiKeyHashMap::Four(m) => m.entry(k.try_into().unwrap()).or_insert_with(f),
            MultiKeyHashMap::Five(m) => m.entry(k.try_into().unwrap()).or_insert_with(f),
            MultiKeyHashMap::Six(m) => m.entry(k.try_into().unwrap()).or_insert_with(f),
        }
    }
}
impl<K, V> BytesAllocated for MultiKeyHashMap<K, V> {
    fn bytes_allocated(&self) -> usize {
        // Only the table's own allocation is counted; memory owned by the
        // values is tracked separately by the cache that wraps this map.
        match self {
            Self::Zero(_) => 0,
            Self::One(m) => m.bytes_allocated(),
            Self::Two(m) => m.bytes_allocated(),
            Self::Three(m) => m.bytes_allocated(),
            Self::Four(m) => m.bytes_allocated(),
            Self::Five(m) => m.bytes_allocated(),
            Self::Six(m) => m.bytes_allocated(),
        }
    }
}
/// How aggressively evaluation results are memoized.
///
/// The variant order matters: levels are compared with `<`/`>` (via the
/// derived `Ord`), from weakest (`None`) to strongest (`Full`).
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub enum EvalCacheLevel {
    /// No memoization.
    None,
    /// Memoize only the values of univariate subterms.
    Univariate,
    /// Memoize the complete result for every argument tuple.
    Full,
}
/// Memoizes complete evaluation results, keyed by the argument tuple.
pub struct FullCache<T: BytesAllocated> {
    cache_level: EvalCacheLevel,
    // Number of variables, i.e., the arity of the cache keys.
    n_vars: usize,
    // The memoization table; consulted only at `EvalCacheLevel::Full`.
    c: MultiKeyHashMap<Interval, T>,
    // Holds the most recent value when full caching is disabled, so that
    // `get_or_insert_with` can still hand out a reference.
    last_value: Option<T>,
    // Total heap memory owned by the cached values, in bytes.
    bytes_allocated_by_values: usize,
}
impl<T: BytesAllocated> FullCache<T> {
    /// Creates an empty cache for a relation over `vars`.
    fn new(cache_level: EvalCacheLevel, vars: VarSet) -> Self {
        let n_vars = vars.len();
        Self {
            cache_level,
            n_vars,
            c: MultiKeyHashMap::new(n_vars),
            last_value: None,
            bytes_allocated_by_values: 0,
        }
    }

    /// Drops all cached entries and resets the memory counter.
    fn clear(&mut self) {
        self.c = MultiKeyHashMap::new(self.n_vars);
        self.bytes_allocated_by_values = 0;
    }

    /// Returns the cached value for `args`, computing and storing it with
    /// `f` on a miss. Below `EvalCacheLevel::Full`, the value is always
    /// recomputed and only kept until the next call.
    pub fn get_or_insert_with<F>(&mut self, args: &EvalArgs, f: F) -> &T
    where
        F: FnOnce() -> T,
    {
        match self.cache_level {
            EvalCacheLevel::Full => self.c.get_or_insert_with(args, || {
                let v = f();
                self.bytes_allocated_by_values += v.bytes_allocated();
                v
            }),
            _ => self.last_value.insert(f()),
        }
    }
}
impl<T: BytesAllocated> BytesAllocated for FullCache<T> {
    fn bytes_allocated(&self) -> usize {
        // Table overhead plus the heap memory owned by the cached values.
        let table = self.c.bytes_allocated();
        table + self.bytes_allocated_by_values
    }
}
/// Returns the store indices of the *maximal* terms over exactly `vars`:
/// terms whose variable set equals `vars` and that are not used as an
/// argument by another such term.
///
/// NOTE(review): assumes `terms` is in evaluation order, i.e., a term's
/// arguments appear before the term itself — TODO confirm against the
/// producer of `StaticTerm` lists.
fn maximal_term_indices(vars: VarSet, terms: &[StaticTerm]) -> Vec<StoreIndex> {
    use StaticTermKind::*;
    let mut maximal = vec![false; terms.len()];
    for (i, term) in terms.iter().enumerate() {
        if term.vars != vars {
            continue;
        }
        // Collect the argument indices of this term; a constant or variable
        // has none and is never considered maximal.
        let children: Vec<StoreIndex> = match &term.kind {
            Unary(_, k) | Pown(k, _) | Rootn(k, _) => vec![*k],
            Binary(_, k, l) => vec![*k, *l],
            Ternary(_, k, l, m) => vec![*k, *l, *m],
            RankedMinMax(_, ks, l) => {
                ks.iter().copied().chain(std::iter::once(*l)).collect()
            }
            Constant(_) | Var(_, _) => continue,
        };
        // A term consumed by this one can no longer be maximal.
        for child in children {
            maximal[child.get()] = false;
        }
        maximal[i] = true;
    }
    maximal
        .into_iter()
        .enumerate()
        .filter_map(|(i, m)| if m { Some(StoreIndex::new(i)) } else { None })
        .collect()
}
/// Memoizes the values of the maximal terms over a fixed set of `N`
/// variables, keyed by the values of those variables.
pub struct MaximalTermCache<const N: usize> {
    // For each of the N key slots, the position of the variable in `EvalArgs`.
    arg_indices: [usize; N],
    // Cached term values, parallel to `term_indices`; `None` = not cached yet.
    cache: HashMap<[Interval; N], Vec<Option<TupperIntervalSet>>>,
    // Store indices of the cached (maximal) terms.
    term_indices: Vec<StoreIndex>,
    // Total heap memory owned by the cached values, in bytes.
    bytes_allocated_by_values: usize,
}
impl<const N: usize> MaximalTermCache<N> {
    /// Creates an empty cache for the terms `term_indices`, keyed by the
    /// arguments at `arg_indices`.
    fn new(arg_indices: [usize; N], term_indices: Vec<StoreIndex>) -> Self {
        Self {
            arg_indices,
            cache: HashMap::new(),
            term_indices,
            bytes_allocated_by_values: 0,
        }
    }

    /// Clears the cache and releases the allocated memory.
    fn clear(&mut self) {
        self.cache = HashMap::new();
        self.bytes_allocated_by_values = 0;
    }

    /// Copies every cached term value for the key derived from `args` into
    /// `store`; does nothing on a cache miss.
    pub fn restore(&self, args: &EvalArgs, store: &mut OptionalValueStore<TupperIntervalSet>) {
        let mut key = [Interval::EMPTY; N];
        for (i, &arg_index) in self.arg_indices.iter().enumerate() {
            key[i] = args[arg_index];
        }
        if let Some(vs) = self.cache.get(&key) {
            for (i, v) in self.term_indices.iter().zip(vs.iter()) {
                if let Some(v) = v {
                    store.insert(*i, v.clone());
                }
            }
        }
    }

    /// Records the term values present in `store` under the key derived
    /// from `args`, filling only slots that are still empty so earlier
    /// entries (and their byte accounting) are never overwritten.
    pub fn store(&mut self, args: &EvalArgs, store: &OptionalValueStore<TupperIntervalSet>) {
        let mut key = [Interval::EMPTY; N];
        for (i, &arg_index) in self.arg_indices.iter().enumerate() {
            key[i] = args[arg_index];
        }
        if let Some(vs) = self.cache.get_mut(&key) {
            for (i, v) in self.term_indices.iter().zip(vs.iter_mut()) {
                if v.is_none() {
                    *v = store.get(*i).cloned();
                    self.bytes_allocated_by_values +=
                        v.iter().map(|xs| xs.bytes_allocated()).sum::<usize>();
                }
            }
        } else {
            let mut vs = vec![None; self.term_indices.len()];
            for (i, v) in self.term_indices.iter().zip(vs.iter_mut()) {
                *v = store.get(*i).cloned();
                self.bytes_allocated_by_values +=
                    v.iter().map(|xs| xs.bytes_allocated()).sum::<usize>();
            }
            // Also count the `Vec` that holds the slots themselves.
            self.bytes_allocated_by_values += vs.bytes_allocated();
            self.cache.insert(key, vs);
        }
    }
}
impl<const N: usize> BytesAllocated for MaximalTermCache<N> {
    fn bytes_allocated(&self) -> usize {
        // Table overhead plus the heap memory owned by the cached values.
        let table = self.cache.bytes_allocated();
        table + self.bytes_allocated_by_values
    }
}
/// A cache for memoizing evaluation of a relation.
pub struct EvalCache<T: BytesAllocated> {
    /// Whole-result cache, keyed by the complete argument tuple.
    pub full: FullCache<T>,
    /// One cache of maximal univariate terms per univariate variable set.
    pub univariate: Vec<MaximalTermCache<1>>,
    level: EvalCacheLevel,
    // Whether `setup` has already populated `univariate`.
    initialized: bool,
}
impl<T: BytesAllocated> EvalCache<T> {
    /// Creates a cache with no univariate term caches yet;
    /// call [`Self::setup`] before use.
    pub fn new(cache_level: EvalCacheLevel, vars: VarSet) -> Self {
        Self {
            full: FullCache::new(cache_level, vars),
            univariate: vec![],
            level: cache_level,
            initialized: false,
        }
    }

    /// Builds the per-variable caches of maximal univariate terms.
    /// A no-op after the first call, or when the cache level rules out
    /// univariate caching.
    pub fn setup(&mut self, terms: &[StaticTerm], vars_ordered: &[VarSet]) {
        if self.initialized || self.level < EvalCacheLevel::Univariate {
            return;
        }
        // Collect the distinct single-variable sets that occur in `terms`.
        let mut univariate_vars = terms
            .iter()
            .filter(|t| t.vars.len() == 1)
            .map(|t| t.vars)
            .collect::<Vec<_>>();
        univariate_vars.sort();
        univariate_vars.dedup();
        self.univariate = univariate_vars
            .into_iter()
            .filter_map(|vars| {
                let indices = maximal_term_indices(vars, terms);
                if indices.is_empty() {
                    None
                } else {
                    let arg_index = vars_ordered.iter().position(|&v| v == vars).unwrap();
                    Some(MaximalTermCache::new([arg_index], indices))
                }
            })
            .collect();
        self.initialized = true;
    }

    /// Clears all caches and releases the allocated memory.
    pub fn clear(&mut self) {
        self.full.clear();
        for cache in &mut self.univariate {
            cache.clear();
        }
    }
}
impl<T: BytesAllocated> BytesAllocated for EvalCache<T> {
    fn bytes_allocated(&self) -> usize {
        // The full cache plus every univariate term cache.
        self.univariate
            .iter()
            .fold(self.full.bytes_allocated(), |acc, c| {
                acc + c.bytes_allocated()
            })
    }
}
/// [`EvalCache`] specialized to the result type of explicit relations.
pub type EvalExplicitCache = EvalCache<EvalExplicitResult>;
/// [`EvalCache`] specialized to the result type of implicit relations.
pub type EvalImplicitCache = EvalCache<EvalResult>;
/// [`EvalCache`] specialized to the result type of parametric relations.
pub type EvalParametricCache = EvalCache<EvalParametricResult>;
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/visit.rs | rust/src/visit.rs | use crate::{
ast::{BinaryOp, Expr, NaryOp, TernaryOp, UnaryOp, ValueType},
binary, bool_constant, constant,
context::{Context, Def, VarProps},
error,
interval_set::Site,
nary,
ops::{
FormIndex, RankedMinMaxOp, RelOp, ScalarBinaryOp, ScalarTernaryOp, ScalarUnaryOp,
StaticForm, StaticFormKind, StaticTerm, StaticTermKind, StoreIndex,
},
parse::parse_expr,
pown,
real::Real,
rootn, ternary, unary, uninit, var,
vars::{VarIndex, VarSet},
};
use inari::Decoration;
use rug::Rational;
use std::{
cmp::Ordering,
collections::{HashMap, HashSet},
hash::{Hash, Hasher},
iter,
marker::Sized,
mem::take,
ops::Deref,
};
/// A visitor that visits AST nodes in depth-first order.
pub trait Visit<'a>
where
    Self: Sized,
{
    /// Visits `e`. The default implementation recurses into the children;
    /// overriding implementations call [`traverse_expr`] themselves to keep
    /// the traversal going.
    fn visit_expr(&mut self, e: &'a Expr) {
        traverse_expr(self, e);
    }
}
/// Visits the direct children of `e` with `v`, in left-to-right order.
///
/// Panics if `e` is uninitialized.
#[allow(clippy::many_single_char_names)]
fn traverse_expr<'a, V: Visit<'a>>(v: &mut V, e: &'a Expr) {
    match e {
        // Nodes with a single child.
        unary!(_, x) | pown!(x, _) | rootn!(x, _) => v.visit_expr(x),
        binary!(_, x, y) => {
            v.visit_expr(x);
            v.visit_expr(y);
        }
        ternary!(_, x, y, z) => {
            v.visit_expr(x);
            v.visit_expr(y);
            v.visit_expr(z);
        }
        nary!(_, xs) => {
            for x in xs {
                v.visit_expr(x);
            }
        }
        // Leaves: nothing to recurse into.
        bool_constant!(_) | constant!(_) | var!(_) | error!() => (),
        uninit!() => panic!(),
    };
}
/// A visitor that visits AST nodes in depth-first order and possibly modifies them.
pub trait VisitMut
where
    Self: Sized,
{
    /// Visits and possibly rewrites `e`. The default implementation only
    /// recurses into the children; overriding implementations call
    /// [`traverse_expr_mut`] themselves to keep the traversal going.
    fn visit_expr_mut(&mut self, e: &mut Expr) {
        traverse_expr_mut(self, e);
    }
}
/// Visits the direct children of `e` with `v`, in left-to-right order,
/// allowing the visitor to modify them.
///
/// Panics if `e` is uninitialized.
#[allow(clippy::many_single_char_names)]
fn traverse_expr_mut<V: VisitMut>(v: &mut V, e: &mut Expr) {
    match e {
        // Nodes with a single child.
        unary!(_, x) | pown!(x, _) | rootn!(x, _) => v.visit_expr_mut(x),
        binary!(_, x, y) => {
            v.visit_expr_mut(x);
            v.visit_expr_mut(y);
        }
        ternary!(_, x, y, z) => {
            v.visit_expr_mut(x);
            v.visit_expr_mut(y);
            v.visit_expr_mut(z);
        }
        nary!(_, xs) => {
            for x in xs {
                v.visit_expr_mut(x);
            }
        }
        // Leaves: nothing to recurse into.
        bool_constant!(_) | constant!(_) | var!(_) | error!() => (),
        uninit!() => panic!(),
    };
}
/// A possibly dangling reference to a value.
/// All operations except `from` and `clone` are **unsafe** despite not being marked as so.
struct UnsafeRef<T: Eq + Hash> {
    // Raw pointer to the referent; validity is the caller's responsibility.
    ptr: *const T,
}

impl<T: Eq + Hash> UnsafeRef<T> {
    /// Wraps a reference. The caller must keep the referent alive and
    /// unmoved for as long as the `UnsafeRef` is dereferenced, compared,
    /// or hashed.
    fn from(x: &T) -> Self {
        Self { ptr: x as *const T }
    }
}

impl<T: Eq + Hash> Clone for UnsafeRef<T> {
    fn clone(&self) -> Self {
        *self
    }
}

impl<T: Eq + Hash> Copy for UnsafeRef<T> {}

impl<T: Eq + Hash> Deref for UnsafeRef<T> {
    type Target = T;

    fn deref(&self) -> &Self::Target {
        // SAFETY: sound only under the caller contract stated on the type:
        // `ptr` must still point to a live `T`.
        unsafe { &*self.ptr }
    }
}

impl<T: Eq + Hash> PartialEq for UnsafeRef<T> {
    fn eq(&self, rhs: &Self) -> bool {
        // SAFETY: both pointers must still be valid (caller contract).
        unsafe { *self.ptr == *rhs.ptr }
    }
}

impl<T: Eq + Hash> Eq for UnsafeRef<T> {}

impl<T: Eq + Hash> Hash for UnsafeRef<T> {
    fn hash<H: Hasher>(&self, state: &mut H) {
        // SAFETY: `ptr` must still be valid (caller contract).
        unsafe { (*self.ptr).hash(state) }
    }
}
/// Replaces the name of each [`ExprKind::Var`] that matches `params[i]` with `"#i"`.
///
/// [`ExprKind::Var`]: crate::ast::ExprKind::Var
pub struct Parametrize<'a> {
params: &'a [&'a str],
}
impl<'a> Parametrize<'a> {
pub fn new(params: &'a [&'a str]) -> Self {
Self { params }
}
}
impl<'a> VisitMut for Parametrize<'a> {
fn visit_expr_mut(&mut self, e: &mut Expr) {
traverse_expr_mut(self, e);
if let var!(x) = e {
if let Some(i) = self.params.iter().position(|p| p == x) {
*x = format!("#{}", i);
}
}
}
}
/// Replaces each [`ExprKind::Var`] with name `"#i"` with `args[i]`.
///
/// [`ExprKind::Var`]: crate::ast::ExprKind::Var
pub struct Substitute {
args: Vec<Expr>,
}
impl Substitute {
pub fn new(args: Vec<Expr>) -> Self {
Self { args }
}
}
impl VisitMut for Substitute {
fn visit_expr_mut(&mut self, e: &mut Expr) {
traverse_expr_mut(self, e);
if let var!(x) = e {
if let Some(x) = x.strip_prefix('#') {
if let Ok(i) = x.parse::<usize>() {
*e = self.args[i].clone()
}
}
}
}
}
/// Applies `rule` to every subexpression, bottom-up, replacing each node
/// for which the rule yields a replacement. `modified` records whether any
/// replacement happened.
pub struct ReplaceAll<Rule>
where
    Rule: Fn(&Expr) -> Option<Expr>,
{
    pub modified: bool,
    rule: Rule,
}

impl<Rule> ReplaceAll<Rule>
where
    Rule: Fn(&Expr) -> Option<Expr>,
{
    pub fn new(rule: Rule) -> Self {
        Self {
            modified: false,
            rule,
        }
    }
}

impl<Rule> VisitMut for ReplaceAll<Rule>
where
    Rule: Fn(&Expr) -> Option<Expr>,
{
    fn visit_expr_mut(&mut self, e: &mut Expr) {
        traverse_expr_mut(self, e);
        if let Some(new_e) = (self.rule)(e) {
            self.modified = true;
            *e = new_e;
        }
    }
}
/// Distributes [`UnaryOp::Not`] over [`NaryOp::AndN`] and [`NaryOp::OrN`],
/// and then eliminates double negations.
pub struct NormalizeNotExprs;

impl VisitMut for NormalizeNotExprs {
    fn visit_expr_mut(&mut self, e: &mut Expr) {
        use {NaryOp::*, UnaryOp::*};
        let mut modified = false;
        // Top-down rewriting pushes negations toward the leaves.
        if let unary!(Not, x) = e {
            match x {
                unary!(Not, x) => {
                    // (Not (Not x)) → x
                    *e = take(x);
                    modified = true;
                }
                nary!(AndN, xs) => {
                    // (Not (AndN x1 x2 …)) → (OrN (Not x1) (Not x2) …)
                    *e = Expr::nary(OrN, xs.drain(..).map(|x| Expr::unary(Not, x)).collect());
                    modified = true;
                }
                nary!(OrN, xs) => {
                    // (Not (OrN x1 x2 …)) → (AndN (Not x1) (Not x2) …)
                    *e = Expr::nary(AndN, xs.drain(..).map(|x| Expr::unary(Not, x)).collect());
                    modified = true;
                }
                _ => (),
            }
        }
        if modified {
            // `e` has not been visited yet.
            // A rewrite may have created new `Not` nodes (or exposed old
            // ones), so start over at this node.
            self.visit_expr_mut(e);
        } else {
            traverse_expr_mut(self, e);
        }
    }
}
/// Does the following tasks:
///
/// - Replace arithmetic expressions that contain [`UnaryOp::Neg`], [`UnaryOp::Sqrt`],
/// [`BinaryOp::Add`], [`BinaryOp::Div`], [`BinaryOp::Mul`] or [`BinaryOp::Sub`] with their equivalents
/// with [`BinaryOp::Pow`], [`NaryOp::Plus`] and [`NaryOp::Times`].
/// - Replace [`BinaryOp::And`], [`BinaryOp::Max`], [`BinaryOp::Min`], and [`BinaryOp::Or`]
/// with their n-ary counterparts.
/// - Do some ad-hoc transformations, mainly for demonstrational purposes.
pub struct PreTransform;

impl VisitMut for PreTransform {
    fn visit_expr_mut(&mut self, e: &mut Expr) {
        use {BinaryOp::*, NaryOp::*, UnaryOp::*};
        // Bottom-up: children are normalized before their parent, so the
        // ad-hoc `sin(x)/x` patterns below see already-rewritten operands.
        traverse_expr_mut(self, e);
        match e {
            unary!(Neg, x) => {
                // (Neg x) → (Times -1 x)
                *e = Expr::nary(Times, vec![Expr::minus_one(), take(x)]);
            }
            unary!(Sqrt, x) => {
                // (Sqrt x) → (Pow x 1/2)
                *e = Expr::binary(Pow, take(x), Expr::one_half());
            }
            binary!(op @ (Add | And | Max | Min | Or | Mul), x, y) => {
                let nary_op = match op {
                    Add => Plus,
                    And => AndN,
                    Max => MaxN,
                    Min => MinN,
                    Or => OrN,
                    Mul => Times,
                    _ => unreachable!(),
                };
                // (op x y) → (nary-op x y)
                *e = Expr::nary(nary_op, vec![take(x), take(y)]);
            }
            binary!(Div, unary!(Sin, x), y) if x == y => {
                // Ad-hoc.
                // (Div (Sin x) x) → (Sinc (UndefAt0 x))
                *e = Expr::unary(Sinc, Expr::unary(UndefAt0, take(x)));
            }
            binary!(Div, x, unary!(Sin, y)) if y == x => {
                // Ad-hoc.
                // (Div x (Sin x)) → (Pow (Sinc (UndefAt0 x)) -1)
                *e = Expr::binary(
                    Pow,
                    Expr::unary(Sinc, Expr::unary(UndefAt0, take(x))),
                    Expr::minus_one(),
                );
            }
            binary!(Div, x, y) => {
                // (Div x y) → (Times x (Pow y -1))
                *e = Expr::nary(
                    Times,
                    vec![take(x), Expr::binary(Pow, take(y), Expr::minus_one())],
                );
            }
            binary!(Sub, x, y) => {
                // (Sub x y) → (Plus x (Times -1 y))
                *e = Expr::nary(
                    Plus,
                    vec![take(x), Expr::nary(Times, vec![Expr::minus_one(), take(y)])],
                );
            }
            _ => (),
        }
    }
}
/// Rewrites complex-valued operations into expressions over the real and
/// imaginary parts of their operands.
///
/// Precondition: [`PreTransform`] and then [`UpdateMetadata`] have been applied
/// to the expression and it has not been modified since then.
struct ExpandComplexFunctions {
    // Context that declares the real variables "a" and "b" used when
    // parsing the expansion templates.
    ctx: Context,
    // Expansion templates for binary operations; placeholders "#0".."#3"
    // stand for the re/im parts of the first and second operand.
    binary_ops: HashMap<BinaryOp, Expr>,
    // Expansion templates for unary operations; placeholders "#0" and "#1"
    // stand for the re/im parts of the operand.
    unary_ops: HashMap<UnaryOp, Expr>,
}
impl ExpandComplexFunctions {
    /// Creates an expander with no templates registered; the total real
    /// variables `a` and `b` are declared for parsing template bodies.
    fn new() -> Self {
        Self {
            ctx: Context::new()
                .def(
                    "a",
                    Def::var(
                        "a",
                        VarProps {
                            totally_defined: true,
                            ty: ValueType::Real,
                            ..Default::default()
                        },
                    ),
                )
                .def(
                    "b",
                    Def::var(
                        "b",
                        VarProps {
                            totally_defined: true,
                            ty: ValueType::Real,
                            ..Default::default()
                        },
                    ),
                ),
            binary_ops: HashMap::new(),
            unary_ops: HashMap::new(),
        }
    }

    /// Registers the expansion of the binary operation `op`, where
    /// `a + i b` is the first operand and `x + i y` the second.
    fn def_binary(&mut self, op: BinaryOp, body: &str) {
        let e = self.make_def(&["a", "b", "x", "y"], body);
        self.binary_ops.insert(op, e);
    }

    /// Registers the expansion of the unary operation `op`, whose operand
    /// is `x + i y`.
    fn def_unary(&mut self, op: UnaryOp, body: &str) {
        let e = self.make_def(&["x", "y"], body);
        self.unary_ops.insert(op, e);
    }

    /// Parses `body`, runs it through the same lowering passes as user
    /// input — including this expander itself, so a template may use
    /// complex operations registered earlier — and parametrizes the result
    /// over `params` (turning them into "#i" placeholders).
    fn make_def(&mut self, params: &[&str], body: &str) -> Expr {
        let mut e = parse_expr(body, &[Context::builtin(), &self.ctx]).unwrap();
        PreTransform.visit_expr_mut(&mut e);
        NormalizeRelationalExprs.visit_expr_mut(&mut e);
        ExpandBoole.visit_expr_mut(&mut e);
        UpdateMetadata.visit_expr_mut(&mut e);
        self.visit_expr_mut(&mut e);
        simplify(&mut e);
        Parametrize::new(params).visit_expr_mut(&mut e);
        e
    }
}
impl Default for ExpandComplexFunctions {
    /// Builds the full table of expansion templates, written in terms of
    /// the operand parts `x + i y` (and `a + i b` for the left operand of
    /// binary operations).
    fn default() -> Self {
        use {BinaryOp::*, UnaryOp::*};
        let mut v = Self::new();
        // Some of the definitions may depend on previous ones.
        v.def_unary(Abs, "sqrt(x^2 + y^2)");
        v.def_unary(Cos, "cos(x) cosh(y) - i sin(x) sinh(y)");
        v.def_unary(Cosh, "cosh(x) cos(y) + i sinh(x) sin(y)");
        v.def_unary(Exp, "exp(x) cos(y) + i exp(x) sin(y)");
        v.def_unary(Ln, "1/2 ln(x^2 + y^2) + i atan2(y, x)");
        v.def_binary(
            Pow,
            "if(a = 0 ∧ b = 0 ∧ x > 0, 0, exp((x + i y) ln(a + i b)))",
        );
        v.def_unary(Recip, "x / (x^2 + y^2) - i y / (x^2 + y^2)");
        v.def_unary(Sin, "sin(x) cosh(y) + i cos(x) sinh(y)");
        v.def_unary(Sinh, "sinh(x) cos(y) + i cosh(x) sin(y)");
        v.def_unary(Sqr, "x^2 - y^2 + 2 i x y");
        v.def_unary(Tan, "sin(x + i y) / cos(x + i y)");
        v.def_unary(Tanh, "sinh(x + i y) / cosh(x + i y)");
        // http://functions.wolfram.com/01.13.02.0001.01
        v.def_unary(Acos, "π/2 + i ln(i (x + i y) + sqrt(1 - (x + i y)^2))");
        // http://functions.wolfram.com/01.26.02.0001.01
        v.def_unary(
            Acosh,
            "ln((x + i y) + sqrt((x + i y) - 1) sqrt((x + i y) + 1))",
        );
        // http://functions.wolfram.com/01.12.02.0001.01
        v.def_unary(Asin, "-i ln(i (x + i y) + sqrt(1 - (x + i y)^2))");
        // http://functions.wolfram.com/01.25.02.0001.01
        v.def_unary(Asinh, "ln((x + i y) + sqrt((x + i y)^2 + 1))");
        // http://functions.wolfram.com/01.14.02.0001.01
        v.def_unary(Atan, "i/2 (ln(1 - i (x + i y)) - ln(1 + i (x + i y)))");
        // http://functions.wolfram.com/01.27.02.0001.01
        v.def_unary(Atanh, "1/2 (ln(1 + (x + i y)) - ln(1 - (x + i y)))");
        v.def_binary(Log, "ln(x + i y) / ln(a + i b)");
        v
    }
}
impl VisitMut for ExpandComplexFunctions {
    fn visit_expr_mut(&mut self, e: &mut Expr) {
        use {
            BinaryOp::{Complex, *},
            NaryOp::*,
            TernaryOp::*,
            UnaryOp::*,
            ValueType::{Complex as ComplexT, *},
        };
        // Bottom-up: operands are expanded before the operation itself, so
        // complex operands below always appear as (Complex re im) pairs.
        traverse_expr_mut(self, e);
        match e {
            unary!(Arg, binary!(Complex, x, y)) => {
                // arg(x + i y) = atan2(y, x).
                *e = Expr::binary(Atan2, take(y), take(x));
            }
            unary!(Arg, x) => {
                assert_eq!(x.ty, Real);
                // arg(x) = atan2(0, x).
                *e = Expr::binary(Atan2, Expr::zero(), take(x));
            }
            unary!(Conj, binary!(Complex, x, y)) => {
                // conj(x + i y) = x - i y.
                *e = Expr::binary(
                    Complex,
                    take(x),
                    Expr::nary(Times, vec![Expr::minus_one(), take(y)]),
                );
            }
            unary!(Conj, x) => {
                assert_eq!(x.ty, Real);
                *e = take(x);
            }
            unary!(Im, binary!(Complex, x, y)) => {
                *e = Expr::nary(
                    Plus,
                    vec![
                        // Keep `x` to preserve the domain.
                        Expr::nary(Times, vec![Expr::zero(), take(x)]),
                        take(y),
                    ],
                );
            }
            unary!(Im, x) => {
                assert_eq!(x.ty, Real);
                // Keep `x` to preserve the domain.
                *e = Expr::nary(Times, vec![Expr::zero(), take(x)]);
            }
            unary!(Re, binary!(Complex, x, y)) => {
                *e = Expr::nary(
                    Plus,
                    vec![
                        take(x),
                        // Keep `y` to preserve the domain.
                        Expr::nary(Times, vec![Expr::zero(), take(y)]),
                    ],
                );
            }
            unary!(Re, x) => {
                assert_eq!(x.ty, Real);
                *e = take(x);
            }
            unary!(Sign, binary!(Complex, x, y)) => {
                // sgn(x + i y) = f(x, y) - f(-x, y) + i (f(y, x) - f(-y, x)), where f is `ReSignNonnegative`.
                *e = Expr::binary(
                    Complex,
                    Expr::nary(
                        Plus,
                        vec![
                            Expr::binary(ReSignNonnegative, x.clone(), y.clone()),
                            Expr::nary(
                                Times,
                                vec![
                                    Expr::minus_one(),
                                    Expr::binary(
                                        ReSignNonnegative,
                                        Expr::nary(Times, vec![Expr::minus_one(), x.clone()]),
                                        y.clone(),
                                    ),
                                ],
                            ),
                        ],
                    ),
                    Expr::nary(
                        Plus,
                        vec![
                            Expr::binary(ReSignNonnegative, y.clone(), x.clone()),
                            Expr::nary(
                                Times,
                                vec![
                                    Expr::minus_one(),
                                    Expr::binary(
                                        ReSignNonnegative,
                                        Expr::nary(Times, vec![Expr::minus_one(), take(y)]),
                                        take(x),
                                    ),
                                ],
                            ),
                        ],
                    ),
                );
            }
            unary!(Sign, x) => {
                assert_eq!(x.ty, Real);
                // sgn(x) = f(x, 0) - f(-x, 0).
                *e = Expr::nary(
                    Plus,
                    vec![
                        Expr::binary(ReSignNonnegative, x.clone(), Expr::zero()),
                        Expr::nary(
                            Times,
                            vec![
                                Expr::minus_one(),
                                Expr::binary(
                                    ReSignNonnegative,
                                    Expr::nary(Times, vec![Expr::minus_one(), take(x)]),
                                    Expr::zero(),
                                ),
                            ],
                        ),
                    ],
                );
            }
            unary!(op @ (Sinc | UndefAt0 | Zeta), binary!(Complex, x, y)) => {
                // These have dedicated real/imaginary-part operations.
                let re_op = match op {
                    Sinc => ReSinc,
                    UndefAt0 => ReUndefAt0,
                    Zeta => ReZeta,
                    _ => unreachable!(),
                };
                let im_op = match op {
                    Sinc => ImSinc,
                    UndefAt0 => ImUndefAt0,
                    Zeta => ImZeta,
                    _ => unreachable!(),
                };
                *e = Expr::binary(
                    Complex,
                    Expr::binary(re_op, x.clone(), y.clone()),
                    Expr::binary(im_op, take(x), take(y)),
                );
            }
            unary!(op, binary!(Complex, x, y)) => {
                // Generic unary expansion via the registered template.
                if let Some(template) = self.unary_ops.get(op) {
                    let mut new_e = template.clone();
                    Substitute::new(vec![take(x), take(y)]).visit_expr_mut(&mut new_e);
                    *e = new_e;
                }
            }
            binary!(Eq, x, y) if x.ty == ComplexT || y.ty == ComplexT => {
                // A complex equation holds iff both the real and the
                // imaginary parts agree.
                *e = match (x, y) {
                    (binary!(Complex, a, b), binary!(Complex, x, y)) => Expr::nary(
                        AndN,
                        vec![
                            Expr::binary(Eq, take(a), take(x)),
                            Expr::binary(Eq, take(b), take(y)),
                        ],
                    ),
                    (binary!(Complex, a, b), x) => Expr::nary(
                        AndN,
                        vec![
                            Expr::binary(Eq, take(a), take(x)),
                            Expr::binary(Eq, take(b), Expr::zero()),
                        ],
                    ),
                    (a, binary!(Complex, x, y)) => Expr::nary(
                        AndN,
                        vec![
                            Expr::binary(Eq, take(a), take(x)),
                            Expr::binary(Eq, Expr::zero(), take(y)),
                        ],
                    ),
                    _ => panic!(), // `x.ty` or `y.ty` is wrong.
                };
            }
            binary!(Pow, binary!(Complex, x, y), constant!(a)) if a.to_f64() == Some(-1.0) => {
                // z⁻¹ gets the dedicated `Recip` expansion instead of the
                // generic `Pow` template.
                let mut new_e = self.unary_ops[&Recip].clone();
                Substitute::new(vec![take(x), take(y)]).visit_expr_mut(&mut new_e);
                *e = new_e;
            }
            binary!(Pow, binary!(Complex, x, y), constant!(a)) if a.to_f64() == Some(2.0) => {
                // z² gets the dedicated `Sqr` expansion.
                let mut new_e = self.unary_ops[&Sqr].clone();
                Substitute::new(vec![take(x), take(y)]).visit_expr_mut(&mut new_e);
                *e = new_e;
            }
            binary!(op, x, y) if e.ty == ComplexT => {
                // Generic binary expansion; a real operand is padded with a
                // zero imaginary part.
                if let Some(template) = self.binary_ops.get(op) {
                    let mut new_e = template.clone();
                    let mut subst = match (x, y) {
                        (binary!(Complex, a, b), binary!(Complex, x, y)) => {
                            Substitute::new(vec![take(a), take(b), take(x), take(y)])
                        }
                        (a, binary!(Complex, x, y)) => {
                            Substitute::new(vec![take(a), Expr::zero(), take(x), take(y)])
                        }
                        (binary!(Complex, a, b), x) => {
                            Substitute::new(vec![take(a), take(b), take(x), Expr::zero()])
                        }
                        _ => panic!(), // `e.ty` is wrong.
                    };
                    subst.visit_expr_mut(&mut new_e);
                    *e = new_e;
                }
            }
            ternary!(IfThenElse, cond, t, f) if e.ty == ComplexT => {
                // Push the conditional into both components.
                *e = match (t, f) {
                    (binary!(Complex, a, b), binary!(Complex, x, y)) => Expr::binary(
                        Complex,
                        Expr::ternary(IfThenElse, cond.clone(), take(a), take(x)),
                        Expr::ternary(IfThenElse, take(cond), take(b), take(y)),
                    ),
                    (binary!(Complex, a, b), x) => Expr::binary(
                        Complex,
                        Expr::ternary(IfThenElse, cond.clone(), take(a), take(x)),
                        Expr::ternary(IfThenElse, take(cond), take(b), Expr::zero()),
                    ),
                    (a, binary!(Complex, x, y)) => Expr::binary(
                        Complex,
                        Expr::ternary(IfThenElse, cond.clone(), take(a), take(x)),
                        Expr::ternary(IfThenElse, take(cond), Expr::zero(), take(y)),
                    ),
                    _ => panic!(), // `t.ty` or `f.ty` is wrong.
                }
            }
            nary!(Plus, xs) if e.ty == ComplexT => {
                // Componentwise sum; real operands contribute only to the
                // real part.
                let mut reals = vec![];
                let mut imags = vec![];
                for x in xs {
                    match x {
                        binary!(Complex, x, y) => {
                            reals.push(take(x));
                            imags.push(take(y));
                        }
                        _ => {
                            reals.push(take(x));
                        }
                    }
                }
                *e = Expr::binary(Complex, Expr::nary(Plus, reals), Expr::nary(Plus, imags));
            }
            nary!(Times, xs) if e.ty == ComplexT => {
                // Fold the product pairwise:
                // (a + i b)(x + i y) = (a x - b y) + i (b x + a y).
                let mut it = xs.drain(..);
                let mut x = it.next().unwrap();
                for mut y in it {
                    x = match (&mut x, &mut y) {
                        (binary!(Complex, a, b), binary!(Complex, x, y)) => Expr::binary(
                            Complex,
                            Expr::nary(
                                Plus,
                                vec![
                                    Expr::nary(Times, vec![a.clone(), x.clone()]),
                                    Expr::nary(
                                        Times,
                                        vec![Expr::minus_one(), b.clone(), y.clone()],
                                    ),
                                ],
                            ),
                            Expr::nary(
                                Plus,
                                vec![
                                    Expr::nary(Times, vec![take(b), take(x)]),
                                    Expr::nary(Times, vec![take(a), take(y)]),
                                ],
                            ),
                        ),
                        (a, binary!(Complex, x, y)) | (binary!(Complex, x, y), a) => Expr::binary(
                            Complex,
                            Expr::nary(Times, vec![a.clone(), take(x)]),
                            Expr::nary(Times, vec![take(a), take(y)]),
                        ),
                        _ => panic!(), // `e.ty` is wrong.
                    };
                }
                *e = x;
            }
            // Untouched nodes keep their existing metadata.
            _ => return,
        }
        // The node was rewritten; recompute its metadata.
        UpdateMetadata.visit_expr_mut(e);
    }
}
/// Does the following tasks:
///
/// - Replaces [`BinaryOp::Ge`] and [`BinaryOp::Gt`] with [`BinaryOp::Le`] and [`BinaryOp::Lt`]
/// by flipping the signs of both sides of the inequalities.
/// - Transposes all terms to the left-hand sides of equations and inequalities,
/// leaving zeros on the right-hand sides.
pub struct NormalizeRelationalExprs;

impl VisitMut for NormalizeRelationalExprs {
    fn visit_expr_mut(&mut self, e: &mut Expr) {
        use {BinaryOp::*, NaryOp::*};
        traverse_expr_mut(self, e);
        match e {
            binary!(op @ (Eq | Le | Lt), x, y) => {
                // (op x y) → (op (Plus x (Times -1 y)) 0)
                *e = Expr::binary(
                    *op,
                    Expr::nary(
                        Plus,
                        vec![take(x), Expr::nary(Times, vec![Expr::minus_one(), take(y)])],
                    ),
                    Expr::zero(),
                )
            }
            binary!(op @ (Ge | Gt), x, y) => {
                // (op x y) → (inv-op (Plus y (Times -1 x)) 0)
                // `x ≥ y` ⇔ `y - x ≤ 0`, and likewise for the strict form.
                let inv_op = match op {
                    Ge => Le,
                    Gt => Lt,
                    _ => unreachable!(),
                };
                *e = Expr::binary(
                    inv_op,
                    Expr::nary(
                        Plus,
                        vec![take(y), Expr::nary(Times, vec![Expr::minus_one(), take(x)])],
                    ),
                    Expr::zero(),
                )
            }
            _ => (),
        }
    }
}
/// Lowers the Iverson bracket [`UnaryOp::Boole`] into arithmetic operations.
///
/// Precondition: [`NormalizeRelationalExprs`] has been applied to the expression.
pub struct ExpandBoole;

impl VisitMut for ExpandBoole {
    fn visit_expr_mut(&mut self, e: &mut Expr) {
        use {BinaryOp::*, NaryOp::*, UnaryOp::*};
        let mut modified = false;
        if let unary!(Boole, x) = e {
            match x {
                bool_constant!(false) => {
                    // [false] = 0.
                    *e = Expr::zero();
                    modified = true;
                }
                bool_constant!(true) => {
                    // [true] = 1.
                    *e = Expr::one();
                    modified = true;
                }
                unary!(Not, x) => {
                    // [¬x] = 1 - [x].
                    *e = Expr::nary(
                        Plus,
                        vec![
                            Expr::one(),
                            Expr::nary(Times, vec![Expr::minus_one(), Expr::unary(Boole, take(x))]),
                        ],
                    );
                    modified = true;
                }
                binary!(op @ (Eq | Le | Lt), x, y) => {
                    // `NormalizeRelationalExprs` guarantees a zero RHS.
                    assert!(matches!(y, constant!(a) if a.to_f64() == Some(0.0)));
                    let op = match op {
                        Eq => BooleEqZero,
                        Le => BooleLeZero,
                        Lt => BooleLtZero,
                        _ => unreachable!(),
                    };
                    *e = Expr::unary(op, take(x));
                    modified = true;
                }
                nary!(op @ (AndN | OrN), xs) => {
                    // [x ∧ y ∧ …] = min([x], [y], …); [x ∨ y ∨ …] = max(…).
                    let arith_op = match op {
                        AndN => MinN,
                        OrN => MaxN,
                        _ => unreachable!(),
                    };
                    *e = Expr::nary(
                        arith_op,
                        xs.drain(..).map(|x| Expr::unary(Boole, x)).collect(),
                    );
                    modified = true;
                }
                _ => (),
            }
        }
        if modified {
            // `e` has not been visited yet.
            // The rewrite may have produced nested `Boole` nodes; restart
            // at this node so they get expanded too.
            self.visit_expr_mut(e);
        } else {
            traverse_expr_mut(self, e);
        }
    }
}
/// Transforms `M1 ... Mn = 0` into `M1' ... Mn' = 0`,
/// where `Mi = mod(xi, yi)` and `Mi' = mod(xi + yi / 2, yi) - yi / 2`.
///
/// Precondition: [`PreTransform`] and [`NormalizeRelationalExprs`] have been applied to the expression,
/// and then the expression has been simplified.
///
/// It might be better to also apply [`ExpandBoole`] before this one,
/// since transforming equations in the conditions of `if` expressions
/// will not likely to help improve quality of the output.
pub struct ModEqTransform;

impl ModEqTransform {
    /// Rewrites `mod(x, y)` in place into the equivalent
    /// `mod(x + y/2, y) - y/2`, whose value lies in `[-y/2, y/2)` instead
    /// of `[0, y)`. Leaves `z` untouched if it is not a `Mod` expression.
    fn shift_mod(z: &mut Expr) {
        use {BinaryOp::*, NaryOp::*};
        if let binary!(Mod, x, y) = z {
            *z = Expr::nary(
                Plus,
                vec![
                    Expr::binary(
                        Mod,
                        Expr::nary(
                            Plus,
                            vec![
                                take(x),
                                Expr::nary(Times, vec![Expr::one_half(), y.clone()]),
                            ],
                        ),
                        y.clone(),
                    ),
                    Expr::nary(Times, vec![Expr::minus_one_half(), y.clone()]),
                ],
            );
        }
    }
}

impl VisitMut for ModEqTransform {
    fn visit_expr_mut(&mut self, e: &mut Expr) {
        use {BinaryOp::*, NaryOp::*};
        traverse_expr_mut(self, e);
        match e {
            // A single factor: mod(x, y) = 0.
            binary!(Eq, z, _) if matches!(z, binary!(Mod, _, _)) => {
                Self::shift_mod(z);
            }
            // A product equated to zero where every factor is a `mod`:
            // shift each factor.
            binary!(Eq, nary!(Times, zs), _)
                if zs.iter().all(|z| matches!(z, binary!(Mod, _, _))) =>
            {
                for z in zs.iter_mut() {
                    Self::shift_mod(z);
                }
            }
            _ => (),
        }
    }
}
/// Flattens associative n-ary operations of the same kind.
///
/// If the expression has no arguments, it is replaced by the identity element of the operation.
/// If it has exactly one argument, it is replaced by that argument.
#[derive(Default)]
struct Flatten {
modified: bool,
}
impl VisitMut for Flatten {
fn visit_expr_mut(&mut self, e: &mut Expr) {
use NaryOp::*;
traverse_expr_mut(self, e);
if let nary!(op @ (AndN | MaxN | MinN | OrN | Plus | Times), xs) = e {
match &mut xs[..] {
[] => {
*e = match op {
AndN => Expr::bool_constant(true),
OrN => Expr::bool_constant(false),
Plus => Expr::zero(),
Times => Expr::one(),
MaxN | MinN => panic!(),
_ => unreachable!(),
};
self.modified = true;
}
[x] => {
*e = take(x);
self.modified = true;
}
_ => {
*xs = xs.drain(..).fold(vec![], |mut acc, x| {
match x {
nary!(opx, mut xs) if opx == *op => {
acc.append(&mut xs);
self.modified = true;
}
_ => acc.push(x),
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | true |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/rational_ops.rs | rust/src/rational_ops.rs | use gmp_mpfr_sys::{mpfr, mpfr::rnd_t};
use inari::{interval, Interval};
use rug::{Float, Rational};
/// Returns `cos(πx)` when `x` is a half-integer (where the value is 0 or
/// ±1), and `None` otherwise.
pub fn cos_pi(x: Rational) -> Option<Rational> {
    // `2x mod 4` enumerates the quarter periods of the cosine.
    let r = modulo(2 * x, 4.into())?;
    if r == 0 {
        Some(1.into())
    } else if r == 1 || r == 3 {
        Some(0.into())
    } else if r == 2 {
        Some((-1).into())
    } else {
        None
    }
}
/// Returns the exact quotient `x / y`, or `None` when `y` is zero.
pub fn div(x: Rational, y: Rational) -> Option<Rational> {
    if y == 0 {
        return None;
    }
    Some(x / y)
}
/// Returns the greatest common divisor of `x` and `y`, extended to the
/// rationals via Euclid's algorithm; `gcd(x, 0) = |x|`, so `gcd(0, 0) = 0`.
pub fn gcd(x: Rational, y: Rational) -> Option<Rational> {
    let (mut a, mut b) = (x, y.abs());
    while b != 0 {
        // `b` is positive here, so the floored remainder is in `[0, b)`.
        let r = modulo(a, b.clone())?;
        a = b;
        b = r;
    }
    Some(a.abs())
}
/// Returns the least common multiple of `x` and `y`, extended to the
/// rationals as `|x·y| / gcd(x, y)`; `lcm(0, 0) = 0` by convention.
pub fn lcm(x: Rational, y: Rational) -> Option<Rational> {
    if x == 0 && y == 0 {
        return Some(Rational::new());
    }
    let product = Rational::from(&x * &y).abs();
    div(product, gcd(x, y)?)
}
/// Returns the larger of x and y.
pub fn max(x: Rational, y: Rational) -> Option<Rational> {
    // `x` is kept when the operands compare equal.
    Some(if x < y { y } else { x })
}
/// Returns the smaller of x and y.
pub fn min(x: Rational, y: Rational) -> Option<Rational> {
    // `y` is kept when the operands compare equal.
    Some(if x < y { x } else { y })
}
/// Returns the floored modulo x − y ⌊x/y⌋, or `None` when y = 0.
///
/// The result has the same sign as `y` (or is zero).
pub fn modulo(x: Rational, y: Rational) -> Option<Rational> {
    if y == 0 {
        return None;
    }
    let quotient_floor = Rational::from(&x / &y).floor();
    Some(&x - &y * quotient_floor)
}
/// Returns x^y when the result is provably rational and representable.
///
/// `None` is returned when 0^y with y ≤ 0 is requested, when the result is
/// irrational (x ≠ 0 raised to a non-integer power), or when any component
/// overflows the `i32`/`u32` fast path used here.
pub fn pow(x: Rational, y: Rational) -> Option<Rational> {
    // Work on small machine integers; bail out (`?`) if the operands don't fit.
    let xn = x.numer().to_i32()?;
    let xd = x.denom().to_u32()?;
    let yn = y.numer().to_i32()?;
    let yd = y.denom().to_u32()?;
    if yd == 1 {
        // y ∈ ℤ.
        if yn <= 0 && xn == 0 {
            // y ≤ 0 ∧ x = 0.
            None
        } else if yn >= 0 {
            // y ≥ 0.
            let n = yn as u32;
            // `checked_pow` turns overflow into `None` instead of panicking.
            let zn = xn.checked_pow(n)?;
            let zd = xd.checked_pow(n)?;
            Some((zn, zd).into())
        } else {
            // y < 0 ∧ x ≠ 0: invert the base (numerator/denominator swap);
            // rug normalizes the sign of the denominator on construction.
            let n = -yn as u32;
            let zn = xd.checked_pow(n)?;
            let zd = xn.checked_pow(n)?;
            Some((zn, zd).into())
        }
    } else {
        // y ∉ ℤ.
        if xn == 0 && yn > 0 {
            // x = 0 ∧ y > 0.
            Some(0.into())
        } else {
            // x ≠ 0 ∨ y ≤ 0.
            None
        }
    }
}
/// The rational fast path of the `PowRational` operation.
///
/// Over exactly-rational operands it coincides with [`pow`]; the two
/// operations differ only in their interval semantics elsewhere.
pub fn pow_rational(x: Rational, y: Rational) -> Option<Rational> {
    pow(x, y)
}
/// Returns sin(πx) if the result is rational.
///
/// The result is rational exactly when x is an integer multiple of 1/2;
/// otherwise `None` is returned.
pub fn sin_pi(x: Rational) -> Option<Rational> {
    // Reduce to the period: 2x mod 4 is 0, 1, 2 or 3 iff x is a half-integer.
    let r = modulo(2 * x, 4.into())?;
    if r == 0 || r == 2 {
        Some(0.into())
    } else if r == 1 {
        Some(1.into())
    } else if r == 3 {
        Some((-1).into())
    } else {
        None
    }
}
/// Returns tan(πx) if the result is rational.
///
/// The result is rational exactly when x is an integer multiple of 1/4
/// away from a pole; otherwise `None` is returned.
pub fn tan_pi(x: Rational) -> Option<Rational> {
    match modulo(4 * x, 4.into()) {
        Some(r) if r == 0 => Some(0.into()),
        Some(r) if r == 1 => Some(1.into()),
        // `r == 2` (x ≡ 1/2 mod 1) is deliberately absent: tan(πx) has a
        // pole there, so the catch-all arm returns `None`.
        Some(r) if r == 3 => Some((-1).into()),
        _ => None,
    }
}
// Based on `inari::parse::rational_to_f64`.
/// Returns the tightest [`Interval`] that encloses the rational `r`.
///
/// Each bound is obtained by rounding `r` to `f64` with MPFR using the
/// corresponding directed rounding mode, so the result is correctly rounded
/// in both directions.
#[allow(clippy::many_single_char_names)]
pub fn to_interval(r: &Rational) -> Interval {
    let mut f = Float::new(f64::MANTISSA_DIGITS);
    unsafe {
        // Temporarily narrow MPFR's exponent range to that of `f64` so that
        // `mpfr::subnormalize` rounds subnormal values correctly.
        let orig_emin = mpfr::get_emin();
        let orig_emax = mpfr::get_emax();
        mpfr::set_emin((f64::MIN_EXP - (f64::MANTISSA_DIGITS as i32) + 1).into());
        mpfr::set_emax(f64::MAX_EXP.into());
        // Lower bound: round the rational down, then extract it rounding down.
        let rnd = rnd_t::RNDD;
        let t = mpfr::set_q(f.as_raw_mut(), r.as_raw(), rnd);
        mpfr::subnormalize(f.as_raw_mut(), t, rnd);
        let a = mpfr::get_d(f.as_raw(), rnd);
        // Upper bound: the same dance rounding up.
        let rnd = rnd_t::RNDU;
        let t = mpfr::set_q(f.as_raw_mut(), r.as_raw(), rnd);
        mpfr::subnormalize(f.as_raw_mut(), t, rnd);
        let b = mpfr::get_d(f.as_raw(), rnd);
        // Restore the global exponent range before returning.
        mpfr::set_emin(orig_emin);
        mpfr::set_emax(orig_emax);
        interval!(a, b).unwrap()
    }
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Builds a `Rational` from an integer literal or a `$p / $q` pair.
    macro_rules! r {
        ($i:literal) => {
            Rational::from($i)
        };
        ($p:literal / $q:literal) => {
            Rational::from(($p, $q))
        };
    }

    // `test!` asserts `$f($x[, $y]) == $z` and expands extra cases from markers:
    //   @commut — also checks with the two arguments swapped;
    //   @even   — also checks with the marked argument negated, same result;
    //   @odd    — also checks with the marked argument negated, negated result.
    macro_rules! test {
        ($f:path, $x:expr, $y:expr) => {
            assert_eq!($f($x), $y);
        };
        ($f:path, $x:expr, $y:expr, $z:expr) => {
            assert_eq!($f($x, $y), $z);
        };
        (@commut $(@$af:ident)* $f:path, $(@$ax:ident)* $x:expr, $(@$ay:ident)* $y:expr, $z:expr) => {
            test!($(@$af)* $f, $(@$ax)* $x, $(@$ay)* $y, $z);
            test!($(@$af)* $f, $(@$ax)* $y, $(@$ay)* $x, $z);
        };
        ($(@$af:ident)* $f:path, @even $(@$ax:ident)* $x:expr, $(@$ay:ident)* $y:expr, $z:expr) => {
            test!($(@$af)* $f, $(@$ax)* $x, $(@$ay)* $y, $z);
            test!($(@$af)* $f, $(@$ax)* -$x, $(@$ay)* $y, $z);
        };
        ($(@$af:ident)* $f:path, @odd $(@$ax:ident)* $x:expr, $(@$ay:ident)* $y:expr, $z:expr) => {
            test!($(@$af)* $f, $(@$ax)* $x, $(@$ay)* $y, $z);
            test!($(@$af)* $f, $(@$ax)* -$x, $(@$ay)* $y, $z.map(|z: Rational| -z));
        };
        ($(@$af:ident)* $f:path, $(@$ax:ident)* $x:expr, @even $(@$ay:ident)* $y:expr, $z:expr) => {
            test!($(@$af)* $f, $(@$ax)* $x, $(@$ay)* $y, $z);
            test!($(@$af)* $f, $(@$ax)* $x, $(@$ay)* -$y, $z);
        };
        ($(@$af:ident)* $f:path, $(@$ax:ident)* $x:expr, @odd $(@$ay:ident)* $y:expr, $z:expr) => {
            test!($(@$af)* $f, $(@$ax)* $x, $(@$ay)* $y, $z);
            test!($(@$af)* $f, $(@$ax)* $x, $(@$ay)* -$y, $z.map(|z: Rational| -z));
        };
    }

    #[test]
    fn cos_pi() {
        use super::cos_pi;
        test!(cos_pi, r!(-1), Some(r!(-1)));
        test!(cos_pi, r!(-1 / 2), Some(r!(0)));
        test!(cos_pi, r!(0), Some(r!(1)));
        test!(cos_pi, r!(1 / 2), Some(r!(0)));
        test!(cos_pi, r!(1), Some(r!(-1)));
        test!(cos_pi, r!(3 / 2), Some(r!(0)));
        test!(cos_pi, r!(2), Some(r!(1)));
        test!(cos_pi, r!(5 / 2), Some(r!(0)));
        test!(cos_pi, r!(3), Some(r!(-1)));
    }

    #[test]
    fn div() {
        use super::div;
        test!(div, r!(0), r!(0), None);
        test!(div, @odd r!(2 / 3), r!(0), None);
        test!(div, @odd r!(2 / 3), @odd r!(4 / 5), Some(r!(5 / 6)));
    }

    #[test]
    fn gcd() {
        use super::gcd;
        test!(gcd, r!(0), r!(0), Some(r!(0)));
        test!(@commut gcd, @even r!(2 / 3), r!(0), Some(r!(2 / 3)));
        test!(@commut gcd, @even r!(2 / 3), @even r!(4 / 5), Some(r!(2 / 15)));
    }

    #[test]
    fn lcm() {
        use super::lcm;
        test!(lcm, r!(0), r!(0), Some(r!(0)));
        test!(@commut lcm, @even r!(2 / 3), r!(0), Some(r!(0)));
        test!(@commut lcm, @even r!(2 / 3), @even r!(4 / 5), Some(r!(4)));
    }

    #[test]
    fn max() {
        use super::max;
        test!(@commut max, r!(2 / 3), r!(4 / 5), Some(r!(4 / 5)));
    }

    #[test]
    fn min() {
        use super::min;
        test!(@commut min, r!(2 / 3), r!(4 / 5), Some(r!(2 / 3)));
    }

    #[test]
    fn modulo() {
        use super::modulo;
        // Floored modulo: the sign of the result follows the divisor.
        test!(modulo, r!(0), r!(0), None);
        test!(modulo, r!(0), r!(4 / 5), Some(r!(0)));
        test!(modulo, r!(0), r!(-4 / 5), Some(r!(0)));
        test!(modulo, r!(2 / 3), r!(0), None);
        test!(modulo, r!(2 / 3), r!(4 / 5), Some(r!(2 / 3)));
        test!(modulo, r!(2 / 3), r!(-4 / 5), Some(r!(-2 / 15)));
        test!(modulo, r!(4 / 5), r!(2 / 3), Some(r!(2 / 15)));
        test!(modulo, r!(4 / 5), r!(-2 / 3), Some(r!(-8 / 15)));
        test!(modulo, r!(-2 / 3), r!(0), None);
        test!(modulo, r!(-2 / 3), r!(4 / 5), Some(r!(2 / 15)));
        test!(modulo, r!(-2 / 3), r!(-4 / 5), Some(r!(-2 / 3)));
        test!(modulo, r!(-4 / 5), r!(2 / 3), Some(r!(8 / 15)));
        test!(modulo, r!(-4 / 5), r!(-2 / 3), Some(r!(-2 / 15)));
    }

    #[test]
    fn pow_rational() {
        use super::pow_rational;
        test!(pow_rational, r!(0), r!(-4), None);
        test!(pow_rational, r!(0), r!(-3), None);
        test!(pow_rational, r!(0), r!(-4 / 5), None);
        test!(pow_rational, r!(0), r!(-3 / 5), None);
        test!(pow_rational, r!(0), r!(0), None);
        test!(pow_rational, r!(0), r!(3 / 5), Some(r!(0)));
        test!(pow_rational, r!(0), r!(4 / 5), Some(r!(0)));
        test!(pow_rational, r!(0), r!(3), Some(r!(0)));
        test!(pow_rational, r!(0), r!(4), Some(r!(0)));
        test!(pow_rational, @even r!(2 / 3), r!(-4), Some(r!(81 / 16)));
        test!(pow_rational, @odd r!(2 / 3), r!(-3), Some(r!(27 / 8)));
        test!(pow_rational, @even r!(2 / 3), r!(-4 / 5), None);
        test!(pow_rational, @odd r!(2 / 3), r!(-3 / 5), None);
        test!(pow_rational, @even r!(2 / 3), r!(0), Some(r!(1)));
        test!(pow_rational, @odd r!(2 / 3), r!(3 / 5), None);
        test!(pow_rational, @even r!(2 / 3), r!(4 / 5), None);
        test!(pow_rational, @odd r!(2 / 3), r!(3), Some(r!(8 / 27)));
        test!(pow_rational, @even r!(2 / 3), r!(4), Some(r!(16 / 81)));
        // The result is rational, but not computed.
        test!(pow_rational, r!(1), r!(1 / 2), None);
    }

    #[test]
    fn sin_pi() {
        use super::sin_pi;
        test!(sin_pi, r!(-1), Some(r!(0)));
        test!(sin_pi, r!(-1 / 2), Some(r!(-1)));
        test!(sin_pi, r!(0), Some(r!(0)));
        test!(sin_pi, r!(1 / 2), Some(r!(1)));
        test!(sin_pi, r!(1), Some(r!(0)));
        test!(sin_pi, r!(3 / 2), Some(r!(-1)));
        test!(sin_pi, r!(2), Some(r!(0)));
        test!(sin_pi, r!(5 / 2), Some(r!(1)));
        test!(sin_pi, r!(3), Some(r!(0)));
    }

    #[test]
    fn tan_pi() {
        use super::tan_pi;
        test!(tan_pi, r!(-1 / 4), Some(r!(-1)));
        test!(tan_pi, r!(0), Some(r!(0)));
        test!(tan_pi, r!(1 / 4), Some(r!(1)));
        test!(tan_pi, r!(3 / 4), Some(r!(-1)));
        test!(tan_pi, r!(1), Some(r!(0)));
        test!(tan_pi, r!(5 / 4), Some(r!(1)));
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/real.rs | rust/src/real.rs | use crate::{interval_set::TupperIntervalSet, rational_ops};
use inari::{const_interval, DecInterval, Decoration, Interval};
use rug::Rational;
use std::ops::{Add, Div, Mul, Neg, Sub};
/// The unit a rational coefficient is multiplied by in [`Real`].
#[derive(Clone, Copy, Debug, Eq, Hash, PartialEq)]
pub enum RealUnit {
    /// The unit 1; the value is the coefficient itself.
    One,
    /// The unit π; the value is the coefficient times π.
    Pi,
}
/// Stores the value of an AST node of kind [`ExprKind::Constant`].
/// Values that are elements of ℚ ∪ π ℚ can be represented exactly.
///
/// [`ExprKind::Constant`]: crate::ast::ExprKind::Constant
#[derive(Clone, Debug, Eq, Hash, PartialEq)]
pub struct Real {
    // An interval enclosure of the value, always kept in sync with `q`.
    x: TupperIntervalSet,
    // The exact value as (coefficient, unit) when representable.
    q: Option<(Rational, RealUnit)>,
}
/// Wraps an [`Interval`] in a [`TupperIntervalSet`] decorated with `Dac`.
fn interval_set(x: Interval) -> TupperIntervalSet {
    // Always decorate with `Decoration::Dac` to avoid that, for example,
    // `Real::zero().floor().interval().decoration()` results in `Decoration::Com`,
    // which should actually be `Decoration::Dac`.
    TupperIntervalSet::from(DecInterval::set_dec(x, Decoration::Dac))
}
impl Real {
    /// Returns a [`Real`] representing 0, exactly.
    pub fn zero() -> Self {
        Self {
            x: interval_set(const_interval!(0.0, 0.0)),
            q: Some((Rational::new(), RealUnit::One)),
        }
    }

    /// Returns a [`Real`] representing 1, exactly.
    pub fn one() -> Self {
        Self {
            x: interval_set(const_interval!(1.0, 1.0)),
            q: Some((Rational::from(1), RealUnit::One)),
        }
    }

    /// Returns a [`Real`] representing π, exactly.
    pub fn pi() -> Self {
        Self {
            x: interval_set(Interval::PI),
            q: Some((Rational::from(1), RealUnit::Pi)),
        }
    }

    /// Returns a [`Real`] representing 2π, exactly.
    pub fn tau() -> Self {
        Self {
            x: interval_set(Interval::TAU),
            q: Some((Rational::from(2), RealUnit::Pi)),
        }
    }

    /// Returns a [`Real`] representing an undefined value.
    pub fn undefined() -> Self {
        Self {
            // The empty interval set; no exact representation exists.
            x: TupperIntervalSet::new(),
            q: None,
        }
    }

    /// Returns an enclosure of the value.
    pub fn interval(&self) -> &TupperIntervalSet {
        &self.x
    }

    /// Returns the value as [`Rational`] if the representation is exact.
    pub fn rational(&self) -> Option<&Rational> {
        match &self.q {
            Some((q, RealUnit::One)) => Some(q),
            // 0 π = 0, so an exact zero qualifies regardless of its unit.
            Some((q, RealUnit::Pi)) if q.is_zero() => Some(q),
            _ => None,
        }
    }

    /// Returns the value as [`Rational`] multiple of π if the representation is exact.
    pub fn rational_pi(&self) -> Option<&Rational> {
        match &self.q {
            Some((q, RealUnit::Pi)) => Some(q),
            // 0 = 0 π, so an exact zero qualifies regardless of its unit.
            Some((q, RealUnit::One)) if q.is_zero() => Some(q),
            _ => None,
        }
    }

    /// Returns the value as [`Rational`] and its unit if the representation is exact.
    pub fn rational_unit(&self) -> Option<(&Rational, RealUnit)> {
        self.q.as_ref().map(|(q, unit)| (q, *unit))
    }

    /// Returns the value as [`f64`] if the representation is exact.
    pub fn to_f64(&self) -> Option<f64> {
        self.x.to_f64()
    }
}
impl Default for Real {
    /// The default value is exactly 0.
    fn default() -> Self {
        Self::zero()
    }
}
impl From<DecInterval> for Real {
    // Goes through `TupperIntervalSet` so the exact-rational recovery in
    // `From<TupperIntervalSet>` applies to singleton intervals as well.
    fn from(x: DecInterval) -> Self {
        let x = TupperIntervalSet::from(x);
        Self::from(x)
    }
}
impl From<Rational> for Real {
    // The enclosure is the tightest interval around `q`; the exact value is
    // kept alongside it.
    fn from(q: Rational) -> Self {
        Self {
            x: interval_set(rational_ops::to_interval(&q)),
            q: Some((q, RealUnit::One)),
        }
    }
}
impl From<(Rational, RealUnit)> for Real {
    fn from((q, unit): (Rational, RealUnit)) -> Self {
        if q.is_zero() {
            // Normalize 0 π to plain 0 so zero has a single representation.
            return Self::zero();
        }
        Self {
            x: match unit {
                RealUnit::One => interval_set(rational_ops::to_interval(&q)),
                // Multiplying the enclosure of `q` by an enclosure of π keeps
                // the result a valid enclosure of q π.
                RealUnit::Pi => interval_set(rational_ops::to_interval(&q) * Interval::PI),
            },
            q: Some((q, unit)),
        }
    }
}
impl From<TupperIntervalSet> for Real {
    fn from(x: TupperIntervalSet) -> Self {
        // Recover an exact rational when the set collapses to a single `f64`
        // (`to_f64` returning `Some`); otherwise keep only the enclosure.
        let q = x
            .to_f64()
            .and_then(Rational::from_f64)
            .map(|q| (q, RealUnit::One));
        Self { x, q }
    }
}
// Generates forwarding methods on `Real`. Arm shapes:
//   $op($x)                     — unary op forwarded to the interval method;
//   $op($x), $y                 — unary op with a custom interval expression;
//   $op($x), $y, $y_q, $y_q_pi  — unary op with exact-rational fast paths for
//                                 values of the form q·1 and q·π;
//   the two-/three-argument arms follow the same pattern for binary and
//   ternary operations.
macro_rules! impl_op {
    ($op:ident($x:ident)) => {
        impl_op!($op($x), $x.$op());
    };
    ($op:ident($x:ident), $y:expr) => {
        pub fn $op(self) -> Self {
            let $x = self.x;
            $y.into()
        }
    };
    ($op:ident($x:ident), $y:expr, $y_q:expr, $y_q_pi:expr) => {
        pub fn $op(self) -> Self {
            // Try the exact path first; fall back to interval arithmetic.
            let y_q = match self.q {
                Some(($x, RealUnit::One)) => $y_q.map(|q| (q, RealUnit::One)),
                Some(($x, RealUnit::Pi)) => $y_q_pi.map(|q| (q, RealUnit::Pi)),
                _ => None,
            };
            if let Some(y_q) = y_q {
                y_q.into()
            } else {
                let $x = self.x;
                $y.into()
            }
        }
    };
    ($op:ident($x:ident, $y:ident)) => {
        impl_op!($op($x, $y), $x.$op(&$y));
    };
    ($op:ident($x:ident, $y:ident), $z:expr) => {
        pub fn $op(self, rhs: Self) -> Self {
            let $x = self.x;
            let $y = rhs.x;
            $z.into()
        }
    };
    ($op:ident($x:ident, $y:ident), $z:expr, $z_q:expr, $z_q_pi:expr) => {
        pub fn $op(self, rhs: Self) -> Self {
            // Exact path only when both operands share the same unit.
            let z_q = match (self.q, rhs.q) {
                (Some(($x, RealUnit::One)), Some(($y, RealUnit::One))) => {
                    $z_q.map(|q| (q, RealUnit::One))
                }
                (Some(($x, RealUnit::Pi)), Some(($y, RealUnit::Pi))) => {
                    $z_q_pi.map(|q| (q, RealUnit::Pi))
                }
                _ => None,
            };
            if let Some(z_q) = z_q {
                z_q.into()
            } else {
                let $x = self.x;
                let $y = rhs.x;
                $z.into()
            }
        }
    };
    ($op:ident($x:ident, $y:ident, $z:ident)) => {
        impl_op!($op($x, $y, $z), $x.$op(&$y, &$z));
    };
    ($op:ident($x:ident, $y:ident, $z:ident), $result:expr) => {
        pub fn $op(self, $y: Self, $z: Self) -> Self {
            let $x = self.x;
            let $y = $y.x;
            let $z = $z.x;
            $result.into()
        }
    };
}
// The operations of `Real`. Hand-written methods (`add`, `cos`, `div`, `mul`,
// `sin`, `sub`, `tan`, …) carry exact-rational special cases that the
// `impl_op!` arms cannot express.
#[allow(unused_variables)]
impl Real {
    impl_op!(abs(x), x.abs(), Some(x.abs()), Some(x.abs()));
    impl_op!(acos(x));
    impl_op!(acosh(x));

    pub fn add(self, rhs: Self) -> Self {
        let z_q = match (self.q, rhs.q) {
            // Same unit: add the coefficients exactly.
            (Some((x, x_unit)), Some((y, y_unit))) if x_unit == y_unit => Some((x + y, x_unit)),
            // Adding an exact zero preserves the other operand's exactness.
            (Some((x, _)), y_q) | (y_q, Some((x, _))) if x.is_zero() => y_q,
            _ => None,
        };
        if let Some(z_q) = z_q {
            z_q.into()
        } else {
            let x = self.x;
            let y = rhs.x;
            (&x + &y).into()
        }
    }

    impl_op!(airy_ai(x));
    impl_op!(airy_ai_prime(x));
    impl_op!(airy_bi(x));
    impl_op!(airy_bi_prime(x));
    impl_op!(asin(x));
    impl_op!(asinh(x));
    impl_op!(atan(x));
    impl_op!(atanh(x));
    impl_op!(atan2(y, x), y.atan2(&x, None));
    impl_op!(bessel_i(n, x));
    impl_op!(bessel_j(n, x));
    impl_op!(bessel_k(n, x));
    impl_op!(bessel_y(n, x));
    impl_op!(boole_eq_zero(x), x.boole_eq_zero(None));
    impl_op!(boole_le_zero(x), x.boole_le_zero(None));
    impl_op!(boole_lt_zero(x), x.boole_lt_zero(None));
    // `ceil`/`floor` of q π is irrational in general, hence no π fast path.
    impl_op!(ceil(x), x.ceil(None), Some(x.ceil()), None);
    impl_op!(chi(x));
    impl_op!(ci(x));

    pub fn cos(self) -> Self {
        // cos(q π) is computed exactly when it is rational.
        let y_q = match self.q {
            Some((x, RealUnit::Pi)) => rational_ops::cos_pi(x).map(|q| (q, RealUnit::One)),
            _ => None,
        };
        if let Some(y_q) = y_q {
            y_q.into()
        } else {
            let x = self.x;
            x.cos().into()
        }
    }

    impl_op!(cosh(x));
    impl_op!(digamma(x), x.digamma(None));

    pub fn div(self, rhs: Self) -> Self {
        let z_q = match (self.q, rhs.q) {
            // Same unit: the units cancel, leaving a plain rational.
            (Some((x, x_unit)), Some((y, y_unit))) if x_unit == y_unit => {
                rational_ops::div(x, y).map(|q| (q, RealUnit::One))
            }
            // q π / r = (q/r) π.
            (Some((x, RealUnit::Pi)), Some((y, RealUnit::One))) => {
                rational_ops::div(x, y).map(|q| (q, RealUnit::Pi))
            }
            _ => None,
        };
        if let Some(z_q) = z_q {
            z_q.into()
        } else {
            let x = self.x;
            let y = rhs.x;
            x.div(&y, None).into()
        }
    }

    impl_op!(ei(x));
    impl_op!(elliptic_e(x));
    impl_op!(elliptic_k(x));
    impl_op!(erf(x));
    impl_op!(erfc(x));
    impl_op!(erfi(x));
    impl_op!(exp(x));
    impl_op!(floor(x), x.floor(None), Some(x.floor()), None);
    impl_op!(fresnel_c(x));
    impl_op!(fresnel_s(x));
    impl_op!(gamma(x), x.gamma(None));
    impl_op!(gamma_inc(a, x));
    impl_op!(gcd(x, y), x.gcd(&y, None), rational_ops::gcd(x, y), None);
    impl_op!(if_then_else(cond, t, f));
    impl_op!(im_sinc(re_x, im_x));
    impl_op!(im_undef_at_0(re_x, im_x));
    impl_op!(im_zeta(re_x, im_x));
    impl_op!(inverse_erf(x));
    impl_op!(inverse_erfc(x));
    impl_op!(lambert_w(k, x));
    impl_op!(lcm(x, y), x.lcm(&y, None), rational_ops::lcm(x, y), None);
    impl_op!(li(x));
    impl_op!(ln(x));
    impl_op!(ln_gamma(x));
    impl_op!(log(x, b), x.log(&b, None));
    impl_op!(
        max(x, y),
        x.max(&y),
        rational_ops::max(x, y),
        rational_ops::max(x, y)
    );
    impl_op!(
        min(x, y),
        x.min(&y),
        rational_ops::min(x, y),
        rational_ops::min(x, y)
    );
    impl_op!(
        modulo(x, y),
        x.modulo(&y, None),
        rational_ops::modulo(x, y),
        rational_ops::modulo(x, y)
    );

    pub fn mul(self, rhs: Self) -> Self {
        let z_q = match (self.q, rhs.q) {
            (Some((x, RealUnit::One)), Some((y, RealUnit::One))) => Some((x * y, RealUnit::One)),
            // A rational times q π stays in π ℚ; π² does not, hence Pi·Pi is
            // handled by the interval fallback.
            (Some((x, RealUnit::One)), Some((y, RealUnit::Pi)))
            | (Some((x, RealUnit::Pi)), Some((y, RealUnit::One))) => Some((x * y, RealUnit::Pi)),
            _ => None,
        };
        if let Some(z_q) = z_q {
            z_q.into()
        } else {
            let x = self.x;
            let y = rhs.x;
            (&x * &y).into()
        }
    }

    impl_op!(neg(x), -&x, Some(-x), Some(-x));
    impl_op!(pow(x, y), x.pow(&y, None), rational_ops::pow(x, y), None);
    impl_op!(
        pow_rational(x, y),
        x.pow_rational(&y, None),
        rational_ops::pow_rational(x, y),
        None
    );

    pub fn ranked_max(xs: Vec<Real>, n: Real) -> Self {
        TupperIntervalSet::ranked_max(xs.iter().map(|x| &x.x).collect(), &n.x, None).into()
    }

    pub fn ranked_min(xs: Vec<Real>, n: Real) -> Self {
        TupperIntervalSet::ranked_min(xs.iter().map(|x| &x.x).collect(), &n.x, None).into()
    }

    impl_op!(re_sign_nonnegative(x, y), x.re_sign_nonnegative(&y, None));
    impl_op!(re_sinc(re_x, im_x));
    impl_op!(re_undef_at_0(re_x, im_x));
    impl_op!(re_zeta(re_x, im_x));
    impl_op!(shi(x));
    impl_op!(si(x));

    pub fn sin(self) -> Self {
        // sin(q π) is computed exactly when it is rational.
        let y_q = match self.q {
            Some((x, RealUnit::Pi)) => rational_ops::sin_pi(x).map(|q| (q, RealUnit::One)),
            _ => None,
        };
        if let Some(y_q) = y_q {
            y_q.into()
        } else {
            let x = self.x;
            x.sin().into()
        }
    }

    impl_op!(sinc(x));
    impl_op!(sinh(x));

    pub fn sub(self, rhs: Self) -> Self {
        let z_q = match (self.q, rhs.q) {
            // Same unit: subtract the coefficients exactly.
            (Some((x, x_unit)), Some((y, y_unit))) if x_unit == y_unit => Some((x - y, x_unit)),
            // 0 − y and x − 0 preserve exactness regardless of units.
            (Some((x, _)), Some((y, unit))) if x.is_zero() => Some((-y, unit)),
            (x_q, Some((y, _))) if y.is_zero() => x_q,
            _ => None,
        };
        if let Some(z_q) = z_q {
            z_q.into()
        } else {
            let x = self.x;
            let y = rhs.x;
            (&x - &y).into()
        }
    }

    pub fn tan(self) -> Self {
        // tan(q π) is computed exactly when it is rational.
        let y_q = match self.q {
            Some((x, RealUnit::Pi)) => rational_ops::tan_pi(x).map(|q| (q, RealUnit::One)),
            _ => None,
        };
        if let Some(y_q) = y_q {
            y_q.into()
        } else {
            let x = self.x;
            x.tan(None).into()
        }
    }

    impl_op!(tanh(x));
    impl_op!(undef_at_0(x));
    impl_op!(zeta(x));
}
// Operator sugar forwarding to the inherent methods above, so the exact
// rational fast paths are used by `-`, `+`, `-`, `*` and `/` as well.
impl Neg for Real {
    type Output = Self;

    fn neg(self) -> Self::Output {
        self.neg()
    }
}

impl Add for Real {
    type Output = Self;

    fn add(self, rhs: Self) -> Self::Output {
        self.add(rhs)
    }
}

impl Sub for Real {
    type Output = Self;

    fn sub(self, rhs: Self) -> Self::Output {
        self.sub(rhs)
    }
}

impl Mul for Real {
    type Output = Self;

    fn mul(self, rhs: Self) -> Self::Output {
        self.mul(rhs)
    }
}

impl Div for Real {
    type Output = Self;

    fn div(self, rhs: Self) -> Self::Output {
        self.div(rhs)
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use inari::{const_dec_interval, const_interval};

    #[test]
    fn decoration() {
        let x = Real::zero();
        let y = x.floor();
        // Without the treatment explained in `interval_set`,
        // the decoration would be `Decoration::Com`, which is wrong.
        assert_eq!(y.interval().decoration(), Decoration::Dac);
    }

    #[test]
    fn from_dec_interval() {
        // A singleton interval is recovered as an exact rational…
        let x = Real::from(const_dec_interval!(1.5, 1.5));
        assert_eq!(x.rational(), Some(&(3, 2).into()));
        assert_eq!(x.to_f64(), Some(1.5));
        // …while a wide interval stays inexact.
        let x = Real::from(DecInterval::PI);
        assert_eq!(x.rational(), None);
        assert_eq!(x.to_f64(), None);
    }

    #[test]
    fn from_rational() {
        let x = Real::from(Rational::from((3, 2)));
        assert_eq!(
            *x.interval(),
            TupperIntervalSet::from(DecInterval::set_dec(
                const_interval!(1.5, 1.5),
                Decoration::Dac
            ))
        );
        assert_eq!(x.to_f64(), Some(1.5));
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/eval_result.rs | rust/src/eval_result.rs | use crate::{
interval_set::{DecSignSet, SignSet, TupperIntervalSet},
ops::{StaticForm, StaticFormKind},
traits::BytesAllocated,
Ternary,
};
use core::ops::{BitAnd, BitAndAssign, BitOr, BitOrAssign, Not};
use inari::{Decoration, Interval};
use smallvec::SmallVec;
/// A sequence of evaluation results of atomic formulas.
///
/// Up to eight results are stored inline; longer sequences spill to the heap.
#[derive(Clone, Debug)]
pub struct EvalResult(pub SmallVec<[DecSignSet; 8]>);
impl EvalResult {
    /// Applies the given ternary-valued function on each result.
    pub fn map<F, T>(&self, f: F) -> EvalResultMask
    where
        F: Fn(DecSignSet) -> T,
        T: Into<Ternary>,
    {
        EvalResultMask(self.0.iter().map(|&x| f(x).into()).collect())
    }

    /// Evaluates the last formula of `forms` against these results.
    pub fn result(&self, forms: &[StaticForm]) -> Ternary {
        self.result_mask().eval(forms)
    }

    /// Converts each sign set into a [`Ternary`] via a pair of Booleans
    /// (presumably (certainly true, possibly true) — see `Ternary`'s
    /// conversion impl): exactly zero with decoration ≥ `Def` is certain,
    /// containing zero is possible.
    pub fn result_mask(&self) -> EvalResultMask {
        self.map(|DecSignSet(ss, d)| {
            (
                ss == SignSet::ZERO && d >= Decoration::Def,
                ss.contains(SignSet::ZERO),
            )
        })
    }
}
impl BytesAllocated for EvalResult {
    fn bytes_allocated(&self) -> usize {
        // Counts only heap memory; inline `SmallVec` storage reports 0.
        self.0.bytes_allocated()
    }
}
/// A sequence of Boolean values assigned to atomic formulas.
// NOTE: despite the name, each entry is a `Ternary`, not a two-valued Boolean.
#[derive(Clone, Debug)]
pub struct EvalResultMask(pub SmallVec<[Ternary; 8]>);
impl EvalResultMask {
    /// Evaluates the last formula to a Boolean value.
    pub fn eval(&self, forms: &[StaticForm]) -> Ternary {
        // The last form is the root of the formula DAG.
        Self::eval_impl(&self.0[..], forms, forms.len() - 1)
    }

    // Recursively evaluates form `i`, reading atomic results from `slf`.
    fn eval_impl(slf: &[Ternary], forms: &[StaticForm], i: usize) -> Ternary {
        use StaticFormKind::*;
        match &forms[i].kind {
            Constant(a) => (*a).into(),
            Atomic(_, _) => slf[i],
            Not(x) => !Self::eval_impl(slf, forms, *x as usize),
            And(x, y) => {
                Self::eval_impl(slf, forms, *x as usize) & Self::eval_impl(slf, forms, *y as usize)
            }
            Or(x, y) => {
                Self::eval_impl(slf, forms, *x as usize) | Self::eval_impl(slf, forms, *y as usize)
            }
        }
    }

    /// Returns `true` if the existence of a solution is concluded by the arguments.
    /// See the actual usage for details.
    pub fn solution_certainly_exists(
        &self,
        forms: &[StaticForm],
        locally_zero_mask: &Self,
    ) -> bool {
        Self::solution_certainly_exists_impl(
            &self.0[..],
            forms,
            forms.len() - 1,
            &locally_zero_mask.0[..],
        )
        .certainly_true()
    }

    fn solution_certainly_exists_impl(
        slf: &[Ternary],
        forms: &[StaticForm],
        i: usize,
        locally_zero_mask: &[Ternary],
    ) -> Ternary {
        use StaticFormKind::*;
        match &forms[i].kind {
            Constant(a) => (*a).into(),
            Atomic(_, _) => slf[i],
            Not(x) => {
                // `Not` must not be nested inside `And` since the result of the match arm for `And`
                // uses `certainly_true()`, and negating it can lead to a wrong conclusion.
                assert!(matches!(forms[*x as usize].kind, Atomic(_, _)));
                !Self::solution_certainly_exists_impl(slf, forms, *x as usize, locally_zero_mask)
            }
            And(x, y) => {
                // A conjunct that is certainly satisfied throughout the region
                // can be dropped; existence then hinges on the other conjunct.
                if Self::eval_impl(locally_zero_mask, forms, *x as usize).certainly_true() {
                    Self::solution_certainly_exists_impl(slf, forms, *y as usize, locally_zero_mask)
                } else if Self::eval_impl(locally_zero_mask, forms, *y as usize).certainly_true() {
                    Self::solution_certainly_exists_impl(slf, forms, *x as usize, locally_zero_mask)
                } else {
                    // Cannot tell the existence of a solution by a normal conjunction.
                    Ternary::False
                }
            }
            Or(x, y) => {
                Self::solution_certainly_exists_impl(slf, forms, *x as usize, locally_zero_mask)
                    | Self::solution_certainly_exists_impl(
                        slf,
                        forms,
                        *y as usize,
                        locally_zero_mask,
                    )
            }
        }
    }
}
// Element-wise ternary logic over masks. The binary operators panic if the
// two masks have different lengths.
impl BitAnd for &EvalResultMask {
    type Output = EvalResultMask;

    fn bitand(self, rhs: Self) -> Self::Output {
        assert_eq!(self.0.len(), rhs.0.len());
        EvalResultMask(
            self.0
                .iter()
                .zip(rhs.0.iter())
                .map(|(&x, &y)| x & y)
                .collect(),
        )
    }
}

impl BitAndAssign for EvalResultMask {
    fn bitand_assign(&mut self, rhs: Self) {
        *self = self.bitand(&rhs)
    }
}

impl BitOr for &EvalResultMask {
    type Output = EvalResultMask;

    fn bitor(self, rhs: Self) -> Self::Output {
        assert_eq!(self.0.len(), rhs.0.len());
        EvalResultMask(
            self.0
                .iter()
                .zip(rhs.0.iter())
                .map(|(&x, &y)| x | y)
                .collect(),
        )
    }
}

impl BitOrAssign for EvalResultMask {
    fn bitor_assign(&mut self, rhs: Self) {
        *self = self.bitor(&rhs)
    }
}

impl Not for EvalResultMask {
    type Output = EvalResultMask;

    fn not(self) -> Self::Output {
        EvalResultMask(self.0.iter().map(|&x| !x).collect())
    }
}
/// The evaluation arguments: one interval per argument slot.
pub type EvalArgs = [Interval];

/// Stores `$x` into `$args[$opt_index]` when the index is present.
#[macro_export]
macro_rules! set_arg {
    ($args:expr, $opt_index:expr, $x:expr) => {
        if let Some(i) = $opt_index {
            $args[i as usize] = $x;
        }
    };
}

/// The result of evaluating an explicit relation: a single interval set.
pub type EvalExplicitResult = TupperIntervalSet;

/// The result of evaluating a parametric relation: the (x, y) interval sets.
pub type EvalParametricResult = (TupperIntervalSet, TupperIntervalSet);

impl BytesAllocated for EvalParametricResult {
    fn bytes_allocated(&self) -> usize {
        // Sum over the two components.
        self.0.bytes_allocated() + self.1.bytes_allocated()
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/traits.rs | rust/src/traits.rs | use std::{
collections::{HashMap, VecDeque},
mem::size_of,
};
pub trait BytesAllocated {
    /// Returns the approximate amount of memory allocated by `self` in bytes.
    fn bytes_allocated(&self) -> usize;
}

impl<K, V> BytesAllocated for HashMap<K, V> {
    fn bytes_allocated(&self) -> usize {
        // Approximate each bucket as a hash word plus a key and a value.
        let per_entry = size_of::<u64>() + size_of::<K>() + size_of::<V>();
        self.capacity() * per_entry
    }
}

impl<T> BytesAllocated for Vec<T> {
    fn bytes_allocated(&self) -> usize {
        // The backing buffer holds `capacity()` elements, used or not.
        size_of::<T>() * self.capacity()
    }
}

impl<T> BytesAllocated for VecDeque<T> {
    fn bytes_allocated(&self) -> usize {
        // Same accounting as `Vec`: the whole ring buffer counts.
        size_of::<T>() * self.capacity()
    }
}
impl<T, const N: usize> BytesAllocated for smallvec::SmallVec<[T; N]>
where
    [T; N]: smallvec::Array,
{
    fn bytes_allocated(&self) -> usize {
        // Inline storage lives in `self` and is not counted; only the
        // spilled heap buffer contributes.
        if self.spilled() {
            self.capacity() * size_of::<T>()
        } else {
            0
        }
    }
}
pub trait Single: Iterator {
    /// Returns the only element of the iterator, or `None` if it yields
    /// zero elements or more than one.
    fn single(self) -> Option<Self::Item>;
}

impl<I: Iterator> Single for I {
    fn single(mut self) -> Option<Self::Item> {
        // Consume at most two elements: one candidate and one witness
        // that would disqualify it.
        let first = self.next()?;
        if self.next().is_none() {
            Some(first)
        } else {
            None
        }
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/arb.rs | rust/src/arb.rs | use graphest_flint_sys::*;
use inari::{interval, Interval};
use std::{mem::MaybeUninit, ops::Drop};
// Notes:
//
// - We always need to pass Arb pointers as `*_ptr` to Arb functions even if they expect `*_srcptr`,
// due to: https://github.com/rust-lang/rust-bindgen/issues/1962
//
// - Arb is thread-safe, thus we implement `Send` and `Sync` for Arb types.
// https://arblib.org/issues.html#thread-safety-and-caches
/// The precision of the `mag_t` type.
const MAG_BITS: u32 = 30;

/// Constants that correspond to the values of `arf_rnd_t`.
// Only the two directed modes are used; the others are kept (commented out)
// to document the full mapping.
#[derive(Clone, Copy, Debug)]
enum ArfRound {
    // Down = 0,
    // Up = 1,
    Floor = 2,
    Ceil = 3,
    // Near = 4,
}
/// A wrapper for the `arf_t` type.
struct Arf(arf_struct);
impl Arf {
/// Creates an `Arf` value initialized to be zero.
pub fn new() -> Self {
unsafe {
let mut x = MaybeUninit::uninit();
arf_init(x.as_mut_ptr());
Self(x.assume_init())
}
}
/// Returns an unsafe mutable pointer to the underlying `arf_t`.
pub fn as_mut_ptr(&mut self) -> arf_ptr {
&mut self.0
}
/// Returns an unsafe pointer to the underlying `arf_t`.
pub fn as_ptr(&self) -> arf_srcptr {
&self.0
}
/// Rounds `self` to a [`f64`] number using the given rounding mode.
pub fn to_f64_round(&self, round: ArfRound) -> f64 {
unsafe { arf_get_d(self.as_ptr() as arf_ptr, round as arf_rnd_t) }
}
}
impl Drop for Arf {
    fn drop(&mut self) {
        // Release the memory owned by the underlying `arf_t`.
        unsafe {
            arf_clear(self.as_mut_ptr());
        }
    }
}

// SAFETY: Arb is thread-safe (see the note and link at the top of this file),
// so its values may be moved to and shared between threads.
unsafe impl Send for Arf {}
unsafe impl Sync for Arf {}
/// A wrapper for the `arb_t` type.
pub struct Arb(arb_struct);

impl Arb {
    /// Creates an `Arb` interval initialized to be `[0 ± 0]`.
    pub fn new() -> Self {
        unsafe {
            let mut x = MaybeUninit::uninit();
            // `arb_init` fully initializes the struct before `assume_init`.
            arb_init(x.as_mut_ptr());
            Self(x.assume_init())
        }
    }

    /// Returns an unsafe mutable pointer to the underlying `arb_t`.
    pub fn as_mut_ptr(&mut self) -> arb_ptr {
        &mut self.0
    }

    /// Returns an unsafe pointer to the underlying `arb_t`.
    pub fn as_ptr(&self) -> arb_srcptr {
        &self.0
    }

    /// Creates an `Arb` interval that encloses `x`.
    pub fn from_interval(x: Interval) -> Self {
        let mut y = Self::new();
        if x.is_singleton() {
            // A point interval maps to a midpoint with zero radius.
            unsafe {
                arb_set_d(y.as_mut_ptr(), x.inf());
            }
        } else if !x.is_common_interval() {
            // Unbounded (or otherwise uncommon) intervals become [0 ± ∞].
            unsafe {
                arb_zero_pm_inf(y.as_mut_ptr());
            }
        } else {
            // Construct an `Arb` interval faster and more precisely than
            // using `arb_set_interval_arf`.
            let mid = x.mid();
            unsafe {
                arf_set_d(&mut y.0.mid, mid);
            }
            let rad = x.rad();
            if rad != 0.0 {
                // Round the radius up to a 30-bit `mag_t` mantissa.
                let (man, mut exp) = frexp(rad);
                let mut man = (man * (1 << MAG_BITS) as f64).ceil() as u32;
                if man == 1 << MAG_BITS {
                    // Restrict the mantissa within 30 bits:
                    // 100...000 ≤ `man` ≤ 111...111 (30 1's).
                    man = 1 << (MAG_BITS - 1);
                    exp += 1;
                }
                // For safer construction, see `mag_set_ui_2exp_si`.
                // https://github.com/fredrik-johansson/arb/blob/master/mag/set_ui_2exp_si.c
                y.0.rad.exp = exp.into();
                y.0.rad.man = man.into();
            }
        }
        y
    }

    /// Returns the lower bound of the interval.
    ///
    /// This is faster than `self.to_interval().sup()` if you don't need the upper bound.
    pub fn inf(&self) -> f64 {
        let mut x = Arf::new();
        unsafe {
            arb_get_lbound_arf(
                x.as_mut_ptr(),
                self.as_ptr() as arb_ptr,
                f64::MANTISSA_DIGITS.into(),
            );
        }
        let x = x.to_f64_round(ArfRound::Floor);
        if x.is_nan() {
            // Map NaN to −∞ so the bound stays a valid lower bound.
            f64::NEG_INFINITY
        } else {
            x
        }
    }

    /// Returns the upper bound of the interval.
    ///
    /// This is faster than `self.to_interval().sup()` if you don't need the lower bound.
    pub fn sup(&self) -> f64 {
        let mut x = Arf::new();
        unsafe {
            arb_get_ubound_arf(
                x.as_mut_ptr(),
                self.as_ptr() as arb_ptr,
                f64::MANTISSA_DIGITS.into(),
            );
        }
        let x = x.to_f64_round(ArfRound::Ceil);
        if x.is_nan() {
            // Map NaN to +∞ so the bound stays a valid upper bound.
            f64::INFINITY
        } else {
            x
        }
    }

    /// Returns an [`Interval`] that encloses `self`.
    pub fn to_interval(&self) -> Interval {
        let mut a = Arf::new();
        let mut b = Arf::new();
        unsafe {
            arb_get_interval_arf(
                a.as_mut_ptr(),
                b.as_mut_ptr(),
                self.as_ptr() as arb_ptr,
                f64::MANTISSA_DIGITS.into(),
            );
        }
        interval!(
            a.to_f64_round(ArfRound::Floor),
            b.to_f64_round(ArfRound::Ceil)
        )
        .unwrap_or(Interval::ENTIRE) // [+∞ ± c], [-∞ ± c] or [NaN ± c]
    }
}
impl Drop for Arb {
    fn drop(&mut self) {
        // Release the memory owned by the underlying `arb_t`.
        unsafe {
            arb_clear(self.as_mut_ptr());
        }
    }
}

// SAFETY: Arb is thread-safe (see the note and link at the top of this file),
// so its values may be moved to and shared between threads.
unsafe impl Send for Arb {}
unsafe impl Sync for Arb {}
/// A wrapper for the `acb_t` type.
pub struct Acb(acb_struct);

impl Acb {
    /// Creates an `Acb` interval initialized to be `[0 ± 0] + [0 ± 0]i`.
    pub fn new() -> Self {
        unsafe {
            let mut x = MaybeUninit::uninit();
            // `acb_init` fully initializes the struct before `assume_init`.
            acb_init(x.as_mut_ptr());
            Self(x.assume_init())
        }
    }

    /// Creates an `Acb` interval from the real and imaginary parts.
    pub fn from_parts(mut re: Arb, mut im: Arb) -> Self {
        let mut z = Acb::new();
        unsafe {
            acb_set_arb_arb(z.as_mut_ptr(), re.as_mut_ptr(), im.as_mut_ptr());
        }
        z
    }

    /// Returns an unsafe mutable pointer to the underlying `acb_t`.
    pub fn as_mut_ptr(&mut self) -> acb_ptr {
        &mut self.0
    }

    /// Returns an unsafe pointer to the underlying `acb_t`.
    pub fn as_ptr(&self) -> acb_srcptr {
        &self.0
    }

    /// Returns the real part of `self`.
    pub fn real(&self) -> Arb {
        let mut x = Arb::new();
        unsafe {
            acb_get_real(x.as_mut_ptr(), self.as_ptr() as acb_ptr);
        }
        x
    }

    /// Returns the imaginary part of `self`.
    pub fn imag(&self) -> Arb {
        let mut x = Arb::new();
        unsafe {
            acb_get_imag(x.as_mut_ptr(), self.as_ptr() as acb_ptr);
        }
        x
    }
}
impl Drop for Acb {
    fn drop(&mut self) {
        // Release the memory owned by the underlying `acb_t`.
        unsafe {
            acb_clear(self.as_mut_ptr());
        }
    }
}

impl From<Arb> for Acb {
    /// Embeds a real interval as a complex one with zero imaginary part.
    fn from(mut x: Arb) -> Self {
        let mut z = Acb::new();
        unsafe {
            acb_set_arb(z.as_mut_ptr(), x.as_mut_ptr());
        }
        z
    }
}

// SAFETY: Arb is thread-safe (see the note and link at the top of this file),
// so its values may be moved to and shared between threads.
unsafe impl Send for Acb {}
unsafe impl Sync for Acb {}
// Behaves like https://github.com/rust-lang/libm/blob/master/src/math/frexp.rs
/// Decomposes `x` into a mantissa in ±[0.5, 1) and an exponent `e` such that
/// `x == m * 2^e`. Zero, infinities and NaN are returned unchanged with `e = 0`.
fn frexp(x: f64) -> (f64, i32) {
    let bits = x.to_bits();
    let biased_exp = ((bits >> 52) & 0x7ff) as i32;
    match biased_exp {
        0 => {
            if x == 0.0 {
                // ±0 is passed through unchanged.
                (x, 0)
            } else {
                // Subnormal: scale into the normal range by 2^64 and recurse.
                let scaled = x * f64::from_bits(0x43f0000000000000);
                let (m, e) = frexp(scaled);
                (m, e - 64)
            }
        }
        // Infinity or NaN: passed through unchanged.
        0x7ff => (x, 0),
        _ => {
            // Normal number: unbias the exponent and force the stored
            // exponent field to that of [0.5, 1).
            let e = biased_exp - 0x3fe;
            let mantissa = f64::from_bits((bits & 0x800fffffffffffff) | 0x3fe0000000000000);
            (mantissa, e)
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use inari::{const_interval, Interval};

    #[test]
    fn inclusion_property() {
        // Round-tripping through `Arb` must always produce an enclosure.
        let xs = [
            Interval::EMPTY,
            const_interval!(0.0, 0.0),
            const_interval!(1.0, 1.0),
            Interval::PI,
            const_interval!(0.0, f64::INFINITY),
            const_interval!(f64::NEG_INFINITY, 0.0),
            Interval::ENTIRE,
            // The case where rounding up the interval radius (`mag_t`) produces a carry:
            // As opposed to `f64`, the hidden bit is not used in the mantissa of a `mag_t`.
            //   a = 0.0₂
            //   b = 0.111...111 1₂ × 2^1
            //         ^^^^^^^^^^^ 31 1's (1-bit larger than what can fit in the mantissa)
            //   (b - a)/2 = 0.111...111 1₂ × 2^0
            //   rad = 1.000...00₂ × 2^0   <- Round the mantissa of (b - a)/2 up to
            //       = 0.100...000₂ × 2^1     the nearest 30-bit number. (produces a carry)
            //           ^^^^^^^^^ the mantissa of a `mag_t` (30-bit)
            const_interval!(0.0, 1.9999999990686774),
        ];
        for x in xs {
            let x_arb = Arb::from_interval(x);
            let a = x_arb.inf();
            let b = x_arb.sup();
            let y = x_arb.to_interval();
            // `inf`/`sup` must agree with the full conversion…
            assert_eq!(a, y.inf());
            assert_eq!(b, y.sup());
            // …and the result must enclose the input.
            assert!(x.subset(y));
        }
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/context.rs | rust/src/context.rs | use crate::{
ast::{BinaryOp, Expr, TernaryOp, UnaryOp, ValueType},
real::Real,
vars::VarSet,
visit::{Substitute, VisitMut},
};
use inari::{const_dec_interval, DecInterval};
use nom::{Compare, CompareResult, Input, Needed, Offset};
use std::{
collections::HashMap,
ops::Range,
str::{CharIndices, Chars},
sync::LazyLock,
};
/// A definition of a constant or a function in terms of the AST.
#[derive(Clone, Debug)]
pub enum Def {
    /// A constant (or a variable symbol) whose value is `body`.
    Constant {
        body: Expr,
    },
    /// A function of `arity` parameters; the variables `#0`, `#1`, … in
    /// `body` are placeholders that get substituted with the arguments.
    Function {
        arity: usize,
        body: Expr,
        // If `true`, calls with more than `arity` arguments are folded
        // from the left, e.g. `max(a, b, c)` → `max(max(a, b), c)`.
        left_associative: bool,
    },
}
/// Properties attached to a variable definition (see [`Def::var`]).
#[derive(Clone, Debug)]
pub struct VarProps {
    // Whether the variable's value is defined everywhere.
    pub totally_defined: bool,
    // The type of the variable's value.
    pub ty: ValueType,
    // The set of underlying variables the symbol depends on
    // (e.g. `r` depends on both x and y).
    pub vars: VarSet,
}
impl Default for VarProps {
fn default() -> Self {
Self {
totally_defined: false,
ty: ValueType::Unknown,
vars: VarSet::EMPTY,
}
}
}
impl Def {
    /// Creates a definition of a Boolean constant.
    fn bool_constant(x: bool) -> Self {
        Self::Constant {
            body: Expr::bool_constant(x),
        }
    }
    /// Creates a definition of a real constant.
    fn constant(x: Real) -> Self {
        Self::Constant {
            body: Expr::constant(x),
        }
    }
    /// Creates a definition of a variable with the given properties.
    pub fn var(name: &str, props: VarProps) -> Self {
        let mut e = Expr::var(name);
        e.totally_defined = props.totally_defined;
        e.ty = props.ty;
        e.vars = props.vars;
        Self::Constant { body: e }
    }
    /// Creates a definition of a unary function.
    fn unary(op: UnaryOp) -> Self {
        Self::Function {
            arity: 1,
            body: Expr::unary(op, Expr::var("#0")),
            left_associative: false,
        }
    }
    /// Creates a definition of a binary function.
    fn binary(op: BinaryOp) -> Self {
        Self::Function {
            arity: 2,
            body: Expr::binary(op, Expr::var("#0"), Expr::var("#1")),
            left_associative: false,
        }
    }
    /// Marks the binary function as left-associative and returns `self`.
    ///
    /// # Panics
    ///
    /// Panics if `self` is not a function of arity 2.
    fn left_associative(mut self) -> Self {
        match self {
            Def::Function {
                arity: 2,
                ref mut left_associative,
                ..
            } => {
                *left_associative = true;
            }
            _ => panic!("only a function of arity 2 can be left-associative"),
        }
        self
    }
    /// Applies the function to the arguments by substituting them for the
    /// placeholders `#0`, `#1`, … in the body.
    ///
    /// # Panics
    ///
    /// Panics if `self` is not a function or the number of arguments does not
    /// match the arity.
    fn apply(&self, args: Vec<Expr>) -> Expr {
        match self {
            Def::Function { arity, body, .. } => {
                assert_eq!(*arity, args.len(), "wrong number of arguments");
                let mut t = body.clone();
                Substitute::new(args).visit_expr_mut(&mut t);
                t
            }
            _ => panic!("cannot apply a constant to arguments"),
        }
    }
}
/// A set of definitions of constants and functions.
#[derive(Clone, Debug)]
pub struct Context {
    // Maps a symbol to its definitions; several `Def::Function`s may share a
    // name as long as their arities differ (enforced by [`Context::def`]).
    defs: HashMap<String, Vec<Def>>,
}
impl Context {
    /// Creates an empty context.
    pub fn new() -> Self {
        Self {
            defs: HashMap::new(),
        }
    }
    /// Appends a definition to the context and returns `self`.
    ///
    /// Panics if there is already a definition that conflicts with the new
    /// one: a constant can never share a name with another definition, and
    /// functions sharing a name must have distinct arities.
    pub fn def(mut self, name: &str, def: Def) -> Self {
        let defs = self.defs.entry(name.into()).or_default();
        for existing in defs.iter() {
            match (&def, existing) {
                (Def::Constant { .. }, _) | (_, Def::Constant { .. }) => panic!(),
                (Def::Function { arity: a1, .. }, Def::Function { arity: a2, .. })
                    if a1 == a2 =>
                {
                    panic!()
                }
                _ => (),
            }
        }
        defs.push(def);
        self
    }
}
/// The context that is used when parsing relations.
static BUILTIN_CONTEXT: LazyLock<Context> = LazyLock::new(|| {
use {BinaryOp::*, TernaryOp::*, UnaryOp::*};
const EULER_GAMMA: DecInterval = const_dec_interval!(0.5772156649015328, 0.5772156649015329);
Context::new()
.def("false", Def::bool_constant(false))
.def("true", Def::bool_constant(true))
.def("e", Def::constant(DecInterval::E.into()))
.def("gamma", Def::constant(EULER_GAMMA.into()))
.def("γ", Def::constant(EULER_GAMMA.into()))
.def("pi", Def::constant(Real::pi()))
.def("π", Def::constant(Real::pi()))
.def(
"i",
Def::Constant {
body: Expr::binary(Complex, Expr::zero(), Expr::one()),
},
)
.def(
"m",
Def::var(
"m",
VarProps {
totally_defined: true,
ty: ValueType::Real,
vars: VarSet::M,
},
),
)
.def(
"n",
Def::var(
"n",
VarProps {
totally_defined: true,
ty: ValueType::Real,
vars: VarSet::N,
},
),
)
.def(
"r",
Def::var(
"r",
VarProps {
totally_defined: true,
ty: ValueType::Real,
vars: VarSet::X | VarSet::Y,
},
),
)
.def(
"t",
Def::var(
"t",
VarProps {
totally_defined: true,
ty: ValueType::Real,
vars: VarSet::T,
},
),
)
// `theta` will be expanded to an expression that contains [`BinaryOp::Atan2`],
// which is not totally defined.
.def(
"theta",
Def::var(
"theta",
VarProps {
totally_defined: false,
ty: ValueType::Real,
vars: VarSet::X | VarSet::Y | VarSet::N_THETA,
},
),
)
.def(
"θ",
Def::var(
"theta",
VarProps {
totally_defined: false,
ty: ValueType::Real,
vars: VarSet::X | VarSet::Y | VarSet::N_THETA,
},
),
)
.def(
"x",
Def::var(
"x",
VarProps {
totally_defined: true,
ty: ValueType::Real,
vars: VarSet::X,
},
),
)
.def(
"y",
Def::var(
"y",
VarProps {
totally_defined: true,
ty: ValueType::Real,
vars: VarSet::Y,
},
),
)
.def(
"<n-theta>",
Def::var(
"<n-theta>",
VarProps {
totally_defined: true,
ty: ValueType::Real,
vars: VarSet::N_THETA,
},
),
)
.def("abs", Def::unary(Abs))
.def("acos", Def::unary(Acos))
.def("acosh", Def::unary(Acosh))
.def("Ai", Def::unary(AiryAi))
.def("Ai'", Def::unary(AiryAiPrime))
.def("Bi", Def::unary(AiryBi))
.def("Bi'", Def::unary(AiryBiPrime))
.def("arg", Def::unary(Arg))
.def("asin", Def::unary(Asin))
.def("asinh", Def::unary(Asinh))
.def("atan", Def::unary(Atan))
.def("atanh", Def::unary(Atanh))
.def("ceil", Def::unary(Ceil))
.def("Chi", Def::unary(Chi))
.def("Ci", Def::unary(Ci))
.def("~", Def::unary(Conj))
.def("cos", Def::unary(Cos))
.def("cosh", Def::unary(Cosh))
.def("psi", Def::unary(Digamma))
.def("ψ", Def::unary(Digamma))
.def("Ei", Def::unary(Ei))
.def("E", Def::unary(EllipticE))
.def("K", Def::unary(EllipticK))
.def("erf", Def::unary(Erf))
.def("erfc", Def::unary(Erfc))
.def("erfi", Def::unary(Erfi))
.def("exp", Def::unary(Exp))
.def("floor", Def::unary(Floor))
.def("C", Def::unary(FresnelC))
.def("S", Def::unary(FresnelS))
.def("Gamma", Def::unary(Gamma))
.def("Γ", Def::unary(Gamma))
.def("Im", Def::unary(Im))
.def("erfinv", Def::unary(InverseErf))
.def("erfcinv", Def::unary(InverseErfc))
.def("li", Def::unary(Li))
.def("ln", Def::unary(Ln))
.def("lnGamma", Def::unary(LnGamma))
.def("lnΓ", Def::unary(LnGamma))
.def("-", Def::unary(Neg))
.def("!", Def::unary(Not))
.def("Re", Def::unary(Re))
.def("Shi", Def::unary(Shi))
.def("Si", Def::unary(Si))
.def("sgn", Def::unary(Sign))
.def("sin", Def::unary(Sin))
.def("sinc", Def::unary(Sinc))
.def("sinh", Def::unary(Sinh))
.def("sqrt", Def::unary(Sqrt))
.def("tan", Def::unary(Tan))
.def("tanh", Def::unary(Tanh))
.def("zeta", Def::unary(Zeta))
.def("ζ", Def::unary(Zeta))
.def("+", Def::binary(Add))
.def("&&", Def::binary(And))
.def("atan2", Def::binary(Atan2))
.def("I", Def::binary(BesselI))
.def("J", Def::binary(BesselJ))
.def("K", Def::binary(BesselK))
.def("Y", Def::binary(BesselY))
.def("/", Def::binary(Div))
.def("=", Def::binary(Eq))
.def("Gamma", Def::binary(GammaInc))
.def("Γ", Def::binary(GammaInc))
.def("gcd", Def::binary(Gcd).left_associative())
.def(">=", Def::binary(Ge))
.def(">", Def::binary(Gt))
.def(
"W",
Def::Function {
arity: 1,
body: { Expr::binary(LambertW, Expr::zero(), Expr::var("#0")) },
left_associative: false,
},
)
.def("W", Def::binary(LambertW))
.def("lcm", Def::binary(Lcm).left_associative())
.def("<=", Def::binary(Le))
.def("log", Def::binary(Log))
.def("<", Def::binary(Lt))
.def("max", Def::binary(Max).left_associative())
.def("min", Def::binary(Min).left_associative())
.def("mod", Def::binary(Mod))
.def("*", Def::binary(Mul))
.def("||", Def::binary(Or))
.def("^", Def::binary(Pow))
.def("^^", Def::binary(PowRational))
.def("rankedMax", Def::binary(RankedMax))
.def("rankedMin", Def::binary(RankedMin))
.def("-", Def::binary(Sub))
.def(
"if",
Def::Function {
arity: 3,
body: {
Expr::ternary(
IfThenElse,
Expr::unary(Boole, Expr::var("#0")),
Expr::var("#1"),
Expr::var("#2"),
)
},
left_associative: false,
},
)
});
impl Context {
    /// Returns the context with the builtin definitions.
    pub fn builtin() -> &'static Self {
        &BUILTIN_CONTEXT
    }
    /// Applies the function `name` to the arguments, returning the resulting
    /// expression, or [`Expr::error`] if no definition accepts the arguments.
    ///
    /// A left-associative definition also accepts three or more arguments by
    /// folding them from the left, e.g. `max(a, b, c)` → `max(max(a, b), c)`.
    ///
    /// Precondition: `name` is a function symbol.
    pub fn apply(&self, name: &str, args: Vec<Expr>) -> Expr {
        for d in self.defs.get(name).unwrap() {
            match *d {
                // Exact arity match takes precedence.
                Def::Function { arity, .. } if args.len() == arity => {
                    return d.apply(args);
                }
                Def::Function {
                    left_associative, ..
                } if left_associative && args.len() >= 2 => {
                    return args
                        .into_iter()
                        .reduce(|xs, y| d.apply(vec![xs, y]))
                        .unwrap();
                }
                _ => (),
            }
        }
        Expr::error()
    }
    /// Returns a clone of the body of the constant `name`, if defined.
    pub fn get_constant(&self, name: &str) -> Option<Expr> {
        for d in self.defs.get(name)? {
            if let Def::Constant { body } = d {
                return Some(body.clone());
            }
        }
        None
    }
    /// Returns `true` if the symbol `name` has any definition.
    pub fn has(&self, name: &str) -> bool {
        self.defs.contains_key(name)
    }
    /// Returns `true` if `name` is defined as a function.
    ///
    /// Only the first definition is inspected; constants and functions never
    /// share a name (enforced by [`Context::def`]), so this is sufficient.
    pub fn is_function(&self, name: &str) -> bool {
        if let Some(defs) = self.defs.get(name) {
            matches!(defs.first().unwrap(), Def::Function { .. })
        } else {
            false
        }
    }
}
/// A `nom` parser input that carries the source string together with a stack
/// of contexts and the byte range the string occupies in the whole source.
#[derive(Clone)]
pub struct InputWithContext<'a> {
    pub source: &'a str,
    pub context_stack: &'a [&'a Context],
    // The byte range of `source` within the original (whole) source string.
    pub source_range: Range<usize>,
}
impl<'a> InputWithContext<'a> {
    /// Creates an input that spans the whole of `source`.
    pub fn new(source: &'a str, context_stack: &'a [&'a Context]) -> Self {
        Self {
            source,
            context_stack,
            source_range: 0..source.len(),
        }
    }
}
// `Compare` is delegated to the underlying `&str`.
impl<'a> Compare<&str> for InputWithContext<'a> {
    fn compare(&self, t: &str) -> CompareResult {
        self.source.compare(t)
    }
    fn compare_no_case(&self, t: &str) -> CompareResult {
        self.source.compare_no_case(t)
    }
}
// `Input` delegates to the underlying `&str` while keeping `source_range` in
// sync so positions can be mapped back to the original (whole) source.
impl<'a> Input for InputWithContext<'a> {
    type Item = char;
    type Iter = Chars<'a>;
    type IterIndices = CharIndices<'a>;
    fn input_len(&self) -> usize {
        self.source.input_len()
    }
    // The first `count` bytes of the input.
    fn take(&self, count: usize) -> Self {
        let start = self.source_range.start;
        let end = self.source_range.start + count;
        InputWithContext {
            source: self.source.take(count),
            source_range: start..end,
            ..*self
        }
    }
    // The input with the first `index` bytes removed.
    fn take_from(&self, index: usize) -> Self {
        let start = self.source_range.start + index;
        let end = self.source_range.end;
        InputWithContext {
            source: self.source.take_from(index),
            source_range: start..end,
            ..*self
        }
    }
    // Splits the input at `count`; note the tuple is `(suffix, prefix)`.
    fn take_split(&self, count: usize) -> (Self, Self) {
        // Beware the order.
        let (suffix, prefix) = self.source.take_split(count);
        let start = self.source_range.start;
        let mid = self.source_range.start + count;
        let end = self.source_range.end;
        (
            InputWithContext {
                source: suffix,
                source_range: mid..end,
                ..*self
            },
            InputWithContext {
                source: prefix,
                source_range: start..mid,
                ..*self
            },
        )
    }
    fn position<P>(&self, predicate: P) -> Option<usize>
    where
        P: Fn(Self::Item) -> bool,
    {
        self.source.position(predicate)
    }
    fn iter_elements(&self) -> Self::Iter {
        self.source.iter_elements()
    }
    fn iter_indices(&self) -> Self::IterIndices {
        self.source.iter_indices()
    }
    fn slice_index(&self, count: usize) -> Result<usize, Needed> {
        self.source.slice_index(count)
    }
}
impl<'a> Offset for InputWithContext<'a> {
    // Byte offset of `second` within `self` (delegated to `str`; `second` is
    // expected to be derived from `self`, per `nom::Offset`'s contract).
    fn offset(&self, second: &Self) -> usize {
        self.source.offset(second.source)
    }
}
impl<'a> PartialEq for InputWithContext<'a> {
    // NOTE(review): equality compares only `source`; `context_stack` and
    // `source_range` are deliberately ignored — confirm callers rely only on
    // textual equality.
    fn eq(&self, other: &Self) -> bool {
        self.source == other.source
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/graph.rs | rust/src/graph.rs | use crate::{image::Image, traits::BytesAllocated, Ternary};
use std::{error, fmt, time::Duration};
/// The kind of an error that can occur during graphing.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum GraphingErrorKind {
    // A block index exceeded the representable range.
    BlockIndexOverflow,
    // The memory usage limit was reached.
    ReachedMemLimit,
    // The subdivision limit was reached before graphing completed.
    ReachedSubdivisionLimit,
}
/// An error that can occur during graphing; see [`GraphingErrorKind`].
#[derive(Clone, Debug, Eq, PartialEq)]
pub struct GraphingError {
    pub kind: GraphingErrorKind,
}
impl fmt::Display for GraphingError {
    /// Writes a short human-readable description of the error.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        let message = match self.kind {
            GraphingErrorKind::BlockIndexOverflow => "block index overflow",
            GraphingErrorKind::ReachedMemLimit => "reached the memory usage limit",
            GraphingErrorKind::ReachedSubdivisionLimit => "reached the subdivision limit",
        };
        write!(f, "{}", message)
    }
}
impl error::Error for GraphingError {}
/// Statistical information about graphing.
#[derive(Clone, Debug)]
pub struct GraphingStatistics {
/// The number of times the relation has been evaluated.
pub eval_count: usize,
/// The total number of pixels in the image.
pub pixels: usize,
/// The number of pixels that have been shown to be true or false.
pub pixels_complete: usize,
/// The total amount of time spent on refinement.
pub time_elapsed: Duration,
}
/// An implementation of a faithful graphing algorithm.
pub trait Graph: BytesAllocated {
/// Puts the image of the graph to `im`.
///
/// The image is vertically flipped, i.e., the pixel (0, 0) is the bottom-left corner of the graph.
fn get_image(&self, im: &mut Image<Ternary>);
/// Returns the current statistics of graphing.
fn get_statistics(&self) -> GraphingStatistics;
/// Refines the graph for the specified amount of time.
///
/// Returns `Ok(true)`/`Ok(false)` if graphing is complete/incomplete after refinement.
fn refine(&mut self, duration: Duration) -> Result<bool, GraphingError>;
}
/// Padding, in pixels, around the graphing area inside the image
/// (used to offset the image↔real coordinate transformations).
#[derive(Clone, Debug)]
pub struct Padding {
    pub bottom: u32,
    pub left: u32,
    pub right: u32,
    pub top: u32,
}
pub mod explicit;
pub mod implicit;
pub mod parametric;
mod common;
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/ops.rs | rust/src/ops.rs | use crate::{
interval_set::{DecSignSet, Site, TupperIntervalSet},
vars::{VarIndex, VarSet, VarType},
};
/// An index of a slot in an [`OptionalValueStore`], stored compactly as a `u32`.
#[derive(Clone, Copy, Debug, Eq, Ord, PartialEq, PartialOrd)]
pub struct StoreIndex(u32);
impl StoreIndex {
    /// Creates an index from `i`.
    ///
    /// # Panics
    ///
    /// Panics if `i` does not fit in a `u32`.
    pub fn new(i: usize) -> Self {
        Self(u32::try_from(i).expect("store index does not fit in a u32"))
    }
    /// Returns the index as a `usize`.
    pub fn get(&self) -> usize {
        self.0 as usize
    }
}
/// A fixed-size store of optional values, addressed by [`StoreIndex`].
#[derive(Clone, Debug)]
pub struct OptionalValueStore<T: Clone>(Vec<Option<T>>);
impl<T: Clone> OptionalValueStore<T> {
    /// Creates a store with `size` empty slots.
    pub fn new(size: usize) -> Self {
        Self(vec![None; size])
    }
    /// Returns a reference to the value at `index`, if any.
    pub fn get(&self, index: StoreIndex) -> Option<&T> {
        self.0[index.get()].as_ref()
    }
    /// Stores `value` at `index`, returning the previous value, if any.
    pub fn insert(&mut self, index: StoreIndex, value: T) -> Option<T> {
        self.0[index.get()].replace(value)
    }
    /// Removes and returns the value at `index`, if any.
    pub fn remove(&mut self, index: StoreIndex) -> Option<T> {
        self.0[index.get()].take()
    }
}
pub type FormIndex = u32;
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ScalarUnaryOp {
Abs,
Acos,
Acosh,
AiryAi,
AiryAiPrime,
AiryBi,
AiryBiPrime,
Asin,
Asinh,
Atan,
Atanh,
BooleEqZero,
BooleLeZero,
BooleLtZero,
Ceil,
Chi,
Ci,
Cos,
Cosh,
Digamma,
Ei,
EllipticE,
EllipticK,
Erf,
Erfc,
Erfi,
Exp,
Floor,
FresnelC,
FresnelS,
Gamma,
InverseErf,
InverseErfc,
Li,
Ln,
LnGamma,
Neg,
Recip,
Shi,
Si,
Sin,
Sinc,
Sinh,
Sqr,
Sqrt,
Tan,
Tanh,
UndefAt0,
Zeta,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ScalarBinaryOp {
Add,
Atan2,
BesselI,
BesselJ,
BesselK,
BesselY,
Div,
GammaInc,
Gcd,
ImSinc,
ImUndefAt0,
ImZeta,
LambertW,
Lcm,
Log,
Max,
Min,
Mod,
Mul,
Pow,
PowRational,
ReSignNonnegative,
ReSinc,
ReUndefAt0,
ReZeta,
Sub,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum ScalarTernaryOp {
IfThenElse,
MulAdd,
}
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum RankedMinMaxOp {
RankedMax,
RankedMin,
}
#[allow(clippy::enum_variant_names)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum RelOp {
EqZero,
LeZero,
LtZero,
}
#[derive(Clone, Debug)]
pub enum StaticTermKind {
Constant(Box<TupperIntervalSet>),
Var(VarIndex, VarType),
Unary(ScalarUnaryOp, StoreIndex),
Binary(ScalarBinaryOp, StoreIndex, StoreIndex),
Ternary(ScalarTernaryOp, StoreIndex, StoreIndex, StoreIndex),
Pown(StoreIndex, i32),
Rootn(StoreIndex, u32),
RankedMinMax(RankedMinMaxOp, Vec<StoreIndex>, StoreIndex),
}
/// A term in a cache-efficient representation.
#[derive(Clone, Debug)]
pub struct StaticTerm {
    pub kind: StaticTermKind,
    // Passed to operations that may split intervals at discontinuities
    // (e.g. `tan`, `floor`, `div`); see [`Site`].
    pub site: Option<Site>,
    // The slot in the value store where this term's value is cached.
    pub store_index: StoreIndex,
    // The set of variables the term depends on.
    pub vars: VarSet,
}
impl StaticTerm {
pub fn put(&self, store: &mut OptionalValueStore<TupperIntervalSet>, value: TupperIntervalSet) {
store.insert(self.store_index, value);
}
/// Evaluates the term and puts the result in the value store.
///
/// Panics if the term is of the kind [`StaticTermKind::Var`].
pub fn put_eval(&self, terms: &[Self], ts: &mut OptionalValueStore<TupperIntervalSet>) {
use {
RankedMinMaxOp::*, ScalarBinaryOp::*, ScalarTernaryOp::*, ScalarUnaryOp::*,
StaticTermKind::*,
};
if ts.get(self.store_index).is_some() {
return;
}
match &self.kind {
Unary(_, x) | Pown(x, _) | Rootn(x, _) => {
if ts.get(*x).is_none() {
terms[x.get()].put_eval(terms, ts);
}
}
Binary(_, x, y) => {
if ts.get(*x).is_none() {
terms[x.get()].put_eval(terms, ts);
}
if ts.get(*y).is_none() {
terms[y.get()].put_eval(terms, ts);
}
}
Ternary(IfThenElse, cond, t, f) => {
if ts.get(*cond).is_none() {
terms[cond.get()].put_eval(terms, ts);
}
let c = &ts.get(*cond).unwrap();
let eval_t = c.iter().any(|x| x.x.contains(1.0));
let eval_f = c.iter().any(|x| x.x.contains(0.0));
if eval_t && ts.get(*t).is_none() {
terms[t.get()].put_eval(terms, ts);
}
if eval_f && ts.get(*f).is_none() {
terms[f.get()].put_eval(terms, ts);
}
}
Ternary(_, x, y, z) => {
if ts.get(*x).is_none() {
terms[x.get()].put_eval(terms, ts);
}
if ts.get(*y).is_none() {
terms[y.get()].put_eval(terms, ts);
}
if ts.get(*z).is_none() {
terms[z.get()].put_eval(terms, ts);
}
}
RankedMinMax(_, xs, n) => {
for x in xs {
if ts.get(*x).is_none() {
terms[x.get()].put_eval(terms, ts);
}
}
if ts.get(*n).is_none() {
terms[n.get()].put_eval(terms, ts);
}
}
Constant(_) | Var(_, _) => (),
}
let dummy_interval_set = TupperIntervalSet::new();
match &self.kind {
Constant(x) => self.put(ts, *x.clone()),
Unary(Abs, x) => self.put(ts, ts.get(*x).unwrap().abs()),
Unary(Acos, x) => self.put(ts, ts.get(*x).unwrap().acos()),
Unary(Acosh, x) => self.put(ts, ts.get(*x).unwrap().acosh()),
Unary(AiryAi, x) => self.put(ts, ts.get(*x).unwrap().airy_ai()),
Unary(AiryAiPrime, x) => self.put(ts, ts.get(*x).unwrap().airy_ai_prime()),
Unary(AiryBi, x) => self.put(ts, ts.get(*x).unwrap().airy_bi()),
Unary(AiryBiPrime, x) => self.put(ts, ts.get(*x).unwrap().airy_bi_prime()),
Unary(Asin, x) => self.put(ts, ts.get(*x).unwrap().asin()),
Unary(Asinh, x) => self.put(ts, ts.get(*x).unwrap().asinh()),
Unary(Atan, x) => self.put(ts, ts.get(*x).unwrap().atan()),
Unary(Atanh, x) => self.put(ts, ts.get(*x).unwrap().atanh()),
Unary(BooleEqZero, x) => self.put(ts, ts.get(*x).unwrap().boole_eq_zero(self.site)),
Unary(BooleLeZero, x) => self.put(ts, ts.get(*x).unwrap().boole_le_zero(self.site)),
Unary(BooleLtZero, x) => self.put(ts, ts.get(*x).unwrap().boole_lt_zero(self.site)),
Unary(Ceil, x) => self.put(ts, ts.get(*x).unwrap().ceil(self.site)),
Unary(Chi, x) => self.put(ts, ts.get(*x).unwrap().chi()),
Unary(Ci, x) => self.put(ts, ts.get(*x).unwrap().ci()),
Unary(Cos, x) => self.put(ts, ts.get(*x).unwrap().cos()),
Unary(Cosh, x) => self.put(ts, ts.get(*x).unwrap().cosh()),
Unary(Digamma, x) => self.put(ts, ts.get(*x).unwrap().digamma(self.site)),
Unary(Ei, x) => self.put(ts, ts.get(*x).unwrap().ei()),
Unary(EllipticE, x) => self.put(ts, ts.get(*x).unwrap().elliptic_e()),
Unary(EllipticK, x) => self.put(ts, ts.get(*x).unwrap().elliptic_k()),
Unary(Erf, x) => self.put(ts, ts.get(*x).unwrap().erf()),
Unary(Erfc, x) => self.put(ts, ts.get(*x).unwrap().erfc()),
Unary(Erfi, x) => self.put(ts, ts.get(*x).unwrap().erfi()),
Unary(Exp, x) => self.put(ts, ts.get(*x).unwrap().exp()),
Unary(Floor, x) => self.put(ts, ts.get(*x).unwrap().floor(self.site)),
Unary(FresnelC, x) => self.put(ts, ts.get(*x).unwrap().fresnel_c()),
Unary(FresnelS, x) => self.put(ts, ts.get(*x).unwrap().fresnel_s()),
Unary(Gamma, x) => self.put(ts, ts.get(*x).unwrap().gamma(self.site)),
Unary(InverseErf, x) => self.put(ts, ts.get(*x).unwrap().inverse_erf()),
Unary(InverseErfc, x) => self.put(ts, ts.get(*x).unwrap().inverse_erfc()),
Unary(Li, x) => self.put(ts, ts.get(*x).unwrap().li()),
Unary(Ln, x) => self.put(ts, ts.get(*x).unwrap().ln()),
Unary(LnGamma, x) => self.put(ts, ts.get(*x).unwrap().ln_gamma()),
Unary(Neg, x) => self.put(ts, -ts.get(*x).unwrap()),
Unary(Recip, x) => self.put(ts, ts.get(*x).unwrap().recip(self.site)),
Unary(Shi, x) => self.put(ts, ts.get(*x).unwrap().shi()),
Unary(Si, x) => self.put(ts, ts.get(*x).unwrap().si()),
Unary(Sin, x) => self.put(ts, ts.get(*x).unwrap().sin()),
Unary(Sinc, x) => self.put(ts, ts.get(*x).unwrap().sinc()),
Unary(Sinh, x) => self.put(ts, ts.get(*x).unwrap().sinh()),
Unary(Sqr, x) => self.put(ts, ts.get(*x).unwrap().sqr()),
Unary(Sqrt, x) => self.put(ts, ts.get(*x).unwrap().sqrt()),
Unary(Tan, x) => self.put(ts, ts.get(*x).unwrap().tan(self.site)),
Unary(Tanh, x) => self.put(ts, ts.get(*x).unwrap().tanh()),
Unary(UndefAt0, x) => self.put(ts, ts.get(*x).unwrap().undef_at_0()),
Unary(Zeta, x) => self.put(ts, ts.get(*x).unwrap().zeta()),
Binary(Add, x, y) => self.put(ts, ts.get(*x).unwrap() + ts.get(*y).unwrap()),
Binary(Atan2, y, x) => self.put(
ts,
ts.get(*y).unwrap().atan2(ts.get(*x).unwrap(), self.site),
),
Binary(BesselI, n, x) => {
self.put(ts, ts.get(*n).unwrap().bessel_i(ts.get(*x).unwrap()))
}
Binary(BesselJ, n, x) => {
self.put(ts, ts.get(*n).unwrap().bessel_j(ts.get(*x).unwrap()))
}
Binary(BesselK, n, x) => {
self.put(ts, ts.get(*n).unwrap().bessel_k(ts.get(*x).unwrap()))
}
Binary(BesselY, n, x) => {
self.put(ts, ts.get(*n).unwrap().bessel_y(ts.get(*x).unwrap()))
}
Binary(Div, x, y) => {
self.put(ts, ts.get(*x).unwrap().div(ts.get(*y).unwrap(), self.site))
}
Binary(GammaInc, a, x) => {
self.put(ts, ts.get(*a).unwrap().gamma_inc(ts.get(*x).unwrap()))
}
Binary(Gcd, x, y) => {
self.put(ts, ts.get(*x).unwrap().gcd(ts.get(*y).unwrap(), self.site))
}
Binary(ImSinc, re_x, im_x) => {
self.put(ts, ts.get(*re_x).unwrap().im_sinc(ts.get(*im_x).unwrap()))
}
Binary(ImUndefAt0, re_x, im_x) => self.put(
ts,
ts.get(*re_x).unwrap().im_undef_at_0(ts.get(*im_x).unwrap()),
),
Binary(ImZeta, re_x, im_x) => {
self.put(ts, ts.get(*re_x).unwrap().im_zeta(ts.get(*im_x).unwrap()))
}
Binary(LambertW, k, x) => {
self.put(ts, ts.get(*k).unwrap().lambert_w(ts.get(*x).unwrap()))
}
Binary(Lcm, x, y) => {
self.put(ts, ts.get(*x).unwrap().lcm(ts.get(*y).unwrap(), self.site))
}
// Beware the order of arguments.
Binary(Log, b, x) => {
self.put(ts, ts.get(*x).unwrap().log(ts.get(*b).unwrap(), self.site))
}
Binary(Max, x, y) => self.put(ts, ts.get(*x).unwrap().max(ts.get(*y).unwrap())),
Binary(Min, x, y) => self.put(ts, ts.get(*x).unwrap().min(ts.get(*y).unwrap())),
Binary(Mod, x, y) => self.put(
ts,
ts.get(*x).unwrap().modulo(ts.get(*y).unwrap(), self.site),
),
Binary(Mul, x, y) => self.put(ts, ts.get(*x).unwrap() * ts.get(*y).unwrap()),
Binary(Pow, x, y) => {
self.put(ts, ts.get(*x).unwrap().pow(ts.get(*y).unwrap(), self.site))
}
Binary(PowRational, x, y) => self.put(
ts,
ts.get(*x)
.unwrap()
.pow_rational(ts.get(*y).unwrap(), self.site),
),
Binary(ReSignNonnegative, x, y) => self.put(
ts,
ts.get(*x)
.unwrap()
.re_sign_nonnegative(ts.get(*y).unwrap(), self.site),
),
Binary(ReSinc, re_x, im_x) => {
self.put(ts, ts.get(*re_x).unwrap().re_sinc(ts.get(*im_x).unwrap()))
}
Binary(ReUndefAt0, re_x, im_x) => self.put(
ts,
ts.get(*re_x).unwrap().re_undef_at_0(ts.get(*im_x).unwrap()),
),
Binary(ReZeta, re_x, im_x) => {
self.put(ts, ts.get(*re_x).unwrap().re_zeta(ts.get(*im_x).unwrap()))
}
Binary(Sub, x, y) => self.put(ts, ts.get(*x).unwrap() - ts.get(*y).unwrap()),
Ternary(IfThenElse, cond, t, f) => self.put(
ts,
ts.get(*cond).unwrap().if_then_else(
ts.get(*t).unwrap_or(&dummy_interval_set),
ts.get(*f).unwrap_or(&dummy_interval_set),
),
),
Ternary(MulAdd, x, y, z) => self.put(
ts,
ts.get(*x)
.unwrap()
.mul_add(ts.get(*y).unwrap(), ts.get(*z).unwrap()),
),
Pown(x, n) => self.put(ts, ts.get(*x).unwrap().pown(*n, self.site)),
Rootn(x, n) => self.put(ts, ts.get(*x).unwrap().rootn(*n)),
RankedMinMax(RankedMax, xs, n) => {
self.put(
ts,
TupperIntervalSet::ranked_max(
xs.iter().map(|x| ts.get(*x).unwrap()).collect(),
ts.get(*n).unwrap(),
self.site,
),
);
}
RankedMinMax(RankedMin, xs, n) => {
self.put(
ts,
TupperIntervalSet::ranked_min(
xs.iter().map(|x| ts.get(*x).unwrap()).collect(),
ts.get(*n).unwrap(),
self.site,
),
);
}
Var(_, _) => panic!("variables cannot be evaluated"),
}
}
}
/// The kind of a [`StaticForm`].
#[derive(Clone, Debug)]
pub enum StaticFormKind {
    // A constant truth value.
    Constant(bool),
    // An atomic formula: the term at the store index compared with zero.
    Atomic(RelOp, StoreIndex),
    // Compound formulas reference their operands by `FormIndex`.
    Not(FormIndex),
    And(FormIndex, FormIndex),
    Or(FormIndex, FormIndex),
}
/// A formula in a cache-efficient representation.
#[derive(Clone, Debug)]
pub struct StaticForm {
pub kind: StaticFormKind,
}
impl StaticForm {
    /// Evaluates the formula against the term values in `ts`.
    ///
    /// Panics if the formula is *not* of the kind [`StaticFormKind::Atomic`]
    /// (constant and compound formulas are not evaluated through this method).
    pub fn eval(&self, ts: &OptionalValueStore<TupperIntervalSet>) -> DecSignSet {
        use {RelOp::*, StaticFormKind::*};
        match &self.kind {
            Atomic(EqZero, x) => ts.get(*x).unwrap().eq_zero(),
            Atomic(LeZero, x) => ts.get(*x).unwrap().le_zero(),
            Atomic(LtZero, x) => ts.get(*x).unwrap().lt_zero(),
            Constant(_) | Not(_) | And(_, _) | Or(_, _) => {
                panic!("constant or non-atomic formulas cannot be evaluated")
            }
        }
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use std::mem::size_of;
    // Guards against accidental growth of these types; they are meant to be
    // cache-efficient (see the type docs), so size changes should be deliberate.
    #[test]
    fn struct_size() {
        assert_eq!(size_of::<StaticTermKind>(), 32);
        assert_eq!(size_of::<StaticTerm>(), 40);
        assert_eq!(size_of::<StaticFormKind>(), 12);
        assert_eq!(size_of::<StaticForm>(), 12);
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/graph/explicit.rs | rust/src/graph/explicit.rs | use crate::{
ast::ExplicitRelOp,
block::{Block, BlockQueue, Coordinate},
eval_cache::{EvalCacheLevel, EvalExplicitCache},
eval_result::EvalArgs,
geom::{Box1D, Box2D, Transform, Transformation1D, TransformationMode},
graph::{
common::*, Graph, GraphingError, GraphingErrorKind, GraphingStatistics, Padding, Ternary,
},
image::{Image, PixelIndex, PixelRange},
region::Region,
relation::{Relation, RelationType},
set_arg,
traits::BytesAllocated,
vars::{VarIndex, VarSet},
};
use inari::{const_interval, interval, Decoration, Interval};
use itertools::Itertools;
use smallvec::SmallVec;
use std::{
convert::TryFrom,
mem::swap,
time::{Duration, Instant},
};
/// The graphing algorithm for explicit relations.
pub struct Explicit {
rel: Relation,
op: ExplicitRelOp,
transpose: bool,
x_index: Option<VarIndex>,
im: Image<PixelState>,
block_queue: BlockQueue,
im_region: Region,
im_to_real_x: Transformation1D,
real_to_im_y: Transformation1D,
stats: GraphingStatistics,
mem_limit: usize,
no_cache: EvalExplicitCache,
cache: EvalExplicitCache,
}
impl Explicit {
pub fn new(
rel: Relation,
region: Box2D,
im_width: u32,
im_height: u32,
padding: Padding,
mem_limit: usize,
) -> Self {
let relation_type = rel.relation_type();
assert!(matches!(
relation_type,
RelationType::ExplicitFunctionOfX(_) | RelationType::ExplicitFunctionOfY(_)
));
let vars = rel.vars();
let op = match relation_type {
RelationType::ExplicitFunctionOfX(op) | RelationType::ExplicitFunctionOfY(op) => op,
_ => unreachable!(),
};
let transpose = matches!(relation_type, RelationType::ExplicitFunctionOfY(_));
let im = Image::new(im_width, im_height);
let (x_index, region, im_width, im_height, padding) = if transpose {
(
rel.var_indices().y,
region.transpose(),
im_height,
im_width,
Padding {
bottom: padding.left,
left: padding.bottom,
right: padding.top,
top: padding.right,
},
)
} else {
(rel.var_indices().x, region, im_width, im_height, padding)
};
let mut g = Self {
rel,
op,
transpose,
x_index,
im,
block_queue: BlockQueue::new(VarSet::X),
im_region: Region::new(
interval!(0.0, im_width as f64).unwrap(),
interval!(0.0, im_height as f64).unwrap(),
),
im_to_real_x: Transformation1D::new(
[
point_interval(padding.left as f64),
point_interval((im_width - padding.right) as f64),
],
[region.left(), region.right()],
TransformationMode::Fast,
),
real_to_im_y: Transformation1D::new(
[region.bottom(), region.top()],
[
point_interval(padding.bottom as f64),
point_interval((im_height - padding.top) as f64),
],
TransformationMode::Precise,
),
stats: GraphingStatistics {
eval_count: 0,
pixels: im_width as usize * im_height as usize,
pixels_complete: 0,
time_elapsed: Duration::ZERO,
},
mem_limit,
no_cache: EvalExplicitCache::new(EvalCacheLevel::None, vars),
cache: EvalExplicitCache::new(EvalCacheLevel::Full, vars),
};
let kx = (im_width as f64).log2().ceil() as i8;
let b = Block {
x: Coordinate::new(0, kx),
..Default::default()
};
g.block_queue.push_back(b);
g
}
fn refine_impl(&mut self, duration: Duration, now: &Instant) -> Result<bool, GraphingError> {
let mut sub_bs = vec![];
let mut incomplete_pixels = vec![];
let mut args = self.rel.create_args();
while let Some(b) = self.block_queue.pop_front() {
let bi = self.block_queue.begin_index() - 1;
if !b.x.is_subpixel() {
self.process_block(&b, &mut args, &mut incomplete_pixels);
} else {
self.process_subpixel_block(&b, &mut args, &mut incomplete_pixels);
}
if self.is_any_pixel_uncertain(&incomplete_pixels, bi) {
if b.x.is_subdivisible() {
self.subdivide_x(&mut sub_bs, &b);
self.block_queue.extend(sub_bs.drain(..));
let last_bi = self.block_queue.end_index() - 1;
self.set_last_queued_block(&incomplete_pixels, last_bi, bi)?;
} else {
self.set_undisprovable(&incomplete_pixels, bi);
}
}
incomplete_pixels.clear();
let mut clear_cache_and_retry = true;
while self.bytes_allocated() > self.mem_limit {
if clear_cache_and_retry {
self.cache.clear();
clear_cache_and_retry = false;
} else {
return Err(GraphingError {
kind: GraphingErrorKind::ReachedMemLimit,
});
}
}
if now.elapsed() > duration {
break;
}
}
if self.block_queue.is_empty() {
if self
.im
.pixels()
.any(|&s| s.is_uncertain_and_undisprovable())
{
Err(GraphingError {
kind: GraphingErrorKind::ReachedSubdivisionLimit,
})
} else {
Ok(true)
}
} else {
Ok(false)
}
}
/// Tries to prove or disprove the existence of a solution in the block,
/// and if that fails, appends pixels that may contain solutions to `incomplete_pixels`.
///
/// Precondition: the block is either a pixel or a superpixel.
fn process_block(
&mut self,
b: &Block,
args: &mut EvalArgs,
incomplete_pixels: &mut Vec<PixelRange>,
) {
let x_up = self.region_clipped(b.x).outer();
set_arg!(args, self.x_index, x_up);
let ys = self
.rel
.eval_explicit(args, &self.real_to_im_y, &mut self.no_cache)
.clone();
if ys.is_empty() {
return;
}
let px = {
let begin = b.x.pixel_index();
let end = (begin + b.x.width()).min(self.im_width());
interval!(begin as f64, end as f64).unwrap()
};
let dec = ys.decoration();
if dec >= Decoration::Def {
let y = ys
.iter()
.fold(Interval::EMPTY, |acc, &y| acc.convex_hull(y.x));
let pixels = self.possibly_true_pixels(px, y);
let t_pixels = self.true_pixels(px, y);
for p in &t_pixels {
self.im[p] = PixelState::True;
}
if pixels == t_pixels {
return;
}
}
incomplete_pixels.extend(ys.into_iter().map(|y| self.possibly_true_pixels(px, y.x)))
}
/// Tries to prove or disprove the existence of a solution in the block,
/// and if that fails, appends pixels that may contain solutions to `incomplete_pixels`.
///
/// Precondition: the block is a subpixel.
fn process_subpixel_block(
    &mut self,
    b: &Block,
    args: &mut EvalArgs,
    incomplete_pixels: &mut Vec<PixelRange>,
) {
    // Evaluate the relation over the outer enclosure of the subpixel's x-interval.
    let x_up = subpixel_outer_x(&self.region(b.x), b);
    set_arg!(args, self.x_index, x_up);
    let ys = self
        .rel
        .eval_explicit(args, &self.real_to_im_y, &mut self.no_cache)
        .clone();
    if ys.is_empty() {
        // The function is undefined on the whole subpixel; nothing to do.
        return;
    }
    // The x-extent (one pixel wide) of the pixel containing this subpixel,
    // in pixel coordinates.
    let px = {
        let begin = b.x.pixel_index();
        let end = begin + 1;
        interval!(begin as f64, end as f64).unwrap()
    };
    let dec = ys.decoration();
    // Inner bounds of the pixel's x-interval; points of `inter` certainly lie
    // in both the subpixel's enclosure and the pixel.
    let x_dn = self.region(b.x.pixel()).inner();
    let inter = x_up.intersection(x_dn);
    if !inter.is_empty() {
        if dec >= Decoration::Def {
            // `Def`: the function is defined everywhere on `x_up` (inari decoration).
            let y = ys
                .iter()
                .fold(Interval::EMPTY, |acc, &y| acc.convex_hull(y.x));
            let pixels = self.possibly_true_pixels(px, y);
            let t_pixels = self.true_pixels(px, y);
            for p in &t_pixels {
                self.im[p] = PixelState::True;
            }
            if pixels == t_pixels {
                // Every possibly-true pixel has been proven true; the block is complete.
                return;
            }
        }
        // Evaluate at up to three sample points within `inter`.
        // To dedup, points must be sorted.
        let rs = [inter.inf(), simple_fraction(inter), inter.sup()]
            .into_iter()
            .dedup()
            .map(|x| {
                set_arg!(args, self.x_index, point_interval(x));
                self.rel
                    .eval_explicit(args, &self.real_to_im_y, &mut self.cache)
                    .clone()
            })
            .collect::<SmallVec<[_; 3]>>();
        if dec >= Decoration::Dac {
            // `Dac`: defined and continuous, so the intermediate value theorem
            // applies between the sample points.
            let ys = rs
                .into_iter()
                .map(|y| {
                    assert_eq!(y.len(), 1);
                    y.iter().next().unwrap().x
                })
                .collect::<SmallVec<[_; 3]>>();
            // `y0` = smallest upper bound, `y1` = largest lower bound of the samples.
            let y0 = ys
                .iter()
                .map(|y| y.sup())
                .fold(f64::INFINITY, |acc, y| acc.min(y));
            let y1 = ys
                .iter()
                .map(|y| y.inf())
                .fold(f64::NEG_INFINITY, |acc, y| acc.max(y));
            if y0 <= y1 {
                // All "possibly-true" pixels are actually true.
                let t_pixels = self.possibly_true_pixels(
                    px,
                    Box1D::new(
                        point_interval_possibly_infinite(y0),
                        point_interval_possibly_infinite(y1),
                    )
                    .inner(),
                );
                for p in &t_pixels {
                    self.im[p] = PixelState::True;
                }
            }
        } else {
            // Without continuity, only mark pixels that certainly contain a
            // sample value of the function.
            for ys in rs {
                let dec = ys.decoration();
                if dec >= Decoration::Def {
                    let y = ys
                        .iter()
                        .fold(Interval::EMPTY, |acc, y| acc.convex_hull(y.x));
                    let t_pixels = self.true_pixels(px, y);
                    for p in &t_pixels {
                        self.im[p] = PixelState::True;
                    }
                }
            }
        }
    }
    incomplete_pixels.extend(ys.into_iter().map(|y| self.possibly_true_pixels(px, y.x)))
}
/// Width of the image along the graphing x-direction.
///
/// When `transpose` is set, the image's height plays that role.
fn im_width(&self) -> u32 {
    if self.transpose {
        return self.im.height();
    }
    self.im.width()
}
/// Returns `true` if any pixel in `pixels` is still uncertain with respect to
/// the block at `front_block_index`.
fn is_any_pixel_uncertain(&self, pixels: &[PixelRange], front_block_index: usize) -> bool {
    for p in pixels.iter().flatten() {
        if self.im[p].is_uncertain(front_block_index) {
            return true;
        }
    }
    false
}
/// Returns the smallest pixel-aligned interval that contains `x` in its interior.
fn outer_pixels(x: Interval) -> Interval {
    if x.is_empty() {
        return x;
    }
    // 5e-324 is the smallest positive subnormal `f64`; widening by it turns
    // boundary containment into interior containment.
    const TINY: Interval = const_interval!(-5e-324, 5e-324);
    let widened = x + TINY;
    interval!(widened.inf().floor(), widened.sup().ceil()).unwrap()
}
/// Returns the smallest pixel-aligned interval that contains `x`.
fn pixels(x: Interval) -> Interval {
    if x.is_empty() {
        return x;
    }
    interval!(x.inf().floor(), x.sup().ceil()).unwrap()
}
/// For the pixel-aligned region, returns the pixels in the region that are contained in the image.
///
/// If [`Self::transpose`] is `true`, the x and y components of the result are swapped.
fn pixels_in_image(&self, r: &Region) -> PixelRange {
    let clipped = r.intersection(&self.im_region);
    if clipped.is_empty() {
        return PixelRange::EMPTY;
    }
    // If `clipped` is degenerate, the result is `PixelRange::EMPTY`.
    let (x, y) = if self.transpose {
        (clipped.y(), clipped.x())
    } else {
        (clipped.x(), clipped.y())
    };
    // The y-axis is inverted (`height - y`) when mapping to pixel indices.
    PixelRange::new(
        PixelIndex::new(x.inf() as u32, self.im.height() - y.sup() as u32),
        PixelIndex::new(x.sup() as u32, self.im.height() - y.inf() as u32),
    )
}
/// Given an interval in image coordinates that possibly contains a solution,
/// returns the set of pixels that possibly contain solutions.
///
/// If every member of `y` is a solution, all the pixels contain solutions.
fn possibly_true_pixels(&self, px: Interval, y: Interval) -> PixelRange {
    use ExplicitRelOp::*;
    if y.is_empty() {
        return PixelRange::EMPTY;
    }
    // Extend `y` to the half-line of values that could satisfy the relation.
    let extended = match self.op {
        Eq => y,
        Ge | Gt => interval!(y.inf(), f64::INFINITY).unwrap(),
        Le | Lt => interval!(f64::NEG_INFINITY, y.sup()).unwrap(),
    };
    // Strict comparisons do not need the interior-widening of `outer_pixels`.
    let py = if matches!(self.op, Gt | Lt) {
        Self::pixels(extended)
    } else {
        Self::outer_pixels(extended)
    };
    self.pixels_in_image(&Region::new(px, py))
}
/// Returns the region that corresponds to the given subpixel.
fn region(&self, x: Coordinate) -> Box1D {
    let width = x.widthf();
    let left = width * x.index() as f64;
    Box1D::new(point_interval(left), point_interval(left + width)).transform(&self.im_to_real_x)
}
/// Returns the region that corresponds to the given pixel or superpixel,
/// clipped to the image width.
fn region_clipped(&self, x: Coordinate) -> Box1D {
    let width = x.widthf();
    let left = width * x.index() as f64;
    let right = (left + width).min(self.im_width() as f64);
    Box1D::new(point_interval(left), point_interval(right)).transform(&self.im_to_real_x)
}
/// Records `block_index` as the last queued block for every pixel in `pixels`
/// that is still uncertain and disprovable with respect to `parent_block_index`.
///
/// Returns an error if `block_index` does not fit in a [`QueuedBlockIndex`].
fn set_last_queued_block(
    &mut self,
    pixels: &[PixelRange],
    block_index: usize,
    parent_block_index: usize,
) -> Result<(), GraphingError> {
    let block_index = QueuedBlockIndex::try_from(block_index).map_err(|_| GraphingError {
        kind: GraphingErrorKind::BlockIndexOverflow,
    })?;
    for p in pixels.iter().flatten() {
        if self.im[p].is_uncertain_and_disprovable(parent_block_index) {
            self.im[p] = PixelState::Uncertain(Some(block_index));
        }
    }
    Ok(())
}
/// Marks every pixel in `pixels` that is uncertain and disprovable with respect
/// to `parent_block_index` as uncertain with no associated queued block.
fn set_undisprovable(&mut self, pixels: &[PixelRange], parent_block_index: usize) {
    for px in pixels.iter().flatten() {
        if self.im[px].is_uncertain_and_disprovable(parent_block_index) {
            self.im[px] = PixelState::Uncertain(None);
        }
    }
}
/// Subdivides `b.x` and appends the sub-blocks to `sub_bs`.
/// Two sub-blocks are created at most.
///
/// Precondition: `b.x.is_subdivisible()` is `true`.
fn subdivide_x(&self, sub_bs: &mut Vec<Block>, b: &Block) {
    let [left, right] = b.x.subdivide();
    sub_bs.push(Block { x: left, ..*b });
    // For a superpixel, the right half may lie entirely outside the image.
    let keep_right = !b.x.is_superpixel() || right.pixel_index() < self.im_width();
    if keep_right {
        sub_bs.push(Block { x: right, ..*b });
    }
}
/// Given an interval in pixel coordinates that contains a solution,
/// returns the set of all pixels that contain solutions.
///
/// Panics if `y` is empty.
fn true_pixels(&self, px: Interval, y: Interval) -> PixelRange {
    use ExplicitRelOp::*;
    assert!(!y.is_empty());
    // Restrict `y` to values that satisfy the relation for *every* member of
    // the original `y`: for `Ge`/`Gt`, everything at or above `y.sup()`;
    // for `Le`/`Lt`, everything at or below `y.inf()`.
    let y = match self.op {
        Eq => y,
        Ge | Gt => interval!(y.sup(), f64::INFINITY).unwrap_or(Interval::EMPTY),
        Le | Lt => interval!(f64::NEG_INFINITY, y.inf()).unwrap_or(Interval::EMPTY),
    };
    let py = match self.op {
        Eq => {
            let py = Self::outer_pixels(y);
            // For equality, a pixel is only certainly true when `y` is a single
            // point or its pixel enclosure is a single pixel tall.
            if y.is_singleton() || py.wid() == 1.0 {
                py
            } else {
                Interval::EMPTY
            }
        }
        Ge | Le => Self::outer_pixels(y),
        // Strict comparisons do not need interior containment.
        Gt | Lt => Self::pixels(y),
    };
    self.pixels_in_image(&Region::new(px, py))
}
}
impl Graph for Explicit {
    fn get_image(&self, im: &mut Image<Ternary>) {
        assert!(im.width() == self.im.width() && im.height() == self.im.height());
        // Pixels referencing a block at or after the queue front are still being refined.
        let front = self.block_queue.begin_index();
        for (src, dst) in self.im.pixels().copied().zip(im.pixels_mut()) {
            *dst = match src {
                PixelState::True => Ternary::True,
                s if s.is_uncertain(front) => Ternary::Uncertain,
                _ => Ternary::False,
            };
        }
    }

    fn get_statistics(&self) -> GraphingStatistics {
        let front = self.block_queue.begin_index();
        let complete = self
            .im
            .pixels()
            .filter(|s| !s.is_uncertain(front))
            .count();
        GraphingStatistics {
            eval_count: self.rel.eval_count(),
            pixels_complete: complete,
            ..self.stats
        }
    }

    fn refine(&mut self, duration: Duration) -> Result<bool, GraphingError> {
        let start = Instant::now();
        let result = self.refine_impl(duration, &start);
        self.stats.time_elapsed += start.elapsed();
        result
    }
}
impl BytesAllocated for Explicit {
    /// Total heap bytes held by the image, the block queue, and the cache.
    fn bytes_allocated(&self) -> usize {
        let im = self.im.bytes_allocated();
        let queue = self.block_queue.bytes_allocated();
        let cache = self.cache.bytes_allocated();
        im + queue + cache
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/graph/implicit.rs | rust/src/graph/implicit.rs | use crate::{
block::{Block, BlockQueue, Coordinate, IntegerParameter, RealParameter},
eval_cache::{EvalCacheLevel, EvalImplicitCache},
eval_result::EvalArgs,
geom::{Box2D, Transform, Transformation2D, TransformationMode},
graph::{
common::*, Graph, GraphingError, GraphingErrorKind, GraphingStatistics, Padding, Ternary,
},
image::{Image, PixelIndex, PixelRange},
interval_set::{DecSignSet, SignSet},
region::Region,
relation::{Relation, RelationType, VarIndices},
set_arg,
traits::BytesAllocated,
vars,
vars::VarSet,
};
use inari::Decoration;
use std::{
convert::TryFrom,
time::{Duration, Instant},
};
/// The horizontal and vertical axes combined; both are subdivided together.
const XY: VarSet = vars!(VarSet::X | VarSet::Y);
/// The graphing algorithm for implicit relations.
pub struct Implicit {
    rel: Relation,
    /// Indices of the variables within the evaluation arguments.
    var_indices: VarIndices,
    /// Directions (variable sets) in which blocks are subdivided, tried in cycling order.
    subdivision_dirs: Vec<VarSet>,
    /// Per-pixel graphing status.
    im: Image<PixelState>,
    // Queue blocks that will be subdivided instead of the divided blocks to save memory.
    bs_to_subdivide: BlockQueue,
    // Affine transformation from image coordinates to real coordinates.
    im_to_real: Transformation2D,
    stats: GraphingStatistics,
    /// Upper bound, in bytes, on the memory the graph may allocate.
    mem_limit: usize,
    /// Cache used when evaluating the relation over regions.
    cache_eval_on_region: EvalImplicitCache,
    /// Cache used when evaluating the relation at sample points.
    cache_eval_on_point: EvalImplicitCache,
}
impl Implicit {
/// Creates a graph of the implicit relation `rel` over `region`,
/// rasterized into an image of `im_width` × `im_height` pixels.
///
/// # Panics
///
/// Panics if `rel.relation_type()` is not [`RelationType::Implicit`].
pub fn new(
    rel: Relation,
    region: Box2D,
    im_width: u32,
    im_height: u32,
    padding: Padding,
    mem_limit: usize,
) -> Self {
    assert_eq!(rel.relation_type(), RelationType::Implicit);
    let vars = rel.vars();
    let var_indices = rel.var_indices().clone();
    // Subdivide along x/y only when the relation actually uses those variables;
    // parameters m, n, n_θ and t are appended in a fixed order when present.
    let mut subdivision_dirs = if vars.contains(VarSet::X) || vars.contains(VarSet::Y) {
        vec![XY]
    } else {
        vec![]
    };
    subdivision_dirs.extend(
        [VarSet::M, VarSet::N, VarSet::N_THETA, VarSet::T]
            .into_iter()
            .filter(|&d| vars.contains(d)),
    );
    let mut g = Self {
        rel,
        var_indices,
        subdivision_dirs,
        im: Image::new(im_width, im_height),
        bs_to_subdivide: BlockQueue::new(XY | vars),
        // Maps the padded image rectangle onto the requested real region.
        im_to_real: Transformation2D::new(
            [
                Region::new(
                    point_interval(padding.left as f64),
                    point_interval(padding.bottom as f64),
                ),
                Region::new(
                    point_interval((im_width - padding.right) as f64),
                    point_interval((im_height - padding.top) as f64),
                ),
            ],
            [
                Region::new(region.left(), region.bottom()),
                Region::new(region.right(), region.top()),
            ],
            TransformationMode::Fast,
        ),
        stats: GraphingStatistics {
            eval_count: 0,
            pixels: im_width as usize * im_height as usize,
            pixels_complete: 0,
            time_elapsed: Duration::ZERO,
        },
        mem_limit,
        cache_eval_on_region: EvalImplicitCache::new(EvalCacheLevel::Univariate, vars),
        // Full caching is only worthwhile when x/y cover all variables used.
        cache_eval_on_point: if XY.contains(vars) {
            EvalImplicitCache::new(EvalCacheLevel::Full, vars)
        } else {
            EvalImplicitCache::new(EvalCacheLevel::Univariate, vars)
        },
    };
    // `k` is chosen so that a single 2^k × 2^k block covers the larger image dimension.
    let k = (im_width.max(im_height) as f64).log2().ceil() as i8;
    let mut bs = vec![Block {
        m: IntegerParameter::new(g.rel.m_range()),
        n: IntegerParameter::new(g.rel.n_range()),
        n_theta: IntegerParameter::new(g.rel.n_theta_range()),
        t: RealParameter::new(g.rel.t_range()),
        x: Coordinate::new(0, k),
        y: Coordinate::new(0, k),
        ..Default::default()
    }];
    // Pre-split the initial block along each parameter the relation uses.
    if vars.contains(VarSet::M) {
        bs = bs
            .into_iter()
            .flat_map(|b| b.m.subdivide0().into_iter().map(move |m| Block { m, ..b }))
            .collect::<Vec<_>>();
    }
    if vars.contains(VarSet::N) {
        bs = bs
            .into_iter()
            .flat_map(|b| b.n.subdivide0().into_iter().map(move |n| Block { n, ..b }))
            .collect::<Vec<_>>();
    }
    if vars.contains(VarSet::N_THETA) {
        bs = bs
            .into_iter()
            .flat_map(|b| {
                b.n_theta
                    .subdivide0()
                    .into_iter()
                    .map(move |n_theta| Block { n_theta, ..b })
            })
            .collect::<Vec<_>>();
    }
    if vars.contains(VarSet::T) {
        bs = bs
            .into_iter()
            .flat_map(|b| b.t.subdivide0().into_iter().map(move |t| Block { t, ..b }))
            .collect::<Vec<_>>();
    }
    let last_block = bs.len() - 1;
    g.set_last_queued_block(&bs[last_block], last_block, None)
        .unwrap();
    for b in bs {
        g.bs_to_subdivide.push_back(b);
    }
    g
}
/// Refines the graph until `duration` elapses (measured from `now`).
///
/// Returns `Ok(true)` when graphing is complete, `Ok(false)` when more work remains,
/// and an error when the memory limit or the subdivision limit is reached.
fn refine_impl(&mut self, duration: Duration, now: &Instant) -> Result<bool, GraphingError> {
    let mut sub_bs = vec![];
    let mut incomplete_sub_bs = vec![];
    let mut args = self.rel.create_args();
    while let Some(b) = self.bs_to_subdivide.pop_front() {
        // Index of the block just popped.
        let bi = self.bs_to_subdivide.begin_index() - 1;
        let next_dir = if (b.next_dir_index as usize) < self.subdivision_dirs.len() {
            Some(self.subdivision_dirs[b.next_dir_index as usize])
        } else {
            None
        };
        match next_dir {
            Some(XY) => self.subdivide_xy(&mut sub_bs, &b),
            Some(VarSet::M) => subdivide_m(&mut sub_bs, &b),
            Some(VarSet::N) => subdivide_n(&mut sub_bs, &b),
            Some(VarSet::N_THETA) => subdivide_n_theta(&mut sub_bs, &b),
            Some(VarSet::T) => subdivide_t_implicit(&mut sub_bs, &b),
            Some(_) => panic!(),
            _ => sub_bs.push(b.clone()),
        }
        for sub_b in sub_bs.drain(..) {
            // Superpixels/pixels and subpixels are handled by different proofs.
            let complete = if !sub_b.x.is_subpixel() {
                self.process_block(&sub_b, &mut args)
            } else {
                self.process_subpixel_block(&sub_b, &mut args)
            };
            if !complete {
                incomplete_sub_bs.push(sub_b);
            }
        }
        // Maximum number of incomplete sub-blocks for which subdividing in the
        // same direction again is still considered productive.
        let n_max = match next_dir {
            Some(XY) => 1,
            Some(VarSet::M | VarSet::N | VarSet::N_THETA) => 2,
            Some(VarSet::T) => 2,
            Some(_) => panic!(),
            _ => 0,
        };
        // Candidate directions, cycled starting from the chosen offset.
        let it = (0..self.subdivision_dirs.len())
            .cycle()
            .skip(
                if incomplete_sub_bs.len() <= n_max {
                    // Subdivide in the same direction again.
                    b.next_dir_index
                } else {
                    // Subdivide in other direction.
                    b.next_dir_index + 1
                }
                .into(),
            )
            .take(self.subdivision_dirs.len());
        for mut sub_b in incomplete_sub_bs.drain(..) {
            // Pick the first direction in which the sub-block can still be subdivided.
            let next_dir_index = next_dir.and_then(|_| {
                it.clone().find(|&i| {
                    let d = self.subdivision_dirs[i];
                    d == XY && sub_b.x.is_subdivisible()
                        || d == VarSet::M && sub_b.m.is_subdivisible()
                        || d == VarSet::N && sub_b.n.is_subdivisible()
                        || d == VarSet::N_THETA && sub_b.n_theta.is_subdivisible()
                        || d == VarSet::T && sub_b.t.is_subdivisible()
                })
            });
            if let Some(i) = next_dir_index {
                sub_b.next_dir_index = i as u8;
            } else {
                // Cannot subdivide in any direction.
                for p in &self.pixels_in_image(&b) {
                    self.im[p] = PixelState::Uncertain(None);
                }
                continue;
            }
            self.bs_to_subdivide.push_back(sub_b.clone());
            let last_bi = self.bs_to_subdivide.end_index() - 1;
            self.set_last_queued_block(&sub_b, last_bi, Some(bi))?;
        }
        // On memory pressure, clear the caches once before giving up.
        let mut clear_cache_and_retry = true;
        while self.bytes_allocated() > self.mem_limit {
            if clear_cache_and_retry {
                self.cache_eval_on_region.clear();
                self.cache_eval_on_point.clear();
                clear_cache_and_retry = false;
            } else {
                return Err(GraphingError {
                    kind: GraphingErrorKind::ReachedMemLimit,
                });
            }
        }
        if now.elapsed() > duration {
            break;
        }
    }
    if self.bs_to_subdivide.is_empty() {
        if self
            .im
            .pixels()
            .any(|&s| s.is_uncertain_and_undisprovable())
        {
            Err(GraphingError {
                kind: GraphingErrorKind::ReachedSubdivisionLimit,
            })
        } else {
            Ok(true)
        }
    } else {
        Ok(false)
    }
}
/// Tries to prove or disprove the existence of a solution in the block
/// and returns `true` if it is successful.
///
/// Precondition: the block is either a pixel or a superpixel.
fn process_block(&mut self, b: &Block, args: &mut EvalArgs) -> bool {
    let pixels = self.pixels_in_image(b);
    let all_proven = pixels.iter().all(|p| self.im[p] == PixelState::True);
    if all_proven {
        // Nothing left to prove for this block.
        return true;
    }
    // Evaluate the relation over the outer enclosure of the block's region.
    let outer = self.region_clipped(b.x, b.y).outer();
    set_arg!(args, self.var_indices.m, b.m.interval());
    set_arg!(args, self.var_indices.n, b.n.interval());
    set_arg!(args, self.var_indices.n_theta, b.n_theta.interval());
    set_arg!(args, self.var_indices.t, b.t.interval());
    set_arg!(args, self.var_indices.x, outer.x());
    set_arg!(args, self.var_indices.y, outer.y());
    let evaluated = self.rel.eval_implicit(args, &mut self.cache_eval_on_region);
    let result = evaluated.result(self.rel.forms());
    if result.certainly_true() {
        // The relation holds everywhere in the block.
        for p in pixels.iter() {
            self.im[p] = PixelState::True;
        }
        true
    } else {
        // Disproved iff the relation is certainly false over the whole block.
        result.certainly_false()
    }
}
/// Tries to prove or disprove the existence of a solution in the block
/// and returns `true` if it is successful.
///
/// Precondition: the block is a subpixel.
fn process_subpixel_block(&mut self, b: &Block, args: &mut EvalArgs) -> bool {
    let pixels = self.pixels_in_image(b);
    if pixels.iter().all(|p| self.im[p] == PixelState::True) {
        // This pixel has already been proven to be true.
        return true;
    }
    // Outer enclosure of the subpixel and inner bounds of its containing pixel;
    // points of `inter` certainly lie in both.
    let u_up = subpixel_outer(&self.region(b.x, b.y), b);
    let p_dn = self.region(b.x.pixel(), b.y.pixel()).inner();
    let inter = u_up.intersection(&p_dn);
    set_arg!(args, self.var_indices.m, b.m.interval());
    set_arg!(args, self.var_indices.n, b.n.interval());
    set_arg!(args, self.var_indices.n_theta, b.n_theta.interval());
    set_arg!(args, self.var_indices.t, b.t.interval());
    set_arg!(args, self.var_indices.x, u_up.x());
    set_arg!(args, self.var_indices.y, u_up.y());
    let (locally_zero_mask, dac_mask, mut neg_mask, mut pos_mask) = {
        let r_u_up = self.rel.eval_implicit(args, &mut self.cache_eval_on_region);
        let result_mask = r_u_up.result_mask();
        let result = result_mask.eval(self.rel.forms());
        if result.certainly_true() && !inter.is_empty() {
            // The relation is true everywhere in the subpixel, and the subpixel certainly overlaps
            // with the pixel. Therefore, the pixel contains a solution.
            for p in &pixels {
                self.im[p] = PixelState::True;
            }
            return true;
        }
        if result.certainly_false() {
            // The relation is false everywhere in the subpixel.
            return true;
        }
        if inter.is_empty() {
            // We still need to refine the subpixel to show absence of solutions.
            return false;
        }
        // `dac_mask` marks atoms whose inari decoration guarantees a defined,
        // continuous value over the subpixel.
        let dac_mask = r_u_up.map(|DecSignSet(_, d)| d >= Decoration::Dac);
        // Sign masks start empty and are accumulated over the sample points below.
        let neg_mask = r_u_up.map(|_| false);
        let pos_mask = neg_mask.clone();
        (result_mask, dac_mask, neg_mask, pos_mask)
    };
    // Evaluate the relation for some sample points within the inner bounds of the subpixel
    // and try proving existence of a solution in two ways:
    //
    // a. Test if the relation is true for any of the sample points.
    //    This is useful especially for plotting inequalities such as "lcm(x, y) ≤ 1".
    //
    // b. Use the intermediate value theorem.
    //    A note on `locally_zero_mask` (for plotting conjunction):
    //    Suppose we are plotting "y = sin(x) && x ≥ 0".
    //    If the conjunct "x ≥ 0" is true throughout the subpixel
    //    and "y - sin(x)" evaluates to `POS` for a sample point and `NEG` for another,
    //    we can conclude that there is a point within the subpixel where the entire relation holds.
    //    Such observation would not be possible by merely converting the relation to
    //    "|y - sin(x)| + |x ≥ 0 ? 0 : 1| = 0".
    let points = {
        let x = inter.x();
        let y = inter.y();
        [
            (simple_fraction(x), simple_fraction(y)),
            (x.inf(), y.inf()), // bottom left
            (x.sup(), y.inf()), // bottom right
            (x.inf(), y.sup()), // top left
            (x.sup(), y.sup()), // top right
        ]
    };
    for point in &points {
        set_arg!(args, self.var_indices.x, point_interval(point.0));
        set_arg!(args, self.var_indices.y, point_interval(point.1));
        let r = self.rel.eval_implicit(args, &mut self.cache_eval_on_point);
        // `ss` is nonempty if the decoration is ≥ `Def`, which will be ensured
        // by taking bitand with `dac_mask`.
        neg_mask |= r.map(|DecSignSet(ss, _)| (SignSet::NEG | SignSet::ZERO).contains(ss));
        pos_mask |= r.map(|DecSignSet(ss, _)| (SignSet::POS | SignSet::ZERO).contains(ss));
        let point_result = r.result(self.rel.forms());
        if point_result.certainly_true()
            || (&(&neg_mask & &pos_mask) & &dac_mask)
                .solution_certainly_exists(self.rel.forms(), &locally_zero_mask)
        {
            // Found a solution.
            for p in &pixels {
                self.im[p] = PixelState::True;
            }
            return true;
        }
    }
    false
}
/// Returns the pixels that are contained in both the block and the image.
fn pixels_in_image(&self, b: &Block) -> PixelRange {
    let begin = PixelIndex::new(b.x.pixel_index(), b.y.pixel_index());
    // A superpixel spans `width()` pixels per axis; otherwise the block lies
    // within a single pixel.
    let (w, h) = if b.x.is_superpixel() {
        (b.x.width(), b.y.width())
    } else {
        (1, 1)
    };
    let end = PixelIndex::new(
        (begin.x + w).min(self.im.width()),
        (begin.y + h).min(self.im.height()),
    );
    // The y-axis is inverted (`height - y`) when mapping to pixel indices.
    PixelRange::new(
        PixelIndex::new(begin.x, self.im.height() - end.y),
        PixelIndex::new(end.x, self.im.height() - begin.y),
    )
}
/// Returns the region that corresponds to the given subpixel.
fn region(&self, x: Coordinate, y: Coordinate) -> Box2D {
    let w = x.widthf();
    let h = y.widthf();
    let left = w * x.index() as f64;
    let bottom = h * y.index() as f64;
    Box2D::new(
        point_interval(left),
        point_interval(left + w),
        point_interval(bottom),
        point_interval(bottom + h),
    )
    .transform(&self.im_to_real)
}
/// Returns the region that corresponds to the given pixel or superpixel,
/// clipped to the image bounds.
fn region_clipped(&self, x: Coordinate, y: Coordinate) -> Box2D {
    let w = x.widthf();
    let h = y.widthf();
    let left = w * x.index() as f64;
    let bottom = h * y.index() as f64;
    let right = (left + w).min(self.im.width() as f64);
    let top = (bottom + h).min(self.im.height() as f64);
    Box2D::new(
        point_interval(left),
        point_interval(right),
        point_interval(bottom),
        point_interval(top),
    )
    .transform(&self.im_to_real)
}
/// Records `block_index` as the last queued block for the block's pixels.
///
/// Without a parent, every pixel is updated; with a parent, only pixels that are
/// still uncertain and disprovable with respect to it are updated.
/// Returns an error if `block_index` does not fit in a [`QueuedBlockIndex`].
fn set_last_queued_block(
    &mut self,
    b: &Block,
    block_index: usize,
    parent_block_index: Option<usize>,
) -> Result<(), GraphingError> {
    let block_index = QueuedBlockIndex::try_from(block_index).map_err(|_| GraphingError {
        kind: GraphingErrorKind::BlockIndexOverflow,
    })?;
    for p in self.pixels_in_image(b).iter() {
        let overwrite = match parent_block_index {
            None => true,
            Some(parent) => self.im[p].is_uncertain_and_disprovable(parent),
        };
        if overwrite {
            self.im[p] = PixelState::Uncertain(Some(block_index));
        }
    }
    Ok(())
}
/// Subdivides both `b.x` and `b.y` and appends the sub-blocks to `sub_bs`.
/// Four sub-blocks are created at most.
///
/// Precondition: both `b.x.is_subdivisible()` and `b.y.is_subdivisible()` are `true`.
fn subdivide_xy(&self, sub_bs: &mut Vec<Block>, b: &Block) {
    let [x0, x1] = b.x.subdivide();
    let [y0, y1] = b.y.subdivide();
    if !b.x.is_superpixel() {
        // Keep all four sub-blocks.
        sub_bs.extend([
            Block { x: x0, y: y0, ..*b },
            Block { x: x1, y: y0, ..*b },
            Block { x: x0, y: y1, ..*b },
            Block { x: x1, y: y1, ..*b },
        ]);
        return;
    }
    // Superpixel halves may fall entirely outside the image; drop those.
    let keep_x1 = x1.pixel_index() < self.im.width();
    let keep_y1 = y1.pixel_index() < self.im.height();
    sub_bs.push(Block { x: x0, y: y0, ..*b });
    if keep_x1 {
        sub_bs.push(Block { x: x1, y: y0, ..*b });
    }
    if keep_y1 {
        sub_bs.push(Block { x: x0, y: y1, ..*b });
    }
    if keep_x1 && keep_y1 {
        sub_bs.push(Block { x: x1, y: y1, ..*b });
    }
}
}
impl Graph for Implicit {
    fn get_image(&self, im: &mut Image<Ternary>) {
        assert!(im.width() == self.im.width() && im.height() == self.im.height());
        // Pixels referencing a block at or after the queue front are still being refined.
        let front = self.bs_to_subdivide.begin_index();
        for (src, dst) in self.im.pixels().copied().zip(im.pixels_mut()) {
            *dst = match src {
                PixelState::True => Ternary::True,
                s if s.is_uncertain(front) => Ternary::Uncertain,
                _ => Ternary::False,
            };
        }
    }

    fn get_statistics(&self) -> GraphingStatistics {
        let front = self.bs_to_subdivide.begin_index();
        let complete = self
            .im
            .pixels()
            .filter(|s| !s.is_uncertain(front))
            .count();
        GraphingStatistics {
            eval_count: self.rel.eval_count(),
            pixels_complete: complete,
            ..self.stats
        }
    }

    fn refine(&mut self, duration: Duration) -> Result<bool, GraphingError> {
        let start = Instant::now();
        let result = self.refine_impl(duration, &start);
        self.stats.time_elapsed += start.elapsed();
        result
    }
}
impl BytesAllocated for Implicit {
    /// Total heap bytes held by the image, the block queue, and both caches.
    fn bytes_allocated(&self) -> usize {
        let im = self.im.bytes_allocated();
        let queue = self.bs_to_subdivide.bytes_allocated();
        let caches =
            self.cache_eval_on_region.bytes_allocated() + self.cache_eval_on_point.bytes_allocated();
        im + queue + caches
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/graph/parametric.rs | rust/src/graph/parametric.rs | use crate::{
block::{Block, BlockQueue, IntegerParameter, RealParameter},
eval_cache::{EvalCacheLevel, EvalParametricCache},
eval_result::EvalArgs,
geom::{Box2D, Transformation1D, TransformationMode},
graph::{
common::*, Graph, GraphingError, GraphingErrorKind, GraphingStatistics, Padding, Ternary,
},
image::{Image, PixelIndex, PixelRange},
interval_set::TupperIntervalSet,
region::Region,
relation::{Relation, RelationType, VarIndices},
set_arg,
traits::{BytesAllocated, Single},
vars::VarSet,
};
use inari::{const_interval, interval, Decoration, Interval};
use itertools::Itertools;
use smallvec::SmallVec;
use std::{
convert::TryFrom,
mem::swap,
time::{Duration, Instant},
};
/// The graphing algorithm for parametric relations.
///
/// A parametric relation is a relation of type [`RelationType::Parametric`].
pub struct Parametric {
    rel: Relation,
    /// Indices of the variables within the evaluation arguments.
    var_indices: VarIndices,
    /// Directions (variable sets) in which blocks are subdivided, tried in cycling order.
    subdivision_dirs: Vec<VarSet>,
    /// Per-pixel graphing status.
    im: Image<PixelState>,
    bs_to_subdivide: BlockQueue,
    /// The pixel-aligned region that matches the entire image.
    im_region: Region,
    /// The affine transformation from real coordinates to image coordinates.
    real_to_im_x: Transformation1D,
    real_to_im_y: Transformation1D,
    stats: GraphingStatistics,
    /// Upper bound, in bytes, on the memory the graph may allocate.
    mem_limit: usize,
    /// Cache-less evaluation context (used for interval evaluation of `t`).
    no_cache: EvalParametricCache,
    /// Cache used for point evaluation at the endpoints of `t`.
    cache: EvalParametricCache,
}
impl Parametric {
/// Creates a graph of the parametric relation `rel` over `region`,
/// rasterized into an image of `im_width` × `im_height` pixels.
///
/// # Panics
///
/// Panics if `rel.relation_type()` is not [`RelationType::Parametric`].
pub fn new(
    rel: Relation,
    region: Box2D,
    im_width: u32,
    im_height: u32,
    padding: Padding,
    mem_limit: usize,
) -> Self {
    assert_eq!(rel.relation_type(), RelationType::Parametric);
    let vars = rel.vars();
    let var_indices = rel.var_indices().clone();
    // Only parameters the relation actually uses can be subdivided.
    let subdivision_dirs = [VarSet::M, VarSet::N, VarSet::T]
        .into_iter()
        .filter(|&d| vars.contains(d))
        .collect();
    let mut g = Self {
        rel,
        var_indices,
        subdivision_dirs,
        im: Image::new(im_width, im_height),
        bs_to_subdivide: BlockQueue::new(vars),
        im_region: Region::new(
            interval!(0.0, im_width as f64).unwrap(),
            interval!(0.0, im_height as f64).unwrap(),
        ),
        // Maps the real region onto the padded image rectangle, per axis.
        real_to_im_x: Transformation1D::new(
            [region.left(), region.right()],
            [
                point_interval(padding.left as f64),
                point_interval((im_width - padding.right) as f64),
            ],
            TransformationMode::Precise,
        ),
        real_to_im_y: Transformation1D::new(
            [region.bottom(), region.top()],
            [
                point_interval(padding.bottom as f64),
                point_interval((im_height - padding.top) as f64),
            ],
            TransformationMode::Precise,
        ),
        stats: GraphingStatistics {
            eval_count: 0,
            pixels: im_width as usize * im_height as usize,
            pixels_complete: 0,
            time_elapsed: Duration::ZERO,
        },
        mem_limit,
        no_cache: EvalParametricCache::new(EvalCacheLevel::None, vars),
        cache: EvalParametricCache::new(EvalCacheLevel::Full, vars),
    };
    let mut bs = vec![Block {
        m: IntegerParameter::new(g.rel.m_range()),
        n: IntegerParameter::new(g.rel.n_range()),
        t: RealParameter::new(g.rel.t_range()),
        ..Default::default()
    }];
    // Pre-split the initial block along each integer parameter the relation uses.
    if vars.contains(VarSet::M) {
        bs = bs
            .into_iter()
            .flat_map(|b| b.m.subdivide0().into_iter().map(move |m| Block { m, ..b }))
            .collect::<Vec<_>>();
    }
    if vars.contains(VarSet::N) {
        bs = bs
            .into_iter()
            .flat_map(|b| b.n.subdivide0().into_iter().map(move |n| Block { n, ..b }))
            .collect::<Vec<_>>();
    }
    let last_block = bs.len() - 1;
    // Initially, every pixel of the image may contain a solution.
    g.set_last_queued_block(
        &[PixelRange::new(
            PixelIndex::new(0, 0),
            PixelIndex::new(im_width, im_height),
        )],
        last_block,
        None,
    )
    .unwrap();
    for b in bs {
        g.bs_to_subdivide.push_back(b);
    }
    g
}
/// Refines the graph until `duration` elapses (measured from `now`).
///
/// Returns `Ok(true)` when graphing is complete, `Ok(false)` when more work remains,
/// and an error when the memory limit or the subdivision limit is reached.
fn refine_impl(&mut self, duration: Duration, now: &Instant) -> Result<bool, GraphingError> {
    let mut sub_bs = vec![];
    let mut incomplete_pixels = vec![];
    let mut incomplete_sub_bs = vec![];
    let mut args = self.rel.create_args();
    while let Some(b) = self.bs_to_subdivide.pop_front() {
        // Index of the block just popped.
        let bi = self.bs_to_subdivide.begin_index() - 1;
        let next_dir = if (b.next_dir_index as usize) < self.subdivision_dirs.len() {
            Some(self.subdivision_dirs[b.next_dir_index as usize])
        } else {
            None
        };
        match next_dir {
            Some(VarSet::M) => subdivide_m(&mut sub_bs, &b),
            Some(VarSet::N) => subdivide_n(&mut sub_bs, &b),
            Some(VarSet::T) => subdivide_t_parametric(&mut sub_bs, &b),
            Some(_) => panic!(),
            _ => sub_bs.push(b.clone()),
        }
        for sub_b in sub_bs.drain(..) {
            self.process_block(&sub_b, &mut args, &mut incomplete_pixels);
            // Keep the sub-block only while some of its pixels are still uncertain.
            if self.is_any_pixel_uncertain(&incomplete_pixels, bi) {
                incomplete_sub_bs.push((
                    sub_b,
                    incomplete_pixels.drain(..).collect::<SmallVec<[_; 4]>>(),
                ));
            }
            incomplete_pixels.clear();
        }
        // Maximum number of incomplete sub-blocks for which subdividing in the
        // same direction again is still considered productive.
        let n_max = match next_dir {
            Some(VarSet::M | VarSet::N) => 2,
            Some(VarSet::T) => 1,
            Some(_) => panic!(),
            _ => 0,
        };
        // Candidate directions, cycled starting from the chosen offset.
        let it = (0..self.subdivision_dirs.len())
            .cycle()
            .skip(
                if incomplete_sub_bs.len() <= n_max {
                    // Subdivide in the same direction again.
                    b.next_dir_index
                } else {
                    // Subdivide in other direction.
                    b.next_dir_index + 1
                }
                .into(),
            )
            .take(self.subdivision_dirs.len());
        for (mut sub_b, incomplete_pixels) in incomplete_sub_bs.drain(..) {
            // Pick the first direction in which the sub-block can still be subdivided.
            let next_dir_index = next_dir.and_then(|_| {
                it.clone().find(|&i| {
                    let d = self.subdivision_dirs[i];
                    d == VarSet::M && sub_b.m.is_subdivisible()
                        || d == VarSet::N && sub_b.n.is_subdivisible()
                        || d == VarSet::T && sub_b.t.is_subdivisible()
                })
            });
            if let Some(i) = next_dir_index {
                sub_b.next_dir_index = i as u8;
            } else {
                // Cannot subdivide in any direction.
                self.set_undisprovable(&incomplete_pixels, bi);
                continue;
            }
            self.bs_to_subdivide.push_back(sub_b.clone());
            let last_bi = self.bs_to_subdivide.end_index() - 1;
            self.set_last_queued_block(&incomplete_pixels, last_bi, Some(bi))?;
        }
        // On memory pressure, clear the cache once before giving up.
        let mut clear_cache_and_retry = true;
        while self.bytes_allocated() > self.mem_limit {
            if clear_cache_and_retry {
                self.cache.clear();
                clear_cache_and_retry = false;
            } else {
                return Err(GraphingError {
                    kind: GraphingErrorKind::ReachedMemLimit,
                });
            }
        }
        if now.elapsed() > duration {
            break;
        }
    }
    if self.bs_to_subdivide.is_empty() {
        if self
            .im
            .pixels()
            .any(|&s| s.is_uncertain_and_undisprovable())
        {
            Err(GraphingError {
                kind: GraphingErrorKind::ReachedSubdivisionLimit,
            })
        } else {
            Ok(true)
        }
    } else {
        Ok(false)
    }
}
/// Tries to prove or disprove the existence of a solution in the block,
/// and if that fails, appends pixels that may contain solutions to `incomplete_pixels`.
fn process_block(
    &mut self,
    block: &Block,
    args: &mut EvalArgs,
    incomplete_pixels: &mut Vec<PixelRange>,
) {
    set_arg!(args, self.var_indices.m, block.m.interval());
    set_arg!(args, self.var_indices.n, block.n.interval());
    set_arg!(args, self.var_indices.t, block.t.interval());
    // Evaluate x(t), y(t) over the block's whole t-interval (no caching).
    let (xs, ys) = self
        .rel
        .eval_parametric(
            args,
            &self.real_to_im_x,
            &self.real_to_im_y,
            &mut self.no_cache,
        )
        .clone();
    if xs.is_empty() || ys.is_empty() {
        // The curve is undefined on this block; nothing to do.
        return;
    }
    let rs = Self::regions(&xs, &ys)
        .map(|r| Self::outer_pixels(&r))
        .collect::<SmallVec<[_; 4]>>();
    let dec = xs.decoration().min(ys.decoration());
    if dec >= Decoration::Def {
        let r = rs.iter().fold(Region::EMPTY, |acc, r| acc.convex_hull(r));
        if Self::is_pixel(&r) {
            // f(…) × g(…) is interior to a single pixel.
            for p in &self.pixels_in_image(&r) {
                self.im[p] = PixelState::True;
            }
            return;
        } else if dec >= Decoration::Dac && (r.x().wid() == 1.0 || r.y().wid() == 1.0) {
            // Continuous curve confined to a single pixel row or column:
            // endpoints bracket the pixels the curve must pass through.
            assert_eq!(rs.len(), 1);
            // Evaluate at the lower endpoint of the t-interval.
            let r1 = {
                set_arg!(
                    args,
                    self.var_indices.t,
                    point_interval_possibly_infinite(block.t.interval().inf())
                );
                let (xs, ys) = self
                    .rel
                    .eval_parametric(
                        args,
                        &self.real_to_im_x,
                        &self.real_to_im_y,
                        &mut self.cache,
                    )
                    .clone();
                let rs = Self::regions(&xs, &ys);
                rs.single().unwrap()
            };
            // Evaluate at the upper endpoint of the t-interval.
            let r2 = {
                set_arg!(
                    args,
                    self.var_indices.t,
                    point_interval_possibly_infinite(block.t.interval().sup())
                );
                let (xs, ys) = self
                    .rel
                    .eval_parametric(
                        args,
                        &self.real_to_im_x,
                        &self.real_to_im_y,
                        &mut self.cache,
                    )
                    .clone();
                let rs = Self::regions(&xs, &ys);
                rs.single().unwrap()
            };
            let mut r12 = Region::EMPTY;
            if r.x().wid() == 1.0 {
                // `r` is a single column.
                let mut y1 = r1.y();
                let mut y2 = r2.y();
                if y2.precedes(y1) {
                    swap(&mut y1, &mut y2);
                }
                if y1.precedes(y2) {
                    r12 = Self::outer_pixels(&Region::new(
                        r1.x(),
                        interval!(y1.sup(), y2.inf()).unwrap(),
                    ));
                    // `r12.x()` could be wider than a pixel.
                    r12 = Region::new(r.x(), r12.y());
                }
            } else {
                // `r` is a single row.
                let mut x1 = r1.x();
                let mut x2 = r2.x();
                if x2.precedes(x1) {
                    swap(&mut x1, &mut x2);
                }
                if x1.precedes(x2) {
                    r12 = Self::outer_pixels(&Region::new(
                        interval!(x1.sup(), x2.inf()).unwrap(),
                        r1.y(),
                    ));
                    // `r12.y()` could be wider than a pixel.
                    r12 = Region::new(r12.x(), r.y());
                }
            }
            // There is at least one solution per pixel of `r12`.
            for p in &self.pixels_in_image(&r12) {
                self.im[p] = PixelState::True;
            }
            if r12 == r {
                // Every candidate pixel has been proven true.
                return;
            }
        }
    }
    incomplete_pixels.extend(rs.into_iter().map(|r| self.pixels_in_image(&r)))
}
/// Returns `true` if any pixel in `pixels` is still uncertain with respect to
/// the block at `front_block_index`.
fn is_any_pixel_uncertain(&self, pixels: &[PixelRange], front_block_index: usize) -> bool {
    for p in pixels.iter().flatten() {
        if self.im[p].is_uncertain(front_block_index) {
            return true;
        }
    }
    false
}
/// For the pixel-aligned region,
/// returns `true` if both the width and the height of the region are `1.0`.
fn is_pixel(r: &Region) -> bool {
    let unit_wide = r.x().wid() == 1.0;
    let unit_tall = r.y().wid() == 1.0;
    unit_wide && unit_tall
}
/// Returns the smallest pixel-aligned region that contains `r` in its interior.
fn outer_pixels(r: &Region) -> Region {
    // 5e-324 is interpreted as the smallest positive subnormal number; widening
    // by it turns boundary containment into interior containment.
    const TINY: Interval = const_interval!(-5e-324, 5e-324);
    let widen = |i: Interval| {
        let i = i + TINY;
        interval!(i.inf().floor(), i.sup().ceil()).unwrap()
    };
    Region::new(widen(r.x()), widen(r.y()))
}
/// For the pixel-aligned region,
/// returns the pixels in the region that are contained in the image.
fn pixels_in_image(&self, r: &Region) -> PixelRange {
    let clipped = r.intersection(&self.im_region);
    if clipped.is_empty() {
        return PixelRange::EMPTY;
    }
    // If the clipped region is degenerate, the result is `PixelRange::EMPTY`.
    let (x, y) = (clipped.x(), clipped.y());
    // The y-axis is inverted (`height - y`) when mapping to pixel indices.
    PixelRange::new(
        PixelIndex::new(x.inf() as u32, self.im.height() - y.sup() as u32),
        PixelIndex::new(x.sup() as u32, self.im.height() - y.inf() as u32),
    )
}
/// Returns possible combinations of `x × y`.
///
/// Pairs whose branch cuts (`g`) cannot be united are excluded.
fn regions<'a>(
    xs: &'a TupperIntervalSet,
    ys: &'a TupperIntervalSet,
) -> impl 'a + Iterator<Item = Region> {
    xs.iter()
        .cartesian_product(ys.iter())
        .filter_map(|(x, y)| x.g.union(y.g).map(|_| Region::new(x.x, y.x)))
}
/// Records `block_index` as the last queued block for every pixel in `pixels`.
///
/// Without a parent, every pixel is updated; with a parent, only pixels that are
/// still uncertain and disprovable with respect to it are updated.
/// Returns an error if `block_index` does not fit in a [`QueuedBlockIndex`].
fn set_last_queued_block(
    &mut self,
    pixels: &[PixelRange],
    block_index: usize,
    parent_block_index: Option<usize>,
) -> Result<(), GraphingError> {
    let block_index = QueuedBlockIndex::try_from(block_index).map_err(|_| GraphingError {
        kind: GraphingErrorKind::BlockIndexOverflow,
    })?;
    for p in pixels.iter().flatten() {
        let overwrite = match parent_block_index {
            None => true,
            Some(parent) => self.im[p].is_uncertain_and_disprovable(parent),
        };
        if overwrite {
            self.im[p] = PixelState::Uncertain(Some(block_index));
        }
    }
    Ok(())
}
/// Marks every pixel in `pixels` that is still disprovable with respect to
/// `parent_block_index` as undisprovable (`Uncertain(None)`).
fn set_undisprovable(&mut self, pixels: &[PixelRange], parent_block_index: usize) {
    for range in pixels {
        for p in range {
            if self.im[p].is_uncertain_and_disprovable(parent_block_index) {
                self.im[p] = PixelState::Uncertain(None);
            }
        }
    }
}
}
impl Graph for Parametric {
    fn get_image(&self, im: &mut Image<Ternary>) {
        assert!(im.width() == self.im.width() && im.height() == self.im.height());
        for (s, dst) in self.im.pixels().copied().zip(im.pixels_mut()) {
            *dst = match s {
                PixelState::True => Ternary::True,
                // A pixel stays uncertain while a queued block at or past the
                // queue front still covers it, or when it can no longer be
                // disproved (`Uncertain(None)`).
                _ if s.is_uncertain(self.bs_to_subdivide.begin_index()) => Ternary::Uncertain,
                _ => Ternary::False,
            }
        }
    }

    fn get_statistics(&self) -> GraphingStatistics {
        GraphingStatistics {
            eval_count: self.rel.eval_count(),
            // A pixel counts as complete once it is no longer uncertain.
            pixels_complete: self
                .im
                .pixels()
                .filter(|s| !s.is_uncertain(self.bs_to_subdivide.begin_index()))
                .count(),
            ..self.stats
        }
    }

    fn refine(&mut self, duration: Duration) -> Result<bool, GraphingError> {
        let now = Instant::now();
        let result = self.refine_impl(duration, &now);
        // Accumulate wall-clock time whether refinement succeeded or not.
        self.stats.time_elapsed += now.elapsed();
        result
    }
}
impl BytesAllocated for Parametric {
    /// Total heap memory attributed to this graph: the image, the block queue,
    /// and the evaluation cache.
    fn bytes_allocated(&self) -> usize {
        let im = self.im.bytes_allocated();
        let queue = self.bs_to_subdivide.bytes_allocated();
        let cache = self.cache.bytes_allocated();
        im + queue + cache
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/graph/common.rs | rust/src/graph/common.rs | use crate::{
block::Block,
geom::{Box1D, Box2D},
region::Region,
};
use inari::{interval, Interval};
/// The index of a [`Block`] in a [`BlockQueue`].
///
/// While [`BlockQueue::begin_index`]/[`BlockQueue::end_index`] return [`usize`],
/// [`u32`] would be large enough.
///
/// A narrow type matters because one of these is stored per pixel
/// (see [`PixelState::Uncertain`]).
///
/// [`Block`]: crate::block::Block
/// [`BlockQueue`]: crate::block::BlockQueue
/// [`BlockQueue::begin_index`]: crate::block::BlockQueue::begin_index
/// [`BlockQueue::end_index`]: crate::block::BlockQueue::end_index
pub type QueuedBlockIndex = u32;
/// The graphing status of a pixel.
///
/// # Overview of Pixel States
///
/// ```text
///                 Found
///                 a solution        +---------------+
///            +--------------------->|     True      |
///            |                      +---------------+
///            |                              Λ  Found
///            |      No subdivisible         |  a solution
///   +--------+------+  blocks are left  +---+-----------+
/// ●>|   Uncertain   +------------------>|   Uncertain   |
///   |  disprovable  |                   | undisprovable |
///   +---------------+                   +---------------+
///            |      No solution
///            |      in all blocks   +---------------+
///            +--------------------->|     False     |
///                                   +---------------+
/// ```
///
/// The false state is represented as a special case of the variant [`PixelState::Uncertain`],
/// as explained in its description.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum PixelState {
    /// The pixel contains a solution.
    True,
    /// The pixel may or may not contain a solution.
    ///
    /// It holds the index of the last block in the queue that intersects with the pixel.
    /// If it is [`None`], no subdivisible block is left for the pixel,
    /// thus we cannot prove absence of solutions.
    ///
    /// If the index is less than that of the front element of the queue,
    /// that implies the pixel does not contain a solution.
    Uncertain(Option<QueuedBlockIndex>),
}
impl PixelState {
    /// Returns `true` if the pixel may still contain a solution,
    /// i.e. it is neither proven nor disproven.
    pub fn is_uncertain(self, front_block_index: usize) -> bool {
        match self {
            PixelState::True => false,
            PixelState::Uncertain(None) => true,
            PixelState::Uncertain(Some(bi)) => bi as usize >= front_block_index,
        }
    }

    /// Returns `true` if the pixel is uncertain and a subdivisible block for it
    /// is still in the queue, so it can potentially be disproved.
    pub fn is_uncertain_and_disprovable(self, front_block_index: usize) -> bool {
        matches!(self, PixelState::Uncertain(Some(bi)) if bi as usize >= front_block_index)
    }

    /// Returns `true` if the pixel is uncertain and can no longer be disproved.
    pub fn is_uncertain_and_undisprovable(self) -> bool {
        matches!(self, PixelState::Uncertain(None))
    }
}
impl Default for PixelState {
fn default() -> Self {
PixelState::Uncertain(Some(0))
}
}
/// Returns the interval \[`x`, `x`\].
///
/// Panics if `x` is infinite or NaN.
pub fn point_interval(x: f64) -> Interval {
    // `interval!` returns `Err` for non-finite bounds, so the `unwrap`
    // enforces the documented panic condition.
    interval!(x, x).unwrap()
}
/// Returns the interval:
///
/// - \[`x`, `x`\] if `x` is finite,
/// - \[−∞, [`f64::MIN`]\] if `x` is −∞, or
/// - \[[`f64::MAX`], +∞\] if `x` is +∞.
///
/// Panics if `x` is NaN.
pub fn point_interval_possibly_infinite(x: f64) -> Interval {
    // For finite `x`, both operands evaluate to `x`.
    // For ±∞, the min/max clamp the finite endpoint to f64::MAX/f64::MIN.
    // For NaN, `f64::min`/`f64::max` return the non-NaN operand, yielding the
    // invalid pair (f64::MAX, f64::MIN); `interval!` then fails and the
    // `unwrap` panics as documented.
    interval!(x.min(f64::MAX), x.max(f64::MIN)).unwrap()
}
/// Returns a number within the interval whose significand is as short as possible in the binary
/// representation. For such inputs, arithmetic expressions are more likely to be evaluated
/// exactly.
///
/// Precondition: the interval is nonempty.
pub fn simple_fraction(x: Interval) -> f64 {
    let a = x.inf();
    let b = x.sup();
    let a_bits = a.to_bits();
    let b_bits = b.to_bits();
    let diff = a_bits ^ b_bits;
    // The number of leading equal bits.
    let n = diff.leading_zeros();
    if n == 64 {
        // `a` and `b` have identical bit patterns; the interval is a point.
        return a;
    }
    // Set all bits from the MSB through the first differing bit.
    let mask = !0u64 << (64 - n - 1);
    // Clearing the trailing bits truncates the magnitude (moves the value
    // toward zero), so start from the endpoint of larger magnitude to stay
    // inside the interval: the lower endpoint when the interval is
    // non-positive at `a`, otherwise the upper endpoint.
    if a <= 0.0 {
        f64::from_bits(a_bits & mask)
    } else {
        f64::from_bits(b_bits & mask)
    }
}
/// Subdivides `b.m` and appends the sub-blocks to `sub_bs`.
/// Three sub-blocks are created at most.
///
/// Precondition: `b.m.is_subdivisible()` is `true`.
pub fn subdivide_m(sub_bs: &mut Vec<Block>, b: &Block) {
    for m in b.m.subdivide1() {
        sub_bs.push(Block { m, ..*b });
    }
}
/// Subdivides `b.n` and appends the sub-blocks to `sub_bs`.
/// Three sub-blocks are created at most.
///
/// Precondition: `b.n.is_subdivisible()` is `true`.
pub fn subdivide_n(sub_bs: &mut Vec<Block>, b: &Block) {
    for n in b.n.subdivide1() {
        sub_bs.push(Block { n, ..*b });
    }
}
/// Subdivides `b.n_theta` and appends the sub-blocks to `sub_bs`.
/// Three sub-blocks are created at most.
///
/// Precondition: `b.n_theta.is_subdivisible()` is `true`.
pub fn subdivide_n_theta(sub_bs: &mut Vec<Block>, b: &Block) {
    for n_theta in b.n_theta.subdivide1() {
        sub_bs.push(Block { n_theta, ..*b });
    }
}
/// Subdivides `b.t` and appends the sub-blocks to `sub_bs`.
/// Two sub-blocks are created.
///
/// Precondition: `b.t.is_subdivisible()` is `true`.
pub fn subdivide_t_parametric(sub_bs: &mut Vec<Block>, b: &Block) {
    for t in b.t.subdivide() {
        sub_bs.push(Block { t, ..*b });
    }
}
/// Subdivides `b.t` and appends the sub-blocks to `sub_bs`.
/// Two sub-blocks are created at most.
///
/// Precondition: `b.t.is_subdivisible()` is `true`.
pub fn subdivide_t_implicit(sub_bs: &mut Vec<Block>, b: &Block) {
    for t in b.t.subdivide1() {
        sub_bs.push(Block { t, ..*b });
    }
}
/// Returns a subset of the outer region.
///
/// It is assumed that the region is obtained from the given block.
/// When applied to a set of regions/blocks which form a partition of a pixel,
/// the results form a partition of the outer boundary of the pixel.
///
/// Precondition: the block is a subpixel.
pub fn subpixel_outer(r: &Box2D, b: &Block) -> Region {
    let x = b.x.index();
    let y = b.y.index();
    // `pixel_align` is the number of blocks per pixel along the axis;
    // `index & mask == 0` then tests whether the block's low edge coincides
    // with a pixel boundary (and `(index + 1) & mask == 0` the high edge).
    let mask_x = b.x.pixel_align() - 1;
    let mask_y = b.y.pixel_align() - 1;
    // On a pixel boundary, take the outward-rounded bound (`inf`/`sup`);
    // on an edge shared with a sibling sub-block, take the midpoint so that
    // adjacent sub-blocks tile the pixel without overlap.
    let left = if x & mask_x == 0 {
        r.left().inf()
    } else {
        r.left().mid()
    };
    let right = if (x + 1) & mask_x == 0 {
        r.right().sup()
    } else {
        r.right().mid()
    };
    let bottom = if y & mask_y == 0 {
        r.bottom().inf()
    } else {
        r.bottom().mid()
    };
    let top = if (y + 1) & mask_y == 0 {
        r.top().sup()
    } else {
        r.top().mid()
    };
    Region::new(
        interval!(left, right).unwrap(),
        interval!(bottom, top).unwrap(),
    )
}
/// One-dimensional version of [`subpixel_outer`].
pub fn subpixel_outer_x(r: &Box1D, b: &Block) -> Interval {
    let x = b.x.index();
    let mask_x = b.x.pixel_align() - 1;
    // Pixel-boundary edges use the outward bound; interior edges use the
    // midpoint — see [`subpixel_outer`] for the rationale.
    let left = if x & mask_x == 0 {
        r.left().inf()
    } else {
        r.left().mid()
    };
    let right = if (x + 1) & mask_x == 0 {
        r.right().sup()
    } else {
        r.right().mid()
    };
    interval!(left, right).unwrap()
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::block::Coordinate;
    use inari::const_interval;

    #[test]
    fn test_subpixel_outer_x() {
        let r = Box1D::new(const_interval!(0.33, 0.34), const_interval!(0.66, 0.67));

        // The left side is pixel boundary.
        let b = Block {
            x: Coordinate::new(4, -2),
            ..Default::default()
        };
        assert_eq!(
            subpixel_outer_x(&r, &b),
            interval!(r.left().inf(), r.right().mid()).unwrap()
        );

        // The right side is pixel boundary.
        // (At level -2 a pixel presumably spans four indices, so index 4 + 3
        // ends on the next pixel boundary — see `Coordinate`.)
        let b = Block {
            x: Coordinate::new(b.x.index() + 3, b.x.level()),
            ..Default::default()
        };
        assert_eq!(
            subpixel_outer_x(&r, &b),
            interval!(r.left().mid(), r.right().sup()).unwrap(),
        );
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/bin/compose.rs | rust/src/bin/compose.rs | use clap::{value_parser, Arg, ArgAction, Command};
use image::{imageops, DynamicImage, ImageReader, Rgba, Rgba32FImage};
use std::ffi::OsString;
/// One input layer: an image file and the color to tint it with.
#[derive(Clone, Debug)]
struct Entry {
    /// RGBA tint; its alpha multiplies the source image's alpha (see `colorize`).
    color: Rgba<f32>,
    /// Path of the image file to load.
    file: String,
}
fn colorize(im: &mut Rgba32FImage, color: Rgba<f32>) {
for p in im.pixels_mut() {
*p = Rgba([color[0], color[1], color[2], p[3] * color[3]]);
}
}
/// Converts the color space of the image from linearized sRGB to sRGB.
fn linear_to_srgb(im: &mut Rgba32FImage) {
fn linear_to_srgb(c: f32) -> f32 {
if c <= 0.0031308 {
12.92 * c
} else {
1.055 * c.powf(1.0 / 2.4) - 0.055
}
}
for p in im.pixels_mut() {
*p = Rgba([
linear_to_srgb(p[0]),
linear_to_srgb(p[1]),
linear_to_srgb(p[2]),
p[3],
]);
}
}
/// Parses a hex color string of the form `#rrggbbaa`.
///
/// Linearized sRGB value is returned if `correct_alpha` is `true`
/// (the alpha channel itself is never linearized).
///
/// Panics with a descriptive message if the string is not `#` followed by
/// exactly eight hex digits. (Previously the per-digit `unwrap` produced an
/// uninformative panic on invalid input.)
fn parse_color(color: &str, correct_alpha: bool) -> Rgba<f32> {
    fn srgb_to_linear(c: f32) -> f32 {
        if c <= 0.04045 {
            c / 12.92
        } else {
            ((c + 0.055) / 1.055).powf(2.4)
        }
    }

    let digits = color
        .strip_prefix('#')
        .unwrap_or_else(|| panic!("color '{}' must start with '#'", color));
    assert_eq!(
        digits.chars().count(),
        8,
        "color '{}' must have exactly eight hex digits",
        color
    );
    // Parse one two-digit channel; `from_str_radix` replaces the manual
    // `16 * hi + lo` nibble arithmetic and accepts upper/lowercase hex.
    let channel = |i: usize| {
        let pair = &digits[2 * i..2 * i + 2];
        let v = u8::from_str_radix(pair, 16)
            .unwrap_or_else(|_| panic!("'{}' in color '{}' is not valid hex", pair, color));
        v as f32 / 255.0
    };
    let (r, g, b, a) = (channel(0), channel(1), channel(2), channel(3));
    if correct_alpha {
        Rgba([srgb_to_linear(r), srgb_to_linear(g), srgb_to_linear(b), a])
    } else {
        Rgba([r, g, b, a])
    }
}
/// Colorizes each `--add file color` layer and alpha-composes them over the
/// `--background` color, saving the result as an 8-bit RGBA PNG.
fn main() {
    let mut matches = Command::new("compose")
        .about("Colorizes and alpha-composes gray-alpha images.")
        .arg(
            Arg::new("add")
                .long("add")
                .number_of_values(2)
                .value_names(["file", "color"])
                .action(ArgAction::Append),
        )
        .arg(
            Arg::new("background")
                .long("background")
                .default_value("#ffffffff"),
        )
        .arg(
            Arg::new("correct-alpha")
                .long("correct-alpha")
                .action(ArgAction::SetTrue),
        )
        .arg(
            Arg::new("output")
                .long("output")
                .value_name("file")
                .value_parser(value_parser!(OsString)),
        )
        .get_matches();
    let correct_alpha = matches.get_flag("correct-alpha");
    let background = parse_color(
        &matches.remove_one::<String>("background").unwrap(),
        correct_alpha,
    );
    // `--add` is repeatable; the flat list of values is re-paired here.
    let entries = matches
        .remove_many::<String>("add")
        .unwrap_or_default()
        .collect::<Vec<_>>()
        .chunks_exact(2)
        .map(|e| Entry {
            color: parse_color(&e[1], correct_alpha),
            file: e[0].clone(),
        })
        .collect::<Vec<_>>();
    let output = matches.remove_one::<OsString>("output").unwrap();
    let mut composed = None;
    for entry in entries {
        let mut im = ImageReader::open(&entry.file)
            .unwrap_or_else(|_| panic!("failed to open the image '{}'", entry.file))
            .decode()
            .unwrap_or_else(|_| panic!("failed to decode the image '{}'", entry.file))
            .into_rgba32f();
        // Create the canvas lazily with the dimensions of the first layer,
        // filled with the (opaque-by-default) background color.
        let composed = composed.get_or_insert_with(|| {
            let mut composed = Rgba32FImage::new(im.width(), im.height());
            composed.fill(1.0);
            colorize(&mut composed, background);
            composed
        });
        assert_eq!(im.width(), composed.width());
        assert_eq!(im.height(), composed.height());
        colorize(&mut im, entry.color);
        imageops::overlay(composed, &im, 0, 0);
    }
    if let Some(mut composed) = composed {
        if correct_alpha {
            // Blending was done in linear space; encode back to sRGB.
            linear_to_srgb(&mut composed);
        }
        DynamicImage::ImageRgba32F(composed)
            .to_rgba8()
            .save(output)
            .expect("failed to save the image");
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/bin/graph.rs | rust/src/bin/graph.rs | use clap::{value_parser, Arg, ArgAction, ArgGroup, Command};
use graphest::{
Box2D, Explicit, FftImage, Graph, GraphingStatistics, Image, Implicit, Padding, Parametric,
PixelIndex, Relation, RelationType, Ternary,
};
use image::{imageops, ImageBuffer, LumaA, Rgb, RgbImage};
use inari::{const_interval, interval, Interval};
use itertools::Itertools;
use std::{ffi::OsString, fs, io::stdin, process, str::FromStr, time::Duration};
type GrayAlpha16Image = ImageBuffer<LumaA<u16>, Vec<u16>>;
/// Dilates the image by convolution by FFT,
/// and returns the index of the top-left corner of the image after discarding the padding.
fn dilate_and_crop_fast(im: &mut Image<Ternary>, kernel: &Image<bool>) -> PixelIndex {
    /// Finds a size larger than or equal to `size` that FFTW can handle efficiently.
    fn find_good_size(size: u32) -> u32 {
        let x = size.next_power_of_two();
        // Try small smooth multiples of x/8 between size and 2·size before
        // falling back to the power of two itself.
        let candidates = [
            x * 9 / 8,  // 9 = 3^2
            x * 10 / 8, // 10 = 2 × 5
            x * 11 / 8, // 11 = 11
            x * 12 / 8, // 12 = 2^2 × 3
            x * 13 / 8, // 13 = 13
            x * 14 / 8, // 14 = 2 × 7
            x * 15 / 8, // 15 = 3 × 5
        ];
        for candidate in candidates {
            if candidate >= size {
                return candidate;
            }
        }
        x
    }

    // To convolve arrays of lengths M and N using cyclic convolution,
    // we first need to pad them with zeros to make their lengths M + N - 1 to avoid overlapping.
    // Do not confuse this padding with the one that were present in the original image.
    let width = find_good_size(im.width() + (kernel.width() - 1));
    let height = find_good_size(im.height() + (kernel.height() - 1));
    let mut ker = FftImage::new(width, height);
    for i in 0..kernel.height() {
        for j in 0..kernel.width() {
            if kernel[PixelIndex::new(j, i)] {
                ker[i as usize][j as usize] = 1.0;
            }
        }
    }
    ker.fft();
    // Rasterize the true and the uncertain pixels into two separate planes so
    // each can be convolved with the kernel independently.
    let mut im_true = FftImage::new(width, height);
    let mut im_uncert = FftImage::new(width, height);
    for i in 0..im.height() {
        for j in 0..im.width() {
            match im[PixelIndex::new(j, i)] {
                Ternary::True => im_true[i as usize][j as usize] = 1.0,
                Ternary::Uncertain => im_uncert[i as usize][j as usize] = 1.0,
                _ => (),
            }
        }
    }
    im_true.fft();
    // Pointwise complex product in the frequency domain:
    // (a + bi)(x + yi) = (ax − by) + (bx + ay)i — spatial convolution.
    for (dst, src) in im_true.complexes_mut().iter_mut().zip(ker.complexes()) {
        let [a, b] = *dst;
        let [x, y] = *src;
        *dst = [a * x - b * y, b * x + a * y];
    }
    im_true.ifft();
    im_uncert.fft();
    for (dst, src) in im_uncert.complexes_mut().iter_mut().zip(ker.complexes()) {
        let [a, b] = *dst;
        let [x, y] = *src;
        *dst = [a * x - b * y, b * x + a * y];
    }
    im_uncert.ifft();
    // For the normalization, see the last paragraph of
    // https://www.fftw.org/fftw3_doc/Real_002ddata-DFTs.html
    // A dilated pixel is set iff at least one kernel tap hit it, i.e. the
    // (unnormalized) convolution sum is ≥ 1·width·height; half of that is a
    // robust threshold against floating-point error.
    let threshold = width as f32 * height as f32 / 2.0;
    for i in 0..im.height() {
        for j in 0..im.width() {
            let src_true = im_true[i as usize][j as usize];
            let src_uncert = im_uncert[i as usize][j as usize];
            // True takes precedence over uncertain.
            im[PixelIndex::new(j, i)] = if src_true > threshold {
                Ternary::True
            } else if src_uncert > threshold {
                Ternary::Uncertain
            } else {
                Ternary::False
            };
        }
    }
    // The dilated image is translated by (⌊kw/2⌋, ⌊kh/2⌋), which is the center of the kernel.
    // We also want to crop the image further by the same amount to discard the (original) padding.
    // Therefore, the top-left corner of the image will be at (kw - 1, kh - 1).
    PixelIndex::new(kernel.width() - 1, kernel.height() - 1)
}
/// Dilates the image by the naive algorithm,
/// and returns the index of the top-left corner of the image after discarding the padding.
fn dilate_and_crop_naive(im: &mut Image<Ternary>, kernel: &Image<bool>) -> PixelIndex {
    // The dilation is done in place: the destination `(dx, dy)` only reads
    // source pixels with `sx ≥ dx` and `sy ≥ dy`, and the traversal is
    // top-left to bottom-right, so no already-written pixel is ever read.
    for (dy, dx) in (0..im.height() - (kernel.height() - 1))
        .cartesian_product(0..im.width() - (kernel.width() - 1))
    {
        //            kx : kw - 1 - kx
        //          +==========+
        // (dx, dy) ‖          ‖ ky
        //     X------>+       ··
        //     |       ‖         kh - 1 - ky
        //     |       ‖
        //     V       ‖
        //     +==+=======X
        //            (sx, sy)
        im[PixelIndex::new(dx, dy)] = (0..kernel.height())
            .cartesian_product(0..kernel.width())
            .filter(|&(ky, kx)| kernel[PixelIndex::new(kx, ky)])
            .map(|(ky, kx)| {
                // The kernel is applied mirrored, as in true convolution.
                let sx = dx + (kernel.width() - 1 - kx);
                let sy = dy + (kernel.height() - 1 - ky);
                im[PixelIndex::new(sx, sy)]
            })
            // `max` acts as a ternary OR, assuming `False < Uncertain < True`
            // in `Ternary`'s ordering — NOTE(review): confirm its `Ord` impl.
            .max()
            .unwrap_or(Ternary::False);
    }
    // In this case, the dilated image is translated by (-⌊kw/2⌋, -⌊kh/2⌋).
    // After cropping, the top-left corner is at (0, 0).
    PixelIndex::new(0, 0)
}
/// Returns the binary matrix whose elements are true
/// where the distance of the pixel from the center satisfies d ≤ |r + 1/2|.
/// This is defined the same way as Mathematica's `DiskMatrix[r]`.
fn disk_matrix(radius: f64) -> Image<bool> {
let radius = radius.max(0.0);
// size = 2 ⌊r⌉ + 1 = 2 ⌊r + 1/2⌋ + 1.
let size = 2 * (radius + 0.5).floor() as u32 + 1;
let mut im = Image::new(size, size);
let mid = size / 2;
let max_d_sq = (radius + 0.5) * (radius + 0.5);
for i in 0..size {
let y = i as i32 - mid as i32;
for j in 0..size {
let x = j as i32 - mid as i32;
let d_sq = (x * x + y * y) as f64;
im[PixelIndex::new(j, i)] = d_sq <= max_d_sq;
}
}
im
}
/// Dilates a kernel by another kernel (Minkowski sum).
///
/// A result cell is `true` iff *some* pair of true cells of `ka` and `kb`
/// lands on it, i.e. the union over all pairs.
///
/// Bug fix: the previous implementation assigned `a && b` to the target cell
/// unconditionally, so a later pair mapping to the same cell could overwrite a
/// `true` with `false` (e.g. the center of `disk ⊕ disk` came out `false`).
/// Cells are now only ever set to `true`.
fn minkowski_sum(ka: &Image<bool>, kb: &Image<bool>) -> Image<bool> {
    let mut kc = Image::new(ka.width() + kb.width() - 1, ka.height() + kb.height() - 1);
    for (ia, ja) in (0..ka.height()).cartesian_product(0..ka.width()) {
        if !ka[PixelIndex::new(ja, ia)] {
            continue;
        }
        for (ib, jb) in (0..kb.height()).cartesian_product(0..kb.width()) {
            if !kb[PixelIndex::new(jb, ib)] {
                continue;
            }
            // Align the kernel centers: the sum of offsets from each center,
            // re-anchored at the result's center.
            let ic = (ia as i32 - ka.height() as i32 / 2)
                + (ib as i32 - kb.height() as i32 / 2)
                + kc.height() as i32 / 2;
            let jc = (ja as i32 - ka.width() as i32 / 2)
                + (jb as i32 - kb.width() as i32 / 2)
                + kc.width() as i32 / 2;
            if ic >= 0 && ic < kc.height() as i32 && jc >= 0 && jc < kc.width() as i32 {
                kc[PixelIndex::new(jc as u32, ic as u32)] = true;
            }
        }
    }
    kc
}
/// Parses a matrix literal such as `"1,0;0,1"` (rows separated by `;`,
/// elements by `,`) into a binary image.
///
/// Panics if the matrix is empty, ragged, contains an element other than
/// `0`/`1`, or its dimensions do not fit in `u32`.
fn parse_binary_matrix(mat: &str) -> Image<bool> {
    let mat = mat
        .split(';')
        .map(|row| {
            row.split(',')
                .map(|c| match c {
                    "0" => false,
                    "1" => true,
                    _ => panic!("elements of dilation kernel must be either 0 or 1"),
                })
                .collect::<Vec<_>>()
        })
        .collect::<Vec<_>>();
    assert!(
        !mat.is_empty() && !mat[0].is_empty(),
        "the matrix must not be empty"
    );
    assert!(
        mat[1..].iter().all(|r| r.len() == mat[0].len()),
        "the matrix rows must have the same length"
    );
    let height = u32::try_from(mat.len()).expect("the matrix has too many rows");
    let width = u32::try_from(mat[0].len()).expect("the matrix has too many columns");
    let mut im = Image::<bool>::new(width, height);
    // NOTE(review): relies on `pixels_mut` iterating in row-major order to
    // match the flattened rows — confirm against `Image`'s implementation.
    for (p, el) in im.pixels_mut().zip(mat.into_iter().flatten()) {
        *p = el;
    }
    im
}
/// Prints the column headers and the separator row for [`print_statistics`].
fn print_statistics_header() {
    let (time, area, evals) = ("Eval. Time (s)", "Area Proven (%)", "Number of Evaluations");
    println!(" {:>14} {:>24} {:>28}", time, area, evals);
    println!(" {:->14} {:->24} {:->28}", "", "", "");
}
/// Prints one progress row: elapsed time, proven area (with its delta since
/// `prev`), and evaluation count (with its delta).
///
/// Intervals are used for the percentages so that the printed value is a
/// rigorous lower bound rather than a rounded estimate.
fn print_statistics(cur: &GraphingStatistics, prev: &GraphingStatistics) {
    fn point_interval(x: f64) -> Interval {
        interval!(x, x).unwrap()
    }

    fn to_percent(x: Interval) -> Interval {
        const_interval!(100.0, 100.0) * x
    }

    let pixels = point_interval(cur.pixels as f64);
    let area = to_percent(point_interval(cur.pixels_complete as f64) / pixels);
    let delta_area =
        to_percent(point_interval((cur.pixels_complete - prev.pixels_complete) as f64) / pixels);
    println!(
        " {:>14.3} {:>11} (+ {:>7}) {:>13} (+ {:>9})",
        cur.time_elapsed.as_secs_f64(),
        // Extract the lower bound and remove the minus sign in "-0.000".
        // NOTE(review): the `[1..8]` slice assumes the interval's `Display`
        // output shape ("[lo,hi]" with 7.3 formatting) — fragile; confirm.
        format!("{:7.3}", area)[1..8].replace('-', " "),
        format!("{:7.3}", delta_area)[1..8].replace('-', " "),
        cur.eval_count,
        (cur.eval_count - prev.eval_count)
    );
}
/// One bound of the plot region, parsed from a `--bounds` argument.
#[derive(Clone, Debug)]
struct Bound {
    /// The parsed value, as an interval enclosing the decimal literal.
    x: Interval,
}
impl FromStr for Bound {
    type Err = String;

    /// Parses a decimal number by wrapping it as the point interval `[s]`.
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        interval!(&format!("[{}]", s))
            .map(|x| Bound { x })
            .map_err(|_| format!("{} is not a valid number", s))
    }
}
/// Parses the command line, derives the internal graph geometry (padding,
/// supersampling, dilation), and dispatches to [`plot`] with the graph kind
/// matching the relation type.
fn main() {
    let mut matches = Command::new("graph")
        .about("Plots the graph of a mathematical relation to an image.")
        .arg(Arg::new("relation").index(1).help("Relation to plot."))
        .arg(
            Arg::new("bounds")
                .short('b')
                .long("bounds")
                .num_args(4)
                .allow_hyphen_values(true)
                .default_values(["-10", "10", "-10", "10"])
                .value_names(["xmin", "xmax", "ymin", "ymax"])
                .help("Bounds of the region over which the relation is plotted.")
                .value_parser(value_parser!(Bound)),
        )
        .arg(
            Arg::new("dilate")
                .long("dilate")
                .hide(true)
                .default_value("1"),
        )
        .arg(
            Arg::new("dump-ast")
                .long("dump-ast")
                .hide(true)
                .action(ArgAction::SetTrue),
        )
        .arg(
            Arg::new("gray-alpha")
                .long("gray-alpha")
                .hide(true)
                .action(ArgAction::SetTrue),
        )
        .arg(
            Arg::new("input")
                .short('i')
                .long("input")
                .value_name("file")
                .help("Path to the file that contains the relation to plot.")
                .value_parser(value_parser!(OsString)),
        )
        .arg(
            Arg::new("mem-limit")
                .long("mem-limit")
                .default_value("1024")
                .value_name("mbytes")
                .help("Approximate maximum amount of memory in MiB that the program can use.")
                .value_parser(value_parser!(usize)),
        )
        .arg(
            Arg::new("output")
                .short('o')
                .long("output")
                .default_value("graph.png")
                .value_name("file")
                .help("Path to the output image. It must end with '.png'.")
                .value_parser(value_parser!(OsString)),
        )
        .arg(
            Arg::new("output-once")
                .long("output-once")
                .help("Do not output intermediate images.")
                .action(ArgAction::SetTrue),
        )
        .arg(
            Arg::new("pad-bottom")
                .long("pad-bottom")
                .hide(true)
                .default_value("0")
                .value_parser(value_parser!(u32)),
        )
        .arg(
            Arg::new("pad-left")
                .long("pad-left")
                .hide(true)
                .default_value("0")
                .value_parser(value_parser!(u32)),
        )
        .arg(
            Arg::new("pad-right")
                .long("pad-right")
                .hide(true)
                .default_value("0")
                .value_parser(value_parser!(u32)),
        )
        .arg(
            Arg::new("pad-top")
                .long("pad-top")
                .hide(true)
                .default_value("0")
                .value_parser(value_parser!(u32)),
        )
        .arg(
            Arg::new("parse")
                .long("parse")
                .help("Only validate the relation and exit without plotting.")
                .action(ArgAction::SetTrue),
        )
        .arg(
            Arg::new("pause-per-iteration")
                .long("pause-per-iteration")
                .hide(true)
                .action(ArgAction::SetTrue),
        )
        .arg(
            Arg::new("pen-size")
                .long("pen-size")
                .hide(true)
                .default_value("1")
                .value_parser(value_parser!(f64)),
        )
        .arg(
            Arg::new("size")
                .short('s')
                .long("size")
                .num_args(2)
                .default_values(["1024", "1024"])
                .value_names(["width", "height"])
                .help("Dimensions of the output image in pixels.")
                .value_parser(value_parser!(u32)),
        )
        .arg(
            Arg::new("ssaa")
                .long("ssaa")
                .default_value("1")
                .value_name("scale")
                .next_line_help(true)
                .help("Anti-alias the graph by supersampling pixels by the given scale.")
                .value_parser(value_parser!(u32)),
        )
        .arg(
            Arg::new("timeout")
                .long("timeout")
                .value_name("msecs")
                .help("Maximum limit of evaluation time in milliseconds.")
                .value_parser(value_parser!(u64)),
        )
        .group(
            ArgGroup::new("relation-or-input")
                .args(["relation", "input"])
                .required(true),
        )
        .get_matches();
    // The relation comes either from the positional argument or from `--input`.
    let rel = matches
        .remove_one::<String>("relation")
        .unwrap_or_else(|| {
            let input = matches.remove_one::<OsString>("input").unwrap();
            fs::read_to_string(&input).unwrap_or_else(|e| {
                eprintln!("{}: {}", e, input.to_string_lossy());
                process::exit(1);
            })
        })
        .parse::<Relation>()
        .unwrap_or_else(|e| {
            eprintln!("{}", e);
            process::exit(1);
        });
    if matches.get_flag("dump-ast") {
        println!("{}", rel.ast().dump_full());
    }
    if matches.get_flag("parse") {
        return;
    }
    let bounds = matches
        .remove_many::<Bound>("bounds")
        .unwrap()
        .map(|b| b.x)
        .collect::<Vec<_>>();
    let dilation = matches.remove_one::<String>("dilate").unwrap();
    let gray_alpha = matches.get_flag("gray-alpha");
    let mem_limit = 1024 * 1024 * matches.remove_one::<usize>("mem-limit").unwrap();
    let output = matches.remove_one::<OsString>("output").unwrap();
    let output_once = matches.get_flag("output-once");
    let output_padding = Padding {
        bottom: matches.remove_one::<u32>("pad-bottom").unwrap(),
        left: matches.remove_one::<u32>("pad-left").unwrap(),
        right: matches.remove_one::<u32>("pad-right").unwrap(),
        top: matches.remove_one::<u32>("pad-top").unwrap(),
    };
    let output_size = {
        let s = matches
            .remove_many::<u32>("size")
            .unwrap()
            .collect::<Vec<_>>();
        [
            s[0] + output_padding.left + output_padding.right,
            s[1] + output_padding.bottom + output_padding.top,
        ]
    };
    let pause_per_iteration = matches.get_flag("pause-per-iteration");
    let pen_size = matches.remove_one::<f64>("pen-size").unwrap();
    let ssaa = matches.remove_one::<u32>("ssaa").unwrap();
    let timeout = matches
        .remove_one::<u64>("timeout")
        .map(Duration::from_millis);
    // NOTE(review): this only prints a message (to stdout, not stderr) and then
    // continues anyway; it should probably `process::exit(1)` — confirm intent.
    if ssaa > 1 && dilation != "1" {
        println!("`--dilate` and `--ssaa` cannot be used together.");
    }
    let dilation_kernel = disk_matrix((ssaa / 2) as f64 + ssaa as f64 * (pen_size - 1.0) / 2.0);
    let user_dilation_kernel = {
        let k = parse_binary_matrix(&dilation);
        assert!(
            k.width() == k.height() && k.width() % 2 == 1,
            "dilation kernel must be square and have odd dimensions"
        );
        k
    };
    let dilation_kernel = minkowski_sum(&dilation_kernel, &user_dilation_kernel);
    // The speed of `dilate_and_crop_fast` and `naive` are almost the same at 10.
    let dilation_size = if dilation_kernel == parse_binary_matrix("1") {
        DilationSize::Identity
    } else if dilation_kernel.width() <= 10 {
        DilationSize::Small
    } else {
        DilationSize::Large
    };
    // The support radius of the resampling filter used when resizing the image.
    // More precisely, it is ⌈r - 1/2⌉, where r is the support radius.
    let resampling_radius = if ssaa > 1 { 1 } else { 0 };
    // Pad the graph so that dilation and resampling have full neighborhoods
    // everywhere inside the final output.
    let graph_padding = Padding {
        bottom: ssaa * (output_padding.bottom + resampling_radius) + dilation_kernel.height() / 2,
        left: ssaa * (output_padding.left + resampling_radius) + dilation_kernel.width() / 2,
        right: ssaa * (output_padding.right + resampling_radius) + dilation_kernel.width() / 2,
        top: ssaa * (output_padding.top + resampling_radius) + dilation_kernel.height() / 2,
    };
    let graph_size = [
        ssaa * (output_size[0] + 2 * resampling_radius) + (dilation_kernel.width() - 1),
        ssaa * (output_size[1] + 2 * resampling_radius) + (dilation_kernel.height() - 1),
    ];
    let opts = PlotOptions {
        dilation_size,
        dilation_kernel,
        graph_size,
        gray_alpha,
        output,
        output_once,
        output_size,
        pause_per_iteration,
        resampling_radius,
        timeout,
    };
    let region = Box2D::new(bounds[0], bounds[1], bounds[2], bounds[3]);
    // Each graph kind implements the same `Graph` interface; only construction
    // differs by relation type.
    match rel.relation_type() {
        RelationType::ExplicitFunctionOfX(_) | RelationType::ExplicitFunctionOfY(_) => plot(
            Explicit::new(
                rel,
                region,
                graph_size[0],
                graph_size[1],
                graph_padding,
                mem_limit,
            ),
            opts,
        ),
        RelationType::Parametric => plot(
            Parametric::new(
                rel,
                region,
                graph_size[0],
                graph_size[1],
                graph_padding,
                mem_limit,
            ),
            opts,
        ),
        _ => plot(
            Implicit::new(
                rel,
                region,
                graph_size[0],
                graph_size[1],
                graph_padding,
                mem_limit,
            ),
            opts,
        ),
    };
}
/// Which dilation algorithm to use, chosen by kernel size in `main`.
enum DilationSize {
    /// 1×1 kernel: dilation is a no-op.
    Identity,
    /// Small kernel (width ≤ 10): the naive algorithm is used.
    Small,
    /// Larger kernel: FFT-based convolution is used.
    Large,
}
/// Options consumed by [`plot`], derived from the command line in `main`.
struct PlotOptions {
    /// Structuring element combining pen size, SSAA, and `--dilate`.
    dilation_kernel: Image<bool>,
    /// Which dilation algorithm to apply `dilation_kernel` with.
    dilation_size: DilationSize,
    /// Size of the internal (padded, supersampled) graph image.
    graph_size: [u32; 2],
    /// Emit a 16-bit gray-alpha image instead of RGB.
    gray_alpha: bool,
    /// Path of the output PNG file.
    output: OsString,
    /// Write the output only once at the end, skipping intermediate images.
    output_once: bool,
    /// Final output image size in pixels (padding included).
    output_size: [u32; 2],
    /// Wait for a newline on stdin before each refinement step (debugging).
    pause_per_iteration: bool,
    /// Extra margin in output pixels kept for the resampling filter.
    resampling_radius: u32,
    /// Overall evaluation time budget, if any.
    timeout: Option<Duration>,
}
/// Runs the refinement loop on `graph`, periodically (or once, with
/// `--output-once`) dilating, resampling, cropping, and saving the image.
fn plot<G: Graph>(mut graph: G, opts: PlotOptions) {
    // Use 16-bit as gamma correction will be applied by the program `compose`.
    let mut gray_alpha_im: Option<GrayAlpha16Image> = None;
    let mut rgb_im: Option<RgbImage> = None;
    let mut raw_im = Image::<Ternary>::new(opts.graph_size[0], opts.graph_size[1]);
    // The graph carries extra margin for the dilation kernel; the saved image
    // is the cropped interior.
    let cropped_width = raw_im.width() - (opts.dilation_kernel.width() - 1);
    let cropped_height = raw_im.height() - (opts.dilation_kernel.height() - 1);
    if opts.gray_alpha {
        gray_alpha_im = Some(GrayAlpha16Image::new(cropped_width, cropped_height));
    } else {
        rgb_im = Some(RgbImage::new(cropped_width, cropped_height));
    }
    let mut prev_stat = graph.get_statistics();
    print_statistics_header();
    print_statistics(&prev_stat, &prev_stat);
    // NOTE(review): the gray-alpha and RGB branches below duplicate the
    // dilate/resize/crop/save pipeline and differ only in the pixel mapping;
    // a generic helper would remove the duplication.
    let mut save_image = |graph: &G| {
        graph.get_image(&mut raw_im);
        let top_left = match opts.dilation_size {
            DilationSize::Identity => PixelIndex::new(0, 0),
            DilationSize::Small => dilate_and_crop_naive(&mut raw_im, &opts.dilation_kernel),
            DilationSize::Large => dilate_and_crop_fast(&mut raw_im, &opts.dilation_kernel),
        };
        if let Some(im) = &mut gray_alpha_im {
            for i in 0..im.height() {
                for j in 0..im.width() {
                    *im.get_pixel_mut(j, i) =
                        match raw_im[PixelIndex::new(top_left.x + j, top_left.y + i)] {
                            Ternary::True => LumaA([0, 65535]),
                            Ternary::Uncertain => LumaA([0, 32768]),
                            Ternary::False => LumaA([0, 0]),
                        };
                }
            }
            if im.width() != opts.output_size[0] || im.height() != opts.output_size[1] {
                // Downsample (SSAA), then crop off the resampling margin.
                let im = imageops::resize(
                    im,
                    opts.output_size[0] + 2 * opts.resampling_radius,
                    opts.output_size[1] + 2 * opts.resampling_radius,
                    imageops::FilterType::Triangle,
                );
                let im = imageops::crop_imm(
                    &im,
                    opts.resampling_radius,
                    opts.resampling_radius,
                    opts.output_size[0],
                    opts.output_size[1],
                )
                .to_image();
                im.save(&opts.output).expect("saving image failed");
            } else {
                im.save(&opts.output).expect("saving image failed");
            }
        } else if let Some(im) = &mut rgb_im {
            for i in 0..im.height() {
                for j in 0..im.width() {
                    *im.get_pixel_mut(j, i) =
                        match raw_im[PixelIndex::new(top_left.x + j, top_left.y + i)] {
                            Ternary::True => Rgb([0, 0, 0]),
                            Ternary::Uncertain => Rgb([64, 128, 192]),
                            Ternary::False => Rgb([255, 255, 255]),
                        };
                }
            }
            if im.width() != opts.output_size[0] || im.height() != opts.output_size[1] {
                let im = imageops::resize(
                    im,
                    opts.output_size[0] + 2 * opts.resampling_radius,
                    opts.output_size[1] + 2 * opts.resampling_radius,
                    imageops::FilterType::Triangle,
                );
                let im = imageops::crop_imm(
                    &im,
                    opts.resampling_radius,
                    opts.resampling_radius,
                    opts.output_size[0],
                    opts.output_size[1],
                )
                .to_image();
                im.save(&opts.output).expect("saving image failed");
            } else {
                im.save(&opts.output).expect("saving image failed");
            }
        }
    };
    loop {
        if opts.pause_per_iteration {
            // Await for a newline character.
            let mut input = String::new();
            stdin().read_line(&mut input).unwrap();
        }
        // Refine in ≤1.5 s slices so progress and intermediate images stay
        // responsive; with a timeout, the slice never exceeds the remainder.
        let duration = match opts.timeout {
            Some(t) => t
                .saturating_sub(prev_stat.time_elapsed)
                .min(Duration::from_millis(1500)),
            _ => Duration::from_millis(1500),
        };
        if duration.is_zero() {
            eprintln!("Warning: reached the timeout");
            break;
        }
        let result = graph.refine(duration);
        let stat = graph.get_statistics();
        print_statistics(&stat, &prev_stat);
        prev_stat = stat;
        if !opts.output_once {
            save_image(&graph);
        }
        match result {
            Ok(false) => continue,
            Ok(true) => break,
            Err(e) => {
                eprintln!("Warning: {}", e);
                break;
            }
        }
    }
    if opts.output_once {
        save_image(&graph);
    }
}
#[cfg(test)]
mod tests {
    use crate::*;

    #[test]
    fn test_disk_matrix() {
        // Expected values generated with Mathematica:
        // CopyToClipboard@StringRiffle[Map[ToString, DiskMatrix[r], {2}], ";", ","]
        // Negative and tiny radii all clamp to the 1×1 identity kernel.
        assert_eq!(disk_matrix(-10.0), parse_binary_matrix("1"));
        assert_eq!(disk_matrix(-1.0), parse_binary_matrix("1"));
        assert_eq!(disk_matrix(-0.1), parse_binary_matrix("1"));
        assert_eq!(disk_matrix(0.0), parse_binary_matrix("1"));
        assert_eq!(disk_matrix(0.4), parse_binary_matrix("1"));
        assert_eq!(disk_matrix(0.5), parse_binary_matrix("0,1,0;1,1,1;0,1,0"));
        assert_eq!(disk_matrix(1.0), parse_binary_matrix("1,1,1;1,1,1;1,1,1"));
        assert_eq!(disk_matrix(1.4), parse_binary_matrix("1,1,1;1,1,1;1,1,1"));
        assert_eq!(
            disk_matrix(1.5),
            parse_binary_matrix(
                "0,0,1,0,0;\
                 0,1,1,1,0;\
                 1,1,1,1,1;\
                 0,1,1,1,0;\
                 0,0,1,0,0"
            )
        );
        assert_eq!(
            disk_matrix(2.0),
            parse_binary_matrix(
                "0,1,1,1,0;\
                 1,1,1,1,1;\
                 1,1,1,1,1;\
                 1,1,1,1,1;\
                 0,1,1,1,0"
            )
        );
        assert_eq!(
            disk_matrix(2.4),
            parse_binary_matrix(
                "1,1,1,1,1;\
                 1,1,1,1,1;\
                 1,1,1,1,1;\
                 1,1,1,1,1;\
                 1,1,1,1,1"
            )
        );
        assert_eq!(
            disk_matrix(2.5),
            parse_binary_matrix(
                "0,0,0,1,0,0,0;\
                 0,1,1,1,1,1,0;\
                 0,1,1,1,1,1,0;\
                 1,1,1,1,1,1,1;\
                 0,1,1,1,1,1,0;\
                 0,1,1,1,1,1,0;\
                 0,0,0,1,0,0,0"
            )
        );
        assert_eq!(
            disk_matrix(3.0),
            parse_binary_matrix(
                "0,0,1,1,1,0,0;\
                 0,1,1,1,1,1,0;\
                 1,1,1,1,1,1,1;\
                 1,1,1,1,1,1,1;\
                 1,1,1,1,1,1,1;\
                 0,1,1,1,1,1,0;\
                 0,0,1,1,1,0,0"
            )
        );
    }
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/src/bin/concatenate.rs | rust/src/bin/concatenate.rs | use clap::{value_parser, Arg, Command};
use image::{imageops, ImageBuffer, ImageReader, LumaA};
use std::ffi::OsString;
type GrayAlpha16Image = ImageBuffer<LumaA<u16>, Vec<u16>>;
/// Stitches a grid of `x-tiles × y-tiles` gray-alpha tiles, named
/// `<prefix><row>-<column><suffix>`, into one `size`-sized image.
fn main() {
    let mut matches = Command::new("concatenate")
        .about("Concatenates tiles of graphs.")
        .arg(
            Arg::new("output")
                .long("output")
                .value_name("file")
                .value_parser(value_parser!(OsString)),
        )
        .arg(
            Arg::new("prefix")
                .long("prefix")
                .value_parser(value_parser!(OsString)),
        )
        .arg(
            Arg::new("size")
                .long("size")
                .num_args(2)
                .value_names(["width", "height"])
                .value_parser(value_parser!(u32)),
        )
        .arg(
            Arg::new("suffix")
                .long("suffix")
                .value_parser(value_parser!(OsString)),
        )
        .arg(
            Arg::new("x-tiles")
                .long("x-tiles")
                .value_parser(value_parser!(u32)),
        )
        .arg(
            Arg::new("y-tiles")
                .long("y-tiles")
                .value_parser(value_parser!(u32)),
        )
        .get_matches();
    let output = matches.remove_one::<OsString>("output").unwrap();
    let prefix = matches.remove_one::<OsString>("prefix").unwrap();
    let size = {
        let s = matches
            .remove_many::<u32>("size")
            .unwrap()
            .collect::<Vec<_>>();
        [s[0], s[1]]
    };
    let suffix = matches.remove_one::<OsString>("suffix").unwrap();
    let x_tiles = matches.remove_one::<u32>("x-tiles").unwrap();
    let y_tiles = matches.remove_one::<u32>("y-tiles").unwrap();
    let mut im = GrayAlpha16Image::new(size[0], size[1]);
    // `i`/`j` track the pixel position of the next tile's top-left corner;
    // tiles may have varying sizes, so positions accumulate.
    let mut i = 0;
    for i_tile in 0..y_tiles {
        let mut j = 0;
        let mut last_tile_height = None;
        for j_tile in 0..x_tiles {
            let path = [
                prefix.clone(),
                OsString::from(format!("{}-{}", i_tile, j_tile)),
                suffix.clone(),
            ]
            .into_iter()
            .collect::<OsString>();
            let tile = ImageReader::open(&path)
                .unwrap_or_else(|_| panic!("failed to open the image '{:?}'", path))
                .decode()
                .unwrap_or_else(|_| panic!("failed to decode the image '{:?}'", path))
                .into_luma_alpha16();
            let tile_width = tile.width();
            let tile_height = tile.height();
            // All tiles within a row must share the same height.
            assert!(last_tile_height.is_none() || last_tile_height == Some(tile_height));
            imageops::replace(&mut im, &tile, j as i64, i as i64);
            last_tile_height = Some(tile_height);
            j += tile_width;
        }
        // Each row must span the full width, and the rows the full height.
        assert_eq!(j, size[0]);
        i += last_tile_height.unwrap();
    }
    assert_eq!(i, size[1]);
    im.save(output).expect("failed to save the image");
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph.rs | rust/tests/graph.rs | #![cfg(all(not(debug_assertions), feature = "arb"))]
use image::ImageReader;
use std::{
fs::create_dir,
path::PathBuf,
process::{Command, Stdio},
};
use uuid::Uuid;
fn execute(cmd: &mut Command) -> bool {
cmd.stdout(Stdio::null())
.status()
.unwrap_or_else(|_| panic!("failed to execute the command: {:?}", cmd))
.success()
}
fn is_valid_id(id: &str) -> bool {
&id[0..2] == "t_"
&& Uuid::parse_str(&id[2..]).map_or(false, |u| {
u.get_variant() == uuid::Variant::RFC4122
&& u.get_version() == Some(uuid::Version::Random)
})
}
pub fn test(id: &str, args: &[String]) {
assert!(is_valid_id(id));
let graph = PathBuf::from("../target/release/graph");
let ref_dir = PathBuf::from("./tests/graph_tests/reference");
let actual_dir = PathBuf::from("./tests/graph_tests/actual");
let ref_img_path = ref_dir.join([id, ".png"].concat());
let actual_img_path = actual_dir.join([id, ".png"].concat());
if ref_img_path.exists() {
let _ = create_dir(actual_dir);
let mut cmd = Command::new(graph);
cmd.args(args).arg("--output").arg(actual_img_path.clone());
if !args.iter().any(|a| a == "--timeout") {
cmd.args(["--timeout", "1000"]);
}
assert!(execute(&mut cmd));
let ref_img = ImageReader::open(ref_img_path)
.unwrap()
.decode()
.unwrap()
.to_rgba8();
let actual_img = ImageReader::open(actual_img_path)
.unwrap()
.decode()
.unwrap()
.to_rgba8();
// Avoid using assert_eq! to prevent printing a huge sequence of numbers.
assert!(ref_img == actual_img);
} else {
let mut cmd = Command::new(graph);
cmd.args(args).arg("--output").arg(ref_img_path);
if !args.iter().any(|a| a == "--timeout") {
cmd.args(["--timeout", "1000"]);
}
assert!(execute(&mut cmd));
}
}
#[macro_export]
macro_rules! t {
($id:ident, $($arg:expr),+, @bounds($xmin:expr, $xmax:expr, $ymin:expr, $ymax:expr) $(, @$opt:ident($($opt_arg:expr),+))* $(,)?) => {
t!($id, $($arg),+, "--bounds", stringify!($xmin), stringify!($xmax), stringify!($ymin), stringify!($ymax) $(, @$opt($($opt_arg),+))*);
};
($id:ident, $($arg:expr),+, @dilate($dilate:expr) $(, @$opt:ident($($opt_arg:expr),+))* $(,)?) => {
t!($id, $($arg),+, "--dilate", $dilate $(, @$opt($($opt_arg),+))*);
};
($id:ident, $($arg:expr),+, @pad_bottom($length:expr) $(, @$opt:ident($($opt_arg:expr),+))* $(,)?) => {
t!($id, $($arg),+, "--pad-bottom", stringify!($length) $(, @$opt($($opt_arg),+))*);
};
($id:ident, $($arg:expr),+, @pad_left($length:expr) $(, @$opt:ident($($opt_arg:expr),+))* $(,)?) => {
t!($id, $($arg),+, "--pad-left", stringify!($length) $(, @$opt($($opt_arg),+))*);
};
($id:ident, $($arg:expr),+, @pad_right($length:expr) $(, @$opt:ident($($opt_arg:expr),+))* $(,)?) => {
t!($id, $($arg),+, "--pad-right", stringify!($length) $(, @$opt($($opt_arg),+))*);
};
($id:ident, $($arg:expr),+, @pad_top($length:expr) $(, @$opt:ident($($opt_arg:expr),+))* $(,)?) => {
t!($id, $($arg),+, "--pad-top", stringify!($length) $(, @$opt($($opt_arg),+))*);
};
($id:ident, $($arg:expr),+, @pen_size($pen_size:expr) $(, @$opt:ident($($opt_arg:expr),+))* $(,)?) => {
t!($id, $($arg),+, "--pen-size", stringify!($pen_size) $(, @$opt($($opt_arg),+))*);
};
($id:ident, $($arg:expr),+, @size($width:expr, $height:expr) $(, @$opt:ident($($opt_arg:expr),+))* $(,)?) => {
t!($id, $($arg),+, "--size", stringify!($width), stringify!($height) $(, @$opt($($opt_arg),+))*);
};
($id:ident, $($arg:expr),+, @ssaa($ssaa:expr) $(, @$opt:ident($($opt_arg:expr),+))* $(,)?) => {
t!($id, $($arg),+, "--ssaa", stringify!($ssaa) $(, @$opt($($opt_arg),+))*);
};
($id:ident, $($arg:expr),+, @timeout($timeout:expr) $(, @$opt:ident($($opt_arg:expr),+))* $(,)?) => {
t!($id, $($arg),+, "--timeout", stringify!($timeout) $(, @$opt($($opt_arg),+))*);
};
($id:ident, $($arg:expr),+ $(,)?) => {
#[test]
fn $id() {
let id = stringify!($id);
let args = vec![$($arg.into()),+];
$crate::test(id, &args);
}
};
}
mod graph_tests {
mod constant;
mod dilate;
mod examples;
mod explicit;
mod functions;
mod implicit;
mod pad;
mod parametric;
mod pen_size;
mod polar;
mod ssaa;
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph_tests/dilate.rs | rust/tests/graph_tests/dilate.rs | // Identity.
t!(
t_54cc8216d22149aaa67c853c772b6477,
"max(|x|, |y|) = 7",
@dilate("0,0,0;0,1,0;0,0,0"),
@size(8, 8)
);
// Duplicate horizontally.
t!(
t_79611f10496d4bfa8ebe084ffd1240c8,
"max(|x|, |y|) = 7",
@dilate("0,0,0;1,0,1;0,0,0"),
@size(8, 8)
);
// Duplicate vertically.
t!(
t_b5b6122d0843477581498ca873716f00,
"max(|x|, |y|) = 7",
@dilate("0,1,0;0,0,0;0,1,0"),
@size(8, 8)
);
// Duplicate to NW and SE.
t!(
t_007d72e733d944bfbf5c024ad4ea1ff8,
"max(|x|, |y|) = 7",
@dilate("1,0,0;0,0,0;0,0,1"),
@size(8, 8)
);
// Duplicate to NE and SW.
t!(
t_b7ffc60468a74a70bd55201504f98801,
"max(|x|, |y|) = 7",
@dilate("0,0,1;0,0,0;1,0,0"),
@size(8, 8)
);
// Asymmetric shape.
t!(
t_f810bce2133b46ebb23fdd4c8bdc1d56,
"x = 0.01 ∧ y = 0.01",
@dilate("0,1,1,1,0;1,0,0,0,0;1,0,1,1,0;1,0,0,1,0;0,1,1,1,0"),
@size(8, 8)
);
// True pixels overwrite uncertain ones.
t!(
t_63c475689a5643b4a8bb55177504e8a4,
"(y - x - 0.01) |y + x - 0.01| = 0",
@dilate("1,1,1;1,1,1;1,1,1"),
@size(8, 8)
);
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph_tests/explicit.rs | rust/tests/graph_tests/explicit.rs | // Non-square
t!(t_b0561718fd3541d2958de31f0310e101, "y = sin(exp(x))", @size(456, 789));
t!(t_900f23b5cd764428b608611b30859d6e, "y = sin(exp(x))", @size(789, 456));
t!(t_8f83795722ca41349fe0e31fa80447f9, "x = sin(exp(y))", @size(456, 789));
t!(t_bc85afb687af42daaedfa0db21afb2a1, "x = sin(exp(y))", @size(789, 456));
// Non-centered
t!(t_ddbf2d9ea2644e10a84f23c58c156870, "y = sin(exp(x))", @bounds(-12, 8, -4, 16));
t!(t_da2f53e4fdc9456a907e308edfbddf4b, "x = sin(exp(y))", @bounds(-4, 16, -12, 8));
// Inequality
t!(t_12ec2e045b3c497bb3e16a73dd07e7d6, "y < sqrt(x)");
t!(t_efde3acd852446e689c48f1490506d24, "y ≤ sqrt(x)");
t!(t_db32206574674f008c1db254e97f2742, "y > sqrt(x)");
t!(t_c8bcbe5b1ca849a9bf413518c22d4fa4, "y ≥ sqrt(x)");
// Check if `ModEqTransform` is applied.
t!(t_531465e0b09043fd9a61a215df79afab, "mod(x, π) = 0");
// Huge coordinates
t!(
t_4a655f11265f4fe68ebab2f796dbc43f,
"y = sin(x)",
@bounds(1000000000000000, 1000000000000004, -2, 2)
);
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph_tests/pad.rs | rust/tests/graph_tests/pad.rs | t!(
t_c4138320b0294abbb90cd60a913803c6,
"max(|x|, |y|) = 7",
@pad_left(4),
@size(8, 8)
);
t!(
t_9a1733d1160f409b8d4036a802c9d602,
"max(|x|, |y|) = 7",
@pad_right(4),
@size(8, 8)
);
t!(
t_2b66412c864540eda3a593c39f4899dc,
"max(|x|, |y|) = 7",
@pad_bottom(4),
@size(8, 8)
);
t!(
t_448fa13b0349425c8da22d8adc9275db,
"max(|x|, |y|) = 7",
@pad_top(4),
@size(8, 8)
);
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph_tests/implicit.rs | rust/tests/graph_tests/implicit.rs | t!(
t_84a2738be35745da97a9b120c563b4b3,
"y = t",
@timeout(8000),
);
t!(
t_7f25b9f2bd3746b3ae8f95a82921de2b,
"mod(cos(n/12 π) x + sin(n/12 π) y, 3) = 0",
@timeout(2000),
);
t!(
t_37b042e4a46346fda37c9af4b71fb404,
"⌊16/(2π) ln(r)⌋ = ⌊16/(2π) θ⌋ + n ∧ mod(n, 2) = 0 ∧ 0 ≤ n < 16",
@timeout(6000),
);
t!(
t_7f564d75d3de45faa732ffdcec3f7ea5,
"θ = 2π n/12 ∧ 0 ≤ n < 12",
@timeout(2000),
);
// Non-square
t!(
t_3d0c421d8fed4ea58e286d3bf1b1fb3a,
"r = 8 ∧ 1 < θ < 5",
@size(456, 789)
);
t!(
t_4cf5b14b6b0946ff80ac77ce5c4cb165,
"r = 8 ∧ 1 < θ < 5",
@size(789, 456)
);
// Check if MPFR's sin/cos is used instead of the Arb's functions around extrema.
t!(
t_250a7184c7444a3a928fdcb92bc75bf9,
"sin(x) = cos(y)",
@bounds(4.712, 4.713, 3.141, 3.142)
);
// Huge coordinates
t!(
t_2ccd8bdd9d2840bfbaee9b34138011fd,
"2 y = 2 sin(x)",
@bounds(1000000000000000, 1000000000000004, -2, 2)
);
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph_tests/functions.rs | rust/tests/graph_tests/functions.rs | t!(t_9141185741e3473bafdd4fbd69daacb0, "y = abs(x)");
t!(t_3424e84276854880bfbaf722a55f4083, "y = acos(x)");
t!(t_37f5ce66894a4b94a578df440066cf19, "y = acosh(x)");
t!(t_38eec1c4b37442ce82c57cca99f36e81, "y = Ai(x)");
t!(t_df6c0b5de0cb45beb1dfe37e25b5c3ba, "y = Ai'(x)");
t!(t_e46074d3fb3d4876aa7cde273ded0a34, "y = asin(x)");
t!(t_9301c82b5f894f16b9b5122c09221aee, "y = asinh(x)");
t!(t_0444c061f4974171a76c9726967ecdd4, "y = atan(x)");
t!(t_d0cb560e76f64a2c96b51cc659192105, "y = atanh(x)");
t!(t_0dd58ad0988349c3bf70719d5db1b195, "y = Bi(x)");
t!(t_99d818f7f192402a9c7579b1e3bda62b, "y = Bi'(x)");
t!(
t_a5daa83f3ba6452694b8dc31989e49a5,
"y = C(x)",
@timeout(4000),
);
t!(t_e2432ca1dca74defb99c089a7d232eeb, "y = ceil(x)");
t!(t_3c8809b76d304630b6bb047ed44c3c84, "y = Chi(x)");
t!(t_2f00a24bfa8c4d4ab2f74382299d1029, "y = Ci(x)");
t!(t_4e6cef6277de458aa116679f52721b31, "y = cos(x)");
t!(t_ed14cf332fd64c8398d072b14dd93d54, "y = cosh(x)");
t!(
t_627f51aa56d14309b527f2a37b6368e5,
"y = E(x)",
@timeout(2000),
);
t!(t_db337020ebbb4d4f82d930d38c09ee7c, "y = Ei(x)");
t!(t_ae7ed0eb236e4e579b04aadf26a2d767, "y = erf(x)");
t!(t_93156a90660c43ce987d0cab7316cba3, "y = erfc(x)");
t!(t_1cc0b236a5c34d2a81bc5dc6f3152fcf, "y = erfcinv(x)");
t!(t_99de97a06c824edfbccd3e425633a290, "y = erfi(x)");
t!(t_41c51c4052894908ad252aa1f45f30ac, "y = erfinv(x)");
t!(t_944c89064a004af9bcda883f98785d4c, "y = exp(x)");
t!(t_0d3db3fb75604b85893ef6935b6f6ce8, "y = floor(x)");
t!(t_4e62c6b2dc704f47a8275b5f412b6f6c, "y = Γ(x)");
t!(t_50f32698582b4c4da21202d71998a54b, "y = lnΓ(x)");
t!(t_9903294009524c4aa671b035804cd213, "y = Γ(-1, x)");
t!(t_7ba9b50587f64182a5fb7130232843f6, "y = Γ(0, x)");
t!(t_3d165e6f280540cf88da4b1ececeb299, "y = Γ(1, x)");
t!(t_eafdf80bfc1d45f6a244dfa827b389f9, "y = Γ(2, x)");
t!(t_4f6d9c9c7b8d484993cb265a88322897, "y = K(x)");
t!(t_a1781b46eb8c45f58d656f455677c84c, "y = li(x)");
t!(t_6c486f3c2aef49dead6acd55f3848433, "y = ln(x)");
t!(t_09177d1cf2fb44af91b5e47a7213019d, "y = log(10, x)");
t!(t_0e998089005f4afda4752098d63cd55a, "y = ψ(x)");
t!(
t_a99a60361a8d4b69a1461484c0622e10,
"y = S(x)",
@timeout(4000),
);
t!(t_15e48b4aa9864b56bc295596008ced2f, "y = sgn(x)");
t!(t_c6e28ae9902e4517885c39c5c776e26a, "y = Shi(x)");
t!(t_1a93bd43905e4d7eb041e4fe75175534, "y = Si(x)");
t!(t_b9fb18d5692e4ae6ab6e011ee31e32e6, "y = sin(x)");
t!(t_2167a4da30594e209d49215d180c6edb, "y = sinc(x)");
t!(t_5ac006668e084891a8394362499446ea, "y = sinh(x)");
t!(t_58ee894ce5fb4164ac0e3c422ac7948f, "y = sqrt(x)");
t!(t_51f6423ada6a45eba368eb03b57e10ee, "y = tan(x)");
t!(t_c88349f5b4a74502b82b56c56ffd9ea7, "y = tanh(x)");
t!(t_0a655aecc017422a8a829bb33f8ff280, "y = W(x)");
t!(t_91b4b44592264430835999d3ec1b0363, "y = W(-1, x)");
t!(t_e2a2a39070e6465fb4ce10c6b6ab3588, "y = ζ(x)");
// Bessel functions
t!(
t_1766086ff8d848829170cead2dc18e7b,
"y = J(-3/2, x)",
@bounds(-4, 4, -4, 4),
);
t!(
t_4f55a2d3f14b4539b5a1c38277a1aaf1,
"y = J(-1, x)",
@bounds(-4, 4, -4, 4),
@timeout(3000),
);
t!(
t_e657f897db644391905f6245dfd67df1,
"y = J(-1/2, x)",
@bounds(-4, 4, -4, 4),
);
t!(
t_f8780e7966644953b23ded3a6ce8e2a6,
"y = J(0, x)",
@bounds(-4, 4, -4, 4),
@timeout(4000),
);
t!(
t_b7e50ea47abe42559b286b66964498d7,
"y = J(1/2, x)",
@bounds(-4, 4, -4, 4),
);
t!(
t_6334b67231b84fcaa62320fa65b7c77f,
"y = J(1, x)",
@bounds(-4, 4, -4, 4),
@timeout(3000),
);
t!(
t_49dd5ac993f24e078ae1a0606b95a49f,
"y = J(3/2, x)",
@bounds(-4, 4, -4, 4),
);
t!(
t_19532c1221be449481385eaf7c42a279,
"y = Y(-3/2, x)",
@bounds(-4, 4, -4, 4),
);
t!(
t_2bba64e6e7d24423bbd3d2a08fe5c780,
"y = Y(-1, x)",
@bounds(-4, 4, -4, 4),
@timeout(5000),
);
t!(
t_1d96ba0ba09f44e7a9e3f497526013fa,
"y = Y(-1/2, x)",
@bounds(-4, 4, -4, 4),
);
t!(
t_9359b338708e4df1a8bdc6220b95e527,
"y = Y(0, x)",
@bounds(-4, 4, -4, 4),
@timeout(4000),
);
t!(
t_795e8b2422db48ca86b5de4a2de94b4c,
"y = Y(1/2, x)",
@bounds(-4, 4, -4, 4),
@timeout(3000),
);
t!(
t_a95ce899a2c24239b21bd1f787610d4b,
"y = Y(1, x)",
@bounds(-4, 4, -4, 4),
@timeout(5000),
);
t!(
t_7432071d9b3c4526bfe61c05f91e1964,
"y = Y(3/2, x)",
@bounds(-4, 4, -4, 4),
@timeout(2000),
);
t!(t_27eebd1d353b4aada53c8dfd22f7e08c, "y = I(-3/2, x)",);
t!(t_eaa5ae3d73ac4599a0dcb865278684f2, "y = I(-1, x)",);
t!(t_a495cd2e3ac141729ce70da2bbf69005, "y = I(-1/2, x)",);
t!(t_8355dc4b93bf4a8896db89c7abdb804f, "y = I(0, x)",);
t!(t_6a5ff0062f8c46ad9bbb4c8dc7d9681c, "y = I(1/2, x)",);
t!(t_961ac5f68d8747db8b1e701cd0138931, "y = I(1, x)",);
t!(t_6a99a6054e314aedb19af97d03ca66f8, "y = I(3/2, x)",);
t!(t_0c5785ed91294fd9b87815926fb92c4a, "y = K(-3/2, x)",);
t!(t_24fee629fc8d49008760b5da7a2122c6, "y = K(-1, x)",);
t!(t_17e51ff3f84d463384561a781a909c5c, "y = K(-1/2, x)",);
t!(t_52c7fee2d49a4cc7ab8500dc20bc917d, "y = K(0, x)",);
t!(t_d90cfe5768e045d2b2f06f6a17de8f45, "y = K(1/2, x)",);
t!(t_2a7837af7fa54a4ca9da271ac72f339b, "y = K(1, x)",);
t!(t_b81dbc5c9ffa4286922c2989ef730638, "y = K(3/2, x)",);
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph_tests/pen_size.rs | rust/tests/graph_tests/pen_size.rs | t!(
t_e4148fc295e94cc5bfde9377ab10a146,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@pen_size(1),
@ssaa(3)
);
t!(
t_0226139f3e7442c997a1e90bb621708e,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@pen_size(2),
@ssaa(3)
);
t!(
t_9c1e98e649b846cd83ac47232cc5f109,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@pen_size(5),
@ssaa(3)
);
t!(
t_cd41667be38b47c39921b668bf27992b,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@pen_size(100),
@ssaa(3)
);
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph_tests/polar.rs | rust/tests/graph_tests/polar.rs | // From https://www.wolframalpha.com/input/?i=polar+curves
// bifoliate (a = 1)
t!(
t_b1b247d3dba94a98a1e89ae9099bb442,
"r = 8 sin(θ)^2 cos(θ) / (cos(4 θ) + 3)",
@bounds(-2, 2, -2, 2),
@timeout(7000),
);
// bifolium (a = 1)
t!(
t_cc34e20f7186486690b8cf44e6eee27f,
"r = 4 sin(θ)^2 cos(θ)",
@bounds(-2, 2, -2, 2),
@timeout(3000),
);
// cardioid (a = 1)
t!(
t_4ccd608823904d1a935a15cc36d17c7f,
"r = 1 - cos(θ)",
@bounds(-5, 5, -5, 5),
);
// Cayley sextic (a = 1)
t!(
t_a96f22e561114a4ca946e8e0aae6d124,
"r = cos(θ / 3)^3",
@bounds(-2, 2, -2, 2),
@timeout(3000),
);
// cycloid of Ceva (a = 1)
t!(
t_cd74d6c729494e8a9249b739d8986e50,
"r = 2 cos(2 θ) + 1",
@bounds(-5, 5, -5, 5),
);
// circle (a = 1)
t!(
t_aa74fbf9a5ae4501a4e5bd9730a75723,
"r = 1",
@bounds(-2, 2, -2, 2),
);
// kampyle of Eudoxus (a = 1)
t!(t_b6ca9a802ce4498f8b8b5e92c30e1fa6, "r = 1 / cos(θ)^2");
// Freeth nephroid (a = 1)
t!(
t_53a9535dc7a348c6b8eea80e7a27982f,
"r = 2 sin(θ / 2) + 1",
@bounds(-5, 5, -5, 5),
);
// Garfield curve (a = 1)
t!(
t_34429ea795494b0593297fdb67e1c823,
"r = θ cos(θ) ∧ -2π ≤ θ ≤ 2π",
@timeout(2000),
);
// fourth heart curve (a = 1)
t!(
t_d32ed36d48f94fc8960be7e237e99913,
"r = sin(θ) sqrt(|cos(θ)|) / (sin(θ) + 7/5) - 2 sin(θ) + 2",
@bounds(-5, 5, -5, 5),
@timeout(2000),
);
// lituus (a = 1)
t!(
t_6c81a6bd650d4af2806953aa6668fc7f,
"r = 1 / sqrt(θ)",
@bounds(-1, 1, -1, 1),
@timeout(2000),
);
// Maltese cross curve (a = 1)
t!(t_4e807a8c807c419e8b1442aced86251f, "r = 2 / sqrt(sin(4 θ))");
// trifolium (a = 1)
t!(
t_ec54de81941547d9a1bfec6f09badf75,
"r = -cos(3θ)",
@bounds(-2, 2, -2, 2),
@timeout(2000),
);
// Tschirnhausen cubic (a = 1)
t!(
t_1a810505ca14423bbe7d14eb1d03119c,
"r = 1 / cos(θ / 3)^3",
@bounds(-100, 100, -100, 100),
);
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph_tests/constant.rs | rust/tests/graph_tests/constant.rs | // False
t!(t_21454acd110d47bab4baeb8b4877a7c1, "1 = 0");
// True
t!(t_1ea42308824249e8948e21b77c549805, "1 = 1");
// Uncertain
t!(t_65654d086dde4597ad22a3d0802a9c6b, "2 = sqrt(2)^2");
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph_tests/parametric.rs | rust/tests/graph_tests/parametric.rs | // Non-square
t!(
t_90e85ab048f04560bfc9eef9e32c4b9c,
"x = 8 cos(t) ∧ y = 8 sin(t) ∧ 1 < t < 5",
@size(456, 789)
);
t!(
t_a039ce4efca04d99b6a6bdd08b5b2bf4,
"x = 8 cos(t) ∧ y = 8 sin(t) ∧ 1 < t < 5",
@size(789, 456)
);
// From https://www.wolframalpha.com/input/?i=parametric+planar+curves
// Archimedes' spiral (a = 1)
t!(
t_8844cc6c192544249d545aa4ff93fdfc,
"x = t cos(t) ∧ y = t sin(t) ∧ 0 ≤ t ≤ 6π",
@bounds(-20, 20, -20, 20),
);
// astroid (a = 1)
t!(
t_886f8b99d8394e8eae76907506d95f1e,
"x = cos(t)^3 ∧ y = sin(t)^3",
@bounds(-2, 2, -2, 2),
);
// Atzema spiral (a = 1)
t!(
t_812458dc9b594ace92306e2941a48539,
"x = -t sin(t) + sin(t) / t - 2 cos(t) ∧ y = -2 sin(t) + t cos(t) - cos(t) / t ∧ 1 ≤ t ≤ 10",
);
// bifoliate (a = 1)
t!(
t_98966651db534aa79c7e026b6d2171dd,
"x = 8 sin(t)^2 cos(t)^2 / (cos(4 t) + 3) ∧ y = 8 sin(t)^3 cos(t) / (cos(4 t) + 3)",
@bounds(-2, 2, -2, 2),
@timeout(3000),
);
// bifolium (a = 1)
t!(
t_af16b5074e804e86bc2bd7ce2fc07db0,
"x = 4 sin(t)^2 cos(t)^2 ∧ y = 4 sin(t)^3 cos(t)",
@bounds(-2, 2, -2, 2),
);
// second butterfly curve
t!(
t_1fc142fa2f4b49bc993234a1384220b0,
"x = sin(t) (sin(t / 12)^5 + exp(cos(t)) - 2 cos(4 t)) ∧ y = cos(t) (sin(t / 12)^5 + exp(cos(t)) - 2 cos(4 t))",
@bounds(-5, 5, -5, 5),
@timeout(4000),
);
// cardioid (a = 1)
t!(
t_302532e32f2f46ea96c82f312543de97,
"x = (1 - cos(t)) cos(t) ∧ y = sin(t) (1 - cos(t))",
@bounds(-5, 5, -5, 5),
);
// Cayley sextic (a = 1)
t!(
t_64942ef8f2d5401882f0d91820e506ac,
"x = cos(t / 3)^3 cos(t) ∧ y = sin(t) cos(t / 3)^3",
@bounds(-2, 2, -2, 2),
);
// cycloid of Ceva (a = 1)
t!(
t_b4bf85744ce444fb83f38ca9c6c56b30,
"x = cos(t) (2 cos(2 t) + 1) ∧ y = sin(t) (2 cos(2 t) + 1)",
@bounds(-5, 5, -5, 5),
);
// circle (a = 1)
t!(
t_6acfe3a561814938a3a3b01d5514e2ca,
"x = cos(t) ∧ y = sin(t)",
@bounds(-2, 2, -2, 2),
);
// circle involute (a = 1)
t!(
t_8aa880fe2833467084836d7dd21198c9,
"x = t sin(t) + cos(t) ∧ y = sin(t) - t cos(t) ∧ 0 ≤ t ≤ 10",
);
// cycloid (a = 1)
t!(
t_f2c688e095a9469fa6483e0d3dd0e36a,
"x = t - sin(t) ∧ y = 1 - cos(t)",
);
// deltoid (a = 1)
t!(
t_268ee2e1bec147d8989ab63963769bb0,
"x = 2 cos(t) / 3 + cos(2 t) / 3 ∧ y = 2 sin(t) / 3 - sin(2 t) / 3",
@bounds(-2, 2, -2, 2),
);
// folium of Descartes (a = 1)
t!(
t_f85bacb939644c6381c22568c3e8a54f,
"x = 3 t / (t^3 + 1) ∧ y = 3 t^2 / (t^3 + 1) ∧ -30 ≤ t ≤ 30",
@bounds(-2, 2, -2, 2),
);
// cissoid of Diocles (a = 1)
t!(
t_539949c082ff4b319c98c9f68e9a3d14,
"x = 2 sin(t)^2 ∧ y = 2 sin(t)^2 tan(t)",
@bounds(-5, 5, -5, 5),
);
// Doppler spiral (a = 1, k = 2)
t!(
t_55ac60d148b44bbe9aad186ee255c61c,
"x = 2 t + t cos(t) ∧ y = t sin(t) ∧ 0 ≤ t ≤ 40",
@bounds(-50, 150, -100, 100),
);
// eight curve (a = 1)
t!(
t_7c7632d36c2c49499bbb8ac454a0063a,
"x = sin(t) ∧ y = sin(t) cos(t)",
@bounds(-2, 2, -2, 2),
@timeout(2000),
);
// kampyle of Eudoxus (a = 1)
t!(
t_83353dd50cd441c2b99967cd1333a601,
"x = 1 / cos(t) ∧ y = tan(t) / cos(t)",
);
// Freeth nephroid (a = 1)
t!(
t_dca178704b6b4f259552f104208e8110,
"x = (2 sin(t / 2) + 1) cos(t) ∧ y = (2 sin(t / 2) + 1) sin(t)",
@bounds(-5, 5, -5, 5),
);
// Garfield curve (a = 1)
t!(
t_9d71fb2ea4844f699a78bebc123772e2,
"x = t cos(t)^2 ∧ y = t sin(t) cos(t) ∧ -2π ≤ t ≤ 2π",
);
// fourth heart curve (a = 1)
t!(
t_b5438096e28242eea92db15e8786a563,
"x = cos(t) (sin(t) sqrt(|cos(t)|) / (sin(t) + 7/5) - 2 sin(t) + 2) ∧ y = sin(t) (sin(t) sqrt(|cos(t)|) / (sin(t) + 7/5) - 2 sin(t) + 2)",
@bounds(-5, 5, -5, 5),
);
// fifth heart curve
t!(
t_0e2ef4e21ed34beeb4279b4c5c36a228,
"x = 16 sin(t)^3 ∧ y = 13 cos(t) - 5 cos(2 t) - 2 cos(3 t) - cos(4 t)",
@bounds(-20, 20, -20, 20),
);
// lituus (a = 1)
t!(
t_c6f55e36d9ce4673bad541e0b1ad5f24,
"x = cos(t) / sqrt(t) ∧ y = sin(t) / sqrt(t)",
@bounds(-1, 1, -1, 1),
@timeout(6000),
);
// Maltese cross curve (a = 1)
t!(
t_e80f0dbadfeb49248226ec4cc859385f,
"x = 2 cos(t) / sqrt(sin(4 t)) ∧ y = 2 sin(t) / sqrt(sin(4 t))",
);
// parabola involute (a = 1)
t!(
t_14c9b1bb6ad04e4bb1c5252a73c96e51,
"x = -1/4 t tanh(t) ∧ y = 1/4 (sinh(t) - t / cosh(t))",
@bounds(-5, 5, -5, 5),
);
// piriform curve (a = 1, b = 1)
t!(
t_1e66c48b91f74a9592b2c53adc73bb37,
"x = sin(t) + 1 ∧ y = (sin(t) + 1) cos(t)",
@bounds(-1, 3, -2, 2),
);
// ranunculoid (a = 1)
t!(
t_4f1d55a0923c4d4fb6cbcc67ea8b12fb,
"x = 6 cos(t) - cos(6 t) ∧ y = 6 sin(t) - sin(6 t)",
);
// tractrix (a = 1)
t!(
t_953663f505aa46f2a7dfbb460e5da84a,
"x = t - tanh(t) ∧ y = 1 / cosh(t)",
@bounds(-5, 5, -5, 5),
);
// tractrix spiral (a = 1)
t!(
t_ec7bc6583a004fca8a4dfefe5d24ca8b,
"x = cos(t) cos(t - tan(t)) ∧ y = cos(t) sin(t - tan(t)) ∧ 0 ≤ t < π/2",
@bounds(-1, 1, -1, 1),
@timeout(4000),
);
// trifolium (a = 1)
t!(
t_2ce09882cb1a4de9a007f124c2dc36af,
"x = -cos(t) cos(3 t) ∧ y = -sin(t) cos(3 t)",
@bounds(-2, 2, -2, 2),
);
// Tschirnhausen cubic (a = 1)
t!(
t_7da3ecab3414467aa8d3e832e231e70c,
"x = 1 - 3 t^2 ∧ y = t (3 - t^2)",
@bounds(-100, 100, -100, 100),
);
// Integer parameters
t!(
t_edb96325adb6497cbb0173eb9d5496cc,
"x = 10 / m ∧ y = 20 / n",
);
t!(
t_c6947324ad53422e9076ce55e9073913,
"x = 10 / m ∧ y = 20 / n ∧ m = n",
);
t!(
t_bfe7a22400304fa0bf9d0260f522f047,
"x = cos(t) + m ∧ y = sin(t) + 2 n ∧ m = n",
);
// Huge coordinates
t!(
t_c99d61b6e2324c6c8f307b3a90b522d4,
"x = t ∧ y = sin(t)",
@bounds(1000000000000000, 1000000000000004, -2, 2)
);
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph_tests/ssaa.rs | rust/tests/graph_tests/ssaa.rs | t!(
t_4b288ec719e249e7bcd0f96a4ae51011,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@ssaa(3),
@size(128, 128)
);
t!(
t_7566712cf4224f3f9df071f363ebdb89,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@ssaa(5),
@size(128, 128)
);
t!(
t_216015c57aff43cd9d0add14a406a32a,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@ssaa(7),
@size(128, 128)
);
t!(
t_379c733730ac4c1fa319ee9b20547021,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@ssaa(9),
@size(128, 128)
);
t!(
t_faafc5de961744ac8be80987b3ac8b79,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@ssaa(11),
@size(128, 128)
);
t!(
t_d8a70cd2f6b14bc0881fbf36659e413b,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@ssaa(13),
@size(128, 128)
);
t!(
t_e5d7b2480d9a425384548aa3b1a8ca99,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@ssaa(15),
@size(128, 128)
);
t!(
t_dcd1697a9e854a47bd41f0497535db97,
"y = sin(x) + 0.01",
@bounds(-4, 4, -4, 4),
@ssaa(17),
@size(128, 128)
);
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/tests/graph_tests/examples.rs | rust/tests/graph_tests/examples.rs | // From Examples.md
t!(
t_b8b04a37eaf64d0491cc2b1e6ee7bb1b,
"(2y-x-1)(2y-x+1)(2x+y-1)(2x+y+1)((5x-2)^2+(5y-6)^2-10)((5x)^2+(5y)^2-10)((5x+2)^2+(5y+6)^2-10) = 0",
@bounds(-3, 3, -3, 3),
);
t!(
t_0b3b446edb104b2680bdb05fcdbef602,
"((x-2)^2+(y-2)^2-0.4)((x-2)^2+(y-1)^2-0.4)((x-2)^2+y^2-0.4)((x-2)^2+(y+1)^2-0.4)((x-2)^2+(y+2)^2-0.4) ((x-1)^2+(y-2)^2-0.4)((x-1)^2+(y-1)^2-0.4)((x-1)^2+y^2-0.4)((x-1)^2+(y+1)^2-0.4)((x-1)^2+(y+2)^2-0.4) (x^2+(y-2)^2-0.4)(x^2+(y-1)^2-0.4)(x^2+y^2-0.4)(x^2+(y+1)^2-0.4)(x^2+(y+2)^2-0.4) ((x+1)^2+(y-2)^2-0.4)((x+1)^2+(y-1)^2-0.4)((x+1)^2+y^2-0.4)((x+1)^2+(y+1)^2-0.4)((x+1)^2+(y+2)^2-0.4) ((x+2)^2+(y-2)^2-0.4)((x+2)^2+(y-1)^2-0.4)((x+2)^2+y^2-0.4)((x+2)^2+(y+1)^2-0.4)((x+2)^2+(y+2)^2-0.4) = 0",
@bounds(-3, 3, -3, 3),
);
// Irrationally Contin
t!(
t_c130725c25914756aa959dcbad0edc87,
"y = gcd(x, 1)",
@timeout(3000),
);
// Parabolic Waves
t!(
t_23ac7cf5b57d4c7388397379cd277762,
"|sin(sqrt(x^2 + y^2))| = |cos(x)|",
@timeout(2000),
);
// Prime Bars
t!(
t_402ef2fd017a41d088525e08fb27e03c,
"gcd(⌊x⌋, Γ(⌊sqrt(2⌊x⌋) + 1/2⌋)) ≤ 1 < x - 1",
@bounds(0, 40, -20, 20),
);
// Pythagorean Pairs
t!(
t_ba0d2f30668b421484f34cf614f52744,
"⌊x⌋^2 + ⌊y⌋^2 = ⌊sqrt(⌊x⌋^2 + ⌊y⌋^2)⌋^2",
@bounds(-40, 40, -40, 40),
);
// Pythagorean Triples
t!(t_df0cd3aa1f02458689e59c6d5b383da7, "⌊x⌋^2 + ⌊y⌋^2 = 25");
// Rational Beams
t!(
t_305f167fd5ba4d0e8b6c5408f01cdab6,
"gcd(x, y) > 1",
@bounds(0.3, 8.9, 0.4, 9.0),
@timeout(5000),
);
// Infinite Frequency
t!(t_f2c337b79a9843aa898fb494c3869916, "y = sin(40 / x)");
// O Spike
t!(
t_0fcfd060295c411d8724a7872c071315,
"(x (x-3) / (x-3.001))^2 + (y (y-3) / (y-3.001))^2 = 81",
@timeout(2000),
);
// Solid Disc
t!(
t_8c9209237ba04e67a715382c7bcbf5e0,
"81 - x^2 - y^2 = |81 - x^2 - y^2|",
@timeout(3000),
);
// Spike
t!(
t_ad4600f870c24171a0bb88f0dfb002e9,
"y = x (x-3) / (x-3.001)",
);
// Step
t!(t_f8c6fb04be6f464ebb4f12ff2bb52638, "y = atan(9^9^9 (x-1))");
// Upper Triangle
t!(
t_d2a0e9e88c41406a80a266d78f6dc7a5,
"x + y = |x + y|",
@timeout(2000),
);
// Wave
t!(t_95e4d99eaafc4e749ca0703ff6a8f1b1, "y = sin(x) / x");
// binary naturals
t!(
t_0f59284bfc0948758389bb0f6f91b68a,
"(1 + 99 ⌊mod(⌊y⌋ 2^⌈x⌉, 2)⌋) (mod(x,1) - 1/2)^2 + (mod(y,1) - 1/2)^2 = 0.15 ∧ ⌊-log(2,y)⌋ < x < 0",
@bounds(-15, 5, -5, 15),
);
// binary squares
t!(
t_a85d65f32c5d4ae2831eeccb240071b8,
"(1 + 99 ⌊mod(⌊y⌋^2 2^⌈x⌉, 2)⌋) (mod(x,1) - 1/2)^2 + (mod(y,1) - 1/2)^2 = 0.15 ∧ x < 0 < ⌊y⌋^2 ≥ 2^-⌈x⌉",
@bounds(-15, 5, -5, 15),
);
// decimal squares
t!(
t_23d33db665b843368c9272075452f04b,
"mod(if(30 max(|mod(y,1) - 1/2|, |mod(x,0.8)+0.1 - 1/2| + |mod(y,1) - 1/2| - 1/4) < 1, 892, if(30 max(|mod(y,1) - 1/10|, |mod(x,0.8)+0.1 - 1/2| + |mod(y,1) - 1/10| - 1/4) < 1, 365, if(30 max(|mod(y,1) - 9/10|, |mod(x,0.8)+0.1 - 1/2| + |mod(y,1) - 9/10| - 1/4) < 1, 941, if(30 max(|mod(x,0.8) + 0.1 - 4/5|, |mod(y,1) - 7/10| + |mod(x,0.8) + 0.1 - 4/5| - 1/8) < 1, 927, if(30 max(|mod(x,0.8) + 0.1 - 1/5|, |mod(y,1) - 7/10| + |mod(x,0.8) + 0.1 - 1/5| - 1/8) < 1, 881, if(30 max(|mod(x,0.8) + 0.1 - 1/5|, |mod(y,1) - 3/10| + |mod(x,0.8) + 0.1 - 1/5| - 1/8) < 1, 325, if(30 max(|mod(x,0.8) + 0.1 - 4/5|, |mod(y,1) - 3/10| + |mod(x,0.8) + 0.1 - 4/5| - 1/8) < 1, 1019, sqrt(-1)))))))) 2^-⌊mod(⌊y⌋^2 / 10^-⌈1.25x⌉, 10)⌋, 2) ≥ 1 ∧ x < 0 < ⌊y⌋^2 ≥ 10^-⌈1.25x⌉",
@bounds(-7, 3, 1, 11),
);
// bi-infinite binary tree
t!(
t_ed8070659e554cc881b864687658ee9b,
"sin(2^⌊y⌋ x + π/4 (y - ⌊y⌋) - π/2) = 0 ∨ sin(2^⌊y⌋ x - π/4 (y - ⌊y⌋) - π/2) = 0",
@timeout(6000),
);
// Simply Spherical
t!(
t_7c22e4322e2c4388adf50668b15cadb0,
"sin(20x) - cos(20y) + 2 > 4 if((x+1)^2 + (y-1)^2 < 25, (3/4 - 1/15 sqrt((x+4)^2 + (y-3)^2)), (0.65 + 1/π atan(6 (sqrt((x-1)^2/30 + (y+1)^2/9) - 1))))",
@timeout(2000),
);
// Tube
t!(
t_bfd842225b5a499bba4be9a08feecd2a,
"cos(5x) + cos(5/2 (x - sqrt(3) y)) + cos(5/2 (x + sqrt(3) y)) > 1 + if((x^2 + 2y^2 - 1600) (x^2 + 3 (y-2)^2 - 700) ≤ 0, 3/2 sin(1/4 sqrt((x+3)^2 + 2 (y-3)^2)), 2 atan(1/8 sqrt(4 (x-2)^2 + 10 (y+4)^2) - 9)^2)",
@bounds(-50, 50, -50, 50),
@timeout(4000),
);
// Frontispiece #2
t!(
t_70f11f255fe54073996223eec75840f4,
"x / cos(x) + y / cos(y) = x y / cos(x y) ∨ x / cos(x) + y / cos(y) = -(x y / cos(x y)) ∨ x / cos(x) - y / cos(y) = x y / cos(x y) ∨ x / cos(x) - y / cos(y) = -(x y / cos(x y))",
@timeout(5000),
);
// Frontispiece
t!(
t_29f17030e314469e816e03979778f244,
"x / sin(x) + y / sin(y) = x y / sin(x y) ∨ x / sin(x) + y / sin(y) = -(x y / sin(x y)) ∨ x / sin(x) - y / sin(y) = x y / sin(x y) ∨ x / sin(x) - y / sin(y) = -(x y / sin(x y))",
@timeout(7000),
);
// Highwire
t!(
t_564aaa5e88a54895b1851a1f1e5ffa3c,
"|x cos(x) - y sin(y)| = |x cos(y) - y sin(x)|",
@bounds(-10.1, 9.9, -9.8, 10.2),
@timeout(9000),
);
// Trapezoidal Fortress
t!(
t_b0175d3b58ed46158ba72bbd85670fc9,
"|x cos(x) + y sin(y)| = x cos(y) - y sin(x)",
@bounds(-10.1, 9.9, -9.8, 10.2),
@timeout(8000),
);
// Sharp Threesome
t!(
t_2bd34182bf58454bb6b7ce7d72548588,
"sin(sqrt((x+5)^2 + y^2)) cos(8 atan(y / (x+5))) sin(sqrt((x-5)^2 + (y-5)^2)) cos(8 atan((y-5) / (x-5))) sin(sqrt(x^2 + (y+5)^2)) cos(8 atan((y+5) / x)) > 0",
@timeout(7000),
);
// The Disco Hall
t!(
t_8b36e2e711b7491f9af53daf696053a9,
"sin(|x + y|) > max(cos(x^2), sin(y^2))",
@timeout(2000),
);
// “the patterned star”
t!(
t_40c29ccd39684aa7847dc32caf5bcab6,
"0.15 > |rankedMin([cos(8y), cos(4(y-sqrt(3)x)), cos(4(y+sqrt(3)x))], 2) - cos(⌊3/π mod(atan2(y,x),2π) - 0.5⌋) - 0.1| ∧ rankedMin([|2x|, |x-sqrt(3)y|, |x+sqrt(3)y|], 2) < 10",
@bounds(-10.1, 9.9, -9.8, 10.2),
@timeout(5000),
);
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/graphest-fftw-sys/build.rs | rust/graphest-fftw-sys/build.rs | use std::{
env, fs, io,
path::{Path, PathBuf},
process::Command,
};
const FFTW_TAR_URL: &str = "https://www.fftw.org/fftw-3.3.10.tar.gz";
struct Environment {
build_dir: PathBuf,
cache_dir: Option<PathBuf>,
has_avx2: bool,
has_neon: bool,
include_dir: PathBuf,
is_windows: bool,
lib_dir: PathBuf,
makeflags: String,
out_dir: PathBuf,
}
fn main() {
let out_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap());
let pkg_name = env::var("CARGO_PKG_NAME").unwrap();
let pkg_version = env::var("CARGO_PKG_VERSION").unwrap();
let cpu_features = env::var("CARGO_CFG_TARGET_FEATURE").unwrap();
let cpu_features = cpu_features.split(',').collect::<Vec<_>>();
let env = Environment {
build_dir: out_dir.join("build"),
cache_dir: user_cache_dir().map(|c| c.join(pkg_name).join(pkg_version)),
has_avx2: cpu_features.contains(&"avx2"),
has_neon: cpu_features.contains(&"neon"),
include_dir: out_dir.join("include"),
is_windows: env::var("CARGO_CFG_WINDOWS").is_ok(),
lib_dir: out_dir.join("lib"),
makeflags: "-j".to_owned(),
out_dir: out_dir.clone(),
};
fs::create_dir_all(&env.build_dir)
.unwrap_or_else(|_| panic!("failed to create the directory: {:?}", env.build_dir));
load_cache(&env);
build(&env);
save_cache(&env);
run_bindgen(&env);
write_link_info(&env);
}
fn load_cache(env: &Environment) {
if let Some(c) = &env.cache_dir {
let _ = copy_dir_all(c.join("include"), env.include_dir.clone());
let _ = copy_dir_all(c.join("lib"), env.lib_dir.clone());
}
}
fn build(env: &Environment) {
if env.lib_dir.join("libfftw3f.a").exists() {
return;
}
let build_dir = env.build_dir.join("fftw-build");
if !build_dir.exists() {
execute_or_panic(Command::new("wget").current_dir(&env.build_dir).args([
"--output-document",
"fftw.tar.gz",
"--quiet",
FFTW_TAR_URL,
]));
execute_or_panic(Command::new("mkdir").args([build_dir.to_str().unwrap()]));
execute_or_panic(Command::new("tar").current_dir(&env.build_dir).args([
"xf",
"fftw.tar.gz",
"--directory",
build_dir.to_str().unwrap(),
"--strip-components=1",
]));
}
execute_or_panic(
Command::new("sh").current_dir(&build_dir).arg("-c").arg(
[
"./configure",
"--prefix",
&if env.is_windows {
env.out_dir.to_str().unwrap().replace('\\', "/")
} else {
env.out_dir.to_str().unwrap().into()
},
// http://www.fftw.org/install/windows.html
if env.is_windows {
"--with-our-malloc"
} else {
""
},
"--disable-doc",
"--disable-fortran",
"--enable-float",
if env.has_avx2 { "--enable-avx2" } else { "" },
if env.has_neon { "--enable-neon" } else { "" },
]
.join(" "),
),
);
execute_or_panic(
Command::new("make")
.current_dir(&build_dir)
.env("MAKEFLAGS", &env.makeflags),
);
execute_or_panic(
Command::new("make")
.current_dir(&build_dir)
.arg("check")
.env("MAKEFLAGS", &env.makeflags),
);
execute_or_panic(Command::new("make").current_dir(&build_dir).arg("install"));
}
fn run_bindgen(env: &Environment) {
let binding_file = env.out_dir.join("fftw.rs");
if binding_file.exists() {
return;
}
bindgen::Builder::default()
.header(env.include_dir.join("fftw3.h").to_str().unwrap())
.allowlist_function("fftwf_.*")
.generate()
.expect("failed to generate bindings")
.write_to_file(binding_file.clone())
.unwrap_or_else(|_| {
panic!(
"failed to write to the file: {}",
binding_file.to_string_lossy()
)
});
}
fn save_cache(env: &Environment) {
if let Some(c) = &env.cache_dir {
let _ = copy_dir_all(env.include_dir.clone(), c.join("include"));
let _ = copy_dir_all(env.lib_dir.clone(), c.join("lib"));
}
}
fn write_link_info(env: &Environment) {
println!(
"cargo:rustc-link-search=native={}",
env.lib_dir.to_str().unwrap()
);
println!("cargo:rustc-link-lib=static=fftw3f");
}
/// Copies all files and directories in `from` into `to`, preserving the directory structure.
///
/// The directory `to` is created if it does not exist. Symlinks are ignored.
fn copy_dir_all<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()> {
fs::create_dir_all(&to)?;
for entry in fs::read_dir(from)? {
let entry = entry?;
let from = entry.path();
let to = to.as_ref().join(entry.file_name());
let ty = entry.file_type()?;
if ty.is_dir() {
copy_dir_all(from, to)?;
} else if ty.is_file() {
fs::copy(from, to)?;
}
}
Ok(())
}
fn execute_or_panic(cmd: &mut Command) {
let status = cmd
.status()
.unwrap_or_else(|_| panic!("failed to execute the command: {:?}", cmd));
if !status.success() {
if let Some(code) = status.code() {
panic!("the process exited with code {}: {:?}", code, cmd);
} else {
panic!("the process is terminated by a signal: {:?}", cmd);
}
}
}
fn user_cache_dir() -> Option<PathBuf> {
let host = env::var("HOST").ok()?;
if host.contains("darwin") {
env::var_os("HOME")
.filter(|s| !s.is_empty())
.map(|s| PathBuf::from(s).join("Library").join("Caches"))
} else if host.contains("linux") {
env::var_os("XDG_CACHE_HOME")
.filter(|s| !s.is_empty())
.map(PathBuf::from)
.or_else(|| {
env::var_os("HOME")
.filter(|s| !s.is_empty())
.map(|s| PathBuf::from(s).join(".cache"))
})
} else if host.contains("windows") {
env::var_os("LOCALAPPDATA")
.filter(|s| !s.is_empty())
.map(PathBuf::from)
} else {
None
}
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/graphest-fftw-sys/src/lib.rs | rust/graphest-fftw-sys/src/lib.rs | #![allow(non_camel_case_types, non_snake_case, non_upper_case_globals)]
include!(concat!(env!("OUT_DIR"), "/fftw.rs"));
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/graphest-flint-sys/build.rs | rust/graphest-flint-sys/build.rs | use std::{
env, fs, io,
path::{Path, PathBuf},
process::Command,
};
// https://gitlab.com/tspiteri/gmp-mpfr-sys/-/blob/master/build.rs
const FLINT_GIT_TAG: &str = "v3.4.0";
const FLINT_GIT_URL: &str = "https://github.com/flintlib/flint.git";
struct Environment {
build_dir: PathBuf,
cache_dir: Option<PathBuf>,
gmp_dir: PathBuf,
include_dir: PathBuf,
lib_dir: PathBuf,
makeflags: String,
out_dir: PathBuf,
}
fn main() {
let out_dir = PathBuf::from(env::var_os("OUT_DIR").unwrap());
let pkg_name = env::var("CARGO_PKG_NAME").unwrap();
let pkg_version = env::var("CARGO_PKG_VERSION").unwrap();
let env = Environment {
build_dir: out_dir.join("build"),
cache_dir: user_cache_dir().map(|c| c.join(pkg_name).join(pkg_version)),
gmp_dir: PathBuf::from(env::var_os("DEP_GMP_OUT_DIR").unwrap()),
include_dir: out_dir.join("include"),
lib_dir: out_dir.join("lib"),
makeflags: "-j".to_owned(),
out_dir: out_dir.clone(),
};
fs::create_dir_all(&env.build_dir)
.unwrap_or_else(|_| panic!("failed to create the directory: {:?}", env.build_dir));
load_cache(&env);
build_flint(&env);
save_cache(&env);
run_arb_bindgen(&env);
write_link_info(&env);
}
fn load_cache(env: &Environment) {
if let Some(c) = &env.cache_dir {
let _ = copy_dir_all(c.join("include"), env.include_dir.clone());
let _ = copy_dir_all(c.join("lib"), env.lib_dir.clone());
}
}
fn build_flint(env: &Environment) {
if env.lib_dir.join("libflint.a").exists() {
return;
}
let gmp_dir = env.build_dir.join("gmp");
if !gmp_dir.exists() {
symlink_dir_or_panic(&env.gmp_dir, &gmp_dir);
}
let build_dir = env.build_dir.join("flint-build");
if !build_dir.exists() {
execute_or_panic(Command::new("git").current_dir(&env.build_dir).args([
"clone",
"--branch",
FLINT_GIT_TAG,
"--depth",
"1",
FLINT_GIT_URL,
build_dir.to_str().unwrap(),
]));
}
execute_or_panic(
Command::new("sh")
.current_dir(&build_dir)
.arg("-c")
.arg("./bootstrap.sh"),
);
execute_or_panic(
Command::new("sh")
.current_dir(&build_dir)
.arg("-c")
.arg(
[
"./configure",
"--enable-static",
&format!("--prefix={}", env.out_dir.to_str().unwrap()),
"--with-gmp=../gmp", // `gmp_dir`
"--with-mpfr=../gmp", // `gmp_dir`
]
.join(" "),
)
.env("CFLAGS", "-Wno-error"),
);
execute_or_panic(
Command::new("make")
.current_dir(&build_dir)
.env("MAKEFLAGS", &env.makeflags),
);
execute_or_panic(
Command::new("make")
.current_dir(&build_dir)
.arg("check")
.env("FLINT_TEST_MULTIPLIER", "0.1")
.env("MAKEFLAGS", &env.makeflags),
);
execute_or_panic(Command::new("make").current_dir(&build_dir).arg("install"));
}
fn run_arb_bindgen(env: &Environment) {
let binding_file = env.out_dir.join("arb.rs");
if binding_file.exists() {
return;
}
let include_dir = env.gmp_dir.join("include").to_str().unwrap().to_owned();
let mut clang_args = vec![
"-DACB_INLINES_C",
"-DARB_INLINES_C",
"-DARF_INLINES_C",
"-DMAG_INLINES_C",
"-I",
&include_dir,
];
// https://github.com/rust-lang/rust-bindgen/issues/1760
if cfg!(all(
target_arch = "x86_64",
target_os = "windows",
target_env = "gnu"
)) {
clang_args.push("--target=x86_64-pc-windows-gnu");
}
bindgen::Builder::default()
.header(
env.include_dir
.join("flint")
.join("acb.h")
.to_str()
.unwrap(),
)
.header(
env.include_dir
.join("flint")
.join("acb_elliptic.h")
.to_str()
.unwrap(),
)
.header(
env.include_dir
.join("flint")
.join("arb.h")
.to_str()
.unwrap(),
)
.header(
env.include_dir
.join("flint")
.join("arb_hypgeom.h")
.to_str()
.unwrap(),
)
.header(
env.include_dir
.join("flint")
.join("arf.h")
.to_str()
.unwrap(),
)
.header(
env.include_dir
.join("flint")
.join("mag.h")
.to_str()
.unwrap(),
)
.allowlist_function("(acb|arb|arf|mag)_.*")
.clang_args(&clang_args)
.generate()
.expect("failed to generate bindings")
.write_to_file(binding_file.clone())
.unwrap_or_else(|_| {
panic!(
"failed to write to the file: {}",
binding_file.to_string_lossy()
)
});
}
fn save_cache(env: &Environment) {
if let Some(c) = &env.cache_dir {
let _ = copy_dir_all(env.include_dir.clone(), c.join("include"));
let _ = copy_dir_all(env.lib_dir.clone(), c.join("lib"));
}
}
fn write_link_info(env: &Environment) {
println!(
"cargo:rustc-link-search=native={}",
env.gmp_dir.join("lib").to_str().unwrap()
);
println!("cargo:rustc-link-lib=static=gmp");
println!("cargo:rustc-link-lib=static=mpfr");
println!(
"cargo:rustc-link-search=native={}",
env.lib_dir.to_str().unwrap()
);
println!("cargo:rustc-link-lib=static=flint");
}
/// Copies all files and directories in `from` into `to`, preserving the directory structure.
///
/// The directory `to` is created if it does not exist. Symlinks are ignored.
fn copy_dir_all<P: AsRef<Path>, Q: AsRef<Path>>(from: P, to: Q) -> io::Result<()> {
fs::create_dir_all(&to)?;
for entry in fs::read_dir(from)? {
let entry = entry?;
let from = entry.path();
let to = to.as_ref().join(entry.file_name());
let ty = entry.file_type()?;
if ty.is_dir() {
copy_dir_all(from, to)?;
} else if ty.is_file() {
fs::copy(from, to)?;
}
}
Ok(())
}
fn execute_or_panic(cmd: &mut Command) {
let status = cmd
.status()
.unwrap_or_else(|_| panic!("failed to execute the command: {:?}", cmd));
if !status.success() {
if let Some(code) = status.code() {
panic!("the process exited with code {}: {:?}", code, cmd);
} else {
panic!("the process is terminated by a signal: {:?}", cmd);
}
}
}
#[cfg(unix)]
fn symlink_dir_or_panic(original: &Path, link: &Path) {
std::os::unix::fs::symlink(original, link).unwrap_or_else(|_| {
panic!("failed to create a symlink to {:?} at {:?}", original, link);
});
}
#[cfg(windows)]
fn symlink_dir_or_panic(original: &Path, link: &Path) {
if std::os::windows::fs::symlink_dir(original, link).is_ok() {
return;
}
eprintln!(
"failed to create a symlink to {:?} at {:?}, copying instead",
original, link
);
execute_or_panic(Command::new("cp").arg("-R").arg(original).arg(link));
}
fn user_cache_dir() -> Option<PathBuf> {
let host = env::var("HOST").ok()?;
if host.contains("darwin") {
env::var_os("HOME")
.filter(|s| !s.is_empty())
.map(|s| PathBuf::from(s).join("Library").join("Caches"))
} else if host.contains("linux") {
env::var_os("XDG_CACHE_HOME")
.filter(|s| !s.is_empty())
.map(PathBuf::from)
.or_else(|| {
env::var_os("HOME")
.filter(|s| !s.is_empty())
.map(|s| PathBuf::from(s).join(".cache"))
})
} else if host.contains("windows") {
env::var_os("LOCALAPPDATA")
.filter(|s| !s.is_empty())
.map(PathBuf::from)
} else {
None
}
}
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
unageek/graphest | https://github.com/unageek/graphest/blob/4bbdc568b4725a1a196c9db15746fd484336cb91/rust/graphest-flint-sys/src/lib.rs | rust/graphest-flint-sys/src/lib.rs | #![allow(
dead_code,
deref_nullptr, // https://github.com/rust-lang/rust-bindgen/issues/1651
non_camel_case_types,
non_snake_case,
non_upper_case_globals,
clippy::upper_case_acronyms
)]
include!(concat!(env!("OUT_DIR"), "/arb.rs"));
| rust | MIT | 4bbdc568b4725a1a196c9db15746fd484336cb91 | 2026-01-04T20:25:24.884510Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples-builder/build.rs | examples-builder/build.rs | use glob::glob;
use std::{
fs::{File, read_dir, remove_file},
io::{self, Write},
path::Path,
process::Command,
};
const CARGO_MANIFEST_DIR: &str = env!("CARGO_MANIFEST_DIR");
fn rerun_all_but_target(dir: &Path) {
for entry in read_dir(dir).unwrap().filter_map(Result::ok) {
if "target" == entry.file_name() {
continue;
}
println!("cargo:rerun-if-changed={}", entry.path().to_string_lossy());
}
}
fn build_elfs() {
let out_dir = std::env::var_os("OUT_DIR").unwrap();
let dest_path = Path::new(&out_dir).join("vars.rs");
let _ = remove_file(&dest_path);
let mut dest = File::create(&dest_path).expect("failed to create vars.rs");
let is_release = std::env::var("PROFILE").unwrap() == "release";
let mut args = vec!["build", "--examples", "--target-dir", "target"];
if is_release {
args.insert(1, "--release"); // insert --release after "build"
}
let output = Command::new("cargo")
.args(args)
.current_dir("../examples")
.env_clear()
.envs(std::env::vars().filter(|x| !x.0.starts_with("CARGO_")))
.output()
.expect("cargo command failed to run");
if !output.status.success() {
io::stdout().write_all(&output.stdout).unwrap();
io::stderr().write_all(&output.stderr).unwrap();
panic!("cargo build of examples failed.");
}
for example in glob("../examples/examples/*.rs")
.unwrap()
.map(Result::unwrap)
{
let example = example.file_stem().unwrap().to_str().unwrap();
writeln!(
dest,
r#"#[allow(non_upper_case_globals)]
pub const {example}: &[u8] =
include_bytes!(r"{CARGO_MANIFEST_DIR}/../examples/target/riscv32im-ceno-zkvm-elf/{}/examples/{example}");"#,
std::env::var("PROFILE").unwrap()).expect("failed to write vars.rs");
}
rerun_all_but_target(Path::new("../examples"));
rerun_all_but_target(Path::new("../ceno_rt"));
rerun_all_but_target(Path::new("../guest_libs"));
}
fn main() {
println!("cargo:rerun-if-changed=build.rs");
println!("cargo:rerun-if-env-changed=PROFILE");
build_elfs();
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples-builder/src/lib.rs | examples-builder/src/lib.rs | #![deny(clippy::cargo)]
include!(concat!(env!("OUT_DIR"), "/vars.rs"));
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/lib.rs | gkr_iop/src/lib.rs | #![feature(variant_count)]
use crate::{
chip::Chip, circuit_builder::CircuitBuilder, error::CircuitBuilderError,
utils::lk_multiplicity::LkMultiplicity,
};
use either::Either;
use ff_ext::ExtensionField;
use multilinear_extensions::{Expression, impl_expr_from_unsigned, mle::ArcMultilinearExtension};
use std::marker::PhantomData;
use strum_macros::EnumIter;
use transcript::Transcript;
use witness::RowMajorMatrix;
pub mod chip;
pub mod circuit_builder;
pub mod cpu;
pub mod error;
pub mod evaluation;
pub mod gadgets;
pub mod gkr;
#[cfg(feature = "gpu")]
pub mod gpu;
pub mod hal;
pub mod selector;
pub mod tables;
pub mod utils;
pub type Phase1WitnessGroup<'a, E> = Vec<ArcMultilinearExtension<'a, E>>;
// format: [r_records, w_records, lk_records, zero_records]
pub type OutEvalGroups = [Vec<usize>; 4];
pub trait ProtocolBuilder<E: ExtensionField>: Sized {
type Params;
/// Create the GKR layers in the reverse order. For each layer, specify the
/// polynomial expressions, evaluation expressions of outputs and evaluation
/// positions of the inputs.
fn build_layer_logic(
cb: &mut CircuitBuilder<E>,
params: Self::Params,
) -> Result<Self, CircuitBuilderError>;
fn finalize(&mut self, cb: &mut CircuitBuilder<E>) -> (OutEvalGroups, Chip<E>);
fn n_committed(&self) -> usize {
todo!()
}
fn n_fixed(&self) -> usize {
todo!()
}
fn n_challenges(&self) -> usize {
todo!()
}
fn n_evaluations(&self) -> usize {
todo!()
}
fn n_layers(&self) -> usize {
todo!()
}
}
pub trait ProtocolWitnessGenerator<E: ExtensionField> {
type Trace;
/// The fixed witness.
fn fixed_witness_group(&self) -> RowMajorMatrix<E::BaseField>;
/// The vectors to be committed in the phase1.
fn phase1_witness_group(
&self,
phase1: Self::Trace,
wits: [&mut RowMajorMatrix<E::BaseField>; 2],
lk_multiplicity: &mut LkMultiplicity,
);
}
// TODO: the following trait consists of `commit_phase1`, `commit_phase2`,
// `gkr_phase` and `opening_phase`.
pub struct ProtocolProver<E: ExtensionField, Trans: Transcript<E>, PCS>(
PhantomData<(E, Trans, PCS)>,
);
// TODO: the following trait consists of `commit_phase1`, `commit_phase2`,
// `gkr_phase` and `opening_phase`.
pub struct ProtocolVerifier<E: ExtensionField, Trans: Transcript<E>, PCS>(
PhantomData<(E, Trans, PCS)>,
);
#[derive(Clone, Debug, Copy, EnumIter, PartialEq, Eq, serde::Serialize, serde::Deserialize)]
#[repr(usize)]
pub enum RAMType {
GlobalState = 0,
Register,
Memory,
Undefined,
}
impl_expr_from_unsigned!(RAMType);
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/circuit_builder.rs | gkr_iop/src/circuit_builder.rs | use itertools::{Itertools, chain};
use multilinear_extensions::{
Expression, Fixed, Instance, StructuralWitIn, StructuralWitInType, ToExpr, WitIn, WitnessId,
rlc_chip_record,
};
use serde::de::DeserializeOwned;
use std::{collections::HashMap, iter::once, marker::PhantomData};
use ff_ext::ExtensionField;
use crate::{
RAMType, error::CircuitBuilderError, gkr::layer::ROTATION_OPENING_COUNT,
selector::SelectorType, tables::LookupTable,
};
use p3::field::FieldAlgebra;
pub mod ram;
#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)]
#[serde(bound = "E: ExtensionField + DeserializeOwned")]
pub struct RotationParams<E: ExtensionField> {
pub rotation_eqs: Option<[Expression<E>; ROTATION_OPENING_COUNT]>,
pub rotation_cyclic_group_log2: usize,
pub rotation_cyclic_subgroup_size: usize,
}
/// namespace used for annotation, preserve meta info during circuit construction
#[derive(Clone, Debug, Default, serde::Serialize, serde::Deserialize)]
pub struct NameSpace {
namespace: Vec<String>,
}
impl NameSpace {
pub fn new<NR: Into<String>, N: FnOnce() -> NR>(name_fn: N) -> Self {
NameSpace {
namespace: vec![name_fn().into()],
}
}
pub fn namespace<NR: Into<String>, N: FnOnce() -> NR>(&self, name_fn: N) -> Self {
let mut new = self.clone();
new.push_namespace(name_fn().into());
new
}
pub(crate) fn push_namespace(&mut self, namespace: String) {
self.namespace.push(namespace)
}
pub(crate) fn pop_namespace(&mut self) {
let _ = self.namespace.pop();
}
pub(crate) fn compute_path(&self, this: String) -> String {
if this.chars().contains(&'/') {
panic!("'/' is not allowed in names");
}
chain!(self.get_namespaces(), once(&this)).join("/")
}
pub fn get_namespaces(&self) -> &[String] {
&self.namespace
}
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[serde(bound = "E: ExtensionField + DeserializeOwned")]
pub struct LogupTableExpression<E: ExtensionField> {
pub multiplicity: Expression<E>,
pub values: Expression<E>,
pub table_spec: SetTableSpec,
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
pub struct SetTableSpec {
pub len: Option<usize>,
pub structural_witins: Vec<StructuralWitIn>,
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[serde(bound = "E: ExtensionField + DeserializeOwned")]
pub struct SetTableExpression<E: ExtensionField> {
/// table expression
pub expr: Expression<E>,
// TODO make decision to have enum/struct
// for which option is more friendly to be processed by ConstrainSystem + recursive verifier
pub table_spec: SetTableSpec,
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[serde(bound = "E: ExtensionField + DeserializeOwned")]
pub struct ConstraintSystem<E: ExtensionField> {
pub ns: NameSpace,
pub num_witin: WitnessId,
pub witin_namespace_map: Vec<String>,
pub num_structural_witin: WitnessId,
pub structural_witins: Vec<StructuralWitIn>,
pub structural_witin_namespace_map: Vec<String>,
pub num_fixed: usize,
pub fixed_namespace_map: Vec<String>,
pub instance_openings: Vec<Instance>,
pub ec_point_exprs: Vec<Expression<E>>,
pub ec_slope_exprs: Vec<Expression<E>>,
pub ec_final_sum: Vec<Expression<E>>,
pub r_selector: Option<SelectorType<E>>,
pub r_expressions: Vec<Expression<E>>,
pub r_expressions_namespace_map: Vec<String>,
// for each read expression we store its ram type and original value before doing RLC
// the original value will be used for debugging
pub r_ram_types: Vec<(Expression<E>, Vec<Expression<E>>)>,
pub w_selector: Option<SelectorType<E>>,
pub w_expressions: Vec<Expression<E>>,
pub w_expressions_namespace_map: Vec<String>,
// for each write expression we store its ram type and original value before doing RLC
// the original value will be used for debugging
pub w_ram_types: Vec<(Expression<E>, Vec<Expression<E>>)>,
/// init/final ram expression
pub r_table_expressions: Vec<SetTableExpression<E>>,
pub r_table_expressions_namespace_map: Vec<String>,
pub w_table_expressions: Vec<SetTableExpression<E>>,
pub w_table_expressions_namespace_map: Vec<String>,
// specify whether constrains system cover only init_w
// as it imply w/r set and final_w might happen ACROSS shards
pub with_omc_init_only: bool,
pub lk_selector: Option<SelectorType<E>>,
/// lookup expression
pub lk_expressions: Vec<Expression<E>>,
pub lk_table_expressions: Vec<LogupTableExpression<E>>,
pub lk_expressions_namespace_map: Vec<String>,
pub lk_expressions_items_map: Vec<(LookupTable, Vec<Expression<E>>)>,
pub zero_selector: Option<SelectorType<E>>,
/// main constraints zero expression
pub assert_zero_expressions: Vec<Expression<E>>,
pub assert_zero_expressions_namespace_map: Vec<String>,
/// main constraints zero expression for expression degree > 1, which require sumcheck to prove
pub assert_zero_sumcheck_expressions: Vec<Expression<E>>,
pub assert_zero_sumcheck_expressions_namespace_map: Vec<String>,
/// max zero sumcheck degree
pub max_non_lc_degree: usize,
/// rotation argumment
pub rotations: Vec<(Expression<E>, Expression<E>)>,
pub rotation_params: Option<RotationParams<E>>,
// alpha, beta challenge for chip record
pub chip_record_alpha: Expression<E>,
pub chip_record_beta: Expression<E>,
pub debug_map: HashMap<usize, Vec<Expression<E>>>,
pub(crate) phantom: PhantomData<E>,
}
impl<E: ExtensionField> ConstraintSystem<E> {
pub fn new<NR: Into<String>, N: FnOnce() -> NR>(root_name_fn: N) -> Self {
Self {
num_witin: 0,
// platform,
witin_namespace_map: vec![],
num_structural_witin: 0,
structural_witins: vec![],
structural_witin_namespace_map: vec![],
num_fixed: 0,
fixed_namespace_map: vec![],
ns: NameSpace::new(root_name_fn),
instance_openings: vec![],
ec_final_sum: vec![],
ec_slope_exprs: vec![],
ec_point_exprs: vec![],
r_selector: None,
r_expressions: vec![],
r_expressions_namespace_map: vec![],
r_ram_types: vec![],
w_selector: None,
w_expressions: vec![],
w_expressions_namespace_map: vec![],
w_ram_types: vec![],
r_table_expressions: vec![],
r_table_expressions_namespace_map: vec![],
w_table_expressions: vec![],
w_table_expressions_namespace_map: vec![],
with_omc_init_only: false,
lk_selector: None,
lk_expressions: vec![],
lk_table_expressions: vec![],
lk_expressions_namespace_map: vec![],
lk_expressions_items_map: vec![],
zero_selector: None,
assert_zero_expressions: vec![],
assert_zero_expressions_namespace_map: vec![],
assert_zero_sumcheck_expressions: vec![],
assert_zero_sumcheck_expressions_namespace_map: vec![],
max_non_lc_degree: 0,
rotations: vec![],
rotation_params: None,
chip_record_alpha: Expression::Challenge(0, 1, E::ONE, E::ZERO),
chip_record_beta: Expression::Challenge(1, 1, E::ONE, E::ZERO),
debug_map: HashMap::new(),
phantom: std::marker::PhantomData,
}
}
pub fn create_witin<NR: Into<String>, N: FnOnce() -> NR>(&mut self, n: N) -> WitIn {
let wit_in = WitIn { id: self.num_witin };
self.num_witin = self.num_witin.strict_add(1);
let path = self.ns.compute_path(n().into());
self.witin_namespace_map.push(path);
wit_in
}
pub fn create_structural_witin<NR: Into<String>, N: FnOnce() -> NR>(
&mut self,
n: N,
witin_type: StructuralWitInType,
) -> StructuralWitIn {
let wit_in = StructuralWitIn {
id: self.num_structural_witin,
witin_type,
};
self.structural_witins.push(wit_in);
self.num_structural_witin = self.num_structural_witin.strict_add(1);
let path = self.ns.compute_path(n().into());
self.structural_witin_namespace_map.push(path);
wit_in
}
pub fn create_placeholder_structural_witin<NR: Into<String>, N: FnOnce() -> NR>(
&mut self,
n: N,
) -> StructuralWitIn {
self.create_structural_witin(n, StructuralWitInType::Empty)
}
pub fn create_fixed<NR: Into<String>, N: FnOnce() -> NR>(&mut self, n: N) -> Fixed {
let f = Fixed(self.num_fixed);
self.num_fixed += 1;
let path = self.ns.compute_path(n().into());
self.fixed_namespace_map.push(path);
f
}
pub fn query_instance(&self, idx: usize) -> Result<Instance, CircuitBuilderError> {
let i = Instance(idx);
Ok(i)
}
pub fn query_instance_for_openings(
&mut self,
idx: usize,
) -> Result<Instance, CircuitBuilderError> {
let i = Instance(idx);
assert!(
!self.instance_openings.contains(&i),
"query same pubio idx {idx} mle more than once",
);
self.instance_openings.push(i);
// return instance only count
Ok(Instance(self.instance_openings.len() - 1))
}
pub fn rlc_chip_record(&self, items: Vec<Expression<E>>) -> Expression<E> {
rlc_chip_record(
items,
self.chip_record_alpha.clone(),
self.chip_record_beta.clone(),
)
}
pub fn lk_record<NR: Into<String>, N: FnOnce() -> NR>(
&mut self,
name_fn: N,
rom_type: LookupTable,
record: Vec<Expression<E>>,
) -> Result<(), CircuitBuilderError> {
let rlc_record = self.rlc_chip_record(
std::iter::once(E::BaseField::from_canonical_u64(rom_type as u64).expr())
.chain(record.clone())
.collect(),
);
self.lk_expressions.push(rlc_record);
let path = self.ns.compute_path(name_fn().into());
self.lk_expressions_namespace_map.push(path);
// Since lk_expression is RLC(record) and when we're debugging
// it's helpful to recover the value of record itself.
self.lk_expressions_items_map.push((rom_type, record));
Ok(())
}
pub fn lk_table_record<NR, N>(
&mut self,
name_fn: N,
table_spec: SetTableSpec,
rom_type: LookupTable,
record: Vec<Expression<E>>,
multiplicity: Expression<E>,
) -> Result<(), CircuitBuilderError>
where
NR: Into<String>,
N: FnOnce() -> NR,
{
let rlc_record = self.rlc_chip_record(
vec![(rom_type as usize).into()]
.into_iter()
.chain(record.clone())
.collect_vec(),
);
assert_eq!(
rlc_record.degree(),
1,
"rlc lk_table_record degree ({})",
name_fn().into()
);
self.lk_table_expressions.push(LogupTableExpression {
values: rlc_record,
multiplicity,
table_spec,
});
let path = self.ns.compute_path(name_fn().into());
self.lk_expressions_namespace_map.push(path);
// Since lk_expression is RLC(record) and when we're debugging
// it's helpful to recover the value of record itself.
self.lk_expressions_items_map.push((rom_type, record));
Ok(())
}
pub fn r_table_record<NR, N>(
&mut self,
name_fn: N,
ram_type: RAMType,
table_spec: SetTableSpec,
record: Vec<Expression<E>>,
) -> Result<(), CircuitBuilderError>
where
NR: Into<String>,
N: FnOnce() -> NR,
{
let rlc_record = self.rlc_chip_record(record.clone());
self.r_table_rlc_record(
name_fn,
(ram_type as u64).into(),
table_spec,
record,
rlc_record,
)
}
pub fn r_table_rlc_record<NR, N>(
&mut self,
name_fn: N,
ram_type: Expression<E>,
table_spec: SetTableSpec,
record: Vec<Expression<E>>,
rlc_record: Expression<E>,
) -> Result<(), CircuitBuilderError>
where
NR: Into<String>,
N: FnOnce() -> NR,
{
self.r_table_expressions.push(SetTableExpression {
expr: rlc_record,
table_spec,
});
let path = self.ns.compute_path(name_fn().into());
self.r_table_expressions_namespace_map.push(path);
self.r_ram_types.push((ram_type, record));
Ok(())
}
pub fn w_table_record<NR, N>(
&mut self,
name_fn: N,
ram_type: RAMType,
table_spec: SetTableSpec,
record: Vec<Expression<E>>,
) -> Result<(), CircuitBuilderError>
where
NR: Into<String>,
N: FnOnce() -> NR,
{
let rlc_record = self.rlc_chip_record(record.clone());
self.w_table_rlc_record(
name_fn,
(ram_type as u64).into(),
table_spec,
record,
rlc_record,
)
}
pub fn w_table_rlc_record<NR, N>(
&mut self,
name_fn: N,
ram_type: Expression<E>,
table_spec: SetTableSpec,
record: Vec<Expression<E>>,
rlc_record: Expression<E>,
) -> Result<(), CircuitBuilderError>
where
NR: Into<String>,
N: FnOnce() -> NR,
{
self.w_table_expressions.push(SetTableExpression {
expr: rlc_record,
table_spec,
});
let path = self.ns.compute_path(name_fn().into());
self.w_table_expressions_namespace_map.push(path);
self.w_ram_types.push((ram_type, record));
Ok(())
}
pub fn read_record<NR: Into<String>, N: FnOnce() -> NR>(
&mut self,
name_fn: N,
ram_type: RAMType,
record: Vec<Expression<E>>,
) -> Result<(), CircuitBuilderError> {
let rlc_record = self.rlc_chip_record(record.clone());
self.read_rlc_record(name_fn, (ram_type as u64).into(), record, rlc_record)
}
pub fn read_rlc_record<NR: Into<String>, N: FnOnce() -> NR>(
&mut self,
name_fn: N,
ram_type: Expression<E>,
record: Vec<Expression<E>>,
rlc_record: Expression<E>,
) -> Result<(), CircuitBuilderError> {
self.r_expressions.push(rlc_record);
let path = self.ns.compute_path(name_fn().into());
self.r_expressions_namespace_map.push(path);
// Since r_expression is RLC(record) and when we're debugging
// it's helpful to recover the value of record itself.
self.r_ram_types.push((ram_type, record));
Ok(())
}
pub fn write_record<NR: Into<String>, N: FnOnce() -> NR>(
&mut self,
name_fn: N,
ram_type: RAMType,
record: Vec<Expression<E>>,
) -> Result<(), CircuitBuilderError> {
let rlc_record = self.rlc_chip_record(record.clone());
self.write_rlc_record(name_fn, (ram_type as u64).into(), record, rlc_record)
}
pub fn write_rlc_record<NR: Into<String>, N: FnOnce() -> NR>(
&mut self,
name_fn: N,
ram_type: Expression<E>,
record: Vec<Expression<E>>,
rlc_record: Expression<E>,
) -> Result<(), CircuitBuilderError> {
self.w_expressions.push(rlc_record);
let path = self.ns.compute_path(name_fn().into());
self.w_expressions_namespace_map.push(path);
// Since w_expression is RLC(record) and when we're debugging
// it's helpful to recover the value of record itself.
self.w_ram_types.push((ram_type, record));
Ok(())
}
pub fn ec_sum(
&mut self,
xs: Vec<Expression<E>>,
ys: Vec<Expression<E>>,
slopes: Vec<Expression<E>>,
final_sum: Vec<Expression<E>>,
) {
assert_eq!(xs.len(), 7);
assert_eq!(ys.len(), 7);
assert_eq!(slopes.len(), 7);
assert_eq!(final_sum.len(), 7 * 2);
assert_eq!(self.ec_point_exprs.len(), 0);
self.ec_point_exprs.extend(xs);
self.ec_point_exprs.extend(ys);
self.ec_slope_exprs = slopes;
self.ec_final_sum = final_sum;
}
pub fn require_zero<NR: Into<String>, N: FnOnce() -> NR>(
&mut self,
name_fn: N,
assert_zero_expr: Expression<E>,
) -> Result<(), CircuitBuilderError> {
assert!(
assert_zero_expr.degree() > 0,
"constant expression assert to zero ?"
);
if assert_zero_expr.degree() == 1 {
self.assert_zero_expressions.push(assert_zero_expr);
let path = self.ns.compute_path(name_fn().into());
self.assert_zero_expressions_namespace_map.push(path);
} else {
let assert_zero_expr = if assert_zero_expr.is_monomial_form() {
assert_zero_expr
} else {
let e = assert_zero_expr.get_monomial_form();
assert!(e.is_monomial_form(), "failed to put into monomial form");
e
};
self.max_non_lc_degree = self.max_non_lc_degree.max(assert_zero_expr.degree());
self.assert_zero_sumcheck_expressions.push(assert_zero_expr);
let path = self.ns.compute_path(name_fn().into());
self.assert_zero_sumcheck_expressions_namespace_map
.push(path);
}
Ok(())
}
    /// Run `cb` inside a child namespace so every constraint it registers
    /// gets prefixed with `name_fn()`'s path; the namespace is popped before
    /// returning `cb`'s result.
    pub fn namespace<NR: Into<String>, N: FnOnce() -> NR, T>(
        &mut self,
        name_fn: N,
        cb: impl FnOnce(&mut ConstraintSystem<E>) -> T,
    ) -> T {
        self.ns.push_namespace(name_fn().into());
        let t = cb(self);
        self.ns.pop_namespace();
        t
    }
    /// Mark this constraint system as "OMC init only".
    // NOTE(review): semantics of the flag are defined where
    // `with_omc_init_only` is consumed — not visible here.
    pub fn set_omc_init_only(&mut self) {
        self.with_omc_init_only = true;
    }
}
impl<E: ExtensionField> ConstraintSystem<E> {
    /// Append `expr` to the debug expressions registered under `debug_index`,
    /// creating the entry on first use.
    pub fn register_debug_expr<T: Into<usize>>(&mut self, debug_index: T, expr: Expression<E>) {
        let key = debug_index.into();
        self.debug_map.entry(key).or_default().push(expr);
    }

    /// Return the debug expressions registered under `debug_index`.
    ///
    /// # Panics
    /// Panics if nothing was registered for that index.
    pub fn get_debug_expr<T: Into<usize>>(&mut self, debug_index: T) -> &[Expression<E>] {
        let key = debug_index.into();
        // Combinator form of the original match; panic message unchanged.
        self.debug_map
            .get(&key)
            .unwrap_or_else(|| panic!("non-existent entry {}", key))
    }
}
/// Thin, ergonomic wrapper around a mutable [`ConstraintSystem`] reference;
/// every builder method delegates to the underlying system.
#[derive(Debug)]
pub struct CircuitBuilder<'a, E: ExtensionField> {
    pub cs: &'a mut ConstraintSystem<E>,
}
impl<'a, E: ExtensionField> CircuitBuilder<'a, E> {
    /// Wrap an existing constraint system in a builder.
    pub fn new(cs: &'a mut ConstraintSystem<E>) -> Self {
        Self { cs }
    }

    /// Allocate a fresh witness column named by `name_fn`.
    pub fn create_witin<NR, N>(&mut self, name_fn: N) -> WitIn
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs.create_witin(name_fn)
    }

    /// create namespace to prefix all constraints define under the scope
    pub fn namespace<NR: Into<String>, N: FnOnce() -> NR, T>(
        &mut self,
        name_fn: N,
        cb: impl for<'b> FnOnce(&mut CircuitBuilder<'b, E>) -> Result<T, CircuitBuilderError>,
    ) -> Result<T, CircuitBuilderError> {
        self.cs.namespace(name_fn, |cs| {
            // Re-wrap the inner system so `cb` sees a builder, not a raw CS.
            let mut inner_circuit_builder = CircuitBuilder::<'_, E>::new(cs);
            cb(&mut inner_circuit_builder)
        })
    }
    /// Allocate a witness and (unless `debug`) constrain it equal to `input`.
    pub fn create_witin_from_exprs<NR, N>(
        &mut self,
        name_fn: N,
        input: Expression<E>,
        debug: bool,
    ) -> Result<WitIn, CircuitBuilderError>
    where
        NR: Into<String> + Clone,
        N: FnOnce() -> NR,
    {
        self.namespace(
            || "witin_from_expr",
            |cb| {
                let name = name_fn().into();
                let wit = cb.create_witin(|| name.clone());
                // In debug mode the binding constraint is skipped on purpose.
                if !debug {
                    cb.require_zero(|| name.clone(), wit.expr() - input)?;
                }
                Ok(wit)
            },
        )
    }

    /// Allocate a structural witness of the given type; delegates to the CS.
    pub fn create_structural_witin<NR, N>(
        &mut self,
        name_fn: N,
        witin_type: StructuralWitInType,
    ) -> StructuralWitIn
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs.create_structural_witin(name_fn, witin_type)
    }

    /// Allocate a placeholder structural witness; delegates to the CS.
    pub fn create_placeholder_structural_witin<NR, N>(&mut self, name_fn: N) -> StructuralWitIn
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs.create_placeholder_structural_witin(name_fn)
    }

    /// Allocate a fixed (preprocessed) column; delegates to the CS.
    pub fn create_fixed<NR, N>(&mut self, name_fn: N) -> Fixed
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs.create_fixed(name_fn)
    }
    /// Register a lookup into `rom_type`; delegates to the CS.
    pub fn lk_record<NR, N>(
        &mut self,
        name_fn: N,
        rom_type: LookupTable,
        items: Vec<Expression<E>>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs.lk_record(name_fn, rom_type, items)
    }

    /// Register a lookup-table side record with its multiplicity; delegates to the CS.
    pub fn lk_table_record<NR, N>(
        &mut self,
        name_fn: N,
        table_spec: SetTableSpec,
        rom_type: LookupTable,
        record: Vec<Expression<E>>,
        multiplicity: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs
            .lk_table_record(name_fn, table_spec, rom_type, record, multiplicity)
    }

    /// Register a RAM read record on the table side; delegates to the CS.
    pub fn r_table_record<NR, N>(
        &mut self,
        name_fn: N,
        ram_type: RAMType,
        table_spec: SetTableSpec,
        record: Vec<Expression<E>>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs
            .r_table_record(name_fn, ram_type, table_spec, record)
    }

    /// Table-side read record with a caller-supplied RLC; delegates to the CS.
    pub fn r_table_rlc_record<NR, N>(
        &mut self,
        name_fn: N,
        ram_type: Expression<E>,
        table_spec: SetTableSpec,
        record: Vec<Expression<E>>,
        rlc_record: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs
            .r_table_rlc_record(name_fn, ram_type, table_spec, record, rlc_record)
    }

    /// Register a RAM write record on the table side; delegates to the CS.
    pub fn w_table_record<NR, N>(
        &mut self,
        name_fn: N,
        ram_type: RAMType,
        table_spec: SetTableSpec,
        record: Vec<Expression<E>>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs
            .w_table_record(name_fn, ram_type, table_spec, record)
    }

    /// Table-side write record with a caller-supplied RLC; delegates to the CS.
    pub fn w_table_rlc_record<NR, N>(
        &mut self,
        name_fn: N,
        ram_type: Expression<E>,
        table_spec: SetTableSpec,
        record: Vec<Expression<E>>,
        rlc_record: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs
            .w_table_rlc_record(name_fn, ram_type, table_spec, record, rlc_record)
    }

    /// Register a RAM read record; delegates to the CS.
    pub fn read_record<NR, N>(
        &mut self,
        name_fn: N,
        ram_type: RAMType,
        record: Vec<Expression<E>>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs.read_record(name_fn, ram_type, record)
    }

    /// RAM read record with a caller-supplied RLC; delegates to the CS.
    pub fn read_rlc_record<NR, N>(
        &mut self,
        name_fn: N,
        ram_type: Expression<E>,
        record: Vec<Expression<E>>,
        rlc_record: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs
            .read_rlc_record(name_fn, ram_type, record, rlc_record)
    }

    /// Register a RAM write record; delegates to the CS.
    pub fn write_record<NR, N>(
        &mut self,
        name_fn: N,
        ram_type: RAMType,
        record: Vec<Expression<E>>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs.write_record(name_fn, ram_type, record)
    }

    /// RAM write record with a caller-supplied RLC; delegates to the CS.
    pub fn write_rlc_record<NR, N>(
        &mut self,
        name_fn: N,
        ram_type: Expression<E>,
        record: Vec<Expression<E>>,
        rlc_record: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.cs
            .write_rlc_record(name_fn, ram_type, record, rlc_record)
    }
    /// Fold `records` into a single random-linear-combination expression;
    /// delegates to the CS.
    pub fn rlc_chip_record(&self, records: Vec<Expression<E>>) -> Expression<E> {
        self.cs.rlc_chip_record(records)
    }

    /// Register EC summation expressions; delegates to the CS (see
    /// [`ConstraintSystem::ec_sum`] for the expected limb counts).
    pub fn ec_sum(
        &mut self,
        xs: Vec<Expression<E>>,
        ys: Vec<Expression<E>>,
        slope: Vec<Expression<E>>,
        final_sum: Vec<Expression<E>>,
    ) {
        self.cs.ec_sum(xs, ys, slope, final_sum);
    }

    /// Allocate a witness constrained to be boolean (0 or 1).
    pub fn create_bit<NR, N>(&mut self, name_fn: N) -> Result<WitIn, CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR + Clone,
    {
        let bit = self.cs.create_witin(name_fn.clone());
        self.assert_bit(name_fn, bit.expr())?;
        Ok(bit)
    }

    /// Allocate a witness range-checked to 8 bits.
    pub fn create_u8<NR, N>(&mut self, name_fn: N) -> Result<WitIn, CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR + Clone,
    {
        let byte = self.cs.create_witin(name_fn.clone());
        self.assert_ux::<_, _, 8>(name_fn, byte.expr())?;
        Ok(byte)
    }

    /// Allocate a witness range-checked to 16 bits.
    pub fn create_u16<NR, N>(&mut self, name_fn: N) -> Result<WitIn, CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR + Clone,
    {
        let limb = self.cs.create_witin(name_fn.clone());
        self.assert_ux::<_, _, 16>(name_fn, limb.expr())?;
        Ok(limb)
    }
    /// Create a new WitIn constrained to be equal to input expression.
    pub fn flatten_expr<NR, N>(
        &mut self,
        name_fn: N,
        expr: Expression<E>,
    ) -> Result<WitIn, CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR + Clone,
    {
        let wit = self.cs.create_witin(name_fn.clone());
        self.require_equal(name_fn, wit.expr(), expr)?;
        Ok(wit)
    }

    /// Constrain `assert_zero_expr == 0` under a `require_zero` sub-namespace.
    pub fn require_zero<NR, N>(
        &mut self,
        name_fn: N,
        assert_zero_expr: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.namespace(
            || "require_zero",
            |cb| cb.cs.require_zero(name_fn, assert_zero_expr),
        )
    }

    /// Constrain `a == b` (both sides are put into monomial form first).
    pub fn require_equal<NR, N>(
        &mut self,
        name_fn: N,
        a: Expression<E>,
        b: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.namespace(
            || "require_equal",
            |cb| {
                cb.cs
                    .require_zero(name_fn, a.get_monomial_form() - b.get_monomial_form())
            },
        )
    }

    /// Constrain `expr == 1`.
    pub fn require_one<NR, N>(
        &mut self,
        name_fn: N,
        expr: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.namespace(|| "require_one", |cb| cb.cs.require_zero(name_fn, 1 - expr))
    }
    /// Constrain `target == cond ? true_expr : false_expr`, assuming `cond`
    /// is boolean.
    pub fn condition_require_equal<NR, N>(
        &mut self,
        name_fn: N,
        cond: Expression<E>,
        target: Expression<E>,
        true_expr: Expression<E>,
        false_expr: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // cond * (true_expr) + (1 - cond) * false_expr
        // => false_expr + cond * true_expr - cond * false_expr
        self.namespace(
            || "cond_require_equal",
            |cb| {
                let cond_target = false_expr.clone() + cond.clone() * true_expr - cond * false_expr;
                cb.cs.require_zero(name_fn, target - cond_target)
            },
        )
    }

    /// Constrain `expr == 0` whenever `cond` is 1 (no-op when `cond` is 0).
    pub fn condition_require_zero<NR, N>(
        &mut self,
        name_fn: N,
        cond: Expression<E>,
        expr: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // cond * expr
        self.namespace(
            || "cond_require_zero",
            |cb| cb.cs.require_zero(name_fn, cond * expr.expr()),
        )
    }

    /// Constrain `expr == 1` whenever `cond` is 1 (no-op when `cond` is 0).
    pub fn condition_require_one<NR, N>(
        &mut self,
        name_fn: N,
        cond: Expression<E>,
        expr: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        // cond * expr
        self.namespace(
            || "cond_require_one",
            |cb| cb.cs.require_zero(name_fn, cond * (expr.expr() - 1)),
        )
    }

    /// Build (without constraining) the expression
    /// `cond * when_true + (1 - cond) * when_false`.
    pub fn select(
        &mut self,
        cond: &Expression<E>,
        when_true: &Expression<E>,
        when_false: &Expression<E>,
    ) -> Expression<E> {
        cond * when_true + (1 - cond) * when_false
    }
    /// Assert `expr` fits in `C` bits (compile-time constant width); thin
    /// wrapper over [`Self::assert_const_range`].
    pub fn assert_ux<NR, N, const C: usize>(
        &mut self,
        name_fn: N,
        expr: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.assert_const_range(name_fn, expr, C)
    }
pub fn assert_dynamic_range<NR, N>(
&mut self,
name_fn: N,
expr: Expression<E>,
bits: Expression<E>,
) -> Result<(), CircuitBuilderError>
where
NR: Into<String>,
N: FnOnce() -> NR,
{
self.lk_record(name_fn, LookupTable::Dynamic, vec![expr, bits])?;
Ok(())
}
    /// Assert `expr` fits in `max_bits` bits (known at circuit-build time).
    /// Width 1 is special-cased to the cheaper boolean constraint; wider
    /// widths go through the dynamic range lookup table.
    pub fn assert_const_range<NR, N>(
        &mut self,
        name_fn: N,
        expr: Expression<E>,
        max_bits: usize,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        if max_bits == 1 {
            self.assert_bit(name_fn, expr)
        } else {
            self.namespace(
                || "assert_const_range",
                |cb| {
                    cb.lk_record(
                        name_fn,
                        LookupTable::Dynamic,
                        vec![expr, E::BaseField::from_canonical_usize(max_bits).expr()],
                    )
                },
            )
        }
    }

    /// Assert `expr` is a byte (8-bit) via the dynamic range lookup table.
    pub fn assert_byte<NR, N>(
        &mut self,
        name_fn: N,
        expr: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.namespace(
            || "assert_byte",
            |cb| {
                cb.lk_record(
                    name_fn,
                    LookupTable::Dynamic,
                    vec![expr, E::BaseField::from_canonical_usize(8).expr()],
                )
            },
        )
    }

    /// Assert two expressions are bytes with a single `DoubleU8` lookup —
    /// half the lookups of two separate byte checks.
    pub fn assert_double_u8<NR, N>(
        &mut self,
        name_fn: N,
        a_expr: Expression<E>,
        b_expr: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.namespace(
            || "assert_double_u8",
            |cb| cb.lk_record(name_fn, LookupTable::DoubleU8, vec![a_expr, b_expr]),
        )
    }
pub fn assert_bytes<NR, N>(
&mut self,
name_fn: N,
exprs: &[impl ToExpr<E, Output = Expression<E>> + Clone],
) -> Result<(), CircuitBuilderError>
where
NR: Into<String>,
N: FnOnce() -> NR,
{
let name = name_fn().into();
for (i, pair) in exprs.chunks(2).enumerate() {
match pair {
[a, b] => {
self.assert_double_u8(|| format!("{}_{i:?}", name), a.expr(), b.expr())?
}
[a] => {
self.assert_double_u8(|| format!("{}_{i:?}", name), a.expr(), Expression::ZERO)?
}
_ => {}
}
}
Ok(())
}
    /// Assert `expr` is boolean by constraining `expr * (1 - expr) == 0`.
    pub fn assert_bit<NR, N>(
        &mut self,
        name_fn: N,
        expr: Expression<E>,
    ) -> Result<(), CircuitBuilderError>
    where
        NR: Into<String>,
        N: FnOnce() -> NR,
    {
        self.namespace(
            || "assert_bit",
            |cb| cb.cs.require_zero(name_fn, &expr * (1 - &expr)),
        )
    }
/// Assert `rom_type(a, b) = c` and that `a, b, c` are all bytes.
pub fn logic_u8(
&mut self,
rom_type: LookupTable,
a: Expression<E>,
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr.rs | gkr_iop/src/gkr.rs | use core::fmt;
use ff_ext::ExtensionField;
use itertools::{Itertools, izip};
use layer::{Layer, LayerWitness, sumcheck_layer::LayerProof};
use multilinear_extensions::mle::{Point, PointAndEval};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use sumcheck::macros::{entered_span, exit_span};
use transcript::Transcript;
use crate::{
error::BackendError,
hal::{ProverBackend, ProverDevice},
selector::SelectorContext,
};
pub mod booleanhypercube;
pub mod layer;
pub mod layer_constraint_system;
pub mod mock;
/// Static description of a GKR circuit: its layers plus bookkeeping for the
/// number of challenges and evaluation claims threaded through proving.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "E: ExtensionField + DeserializeOwned")]
pub struct GKRCircuit<E: ExtensionField> {
    pub layers: Vec<Layer<E>>,
    /// Indices (into the running evaluation vector) of the final outputs.
    pub final_out_evals: Vec<usize>,
    pub n_challenges: usize,
    pub n_evaluations: usize,
}
/// Per-layer witnesses for one execution of the circuit.
#[derive(Clone, Debug)]
pub struct GKRCircuitWitness<'a, PB: ProverBackend> {
    pub layers: Vec<LayerWitness<'a, PB>>,
}
/// The output layer's witness, newtyped for clarity.
#[derive(Clone, Debug)]
pub struct GKRCircuitOutput<'a, PB: ProverBackend>(pub LayerWitness<'a, PB>);
/// Everything the prover hands back: the layer proofs, the opening claims,
/// and the per-layer sumcheck points.
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize, Evaluation: Serialize",
    deserialize = "E::BaseField: DeserializeOwned, Evaluation: DeserializeOwned"
))]
pub struct GKRProverOutput<E: ExtensionField, Evaluation> {
    pub gkr_proof: GKRProof<E>,
    pub opening_evaluations: Vec<Evaluation>,
    pub rt: Vec<Point<E>>,
}
/// One sumcheck proof per layer, front (output) to back (input).
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize",
    deserialize = "E::BaseField: DeserializeOwned"
))]
pub struct GKRProof<E: ExtensionField>(pub Vec<LayerProof<E>>);
/// A claimed evaluation `value` of polynomial `poly` at `point`.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize",
    deserialize = "E::BaseField: DeserializeOwned"
))]
pub struct Evaluation<E: ExtensionField> {
    pub value: E,
    pub point: Point<E>,
    pub poly: usize,
}
/// Claims the verifier extracts on success, to be checked by the PCS opening.
pub struct GKRClaims<Evaluation>(pub Vec<Evaluation>);
impl<E: ExtensionField> GKRCircuit<E> {
    /// Prove all layers front (output) to back (input), threading the running
    /// evaluation claims and challenges through each layer's sumcheck.
    /// Returns the per-layer proofs, the input-layer opening claims, and the
    /// final sumcheck points.
    #[allow(clippy::too_many_arguments)]
    pub fn prove<PB: ProverBackend<E = E>, PD: ProverDevice<PB>>(
        &self,
        num_threads: usize,
        max_num_variables: usize,
        circuit_wit: GKRCircuitWitness<PB>,
        out_evals: &[PointAndEval<E>],
        pub_io_evals: &[E],
        challenges: &[E],
        transcript: &mut impl Transcript<E>,
        selector_ctxs: &[SelectorContext],
    ) -> Result<GKRProverOutput<E, Evaluation<E>>, BackendError> {
        let mut running_evals = out_evals.to_vec();
        // running evals is a global referable within chip
        running_evals.resize(self.n_evaluations, PointAndEval::default());
        let mut challenges = challenges.to_vec();
        let span = entered_span!("layer_proof", profiling_2 = true);
        // Each layer mutates `running_evals`/`challenges` in place, so the
        // layers must be proven strictly in order.
        let (sumcheck_proofs, rt): (Vec<_>, Vec<_>) = izip!(&self.layers, circuit_wit.layers)
            .enumerate()
            .map(|(i, (layer, layer_wit))| {
                tracing::debug!("prove layer {i} layer with layer name {}", layer.name);
                let span = entered_span!("per_layer_proof", profiling_3 = true);
                let res = layer.prove::<_, PB, PD>(
                    num_threads,
                    max_num_variables,
                    layer_wit,
                    &mut running_evals,
                    pub_io_evals,
                    &mut challenges,
                    transcript,
                    selector_ctxs,
                );
                exit_span!(span);
                res
            })
            .unzip();
        exit_span!(span);
        let opening_evaluations = self.opening_evaluations(&running_evals);
        Ok(GKRProverOutput {
            gkr_proof: GKRProof(sumcheck_proofs),
            opening_evaluations,
            rt,
        })
    }
    /// Verify the per-layer sumcheck proofs in the same order the prover
    /// produced them, returning the opening claims and the final point.
    #[allow(clippy::too_many_arguments)]
    pub fn verify(
        &self,
        max_num_variables: usize,
        gkr_proof: GKRProof<E>,
        out_evals: &[PointAndEval<E>],
        pub_io_evals: &[E],
        raw_pi: &[Vec<E::BaseField>],
        challenges: &[E],
        transcript: &mut impl Transcript<E>,
        selector_ctxs: &[SelectorContext],
    ) -> Result<(GKRClaims<Evaluation<E>>, Point<E>), BackendError>
    where
        E: ExtensionField,
    {
        let GKRProof(sumcheck_proofs) = gkr_proof;
        let mut challenges = challenges.to_vec();
        let mut evaluations = out_evals.to_vec();
        evaluations.resize(self.n_evaluations, PointAndEval::default());
        // The accumulator is discarded each round (`|_, ...|`): only the last
        // layer's point survives, while `evaluations`/`challenges` carry the
        // actual cross-layer state.
        let rt = izip!(&self.layers, sumcheck_proofs).enumerate().try_fold(
            vec![],
            |_, (i, (layer, layer_proof))| {
                tracing::debug!("verifier layer {i} layer with layer name {}", layer.name);
                let rt = layer.verify(
                    max_num_variables,
                    layer_proof,
                    &mut evaluations,
                    pub_io_evals,
                    raw_pi,
                    &mut challenges,
                    transcript,
                    selector_ctxs,
                )?;
                Ok(rt)
            },
        )?;
        Ok((GKRClaims(self.opening_evaluations(&evaluations)), rt))
    }
    /// Output opening evaluations. First witin and then fixed.
    fn opening_evaluations(&self, evaluations: &[PointAndEval<E>]) -> Vec<Evaluation<E>> {
        // The input (last) layer records which running-eval slot each of its
        // input polynomials corresponds to.
        let input_layer = self.layers.last().unwrap();
        input_layer
            .in_eval_expr
            .iter()
            .enumerate()
            .map(|(poly, eval)| {
                let PointAndEval { point, eval: value } = evaluations[*eval].clone();
                Evaluation { value, point, poly }
            })
            .collect_vec()
    }
}
impl<E: ExtensionField> fmt::Display for GKRProof<E> {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
// overall size
let overall_size = bincode::serialized_size(&self).expect("serialization error");
write!(f, "overall_size {:.2}mb.", byte_to_mb(overall_size),)
}
}
/// Convert a size in bytes to mebibytes (MiB) as a floating-point value.
fn byte_to_mb(byte_size: u64) -> f64 {
    const BYTES_PER_MB: f64 = 1024.0 * 1024.0;
    byte_size as f64 / BYTES_PER_MB
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/error.rs | gkr_iop/src/error.rs | use sumcheck::structs::VerifierError;
use thiserror::Error;
#[derive(Clone, Debug, Error)]
pub enum BackendError {
#[error("layer verification failed: {0:?}, {1:?}")]
LayerVerificationFailed(Box<str>, VerifierError),
#[error("circuit build faile")]
CircuitError(Box<str>),
}
#[derive(Clone, Debug, Error)]
pub enum CircuitBuilderError {
#[error("circuit build faile")]
CircuitError(Box<str>),
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/utils.rs | gkr_iop/src/utils.rs | pub mod lk_multiplicity;
use ff_ext::{ExtensionField, SmallField};
use itertools::Itertools;
use multilinear_extensions::{
Fixed, WitIn, WitnessId,
mle::{ArcMultilinearExtension, MultilinearExtension},
util::ceil_log2,
virtual_poly::{build_eq_x_r_vec, eq_eval},
};
use p3::field::FieldAlgebra;
use rayon::{
iter::{IndexedParallelIterator, IntoParallelIterator, ParallelExtend, ParallelIterator},
slice::{ParallelSlice, ParallelSliceMut},
};
use crate::gkr::booleanhypercube::BooleanHypercube;
/// Build the MLE whose evaluations are `mle`'s evaluations rotated one step
/// along the boolean-hypercube cycle, chunk by chunk.
///
/// Each chunk of `2^cyclic_group_log2_size` evaluations is permuted so that
/// position `rotation_index[i]` receives the original value at
/// `rotation_index[i + 1]` (iterated in reverse so reads precede writes).
// NOTE(review): the `first == last` branch and the unconditional
// `rotate_chunk[0] = original_chunk[0]` handle the cycle closure / fixed
// point 0 of the hypercube traversal — confirm against `BooleanHypercube`.
pub fn rotation_next_base_mle<'a, E: ExtensionField>(
    bh: &BooleanHypercube,
    mle: &ArcMultilinearExtension<'a, E>,
    cyclic_group_log2_size: usize,
) -> MultilinearExtension<'a, E> {
    let cyclic_group_size = 1 << cyclic_group_log2_size;
    let rotation_index = bh.into_iter().take(cyclic_group_size).collect_vec();
    // Zero-fill in parallel, then overwrite chunk-wise below.
    let mut rotated_mle_evals = Vec::with_capacity(mle.evaluations().len());
    rotated_mle_evals.par_extend(
        (0..mle.evaluations().len())
            .into_par_iter()
            .map(|_| E::BaseField::ZERO),
    );
    rotated_mle_evals
        .par_chunks_mut(cyclic_group_size)
        .zip(mle.get_base_field_vec().par_chunks(cyclic_group_size))
        .for_each(|(rotate_chunk, original_chunk)| {
            let first = rotation_index[0] as usize;
            let last = rotation_index[rotation_index.len() - 1] as usize;
            if first == last {
                rotate_chunk[last] = original_chunk[first]
            }
            rotate_chunk[0] = original_chunk[0];
            for i in (0..rotation_index.len() - 1).rev() {
                let to = rotation_index[i] as usize;
                let from = rotation_index[i + 1] as usize;
                rotate_chunk[to] = original_chunk[from];
            }
        });
    MultilinearExtension::from_evaluation_vec_smart(mle.num_vars(), rotated_mle_evals)
}
/// Build a selector MLE that is `eq` at the first `cyclic_subgroup_size`
/// cycle positions of each `2^cyclic_group_log2_size`-sized chunk and zero
/// elsewhere.
pub fn rotation_selector<'a, E: ExtensionField>(
    bh: &BooleanHypercube,
    eq: &[E],
    cyclic_subgroup_size: usize,
    cyclic_group_log2_size: usize,
    total_len: usize,
) -> MultilinearExtension<'a, E> {
    assert!(total_len.is_power_of_two());
    let cyclic_group_size = 1 << cyclic_group_log2_size;
    assert!(cyclic_subgroup_size <= cyclic_group_size);
    let rotation_index = bh.into_iter().take(cyclic_subgroup_size).collect_vec();
    // Zero-fill in parallel, then copy the selected eq entries chunk-wise.
    let mut rotated_mle_evals = Vec::with_capacity(total_len);
    rotated_mle_evals.par_extend((0..total_len).into_par_iter().map(|_| E::ZERO));
    rotated_mle_evals
        .par_chunks_mut(cyclic_group_size)
        .zip_eq(eq.par_chunks(cyclic_group_size))
        .for_each(|(rotate_chunk, eq_chunk)| {
            for i in (0..rotation_index.len()).rev() {
                let to = rotation_index[i] as usize;
                rotate_chunk[to] = eq_chunk[to];
            }
        });
    MultilinearExtension::from_evaluation_vec_smart(ceil_log2(total_len), rotated_mle_evals)
}
/// Succinct evaluation of the rotation selector at a pair of points:
///
/// sel(rx)
/// = (\sum_{b = 0}^{cyclic_subgroup_size - 1} eq(out_point[..cyclic_group_log2_size], b) * eq(in_point[..cyclic_group_log2_size], b))
/// * \prod_{k = cyclic_group_log2_size}^{n - 1} eq(out_point[k], in_point[k])
pub fn rotation_selector_eval<E: ExtensionField>(
    bh: &BooleanHypercube,
    out_point: &[E],
    in_point: &[E],
    cyclic_subgroup_size: usize,
    cyclic_group_log2_size: usize,
) -> E {
    let cyclic_group_size = 1 << cyclic_group_log2_size;
    assert!(cyclic_subgroup_size <= cyclic_group_size);
    let rotation_index = bh.into_iter().take(cyclic_subgroup_size).collect_vec();
    // eq tables over the low (in-group) variables of each point.
    let out_subgroup_eq = build_eq_x_r_vec(&out_point[..cyclic_group_log2_size]);
    let in_subgroup_eq = build_eq_x_r_vec(&in_point[..cyclic_group_log2_size]);
    let mut eval = E::ZERO;
    for b in rotation_index {
        let b = b as usize;
        eval += out_subgroup_eq[b] * in_subgroup_eq[b];
    }
    // High variables must agree between the two points.
    eval * eq_eval(
        &out_point[cyclic_group_log2_size..],
        &in_point[cyclic_group_log2_size..],
    )
}
/// Map a signed 64-bit integer into the field `F`: the canonical embedding of
/// its magnitude, negated for negative inputs.
pub fn i64_to_base<F: SmallField>(x: i64) -> F {
    if x >= 0 {
        F::from_canonical_u64(x as u64)
    } else {
        // `unsigned_abs` avoids the overflow panic `(-x) as u64` would hit
        // for `x == i64::MIN` in debug builds.
        -F::from_canonical_u64(x.unsigned_abs())
    }
}
/// Returns `[0 + offset, ..., N - 1 + offset]`.
#[must_use]
pub const fn indices_arr_with_offset<const N: usize, const OFFSET: usize>() -> [usize; N] {
    // `const fn` bodies cannot use iterators yet, so fill with a while loop:
    // start every slot at OFFSET and add its own index.
    let mut out = [OFFSET; N];
    let mut idx = 0;
    while idx < N {
        out[idx] += idx;
        idx += 1;
    }
    out
}
/// Returns `[0 + offset, ..., N - 1 + offset]` for a runtime `offset`.
pub fn indices_arr_with_offset_non_const<const N: usize>(offset: usize) -> [usize; N] {
    // Unlike the `const fn` variant above, this can use the idiomatic
    // array initializer instead of a manual while loop.
    std::array::from_fn(|i| i + offset)
}
/// Returns `([WitIn(0), ..., WitIn(N - 1)], [Fixed(0), ..., Fixed(M - 1)],
/// [WitIn(N + M), ..., WitIn(N + M + Q - 1)])`.
///
/// Note: fixed columns are indexed from 0 in their own id space, while the
/// trailing `eqs` witnesses continue the witness id space after `N + M`.
/// TODO remove me
#[must_use]
pub const fn wits_fixed_and_eqs<const N: usize, const M: usize, const Q: usize>()
-> ([WitIn; N], [Fixed; M], [WitIn; Q]) {
    let mut wits = [WitIn { id: 0 }; N];
    let mut i = 0;
    while i < N {
        wits[i] = WitIn { id: i as WitnessId };
        i += 1;
    }
    let mut i = 0;
    let mut fixed = [Fixed(0); M];
    while i < M {
        fixed[i] = Fixed(i);
        i += 1;
    }
    let mut i = 0;
    let mut eqs = [WitIn { id: 0 }; Q];
    while i < Q {
        eqs[i] = WitIn {
            id: (i + N + M) as WitnessId,
        };
        i += 1;
    }
    (wits, fixed, eqs)
}
/// This is to compute a variant of eq(\mathbf{x}, \mathbf{y}) for indices in
/// [0..=max_idx]. Specifically, it is an MLE of the following vector:
/// partial_eq_{\mathbf{x}}(\mathbf{y})
/// = \sum_{\mathbf{b}=0}^{max_idx} \prod_{i=0}^{n-1} (x_i y_i b_i + (1 - x_i)(1 - y_i)(1 - b_i))
pub fn eq_eval_less_or_equal_than<E: ExtensionField>(max_idx: usize, a: &[E], b: &[E]) -> E {
    assert!(a.len() >= b.len());
    // Compute running product of ( x_i y_i + (1 - x_i)(1 - y_i) )_{0 <= i <= n}
    let running_product = {
        let mut running_product = Vec::with_capacity(b.len() + 1);
        running_product.push(E::ONE);
        for i in 0..b.len() {
            let x = running_product[i] * (a[i] * b[i] + (E::ONE - a[i]) * (E::ONE - b[i]));
            running_product.push(x);
        }
        running_product
    };
    // Suffix products that additionally bind each variable to the
    // corresponding bit of `max_idx`.
    let running_product2 = {
        let mut running_product = vec![E::ZERO; b.len() + 1];
        running_product[b.len()] = E::ONE;
        for i in (0..b.len()).rev() {
            let bit = E::from_canonical_u64(((max_idx >> i) & 1) as u64);
            running_product[i] = running_product[i + 1]
                * (a[i] * b[i] * bit + (E::ONE - a[i]) * (E::ONE - b[i]) * (E::ONE - bit));
        }
        running_product
    };
    // Here is an example of how this works:
    // Suppose max_idx = (110101)_2
    // Then ans = eq(a, b)
    // - eq(11011, a[1..6], b[1..6])eq(a[0..1], b[0..1])
    // - eq(111, a[3..6], b[3..6])eq(a[0..3], b[0..3])
    let mut ans = running_product[b.len()];
    for i in 0..b.len() {
        let bit = (max_idx >> i) & 1;
        if bit == 1 {
            continue;
        }
        ans -= running_product[i] * running_product2[i + 1] * a[i] * b[i];
    }
    // Variables of `a` beyond `b`'s length are pinned to 0.
    for v in a.iter().skip(b.len()) {
        ans *= E::ONE - *v;
    }
    ans
}
/// Succinctly evaluate the MLE M(x0, x1, ..., xn) of an address vector at
/// r = [r0, r1, ..., rn],
/// where `M = descending * scaled * M' + offset`,
/// `offset` and `scaled` are constants, `descending` is +1/-1,
/// and M' := [0, 1, 2, 3, ..., 2^n - 1].
/// The succinct form of M'(r) is r0 + r1 * 2 + r2 * 2^2 + ... + rn * 2^n.
pub fn eval_wellform_address_vec<E: ExtensionField>(
    offset: u64,
    scaled: u64,
    r: &[E],
    descending: bool,
) -> E {
    let (offset, scaled) = (E::from_canonical_u64(offset), E::from_canonical_u64(scaled));
    // scan() yields r[i] * 2^i; summing gives M'(r).
    let tmp = scaled
        * r.iter()
            .scan(E::ONE, |state, x| {
                let result = *x * *state;
                *state *= E::from_canonical_u64(2); // Update the state for the next power of 2
                Some(result)
            })
            .sum::<E>();
    let tmp = if descending { tmp.neg() } else { tmp };
    offset + tmp
}
/// Evaluate MLE with the following evaluation over the hypercube:
/// [0, 0, 0, 1, 0, 1, 2, 3, 0, 1, 2, 3, 4, 5, 6, 7, ..., 0, 1, 2, ..., 2^n-1]
/// which is the concatenation of
/// [0]
/// [0, 1]
/// [0, 1, 2, 3]
/// ...
/// [0, 1, 2, ..., 2^n-1]
/// which is then prefixed by a single zero to make all the subvectors aligned to powers of two.
/// This function is used to support dynamic range check.
/// Note that this MLE has n+1 variables, so r should have length n+1.
///
/// conceptually, we traverse evaluations in the sequence:
/// [0, 0], [0, 1], [0, 1, 2, 3], ...
/// for every `next` element is already in a well-formed incremental structure,
/// so we can reuse `eval_wellform_address_vec` to obtain its value.
///
/// at each step `i`, we combine:
/// - the accumulated result so far, weighted by `(1 - r[i])`
/// - the evaluation of the current prefix `r[..i]`, weighted by `r[i]`.
///
/// this iterative version avoids recursion for efficiency and clarity.
pub fn eval_stacked_wellform_address_vec<E: ExtensionField>(r: &[E]) -> E {
    // With fewer than two variables the vector is all zeros.
    if r.len() < 2 {
        return E::ZERO;
    }
    let mut res = E::ZERO;
    for i in 1..r.len() {
        res = res * (E::ONE - r[i]) + eval_wellform_address_vec(0, 1, &r[..i], false) * r[i];
    }
    res
}
/// Evaluate MLE with the following evaluation over the hypercube:
/// [0, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3, ..., n, n, n, ..., n]
/// which is the concatenation of
/// [0]
/// [1, 1]
/// [2, 2, 2, 2]
/// ...
/// [n, n, n, ..., n]
/// which is then prefixed by a single zero to make all the subvectors aligned to powers of two.
/// This function is used to support dynamic range check.
/// Note that this MLE has n+1 variables, so r should have length n+1.
pub fn eval_stacked_constant_vec<E: ExtensionField>(r: &[E]) -> E {
    // With fewer than two variables the vector is all zeros.
    if r.len() < 2 {
        return E::ZERO;
    }
    // Fold in each constant block i, weighted by r[i] against the
    // accumulated lower blocks weighted by (1 - r[i]).
    let mut res = E::ZERO;
    for (i, r) in r.iter().enumerate().skip(1) {
        res = res * (E::ONE - *r) + E::from_canonical_usize(i) * *r;
    }
    res
}
/// Succinctly evaluate the MLE M(x0, x1, ..., xn) at r = [r0, r1, ..., rn]
/// where `M = [0 ... 0 1 ... 1 ... 2^(n-k)-1 ... 2^(n-k)-1]`,
/// i.e. each element is repeated 2^k times.
/// The value equals M(xk, xk+1, ..., xn): simply drop the first k elements
/// of r and evaluate the plain incremental vector on the rest.
pub fn eval_inner_repeated_incremental_vec<E: ExtensionField>(k: u64, r: &[E]) -> E {
    eval_wellform_address_vec(0, 1, &r[k as usize..], false)
}
/// Succinctly evaluate the MLE M(x0, x1, ..., xn) at r = [r0, r1, ..., rn]
/// where `M = [0 1 ... 2^k-1]` tiled 2^(n-k) times.
/// The value equals M(x0, ..., xk-1): simply take the first k elements
/// of r and evaluate the plain incremental vector on them.
pub fn eval_outer_repeated_incremental_vec<E: ExtensionField>(k: u64, r: &[E]) -> E {
    eval_wellform_address_vec(0, 1, &r[..k as usize], false)
}
// Unit tests for the succinct MLE evaluation helpers above: each test builds
// the explicit evaluation vector, materializes it as an MLE, and checks the
// succinct closed-form evaluation agrees at (pseudo-)random points.
#[cfg(test)]
mod tests {
    use ff_ext::{FromUniformBytes, GoldilocksExt2};
    use p3::{field::FieldAlgebra, goldilocks::Goldilocks};
    use std::{iter, sync::Arc};
    type E = GoldilocksExt2;
    use multilinear_extensions::mle::MultilinearExtension;
    use super::*;
    // Helper: wrap a base-field evaluation vector as an Arc'd MLE.
    fn make_mle<'a, E: ExtensionField>(
        values: Vec<E::BaseField>,
    ) -> ArcMultilinearExtension<'a, E> {
        Arc::new(MultilinearExtension::from_evaluation_vec_smart(
            values.len().ilog2() as usize,
            values,
        ))
    }
    #[test]
    fn test_rotation_next_base_mle_eval() {
        type E = GoldilocksExt2;
        let bh = BooleanHypercube::new(5);
        let poly = make_mle::<E>(
            (0..128u64)
                .map(Goldilocks::from_canonical_u64)
                .collect_vec(),
        );
        let rotated = rotation_next_base_mle(&bh, &poly, 5);
        let mut rng = rand::thread_rng();
        let point: Vec<_> = (0..7).map(|_| E::random(&mut rng)).collect();
        // Rotated evaluation must interpolate the two rotation points.
        let (left_point, right_point) = bh.get_rotation_points(&point);
        let rotated_eval = rotated.evaluate(&point);
        let left_eval = poly.evaluate(&left_point);
        let right_eval = poly.evaluate(&right_point);
        assert_eq!(
            rotated_eval,
            (E::ONE - point[4]) * left_eval + point[4] * right_eval
        );
        assert_eq!(
            right_eval,
            bh.get_rotation_right_eval_from_left(rotated_eval, left_eval, &point)
        );
    }
    #[test]
    fn test_eval_stacked_wellform_address_vec() {
        let r = [
            E::from_canonical_usize(123),
            E::from_canonical_usize(456),
            E::from_canonical_usize(789),
            E::from_canonical_usize(3210),
            E::from_canonical_usize(9876),
        ];
        for n in 0..r.len() {
            // [0] ++ [0] ++ [0,1] ++ [0,1,2,3] ++ ...
            let v = iter::once(E::ZERO)
                .chain((0..=n).flat_map(|i| (0..(1 << i)).map(E::from_canonical_usize)))
                .collect::<Vec<E>>();
            let poly = MultilinearExtension::from_evaluations_ext_vec(n + 1, v);
            assert_eq!(
                eval_stacked_wellform_address_vec(&r[0..=n]),
                poly.evaluate(&r[0..=n])
            )
        }
    }
    #[test]
    fn test_eval_stacked_constant_vec() {
        let r = [
            E::from_canonical_usize(123),
            E::from_canonical_usize(456),
            E::from_canonical_usize(789),
            E::from_canonical_usize(3210),
            E::from_canonical_usize(9876),
        ];
        for n in 0..r.len() {
            // [0] ++ [0] ++ [1,1] ++ [2,2,2,2] ++ ...
            let v = iter::once(E::ZERO)
                .chain((0..=n).flat_map(|i| iter::repeat_n(i, 1 << i).map(E::from_canonical_usize)))
                .collect::<Vec<E>>();
            let poly = MultilinearExtension::from_evaluations_ext_vec(n + 1, v);
            assert_eq!(
                eval_stacked_constant_vec(&r[0..=n]),
                poly.evaluate(&r[0..=n])
            )
        }
    }
    #[test]
    fn test_eval_inner_repeating_incremental_vec() {
        let r = [
            E::from_canonical_usize(123),
            E::from_canonical_usize(456),
            E::from_canonical_usize(789),
            E::from_canonical_usize(3210),
            E::from_canonical_usize(9876),
        ];
        for n in 1..=r.len() {
            for k in 0..=n {
                // Each value i repeated 2^k times.
                let v = (0..(1 << (n - k)))
                    .flat_map(|i| iter::repeat_n(E::from_canonical_usize(i), 1 << k))
                    .collect::<Vec<E>>();
                let poly = MultilinearExtension::from_evaluations_ext_vec(n, v);
                assert_eq!(
                    eval_inner_repeated_incremental_vec(k as u64, &r[0..n]),
                    poly.evaluate(&r[0..n])
                )
            }
        }
    }
    #[test]
    fn test_eval_outer_repeating_incremental_vec() {
        let r = [
            E::from_canonical_usize(123),
            E::from_canonical_usize(456),
            E::from_canonical_usize(789),
            E::from_canonical_usize(3210),
            E::from_canonical_usize(9876),
        ];
        for n in 1..=r.len() {
            for k in 0..=n {
                // [0..2^k) tiled 2^(n-k) times.
                let v = iter::repeat_n(0, 1 << (n - k))
                    .flat_map(|_| (0..(1 << k)).map(E::from_canonical_usize))
                    .collect::<Vec<E>>();
                let poly = MultilinearExtension::from_evaluations_ext_vec(n, v);
                assert_eq!(
                    eval_outer_repeated_incremental_vec(k as u64, &r[0..n]),
                    poly.evaluate(&r[0..n])
                )
            }
        }
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/hal.rs | gkr_iop/src/hal.rs | use crate::gkr::layer::{
Layer,
hal::{LinearLayerProver, SumcheckLayerProver, ZerocheckLayerProver},
};
use either::Either;
use ff_ext::ExtensionField;
use mpcs::PolynomialCommitmentScheme;
use multilinear_extensions::mle::Point;
use std::{fmt::Debug, sync::Arc};
/// Device-agnostic view of a multilinear polynomial over the boolean hypercube.
pub trait MultilinearPolynomial<E: ExtensionField> {
    /// Number of variables (log2 of the evaluation-table length).
    fn num_vars(&self) -> usize;
    /// Evaluate the polynomial at `point`.
    fn eval(&self, point: Point<E>) -> E;
    /// Get the length of evaluation data
    fn evaluations_len(&self) -> usize;
    /// Debug utility: generate a semantic signature value representing all
    /// boolean-hypercube elements. This is very heavy, as it traverses the
    /// whole boolean hypercube.
    fn bh_signature(&self) -> E;
}
/// Defines basic types like field, pcs that are common among all devices
/// and also defines the types that are specific to device.
pub trait ProverBackend {
    /// types that are common across all devices
    type E: ExtensionField;
    type Pcs: PolynomialCommitmentScheme<Self::E>;
    /// device-specific types
    // TODO: remove lifetime bound
    type MultilinearPoly<'a>: Send + Sync + Clone + Debug + Default + MultilinearPolynomial<Self::E>;
    type Matrix: Send + Sync + Clone;
    type PcsData;
    fn get_pp(&self) -> &<Self::Pcs as PolynomialCommitmentScheme<Self::E>>::ProverParam;
    fn get_vp(&self) -> &<Self::Pcs as PolynomialCommitmentScheme<Self::E>>::VerifierParam;
}
/// Umbrella trait: a device is a prover for every layer kind plus a witness
/// generator; implemented as an empty marker over its supertraits.
pub trait ProverDevice<PB>:
    LinearLayerProver<PB>
    + SumcheckLayerProver<PB>
    + ZerocheckLayerProver<PB>
    + ProtocolWitnessGeneratorProver<PB>
where
    PB: ProverBackend,
{
}
/// Generates the per-layer witness polynomials for a protocol run on a device.
pub trait ProtocolWitnessGeneratorProver<PB: ProverBackend> {
    fn layer_witness<'a>(
        layer: &Layer<PB::E>,
        layer_wits: &[Arc<PB::MultilinearPoly<'a>>],
        pub_io_evals: &[Either<<PB::E as ExtensionField>::BaseField, PB::E>],
        challenges: &[PB::E],
    ) -> Vec<Arc<PB::MultilinearPoly<'a>>>;
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/selector.rs | gkr_iop/src/selector.rs | use std::iter::repeat_n;
use rayon::iter::IndexedParallelIterator;
use crate::{gkr::booleanhypercube::CYCLIC_POW2_5, utils::eq_eval_less_or_equal_than};
use ff_ext::ExtensionField;
use multilinear_extensions::{
Expression, WitnessId,
mle::{IntoMLE, MultilinearExtension, Point},
util::ceil_log2,
virtual_poly::{build_eq_x_r_vec, eq_eval},
};
use p3::field::FieldAlgebra;
use rayon::{
iter::{IntoParallelIterator, ParallelIterator},
slice::ParallelSliceMut,
};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use witness::next_pow2_instance_padding;
/// Provide context for selector's instantiation at runtime
#[derive(Clone, Debug)]
pub struct SelectorContext {
    // first selected instance index
    pub offset: usize,
    // number of selected instances starting at `offset`
    pub num_instances: usize,
    // total number of variables of the selector polynomial
    pub num_vars: usize,
}
impl SelectorContext {
pub fn new(offset: usize, num_instances: usize, num_vars: usize) -> Self {
Self {
offset,
num_instances,
num_vars,
}
}
}
/// Selector selects part of the witnesses in the sumcheck protocol.
#[derive(Clone, Debug, PartialEq, Eq, Hash, PartialOrd, Ord, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize",
    deserialize = "E::BaseField: DeserializeOwned"
))]
pub enum SelectorType<E: ExtensionField> {
    /// No selector: nothing is selected.
    None,
    /// Select the whole boolean hypercube.
    Whole(Expression<E>),
    /// Select part of the instances, other parts padded with a field element.
    Prefix(Expression<E>),
    /// selector activates on the specified `indices`, which are assumed to be in ascending order.
    /// each index corresponds to a position within a fixed-size chunk (e.g., size 32),
    OrderedSparse32 {
        indices: Vec<usize>,
        expression: Expression<E>,
    },
    /// binary tree [`quark`] from paper
    QuarkBinaryTreeLessThan(Expression<E>),
}
impl<E: ExtensionField> SelectorType<E> {
    /// Returns an MultilinearExtension with `ctx.num_vars` variables whenever applicable
    /// (`None` carries no polynomial; `QuarkBinaryTreeLessThan` is not materializable here).
    pub fn to_mle(&self, ctx: &SelectorContext) -> Option<MultilinearExtension<'_, E>> {
        match self {
            SelectorType::None => None,
            SelectorType::Whole(_) => {
                assert_eq!(ceil_log2(ctx.num_instances), ctx.num_vars);
                // All-ones vector: every point of the hypercube is selected.
                Some(
                    (0..(1 << ctx.num_vars))
                        .into_par_iter()
                        .map(|_| E::BaseField::ONE)
                        .collect::<Vec<_>>()
                        .into_mle(),
                )
            }
            SelectorType::Prefix(_) => {
                assert!(ctx.offset + ctx.num_instances <= (1 << ctx.num_vars));
                let start = ctx.offset;
                let end = start + ctx.num_instances;
                // 0 on [0, start), 1 on [start, end), 0 on [end, 2^num_vars).
                Some(
                    (0..start)
                        .into_par_iter()
                        .map(|_| E::BaseField::ZERO)
                        .chain((start..end).into_par_iter().map(|_| E::BaseField::ONE))
                        .chain(
                            (end..(1 << ctx.num_vars))
                                .into_par_iter()
                                .map(|_| E::BaseField::ZERO),
                        )
                        .collect::<Vec<_>>()
                        .into_mle(),
                )
            }
            SelectorType::OrderedSparse32 {
                indices,
                expression: _,
            } => {
                // Low 5 variables index inside a size-32 chunk; the rest index instances.
                assert_eq!(ceil_log2(ctx.num_instances) + 5, ctx.num_vars);
                Some(
                    (0..(1 << (ctx.num_vars - 5)))
                        .into_par_iter()
                        .flat_map(|chunk_index| {
                            if chunk_index >= ctx.num_instances {
                                // Padding chunk beyond the instance range: all zeros.
                                vec![E::ZERO; 32]
                            } else {
                                // Set ONE exactly at the (ascending) `indices` of a live chunk.
                                let mut chunk = vec![E::ZERO; 32];
                                let mut indices_iter = indices.iter().copied();
                                let mut next_keep = indices_iter.next();
                                for (i, e) in chunk.iter_mut().enumerate() {
                                    if let Some(idx) = next_keep
                                        && i == idx
                                    {
                                        *e = E::ONE;
                                        next_keep = indices_iter.next(); // Keep this one
                                    }
                                }
                                chunk
                            }
                        })
                        .collect::<Vec<E>>()
                        .into_mle(),
                )
            }
            SelectorType::QuarkBinaryTreeLessThan(..) => unimplemented!(),
        }
    }
    /// Compute true and false mle eq(1; b[..5]) * sel(y; b[5..]), and eq(1; b[..5]) * (eq() - sel(y; b[5..]))
    /// i.e. the eq(out_point, b) vector with all unselected slots zeroed out.
    pub fn compute(
        &self,
        out_point: &Point<E>,
        ctx: &SelectorContext,
    ) -> Option<MultilinearExtension<'_, E>> {
        assert_eq!(out_point.len(), ctx.num_vars);
        match self {
            SelectorType::None => None,
            SelectorType::Whole(_) => Some(build_eq_x_r_vec(out_point).into_mle()),
            SelectorType::Prefix(_) => {
                let start = ctx.offset;
                let end = start + ctx.num_instances;
                assert!(
                    end <= (1 << ctx.num_vars),
                    "start: {}, num_instances: {}, num_vars: {}",
                    start,
                    ctx.num_instances,
                    ctx.num_vars
                );
                // Zero out the eq vector outside the selected window [start, end).
                let mut sel = build_eq_x_r_vec(out_point);
                sel.splice(0..start, repeat_n(E::ZERO, start));
                sel.splice(end..sel.len(), repeat_n(E::ZERO, sel.len() - end));
                Some(sel.into_mle())
            }
            // compute true and false mle eq(1; b[..5]) * sel(y; b[5..]), and eq(1; b[..5]) * (eq() - sel(y; b[5..]))
            SelectorType::OrderedSparse32 { indices, .. } => {
                assert_eq!(
                    out_point.len(),
                    next_pow2_instance_padding(ctx.num_instances).ilog2() as usize + 5
                );
                let mut sel = build_eq_x_r_vec(out_point);
                // Process each size-32 chunk in parallel, keeping only active indices.
                sel.par_chunks_exact_mut(CYCLIC_POW2_5.len())
                    .enumerate()
                    .for_each(|(chunk_index, chunk)| {
                        if chunk_index >= ctx.num_instances {
                            // Zero out the entire chunk if out of instance range
                            chunk.iter_mut().for_each(|e| *e = E::ZERO);
                            return;
                        }
                        let mut indices_iter = indices.iter().copied();
                        let mut next_keep = indices_iter.next();
                        for (i, e) in chunk.iter_mut().enumerate() {
                            match next_keep {
                                Some(idx) if i == idx => {
                                    next_keep = indices_iter.next(); // Keep this one
                                }
                                _ => *e = E::ZERO, // Not in indices
                            }
                        }
                    });
                Some(sel.into_mle())
            }
            // also see evaluate() function for more explanation
            SelectorType::QuarkBinaryTreeLessThan(_) => {
                assert_eq!(ctx.offset, 0);
                // num_instances: number of prefix one in leaf layer
                let mut sel: Vec<E> = build_eq_x_r_vec(out_point);
                let n = sel.len();
                let num_instances_sequence = (0..out_point.len())
                    // clean up sig bits
                    .scan(ctx.num_instances, |n_instance, _| {
                        // n points to sum means we have n/2 addition pairs
                        let cur = *n_instance / 2;
                        // the next layer has ceil(n/2) points to sum
                        *n_instance = (*n_instance).div_ceil(2);
                        Some(cur)
                    })
                    .collect::<Vec<_>>();
                // split sel into different size of region, set tailing 0 of respective chunk size
                // 1st round: take v = sel[0..sel.len()/2], zero out v[num_instances_sequence[0]..]
                // 2nd round: take v = sel[sel.len()/2 .. sel.len()/4], zero out v[num_instances_sequence[1]..]
                // ...
                // each round: progressively smaller chunk
                // example: round 0 uses first half, round 1 uses next quarter, etc.
                // compute cumulative start indices:
                // e.g. chunk = n/2, then start = 0, chunk, chunk + chunk/2, chunk + chunk/2 + chunk/4, ...
                // compute disjoint start indices and lengths
                let chunks: Vec<(usize, usize)> = {
                    let mut result = Vec::new();
                    let mut start = 0;
                    let mut chunk_len = n / 2;
                    while chunk_len > 0 {
                        result.push((start, chunk_len));
                        start += chunk_len;
                        chunk_len /= 2;
                    }
                    result
                };
                for (i, (start, len)) in chunks.into_iter().enumerate() {
                    let slice = &mut sel[start..start + len];
                    // determine from which index to zero
                    let zero_start = num_instances_sequence.get(i).copied().unwrap_or(0).min(len);
                    for x in &mut slice[zero_start..] {
                        *x = E::ZERO;
                    }
                }
                // zero out last bh evaluations
                *sel.last_mut().unwrap() = E::ZERO;
                Some(sel.into_mle())
            }
        }
    }
    /// Succinctly evaluate the selector at `(out_point, in_point)` without
    /// materializing the MLE, returning the evaluation together with the
    /// structural witness id carried by the selector expression.
    pub fn evaluate(
        &self,
        out_point: &Point<E>,
        in_point: &Point<E>,
        ctx: &SelectorContext,
    ) -> Option<(E, WitnessId)> {
        assert_eq!(in_point.len(), ctx.num_vars);
        assert_eq!(out_point.len(), ctx.num_vars);
        let (expr, eval) = match self {
            SelectorType::None => return None,
            SelectorType::Whole(expr) => {
                debug_assert_eq!(out_point.len(), in_point.len());
                (expr, eq_eval(out_point, in_point))
            }
            SelectorType::Prefix(expression) => {
                let start = ctx.offset;
                let end = start + ctx.num_instances;
                assert_eq!(in_point.len(), out_point.len());
                assert!(
                    end <= (1 << out_point.len()),
                    "start: {}, num_instances: {}, num_vars: {}",
                    start,
                    ctx.num_instances,
                    ctx.num_vars
                );
                if end == 0 {
                    (expression, E::ZERO)
                } else {
                    // sel = eq_{<= end-1} - eq_{<= start-1}: only [start, end) contributes.
                    let eq_end = eq_eval_less_or_equal_than(end - 1, out_point, in_point);
                    let sel = if start > 0 {
                        let eq_start = eq_eval_less_or_equal_than(start - 1, out_point, in_point);
                        eq_end - eq_start
                    } else {
                        eq_end
                    };
                    (expression, sel)
                }
            }
            // evaluate true and false mle eq(CYCLIC_POW2_5[round]; b[..5]) * sel(y; b[5..]), and eq(1; b[..5]) * (1 - sel(y; b[5..]))
            SelectorType::OrderedSparse32 {
                indices,
                expression,
            } => {
                // Inner-chunk factor: sum of eq products over the active indices.
                let out_subgroup_eq = build_eq_x_r_vec(&out_point[..5]);
                let in_subgroup_eq = build_eq_x_r_vec(&in_point[..5]);
                let mut eval = E::ZERO;
                for index in indices {
                    eval += out_subgroup_eq[*index] * in_subgroup_eq[*index];
                }
                // Instance factor: prefix selector over the remaining variables.
                let sel = eq_eval_less_or_equal_than(
                    ctx.num_instances - 1,
                    &out_point[5..],
                    &in_point[5..],
                );
                (expression, eval * sel)
            }
            SelectorType::QuarkBinaryTreeLessThan(expr) => {
                // num_instances count on leaf layer
                // where nodes size is 2^(N) / 2
                // out_point.len() is also log(2^(N)) - 1
                // so num_instances and 1 << out_point.len() are on same scaling
                assert!(ctx.num_instances > 0);
                assert!(ctx.num_instances <= (1 << out_point.len()));
                assert!(!out_point.is_empty());
                assert_eq!(out_point.len(), in_point.len());
                // we break down this special selector evaluation into recursive structure
                // iterating through out_point and in_point, for each i
                // next_eval = lhs * (1-out_point[i]) * (1 - in_point[i]) + prev_eval * out_point[i] * in_point[i]
                // where the lhs is in consecutive prefix 1 follow by 0
                // calculate prefix 1 length of each layer
                let mut prefix_one_seq = (0..out_point.len())
                    .scan(ctx.num_instances, |n_instance, _| {
                        // n points to sum means we have n/2 addition pairs
                        let cur = *n_instance / 2;
                        // next layer has ceil(n/2) points to sum
                        *n_instance = (*n_instance).div_ceil(2);
                        Some(cur)
                    })
                    .collect::<Vec<_>>();
                prefix_one_seq.reverse();
                // Base case: the first (most significant) layer has at most one pair.
                let mut res = if prefix_one_seq[0] == 0 {
                    E::ZERO
                } else {
                    assert_eq!(prefix_one_seq[0], 1);
                    (E::ONE - out_point[0]) * (E::ONE - in_point[0])
                };
                for i in 1..out_point.len() {
                    let num_prefix_one_lhs = prefix_one_seq[i];
                    let lhs_res = if num_prefix_one_lhs == 0 {
                        E::ZERO
                    } else {
                        (E::ONE - out_point[i])
                            * (E::ONE - in_point[i])
                            * eq_eval_less_or_equal_than(
                                num_prefix_one_lhs - 1,
                                &out_point[..i],
                                &in_point[..i],
                            )
                    };
                    let rhs_res = (out_point[i] * in_point[i]) * res;
                    res = lhs_res + rhs_res;
                }
                (expr, res)
            }
        };
        let Expression::StructuralWitIn(wit_id, _) = expr else {
            panic!("Wrong selector expression format");
        };
        Some((eval, *wit_id))
    }
    /// return ordered indices of OrderedSparse32
    pub fn sparse32_indices(&self) -> &[usize] {
        match self {
            Self::OrderedSparse32 { indices, .. } => indices,
            _ => panic!("invalid calling on non sparse type"),
        }
    }
    /// Return the selector expression (panics for `None`/`QuarkBinaryTreeLessThan`).
    pub fn selector_expr(&self) -> &Expression<E> {
        match self {
            Self::OrderedSparse32 { expression, .. }
            | Self::Whole(expression)
            | Self::Prefix(expression) => expression,
            e => unimplemented!("no selector expression in {:?}", e),
        }
    }
}
#[cfg(test)]
mod tests {
    use ff_ext::{BabyBearExt4, FromUniformBytes};
    use multilinear_extensions::{
        StructuralWitIn, ToExpr, util::ceil_log2, virtual_poly::build_eq_x_r_vec,
    };
    use p3::field::FieldAlgebra;
    use rand::thread_rng;
    use crate::selector::{SelectorContext, SelectorType};
    type E = BabyBearExt4;
    /// Check that the materialized QuarkBinaryTreeLessThan selector matches its
    /// succinct evaluation on random points.
    #[test]
    fn test_quark_lt_selector() {
        let mut rng = thread_rng();
        let n_points = 5;
        let n_vars = ceil_log2(n_points);
        // Dummy structural witness carrying the selector expression.
        let witin = StructuralWitIn {
            id: 0,
            witin_type: multilinear_extensions::StructuralWitInType::EqualDistanceSequence {
                max_len: 0,
                offset: 0,
                multi_factor: 0,
                descending: false,
            },
        };
        let selector = SelectorType::QuarkBinaryTreeLessThan(witin.expr());
        let ctx = SelectorContext::new(0, n_points, n_vars);
        let out_rt = E::random_vec(n_vars, &mut rng);
        let sel_mle = selector.compute(&out_rt, &ctx).unwrap();
        // if we have 5 points to sum, then
        // in 1st layer: two additions p12 = p1 + p2, p34 = p3 + p4, p5 kept
        // in 2nd layer: one addition p14 = p12 + p34, p5 kept
        // in 3rd layer: one addition p15 = p14 + p5
        let eq = build_eq_x_r_vec(&out_rt);
        let vec = sel_mle.get_ext_field_vec();
        assert_eq!(vec[0], eq[0]); // p1+p2
        assert_eq!(vec[1], eq[1]); // p3+p4
        assert_eq!(vec[2], E::ZERO); // p5
        assert_eq!(vec[3], E::ZERO);
        assert_eq!(vec[4], eq[4]); // p1+p2+p3+p4
        assert_eq!(vec[5], E::ZERO); // p5
        assert_eq!(vec[6], eq[6]); // p1+p2+p3+p4+p5
        assert_eq!(vec[7], E::ZERO);
        // Succinct evaluation must agree with evaluating the materialized MLE.
        let in_rt = E::random_vec(n_vars, &mut rng);
        let Some((eval, _)) = selector.evaluate(&out_rt, &in_rt, &ctx) else {
            unreachable!()
        };
        assert_eq!(sel_mle.evaluate(&in_rt), eval);
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/evaluation.rs | gkr_iop/src/evaluation.rs | use ff_ext::ExtensionField;
use itertools::{Itertools, izip};
use multilinear_extensions::{
Expression, mle::PointAndEval, utils::eval_by_expr_with_fixed,
virtual_poly::build_eq_x_r_vec_sequential,
};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
/// Evaluation expression for the gkr layer reduction and PCS opening
/// preparation.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound = "E: ExtensionField + DeserializeOwned")]
pub enum EvalExpression<E: ExtensionField> {
    /// The evaluation is identically zero.
    Zero,
    /// Single entry in the evaluation vector.
    Single(usize),
    /// Linear expression of an entry with the scalar and offset.
    Linear(usize, Box<Expression<E>>, Box<Expression<E>>),
    /// Merging multiple evaluations which denotes a partition of the original
    /// polynomial. `(usize, Constant)` denote the modification of the point.
    /// For example, when it receive a point `(p0, p1, p2, p3)` from a
    /// succeeding layer, `vec![(2, c0), (4, c1)]` will modify the point to
    /// `(p0, p1, c0, p2, c1, p3)`. where the indices specify how the
    /// partition applied to the original polynomial.
    Partition(
        Vec<Box<EvalExpression<E>>>,
        Vec<(usize, Box<Expression<E>>)>,
    ),
}
impl<E: ExtensionField> Default for EvalExpression<E> {
    /// By default an evaluation expression reads entry 0 of the evaluation vector.
    fn default() -> Self {
        Self::Single(0)
    }
}
/// Evaluate a constant/challenge-only expression (no fixed, witness, or
/// instance columns are available — empty slices are passed for all three).
fn evaluate<E: ExtensionField>(expr: &Expression<E>, challenges: &[E]) -> E {
    eval_by_expr_with_fixed(&[], &[], &[], challenges, expr)
}
impl<E: ExtensionField> EvalExpression<E> {
    /// Resolve this expression against the claim vector `evals`, producing the
    /// (point, evaluation) pair it denotes.
    pub fn evaluate(&self, evals: &[PointAndEval<E>], challenges: &[E]) -> PointAndEval<E> {
        match self {
            // assume all point in evals are derived in random, thus pick arbirary one is ok
            // here we pick first point as representative.
            // for zero eval, eval is always zero
            EvalExpression::Zero => PointAndEval {
                point: evals[0].point.clone(),
                eval: E::ZERO,
            },
            EvalExpression::Single(i) => evals[*i].clone(),
            // eval * c0 + c1, at the same point as entry i.
            EvalExpression::Linear(i, c0, c1) => PointAndEval {
                point: evals[*i].point.clone(),
                eval: evals[*i].eval * evaluate(c0, challenges) + evaluate(c1, challenges),
            },
            EvalExpression::Partition(parts, indices) => {
                // indices must be strictly ascending so insertions below are well-defined
                assert!(izip!(indices.iter(), indices.iter().skip(1)).all(|(a, b)| a.0 < b.0));
                let vars = indices
                    .iter()
                    .map(|(_, c)| evaluate(c, challenges))
                    .collect_vec();
                let parts = parts
                    .iter()
                    .map(|part| part.evaluate(evals, &vars))
                    .collect_vec();
                // one part per vertex of the sub-cube spanned by the inserted variables
                assert_eq!(parts.len(), 1 << indices.len());
                assert!(parts.iter().all(|part| part.point == parts[0].point));
                // splice the partition constants into the shared point
                let mut new_point = parts[0].point.to_vec();
                for (index_in_point, c) in indices {
                    new_point.insert(*index_in_point, evaluate(c, challenges));
                }
                // recombine the parts weighted by eq(vars, vertex)
                let eq = build_eq_x_r_vec_sequential(&vars);
                let eval = izip!(parts, &eq).fold(E::ZERO, |acc, (part, eq)| acc + part.eval * *eq);
                PointAndEval {
                    point: new_point,
                    eval,
                }
            }
        }
    }
    /// Borrow the entry this `Single` expression points at (panics otherwise).
    pub fn entry<'a, T>(&self, evals: &'a [T]) -> &'a T {
        match self {
            EvalExpression::Single(i) => &evals[*i],
            _ => panic!("invalid operation"),
        }
    }
    /// Mutably borrow the entry this `Single` expression points at (panics otherwise).
    pub fn entry_mut<'a, T>(&self, evals: &'a mut [T]) -> &'a mut T {
        match self {
            EvalExpression::Single(i) => &mut evals[*i],
            _ => panic!("invalid operation"),
        }
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/chip.rs | gkr_iop/src/chip.rs | use crate::{circuit_builder::CircuitBuilder, gkr::layer::Layer};
use ff_ext::ExtensionField;
use itertools::Itertools;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
pub mod builder;
pub mod protocol;
/// Chip stores all information required in the GKR protocol.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize",
    deserialize = "E::BaseField: DeserializeOwned"
))]
pub struct Chip<E: ExtensionField> {
    /// The number of fixed inputs committed in the whole protocol.
    pub n_fixed: usize,
    /// The number of base inputs committed in the whole protocol.
    pub n_committed: usize,
    /// The number of challenges generated through the whole protocols
    /// (except the ones inside sumcheck protocols).
    pub n_challenges: usize,
    /// All input evaluations generated at the end of layer protocols will be stored
    /// in a vector and this is the length.
    pub n_evaluations: usize,
    /// The layers of the GKR circuit, in the order outputs-to-inputs.
    pub layers: Vec<Layer<E>>,
    /// The output of the circuit.
    pub final_out_evals: Vec<usize>,
}
impl<E: ExtensionField> Chip<E> {
    /// Build a [`Chip`] from a finished circuit builder.
    ///
    /// `n_challenges` is the number of protocol-level challenges (excluding
    /// the ones sampled inside sumcheck protocols).
    pub fn new_from_cb(cb: &CircuitBuilder<E>, n_challenges: usize) -> Chip<E> {
        // Number of output evaluations produced by the layer protocols:
        // read/write/lookup records plus table variants; logup tables
        // contribute two evaluations each (numerator p and denominator q).
        // Computed once and shared between `n_evaluations` and
        // `final_out_evals` to keep the two in sync.
        let n_out_evals = cb.cs.w_expressions.len()
            + cb.cs.r_expressions.len()
            + cb.cs.lk_expressions.len()
            + cb.cs.w_table_expressions.len()
            + cb.cs.r_table_expressions.len()
            + cb.cs.lk_table_expressions.len() * 2;
        Self {
            n_fixed: cb.cs.num_fixed,
            n_committed: cb.cs.num_witin as usize,
            n_challenges,
            // Besides the outputs, every fixed/witness/instance column gets an
            // input-evaluation slot at the end of the layer reductions.
            n_evaluations: n_out_evals
                + cb.cs.num_fixed
                + cb.cs.num_witin as usize
                + cb.cs.instance_openings.len(),
            final_out_evals: (0..n_out_evals).collect(),
            layers: vec![],
        }
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/chip/builder.rs | gkr_iop/src/chip/builder.rs | use ff_ext::ExtensionField;
use crate::gkr::layer::{Layer, LayerType};
use super::Chip;
impl<E: ExtensionField> Chip<E> {
    /// Add a layer to the circuit. Note that we assume the fixed inputs only occur in the first layer.
    pub fn add_layer(&mut self, layer: Layer<E>) {
        // Every expression of the layer must be accounted for by exactly one
        // output group.
        let declared_outputs: usize = layer
            .out_sel_and_eval_exprs
            .iter()
            .map(|(_, outs)| outs.len())
            .sum();
        assert_eq!(declared_outputs, layer.exprs.len());
        // Linear layers may only carry degree-1 expressions.
        if matches!(layer.ty, LayerType::Linear) {
            assert!(layer.exprs.iter().all(|expr| expr.degree() == 1));
        }
        self.layers.push(layer);
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/chip/protocol.rs | gkr_iop/src/chip/protocol.rs | use ff_ext::ExtensionField;
use crate::gkr::GKRCircuit;
use super::Chip;
impl<E: ExtensionField> Chip<E> {
    /// Extract information from Chip that required in the GKR phase.
    pub fn gkr_circuit(&self) -> GKRCircuit<E> {
        GKRCircuit {
            n_challenges: self.n_challenges,
            n_evaluations: self.n_evaluations,
            final_out_evals: self.final_out_evals.to_vec(),
            layers: self.layers.to_vec(),
        }
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr/layer.rs | gkr_iop/src/gkr/layer.rs | use either::Either;
use ff_ext::ExtensionField;
use itertools::{Itertools, chain, izip};
use linear_layer::{LayerClaims, LinearLayer};
use multilinear_extensions::{
Expression, Instance, StructuralWitIn, ToExpr,
mle::{Point, PointAndEval},
monomial::Term,
};
use p3::field::FieldAlgebra;
use rayon::iter::{IntoParallelIterator, IntoParallelRefIterator};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::{ops::Neg, sync::Arc, vec::IntoIter};
use sumcheck_layer::LayerProof;
use transcript::Transcript;
use zerocheck_layer::ZerocheckLayer;
use crate::{
OutEvalGroups,
circuit_builder::{CircuitBuilder, ConstraintSystem, RotationParams},
error::BackendError,
evaluation::EvalExpression,
hal::{MultilinearPolynomial, ProverBackend, ProverDevice},
selector::{SelectorContext, SelectorType},
};
pub mod cpu;
#[cfg(feature = "gpu")]
pub mod gpu;
pub mod hal;
pub mod linear_layer;
pub mod sumcheck_layer;
pub mod zerocheck_layer;
/// One output group: the selector gating the group together with the
/// evaluation expressions of its outputs.
pub type ExprEvalType<E> = (SelectorType<E>, Vec<EvalExpression<E>>);
/// Rotation openings: optional `[left, right, target]` eq expressions plus the
/// `(rotated_expr, expr)` pairs constrained to agree.
pub type RotateExprs<E> = (
    Option<[Expression<E>; ROTATION_OPENING_COUNT]>,
    Vec<(Expression<E>, Expression<E>)>,
);
// rotation contribute
// left + right + target, overall 3
pub const ROTATION_OPENING_COUNT: usize = 3;
/// Kind of per-layer protocol used to reduce the layer's claims.
#[derive(Clone, Debug, Serialize, Deserialize)]
pub enum LayerType {
    Zerocheck,
    Linear,
}
/// A single layer of the GKR circuit together with everything needed to prove
/// and verify its claim reduction.
#[derive(Clone, Debug, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize",
    deserialize = "E::BaseField: DeserializeOwned"
))]
pub struct Layer<E: ExtensionField> {
    pub name: String,
    pub ty: LayerType,
    pub n_witin: usize,
    pub n_structural_witin: usize,
    pub n_fixed: usize,
    pub n_instance: usize,
    pub max_expr_degree: usize,
    /// keep all structural witin which could be evaluated succinctly without PCS
    pub structural_witins: Vec<StructuralWitIn>,
    /// instance openings
    pub instance_openings: Vec<Instance>,
    /// num challenges dedicated to this layer.
    pub n_challenges: usize,
    /// Expressions to prove in this layer. For zerocheck and linear layers,
    /// each expression corresponds to an output. While in sumcheck, there
    /// is only 1 expression, which corresponds to the sum of all outputs.
    /// This design is for the convenience when building the following
    /// expression: `r^0 e_0 + r^1 * e_1 + ...
    /// = \sum_x (r^0 eq_0(X) \cdot expr_0(x) + r^1 eq_1(X) \cdot expr_1(x) + ...)`.
    /// where `vec![e_0, e_1, ...]` will be the output evaluation expressions.
    /// TODO we should convert into monomial format Vec<Vec<Term<Expression<E>, Expression<E>>>
    /// TODO once we make eq, zero_check rlc challenge alpha all encoded into static expression
    pub exprs: Vec<Expression<E>>,
    /// `exprs` in monomial form
    pub exprs_with_selector_out_eval_monomial_form: Vec<Vec<Term<Expression<E>, Expression<E>>>>,
    /// Positions to place the evaluations of the base inputs of this layer.
    pub in_eval_expr: Vec<usize>,
    /// The expressions of the evaluations from the succeeding layers, which are
    /// connected to the outputs of this layer.
    /// It formats indicated as different output group
    /// first tuple value is optional eq
    pub out_sel_and_eval_exprs: Vec<ExprEvalType<E>>,
    // format: ([eq0, eq1, eq2], Vec<(rotation_expr, expr)>) such that rotation_expr - expr == 0
    // there got 3 different eq for (left, right, target) during rotation argument
    // refer https://hackmd.io/HAAj1JTQQiKfu0SIwOJDRw?view#Rotation
    pub rotation_exprs: RotateExprs<E>,
    pub rotation_cyclic_group_log2: usize,
    pub rotation_cyclic_subgroup_size: usize,
    // For debugging purposes
    pub expr_names: Vec<String>,
    // static expression, only valid for zerocheck & sumcheck layer
    // store in 2 forms: expression & monomial
    pub main_sumcheck_expression_monomial_terms: Option<Vec<Term<Expression<E>, Expression<E>>>>,
    pub main_sumcheck_expression: Option<Expression<E>>,
    // rotation sumcheck expression, only optionally valid for zerocheck
    // store in 2 forms: expression & monomial
    pub rotation_sumcheck_expression_monomial_terms:
        Option<Vec<Term<Expression<E>, Expression<E>>>>,
    pub rotation_sumcheck_expression: Option<Expression<E>>,
}
/// Device-resident witness polynomials of one layer.
#[derive(Clone, Debug)]
pub struct LayerWitness<'a, PB: ProverBackend>(pub Vec<Arc<PB::MultilinearPoly<'a>>>);
impl<'a, PB: ProverBackend> std::ops::Index<usize> for LayerWitness<'a, PB> {
    type Output = Arc<PB::MultilinearPoly<'a>>;
    // Index directly into the inner polynomial vector.
    fn index(&self, index: usize) -> &Self::Output {
        &self.0[index]
    }
}
impl<E: ExtensionField> Layer<E> {
    /// Construct a layer and precompute its static sumcheck expressions.
    /// Currently only `LayerType::Zerocheck` is supported.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        name: String,
        ty: LayerType,
        n_witin: usize,
        n_structural_witin: usize,
        n_fixed: usize,
        n_instance: usize,
        // exprs concat zero/non-zero expression.
        exprs: Vec<Expression<E>>,
        n_challenges: usize,
        in_eval_expr: Vec<usize>,
        // first tuple value is eq
        out_sel_and_eval_exprs: Vec<ExprEvalType<E>>,
        ((rotation_eq, rotation_exprs), rotation_cyclic_group_log2, rotation_cyclic_subgroup_size): (
            RotateExprs<E>,
            usize,
            usize,
        ),
        expr_names: Vec<String>,
        structural_witins: Vec<StructuralWitIn>,
        instance_openings: Vec<Instance>,
    ) -> Self {
        assert_eq!(expr_names.len(), exprs.len(), "there are expr without name");
        let max_expr_degree = exprs
            .iter()
            .map(|expr| expr.degree())
            .max()
            .expect("empty exprs");
        match ty {
            LayerType::Zerocheck => {
                let mut layer = Self {
                    name,
                    ty,
                    n_witin,
                    n_structural_witin,
                    n_fixed,
                    n_instance,
                    max_expr_degree,
                    structural_witins,
                    instance_openings,
                    n_challenges,
                    exprs,
                    exprs_with_selector_out_eval_monomial_form: vec![],
                    in_eval_expr,
                    out_sel_and_eval_exprs,
                    rotation_exprs: (rotation_eq, rotation_exprs),
                    rotation_cyclic_group_log2,
                    rotation_cyclic_subgroup_size,
                    expr_names,
                    main_sumcheck_expression_monomial_terms: None,
                    main_sumcheck_expression: None,
                    rotation_sumcheck_expression_monomial_terms: None,
                    rotation_sumcheck_expression: None,
                };
                // Fill in the static (monomialized) sumcheck expressions.
                <Self as ZerocheckLayer<E>>::build_static_expression(&mut layer);
                layer
            }
            LayerType::Linear => unimplemented!(""),
        }
    }
    /// Run this layer's reduction protocol: consume the output claims in
    /// `claims`, produce a [`LayerProof`], and write the resulting input
    /// claims back into `claims` at the positions given by `in_eval_expr`.
    /// Returns the proof and the sumcheck input point.
    #[allow(clippy::too_many_arguments)]
    pub fn prove<T: Transcript<E>, PB: ProverBackend<E = E>, PD: ProverDevice<PB>>(
        &self,
        num_threads: usize,
        max_num_variables: usize,
        wit: LayerWitness<PB>,
        claims: &mut [PointAndEval<E>],
        pub_io_evals: &[E],
        challenges: &mut Vec<E>,
        transcript: &mut T,
        selector_ctxs: &[SelectorContext],
    ) -> (LayerProof<E>, Point<E>) {
        // Sample this layer's challenges before reading the output claims.
        self.update_challenges(challenges, transcript);
        let mut eval_and_dedup_points = self.extract_claim_and_point(claims, challenges);
        let (sumcheck_layer_proof, point) = match self.ty {
            LayerType::Zerocheck => {
                let out_points = eval_and_dedup_points
                    .into_iter()
                    .map(|(_, point)| point.expect("point must exist"))
                    .collect_vec();
                <Layer<PB::E> as ZerocheckLayer<E>>::prove::<PB, PD>(
                    self,
                    num_threads,
                    max_num_variables,
                    wit,
                    &out_points,
                    pub_io_evals,
                    challenges,
                    transcript,
                    selector_ctxs,
                )
            }
            LayerType::Linear => {
                // Linear layers carry exactly one output group.
                assert_eq!(eval_and_dedup_points.len(), 1);
                let (_, point) = eval_and_dedup_points.remove(0);
                let point = point.clone().unwrap();
                (
                    <Layer<E> as LinearLayer<E>>::prove::<PB, PD>(self, wit, &point, transcript),
                    point,
                )
            }
        };
        // Record the new input claims for the preceding layer.
        self.update_claims(claims, &sumcheck_layer_proof.main.evals, &point);
        (sumcheck_layer_proof, point)
    }
    /// Verify this layer's reduction proof, mirroring [`Layer::prove`]:
    /// consumes the output claims, checks the proof, writes the input claims
    /// back into `claims`, and returns the sumcheck input point.
    #[allow(clippy::too_many_arguments)]
    pub fn verify<Trans: Transcript<E>>(
        &self,
        max_num_variables: usize,
        proof: LayerProof<E>,
        claims: &mut [PointAndEval<E>],
        pub_io_evals: &[E],
        raw_pi: &[Vec<E::BaseField>],
        challenges: &mut Vec<E>,
        transcript: &mut Trans,
        selector_ctxs: &[SelectorContext],
    ) -> Result<Point<E>, BackendError> {
        // Must sample the same challenges as the prover did.
        self.update_challenges(challenges, transcript);
        let mut eval_and_dedup_points = self.extract_claim_and_point(claims, challenges);
        let LayerClaims { in_point, evals } = match self.ty {
            LayerType::Zerocheck => <Layer<_> as ZerocheckLayer<E>>::verify(
                self,
                max_num_variables,
                proof,
                eval_and_dedup_points,
                pub_io_evals,
                raw_pi,
                challenges,
                transcript,
                selector_ctxs,
            )?,
            LayerType::Linear => {
                // Linear layers carry exactly one output group.
                assert_eq!(eval_and_dedup_points.len(), 1);
                let (sigmas, point) = eval_and_dedup_points.remove(0);
                <Layer<_> as LinearLayer<E>>::verify(
                    self,
                    proof,
                    &sigmas,
                    point.as_ref().unwrap(),
                    challenges,
                    transcript,
                )?
            }
        };
        // Record the new input claims for the preceding layer.
        self.update_claims(claims, &evals, &in_point);
        Ok(in_point)
    }
// extract claim and dudup point
fn extract_claim_and_point(
&self,
claims: &[PointAndEval<E>],
challenges: &[E],
) -> Vec<(Vec<E>, Option<Point<E>>)> {
self.out_sel_and_eval_exprs
.iter()
.map(|(_, out_evals)| {
let evals = out_evals
.iter()
.map(|out_eval| {
let PointAndEval { eval, .. } = out_eval.evaluate(claims, challenges);
eval
})
.collect_vec();
// within same group, all the point should be the same
// so we assume only take first point as representative
let point = out_evals.first().map(|out_eval| {
let PointAndEval { point, .. } = out_eval.evaluate(claims, challenges);
point
});
(evals, point)
})
.collect_vec()
}
// generate layer challenge by order, starting from index 2
// as challenge id 0, 1 are occupied
fn update_challenges(&self, challenges: &mut Vec<E>, transcript: &mut impl Transcript<E>) {
if challenges.len() <= self.n_challenges + 2 {
challenges.resize(self.n_challenges + 2, E::default());
};
challenges[2..].copy_from_slice(
&transcript.sample_and_append_challenge_pows(self.n_challenges, b"layer challenge"),
);
}
fn update_claims(&self, claims: &mut [PointAndEval<E>], evals: &[E], point: &Point<E>) {
for (value, pos) in izip!(chain![evals], chain![&self.in_eval_expr]) {
claims[*pos] = PointAndEval {
point: point.clone(),
eval: *value,
};
}
}
    /// Build a zerocheck [`Layer`] from a finished circuit builder: gather
    /// read/write/lookup/zero expressions together with their selectors and
    /// output-evaluation slots (`out_evals` groups), then delegate to
    /// [`Layer::new`] — with or without rotation parameters.
    pub fn from_circuit_builder(
        cb: &CircuitBuilder<E>,
        layer_name: String,
        n_challenges: usize,
        out_evals: OutEvalGroups,
    ) -> Layer<E> {
        let w_len = cb.cs.w_expressions.len() + cb.cs.w_table_expressions.len();
        let r_len = cb.cs.r_expressions.len() + cb.cs.r_table_expressions.len();
        let lk_len = cb.cs.lk_expressions.len() + cb.cs.lk_table_expressions.len() * 2; // logup lk table include p, q
        let zero_len =
            cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
        let [r_record_evals, w_record_evals, lookup_evals, zero_evals] = out_evals;
        assert_eq!(r_record_evals.len(), r_len);
        assert_eq!(w_record_evals.len(), w_len);
        assert_eq!(lookup_evals.len(), lk_len);
        assert_eq!(zero_evals.len(), zero_len);
        let non_zero_expr_len = cb.cs.w_expressions.len()
            + cb.cs.w_table_expressions.len()
            + cb.cs.r_expressions.len()
            + cb.cs.r_table_expressions.len()
            + cb.cs.lk_expressions.len()
            + cb.cs.lk_table_expressions.len() * 2;
        let zero_expr_len =
            cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
        let mut expr_evals = Vec::with_capacity(4);
        let mut expr_names = Vec::with_capacity(non_zero_expr_len + zero_expr_len);
        let mut expressions = Vec::with_capacity(non_zero_expr_len + zero_expr_len);
        if let Some(r_selector) = cb.cs.r_selector.as_ref() {
            // process r_record
            let evals = Self::dedup_last_selector_evals(r_selector, &mut expr_evals);
            for (idx, ((ram_expr, name), ram_eval)) in (cb
                .cs
                .r_expressions
                .iter()
                .chain(cb.cs.r_table_expressions.iter().map(|t| &t.expr)))
            .zip_eq(
                cb.cs
                    .r_expressions_namespace_map
                    .iter()
                    .chain(&cb.cs.r_table_expressions_namespace_map),
            )
            .zip_eq(&r_record_evals)
            .enumerate()
            {
                expressions.push(ram_expr - E::BaseField::ONE.expr());
                evals.push(EvalExpression::<E>::Linear(
                    // evaluation = claim * one - one (padding)
                    *ram_eval,
                    E::BaseField::ONE.expr().into(),
                    E::BaseField::ONE.neg().expr().into(),
                ));
                expr_names.push(format!("{}/{idx}", name));
            }
        }
        if let Some(w_selector) = cb.cs.w_selector.as_ref() {
            // process w_record
            let evals = Self::dedup_last_selector_evals(w_selector, &mut expr_evals);
            for (idx, ((ram_expr, name), ram_eval)) in (cb
                .cs
                .w_expressions
                .iter()
                .chain(cb.cs.w_table_expressions.iter().map(|t| &t.expr)))
            .zip_eq(
                cb.cs
                    .w_expressions_namespace_map
                    .iter()
                    .chain(&cb.cs.w_table_expressions_namespace_map),
            )
            .zip_eq(&w_record_evals)
            .enumerate()
            {
                expressions.push(ram_expr - E::BaseField::ONE.expr());
                evals.push(EvalExpression::<E>::Linear(
                    // evaluation = claim * one - one (padding)
                    *ram_eval,
                    E::BaseField::ONE.expr().into(),
                    E::BaseField::ONE.neg().expr().into(),
                ));
                expr_names.push(format!("{}/{idx}", name));
            }
        }
        if let Some(lk_selector) = cb.cs.lk_selector.as_ref() {
            // process lookup records
            let evals = Self::dedup_last_selector_evals(lk_selector, &mut expr_evals);
            for (idx, ((lookup, name), lookup_eval)) in (cb
                .cs
                .lk_expressions
                .iter()
                .chain(cb.cs.lk_table_expressions.iter().map(|t| &t.multiplicity))
                .chain(cb.cs.lk_table_expressions.iter().map(|t| &t.values)))
            .zip_eq(if cb.cs.lk_table_expressions.is_empty() {
                Either::Left(cb.cs.lk_expressions_namespace_map.iter())
            } else {
                // repeat expressions_namespace_map twice to deal with lk p, q
                Either::Right(
                    cb.cs
                        .lk_expressions_namespace_map
                        .iter()
                        .chain(&cb.cs.lk_expressions_namespace_map),
                )
            })
            .zip_eq(&lookup_evals)
            .enumerate()
            {
                expressions.push(lookup - cb.cs.chip_record_alpha.clone());
                evals.push(EvalExpression::<E>::Linear(
                    // evaluation = claim * one - alpha (padding)
                    *lookup_eval,
                    E::BaseField::ONE.expr().into(),
                    cb.cs.chip_record_alpha.clone().neg().into(),
                ));
                expr_names.push(format!("{}/{idx}", name));
            }
        }
        if let Some(zero_selector) = cb.cs.zero_selector.as_ref() {
            // process zero_record
            let evals = Self::dedup_last_selector_evals(zero_selector, &mut expr_evals);
            for (idx, (zero_expr, name)) in izip!(
                0..,
                chain!(
                    cb.cs
                        .assert_zero_expressions
                        .iter()
                        .zip_eq(&cb.cs.assert_zero_expressions_namespace_map),
                    cb.cs
                        .assert_zero_sumcheck_expressions
                        .iter()
                        .zip_eq(&cb.cs.assert_zero_sumcheck_expressions_namespace_map)
                )
            ) {
                expressions.push(zero_expr.clone());
                evals.push(EvalExpression::Zero);
                expr_names.push(format!("{}/{idx}", name));
            }
        }
        // Sort expressions, expr_names, and evals according to eval.0 and classify evals.
        let ConstraintSystem {
            rotation_params,
            rotations,
            ..
        } = &cb.cs;
        // Input evaluation slots follow directly after the output slots.
        let in_eval_expr = (non_zero_expr_len..)
            .take(cb.cs.num_witin as usize + cb.cs.num_fixed + cb.cs.instance_openings.len())
            .collect_vec();
        if rotations.is_empty() {
            Layer::new(
                layer_name,
                LayerType::Zerocheck,
                cb.cs.num_witin as usize,
                cb.cs.num_structural_witin as usize,
                cb.cs.num_fixed,
                cb.cs.instance_openings.len(),
                expressions,
                n_challenges,
                in_eval_expr,
                expr_evals,
                ((None, vec![]), 0, 0),
                expr_names,
                cb.cs.structural_witins.clone(),
                cb.cs.instance_openings.clone(),
            )
        } else {
            let Some(RotationParams {
                rotation_eqs,
                rotation_cyclic_group_log2,
                rotation_cyclic_subgroup_size,
            }) = rotation_params
            else {
                panic!("rotation params not set");
            };
            Layer::new(
                layer_name,
                LayerType::Zerocheck,
                cb.cs.num_witin as usize,
                cb.cs.num_structural_witin as usize,
                cb.cs.num_fixed,
                cb.cs.instance_openings.len(),
                expressions,
                n_challenges,
                in_eval_expr,
                expr_evals,
                (
                    (rotation_eqs.clone(), rotations.clone()),
                    *rotation_cyclic_group_log2,
                    *rotation_cyclic_subgroup_size,
                ),
                expr_names,
                cb.cs.structural_witins.clone(),
                cb.cs.instance_openings.clone(),
            )
        }
    }
// return previous evals for extend, if new selector match with last selector
// otherwise push new evals and return it for mutability
fn dedup_last_selector_evals<'a>(
new_selector: &SelectorType<E>,
expr_evals: &'a mut Vec<(SelectorType<E>, Vec<EvalExpression<E>>)>,
) -> &'a mut Vec<EvalExpression<E>>
where
SelectorType<E>: Clone + PartialEq,
{
let need_push = match expr_evals.last() {
Some((last_sel, _)) => last_sel != new_selector,
None => true,
};
if need_push {
expr_evals.push((new_selector.clone(), vec![]));
}
&mut expr_evals.last_mut().unwrap().1
}
}
impl<'a, PB: ProverBackend> LayerWitness<'a, PB> {
pub fn new(
wits: Vec<Arc<PB::MultilinearPoly<'a>>>,
fixed: Vec<Arc<PB::MultilinearPoly<'a>>>,
) -> Self {
let mut wits_and_fixed = wits;
wits_and_fixed.extend(fixed);
assert!(!wits_and_fixed.is_empty());
assert!(wits_and_fixed.iter().map(|b| b.num_vars()).all_equal());
Self(wits_and_fixed)
}
pub fn num_vars(&self) -> usize {
if self.0.is_empty() {
0
} else {
self[0].num_vars()
}
}
}
impl<'a, PB: ProverBackend> IntoIterator for LayerWitness<'a, PB> {
type Item = Arc<PB::MultilinearPoly<'a>>;
type IntoIter = IntoIter<Arc<PB::MultilinearPoly<'a>>>;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
impl<'a, PB: ProverBackend> LayerWitness<'a, PB> {
pub fn iter(&self) -> impl Iterator<Item = &Arc<PB::MultilinearPoly<'a>>> {
self.0.iter()
}
}
impl<'a, PB: ProverBackend> IntoParallelIterator for LayerWitness<'a, PB> {
type Iter = rayon::vec::IntoIter<Arc<PB::MultilinearPoly<'a>>>;
type Item = Arc<PB::MultilinearPoly<'a>>;
fn into_par_iter(self) -> Self::Iter {
self.0.into_par_iter()
}
}
impl<'a, PB: ProverBackend> IntoParallelIterator for &'a LayerWitness<'a, PB> {
type Iter = rayon::slice::Iter<'a, Arc<PB::MultilinearPoly<'a>>>;
type Item = &'a Arc<PB::MultilinearPoly<'a>>;
fn into_par_iter(self) -> Self::Iter {
self.0.par_iter()
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr/booleanhypercube.rs | gkr_iop/src/gkr/booleanhypercube.rs | use ff_ext::ExtensionField;
use itertools::Itertools;
use multilinear_extensions::mle::Point;
const BH_MAX_NUM_VAR: usize = 5;
pub struct BooleanHypercube {
num_vars: usize,
}
// 2^5-1 cyclic group
pub const CYCLIC_POW2_5: [u64; 32] = [
0b00001, // 0 = decimal 1
0b00010, // 1 = decimal 2
0b00100, // 2 = decimal 4
0b01000, // 3 = decimal 8
0b10000, // 4 = decimal 16
0b00101, // 5 = decimal 5
0b01010, // 6 = decimal 10
0b10100, // 7 = decimal 20
0b01101, // 8 = decimal 13
0b11010, // 9 = decimal 26
0b10001, // 10 = decimal 17
0b00111, // 11 = decimal 7
0b01110, // 12 = decimal 14
0b11100, // 13 = decimal 28
0b11101, // 14 = decimal 29
0b11111, // 15 = decimal 31
0b11011, // 16 = decimal 27
0b10011, // 17 = decimal 19
0b00011, // 18 = decimal 3
0b00110, // 19 = decimal 6
0b01100, // 20 = decimal 12
0b11000, // 21 = decimal 24
0b10101, // 22 = decimal 21
0b01111, // 23 = decimal 15
0b11110, // 24 = decimal 30
0b11001, // 25 = decimal 25
0b10111, // 26 = decimal 23
0b01011, // 27 = decimal 11
0b10110, // 28 = decimal 22
0b01001, // 29 = decimal 9
0b10010, // 30 = decimal 18
0b00001, // 31 = decimal 1
];
#[allow(dead_code)]
const CYCLIC_POW2_5_MODULUS: u8 = 0b100101; // X^5 + X^2 + 1
impl BooleanHypercube {
// giving num_vars, cyclic group size is 2^num_vars - 1, as excluding 0
pub fn new(num_vars: usize) -> Self {
assert!(num_vars <= BH_MAX_NUM_VAR);
Self { num_vars }
}
pub fn get_rotation_points<E: ExtensionField>(&self, point: &Point<E>) -> (Point<E>, Point<E>) {
match self.num_vars {
5 => (
// derive from CYCLIC_POW2_5_MODULUS
// left: (0, r0, r1, r2, r3, r5, r6, ....)
std::iter::once(E::ZERO)
.chain(point[..4].iter().copied())
.chain(point[5..].iter().copied())
.take(point.len())
.collect_vec(),
// right: (1, r0, 1-r1, r2, r3, r5, r6, ....)
std::iter::once(E::ONE)
.chain(std::iter::once(point[0]))
.chain(std::iter::once(E::ONE - point[1]))
.chain(point[2..4].iter().copied())
.chain(point[5..].iter().copied())
.take(point.len())
.collect_vec(),
),
num_vars => unimplemented!("not support {num_vars}"),
}
}
pub fn get_rotation_right_eval_from_left<E: ExtensionField>(
&self,
rotated_eval: E,
left_eval: E,
point: &Point<E>,
) -> E {
match self.num_vars {
// rotated_eval = (1-r4) * left_eval + r4 * right_eval
// right_eval = (rotated_eval - (1-r4) * left_eval) / r4
5 => (rotated_eval - (E::ONE - point[4]) * left_eval) / point[4],
num_vars => unimplemented!("not support {num_vars}"),
}
}
}
impl IntoIterator for &BooleanHypercube {
type Item = u64;
type IntoIter = std::array::IntoIter<u64, 32>;
fn into_iter(self) -> Self::IntoIter {
match self.num_vars {
5 => CYCLIC_POW2_5.into_iter(),
_ => unimplemented!(),
}
}
}
#[cfg(test)]
mod tests {
use crate::gkr::booleanhypercube::CYCLIC_POW2_5_MODULUS;
#[test]
fn test_generate_f_31_cyclic_group_element() {
let _x = 0b00010; // generator x = X
let mut powers = Vec::with_capacity(31);
powers.push(1); // x^0 = 1
let mut current = 1u8;
for _ in 1..32 {
current <<= 1; // multiply by x (shift left)
if current & 0b100000 != 0 {
// degree 5 overflow
current ^= CYCLIC_POW2_5_MODULUS; // reduce modulo polynomial
}
powers.push(current);
}
for &_val in powers.iter() {
// println!("0b{:05b}, // {} = decimal {} ", val, i, val);
}
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr/layer_constraint_system.rs | gkr_iop/src/gkr/layer_constraint_system.rs | /// TODO: LayerConstrainSystem is deprecated
use std::{cmp::Ordering, collections::BTreeMap};
use crate::{
evaluation::EvalExpression,
gkr::layer::{Layer, LayerType, ROTATION_OPENING_COUNT},
selector::SelectorType,
tables::LookupTable,
};
use ff_ext::ExtensionField;
use itertools::{Itertools, chain, izip};
use multilinear_extensions::{Expression, Fixed, ToExpr, WitnessId, rlc_chip_record};
use p3::field::FieldAlgebra;
#[derive(Clone, Debug, Default)]
pub struct RotationParams<E: ExtensionField> {
pub rotation_eqs: Option<[Expression<E>; ROTATION_OPENING_COUNT]>,
pub rotation_cyclic_group_log2: usize,
pub rotation_cyclic_subgroup_size: usize,
}
#[allow(clippy::type_complexity)]
pub struct LayerConstraintSystem<E: ExtensionField> {
num_witin: usize,
num_structural_witin: usize,
#[allow(unused)]
num_fixed: usize,
eq_zero: Option<Expression<E>>,
// expressions include zero & non-zero expression, differentiate via evals
// zero expr represented as Linear with all 0 value
// TODO we should define an Zero enum for it
pub expressions: Vec<Expression<E>>,
pub expr_names: Vec<String>,
pub evals: Vec<(SelectorType<E>, EvalExpression<E>)>,
pub rotations: Vec<(Expression<E>, Expression<E>)>,
pub rotation_params: Option<RotationParams<E>>,
pub and_lookups: Vec<Expression<E>>,
pub xor_lookups: Vec<Expression<E>>,
pub range_lookups: Vec<Expression<E>>,
pub ram_read: Vec<Expression<E>>,
pub ram_write: Vec<Expression<E>>,
// global challenge
pub alpha: Expression<E>,
pub beta: Expression<E>,
}
impl<E: ExtensionField> LayerConstraintSystem<E> {
pub fn new(
num_witin: usize,
num_structural_witin: usize,
num_fixed: usize,
eq_zero: Option<Expression<E>>,
alpha: Expression<E>,
beta: Expression<E>,
) -> Self {
LayerConstraintSystem {
num_witin,
num_structural_witin,
num_fixed,
eq_zero,
rotations: vec![],
rotation_params: None,
expressions: vec![],
expr_names: vec![],
evals: vec![],
and_lookups: vec![],
xor_lookups: vec![],
range_lookups: vec![],
ram_read: vec![],
ram_write: vec![],
alpha,
beta,
}
}
pub fn add_zero_constraint(&mut self, expr: Expression<E>, name: String) {
assert!(self.eq_zero.is_some());
self.expressions.push(expr);
self.evals.push((
SelectorType::Whole(self.eq_zero.clone().unwrap()),
EvalExpression::Zero,
));
self.expr_names.push(name);
}
pub fn add_non_zero_constraint(
&mut self,
expr: Expression<E>,
eval: (SelectorType<E>, EvalExpression<E>),
name: String,
) {
self.expressions.push(expr);
self.evals.push(eval);
self.expr_names.push(name);
}
pub fn lookup_and8(&mut self, a: Expression<E>, b: Expression<E>, c: Expression<E>) {
let rlc_record = rlc_chip_record(
vec![
E::BaseField::from_canonical_u64(LookupTable::And as u64).expr(),
a,
b,
c,
],
self.alpha.clone(),
self.beta.clone(),
);
self.and_lookups.push(rlc_record);
}
pub fn lookup_xor8(&mut self, a: Expression<E>, b: Expression<E>, c: Expression<E>) {
let rlc_record = rlc_chip_record(
vec![
E::BaseField::from_canonical_u64(LookupTable::Xor as u64).expr(),
a,
b,
c,
],
self.alpha.clone(),
self.beta.clone(),
);
self.xor_lookups.push(rlc_record);
}
/// Generates U16 lookups to prove that `value` fits on `size < 16` bits.
/// In general it can be done by two U16 checks: one for `value` and one for
/// `value << (16 - size)`.
pub fn lookup_range(&mut self, value: Expression<E>, size: usize) {
assert!(size <= 16);
let rlc_record = rlc_chip_record(
vec![
// TODO: layer constrain system is deprecated
E::BaseField::from_canonical_u64(LookupTable::Dynamic as u64).expr(),
value.clone(),
],
self.alpha.clone(),
self.beta.clone(),
);
self.range_lookups.push(rlc_record);
if size < 16 {
let rlc_record = rlc_chip_record(
vec![
E::BaseField::from_canonical_u64(LookupTable::Dynamic as u64).expr(),
value * E::BaseField::from_canonical_u64(1 << (16 - size)).expr(),
],
self.alpha.clone(),
self.beta.clone(),
);
self.range_lookups.push(rlc_record)
}
}
pub fn constrain_eq(&mut self, lhs: Expression<E>, rhs: Expression<E>, name: String) {
self.add_zero_constraint(lhs - rhs, name);
}
// Constrains that lhs and rhs encode the same value of SIZE bits
// WARNING: Assumes that forall i, (lhs[i].1 < (2 ^ lhs[i].0))
// This needs to be constrained separately
pub fn constrain_reps_eq<const SIZE: usize>(
&mut self,
lhs: &[(usize, Expression<E>)],
rhs: &[(usize, Expression<E>)],
name: String,
) {
self.add_zero_constraint(
expansion_expr::<E, SIZE>(lhs) - expansion_expr::<E, SIZE>(rhs),
name,
);
}
/// Checks that `rot8` is equal to `input8` left-rotated by `delta`.
/// `rot8` and `input8` each consist of 8 chunks of 8-bits.
///
/// `split_rep` is a chunk representation of the input which
/// allows to reduce the required rotation to an array rotation. It may use
/// non-uniform chunks.
///
/// For example, when `delta = 2`, the 64 bits are split into chunks of
/// sizes `[16a, 14b, 2c, 16d, 14e, 2f]` (here the first chunks contains the
/// least significant bits so a left rotation will become a right rotation
/// of the array). To perform the required rotation, we can
/// simply rotate the array: [2f, 16a, 14b, 2c, 16d, 14e].
///
/// In the first step, we check that `rot8` and `split_rep` represent the
/// same 64 bits. In the second step we check that `rot8` and the appropiate
/// array rotation of `split_rep` represent the same 64 bits.
///
/// This type of representation-equality check is done by packing chunks
/// into sizes of exactly 32 (so for `delta = 2` we compare [16a, 14b,
/// 2c] to the first 4 elements of `rot8`). In addition, we do range
/// checks on `split_rep` which check that the felts meet the required
/// sizes.
///
/// This algorithm imposes the following general requirements for
/// `split_rep`:
/// - There exists a suffix of `split_rep` which sums to exactly `delta`.
/// This suffix can contain several elements.
/// - Chunk sizes are at most 16 (so they can be range-checked) or they are
/// exactly equal to 32.
/// - There exists a prefix of chunks which sums exactly to 32. This must
/// hold for the rotated array as well.
/// - The number of chunks should be as small as possible.
///
/// Consult the method `rotation_split` to see how splits are computed for a
/// given `delta
///
/// Note that the function imposes range checks on chunk values, but it
/// makes two exceptions:
/// 1. It doesn't check the 8-bit reps (input and output). This is
/// because all 8-bit reps in the global circuit are implicitly
/// range-checked because they are lookup arguments.
/// 2. It doesn't range-check 32-bit chunks. This is because a 32-bit
/// chunk value is checked to be equal to the composition of 4 8-bit
/// chunks. As mentioned in 1., these can be trusted to be range
/// checked, so the resulting 32-bit is correct by construction as
/// well.
pub fn constrain_left_rotation64(
&mut self,
input8: &[Expression<E>],
split_rep: &[(usize, Expression<E>)],
rot8: &[Expression<E>],
delta: usize,
label: String,
) {
assert_eq!(input8.len(), 8);
assert_eq!(rot8.len(), 8);
// Assert that the given split witnesses are correct for this delta
let (sizes, chunks_rotation) = rotation_split(delta);
assert_eq!(sizes, split_rep.iter().map(|e| e.0).collect_vec());
// Lookup ranges
for (size, elem) in split_rep {
if *size != 32 {
self.lookup_range(elem.expr(), *size);
}
}
// constrain the fact that rep8 and repX.rotate_left(chunks_rotation) are
// the same 64 bitstring
let mut helper = |rep8: &[Expression<E>],
rep_x: &[(usize, Expression<E>)],
chunks_rotation: usize| {
// Do the same thing for the two 32-bit halves
let mut rep_x = rep_x.to_owned();
rep_x.rotate_right(chunks_rotation);
for i in 0..2 {
// The respective 4 elements in the byte representation
let lhs = rep8[4 * i..4 * (i + 1)]
.iter()
.map(|wit| (8, wit.expr()))
.collect_vec();
let cnt = rep_x.len() / 2;
let rhs = &rep_x[cnt * i..cnt * (i + 1)];
assert_eq!(rhs.iter().map(|e| e.0).sum::<usize>(), 32);
self.constrain_reps_eq::<32>(
&lhs,
rhs,
format!(
"rotation internal {label}, round {i}, rot: {chunks_rotation}, delta: {delta}, {:?}",
sizes
),
);
}
};
helper(input8, split_rep, 0);
helper(rot8, split_rep, chunks_rotation);
}
pub fn set_rotation_params(&mut self, params: RotationParams<E>) {
assert!(self.rotation_params.is_none());
self.rotation_params = Some(params);
}
pub fn rotate_and_assert_eq(&mut self, a: Expression<E>, b: Expression<E>) {
self.rotations.push((a, b));
}
pub fn into_layer_with_lookup_eval_iter(
mut self,
layer_name: String,
in_expr_evals: Vec<usize>,
n_challenges: usize,
ram_write_evals: impl ExactSizeIterator<Item = (SelectorType<E>, usize)>,
ram_read_evals: impl ExactSizeIterator<Item = (SelectorType<E>, usize)>,
lookup_evals: impl ExactSizeIterator<Item = (SelectorType<E>, usize)>,
) -> Layer<E> {
// process ram read/write record
assert_eq!(ram_write_evals.len(), self.ram_write.len(),);
assert_eq!(ram_read_evals.len(), self.ram_read.len(),);
for (idx, ram_expr, ram_eval) in izip!(
0..,
chain!(self.ram_write.clone(), self.ram_read.clone(),),
ram_write_evals.chain(ram_read_evals)
) {
self.add_non_zero_constraint(
ram_expr - E::BaseField::ONE.expr(), // ONE is for padding
(ram_eval.0, EvalExpression::Single(ram_eval.1)),
format!("round 0th: {idx}th ram read/write operation"),
);
}
// process lookup records
assert_eq!(
lookup_evals.len(),
self.and_lookups.len() + self.xor_lookups.len() + self.range_lookups.len()
);
for (idx, lookup, lookup_eval) in izip!(
0..,
chain!(
self.and_lookups.clone(),
self.xor_lookups.clone(),
self.range_lookups.clone()
),
lookup_evals
) {
self.add_non_zero_constraint(
lookup,
(lookup_eval.0, EvalExpression::Single(lookup_eval.1)),
format!("round 0th: {idx}th lookup felt"),
);
}
self.into_layer(layer_name, in_expr_evals, n_challenges)
}
/// n_challenges: num of challenges dedicated to this layer
pub fn into_layer(
self,
layer_name: String,
in_eval_expr: Vec<usize>,
n_challenges: usize,
) -> Layer<E> {
let witin_offset = 0 as WitnessId;
let structural_witin_offset = witin_offset + (self.num_witin as WitnessId);
let fixed_offset = structural_witin_offset + (self.num_structural_witin as WitnessId);
// Sort expressions, expr_names, and evals according to eval.0 and classify evals.
let Self {
expr_names,
mut expressions,
evals,
rotation_params,
rotations,
..
} = self;
let mut is_layer_linear =
expressions
.iter_mut()
.fold(rotations.is_empty(), |is_linear_so_far, t| {
// replace `Fixed` and `StructuralWitIn` with `WitIn`, keep other unchanged
*t = t.transform_all(
&|Fixed(fixed_id)| {
Expression::WitIn(fixed_offset + (*fixed_id as WitnessId))
},
&|id| Expression::WitIn(id),
&|structural_wit_id, _| {
Expression::WitIn(structural_witin_offset + structural_wit_id)
},
&|i| Expression::InstanceScalar(i),
&|i| Expression::Instance(i),
&|c| Expression::Constant(c),
&|cid, pow, s, o| Expression::Challenge(cid, pow, s, o),
);
is_linear_so_far && t.is_linear()
});
// process evaluation group by eq expression
let mut eq_map = BTreeMap::new();
izip!(
evals.into_iter(),
expr_names.into_iter(),
expressions.into_iter()
)
.for_each(|((sel_type, eval), name, expr)| {
let (eval_group, names, exprs) =
eq_map.entry(sel_type).or_insert((vec![], vec![], vec![]));
eval_group.push(eval);
names.push(name);
exprs.push(expr);
});
let mut expr_evals = vec![];
let mut expr_names = vec![];
let mut expressions = vec![];
eq_map
.into_iter()
.for_each(|(sel_type, (evals, names, exprs))| {
expr_evals.push((sel_type, evals));
expr_names.extend(names);
expressions.extend(exprs);
});
is_layer_linear = is_layer_linear && expr_evals.len() == 1;
let layer_type = if is_layer_linear {
LayerType::Linear
} else {
LayerType::Zerocheck
};
if rotations.is_empty() {
Layer::new(
layer_name,
layer_type,
self.num_witin,
0,
self.num_fixed,
0,
expressions,
n_challenges,
in_eval_expr,
expr_evals,
((None, vec![]), 0, 0),
expr_names,
vec![],
vec![],
)
} else {
let Some(RotationParams {
rotation_eqs,
rotation_cyclic_group_log2,
rotation_cyclic_subgroup_size,
}) = rotation_params
else {
panic!("rotation params not set");
};
Layer::new(
layer_name,
layer_type,
self.num_witin,
0,
self.num_fixed,
0,
expressions,
n_challenges,
in_eval_expr,
expr_evals,
(
(rotation_eqs, rotations),
rotation_cyclic_group_log2,
rotation_cyclic_subgroup_size,
),
expr_names,
vec![],
vec![],
)
}
}
}
pub fn expansion_expr<E: ExtensionField, const SIZE: usize>(
expansion: &[(usize, Expression<E>)],
) -> Expression<E> {
let (total, ret) =
expansion
.iter()
.rev()
.fold((0, E::BaseField::ZERO.expr()), |acc, (sz, felt)| {
(
acc.0 + sz,
acc.1 * E::BaseField::from_canonical_u64(1 << sz).expr() + felt.expr(),
)
});
assert_eq!(total, SIZE);
ret
}
/// Compute an adequate split of 64-bits into chunks for performing a rotation
/// by `delta`. The first element of the return value is the vec of chunk sizes.
/// The second one is the length of its suffix that needs to be rotated
pub fn rotation_split(delta: usize) -> (Vec<usize>, usize) {
let delta = delta % 64;
if delta == 0 {
return (vec![32, 32], 0);
}
// This split meets all requirements except for <= 16 sizes
let split32 = match delta.cmp(&32) {
Ordering::Less => vec![32 - delta, delta, 32 - delta, delta],
Ordering::Equal => vec![32, 32],
Ordering::Greater => vec![32 - (delta - 32), delta - 32, 32 - (delta - 32), delta - 32],
};
// Split off large chunks
let split16 = split32
.into_iter()
.flat_map(|size| {
assert!(size < 32);
if size <= 16 {
vec![size]
} else {
vec![16, size - 16]
}
})
.collect_vec();
let mut sum = 0;
for (i, size) in split16.iter().rev().enumerate() {
sum += size;
if sum == delta {
return (split16, i + 1);
}
}
panic!();
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr/mock.rs | gkr_iop/src/gkr/mock.rs | use std::{iter, marker::PhantomData};
use ff_ext::ExtensionField;
use itertools::{Itertools, izip};
use mpcs::PolynomialCommitmentScheme;
use multilinear_extensions::{
Expression, WitnessId,
mle::{ArcMultilinearExtension, FieldType, MultilinearExtension},
smart_slice::SmartSlice,
util::ceil_log2,
wit_infer_by_expr,
};
use rand::thread_rng;
use thiserror::Error;
use crate::{cpu::CpuBackend, evaluation::EvalExpression, selector::SelectorType};
use super::{GKRCircuit, GKRCircuitWitness, layer::LayerType};
pub struct MockProver<E: ExtensionField>(PhantomData<E>);
#[derive(Clone, Debug, Error)]
pub enum MockProverError<'a, E: ExtensionField> {
#[error("sumcheck layer should have only one expression, got {0}")]
SumcheckExprLenError(usize),
#[error("sumcheck expression not match, out: {0:?}, expr: {1:?}, expect: {2:?}. got: {3:?}")]
SumcheckExpressionNotMatch(
Box<Vec<EvalExpression<E>>>,
Box<Expression<E>>,
Box<FieldType<'a, E>>,
Box<FieldType<'a, E>>,
),
#[error("zerocheck expression not match, out: {0:?}, expr: {1:?}, expr_name: {2:?}")]
ZerocheckExpressionNotMatch(Box<EvalExpression<E>>, Box<Expression<E>>, String),
#[error("zerocheck expression not match, type: {0:?}, expect: {1:?}. got: {2:?}")]
ZerocheckSelectorError(SelectorType<E>, E, E),
#[error("linear expression not match, out: {0:?}, expr: {1:?}")]
LinearExpressionNotMatch(Box<EvalExpression<E>>, Box<Expression<E>>),
}
impl<E: ExtensionField> MockProver<E> {
pub fn check<'a, 'b, PCS: PolynomialCommitmentScheme<E>>(
circuit: &'a GKRCircuit<E>,
circuit_wit: &'a GKRCircuitWitness<'b, CpuBackend<E, PCS>>,
mut evaluations: Vec<ArcMultilinearExtension<'b, E>>,
mut challenges: Vec<E>,
) -> Result<(), MockProverError<'a, E>>
where
'b: 'a,
{
// TODO: check the rotation argument.
let mut rng = thread_rng();
evaluations.resize_with(circuit.n_evaluations, Default::default);
challenges.resize_with(2 + circuit.n_challenges, || E::random(&mut rng));
// check the input layer
for (layer, layer_wit) in izip!(&circuit.layers, &circuit_wit.layers) {
let num_vars = layer_wit.num_vars();
let mut wits = layer_wit
.iter()
.map(|mle| mle.as_view().into())
.collect::<Vec<_>>();
let structural_wits = wits.split_off(layer.n_witin);
let gots = layer
.exprs
.iter()
.zip_eq(
layer
.out_sel_and_eval_exprs
.iter()
.flat_map(|(sel_type, out)| izip!(iter::repeat(sel_type), out)),
)
.map(|(expr, (sel, _))| {
wit_infer_by_expr(
&(sel.selector_expr() * expr),
layer.n_witin as WitnessId,
layer.n_fixed as WitnessId,
layer.n_instance,
&[],
&wits,
&structural_wits,
&[],
&[],
&challenges,
)
})
.collect_vec();
let expects = layer
.out_sel_and_eval_exprs
.iter()
.flat_map(|(_, out)| {
out.iter().map(|out| {
out.mock_evaluate(
layer.n_witin as WitnessId,
layer.n_fixed as WitnessId,
layer.n_instance,
&evaluations,
&challenges,
num_vars,
)
})
})
.collect::<Result<Vec<_>, _>>()?;
match layer.ty {
LayerType::Zerocheck => {
for (got, expect, expr, expr_name, (_, out_eval)) in izip!(
gots,
expects,
&layer.exprs,
&layer.expr_names,
layer
.out_sel_and_eval_exprs
.iter()
.flat_map(|(sel_type, out)| izip!(iter::repeat(sel_type), out))
) {
if expect != got {
return Err(MockProverError::ZerocheckExpressionNotMatch(
Box::new(out_eval.clone()),
Box::new(expr.clone()),
expr_name.to_string(),
));
}
}
}
LayerType::Linear => {
for (got, expect, expr, out) in
izip!(gots, expects, &layer.exprs, &layer.out_sel_and_eval_exprs)
{
if expect != got {
return Err(MockProverError::LinearExpressionNotMatch(
Box::new(out.1[0].clone()),
Box::new(expr.clone()),
));
}
}
}
}
for (in_pos, wit) in izip!(layer.in_eval_expr.iter(), layer_wit.iter()) {
evaluations[*in_pos] = wit.clone();
}
}
Ok(())
}
}
impl<E: ExtensionField> EvalExpression<E> {
pub fn mock_evaluate<'a>(
&self,
n_witin: WitnessId,
n_fixed: WitnessId,
n_instance: usize,
evals: &[ArcMultilinearExtension<'a, E>],
challenges: &[E],
num_vars: usize,
) -> Result<ArcMultilinearExtension<'a, E>, MockProverError<'a, E>> {
let output = match self {
EvalExpression::Zero => {
MultilinearExtension::from_field_type(num_vars, FieldType::zero(num_vars)).into()
}
EvalExpression::Single(i) => evals[*i].clone(),
EvalExpression::Linear(i, c0, c1) => wit_infer_by_expr(
&(Expression::WitIn(*i as WitnessId) * *c0.clone() + *c1.clone()),
n_witin,
n_fixed,
n_instance,
&[],
evals,
&[],
&[],
&[],
challenges,
),
EvalExpression::Partition(parts, indices) => {
assert_eq!(parts.len(), 1 << indices.len());
let parts = parts
.iter()
.map(|part| {
part.mock_evaluate(
n_witin, n_fixed, n_instance, evals, challenges, num_vars,
)
})
.collect::<Result<Vec<_>, _>>()?;
indices
.iter()
.fold(parts, |acc, (i, _c)| {
let step_size = 1 << i;
acc.chunks_exact(2)
.map(|chunk| {
MultilinearExtension::from_field_type(
ceil_log2(chunk[0].evaluations().len()),
match (&chunk[0].evaluations(), &chunk[1].evaluations()) {
(FieldType::Base(v0), FieldType::Base(v1)) => {
let res = (0..v0.len())
.step_by(step_size)
.flat_map(|j| {
v0[j..j + step_size]
.iter()
.chain(v1[j..j + step_size].iter())
.cloned()
})
.collect_vec();
FieldType::Base(SmartSlice::Owned(res))
}
(FieldType::Ext(v0), FieldType::Ext(v1)) => {
let res = (0..v0.len())
.step_by(step_size)
.flat_map(|j| {
v0[j..j + step_size]
.iter()
.chain(v1[j..j + step_size].iter())
.cloned()
})
.collect_vec();
FieldType::Ext(SmartSlice::Owned(res))
}
_ => unreachable!(),
},
)
.into()
})
.collect_vec()
})
.pop()
.unwrap()
}
};
Ok(output)
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr/layer/sumcheck_layer.rs | gkr_iop/src/gkr/layer/sumcheck_layer.rs | use std::marker::PhantomData;
use ff_ext::ExtensionField;
use itertools::Itertools;
use multilinear_extensions::{utils::eval_by_expr_with_instance, virtual_poly::VPAuxInfo};
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use sumcheck::structs::{IOPProof, IOPVerifierState, SumCheckSubClaim, VerifierError};
use transcript::Transcript;
use crate::{
error::BackendError,
gkr::layer::hal::SumcheckLayerProver,
hal::{ProverBackend, ProverDevice},
};
use super::{Layer, LayerWitness, linear_layer::LayerClaims};
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
serialize = "E::BaseField: Serialize",
deserialize = "E::BaseField: DeserializeOwned"
))]
pub struct LayerProof<E: ExtensionField> {
pub rotation: Option<SumcheckLayerProof<E>>,
pub main: SumcheckLayerProof<E>,
}
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
serialize = "E::BaseField: Serialize",
deserialize = "E::BaseField: DeserializeOwned"
))]
pub struct SumcheckLayerProof<E: ExtensionField> {
pub proof: IOPProof<E>,
pub evals: Vec<E>,
}
pub trait SumcheckLayer<E: ExtensionField> {
#[allow(clippy::too_many_arguments)]
fn prove<PB: ProverBackend<E = E>, PD: ProverDevice<PB>>(
&self,
num_threads: usize,
max_num_variables: usize,
wit: LayerWitness<PB>,
challenges: &[PB::E],
transcript: &mut impl Transcript<PB::E>,
) -> LayerProof<PB::E>;
fn verify(
&self,
max_num_variables: usize,
proof: LayerProof<E>,
sigma: &E,
challenges: &[E],
transcript: &mut impl Transcript<E>,
) -> Result<LayerClaims<E>, BackendError>;
}
impl<E: ExtensionField> SumcheckLayer<E> for Layer<E> {
fn prove<PB: ProverBackend<E = E>, PD: ProverDevice<PB>>(
&self,
num_threads: usize,
max_num_variables: usize,
wit: LayerWitness<PB>,
challenges: &[PB::E],
transcript: &mut impl Transcript<PB::E>,
) -> LayerProof<PB::E> {
<PD as SumcheckLayerProver<PB>>::prove(
self,
num_threads,
max_num_variables,
wit,
challenges,
transcript,
)
}
fn verify(
&self,
max_num_variables: usize,
proof: LayerProof<E>,
sigma: &E,
challenges: &[E],
transcript: &mut impl Transcript<E>,
) -> Result<LayerClaims<E>, BackendError> {
let LayerProof {
main:
SumcheckLayerProof {
proof: IOPProof { proofs, .. },
evals,
},
..
} = proof;
let SumCheckSubClaim {
point: in_point,
expected_evaluation,
} = IOPVerifierState::verify(
*sigma,
&IOPProof { proofs },
&VPAuxInfo {
max_degree: self.exprs[0].degree(),
max_num_variables,
phantom: PhantomData,
},
transcript,
);
// Check the final evaluations.
let got_claim =
eval_by_expr_with_instance(&[], &evals, &[], &[], challenges, &self.exprs[0])
.right()
.unwrap();
if got_claim != expected_evaluation {
return Err(BackendError::LayerVerificationFailed(
"sumcheck verify failed".to_string().into(),
VerifierError::ClaimNotMatch(
format!("{}", expected_evaluation).into(),
format!("{}", got_claim).into(),
),
));
}
Ok(LayerClaims {
in_point: in_point.into_iter().map(|c| c.elements).collect_vec(),
evals,
})
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr/layer/linear_layer.rs | gkr_iop/src/gkr/layer/linear_layer.rs | use ff_ext::ExtensionField;
use itertools::Itertools;
use multilinear_extensions::{mle::Point, utils::eval_by_expr_with_instance};
use sumcheck::structs::VerifierError;
use transcript::Transcript;
use crate::{
error::BackendError,
gkr::layer::{hal::LinearLayerProver, sumcheck_layer::SumcheckLayerProof},
hal::{ProverBackend, ProverDevice},
};
use super::{Layer, LayerWitness, sumcheck_layer::LayerProof};
pub struct LayerClaims<E: ExtensionField> {
pub in_point: Point<E>,
pub evals: Vec<E>,
}
pub trait LinearLayer<E: ExtensionField> {
fn prove<PB: ProverBackend<E = E>, PD: ProverDevice<PB>>(
&self,
wit: LayerWitness<PB>,
out_point: &Point<PB::E>,
transcript: &mut impl Transcript<PB::E>,
) -> LayerProof<PB::E>;
fn verify(
&self,
proof: LayerProof<E>,
sigmas: &[E],
out_point: &Point<E>,
challenges: &[E],
transcript: &mut impl Transcript<E>,
) -> Result<LayerClaims<E>, BackendError>;
}
impl<E: ExtensionField> LinearLayer<E> for Layer<E> {
fn prove<PB: ProverBackend<E = E>, PD: ProverDevice<PB>>(
&self,
wit: LayerWitness<PB>,
out_point: &Point<PB::E>,
transcript: &mut impl Transcript<PB::E>,
) -> LayerProof<PB::E> {
<PD as LinearLayerProver<PB>>::prove(self, wit, out_point, transcript)
}
fn verify(
&self,
proof: LayerProof<E>,
sigmas: &[E],
out_point: &Point<E>,
challenges: &[E],
transcript: &mut impl Transcript<E>,
) -> Result<LayerClaims<E>, BackendError> {
let LayerProof {
main: SumcheckLayerProof { evals, .. },
..
} = proof;
transcript.append_field_element_exts(&evals);
for (sigma, expr) in sigmas.iter().zip_eq(&self.exprs) {
let got = eval_by_expr_with_instance(&[], &evals, &[], &[], challenges, expr)
.right()
.unwrap();
if *sigma != got {
return Err(BackendError::LayerVerificationFailed(
self.name.clone().into(),
VerifierError::ClaimNotMatch(
format!("{}", *sigma).into(),
format!("{}", got).into(),
),
));
}
}
Ok(LayerClaims {
evals,
in_point: out_point.clone(),
})
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr/layer/hal.rs | gkr_iop/src/gkr/layer/hal.rs | use multilinear_extensions::mle::Point;
use transcript::Transcript;
use crate::{
gkr::layer::{Layer, LayerWitness, sumcheck_layer::LayerProof},
hal::ProverBackend,
selector::SelectorContext,
};
/// Backend (HAL) entry point for proving a linear layer on a given prover
/// backend (CPU or GPU).
pub trait LinearLayerProver<PB: ProverBackend> {
    /// Produce the layer proof for `layer` with witness `wit` at `out_point`.
    fn prove(
        layer: &Layer<PB::E>,
        wit: LayerWitness<PB>,
        out_point: &Point<PB::E>,
        transcript: &mut impl Transcript<PB::E>,
    ) -> LayerProof<PB::E>;
}
/// Backend (HAL) entry point for proving a plain sumcheck layer.
pub trait SumcheckLayerProver<PB: ProverBackend> {
    /// Run the layer's sumcheck over `wit` with the given global `challenges`.
    /// `num_threads` is a CPU-side parallelism hint.
    fn prove(
        layer: &Layer<PB::E>,
        num_threads: usize,
        max_num_variables: usize,
        wit: LayerWitness<'_, PB>,
        challenges: &[PB::E],
        transcript: &mut impl Transcript<PB::E>,
    ) -> LayerProof<PB::E>;
}
/// Backend (HAL) entry point for proving a zerocheck layer.
pub trait ZerocheckLayerProver<PB: ProverBackend> {
    /// Prove the zerocheck for `layer`; returns the proof together with the
    /// sumcheck's final evaluation point (handed down as the next layer's
    /// output point).
    ///
    /// `out_points` carries one point per output selector/eval group;
    /// `selector_ctxs` provides instance counts/offsets for selector
    /// evaluation.
    #[allow(clippy::too_many_arguments)]
    fn prove(
        layer: &Layer<PB::E>,
        num_threads: usize,
        max_num_variables: usize,
        wit: LayerWitness<PB>,
        out_points: &[Point<PB::E>],
        pub_io_evals: &[PB::E],
        challenges: &[PB::E],
        transcript: &mut impl Transcript<PB::E>,
        selector_ctxs: &[SelectorContext],
    ) -> (LayerProof<PB::E>, Point<PB::E>);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr/layer/zerocheck_layer.rs | gkr_iop/src/gkr/layer/zerocheck_layer.rs | use ff_ext::ExtensionField;
use itertools::{Itertools, chain, izip};
use multilinear_extensions::{
ChallengeId, Expression, StructuralWitIn, StructuralWitInType, ToExpr, WitnessId,
macros::{entered_span, exit_span},
mle::{IntoMLE, Point},
monomialize_expr_to_wit_terms,
utils::{eval_by_expr, eval_by_expr_with_instance, expr_convert_to_witins},
virtual_poly::VPAuxInfo,
};
use p3::field::{FieldAlgebra, dot_product};
use std::{marker::PhantomData, ops::Neg};
use sumcheck::{
structs::{IOPProof, IOPVerifierState, SumCheckSubClaim, VerifierError},
util::get_challenge_pows,
};
use transcript::Transcript;
use super::{Layer, LayerWitness, linear_layer::LayerClaims, sumcheck_layer::LayerProof};
use crate::{
error::BackendError,
evaluation::EvalExpression,
gkr::{
booleanhypercube::BooleanHypercube,
layer::{
ROTATION_OPENING_COUNT, hal::ZerocheckLayerProver, sumcheck_layer::SumcheckLayerProof,
},
},
hal::{ProverBackend, ProverDevice},
selector::{SelectorContext, SelectorType},
utils::{
eval_inner_repeated_incremental_vec, eval_outer_repeated_incremental_vec,
eval_stacked_constant_vec, eval_stacked_wellform_address_vec, eval_wellform_address_vec,
rotation_selector_eval,
},
};
/// Evaluation points produced by the rotation sumcheck: the two shifted points
/// (`left`, `right`) derived from the boolean-hypercube rotation, plus the
/// un-shifted `origin` point.
pub(crate) struct RotationPoints<E: ExtensionField> {
    pub left: Point<E>,
    pub right: Point<E>,
    pub origin: Point<E>,
}
/// Claims extracted from a verified rotation sumcheck: per-rotation openings
/// at the left/right/origin points (see [`RotationPoints`]).
pub(crate) struct RotationClaims<E: ExtensionField> {
    // One evaluation per rotation argument, opened at the left point.
    left_evals: Vec<E>,
    // One evaluation per rotation argument, opened at the right point.
    right_evals: Vec<E>,
    // Target-expression evaluations at the origin point.
    target_evals: Vec<E>,
    rotation_points: RotationPoints<E>,
}
/// Proving/verification interface for a zerocheck layer (all batched output
/// expressions must vanish over the hypercube, weighted by selectors).
pub trait ZerocheckLayer<E: ExtensionField> {
    // static expression on circuit setup
    /// Precompute the monomialized rotation/main sumcheck expressions; called
    /// once at circuit-setup time, before any proving.
    fn build_static_expression(&mut self);
    /// Prove the zerocheck (delegates to the backend `ZerocheckLayerProver`);
    /// returns the proof and the sumcheck's final evaluation point.
    #[allow(clippy::too_many_arguments)]
    fn prove<PB: ProverBackend<E = E>, PD: ProverDevice<PB>>(
        &self,
        num_threads: usize,
        max_num_variables: usize,
        wit: LayerWitness<PB>,
        out_points: &[Point<PB::E>],
        pub_io_evals: &[PB::E],
        challenges: &[PB::E],
        transcript: &mut impl Transcript<PB::E>,
        selector_ctxs: &[SelectorContext],
    ) -> (LayerProof<PB::E>, Point<PB::E>);
    /// Verify the zerocheck proof against the batched output claims
    /// (`eval_and_dedup_points`: per-group claimed evals plus an optional
    /// deduplicated point), returning claims for the next layer.
    #[allow(clippy::too_many_arguments)]
    fn verify(
        &self,
        max_num_variables: usize,
        proof: LayerProof<E>,
        eval_and_dedup_points: Vec<(Vec<E>, Option<Point<E>>)>,
        pub_io_evals: &[E],
        raw_pi: &[Vec<E::BaseField>],
        challenges: &[E],
        transcript: &mut impl Transcript<E>,
        selector_ctxs: &[SelectorContext],
    ) -> Result<LayerClaims<E>, BackendError>;
}
impl<E: ExtensionField> ZerocheckLayer<E> for Layer<E> {
    /// Build, once at setup time:
    /// - the rotation sumcheck expression (if this layer has rotation args),
    /// - per-output selector-weighted monomial terms, and
    /// - the batched main sumcheck expression plus its monomial-term form.
    fn build_static_expression(&mut self) {
        let span = entered_span!("gen_expr", profiling_4 = true);
        // build rotation expression
        let num_rotations = self.rotation_exprs.1.len();
        let rotation_expr = if num_rotations > 0 {
            // Challenge ids start at 2: ids 0/1 are reserved for the two global
            // challenges (matches the `skip(2)` in `verify` below).
            let alpha_pows_expr = (2..)
                .take(num_rotations)
                .map(|id| Expression::Challenge(id as ChallengeId, 1, E::ONE, E::ZERO))
                .collect_vec();
            // Witness ids come in (rotated, target) pairs: (0,1), (2,3), ...
            let rotation_expr = (0..)
                .tuples()
                .take(num_rotations)
                .zip_eq(&alpha_pows_expr)
                .map(|((rotate_wit_id, target_wit_id), alpha)| {
                    alpha * (Expression::WitIn(rotate_wit_id) - Expression::WitIn(target_wit_id))
                })
                .sum::<Expression<E>>();
            // The rotation selector witness sits right after the
            // 2 * num_rotations pair witnesses.
            let rotation_selector_expr = Expression::<E>::WitIn((num_rotations * 2) as WitnessId);
            Some(rotation_selector_expr * rotation_expr)
        } else {
            None
        };
        // generate static expression
        // Flatten (selector, eval-exprs) groups so each output expression is
        // paired with its selector and eval expression.
        let out_evals: Vec<_> = self
            .out_sel_and_eval_exprs
            .iter()
            .flat_map(|(sel_type, out_eval)| izip!(std::iter::repeat(sel_type), out_eval.iter()))
            .collect();
        self.exprs_with_selector_out_eval_monomial_form = self
            .exprs
            .iter()
            .zip_eq(out_evals.iter())
            .map(|(expr, (sel_type, out_eval))| {
                let sel_expr = sel_type.selector_expr();
                let expr = match out_eval {
                    EvalExpression::Linear(_, a, b) => {
                        // Only the a == 1 case is supported so far.
                        assert_eq!(
                            a.as_ref().clone(),
                            E::BaseField::ONE.expr(),
                            "need to extend expression to support a.inverse()"
                        );
                        // sel * exp - b
                        sel_expr.clone() * expr + b.as_ref().neg().clone()
                    }
                    EvalExpression::Single(_) => sel_expr.clone() * expr,
                    EvalExpression::Zero => Expression::ZERO,
                    EvalExpression::Partition(_, _) => unimplemented!(),
                };
                monomialize_expr_to_wit_terms(
                    &expr,
                    self.n_witin as WitnessId,
                    self.n_fixed as WitnessId,
                    self.n_instance,
                )
            })
            .collect::<Vec<_>>();
        // build main sumcheck expression
        // One alpha power per output expression plus three per rotation
        // (left/right/origin openings).
        let alpha_pows_expr = (2..)
            .take(self.exprs.len() + num_rotations * ROTATION_OPENING_COUNT)
            .map(|id| Expression::Challenge(id as ChallengeId, 1, E::ONE, E::ZERO))
            .collect_vec();
        let mut zero_expr = extend_exprs_with_rotation(self, &alpha_pows_expr)
            .into_iter()
            .sum::<Expression<E>>();
        self.rotation_sumcheck_expression = rotation_expr.clone();
        self.rotation_sumcheck_expression_monomial_terms =
            self.rotation_sumcheck_expression.as_ref().map(|expr| {
                monomialize_expr_to_wit_terms(
                    expr,
                    self.n_witin as WitnessId,
                    self.n_fixed as WitnessId,
                    self.n_instance,
                )
            });
        // Re-index all polynomial references into a single flat WitIn space.
        expr_convert_to_witins(
            &mut zero_expr,
            self.n_witin as WitnessId,
            self.n_fixed as WitnessId,
            self.n_instance,
        );
        tracing::trace!("{} main sumcheck degree: {}", self.name, zero_expr.degree());
        self.main_sumcheck_expression = Some(zero_expr);
        self.main_sumcheck_expression_monomial_terms = self
            .main_sumcheck_expression
            .as_ref()
            .map(|expr| expr.get_monomial_terms());
        tracing::trace!(
            "{} main sumcheck monomial terms count: {}",
            self.name,
            self.main_sumcheck_expression_monomial_terms
                .as_ref()
                .map_or(0, |terms| terms.len()),
        );
        exit_span!(span);
    }
    /// Delegate proving to the backend-specific `ZerocheckLayerProver`.
    fn prove<PB: ProverBackend<E = E>, PD: ProverDevice<PB>>(
        &self,
        num_threads: usize,
        max_num_variables: usize,
        wit: LayerWitness<PB>,
        out_points: &[Point<PB::E>],
        pub_io_evals: &[PB::E],
        challenges: &[PB::E],
        transcript: &mut impl Transcript<PB::E>,
        selector_ctxs: &[SelectorContext],
    ) -> (LayerProof<PB::E>, Point<PB::E>) {
        <PD as ZerocheckLayerProver<PB>>::prove(
            self,
            num_threads,
            max_num_variables,
            wit,
            out_points,
            pub_io_evals,
            challenges,
            transcript,
            selector_ctxs,
        )
    }
    /// Verify the zerocheck: optionally verify the rotation sub-proof, batch
    /// all output (and rotation) claims with fresh challenge powers, verify
    /// the main sumcheck, then check selector / structural-witness / public-io
    /// evaluations before reducing to input claims at `in_point`.
    fn verify(
        &self,
        max_num_variables: usize,
        proof: LayerProof<E>,
        mut eval_and_dedup_points: Vec<(Vec<E>, Option<Point<E>>)>,
        pub_io_evals: &[E],
        raw_pi: &[Vec<E::BaseField>],
        challenges: &[E],
        transcript: &mut impl Transcript<E>,
        selector_ctxs: &[SelectorContext],
    ) -> Result<LayerClaims<E>, BackendError> {
        assert_eq!(
            self.out_sel_and_eval_exprs.len(),
            eval_and_dedup_points.len(),
            "out eval length {} != with eval_and_dedup_points {}",
            self.out_sel_and_eval_exprs.len(),
            eval_and_dedup_points.len(),
        );
        let LayerProof {
            main:
                SumcheckLayerProof {
                    proof: IOPProof { proofs },
                    evals: main_evals,
                },
            rotation: rotation_proof,
        } = proof;
        // Layout of main_evals: witin ++ fixed ++ instance ++ structural witin.
        assert_eq!(
            main_evals.len(),
            self.n_witin + self.n_fixed + self.n_instance + self.n_structural_witin,
            "invalid main_evals length",
        );
        if let Some(rotation_proof) = rotation_proof {
            // verify rotation proof
            let rt = eval_and_dedup_points
                .first()
                .and_then(|(_, rt)| rt.as_ref())
                .expect("rotation proof should have at least one point");
            let RotationClaims {
                left_evals,
                right_evals,
                target_evals,
                rotation_points:
                    RotationPoints {
                        left: left_point,
                        right: right_point,
                        origin: origin_point,
                    },
            } = verify_rotation(
                max_num_variables,
                self.rotation_exprs.1.len(),
                self.rotation_sumcheck_expression.as_ref().unwrap(),
                rotation_proof,
                self.rotation_cyclic_subgroup_size,
                self.rotation_cyclic_group_log2,
                rt,
                challenges,
                transcript,
            )?;
            // Rotation claims become three extra output-claim groups batched
            // into the main sumcheck below.
            eval_and_dedup_points.push((left_evals, Some(left_point)));
            eval_and_dedup_points.push((right_evals, Some(right_point)));
            eval_and_dedup_points.push((target_evals, Some(origin_point)));
        }
        let rotation_exprs_len = self.rotation_exprs.1.len();
        // Global challenges (2) followed by freshly squeezed alpha powers;
        // indices match the Challenge ids used in `build_static_expression`.
        let main_sumcheck_challenges = chain!(
            challenges.iter().copied(),
            get_challenge_pows(
                self.exprs.len() + rotation_exprs_len * ROTATION_OPENING_COUNT,
                transcript,
            )
        )
        .collect_vec();
        // Batched claim: sum over alpha^i * sigma_i for all output claims.
        let sigma = dot_product(
            main_sumcheck_challenges.iter().skip(2).copied(), // skip first 2 global challenges
            eval_and_dedup_points
                .iter()
                .flat_map(|(sigmas, _)| sigmas)
                .copied(),
        );
        let SumCheckSubClaim {
            point: in_point,
            expected_evaluation,
        } = IOPVerifierState::verify(
            sigma,
            &IOPProof { proofs },
            &VPAuxInfo {
                max_degree: self.max_expr_degree + 1, // +1 due to eq
                max_num_variables,
                phantom: PhantomData,
            },
            transcript,
        );
        let in_point = in_point.into_iter().map(|c| c.elements).collect_vec();
        let structural_witin_offset = self.n_witin + self.n_fixed + self.n_instance;
        // eval selector and set to respective witin
        // NOTE: izip! truncates at the shortest input, so the rotation groups
        // appended above (with no matching selector_ctx) are skipped here.
        izip!(
            &self.out_sel_and_eval_exprs,
            &eval_and_dedup_points,
            selector_ctxs.iter()
        )
        .for_each(|((sel_type, _), (_, out_point), selector_ctx)| {
            if let Some((expected_eval, wit_id)) =
                sel_type.evaluate(out_point.as_ref().unwrap(), &in_point, selector_ctx)
            {
                let wit_id = wit_id as usize + structural_witin_offset;
                assert_eq!(main_evals[wit_id], expected_eval);
            }
        });
        // check structural witin
        // Structural witnesses have closed-form evaluations the verifier can
        // recompute directly at `in_point`.
        for StructuralWitIn { id, witin_type } in &self.structural_witins {
            let wit_id = *id as usize + structural_witin_offset;
            let expected_eval = match witin_type {
                StructuralWitInType::EqualDistanceSequence {
                    offset,
                    multi_factor,
                    descending,
                    ..
                } => eval_wellform_address_vec(
                    *offset as u64,
                    *multi_factor as u64,
                    &in_point,
                    *descending,
                ),
                StructuralWitInType::EqualDistanceDynamicSequence {
                    offset_instance_id,
                    multi_factor,
                    descending,
                    ..
                } => {
                    // Offset comes from a public-io value, known to the verifier.
                    let offset = pub_io_evals[*offset_instance_id as usize].to_canonical_u64();
                    eval_wellform_address_vec(offset, *multi_factor as u64, &in_point, *descending)
                }
                StructuralWitInType::StackedIncrementalSequence { .. } => {
                    eval_stacked_wellform_address_vec(&in_point)
                }
                StructuralWitInType::StackedConstantSequence { .. } => {
                    eval_stacked_constant_vec(&in_point)
                }
                StructuralWitInType::InnerRepeatingIncrementalSequence { k, .. } => {
                    eval_inner_repeated_incremental_vec(*k as u64, &in_point)
                }
                StructuralWitInType::OuterRepeatingIncrementalSequence { k, .. } => {
                    eval_outer_repeated_incremental_vec(*k as u64, &in_point)
                }
                StructuralWitInType::Empty => continue,
            };
            if expected_eval != main_evals[wit_id] {
                return Err(BackendError::LayerVerificationFailed(
                    format!("layer {} structural witin mismatch", self.name.clone()).into(),
                    VerifierError::ClaimNotMatch(
                        format!("{}", expected_eval).into(),
                        format!("{}", main_evals[wit_id]).into(),
                    ),
                ));
            }
        }
        // check pub-io
        // assume public io is tiny vector, so we evaluate it directly without PCS
        let pubio_offset = self.n_witin + self.n_fixed;
        for (index, instance) in self.instance_openings.iter().enumerate() {
            let index = pubio_offset + index;
            let poly = raw_pi[instance.0].to_vec().into_mle();
            let expected_eval = poly.evaluate(&in_point[..poly.num_vars()]);
            if expected_eval != main_evals[index] {
                return Err(BackendError::LayerVerificationFailed(
                    format!("layer {} pi mismatch", self.name.clone()).into(),
                    VerifierError::ClaimNotMatch(
                        format!("{}", expected_eval).into(),
                        format!("{}", main_evals[index]).into(),
                    ),
                ));
            }
        }
        // Finally re-evaluate the batched zero expression on the opened evals
        // and compare against the sumcheck's expected final evaluation.
        let got_claim = eval_by_expr_with_instance(
            &[],
            &main_evals,
            &[],
            pub_io_evals,
            &main_sumcheck_challenges,
            self.main_sumcheck_expression.as_ref().unwrap(),
        )
        .map_either(E::from, |v| v)
        .into_inner();
        if got_claim != expected_evaluation {
            return Err(BackendError::LayerVerificationFailed(
                self.name.clone().into(),
                VerifierError::ClaimNotMatch(
                    format!("{}", expected_evaluation).into(),
                    format!("{}", got_claim).into(),
                ),
            ));
        }
        Ok(LayerClaims {
            in_point,
            evals: main_evals,
        })
    }
}
/// Verify the rotation sumcheck and reconstruct per-rotation claims.
///
/// Checks the zero-sum claim for the batched rotation identity
/// `sel(b) * sum_i alpha_i * (rotated_i(b) - target_i(b))`, then folds each
/// (left, right, target) opening triple back into the witness evaluations the
/// rotation expression expects. Returns the openings grouped by point.
#[allow(clippy::too_many_arguments)]
fn verify_rotation<E: ExtensionField>(
    max_num_variables: usize,
    num_rotations: usize,
    rotation_sumcheck_expression: &Expression<E>,
    rotation_proof: SumcheckLayerProof<E>,
    rotation_cyclic_subgroup_size: usize,
    rotation_cyclic_group_log2: usize,
    rt: &Point<E>,
    challenges: &[E],
    transcript: &mut impl Transcript<E>,
) -> Result<RotationClaims<E>, BackendError> {
    let SumcheckLayerProof { proof, evals } = rotation_proof;
    // Three openings (left, right, target) per rotation argument.
    assert_eq!(num_rotations * 3, evals.len());
    let rotation_challenges = chain!(
        challenges.iter().copied(),
        get_challenge_pows(num_rotations, transcript)
    )
    .collect_vec();
    // The rotation identity must sum to zero over the hypercube.
    let sigma = E::ZERO;
    let SumCheckSubClaim {
        point: in_point,
        expected_evaluation,
    } = IOPVerifierState::verify(
        sigma,
        &proof,
        &VPAuxInfo {
            max_degree: 2, // selector * (rotated - target)
            max_num_variables,
            phantom: PhantomData,
        },
        transcript,
    );
    let origin_point = in_point.into_iter().map(|c| c.elements).collect_vec();
    // compute the selector evaluation
    let bh = BooleanHypercube::new(rotation_cyclic_group_log2);
    let selector_eval = rotation_selector_eval(
        &bh,
        rt,
        &origin_point,
        rotation_cyclic_subgroup_size,
        rotation_cyclic_group_log2,
    );
    // check the final evaluations.
    let mut left_evals = Vec::with_capacity(evals.len() / 3);
    let mut right_evals = Vec::with_capacity(evals.len() / 3);
    let mut target_evals = Vec::with_capacity(evals.len() / 3);
    // For each triple, the rotated-witness evaluation is the linear
    // interpolation of the left/right openings on the last cyclic-group
    // variable; the closure also records the triples for the returned claims.
    let got_claim = eval_by_expr(
        &evals
            .chunks_exact(3)
            .flat_map(|evals| {
                let [left_eval, right_eval, target_eval] = evals else {
                    unreachable!()
                };
                left_evals.push(*left_eval);
                right_evals.push(*right_eval);
                target_evals.push(*target_eval);
                [
                    (E::ONE - origin_point[rotation_cyclic_group_log2 - 1]) * *left_eval
                        + origin_point[rotation_cyclic_group_log2 - 1] * *right_eval,
                    *target_eval,
                ]
            })
            // Selector witness comes last, matching the WitIn layout built in
            // `build_static_expression`.
            .chain(std::iter::once(selector_eval))
            .collect_vec(),
        &[],
        &rotation_challenges,
        rotation_sumcheck_expression,
    );
    if got_claim != expected_evaluation {
        return Err(BackendError::LayerVerificationFailed(
            "rotation verify failed".to_string().into(),
            VerifierError::ClaimNotMatch(
                format!("{}", expected_evaluation).into(),
                format!("{}", got_claim).into(),
            ),
        ));
    }
    let (left_point, right_point) =
        BooleanHypercube::new(rotation_cyclic_group_log2).get_rotation_points(&origin_point);
    Ok(RotationClaims {
        left_evals,
        right_evals,
        target_evals,
        rotation_points: RotationPoints {
            left: left_point,
            right: right_point,
            origin: origin_point,
        },
    })
}
/// Build the per-group zerocheck expressions (selector * alpha-batched exprs)
/// and append the three rotation opening expressions (left / right / target)
/// when the layer has rotation arguments.
///
/// The single `alpha_pows_iter` is consumed in order: first one alpha per
/// output expression, then one per rotation for each of the three openings —
/// so left/right/target each get *distinct* alpha powers even though the
/// bodies look similar. The final assert checks exact consumption.
pub fn extend_exprs_with_rotation<E: ExtensionField>(
    layer: &Layer<E>,
    alpha_pows: &[Expression<E>],
) -> Vec<Expression<E>> {
    // Structural witnesses are re-indexed after witin ++ fixed ++ instance.
    let offset_structural_witid = (layer.n_witin + layer.n_fixed + layer.n_instance) as WitnessId;
    let mut alpha_pows_iter = alpha_pows.iter();
    let mut expr_iter = layer.exprs.iter();
    let mut zero_check_exprs = Vec::with_capacity(layer.out_sel_and_eval_exprs.len());
    // Selector expressions must be structural witnesses; map them into the
    // flat WitIn index space.
    let match_expr = |sel_expr: &Expression<E>| match sel_expr {
        Expression::StructuralWitIn(id, ..) => Expression::WitIn(offset_structural_witid + *id),
        invalid => panic!("invalid eq format {:?}", invalid),
    };
    for (sel_type, out_evals) in layer.out_sel_and_eval_exprs.iter() {
        let group_length = out_evals.len();
        // Random linear combination of this group's expressions.
        let zero_check_expr = expr_iter
            .by_ref()
            .take(group_length)
            .cloned()
            .zip_eq(alpha_pows_iter.by_ref().take(group_length))
            .map(|(expr, alpha)| alpha * expr)
            .sum::<Expression<E>>();
        let expr = match sel_type {
            SelectorType::None => zero_check_expr,
            SelectorType::Whole(sel)
            | SelectorType::Prefix(sel)
            | SelectorType::OrderedSparse32 {
                expression: sel, ..
            }
            | SelectorType::QuarkBinaryTreeLessThan(sel) => match_expr(sel) * zero_check_expr,
        };
        zero_check_exprs.push(expr);
    }
    // prepare rotation expr
    let (rotation_eq, rotation_exprs) = &layer.rotation_exprs;
    if rotation_eq.is_none() {
        return zero_check_exprs;
    }
    // Left opening: rotated witnesses, batched with the next block of alphas.
    let left_rotation_expr: Expression<E> = izip!(
        rotation_exprs.iter(),
        alpha_pows_iter.by_ref().take(rotation_exprs.len())
    )
    .map(|((rotate_expr, _), alpha)| {
        assert!(matches!(rotate_expr, Expression::WitIn(_)));
        alpha * rotate_expr
    })
    .sum();
    // Right opening: same witnesses but a *fresh* block of alpha powers.
    let right_rotation_expr: Expression<E> = izip!(
        rotation_exprs.iter(),
        alpha_pows_iter.by_ref().take(rotation_exprs.len())
    )
    .map(|((rotate_expr, _), alpha)| {
        assert!(matches!(rotate_expr, Expression::WitIn(_)));
        alpha * rotate_expr
    })
    .sum();
    // Target opening: the rotation targets, with the last block of alphas.
    let rotation_expr: Expression<E> = izip!(
        rotation_exprs.iter(),
        alpha_pows_iter.by_ref().take(rotation_exprs.len())
    )
    .map(|((_, expr), alpha)| {
        assert!(matches!(expr, Expression::WitIn(_)));
        alpha * expr
    })
    .sum();
    // push rotation expr to zerocheck expr
    if let Some(
        [
            rotation_left_eq_expr,
            rotation_right_eq_expr,
            rotation_eq_expr,
        ],
    ) = rotation_eq.as_ref()
    {
        let (rotation_left_eq_expr, rotation_right_eq_expr, rotation_eq_expr) = match (
            rotation_left_eq_expr,
            rotation_right_eq_expr,
            rotation_eq_expr,
        ) {
            (
                Expression::StructuralWitIn(left_eq_id, ..),
                Expression::StructuralWitIn(right_eq_id, ..),
                Expression::StructuralWitIn(eq_id, ..),
            ) => (
                Expression::WitIn(offset_structural_witid + *left_eq_id),
                Expression::WitIn(offset_structural_witid + *right_eq_id),
                Expression::WitIn(offset_structural_witid + *eq_id),
            ),
            invalid => panic!("invalid eq format {:?}", invalid),
        };
        // add rotation left expr
        zero_check_exprs.push(rotation_left_eq_expr * left_rotation_expr);
        // add rotation right expr
        zero_check_exprs.push(rotation_right_eq_expr * right_rotation_expr);
        // add target expr
        zero_check_exprs.push(rotation_eq_expr * rotation_expr);
    }
    // Both iterators must be fully consumed; otherwise the challenge-id layout
    // would be inconsistent with `build_static_expression`.
    assert!(expr_iter.next().is_none() && alpha_pows_iter.next().is_none());
    zero_check_exprs
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr/layer/gpu/utils.rs | gkr_iop/src/gkr/layer/gpu/utils.rs | use crate::{
gkr::{booleanhypercube::BooleanHypercube, layer::LayerWitness},
gpu::GpuBackend,
};
use either::Either;
use ff_ext::ExtensionField;
use itertools::Itertools;
use mpcs::PolynomialCommitmentScheme;
use multilinear_extensions::{
Expression, mle::Point, monomial::Term, utils::eval_by_expr_constant,
};
use crate::selector::{SelectorContext, SelectorType};
use crate::gpu::{MultilinearExtensionGpu, gpu_prover::*};
use crate::hal::MultilinearPolynomial;
/// Flatten monomial terms into the parallel arrays the GPU sumcheck kernel
/// consumes: per-term scalar coefficients, per-term MLE index lists, and
/// (num_vars, num_vars) size info taken from each term's first MLE factor
/// (terms with an empty product contribute no size entry).
#[allow(clippy::type_complexity)]
pub fn extract_mle_relationships_from_monomial_terms<'a, E: ExtensionField>(
    monomial_terms: &[Term<Expression<E>, Expression<E>>],
    all_mles: &[&MultilinearExtensionGpu<'a, E>],
    public_io_evals: &[Either<E::BaseField, E>],
    challenges: &[E],
) -> (Vec<E>, Vec<Vec<usize>>, Vec<(usize, usize)>) {
    let mut coefficients = Vec::with_capacity(monomial_terms.len());
    let mut indices_per_term = Vec::with_capacity(monomial_terms.len());
    let mut size_info = Vec::new();
    for term in monomial_terms {
        // Fold the constant part of the term down to one extension-field scalar.
        let scalar = match eval_by_expr_constant(public_io_evals, challenges, &term.scalar) {
            Either::Left(base) => E::from(base),
            Either::Right(ext) => ext,
        };
        coefficients.push(scalar);
        // Every factor of the product must be a plain witness reference.
        let indices: Vec<usize> = term
            .product
            .iter()
            .map(|expr| match expr {
                Expression::WitIn(witin_id) => *witin_id as usize,
                other => panic!("Unsupported expression in product: {:?}", other),
            })
            .collect();
        // Record size info from the first factor before storing the indices.
        if let Some(&first) = indices.first() {
            let num_vars = all_mles[first].mle.num_vars();
            size_info.push((num_vars, num_vars));
        }
        indices_per_term.push(indices);
    }
    (coefficients, indices_per_term, size_info)
}
/// Build the selector-masked `eq(x, r)` table on the GPU for `point`.
///
/// Depending on `selector`, either the full table is built (`Whole`), a
/// prefix of `num_instances` entries (`Prefix`), or a sparse selection
/// (`OrderedSparse32`). `QuarkBinaryTreeLessThan` is not yet supported.
///
/// # Panics
/// Panics when `E::BaseField` is not `BB31Base` — this GPU path is
/// monomorphized for the BB31 field only (the transmutes below rely on it).
pub fn build_eq_x_r_with_sel_gpu<E: ExtensionField>(
    hal: &CudaHalBB31,
    point: &Point<E>,
    selector_ctx: &SelectorContext,
    selector: &SelectorType<E>,
) -> MultilinearExtensionGpu<'static, E> {
    // Bug fix: the old message claimed "Goldilocks", but the check (and the
    // whole HAL) is for the BB31 base field.
    if std::any::TypeId::of::<E::BaseField>() != std::any::TypeId::of::<BB31Base>() {
        panic!("GPU backend only supports the BB31 base field");
    }
    let eq_len = 1 << point.len();
    let (num_instances, is_sp32, indices) = match selector {
        SelectorType::None => panic!("SelectorType::None"),
        SelectorType::Whole(_expr) => (eq_len, false, vec![]),
        SelectorType::Prefix(_expr) => (selector_ctx.num_instances, false, vec![]),
        SelectorType::OrderedSparse32 { indices, .. } => {
            (selector_ctx.num_instances, true, indices.clone())
        }
        SelectorType::QuarkBinaryTreeLessThan(..) => unimplemented!(),
    };
    // type eq
    let eq_mle = if is_sp32 {
        assert_eq!(selector_ctx.offset, 0);
        // Build the dense eq table first, then zero out non-selected slots.
        let eq = build_eq_x_r_gpu(hal, point);
        let mut eq_buf = match eq.mle {
            GpuFieldType::Base(_) => panic!("should be ext field"),
            GpuFieldType::Ext(mle) => mle,
            GpuFieldType::Unreachable => panic!("Unreachable GpuFieldType"),
        };
        let indices_u32 = indices.iter().map(|x| *x as u32).collect_vec();
        ordered_sparse32_selector_gpu::<CudaHalBB31, BB31Ext, BB31Base>(
            &hal.inner,
            &mut eq_buf.buf,
            &indices_u32,
            num_instances,
        )
        .unwrap();
        eq_buf
    } else {
        // SAFETY: E == BB31Ext was checked via TypeId above, so Point<E> and
        // Point<BB31Ext> have identical layout.
        let point_bb31: &Point<BB31Ext> = unsafe { std::mem::transmute(point) };
        let mut gpu_output = hal.alloc_ext_elems_on_device(eq_len, false).unwrap();
        let gpu_points = hal.alloc_ext_elems_from_host(point_bb31).unwrap();
        build_mle_as_ceno::<CudaHalBB31, BB31Ext, BB31Base>(
            &hal.inner,
            &gpu_points,
            &mut gpu_output,
            selector_ctx.offset,
            num_instances,
        )
        .unwrap();
        GpuPolynomialExt::new(gpu_output, point.len())
    };
    let mle_bb31 = MultilinearExtensionGpu::from_ceno_gpu_ext(eq_mle);
    // SAFETY: same TypeId-verified E == BB31Ext identity as above.
    unsafe {
        std::mem::transmute::<
            MultilinearExtensionGpu<'static, BB31Ext>,
            MultilinearExtensionGpu<'static, E>,
        >(mle_bb31)
    }
}
/// Build the full `eq(x, r)` table (length `2^point.len()`) on the GPU.
///
/// # Panics
/// Panics when `E::BaseField` is not `BB31Base` — this GPU path is
/// monomorphized for the BB31 field only (the transmutes below rely on it).
pub fn build_eq_x_r_gpu<E: ExtensionField>(
    hal: &CudaHalBB31,
    point: &Point<E>,
) -> MultilinearExtensionGpu<'static, E> {
    // Bug fix: the old message claimed "Goldilocks", but the check (and the
    // whole HAL) is for the BB31 base field.
    if std::any::TypeId::of::<E::BaseField>() != std::any::TypeId::of::<BB31Base>() {
        panic!("GPU backend only supports the BB31 base field");
    }
    let eq_len = 1 << point.len();
    // type eq
    // SAFETY: E == BB31Ext was checked via TypeId above, so Point<E> and
    // Point<BB31Ext> have identical layout.
    let point_bb31: &Point<BB31Ext> = unsafe { std::mem::transmute(point) };
    let eq_mle = {
        let mut gpu_output = hal.alloc_ext_elems_on_device(eq_len, false).unwrap();
        let gpu_points = hal.alloc_ext_elems_from_host(point_bb31).unwrap();
        build_mle_as_ceno::<CudaHalBB31, BB31Ext, BB31Base>(
            &hal.inner,
            &gpu_points,
            &mut gpu_output,
            0,
            eq_len,
        )
        .unwrap();
        GpuPolynomialExt::new(gpu_output, point.len())
    };
    let mle_bb31 = MultilinearExtensionGpu::from_ceno_gpu_ext(eq_mle);
    // SAFETY: same TypeId-verified E == BB31Ext identity as above.
    unsafe {
        std::mem::transmute::<
            MultilinearExtensionGpu<'static, BB31Ext>,
            MultilinearExtensionGpu<'static, E>,
        >(mle_bb31)
    }
}
/// For each rotation argument, build the rotated witness MLE on the GPU by
/// permuting the source witness along the cyclic subgroup given by `bh`.
///
/// Only `Expression::WitIn` sources with base-field evaluations are supported.
pub fn build_rotation_mles_gpu<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>(
    cuda_hal: &CudaHalBB31,
    raw_rotation_exprs: &[(Expression<E>, Expression<E>)],
    wit: &LayerWitness<GpuBackend<E, PCS>>,
    bh: &BooleanHypercube,
    rotation_cyclic_group_log2: usize,
) -> Vec<MultilinearExtensionGpu<'static, E>> {
    raw_rotation_exprs
        .iter()
        .map(|rotation_expr| match rotation_expr {
            (Expression::WitIn(source_wit_id), _) => {
                let cyclic_group_size = 1 << rotation_cyclic_group_log2;
                // Rotation permutation: the hypercube's cyclic ordering,
                // materialized as a u32 index table for the kernel.
                let rotation_index = bh
                    .into_iter()
                    .take(cyclic_group_size)
                    .map(|x| x as u32)
                    .collect_vec();
                let input_mle = wit[*source_wit_id as usize].as_ref();
                let input_buf = match &input_mle.mle {
                    GpuFieldType::Base(poly) => poly.evaluations(),
                    GpuFieldType::Ext(_) => panic!("should be base field"),
                    _ => panic!("unimplemented input mle"),
                };
                let mut output_buf = cuda_hal
                    .alloc_elems_on_device(input_buf.len(), false)
                    .unwrap();
                // Safety: GPU buffers are actually 'static lifetime. We only read from input_buf
                // during the GPU kernel execution, which completes synchronously before returning.
                let input_buf_static: &BufferImpl<'static, BB31Base> =
                    unsafe { std::mem::transmute(input_buf) };
                rotation_next_base_mle_gpu::<CudaHalBB31, BB31Ext, BB31Base>(
                    &cuda_hal.inner,
                    &mut output_buf,
                    input_buf_static,
                    &rotation_index,
                    cyclic_group_size,
                )
                .unwrap();
                let output_mle = MultilinearExtensionGpu::from_ceno_gpu_base(GpuPolynomial::new(
                    output_buf,
                    input_mle.mle.num_vars(),
                ));
                // NOTE(review): assumes E == BB31Ext at runtime; unlike the
                // eq builders there is no TypeId check here — confirm callers
                // only reach this with the BB31 field.
                unsafe {
                    std::mem::transmute::<
                        MultilinearExtensionGpu<'static, BB31Ext>,
                        MultilinearExtensionGpu<'static, E>,
                    >(output_mle)
                }
            }
            _ => unimplemented!("unimplemented rotation"),
        })
        .collect::<Vec<_>>()
}
/// Build the rotation-selector MLE on the GPU: an eq(x, rt) table masked to
/// the first `rotation_cyclic_subgroup_size` elements of each cyclic group.
pub fn build_rotation_selector_gpu<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>(
    cuda_hal: &CudaHalBB31,
    wit: &LayerWitness<GpuBackend<E, PCS>>,
    rt: &Point<E>,
    bh: &BooleanHypercube,
    rotation_cyclic_subgroup_size: usize,
    rotation_cyclic_group_log2: usize,
) -> MultilinearExtensionGpu<'static, E> {
    let total_len = wit[0].evaluations_len(); // Take first mle just to retrieve total length
    assert!(total_len.is_power_of_two());
    let mut output_buf = cuda_hal
        .alloc_ext_elems_on_device(total_len, false)
        .unwrap();
    let eq = build_eq_x_r_gpu(cuda_hal, rt);
    let eq_buf_owned = match eq.mle {
        GpuFieldType::Base(_) => panic!("should be ext field"),
        GpuFieldType::Ext(mle) => mle.buf,
        GpuFieldType::Unreachable => panic!("Unreachable GpuFieldType"),
    };
    // Cyclic-subgroup index table consumed by the selector kernel.
    let rotation_index = bh
        .into_iter()
        .take(rotation_cyclic_subgroup_size)
        .map(|x| x as u32)
        .collect_vec();
    rotation_selector_gpu::<CudaHalBB31, BB31Ext, BB31Base>(
        &cuda_hal.inner,
        &mut output_buf,
        &eq_buf_owned,
        &rotation_index,
        1 << rotation_cyclic_group_log2,
        rotation_cyclic_subgroup_size,
    )
    .unwrap();
    let output_mle = MultilinearExtensionGpu::from_ceno_gpu_ext(GpuPolynomialExt::new(
        output_buf,
        total_len.ilog2() as usize,
    ));
    // NOTE(review): assumes E == BB31Ext at runtime (no TypeId check here);
    // `build_eq_x_r_gpu` above would already have panicked otherwise.
    unsafe {
        std::mem::transmute::<
            MultilinearExtensionGpu<'static, BB31Ext>,
            MultilinearExtensionGpu<'static, E>,
        >(output_mle)
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr/layer/gpu/mod.rs | gkr_iop/src/gkr/layer/gpu/mod.rs | use crate::{
cpu::{CpuBackend, CpuProver},
gkr::{
booleanhypercube::BooleanHypercube,
layer::{
Layer, LayerWitness,
hal::{SumcheckLayerProver, ZerocheckLayerProver},
zerocheck_layer::RotationPoints,
},
},
gpu::{GpuBackend, GpuProver},
};
use either::Either;
use ff_ext::ExtensionField;
use itertools::{Itertools, chain};
use mpcs::PolynomialCommitmentScheme;
use multilinear_extensions::{
Expression,
mle::{MultilinearExtension, Point},
monomial::Term,
};
use rayon::{
iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator},
slice::ParallelSlice,
};
use std::sync::Arc;
use sumcheck::{
macros::{entered_span, exit_span},
structs::IOPProof,
util::get_challenge_pows,
};
use transcript::{BasicTranscript, Transcript};
use crate::{
gkr::layer::{
ROTATION_OPENING_COUNT,
hal::LinearLayerProver,
sumcheck_layer::{LayerProof, SumcheckLayerProof},
},
hal::ProverBackend,
};
use crate::gpu::{MultilinearExtensionGpu, gpu_prover::*};
pub mod utils;
use crate::selector::SelectorContext;
use utils::*;
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> LinearLayerProver<GpuBackend<E, PCS>>
    for GpuProver<GpuBackend<E, PCS>>
{
    /// Prove a linear layer by copying the witness back to host memory and
    /// delegating to the CPU linear-layer prover.
    fn prove(
        layer: &Layer<E>,
        wit: LayerWitness<GpuBackend<E, PCS>>,
        out_point: &multilinear_extensions::mle::Point<E>,
        transcript: &mut impl transcript::Transcript<E>,
    ) -> crate::gkr::layer::sumcheck_layer::LayerProof<E> {
        let span = entered_span!("LinearLayerProver", profiling_2 = true);
        // Move every GPU MLE back to a host-side MultilinearExtension.
        let mut host_wits: Vec<Arc<MultilinearExtension<'_, E>>> = Vec::with_capacity(wit.0.len());
        for gpu_mle in wit.0 {
            host_wits.push(Arc::new(gpu_mle.inner_to_mle()));
        }
        let proof = <CpuProver<CpuBackend<E, PCS>> as LinearLayerProver<CpuBackend<E, PCS>>>::prove(
            layer,
            LayerWitness::<CpuBackend<E, PCS>>(host_wits),
            out_point,
            transcript,
        );
        exit_span!(span);
        proof
    }
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> SumcheckLayerProver<GpuBackend<E, PCS>>
    for GpuProver<GpuBackend<E, PCS>>
{
    /// Prove a sumcheck layer by copying the witness back to host memory and
    /// delegating to the CPU sumcheck prover.
    fn prove(
        layer: &Layer<E>,
        num_threads: usize,
        max_num_variables: usize,
        wit: LayerWitness<'_, GpuBackend<E, PCS>>,
        challenges: &[<GpuBackend<E, PCS> as ProverBackend>::E],
        transcript: &mut impl Transcript<<GpuBackend<E, PCS> as ProverBackend>::E>,
    ) -> LayerProof<<GpuBackend<E, PCS> as ProverBackend>::E> {
        let span = entered_span!("SumcheckLayerProver", profiling_2 = true);
        // Move every GPU MLE back to a host-side MultilinearExtension.
        let mut host_wits: Vec<Arc<MultilinearExtension<'_, E>>> = Vec::with_capacity(wit.0.len());
        for gpu_mle in wit.0 {
            host_wits.push(Arc::new(gpu_mle.inner_to_mle()));
        }
        let proof =
            <CpuProver<CpuBackend<E, PCS>> as SumcheckLayerProver<CpuBackend<E, PCS>>>::prove(
                layer,
                num_threads,
                max_num_variables,
                LayerWitness::<CpuBackend<E, PCS>>(host_wits),
                challenges,
                transcript,
            );
        exit_span!(span);
        proof
    }
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> ZerocheckLayerProver<GpuBackend<E, PCS>>
    for GpuProver<GpuBackend<E, PCS>>
{
    /// GPU zerocheck prover: optionally runs the rotation sumcheck first, then
    /// batches all output constraints (plus rotation openings) into one main
    /// sumcheck executed on the device.
    ///
    /// NOTE(review): the `unsafe` transmutes below assume `E == BB31Ext` at
    /// runtime; unlike `build_eq_x_r_gpu` there is no `TypeId` check at this
    /// level — confirm the backend guarantees it.
    fn prove(
        layer: &Layer<<GpuBackend<E, PCS> as ProverBackend>::E>,
        _num_threads: usize,
        max_num_variables: usize,
        wit: LayerWitness<GpuBackend<E, PCS>>,
        out_points: &[Point<<GpuBackend<E, PCS> as ProverBackend>::E>],
        pub_io_evals: &[<GpuBackend<E, PCS> as ProverBackend>::E],
        challenges: &[<GpuBackend<E, PCS> as ProverBackend>::E],
        transcript: &mut impl Transcript<<GpuBackend<E, PCS> as ProverBackend>::E>,
        selector_ctxs: &[SelectorContext],
    ) -> (
        LayerProof<<GpuBackend<E, PCS> as ProverBackend>::E>,
        Point<<GpuBackend<E, PCS> as ProverBackend>::E>,
    ) {
        let span = entered_span!("ZerocheckLayerProver", profiling_2 = true);
        let num_threads = 1; // VP builder for GPU: do not use _num_threads
        // The two global challenges; alpha powers are squeezed separately below.
        assert_eq!(challenges.len(), 2);
        assert_eq!(
            layer.out_sel_and_eval_exprs.len(),
            out_points.len(),
            "out eval length {} != with distinct out_point {}",
            layer.out_sel_and_eval_exprs.len(),
            out_points.len(),
        );
        let (_, raw_rotation_exprs) = &layer.rotation_exprs;
        let (rotation_proof, rotation_left, rotation_right, rotation_point) =
            if let Some(rotation_sumcheck_expression) =
                layer.rotation_sumcheck_expression_monomial_terms.as_ref()
            {
                // 1st sumcheck: process rotation_exprs
                let rt = out_points.first().unwrap();
                let (
                    proof,
                    RotationPoints {
                        left,
                        right,
                        origin,
                    },
                ) = prove_rotation_gpu(
                    num_threads,
                    max_num_variables,
                    layer.rotation_cyclic_subgroup_size,
                    layer.rotation_cyclic_group_log2,
                    &wit,
                    raw_rotation_exprs,
                    rotation_sumcheck_expression.clone(),
                    rt,
                    challenges,
                    transcript,
                );
                (Some(proof), Some(left), Some(right), Some(origin))
            } else {
                (None, None, None, None)
            };
        // 2th sumcheck: batch rotation with other constrains
        let main_sumcheck_challenges = chain!(
            challenges.iter().copied(),
            get_challenge_pows(
                layer.exprs.len() + raw_rotation_exprs.len() * ROTATION_OPENING_COUNT,
                transcript,
            )
        )
        .collect_vec();
        let span_eq = entered_span!("build eqs", profiling_2 = true);
        let cuda_hal = get_cuda_hal().unwrap();
        // One selector-masked eq table per output group, then plain eq tables
        // for the three rotation points (when present).
        let eqs_gpu = layer
            .out_sel_and_eval_exprs
            .iter()
            .zip(out_points.iter())
            .zip(selector_ctxs.iter())
            .map(|(((sel_type, _), point), selector_ctx)| {
                build_eq_x_r_with_sel_gpu(&cuda_hal, point, selector_ctx, sel_type)
            })
            // for rotation left point
            .chain(
                rotation_left
                    .iter()
                    .map(|rotation_left| build_eq_x_r_gpu(&cuda_hal, rotation_left)),
            )
            // for rotation right point
            .chain(
                rotation_right
                    .iter()
                    .map(|rotation_right| build_eq_x_r_gpu(&cuda_hal, rotation_right)),
            )
            // for rotation point
            .chain(
                rotation_point
                    .iter()
                    .map(|rotation_point| build_eq_x_r_gpu(&cuda_hal, rotation_point)),
            )
            .collect::<Vec<_>>();
        // `wit` := witin ++ fixed ++ pubio
        // The eq tables stand in for the selector structural witnesses, so
        // only the non-selector structural witnesses are taken from `wit`.
        let all_witins_gpu = wit
            .iter()
            .take(layer.n_witin + layer.n_fixed + layer.n_instance)
            .map(|mle| mle.as_ref())
            .chain(
                // some non-selector structural witin
                wit.iter()
                    .skip(layer.n_witin + layer.n_fixed + layer.n_instance)
                    .take(
                        layer.n_structural_witin
                            - layer.out_sel_and_eval_exprs.len()
                            - layer
                                .rotation_exprs
                                .0
                                .as_ref()
                                .map(|_| ROTATION_OPENING_COUNT)
                                .unwrap_or(0),
                    )
                    .map(|mle| mle.as_ref()),
            )
            .chain(eqs_gpu.iter())
            .collect_vec();
        assert_eq!(
            all_witins_gpu.len(),
            layer.n_witin + layer.n_structural_witin + layer.n_fixed + layer.n_instance,
            "all_witins.len() {} != layer.n_witin {} + layer.n_structural_witin {} + layer.n_fixed {} + layer.n_instance {}",
            all_witins_gpu.len(),
            layer.n_witin,
            layer.n_structural_witin,
            layer.n_fixed,
            layer.n_instance,
        );
        exit_span!(span_eq);
        // Calculate max_num_var and max_degree from the extracted relationships
        let (term_coefficients, mle_indices_per_term, mle_size_info) =
            extract_mle_relationships_from_monomial_terms(
                &layer
                    .main_sumcheck_expression_monomial_terms
                    .clone()
                    .unwrap(),
                &all_witins_gpu,
                &pub_io_evals.iter().map(|v| Either::Right(*v)).collect_vec(),
                &main_sumcheck_challenges,
            );
        let max_num_var = max_num_variables;
        // Degree of the batched polynomial = longest product of MLE factors.
        let max_degree = mle_indices_per_term
            .iter()
            .map(|indices| indices.len())
            .max()
            .unwrap_or(0);
        // Convert types for GPU function Call
        // NOTE(review): these pointer/type reinterpretations all rely on
        // E == BB31Ext and on `transcript` actually being a BasicTranscript.
        let basic_tr: &mut BasicTranscript<BB31Ext> =
            unsafe { &mut *(transcript as *mut _ as *mut BasicTranscript<BB31Ext>) };
        let term_coefficients_gl64: Vec<BB31Ext> =
            unsafe { std::mem::transmute(term_coefficients) };
        let all_witins_gpu_gl64: Vec<&MultilinearExtensionGpu<BB31Ext>> =
            unsafe { std::mem::transmute(all_witins_gpu) };
        let all_witins_gpu_type_gl64 = all_witins_gpu_gl64.iter().map(|mle| &mle.mle).collect_vec();
        let (proof_gpu, evals_gpu, challenges_gpu) = cuda_hal
            .prove_generic_sumcheck_gpu(
                all_witins_gpu_type_gl64,
                &mle_size_info,
                &term_coefficients_gl64,
                &mle_indices_per_term,
                max_num_var,
                max_degree,
                basic_tr,
            )
            .unwrap();
        let evals_gpu = evals_gpu.into_iter().flatten().collect_vec();
        let row_challenges = challenges_gpu.iter().map(|c| c.elements).collect_vec();
        // convert back to E: ExtensionField
        let proof_gpu_e =
            unsafe { std::mem::transmute::<IOPProof<BB31Ext>, IOPProof<E>>(proof_gpu) };
        let evals_gpu_e = unsafe { std::mem::transmute::<Vec<BB31Ext>, Vec<E>>(evals_gpu) };
        let row_challenges_e =
            unsafe { std::mem::transmute::<Vec<BB31Ext>, Vec<E>>(row_challenges) };
        exit_span!(span);
        (
            LayerProof {
                main: SumcheckLayerProof {
                    proof: proof_gpu_e,
                    evals: evals_gpu_e,
                },
                rotation: rotation_proof,
            },
            row_challenges_e,
        )
    }
}
/// This is to prove the following n rotation arguments:
/// For the i-th argument, we check rotated(rotation_expr[i].0) == rotation_expr[i].1
/// This is proved through the following arguments:
/// 0 = \sum_{b = 0}^{N - 1} sel(b) * \sum_i alpha^i * (rotated_rotation_expr[i].0(b) - rotation_expr[i].1(b))
/// With the randomness rx, we check: (currently we only support cycle with length 32)
/// rotated_rotation_expr[i].0(rx) == (1 - rx_4) * rotation_expr[i].1(0, rx_0, rx_1, ..., rx_3, rx_5, ...)
/// + rx_4 * rotation_expr[i].1(1, rx_0, 1 - rx_1, ..., rx_3, rx_5, ...)
///
/// GPU twin of the CPU `prove_rotation`: rotated MLEs, the cyclic selector and
/// the sumcheck itself all run on the CUDA HAL. The whole body is hard-wired
/// to the BabyBear field (`BB31Base`/`BB31Ext`); the transmutes below
/// reinterpret `E` as that concrete field.
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(skip_all, name = "prove_rotation_gpu", level = "info")]
pub(crate) fn prove_rotation_gpu<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>(
    _num_threads: usize,
    max_num_variables: usize,
    rotation_cyclic_subgroup_size: usize,
    rotation_cyclic_group_log2: usize,
    wit: &LayerWitness<GpuBackend<E, PCS>>,
    raw_rotation_exprs: &[(Expression<E>, Expression<E>)],
    rotation_sumcheck_expression: Vec<Term<Expression<E>, Expression<E>>>,
    rt: &Point<E>,
    global_challenges: &[E],
    transcript: &mut impl Transcript<E>,
) -> (SumcheckLayerProof<E>, RotationPoints<E>) {
    let bh = BooleanHypercube::new(rotation_cyclic_group_log2);
    let cuda_hal = get_cuda_hal().unwrap();
    // rotated_mles is non-deterministic input, rotated from existing witness polynomial
    // we will reduce it to zero check, and finally reduce to committed polynomial opening
    let span = entered_span!("rotate_witin_selector", profiling_3 = true);
    let rotated_mles_gpu = build_rotation_mles_gpu(
        &cuda_hal,
        raw_rotation_exprs,
        wit,
        &bh,
        rotation_cyclic_group_log2,
    );
    let selector_gpu = build_rotation_selector_gpu(
        &cuda_hal,
        wit,
        rt,
        &bh,
        rotation_cyclic_subgroup_size,
        rotation_cyclic_group_log2,
    );
    // Batch the n rotation claims with powers of a fresh transcript challenge.
    let rotation_challenges = chain!(
        global_challenges.iter().copied(),
        get_challenge_pows(raw_rotation_exprs.len(), transcript)
    )
    .collect_vec();
    exit_span!(span);
    let span = entered_span!("rotation IOPProverState::prove", profiling_3 = true);
    // gpu mles, interleaved as [rotated_0, target_0, rotated_1, target_1, ..., selector]
    let mle_gpu_ref: Vec<&MultilinearExtensionGpu<E>> = rotated_mles_gpu
        .iter()
        .zip_eq(raw_rotation_exprs)
        .flat_map(|(mle, (_, expr))| match expr {
            Expression::WitIn(wit_id) => {
                vec![mle, wit[*wit_id as usize].as_ref()]
            }
            _ => panic!(""),
        })
        .chain(std::iter::once(&selector_gpu))
        .collect_vec();
    // Calculate max_num_var and max_degree from the extracted relationships
    let (term_coefficients, mle_indices_per_term, mle_size_info) =
        extract_mle_relationships_from_monomial_terms(
            &rotation_sumcheck_expression,
            &mle_gpu_ref,
            &[],
            &rotation_challenges,
        );
    let max_num_var = max_num_variables;
    let max_degree = mle_indices_per_term
        .iter()
        .map(|indices| indices.len())
        .max()
        .unwrap_or(0);
    // Convert types for GPU function call.
    // SAFETY-adjacent: these casts/transmutes assume E == BB31Ext (BabyBear
    // ext. field) with identical memory layout — TODO confirm a field-type
    // guard holds on all call paths into this function.
    let basic_tr: &mut BasicTranscript<BB31Ext> =
        unsafe { &mut *(transcript as *mut _ as *mut BasicTranscript<BB31Ext>) };
    let term_coefficients_gl64: Vec<BB31Ext> = unsafe { std::mem::transmute(term_coefficients) };
    let all_witins_gpu_gl64: Vec<&MultilinearExtensionGpu<BB31Ext>> =
        unsafe { std::mem::transmute(mle_gpu_ref) };
    let all_witins_gpu_type_gl64 = all_witins_gpu_gl64.iter().map(|mle| &mle.mle).collect_vec();
    // gpu prover
    let (proof_gpu, evals_gpu, challenges_gpu) = cuda_hal
        .prove_generic_sumcheck_gpu(
            all_witins_gpu_type_gl64,
            &mle_size_info,
            &term_coefficients_gl64,
            &mle_indices_per_term,
            max_num_var,
            max_degree,
            basic_tr,
        )
        .unwrap();
    let evals_gpu = evals_gpu.into_iter().flatten().collect_vec();
    let row_challenges = challenges_gpu.iter().map(|c| c.elements).collect_vec();
    // Convert the concrete-field results back to the generic `E`.
    let proof_gpu_e = unsafe { std::mem::transmute::<IOPProof<BB31Ext>, IOPProof<E>>(proof_gpu) };
    let mut evals_gpu_e = unsafe { std::mem::transmute::<Vec<BB31Ext>, Vec<E>>(evals_gpu) };
    let row_challenges_e = unsafe { std::mem::transmute::<Vec<BB31Ext>, Vec<E>>(row_challenges) };
    // skip selector/eq as verifier can derive itself
    evals_gpu_e.truncate(raw_rotation_exprs.len() * 2);
    exit_span!(span);
    let span = entered_span!("rotation derived left/right eval", profiling_3 = true);
    let bh = BooleanHypercube::new(rotation_cyclic_group_log2);
    let (left_point, right_point) = bh.get_rotation_points(&row_challenges_e);
    // For each rotation pair, derive the (left, right) openings of the source
    // polynomial from the rotated evaluation; output layout per pair is
    // [left_eval, right_eval, target_eval].
    let evals = evals_gpu_e
        .par_chunks_exact(2)
        .zip_eq(raw_rotation_exprs.par_iter())
        .flat_map(|(evals, (rotated_expr, _))| {
            let [rotated_eval, target_eval] = evals else {
                unreachable!()
            };
            let left_eval = match rotated_expr {
                Expression::WitIn(source_wit_id) => {
                    wit[*source_wit_id as usize].evaluate(&left_point)
                }
                _ => unreachable!(),
            };
            let right_eval =
                bh.get_rotation_right_eval_from_left(*rotated_eval, left_eval, &row_challenges_e);
            // Debug builds cross-check the derived right evaluation against a
            // direct evaluation at `right_point`.
            #[cfg(debug_assertions)]
            {
                use multilinear_extensions::Expression;
                let expected_right_eval = match rotated_expr {
                    Expression::WitIn(source_wit_id) => {
                        wit[*source_wit_id as usize].evaluate(&right_point)
                    }
                    _ => unreachable!(),
                };
                assert_eq!(
                    expected_right_eval, right_eval,
                    "rotation right eval mismatch: expected {expected_right_eval}, got {right_eval}"
                );
            }
            [left_eval, right_eval, *target_eval]
        })
        .collect::<Vec<E>>();
    exit_span!(span);
    (
        SumcheckLayerProof {
            proof: proof_gpu_e,
            evals,
        },
        RotationPoints {
            left: left_point,
            right: right_point,
            origin: row_challenges_e,
        },
    )
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gkr/layer/cpu/mod.rs | gkr_iop/src/gkr/layer/cpu/mod.rs | use crate::{
cpu::{CpuBackend, CpuProver},
gkr::{
booleanhypercube::BooleanHypercube,
layer::{
Layer, LayerWitness,
hal::{SumcheckLayerProver, ZerocheckLayerProver},
zerocheck_layer::RotationPoints,
},
},
selector::SelectorContext,
utils::{rotation_next_base_mle, rotation_selector},
};
use either::Either;
use ff_ext::ExtensionField;
use itertools::{Itertools, chain};
use mpcs::PolynomialCommitmentScheme;
use multilinear_extensions::{
Expression,
mle::{MultilinearExtension, Point},
monomial::Term,
virtual_poly::build_eq_x_r_vec,
virtual_polys::VirtualPolynomialsBuilder,
};
use rayon::{
iter::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator,
},
slice::ParallelSlice,
};
use sumcheck::{
macros::{entered_span, exit_span},
structs::{IOPProof, IOPProverState},
util::get_challenge_pows,
};
use transcript::Transcript;
use crate::{
gkr::layer::{
ROTATION_OPENING_COUNT,
hal::LinearLayerProver,
sumcheck_layer::{LayerProof, SumcheckLayerProof},
},
hal::ProverBackend,
};
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> LinearLayerProver<CpuBackend<E, PCS>>
    for CpuProver<CpuBackend<E, PCS>>
{
    /// A linear layer needs no sumcheck: each witness polynomial is evaluated
    /// directly at `out_point` and the claimed evaluations are absorbed into
    /// the Fiat-Shamir transcript. The returned proof carries no round
    /// messages, only the evaluations.
    fn prove(
        _layer: &Layer<E>,
        wit: LayerWitness<CpuBackend<E, PCS>>,
        out_point: &multilinear_extensions::mle::Point<E>,
        transcript: &mut impl transcript::Transcript<E>,
    ) -> crate::gkr::layer::sumcheck_layer::LayerProof<E> {
        // Evaluate every layer witness at the requested point in parallel.
        let evals = wit
            .into_par_iter()
            .map(|mle| mle.evaluate(out_point))
            .collect::<Vec<_>>();
        // Bind the claims into the transcript before returning them.
        transcript.append_field_element_exts(&evals);
        let main = SumcheckLayerProof {
            // No sumcheck rounds for a linear layer.
            proof: IOPProof { proofs: Vec::new() },
            evals,
        };
        LayerProof {
            main,
            rotation: None,
        }
    }
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> SumcheckLayerProver<CpuBackend<E, PCS>>
    for CpuProver<CpuBackend<E, PCS>>
{
    /// Runs a single sumcheck over the layer's first expression and returns
    /// the round proofs together with the final flattened MLE evaluations.
    fn prove(
        layer: &Layer<E>,
        num_threads: usize,
        max_num_variables: usize,
        wit: LayerWitness<'_, CpuBackend<E, PCS>>,
        challenges: &[<CpuBackend<E, PCS> as ProverBackend>::E],
        transcript: &mut impl Transcript<<CpuBackend<E, PCS> as ProverBackend>::E>,
    ) -> LayerProof<<CpuBackend<E, PCS> as ProverBackend>::E> {
        // Hand every witness MLE to the builder as a borrowed polynomial.
        let polys = wit
            .iter()
            .map(|mle| Either::Left(mle.as_ref()))
            .collect_vec();
        let builder =
            VirtualPolynomialsBuilder::new_with_mles(num_threads, max_num_variables, polys);
        let virtual_polys = builder.to_virtual_polys(&[layer.exprs[0].clone()], challenges);
        let (proof, prover_state) = IOPProverState::prove(virtual_polys, transcript);
        let evals = prover_state.get_mle_flatten_final_evaluations();
        LayerProof {
            main: SumcheckLayerProof { proof, evals },
            rotation: None,
        }
    }
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> ZerocheckLayerProver<CpuBackend<E, PCS>>
    for CpuProver<CpuBackend<E, PCS>>
{
    /// Proves a zerocheck layer on the CPU.
    ///
    /// Runs up to two sumchecks: an optional rotation sumcheck (when the layer
    /// declares rotation expressions), then the main sumcheck which batches
    /// all layer constraints — plus the rotation openings — with powers of a
    /// fresh transcript challenge. Returns the layer proof and the main
    /// sumcheck's challenge point.
    fn prove(
        layer: &Layer<<CpuBackend<E, PCS> as ProverBackend>::E>,
        num_threads: usize,
        max_num_variables: usize,
        wit: LayerWitness<CpuBackend<E, PCS>>,
        out_points: &[Point<<CpuBackend<E, PCS> as ProverBackend>::E>],
        pub_io_evals: &[<CpuBackend<E, PCS> as ProverBackend>::E],
        challenges: &[<CpuBackend<E, PCS> as ProverBackend>::E],
        transcript: &mut impl Transcript<<CpuBackend<E, PCS> as ProverBackend>::E>,
        selector_ctxs: &[SelectorContext],
    ) -> (
        LayerProof<<CpuBackend<E, PCS> as ProverBackend>::E>,
        Point<<CpuBackend<E, PCS> as ProverBackend>::E>,
    ) {
        // The layer protocol expects exactly two global challenges here.
        assert_eq!(challenges.len(), 2);
        assert_eq!(
            layer.out_sel_and_eval_exprs.len(),
            out_points.len(),
            "out eval length {} != with distinct out_point {}",
            layer.out_sel_and_eval_exprs.len(),
            out_points.len(),
        );
        assert_eq!(
            layer.out_sel_and_eval_exprs.len(),
            selector_ctxs.len(),
            "selector_ctxs length {}",
            selector_ctxs.len()
        );
        let (_, raw_rotation_exprs) = &layer.rotation_exprs;
        let (rotation_proof, rotation_left, rotation_right, rotation_point) =
            if let Some(rotation_sumcheck_expression) =
                layer.rotation_sumcheck_expression_monomial_terms.as_ref()
            {
                // 1st sumcheck: process rotation_exprs
                let rt = out_points.first().unwrap();
                let (
                    proof,
                    RotationPoints {
                        left,
                        right,
                        origin,
                    },
                ) = prove_rotation(
                    num_threads,
                    max_num_variables,
                    layer.rotation_cyclic_subgroup_size,
                    layer.rotation_cyclic_group_log2,
                    &wit,
                    raw_rotation_exprs,
                    rotation_sumcheck_expression.clone(),
                    rt,
                    challenges,
                    transcript,
                );
                (Some(proof), Some(left), Some(right), Some(origin))
            } else {
                (None, None, None, None)
            };
        // 2nd sumcheck: batch rotation with other constraints
        let span = entered_span!("build_out_points_eq", profiling_4 = true);
        let main_sumcheck_challenges = chain!(
            challenges.iter().copied(),
            get_challenge_pows(
                layer.exprs.len() + raw_rotation_exprs.len() * ROTATION_OPENING_COUNT,
                transcript,
            )
        )
        .collect_vec();
        // zero check eq || rotation eq
        let mut eqs = layer
            .out_sel_and_eval_exprs
            .par_iter()
            .zip(out_points.par_iter())
            .zip(selector_ctxs.par_iter())
            .filter_map(|(((sel_type, _), point), selector_ctx)| {
                sel_type.compute(point, selector_ctx)
            })
            // for rotation left point
            .chain(rotation_left.par_iter().map(|rotation_left| {
                MultilinearExtension::from_evaluations_ext_vec(
                    rotation_left.len(),
                    build_eq_x_r_vec(rotation_left),
                )
            }))
            // for rotation right point
            .chain(rotation_right.par_iter().map(|rotation_right| {
                MultilinearExtension::from_evaluations_ext_vec(
                    rotation_right.len(),
                    build_eq_x_r_vec(rotation_right),
                )
            }))
            // for rotation point
            .chain(rotation_point.par_iter().map(|rotation_point| {
                MultilinearExtension::from_evaluations_ext_vec(
                    rotation_point.len(),
                    build_eq_x_r_vec(rotation_point),
                )
            }))
            .collect::<Vec<_>>();
        exit_span!(span);
        // `wit` := witin ++ fixed ++ pubio
        // we concat eq in between `wit` := witin ++ eqs ++ fixed
        let all_witins = wit
            .iter()
            .take(layer.n_witin + layer.n_fixed + layer.n_instance)
            .map(|mle| Either::Left(mle.as_ref()))
            .chain(
                // some non-selector structural witin
                wit.iter()
                    .skip(layer.n_witin + layer.n_fixed + layer.n_instance)
                    .take(
                        layer.n_structural_witin
                            - layer.out_sel_and_eval_exprs.len()
                            - layer
                                .rotation_exprs
                                .0
                                .as_ref()
                                .map(|_| ROTATION_OPENING_COUNT)
                                .unwrap_or(0),
                    )
                    .map(|mle| Either::Left(mle.as_ref())),
            )
            .chain(eqs.iter_mut().map(Either::Right))
            .collect_vec();
        assert_eq!(
            all_witins.len(),
            layer.n_witin + layer.n_structural_witin + layer.n_fixed + layer.n_instance,
            "all_witins.len() {} != layer.n_witin {} + layer.n_structural_witin {} + layer.n_fixed {} + layer.n_instance {}",
            all_witins.len(),
            layer.n_witin,
            layer.n_structural_witin,
            layer.n_fixed,
            layer.n_instance,
        );
        let builder =
            VirtualPolynomialsBuilder::new_with_mles(num_threads, max_num_variables, all_witins);
        let span = entered_span!("IOPProverState::prove", profiling_4 = true);
        let (proof, prover_state) = IOPProverState::prove(
            builder.to_virtual_polys_with_monomial_terms(
                layer
                    .main_sumcheck_expression_monomial_terms
                    .as_ref()
                    .unwrap(),
                pub_io_evals,
                &main_sumcheck_challenges,
            ),
            transcript,
        );
        let evals = prover_state.get_mle_flatten_final_evaluations();
        exit_span!(span);
        (
            LayerProof {
                main: SumcheckLayerProof { proof, evals },
                rotation: rotation_proof,
            },
            prover_state.collect_raw_challenges(),
        )
    }
}
/// This is to prove the following n rotation arguments:
/// For the i-th argument, we check rotated(rotation_expr[i].0) == rotation_expr[i].1
/// This is proved through the following arguments:
/// 0 = \sum_{b = 0}^{N - 1} sel(b) * \sum_i alpha^i * (rotated_rotation_expr[i].0(b) - rotation_expr[i].1(b))
/// With the randomness rx, we check: (currently we only support cycle with length 32)
/// rotated_rotation_expr[i].0(rx) == (1 - rx_4) * rotation_expr[i].1(0, rx_0, rx_1, ..., rx_3, rx_5, ...)
/// + rx_4 * rotation_expr[i].1(1, rx_0, 1 - rx_1, ..., rx_3, rx_5, ...)
#[allow(clippy::too_many_arguments)]
pub(crate) fn prove_rotation<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>(
    num_threads: usize,
    max_num_variables: usize,
    rotation_cyclic_subgroup_size: usize,
    rotation_cyclic_group_log2: usize,
    wit: &LayerWitness<CpuBackend<E, PCS>>,
    raw_rotation_exprs: &[(Expression<E>, Expression<E>)],
    rotation_sumcheck_expression: Vec<Term<Expression<E>, Expression<E>>>,
    rt: &Point<E>,
    global_challenges: &[E],
    transcript: &mut impl Transcript<E>,
) -> (SumcheckLayerProof<E>, RotationPoints<E>) {
    let span = entered_span!("rotate_witin_selector", profiling_4 = true);
    let bh = BooleanHypercube::new(rotation_cyclic_group_log2);
    // rotated_mles is non-deterministic input, rotated from existing witness polynomial
    // we will reduce it to zero check, and finally reduce to committed polynomial opening
    let (mut selector, mut rotated_mles) = {
        let eq = build_eq_x_r_vec(rt);
        // Build one rotated MLE per rotation pair, plus (appended last) the
        // cyclic-subgroup selector; the selector is popped back off below.
        let mut mles = raw_rotation_exprs
            .par_iter()
            .map(|rotation_expr| match rotation_expr {
                (Expression::WitIn(source_wit_id), _) => rotation_next_base_mle(
                    &bh,
                    &wit[*source_wit_id as usize],
                    rotation_cyclic_group_log2,
                ),
                _ => unimplemented!("unimplemented rotation"),
            })
            .chain(rayon::iter::once(rotation_selector(
                &bh,
                &eq,
                rotation_cyclic_subgroup_size,
                rotation_cyclic_group_log2,
                wit[0].evaluations().len(), // Take first mle just to retrieve total length
            )))
            .collect::<Vec<_>>();
        let selector = mles.pop().unwrap();
        (selector, mles)
    };
    // Batch the n rotation claims with powers of a fresh transcript challenge.
    let rotation_challenges = chain!(
        global_challenges.iter().copied(),
        get_challenge_pows(raw_rotation_exprs.len(), transcript)
    )
    .collect_vec();
    exit_span!(span);
    // TODO: we pick a random point from the output points — is that sound?
    let builder = VirtualPolynomialsBuilder::new_with_mles(
        num_threads,
        max_num_variables,
        // mles format [rotation_mle1, target_mle1, rotation_mle2, target_mle2, ....., selector, eq]
        rotated_mles
            .iter_mut()
            .zip_eq(raw_rotation_exprs)
            .flat_map(|(mle, (_, expr))| match expr {
                Expression::WitIn(wit_id) => {
                    vec![
                        Either::Right(mle),
                        Either::Left(wit[*wit_id as usize].as_ref()),
                    ]
                }
                _ => panic!(""),
            })
            .chain(std::iter::once(Either::Right(&mut selector)))
            .collect_vec(),
    );
    let span = entered_span!("rotation IOPProverState::prove", profiling_4 = true);
    let (rotation_proof, prover_state) = IOPProverState::prove(
        builder.to_virtual_polys_with_monomial_terms(
            &rotation_sumcheck_expression,
            &[],
            &rotation_challenges,
        ),
        transcript,
    );
    exit_span!(span);
    let mut evals = prover_state.get_mle_flatten_final_evaluations();
    let origin_point = prover_state.collect_raw_challenges();
    // skip selector/eq as verifier can derive itself
    evals.truncate(raw_rotation_exprs.len() * 2);
    let span = entered_span!("rotation derived left/right eval", profiling_4 = true);
    // post process: giving opening of rotated polys (point, evals), derive original opening before rotate
    // final format: [
    //   left_eval_0th,
    //   right_eval_0th,
    //   target_eval_0th,
    //   left_eval_1st,
    //   right_eval_1st,
    //   target_eval_1st,
    //   ...
    // ]
    let bh = BooleanHypercube::new(rotation_cyclic_group_log2);
    let (left_point, right_point) = bh.get_rotation_points(&origin_point);
    let evals = evals
        .par_chunks_exact(2)
        .zip_eq(raw_rotation_exprs.par_iter())
        .flat_map(|(evals, (rotated_expr, _))| {
            let [rotated_eval, target_eval] = evals else {
                unreachable!()
            };
            let left_eval = match rotated_expr {
                Expression::WitIn(source_wit_id) => {
                    wit[*source_wit_id as usize].evaluate(&left_point)
                }
                _ => unreachable!(),
            };
            let right_eval =
                bh.get_rotation_right_eval_from_left(*rotated_eval, left_eval, &origin_point);
            // Debug builds cross-check the derived right evaluation against a
            // direct evaluation at `right_point`.
            #[cfg(debug_assertions)]
            {
                use multilinear_extensions::Expression;
                let expected_right_eval = match rotated_expr {
                    Expression::WitIn(source_wit_id) => {
                        wit[*source_wit_id as usize].evaluate(&right_point)
                    }
                    _ => unreachable!(),
                };
                assert_eq!(
                    expected_right_eval, right_eval,
                    "rotation right eval mismatch: expected {expected_right_eval}, got {right_eval}"
                );
            }
            [left_eval, right_eval, *target_eval]
        })
        .collect::<Vec<E>>();
    exit_span!(span);
    (
        SumcheckLayerProof {
            proof: rotation_proof,
            evals,
        },
        RotationPoints {
            left: left_point,
            right: right_point,
            origin: origin_point,
        },
    )
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gpu/mod.rs | gkr_iop/src/gpu/mod.rs | use crate::{
gkr::layer::gpu::utils::extract_mle_relationships_from_monomial_terms,
hal::{MultilinearPolynomial, ProtocolWitnessGeneratorProver, ProverBackend, ProverDevice},
};
use ff_ext::ExtensionField;
use mpcs::{PolynomialCommitmentScheme, SecurityLevel};
use multilinear_extensions::{
macros::{entered_span, exit_span},
mle::{FieldType, MultilinearExtension, Point},
};
use p3::field::TwoAdicField;
use std::{rc::Rc, sync::Arc};
use witness::RowMajorMatrix;
use crate::cpu::default_backend_config;
use either::Either;
use itertools::{Itertools, izip};
use std::marker::PhantomData;
pub mod gpu_prover {
    pub use ceno_gpu::{
        BasefoldCommitmentWithWitness as BasefoldCommitmentWithWitnessGpu, Buffer, CudaHal,
        bb31::{
            CudaHalBB31, GpuDigestLayer, GpuFieldType, GpuMatrix, GpuPolynomial, GpuPolynomialExt,
        },
        common::{
            basefold::utils::convert_ceno_to_gpu_basefold_commitment,
            buffer::BufferImpl,
            get_ceno_gpu_device_id,
            mle::{
                build_mle_as_ceno, ordered_sparse32_selector_gpu, rotation_next_base_mle_gpu,
                rotation_selector_gpu,
            },
            utils::HasUtils,
        },
    };
    use once_cell::sync::Lazy;
    use std::sync::{Arc, Mutex, MutexGuard};

    // Concrete field types this GPU backend is specialized to (BabyBear and
    // its degree-4 extension).
    pub type BB31Base = p3::babybear::BabyBear;
    pub type BB31Ext = ff_ext::BabyBearExt4;

    /// Process-wide, lazily-initialized CUDA HAL handle. Initialization can
    /// fail (e.g. no device), so the error is stored and surfaced by
    /// [`get_cuda_hal`] on every access.
    #[allow(clippy::type_complexity)]
    pub static CUDA_HAL: Lazy<
        Result<Arc<Mutex<CudaHalBB31>>, Box<dyn std::error::Error + Send + Sync>>,
    > = Lazy::new(|| {
        // can be overridden by env variable `CENO_GPU_DEVICE_ID`
        let device_id: usize = get_ceno_gpu_device_id(0);
        CudaHalBB31::new(device_id)
            .map(|hal| Arc::new(Mutex::new(hal)))
            .map_err(|e| Box::new(e) as Box<dyn std::error::Error + Send + Sync>)
    });

    /// Locks and returns the shared CUDA HAL, synchronizing the device before
    /// handing the guard to the caller. Errors are stringified for callers
    /// that only need a diagnostic.
    pub fn get_cuda_hal() -> Result<MutexGuard<'static, CudaHalBB31>, String> {
        let hal_arc = CUDA_HAL
            .as_ref()
            .map_err(|e| format!("HAL not available: {:?}", e))?;
        let hal = hal_arc
            .lock()
            .map_err(|e| format!("Failed to lock HAL: {:?}", e))?;
        // Drain any in-flight GPU work so the caller starts from a clean state.
        hal.inner()
            .synchronize()
            .map_err(|e| format!("Failed to sync: {:?}", e))?;
        Ok(hal)
    }
}
use crate::{evaluation::EvalExpression, gkr::layer::Layer};
pub use gpu_prover::*;
/// Stores a multilinear polynomial in dense evaluation form.
pub struct MultilinearExtensionGpu<'a, E: ExtensionField> {
    /// GPU polynomial data, supporting both base field and extension field
    pub mle: GpuFieldType<'a>,
    // Ties this wrapper to the logical field type `E`; no data is stored.
    _phantom: PhantomData<E>,
}
impl<'a, E: ExtensionField> Default for MultilinearExtensionGpu<'a, E> {
    /// An empty placeholder polynomial backed by `GpuFieldType::default()`.
    fn default() -> Self {
        let mle = GpuFieldType::default();
        Self {
            mle,
            _phantom: PhantomData,
        }
    }
}
impl<'a, E: ExtensionField> Clone for MultilinearExtensionGpu<'a, E> {
    /// Cloning is only supported for base-field polynomials; the
    /// extension-field variant panics because `GpuPolynomialExt` does not
    /// implement `Clone`.
    fn clone(&self) -> Self {
        match &self.mle {
            GpuFieldType::Base(poly) => {
                let mle = GpuFieldType::Base(poly.clone());
                Self {
                    mle,
                    _phantom: PhantomData,
                }
            }
            // Since GpuPolynomialExt may not support Clone, we panic for now
            GpuFieldType::Ext(_) => panic!("Clone not supported for GpuPolynomialExt variant"),
            GpuFieldType::Unreachable => Self::default(),
        }
    }
}
impl<'a, E: ExtensionField> std::fmt::Debug for MultilinearExtensionGpu<'a, E> {
    /// Debug output reports only the polynomial's shape (variable count and
    /// evaluation length), never the device-resident data itself.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        let mut dbg = f.debug_struct("MultilinearExtensionGpu");
        dbg.field("num_vars", &self.mle.num_vars());
        dbg.field("evaluations_len", &self.mle.evaluations_len());
        dbg.finish()
    }
}
impl<'a, E: ExtensionField> MultilinearPolynomial<E> for MultilinearExtensionGpu<'a, E> {
fn num_vars(&self) -> usize {
self.mle.num_vars()
}
fn eval(&self, point: Point<E>) -> E {
self.evaluate(&point)
}
/// Get the length of evaluation data
fn evaluations_len(&self) -> usize {
self.mle.evaluations_len()
}
fn bh_signature(&self) -> E {
if std::any::TypeId::of::<E::BaseField>()
!= std::any::TypeId::of::<p3::goldilocks::Goldilocks>()
{
panic!("GPU backend only supports Goldilocks");
}
match &self.mle {
GpuFieldType::Base(poly) => {
let res: Vec<E> = unsafe { std::mem::transmute(vec![poly.bh_signature()]) };
res[0]
}
GpuFieldType::Ext(poly) => {
let res: Vec<E> = unsafe { std::mem::transmute(vec![poly.bh_signature()]) };
res[0]
}
GpuFieldType::Unreachable => unreachable!(),
}
}
}
impl<'a, E: ExtensionField> MultilinearExtensionGpu<'a, E> {
    /// Get reference to internal GPU polynomial
    pub fn inner(&self) -> &GpuFieldType<'_> {
        &self.mle
    }

    /// Splits an extension-field polynomial into `num_fanin` chunk views.
    /// Panics for the base-field and `Unreachable` variants.
    pub fn as_view_chunks(&self, num_fanin: usize) -> Vec<GpuPolynomialExt<'a>> {
        match &self.mle {
            GpuFieldType::Base(_) => panic!("not supported yet"),
            GpuFieldType::Ext(poly) => poly.as_view_chunk(num_fanin),
            GpuFieldType::Unreachable => panic!("Unreachable GpuFieldType"),
        }
    }

    /// Convert to CPU version of MultilinearExtension
    pub fn inner_to_mle(&self) -> MultilinearExtension<'a, E> {
        match &self.mle {
            GpuFieldType::Base(poly) => {
                // Copy evaluations back from the device.
                let cpu_evaluations = poly.to_cpu_vec();
                // NOTE(review): reinterprets device scalars as E::BaseField;
                // sound only when E is the BabyBear extension field — TODO
                // confirm a field-type guard holds on all call paths.
                let cpu_evaluations_base: Vec<E::BaseField> =
                    unsafe { std::mem::transmute(cpu_evaluations) };
                MultilinearExtension::from_evaluations_vec(
                    self.mle.num_vars(),
                    cpu_evaluations_base,
                )
            }
            GpuFieldType::Ext(poly) => {
                let cpu_evaluations = poly.to_cpu_vec();
                let cpu_evaluations_ext: Vec<E> = unsafe { std::mem::transmute(cpu_evaluations) };
                MultilinearExtension::from_evaluations_ext_vec(
                    self.mle.num_vars(),
                    cpu_evaluations_ext,
                )
            }
            GpuFieldType::Unreachable => panic!("Unreachable FieldType"),
        }
    }

    /// Evaluate polynomial at given point
    pub fn evaluate(&self, point: &[E]) -> E {
        self.inner_to_mle().evaluate(point)
    }

    /// Create GPU version from CPU version of MultilinearExtension
    pub fn from_ceno(cuda_hal: &CudaHalBB31, mle: &MultilinearExtension<'a, E>) -> Self {
        // check type of mle
        match mle.evaluations {
            FieldType::Base(_) => {
                let mle_vec_ref = mle.get_base_field_vec();
                // NOTE(review): assumes E::BaseField is BabyBear — TODO
                // confirm callers guard the field type.
                let mle_vec_ref_gl64: &[BB31Base] = unsafe { std::mem::transmute(mle_vec_ref) };
                let mle_gpu =
                    GpuPolynomial::from_ceno_vec(cuda_hal, mle_vec_ref_gl64, mle.num_vars())
                        .unwrap();
                Self {
                    mle: GpuFieldType::Base(mle_gpu),
                    _phantom: PhantomData,
                }
            }
            FieldType::Ext(_) => {
                let mle_vec_ref = mle.get_ext_field_vec();
                let mle_vec_ref_gl64_ext: &[BB31Ext] = unsafe { std::mem::transmute(mle_vec_ref) };
                let mle_gpu =
                    GpuPolynomialExt::from_ceno_vec(cuda_hal, mle_vec_ref_gl64_ext, mle.num_vars())
                        .unwrap();
                Self {
                    mle: GpuFieldType::Ext(mle_gpu),
                    _phantom: PhantomData,
                }
            }
            FieldType::Unreachable => panic!("Unreachable FieldType"),
        }
    }

    /// Create from base field GpuPolynomial
    pub fn from_ceno_gpu_base(mle_gpu: GpuPolynomial<'a>) -> Self {
        Self {
            mle: GpuFieldType::Base(mle_gpu),
            _phantom: PhantomData,
        }
    }

    /// Create from extension field GpuPolynomialExt
    pub fn from_ceno_gpu_ext(mle_gpu: GpuPolynomialExt<'a>) -> Self {
        Self {
            mle: GpuFieldType::Ext(mle_gpu),
            _phantom: PhantomData,
        }
    }

    /// Method for backward compatibility
    pub fn from_ceno_gpu(mle_gpu: GpuPolynomial<'a>) -> Self {
        Self::from_ceno_gpu_base(mle_gpu)
    }

    /// get inner poly reference with base field claim
    pub fn as_ceno_gpu_base(&self) -> &GpuPolynomial<'_> {
        match &self.mle {
            GpuFieldType::Base(poly) => poly,
            GpuFieldType::Ext(_) => panic!("poly in ext field"),
            GpuFieldType::Unreachable => panic!("Unreachable GpuFieldType"),
        }
    }

    /// get inner poly reference with ext field claim
    pub fn as_ceno_gpu_ext(&self) -> &GpuPolynomialExt<'_> {
        match &self.mle {
            GpuFieldType::Base(_) => panic!("poly in base field"),
            GpuFieldType::Ext(poly) => poly,
            GpuFieldType::Unreachable => panic!("Unreachable GpuFieldType"),
        }
    }
}
/// GPU prover backend: holds the PCS prover/verifier parameters together with
/// the size/security configuration they were generated for.
pub struct GpuBackend<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> {
    /// PCS prover parameter, trimmed to `2^max_poly_size_log2`.
    pub pp: <PCS as PolynomialCommitmentScheme<E>>::ProverParam,
    /// PCS verifier parameter matching `pp`.
    pub vp: <PCS as PolynomialCommitmentScheme<E>>::VerifierParam,
    /// Log2 of the largest polynomial the trimmed parameters support.
    pub max_poly_size_log2: usize,
    /// Security level the parameters were set up with.
    pub security_level: SecurityLevel,
    _marker: std::marker::PhantomData<E>,
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> Default for GpuBackend<E, PCS> {
fn default() -> Self {
let (max_poly_size_log2, security_level) = default_backend_config();
Self::new(max_poly_size_log2, security_level)
}
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> GpuBackend<E, PCS> {
pub fn new(max_poly_size_log2: usize, security_level: SecurityLevel) -> Self {
let param = PCS::setup(1 << E::BaseField::TWO_ADICITY, security_level).unwrap();
let (pp, vp) = PCS::trim(param, 1 << max_poly_size_log2).unwrap();
Self {
pp,
vp,
max_poly_size_log2,
security_level,
_marker: std::marker::PhantomData,
}
}
}
/// Shared-ownership alias for a GPU-resident multilinear polynomial.
pub type ArcMultilinearExtensionGpu<'a, E> = Arc<MultilinearExtensionGpu<'a, E>>;

impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> ProverBackend for GpuBackend<E, PCS> {
    type E = E;
    type Pcs = PCS;
    /// Polynomials live on the device for this backend.
    type MultilinearPoly<'a> = MultilinearExtensionGpu<'a, E>;
    type Matrix = RowMajorMatrix<E::BaseField>;
    // With the "gpu" feature the PCS witness data stays device-resident;
    // otherwise it falls back to the PCS's own commitment type.
    #[cfg(feature = "gpu")]
    type PcsData = BasefoldCommitmentWithWitnessGpu<
        E::BaseField,
        BufferImpl<'static, E::BaseField>,
        GpuDigestLayer,
        GpuMatrix<'static>,
        GpuPolynomial<'static>,
    >;
    #[cfg(not(feature = "gpu"))]
    type PcsData = <PCS as PolynomialCommitmentScheme<E>>::CommitmentWithWitness;

    fn get_pp(&self) -> &<Self::Pcs as PolynomialCommitmentScheme<Self::E>>::ProverParam {
        &self.pp
    }
    fn get_vp(&self) -> &<Self::Pcs as PolynomialCommitmentScheme<Self::E>>::VerifierParam {
        &self.vp
    }
}
/// Thin prover handle that carries a shared reference to its backend.
pub struct GpuProver<PB: ProverBackend + 'static> {
    pub backend: Rc<PB>,
}

impl<PB: ProverBackend> GpuProver<PB> {
    /// Wraps the given backend.
    pub fn new(backend: Rc<PB>) -> Self {
        Self { backend }
    }
}

// Marker impl: `GpuProver` satisfies the full prover-device contract through
// the trait impls defined elsewhere in this crate.
impl<E, PCS> ProverDevice<GpuBackend<E, PCS>> for GpuProver<GpuBackend<E, PCS>>
where
    E: ExtensionField,
    PCS: PolynomialCommitmentScheme<E>,
{
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>
ProtocolWitnessGeneratorProver<GpuBackend<E, PCS>> for GpuProver<GpuBackend<E, PCS>>
{
#[tracing::instrument(skip_all, name = "layer_witness", fields(profiling_2), level = "trace")]
fn layer_witness<'a>(
layer: &Layer<E>,
layer_wits: &[Arc<<GpuBackend<E, PCS> as ProverBackend>::MultilinearPoly<'a>>],
pub_io_evals: &[Either<E::BaseField, E>],
challenges: &[E],
) -> Vec<Arc<<GpuBackend<E, PCS> as ProverBackend>::MultilinearPoly<'a>>> {
let span = entered_span!("preprocess", profiling_2 = true);
if std::any::TypeId::of::<E::BaseField>() != std::any::TypeId::of::<BB31Base>() {
panic!("GPU backend only supports Goldilocks base field");
}
let out_evals: Vec<_> = layer
.out_sel_and_eval_exprs
.iter()
.flat_map(|(sel_type, out_eval)| izip!(std::iter::repeat(sel_type), out_eval.iter()))
.collect();
// pre-process and flatten indices into friendly GPU format
let (num_non_zero_expr, term_coefficients, mle_indices_per_term, mle_size_info) = layer
.exprs_with_selector_out_eval_monomial_form
.iter()
.zip_eq(out_evals.iter())
.filter(|(_, (_, out_eval))| {
match out_eval {
// only take linear/single to process
EvalExpression::Linear(_, _, _) | EvalExpression::Single(_) => true,
EvalExpression::Partition(..) => unimplemented!("Partition"),
EvalExpression::Zero => false,
}
})
.map(|(expr, _)| {
let (coeffs, indices, size_info) = extract_mle_relationships_from_monomial_terms(
expr,
&layer_wits.iter().map(|mle| mle.as_ref()).collect_vec(),
&pub_io_evals,
challenges,
);
let coeffs_gl64: Vec<BB31Ext> = unsafe { std::mem::transmute(coeffs) };
(coeffs_gl64, indices, size_info)
})
.fold(
(0, Vec::new(), Vec::new(), Vec::new()),
|(mut num_non_zero_expr, mut coeff_acc, mut indices_acc, mut size_acc),
(coeffs, indices, size_info)| {
num_non_zero_expr += 1;
coeff_acc.push(coeffs);
indices_acc.push(indices);
size_acc.push(size_info);
(num_non_zero_expr, coeff_acc, indices_acc, size_acc)
},
);
let num_vars = mle_size_info
.first()
.and_then(|f| f.first())
.as_ref()
.unwrap()
.0;
exit_span!(span);
let span = entered_span!("witness_infer", profiling_2 = true);
// process & transmute poly
let all_witins_gpu = layer_wits.iter().map(|mle| mle.as_ref()).collect_vec();
let all_witins_gpu_gl64: Vec<&MultilinearExtensionGpu<BB31Ext>> =
unsafe { std::mem::transmute(all_witins_gpu) };
let all_witins_gpu_type_gl64 = all_witins_gpu_gl64.iter().map(|mle| &mle.mle).collect_vec();
// buffer for output witness from gpu
let cuda_hal = get_cuda_hal().unwrap();
let mut next_witness_buf = (0..num_non_zero_expr)
.map(|_| {
cuda_hal
.alloc_ext_elems_on_device(1 << num_vars, false)
.map_err(|e| format!("Failed to allocate prod GPU buffer: {:?}", e))
})
.collect::<Result<Vec<_>, _>>()
.unwrap();
cuda_hal
.witness_infer
.wit_infer_by_monomial_expr(
&*cuda_hal,
all_witins_gpu_type_gl64,
&term_coefficients,
&mle_indices_per_term,
&mut next_witness_buf,
)
.unwrap();
exit_span!(span);
// recover it back and interleaving with default gpu
let mut next_iter = next_witness_buf.into_iter();
out_evals
.into_iter()
.map(|(_, out_eval)| {
if matches!(
out_eval,
EvalExpression::Linear(..) | EvalExpression::Single(_)
) {
// take next element from next_witness_buf
MultilinearExtensionGpu::from_ceno_gpu_ext(GpuPolynomialExt::new(
next_iter
.next()
.expect("not enough elements in next_witness_buf"),
num_vars,
))
} else {
MultilinearExtensionGpu::default()
}
})
.map(Arc::new)
.collect_vec()
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/cpu/mod.rs | gkr_iop/src/cpu/mod.rs | use crate::{
evaluation::EvalExpression,
gkr::layer::Layer,
hal::{MultilinearPolynomial, ProtocolWitnessGeneratorProver, ProverBackend, ProverDevice},
};
use either::Either;
use ff_ext::ExtensionField;
use itertools::izip;
use mpcs::{PolynomialCommitmentScheme, SecurityLevel, SecurityLevel::Conjecture100bits};
use multilinear_extensions::{
macros::{entered_span, exit_span},
mle::{MultilinearExtension, Point},
wit_infer_by_monomial_expr,
};
use p3::field::TwoAdicField;
use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator};
use std::{iter, rc::Rc, sync::Arc};
use witness::RowMajorMatrix;
pub struct CpuBackend<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> {
pub pp: <PCS as PolynomialCommitmentScheme<E>>::ProverParam,
pub vp: <PCS as PolynomialCommitmentScheme<E>>::VerifierParam,
pub max_poly_size_log2: usize,
pub security_level: SecurityLevel,
_marker: std::marker::PhantomData<E>,
}
pub const DEFAULT_MAX_NUM_VARIABLES: usize = 24;
pub fn default_backend_config() -> (usize, SecurityLevel) {
(DEFAULT_MAX_NUM_VARIABLES, Conjecture100bits)
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> Default for CpuBackend<E, PCS> {
fn default() -> Self {
let (max_poly_size_log2, security_level) = default_backend_config();
Self::new(max_poly_size_log2, security_level)
}
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> CpuBackend<E, PCS> {
pub fn new(max_poly_size_log2: usize, security_level: SecurityLevel) -> Self {
let param = PCS::setup(1 << E::BaseField::TWO_ADICITY, security_level).unwrap();
let (pp, vp) = PCS::trim(param, 1 << max_poly_size_log2).unwrap();
Self {
pp,
vp,
max_poly_size_log2,
security_level,
_marker: std::marker::PhantomData,
}
}
}
impl<'a, E: ExtensionField> MultilinearPolynomial<E> for MultilinearExtension<'a, E> {
fn num_vars(&self) -> usize {
self.num_vars()
}
fn eval(&self, point: Point<E>) -> E {
self.evaluate(&point)
}
fn evaluations_len(&self) -> usize {
self.evaluations.len()
}
fn bh_signature(&self) -> E {
self.bh_signature()
}
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> ProverBackend for CpuBackend<E, PCS> {
type E = E;
type Pcs = PCS;
type MultilinearPoly<'a> = MultilinearExtension<'a, E>;
type Matrix = RowMajorMatrix<E::BaseField>;
type PcsData = PCS::CommitmentWithWitness;
fn get_pp(&self) -> &<Self::Pcs as PolynomialCommitmentScheme<Self::E>>::ProverParam {
&self.pp
}
fn get_vp(&self) -> &<Self::Pcs as PolynomialCommitmentScheme<Self::E>>::VerifierParam {
&self.vp
}
}
/// CPU prover for CPU backend
pub struct CpuProver<PB: ProverBackend + 'static> {
pub backend: Rc<PB>,
}
impl<PB: ProverBackend> CpuProver<PB> {
pub fn new(backend: Rc<PB>) -> Self {
Self { backend }
}
}
impl<E, PCS> ProverDevice<CpuBackend<E, PCS>> for CpuProver<CpuBackend<E, PCS>>
where
E: ExtensionField,
PCS: PolynomialCommitmentScheme<E>,
{
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>
ProtocolWitnessGeneratorProver<CpuBackend<E, PCS>> for CpuProver<CpuBackend<E, PCS>>
{
fn layer_witness<'a>(
layer: &Layer<E>,
layer_wits: &[Arc<<CpuBackend<E, PCS> as ProverBackend>::MultilinearPoly<'a>>],
pub_io_evals: &[Either<E::BaseField, E>],
challenges: &[E],
) -> Vec<Arc<<CpuBackend<E, PCS> as ProverBackend>::MultilinearPoly<'a>>> {
let span = entered_span!("witness_infer", profiling_2 = true);
let out_evals: Vec<_> = layer
.out_sel_and_eval_exprs
.iter()
.flat_map(|(sel_type, out_eval)| izip!(iter::repeat(sel_type), out_eval.iter()))
.collect();
let res = layer
.exprs_with_selector_out_eval_monomial_form
.par_iter()
.zip_eq(layer.expr_names.par_iter())
.zip_eq(out_evals.par_iter())
.map(|((expr, expr_name), (_, out_eval))| {
if cfg!(debug_assertions)
&& let EvalExpression::Zero = out_eval
{
assert!(
wit_infer_by_monomial_expr(expr, layer_wits, pub_io_evals, challenges)
.evaluations()
.is_zero(),
"layer name: {}, expr name: \"{expr_name}\" got non_zero mle",
layer.name
);
};
match out_eval {
EvalExpression::Linear(_, _, _) | EvalExpression::Single(_) => {
wit_infer_by_monomial_expr(expr, layer_wits, pub_io_evals, challenges)
}
EvalExpression::Zero => MultilinearExtension::default().into(),
EvalExpression::Partition(_, _) => unimplemented!(),
}
})
.collect::<Vec<_>>();
exit_span!(span);
res
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/utils/lk_multiplicity.rs | gkr_iop/src/utils/lk_multiplicity.rs | use ff_ext::SmallField;
use itertools::izip;
use std::{
cell::RefCell,
collections::HashMap,
fmt::Debug,
hash::Hash,
mem::{self},
ops::{AddAssign, Deref, DerefMut},
sync::Arc,
};
use thread_local::ThreadLocal;
use crate::tables::{
LookupTable, OpsTable,
ops::{AndTable, LtuTable, OrTable, PowTable, XorTable},
};
pub type MultiplicityRaw<K> = [HashMap<K, usize>; mem::variant_count::<LookupTable>()];
#[derive(Clone, Default, Debug)]
pub struct Multiplicity<K>(pub MultiplicityRaw<K>);
impl<K> Deref for Multiplicity<K> {
type Target = MultiplicityRaw<K>;
fn deref(&self) -> &Self::Target {
&self.0
}
}
impl<K> DerefMut for Multiplicity<K> {
fn deref_mut(&mut self) -> &mut Self::Target {
&mut self.0
}
}
/// for consuming the wrapper
impl<K> IntoIterator for Multiplicity<K>
where
MultiplicityRaw<K>: IntoIterator,
{
type Item = <MultiplicityRaw<K> as IntoIterator>::Item;
type IntoIter = <MultiplicityRaw<K> as IntoIterator>::IntoIter;
fn into_iter(self) -> Self::IntoIter {
self.0.into_iter()
}
}
/// for immutable references
impl<'a, K> IntoIterator for &'a Multiplicity<K>
where
&'a MultiplicityRaw<K>: IntoIterator,
{
type Item = <&'a MultiplicityRaw<K> as IntoIterator>::Item;
type IntoIter = <&'a MultiplicityRaw<K> as IntoIterator>::IntoIter;
#[allow(clippy::into_iter_on_ref)]
fn into_iter(self) -> Self::IntoIter {
(&self.0).into_iter()
}
}
/// for mutable references
impl<'a, K> IntoIterator for &'a mut Multiplicity<K>
where
&'a mut MultiplicityRaw<K>: IntoIterator,
{
type Item = <&'a mut MultiplicityRaw<K> as IntoIterator>::Item;
type IntoIter = <&'a mut MultiplicityRaw<K> as IntoIterator>::IntoIter;
#[allow(clippy::into_iter_on_ref)]
fn into_iter(self) -> Self::IntoIter {
(&mut self.0).into_iter()
}
}
/// A lock-free thread safe struct to count logup multiplicity for each ROM type
/// Lock-free by thread-local such that each thread will only have its local copy
/// struct is cloneable, for internallly it use Arc so the clone will be low cost
#[derive(Clone, Default, Debug)]
#[allow(clippy::type_complexity)]
pub struct LkMultiplicityRaw<K: Copy + Clone + Debug + Eq + Hash + Send> {
multiplicity: Arc<ThreadLocal<RefCell<Multiplicity<K>>>>,
}
impl<K> AddAssign<Self> for LkMultiplicityRaw<K>
where
K: Copy + Clone + Debug + Default + Eq + Hash + Send,
{
fn add_assign(&mut self, rhs: Self) {
*self += Multiplicity(rhs.into_finalize_result().0);
}
}
impl<K> AddAssign<Self> for Multiplicity<K>
where
K: Eq + Hash,
{
fn add_assign(&mut self, rhs: Self) {
for (lhs, rhs) in izip!(&mut self.0, rhs.0) {
for (key, value) in rhs {
*lhs.entry(key).or_default() += value;
}
}
}
}
impl<K> AddAssign<Multiplicity<K>> for LkMultiplicityRaw<K>
where
K: Copy + Clone + Debug + Default + Eq + Hash + Send,
{
fn add_assign(&mut self, rhs: Multiplicity<K>) {
let multiplicity = self.multiplicity.get_or_default();
for (lhs, rhs) in izip!(&mut multiplicity.borrow_mut().0, rhs.0) {
for (key, value) in rhs {
*lhs.entry(key).or_default() += value;
}
}
}
}
impl<K> AddAssign<((LookupTable, K), usize)> for LkMultiplicityRaw<K>
where
K: Copy + Clone + Debug + Default + Eq + Hash + Send,
{
fn add_assign(&mut self, ((rom_type, key), value): ((LookupTable, K), usize)) {
let multiplicity = self.multiplicity.get_or_default();
(*multiplicity.borrow_mut().0[rom_type as usize]
.entry(key)
.or_default()) += value;
}
}
impl<K> AddAssign<(LookupTable, K)> for LkMultiplicityRaw<K>
where
K: Copy + Clone + Debug + Default + Eq + Hash + Send,
{
fn add_assign(&mut self, (rom_type, key): (LookupTable, K)) {
let multiplicity = self.multiplicity.get_or_default();
(*multiplicity.borrow_mut().0[rom_type as usize]
.entry(key)
.or_default()) += 1;
}
}
impl<K: Copy + Clone + Debug + Default + Eq + Hash + Send> LkMultiplicityRaw<K> {
/// Merge result from multiple thread local to single result.
pub fn into_finalize_result(self) -> Multiplicity<K> {
let mut results = Multiplicity::default();
for y in Arc::try_unwrap(self.multiplicity).unwrap() {
results += y.into_inner();
}
results
}
pub fn increment(&mut self, rom_type: LookupTable, key: K) {
*self += (rom_type, key);
}
pub fn set_count(&mut self, rom_type: LookupTable, key: K, count: usize) {
if count == 0 {
return;
}
let multiplicity = self.multiplicity.get_or_default();
let table = &mut multiplicity.borrow_mut().0[rom_type as usize];
if count == 0 {
table.remove(&key);
} else {
table.insert(key, count);
}
}
}
/// Default LkMultiplicity with u64 key.
pub type LkMultiplicity = LkMultiplicityRaw<u64>;
impl LkMultiplicity {
#[inline(always)]
pub fn assert_dynamic_range(&mut self, v: u64, bits: u64) {
self.increment(LookupTable::Dynamic, (1 << bits) + v);
}
#[inline(always)]
pub fn assert_const_range(&mut self, v: u64, max_bits: usize) {
// skip max_bits = 1 range check as it was constrained as (v)*(1-v) without lookup
if max_bits > 1 {
self.assert_dynamic_range(v, max_bits as u64);
}
}
/// TODO remove `assert_ux` and use `assert_const_range` instead
/// assert within range
#[inline(always)]
pub fn assert_ux<const C: usize>(&mut self, v: u64) {
self.increment(LookupTable::Dynamic, (1 << C) + v);
}
#[inline(always)]
pub fn assert_double_u8(&mut self, a: u64, b: u64) {
self.increment(LookupTable::DoubleU8, (a << 8) + b);
}
/// assert slices of field elements within range
#[inline]
pub fn assert_byte_fields<F: SmallField>(&mut self, vs: &[F]) {
// process in pairs
for pair in vs.chunks(2) {
match pair {
[a, b] => self.assert_double_u8(a.to_canonical_u64(), b.to_canonical_u64()),
[a] => self.assert_double_u8(a.to_canonical_u64(), 0),
_ => {}
}
}
}
/// Track a lookup into a logic table (AndTable, etc).
pub fn logic_u8<OP: OpsTable>(&mut self, a: u64, b: u64) {
self.increment(OP::ROM_TYPE, OP::pack(a, b));
}
/// lookup a AND b
pub fn lookup_and_byte(&mut self, a: u64, b: u64) {
self.logic_u8::<AndTable>(a, b)
}
/// lookup a OR b
pub fn lookup_or_byte(&mut self, a: u64, b: u64) {
self.logic_u8::<OrTable>(a, b)
}
/// lookup a XOR b
pub fn lookup_xor_byte(&mut self, a: u64, b: u64) {
self.logic_u8::<XorTable>(a, b)
}
/// lookup a < b as unsigned byte
pub fn lookup_ltu_byte(&mut self, a: u64, b: u64) {
self.logic_u8::<LtuTable>(a, b)
}
pub fn lookup_pow2(&mut self, v: u64) {
self.logic_u8::<PowTable>(2, v)
}
/// Fetch instruction at pc
pub fn fetch(&mut self, pc: u32) {
self.increment(LookupTable::Instruction, pc as u64);
}
}
#[cfg(test)]
mod tests {
use std::thread;
use crate::{tables::LookupTable, utils::lk_multiplicity::LkMultiplicity};
#[test]
fn test_lk_multiplicity_threads() {
// TODO figure out a way to verify thread_local hit/miss in unittest env
let lkm = LkMultiplicity::default();
let thread_count = 20;
// each thread calling assert_byte once
for _ in 0..thread_count {
let mut lkm = lkm.clone();
thread::spawn(move || lkm.assert_ux::<8>(8u64))
.join()
.unwrap();
}
let res = lkm.into_finalize_result();
// check multiplicity counts of assert_byte
assert_eq!(
res[LookupTable::Dynamic as usize][&((1 << 8) + 8)],
thread_count
);
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gadgets/is_lt.rs | gkr_iop/src/gadgets/is_lt.rs | use crate::utils::i64_to_base;
use ff_ext::{ExtensionField, FieldInto, SmallField};
use itertools::izip;
use multilinear_extensions::{Expression, ToExpr, WitIn, power_sequence};
use p3::field::Field;
use std::fmt::Display;
use witness::set_val;
use crate::{
circuit_builder::CircuitBuilder, error::CircuitBuilderError,
utils::lk_multiplicity::LkMultiplicity,
};
#[derive(Debug, Clone)]
pub struct AssertLtConfig(InnerLtConfig);
impl AssertLtConfig {
pub fn construct_circuit<
E: ExtensionField,
NR: Into<String> + Display + Clone,
N: FnOnce() -> NR,
>(
cb: &mut CircuitBuilder<E>,
name_fn: N,
lhs: Expression<E>,
rhs: Expression<E>,
max_bits: usize,
) -> Result<Self, CircuitBuilderError> {
cb.namespace(
|| "assert_lt",
|cb| {
let name = name_fn();
let config = InnerLtConfig::construct_circuit(
cb,
name,
lhs,
rhs,
Expression::ONE,
max_bits,
)?;
Ok(Self(config))
},
)
}
pub fn assign_instance<F: SmallField>(
&self,
instance: &mut [F],
lkm: &mut LkMultiplicity,
lhs: u64,
rhs: u64,
) -> Result<(), CircuitBuilderError> {
self.0.assign_instance_u64(instance, lkm, lhs, rhs)?;
Ok(())
}
pub fn assign_instance_signed<F: SmallField>(
&self,
instance: &mut [F],
lkm: &mut LkMultiplicity,
lhs: i64,
rhs: i64,
) -> Result<(), CircuitBuilderError> {
self.0.assign_instance_i64(instance, lkm, lhs, rhs)?;
Ok(())
}
}
#[derive(Debug, Clone)]
pub struct IsLtConfig {
pub is_lt: WitIn,
config: InnerLtConfig,
}
impl IsLtConfig {
pub fn expr<E: ExtensionField>(&self) -> Expression<E> {
self.is_lt.expr()
}
pub fn construct_circuit<
E: ExtensionField,
NR: Into<String> + Display + Clone,
N: FnOnce() -> NR,
>(
cb: &mut CircuitBuilder<E>,
name_fn: N,
lhs: Expression<E>,
rhs: Expression<E>,
max_bits: usize,
) -> Result<Self, CircuitBuilderError> {
cb.namespace(
|| "is_lt",
|cb| {
let name = name_fn();
let is_lt = cb.create_witin(|| format!("{name} is_lt witin"));
cb.assert_bit(|| "is_lt_bit", is_lt.expr())?;
let config =
InnerLtConfig::construct_circuit(cb, name, lhs, rhs, is_lt.expr(), max_bits)?;
Ok(Self { is_lt, config })
},
)
}
pub fn assign_instance<F: SmallField>(
&self,
instance: &mut [F],
lkm: &mut LkMultiplicity,
lhs: u64,
rhs: u64,
) -> Result<(), CircuitBuilderError> {
set_val!(instance, self.is_lt, (lhs < rhs) as u64);
self.config.assign_instance_u64(instance, lkm, lhs, rhs)?;
Ok(())
}
pub fn assign_instance_signed<F: SmallField>(
&self,
instance: &mut [F],
lkm: &mut LkMultiplicity,
lhs: i64,
rhs: i64,
) -> Result<(), CircuitBuilderError> {
set_val!(instance, self.is_lt, (lhs < rhs) as u64);
self.config.assign_instance_i64(instance, lkm, lhs, rhs)?;
Ok(())
}
pub fn assign_instance_field<F: SmallField>(
&self,
instance: &mut [F],
lkm: &mut LkMultiplicity,
lhs: F,
rhs: F,
is_lt: bool,
) -> Result<(), CircuitBuilderError> {
set_val!(instance, self.is_lt, is_lt as u64);
self.config
.assign_instance_field(instance, lkm, lhs, rhs, is_lt)?;
Ok(())
}
}
#[derive(Debug, Clone)]
pub struct InnerLtConfig {
pub diff: Vec<WitIn>,
pub max_bits: usize,
}
impl InnerLtConfig {
fn range(max_bits: usize) -> u64 {
1u64 << max_bits
}
/// Construct an `InnerLtConfig` circuit which constrains two input
/// expressions `lhs` and `rhs` to satisfy the relation
///
/// - `rhs - lhs \in {1, ..., 2^(max_bits)}` when `is_lt_expr = 1`; and
/// - `lhs - rhs \in {0, ..., 2^(max_bits) - 1}` when `is_lt_expr = 0`
///
/// In the above, values are to be interpreted as finite field elements.
///
/// This is accomplished by witnessing as a `max_bits`-bit value
/// either `lhs - rhs` when `lhs` is required to
/// be at least as large as `rhs`, or `lhs - rhs + 2^ (max_bits)`
/// when `lhs` is required to be smaller than `lhs`.
///
/// Note that the specific values of `lhs` and `rhs` are not relevant to the
/// above conditions -- this means that the value of `max_bits`
/// only needs to depend on the size of the *difference* between values,
/// not on their absolute magnitude. That is, one limb is sufficient to
/// express that 2^48 - 12 is less than 2^48 + 71, since their difference
/// of 83 is within the magnitudes representable by a single 16-bit limb.
///
/// Since there is ambiguity in ordering of values when they are interpreted
/// as elements in a finite field, several functions are available for
/// witness assignment which take unsigned or signed inputs (which have a
/// standard ordering interpretation which is used for the witness
/// assignment), or field elements with an additional explicit boolean
/// input indicating directly whether `is_lt_expr` is 0 or 1.
pub fn construct_circuit<E: ExtensionField, NR: Into<String> + Display + Clone>(
cb: &mut CircuitBuilder<E>,
name: NR,
lhs: Expression<E>,
rhs: Expression<E>,
is_lt_expr: Expression<E>,
max_bits: usize,
) -> Result<Self, CircuitBuilderError> {
assert!(E::BaseField::bits() - 1 > max_bits);
let mut diff = (0..max_bits / u16::BITS as usize)
.map(|i| cb.create_u16(|| format!("diff_{i}")))
.collect::<Result<Vec<WitIn>, _>>()?;
let remain_bits = max_bits % u16::BITS as usize;
if remain_bits > 0 {
let msl = cb.create_witin(|| "msl");
cb.assert_const_range(|| name.clone(), msl.expr(), remain_bits)?;
diff.push(msl);
}
let pows = power_sequence((1 << u16::BITS).into());
// this operation is safe even limb
let diff_expr = izip!(&diff, pows)
.map(|(record, beta)| beta * record.expr())
.sum::<Expression<E>>();
let range = Self::range(max_bits);
cb.require_equal(|| name.clone(), lhs - rhs, diff_expr - is_lt_expr * range)?;
Ok(Self { diff, max_bits })
}
/// Assign instance values to this configuration where the ordering is
/// determined by u64 value ordering.
pub fn assign_instance_u64<F: SmallField>(
&self,
instance: &mut [F],
lkm: &mut LkMultiplicity,
lhs: u64,
rhs: u64,
) -> Result<(), CircuitBuilderError> {
self.assign_instance_field(
instance,
lkm,
F::from_canonical_u64(lhs),
F::from_canonical_u64(rhs),
lhs < rhs,
)
}
/// Assign instance values to this configuration where the ordering is
/// determined by i64 value ordering.
pub fn assign_instance_i64<F: SmallField>(
&self,
instance: &mut [F],
lkm: &mut LkMultiplicity,
lhs: i64,
rhs: i64,
) -> Result<(), CircuitBuilderError> {
let lhs_f = i64_to_base::<F>(lhs);
let rhs_f = i64_to_base::<F>(rhs);
self.assign_instance_field(instance, lkm, lhs_f, rhs_f, lhs < rhs)
}
/// Assign values to this instance using field inputs, where the intended
/// ordering of the field values is specified by the `is_lt` boolean input,
/// indicating whether `lhs` is meant to be less than `rhs`.
pub fn assign_instance_field<F: SmallField>(
&self,
instance: &mut [F],
lkm: &mut LkMultiplicity,
lhs: F,
rhs: F,
is_lt: bool,
) -> Result<(), CircuitBuilderError> {
let range_offset: F = if is_lt {
Self::range(self.max_bits).into_f()
} else {
F::ZERO
};
let diff = (lhs - rhs + range_offset).to_canonical_u64();
(0..self.max_bits / u16::BITS as usize).for_each(|i| {
let wit = &self.diff[i];
// extract the 16 bit limb from diff and assign to instance
let val = (diff >> (i * u16::BITS as usize)) & 0xffff;
lkm.assert_ux::<16>(val);
set_val!(instance, wit, val);
});
let remain_bits = self.max_bits % u16::BITS as usize;
if remain_bits > 0 {
let wit = self.diff.last().unwrap();
// extract remaining bits limb from diff and assign to instance
let val = (diff >> ((self.diff.len() - 1) * u16::BITS as usize)) & 0xffff;
lkm.assert_const_range(val, remain_bits);
set_val!(instance, wit, val);
}
Ok(())
}
}
pub fn cal_lt_diff(is_lt: bool, max_bits: usize, lhs: u64, rhs: u64) -> u64 {
(if is_lt {
InnerLtConfig::range(max_bits)
} else {
0
} + lhs
- rhs)
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gadgets/mod.rs | gkr_iop/src/gadgets/mod.rs | mod is_lt;
mod is_zero;
pub use is_lt::{AssertLtConfig, InnerLtConfig, IsLtConfig, cal_lt_diff};
pub use is_zero::{IsEqualConfig, IsZeroConfig};
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/gadgets/is_zero.rs | gkr_iop/src/gadgets/is_zero.rs | use ff_ext::{ExtensionField, SmallField};
use multilinear_extensions::{Expression, ToExpr, WitIn};
use witness::set_val;
use crate::{circuit_builder::CircuitBuilder, error::CircuitBuilderError};
pub struct IsZeroConfig {
is_zero: Option<WitIn>,
inverse: WitIn,
}
impl IsZeroConfig {
pub fn expr<E: ExtensionField>(&self) -> Expression<E> {
self.is_zero.map(|wit| wit.expr()).unwrap_or(0.into())
}
pub fn construct_circuit<E: ExtensionField, NR: Into<String>, N: FnOnce() -> NR>(
cb: &mut CircuitBuilder<E>,
name_fn: N,
x: Expression<E>,
) -> Result<Self, CircuitBuilderError> {
Self::construct(cb, name_fn, x, false)
}
pub fn construct_non_zero<E: ExtensionField, NR: Into<String>, N: FnOnce() -> NR>(
cb: &mut CircuitBuilder<E>,
name_fn: N,
x: Expression<E>,
) -> Result<Self, CircuitBuilderError> {
Self::construct(cb, name_fn, x, true)
}
fn construct<E: ExtensionField, NR: Into<String>, N: FnOnce() -> NR>(
cb: &mut CircuitBuilder<E>,
name_fn: N,
x: Expression<E>,
assert_non_zero: bool,
) -> Result<Self, CircuitBuilderError> {
cb.namespace(name_fn, |cb| {
let (is_zero, is_zero_expr) = if assert_non_zero {
(None, 0.into())
} else {
let is_zero = cb.create_witin(|| "is_zero");
// x!=0 => is_zero=0
cb.require_zero(|| "is_zero_0", is_zero.expr() * x.clone())?;
(Some(is_zero), is_zero.expr())
};
let inverse = cb.create_witin(|| "inv");
// x==0 => is_zero=1
cb.require_one(|| "is_zero_1", is_zero_expr + x.clone() * inverse.expr())?;
Ok(IsZeroConfig { is_zero, inverse })
})
}
pub fn assign_instance<F: SmallField>(
&self,
instance: &mut [F],
x: F,
) -> Result<(), CircuitBuilderError> {
let (is_zero, inverse) = if x.is_zero() {
(F::ONE, F::ZERO)
} else {
(F::ZERO, x.try_inverse().expect("not zero"))
};
if let Some(wit) = self.is_zero {
set_val!(instance, wit, is_zero);
}
set_val!(instance, self.inverse, inverse);
Ok(())
}
}
pub struct IsEqualConfig(IsZeroConfig);
impl IsEqualConfig {
pub fn expr<E: ExtensionField>(&self) -> Expression<E> {
self.0.expr()
}
pub fn construct_circuit<E: ExtensionField, NR: Into<String>, N: FnOnce() -> NR>(
cb: &mut CircuitBuilder<E>,
name_fn: N,
a: Expression<E>,
b: Expression<E>,
) -> Result<Self, CircuitBuilderError> {
Ok(IsEqualConfig(IsZeroConfig::construct_circuit(
cb,
name_fn,
a - b,
)?))
}
pub fn construct_non_equal<E: ExtensionField, NR: Into<String>, N: FnOnce() -> NR>(
cb: &mut CircuitBuilder<E>,
name_fn: N,
a: Expression<E>,
b: Expression<E>,
) -> Result<Self, CircuitBuilderError> {
Ok(IsEqualConfig(IsZeroConfig::construct_non_zero(
cb,
name_fn,
a - b,
)?))
}
pub fn assign_instance<F: SmallField>(
&self,
instance: &mut [F],
a: F,
b: F,
) -> Result<(), CircuitBuilderError> {
self.0.assign_instance(instance, a - b)
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/tables/mod.rs | gkr_iop/src/tables/mod.rs | pub mod ops;
use strum_macros::EnumIter;
#[derive(
Copy, Clone, Debug, EnumIter, PartialEq, Eq, Hash, serde::Serialize, serde::Deserialize,
)]
#[repr(usize)]
pub enum LookupTable {
Dynamic = 0, // Range type for all bits up to 18 bits
DoubleU8, // Range type for two 8-bit checks together
And, // a & b where a, b are bytes
Or, // a | b where a, b are bytes
Xor, // a ^ b where a, b are bytes
Ltu, // a <(usign) b where a, b are bytes and the result is 0/1.
Pow, // a ** b where a is 2 and b is 5-bit value
Instruction, // Decoded instruction from the fixed program.
}
/// Use this trait as parameter to OpsTableCircuit.
pub trait OpsTable {
const ROM_TYPE: LookupTable;
fn len() -> usize;
/// The content of the table: [[a, b, result], ...]
fn content() -> Vec<[u64; 3]>;
fn pack(a: u64, b: u64) -> u64 {
a | (b << 8)
}
fn unpack(i: u64) -> (u64, u64) {
(i & 0xff, (i >> 8) & 0xff)
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/tables/ops.rs | gkr_iop/src/tables/ops.rs | use crate::tables::{LookupTable, OpsTable};
pub struct AndTable;
impl OpsTable for AndTable {
const ROM_TYPE: LookupTable = LookupTable::And;
fn len() -> usize {
1 << 16
}
fn content() -> Vec<[u64; 3]> {
(0..Self::len() as u64)
.map(|i| {
let (a, b) = Self::unpack(i);
[a, b, a & b]
})
.collect()
}
}
pub struct OrTable;
impl OpsTable for OrTable {
const ROM_TYPE: LookupTable = LookupTable::Or;
fn len() -> usize {
1 << 16
}
fn content() -> Vec<[u64; 3]> {
(0..Self::len() as u64)
.map(|i| {
let (a, b) = Self::unpack(i);
[a, b, a | b]
})
.collect()
}
}
pub struct XorTable;
impl OpsTable for XorTable {
const ROM_TYPE: LookupTable = LookupTable::Xor;
fn len() -> usize {
1 << 16
}
fn content() -> Vec<[u64; 3]> {
(0..Self::len() as u64)
.map(|i| {
let (a, b) = Self::unpack(i);
[a, b, a ^ b]
})
.collect()
}
}
pub struct LtuTable;
impl OpsTable for LtuTable {
const ROM_TYPE: LookupTable = LookupTable::Ltu;
fn len() -> usize {
1 << 16
}
fn content() -> Vec<[u64; 3]> {
(0..Self::len() as u64)
.map(|i| {
let (a, b) = Self::unpack(i);
[a, b, if a < b { 1 } else { 0 }]
})
.collect()
}
}
pub struct PowTable;
impl OpsTable for PowTable {
const ROM_TYPE: LookupTable = LookupTable::Pow;
fn len() -> usize {
1 << 5
}
fn content() -> Vec<[u64; 3]> {
(0..Self::len() as u64)
.map(|exponent| [2, exponent, 1 << exponent])
.collect()
}
fn pack(base: u64, exponent: u64) -> u64 {
assert_eq!(base, 2);
exponent
}
fn unpack(exponent: u64) -> (u64, u64) {
(2, exponent)
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/gkr_iop/src/circuit_builder/ram.rs | gkr_iop/src/circuit_builder/ram.rs | use crate::{RAMType, error::CircuitBuilderError};
use ff_ext::ExtensionField;
use crate::circuit_builder::DebugIndex;
use itertools::izip;
use multilinear_extensions::{Expression, ToExpr, power_sequence};
use p3::field::Field;
use crate::{circuit_builder::CircuitBuilder, gadgets::AssertLtConfig};
impl<E: ExtensionField> CircuitBuilder<'_, E> {
// MAX_TS_BITS need to be smaller than prime field
pub const MAX_TS_BITS: usize = 29;
pub fn ram_type_read<const LIMBS: usize, NR: Into<String>, N: FnOnce() -> NR>(
&mut self,
name_fn: N,
ram_type: RAMType,
identifier: impl ToExpr<E, Output = Expression<E>>,
prev_ts: Expression<E>,
ts: Expression<E>,
value: [Expression<E>; LIMBS],
) -> Result<(Expression<E>, AssertLtConfig), CircuitBuilderError> {
assert!(E::BaseField::bits() > Self::MAX_TS_BITS);
self.namespace(name_fn, |cb| {
// READ (a, v, t)
let read_record = [
vec![ram_type.into()],
vec![identifier.expr()],
value.to_vec(),
vec![prev_ts.clone()],
]
.concat();
// Write (a, v, t)
let write_record = [
vec![ram_type.into()],
vec![identifier.expr()],
value.to_vec(),
vec![ts.clone()],
]
.concat();
cb.read_record(|| "read_record", ram_type, read_record)?;
cb.write_record(|| "write_record", ram_type, write_record)?;
// assert prev_ts < current_ts
let lt_cfg = AssertLtConfig::construct_circuit(
cb,
|| "prev_ts < ts",
prev_ts,
ts.clone(),
Self::MAX_TS_BITS,
)?;
let next_ts = ts + 1;
Ok((next_ts, lt_cfg))
})
}
#[allow(clippy::too_many_arguments)]
pub fn ram_type_write<const LIMBS: usize, NR: Into<String>, N: FnOnce() -> NR>(
&mut self,
name_fn: N,
ram_type: RAMType,
identifier: impl ToExpr<E, Output = Expression<E>>,
prev_ts: Expression<E>,
ts: Expression<E>,
prev_values: [Expression<E>; LIMBS],
value: [Expression<E>; LIMBS],
) -> Result<(Expression<E>, AssertLtConfig), CircuitBuilderError> {
assert!(identifier.expr().degree() <= 1);
assert!(E::BaseField::bits() > Self::MAX_TS_BITS);
self.namespace(name_fn, |cb| {
// READ (a, v, t)
let read_record = [
vec![ram_type.into()],
vec![identifier.expr()],
prev_values.to_vec(),
vec![prev_ts.clone()],
]
.concat();
// Write (a, v, t)
let write_record = [
vec![ram_type.into()],
vec![identifier.expr()],
value.to_vec(),
vec![ts.clone()],
]
.concat();
cb.read_record(|| "read_record", ram_type, read_record)?;
cb.write_record(|| "write_record", ram_type, write_record)?;
let lt_cfg = AssertLtConfig::construct_circuit(
cb,
|| "prev_ts < ts",
prev_ts,
ts.clone(),
Self::MAX_TS_BITS,
)?;
let next_ts = ts + 1;
if matches!(ram_type, RAMType::Register) {
let pow_u16 = power_sequence((1 << u16::BITS as u64).into());
cb.register_debug_expr(
DebugIndex::RdWrite as usize,
izip!(value.clone(), pow_u16).map(|(v, pow)| v * pow).sum(),
);
} else if matches!(ram_type, RAMType::Memory) {
let pow_u16 = power_sequence((1 << u16::BITS as u64).into());
cb.register_debug_expr(
DebugIndex::MemWrite as usize,
izip!(value, pow_u16).map(|(v, pow)| v * pow).sum(),
);
}
Ok((next_ts, lt_cfg))
})
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/build.rs | ceno_cli/build.rs | use std::{env, fmt::Write};
use vergen_git2::*;
fn main() -> Result<(), Box<dyn std::error::Error>> {
let build = BuildBuilder::default().build_timestamp(true).build()?;
let git2 = Git2Builder::default().sha(true).dirty(true).build()?;
let rustc = RustcBuilder::default()
.channel(true)
.commit_date(true)
.build()?;
Emitter::default()
.add_instructions(&build)?
.add_instructions(&git2)?
.add_instructions(&rustc)?
.emit_and_set()?;
let mut ceno_version = String::from(env!("CARGO_PKG_VERSION"));
write!(ceno_version, " ({}", env::var("VERGEN_GIT_SHA")?)?;
if env::var("VERGEN_GIT_DIRTY")? == "true" {
write!(ceno_version, "-dirty")?;
}
writeln!(
ceno_version,
" rustc-{}-{} {})",
env::var("VERGEN_RUSTC_CHANNEL")?,
env::var("VERGEN_RUSTC_COMMIT_DATE")?,
env::var("VERGEN_BUILD_TIMESTAMP")?
)?;
println!("cargo:rustc-env=CENO_VERSION={}", ceno_version);
Ok(())
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/sdk.rs | ceno_cli/src/sdk.rs | use ceno_emul::{Platform, Program};
use ceno_host::CenoStdin;
use ceno_recursion::{
aggregation::{CenoAggregationProver, CenoRecursionProvingKeys, CenoRecursionVerifierKeys},
zkvm_verifier::binding::E,
};
use ceno_zkvm::{
e2e::{MultiProver, run_e2e_proof, setup_program},
scheme::{
ZKVMProof, create_backend, create_prover, hal::ProverDevice,
mock_prover::LkMultiplicityKey, prover::ZKVMProver, verifier::ZKVMVerifier,
},
structs::{ZKVMProvingKey, ZKVMVerifyingKey},
};
use ff_ext::{BabyBearExt4, ExtensionField};
#[cfg(not(feature = "gpu"))]
use gkr_iop::cpu::{CpuBackend, CpuProver};
#[cfg(feature = "gpu")]
use gkr_iop::gpu::{GpuBackend, GpuProver};
use gkr_iop::hal::ProverBackend;
use mpcs::{Basefold, BasefoldRSParams, PolynomialCommitmentScheme, SecurityLevel};
use openvm_continuations::verifier::internal::types::VmStarkProof;
#[cfg(feature = "gpu")]
use openvm_cuda_backend::engine::GpuBabyBearPoseidon2Engine as BabyBearPoseidon2Engine;
use openvm_native_circuit::{NativeBuilder, NativeConfig};
use openvm_sdk::prover::vm::new_local_prover;
use openvm_stark_backend::config::StarkGenericConfig;
use openvm_stark_sdk::config::baby_bear_poseidon2::BabyBearPoseidon2Config;
#[cfg(not(feature = "gpu"))]
use openvm_stark_sdk::config::baby_bear_poseidon2::BabyBearPoseidon2Engine;
use serde::Serialize;
use std::sync::Arc;
#[allow(clippy::type_complexity)]
pub struct Sdk<
E: ExtensionField,
PCS: PolynomialCommitmentScheme<E>,
PB,
PD,
SC: StarkGenericConfig,
VC,
> where
PB: ProverBackend<E = E, Pcs = PCS> + 'static,
PD: ProverDevice<PB> + 'static,
{
pub device: Option<PD>,
pub app_program: Option<Program>,
pub platform: Option<Platform>,
pub multi_prover: Option<MultiProver>,
// base(app) layer
pub zkvm_pk: Option<Arc<ZKVMProvingKey<E, PCS>>>,
pub zkvm_vk: Option<ZKVMVerifyingKey<E, PCS>>,
pub zkvm_prover: Option<ZKVMProver<E, PCS, PB, PD>>,
// aggregation
pub agg_pk: Option<CenoRecursionProvingKeys<SC, VC>>,
}
impl<
E: ExtensionField + LkMultiplicityKey,
PCS: PolynomialCommitmentScheme<E> + 'static + Serialize,
PB,
PD,
SC: StarkGenericConfig,
VC,
> Sdk<E, PCS, PB, PD, SC, VC>
where
PB: ProverBackend<E = E, Pcs = PCS> + 'static,
PD: ProverDevice<PB> + 'static,
{
pub fn new() -> Self {
Self {
device: None,
app_program: None,
platform: None,
multi_prover: None,
zkvm_pk: None,
zkvm_vk: None,
zkvm_prover: None,
agg_pk: None,
}
}
pub fn new_with_app_config(
program: Program,
platform: Platform,
multi_prover: MultiProver,
) -> Self {
Self {
device: None,
app_program: Some(program),
platform: Some(platform),
multi_prover: Some(multi_prover),
zkvm_pk: None,
zkvm_vk: None,
zkvm_prover: None,
agg_pk: None,
}
}
pub fn set_app_pk(&mut self, pk: ZKVMProvingKey<E, PCS>) {
self.zkvm_pk = Some(Arc::new(pk));
}
// allow us to read the app vk from file and then set it
pub fn set_app_vk(&mut self, vk: ZKVMVerifyingKey<E, PCS>) {
self.zkvm_vk = Some(vk);
}
pub fn set_agg_pk(&mut self, agg_pk: CenoRecursionProvingKeys<SC, VC>) {
self.agg_pk = Some(agg_pk);
}
fn set_zkvm_prover(&mut self, device: PD) {
let (pk, vk) = self
.zkvm_pk
.clone()
.zip(self.zkvm_vk.clone())
.unwrap_or_else(|| {
tracing::debug!(
"empty app proving/verifying key detected — running key generation..."
);
let (Some(program), Some(platform), Some(multi_prover)) = (
self.app_program.as_ref(),
self.platform.as_ref(),
self.multi_prover.as_ref(),
) else {
panic!("empty app config")
};
let start = std::time::Instant::now();
let ctx =
setup_program::<E>(program.clone(), platform.clone(), multi_prover.clone());
tracing::debug!("setup_program done in {:?}", start.elapsed());
// Keygen
let start = std::time::Instant::now();
let (pk, vk) = ctx.keygen_with_pb(device.get_pb());
tracing::debug!("keygen done in {:?}", start.elapsed());
(pk.into(), vk)
});
self.zkvm_vk = Some(vk.clone());
self.zkvm_pk = Some(pk.clone());
self.zkvm_prover = Some(ZKVMProver::new(pk, device));
}
pub fn generate_base_proof(
&self,
hints: CenoStdin,
pub_io: CenoStdin,
max_steps: usize,
shard_id: Option<usize>,
) -> Vec<ZKVMProof<E, PCS>> {
if let Some(zkvm_prover) = self.zkvm_prover.as_ref() {
let init_full_mem = zkvm_prover.setup_init_mem(&Vec::from(&hints), &Vec::from(&pub_io));
run_e2e_proof::<E, PCS, PB, PD>(zkvm_prover, &init_full_mem, max_steps, false, shard_id)
} else {
panic!("ZKVMProver is not initialized")
}
}
pub fn get_app_pk(&self) -> Arc<ZKVMProvingKey<E, PCS>> {
self.zkvm_pk.clone().expect("zkvm pk is not set")
}
pub fn get_app_vk(&self) -> ZKVMVerifyingKey<E, PCS> {
self.zkvm_vk.clone().expect("zkvm vk is not set")
}
pub fn get_agg_pk(&self) -> CenoRecursionProvingKeys<SC, VC> {
self.agg_pk.clone().expect("agg pk is not set")
}
pub fn get_agg_vk(&self) -> CenoRecursionVerifierKeys<SC> {
self.agg_pk.as_ref().expect("agg pk is not set").get_vk()
}
pub fn create_zkvm_verifier(&self) -> ZKVMVerifier<E, PCS> {
let Some(app_vk) = self.zkvm_vk.clone() else {
panic!("empty zkvm vk");
};
ZKVMVerifier::new(app_vk)
}
}
impl<PB, PD>
Sdk<BabyBearExt4, Basefold<E, BasefoldRSParams>, PB, PD, BabyBearPoseidon2Config, NativeConfig>
where
PB: ProverBackend<E = BabyBearExt4, Pcs = Basefold<E, BasefoldRSParams>> + 'static,
PD: ProverDevice<PB> + 'static,
{
/// aggregating base proofs into a root STARK proof
pub fn compress_to_root_proof(
&mut self,
base_proofs: Vec<ZKVMProof<BabyBearExt4, Basefold<E, BasefoldRSParams>>>,
) -> VmStarkProof<BabyBearPoseidon2Config> {
let vb = NativeBuilder::default();
// TODO: cache agg_prover
let mut agg_prover = if let Some(agg_pk) = self.agg_pk.as_ref() {
let leaf_prover = new_local_prover::<BabyBearPoseidon2Engine, NativeBuilder>(
vb.clone(),
&agg_pk.leaf_vm_pk,
agg_pk.leaf_committed_exe.exe.clone(),
)
.expect("leaf prover");
let internal_prover = new_local_prover::<BabyBearPoseidon2Engine, NativeBuilder>(
vb.clone(),
&agg_pk.internal_vm_pk,
agg_pk.internal_committed_exe.exe.clone(),
)
.expect("internal prover");
CenoAggregationProver::new(leaf_prover, internal_prover, agg_pk.clone())
} else {
let agg_prover = CenoAggregationProver::from_base_vk(self.zkvm_vk.clone().unwrap());
self.agg_pk = Some(agg_prover.pk.clone());
agg_prover
};
agg_prover.generate_root_proof(base_proofs)
}
pub fn init_agg_pk(
&mut self,
) -> CenoRecursionProvingKeys<BabyBearPoseidon2Config, NativeConfig> {
assert!(self.zkvm_vk.is_some(), "zkvm_vk is not set");
if self.agg_pk.is_none() {
let agg_prover = CenoAggregationProver::from_base_vk(self.zkvm_vk.clone().unwrap());
self.agg_pk = Some(agg_prover.pk.clone());
}
self.agg_pk.clone().unwrap()
}
pub fn get_agg_verifier(&self) -> CenoRecursionVerifierKeys<BabyBearPoseidon2Config> {
let Some(agg_pk) = self.agg_pk.as_ref() else {
panic!("empty agg_pk")
};
agg_pk.get_vk()
}
}
impl<
E: ExtensionField + LkMultiplicityKey,
PCS: PolynomialCommitmentScheme<E> + Serialize + 'static,
PB,
PD,
SC: StarkGenericConfig,
VC,
> Default for Sdk<E, PCS, PB, PD, SC, VC>
where
PB: ProverBackend<E = E, Pcs = PCS> + 'static,
PD: ProverDevice<PB> + 'static,
{
fn default() -> Self {
Self::new()
}
}
#[cfg(not(feature = "gpu"))]
pub type CenoSDK<E, PCS, SC, VC> =
Sdk<E, PCS, CpuBackend<E, PCS>, CpuProver<CpuBackend<E, PCS>>, SC, VC>;
#[cfg(not(feature = "gpu"))]
impl<E, PCS, SC, VC> CenoSDK<E, PCS, SC, VC>
where
E: ExtensionField + LkMultiplicityKey,
PCS: PolynomialCommitmentScheme<E> + Serialize + 'static,
SC: StarkGenericConfig,
VC: 'static,
{
pub fn init_base_prover(&mut self, max_num_variables: usize, level: SecurityLevel) {
let backend = create_backend(max_num_variables, level);
let device = create_prover(backend);
self.set_zkvm_prover(device);
}
}
#[cfg(feature = "gpu")]
pub type CenoSDK<E, PCS, SC, VC> =
Sdk<E, PCS, GpuBackend<E, PCS>, GpuProver<GpuBackend<E, PCS>>, SC, VC>;
#[cfg(feature = "gpu")]
impl<E, PCS, SC, VC> CenoSDK<E, PCS, SC, VC>
where
E: ExtensionField + LkMultiplicityKey,
PCS: PolynomialCommitmentScheme<E> + Serialize + 'static,
SC: StarkGenericConfig,
VC: 'static,
{
pub fn init_base_prover(&mut self, max_num_variables: usize, level: SecurityLevel) {
let backend = create_backend(max_num_variables, level);
let device = create_prover(backend);
self.set_zkvm_prover(device);
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/lib.rs | ceno_cli/src/lib.rs | pub mod sdk;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/utils.rs | ceno_cli/src/utils.rs | use anyhow::bail;
use console::style;
use get_dir::{FileTarget, GetDir, Target};
use std::{
backtrace::BacktraceStatus,
fmt,
fs::File,
io,
io::Write,
path::{Path, PathBuf},
process::Command,
sync::OnceLock,
};
use tempfile::TempDir;
/// Controls whether we should print the progress of the command.
pub static QUITE: OnceLock<bool> = OnceLock::new();
/// The rustc target triple name for ceno.
pub const RUSTC_TARGET: &str = "riscv32im-ceno-zkvm-elf";
/// Search for a `Cargo.toml` in the given path and its parent directories.
pub fn search_cargo_manifest<P: AsRef<Path>>(path: P) -> anyhow::Result<PathBuf> {
let path = path.as_ref().canonicalize()?;
match GetDir::new()
.directory(&path)
.targets(vec![Target::File(FileTarget { name: "Cargo.toml" })])
.run_reverse()
{
Err(e) if e.kind() == io::ErrorKind::NotFound => {
bail!(
"Could not find a `Cargo.toml` in {} or its parent directories",
path.display()
);
}
path => Ok(path?.join("Cargo.toml")),
}
}
/// Search for a workspace root in the given path and its parent directories.
pub fn search_workspace_root<P: AsRef<Path>>(path: P) -> anyhow::Result<PathBuf> {
let path = path.as_ref().canonicalize()?;
match GetDir::new()
.directory(&path)
.targets(vec![Target::File(FileTarget { name: "Cargo.lock" })])
.run_reverse()
{
Err(e) if e.kind() == io::ErrorKind::NotFound => {
// try to generate a lockfile if we are in a workspace
eprintln!(
"{}{} No Cargo.lock found, try to generating one.",
style("warning").yellow().bold(),
style(":").white().bold(),
);
}
path => return Ok(path?),
}
let result = Command::new("cargo").arg("generate-lockfile").status()?;
if !result.success() {
bail!("failed to generate lockfile");
}
match GetDir::new()
.targets(vec![Target::File(FileTarget { name: "Cargo.lock" })])
.run_reverse()
{
Err(e) if e.kind() == io::ErrorKind::NotFound => {
bail!(
"Could not find a cargo workspace in {} or its parent directories",
path.display()
);
}
path => Ok(path?),
}
}
/// Get `RUSTFLAGS` env (if any) and append the base flags.
pub fn get_rust_flags() -> String {
const BASE_RUST_FLAGS: &[&str] = &[
"-C",
"panic=abort",
"-C",
"link-arg=-Tmemory.x",
"-C",
"link-arg=-Tceno_link.x",
"-Zlocation-detail=none",
"-C",
"passes=lower-atomic",
"--cfg",
"getrandom_backend=\"custom\"",
];
let mut rust_flags = std::env::var("RUSTFLAGS").unwrap_or_else(|_| String::new());
if !rust_flags.is_empty() {
rust_flags.push(' ');
}
rust_flags.push_str(&BASE_RUST_FLAGS.join(" "));
rust_flags
}
/// Apply the build-std args to the cargo command.
pub fn apply_cargo_build_std_args(command: &mut Command) {
const BASE_CARGO_ARGS: &[&str] = &[
"-Z",
"build-std=alloc,core,compiler_builtins,std,panic_abort,proc_macro",
"-Z",
"build-std-features=compiler-builtins-mem,panic_immediate_abort,default",
];
command.args(BASE_CARGO_ARGS);
}
/// Release the target definition json into a temp file.
pub fn release_target_json() -> io::Result<(TempDir, PathBuf)> {
const TARGET_DEFINITION: &[u8] = include_bytes!("../../ceno_rt/riscv32im-ceno-zkvm-elf.json");
let temp_dir = TempDir::new()?;
let path = temp_dir.path().join("riscv32im-ceno-zkvm-elf.json");
let mut target_json_file = File::create(&path)?;
target_json_file.write_all(TARGET_DEFINITION)?;
Ok((temp_dir, path))
}
/// Print the entire command line to stderr.
pub fn print_cmdline(command: &Command) {
if *QUITE.get_or_init(|| false) {
return;
}
eprint!("{:>12} ", style("Running").green().bold());
eprint!("{}", command.get_program().to_string_lossy());
for arg in command.get_args() {
eprint!(" {}", arg.to_string_lossy());
}
eprintln!();
}
/// Print the error message and backtrace (if any).
pub fn print_error(e: anyhow::Error) {
for e in e.chain().rev() {
eprintln!(
"{}{} {}",
style("error").red().bold(),
style(":").white().bold(),
e
);
}
let bt = e.backtrace();
if bt.status() == BacktraceStatus::Captured {
eprintln!("error backtrace:");
eprintln!("{bt}");
}
}
/// Print cargo style message to stderr
pub fn print_cargo_message(status: &str, msg: fmt::Arguments) {
if *QUITE.get_or_init(|| false) {
return;
}
eprint!("{:>12} ", style(status).green().bold());
eprintln!("{}", msg);
}
/// Parse the binary size from a string.
pub fn parse_size(s: &str) -> Result<u32, parse_size::Error> {
parse_size::Config::new()
.with_binary()
.parse_size(s)
.map(|size| size as u32)
}
/// Canonicalize a path allowing for non-existent paths.
pub fn canonicalize_allow_nx<P: AsRef<Path>>(path: P) -> io::Result<PathBuf> {
let path = path.as_ref();
if path.exists() {
return path.canonicalize();
}
let mut cur = if path.is_absolute() {
path.to_path_buf()
} else {
std::env::current_dir()?.join(path)
};
let mut tails = Vec::new();
while !cur.exists() {
let name = cur.file_name().ok_or_else(|| {
io::Error::new(
io::ErrorKind::InvalidInput,
format!("cannot peel off component from `{}`", cur.display()),
)
})?;
tails.push(name.to_os_string());
cur.pop();
}
let mut canon = cur.canonicalize()?;
for seg in tails.into_iter().rev() {
canon.push(seg);
}
Ok(canon)
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/main.rs | ceno_cli/src/main.rs | use crate::{commands::*, utils::*};
use anyhow::Context;
#[cfg(all(feature = "jemalloc", unix, not(test)))]
use ceno_zkvm::print_allocated_bytes;
use clap::{Args, Parser, Subcommand};
mod commands;
mod utils;
// Use jemalloc as global allocator for performance
#[cfg(all(feature = "jemalloc", unix, not(test)))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
const CENO_VERSION: &str = env!("CENO_VERSION");
#[derive(Parser)]
#[command(name = "cargo", bin_name = "cargo")]
enum Cargo {
#[command(name = "ceno")]
Ceno(VmCli),
}
#[derive(Args)]
#[command(
author,
about,
long_about = None,
version = CENO_VERSION
)]
struct VmCli {
toolchain: Option<String>,
#[clap(subcommand)]
command: VmCliCommands,
}
#[derive(Subcommand)]
pub enum VmCliCommands {
// Bench(BenchCmd),
Build(BuildCmd),
Keygen(KeygenCmd),
Prove(ProveCmd),
Run(RunCmd),
RawKeygen(RawKeygenCmd),
RawProve(RawProveCmd),
RawRun(RawRunCmd),
// Setup(EvmProvingSetupCmd),
Verify(VerifyCmd),
Info(InfoCmd),
}
fn main() {
let Cargo::Ceno(args) = Cargo::parse();
let mut toolchain = args.toolchain;
if let Some(toolchain) = toolchain.as_mut() {
if !toolchain.starts_with("+") {
print_error(anyhow::anyhow!("invalid toolchain selector: {toolchain}"));
std::process::exit(1);
}
*toolchain = toolchain.strip_prefix("+").unwrap().to_string();
}
let command = args.command;
let result = match command {
// VmCliCommands::Bench(cmd) => cmd.run(),
VmCliCommands::Build(cmd) => cmd
.run(toolchain)
.context("could not build ceno program due to previous error"),
VmCliCommands::Prove(cmd) => cmd
.run(toolchain)
.context("could not run and prove ceno program due to previous error"),
VmCliCommands::Run(cmd) => cmd
.run(toolchain)
.context("could not run ceno program due to previous error"),
VmCliCommands::Keygen(cmd) => cmd
.run(toolchain)
.context("could not run ceno program due to previous error"),
VmCliCommands::RawKeygen(cmd) => cmd
.run()
.context("could not generate vk for given elf due to previous error"),
VmCliCommands::RawProve(cmd) => cmd
.run()
.context("could not run and prove given elf due to previous error"),
VmCliCommands::RawRun(cmd) => cmd
.run()
.context("could not run given elf due to previous error"),
// VmCliCommands::Setup(cmd) => cmd.run().await,
VmCliCommands::Verify(cmd) => cmd.run(),
VmCliCommands::Info(cmd) => cmd.run(),
};
if let Err(e) = result {
print_error(e);
std::process::exit(1);
}
#[cfg(all(feature = "jemalloc", unix, not(test)))]
{
print_allocated_bytes();
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/commands/raw_run.rs | ceno_cli/src/commands/raw_run.rs | use crate::commands::common_args::*;
use clap::Parser;
use std::path::PathBuf;
#[derive(Parser)]
#[command(name = "keygen", about = "Generate vk for an elf file")]
pub struct RawKeygenCmd {
/// Path to the Ceno elf file
elf: PathBuf,
#[clap(flatten, next_help_heading = "Ceno Options")]
ceno_options: CenoOptions,
#[clap(flatten, next_help_heading = "Compilation Options")]
compilation_options: CompilationOptions,
}
impl RawKeygenCmd {
pub fn run(self) -> anyhow::Result<()> {
self.ceno_options.try_setup_logger();
self.ceno_options
.keygen(&self.compilation_options, self.elf)
}
}
#[derive(Parser)]
#[command(name = "run", about = "Run an elf file")]
pub struct RawRunCmd {
/// Path to the Ceno elf file
elf: PathBuf,
#[clap(flatten, next_help_heading = "Ceno Options")]
ceno_options: CenoOptions,
#[clap(flatten, next_help_heading = "Compilation Options")]
compilation_options: CompilationOptions,
}
impl RawRunCmd {
pub fn run(self) -> anyhow::Result<()> {
self.ceno_options.try_setup_logger();
self.ceno_options.run(&self.compilation_options, self.elf)
}
}
#[derive(Parser)]
#[command(name = "run", about = "Run and prove an elf file")]
pub struct RawProveCmd {
/// Path to the Ceno elf file
elf: PathBuf,
#[clap(flatten, next_help_heading = "Ceno Options")]
ceno_options: CenoOptions,
#[clap(flatten, next_help_heading = "Compilation Options")]
compilation_options: CompilationOptions,
}
impl RawProveCmd {
pub fn run(self) -> anyhow::Result<()> {
self.ceno_options.try_setup_logger();
self.ceno_options.prove(&self.compilation_options, self.elf)
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/commands/info.rs | ceno_cli/src/commands/info.rs | use clap::Parser;
#[derive(Parser)]
#[command(name = "info", about = "Show info of current ceno cli")]
pub struct InfoCmd;
impl InfoCmd {
pub fn run(self) -> anyhow::Result<()> {
eprintln!("OS: {}", std::env::consts::OS);
eprintln!("Arch: {}", std::env::consts::ARCH);
let mut enabled_features = vec![];
if cfg!(debug_assertions) {
enabled_features.push("debug_assertions");
}
if cfg!(feature = "nightly-features") {
enabled_features.push("nightly-features");
}
if cfg!(target_feature = "neon") {
enabled_features.push("neon");
}
if cfg!(target_feature = "avx2") {
enabled_features.push("avx2");
}
if cfg!(target_feature = "avx512f") {
enabled_features.push("avx512f");
}
if cfg!(target_feature = "sha3") {
enabled_features.push("sha3");
}
eprintln!("Enabled features: {}", enabled_features.join(", "));
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/commands/build.rs | ceno_cli/src/commands/build.rs | //! Build ceno program
//!
//! Reference cargo.toml file:
//! ```toml
//! [unstable]
//! build-std = [
//! "alloc",
//! "core",
//! "compiler_builtins",
//! "std",
//! "panic_abort",
//! "proc_macro",
//! ]
//! build-std-features = [
//! "compiler-builtins-mem",
//! "panic_immediate_abort",
//! "default",
//! ]
//!
//! [profile.dev]
//! panic = "abort"
//!
//! [build]
//! rustflags = [
//! "-C",
//! "link-arg=-Tmemory.x",
//! "-C",
//! "link-arg=-Tceno_link.x",
//! "-Zlocation-detail=none",
//! "-C",
//! "passes=lower-atomic",
//! ]
//! target = "../ceno_rt/riscv32im-ceno-zkvm-elf.json"
//! ```
use crate::{commands::common_args::*, utils::*};
use anyhow::bail;
use clap::Parser;
use std::process::{Command, Stdio};
#[derive(Parser)]
#[command(name = "build", about = "Compile an Ceno program")]
pub struct BuildCmd {
#[clap(flatten, next_help_heading = "Options")]
pub cargo_options: CargoOptions,
#[clap(flatten, next_help_heading = "Package Selection")]
pub package_selection: PackageSelection,
#[clap(flatten, next_help_heading = "Target Selection")]
pub target_selection: TargetSelection,
#[clap(flatten, next_help_heading = "Feature Selection")]
pub feature_selection: FeatureSelection,
#[clap(flatten, next_help_heading = "Compilation Options")]
pub compilation_options: CompilationOptions,
#[clap(flatten, next_help_heading = "Manifest Options")]
pub manifest_options: ManifestOptions,
}
impl BuildCmd {
pub fn run(self, toolchain: Option<String>) -> anyhow::Result<()> {
self.cargo_options.set_global();
let mut command = Command::new("cargo");
command
.stdin(Stdio::null())
.stdout(Stdio::inherit())
.stderr(Stdio::inherit())
.env("RUSTFLAGS", get_rust_flags());
if let Some(toolchain) = toolchain {
command.arg(format!("+{}", toolchain));
}
command.arg("build");
self.cargo_options.apply_to(&mut command);
self.package_selection.apply_to(&mut command);
self.target_selection.apply_to(&mut command);
self.feature_selection.apply_to(&mut command);
let _guard = self.compilation_options.apply_to(&mut command)?;
self.manifest_options.apply_to(&mut command)?;
apply_cargo_build_std_args(&mut command);
print_cmdline(&command);
let status = command.status()?;
if !status.success() {
match status.code() {
Some(code) => bail!("cargo exited with status code: {code}"),
None => bail!("cargo terminated by signal"),
}
}
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/commands/verify.rs | ceno_cli/src/commands/verify.rs | use crate::utils::print_cargo_message;
use anyhow::{Context, bail};
use ceno_zkvm::{
e2e::{FieldType, PcsKind, verify},
scheme::{ZKVMProof, verifier::ZKVMVerifier},
structs::ZKVMVerifyingKey,
};
use clap::Parser;
use ff_ext::{BabyBearExt4, ExtensionField, GoldilocksExt2};
use mpcs::{Basefold, BasefoldRSParams, PolynomialCommitmentScheme, Whir, WhirDefaultSpec};
use serde::Serialize;
use std::{fs::File, path::PathBuf};
#[derive(Parser)]
#[command(name = "run", about = "Verify a Ceno proof")]
pub struct VerifyCmd {
/// The polynomial commitment scheme to use.
#[arg(long, value_enum, default_value_t = PcsKind::default())]
pcs: PcsKind,
/// The field to use, eg. goldilocks
#[arg(long, value_enum, default_value_t = FieldType::default())]
field: FieldType,
/// Path to the serialized proof file
#[clap(long)]
proof: PathBuf,
/// Path to the verifying key file
#[clap(long)]
vk: PathBuf,
}
impl VerifyCmd {
pub fn run(self) -> anyhow::Result<()> {
match (self.pcs, self.field) {
(PcsKind::Basefold, FieldType::Goldilocks) => {
run_inner::<GoldilocksExt2, Basefold<GoldilocksExt2, BasefoldRSParams>>(self)
}
(PcsKind::Basefold, FieldType::BabyBear) => {
run_inner::<BabyBearExt4, Basefold<BabyBearExt4, BasefoldRSParams>>(self)
}
(PcsKind::Whir, FieldType::Goldilocks) => {
run_inner::<GoldilocksExt2, Whir<GoldilocksExt2, WhirDefaultSpec>>(self)
}
(PcsKind::Whir, FieldType::BabyBear) => {
run_inner::<BabyBearExt4, Whir<BabyBearExt4, WhirDefaultSpec>>(self)
}
}
}
}
fn run_inner<E: ExtensionField, PCS: PolynomialCommitmentScheme<E> + Serialize>(
args: VerifyCmd,
) -> anyhow::Result<()> {
let start = std::time::Instant::now();
let zkvm_proofs: Vec<ZKVMProof<E, PCS>> =
bincode::deserialize_from(File::open(&args.proof).context("Failed to open proof file")?)
.context("Failed to deserialize proof file")?;
print_cargo_message(
"Loaded",
format_args!(
"proof from {} in {:.2}s",
args.proof.canonicalize().unwrap().display(),
start.elapsed().as_secs_f32()
),
);
let start = std::time::Instant::now();
let vk: ZKVMVerifyingKey<E, PCS> =
bincode::deserialize_from(File::open(&args.vk).context("Failed to open vk file")?)
.context("Failed to deserialize vk file")?;
print_cargo_message(
"Loaded",
format_args!(
"verifying key from {} in {:.2}s",
args.vk.canonicalize().unwrap().display(),
start.elapsed().as_secs_f32()
),
);
let start = std::time::Instant::now();
let verifier = ZKVMVerifier::new(vk);
if let Err(e) = verify(zkvm_proofs, &verifier) {
bail!("Verification failed: {e:?}");
}
print_cargo_message(
"Verified",
format_args!("in {:.2}s", start.elapsed().as_secs_f32()),
);
Ok(())
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/commands/mod.rs | ceno_cli/src/commands/mod.rs | mod common_args;
mod build;
pub use build::*;
mod run;
pub use run::*;
mod raw_run;
pub use raw_run::*;
mod verify;
pub use verify::*;
mod info;
pub use info::*;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/commands/run.rs | ceno_cli/src/commands/run.rs | use crate::{
commands::{BuildCmd, common_args::*},
utils::*,
};
use clap::Parser;
use std::env::current_dir;
#[derive(Parser)]
#[command(name = "keygen", about = "Generate vk for a Cargo Ceno program")]
pub struct KeygenCmd {
#[clap(flatten)]
inner: CmdInner,
}
#[derive(Parser)]
#[command(name = "run", about = "Run a Cargo Ceno program")]
pub struct RunCmd {
#[clap(flatten)]
inner: CmdInner,
}
#[derive(Parser)]
#[command(name = "prove", about = "Run and Prove a Cargo Ceno program")]
pub struct ProveCmd {
#[clap(flatten)]
inner: CmdInner,
}
#[derive(Parser)]
#[command(name = "run", about = "Run a Cargo Ceno program")]
struct CmdInner {
#[clap(flatten, next_help_heading = "Ceno Options")]
ceno_options: CenoOptions,
#[clap(flatten, next_help_heading = "Cargo Options")]
cargo_options: CargoOptions,
#[clap(flatten, next_help_heading = "Package Selection")]
package_selection: PackageSelection,
#[clap(flatten, next_help_heading = "Target Selection")]
target_selection: TargetSelection,
#[clap(flatten, next_help_heading = "Feature Selection")]
feature_selection: FeatureSelection,
#[clap(flatten, next_help_heading = "Compilation Options")]
compilation_options: CompilationOptions,
#[clap(flatten, next_help_heading = "Manifest Options")]
manifest_options: ManifestOptions,
}
enum RunKind {
Prove,
Keygen,
Run,
}
impl KeygenCmd {
pub fn run(self, toolchain: Option<String>) -> anyhow::Result<()> {
let start = std::time::Instant::now();
self.inner.run(toolchain, RunKind::Keygen)?;
print_cargo_message(
"Finished",
format_args!("keygen in {:.2}s", start.elapsed().as_secs_f32()),
);
Ok(())
}
}
impl RunCmd {
pub fn run(self, toolchain: Option<String>) -> anyhow::Result<()> {
let start = std::time::Instant::now();
self.inner.run(toolchain, RunKind::Run)?;
print_cargo_message(
"Finished",
format_args!("running elf in {:.2}s", start.elapsed().as_secs_f32()),
);
Ok(())
}
}
impl ProveCmd {
pub fn run(self, toolchain: Option<String>) -> anyhow::Result<()> {
let start = std::time::Instant::now();
self.inner.run(toolchain, RunKind::Prove)?;
print_cargo_message(
"Finished",
format_args!(
"running elf and proving in {:.2}s",
start.elapsed().as_secs_f32()
),
);
Ok(())
}
}
impl CmdInner {
fn run(self, toolchain: Option<String>, kind: RunKind) -> anyhow::Result<()> {
let workspace_root = search_workspace_root(current_dir()?)?;
let manifest_path = match self.manifest_options.manifest_path.clone() {
Some(path) => path,
None => search_cargo_manifest(current_dir()?)?,
};
println!("Using manifest {}", manifest_path.display());
let target_selection = self.target_selection.canonicalize(
&workspace_root,
&manifest_path,
&self.package_selection,
)?;
let build = BuildCmd {
cargo_options: self.cargo_options.clone(),
package_selection: self.package_selection.clone(),
target_selection: target_selection.clone(),
feature_selection: self.feature_selection.clone(),
compilation_options: self.compilation_options.clone(),
manifest_options: self.manifest_options.clone(),
};
build.run(toolchain.clone())?;
let target_elf =
target_selection.get_target_path(&workspace_root, &self.compilation_options);
assert!(target_elf.exists(), "{}", target_elf.display());
match kind {
RunKind::Keygen => {
self.ceno_options
.keygen(&self.compilation_options, target_elf)?;
}
RunKind::Run => {
self.ceno_options
.run(&self.compilation_options, target_elf)?;
}
RunKind::Prove => {
self.ceno_options
.prove(&self.compilation_options, target_elf)?;
}
}
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/commands/common_args/mod.rs | ceno_cli/src/commands/common_args/mod.rs | mod cargo;
pub use cargo::*;
mod ceno;
pub use ceno::*;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/commands/common_args/cargo.rs | ceno_cli/src/commands/common_args/cargo.rs | use crate::utils::{RUSTC_TARGET, release_target_json};
use anyhow::{Context, bail};
use cargo_metadata::{MetadataCommand, TargetKind};
use clap::Args;
use std::{
path::{Path, PathBuf},
process::Command,
};
use tempfile::TempDir;
/// Options:
/// --message-format <FMT> Error format
/// -v, --verbose... Use verbose output (-vv very verbose/build.rs output)
/// -q, --quiet Do not print cargo log messages
/// --color <WHEN> Coloring: auto, always, never
/// --config <KEY=VALUE|PATH> Override a configuration value
/// -Z <FLAG> Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details
#[derive(Clone, Args)]
pub struct CargoOptions {
/// Error format
#[arg(long)]
pub message_format: Option<String>,
/// Use verbose output (-vv very verbose/build.rs output)
#[arg(short, long, action = clap::ArgAction::Count)]
pub verbose: u8,
/// Do not print cargo log messages
#[arg(short, long)]
pub quiet: bool,
/// Coloring: auto, always, never
#[arg(long)]
pub color: Option<String>,
/// Override a configuration value
#[arg(long)]
pub config: Option<Vec<String>>,
/// Unstable (nightly-only) flags to Cargo, see 'cargo -Z help' for details
#[arg(short = 'Z', action = clap::ArgAction::Append)]
pub unstable_flags: Option<Vec<String>>,
}
/// Package Selection:
/// -p, --package [<SPEC>] Package with the target to run
#[derive(Clone, Args)]
pub struct PackageSelection {
/// Package with the target to run
#[arg(short = 'p', long)]
pub package: Option<String>,
}
/// Target Selection:
/// --bin [<NAME>] Name of the bin target to run
/// --example [<NAME>] Name of the example target to run
#[derive(Clone, Args)]
pub struct TargetSelection {
/// Name of the bin target to run
#[arg(long, conflicts_with = "example")]
pub bin: Option<String>,
/// Name of the example target to run
#[arg(long, conflicts_with = "bin")]
pub example: Option<String>,
}
/// Feature Selection:
/// -F, --features <FEATURES> Space or comma separated list of features to activate
/// --all-features Activate all available features
/// --no-default-features Do not activate the `default` feature
#[derive(Clone, Args)]
pub struct FeatureSelection {
/// Space or comma separated list of features to activate
#[arg(short = 'F', long)]
pub features: Option<Vec<String>>,
/// Activate all available features
#[arg(long)]
pub all_features: bool,
/// Do not activate the `default` feature
#[arg(long)]
pub no_default_features: bool,
}
/// Compilation Options:
/// -j, --jobs <N> Number of parallel jobs, defaults to # of CPUs.
/// --keep-going Do not abort the build as soon as there is an error
/// -r, --release Build artifacts in release mode, with optimizations
/// --profile <PROFILE-NAME> Build artifacts with the specified profile
/// --target [<TRIPLE>] Build for the target triple
/// --target-dir <DIRECTORY> Directory for all generated artifacts
#[derive(Clone, Args)]
pub struct CompilationOptions {
/// Number of parallel jobs, defaults to # of CPUs.
#[arg(short, long)]
pub jobs: Option<u32>,
/// Do not abort the build as soon as there is an error
#[arg(long)]
pub keep_going: bool,
/// Build artifacts in release mode, with optimizations
#[arg(short, long)]
pub release: bool,
/// Build artifacts with the specified profile
#[arg(long)]
pub profile: Option<String>,
/// Build for the target triple
#[arg(long)]
pub target: Option<String>,
/// Directory for all generated artifacts
#[arg(long)]
pub target_dir: Option<PathBuf>,
}
/// Manifest Options:
/// --manifest-path <PATH> Path to Cargo.toml
/// --lockfile-path <PATH> Path to Cargo.lock (unstable)
/// --ignore-rust-version Ignore `rust-version` specification in packages
/// --locked Assert that `Cargo.lock` will remain unchanged
/// --offline Run without accessing the network
/// --frozen Equivalent to specifying both --locked and --offline
#[derive(Clone, Args)]
pub struct ManifestOptions {
/// Path to Cargo.toml
#[arg(long)]
pub manifest_path: Option<PathBuf>,
/// Path to Cargo.lock (unstable)
#[arg(long)]
pub lockfile_path: Option<String>,
/// Ignore `rust-version` specification in packages
#[arg(long)]
pub ignore_rust_version: bool,
/// Assert that `Cargo.lock` will remain unchanged
#[arg(long)]
pub locked: bool,
/// Run without accessing the network
#[arg(long)]
pub offline: bool,
/// Equivalent to specifying both --locked and --offline
#[arg(long)]
pub frozen: bool,
}
impl CargoOptions {
    /// Set the global options based on the command line arguments.
    pub fn set_global(&self) {
        crate::utils::QUITE
            .set(self.quiet)
            .expect("failed to set quiet flag, this is a bug");
        // Only "always"/"never" force the color mode; any other value keeps
        // console's auto-detection untouched.
        match self.color.as_deref() {
            Some("always") => console::set_colors_enabled(true),
            Some("never") => console::set_colors_enabled(false),
            _ => {}
        }
    }
    /// Apply the args to the cargo command.
    pub fn apply_to(&self, command: &mut Command) {
        if let Some(message_format) = self.message_format.as_ref() {
            command.arg("--message-format").arg(message_format);
        }
        // Verbosity is forwarded as a single combined flag, e.g. `-vv`.
        if self.verbose > 0 {
            let mut flag = String::from("-");
            for _ in 0..self.verbose {
                flag.push('v');
            }
            command.arg(flag);
        }
        if self.quiet {
            command.arg("--quiet");
        }
        if let Some(color) = self.color.as_ref() {
            command.arg("--color").arg(color);
        }
        // `--config` and `-Z` are repeatable; forward every entry in order.
        for item in self.config.iter().flatten() {
            command.arg("--config").arg(item);
        }
        for flag in self.unstable_flags.iter().flatten() {
            command.arg("-Z").arg(flag);
        }
    }
}
impl PackageSelection {
    /// Forward the package filter onto the cargo command.
    pub fn apply_to(&self, command: &mut Command) {
        // `--package` restricts cargo to a single workspace member.
        if let Some(package) = &self.package {
            command.arg("--package").arg(package);
        }
    }
}
impl TargetSelection {
/// Check if any target is set.
pub fn is_set(&self) -> bool {
self.bin.is_some() || self.example.is_some()
}
/// Apply the args to the cargo command.
pub fn apply_to(&self, command: &mut Command) {
if let Some(bin) = self.bin.as_ref() {
command.arg("--bin").arg(bin);
} else if let Some(example) = self.example.as_ref() {
command.arg("--example").arg(example);
}
}
/// Get the target path for a target.
///
/// # Panics
///
/// Panics if neither `bin` nor `example` is set.
pub fn get_target_path<P: AsRef<Path>>(
&self,
workspace_root: P,
compilation_options: &CompilationOptions,
) -> PathBuf {
let workspace_root = workspace_root.as_ref();
let prefix = workspace_root
.join(
compilation_options
.target_dir
.as_deref()
.unwrap_or_else(|| Path::new("target")),
)
.join(RUSTC_TARGET)
.join(compilation_options.get_profile());
if let Some(bin) = self.bin.as_ref() {
prefix.join(bin)
} else if let Some(example) = self.example.as_ref() {
prefix.join("examples").join(example)
} else {
panic!("target need to be set");
}
}
pub fn canonicalize<P: AsRef<Path>>(
self,
workspace_root: P,
manifest_path: P,
package_selection: &PackageSelection,
) -> anyhow::Result<TargetSelection> {
if self.is_set() {
return Ok(self);
}
let workspace_root = workspace_root.as_ref().canonicalize()?;
let root_manifest_path = workspace_root.join("Cargo.toml");
let manifest_path = manifest_path.as_ref().canonicalize()?;
let metadata = MetadataCommand::new()
.manifest_path(&manifest_path)
.no_deps()
.exec()?;
let mut packages = vec![];
if let Some(package) = package_selection.package.as_ref() {
packages.push(
metadata
.packages
.iter()
.find(|p| p.name == *package)
.context(format!("package `{package}` not found",))?,
);
} else {
packages.extend(metadata.packages.iter());
}
let mut binary_targets = vec![];
for package in packages {
let package_manifest_path = package.manifest_path.canonicalize()?;
if manifest_path != root_manifest_path && package_manifest_path != manifest_path {
continue;
}
if let Some(default_run) = package.default_run.as_ref() {
binary_targets.push((true, default_run));
continue;
}
binary_targets.extend(package.targets.iter().filter_map(|target| {
let is_bin = target
.kind
.iter()
.any(|kind| matches!(kind, TargetKind::Bin));
let is_example = target
.kind
.iter()
.any(|kind| matches!(kind, TargetKind::Example));
if is_example {
Some((true, &target.name))
} else if is_bin {
Some((false, &target.name))
} else {
None
}
}));
}
if binary_targets.len() > 1 {
bail!(format!(
"multiple binaries found, please specify one with `--bin` or `--example`. possible binaries: {}",
binary_targets
.iter()
.map(|(_, name)| name.as_str())
.collect::<Vec<_>>()
.join(", ")
))
}
let (is_example, name) = binary_targets.pop().unwrap();
Ok(if is_example {
TargetSelection {
bin: None,
example: Some(name.to_string()),
}
} else {
TargetSelection {
bin: Some(name.to_string()),
example: None,
}
})
}
}
impl FeatureSelection {
    /// Forward the feature-selection flags onto the cargo command.
    pub fn apply_to(&self, command: &mut Command) {
        // Cargo accepts one comma-separated `--features` list.
        if let Some(features) = &self.features {
            command.arg("--features").arg(features.join(","));
        }
        for (enabled, flag) in [
            (self.all_features, "--all-features"),
            (self.no_default_features, "--no-default-features"),
        ] {
            if enabled {
                command.arg(flag);
            }
        }
    }
}
impl CompilationOptions {
    /// Get the directory name under `target/<triple>/` where cargo places
    /// artifacts for the selected profile.
    ///
    /// Cargo stores the built-in `dev` and `test` profiles in the `debug`
    /// directory and `bench` in `release`; only custom profiles use their own
    /// name. Returning the raw profile name for `dev` previously produced a
    /// non-existent artifact path for `--profile dev`.
    pub fn get_profile(&self) -> &str {
        if self.release {
            return "release";
        }
        match self.profile.as_deref() {
            None | Some("dev") | Some("test") => "debug",
            Some("bench") => "release",
            Some(profile) => profile,
        }
    }
    /// Apply the args to the cargo command.
    ///
    /// When no explicit `--target` is given, a bundled target-spec JSON is
    /// written to a temp dir; its guard is returned so the file outlives the
    /// cargo invocation.
    ///
    /// # Errors
    ///
    /// Returns an error when the bundled target definition cannot be written.
    pub fn apply_to(&self, command: &mut Command) -> anyhow::Result<Option<TempDir>> {
        if let Some(jobs) = self.jobs {
            command.arg("--jobs").arg(jobs.to_string());
        }
        if self.keep_going {
            command.arg("--keep-going");
        }
        if self.release {
            command.arg("--release");
        }
        if let Some(profile) = self.profile.as_ref() {
            command.arg("--profile").arg(profile);
        }
        if let Some(target) = self.target.as_ref() {
            command.arg("--target").arg(target);
        } else {
            let (guard, target_json_path) =
                release_target_json().context("failed to release target definition")?;
            command.arg("--target").arg(&target_json_path);
            return Ok(Some(guard));
        }
        Ok(None)
    }
}
impl ManifestOptions {
    /// Forward the manifest-related flags onto the cargo command.
    ///
    /// # Errors
    ///
    /// Currently infallible; kept as `Result` for interface stability.
    pub fn apply_to(&self, command: &mut Command) -> anyhow::Result<()> {
        if let Some(manifest_path) = self.manifest_path.as_ref() {
            command.arg("--manifest-path").arg(manifest_path);
        }
        if let Some(lockfile_path) = self.lockfile_path.as_ref() {
            // Cargo's (unstable) flag is `--lockfile-path`; `--lockfile` is not
            // a recognized cargo option and would make the invocation fail.
            command.arg("--lockfile-path").arg(lockfile_path);
        }
        if self.ignore_rust_version {
            command.arg("--ignore-rust-version");
        }
        if self.locked {
            command.arg("--locked");
        }
        if self.offline {
            command.arg("--offline");
        }
        if self.frozen {
            command.arg("--frozen");
        }
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/src/commands/common_args/ceno.rs | ceno_cli/src/commands/common_args/ceno.rs | use super::CompilationOptions;
use crate::utils::*;
use anyhow::{Context, bail};
use ceno_emul::{IterAddresses, Program, WORD_SIZE, Word};
use ceno_host::{CenoStdin, memory_from_file};
use ceno_zkvm::{
e2e::*,
scheme::{
constants::MAX_NUM_VARIABLES, create_backend, create_prover,
mock_prover::LkMultiplicityKey, verifier::ZKVMVerifier,
},
};
use clap::Args;
use ff_ext::{BabyBearExt4, ExtensionField, GoldilocksExt2};
use mpcs::{
Basefold, BasefoldRSParams, PolynomialCommitmentScheme, SecurityLevel, Whir, WhirDefaultSpec,
};
use serde::Serialize;
use std::{
fs::File,
path::{Path, PathBuf},
};
/// Ceno options
#[derive(Clone, Args)]
pub struct CenoOptions {
    /// The preset configuration to use.
    #[arg(long, value_enum, default_value_t = Preset::Ceno)]
    pub platform: Preset,
    /// The polynomial commitment scheme to use.
    #[arg(long, value_enum, default_value_t = PcsKind::default())]
    pcs: PcsKind,
    /// The field to use, eg. goldilocks
    #[arg(long, value_enum, default_value_t = FieldType::default())]
    field: FieldType,
    /// The maximum number of steps to execute the program.
    #[arg(long, default_value_t = usize::MAX)]
    pub max_steps: usize,
    /// The maximum number of variables the polynomial commitment scheme
    #[arg(long, default_value_t = MAX_NUM_VARIABLES)]
    pub max_num_variables: usize,
    /// Prover-private unconstrained input.
    /// This is a raw file mapped as a memory segment. Zero-padded to the right to the next power-of-two size.
    #[arg(long, conflicts_with = "hints")]
    hints_file: Option<PathBuf>,
    /// Prover-private unconstrained input as a list of words separated by commas or spaces.
    #[arg(long, conflicts_with = "hints_file", value_parser, num_args = 1..)]
    hints: Option<Vec<Word>>,
    /// Public constrained input.
    #[arg(long, value_parser, num_args = 1.., value_delimiter = ',')]
    public_io: Option<Vec<Word>>,
    /// pub io size in byte
    #[arg(long, default_value = "1k", value_parser = parse_size)]
    public_io_size: u32,
    /// The security level to use.
    #[arg(short, long, value_enum, default_value_t = SecurityLevel::default())]
    security_level: SecurityLevel,
    /// Stack size in bytes.
    #[arg(long, default_value = "2M", value_parser = parse_size)]
    stack_size: u32,
    /// Heap size in bytes.
    #[arg(long, default_value = "2M", value_parser = parse_size)]
    heap_size: u32,
    /// The path to the proof file to write.
    #[arg(long)]
    pub out_proof: Option<PathBuf>,
    /// The path to the verification key file to write.
    #[arg(long)]
    pub out_vk: Option<PathBuf>,
    /// prover id
    #[arg(long, default_value = "0")]
    prover_id: u32,
    /// number of available prover.
    #[arg(long, default_value = "1")]
    num_provers: u32,
    // max cycle per shard
    #[arg(long, default_value = "536870912")] // 536870912 = 2^29
    max_cycle_per_shard: u64,
    // max cell per shard
    // default value: 16GB VRAM, each cell 4 byte, log explosion 2
    // => 2^30 * 16 / 4 / 2
    #[arg(long, default_value = "2147483648")]
    max_cell_per_shard: u64,
    /// Profiling granularity.
    /// Setting any value restricts logs to profiling information
    #[arg(long)]
    profiling: Option<usize>,
    // for debug purpose
    // only generate respective shard id and skip others
    #[arg(long)]
    shard_id: Option<u64>,
}
impl CenoOptions {
/// Try set up the logger based on the verbosity level
pub fn try_setup_logger(&self) {
use tracing_forest::ForestLayer;
use tracing_subscriber::{
EnvFilter, Registry,
filter::{LevelFilter, filter_fn},
fmt,
layer::SubscriberExt,
util::SubscriberInitExt,
};
if *QUITE.get_or_init(|| false) {
return;
}
// default filter
let default_filter = EnvFilter::builder()
.with_default_directive(LevelFilter::DEBUG.into())
.from_env_lossy();
// filter by profiling level;
// spans with level i contain the field "profiling_{i}"
// this restricts statistics to first (args.profiling) levels
let profiling_level = self.profiling.unwrap_or(1);
let filter_by_profiling_level = filter_fn(move |metadata| {
(1..=profiling_level)
.map(|i| format!("profiling_{i}"))
.any(|field| metadata.fields().field(&field).is_some())
});
let fmt_layer = fmt::layer()
.compact()
.with_thread_ids(false)
.with_thread_names(false)
.without_time();
Registry::default()
.with(self.profiling.is_some().then_some(ForestLayer::default()))
.with(fmt_layer)
// if some profiling granularity is specified, use the profiling filter,
// otherwise use the default
.with(
self.profiling
.is_some()
.then_some(filter_by_profiling_level),
)
.with(self.profiling.is_none().then_some(default_filter))
.try_init()
.ok();
}
/// Get stack size
pub fn stack_size(&self) -> u32 {
self.stack_size.next_multiple_of(WORD_SIZE as u32)
}
/// Get heap size
pub fn heap_size(&self) -> u32 {
self.heap_size.next_multiple_of(WORD_SIZE as u32)
}
/// Read the public io into ceno stdin
pub fn read_public_io(&self) -> anyhow::Result<Vec<u32>> {
if let Some(public_io) = &self.public_io {
// if vector contains only one element, write it as a raw `u32`
// otherwise, write the entire vector
// in both cases, convert the resulting `CenoStdin` into a `Vec<u32>`
if public_io.len() == 1 {
CenoStdin::default()
.write(&public_io[0])
.map(|stdin| Into::<Vec<u32>>::into(&*stdin))
} else {
CenoStdin::default()
.write(public_io)
.map(|stdin| Into::<Vec<u32>>::into(&*stdin))
}
.context("failed to get public_io".to_string())
} else {
Ok(vec![])
}
}
/// Read the hints
pub fn read_hints(&self) -> anyhow::Result<Vec<u32>> {
if self.hints_file.is_some() {
let file_path = self.hints_file.as_deref().unwrap();
tracing::info!("Loading hints file: {:?}", file_path);
memory_from_file(file_path).context(format!("failed to read {}", file_path.display()))
} else if self.hints.is_some() {
let hints = self.hints.as_ref().unwrap();
// if the vector contains only one element, write it as a raw `u32`
// otherwise, write the entire vector
// in both cases, convert the resulting `CenoStdin` into a `Vec<u32>`
if hints.len() == 1 {
CenoStdin::default()
.write(&hints[0])
.ok()
.map(|stdin| Into::<Vec<u32>>::into(&*stdin))
} else {
CenoStdin::default()
.write(hints)
.ok()
.map(|stdin| Into::<Vec<u32>>::into(&*stdin))
}
.context("failed to get hints".to_string())
} else {
Ok(vec![])
}
}
/// Run keygen the ceno elf file with given options
pub fn keygen<P: AsRef<Path>>(
&self,
compilation_options: &CompilationOptions,
elf_path: P,
) -> anyhow::Result<()> {
self.try_setup_logger();
match (self.pcs, self.field) {
(PcsKind::Basefold, FieldType::Goldilocks) => {
keygen_inner::<GoldilocksExt2, Basefold<GoldilocksExt2, BasefoldRSParams>, P>(
self,
compilation_options,
elf_path,
)
}
(PcsKind::Basefold, FieldType::BabyBear) => {
keygen_inner::<BabyBearExt4, Basefold<BabyBearExt4, BasefoldRSParams>, P>(
self,
compilation_options,
elf_path,
)
}
(PcsKind::Whir, FieldType::Goldilocks) => {
keygen_inner::<GoldilocksExt2, Whir<GoldilocksExt2, WhirDefaultSpec>, P>(
self,
compilation_options,
elf_path,
)
}
(PcsKind::Whir, FieldType::BabyBear) => {
keygen_inner::<BabyBearExt4, Whir<BabyBearExt4, WhirDefaultSpec>, P>(
self,
compilation_options,
elf_path,
)
}
}
}
/// Run the ceno elf file with given options
pub fn run<P: AsRef<Path>>(
&self,
compilation_options: &CompilationOptions,
elf_path: P,
) -> anyhow::Result<()> {
self.try_setup_logger();
match (self.pcs, self.field) {
(PcsKind::Basefold, FieldType::Goldilocks) => {
run_elf_inner::<GoldilocksExt2, Basefold<GoldilocksExt2, BasefoldRSParams>, P>(
self,
compilation_options,
elf_path,
Checkpoint::PrepWitnessGen,
)?;
}
(PcsKind::Basefold, FieldType::BabyBear) => {
run_elf_inner::<BabyBearExt4, Basefold<BabyBearExt4, BasefoldRSParams>, P>(
self,
compilation_options,
elf_path,
Checkpoint::PrepWitnessGen,
)?;
}
(PcsKind::Whir, FieldType::Goldilocks) => {
run_elf_inner::<GoldilocksExt2, Whir<GoldilocksExt2, WhirDefaultSpec>, P>(
self,
compilation_options,
elf_path,
Checkpoint::PrepWitnessGen,
)?;
}
(PcsKind::Whir, FieldType::BabyBear) => {
run_elf_inner::<BabyBearExt4, Whir<BabyBearExt4, WhirDefaultSpec>, P>(
self,
compilation_options,
elf_path,
Checkpoint::PrepWitnessGen,
)?;
}
};
Ok(())
}
/// Run and prove the ceno elf file with given options
pub fn prove<P: AsRef<Path>>(
&self,
compilation_options: &CompilationOptions,
elf_path: P,
) -> anyhow::Result<()> {
self.try_setup_logger();
match (self.pcs, self.field) {
(PcsKind::Basefold, FieldType::Goldilocks) => {
prove_inner::<GoldilocksExt2, Basefold<GoldilocksExt2, BasefoldRSParams>, P>(
self,
compilation_options,
elf_path,
Checkpoint::Complete,
)
}
(PcsKind::Basefold, FieldType::BabyBear) => {
prove_inner::<BabyBearExt4, Basefold<BabyBearExt4, BasefoldRSParams>, P>(
self,
compilation_options,
elf_path,
Checkpoint::Complete,
)
}
(PcsKind::Whir, FieldType::Goldilocks) => {
prove_inner::<GoldilocksExt2, Whir<GoldilocksExt2, WhirDefaultSpec>, P>(
self,
compilation_options,
elf_path,
Checkpoint::PrepVerify, // TODO: when whir and babybear is ready
)
}
(PcsKind::Whir, FieldType::BabyBear) => {
prove_inner::<BabyBearExt4, Whir<BabyBearExt4, WhirDefaultSpec>, P>(
self,
compilation_options,
elf_path,
Checkpoint::PrepVerify, // TODO: when whir and babybear is ready
)
}
}
}
}
/// Load the ELF, set up the platform and prover, and run the e2e pipeline up
/// to `checkpoint`.
///
/// # Errors
///
/// Returns an error when the ELF cannot be read/loaded or the inputs cannot
/// be prepared.
///
/// # Panics
///
/// Panics when the public io or the hints exceed the configured capacities.
fn run_elf_inner<
    E: ExtensionField + LkMultiplicityKey,
    PCS: PolynomialCommitmentScheme<E> + Serialize + 'static,
    P: AsRef<Path>,
>(
    options: &CenoOptions,
    compilation_options: &CompilationOptions,
    elf_path: P,
    checkpoint: Checkpoint,
) -> anyhow::Result<E2ECheckpointResult<E, PCS>> {
    let elf_path = elf_path.as_ref();
    let elf_bytes =
        std::fs::read(elf_path).context(format!("failed to read {}", elf_path.display()))?;
    let program = Program::load_elf(&elf_bytes, u32::MAX).context("failed to load elf")?;
    print_cargo_message("Loaded", format_args!("{}", elf_path.display()));
    let multi_prover = MultiProver::new(
        options.prover_id as usize,
        options.num_provers as usize,
        options.max_cell_per_shard,
        options.max_cycle_per_shard,
    );
    let public_io = options
        .read_public_io()
        .context("failed to read public io")?;
    let public_io_size = options.public_io_size;
    // The check is inclusive; the message now says `<=` to match it.
    assert!(
        public_io.len() <= public_io_size as usize / WORD_SIZE,
        "require pub io length {} <= max public_io_size {}",
        public_io.len(),
        public_io_size as usize / WORD_SIZE
    );
    // Non-release builds use the debug platform layout.
    let platform = if compilation_options.release {
        setup_platform(
            options.platform,
            &program,
            options.stack_size(),
            options.heap_size(),
            public_io_size,
        )
    } else {
        setup_platform_debug(
            options.platform,
            &program,
            options.stack_size(),
            options.heap_size(),
            public_io_size,
        )
    };
    tracing::info!("Running on platform {:?} {}", options.platform, platform);
    tracing::info!(
        "Stack: {} bytes. Heap: {} bytes.",
        options.stack_size(),
        options.heap_size()
    );
    let hints = options.read_hints().context("failed to read hints")?;
    // NOTE(review): `hints.len()` counts words while the message reports the
    // region size in bytes — confirm the intended unit of `platform.hints.len()`.
    assert!(
        hints.len() <= platform.hints.iter_addresses().len(),
        "hints must fit in {} bytes",
        platform.hints.len()
    );
    let backend = create_backend(options.max_num_variables, options.security_level);
    Ok(run_e2e_with_checkpoint::<E, PCS, _, _>(
        create_prover(backend.clone()),
        program,
        platform,
        multi_prover,
        &hints,
        &public_io,
        options.max_steps,
        checkpoint,
        options.shard_id.map(|v| v as usize),
    ))
}
/// Run the pipeline just far enough to derive the verifying key, and write it
/// to `--out-vk` when requested.
fn keygen_inner<
    E: ExtensionField + LkMultiplicityKey,
    PCS: PolynomialCommitmentScheme<E> + Serialize + 'static,
    P: AsRef<Path>,
>(
    args: &CenoOptions,
    compilation_options: &CompilationOptions,
    elf_path: P,
) -> anyhow::Result<()> {
    let result = run_elf_inner::<E, PCS, P>(
        args,
        compilation_options,
        elf_path,
        Checkpoint::PrepE2EProving,
    )?;
    let vk = result.vk.expect("Keygen should yield vk.");
    // Persist the vk only when an output path was supplied.
    match args.out_vk.as_ref() {
        None => Ok(()),
        Some(out_vk) => {
            let path = canonicalize_allow_nx(out_vk)?;
            print_cargo_message("Writing", format_args!("vk to {}", path.display()));
            let vk_file =
                File::create(&path).context(format!("failed to create {}", path.display()))?;
            bincode::serialize_into(vk_file, &vk).context("failed to serialize vk")?;
            Ok(())
        }
    }
}
/// Run the e2e pipeline up to `checkpoint`, sanity-verify the resulting
/// proof, and optionally write the proof and vk to disk.
fn prove_inner<
    E: ExtensionField + LkMultiplicityKey,
    PCS: PolynomialCommitmentScheme<E> + Serialize + 'static,
    P: AsRef<Path>,
>(
    args: &CenoOptions,
    compilation_options: &CompilationOptions,
    elf_path: P,
    checkpoint: Checkpoint,
) -> anyhow::Result<()> {
    let result = run_elf_inner::<E, PCS, P>(args, compilation_options, elf_path, checkpoint)?;
    let zkvm_proofs = result.proofs.expect("PrepSanityCheck should yield proof.");
    let vk = result.vk.expect("PrepSanityCheck should yield vk.");
    // Verify the freshly produced proof before writing anything to disk.
    let start = std::time::Instant::now();
    let verifier = ZKVMVerifier::new(vk);
    if let Err(e) = verify(zkvm_proofs.clone(), &verifier) {
        bail!("Verification failed: {e:?}");
    }
    print_cargo_message(
        "Verified",
        format_args!("proof in {:.2}s", start.elapsed().as_secs_f32()),
    );
    if let Some(out_proof) = args.out_proof.as_ref() {
        let path = canonicalize_allow_nx(out_proof)?;
        print_cargo_message("Writing", format_args!("proof to {}", path.display()));
        let proof_file =
            File::create(&path).context(format!("failed to create {}", path.display()))?;
        bincode::serialize_into(proof_file, &zkvm_proofs)
            .context("failed to serialize zkvm proof")?;
    }
    if let Some(out_vk) = args.out_vk.as_ref() {
        let path = canonicalize_allow_nx(out_vk)?;
        print_cargo_message("Writing", format_args!("vk to {}", path.display()));
        let vk_file =
            File::create(&path).context(format!("failed to create {}", path.display()))?;
        // The verifier owns the vk at this point; unwrap it for serialization.
        bincode::serialize_into(vk_file, &verifier.into_inner())
            .context("failed to serialize vk")?;
    }
    Ok(())
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_cli/example/src/main.rs | ceno_cli/example/src/main.rs | extern crate ceno_rt;
/// Check whether `n` is prime by trial division up to `sqrt(n)`.
///
/// The squaring is done in `u64`: with `u32` arithmetic, `i * i` overflows for
/// inputs near `u32::MAX` (the divisor reaches 65536, whose square exceeds
/// `u32::MAX`), panicking in debug builds and wrapping in release builds.
fn is_prime(n: u32) -> bool {
    if n < 2 {
        return false;
    }
    let n = u64::from(n);
    let mut i: u64 = 2;
    while i * i <= n {
        if n % i == 0 {
            return false;
        }
        i += 1;
    }
    true
}
fn main() {
    // Public input: inclusive upper bound of the search range.
    let n: u32 = ceno_rt::read();
    // Count the primes in 0..=n using the trial-division helper.
    let cnt_primes = (0..=n).filter(|&i| is_prime(i)).count() as u32;
    // Guard against absurd counts before committing the result.
    if cnt_primes > 1000 * 1000 {
        panic!();
    }
    ceno_rt::commit(&cnt_primes);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/lib.rs | ceno_recursion/src/lib.rs | #![allow(clippy::type_complexity)]
#![allow(clippy::too_many_arguments)]
mod arithmetics;
mod basefold_verifier;
pub mod constants;
mod tower_verifier;
mod transcript;
pub mod zkvm_verifier;
pub mod aggregation;
#[allow(dead_code)]
mod extensions;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/extensions/mod.rs | ceno_recursion/src/extensions/mod.rs | #[cfg(test)]
mod tests {
use crate::{
arithmetics::{challenger_multi_observe, exts_to_felts},
zkvm_verifier::binding::F,
};
use openvm_circuit::arch::{SystemConfig, VmExecutor};
use openvm_instructions::exe::VmExe;
use openvm_native_circuit::{Native, NativeConfig};
use openvm_native_compiler::{
asm::AsmBuilder,
conversion::{CompilerOptions, convert_program},
prelude::*,
};
use openvm_native_recursion::challenger::{
CanObserveVariable, CanSampleVariable, duplex::DuplexChallengerVariable,
};
use openvm_stark_backend::{
config::StarkGenericConfig,
p3_field::{Field, FieldAlgebra},
};
use openvm_stark_sdk::{
config::baby_bear_poseidon2::BabyBearPoseidon2Config, p3_baby_bear::BabyBear,
};
type SC = BabyBearPoseidon2Config;
type EF = <SC as StarkGenericConfig>::Challenge;
#[test]
pub fn test_native_multi_observe() {
// OpenVM DSL
let mut builder = AsmBuilder::<F, EF>::default();
vm_program(&mut builder);
builder.halt();
// Pass in witness stream
let witness_stream: Vec<Vec<F>> = Vec::new();
// Compile program
let options = CompilerOptions::default().with_cycle_tracker();
let mut compiler = AsmCompiler::new(options.word_size);
compiler.build(builder.operations);
let asm_code = compiler.code();
let program = convert_program(asm_code, options);
let mut system_config = SystemConfig::default()
.with_public_values(4)
.with_max_segment_len((1 << 25) - 100);
system_config.profiling = true;
let config = NativeConfig::new(system_config, Native);
let executor = VmExecutor::<BabyBear, NativeConfig>::new(config).unwrap();
let exe = VmExe::new(program);
let interpreter = executor.instance(&exe).unwrap();
interpreter
.execute(witness_stream, None)
.expect("test_native_multi_observe should not fail");
}
fn vm_program<C: Config>(builder: &mut Builder<C>) {
let e1: Ext<C::F, C::EF> = builder.constant(C::EF::GENERATOR.exp_power_of_2(16));
let e2: Ext<C::F, C::EF> = builder.constant(C::EF::GENERATOR.exp_power_of_2(32));
let e3: Ext<C::F, C::EF> = builder.constant(C::EF::GENERATOR.exp_power_of_2(64));
let e4: Ext<C::F, C::EF> = builder.constant(C::EF::GENERATOR.exp_power_of_2(128));
let e5: Ext<C::F, C::EF> = builder.constant(C::EF::GENERATOR.exp_power_of_2(256));
let len: usize = 5;
let e_arr: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(len);
builder.set(&e_arr, 0, e1);
builder.set(&e_arr, 1, e2);
builder.set(&e_arr, 2, e3);
builder.set(&e_arr, 3, e4);
builder.set(&e_arr, 4, e5);
unsafe {
let mut c1 = DuplexChallengerVariable::new(builder);
let mut c2 = DuplexChallengerVariable::new(builder);
let f_arr1 = exts_to_felts(builder, &e_arr);
let f_arr2 = f_arr1.clone();
challenger_multi_observe(builder, &mut c1, &f_arr1);
let test_e1 = c1.sample(builder);
c2.observe_slice(builder, f_arr2);
let test_e2 = c2.sample(builder);
builder.assert_felt_eq(test_e1, test_e2);
}
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/transcript/mod.rs | ceno_recursion/src/transcript/mod.rs | use ff_ext::{BabyBearExt4, ExtensionField as CenoExtensionField, SmallField};
use openvm_native_compiler::prelude::*;
use openvm_native_recursion::challenger::{
CanObserveVariable, CanSampleBitsVariable, duplex::DuplexChallengerVariable,
};
use openvm_stark_backend::p3_field::FieldAlgebra;
/// Absorb a static byte label into the transcript challenger.
///
/// The label is packed into base-field elements and each element is observed
/// as a circuit constant.
pub fn transcript_observe_label<C: Config>(
    builder: &mut Builder<C>,
    challenger: &mut DuplexChallengerVariable<C>,
    label: &[u8],
) {
    let elems = <BabyBearExt4 as CenoExtensionField>::BaseField::bytes_to_field_elements(label);
    for elem in elems {
        let felt: Felt<C::F> =
            builder.constant(C::F::from_canonical_u64(elem.to_canonical_u64()));
        challenger.observe(builder, felt);
    }
}
/// Constrain a proof-of-work witness: after absorbing `witness`, the first
/// `nbits` bits sampled from the challenger must all be zero.
pub fn transcript_check_pow_witness<C: Config>(
    builder: &mut Builder<C>,
    challenger: &mut DuplexChallengerVariable<C>,
    nbits: usize,
    witness: Felt<C::F>,
) {
    // Lift the bit count into the DSL so it can drive the sampled-bit loop.
    let nbits = builder.eval_expr(Usize::from(nbits));
    challenger.observe(builder, witness);
    let bits = challenger.sample_bits(builder, nbits);
    // Assert every sampled bit is zero, i.e. the PoW grinding succeeded.
    builder.range(0, nbits).for_each(|index_vec, builder| {
        let bit = builder.get(&bits, index_vec[0]);
        builder.assert_eq::<Var<C::N>>(bit, Usize::from(0));
    });
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/bin/e2e_aggregate.rs | ceno_recursion/src/bin/e2e_aggregate.rs | use ceno_emul::{IterAddresses, Program, WORD_SIZE, Word};
use ceno_host::{CenoStdin, memory_from_file};
use ceno_recursion::aggregation::CenoAggregationProver;
use ceno_zkvm::{
e2e::{
Checkpoint, FieldType, MultiProver, PcsKind, Preset, run_e2e_with_checkpoint,
setup_platform, setup_platform_debug,
},
scheme::{constants::MAX_NUM_VARIABLES, create_backend, create_prover},
};
use clap::Parser;
use ff_ext::BabyBearExt4;
use mpcs::{Basefold, BasefoldRSParams, SecurityLevel};
use std::{fs, path::PathBuf};
use tracing::level_filters::LevelFilter;
use tracing_forest::ForestLayer;
use tracing_subscriber::{
EnvFilter, Registry, filter::filter_fn, fmt, layer::SubscriberExt, util::SubscriberInitExt,
};
/// Parse a human-readable binary size string (e.g. "2M", "1k") into bytes.
///
/// NOTE(review): the `as u32` cast silently truncates values above
/// `u32::MAX` bytes (4 GiB) — confirm that is acceptable for these flags.
fn parse_size(s: &str) -> Result<u32, parse_size::Error> {
    parse_size::Config::new()
        .with_binary()
        .parse_size(s)
        .map(|size| size as u32)
}
/// Prove the execution of a fixed RISC-V program.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    /// The path to the ELF file to execute.
    elf: PathBuf,
    /// The path to the proof file to write.
    #[arg(default_value = "proof.bin")]
    proof_file: PathBuf,
    /// The path to the verification key file to write.
    #[arg(default_value = "vk.bin")]
    vk_file: PathBuf,
    /// The maximum number of steps to execute the program.
    #[arg(short, long)]
    max_steps: Option<usize>,
    // Profiling granularity.
    // Setting any value restricts logs to profiling information
    #[arg(long)]
    profiling: Option<usize>,
    /// The preset configuration to use.
    #[arg(long, value_enum, default_value_t = Preset::Ceno)]
    platform: Preset,
    /// The polynomial commitment scheme to use.
    #[arg(long, value_enum, default_value_t = PcsKind::default())]
    pcs: PcsKind,
    /// The field to use, eg. goldilocks
    #[arg(long, value_enum, default_value_t = FieldType::default())]
    field: FieldType,
    /// Hints: prover-private unconstrained input.
    /// This is a raw file mapped as a memory segment.
    /// Zero-padded to the right to the next power-of-two size.
    #[arg(long, conflicts_with = "hints")]
    hints_file: Option<String>,
    #[arg(long, conflicts_with = "hints_file", value_parser, num_args = 1.., value_delimiter = ',')]
    hints: Option<Vec<Word>>,
    #[arg(long, default_value = "100")]
    n: u32,
    /// Stack size in bytes.
    #[arg(long, default_value = "2M", value_parser = parse_size)]
    stack_size: u32,
    /// Heap size in bytes.
    #[arg(long, default_value = "2M", value_parser = parse_size)]
    heap_size: u32,
    /// Max number of variables
    #[clap(long, default_value_t = MAX_NUM_VARIABLES)]
    max_num_variables: usize,
    #[arg(long, value_parser, num_args = 1.., value_delimiter = ',')]
    public_io: Option<Vec<Word>>,
    /// pub io size in byte
    #[arg(long, default_value = "1k", value_parser = parse_size)]
    public_io_size: u32,
    /// The security level to use.
    #[arg(short, long, value_enum, default_value_t = SecurityLevel::default())]
    security_level: SecurityLevel,
    // prover id
    #[arg(long, default_value = "0")]
    prover_id: u32,
    // number of available prover.
    #[arg(long, default_value = "1")]
    num_provers: u32,
    // max cell per shard
    // default value: 16GB VRAM, each cell 4 byte, log explosion 2
    // => 2^30 * 16 / 4 / 2
    #[arg(long, default_value = "2147483648")]
    max_cell_per_shard: u64,
    // max cycle per shard
    #[arg(long, default_value = "536870912")] // 536870912 = 2^29
    max_cycle_per_shard: u64,
}
/// End-to-end driver: run the guest ELF, produce zkVM proofs, then fold them
/// into a single root proof via the aggregation prover.
fn main() {
    // Parse CLI args and round sizes up to whole words.
    let args = {
        let mut args = Args::parse();
        args.stack_size = args.stack_size.next_multiple_of(WORD_SIZE as u32);
        args.heap_size = args.heap_size.next_multiple_of(WORD_SIZE as u32);
        args
    };
    // default filter
    let default_filter = EnvFilter::builder()
        .with_default_directive(LevelFilter::DEBUG.into())
        .from_env_lossy();
    // filter by profiling level;
    // spans with level i contain the field "profiling_{i}"
    // this restricts statistics to first (args.profiling) levels
    let profiling_level = args.profiling.unwrap_or(1);
    let filter_by_profiling_level = filter_fn(move |metadata| {
        (1..=profiling_level)
            .map(|i| format!("profiling_{i}"))
            .any(|field| metadata.fields().field(&field).is_some())
    });
    let fmt_layer = fmt::layer()
        .compact()
        .with_thread_ids(false)
        .with_thread_names(false)
        .without_time();
    Registry::default()
        .with(args.profiling.is_some().then_some(ForestLayer::default()))
        .with(fmt_layer)
        // if some profiling granularity is specified, use the profiling filter,
        // otherwise use the default
        .with(
            args.profiling
                .is_some()
                .then_some(filter_by_profiling_level),
        )
        .with(args.profiling.is_none().then_some(default_filter))
        .init();
    // process public input first
    let public_io = args
        .public_io
        .and_then(|public_io| {
            // if the vector contains only one element, write it as a raw `u32`
            // otherwise, write the entire vector
            // in both cases, convert the resulting `CenoStdin` into a `Vec<u32>`
            if public_io.len() == 1 {
                CenoStdin::default()
                    .write(&public_io[0])
                    .ok()
                    .map(|stdin| Into::<Vec<u32>>::into(&*stdin))
            } else {
                CenoStdin::default()
                    .write(&public_io)
                    .ok()
                    .map(|stdin| Into::<Vec<u32>>::into(&*stdin))
            }
        })
        .unwrap_or_default();
    assert!(
        public_io.len() <= args.public_io_size as usize / WORD_SIZE,
        "require pub io length {} < max public_io_size {}",
        public_io.len(),
        args.public_io_size as usize / WORD_SIZE
    );
    tracing::info!("Loading ELF file: {}", args.elf.display());
    let elf_bytes = fs::read(&args.elf).expect("read elf file");
    let program = Program::load_elf(&elf_bytes, u32::MAX).unwrap();
    // Debug builds use the debug platform layout.
    let platform = if cfg!(debug_assertions) {
        setup_platform_debug(
            args.platform,
            &program,
            args.stack_size,
            args.heap_size,
            args.public_io_size,
        )
    } else {
        setup_platform(
            args.platform,
            &program,
            args.stack_size,
            args.heap_size,
            args.public_io_size,
        )
    };
    tracing::info!("Running on platform {:?} {}", args.platform, platform);
    tracing::info!(
        "Stack: {} bytes. Heap: {} bytes.",
        args.stack_size,
        args.heap_size
    );
    // Hints come either from a raw memory-image file or from a word list.
    let hints = args
        .hints_file
        .as_ref()
        .map(|file_path| {
            tracing::info!("Loading hints file: {:?}", file_path);
            let hints = memory_from_file(file_path).expect("failed to read hints file");
            assert!(
                hints.len() <= platform.hints.iter_addresses().len(),
                "hints must fit in {} bytes",
                platform.hints.len()
            );
            hints
        })
        .or_else(|| {
            args.hints.and_then(|hint| {
                // if the vector contains only one element, write it as a raw `u32`
                // otherwise, write the entire vector
                // in both cases, convert the resulting `CenoStdin` into a `Vec<u32>`
                if hint.len() == 1 {
                    CenoStdin::default()
                        .write(&hint[0])
                        .ok()
                        .map(|stdin| Into::<Vec<u32>>::into(&*stdin))
                } else {
                    CenoStdin::default()
                        .write(&hint)
                        .ok()
                        .map(|stdin| Into::<Vec<u32>>::into(&*stdin))
                }
            })
        })
        .unwrap_or_default();
    let max_steps = args.max_steps.unwrap_or(usize::MAX);
    let multi_prover = MultiProver::new(
        args.prover_id as usize,
        args.num_provers as usize,
        args.max_cell_per_shard,
        args.max_cycle_per_shard,
    );
    let backend = create_backend(args.max_num_variables, args.security_level);
    let prover = create_prover(backend);
    // Run the full pipeline: execute, generate witnesses, and prove.
    let result =
        run_e2e_with_checkpoint::<BabyBearExt4, Basefold<BabyBearExt4, BasefoldRSParams>, _, _>(
            prover,
            program,
            platform,
            multi_prover,
            &hints,
            &public_io,
            max_steps,
            Checkpoint::Complete,
            None,
        );
    let zkvm_proofs = result
        .proofs
        .expect("PrepSanityCheck should yield zkvm_proof.");
    let vk = result.vk.expect("PrepSanityCheck should yield vk.");
    // Aggregate the per-shard proofs into a single root proof.
    let mut agg_prover = CenoAggregationProver::from_base_vk(vk);
    let _ = agg_prover.generate_root_proof(zkvm_proofs);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/aggregation/internal.rs | ceno_recursion/src/aggregation/internal.rs | /// Most of the codes in this file are copied from the OpenVM project.
/// And we made a few modifications to fit in our continuation scheme.
/// https://github.com/openvm-org/openvm/blob/main/crates/continuations/src/verifier/common/non_leaf.rs
use std::{array, borrow::Borrow};
use openvm_circuit::arch::PUBLIC_VALUES_AIR_ID;
use openvm_instructions::program::Program;
use openvm_native_compiler::{
conversion::CompilerOptions,
ir::{Array, Builder, Config, DIGEST_SIZE, Felt, RVar},
prelude::Var,
};
use openvm_native_recursion::{
challenger::duplex::DuplexChallengerVariable, fri::TwoAdicFriPcsVariable, stark::StarkVerifier,
types::MultiStarkVerificationAdvice, utils::const_fri_config, vars::StarkProofVariable,
};
use openvm_stark_backend::keygen::types::MultiStarkVerifyingKey;
use openvm_stark_sdk::{
config::{FriParameters, baby_bear_poseidon2::BabyBearPoseidon2Config},
openvm_stark_backend::p3_field::PrimeField32,
};
use p3::field::FieldAlgebra;
use openvm_continuations::verifier::{
common::{
assert_or_assign_connector_pvs, assert_required_air_for_agg_vm_present,
assert_single_segment_vm_exit_successfully, get_program_commit, types::VmVerifierPvs,
},
internal::{
types::{InternalVmVerifierExtraPvs, InternalVmVerifierInput, InternalVmVerifierPvs},
vars::InternalVmVerifierInputVariable,
},
};
use openvm_native_recursion::hints::Hintable;
use openvm_continuations::{C, F};
use openvm_native_recursion::types::new_from_inner_multi_vk;
/// Copies `dst_slice.len()` felts out of the in-circuit array `src`,
/// starting at `src_offset`, into the corresponding entries of `dst_slice`.
pub(crate) fn assign_array_to_slice<C: Config>(
    builder: &mut Builder<C>,
    dst_slice: &[Felt<C::F>],
    src: &Array<C, Felt<C::F>>,
    src_offset: usize,
) {
    for (idx, target) in dst_slice.iter().enumerate() {
        let value = builder.get(src, src_offset + idx);
        builder.assign(target, value);
    }
}
/// Returns 1 if lhs == rhs, 0 otherwise.
pub(crate) fn eq_felt_slice<C: Config, const N: usize>(
    builder: &mut Builder<C>,
    lhs: &[Felt<C::F>; N],
    rhs: &[Felt<C::F>; N],
) -> Var<C::N> {
    // Emit all limb differences first, then all felt->var casts, matching the
    // instruction order the circuit expects.
    let diffs: [Felt<C::F>; N] = array::from_fn(|i| builder.eval(lhs[i] - rhs[i]));
    let diff_vars = diffs.map(|d| builder.cast_felt_to_var(d));
    // Start from "equal" and clear the flag as soon as any limb differs.
    let all_equal: Var<C::N> = builder.eval(C::N::ONE);
    for v in diff_vars {
        builder
            .if_ne(v, C::N::ZERO)
            .then(|builder| builder.assign(&all_equal, C::N::ZERO));
    }
    all_equal
}
/// In-circuit inputs for a non-leaf (internal) verifier: the verifier's own
/// program commitment plus the PCS/advice pairs needed to verify either a
/// leaf-verifier proof or a recursive internal-verifier proof.
pub struct NonLeafVerifierVariables<C: Config> {
    /// Program commitment of this internal verifier itself; used to decide
    /// whether a child proof is recursive (same program) or a leaf proof.
    pub internal_program_commit: [Felt<C::F>; DIGEST_SIZE],
    /// PCS used when verifying leaf-verifier proofs.
    pub leaf_pcs: TwoAdicFriPcsVariable<C>,
    /// Verification advice for leaf-verifier proofs.
    pub leaf_advice: MultiStarkVerificationAdvice<C>,
    /// PCS used when verifying internal-verifier (recursive) proofs.
    pub internal_pcs: TwoAdicFriPcsVariable<C>,
    /// Verification advice for internal-verifier (recursive) proofs.
    pub internal_advice: MultiStarkVerificationAdvice<C>,
}
impl<C: Config> NonLeafVerifierVariables<C> {
    /// Verify proofs of internal verifier or leaf verifier.
    /// Returns aggregated VmVerifierPvs and leaf verifier commitment of these proofs.
    #[allow(clippy::type_complexity)]
    pub fn verify_internal_or_leaf_verifier_proofs(
        &self,
        builder: &mut Builder<C>,
        proofs: &Array<C, StarkProofVariable<C>>,
    ) -> (VmVerifierPvs<Felt<C::F>>, [Felt<C::F>; DIGEST_SIZE])
    where
        C::F: PrimeField32,
    {
        // At least 1 proof should be provided.
        builder.assert_nonzero(&proofs.len());
        // TODO: use our own variant of VmVerifierPvs (defined in type.rs)
        // Aggregated public values: seeded from the first proof, then
        // cross-checked against every subsequent proof in the loop below.
        let pvs = VmVerifierPvs::<Felt<C::F>>::uninit(builder);
        let leaf_verifier_commit = array::from_fn(|_| builder.uninit());
        builder.range(0, proofs.len()).for_each(|i_vec, builder| {
            let i = i_vec[0];
            let proof = builder.get(proofs, i);
            assert_required_air_for_agg_vm_present(builder, &proof);
            // Verifies the proof as either a recursive internal-verifier proof
            // or a leaf-verifier proof, depending on its program commitment.
            let proof_vm_pvs = self.verify_internal_or_leaf_verifier_proof(builder, &proof);
            assert_single_segment_vm_exit_successfully(builder, &proof);
            builder.if_eq(i, RVar::zero()).then_or_else(
                // First proof: seed the aggregated commitments.
                |builder| {
                    builder.assign(&pvs.app_commit, proof_vm_pvs.vm_verifier_pvs.app_commit);
                    builder.assign(
                        &leaf_verifier_commit,
                        proof_vm_pvs.extra_pvs.leaf_verifier_commit,
                    );
                },
                // Subsequent proofs: must carry the same commitments as the first.
                |builder| {
                    builder.assert_eq::<[_; DIGEST_SIZE]>(
                        pvs.app_commit,
                        proof_vm_pvs.vm_verifier_pvs.app_commit,
                    );
                    builder.assert_eq::<[_; DIGEST_SIZE]>(
                        leaf_verifier_commit,
                        proof_vm_pvs.extra_pvs.leaf_verifier_commit,
                    );
                },
            );
            // Chain the connector public values across proofs (assign on the
            // first iteration, assert consistency on the rest).
            assert_or_assign_connector_pvs(
                builder,
                &pvs.connector,
                i,
                &proof_vm_pvs.vm_verifier_pvs.connector,
            );
            // TODO: sum shard ram ec point in each proof
            // // EC sum verification
            // let expected_last_shard_id = Usize::uninit(builder);
            // builder.assign(&expected_last_shard_id, pv.len() - Usize::from(1));
            // let shard_id_fs = builder.get(&shard_raw_pi, SHARD_ID_IDX);
            // let shard_id_f = builder.get(&shard_id_fs, 0);
            // let shard_id = Usize::Var(builder.cast_felt_to_var(shard_id_f));
            // builder.assert_usize_eq(expected_last_shard_id, shard_id);
            // let ec_sum = SepticPointVariable {
            //     x: SepticExtensionVariable {
            //         vs: builder.dyn_array(7),
            //     },
            //     y: SepticExtensionVariable {
            //         vs: builder.dyn_array(7),
            //     },
            //     is_infinity: Usize::uninit(builder),
            // };
            // builder.assign(&ec_sum.is_infinity, Usize::from(1));
            // builder.range(0, pv.len()).for_each(|idx_vec, builder| {
            //     let shard_pv = builder.get(&pv, idx_vec[0]);
            //     let x = SepticExtensionVariable {
            //         vs: shard_pv.slice(
            //             builder,
            //             SHARD_RW_SUM_IDX,
            //             SHARD_RW_SUM_IDX + SEPTIC_EXTENSION_DEGREE,
            //         ),
            //     };
            //     let y = SepticExtensionVariable {
            //         vs: shard_pv.slice(
            //             builder,
            //             SHARD_RW_SUM_IDX + SEPTIC_EXTENSION_DEGREE,
            //             SHARD_RW_SUM_IDX + 2 * SEPTIC_EXTENSION_DEGREE,
            //         ),
            //     };
            //     let shard_ec = SepticPointVariable {
            //         x: x.clone(),
            //         y: y.clone(),
            //         is_infinity: Usize::uninit(builder),
            //     };
            //     let is_x_zero = x.is_zero(builder);
            //     let is_y_zero = y.is_zero(builder);
            //     builder.if_eq(is_x_zero, Usize::from(1)).then_or_else(
            //         |builder| {
            //             builder
            //                 .if_eq(is_y_zero.clone(), Usize::from(1))
            //                 .then_or_else(
            //                     |builder| {
            //                         builder.assign(&shard_ec.is_infinity, Usize::from(1));
            //                     },
            //                     |builder| {
            //                         builder.assign(&shard_ec.is_infinity, Usize::from(0));
            //                     },
            //                 );
            //         },
            //         |builder| {
            //             builder.assign(&shard_ec.is_infinity, Usize::from(0));
            //         },
            //     );
            //     add_septic_points_in_place(builder, &ec_sum, &shard_ec);
            // });
            // add_septic_points_in_place(builder, &ec_sum, &calculated_shard_ec_sum);
            // This is only needed when `is_terminate` but branching here won't save much, so we
            // always assign it.
            builder.assign(
                &pvs.public_values_commit,
                proof_vm_pvs.vm_verifier_pvs.public_values_commit,
            );
        });
        (pvs, leaf_verifier_commit)
    }

    /// Verifies a single child proof and returns its public values.
    /// Dispatches on the proof's program commitment: if it matches this
    /// verifier's own commitment it is verified as a recursive internal proof,
    /// otherwise as a leaf-verifier proof.
    fn verify_internal_or_leaf_verifier_proof(
        &self,
        builder: &mut Builder<C>,
        proof: &StarkProofVariable<C>,
    ) -> InternalVmVerifierPvs<Felt<C::F>>
    where
        C::F: PrimeField32,
    {
        // Flat, uninitialized PV buffer that both branches below fill in; read
        // back as a structured `InternalVmVerifierPvs` at the end.
        let flatten_proof_vm_pvs = InternalVmVerifierPvs::<Felt<C::F>>::uninit(builder).flatten();
        let proof_vm_pvs_arr = builder
            .get(&proof.per_air, PUBLIC_VALUES_AIR_ID)
            .public_values;
        let program_commit = get_program_commit(builder, proof);
        // 1 iff the proof was produced by this very program (recursive case).
        let is_self_program =
            eq_felt_slice(builder, &self.internal_program_commit, &program_commit);
        builder.if_eq(is_self_program, RVar::one()).then_or_else(
            |builder| {
                builder.cycle_tracker_start("verify stark");
                StarkVerifier::verify::<DuplexChallengerVariable<C>>(
                    builder,
                    &self.internal_pcs,
                    &self.internal_advice,
                    proof,
                );
                builder.cycle_tracker_end("verify stark");
                // Internal proofs expose the full PV struct, extra PVs included.
                assign_array_to_slice(builder, &flatten_proof_vm_pvs, &proof_vm_pvs_arr, 0);
                let proof_vm_pvs: &InternalVmVerifierPvs<_> =
                    flatten_proof_vm_pvs.as_slice().borrow();
                // Handle recursive verification
                // For proofs, its program commitment should be committed.
                builder.assert_eq::<[_; DIGEST_SIZE]>(
                    proof_vm_pvs.extra_pvs.internal_program_commit,
                    program_commit,
                );
            },
            |builder| {
                builder.cycle_tracker_start("verify stark");
                StarkVerifier::verify::<DuplexChallengerVariable<C>>(
                    builder,
                    &self.leaf_pcs,
                    &self.leaf_advice,
                    proof,
                );
                builder.cycle_tracker_end("verify stark");
                // Leaf verifier doesn't have extra public values.
                assign_array_to_slice(
                    builder,
                    &flatten_proof_vm_pvs[..VmVerifierPvs::<u8>::width()],
                    &proof_vm_pvs_arr,
                    0,
                );
                let proof_vm_pvs: &InternalVmVerifierPvs<_> =
                    flatten_proof_vm_pvs.as_slice().borrow();
                // Record which leaf program produced this proof so the caller
                // can check that all leaves share one commitment.
                builder.assign(&proof_vm_pvs.extra_pvs.leaf_verifier_commit, program_commit);
            },
        );
        *flatten_proof_vm_pvs.as_slice().borrow()
    }
}
/// Config to generate internal VM verifier program.
pub struct InternalVmVerifierConfig {
    /// FRI parameters used when verifying leaf-verifier proofs.
    pub leaf_fri_params: FriParameters,
    /// FRI parameters used when verifying internal-verifier (recursive) proofs.
    pub internal_fri_params: FriParameters,
    /// Options passed to the native compiler when lowering the builder IR.
    pub compiler_options: CompilerOptions,
}
impl InternalVmVerifierConfig {
    /// Builds the internal VM verifier program: it reads its own program
    /// commitment and a batch of child proofs from input, verifies each proof
    /// (leaf or recursive internal), aggregates their public values, and
    /// commits the result as this program's public values.
    pub fn build_program(
        &self,
        leaf_vm_vk: &MultiStarkVerifyingKey<BabyBearPoseidon2Config>,
        internal_vm_vk: &MultiStarkVerifyingKey<BabyBearPoseidon2Config>,
    ) -> Program<F> {
        // Translate both verifying keys into in-circuit verification advice.
        let leaf_verification_advice = new_from_inner_multi_vk(leaf_vm_vk);
        let internal_verification_advice = new_from_inner_multi_vk(internal_vm_vk);

        let mut builder = Builder::<C>::default();

        // Read the verifier input (own program commit + child proofs).
        builder.cycle_tracker_start("ReadProofsFromInput");
        let InternalVmVerifierInputVariable {
            self_program_commit,
            proofs,
        } = InternalVmVerifierInput::<BabyBearPoseidon2Config>::read(&mut builder);
        builder.cycle_tracker_end("ReadProofsFromInput");

        // Materialize both FRI PCS configurations as circuit constants.
        builder.cycle_tracker_start("InitializePcsConst");
        let leaf_pcs = TwoAdicFriPcsVariable {
            config: const_fri_config(&mut builder, &self.leaf_fri_params),
        };
        let internal_pcs = TwoAdicFriPcsVariable {
            config: const_fri_config(&mut builder, &self.internal_fri_params),
        };
        builder.cycle_tracker_end("InitializePcsConst");

        let non_leaf_verifier = NonLeafVerifierVariables {
            internal_program_commit: self_program_commit,
            leaf_pcs,
            leaf_advice: leaf_verification_advice,
            internal_pcs,
            internal_advice: internal_verification_advice,
        };

        // Verify every child proof and aggregate their public values.
        builder.cycle_tracker_start("VerifyProofs");
        let (vm_verifier_pvs, leaf_verifier_commit) =
            non_leaf_verifier.verify_internal_or_leaf_verifier_proofs(&mut builder, &proofs);
        builder.cycle_tracker_end("VerifyProofs");

        // Expose the aggregated values — plus this verifier's own program
        // commitment — as the program's public values, then stop.
        let pvs = InternalVmVerifierPvs {
            vm_verifier_pvs,
            extra_pvs: InternalVmVerifierExtraPvs {
                internal_program_commit: self_program_commit,
                leaf_verifier_commit,
            },
        };
        pvs.flatten()
            .into_iter()
            .for_each(|pv| builder.commit_public_value(pv));
        builder.halt();

        builder.compile_isa_with_options(self.compiler_options)
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.