repo
stringlengths
6
65
file_url
stringlengths
81
311
file_path
stringlengths
6
227
content
stringlengths
0
32.8k
language
stringclasses
1 value
license
stringclasses
7 values
commit_sha
stringlengths
40
40
retrieved_at
stringdate
2026-01-04 15:31:58
2026-01-04 20:25:31
truncated
bool
2 classes
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/scheme/hal.rs
ceno_zkvm/src/scheme/hal.rs
use crate::{ error::ZKVMError, scheme::cpu::TowerRelationOutput, structs::{ComposedConstrainSystem, EccQuarkProof, ZKVMProvingKey}, }; use either::Either; use ff_ext::ExtensionField; use gkr_iop::{ gkr::GKRProof, hal::{ProtocolWitnessGeneratorProver, ProverBackend}, }; use mpcs::{Point, PolynomialCommitmentScheme}; use multilinear_extensions::{mle::MultilinearExtension, util::ceil_log2}; use std::{collections::BTreeMap, sync::Arc}; use sumcheck::structs::IOPProverMessage; use transcript::Transcript; use witness::next_pow2_instance_padding; pub trait ProverDevice<PB>: TraceCommitter<PB> + TowerProver<PB> + MainSumcheckProver<PB> + OpeningProver<PB> + DeviceTransporter<PB> + ProtocolWitnessGeneratorProver<PB> + EccQuarkProver<PB> // + FixedMLEPadder<PB> where PB: ProverBackend, { fn get_pb(&self) -> &PB; } // TODO: remove the lifetime bound pub struct ProofInput<'a, PB: ProverBackend> { pub witness: Vec<Arc<PB::MultilinearPoly<'a>>>, pub structural_witness: Vec<Arc<PB::MultilinearPoly<'a>>>, pub fixed: Vec<Arc<PB::MultilinearPoly<'a>>>, pub public_input: Vec<Arc<PB::MultilinearPoly<'a>>>, pub pub_io_evals: Vec<Either<<PB::E as ExtensionField>::BaseField, PB::E>>, pub num_instances: Vec<usize>, pub has_ecc_ops: bool, } impl<'a, PB: ProverBackend> ProofInput<'a, PB> { pub fn num_instances(&self) -> usize { self.num_instances.iter().sum() } #[inline] pub fn log2_num_instances(&self) -> usize { let num_instance = self.num_instances(); let log2 = ceil_log2(next_pow2_instance_padding(num_instance)); if self.has_ecc_ops { // the mles have one extra variable to store // the internal partial sums for ecc additions log2 + 1 } else { log2 } } } #[derive(Clone)] pub struct TowerProverSpec<'a, PB: ProverBackend> { pub witness: Vec<Vec<PB::MultilinearPoly<'a>>>, } pub trait TraceCommitter<PB: ProverBackend> { // commit to the traces using merkle tree and return // the traces in the form of multilinear polynomials #[allow(clippy::type_complexity)] fn commit_traces<'a>( &self, 
traces: BTreeMap<usize, witness::RowMajorMatrix<<PB::E as ExtensionField>::BaseField>>, ) -> ( Vec<PB::MultilinearPoly<'a>>, PB::PcsData, <PB::Pcs as PolynomialCommitmentScheme<PB::E>>::Commitment, ); /// Return an iterator over witness polynomials so backends can decide how to source them fn extract_witness_mles<'a, 'b>( &self, witness_mles: &'b mut Vec<PB::MultilinearPoly<'a>>, pcs_data: &'b PB::PcsData, // used by GPU backend ) -> Box<dyn Iterator<Item = Arc<PB::MultilinearPoly<'a>>> + 'b>; } /// Accumulate N (not necessarily power of 2) EC points into one EC point using affine coordinates /// in one layer which borrows ideas from the [Quark paper](https://eprint.iacr.org/2020/1275.pdf) /// Note that these points are defined over the septic extension field of BabyBear. /// /// The main constraint enforced in this quark layer is: /// p[1,b] = affine_add(p[b,0], p[b,1]) for all b < N pub trait EccQuarkProver<PB: ProverBackend> { fn prove_ec_sum_quark<'a>( &self, num_instances: usize, xs: Vec<Arc<PB::MultilinearPoly<'a>>>, ys: Vec<Arc<PB::MultilinearPoly<'a>>>, invs: Vec<Arc<PB::MultilinearPoly<'a>>>, transcript: &mut impl Transcript<PB::E>, ) -> Result<EccQuarkProof<PB::E>, ZKVMError>; } pub trait TowerProver<PB: ProverBackend> { // infer read/write/logup records from the read/write/logup expressions and then // build multiple complete binary trees (tower tree) to accumulate these records // either in product or fractional sum form. 
#[allow(clippy::type_complexity)] fn build_tower_witness<'a, 'b, 'c>( &self, cs: &ComposedConstrainSystem<PB::E>, input: &ProofInput<'a, PB>, records: &'c [Arc<PB::MultilinearPoly<'b>>], ) -> ( Vec<Vec<Vec<PB::E>>>, Vec<TowerProverSpec<'c, PB>>, Vec<TowerProverSpec<'c, PB>>, ) where 'a: 'b, 'b: 'c; // the validity of value of first layer in the tower tree is reduced to // the validity of value of last layer in the tower tree through sumchecks #[allow(clippy::type_complexity)] fn prove_tower_relation<'a, 'b, 'c>( &self, composed_cs: &ComposedConstrainSystem<PB::E>, input: &ProofInput<'a, PB>, records: &'c [Arc<PB::MultilinearPoly<'b>>], challenges: &[PB::E; 2], transcript: &mut impl Transcript<PB::E>, ) -> TowerRelationOutput<PB::E> where 'a: 'b, 'b: 'c; } pub struct MainSumcheckEvals<E: ExtensionField> { pub wits_in_evals: Vec<E>, pub fixed_in_evals: Vec<E>, } pub trait MainSumcheckProver<PB: ProverBackend> { // this prover aims to achieve two goals: // 1. the validity of last layer in the tower tree is reduced to // the validity of read/write/logup records through sumchecks; // 2. multiple multiplication relations between witness multilinear polynomials // achieved via zerochecks. 
#[allow(clippy::type_complexity)] fn prove_main_constraints<'a, 'b>( &self, rt_tower: Vec<PB::E>, input: &'b ProofInput<'a, PB>, cs: &ComposedConstrainSystem<PB::E>, challenges: &[PB::E; 2], transcript: &mut impl Transcript<PB::E>, ) -> Result< ( Point<PB::E>, MainSumcheckEvals<PB::E>, Option<Vec<IOPProverMessage<PB::E>>>, Option<GKRProof<PB::E>>, ), ZKVMError, >; } pub trait OpeningProver<PB: ProverBackend> { #[allow(clippy::too_many_arguments)] fn open( &self, witness_data: PB::PcsData, fixed_data: Option<Arc<PB::PcsData>>, points: Vec<Point<PB::E>>, evals: Vec<Vec<Vec<PB::E>>>, transcript: &mut (impl Transcript<PB::E> + 'static), ) -> <PB::Pcs as PolynomialCommitmentScheme<PB::E>>::Proof; } pub struct DeviceProvingKey<'a, PB: ProverBackend> { pub fixed_mles: Vec<Arc<PB::MultilinearPoly<'a>>>, pub pcs_data: Arc<PB::PcsData>, } pub trait DeviceTransporter<PB: ProverBackend> { fn transport_proving_key( &self, is_first_shard: bool, proving_key: Arc<ZKVMProvingKey<PB::E, PB::Pcs>>, ) -> DeviceProvingKey<'static, PB>; fn transport_mles<'a>( &self, mles: &[MultilinearExtension<'a, PB::E>], ) -> Vec<Arc<PB::MultilinearPoly<'a>>>; } // pub trait FixedMLEPadder<PB: ProverBackend> { // fn padding_fixed_mle<'a, 'b>( // &self, // cs: &ComposedConstrainSystem<PB::E>, // fixed_mles: Vec<Arc<PB::MultilinearPoly<'b>>>, // num_instances: usize, // ) -> Vec<Arc<PB::MultilinearPoly<'a>>> // where // 'b: 'a; // }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/scheme/verifier.rs
ceno_zkvm/src/scheme/verifier.rs
use either::Either; use ff_ext::ExtensionField; use std::{iter, marker::PhantomData}; #[cfg(debug_assertions)] use ff_ext::{Instrumented, PoseidonField}; use super::{ZKVMChipProof, ZKVMProof}; use crate::{ error::ZKVMError, instructions::riscv::constants::{ END_PC_IDX, HEAP_LENGTH_IDX, HEAP_START_ADDR_IDX, INIT_CYCLE_IDX, INIT_PC_IDX, SHARD_ID_IDX, }, scheme::{ constants::{NUM_FANIN, SEPTIC_EXTENSION_DEGREE}, septic_curve::{SepticExtension, SepticPoint}, }, structs::{ ComposedConstrainSystem, EccQuarkProof, PointAndEval, TowerProofs, VerifyingKey, ZKVMVerifyingKey, }, }; use ceno_emul::{FullTracer as Tracer, WORD_SIZE}; use gkr_iop::{ self, selector::{SelectorContext, SelectorType}, }; use itertools::{Itertools, chain, interleave, izip}; use mpcs::{Point, PolynomialCommitmentScheme}; use multilinear_extensions::{ Expression, StructuralWitIn, StructuralWitInType::StackedConstantSequence, mle::IntoMLE, util::ceil_log2, utils::eval_by_expr_with_instance, virtual_poly::{VPAuxInfo, build_eq_x_r_vec_sequential, eq_eval}, }; use p3::field::FieldAlgebra; use sumcheck::{ structs::{IOPProof, IOPVerifierState}, util::get_challenge_pows, }; use transcript::{ForkableTranscript, Transcript}; use witness::next_pow2_instance_padding; pub struct ZKVMVerifier<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> { pub vk: ZKVMVerifyingKey<E, PCS>, } impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> ZKVMVerifier<E, PCS> { pub fn new(vk: ZKVMVerifyingKey<E, PCS>) -> Self { ZKVMVerifier { vk } } pub fn into_inner(self) -> ZKVMVerifyingKey<E, PCS> { self.vk } /// Verify a trace from start to halt. 
#[tracing::instrument(skip_all, name = "verify_proof")] pub fn verify_proof( &self, vm_proof: ZKVMProof<E, PCS>, transcript: impl ForkableTranscript<E>, ) -> Result<bool, ZKVMError> { self.verify_proof_halt(vm_proof, transcript, true) } #[tracing::instrument(skip_all, name = "verify_proofs")] pub fn verify_proofs( &self, vm_proofs: Vec<ZKVMProof<E, PCS>>, transcripts: Vec<impl ForkableTranscript<E>>, ) -> Result<bool, ZKVMError> { self.verify_proofs_halt(vm_proofs, transcripts, true) } /// Verify a trace from start to optional halt. pub fn verify_proof_halt( &self, vm_proof: ZKVMProof<E, PCS>, transcript: impl ForkableTranscript<E>, expect_halt: bool, ) -> Result<bool, ZKVMError> { self.verify_proofs_halt(vec![vm_proof], vec![transcript], expect_halt) } /// Verify a trace from start to optional halt. pub fn verify_proofs_halt( &self, vm_proofs: Vec<ZKVMProof<E, PCS>>, transcripts: Vec<impl ForkableTranscript<E>>, expect_halt: bool, ) -> Result<bool, ZKVMError> { assert!(!vm_proofs.is_empty()); let num_proofs = vm_proofs.len(); let (_end_pc, _end_heap_addr, shard_ec_sum) = vm_proofs .into_iter() .zip_eq(transcripts) // optionally halt on last chunk .zip_eq(iter::repeat_n(false, num_proofs - 1).chain(iter::once(expect_halt))) .enumerate() .try_fold((None, None, SepticPoint::<E::BaseField>::default()), |(prev_pc, prev_heap_addr_end, mut shard_ec_sum), (shard_id, ((vm_proof, transcript), expect_halt))| { // require ecall/halt proof to exist, depend on whether we expect a halt. 
let has_halt = vm_proof.has_halt(&self.vk); if has_halt != expect_halt { return Err(ZKVMError::VerifyError( format!( "{shard_id}th proof ecall/halt mismatch: expected {expect_halt} != {has_halt}", ) .into(), )); } // each shard set init cycle = Tracer::SUBCYCLES_PER_INSN // to satisfy initial reads for all prev_cycle = 0 < init_cycle assert_eq!(vm_proof.pi_evals[INIT_CYCLE_IDX], E::from_canonical_u64(Tracer::SUBCYCLES_PER_INSN)); // check init_pc match prev end_pc if let Some(prev_pc) = prev_pc { assert_eq!(vm_proof.pi_evals[INIT_PC_IDX], prev_pc); } else { // first chunk, check program entry assert_eq!(vm_proof.pi_evals[INIT_PC_IDX], E::from_canonical_u32(self.vk.entry_pc)); } let end_pc = vm_proof.pi_evals[END_PC_IDX]; // check memory continuation consistency let heap_addr_start_u32 = vm_proof.pi_evals[HEAP_START_ADDR_IDX].to_canonical_u64() as u32; let heap_len= vm_proof.pi_evals[HEAP_LENGTH_IDX].to_canonical_u64() as u32; if let Some(prev_heap_addr_end) = prev_heap_addr_end { assert_eq!(heap_addr_start_u32, prev_heap_addr_end); // TODO check heap addr in prime field within range } else { // TODO first chunk, check initial heap addr }; // TODO check heap_len == heap chip num_instances let next_heap_addr_end: u32 = heap_addr_start_u32 + heap_len * WORD_SIZE as u32; // add to shard ec sum // _debug // println!("=> shard pi: {:?}", vm_proof.pi_evals.clone()); let shard_ec = self.verify_proof_validity(shard_id, vm_proof, transcript)?; // println!("=> start_ec_sum: {:?}", shard_ec_sum); // println!("=> shard_ec: {:?}", shard_ec); // shard_ec_sum = shard_ec_sum + self.verify_proof_validity(shard_id, vm_proof, transcript)?; shard_ec_sum = shard_ec_sum + shard_ec; // println!("=> new_ec_sum: {:?}", shard_ec_sum); Ok((Some(end_pc), Some(next_heap_addr_end), shard_ec_sum)) })?; // TODO check _end_heap_addr within heap range from vk // check shard ec_sum is_infinity if !shard_ec_sum.is_infinity { return Err(ZKVMError::VerifyError( "shard_ec_sum is not infinity".into(), )); 
} Ok(true) } fn verify_proof_validity( &self, shard_id: usize, vm_proof: ZKVMProof<E, PCS>, mut transcript: impl ForkableTranscript<E>, ) -> Result<SepticPoint<E::BaseField>, ZKVMError> { // main invariant between opcode circuits and table circuits let mut prod_r = E::ONE; let mut prod_w = E::ONE; let mut logup_sum = E::ZERO; let pi_evals = &vm_proof.pi_evals; // make sure circuit index of chip proofs are // subset of that of self.vk.circuit_vks for chip_idx in vm_proof.chip_proofs.keys() { if *chip_idx >= self.vk.circuit_vks.len() { return Err(ZKVMError::VKNotFound( format!( "{shard_id}th shard chip index {chip_idx} not found in vk set [0..{})", self.vk.circuit_vks.len() ) .into(), )); } } // TODO fix soundness: construct raw public input by ourself and trustless from proof // including raw public input to transcript vm_proof .raw_pi .iter() .for_each(|v| v.iter().for_each(|v| transcript.append_field_element(v))); // check shard id assert_eq!( vm_proof.raw_pi[SHARD_ID_IDX], vec![E::BaseField::from_canonical_usize(shard_id)] ); // verify constant poly(s) evaluation result match // we can evaluate at this moment because constant always evaluate to same value // non-constant poly(s) will be verified in respective (table) proof accordingly izip!(&vm_proof.raw_pi, pi_evals) .enumerate() .try_for_each(|(i, (raw, eval))| { if raw.len() == 1 && E::from(raw[0]) != *eval { Err(ZKVMError::VerifyError( format!("{shard_id}th shard pub input on index {i} mismatch {raw:?} != {eval:?}").into(), )) } else { Ok(()) } })?; // write fixed commitment to transcript // TODO check soundness if there is no fixed_commit but got fixed proof? 
if let Some(fixed_commit) = self.vk.fixed_commit.as_ref() && shard_id == 0 { PCS::write_commitment(fixed_commit, &mut transcript).map_err(ZKVMError::PCSError)?; } else if let Some(fixed_commit) = self.vk.fixed_no_omc_init_commit.as_ref() && shard_id > 0 { PCS::write_commitment(fixed_commit, &mut transcript).map_err(ZKVMError::PCSError)?; } // write (circuit_idx, num_instance) to transcript for (circuit_idx, proofs) in vm_proof.chip_proofs.iter() { transcript.append_field_element(&E::BaseField::from_canonical_u32(*circuit_idx as u32)); // length of proof.num_instances will be constrained in verify_chip_proof for num_instance in proofs.iter().flat_map(|proof| &proof.num_instances) { transcript.append_field_element(&E::BaseField::from_canonical_usize(*num_instance)); } } // write witin commitment to transcript PCS::write_commitment(&vm_proof.witin_commit, &mut transcript) .map_err(ZKVMError::PCSError)?; #[cfg(debug_assertions)] { Instrumented::<<<E as ExtensionField>::BaseField as PoseidonField>::P>::log_label( "batch_commit", ); } // alpha, beta let challenges = [ transcript.read_challenge().elements, transcript.read_challenge().elements, ]; tracing::debug!( "{shard_id}th shard challenges in verifier: {:?}", challenges ); let dummy_table_item = challenges[0]; let mut dummy_table_item_multiplicity = 0; let point_eval = PointAndEval::default(); let mut witin_openings = Vec::with_capacity(vm_proof.chip_proofs.len()); let mut fixed_openings = Vec::with_capacity(vm_proof.chip_proofs.len()); let mut shard_ec_sum = SepticPoint::<E::BaseField>::default(); // check num proofs for (index, proofs) in &vm_proof.chip_proofs { let circuit_name = &self.vk.circuit_index_to_name[index]; let circuit_vk = &self.vk.circuit_vks[circuit_name]; if shard_id > 0 && circuit_vk.get_cs().with_omc_init_only() { return Err(ZKVMError::InvalidProof( format!("{shard_id}th shard non-first shard got omc dynamic table init",) .into(), )); } if shard_id == 0 && circuit_vk.get_cs().with_omc_init_only() 
&& proofs.len() != 1 { return Err(ZKVMError::InvalidProof( format!("{shard_id}th shard first shard got > 1 omc dynamic table init",) .into(), )); } } for (index, proof) in vm_proof .chip_proofs .iter() .flat_map(|(index, proofs)| iter::repeat_n(index, proofs.len()).zip(proofs)) { let num_instance: usize = proof.num_instances.iter().sum(); assert!(num_instance > 0); let circuit_name = &self.vk.circuit_index_to_name[index]; let circuit_vk = &self.vk.circuit_vks[circuit_name]; // check chip proof is well-formed if proof.wits_in_evals.len() != circuit_vk.get_cs().num_witin() || proof.fixed_in_evals.len() != circuit_vk.get_cs().num_fixed() { return Err(ZKVMError::InvalidProof( format!( "{shard_id}th shard witness/fixed evaluations length mismatch: ({}, {}) != ({}, {})", proof.wits_in_evals.len(), proof.fixed_in_evals.len(), circuit_vk.get_cs().num_witin(), circuit_vk.get_cs().num_fixed(), ) .into(), )); } if proof.r_out_evals.len() != circuit_vk.get_cs().num_reads() || proof.w_out_evals.len() != circuit_vk.get_cs().num_writes() { return Err(ZKVMError::InvalidProof( format!( "{shard_id}th shard read/write evaluations length mismatch: ({}, {}) != ({}, {})", proof.r_out_evals.len(), proof.w_out_evals.len(), circuit_vk.get_cs().num_reads(), circuit_vk.get_cs().num_writes(), ) .into(), )); } if proof.lk_out_evals.len() != circuit_vk.get_cs().num_lks() { return Err(ZKVMError::InvalidProof( format!( "{shard_id}th shard lookup evaluations length mismatch: {} != {}", proof.lk_out_evals.len(), circuit_vk.get_cs().num_lks(), ) .into(), )); } let chip_logup_sum = proof .lk_out_evals .iter() .map(|evals| { let (p1, p2, q1, q2) = (evals[0], evals[1], evals[2], evals[3]); p1 * q1.inverse() + p2 * q2.inverse() }) .sum::<E>(); transcript.append_field_element(&E::BaseField::from_canonical_u64(*index as u64)); if circuit_vk.get_cs().is_with_lk_table() { logup_sum -= chip_logup_sum; } else { // getting the number of dummy padding item that we used in this opcode circuit let num_lks = 
circuit_vk.get_cs().num_lks(); // each padding instance contribute to (2^rotation_vars) dummy lookup padding let num_padded_instance = (next_pow2_instance_padding(num_instance) - num_instance) * (1 << circuit_vk.get_cs().rotation_vars().unwrap_or(0)); // each instance contribute to (2^rotation_vars - rotated) dummy lookup padding let num_instance_non_selected = num_instance * ((1 << circuit_vk.get_cs().rotation_vars().unwrap_or(0)) - (circuit_vk.get_cs().rotation_subgroup_size().unwrap_or(0) + 1)); dummy_table_item_multiplicity += num_lks * (num_padded_instance + num_instance_non_selected); logup_sum += chip_logup_sum; }; let (input_opening_point, chip_shard_ec_sum) = self.verify_chip_proof( circuit_name, circuit_vk, proof, pi_evals, &vm_proof.raw_pi, &mut transcript, NUM_FANIN, &point_eval, &challenges, )?; if circuit_vk.get_cs().num_witin() > 0 { witin_openings.push(( input_opening_point.len(), (input_opening_point.clone(), proof.wits_in_evals.clone()), )); } if circuit_vk.get_cs().num_fixed() > 0 { fixed_openings.push(( input_opening_point.len(), (input_opening_point.clone(), proof.fixed_in_evals.clone()), )); } prod_w *= proof.w_out_evals.iter().flatten().copied().product::<E>(); prod_r *= proof.r_out_evals.iter().flatten().copied().product::<E>(); tracing::debug!( "{shard_id}th shard verified proof for circuit {}", circuit_name ); if let Some(chip_shard_ec_sum) = chip_shard_ec_sum { shard_ec_sum = shard_ec_sum + chip_shard_ec_sum; } } logup_sum -= E::from_canonical_u64(dummy_table_item_multiplicity as u64) * dummy_table_item.inverse(); #[cfg(debug_assertions)] { Instrumented::<<<E as ExtensionField>::BaseField as PoseidonField>::P>::log_label( "tower_verify+main-sumcheck", ); } // verify mpcs let mut rounds = vec![(vm_proof.witin_commit.clone(), witin_openings)]; if let Some(fixed_commit) = self.vk.fixed_commit.as_ref() && shard_id == 0 { rounds.push((fixed_commit.clone(), fixed_openings)); } else if let Some(fixed_commit) = 
self.vk.fixed_no_omc_init_commit.as_ref() && shard_id > 0 { rounds.push((fixed_commit.clone(), fixed_openings)); } PCS::batch_verify( &self.vk.vp, rounds, &vm_proof.opening_proof, &mut transcript, ) .map_err(ZKVMError::PCSError)?; let initial_global_state = eval_by_expr_with_instance( &[], &[], &[], pi_evals, &challenges, &self.vk.initial_global_state_expr, ) .right() .unwrap(); prod_w *= initial_global_state; let finalize_global_state = eval_by_expr_with_instance( &[], &[], &[], pi_evals, &challenges, &self.vk.finalize_global_state_expr, ) .right() .unwrap(); prod_r *= finalize_global_state; // check rw_set equality of shard proof if prod_r != prod_w { return Err(ZKVMError::VerifyError( format!("{shard_id}th prod_r != prod_w").into(), )); } // check logup sum of shard proof if logup_sum != E::ZERO { return Err(ZKVMError::VerifyError( format!("{shard_id}th logup_sum({:?}) != 0", logup_sum).into(), )); } Ok(shard_ec_sum) } /// verify proof and return input opening point #[allow(clippy::too_many_arguments, clippy::type_complexity)] pub fn verify_chip_proof( &self, _name: &str, circuit_vk: &VerifyingKey<E>, proof: &ZKVMChipProof<E>, pi: &[E], raw_pi: &[Vec<E::BaseField>], transcript: &mut impl Transcript<E>, num_product_fanin: usize, _out_evals: &PointAndEval<E>, challenges: &[E; 2], // derive challenge from PCS ) -> Result<(Point<E>, Option<SepticPoint<E::BaseField>>), ZKVMError> { let composed_cs = circuit_vk.get_cs(); let ComposedConstrainSystem { zkvm_v1_css: cs, gkr_circuit, } = &composed_cs; let num_instances = proof.num_instances.iter().sum(); let (r_counts_per_instance, w_counts_per_instance, lk_counts_per_instance) = ( cs.r_expressions.len() + cs.r_table_expressions.len(), cs.w_expressions.len() + cs.w_table_expressions.len(), cs.lk_expressions.len() + cs.lk_table_expressions.len(), ); let num_batched = r_counts_per_instance + w_counts_per_instance + lk_counts_per_instance; let next_pow2_instance = next_pow2_instance_padding(num_instances); let mut 
log2_num_instances = ceil_log2(next_pow2_instance); if composed_cs.has_ecc_ops() { // for opcode circuit with ecc ops, the mles have one extra variable // to store the internal partial sums for ecc additions log2_num_instances += 1; } let num_var_with_rotation = log2_num_instances + composed_cs.rotation_vars().unwrap_or(0); // constrain log2_num_instances within max length cs.r_table_expressions .iter() .chain(&cs.w_table_expressions) .for_each(|set_table_expr| { // iterate through structural witins and collect max round. let num_vars = set_table_expr .table_spec .len .map(ceil_log2) .unwrap_or_else(|| { set_table_expr .table_spec .structural_witins .iter() .map(|StructuralWitIn { witin_type, .. }| { let hint_num_vars = log2_num_instances; assert!((1 << hint_num_vars) <= witin_type.max_len()); hint_num_vars }) .max() .unwrap_or(log2_num_instances) }); assert_eq!(num_vars, log2_num_instances); }); cs.lk_table_expressions.iter().for_each(|l| { // iterate through structural witins and collect max round. let num_vars = l.table_spec.len.map(ceil_log2).unwrap_or_else(|| { l.table_spec .structural_witins .iter() .map(|StructuralWitIn { witin_type, .. 
}| { let hint_num_vars = log2_num_instances; assert!((1 << hint_num_vars) <= witin_type.max_len()); hint_num_vars }) .max() .unwrap_or(log2_num_instances) }); assert_eq!(num_vars, log2_num_instances); }); // verify ecc proof if exists let shard_ec_sum: Option<SepticPoint<E::BaseField>> = if composed_cs.has_ecc_ops() { tracing::debug!("verifying ecc proof..."); assert!(proof.ecc_proof.is_some()); let ecc_proof = proof.ecc_proof.as_ref().unwrap(); // let expected_septic_xy = cs // .ec_final_sum // .iter() // .map(|expr| { // eval_by_expr_with_instance(&[], &[], &[], pi, challenges, expr) // .right() // .and_then(|v| v.as_base()) // .unwrap() // }) // .collect_vec(); // let expected_septic_x: SepticExtension<E::BaseField> = // expected_septic_xy[0..SEPTIC_EXTENSION_DEGREE].into(); // let expected_septic_y: SepticExtension<E::BaseField> = // expected_septic_xy[SEPTIC_EXTENSION_DEGREE..].into(); // assert_eq!(&ecc_proof.sum.x, &expected_septic_x); // assert_eq!(&ecc_proof.sum.y, &expected_septic_y); assert!(!ecc_proof.sum.is_infinity); EccVerifier::verify_ecc_proof(ecc_proof, transcript)?; tracing::debug!("ecc proof verified."); Some(ecc_proof.sum.clone()) } else { None }; // verify and reduce product tower sumcheck let tower_proofs = &proof.tower_proof; let (_, record_evals, logup_p_evals, logup_q_evals) = TowerVerify::verify( proof .r_out_evals .iter() .cloned() .chain(proof.w_out_evals.iter().cloned()) .collect_vec(), proof.lk_out_evals.clone(), tower_proofs, vec![num_var_with_rotation; num_batched], num_product_fanin, transcript, )?; if cs.lk_table_expressions.is_empty() { // verify LogUp witness nominator p(x) ?= constant vector 1 logup_p_evals .iter() .try_for_each(|PointAndEval { eval, .. 
}| { if *eval != E::ONE { Err(ZKVMError::VerifyError( "Lookup table witness p(x) != constant 1".into(), )) } else { Ok(()) } })?; } debug_assert!( chain!(&record_evals, &logup_p_evals, &logup_q_evals) .map(|e| &e.point) .all_equal() ); let num_rw_records = r_counts_per_instance + w_counts_per_instance; debug_assert_eq!(record_evals.len(), num_rw_records); debug_assert_eq!(logup_p_evals.len(), lk_counts_per_instance); debug_assert_eq!(logup_q_evals.len(), lk_counts_per_instance); let evals = record_evals .iter() // append p_evals if there got lk table expressions .chain(if cs.lk_table_expressions.is_empty() { Either::Left(iter::empty()) } else { Either::Right(logup_p_evals.iter()) }) .chain(&logup_q_evals) .cloned() .collect_vec(); let gkr_circuit = gkr_circuit.as_ref().unwrap(); let selector_ctxs = if cs.ec_final_sum.is_empty() { assert_eq!(proof.num_instances.len(), 1); // it's not shard chip vec![ SelectorContext::new(0, num_instances, num_var_with_rotation); gkr_circuit .layers .first() .map(|layer| layer.out_sel_and_eval_exprs.len()) .unwrap_or(0) ] } else { assert_eq!(proof.num_instances.len(), 2); // it's shard chip tracing::debug!( "num_reads: {}, num_writes: {}, total: {}", proof.num_instances[0], proof.num_instances[1], proof.num_instances[0] + proof.num_instances[1], ); vec![ SelectorContext { offset: 0, num_instances: proof.num_instances[0], num_vars: num_var_with_rotation, }, SelectorContext { offset: proof.num_instances[0], num_instances: proof.num_instances[1], num_vars: num_var_with_rotation, }, SelectorContext { offset: 0, num_instances: proof.num_instances[0] + proof.num_instances[1], num_vars: num_var_with_rotation, }, ] }; let (_, rt) = gkr_circuit.verify( num_var_with_rotation, proof.gkr_iop_proof.clone().unwrap(), &evals, pi, raw_pi, challenges, transcript, &selector_ctxs, )?; Ok((rt, shard_ec_sum)) } } pub struct TowerVerify; pub type TowerVerifyResult<E> = Result< ( Point<E>, Vec<PointAndEval<E>>, Vec<PointAndEval<E>>, Vec<PointAndEval<E>>, 
), ZKVMError, >; impl TowerVerify { pub fn verify<E: ExtensionField>( prod_out_evals: Vec<Vec<E>>, logup_out_evals: Vec<Vec<E>>, tower_proofs: &TowerProofs<E>, num_variables: Vec<usize>, num_fanin: usize, transcript: &mut impl Transcript<E>, ) -> TowerVerifyResult<E> { // XXX to sumcheck batched product argument with logup, we limit num_product_fanin to 2 // TODO mayber give a better naming? assert_eq!(num_fanin, 2); let num_prod_spec = prod_out_evals.len(); let num_logup_spec = logup_out_evals.len(); let log2_num_fanin = ceil_log2(num_fanin); // sanity check assert_eq!(num_prod_spec, tower_proofs.prod_spec_size()); assert!(prod_out_evals.iter().all(|evals| evals.len() == num_fanin)); assert_eq!(num_logup_spec, tower_proofs.logup_spec_size()); assert!(logup_out_evals.iter().all(|evals| { evals.len() == 4 // [p1, p2, q1, q2] })); assert_eq!(num_variables.len(), num_prod_spec + num_logup_spec); let alpha_pows = get_challenge_pows( num_prod_spec + num_logup_spec * 2, /* logup occupy 2 sumcheck: numerator and denominator */ transcript, ); let initial_rt: Point<E> = transcript.sample_and_append_vec(b"product_sum", log2_num_fanin); // initial_claim = \sum_j alpha^j * out_j[rt] // out_j[rt] := (record_{j}[rt]) // out_j[rt] := (logup_p{j}[rt]) // out_j[rt] := (logup_q{j}[rt]) // bookkeeping records of latest (point, evaluation) of each layer // prod argument let mut prod_spec_point_n_eval = prod_out_evals .into_iter() .map(|evals| { PointAndEval::new(initial_rt.clone(), evals.into_mle().evaluate(&initial_rt)) }) .collect::<Vec<_>>(); // logup argument for p, q let (mut logup_spec_p_point_n_eval, mut logup_spec_q_point_n_eval) = logup_out_evals .into_iter() .map(|evals| { let (p1, p2, q1, q2) = (evals[0], evals[1], evals[2], evals[3]); ( PointAndEval::new( initial_rt.clone(), vec![p1, p2].into_mle().evaluate(&initial_rt), ), PointAndEval::new( initial_rt.clone(), vec![q1, q2].into_mle().evaluate(&initial_rt), ), ) }) .unzip::<_, _, Vec<_>, Vec<_>>(); // initial claim = 
\sum_j alpha^j * out_j[rt] let initial_claim = izip!(&prod_spec_point_n_eval, &alpha_pows) .map(|(point_n_eval, alpha)| point_n_eval.eval * *alpha) .sum::<E>() + izip!( interleave(&logup_spec_p_point_n_eval, &logup_spec_q_point_n_eval), &alpha_pows[num_prod_spec..] ) .map(|(point_n_eval, alpha)| point_n_eval.eval * *alpha) .sum::<E>(); let max_num_variables = num_variables.iter().max().unwrap(); let (next_rt, _) = (0..(max_num_variables - 1)).try_fold( ( PointAndEval { point: initial_rt, eval: initial_claim, }, alpha_pows, ), |(point_and_eval, alpha_pows), round| { let (out_rt, out_claim) = (&point_and_eval.point, &point_and_eval.eval); let sumcheck_claim = IOPVerifierState::verify( *out_claim, &IOPProof { proofs: tower_proofs.proofs[round].clone(), }, &VPAuxInfo { max_degree: NUM_FANIN + 1, // + 1 for eq max_num_variables: (round + 1) * log2_num_fanin, phantom: PhantomData, }, transcript, ); // check expected_evaluation let rt: Point<E> = sumcheck_claim.point.iter().map(|c| c.elements).collect(); let eq = eq_eval(out_rt, &rt); let expected_evaluation: E = (0..num_prod_spec) .zip(alpha_pows.iter()) .zip(num_variables.iter()) .map(|((spec_index, alpha), max_round)| { // prod'[b] = prod[0,b] * prod[1,b] // prod'[out_rt] = \sum_b eq(out_rt,b) * prod'[b] = \sum_b eq(out_rt,b) * prod[0,b] * prod[1,b] eq * *alpha * if round < *max_round - 1 { tower_proofs.prod_specs_eval[spec_index][round].iter().copied().product() } else { E::ZERO } }) .sum::<E>() + (0..num_logup_spec) .zip_eq(alpha_pows[num_prod_spec..].chunks(2)) .zip_eq(num_variables[num_prod_spec..].iter()) .map(|((spec_index, alpha), max_round)| { // logup_q'[b] = logup_q[0,b] * logup_q[1,b]
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
true
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/scheme/gpu/mod.rs
ceno_zkvm/src/scheme/gpu/mod.rs
use super::hal::{ DeviceTransporter, EccQuarkProver, MainSumcheckProver, OpeningProver, ProverDevice, TowerProver, TraceCommitter, }; use crate::{ error::ZKVMError, scheme::{ cpu::TowerRelationOutput, hal::{DeviceProvingKey, MainSumcheckEvals, ProofInput, TowerProverSpec}, }, structs::{ComposedConstrainSystem, PointAndEval, TowerProofs}, }; use ff_ext::ExtensionField; use gkr_iop::{ gkr::{self, Evaluation, GKRProof, GKRProverOutput, layer::LayerWitness}, gpu::{GpuBackend, GpuProver}, hal::ProverBackend, }; use itertools::{Itertools, chain}; use mpcs::{Point, PolynomialCommitmentScheme}; use multilinear_extensions::{mle::MultilinearExtension, util::ceil_log2}; use std::{collections::BTreeMap, sync::Arc}; use sumcheck::{ macros::{entered_span, exit_span}, structs::IOPProverMessage, util::optimal_sumcheck_threads, }; use transcript::{BasicTranscript, Transcript}; use witness::next_pow2_instance_padding; use tracing::info_span; #[cfg(feature = "gpu")] use gkr_iop::gpu::gpu_prover::*; pub struct GpuTowerProver; use crate::{ scheme::{constants::NUM_FANIN, cpu::CpuEccProver}, structs::EccQuarkProof, }; use gkr_iop::{ gpu::{ArcMultilinearExtensionGpu, MultilinearExtensionGpu}, selector::SelectorContext, }; // Extract out_evals from GPU-built tower witnesses #[allow(clippy::type_complexity)] fn extract_out_evals_from_gpu_towers<E: ff_ext::ExtensionField>( prod_gpu: &[ceno_gpu::GpuProverSpec], // GPU-built product towers logup_gpu: &[ceno_gpu::GpuProverSpec], // GPU-built logup towers r_set_len: usize, ) -> (Vec<Vec<E>>, Vec<Vec<E>>, Vec<Vec<E>>) { // Extract product out_evals from GPU towers let mut r_out_evals = Vec::new(); let mut w_out_evals = Vec::new(); for (i, gpu_spec) in prod_gpu.iter().enumerate() { let first_layer_evals: Vec<E> = gpu_spec .get_output_evals() .expect("Failed to extract final evals from GPU product tower"); // Product tower first layer should have 2 MLEs assert_eq!( first_layer_evals.len(), 2, "Product tower first layer should have 2 MLEs" ); // 
Split into r_out_evals and w_out_evals based on r_set_len if i < r_set_len { r_out_evals.push(first_layer_evals); } else { w_out_evals.push(first_layer_evals); } } // Extract logup out_evals from GPU towers let mut lk_out_evals = Vec::new(); for gpu_spec in logup_gpu.iter() { let first_layer_evals: Vec<E> = gpu_spec .get_output_evals() .expect("Failed to extract final evals from GPU logup tower"); // Logup tower first layer should have 4 MLEs assert_eq!( first_layer_evals.len(), 4, "Logup tower first layer should have 4 MLEs" ); lk_out_evals.push(first_layer_evals); } (r_out_evals, w_out_evals, lk_out_evals) } impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> TraceCommitter<GpuBackend<E, PCS>> for GpuProver<GpuBackend<E, PCS>> { fn commit_traces<'a>( &self, traces: BTreeMap<usize, witness::RowMajorMatrix<E::BaseField>>, ) -> ( Vec<MultilinearExtensionGpu<'a, E>>, <GpuBackend<E, PCS> as ProverBackend>::PcsData, PCS::Commitment, ) { if std::any::TypeId::of::<E::BaseField>() != std::any::TypeId::of::<BB31Base>() { panic!("GPU backend only supports Goldilocks base field"); } let span = entered_span!("[gpu] init pp", profiling_2 = true); let max_poly_size_log2 = traces .values() .map(|trace| ceil_log2(next_pow2_instance_padding(trace.num_instances()))) .max() .unwrap(); if max_poly_size_log2 > self.backend.max_poly_size_log2 { panic!( "max_poly_size_log2 {} > max_poly_size_log2 backend {}", max_poly_size_log2, self.backend.max_poly_size_log2 ) } exit_span!(span); let is_pcs_match = std::mem::size_of::<mpcs::BasefoldCommitmentWithWitness<BB31Ext>>() == std::mem::size_of::<PCS::CommitmentWithWitness>(); let (mles, pcs_data, commit) = if is_pcs_match { let vec_traces: Vec<witness::RowMajorMatrix<E::BaseField>> = traces.into_values().collect(); let span = entered_span!("[gpu] hal init", profiling_2 = true); let cuda_hal = get_cuda_hal().unwrap(); exit_span!(span); let traces_gl64: Vec<witness::RowMajorMatrix<BB31Base>> = unsafe { std::mem::transmute(vec_traces) }; 
let span = entered_span!("[gpu] batch_commit", profiling_2 = true); let pcs_data = cuda_hal .basefold .batch_commit(&cuda_hal, traces_gl64) .unwrap(); exit_span!(span); let span = entered_span!("[gpu] get_pure_commitment", profiling_2 = true); let basefold_commit = cuda_hal.basefold.get_pure_commitment(&pcs_data); exit_span!(span); let span = entered_span!("[gpu] transmute back", profiling_2 = true); let commit: PCS::Commitment = unsafe { std::mem::transmute_copy(&basefold_commit) }; // transmute pcs_data from GPU specific type to generic PcsData type let pcs_data_generic: <GpuBackend<E, PCS> as ProverBackend>::PcsData = unsafe { std::mem::transmute_copy(&pcs_data) }; std::mem::forget(pcs_data); exit_span!(span); (vec![], pcs_data_generic, commit) } else { panic!("GPU commitment data is not compatible with the PCS"); }; // Note: mles are not used by GPU backend // `fn extract_witness_mles` uses `hal.basefold.get_trace` to extract mles from pcs_data (mles, pcs_data, commit) } fn extract_witness_mles<'a, 'b>( &self, _witness_mles: &'b mut Vec<<GpuBackend<E, PCS> as ProverBackend>::MultilinearPoly<'a>>, pcs_data: &'b <GpuBackend<E, PCS> as ProverBackend>::PcsData, ) -> Box< dyn Iterator<Item = Arc<<GpuBackend<E, PCS> as ProverBackend>::MultilinearPoly<'a>>> + 'b, > { // transmute pcs_data from generic PcsData type to GPU-specific type let pcs_data_basefold: &BasefoldCommitmentWithWitnessGpu< BB31Base, BufferImpl<BB31Base>, GpuDigestLayer, GpuMatrix<'static>, GpuPolynomial<'static>, > = unsafe { std::mem::transmute(pcs_data) }; let total_traces = pcs_data_basefold.num_traces(); let mut trace_idx = 0usize; let mut current_iter: std::vec::IntoIter< Arc<<GpuBackend<E, PCS> as ProverBackend>::MultilinearPoly<'a>>, > = Vec::new().into_iter(); let iter = std::iter::from_fn(move || { loop { if let Some(poly) = current_iter.next() { return Some(poly); } if trace_idx >= total_traces { return None; } let cuda_hal = get_cuda_hal().unwrap(); let poly_group = cuda_hal .basefold 
.get_trace(&cuda_hal, pcs_data_basefold, trace_idx) .unwrap_or_else(|err| panic!("Failed to extract trace {trace_idx}: {err}")); trace_idx += 1; drop(cuda_hal); current_iter = poly_group .into_iter() .map(|poly| Arc::new(MultilinearExtensionGpu::from_ceno_gpu(poly))) .collect::<Vec<_>>() .into_iter(); } }); Box::new(iter) } } fn build_tower_witness_gpu<'buf, E: ExtensionField>( composed_cs: &ComposedConstrainSystem<E>, input: &ProofInput<'_, GpuBackend<E, impl PolynomialCommitmentScheme<E>>>, records: &[ArcMultilinearExtensionGpu<'_, E>], challenges: &[E; 2], cuda_hal: &CudaHalBB31, big_buffers: &'buf mut Vec<BufferImpl<BB31Ext>>, ones_buffer: &mut Vec<GpuPolynomialExt<'static>>, view_last_layers: &mut Vec<Vec<Vec<GpuPolynomialExt<'static>>>>, ) -> Result< ( Vec<ceno_gpu::GpuProverSpec<'buf>>, Vec<ceno_gpu::GpuProverSpec<'buf>>, ), String, > { use crate::scheme::constants::{NUM_FANIN, NUM_FANIN_LOGUP}; use ceno_gpu::{CudaHal as _, bb31::GpuPolynomialExt}; use p3::field::FieldAlgebra; let ComposedConstrainSystem { zkvm_v1_css: cs, .. } = composed_cs; let _num_instances_with_rotation = input.num_instances() << composed_cs.rotation_vars().unwrap_or(0); let _chip_record_alpha = challenges[0]; // TODO: safety ? 
let records = unsafe { std::mem::transmute::< &[ArcMultilinearExtensionGpu<'_, E>], &[ArcMultilinearExtensionGpu<'static, E>], >(records) }; // Parse records into different categories (same as build_tower_witness) let num_reads = cs.r_expressions.len() + cs.r_table_expressions.len(); let num_writes = cs.w_expressions.len() + cs.w_table_expressions.len(); let mut offset = 0; let r_set_wit = &records[offset..][..num_reads]; offset += num_reads; let w_set_wit = &records[offset..][..num_writes]; offset += num_writes; let lk_n_wit = &records[offset..][..cs.lk_table_expressions.len()]; offset += cs.lk_table_expressions.len(); let lk_d_wit = if !cs.lk_table_expressions.is_empty() { &records[offset..][..cs.lk_table_expressions.len()] } else { &records[offset..][..cs.lk_expressions.len()] }; assert_eq!(big_buffers.len(), 0, "expect no big buffers"); // prod: last layes & buffer let mut is_prod_buffer_exists = false; let prod_last_layers = r_set_wit .iter() .chain(w_set_wit.iter()) .map(|wit| wit.as_view_chunks(NUM_FANIN)) .collect::<Vec<_>>(); if !prod_last_layers.is_empty() { let first_layer = &prod_last_layers[0]; assert_eq!(first_layer.len(), 2, "prod last_layer must have 2 MLEs"); let num_vars = first_layer[0].num_vars(); let num_towers = prod_last_layers.len(); view_last_layers.push(prod_last_layers); // Allocate one big buffer for all product towers and add it to big_buffers let tower_size = 1 << (num_vars + 1); // 2 * mle_len elements per tower let total_buffer_size = num_towers * tower_size; tracing::debug!( "prod tower request buffer size: {:.2} MB", (total_buffer_size * std::mem::size_of::<BB31Ext>()) as f64 / (1024.0 * 1024.0) ); let big_buffer = cuda_hal .alloc_ext_elems_on_device(total_buffer_size, false) .map_err(|e| format!("Failed to allocate prod GPU buffer: {:?}", e))?; big_buffers.push(big_buffer); is_prod_buffer_exists = true; } // logup: last layes let mut is_logup_buffer_exists = false; let lk_numerator_last_layer = lk_n_wit .iter() .map(|wit| 
wit.as_view_chunks(NUM_FANIN_LOGUP)) .collect::<Vec<_>>(); let lk_denominator_last_layer = lk_d_wit .iter() .map(|wit| wit.as_view_chunks(NUM_FANIN_LOGUP)) .collect::<Vec<_>>(); let logup_last_layers = if !lk_numerator_last_layer.is_empty() { // Case when we have both numerator and denominator // Combine [p1, p2] from numerator and [q1, q2] from denominator lk_numerator_last_layer .into_iter() .zip(lk_denominator_last_layer) .map(|(lk_n_chunks, lk_d_chunks)| { let mut last_layer = lk_n_chunks; last_layer.extend(lk_d_chunks); last_layer }) .collect::<Vec<_>>() } else { if lk_denominator_last_layer.is_empty() { vec![] } else { // Case when numerator is empty - create shared ones_buffer and use views // This saves memory by having all p1, p2 polynomials reference the same buffer let nv = lk_denominator_last_layer[0][0].num_vars(); // Create one shared ones_buffer as Owned (can be 'static) let ones_poly = GpuPolynomialExt::new_with_scalar(&cuda_hal.inner, nv, BB31Ext::ONE) .map_err(|e| format!("Failed to create shared ones_buffer: {:?}", e)) .unwrap(); // SAFETY: Owned buffer can be safely treated as 'static let ones_poly_static: GpuPolynomialExt<'static> = unsafe { std::mem::transmute(ones_poly) }; ones_buffer.push(ones_poly_static); // Get reference from storage to ensure proper lifetime let ones_poly_ref = ones_buffer.last().unwrap(); let mle_len_bytes = ones_poly_ref.evaluations().len() * std::mem::size_of::<BB31Ext>(); // Create views referencing the shared ones_buffer for each tower's p1, p2 lk_denominator_last_layer .into_iter() .map(|lk_d_chunks| { // Create views of ones_buffer for p1 and p2 let p1_view = ones_poly_ref.evaluations().as_slice_range(0..mle_len_bytes); let p2_view = ones_poly_ref.evaluations().as_slice_range(0..mle_len_bytes); let p1_gpu = GpuPolynomialExt::new(BufferImpl::new_from_view(p1_view), nv); let p2_gpu = GpuPolynomialExt::new(BufferImpl::new_from_view(p2_view), nv); // SAFETY: views from 'static buffer can be 'static let p1_gpu: 
GpuPolynomialExt<'static> = unsafe { std::mem::transmute(p1_gpu) }; let p2_gpu: GpuPolynomialExt<'static> = unsafe { std::mem::transmute(p2_gpu) }; // Use [p1, p2, q1, q2] format for the last layer let mut last_layer = vec![p1_gpu, p2_gpu]; last_layer.extend(lk_d_chunks); last_layer }) .collect::<Vec<_>>() } }; if !logup_last_layers.is_empty() { let first_layer = &logup_last_layers[0]; assert_eq!(first_layer.len(), 4, "logup last_layer must have 4 MLEs"); let num_vars = first_layer[0].num_vars(); let num_towers = logup_last_layers.len(); view_last_layers.push(logup_last_layers); // Allocate one big buffer for all towers and add it to big_buffers let tower_size = 1 << (num_vars + 2); // 4 * mle_len elements per tower let total_buffer_size = num_towers * tower_size; tracing::debug!( "logup tower request buffer size: {:.2} MB", (total_buffer_size * std::mem::size_of::<BB31Ext>()) as f64 / (1024.0 * 1024.0) ); let big_buffer = cuda_hal .alloc_ext_elems_on_device(total_buffer_size, false) .unwrap(); big_buffers.push(big_buffer); is_logup_buffer_exists = true; } let (_, pushed_big_buffers) = big_buffers.split_at_mut(0); let (prod_big_buffer, logup_big_buffer) = match ( is_prod_buffer_exists, is_logup_buffer_exists, pushed_big_buffers, ) { (false, false, []) => (None, None), (true, false, [prod]) => (Some(prod), None), (false, true, [logup]) => (None, Some(logup)), (true, true, [prod, logup]) => (Some(prod), Some(logup)), (prod_flag, logup_flag, slice) => { panic!( "unexpected state: prod={}, logup={}, newly_pushed_len={}", prod_flag, logup_flag, slice.len() ); } }; // Build product GpuProverSpecs let mut prod_gpu_specs = Vec::new(); if is_prod_buffer_exists { let prod_last_layers = &view_last_layers[0]; let first_layer = &prod_last_layers[0]; assert_eq!(first_layer.len(), 2, "prod last_layer must have 2 MLEs"); let num_vars = first_layer[0].num_vars(); let num_towers = prod_last_layers.len(); let Some(prod_big_buffer) = prod_big_buffer else { panic!("prod big buffer not 
found"); }; let span_prod = entered_span!( "build_prod_tower", prod_layers = prod_last_layers.len(), profiling_3 = true ); let last_layers_refs: Vec<&[GpuPolynomialExt<'_>]> = prod_last_layers.iter().map(|v| v.as_slice()).collect(); let gpu_specs = { cuda_hal.tower.build_prod_tower_from_gpu_polys_batch( cuda_hal, prod_big_buffer, &last_layers_refs, num_vars, num_towers, ) } .map_err(|e| format!("build_prod_tower_from_gpu_polys_batch failed: {:?}", e))?; prod_gpu_specs.extend(gpu_specs); exit_span!(span_prod); } // Build logup GpuProverSpecs let mut logup_gpu_specs = Vec::new(); if is_logup_buffer_exists { let logup_last_layers = view_last_layers.last().unwrap(); let first_layer = &logup_last_layers[0]; assert_eq!(first_layer.len(), 4, "logup last_layer must have 4 MLEs"); let num_vars = first_layer[0].num_vars(); let num_towers = logup_last_layers.len(); let Some(logup_big_buffer) = logup_big_buffer else { panic!("logup big buffer not found"); }; let span_logup = entered_span!( "build_logup_tower", logup_layers = logup_last_layers.len(), profiling_3 = true ); let last_layers_refs: Vec<&[GpuPolynomialExt<'_>]> = logup_last_layers.iter().map(|v| v.as_slice()).collect(); let gpu_specs = cuda_hal .tower .build_logup_tower_from_gpu_polys_batch( cuda_hal, logup_big_buffer, &last_layers_refs, num_vars, num_towers, ) .map_err(|e| format!("build_logup_tower_from_gpu_polys_batch failed: {:?}", e))?; logup_gpu_specs.extend(gpu_specs); exit_span!(span_logup); } Ok((prod_gpu_specs, logup_gpu_specs)) } impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> TowerProver<GpuBackend<E, PCS>> for GpuProver<GpuBackend<E, PCS>> { #[allow(clippy::type_complexity)] #[tracing::instrument( skip_all, name = "build_tower_witness", fields(profiling_3), level = "trace" )] fn build_tower_witness<'a, 'b, 'c>( &self, _composed_cs: &ComposedConstrainSystem<E>, _input: &ProofInput<'a, GpuBackend<E, PCS>>, _records: &'c [ArcMultilinearExtensionGpu<'b, E>], ) -> ( Vec<Vec<Vec<E>>>, 
Vec<TowerProverSpec<'c, GpuBackend<E, PCS>>>, Vec<TowerProverSpec<'c, GpuBackend<E, PCS>>>, ) where 'a: 'b, 'b: 'c, { panic!("use fn build_tower_witness_gpu instead"); // (vec![], vec![], vec![]) } #[tracing::instrument( skip_all, name = "prove_tower_relation", fields(profiling_3), level = "trace" )] fn prove_tower_relation<'a, 'b, 'c>( &self, composed_cs: &ComposedConstrainSystem<E>, input: &ProofInput<'a, GpuBackend<E, PCS>>, records: &'c [ArcMultilinearExtensionGpu<'b, E>], challenges: &[E; 2], transcript: &mut impl Transcript<E>, ) -> TowerRelationOutput<E> where 'a: 'b, 'b: 'c, { if std::any::TypeId::of::<E::BaseField>() != std::any::TypeId::of::<BB31Base>() { panic!("GPU backend only supports Goldilocks base field"); } // Calculate r_set_len directly from constraint system let ComposedConstrainSystem { zkvm_v1_css: cs, .. } = composed_cs; let r_set_len = cs.r_expressions.len() + cs.r_table_expressions.len(); let cuda_hal = get_cuda_hal().unwrap(); let (point, proof, lk_out_evals, w_out_evals, r_out_evals) = { // build_tower_witness_gpu will allocate buffers and build GPU specs let span = entered_span!("build_tower_witness", profiling_2 = true); let mut _big_buffers: Vec<BufferImpl<BB31Ext>> = Vec::new(); let mut _ones_buffer: Vec<GpuPolynomialExt<'static>> = Vec::new(); let mut _view_last_layers: Vec<Vec<Vec<GpuPolynomialExt<'static>>>> = Vec::new(); let (prod_gpu, logup_gpu) = info_span!("[ceno] build_tower_witness_gpu").in_scope(|| { build_tower_witness_gpu( composed_cs, input, records, challenges, &cuda_hal, &mut _big_buffers, &mut _ones_buffer, &mut _view_last_layers, ) .map_err(|e| format!("build_tower_witness_gpu failed: {}", e)) .unwrap() }); exit_span!(span); // GPU optimization: Extract out_evals from GPU-built towers before consuming them // This is the true optimization - using GPU tower results instead of CPU inference let span = entered_span!("extract_out_evals_from_gpu_towers", profiling_2 = true); let (r_out_evals, w_out_evals, lk_out_evals) = 
extract_out_evals_from_gpu_towers(&prod_gpu, &logup_gpu, r_set_len); exit_span!(span); // transcript >>> BasicTranscript<E> let basic_tr: &mut BasicTranscript<BB31Ext> = unsafe { &mut *(transcript as *mut _ as *mut BasicTranscript<BB31Ext>) }; let input = ceno_gpu::TowerInput { prod_specs: prod_gpu, logup_specs: logup_gpu, }; let span = entered_span!("prove_tower_relation", profiling_2 = true); let (point_gl, proof_gpu) = info_span!("[ceno] prove_tower_relation_gpu").in_scope(|| { cuda_hal .tower .create_proof(&cuda_hal, &input, NUM_FANIN, basic_tr) .expect("gpu tower create_proof failed") }); exit_span!(span); // TowerProofs let point: Point<E> = unsafe { std::mem::transmute(point_gl) }; let proof: TowerProofs<E> = unsafe { std::mem::transmute(proof_gpu) }; (point, proof, lk_out_evals, w_out_evals, r_out_evals) }; (point, proof, lk_out_evals, w_out_evals, r_out_evals) } } impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> MainSumcheckProver<GpuBackend<E, PCS>> for GpuProver<GpuBackend<E, PCS>> { #[allow(clippy::type_complexity)] #[tracing::instrument( skip_all, name = "prove_main_constraints", fields(profiling_3), level = "trace" )] fn prove_main_constraints<'a, 'b>( &self, rt_tower: Vec<E>, // _records: Vec<ArcMultilinearExtensionGpu<'b, E>>, // not used by GPU after delegation input: &'b ProofInput<'a, GpuBackend<E, PCS>>, composed_cs: &ComposedConstrainSystem<E>, challenges: &[E; 2], transcript: &mut impl Transcript<<GpuBackend<E, PCS> as ProverBackend>::E>, ) -> Result< ( Point<E>, MainSumcheckEvals<E>, Option<Vec<IOPProverMessage<E>>>, Option<GKRProof<E>>, ), ZKVMError, > { let ComposedConstrainSystem { zkvm_v1_css: cs, gkr_circuit, } = composed_cs; let num_instances = input.num_instances(); let log2_num_instances = input.log2_num_instances(); let num_threads = optimal_sumcheck_threads(log2_num_instances); let num_var_with_rotation = log2_num_instances + composed_cs.rotation_vars().unwrap_or(0); let Some(gkr_circuit) = gkr_circuit else { 
panic!("empty gkr circuit") }; let selector_ctxs = if cs.ec_final_sum.is_empty() { // it's not global chip vec![ SelectorContext { offset: 0, num_instances, num_vars: num_var_with_rotation, }; gkr_circuit .layers .first() .map(|layer| layer.out_sel_and_eval_exprs.len()) .unwrap_or(0) ] } else { // it's global chip vec![ SelectorContext { offset: 0, num_instances: input.num_instances[0], num_vars: num_var_with_rotation, }, SelectorContext { offset: input.num_instances[0], num_instances: input.num_instances[1], num_vars: num_var_with_rotation, }, SelectorContext { offset: 0, num_instances, num_vars: num_var_with_rotation, }, ] }; let pub_io_mles = cs .instance_openings .iter() .map(|instance| input.public_input[instance.0].clone()) .collect_vec(); let GKRProverOutput { gkr_proof, opening_evaluations, mut rt, } = gkr_circuit.prove::<GpuBackend<E, PCS>, GpuProver<_>>( num_threads, num_var_with_rotation, gkr::GKRCircuitWitness { layers: vec![LayerWitness( chain!( &input.witness, &input.fixed, &pub_io_mles, &input.structural_witness, ) .cloned() .collect_vec(), )], }, // eval value doesnt matter as it wont be used by prover &vec![PointAndEval::new(rt_tower, E::ZERO); gkr_circuit.final_out_evals.len()], &input .pub_io_evals .iter() .map(|v| v.map_either(E::from, |v| v).into_inner()) .collect_vec(), challenges, transcript, &selector_ctxs, )?; assert_eq!(rt.len(), 1, "TODO support multi-layer gkr iop"); Ok(( rt.remove(0), MainSumcheckEvals { wits_in_evals: opening_evaluations .iter() .take(cs.num_witin as usize) .map(|Evaluation { value, .. }| value) .copied() .collect_vec(), fixed_in_evals: opening_evaluations .iter() .skip(cs.num_witin as usize) .take(cs.num_fixed) .map(|Evaluation { value, .. 
}| value) .copied() .collect_vec(), }, None, Some(gkr_proof), )) } } impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> EccQuarkProver<GpuBackend<E, PCS>> for GpuProver<GpuBackend<E, PCS>> { fn prove_ec_sum_quark<'a>( &self, num_instances: usize, xs: Vec<Arc<MultilinearExtensionGpu<'a, E>>>, ys: Vec<Arc<MultilinearExtensionGpu<'a, E>>>, invs: Vec<Arc<MultilinearExtensionGpu<'a, E>>>, transcript: &mut impl Transcript<E>, ) -> Result<EccQuarkProof<E>, ZKVMError> { // TODO implement GPU version of `create_ecc_proof` let xs = xs.iter().map(|mle| mle.inner_to_mle().into()).collect_vec(); let ys = ys.iter().map(|mle| mle.inner_to_mle().into()).collect_vec(); let invs = invs .iter() .map(|mle| mle.inner_to_mle().into()) .collect_vec(); Ok(CpuEccProver::create_ecc_proof( num_instances, xs, ys, invs, transcript, )) } } impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> OpeningProver<GpuBackend<E, PCS>> for GpuProver<GpuBackend<E, PCS>> { fn open( &self, witness_data: <GpuBackend<E, PCS> as ProverBackend>::PcsData, fixed_data: Option<Arc<<GpuBackend<E, PCS> as ProverBackend>::PcsData>>, points: Vec<Point<E>>, mut evals: Vec<Vec<Vec<E>>>, // where each inner Vec<E> = wit_evals + fixed_evals transcript: &mut (impl Transcript<E> + 'static), ) -> PCS::Proof { if std::any::TypeId::of::<E::BaseField>() != std::any::TypeId::of::<BB31Base>() { panic!("GPU backend only supports Goldilocks base field"); } let mut rounds = vec![]; rounds.push((&witness_data, { evals .iter_mut() .zip(&points) .filter_map(|(evals, point)| { let witin_evals = evals.remove(0); if !witin_evals.is_empty() { Some((point.clone(), witin_evals)) } else { None } }) .collect_vec() })); if let Some(fixed_data) = fixed_data.as_ref().map(|f| f.as_ref()) { rounds.push((fixed_data, { evals .iter_mut() .zip(points) .filter_map(|(evals, point)| { if !evals.is_empty() && !evals[0].is_empty() { Some((point.clone(), evals.remove(0))) } else { None } }) .collect_vec() })); } // Type conversions using unsafe 
transmute let prover_param = &self.backend.pp; let pp_gl64: &mpcs::basefold::structure::BasefoldProverParams< BB31Ext, mpcs::BasefoldRSParams, > = unsafe { std::mem::transmute(prover_param) }; let rounds_gl64: Vec<_> = rounds .iter() .map(|(commitment, point_eval_pairs)| { let commitment_gl64: &BasefoldCommitmentWithWitnessGpu< BB31Base, BufferImpl<BB31Base>, GpuDigestLayer, GpuMatrix<'static>, GpuPolynomial<'static>, > = unsafe { std::mem::transmute(*commitment) }; let point_eval_pairs_gl64: Vec<_> = point_eval_pairs .iter() .map(|(point, evals)| { let point_gl64: &Vec<BB31Ext> = unsafe { std::mem::transmute(point) }; let evals_gl64: &Vec<BB31Ext> = unsafe { std::mem::transmute(evals) }; (point_gl64.clone(), evals_gl64.clone()) }) .collect(); (commitment_gl64, point_eval_pairs_gl64) }) .collect(); let gpu_proof = if std::any::TypeId::of::<E>() == std::any::TypeId::of::<BB31Ext>() { let transcript_any = transcript as &mut dyn std::any::Any; let basic_transcript = transcript_any .downcast_mut::<BasicTranscript<BB31Ext>>() .expect("Type should match"); let cuda_hal = get_cuda_hal().unwrap(); let gpu_proof_basefold = cuda_hal .basefold .batch_open(&cuda_hal, pp_gl64, rounds_gl64, basic_transcript) .unwrap(); let gpu_proof: PCS::Proof = unsafe { std::mem::transmute_copy(&gpu_proof_basefold) }; std::mem::forget(gpu_proof_basefold); gpu_proof } else { panic!("GPU backend only supports Goldilocks base field"); }; gpu_proof } } impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> DeviceTransporter<GpuBackend<E, PCS>> for GpuProver<GpuBackend<E, PCS>> { fn transport_proving_key( &self, is_first_shard: bool, pk: Arc< crate::structs::ZKVMProvingKey< <GpuBackend<E, PCS> as ProverBackend>::E,
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
true
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/scheme/cpu/mod.rs
ceno_zkvm/src/scheme/cpu/mod.rs
use super::hal::{ DeviceTransporter, MainSumcheckEvals, MainSumcheckProver, OpeningProver, ProverDevice, TowerProver, TraceCommitter, }; use crate::{ error::ZKVMError, scheme::{ constants::{NUM_FANIN, SEPTIC_EXTENSION_DEGREE}, hal::{DeviceProvingKey, EccQuarkProver, ProofInput, TowerProverSpec}, septic_curve::{SepticExtension, SepticPoint, SymbolicSepticExtension}, utils::{infer_tower_logup_witness, infer_tower_product_witness}, }, structs::{ComposedConstrainSystem, EccQuarkProof, PointAndEval, TowerProofs}, }; use either::Either; use ff_ext::ExtensionField; use gkr_iop::{ cpu::{CpuBackend, CpuProver}, gkr::{self, Evaluation, GKRProof, GKRProverOutput, layer::LayerWitness}, hal::ProverBackend, selector::{SelectorContext, SelectorType}, }; use itertools::{Itertools, chain}; use mpcs::{Point, PolynomialCommitmentScheme}; use multilinear_extensions::{ Expression, mle::{ArcMultilinearExtension, FieldType, IntoMLE, MultilinearExtension}, util::ceil_log2, virtual_poly::build_eq_x_r_vec, virtual_polys::VirtualPolynomialsBuilder, }; use rayon::iter::{ IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelIterator, }; use std::{collections::BTreeMap, sync::Arc}; use sumcheck::{ macros::{entered_span, exit_span}, structs::{IOPProverMessage, IOPProverState}, util::{get_challenge_pows, optimal_sumcheck_threads}, }; use transcript::Transcript; use witness::next_pow2_instance_padding; pub type TowerRelationOutput<E> = ( Point<E>, TowerProofs<E>, Vec<Vec<E>>, Vec<Vec<E>>, Vec<Vec<E>>, ); // accumulate N=2^n EC points into one EC point using affine coordinates // in one layer which borrows ideas from the [Quark paper](https://eprint.iacr.org/2020/1275.pdf) pub struct CpuEccProver; impl CpuEccProver { pub fn create_ecc_proof<'a, E: ExtensionField>( num_instances: usize, xs: Vec<Arc<MultilinearExtension<'a, E>>>, ys: Vec<Arc<MultilinearExtension<'a, E>>>, invs: Vec<Arc<MultilinearExtension<'a, E>>>, transcript: &mut impl 
Transcript<E>, ) -> EccQuarkProof<E> { assert_eq!(xs.len(), SEPTIC_EXTENSION_DEGREE); assert_eq!(ys.len(), SEPTIC_EXTENSION_DEGREE); let n = xs[0].num_vars() - 1; tracing::debug!( "Creating EC Summation Quark proof with {} points in {n} variables", num_instances ); let out_rt = transcript.sample_and_append_vec(b"ecc", n); let num_threads = optimal_sumcheck_threads(out_rt.len()); // expression with add (3 zero constrains) and bypass (2 zero constrains) let alpha_pows = transcript.sample_and_append_challenge_pows( SEPTIC_EXTENSION_DEGREE * 3 + SEPTIC_EXTENSION_DEGREE * 2, b"ecc_alpha", ); let mut alpha_pows_iter = alpha_pows.iter(); let mut expr_builder = VirtualPolynomialsBuilder::new(num_threads, out_rt.len()); let sel_add = SelectorType::QuarkBinaryTreeLessThan(0.into()); let sel_add_ctx = SelectorContext { offset: 0, num_instances, num_vars: n, }; let mut sel_add_mle: MultilinearExtension<'_, E> = sel_add.compute(&out_rt, &sel_add_ctx).unwrap(); // we construct sel_bypass witness here // verifier can derive it via `sel_bypass = eq - sel_add - sel_last_onehot` let mut sel_bypass_mle: Vec<E> = build_eq_x_r_vec(&out_rt); match sel_add_mle.evaluations() { FieldType::Ext(sel_add_mle) => sel_add_mle .par_iter() .zip_eq(sel_bypass_mle.par_iter_mut()) .for_each(|(sel_add, sel_bypass)| { if *sel_add != E::ZERO { *sel_bypass = E::ZERO; } }), _ => unreachable!(), } *sel_bypass_mle.last_mut().unwrap() = E::ZERO; let mut sel_bypass_mle = sel_bypass_mle.into_mle(); let sel_add_expr = expr_builder.lift(sel_add_mle.to_either()); let sel_bypass_expr = expr_builder.lift(sel_bypass_mle.to_either()); let mut exprs_add = vec![]; let mut exprs_bypass = vec![]; let filter_bj = |v: &[Arc<MultilinearExtension<'_, E>>], j: usize| { v.iter() .map(|v| { v.get_base_field_vec() .iter() .enumerate() .filter(|(i, _)| *i % 2 == j) .map(|(_, v)| v) .cloned() .collect_vec() .into_mle() }) .collect_vec() }; // build x[b,0], x[b,1], y[b,0], y[b,1] let mut x0 = filter_bj(&xs, 0); let mut y0 = 
filter_bj(&ys, 0); let mut x1 = filter_bj(&xs, 1); let mut y1 = filter_bj(&ys, 1); // build x[1,b], y[1,b], s[1,b] let mut x3 = xs.iter().map(|x| x.as_view_slice(2, 1)).collect_vec(); let mut y3 = ys.iter().map(|x| x.as_view_slice(2, 1)).collect_vec(); let mut s = invs.iter().map(|x| x.as_view_slice(2, 1)).collect_vec(); let s = SymbolicSepticExtension::new( s.iter_mut() .map(|s| expr_builder.lift(s.to_either())) .collect(), ); let x0 = SymbolicSepticExtension::new( x0.iter_mut() .map(|x| expr_builder.lift(x.to_either())) .collect(), ); let y0 = SymbolicSepticExtension::new( y0.iter_mut() .map(|y| expr_builder.lift(y.to_either())) .collect(), ); let x1 = SymbolicSepticExtension::new( x1.iter_mut() .map(|x| expr_builder.lift(x.to_either())) .collect(), ); let y1 = SymbolicSepticExtension::new( y1.iter_mut() .map(|y| expr_builder.lift(y.to_either())) .collect(), ); let x3 = SymbolicSepticExtension::new( x3.iter_mut() .map(|x| expr_builder.lift(x.to_either())) .collect(), ); let y3 = SymbolicSepticExtension::new( y3.iter_mut() .map(|y| expr_builder.lift(y.to_either())) .collect(), ); // affine addition // zerocheck: 0 = s[1,b] * (x[b,0] - x[b,1]) - (y[b,0] - y[b,1]) with b != (1,...,1) exprs_add.extend( (s.clone() * (&x0 - &x1) - (&y0 - &y1)) .to_exprs() .into_iter() .zip_eq(alpha_pows_iter.by_ref().take(SEPTIC_EXTENSION_DEGREE)) .map(|(e, alpha)| e * Expression::Constant(Either::Right(*alpha))), ); // zerocheck: 0 = s[1,b]^2 - x[b,0] - x[b,1] - x[1,b] with b != (1,...,1) exprs_add.extend( ((&s * &s) - &x0 - &x1 - &x3) .to_exprs() .into_iter() .zip_eq(alpha_pows_iter.by_ref().take(SEPTIC_EXTENSION_DEGREE)) .map(|(e, alpha)| e * Expression::Constant(Either::Right(*alpha))), ); // zerocheck: 0 = s[1,b] * (x[b,0] - x[1,b]) - (y[b,0] + y[1,b]) with b != (1,...,1) exprs_add.extend( (s.clone() * (&x0 - &x3) - (&y0 + &y3)) .to_exprs() .into_iter() .zip_eq(alpha_pows_iter.by_ref().take(SEPTIC_EXTENSION_DEGREE)) .map(|(e, alpha)| e * 
Expression::Constant(Either::Right(*alpha))), ); let exprs_add = exprs_add.into_iter().sum::<Expression<E>>() * sel_add_expr; // deal with bypass // 0 = (x[1,b] - x[b,0]) exprs_bypass.extend( (&x3 - &x0) .to_exprs() .into_iter() .zip_eq(alpha_pows_iter.by_ref().take(SEPTIC_EXTENSION_DEGREE)) .map(|(e, alpha)| e * Expression::Constant(Either::Right(*alpha))), ); // 0 = (y[1,b] - y[b,0]) exprs_bypass.extend( (&y3 - &y0) .to_exprs() .into_iter() .zip_eq(alpha_pows_iter.by_ref().take(SEPTIC_EXTENSION_DEGREE)) .map(|(e, alpha)| e * Expression::Constant(Either::Right(*alpha))), ); assert!(alpha_pows_iter.next().is_none()); let exprs_bypass = exprs_bypass.into_iter().sum::<Expression<E>>() * sel_bypass_expr; let (zerocheck_proof, state) = IOPProverState::prove( expr_builder.to_virtual_polys(&[exprs_add + exprs_bypass], &[]), transcript, ); let rt = state.collect_raw_challenges(); let evals = state.get_mle_flatten_final_evaluations(); // 7 for x[rt,0], x[rt,1], y[rt,0], y[rt,1], x[1,rt], y[1,rt], s[1,rt] assert_eq!(evals.len(), 2 + SEPTIC_EXTENSION_DEGREE * 7); let last_evaluation_index = (1 << n) - 1; let x3 = xs.iter().map(|x| x.as_view_slice(2, 1)).collect_vec(); let y3 = ys.iter().map(|y| y.as_view_slice(2, 1)).collect_vec(); let final_sum_x: SepticExtension<E::BaseField> = (x3.iter()) .map(|x| x.get_base_field_vec()[last_evaluation_index - 1]) // x[1,...,1,0] .collect_vec() .into(); let final_sum_y: SepticExtension<E::BaseField> = (y3.iter()) .map(|y| y.get_base_field_vec()[last_evaluation_index - 1]) // x[1,...,1,0] .collect_vec() .into(); let final_sum = SepticPoint::from_affine(final_sum_x, final_sum_y); #[cfg(feature = "sanity-check")] { let s = invs.iter().map(|x| x.as_view_slice(2, 1)).collect_vec(); let x0 = filter_bj(&xs, 0); let y0 = filter_bj(&ys, 0); let x1 = filter_bj(&xs, 1); let y1 = filter_bj(&ys, 1); let evals = &evals[2..]; // check evaluations for i in 0..SEPTIC_EXTENSION_DEGREE { assert_eq!(s[i].evaluate(&rt), evals[i]); 
assert_eq!(x0[i].evaluate(&rt), evals[SEPTIC_EXTENSION_DEGREE + i]); assert_eq!(y0[i].evaluate(&rt), evals[SEPTIC_EXTENSION_DEGREE * 2 + i]); assert_eq!(x1[i].evaluate(&rt), evals[SEPTIC_EXTENSION_DEGREE * 3 + i]); assert_eq!(y1[i].evaluate(&rt), evals[SEPTIC_EXTENSION_DEGREE * 4 + i]); assert_eq!(x3[i].evaluate(&rt), evals[SEPTIC_EXTENSION_DEGREE * 5 + i]); assert_eq!(y3[i].evaluate(&rt), evals[SEPTIC_EXTENSION_DEGREE * 6 + i]); } } assert_eq!(zerocheck_proof.extract_sum(), E::ZERO); EccQuarkProof { zerocheck_proof, num_instances, evals, rt, sum: final_sum, } } } impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> EccQuarkProver<CpuBackend<E, PCS>> for CpuProver<CpuBackend<E, PCS>> { fn prove_ec_sum_quark<'a>( &self, num_instances: usize, xs: Vec<Arc<MultilinearExtension<'a, E>>>, ys: Vec<Arc<MultilinearExtension<'a, E>>>, invs: Vec<Arc<MultilinearExtension<'a, E>>>, transcript: &mut impl Transcript<E>, ) -> Result<EccQuarkProof<E>, ZKVMError> { Ok(CpuEccProver::create_ecc_proof( num_instances, xs, ys, invs, transcript, )) } } pub struct CpuTowerProver; impl CpuTowerProver { pub fn create_proof<'a, E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>( prod_specs: Vec<TowerProverSpec<'a, CpuBackend<E, PCS>>>, logup_specs: Vec<TowerProverSpec<'a, CpuBackend<E, PCS>>>, num_fanin: usize, transcript: &mut impl Transcript<E>, ) -> (Point<E>, TowerProofs<E>) { #[derive(Debug, Clone)] enum GroupedMLE<'a, E: ExtensionField> { Prod((usize, Vec<MultilinearExtension<'a, E>>)), // usize is the index in prod_specs Logup((usize, Vec<MultilinearExtension<'a, E>>)), // usize is the index in logup_specs } // XXX to sumcheck batched product argument with logup, we limit num_product_fanin to 2 // TODO maybe give a better naming? 
assert_eq!(num_fanin, 2); let (prod_specs_len, logup_specs_len) = (prod_specs.len(), logup_specs.len()); let mut proofs = TowerProofs::new(prod_specs_len, logup_specs_len); let log_num_fanin = ceil_log2(num_fanin); // -1 for sliding windows size 2: (cur_layer, next_layer) w.r.t total size let max_round_index = prod_specs .iter() .chain(logup_specs.iter()) .map(|m| m.witness.len()) .max() .unwrap() - 1; // index start from 0 // generate alpha challenge let alpha_pows = get_challenge_pows( prod_specs_len + // logup occupy 2 sumcheck: numerator and denominator logup_specs_len * 2, transcript, ); let initial_rt: Point<E> = transcript.sample_and_append_vec(b"product_sum", log_num_fanin); let (mut out_rt, mut alpha_pows) = (initial_rt, alpha_pows); let mut layer_witness: Vec<Vec<GroupedMLE<'a, E>>> = vec![Vec::new(); max_round_index + 1]; #[allow(clippy::type_complexity)] fn merge_spec_witness<'b, E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>( merged: &mut [Vec<GroupedMLE<'b, E>>], spec: TowerProverSpec<'b, CpuBackend<E, PCS>>, index: usize, group_ctor: fn((usize, Vec<MultilinearExtension<'b, E>>)) -> GroupedMLE<'b, E>, ) { for (round_idx, round_vec) in spec.witness.into_iter().enumerate() { merged[round_idx].push(group_ctor((index, round_vec))); } } // merge prod_specs for (i, spec) in prod_specs.into_iter().enumerate() { merge_spec_witness(&mut layer_witness, spec, i, GroupedMLE::Prod); } // merge logup_specs for (i, spec) in logup_specs.into_iter().enumerate() { merge_spec_witness(&mut layer_witness, spec, i, GroupedMLE::Logup); } // skip(1) for output layer for (round, mut layer_witness) in layer_witness.into_iter().enumerate().skip(1) { // in first few round we just run on single thread let num_threads = optimal_sumcheck_threads(out_rt.len()); let mut exprs = Vec::<Expression<E>>::with_capacity(prod_specs_len + logup_specs_len); let mut expr_builder = VirtualPolynomialsBuilder::new(num_threads, out_rt.len()); let mut witness_prod_expr = vec![vec![]; 
prod_specs_len]; let mut witness_lk_expr = vec![vec![]; logup_specs_len]; let mut eq: MultilinearExtension<E> = build_eq_x_r_vec(&out_rt).into_mle(); let eq_expr = expr_builder.lift(Either::Right(&mut eq)); // processing exprs for group_witness in layer_witness.iter_mut() { match group_witness { GroupedMLE::Prod((i, layer_polys)) => { let alpha_expr = Expression::Constant(Either::Right(alpha_pows[*i])); // sanity check assert_eq!(layer_polys.len(), num_fanin); assert!( layer_polys .iter() .all(|f| { f.evaluations().len() == 1 << (log_num_fanin * round) }) ); let layer_polys = layer_polys .iter_mut() .map(|layer_poly| expr_builder.lift(layer_poly.to_either())) .collect_vec(); witness_prod_expr[*i].extend(layer_polys.clone()); let layer_polys_product = layer_polys.into_iter().product::<Expression<E>>(); // \sum_s eq(rt, s) * alpha^{i} * ([in_i0[s] * in_i1[s] * .... in_i{num_product_fanin}[s]]) exprs.push(eq_expr.clone() * alpha_expr * layer_polys_product); } GroupedMLE::Logup((i, layer_polys)) => { // sanity check assert_eq!(layer_polys.len(), 2 * num_fanin); // p1, p2, q1, q2 assert!( layer_polys .iter() .all(|f| f.evaluations().len() == 1 << (log_num_fanin * round)), ); let (alpha_numerator, alpha_denominator) = ( Expression::Constant(Either::Right( alpha_pows[prod_specs_len + *i * 2], // numerator and denominator )), Expression::Constant(Either::Right( alpha_pows[prod_specs_len + *i * 2 + 1], )), ); let (p1, rest) = layer_polys.split_at_mut(1); let (p2, rest) = rest.split_at_mut(1); let (q1, q2) = rest.split_at_mut(1); let (p1, p2, q1, q2) = ( expr_builder.lift(p1[0].to_either()), expr_builder.lift(p2[0].to_either()), expr_builder.lift(q1[0].to_either()), expr_builder.lift(q2[0].to_either()), ); witness_lk_expr[*i].extend(vec![ p1.clone(), p2.clone(), q1.clone(), q2.clone(), ]); // \sum_s eq(rt, s) * (alpha_numerator^{i} * (p1 * q2 + p2 * q1) + alpha_denominator^{i} * q1 * q2) exprs.push( eq_expr.clone() * (alpha_numerator * (p1 * q2.clone() + p2 * q1.clone()) + 
alpha_denominator * q1 * q2), ); } } } let wrap_batch_span = entered_span!("wrap_batch"); let (sumcheck_proofs, state) = IOPProverState::prove( expr_builder.to_virtual_polys(&[exprs.into_iter().sum()], &[]), transcript, ); exit_span!(wrap_batch_span); proofs.push_sumcheck_proofs(sumcheck_proofs.proofs); // rt' = r_merge || rt let r_merge = transcript.sample_and_append_vec(b"merge", log_num_fanin); let rt_prime = [state.collect_raw_challenges(), r_merge].concat(); // generate next round challenge let next_alpha_pows = get_challenge_pows( prod_specs_len + logup_specs_len * 2, /* logup occupy 2 sumcheck: numerator and denominator */ transcript, ); let evals = state.get_mle_flatten_final_evaluations(); // retrieve final evaluation to proof for (i, witness_prod_expr) in witness_prod_expr.iter().enumerate().take(prod_specs_len) { let evals = witness_prod_expr .iter() .map(|expr| match expr { Expression::WitIn(wit_id) => evals[*wit_id as usize], _ => unreachable!(), }) .collect_vec(); if !evals.is_empty() { assert_eq!(evals.len(), num_fanin); proofs.push_prod_evals_and_point(i, evals, rt_prime.clone()); } } for (i, witness_lk_expr) in witness_lk_expr.iter().enumerate().take(logup_specs_len) { let evals = witness_lk_expr .iter() .map(|expr| match expr { Expression::WitIn(wit_id) => evals[*wit_id as usize], _ => unreachable!(), }) .collect_vec(); if !evals.is_empty() { assert_eq!(evals.len(), 4); // p1, p2, q1, q2 proofs.push_logup_evals_and_point(i, evals, rt_prime.clone()); } } out_rt = rt_prime; alpha_pows = next_alpha_pows; } let next_rt = out_rt; (next_rt, proofs) } } impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> TraceCommitter<CpuBackend<E, PCS>> for CpuProver<CpuBackend<E, PCS>> { fn commit_traces<'a>( &self, traces: BTreeMap<usize, witness::RowMajorMatrix<E::BaseField>>, ) -> ( Vec<MultilinearExtension<'a, E>>, PCS::CommitmentWithWitness, PCS::Commitment, ) { let max_poly_size_log2 = traces .values() .map(|trace| 
ceil_log2(next_pow2_instance_padding(trace.num_instances()))) .max() .unwrap(); if max_poly_size_log2 > self.backend.max_poly_size_log2 { panic!( "max_poly_size_log2 {max_poly_size_log2} > max_poly_size_log2 backend {}", self.backend.max_poly_size_log2 ) } let prover_param = &self.backend.pp; let pcs_data = PCS::batch_commit(prover_param, traces.into_values().collect_vec()).unwrap(); let commit = PCS::get_pure_commitment(&pcs_data); let mles = PCS::get_arc_mle_witness_from_commitment(&pcs_data) .into_par_iter() .map(|mle| mle.as_ref().clone()) .collect::<Vec<_>>(); (mles, pcs_data, commit) } fn extract_witness_mles<'a, 'b>( &self, witness_mles: &'b mut Vec<<CpuBackend<E, PCS> as ProverBackend>::MultilinearPoly<'a>>, _pcs_data: &'b <CpuBackend<E, PCS> as ProverBackend>::PcsData, ) -> Box< dyn Iterator<Item = Arc<<CpuBackend<E, PCS> as ProverBackend>::MultilinearPoly<'a>>> + 'b, > { let iter = witness_mles.drain(..).map(Arc::new); Box::new(iter) } } impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> TowerProver<CpuBackend<E, PCS>> for CpuProver<CpuBackend<E, PCS>> { #[allow(clippy::type_complexity)] #[tracing::instrument( skip_all, name = "build_tower_witness", fields(profiling_3), level = "trace" )] fn build_tower_witness<'a, 'b, 'c>( &self, composed_cs: &ComposedConstrainSystem<E>, input: &ProofInput<'a, CpuBackend<E, PCS>>, records: &'c [ArcMultilinearExtension<'b, E>], ) -> ( Vec<Vec<Vec<E>>>, Vec<TowerProverSpec<'c, CpuBackend<E, PCS>>>, Vec<TowerProverSpec<'c, CpuBackend<E, PCS>>>, ) where 'a: 'b, 'b: 'c, { let ComposedConstrainSystem { zkvm_v1_css: cs, .. 
} = composed_cs; let num_var_with_rotation = input.log2_num_instances() + composed_cs.rotation_vars().unwrap_or(0); let num_reads = cs.r_expressions.len() + cs.r_table_expressions.len(); let num_writes = cs.w_expressions.len() + cs.w_table_expressions.len(); let mut offset = 0; let r_set_wit = &records[offset..][..num_reads]; assert_eq!(r_set_wit.len(), num_reads); offset += num_reads; let w_set_wit = &records[offset..][..num_writes]; assert_eq!(w_set_wit.len(), num_writes); offset += num_writes; let lk_n_wit = &records[offset..][..cs.lk_table_expressions.len()]; offset += cs.lk_table_expressions.len(); let lk_d_wit = if !cs.lk_table_expressions.is_empty() { &records[offset..][..cs.lk_table_expressions.len()] } else { &records[offset..][..cs.lk_expressions.len()] }; // infer all tower witness after last layer let span = entered_span!("tower_witness_last_layer"); let mut r_set_last_layer = r_set_wit .iter() .chain(w_set_wit.iter()) .map(|wit| wit.as_view_chunks(NUM_FANIN)) .collect::<Vec<_>>(); let w_set_last_layer = r_set_last_layer.split_off(r_set_wit.len()); let mut lk_numerator_last_layer = lk_n_wit .iter() .chain(lk_d_wit.iter()) .map(|wit| wit.as_view_chunks(NUM_FANIN)) .collect::<Vec<_>>(); let lk_denominator_last_layer = lk_numerator_last_layer.split_off(lk_n_wit.len()); exit_span!(span); let span = entered_span!("tower_tower_witness"); let r_wit_layers = r_set_last_layer .into_iter() .map(|last_layer| { infer_tower_product_witness(num_var_with_rotation, last_layer, NUM_FANIN) }) .collect_vec(); let w_wit_layers = w_set_last_layer .into_iter() .map(|last_layer| { infer_tower_product_witness(num_var_with_rotation, last_layer, NUM_FANIN) }) .collect_vec(); let lk_wit_layers = if !lk_numerator_last_layer.is_empty() { lk_numerator_last_layer .into_iter() .zip(lk_denominator_last_layer) .map(|(lk_n, lk_d)| infer_tower_logup_witness(Some(lk_n), lk_d)) .collect_vec() } else { lk_denominator_last_layer .into_iter() .map(|lk_d| infer_tower_logup_witness(None, lk_d)) 
.collect_vec() }; exit_span!(span); if cfg!(test) { // sanity check assert_eq!(r_wit_layers.len(), num_reads); assert!( r_wit_layers .iter() .zip(r_set_wit.iter()) // depth equals to num_vars .all(|(layers, origin_mle)| layers.len() == origin_mle.num_vars()) ); assert!(r_wit_layers.iter().all(|layers| { layers.iter().enumerate().all(|(i, w)| { let expected_size = 1 << i; w[0].evaluations().len() == expected_size && w[1].evaluations().len() == expected_size }) })); assert_eq!(w_wit_layers.len(), num_writes); assert!( w_wit_layers .iter() .zip(w_set_wit.iter()) // depth equals to num_vars .all(|(layers, origin_mle)| layers.len() == origin_mle.num_vars()) ); assert!(w_wit_layers.iter().all(|layers| { layers.iter().enumerate().all(|(i, w)| { let expected_size = 1 << i; w[0].evaluations().len() == expected_size && w[1].evaluations().len() == expected_size }) })); assert_eq!( lk_wit_layers.len(), cs.lk_table_expressions.len() + cs.lk_expressions.len() ); assert!( lk_wit_layers .iter() .zip(lk_n_wit.iter()) // depth equals to num_vars .all(|(layers, origin_mle)| layers.len() == origin_mle.num_vars()) ); assert!(lk_wit_layers.iter().all(|layers| { layers.iter().enumerate().all(|(i, w)| { let expected_size = 1 << i; let (p1, p2, q1, q2) = (&w[0], &w[1], &w[2], &w[3]); p1.evaluations().len() == expected_size && p2.evaluations().len() == expected_size && q1.evaluations().len() == expected_size && q2.evaluations().len() == expected_size }) })); } // final evals for verifier let r_out_evals = r_wit_layers .iter() .map(|r_wit_layers| { r_wit_layers[0] .iter() .map(|mle| mle.get_ext_field_vec()[0]) .collect_vec() }) .collect_vec(); let w_out_evals = w_wit_layers .iter() .map(|w_wit_layers| { w_wit_layers[0] .iter() .map(|mle| mle.get_ext_field_vec()[0]) .collect_vec() }) .collect_vec(); let lk_out_evals = lk_wit_layers .iter() .map(|lk_wit_layers| { lk_wit_layers[0] .iter() .map(|mle| mle.get_ext_field_vec()[0]) .collect_vec() }) .collect_vec(); let prod_specs = r_wit_layers 
.into_iter() .chain(w_wit_layers) .map(|witness| TowerProverSpec { witness }) .collect_vec(); let lookup_specs = lk_wit_layers .into_iter() .map(|witness| TowerProverSpec { witness }) .collect_vec(); let out_evals = vec![r_out_evals, w_out_evals, lk_out_evals]; (out_evals, prod_specs, lookup_specs) } #[tracing::instrument( skip_all, name = "prove_tower_relation", fields(profiling_3), level = "trace" )] fn prove_tower_relation<'a, 'b, 'c>( &self, composed_cs: &ComposedConstrainSystem<E>, input: &ProofInput<'a, CpuBackend<E, PCS>>, records: &'c [Arc<MultilinearExtension<'b, E>>], _challenges: &[E; 2], transcript: &mut impl Transcript<E>, ) -> TowerRelationOutput<E> where 'a: 'b, 'b: 'c, { // First build tower witness let span = entered_span!("build_tower_witness", profiling_2 = true); let (mut out_evals, prod_specs, logup_specs) = self.build_tower_witness(composed_cs, input, records); exit_span!(span); // Then prove the tower relation let span = entered_span!("prove_tower_relation", profiling_2 = true); let (rt, proofs) = CpuTowerProver::create_proof(prod_specs, logup_specs, 2, transcript); let lk_out_evals = out_evals.pop().unwrap(); let w_out_evals = out_evals.pop().unwrap(); let r_out_evals = out_evals.pop().unwrap(); exit_span!(span); (rt, proofs, lk_out_evals, w_out_evals, r_out_evals) } } impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> MainSumcheckProver<CpuBackend<E, PCS>> for CpuProver<CpuBackend<E, PCS>> { #[allow(clippy::type_complexity)] #[tracing::instrument( skip_all, name = "prove_main_constraints", fields(profiling_3), level = "trace" )] fn prove_main_constraints<'a, 'b>( &self, rt_tower: Vec<E>, input: &'b ProofInput<'a, CpuBackend<E, PCS>>, composed_cs: &ComposedConstrainSystem<E>, challenges: &[E; 2], transcript: &mut impl Transcript<<CpuBackend<E, PCS> as ProverBackend>::E>, ) -> Result< ( Point<E>, MainSumcheckEvals<E>, Option<Vec<IOPProverMessage<E>>>, Option<GKRProof<E>>, ), ZKVMError, > { let ComposedConstrainSystem { zkvm_v1_css: 
cs, gkr_circuit, } = composed_cs; let num_instances = input.num_instances(); let log2_num_instances = input.log2_num_instances(); let num_threads = optimal_sumcheck_threads(log2_num_instances); let num_var_with_rotation = log2_num_instances + composed_cs.rotation_vars().unwrap_or(0); let Some(gkr_circuit) = gkr_circuit else { panic!("empty gkr circuit") }; let pub_io_mles = cs .instance_openings .iter() .map(|instance| input.public_input[instance.0].clone()) .collect_vec(); let selector_ctxs = if cs.ec_final_sum.is_empty() { // it's not global chip vec![ SelectorContext { offset: 0, num_instances, num_vars: num_var_with_rotation, }; gkr_circuit .layers .first()
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
true
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/field.rs
ceno_zkvm/src/gadgets/field.rs
use serde::{Deserialize, Serialize}; pub mod field_inner_product; pub mod field_op; pub mod field_sqrt; pub mod range; /// This is an arithmetic operation for emulating modular arithmetic. #[derive(Default, PartialEq, Copy, Clone, Debug, Serialize, Deserialize)] pub enum FieldOperation { /// Addition. #[default] Add, /// Multiplication. Mul, /// Subtraction. Sub, /// Division. Div, }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/is_lt.rs
ceno_zkvm/src/gadgets/is_lt.rs
use std::fmt::Display; use ceno_emul::{SWord, Word}; use ff_ext::ExtensionField; use gkr_iop::error::CircuitBuilderError; use super::SignedExtendConfig; use crate::{ Value, circuit_builder::CircuitBuilder, gadgets::InnerLtConfig, instructions::riscv::constants::{LIMB_BITS, UINT_LIMBS, UInt}, witness::{LkMultiplicity, set_val}, }; use ff_ext::FieldInto; use multilinear_extensions::{Expression, ToExpr, WitIn}; #[derive(Debug)] pub struct AssertSignedLtConfig<E> { config: InnerSignedLtConfig<E>, } impl<E: ExtensionField> AssertSignedLtConfig<E> { pub fn construct_circuit<NR: Into<String> + Display + Clone, N: FnOnce() -> NR>( cb: &mut CircuitBuilder<E>, name_fn: N, lhs: &UInt<E>, rhs: &UInt<E>, ) -> Result<Self, CircuitBuilderError> { cb.namespace( || "assert_signed_lt", |cb| { let name = name_fn(); let config = InnerSignedLtConfig::construct_circuit(cb, name, lhs, rhs, Expression::ONE)?; Ok(Self { config }) }, ) } pub fn assign_instance( &self, instance: &mut [E::BaseField], lkm: &mut LkMultiplicity, lhs: SWord, rhs: SWord, ) -> Result<(), CircuitBuilderError> { self.config.assign_instance(instance, lkm, lhs, rhs)?; Ok(()) } } #[derive(Debug)] pub struct SignedLtConfig<E> { is_lt: WitIn, config: InnerSignedLtConfig<E>, } impl<E: ExtensionField> SignedLtConfig<E> { pub fn expr(&self) -> Expression<E> { self.is_lt.expr() } pub fn construct_circuit<NR: Into<String> + Display + Clone, N: FnOnce() -> NR>( cb: &mut CircuitBuilder<E>, name_fn: N, lhs: &UInt<E>, rhs: &UInt<E>, ) -> Result<Self, CircuitBuilderError> { cb.namespace( || "is_signed_lt", |cb| { let name = name_fn(); let is_lt = cb.create_witin(|| format!("{name} is_signed_lt witin")); cb.assert_bit(|| "is_lt_bit", is_lt.expr())?; let config = InnerSignedLtConfig::construct_circuit(cb, name, lhs, rhs, is_lt.expr())?; Ok(SignedLtConfig { is_lt, config }) }, ) } pub fn assign_instance( &self, instance: &mut [E::BaseField], lkm: &mut LkMultiplicity, lhs: SWord, rhs: SWord, ) -> Result<(), CircuitBuilderError> { 
set_val!(instance, self.is_lt, (lhs < rhs) as u64); self.config .assign_instance(instance, lkm, lhs as SWord, rhs as SWord)?; Ok(()) } } #[derive(Debug)] struct InnerSignedLtConfig<E> { is_lhs_neg: SignedExtendConfig<E>, is_rhs_neg: SignedExtendConfig<E>, config: InnerLtConfig, } impl<E: ExtensionField> InnerSignedLtConfig<E> { pub fn construct_circuit<NR: Into<String> + Display + Clone>( cb: &mut CircuitBuilder<E>, name: NR, lhs: &UInt<E>, rhs: &UInt<E>, is_lt_expr: Expression<E>, ) -> Result<Self, CircuitBuilderError> { // Extract the sign bit. let is_lhs_neg = lhs.is_negative(cb)?; let is_rhs_neg = rhs.is_negative(cb)?; // Convert to field arithmetic. let lhs_value = lhs.to_field_expr(is_lhs_neg.expr()); let rhs_value = rhs.to_field_expr(is_rhs_neg.expr()); let config = InnerLtConfig::construct_circuit( cb, format!("{name} (lhs < rhs)"), lhs_value, rhs_value, is_lt_expr, UINT_LIMBS * LIMB_BITS, )?; Ok(Self { is_lhs_neg, is_rhs_neg, config, }) } pub fn assign_instance( &self, instance: &mut [E::BaseField], lkm: &mut LkMultiplicity, lhs: SWord, rhs: SWord, ) -> Result<(), CircuitBuilderError> { let lhs_value = Value::new_unchecked(lhs as Word); let rhs_value = Value::new_unchecked(rhs as Word); self.is_lhs_neg.assign_instance( instance, lkm, *lhs_value.as_u16_limbs().last().unwrap() as u64, )?; self.is_rhs_neg.assign_instance( instance, lkm, *rhs_value.as_u16_limbs().last().unwrap() as u64, )?; self.config .assign_instance_i64(instance, lkm, lhs as i64, rhs as i64)?; Ok(()) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/div.rs
ceno_zkvm/src/gadgets/div.rs
use std::fmt::Display; use ff_ext::ExtensionField; use gkr_iop::error::CircuitBuilderError; use super::AssertLtConfig; use crate::{ Value, circuit_builder::CircuitBuilder, instructions::riscv::constants::{LIMB_BITS, UINT_LIMBS, UInt}, witness::LkMultiplicity, }; /// divide gadget #[derive(Debug, Clone)] pub struct DivConfig<E: ExtensionField> { pub dividend: UInt<E>, pub r_lt: AssertLtConfig, pub intermediate_mul: UInt<E>, } impl<E: ExtensionField> DivConfig<E> { /// giving divisor, quotient, and remainder /// deriving dividend and respective constrains /// NOTE once divisor is zero, then constrain will always failed pub fn construct_circuit<NR: Into<String> + Display + Clone, N: FnOnce() -> NR>( circuit_builder: &mut CircuitBuilder<E>, name_fn: N, divisor: &mut UInt<E>, quotient: &mut UInt<E>, remainder: &UInt<E>, ) -> Result<Self, CircuitBuilderError> { circuit_builder.namespace(name_fn, |cb| { let (dividend, intermediate_mul) = divisor.mul_add(|| "divisor * outcome + r", cb, quotient, remainder, true)?; let r_lt = AssertLtConfig::construct_circuit( cb, || "remainder < divisor", remainder.value(), divisor.value(), UINT_LIMBS * LIMB_BITS, )?; Ok(Self { dividend, intermediate_mul, r_lt, }) }) } pub fn assign_instance<'a>( &self, instance: &mut [E::BaseField], lkm: &mut LkMultiplicity, divisor: &Value<'a, u32>, quotient: &Value<'a, u32>, remainder: &Value<'a, u32>, ) -> Result<(), CircuitBuilderError> { let (dividend, intermediate) = divisor.mul_add(quotient, remainder, lkm, true); self.r_lt .assign_instance(instance, lkm, remainder.as_u64(), divisor.as_u64())?; self.intermediate_mul .assign_mul_outcome(instance, lkm, &intermediate)?; self.dividend.assign_add_outcome(instance, &dividend); Ok(()) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/util.rs
ceno_zkvm/src/gadgets/util.rs
// This file is modified from succinctlabs/sp1 under MIT license // The MIT License (MIT) // Copyright (c) 2023 Succinct Labs // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use ff_ext::SmallField; use num::BigUint; use sp1_curves::polynomial::Polynomial; fn biguint_to_field<F: SmallField>(num: BigUint) -> F { let mut x = F::ZERO; let mut power = F::from_canonical_u32(1u32); let base = F::from_canonical_u64((1 << 32) % F::MODULUS_U64); let digits = num.iter_u32_digits(); for digit in digits.into_iter() { x += F::from_canonical_u32(digit) * power; power *= base; } x } #[inline] pub fn compute_root_quotient_and_shift<F: SmallField>( p_vanishing: &Polynomial<F>, offset: usize, nb_bits_per_limb: u32, nb_limbs: usize, ) -> Vec<F> { // Evaluate the vanishing polynomial at x = 2^nb_bits_per_limb. 
let p_vanishing_eval = p_vanishing .coefficients() .iter() .enumerate() .map(|(i, x)| { biguint_to_field::<F>(BigUint::from(2u32) << (nb_bits_per_limb * i as u32)) * *x }) .sum::<F>(); debug_assert_eq!(p_vanishing_eval, F::ZERO); // Compute the witness polynomial by witness(x) = vanishing(x) / (x - 2^nb_bits_per_limb). let root_monomial = F::from_canonical_u32(2u32.pow(nb_bits_per_limb)); let p_quotient = p_vanishing.root_quotient(root_monomial); debug_assert_eq!(p_quotient.degree(), p_vanishing.degree() - 1); // Sanity Check #1: For all i, |w_i| < 2^20 to prevent overflows. let offset_u64 = offset as u64; for c in p_quotient.coefficients().iter() { debug_assert!(c.neg().to_canonical_u64() < offset_u64 || c.to_canonical_u64() < offset_u64); } // Sanity Check #2: w(x) * (x - 2^nb_bits_per_limb) = vanishing(x). let x_minus_root = Polynomial::<F>::from_coefficients(&[-root_monomial, F::ONE]); debug_assert_eq!(&p_quotient * &x_minus_root, *p_vanishing); let mut p_quotient_coefficients = p_quotient.as_coefficients(); p_quotient_coefficients.resize(nb_limbs, F::ZERO); // Shifting the witness polynomial to make it positive p_quotient_coefficients .into_iter() .map(|x| x + F::from_canonical_u64(offset_u64)) .collect::<Vec<F>>() } #[inline] pub fn split_u16_limbs_to_u8_limbs<F: SmallField>(slice: &[F]) -> (Vec<F>, Vec<F>) { ( slice .iter() .map(|x| x.to_canonical_u64() as u8) .map(|x| F::from_canonical_u8(x)) .collect(), slice .iter() .map(|x| (x.to_canonical_u64() >> 8) as u8) .map(|x| F::from_canonical_u8(x)) .collect(), ) }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/util_expr.rs
ceno_zkvm/src/gadgets/util_expr.rs
// This file is modified from succinctlabs/sp1 under MIT license // The MIT License (MIT) // Copyright (c) 2023 Succinct Labs // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use ff_ext::ExtensionField; use gkr_iop::{circuit_builder::CircuitBuilder, error::CircuitBuilderError}; use multilinear_extensions::{Expression, ToExpr}; use p3::field::FieldAlgebra; use sp1_curves::{params::FieldParameters, polynomial::Polynomial}; pub fn eval_field_operation<E: ExtensionField, P: FieldParameters>( builder: &mut CircuitBuilder<E>, p_vanishing: &Polynomial<Expression<E>>, p_witness_low: &Polynomial<Expression<E>>, p_witness_high: &Polynomial<Expression<E>>, ) -> Result<(), CircuitBuilderError> { // Reconstruct and shift back the witness polynomial let limb: Expression<E> = E::BaseField::from_canonical_u32(2u32.pow(P::NB_BITS_PER_LIMB as u32)).expr(); let p_witness_shifted = p_witness_low + &(p_witness_high * limb.clone()); // Shift down the witness polynomial. 
Shifting is needed to range check that each // coefficient w_i of the witness polynomial satisfies |w_i| < 2^WITNESS_OFFSET. let offset: Expression<E> = E::BaseField::from_canonical_u32(P::WITNESS_OFFSET as u32).expr(); let len = p_witness_shifted.coefficients().len(); let p_witness = p_witness_shifted - Polynomial::new(vec![offset; len]); // Multiply by (x-2^NB_BITS_PER_LIMB) and make the constraint let root_monomial = Polynomial::new(vec![-limb, E::BaseField::ONE.expr()]); let constraints = p_vanishing - &(p_witness * root_monomial); for constr in constraints.as_coefficients() { builder.require_zero(|| "eval_field_operation require zero", constr)?; } Ok(()) }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/mod.rs
ceno_zkvm/src/gadgets/mod.rs
mod div; mod field; mod is_lt; mod is_zero; mod poseidon2; mod signed; mod signed_ext; mod signed_limbs; mod util; mod util_expr; pub use div::DivConfig; pub use field::*; pub use gkr_iop::gadgets::{ AssertLtConfig, InnerLtConfig, IsEqualConfig, IsLtConfig, IsZeroConfig, cal_lt_diff, }; pub use is_lt::{AssertSignedLtConfig, SignedLtConfig}; pub use is_zero::IsZeroOperation; pub use poseidon2::{Poseidon2BabyBearConfig, Poseidon2Config}; pub use signed::Signed; pub use signed_ext::SignedExtendConfig; pub use signed_limbs::{UIntLimbsLT, UIntLimbsLTConfig};
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/is_zero.rs
ceno_zkvm/src/gadgets/is_zero.rs
// The crate is zero gadget is modified from succinctlabs/sp1 under MIT license // The MIT License (MIT) // Copyright (c) 2023 Succinct Labs // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. //! An operation to check if the input is 0. //! //! This is guaranteed to return 1 if and only if the input is 0. //! //! The idea is that 1 - input * inverse is exactly the boolean value indicating whether the input //! is 0. use derive::AlignedBorrow; use ff_ext::{ExtensionField, SmallField}; use gkr_iop::{circuit_builder::CircuitBuilder, error::CircuitBuilderError}; use multilinear_extensions::{Expression, ToExpr, WitIn}; /// A set of columns needed to compute whether the given word is 0. #[derive(AlignedBorrow, Default, Debug, Clone, Copy)] #[repr(C)] pub struct IsZeroOperation<T> { /// The inverse of the input. pub inverse: T, /// Result indicating whether the input is 0. This equals `inverse * input == 0`. 
pub result: T, } impl IsZeroOperation<WitIn> { pub fn create<E: ExtensionField>(cb: &mut CircuitBuilder<E>) -> Self { Self { inverse: cb.create_witin(|| "IsZeroOperation::inverse"), result: cb.create_bit(|| "IsZeroOperation::result").unwrap(), } } pub fn eval<E: ExtensionField>( &self, builder: &mut CircuitBuilder<E>, a: Expression<E>, ) -> Result<(), CircuitBuilderError> { let one: Expression<E> = Expression::ONE; // 1. Input == 0 => is_zero = 1 regardless of the inverse. // 2. Input != 0 // 2.1. inverse is correctly set => is_zero = 0. // 2.2. inverse is incorrect // 2.2.1 inverse is nonzero => is_zero isn't bool, it fails. // 2.2.2 inverse is 0 => is_zero is 1. But then we would assert that a = 0. And that // assert fails. // If the input is 0, then any product involving it is 0. If it is nonzero and its inverse // is correctly set, then the product is 1. let is_zero = one - self.inverse.expr() * a.expr(); builder.require_equal( || "IsZeroOperation: is_zero == self.result", is_zero, self.result.expr(), )?; // If the result is 1, then the input is 0. builder.require_zero( || "IsZeroOperation: result * input == 0", self.result.expr() * a, ) } } impl<F: SmallField> IsZeroOperation<F> { pub fn populate(&mut self, a: u32) -> u32 { self.populate_from_field_element(F::from_canonical_u32(a)) } pub fn populate_from_field_element(&mut self, a: F) -> u32 { if a == F::ZERO { self.inverse = F::ZERO; self.result = F::ONE; } else { self.inverse = a.inverse(); self.result = F::ZERO; } let prod = self.inverse * a; debug_assert!(prod == F::ONE || prod == F::ZERO); debug_assert_eq!(self.result, F::ONE - self.inverse * a); (a == F::ZERO) as u32 } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/signed.rs
ceno_zkvm/src/gadgets/signed.rs
use std::fmt::Display; use ff_ext::ExtensionField; use crate::{ Value, circuit_builder::CircuitBuilder, error::ZKVMError, instructions::riscv::constants::UInt, witness::LkMultiplicity, }; use multilinear_extensions::Expression; use super::SignedExtendConfig; /// Interprets a `UInt` value as a 2s-complement signed value. /// /// Uses 1 `WitIn` to represent the most sigificant bit of the value. pub struct Signed<E: ExtensionField> { pub is_negative: SignedExtendConfig<E>, val: Expression<E>, } impl<E: ExtensionField> Signed<E> { pub fn construct_circuit<NR: Into<String> + Display + Clone, N: FnOnce() -> NR>( cb: &mut CircuitBuilder<E>, name_fn: N, unsigned_val: &UInt<E>, ) -> Result<Self, ZKVMError> { Ok(cb.namespace(name_fn, |cb| { let is_negative = unsigned_val.is_negative(cb)?; let val = unsigned_val.to_field_expr(is_negative.expr()); Ok(Self { is_negative, val }) })?) } pub fn assign_instance( &self, instance: &mut [E::BaseField], lkm: &mut LkMultiplicity, val: &Value<u32>, ) -> Result<i32, ZKVMError> { self.is_negative.assign_instance( instance, lkm, *val.as_u16_limbs().last().unwrap() as u64, )?; Ok(i32::from(val)) } pub fn expr(&self) -> Expression<E> { self.val.clone() } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/signed_limbs.rs
ceno_zkvm/src/gadgets/signed_limbs.rs
/// circuit implementation refer from https://github.com/openvm-org/openvm/blob/ca36de3803213da664b03d111801ab903d55e360/extensions/rv32im/circuit/src/branch_lt/core.rs use crate::{ circuit_builder::CircuitBuilder, error::ZKVMError, instructions::riscv::constants::{LIMB_BITS, UINT_LIMBS, UInt}, }; use ff_ext::{ExtensionField, FieldInto, SmallField}; use gkr_iop::error::CircuitBuilderError; use multilinear_extensions::{Expression, ToExpr, WitIn}; use p3::field::FieldAlgebra; use std::{array, marker::PhantomData}; use witness::set_val; /// gadget for comparing two `UInt` values, supporting both signed and unsigned modes. /// /// this configuration structure allows flexible comparison logic depending on whether /// the operands should be interpreted as signed or unsigned integers. #[derive(Debug)] pub struct UIntLimbsLTConfig<E: ExtensionField> { // Most significant limb of a and b respectively as a field element, will be range // checked to be within [-32768, 32767) if signed and [0, 65536) if unsigned. pub a_msb_f: WitIn, pub b_msb_f: WitIn, // 1 at the most significant index i such that a[i] != b[i], otherwise 0. If such // an i exists, diff_val = a[i] - b[i]. pub diff_marker: [WitIn; UINT_LIMBS], pub diff_val: WitIn, // 1 if a < b, 0 otherwise. pub cmp_lt: WitIn, phantom: PhantomData<E>, } impl<E: ExtensionField> UIntLimbsLTConfig<E> { pub fn is_lt(&self) -> Expression<E> { self.cmp_lt.expr() } } pub struct UIntLimbsLT<E: ExtensionField> { phantom: PhantomData<E>, } impl<E: ExtensionField> UIntLimbsLT<E> { pub fn construct_circuit( circuit_builder: &mut CircuitBuilder<E>, a: &UInt<E>, b: &UInt<E>, is_sign_comparison: bool, ) -> Result<UIntLimbsLTConfig<E>, ZKVMError> { // 1 if a < b, 0 otherwise. 
let cmp_lt = circuit_builder.create_bit(|| "cmp_lt")?; let a_expr = a.expr(); let b_expr = b.expr(); let a_msb_f = circuit_builder.create_witin(|| "a_msb_f"); let b_msb_f = circuit_builder.create_witin(|| "b_msb_f"); let diff_marker: [WitIn; UINT_LIMBS] = array::from_fn(|i| { circuit_builder .create_bit(|| format!("diff_maker_{i}")) .expect("create_bit_error") }); let diff_val = circuit_builder.create_witin(|| "diff_val"); // Check if a_msb_f and b_msb_f are signed values of a[NUM_LIMBS - 1] and b[NUM_LIMBS - 1] in prime field F. let a_diff = a_expr[UINT_LIMBS - 1].expr() - a_msb_f.expr(); let b_diff = b_expr[UINT_LIMBS - 1].expr() - b_msb_f.expr(); circuit_builder.require_zero( || "a_diff", a_diff.expr() * (E::BaseField::from_canonical_u32(1 << LIMB_BITS).expr() - a_diff.expr()), )?; circuit_builder.require_zero( || "b_diff", b_diff.expr() * (E::BaseField::from_canonical_u32(1 << LIMB_BITS).expr() - b_diff.expr()), )?; let mut prefix_sum = Expression::ZERO; for i in (0..UINT_LIMBS).rev() { let diff = (if i == UINT_LIMBS - 1 { b_msb_f.expr() - a_msb_f.expr() } else { b_expr[i].expr() - a_expr[i].expr() }) * (E::BaseField::from_canonical_u8(2).expr() * cmp_lt.expr() - E::BaseField::ONE.expr()); prefix_sum += diff_marker[i].expr(); circuit_builder.require_zero( || format!("prefix_diff_zero_{i}"), (E::BaseField::ONE.expr() - prefix_sum.expr()) * diff.clone(), )?; circuit_builder.condition_require_zero( || format!("diff_maker_conditional_equal_{i}"), diff_marker[i].expr(), diff_val.expr() - diff.expr(), )?; } // - If x != y, then prefix_sum = 1 so marker[i] must be 1 iff i is the first index where diff != 0. // Constrains that diff == diff_val where diff_val is non-zero. // - If x == y, then prefix_sum = 0 and cmp_lt = 0. // Here, prefix_sum cannot be 1 because all diff are zero, making diff == diff_val fails. 
circuit_builder.assert_bit(|| "prefix_sum_bit", prefix_sum.expr())?; circuit_builder.condition_require_zero( || "cmp_lt_conditional_zero", E::BaseField::ONE.expr() - prefix_sum.expr(), cmp_lt.expr(), )?; // Range check to ensure diff_val is non-zero. circuit_builder.assert_ux::<_, _, LIMB_BITS>( || "diff_val is non-zero", prefix_sum.expr() * (diff_val.expr() - E::BaseField::ONE.expr()), )?; circuit_builder.assert_ux::<_, _, LIMB_BITS>( || "a_msb_f_signed_range_check", a_msb_f.expr() + if is_sign_comparison { E::BaseField::from_canonical_u32(1 << (LIMB_BITS - 1)).expr() } else { Expression::ZERO }, )?; circuit_builder.assert_ux::<_, _, LIMB_BITS>( || "b_msb_f_signed_range_check", b_msb_f.expr() + if is_sign_comparison { E::BaseField::from_canonical_u32(1 << (LIMB_BITS - 1)).expr() } else { Expression::ZERO }, )?; Ok(UIntLimbsLTConfig { a_msb_f, b_msb_f, diff_marker, diff_val, cmp_lt, phantom: PhantomData, }) } pub fn assign( config: &UIntLimbsLTConfig<E>, instance: &mut [E::BaseField], lkm: &mut gkr_iop::utils::lk_multiplicity::LkMultiplicity, a: &[u16], b: &[u16], is_sign_comparison: bool, ) -> Result<(), CircuitBuilderError> { let (cmp_lt, diff_idx, is_a_neg, is_b_neg) = run_cmp(is_sign_comparison, a, b); config .diff_marker .iter() .enumerate() .for_each(|(i, witin)| { set_val!(instance, witin, (i == diff_idx) as u64); }); set_val!(instance, config.cmp_lt, cmp_lt as u64); // We range check (read_rs1_msb_f + 2^(LIMB_BITS - 1)) and (read_rs2_msb_f + 2^(LIMB_BITS - 1)) if negative, // otherwise read_rs1_msb_f and read_rs2_msb_f let (a_msb_f, a_msb_range) = if is_a_neg { ( -E::BaseField::from_canonical_u32((1 << LIMB_BITS) - a[UINT_LIMBS - 1] as u32), a[UINT_LIMBS - 1] - (1 << (LIMB_BITS - 1)), ) } else { ( E::BaseField::from_canonical_u16(a[UINT_LIMBS - 1]), a[UINT_LIMBS - 1] + ((is_sign_comparison as u16) << (LIMB_BITS - 1)), ) }; let (b_msb_f, b_msb_range) = if is_b_neg { ( -E::BaseField::from_canonical_u32((1 << LIMB_BITS) - b[UINT_LIMBS - 1] as u32), 
b[UINT_LIMBS - 1] - (1 << (LIMB_BITS - 1)), ) } else { ( E::BaseField::from_canonical_u16(b[UINT_LIMBS - 1]), b[UINT_LIMBS - 1] + ((is_sign_comparison as u16) << (LIMB_BITS - 1)), ) }; set_val!(instance, config.a_msb_f, a_msb_f); set_val!(instance, config.b_msb_f, b_msb_f); let diff_val = if diff_idx == UINT_LIMBS { 0 } else if diff_idx == (UINT_LIMBS - 1) { if cmp_lt { b_msb_f - a_msb_f } else { a_msb_f - b_msb_f } .to_canonical_u64() as u16 } else if cmp_lt { b[diff_idx] - a[diff_idx] } else { a[diff_idx] - b[diff_idx] }; set_val!(instance, config.diff_val, diff_val as u64); if diff_idx != UINT_LIMBS { lkm.assert_ux::<LIMB_BITS>((diff_val - 1) as u64); } else { lkm.assert_ux::<LIMB_BITS>(0); } lkm.assert_ux::<LIMB_BITS>(a_msb_range as u64); lkm.assert_ux::<LIMB_BITS>(b_msb_range as u64); Ok(()) } } // returns (cmp_lt, diff_idx, is_x_neg, is_y_neg) // cmp_lt = true if a < b else false pub fn run_cmp(signed: bool, x: &[u16], y: &[u16]) -> (bool, usize, bool, bool) { let is_x_neg = (x[UINT_LIMBS - 1] >> (LIMB_BITS - 1) == 1) && signed; let is_y_neg = (y[UINT_LIMBS - 1] >> (LIMB_BITS - 1) == 1) && signed; for i in (0..UINT_LIMBS).rev() { if x[i] != y[i] { return ((x[i] < y[i]) ^ is_x_neg ^ is_y_neg, i, is_x_neg, is_y_neg); } } (false, UINT_LIMBS, is_x_neg, is_y_neg) }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/poseidon2.rs
ceno_zkvm/src/gadgets/poseidon2.rs
// Poseidon2 over BabyBear field use std::{ borrow::{Borrow, BorrowMut}, iter::from_fn, mem::transmute, }; use ff_ext::{BabyBearExt4, ExtensionField}; use gkr_iop::error::CircuitBuilderError; use itertools::Itertools; use multilinear_extensions::{Expression, ToExpr, WitIn}; use num_bigint::BigUint; use p3::{ babybear::BabyBearInternalLayerParameters, field::{Field, FieldAlgebra, PrimeField}, monty_31::InternalLayerBaseParameters, poseidon2::{GenericPoseidon2LinearLayers, MDSMat4, mds_light_permutation}, poseidon2_air::{FullRound, PartialRound, Poseidon2Cols, SBox, num_cols}, }; use crate::circuit_builder::CircuitBuilder; // copied from poseidon2-air/src/constants.rs // as the original one cannot be accessed here #[derive(Debug, Clone)] pub struct RoundConstants< F: Field, const WIDTH: usize, const HALF_FULL_ROUNDS: usize, const PARTIAL_ROUNDS: usize, > { pub beginning_full_round_constants: [[F; WIDTH]; HALF_FULL_ROUNDS], pub partial_round_constants: [F; PARTIAL_ROUNDS], pub ending_full_round_constants: [[F; WIDTH]; HALF_FULL_ROUNDS], } impl<F: Field, const WIDTH: usize, const HALF_FULL_ROUNDS: usize, const PARTIAL_ROUNDS: usize> From<Vec<F>> for RoundConstants<F, WIDTH, HALF_FULL_ROUNDS, PARTIAL_ROUNDS> { fn from(value: Vec<F>) -> Self { let mut iter = value.into_iter(); let mut beginning_full_round_constants = [[F::ZERO; WIDTH]; HALF_FULL_ROUNDS]; beginning_full_round_constants.iter_mut().for_each(|arr| { arr.iter_mut() .for_each(|c| *c = iter.next().expect("insufficient round constants")) }); let mut partial_round_constants = [F::ZERO; PARTIAL_ROUNDS]; partial_round_constants .iter_mut() .for_each(|arr| *arr = iter.next().expect("insufficient round constants")); let mut ending_full_round_constants = [[F::ZERO; WIDTH]; HALF_FULL_ROUNDS]; ending_full_round_constants.iter_mut().for_each(|arr| { arr.iter_mut() .for_each(|c| *c = iter.next().expect("insufficient round constants")) }); assert!(iter.next().is_none(), "round constants are too many"); RoundConstants { 
beginning_full_round_constants, partial_round_constants, ending_full_round_constants, } } } pub type Poseidon2BabyBearConfig = Poseidon2Config<BabyBearExt4, 16, 7, 1, 4, 13>; pub struct Poseidon2Config< E: ExtensionField, const STATE_WIDTH: usize, const SBOX_DEGREE: u64, const SBOX_REGISTERS: usize, const HALF_FULL_ROUNDS: usize, const PARTIAL_ROUNDS: usize, > { p3_cols: Vec<WitIn>, // columns in the plonky3-air post_linear_layer_cols: Vec<WitIn>, /* additional columns to hold the state after linear layers */ constants: RoundConstants<E::BaseField, STATE_WIDTH, HALF_FULL_ROUNDS, PARTIAL_ROUNDS>, } #[derive(Debug, Clone)] pub struct Poseidon2LinearLayers; impl<F: Field, const WIDTH: usize> GenericPoseidon2LinearLayers<F, WIDTH> for Poseidon2LinearLayers { fn internal_linear_layer(state: &mut [F; WIDTH]) { // this only works when F is BabyBear field for now let babybear_prime = BigUint::from(0x7800_0001u32); if F::order() == babybear_prime { let diag_m1_matrix = &<BabyBearInternalLayerParameters as InternalLayerBaseParameters< _, 16, >>::INTERNAL_DIAG_MONTY; let diag_m1_matrix: &[F; WIDTH] = unsafe { transmute(diag_m1_matrix) }; let sum = state.iter().cloned().sum::<F>(); for (input, diag_m1) in state.iter_mut().zip(diag_m1_matrix) { *input = sum + F::from_f(*diag_m1) * *input; } } else { panic!("Unsupported field"); } } fn external_linear_layer(state: &mut [F; WIDTH]) { mds_light_permutation(state, &MDSMat4); } } impl< E: ExtensionField, const STATE_WIDTH: usize, const SBOX_DEGREE: u64, const SBOX_REGISTERS: usize, const HALF_FULL_ROUNDS: usize, const PARTIAL_ROUNDS: usize, > Poseidon2Config<E, STATE_WIDTH, SBOX_DEGREE, SBOX_REGISTERS, HALF_FULL_ROUNDS, PARTIAL_ROUNDS> { // constraints taken from poseidon2_air/src/air.rs fn eval_sbox( sbox: &SBox<Expression<E>, SBOX_DEGREE, SBOX_REGISTERS>, x: &mut Expression<E>, cb: &mut CircuitBuilder<E>, ) -> Result<(), CircuitBuilderError> { *x = match (SBOX_DEGREE, SBOX_REGISTERS) { (3, 0) => x.cube(), (5, 0) => 
x.exp_const_u64::<5>(), (7, 0) => x.exp_const_u64::<7>(), (5, 1) => { let committed_x3: Expression<E> = sbox.0[0].clone(); let x2: Expression<E> = x.square(); cb.require_zero( || "x3 = x.cube()", committed_x3.clone() - x2.clone() * x.clone(), )?; committed_x3 * x2 } (7, 1) => { let committed_x3: Expression<E> = sbox.0[0].clone(); cb.require_zero(|| "x3 = x.cube()", committed_x3.clone() - x.cube())?; committed_x3.square() * x.clone() } _ => panic!( "Unexpected (SBOX_DEGREE, SBOX_REGISTERS) of ({}, {})", SBOX_DEGREE, SBOX_REGISTERS ), }; Ok(()) } fn eval_full_round( state: &mut [Expression<E>; STATE_WIDTH], full_round: &FullRound<Expression<E>, STATE_WIDTH, SBOX_DEGREE, SBOX_REGISTERS>, round_constants: &[E::BaseField], cb: &mut CircuitBuilder<E>, ) -> Result<(), CircuitBuilderError> { for (i, (s, r)) in state.iter_mut().zip_eq(round_constants.iter()).enumerate() { *s = s.clone() + r.expr(); Self::eval_sbox(&full_round.sbox[i], s, cb)?; } Self::external_linear_layer(state); for (state_i, post_i) in state.iter_mut().zip_eq(full_round.post.iter()) { cb.require_equal(|| "post_i = state_i", state_i.clone(), post_i.clone())?; *state_i = post_i.clone(); } Ok(()) } fn eval_partial_round( state: &mut [Expression<E>; STATE_WIDTH], post_linear_layer: &WitIn, partial_round: &PartialRound<Expression<E>, STATE_WIDTH, SBOX_DEGREE, SBOX_REGISTERS>, round_constant: &E::BaseField, cb: &mut CircuitBuilder<E>, ) -> Result<(), CircuitBuilderError> { cb.require_equal( || "post_linear_layer[0] = state[0]", post_linear_layer.expr(), state[0].clone() + round_constant.expr(), )?; state[0] = post_linear_layer.expr(); Self::eval_sbox(&partial_round.sbox, &mut state[0], cb)?; cb.require_zero( || "state[0] = post_sbox", state[0].clone() - partial_round.post_sbox.clone(), )?; state[0] = partial_round.post_sbox.clone(); Self::internal_linear_layer(state); Ok(()) } fn external_linear_layer(state: &mut [Expression<E>; STATE_WIDTH]) { mds_light_permutation(state, &MDSMat4); } fn 
internal_linear_layer(state: &mut [Expression<E>; STATE_WIDTH]) { let sum: Expression<E> = state.iter().map(|s| s.get_monomial_form()).sum(); // reduce to monomial form let sum = sum.get_monomial_form(); let babybear_prime = BigUint::from(0x7800_0001u32); if E::BaseField::order() == babybear_prime { // BabyBear let diag_m1_matrix_bb = &<BabyBearInternalLayerParameters as InternalLayerBaseParameters<_, 16>>:: INTERNAL_DIAG_MONTY; let diag_m1_matrix: &[E::BaseField; STATE_WIDTH] = unsafe { transmute(diag_m1_matrix_bb) }; for (input, diag_m1) in state.iter_mut().zip_eq(diag_m1_matrix) { let updated = sum.clone() + Expression::from_f(*diag_m1) * input.clone(); // reduce to monomial form *input = updated.get_monomial_form(); } } else { panic!("Unsupported field"); } } pub fn construct( cb: &mut CircuitBuilder<E>, round_constants: RoundConstants< E::BaseField, STATE_WIDTH, HALF_FULL_ROUNDS, PARTIAL_ROUNDS, >, ) -> Self { let num_p3_cols = num_cols::<STATE_WIDTH, SBOX_DEGREE, SBOX_REGISTERS, HALF_FULL_ROUNDS, PARTIAL_ROUNDS>( ); let p3_cols = from_fn(|| Some(cb.create_witin(|| "poseidon2 col"))) .take(num_p3_cols) .collect::<Vec<_>>(); let mut col_exprs = p3_cols .iter() .map(|c| c.expr()) .collect::<Vec<Expression<E>>>(); // allocate columns to cache the state after each linear layer // 1. before 0th full round let mut post_linear_layer_cols = (0..STATE_WIDTH) .map(|j| { cb.create_witin(|| format!("[before 0th full round] post linear layer col[{j}]")) }) .collect::<Vec<WitIn>>(); // 2. before each partial round for i in 0..PARTIAL_ROUNDS { post_linear_layer_cols.push(cb.create_witin(|| { format!("[round {}] post linear layer col", i + HALF_FULL_ROUNDS) })); } // 3. 
before HALF_FULL_ROUNDS-th full round post_linear_layer_cols.extend((0..STATE_WIDTH).map(|j| { cb.create_witin(|| { format!( "[before {}th full round] post linear layer col[{j}]", HALF_FULL_ROUNDS ) }) })); let poseidon2_cols: &mut Poseidon2Cols< Expression<E>, STATE_WIDTH, SBOX_DEGREE, SBOX_REGISTERS, HALF_FULL_ROUNDS, PARTIAL_ROUNDS, > = col_exprs.as_mut_slice().borrow_mut(); // external linear layer Self::external_linear_layer(&mut poseidon2_cols.inputs); // after linear layer, each state_i has ~STATE_WIDTH terms // therefore, we want to reduce that to one as the number of terms // after sbox(state_i + rc_i) = (state_i + rc_i)^d will explode poseidon2_cols .inputs .iter_mut() .zip_eq(post_linear_layer_cols[0..STATE_WIDTH].iter()) .for_each(|(input, post_linear)| { cb.require_equal( || "post_linear_layer = input", post_linear.expr(), input.clone(), ) .unwrap(); *input = post_linear.expr(); }); // eval full round for round in 0..HALF_FULL_ROUNDS { Self::eval_full_round( &mut poseidon2_cols.inputs, &poseidon2_cols.beginning_full_rounds[round], &round_constants.beginning_full_round_constants[round], cb, ) .unwrap(); } // eval partial round for round in 0..PARTIAL_ROUNDS { Self::eval_partial_round( &mut poseidon2_cols.inputs, &post_linear_layer_cols[STATE_WIDTH + round], &poseidon2_cols.partial_rounds[round], &round_constants.partial_round_constants[round], cb, ) .unwrap(); } poseidon2_cols .inputs .iter_mut() .zip_eq(post_linear_layer_cols[STATE_WIDTH + PARTIAL_ROUNDS..].iter()) .for_each(|(input, post_linear)| { cb.require_equal( || "post_linear_layer = input", post_linear.expr(), input.clone(), ) .unwrap(); *input = post_linear.expr(); }); // eval full round for round in 0..HALF_FULL_ROUNDS { Self::eval_full_round( &mut poseidon2_cols.inputs, &poseidon2_cols.ending_full_rounds[round], &round_constants.ending_full_round_constants[round], cb, ) .unwrap(); } Poseidon2Config { p3_cols, post_linear_layer_cols, constants: round_constants, } } pub fn inputs(&self) -> 
Vec<Expression<E>> { let col_exprs = self.p3_cols.iter().map(|c| c.expr()).collect::<Vec<_>>(); let poseidon2_cols: &Poseidon2Cols< Expression<E>, STATE_WIDTH, SBOX_DEGREE, SBOX_REGISTERS, HALF_FULL_ROUNDS, PARTIAL_ROUNDS, > = col_exprs.as_slice().borrow(); poseidon2_cols.inputs.to_vec() } pub fn output(&self) -> Vec<Expression<E>> { let col_exprs = self.p3_cols.iter().map(|c| c.expr()).collect::<Vec<_>>(); let poseidon2_cols: &Poseidon2Cols< Expression<E>, STATE_WIDTH, SBOX_DEGREE, SBOX_REGISTERS, HALF_FULL_ROUNDS, PARTIAL_ROUNDS, > = col_exprs.as_slice().borrow(); poseidon2_cols .ending_full_rounds .last() .map(|r| r.post.to_vec()) .unwrap() } fn num_p3_cols(&self) -> usize { self.p3_cols.len() } pub fn num_cols(&self) -> usize { self.p3_cols.len() + self.post_linear_layer_cols.len() } pub fn assign_instance( &self, instance: &mut [E::BaseField], state: [E::BaseField; STATE_WIDTH], ) { let (p3_cols, post_linear_layer_cols) = instance.split_at_mut(self.num_p3_cols()); let poseidon2_cols: &mut Poseidon2Cols< E::BaseField, STATE_WIDTH, SBOX_DEGREE, SBOX_REGISTERS, HALF_FULL_ROUNDS, PARTIAL_ROUNDS, > = p3_cols.borrow_mut(); generate_trace_rows_for_perm::< E::BaseField, Poseidon2LinearLayers, STATE_WIDTH, SBOX_DEGREE, SBOX_REGISTERS, HALF_FULL_ROUNDS, PARTIAL_ROUNDS, >( poseidon2_cols, post_linear_layer_cols, state, &self.constants, ); } } ////////////////////////////////////////////////////////////////////////// /// The following routines are taken from poseidon2-air/src/generation.rs ////////////////////////////////////////////////////////////////////////// fn generate_trace_rows_for_perm< F: PrimeField, LinearLayers: GenericPoseidon2LinearLayers<F, WIDTH>, const WIDTH: usize, const SBOX_DEGREE: u64, const SBOX_REGISTERS: usize, const HALF_FULL_ROUNDS: usize, const PARTIAL_ROUNDS: usize, >( perm: &mut Poseidon2Cols< F, WIDTH, SBOX_DEGREE, SBOX_REGISTERS, HALF_FULL_ROUNDS, PARTIAL_ROUNDS, >, post_linear_layers: &mut [F], mut state: [F; WIDTH], constants: 
&RoundConstants<F, WIDTH, HALF_FULL_ROUNDS, PARTIAL_ROUNDS>, ) { perm.export = F::ONE; perm.inputs .iter_mut() .zip(state.iter()) .for_each(|(input, &x)| { *input = x; }); LinearLayers::external_linear_layer(&mut state); // 1. before 0th full round // post_linear_layer[i] = state[i] post_linear_layers[0..WIDTH] .iter_mut() .zip(state.iter()) .for_each(|(post, &x)| { *post = x; }); for (full_round, constants) in perm .beginning_full_rounds .iter_mut() .zip(&constants.beginning_full_round_constants) { generate_full_round::<F, LinearLayers, WIDTH, SBOX_DEGREE, SBOX_REGISTERS>( &mut state, full_round, constants, ); } for (i, (partial_round, constant)) in perm .partial_rounds .iter_mut() .zip(&constants.partial_round_constants) .enumerate() { generate_partial_round::<F, LinearLayers, WIDTH, SBOX_DEGREE, SBOX_REGISTERS>( &mut state, &mut post_linear_layers[WIDTH + i], partial_round, *constant, ); } // 3. before HALF_FULL_ROUNDS-th full round // post_linear_layer[i] = state[i] post_linear_layers[WIDTH + PARTIAL_ROUNDS..] 
.iter_mut() .zip(state.iter()) .for_each(|(post, &x)| { *post = x; }); for (full_round, constants) in perm .ending_full_rounds .iter_mut() .zip(&constants.ending_full_round_constants) { generate_full_round::<F, LinearLayers, WIDTH, SBOX_DEGREE, SBOX_REGISTERS>( &mut state, full_round, constants, ); } } #[inline] fn generate_full_round< F: PrimeField, LinearLayers: GenericPoseidon2LinearLayers<F, WIDTH>, const WIDTH: usize, const SBOX_DEGREE: u64, const SBOX_REGISTERS: usize, >( state: &mut [F; WIDTH], full_round: &mut FullRound<F, WIDTH, SBOX_DEGREE, SBOX_REGISTERS>, round_constants: &[F; WIDTH], ) { for (state_i, const_i) in state.iter_mut().zip(round_constants) { *state_i += *const_i; } for (state_i, sbox_i) in state.iter_mut().zip(full_round.sbox.iter_mut()) { generate_sbox(sbox_i, state_i); } LinearLayers::external_linear_layer(state); full_round .post .iter_mut() .zip(*state) .for_each(|(post, x)| { *post = x; }); } #[inline] fn generate_partial_round< F: PrimeField, LinearLayers: GenericPoseidon2LinearLayers<F, WIDTH>, const WIDTH: usize, const SBOX_DEGREE: u64, const SBOX_REGISTERS: usize, >( state: &mut [F; WIDTH], post_linear_layer: &mut F, partial_round: &mut PartialRound<F, WIDTH, SBOX_DEGREE, SBOX_REGISTERS>, round_constant: F, ) { state[0] += round_constant; *post_linear_layer = state[0]; generate_sbox(&mut partial_round.sbox, &mut state[0]); partial_round.post_sbox = state[0]; LinearLayers::internal_linear_layer(state); } #[inline] fn generate_sbox<F: PrimeField, const DEGREE: u64, const REGISTERS: usize>( sbox: &mut SBox<F, DEGREE, REGISTERS>, x: &mut F, ) { *x = match (DEGREE, REGISTERS) { (3, 0) => x.cube(), (5, 0) => x.exp_const_u64::<5>(), (7, 0) => x.exp_const_u64::<7>(), (5, 1) => { let x2 = x.square(); let x3 = x2 * *x; sbox.0[0] = x3; x3 * x2 } (7, 1) => { let x3 = x.cube(); sbox.0[0] = x3; x3 * x3 * *x } (11, 2) => { let x2 = x.square(); let x3 = x2 * *x; let x9 = x3.cube(); sbox.0[0] = x3; sbox.0[1] = x9; x9 * x2 } _ => panic!( "Unexpected 
(DEGREE, REGISTERS) of ({}, {})", DEGREE, REGISTERS ), } } #[cfg(test)] mod tests { use crate::gadgets::poseidon2::Poseidon2BabyBearConfig; use ff_ext::{BabyBearExt4, PoseidonField}; use gkr_iop::circuit_builder::{CircuitBuilder, ConstraintSystem}; use p3::babybear::BabyBear; type E = BabyBearExt4; type F = BabyBear; #[test] fn test_poseidon2_gadget() { let mut cs = ConstraintSystem::new(|| "poseidon2 gadget test"); let mut cb = CircuitBuilder::<E>::new(&mut cs); // let poseidon2_constants = horizen_round_consts(); let rc = <F as PoseidonField>::get_default_perm_rc().into(); let _ = Poseidon2BabyBearConfig::construct(&mut cb, rc); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/signed_ext.rs
ceno_zkvm/src/gadgets/signed_ext.rs
use crate::{ circuit_builder::CircuitBuilder, instructions::riscv::constants::{LIMB_BITS, UInt}, witness::LkMultiplicity, }; use ff_ext::{ExtensionField, FieldInto}; use gkr_iop::error::CircuitBuilderError; use multilinear_extensions::{Expression, ToExpr, WitIn}; use p3::field::FieldAlgebra; use std::marker::PhantomData; use witness::set_val; /// Extract the most significant bit from an expression previously constrained /// to an 8 or 16-bit length. /// /// Uses 1 `WitIn` value to store the bit, one `assert_bit` constraint, and one /// `u8` or `u16` table lookup. #[derive(Debug)] pub struct SignedExtendConfig<E> { /// Most significant bit msb: WitIn, /// Number of bits of the represented value n_bits: usize, _marker: PhantomData<E>, } impl<E: ExtensionField> SignedExtendConfig<E> { pub fn construct_limb( cb: &mut CircuitBuilder<E>, val: Expression<E>, ) -> Result<Self, CircuitBuilderError> { Self::construct_circuit(cb, LIMB_BITS, val) } pub fn construct_byte( cb: &mut CircuitBuilder<E>, val: Expression<E>, ) -> Result<Self, CircuitBuilderError> { Self::construct_circuit(cb, 8, val) } pub fn expr(&self) -> Expression<E> { self.msb.expr() } fn construct_circuit( cb: &mut CircuitBuilder<E>, n_bits: usize, val: Expression<E>, // it's assumed that val is within [0, 2^N_BITS) ) -> Result<Self, CircuitBuilderError> { assert!(n_bits == 8 || n_bits == 16); let msb = cb.create_witin(|| "msb"); // require msb is boolean cb.assert_bit(|| "msb is boolean", msb.expr())?; // assert 2*val - msb*2^N_BITS is within range [0, 2^N_BITS) // - if val < 2^(N_BITS-1), then 2*val < 2^N_BITS, msb can only be zero. // - otherwise, 2*val >= 2^N_BITS, then msb can only be one. 
cb.assert_const_range( || "0 <= 2*val - msb*2^N_BITS < 2^N_BITS", 2 * val - (msb.expr() << n_bits), n_bits, )?; Ok(SignedExtendConfig { msb, n_bits, _marker: PhantomData, }) } /// Get the signed extended value pub fn signed_extended_value(&self, val: Expression<E>) -> UInt<E> { assert_eq!(UInt::<E>::LIMB_BITS, 16); let limb0 = match self.n_bits { 8 => self.msb.expr() * 0xff00 + val, 16 => val, _ => unreachable!("unsupported N_BITS = {}", self.n_bits), }; UInt::from_exprs_unchecked(vec![limb0, self.msb.expr() * 0xffff]) } pub fn assign_instance( &self, instance: &mut [E::BaseField], lk_multiplicity: &mut LkMultiplicity, val: u64, ) -> Result<(), CircuitBuilderError> { let msb = val >> (self.n_bits - 1); lk_multiplicity.assert_const_range(2 * val - (msb << self.n_bits), self.n_bits); set_val!(instance, self.msb, E::BaseField::from_canonical_u64(msb)); Ok(()) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/field/field_op.rs
ceno_zkvm/src/gadgets/field/field_op.rs
// The struct `FieldOpCols` is modified from succinctlabs/sp1 under MIT license // The MIT License (MIT) // Copyright (c) 2023 Succinct Labs // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use derive::AlignedBorrow; use ff_ext::{ExtensionField, SmallField}; use generic_array::{GenericArray, sequence::GenericSequence, typenum::Unsigned}; use gkr_iop::{circuit_builder::CircuitBuilder, error::CircuitBuilderError}; use multilinear_extensions::{Expression, ToExpr, WitIn}; use num::BigUint; use sp1_curves::{ params::{FieldParameters, Limbs}, polynomial::Polynomial, }; use std::fmt::Debug; use crate::{ gadgets::{ field::FieldOperation, util::{compute_root_quotient_and_shift, split_u16_limbs_to_u8_limbs}, util_expr::eval_field_operation, }, witness::LkMultiplicity, }; /// A set of columns to compute an emulated modular arithmetic operation. 
/// /// *Safety* The input operands (a, b) (not included in the operation columns) are assumed to be /// elements within the range `[0, 2^{P::nb_bits()})`. the result is also assumed to be within the /// same range. Let `M = P:modulus()`. The constraints of the function [`FieldOpCols::eval`] assert /// that: /// * When `op` is `FieldOperation::Add`, then `result = a + b mod M`. /// * When `op` is `FieldOperation::Mul`, then `result = a * b mod M`. /// * When `op` is `FieldOperation::Sub`, then `result = a - b mod M`. /// * When `op` is `FieldOperation::Div`, then `result * b = a mod M`. /// /// **Warning**: The constraints do not check for division by zero. The caller is responsible for /// ensuring that the division operation is valid. #[derive(Debug, Clone, AlignedBorrow)] #[repr(C)] pub struct FieldOpCols<T, P: FieldParameters> { /// The result of `a op b`, where a, b are field elements pub result: Limbs<T, P::Limbs>, pub carry: Limbs<T, P::Limbs>, pub(crate) witness_low: Limbs<T, P::Witness>, pub(crate) witness_high: Limbs<T, P::Witness>, } impl<P: FieldParameters> FieldOpCols<WitIn, P> { pub fn create<E: ExtensionField, NR, N>(cb: &mut CircuitBuilder<E>, name_fn: N) -> Self where NR: Into<String>, N: FnOnce() -> NR, { let name: String = name_fn().into(); Self { result: Limbs(GenericArray::generate(|_| { cb.create_witin(|| format!("{}_result", name)) })), carry: Limbs(GenericArray::generate(|_| { cb.create_witin(|| format!("{}_carry", name)) })), witness_low: Limbs(GenericArray::generate(|_| { cb.create_witin(|| format!("{}_witness_low", name)) })), witness_high: Limbs(GenericArray::generate(|_| { cb.create_witin(|| format!("{}_witness_high", name)) })), } } } impl<F: SmallField, P: FieldParameters> FieldOpCols<F, P> { pub fn populate_carry_and_witness( &mut self, a: &BigUint, b: &BigUint, op: FieldOperation, modulus: &BigUint, ) -> BigUint { let p_a: Polynomial<F> = P::to_limbs_field::<F, _>(a).into(); let p_b: Polynomial<F> = P::to_limbs_field::<F, 
_>(b).into(); let (result, carry) = match op { FieldOperation::Add => ((a + b) % modulus, (a + b - (a + b) % modulus) / modulus), FieldOperation::Mul => ((a * b) % modulus, (a * b - (a * b) % modulus) / modulus), FieldOperation::Sub | FieldOperation::Div => unreachable!(), }; debug_assert!(&result < modulus); debug_assert!(&carry < modulus); match op { FieldOperation::Add => debug_assert_eq!(&carry * modulus, a + b - &result), FieldOperation::Mul => debug_assert_eq!(&carry * modulus, a * b - &result), FieldOperation::Sub | FieldOperation::Div => unreachable!(), } // Here we have special logic for p_modulus because to_limbs_field only works for numbers in // the field, but modulus can == the field modulus so it can have 1 extra limb (ex. // uint256). let p_modulus_limbs = modulus .to_bytes_le() .iter() .map(|x| F::from_canonical_u8(*x)) .collect::<Vec<F>>(); let p_modulus: Polynomial<F> = p_modulus_limbs.iter().into(); let p_result: Polynomial<F> = P::to_limbs_field::<F, _>(&result).into(); let p_carry: Polynomial<F> = P::to_limbs_field::<F, _>(&carry).into(); // Compute the vanishing polynomial. let p_op = match op { FieldOperation::Add => &p_a + &p_b, FieldOperation::Mul => &p_a * &p_b, FieldOperation::Sub | FieldOperation::Div => unreachable!(), }; let p_vanishing: Polynomial<F> = &p_op - &p_result - &p_carry * &p_modulus; let p_witness = compute_root_quotient_and_shift( &p_vanishing, P::WITNESS_OFFSET, P::NB_BITS_PER_LIMB as u32, P::NB_WITNESS_LIMBS, ); let (mut p_witness_low, mut p_witness_high) = split_u16_limbs_to_u8_limbs(&p_witness); self.result = p_result.into(); self.carry = p_carry.into(); p_witness_low.resize(P::Witness::USIZE, F::ZERO); p_witness_high.resize(P::Witness::USIZE, F::ZERO); self.witness_low = Limbs(p_witness_low.try_into().unwrap()); self.witness_high = Limbs(p_witness_high.try_into().unwrap()); result } /// Populate these columns with a specified modulus. This is useful in the `mulmod` precompile /// as an example. 
#[allow(clippy::too_many_arguments)] pub fn populate_with_modulus( &mut self, record: &mut LkMultiplicity, a: &BigUint, b: &BigUint, modulus: &BigUint, op: FieldOperation, ) -> BigUint { if b == &BigUint::ZERO && op == FieldOperation::Div { // Division by 0 is allowed only when dividing 0 so that padded rows can be all 0. assert_eq!( *a, BigUint::ZERO, "division by zero is allowed only when dividing zero" ); } let result = match op { // If doing the subtraction operation, a - b = result, equivalent to a = result + b. FieldOperation::Sub => { let result = (modulus.clone() + a - b) % modulus; // We populate the carry, witness_low, witness_high as if we were doing an addition // with result + b. But we populate `result` with the actual result // of the subtraction because those columns are expected to contain // the result by the user. Note that this reversal means we have to // flip result, a correspondingly in the `eval` function. self.populate_carry_and_witness(&result, b, FieldOperation::Add, modulus); self.result = P::to_limbs_field::<F, _>(&result); result } // a / b = result is equivalent to a = result * b. FieldOperation::Div => { // As modulus is prime, we can use Fermat's little theorem to compute the // inverse. cfg_if::cfg_if! { if #[cfg(feature = "bigint-rug")] { use sp1_curves::utils::{biguint_to_rug, rug_to_biguint}; let rug_a = biguint_to_rug(a); let rug_b = biguint_to_rug(b); let rug_modulus = biguint_to_rug(modulus); let rug_result = (rug_a * rug_b.pow_mod(&(rug_modulus.clone() - 2u32), &rug_modulus.clone()).unwrap()) % rug_modulus.clone(); let result = rug_to_biguint(&rug_result); } else { let result = (a * b.modpow(&(modulus.clone() - 2u32), &modulus.clone())) % modulus.clone(); } } // We populate the carry, witness_low, witness_high as if we were doing a // multiplication with result * b. But we populate `result` with the // actual result of the multiplication because those columns are // expected to contain the result by the user. 
Note that this // reversal means we have to flip result, a correspondingly in the `eval` // function. self.populate_carry_and_witness(&result, b, FieldOperation::Mul, modulus); self.result = P::to_limbs_field::<F, _>(&result); result } _ => self.populate_carry_and_witness(a, b, op, modulus), }; // Range checks record.assert_byte_fields(&self.result.0); record.assert_byte_fields(&self.carry.0); record.assert_byte_fields(&self.witness_low.0); record.assert_byte_fields(&self.witness_high.0); result } /// Populate these columns without a specified modulus (will use the modulus of the field /// parameters). pub fn populate( &mut self, record: &mut LkMultiplicity, a: &BigUint, b: &BigUint, op: FieldOperation, ) -> BigUint { self.populate_with_modulus(record, a, b, &P::modulus(), op) } } impl<Expr: Clone, P: FieldParameters> FieldOpCols<Expr, P> { /// Allows an evaluation over opetations specified by boolean flags. #[allow(clippy::too_many_arguments)] pub fn eval_variable<E>( &self, builder: &mut CircuitBuilder<E>, a: &(impl Into<Polynomial<Expression<E>>> + Clone), b: &(impl Into<Polynomial<Expression<E>>> + Clone), modulus: &(impl Into<Polynomial<Expression<E>>> + Clone), is_add: impl ToExpr<E, Output = Expression<E>> + Clone, is_sub: impl ToExpr<E, Output = Expression<E>> + Clone, is_mul: impl ToExpr<E, Output = Expression<E>> + Clone, is_div: impl ToExpr<E, Output = Expression<E>> + Clone, ) -> Result<(), CircuitBuilderError> where E: ExtensionField, Expr: ToExpr<E, Output = Expression<E>>, Expression<E>: From<Expr>, { let p_a_param: Polynomial<Expression<E>> = (a).clone().into(); let p_b: Polynomial<Expression<E>> = (b).clone().into(); let p_res_param: Polynomial<Expression<E>> = self.result.clone().into(); let is_add: Expression<E> = is_add.expr(); let is_sub: Expression<E> = is_sub.expr(); let is_mul: Expression<E> = is_mul.expr(); let is_div: Expression<E> = is_div.expr(); let p_result = p_res_param.clone() * (is_add.clone() + is_mul.clone()) + p_a_param.clone() * 
(is_sub.clone() + is_div.clone()); let p_add = p_a_param.clone() + p_b.clone(); let p_sub = p_res_param.clone() + p_b.clone(); let p_mul = p_a_param.clone() * p_b.clone(); let p_div = p_res_param * p_b.clone(); let p_op = p_add * is_add + p_sub * is_sub + p_mul * is_mul + p_div * is_div; self.eval_with_polynomials(builder, p_op, modulus.clone(), p_result) } #[allow(clippy::too_many_arguments)] pub fn build_mul_and_carry<E>( &self, builder: &mut CircuitBuilder<E>, a: &(impl Into<Polynomial<Expression<E>>> + Clone), b: &(impl Into<Polynomial<Expression<E>>> + Clone), c: &(impl Into<Polynomial<Expression<E>>> + Clone), modulus: &(impl Into<Polynomial<Expression<E>>> + Clone), ) -> Result<(), CircuitBuilderError> where E: ExtensionField, Expr: ToExpr<E, Output = Expression<E>>, Expression<E>: From<Expr>, { let p_a: Polynomial<Expression<E>> = (a).clone().into(); let p_b: Polynomial<Expression<E>> = (b).clone().into(); let p_c: Polynomial<Expression<E>> = (c).clone().into(); let p_result: Polynomial<_> = self.result.clone().into(); let p_op = p_a * p_b + p_c; self.eval_with_polynomials(builder, p_op, modulus.clone(), p_result) } #[allow(clippy::too_many_arguments)] pub fn eval_with_modulus<E>( &self, builder: &mut CircuitBuilder<E>, a: &(impl Into<Polynomial<Expression<E>>> + Clone), b: &(impl Into<Polynomial<Expression<E>>> + Clone), modulus: &(impl Into<Polynomial<Expression<E>>> + Clone), op: FieldOperation, ) -> Result<(), CircuitBuilderError> where E: ExtensionField, Expr: ToExpr<E, Output = Expression<E>>, Expression<E>: From<Expr>, { let p_a_param: Polynomial<Expression<E>> = (a).clone().into(); let p_b: Polynomial<Expression<E>> = (b).clone().into(); let (p_a, p_result): (Polynomial<_>, Polynomial<_>) = match op { FieldOperation::Add | FieldOperation::Mul => (p_a_param, self.result.clone().into()), FieldOperation::Sub | FieldOperation::Div => (self.result.clone().into(), p_a_param), }; let p_op: Polynomial<Expression<E>> = match op { FieldOperation::Add | 
FieldOperation::Sub => p_a + p_b, FieldOperation::Mul | FieldOperation::Div => p_a * p_b, }; self.eval_with_polynomials(builder, p_op, modulus.clone(), p_result) } #[allow(clippy::too_many_arguments)] pub fn eval_with_polynomials<E>( &self, builder: &mut CircuitBuilder<E>, op: impl Into<Polynomial<Expression<E>>>, modulus: impl Into<Polynomial<Expression<E>>>, result: impl Into<Polynomial<Expression<E>>>, ) -> Result<(), CircuitBuilderError> where E: ExtensionField, Expr: ToExpr<E, Output = Expression<E>>, Expression<E>: From<Expr>, { let p_op: Polynomial<Expression<E>> = op.into(); let p_result: Polynomial<Expression<E>> = result.into(); let p_modulus: Polynomial<Expression<E>> = modulus.into(); let p_carry: Polynomial<Expression<E>> = self.carry.clone().into(); let p_op_minus_result: Polynomial<Expression<E>> = p_op - &p_result; let p_vanishing = p_op_minus_result - &(&p_carry * &p_modulus); let p_witness_low = self.witness_low.0.iter().into(); let p_witness_high = self.witness_high.0.iter().into(); eval_field_operation::<E, P>(builder, &p_vanishing, &p_witness_low, &p_witness_high)?; // Range checks for the result, carry, and witness columns.assert_ux<const C: usize> builder.assert_bytes(|| "field_op result", &self.result.0)?; builder.assert_bytes(|| "field_op carry", &self.carry.0)?; builder.assert_bytes(|| "field_op p_witness_low", p_witness_low.coefficients())?; builder.assert_bytes(|| "field_op p_witness_high", p_witness_high.coefficients()) } #[allow(clippy::too_many_arguments)] pub fn eval<E>( &self, builder: &mut CircuitBuilder<E>, a: &(impl Into<Polynomial<Expression<E>>> + Clone), b: &(impl Into<Polynomial<Expression<E>>> + Clone), op: FieldOperation, ) -> Result<(), CircuitBuilderError> where E: ExtensionField, Expr: ToExpr<E, Output = Expression<E>>, Expression<E>: From<Expr>, { let p_limbs = Polynomial::from_iter(P::modulus_field_iter::<E::BaseField>().map(|x| x.expr())); self.eval_with_modulus(builder, a, b, &p_limbs, op) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/field/range.rs
ceno_zkvm/src/gadgets/field/range.rs
// The struct `FieldLtCols` is modified from succinctlabs/sp1 under MIT license // The MIT License (MIT) // Copyright (c) 2023 Succinct Labs // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use ff_ext::{ExtensionField, SmallField}; use generic_array::{GenericArray, sequence::GenericSequence}; use gkr_iop::{circuit_builder::CircuitBuilder, error::CircuitBuilderError}; use itertools::izip; use multilinear_extensions::{Expression, ToExpr, WitIn}; use num::BigUint; use sp1_curves::{ params::{FieldParameters, Limbs}, polynomial::Polynomial, }; use std::fmt::Debug; use crate::witness::LkMultiplicity; /// Operation columns for verifying that `lhs < rhs`. #[derive(Debug, Clone)] #[repr(C)] pub struct FieldLtCols<T, P: FieldParameters> { /// Boolean flags to indicate the first byte in which the element is smaller than the modulus. 
pub(crate) byte_flags: Limbs<T, P::Limbs>, pub(crate) lhs_comparison_byte: T, pub(crate) rhs_comparison_byte: T, } impl<P: FieldParameters> FieldLtCols<WitIn, P> { pub fn create<E: ExtensionField, NR, N>(cb: &mut CircuitBuilder<E>, name_fn: N) -> Self where NR: Into<String>, N: FnOnce() -> NR, { let name: String = name_fn().into(); Self { byte_flags: Limbs(GenericArray::generate(|_| { cb.create_witin(|| format!("{}_byte_flag", name)) })), lhs_comparison_byte: cb.create_witin(|| format!("{}_lhs_comparison_byte", name)), rhs_comparison_byte: cb.create_witin(|| format!("{}_rhs_comparison_byte", name)), } } } impl<F: SmallField, P: FieldParameters> FieldLtCols<F, P> { pub fn populate(&mut self, record: &mut LkMultiplicity, lhs: &BigUint, rhs: &BigUint) { assert!(lhs < rhs); let value_limbs = P::to_limbs(lhs); let modulus = P::to_limbs(rhs); let mut byte_flags = vec![0u8; P::NB_LIMBS]; for (byte, modulus_byte, flag) in izip!( value_limbs.iter().rev(), modulus.iter().rev(), byte_flags.iter_mut().rev() ) { assert!(byte <= modulus_byte); if byte < modulus_byte { *flag = 1; self.lhs_comparison_byte = F::from_canonical_u8(*byte); self.rhs_comparison_byte = F::from_canonical_u8(*modulus_byte); record.lookup_ltu_byte(*byte as u64, *modulus_byte as u64); break; } } for (byte, flag) in izip!(byte_flags.iter(), self.byte_flags.0.iter_mut()) { *flag = F::from_canonical_u8(*byte); } } } impl<Expr: Clone, P: FieldParameters> FieldLtCols<Expr, P> { pub fn eval<E, E1, E2>( &self, builder: &mut CircuitBuilder<E>, lhs: &E1, rhs: &E2, ) -> Result<(), CircuitBuilderError> where E: ExtensionField, E1: Into<Polynomial<Expression<E>>> + Clone, E2: Into<Polynomial<Expression<E>>> + Clone, Expr: ToExpr<E, Output = Expression<E>>, Expression<E>: From<Expr>, { // The byte flags give a specification of which byte is `first_eq`, i,e, the first most // significant byte for which the lhs is smaller than the modulus. 
To verify the // less-than claim we need to check that: // * For all bytes until `first_eq` the lhs byte is equal to the modulus byte. // * For the `first_eq` byte the lhs byte is smaller than the modulus byte. // * all byte flags are boolean. // * only one byte flag is set to one, and the rest are set to zero. // Check the flags are of valid form. // Verify that only one flag is set to one. let mut sum_flags: Expression<E> = 0.into(); for flag in self.byte_flags.0.iter() { // Assert that the flag is boolean. builder.assert_bit(|| "flag", flag.expr())?; // Add the flag to the sum. sum_flags = sum_flags.clone() + flag.expr(); } // Assert that the sum is equal to one. builder.require_one(|| "sum_flags", sum_flags)?; // Check the less-than condition. // A flag to indicate whether an equality check is necessary (this is for all bytes from // most significant until the first inequality. let mut is_inequality_visited: Expression<E> = 0.into(); let rhs: Polynomial<_> = rhs.clone().into(); let lhs: Polynomial<_> = lhs.clone().into(); let mut lhs_comparison_byte: Expression<E> = 0.into(); let mut rhs_comparison_byte: Expression<E> = 0.into(); for (lhs_byte, rhs_byte, flag) in izip!( lhs.coefficients().iter().rev(), rhs.coefficients().iter().rev(), self.byte_flags.0.iter().rev() ) { // Once the byte flag was set to one, we turn off the quality check flag. // We can do this by calculating the sum of the flags since only `1` is set to `1`. 
is_inequality_visited = is_inequality_visited.expr() + flag.expr(); lhs_comparison_byte = lhs_comparison_byte.expr() + lhs_byte.expr() * flag.expr(); rhs_comparison_byte = rhs_comparison_byte.expr() + flag.expr() * rhs_byte.expr(); builder.require_zero( || "when not inequality visited, assert lhs_byte == rhs_byte", (1 - is_inequality_visited.clone()) * (lhs_byte.clone() - rhs_byte.clone()), )?; } builder.require_equal( || "lhs_comparison_byte == lhs_comparison_byte", self.lhs_comparison_byte.expr(), lhs_comparison_byte, )?; builder.require_equal( || "rhs_comparison_byte == rhs_comparison_byte", self.rhs_comparison_byte.expr(), rhs_comparison_byte, )?; // Send the comparison interaction. builder.lookup_ltu_byte( self.lhs_comparison_byte.expr(), self.rhs_comparison_byte.expr(), 1.into(), ) } pub fn condition_eval<E, E1, E2>( &self, builder: &mut CircuitBuilder<E>, lhs: &E1, rhs: &E2, cond: Expression<E>, ) -> Result<(), CircuitBuilderError> where E: ExtensionField, E1: Into<Polynomial<Expression<E>>> + Clone, E2: Into<Polynomial<Expression<E>>> + Clone, Expr: ToExpr<E, Output = Expression<E>>, Expression<E>: From<Expr>, { // The byte flags give a specification of which byte is `first_eq`, i,e, the first most // significant byte for which the lhs is smaller than the modulus. To verify the // less-than claim we need to check that: // * For all bytes until `first_eq` the lhs byte is equal to the modulus byte. // * For the `first_eq` byte the lhs byte is smaller than the modulus byte. // * all byte flags are boolean. // * only one byte flag is set to one, and the rest are set to zero. // Check the flags are of valid form. // Verify that only one flag is set to one. let mut sum_flags: Expression<E> = 0.into(); for flag in self.byte_flags.0.iter() { // Assert that the flag is boolean. 
// It should be builder.assert_bit(|| "if cond, flag", cond.expr() * flag.expr())?; // But this makes the degree of sumcheck to be 5 (which is not supported by the backend), // therefore, we just assume byte flags are boolean no matter cond is 1 or 0. builder.assert_bit(|| "flag", flag.expr())?; // Add the flag to the sum. sum_flags = sum_flags.clone() + flag.expr(); } // Assert that the sum is equal to one. builder.condition_require_one(|| "sum_flags", cond.expr(), sum_flags.expr())?; builder.condition_require_zero(|| "sum_flags", 1 - cond.expr(), sum_flags.expr())?; // Check the less-than condition. // A flag to indicate whether an equality check is necessary (this is for all bytes from // most significant until the first inequality. let mut is_inequality_visited: Expression<E> = 0.into(); let rhs: Polynomial<_> = rhs.clone().into(); let lhs: Polynomial<_> = lhs.clone().into(); let mut lhs_comparison_byte: Expression<E> = 0.into(); let mut rhs_comparison_byte: Expression<E> = 0.into(); for (lhs_byte, rhs_byte, flag) in izip!( lhs.coefficients().iter().rev(), rhs.coefficients().iter().rev(), self.byte_flags.0.iter().rev() ) { // Once the byte flag was set to one, we turn off the quality check flag. // We can do this by calculating the sum of the flags since only `1` is set to `1`. 
is_inequality_visited = is_inequality_visited.expr() + flag.expr(); lhs_comparison_byte = lhs_comparison_byte.expr() + lhs_byte.expr() * flag.expr(); rhs_comparison_byte = rhs_comparison_byte.expr() + flag.expr() * rhs_byte.expr(); builder.condition_require_zero( || "if cond, when not inequality visited, assert lhs_byte == rhs_byte", cond.expr(), (1 - is_inequality_visited.clone()) * (lhs_byte.clone() - rhs_byte.clone()), )?; } builder.condition_require_zero( || "if cond, lhs_comparison_byte == lhs_comparison_byte", cond.expr(), self.lhs_comparison_byte.expr() - lhs_comparison_byte, )?; builder.condition_require_zero( || "if cond, rhs_comparison_byte == rhs_comparison_byte", cond.expr(), self.rhs_comparison_byte.expr() - rhs_comparison_byte, )?; // Send the comparison interaction. // Since sum_flags = 1 when cond = 1, and sum_flags = 0 when cond = 0, // we have (self.lhs_comparison_byte < self.rhs_comparison_byte) == sum_flags builder.lookup_ltu_byte( self.lhs_comparison_byte.expr(), self.rhs_comparison_byte.expr(), sum_flags, ) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/field/field_sqrt.rs
ceno_zkvm/src/gadgets/field/field_sqrt.rs
// The struct `FieldSqrtCols` is modified from succinctlabs/sp1 under MIT license // The MIT License (MIT) // Copyright (c) 2023 Succinct Labs // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use derive::AlignedBorrow; use ff_ext::{ExtensionField, SmallField}; use gkr_iop::{circuit_builder::CircuitBuilder, error::CircuitBuilderError}; use multilinear_extensions::{Expression, ToExpr, WitIn}; use num::BigUint; use sp1_curves::params::{FieldParameters, Limbs}; use std::fmt::Debug; use crate::{ gadgets::{field::FieldOperation, field_op::FieldOpCols, range::FieldLtCols}, witness::LkMultiplicity, }; /// A set of columns to compute the square root in emulated arithmetic. /// /// *Safety*: The `FieldSqrtCols` asserts that `multiplication.result` is a square root of the given /// input lying within the range `[0, modulus)` with the least significant bit `lsb`. 
#[derive(Debug, Clone, AlignedBorrow)] #[repr(C)] pub struct FieldSqrtCols<T, P: FieldParameters> { /// The multiplication operation to verify that the sqrt and the input match. /// /// In order to save space, we actually store the sqrt of the input in `multiplication.result` /// since we'll receive the input again in the `eval` function. pub multiplication: FieldOpCols<T, P>, pub range: FieldLtCols<T, P>, // The least significant bit of the square root. pub lsb: T, } impl<P: FieldParameters> FieldSqrtCols<WitIn, P> { pub fn create<E: ExtensionField, NR, N>(cb: &mut CircuitBuilder<E>, name_fn: N) -> Self where NR: Into<String>, N: FnOnce() -> NR, { let name: String = name_fn().into(); Self { multiplication: FieldOpCols::create(cb, || format!("{}_multiplication", name)), range: FieldLtCols::create(cb, || format!("{}_range", name)), lsb: cb.create_bit(|| format!("{}_lsb", name)).unwrap(), } } } impl<F: SmallField, P: FieldParameters> FieldSqrtCols<F, P> { /// Populates the trace. /// /// `P` is the parameter of the field that each limb lives in. pub fn populate( &mut self, record: &mut LkMultiplicity, a: &BigUint, sqrt_fn: impl Fn(&BigUint) -> BigUint, ) -> BigUint { let modulus = P::modulus(); assert!(a < &modulus); let sqrt = sqrt_fn(a); debug_assert!(sqrt.clone() * sqrt.clone() % &modulus == *a); // Use FieldOpCols to compute result * result. let sqrt_squared = self .multiplication .populate(record, &sqrt, &sqrt, FieldOperation::Mul); // If the result is indeed the square root of a, then result * result = a. assert_eq!(sqrt_squared, a.clone()); // This is a hack to save a column in FieldSqrtCols. We will receive the value a again in // the eval function, so we'll overwrite it with the sqrt. self.multiplication.result = P::to_limbs_field::<F, _>(&sqrt); // Populate the range columns. 
self.range.populate(record, &sqrt, &modulus); let sqrt_bytes = P::to_limbs(&sqrt); self.lsb = F::from_canonical_u8(sqrt_bytes[0] & 1); record.lookup_and_byte(sqrt_bytes[0] as u64, 1); // Add the byte range check for `sqrt`. record.assert_byte_fields(&self.multiplication.result.0); sqrt } } impl<Expr: Clone, P: FieldParameters> FieldSqrtCols<Expr, P> { /// Calculates the square root of `a`. pub fn eval<E>( &self, builder: &mut CircuitBuilder<E>, a: &Limbs<Expr, P::Limbs>, is_odd: impl ToExpr<E, Output = Expression<E>> + Clone, ) -> Result<(), CircuitBuilderError> where E: ExtensionField, Expr: ToExpr<E, Output = Expression<E>>, Expression<E>: From<Expr>, { // As a space-saving hack, we store the sqrt of the input in `self.multiplication.result` // even though it's technically not the result of the multiplication. Now, we should // retrieve that value and overwrite that member variable with a. let sqrt = self.multiplication.result.clone(); let mut multiplication = self.multiplication.clone(); multiplication.result = a.clone(); // Compute sqrt * sqrt. We pass in P since we want its BaseField to be the mod. multiplication.eval(builder, &sqrt, &sqrt, FieldOperation::Mul)?; let modulus_limbs = P::to_limbs_expr(&P::modulus()); self.range.eval(builder, &sqrt, &modulus_limbs)?; // Range check that `sqrt` limbs are bytes. builder.assert_bytes(|| "sqrt", sqrt.0.as_slice())?; // Assert that the square root is the positive one, i.e., with least significant bit 0. // This is done by computing LSB = least_significant_byte & 1. builder.assert_bit(|| "lsb", self.lsb.clone().into())?; builder.require_equal(|| "lsb equality", self.lsb.clone().into(), is_odd.expr())?; builder.lookup_and_byte(sqrt[0].clone().into(), 1.into(), self.lsb.clone().into()) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/gadgets/field/field_inner_product.rs
ceno_zkvm/src/gadgets/field/field_inner_product.rs
// The struct `FieldInnerProductCols` is modified from succinctlabs/sp1 under MIT license // The MIT License (MIT) // Copyright (c) 2023 Succinct Labs // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. use derive::AlignedBorrow; use ff_ext::{ExtensionField, SmallField}; use generic_array::{GenericArray, sequence::GenericSequence}; use gkr_iop::{circuit_builder::CircuitBuilder, error::CircuitBuilderError}; use multilinear_extensions::{Expression, ToExpr, WitIn}; use num::{BigUint, Zero}; use sp1_curves::{ params::{FieldParameters, Limbs}, polynomial::Polynomial, }; use std::fmt::Debug; use crate::{ gadgets::{ util::{compute_root_quotient_and_shift, split_u16_limbs_to_u8_limbs}, util_expr::eval_field_operation, }, witness::LkMultiplicity, }; /// A set of columns to compute `InnerProduct([a], [b])` where a, b are emulated elements. 
/// /// *Safety*: The `FieldInnerProductCols` asserts that `result = sum_i a_i * b_i mod M` where /// `M` is the modulus `P::modulus()` under the assumption that the length of `a` and `b` is small /// enough so that the vanishing polynomial has limbs bounded by the witness shift. It is the /// responsibility of the caller to ensure that the length of `a` and `b` is small enough. #[derive(Debug, Clone, AlignedBorrow)] #[repr(C)] pub struct FieldInnerProductCols<T, P: FieldParameters> { /// The result of `a inner product b`, where a, b are field elements pub result: Limbs<T, P::Limbs>, pub(crate) carry: Limbs<T, P::Limbs>, pub(crate) witness_low: Limbs<T, P::Witness>, pub(crate) witness_high: Limbs<T, P::Witness>, } impl<P: FieldParameters> FieldInnerProductCols<WitIn, P> { pub fn create<E: ExtensionField, NR, N>(cb: &mut CircuitBuilder<E>, name_fn: N) -> Self where NR: Into<String>, N: FnOnce() -> NR, { let name: String = name_fn().into(); Self { result: Limbs(GenericArray::generate(|_| { cb.create_witin(|| format!("{}_result", name)) })), carry: Limbs(GenericArray::generate(|_| { cb.create_witin(|| format!("{}_carry", name)) })), witness_low: Limbs(GenericArray::generate(|_| { cb.create_witin(|| format!("{}_witness_low", name)) })), witness_high: Limbs(GenericArray::generate(|_| { cb.create_witin(|| format!("{}_witness_high", name)) })), } } } impl<F: SmallField, P: FieldParameters> FieldInnerProductCols<F, P> { pub fn populate( &mut self, record: &mut LkMultiplicity, a: &[BigUint], b: &[BigUint], ) -> BigUint { let p_a_vec: Vec<Polynomial<F>> = a .iter() .map(|x| P::to_limbs_field::<F, _>(x).into()) .collect(); let p_b_vec: Vec<Polynomial<F>> = b .iter() .map(|x| P::to_limbs_field::<F, _>(x).into()) .collect(); let modulus = &P::modulus(); let inner_product = a .iter() .zip(b.iter()) .fold(BigUint::zero(), |acc, (c, d)| acc + c * d); let result = &(&inner_product % modulus); let carry = &((&inner_product - result) / modulus); assert!(result < modulus); 
assert!(carry < &(2u32 * modulus)); assert_eq!(carry * modulus, inner_product - result); let p_modulus: Polynomial<F> = P::to_limbs_field::<F, _>(modulus).into(); let p_result: Polynomial<F> = P::to_limbs_field::<F, _>(result).into(); let p_carry: Polynomial<F> = P::to_limbs_field::<F, _>(carry).into(); // Compute the vanishing polynomial. let p_inner_product = p_a_vec .into_iter() .zip(p_b_vec) .fold(Polynomial::<F>::new(vec![F::ZERO]), |acc, (c, d)| { acc + &c * &d }); let p_vanishing = p_inner_product - &p_result - &p_carry * &p_modulus; assert_eq!(p_vanishing.degree(), P::NB_WITNESS_LIMBS); let p_witness = compute_root_quotient_and_shift( &p_vanishing, P::WITNESS_OFFSET, P::NB_BITS_PER_LIMB as u32, P::NB_WITNESS_LIMBS, ); let (p_witness_low, p_witness_high) = split_u16_limbs_to_u8_limbs(&p_witness); self.result = p_result.into(); self.carry = p_carry.into(); self.witness_low = Limbs(p_witness_low.try_into().unwrap()); self.witness_high = Limbs(p_witness_high.try_into().unwrap()); // Range checks record.assert_byte_fields(&self.result.0); record.assert_byte_fields(&self.carry.0); record.assert_byte_fields(&self.witness_low.0); record.assert_byte_fields(&self.witness_high.0); result.clone() } } impl<Expr: Clone, P: FieldParameters> FieldInnerProductCols<Expr, P> { pub fn eval<E>( &self, builder: &mut CircuitBuilder<E>, a: &[impl Into<Polynomial<Expression<E>>> + Clone], b: &[impl Into<Polynomial<Expression<E>>> + Clone], ) -> Result<(), CircuitBuilderError> where E: ExtensionField, Expr: ToExpr<E, Output = Expression<E>>, Expression<E>: From<Expr>, { let p_a_vec: Vec<Polynomial<Expression<E>>> = a.iter().cloned().map(|x| x.into()).collect(); let p_b_vec: Vec<Polynomial<Expression<E>>> = b.iter().cloned().map(|x| x.into()).collect(); let p_result: Polynomial<Expression<E>> = self.result.clone().into(); let p_carry: Polynomial<Expression<E>> = self.carry.clone().into(); let p_zero = Polynomial::<Expression<E>>::new(vec![Expression::<E>::ZERO]); let p_inner_product 
= p_a_vec .iter() .zip(p_b_vec.iter()) .map(|(p_a, p_b)| p_a * p_b) .collect::<Vec<_>>() .iter() .fold(p_zero, |acc, x| acc + x); let p_inner_product_minus_result = &p_inner_product - &p_result; let p_limbs = Polynomial::from_iter(P::modulus_field_iter::<E::BaseField>().map(|x| x.expr())); let p_vanishing = &p_inner_product_minus_result - &(&p_carry * &p_limbs); let p_witness_low = self.witness_low.0.iter().into(); let p_witness_high = self.witness_high.0.iter().into(); eval_field_operation::<E, P>(builder, &p_vanishing, &p_witness_low, &p_witness_high)?; // Range checks for the result, carry, and witness columns. builder.assert_bytes(|| "field_inner_product result", &self.result.0)?; builder.assert_bytes(|| "field_inner_product carry", &self.carry.0)?; builder.assert_bytes(|| "field_inner_product witness_low", &self.witness_low.0)?; builder.assert_bytes(|| "field_inner_product witness_high", &self.witness_high.0) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/program.rs
ceno_zkvm/src/tables/program.rs
use super::RMMCollections; use crate::{ circuit_builder::{CircuitBuilder, SetTableSpec}, error::ZKVMError, instructions::riscv::constants::LIMB_BITS, structs::{ProgramParams, ROMType}, tables::TableCircuit, }; use ceno_emul::{ InsnFormat, InsnFormat::*, InsnKind::*, Instruction, PC_STEP_SIZE, Program, WORD_SIZE, }; use ff_ext::{ExtensionField, FieldInto, SmallField}; use gkr_iop::utils::i64_to_base; use itertools::Itertools; use multilinear_extensions::{Expression, Fixed, ToExpr, WitIn}; use p3::field::FieldAlgebra; use rayon::iter::{IndexedParallelIterator, ParallelIterator}; use std::{collections::HashMap, marker::PhantomData}; use witness::{ InstancePaddingStrategy, RowMajorMatrix, next_pow2_instance_padding, set_fixed_val, set_val, }; /// This structure establishes the order of the fields in instruction records, common to the program table and circuit fetches. #[cfg(not(feature = "u16limb_circuit"))] #[derive(Clone, Debug)] pub struct InsnRecord<T>([T; 6]); #[cfg(feature = "u16limb_circuit")] #[derive(Clone, Debug)] pub struct InsnRecord<T>([T; 7]); impl<T> InsnRecord<T> { #[cfg(not(feature = "u16limb_circuit"))] pub fn new(pc: T, kind: T, rd: Option<T>, rs1: T, rs2: T, imm_internal: T) -> Self where T: From<u32>, { let rd = rd.unwrap_or_else(|| T::from(Instruction::RD_NULL)); InsnRecord([pc, kind, rd, rs1, rs2, imm_internal]) } #[cfg(feature = "u16limb_circuit")] pub fn new(pc: T, kind: T, rd: Option<T>, rs1: T, rs2: T, imm_internal: T, imm_sign: T) -> Self where T: From<u32>, { let rd = rd.unwrap_or_else(|| T::from(Instruction::RD_NULL)); InsnRecord([pc, kind, rd, rs1, rs2, imm_internal, imm_sign]) } pub fn as_slice(&self) -> &[T] { &self.0 } } impl<F: SmallField> InsnRecord<F> { fn from_decoded(pc: u32, insn: &Instruction) -> Self { #[cfg(not(feature = "u16limb_circuit"))] { InsnRecord([ (pc as u64).into_f(), (insn.kind as u64).into_f(), (insn.rd_internal() as u64).into_f(), (insn.rs1_or_zero() as u64).into_f(), (insn.rs2_or_zero() as u64).into_f(), 
InsnRecord::imm_internal(insn).1, ]) } #[cfg(feature = "u16limb_circuit")] { InsnRecord([ (pc as u64).into_f(), (insn.kind as u64).into_f(), (insn.rd_internal() as u64).into_f(), (insn.rs1_or_zero() as u64).into_f(), (insn.rs2_or_zero() as u64).into_f(), InsnRecord::imm_internal(insn).1, InsnRecord::<F>::imm_signed_internal(insn).1, ]) } } } impl<F: SmallField> InsnRecord<F> { /// The internal view of the immediate in the program table. /// This is encoded in a way that is efficient for circuits, depending on the instruction. /// /// These conversions are legal: /// - `as u32` and `as i32` as usual. /// - `i64_to_base(imm)` gives the field element going into the program table. /// - `as u64` in unsigned cases. #[cfg(not(feature = "u16limb_circuit"))] pub fn imm_internal(insn: &Instruction) -> (i64, F) { match (insn.kind, InsnFormat::from(insn.kind)) { // Prepare the immediate for ShiftImmInstruction. // The shift is implemented as a multiplication/division by 1 << immediate. (SLLI | SRLI | SRAI, _) => (1 << insn.imm, i64_to_base(1 << insn.imm)), // Unsigned view. // For example, u32::MAX is `u32::MAX mod p` in the finite field (_, R | U) | (ADDI | SLTIU | ANDI | XORI | ORI, _) => { (insn.imm as u32 as i64, i64_to_base(insn.imm as u32 as i64)) } // Signed view. // For example, u32::MAX is `-1 mod p` in the finite field. 
_ => (insn.imm as i64, i64_to_base(insn.imm as i64)), } } #[cfg(feature = "u16limb_circuit")] pub fn imm_internal(insn: &Instruction) -> (i64, F) { match (insn.kind, InsnFormat::from(insn.kind)) { // logic imm (XORI | ORI | ANDI, _) => ( insn.imm as i16 as i64, F::from_canonical_u16(insn.imm as u16), ), // for imm operate with program counter => convert to field value (_, B | J) => (insn.imm as i64, i64_to_base(insn.imm as i64)), // AUIPC (AUIPC, U) => ( // riv32 u type lower 12 bits are 0 // take all except for least significant limb (8 bit) (insn.imm as u32 >> 8) as i64, F::from_wrapped_u32(insn.imm as u32 >> 8), ), // U type (_, U) => ( (insn.imm as u32 >> 12) as i64, F::from_wrapped_u32(insn.imm as u32 >> 12), ), (JALR, _) => ( insn.imm as i16 as i64, F::from_canonical_u16(insn.imm as i16 as u16), ), // for default imm to operate with register value _ => ( insn.imm as i16 as i64, F::from_canonical_u16(insn.imm as i16 as u16), ), } } pub fn imm_signed_internal(insn: &Instruction) -> (i64, F) { match (insn.kind, InsnFormat::from(insn.kind)) { (SLLI | SRLI | SRAI, _) => (false as i64, F::from_bool(false)), // logic imm (XORI | ORI | ANDI, _) => ( (insn.imm >> LIMB_BITS) as i16 as i64, F::from_canonical_u16((insn.imm >> LIMB_BITS) as u16), ), // Unsigned view. (_, R | U) => (false as i64, F::from_bool(false)), // in particular imm operated with program counter // encode as field element, which do not need extra sign extension of imm (_, B | J) => (false as i64, F::from_bool(false)), // Signed views _ => ((insn.imm < 0) as i64, F::from_bool(insn.imm < 0)), } } } #[derive(Clone, Debug)] pub struct ProgramTableConfig { /// The fixed table of instruction records. record: InsnRecord<Fixed>, /// Multiplicity of the record - how many times an instruction is visited. 
mlt: WitIn, program_size: usize, } pub struct ProgramTableCircuit<E>(PhantomData<E>); impl<E: ExtensionField> TableCircuit<E> for ProgramTableCircuit<E> { type TableConfig = ProgramTableConfig; type FixedInput = Program; type WitnessInput<'a> = Program; fn name() -> String { "PROGRAM".into() } fn construct_circuit( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<ProgramTableConfig, ZKVMError> { assert!(params.program_size.is_power_of_two()); #[cfg(not(feature = "u16limb_circuit"))] let record = InsnRecord([ cb.create_fixed(|| "pc"), cb.create_fixed(|| "kind"), cb.create_fixed(|| "rd"), cb.create_fixed(|| "rs1"), cb.create_fixed(|| "rs2"), cb.create_fixed(|| "imm_internal"), ]); #[cfg(feature = "u16limb_circuit")] let record = InsnRecord([ cb.create_fixed(|| "pc"), cb.create_fixed(|| "kind"), cb.create_fixed(|| "rd"), cb.create_fixed(|| "rs1"), cb.create_fixed(|| "rs2"), cb.create_fixed(|| "imm_internal"), cb.create_fixed(|| "imm_sign"), ]); let mlt = cb.create_witin(|| "mlt"); let record_exprs = record .as_slice() .iter() .map(|f| Expression::Fixed(*f)) .collect_vec(); cb.lk_table_record( || "prog table", SetTableSpec { len: Some(params.program_size), structural_witins: vec![], }, ROMType::Instruction, record_exprs, mlt.expr(), )?; Ok(ProgramTableConfig { record, mlt, program_size: params.program_size, }) } fn generate_fixed_traces( config: &ProgramTableConfig, num_fixed: usize, program: &Self::FixedInput, ) -> RowMajorMatrix<E::BaseField> { let num_instructions = program.instructions.len(); let pc_base = program.base_address; assert!(num_instructions <= config.program_size); let mut fixed = RowMajorMatrix::<E::BaseField>::new( config.program_size, num_fixed, InstancePaddingStrategy::Default, ); fixed .par_rows_mut() .zip(0..num_instructions) .for_each(|(row, i)| { let pc = pc_base + (i * PC_STEP_SIZE) as u32; let insn = program.instructions[i]; let values: InsnRecord<_> = InsnRecord::from_decoded(pc, &insn); // Copy all the fields. 
for (col, val) in config.record.as_slice().iter().zip_eq(values.as_slice()) { set_fixed_val!(row, *col, *val); } }); fixed } fn assign_instances( config: &Self::TableConfig, num_witin: usize, num_structural_witin: usize, multiplicity: &[HashMap<u64, usize>], program: &Program, ) -> Result<RMMCollections<E::BaseField>, ZKVMError> { assert!(!program.instructions.is_empty()); assert!(num_structural_witin == 0 || num_structural_witin == 1); let multiplicity = &multiplicity[ROMType::Instruction as usize]; let mut prog_mlt = vec![0_usize; next_pow2_instance_padding(program.instructions.len())]; for (pc, mlt) in multiplicity { let i = (*pc as usize - program.base_address as usize) / WORD_SIZE; prog_mlt[i] = *mlt; } let mut witness = RowMajorMatrix::<E::BaseField>::new( config.program_size, num_witin, InstancePaddingStrategy::Default, ); let mut structural_witness = RowMajorMatrix::<E::BaseField>::new( config.program_size, 1, InstancePaddingStrategy::Default, ); witness .par_rows_mut() .zip_eq(structural_witness.par_rows_mut()) .zip(prog_mlt) .for_each(|((row, structural_row), mlt)| { set_val!( row, config.mlt, E::BaseField::from_canonical_u64(mlt as u64) ); *structural_row.last_mut().unwrap() = E::BaseField::ONE; }); Ok([witness, structural_witness]) } } #[cfg(test)] mod tests { use super::*; use crate::{ circuit_builder::ConstraintSystem, structs::ProgramParams, witness::LkMultiplicity, }; use ceno_emul::encode_rv32; use ff_ext::GoldilocksExt2 as E; use p3::goldilocks::Goldilocks as F; #[test] fn test_program_padding() { let mut cs = ConstraintSystem::<E>::new(|| "riscv"); let mut cb = CircuitBuilder::new(&mut cs); let actual_len = 3; let instructions = vec![encode_rv32(ADD, 1, 2, 3, 0); actual_len]; let program = Program::new( 0x2000_0000, 0x2000_0000, 0x2000_0000, instructions, Default::default(), ); let params = ProgramParams::default(); let config = ProgramTableCircuit::construct_circuit(&mut cb, &params).unwrap(); let check = |matrix: &RowMajorMatrix<F>| { 
assert_eq!( matrix.num_instances() + matrix.num_padding_instances(), params.program_size ); for row in matrix.iter_rows().skip(actual_len) { for col in row.iter() { assert_eq!(*col, F::ZERO); } } }; let fixed = ProgramTableCircuit::<E>::generate_fixed_traces(&config, cb.cs.num_fixed, &program); check(&fixed); let lkm = LkMultiplicity::default().into_finalize_result(); let witness = ProgramTableCircuit::<E>::assign_instances( &config, cb.cs.num_witin as usize, cb.cs.num_structural_witin as usize, &lkm.0, &program, ) .unwrap(); check(&witness[0]); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/range.rs
ceno_zkvm/src/tables/range.rs
//! Definition of the range tables and their circuits. mod range_impl; mod range_circuit; pub use range_circuit::{DoubleRangeTableCircuit, DynamicRangeTableCircuit, RangeTable}; use crate::ROMType; pub struct DoubleU8Table; impl RangeTable for DoubleU8Table { const ROM_TYPE: ROMType = ROMType::DoubleU8; fn len() -> usize { 1 << 16 } } pub type DoubleU8TableCircuit<E> = DoubleRangeTableCircuit<E, 8, 8, DoubleU8Table>;
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/mod.rs
ceno_zkvm/src/tables/mod.rs
use crate::{circuit_builder::CircuitBuilder, error::ZKVMError, structs::ProgramParams}; use ff_ext::ExtensionField; use gkr_iop::{ chip::Chip, gkr::{GKRCircuit, layer::Layer}, selector::SelectorType, }; use itertools::Itertools; use multilinear_extensions::ToExpr; use std::collections::HashMap; use witness::RowMajorMatrix; mod shard_ram; pub use shard_ram::*; mod range; pub use range::*; mod ops; pub use ops::*; mod program; pub use program::{InsnRecord, ProgramTableCircuit, ProgramTableConfig}; mod ram; pub use ram::*; /// format: [witness, structural_witness] pub type RMMCollections<F> = [RowMajorMatrix<F>; 2]; pub trait TableCircuit<E: ExtensionField> { type TableConfig: Send + Sync; type FixedInput: Send + Sync + ?Sized; type WitnessInput<'a>: Send + Sync + ?Sized; fn name() -> String; fn construct_circuit( circuit_builder: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<Self::TableConfig, ZKVMError>; fn build_gkr_iop_circuit( cb: &mut CircuitBuilder<E>, param: &ProgramParams, ) -> Result<(Self::TableConfig, Option<GKRCircuit<E>>), ZKVMError> { let config = Self::construct_circuit(cb, param)?; let r_table_len = cb.cs.r_table_expressions.len(); let w_table_len = cb.cs.w_table_expressions.len(); let lk_table_len = cb.cs.lk_table_expressions.len() * 2; let selector = cb.create_placeholder_structural_witin(|| "selector"); let selector_type = SelectorType::Prefix(selector.expr()); // all shared the same selector let (out_evals, mut chip) = ( [ // r_record (0..r_table_len).collect_vec(), // w_record (r_table_len..r_table_len + w_table_len).collect_vec(), // lk_record (r_table_len + w_table_len..r_table_len + w_table_len + lk_table_len).collect_vec(), // zero_record vec![], ], Chip::new_from_cb(cb, 0), ); // register selector to legacy constrain system if r_table_len > 0 { cb.cs.r_selector = Some(selector_type.clone()); } if w_table_len > 0 { cb.cs.w_selector = Some(selector_type.clone()); } if lk_table_len > 0 { cb.cs.lk_selector = 
Some(selector_type.clone()); } let layer = Layer::from_circuit_builder(cb, Self::name(), 0, out_evals); chip.add_layer(layer); Ok((config, Some(chip.gkr_circuit()))) } fn generate_fixed_traces( config: &Self::TableConfig, num_fixed: usize, input: &Self::FixedInput, ) -> RowMajorMatrix<E::BaseField>; fn assign_instances( config: &Self::TableConfig, num_witin: usize, num_structural_witin: usize, multiplicity: &[HashMap<u64, usize>], input: &Self::WitnessInput<'_>, ) -> Result<RMMCollections<E::BaseField>, ZKVMError>; }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/ram.rs
ceno_zkvm/src/tables/ram.rs
use ceno_emul::{Addr, VM_REG_COUNT, WORD_SIZE}; use ff_ext::ExtensionField; use gkr_iop::error::CircuitBuilderError; use multilinear_extensions::{Expression, StructuralWitIn, StructuralWitInType, ToExpr}; use ram_circuit::{DynVolatileRamCircuit, NonVolatileRamCircuit, PubIORamInitCircuit}; use crate::{ instructions::riscv::constants::UINT_LIMBS, structs::{ProgramParams, RAMType}, }; mod ram_circuit; mod ram_impl; use crate::{ chip_handler::general::PublicValuesQuery, circuit_builder::CircuitBuilder, scheme::PublicValues, structs::WitnessId, tables::ram::{ ram_circuit::LocalFinalRamCircuit, ram_impl::{DynVolatileRamTableInitConfig, NonVolatileInitTableConfig}, }, }; pub use ram_circuit::{DynVolatileRamTable, MemFinalRecord, MemInitRecord, NonVolatileTable}; #[derive(Clone)] pub struct HeapTable; impl DynVolatileRamTable for HeapTable { const RAM_TYPE: RAMType = RAMType::Memory; const V_LIMBS: usize = UINT_LIMBS; const ZERO_INIT: bool = true; const DESCENDING: bool = false; const DYNAMIC_OFFSET: bool = true; fn addr_expr<E: ExtensionField>( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<(Expression<E>, StructuralWitIn), CircuitBuilderError> { let max_len = Self::max_len(params); let addr = cb.create_structural_witin( || "addr", StructuralWitInType::EqualDistanceDynamicSequence { max_len, offset_instance_id: cb.query_heap_start_addr()?.0 as WitnessId, multi_factor: WORD_SIZE, descending: Self::DESCENDING, }, ); Ok((addr.expr(), addr)) } fn max_len(params: &ProgramParams) -> usize { let max_size = (params.platform.heap.end - params.platform.heap.start) .div_ceil(WORD_SIZE as u32) as Addr; 1 << (u32::BITS - 1 - max_size.leading_zeros()) } fn offset_addr(_params: &ProgramParams) -> Addr { unimplemented!("heap offset is dynamic") } fn dynamic_offset_addr(params: &ProgramParams, pv: &PublicValues) -> Addr { let heap_start = pv.heap_start_addr; assert!( heap_start >= params.platform.heap.start, "heap_start {:x} < platform min heap start {:x}", heap_start, 
params.platform.heap.start ); heap_start } fn end_addr(_params: &ProgramParams) -> Addr { unimplemented!("heap end address is dynamic") } fn name() -> &'static str { "HeapTable" } fn dynamic_addr(params: &ProgramParams, entry_index: usize, pv: &PublicValues) -> Addr { let addr = Self::dynamic_offset_addr(params, pv) + (entry_index * WORD_SIZE) as Addr; assert!( addr < params.platform.heap.end, "heap addr {:x} >= platform max heap end {:x}", addr, params.platform.heap.end ); addr } } pub type HeapInitCircuit<E> = DynVolatileRamCircuit<E, HeapTable, DynVolatileRamTableInitConfig<HeapTable>>; #[derive(Clone)] pub struct StackTable; impl DynVolatileRamTable for StackTable { const RAM_TYPE: RAMType = RAMType::Memory; const V_LIMBS: usize = UINT_LIMBS; const ZERO_INIT: bool = true; const DESCENDING: bool = true; fn offset_addr(params: &ProgramParams) -> Addr { // stack address goes in descending order // end address is exclusive params.platform.stack.end - WORD_SIZE as u32 } fn end_addr(params: &ProgramParams) -> Addr { // stack address goes in descending order params.platform.stack.start } fn name() -> &'static str { "StackTable" } fn max_len(params: &ProgramParams) -> usize { let max_size = (Self::offset_addr(params) - Self::end_addr(params)) .div_ceil(WORD_SIZE as u32) as Addr + 1; 1 << (u32::BITS - 1 - max_size.leading_zeros()) // prev_power_of_2 } } pub type StackInitCircuit<E> = DynVolatileRamCircuit<E, StackTable, DynVolatileRamTableInitConfig<StackTable>>; #[derive(Clone)] pub struct HintsTable; impl DynVolatileRamTable for HintsTable { const RAM_TYPE: RAMType = RAMType::Memory; const V_LIMBS: usize = UINT_LIMBS; const ZERO_INIT: bool = false; const DESCENDING: bool = false; const DYNAMIC_OFFSET: bool = true; fn addr_expr<E: ExtensionField>( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<(Expression<E>, StructuralWitIn), CircuitBuilderError> { let max_len = Self::max_len(params); let addr = cb.create_structural_witin( || "addr", 
StructuralWitInType::EqualDistanceDynamicSequence { max_len, offset_instance_id: cb.query_hint_start_addr()?.0 as WitnessId, multi_factor: WORD_SIZE, descending: Self::DESCENDING, }, ); Ok((addr.expr(), addr)) } fn max_len(params: &ProgramParams) -> usize { let max_size = (params.platform.hints.end - params.platform.hints.start) .div_ceil(WORD_SIZE as u32) as Addr; 1 << (u32::BITS - 1 - max_size.leading_zeros()) } fn offset_addr(_params: &ProgramParams) -> Addr { unimplemented!("hints offset is dynamic") } fn dynamic_offset_addr(params: &ProgramParams, pv: &PublicValues) -> Addr { let hint_start = pv.hint_start_addr; assert!( hint_start >= params.platform.hints.start, "hint_start {:x} < platform min hint start {:x}", hint_start, params.platform.hints.start ); hint_start } fn end_addr(_params: &ProgramParams) -> Addr { unimplemented!("hints end address is dynamic") } fn dynamic_addr(params: &ProgramParams, entry_index: usize, pv: &PublicValues) -> Addr { let addr = Self::dynamic_offset_addr(params, pv) + (entry_index * WORD_SIZE) as Addr; assert!( addr < params.platform.hints.end, "hint addr {:x} >= platform max hint end {:x}", addr, params.platform.hints.end ); addr } fn name() -> &'static str { "HintsTable" } } pub type HintsInitCircuit<E> = DynVolatileRamCircuit<E, HintsTable, DynVolatileRamTableInitConfig<HintsTable>>; /// RegTable, fix size without offset #[derive(Clone)] pub struct RegTable; impl NonVolatileTable for RegTable { const RAM_TYPE: RAMType = RAMType::Register; const V_LIMBS: usize = UINT_LIMBS; const WRITABLE: bool = true; fn name() -> &'static str { "RegTable" } fn len(_params: &ProgramParams) -> usize { VM_REG_COUNT.next_power_of_two() } } pub type RegTableInitCircuit<E> = NonVolatileRamCircuit<E, RegTable, NonVolatileInitTableConfig<RegTable>>; #[derive(Clone)] pub struct StaticMemTable; impl NonVolatileTable for StaticMemTable { const RAM_TYPE: RAMType = RAMType::Memory; const V_LIMBS: usize = UINT_LIMBS; const WRITABLE: bool = true; fn name() 
-> &'static str { "StaticMemTable" } fn len(params: &ProgramParams) -> usize { params.static_memory_len } } pub type StaticMemInitCircuit<E> = NonVolatileRamCircuit<E, StaticMemTable, NonVolatileInitTableConfig<StaticMemTable>>; #[derive(Clone)] pub struct PubIOTable; impl NonVolatileTable for PubIOTable { const RAM_TYPE: RAMType = RAMType::Memory; const V_LIMBS: usize = UINT_LIMBS; const WRITABLE: bool = false; fn name() -> &'static str { "PubIOTable" } fn len(params: &ProgramParams) -> usize { params.pubio_len } } pub type PubIOInitCircuit<E> = PubIORamInitCircuit<E, PubIOTable>; pub type LocalFinalCircuit<E> = LocalFinalRamCircuit<UINT_LIMBS, E>;
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/ops.rs
ceno_zkvm/src/tables/ops.rs
//! Definition of the ops tables and their circuits. mod ops_impl; mod ops_circuit; use gkr_iop::tables::ops::{AndTable, LtuTable, OrTable, PowTable, XorTable}; pub use ops_circuit::OpsTableCircuit; pub type AndTableCircuit<E> = OpsTableCircuit<E, AndTable>; pub type OrTableCircuit<E> = OpsTableCircuit<E, OrTable>; pub type XorTableCircuit<E> = OpsTableCircuit<E, XorTable>; pub type LtuTableCircuit<E> = OpsTableCircuit<E, LtuTable>; pub type PowTableCircuit<E> = OpsTableCircuit<E, PowTable>; #[cfg(test)] mod tests { use super::*; use crate::{ circuit_builder::{CircuitBuilder, ConstraintSystem}, structs::ProgramParams, tables::TableCircuit, }; use ff_ext::{GoldilocksExt2 as E, SmallField}; use gkr_iop::tables::OpsTable; #[test] fn test_ops_pow_table_assign() { let mut cs = ConstraintSystem::<E>::new(|| "riscv"); let mut cb = CircuitBuilder::new(&mut cs); let config = PowTableCircuit::<E>::construct_circuit(&mut cb, &ProgramParams::default()).unwrap(); let fixed = PowTableCircuit::<E>::generate_fixed_traces(&config, cb.cs.num_fixed, &()); for (i, row) in fixed.iter_rows().enumerate() { let (base, exp) = PowTable::unpack(i as u64); assert_eq!(PowTable::pack(base, exp), i as u64); assert_eq!(base, row[0].to_canonical_u64()); assert_eq!(exp, row[1].to_canonical_u64()); assert_eq!(base.pow(exp.try_into().unwrap()), row[2].to_canonical_u64()); } } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/shard_ram.rs
ceno_zkvm/src/tables/shard_ram.rs
use std::{collections::HashMap, iter::repeat_n, marker::PhantomData}; use crate::{ Value, chip_handler::general::PublicValuesQuery, e2e::RAMRecord, error::ZKVMError, gadgets::Poseidon2Config, instructions::riscv::constants::UINT_LIMBS, scheme::septic_curve::{SepticExtension, SepticPoint}, structs::{ProgramParams, RAMType}, tables::{RMMCollections, TableCircuit}, witness::LkMultiplicity, }; use ceno_emul::WordAddr; use ff_ext::{ExtensionField, FieldInto, PoseidonField, SmallField}; use gkr_iop::{ chip::Chip, circuit_builder::CircuitBuilder, error::CircuitBuilderError, gkr::{GKRCircuit, layer::Layer}, selector::SelectorType, }; use itertools::{Itertools, chain}; use multilinear_extensions::{Expression, ToExpr, WitIn, util::max_usable_threads}; use p3::{ field::{Field, FieldAlgebra}, matrix::{Matrix, dense::RowMajorMatrix}, symmetric::Permutation, }; use rayon::{ iter::{ IndexedParallelIterator, IntoParallelIterator, IntoParallelRefMutIterator, ParallelExtend, ParallelIterator, }, prelude::ParallelSliceMut, slice::ParallelSlice, }; use std::ops::Deref; use witness::{InstancePaddingStrategy, next_pow2_instance_padding, set_val}; use crate::{instructions::riscv::constants::UInt, scheme::constants::SEPTIC_EXTENSION_DEGREE}; /// A record for a read/write into the shard RAM #[derive(Debug, Clone)] pub struct ShardRamRecord { pub addr: u32, pub ram_type: RAMType, pub value: u32, pub shard: u64, pub local_clk: u64, pub global_clk: u64, pub is_to_write_set: bool, } impl From<(&WordAddr, &RAMRecord, bool)> for ShardRamRecord { fn from((vma, record, is_to_write_set): (&WordAddr, &RAMRecord, bool)) -> Self { let addr = match record.ram_type { RAMType::Register => record.reg_id as u32, RAMType::Memory => (*vma).into(), _ => unreachable!(), }; let (shard, local_clk, global_clk, value) = if is_to_write_set { // global write -> local read ( record.shard_id, record.shard_cycle, record.cycle, // local read is for cancel final write value in `Write` set record.value, ) } else { // 
global read -> local write debug_assert_eq!(record.shard_cycle, 0); ( record.shard_id, 0, record.prev_cycle, // local write is for adapting write from previous shard record.prev_value.unwrap_or(record.value), ) }; ShardRamRecord { addr, ram_type: record.ram_type, value, shard: shard as u64, local_clk, global_clk, is_to_write_set, } } } /// An EC point corresponding to a cross chunk read/write record /// whose x-coordinate is derived from Poseidon2 hash of the record #[derive(Clone, Debug)] pub struct ECPoint<E: ExtensionField> { pub nonce: u32, pub point: SepticPoint<E::BaseField>, } impl ShardRamRecord { pub fn to_ec_point<E: ExtensionField, P: Permutation<Vec<E::BaseField>>>( &self, hasher: &P, ) -> ECPoint<E> { let mut nonce = 0; let mut input = vec![ E::BaseField::from_canonical_u32(self.addr), E::BaseField::from_canonical_u32(self.ram_type as u32), E::BaseField::from_canonical_u32(self.value & 0xFFFF), // lower 16 bits E::BaseField::from_canonical_u32((self.value >> 16) & 0xFFFF), // higher 16 bits E::BaseField::from_canonical_u64(self.shard), E::BaseField::from_canonical_u64(self.global_clk), E::BaseField::from_canonical_u32(nonce), E::BaseField::ZERO, E::BaseField::ZERO, E::BaseField::ZERO, E::BaseField::ZERO, E::BaseField::ZERO, E::BaseField::ZERO, E::BaseField::ZERO, E::BaseField::ZERO, E::BaseField::ZERO, ]; let prime = E::BaseField::order().to_u64_digits()[0]; loop { let x: SepticExtension<E::BaseField> = hasher.permute(input.clone())[0..SEPTIC_EXTENSION_DEGREE].into(); if let Some(p) = SepticPoint::from_x(x) { let y6 = (p.y.0)[SEPTIC_EXTENSION_DEGREE - 1].to_canonical_u64(); let is_y_in_2nd_half = y6 >= (prime / 2); // we negate y if needed // to ensure read => y in [0, p/2) and write => y in [p/2, p) let negate = match (self.is_to_write_set, is_y_in_2nd_half) { (true, false) => true, // write, y in [0, p/2) (false, true) => true, // read, y in [p/2, p) _ => false, }; let point = if negate { -p } else { p }; return ECPoint { nonce, point }; } else { // 
try again with different nonce nonce += 1; input[6] = E::BaseField::from_canonical_u32(nonce); } } } } /// opcode circuit + mem init/final table + local finalize circuit + shard ram circuit /// shard ram circuit is used to ensure the **local** reads and writes produced by /// opcode circuits / memory init / memory finalize table / local finalize circuit /// can balance out. /// /// 1. For a local memory read record whose previous write is not in the same shard, /// the shard ram circuit will read it from the **global set** and insert a local write record. /// 2. For a local memory write record which will **not** be read in the future, /// the local finalize circuit will consume it by inserting a local read record. /// 3. For a local memory write record which will be read in the future, /// the shard ram circuit will insert a local read record and write it to the **global set**. pub struct ShardRamConfig<E: ExtensionField> { addr: WitIn, is_ram_register: WitIn, value: UInt<E>, shard: WitIn, global_clk: WitIn, local_clk: WitIn, nonce: WitIn, // if it's write to global set, then insert a local read record // s.t. local offline memory checking can cancel out // serves as propagating local write to global. 
is_global_write: WitIn, x: Vec<WitIn>, y: Vec<WitIn>, slope: Vec<WitIn>, perm_config: Poseidon2Config<E, 16, 7, 1, 4, 13>, } impl<E: ExtensionField> ShardRamConfig<E> { // TODO: make `WIDTH`, `HALF_FULL_ROUNDS`, `PARTIAL_ROUNDS` generic parameters pub fn configure(cb: &mut CircuitBuilder<E>) -> Result<Self, CircuitBuilderError> { let x: Vec<WitIn> = (0..SEPTIC_EXTENSION_DEGREE) .map(|i| cb.create_witin(|| format!("x{}", i))) .collect(); let y: Vec<WitIn> = (0..SEPTIC_EXTENSION_DEGREE) .map(|i| cb.create_witin(|| format!("y{}", i))) .collect(); let slope: Vec<WitIn> = (0..SEPTIC_EXTENSION_DEGREE) .map(|i| cb.create_witin(|| format!("slope{}", i))) .collect(); let addr = cb.create_witin(|| "addr"); let is_ram_register = cb.create_witin(|| "is_ram_register"); let value = UInt::new_unchecked(|| "value", cb)?; let shard = cb.create_witin(|| "shard"); let global_clk = cb.create_witin(|| "global_clk"); let local_clk = cb.create_witin(|| "local_clk"); let nonce = cb.create_witin(|| "nonce"); let is_global_write = cb.create_witin(|| "is_global_write"); let is_ram_reg: Expression<E> = is_ram_register.expr(); let reg: Expression<E> = RAMType::Register.into(); let mem: Expression<E> = RAMType::Memory.into(); let ram_type: Expression<E> = is_ram_reg.clone() * reg + (1 - is_ram_reg) * mem; let rc = <E::BaseField as PoseidonField>::get_default_perm_rc().into(); let perm_config = Poseidon2Config::construct(cb, rc); let mut input = vec![]; input.push(addr.expr()); input.push(ram_type.clone()); // memory expr has same number of limbs as register expr input.extend(value.memory_expr()); input.push(shard.expr()); input.push(global_clk.expr()); // add nonce to ensure poseidon2(input) always map to a valid ec point input.push(nonce.expr()); input.extend(repeat_n(E::BaseField::ZERO.expr(), 16 - input.len())); let mut record = vec![]; record.push(ram_type.clone()); record.push(addr.expr()); record.extend(value.memory_expr()); record.push(local_clk.expr()); // if is_global_write = 1, then 
it means we are propagating a local write to global // so we need to insert a local read record to cancel out this local write cb.assert_bit(|| "is_global_write must be boolean", is_global_write.expr())?; // TODO: for all local reads, enforce they come to global writes // TODO: for all local writes, enforce they come from global reads // global read => insert a local write with local_clk = 0 cb.condition_require_zero( || "is_global_read => local_clk = 0", 1 - is_global_write.expr(), local_clk.expr(), )?; // TODO: enforce shard = shard_id in the public values cb.read_rlc_record( || "r_record", ram_type.clone(), record.clone(), cb.rlc_chip_record(record.clone()), )?; cb.write_rlc_record( || "w_record", ram_type, record.clone(), cb.rlc_chip_record(record), )?; // enforces final_sum = \sum_i (x_i, y_i) using ecc quark protocol let final_sum = cb.query_global_rw_sum()?; cb.ec_sum( x.iter().map(|xi| xi.expr()).collect::<Vec<_>>(), y.iter().map(|yi| yi.expr()).collect::<Vec<_>>(), slope.iter().map(|si| si.expr()).collect::<Vec<_>>(), final_sum.into_iter().map(|x| x.expr()).collect::<Vec<_>>(), ); // enforces x = poseidon2([addr, ram_type, value[0], value[1], shard, global_clk, nonce, 0, ..., 0]) for (input_expr, hasher_input) in input.into_iter().zip_eq(perm_config.inputs().into_iter()) { cb.require_equal(|| "poseidon2 input", input_expr, hasher_input)?; } for (xi, hasher_output) in x.iter().zip(perm_config.output().into_iter()) { cb.require_equal(|| "x = poseidon2's output", xi.expr(), hasher_output)?; } // both (x, y) and (x, -y) are valid ec points // if is_global_write = 1, then y should be in [0, p/2) // if is_global_write = 0, then y should be in [p/2, p) // TODO: enforce 0 <= y < p/2 if is_global_write = 1 // enforce p/2 <= y < p if is_global_write = 0 Ok(ShardRamConfig { x, y, slope, addr, is_ram_register, value, shard, global_clk, local_clk, nonce, is_global_write, perm_config, }) } } /// This chip is used to manage read/write into a global set /// shared among 
multiple shards #[derive(Default)] pub struct ShardRamCircuit<E> { _marker: PhantomData<E>, } #[derive(Clone, Debug)] pub struct ShardRamInput<E: ExtensionField> { pub name: &'static str, pub record: ShardRamRecord, pub ec_point: ECPoint<E>, } impl<E: ExtensionField> ShardRamCircuit<E> { fn assign_instance( config: &ShardRamConfig<E>, instance: &mut [E::BaseField], _lk_multiplicity: &mut LkMultiplicity, input: &ShardRamInput<E>, ) -> Result<(), crate::error::ZKVMError> { // assign basic fields let record = &input.record; let is_ram_register = match record.ram_type { RAMType::Register => 1, RAMType::Memory => 0, _ => unreachable!(), }; set_val!(instance, config.addr, record.addr as u64); set_val!(instance, config.is_ram_register, is_ram_register as u64); let value = Value::new_unchecked(record.value); config.value.assign_limbs(instance, value.as_u16_limbs()); set_val!(instance, config.shard, record.shard); set_val!(instance, config.global_clk, record.global_clk); set_val!(instance, config.local_clk, record.local_clk); set_val!( instance, config.is_global_write, record.is_to_write_set as u64 ); // assign (x, y) and nonce let ECPoint { nonce, point } = &input.ec_point; set_val!(instance, config.nonce, *nonce as u64); config .x .iter() .chain(config.y.iter()) .zip_eq((point.x.deref()).iter().chain((point.y.deref()).iter())) .for_each(|(witin, fe)| { instance[witin.id as usize] = *fe; }); let ram_type = E::BaseField::from_canonical_u32(record.ram_type as u32); let mut input = [E::BaseField::ZERO; 16]; let k = UINT_LIMBS; input[0] = E::BaseField::from_canonical_u32(record.addr); input[1] = ram_type; input[2..(k + 2)] .iter_mut() .zip(value.as_u16_limbs().iter()) .for_each(|(i, v)| *i = E::BaseField::from_canonical_u16(*v)); input[2 + k] = E::BaseField::from_canonical_u64(record.shard); input[2 + k + 1] = E::BaseField::from_canonical_u64(record.global_clk); input[2 + k + 2] = E::BaseField::from_canonical_u32(*nonce); config .perm_config // TODO: remove hardcoded constant 
28 .assign_instance(&mut instance[28 + UINT_LIMBS..], input); Ok(()) } pub fn extract_ec_sum( config: &ShardRamConfig<E>, rmm: &witness::RowMajorMatrix<<E as ExtensionField>::BaseField>, ) -> SepticPoint<<E as ExtensionField>::BaseField> { assert!(rmm.height() >= 2); let instance = &rmm[rmm.height() - 2]; let xy = config .x .iter() .chain(config.y.iter()) .map(|witin| instance[witin.id as usize]) .collect_vec(); let x: SepticExtension<E::BaseField> = xy[0..SEPTIC_EXTENSION_DEGREE].into(); let y: SepticExtension<E::BaseField> = xy[SEPTIC_EXTENSION_DEGREE..].into(); SepticPoint::from_affine(x, y) } } impl<E: ExtensionField> TableCircuit<E> for ShardRamCircuit<E> { type TableConfig = ShardRamConfig<E>; type FixedInput = (); type WitnessInput<'a> = [ShardRamInput<E>]; fn name() -> String { "ShardRamCircuit".to_string() } fn construct_circuit( cb: &mut CircuitBuilder<E>, _param: &ProgramParams, ) -> Result<Self::TableConfig, crate::error::ZKVMError> { let config = ShardRamConfig::configure(cb)?; Ok(config) } fn build_gkr_iop_circuit( cb: &mut CircuitBuilder<E>, param: &ProgramParams, ) -> Result<(Self::TableConfig, Option<GKRCircuit<E>>), crate::error::ZKVMError> { // create three selectors: selector_r, selector_w, selector_zero let selector_r = cb.create_placeholder_structural_witin(|| "selector_r"); let selector_w = cb.create_placeholder_structural_witin(|| "selector_w"); let selector_zero = cb.create_placeholder_structural_witin(|| "selector_zero"); let config = Self::construct_circuit(cb, param)?; let w_len = cb.cs.w_expressions.len(); let r_len = cb.cs.r_expressions.len(); let lk_len = cb.cs.lk_expressions.len(); let zero_len = cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len(); let selector_r = SelectorType::Prefix(selector_r.expr()); // note that the actual offset should be set by prover // depending on the number of local read instances let selector_w = SelectorType::Prefix(selector_w.expr()); // TODO: when selector_r = 1 => 
selector_zero = 1 // when selector_w = 1 => selector_zero = 1 let selector_zero = SelectorType::Prefix(selector_zero.expr()); cb.cs.r_selector = Some(selector_r); cb.cs.w_selector = Some(selector_w); cb.cs.zero_selector = Some(selector_zero.clone()); cb.cs.lk_selector = Some(selector_zero); // all shared the same selector let (out_evals, mut chip) = ( [ // r_record (0..r_len).collect_vec(), // w_record (r_len..r_len + w_len).collect_vec(), // lk_record (r_len + w_len..r_len + w_len + lk_len).collect_vec(), // zero_record (0..zero_len).collect_vec(), ], Chip::new_from_cb(cb, 0), ); let layer = Layer::from_circuit_builder(cb, format!("{}_main", Self::name()), 0, out_evals); chip.add_layer(layer); Ok((config, Some(chip.gkr_circuit()))) } fn generate_fixed_traces( _config: &Self::TableConfig, _num_fixed: usize, _input: &Self::FixedInput, ) -> witness::RowMajorMatrix<<E as ExtensionField>::BaseField> { unimplemented!() } /// steps format: local reads ++ local writes fn assign_instances( config: &Self::TableConfig, num_witin: usize, num_structural_witin: usize, _multiplicity: &[HashMap<u64, usize>], steps: &Self::WitnessInput<'_>, ) -> Result<RMMCollections<E::BaseField>, ZKVMError> { if steps.is_empty() { return Ok([ witness::RowMajorMatrix::empty(), witness::RowMajorMatrix::empty(), ]); } // FIXME selector is the only structural witness // this is workaround, as call `construct_circuit` will not initialized selector // we can remove this one all opcode unittest migrate to call `build_gkr_iop_circuit` assert_eq!(num_structural_witin, 3); let selector_r_witin = WitIn { id: 0 }; let selector_w_witin = WitIn { id: 1 }; let selector_zero_witin = WitIn { id: 2 }; let nthreads = max_usable_threads(); // local read iff it's global write let num_local_reads = steps .iter() .take_while(|s| s.record.is_to_write_set) .count(); tracing::debug!( "{} local reads / {} local writes in global chip", num_local_reads, steps.len() - num_local_reads ); let num_instance_per_batch = if 
steps.len() > 256 { steps.len().div_ceil(nthreads) } else { steps.len() } .max(1); let n = next_pow2_instance_padding(steps.len()); // compute the input for the binary tree for ec point summation let lk_multiplicity = LkMultiplicity::default(); // *2 because we need to store the internal nodes of binary tree for ec point summation let num_rows_padded = 2 * n; let mut raw_witin = { let matrix_size = num_rows_padded * num_witin; let mut value = Vec::with_capacity(matrix_size); value.par_extend( (0..matrix_size) .into_par_iter() .map(|_| E::BaseField::default()), ); RowMajorMatrix::new(value, num_witin) }; let mut raw_structual_witin = { let matrix_size = num_rows_padded * num_structural_witin; let mut value = Vec::with_capacity(matrix_size); value.par_extend( (0..matrix_size) .into_par_iter() .map(|_| E::BaseField::default()), ); RowMajorMatrix::new(value, num_structural_witin) }; let raw_witin_iter = raw_witin.values[0..steps.len() * num_witin] .par_chunks_mut(num_instance_per_batch * num_witin); let raw_structual_witin_iter = raw_structual_witin.values [0..steps.len() * num_structural_witin] .par_chunks_mut(num_instance_per_batch * num_structural_witin); raw_witin_iter .zip_eq(raw_structual_witin_iter) .zip_eq(steps.par_chunks(num_instance_per_batch)) .enumerate() .flat_map(|(chunk_idx, ((instances, structural_instance), steps))| { let mut lk_multiplicity = lk_multiplicity.clone(); instances .chunks_mut(num_witin) .zip_eq(structural_instance.chunks_mut(num_structural_witin)) .zip_eq(steps) .enumerate() .map(|(i, ((instance, structural_instance), step))| { let row = chunk_idx * num_instance_per_batch + i; let (sel_r, sel_w) = if row < num_local_reads { (E::BaseField::ONE, E::BaseField::ZERO) } else { (E::BaseField::ZERO, E::BaseField::ONE) }; set_val!(structural_instance, selector_r_witin, sel_r); set_val!(structural_instance, selector_w_witin, sel_w); set_val!(structural_instance, selector_zero_witin, E::BaseField::ONE); Self::assign_instance(config, instance, &mut 
lk_multiplicity, step) }) .collect::<Vec<_>>() }) .collect::<Result<(), ZKVMError>>()?; // allocate num_rows_padded size, fill points on first half let mut cur_layer_points_buffer: Vec<_> = (0..num_rows_padded) .into_par_iter() .map(|i| { steps .get(i) .map(|step| step.ec_point.point.clone()) .unwrap_or_else(SepticPoint::default) }) .collect(); // raw_witin offset start from n. // left node is at b, right node is at b + 1 // op(left node, right node) = offset + b / 2 let mut offset = num_rows_padded / 2; let mut current_layer_len = cur_layer_points_buffer.len() / 2; // slope[1,b] = (input[b,0].y - input[b,1].y) / (input[b,0].x - input[b,1].x) loop { if current_layer_len <= 1 { break; } let (current_layer, next_layer) = cur_layer_points_buffer.split_at_mut(current_layer_len); current_layer .par_chunks(2) .zip_eq(next_layer[..current_layer_len / 2].par_iter_mut()) .zip(raw_witin.values[offset * num_witin..].par_chunks_mut(num_witin)) .for_each(|((pair, parent), instance)| { let p1 = &pair[0]; let p2 = &pair[1]; let (slope, q) = if p2.is_infinity { // input[1,b] = bypass_left(input[b,0], input[b,1]) (SepticExtension::zero(), p1.clone()) } else { // input[1,b] = affine_add(input[b,0], input[b,1]) let slope = (&p1.y - &p2.y) * (&p1.x - &p2.x).inverse().unwrap(); let q = p1.clone() + p2.clone(); (slope, q) }; config .x .iter() .chain(config.y.iter()) .chain(config.slope.iter()) .zip_eq(chain!( q.x.deref().iter(), q.y.deref().iter(), slope.deref().iter(), )) .for_each(|(witin, fe)| { set_val!(instance, *witin, *fe); }); *parent = q.clone(); }); cur_layer_points_buffer = cur_layer_points_buffer.split_off(current_layer_len); current_layer_len /= 2; offset += current_layer_len; } let raw_witin = witness::RowMajorMatrix::new_by_inner_matrix( raw_witin, InstancePaddingStrategy::Default, ); let raw_structual_witin = witness::RowMajorMatrix::new_by_inner_matrix( raw_structual_witin, InstancePaddingStrategy::Default, ); Ok([raw_witin, raw_structual_witin]) } } #[cfg(test)] mod 
tests { use either::Either; use ff_ext::{BabyBearExt4, FromUniformBytes, PoseidonField}; use itertools::Itertools; use mpcs::{BasefoldDefault, PolynomialCommitmentScheme, SecurityLevel}; use p3::babybear::BabyBear; use rand::thread_rng; use std::{ops::Index, sync::Arc}; use tracing_forest::{ForestLayer, util::LevelFilter}; use tracing_subscriber::{EnvFilter, Registry, layer::SubscriberExt, util::SubscriberInitExt}; use transcript::BasicTranscript; use crate::{ circuit_builder::{CircuitBuilder, ConstraintSystem}, scheme::{ PublicValues, create_backend, create_prover, hal::ProofInput, prover::ZKVMProver, septic_curve::SepticPoint, verifier::ZKVMVerifier, }, structs::{ComposedConstrainSystem, PointAndEval, ProgramParams, RAMType, ZKVMProvingKey}, tables::{ShardRamCircuit, ShardRamInput, ShardRamRecord, TableCircuit}, }; use multilinear_extensions::mle::IntoMLE; use p3::field::PrimeField32; type E = BabyBearExt4; type F = BabyBear; type Perm = <F as PoseidonField>::P; type Pcs = BasefoldDefault<E>; #[test] fn test_shard_ram_circuit() { // default filter let default_filter = EnvFilter::builder() .with_default_directive(LevelFilter::DEBUG.into()) .from_env_lossy(); Registry::default() .with(ForestLayer::default()) .with(default_filter) .init(); // init global chip with horizen_rc_consts let perm = <F as PoseidonField>::get_default_perm(); let mut cs = ConstraintSystem::new(|| "global chip test"); let mut cb = CircuitBuilder::new(&mut cs); let (config, gkr_circuit) = ShardRamCircuit::build_gkr_iop_circuit(&mut cb, &ProgramParams::default()).unwrap(); // create a bunch of random memory read/write records let n_global_reads = 170000; let n_global_writes = 1420; let global_reads = (0..n_global_reads) .map(|i| { let addr = i * 8; let value = (i + 1) * 8; ShardRamRecord { addr: addr as u32, ram_type: RAMType::Memory, value: value as u32, shard: 0, local_clk: 0, global_clk: i, is_to_write_set: false, } }) .collect::<Vec<_>>(); let global_writes = (0..n_global_writes) .map(|i| { 
let addr = i * 8; let value = (i + 1) * 8; ShardRamRecord { addr: addr as u32, ram_type: RAMType::Memory, value: value as u32, shard: 1, local_clk: i, global_clk: i, is_to_write_set: true, } }) .collect::<Vec<_>>(); let input = global_writes // local reads .into_iter() .chain(global_reads) // local writes .map(|record| { let ec_point = record.to_ec_point::<E, Perm>(&perm); ShardRamInput { name: "dummy_test", record, ec_point, } }) .collect::<Vec<_>>(); let global_ec_sum: SepticPoint<F> = input .iter() .map(|record| record.ec_point.point.clone()) .sum(); let public_value = PublicValues::new( 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, vec![0], // dummy global_ec_sum .x .iter() .chain(global_ec_sum.y.iter()) .map(|fe| fe.as_canonical_u32()) .collect_vec(), ); // assign witness let witness = ShardRamCircuit::assign_instances( &config, cs.num_witin as usize, cs.num_structural_witin as usize, &[], &input, ) .unwrap(); // api extract ec sum from rmm witness assert_eq!( global_ec_sum, ShardRamCircuit::extract_ec_sum(&config, &witness[0]) ); let composed_cs = ComposedConstrainSystem { zkvm_v1_css: cs, gkr_circuit, }; let pk = composed_cs.key_gen(); // create chip proof for global chip let pcs_param = Pcs::setup(1 << 20, SecurityLevel::Conjecture100bits).unwrap(); let (pp, vp) = Pcs::trim(pcs_param, 1 << 20).unwrap(); let backend = create_backend::<E, Pcs>(20, SecurityLevel::Conjecture100bits); let pd = create_prover(backend); let zkvm_pk = ZKVMProvingKey::new(pp, vp); let zkvm_vk = zkvm_pk.get_vk_slow(); let zkvm_prover = ZKVMProver::new(zkvm_pk.into(), pd); let mut transcript = BasicTranscript::new(b"global chip test"); let public_input_mles = public_value .to_vec::<E>() .into_iter() .map(|v| Arc::new(v.into_mle())) .collect_vec(); let pub_io_evals = public_value .to_vec::<E>() .into_iter() .map(|v| Either::Right(E::from(*v.index(0)))) .collect_vec(); let proof_input = ProofInput { witness: witness[0].to_mles().into_iter().map(Arc::new).collect(), structural_witness: 
witness[1].to_mles().into_iter().map(Arc::new).collect(), fixed: vec![], public_input: public_input_mles.clone(), pub_io_evals, num_instances: vec![n_global_writes as usize, n_global_reads as usize], has_ecc_ops: true, }; let mut rng = thread_rng(); let challenges = [E::random(&mut rng), E::random(&mut rng)]; let (proof, _, point) = zkvm_prover .create_chip_proof( ShardRamCircuit::<E>::name().as_str(), &pk, proof_input, &mut transcript, &challenges, ) .unwrap(); let mut transcript = BasicTranscript::new(b"global chip test"); let verifier = ZKVMVerifier::new(zkvm_vk); let pi_evals = public_input_mles .iter() .map(|mle| mle.evaluate(&point[..mle.num_vars()])) .collect_vec(); let (vrf_point, _) = verifier .verify_chip_proof( "global", &pk.vk, &proof, &pi_evals, &public_value.to_vec::<E>(), &mut transcript, 2, &PointAndEval::default(), &challenges, ) .expect("verify global chip proof"); assert_eq!(vrf_point, point); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/range/range_circuit.rs
ceno_zkvm/src/tables/range/range_circuit.rs
//! Range tables as circuits with trait TableCircuit. use std::{collections::HashMap, marker::PhantomData}; use crate::{ circuit_builder::CircuitBuilder, error::ZKVMError, structs::{ProgramParams, ROMType}, tables::{ RMMCollections, TableCircuit, range::range_impl::{DoubleRangeTableConfig, DynamicRangeTableConfig}, }, }; use ff_ext::ExtensionField; use gkr_iop::{ chip::Chip, gkr::{GKRCircuit, layer::Layer}, selector::SelectorType, tables::LookupTable, }; use itertools::Itertools; use multilinear_extensions::ToExpr; use witness::{InstancePaddingStrategy, RowMajorMatrix}; /// Use this trait as parameter to RangeTableCircuit. pub trait RangeTable { const ROM_TYPE: ROMType; fn len() -> usize; fn content() -> Vec<u64> { (0..Self::len() as u64).collect() } } pub struct DynamicRangeTableCircuit<E, const MAX_BITS: usize>(PhantomData<E>); impl<E: ExtensionField, const MAX_BITS: usize> TableCircuit<E> for DynamicRangeTableCircuit<E, MAX_BITS> { type TableConfig = DynamicRangeTableConfig; type FixedInput = (); type WitnessInput<'a> = (); fn name() -> String { format!("DYNAMIC_RANGE_{}", MAX_BITS) } fn construct_circuit( cb: &mut CircuitBuilder<E>, _params: &ProgramParams, ) -> Result<DynamicRangeTableConfig, ZKVMError> { Ok(cb.namespace( || Self::name(), |cb| DynamicRangeTableConfig::construct_circuit(cb, MAX_BITS), )?) } fn generate_fixed_traces( _config: &DynamicRangeTableConfig, _num_fixed: usize, _input: &(), ) -> RowMajorMatrix<E::BaseField> { RowMajorMatrix::<E::BaseField>::new(0, 0, InstancePaddingStrategy::Default) } fn assign_instances( config: &Self::TableConfig, num_witin: usize, num_structural_witin: usize, multiplicity: &[HashMap<u64, usize>], _input: &(), ) -> Result<RMMCollections<E::BaseField>, ZKVMError> { let multiplicity = &multiplicity[LookupTable::Dynamic as usize]; Ok(config.assign_instances(num_witin, num_structural_witin, multiplicity, MAX_BITS)?) 
} } pub struct DoubleRangeTableCircuit<E, const MAX_BITS_1: usize, const MAX_BITS_2: usize, R>( PhantomData<(E, R)>, ); impl<E: ExtensionField, const MAX_BITS_1: usize, const MAX_BITS_2: usize, R: RangeTable> TableCircuit<E> for DoubleRangeTableCircuit<E, MAX_BITS_1, MAX_BITS_2, R> { type TableConfig = DoubleRangeTableConfig; type FixedInput = (); type WitnessInput<'a> = (); fn name() -> String { format!("DOUBLE_RANGE_{:?}", R::ROM_TYPE) } fn construct_circuit( cb: &mut CircuitBuilder<E>, _params: &ProgramParams, ) -> Result<DoubleRangeTableConfig, ZKVMError> { assert_eq!(MAX_BITS_1 + MAX_BITS_2, R::len().ilog2() as usize); Ok(cb.namespace( || Self::name(), |cb| DoubleRangeTableConfig::construct_circuit(cb, R::ROM_TYPE, MAX_BITS_1, MAX_BITS_2), )?) } fn build_gkr_iop_circuit( cb: &mut CircuitBuilder<E>, param: &ProgramParams, ) -> Result<(Self::TableConfig, Option<GKRCircuit<E>>), ZKVMError> { let config = Self::construct_circuit(cb, param)?; let lk_table_len = cb.cs.lk_table_expressions.len() * 2; let selector = cb.create_placeholder_structural_witin(|| "selector"); let selector_type = SelectorType::Whole(selector.expr()); // all shared the same selector let (out_evals, mut chip) = ( [ // r_record vec![], // w_record vec![], // lk_record (0..lk_table_len).collect_vec(), // zero_record vec![], ], Chip::new_from_cb(cb, 0), ); // register selector to legacy constrain system cb.cs.lk_selector = Some(selector_type.clone()); let layer = Layer::from_circuit_builder(cb, Self::name(), 0, out_evals); chip.add_layer(layer); Ok((config, Some(chip.gkr_circuit()))) } fn generate_fixed_traces( _config: &DoubleRangeTableConfig, _num_fixed: usize, _input: &(), ) -> RowMajorMatrix<E::BaseField> { RowMajorMatrix::<E::BaseField>::new(0, 0, InstancePaddingStrategy::Default) } fn assign_instances( config: &Self::TableConfig, num_witin: usize, num_structural_witin: usize, multiplicity: &[HashMap<u64, usize>], _input: &(), ) -> Result<RMMCollections<E::BaseField>, ZKVMError> { let 
multiplicity = &multiplicity[R::ROM_TYPE as usize]; Ok(config.assign_instances(num_witin, num_structural_witin, multiplicity)?) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/range/range_impl.rs
ceno_zkvm/src/tables/range/range_impl.rs
//! The implementation of range tables. No generics. use ff_ext::{ExtensionField, SmallField}; use gkr_iop::{error::CircuitBuilderError, tables::LookupTable}; use rayon::iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator}; use std::collections::HashMap; use witness::{InstancePaddingStrategy, RowMajorMatrix, set_val}; use crate::{ circuit_builder::{CircuitBuilder, SetTableSpec}, structs::ROMType, }; use multilinear_extensions::{StructuralWitIn, StructuralWitInType, ToExpr, WitIn}; #[derive(Clone, Debug)] pub struct DynamicRangeTableConfig { range: StructuralWitIn, bits: StructuralWitIn, mlt: WitIn, } impl DynamicRangeTableConfig { pub fn construct_circuit<E: ExtensionField>( cb: &mut CircuitBuilder<E>, max_bits: usize, ) -> Result<Self, CircuitBuilderError> { let range = cb.create_structural_witin( || "structural range witin", StructuralWitInType::StackedIncrementalSequence { max_bits }, ); let bits = cb.create_structural_witin( || "structural bits witin", StructuralWitInType::StackedConstantSequence { max_value: max_bits, }, ); let mlt = cb.create_witin(|| "mlt"); let record_exprs = vec![range.expr(), bits.expr()]; cb.lk_table_record( || "record", SetTableSpec { len: Some(1 << (max_bits + 1)), structural_witins: vec![range, bits], }, LookupTable::Dynamic, record_exprs, mlt.expr(), )?; Ok(Self { range, bits, mlt }) } pub fn assign_instances<F: SmallField>( &self, num_witin: usize, num_structural_witin: usize, multiplicity: &HashMap<u64, usize>, max_bits: usize, ) -> Result<[RowMajorMatrix<F>; 2], CircuitBuilderError> { let length = 1 << (max_bits + 1); let mut witness: RowMajorMatrix<F> = RowMajorMatrix::<F>::new(length, num_witin, InstancePaddingStrategy::Default); let mut structural_witness = RowMajorMatrix::<F>::new( length, num_structural_witin, InstancePaddingStrategy::Default, ); let mut mlts = vec![0; length]; for (idx, mlt) in multiplicity { mlts[*idx as usize] = *mlt; } let range_content = std::iter::once(F::ZERO) 
.chain((0..=max_bits).flat_map(|i| (0..(1 << i)).map(|j| F::from_canonical_usize(j)))) .collect::<Vec<_>>(); let bits_content = std::iter::once(F::ZERO) .chain((0..=max_bits).flat_map(|i| { std::iter::repeat_n(i, 1 << i).map(|j| F::from_canonical_usize(j)) })) .collect::<Vec<_>>(); witness .par_rows_mut() .zip(structural_witness.par_rows_mut()) .zip(mlts.par_iter()) .zip(range_content.par_iter()) .zip(bits_content.par_iter()) .for_each(|((((row, structural_row), mlt), i), b)| { set_val!(row, self.mlt, F::from_canonical_u64(*mlt as u64)); set_val!(structural_row, self.range, i); set_val!(structural_row, self.bits, b); *structural_row.last_mut().unwrap() = F::ONE; }); Ok([witness, structural_witness]) } } #[derive(Clone, Debug)] pub struct DoubleRangeTableConfig { range_a: StructuralWitIn, range_a_bits: usize, range_b: StructuralWitIn, range_b_bits: usize, mlt: WitIn, } impl DoubleRangeTableConfig { pub fn construct_circuit<E: ExtensionField>( cb: &mut CircuitBuilder<E>, rom_type: ROMType, range_a_bits: usize, range_b_bits: usize, ) -> Result<Self, CircuitBuilderError> { let range_a = cb.create_structural_witin( || "structural range witin a", StructuralWitInType::InnerRepeatingIncrementalSequence { k: range_a_bits, n: range_a_bits + range_b_bits, }, ); let range_b = cb.create_structural_witin( || "structural range witin b", StructuralWitInType::OuterRepeatingIncrementalSequence { k: range_a_bits, n: range_a_bits + range_b_bits, }, ); let mlt = cb.create_witin(|| "mlt"); let record_exprs = vec![range_a.expr(), range_b.expr()]; cb.lk_table_record( || "record", SetTableSpec { len: Some(1 << (range_a_bits + range_b_bits)), structural_witins: vec![range_a, range_b], }, rom_type, record_exprs, mlt.expr(), )?; Ok(Self { range_a, range_a_bits, range_b, range_b_bits, mlt, }) } pub fn assign_instances<F: SmallField>( &self, num_witin: usize, num_structural_witin: usize, multiplicity: &HashMap<u64, usize>, ) -> Result<[RowMajorMatrix<F>; 2], CircuitBuilderError> { let length = 
1 << (self.range_a_bits + self.range_b_bits); let mut witness: RowMajorMatrix<F> = RowMajorMatrix::<F>::new(length, num_witin, InstancePaddingStrategy::Default); let mut structural_witness = RowMajorMatrix::<F>::new( length, num_structural_witin, InstancePaddingStrategy::Default, ); let mut mlts = vec![0; length]; for (idx, mlt) in multiplicity { mlts[*idx as usize] = *mlt; } witness .par_rows_mut() .zip(structural_witness.par_rows_mut()) .zip(mlts.par_iter().enumerate()) .for_each(|((row, structural_row), (idx, mlt))| { let a = idx >> self.range_a_bits; let b = idx & ((1 << self.range_a_bits) - 1); set_val!(row, self.mlt, F::from_canonical_u64(*mlt as u64)); set_val!(structural_row, self.range_a, F::from_canonical_usize(a)); set_val!(structural_row, self.range_b, F::from_canonical_usize(b)); *structural_row.last_mut().unwrap() = F::ONE; }); Ok([witness, structural_witness]) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/ram/ram_impl.rs
ceno_zkvm/src/tables/ram/ram_impl.rs
use ceno_emul::Addr; use ff_ext::{ExtensionField, SmallField}; use gkr_iop::error::CircuitBuilderError; use itertools::Itertools; use rayon::iter::{ IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, IntoParallelRefMutIterator, ParallelExtend, ParallelIterator, }; use std::{marker::PhantomData, ops::Range}; use witness::{InstancePaddingStrategy, RowMajorMatrix, set_fixed_val, set_val}; use super::{ MemInitRecord, ram_circuit::{DynVolatileRamTable, MemFinalRecord, NonVolatileTable}, }; use crate::{ chip_handler::general::PublicValuesQuery, circuit_builder::{CircuitBuilder, SetTableSpec}, e2e::ShardContext, instructions::riscv::constants::{LIMB_BITS, LIMB_MASK}, scheme::PublicValues, structs::ProgramParams, tables::ram::ram_circuit::DynVolatileRamTableConfigTrait, }; use ff_ext::FieldInto; use multilinear_extensions::{Expression, Fixed, StructuralWitIn, ToExpr, WitIn}; pub trait NonVolatileTableConfigTrait<NVRAM>: Sized + Send + Sync { type Config: Sized + Send + Sync; fn construct_circuit<E: ExtensionField>( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<Self::Config, CircuitBuilderError>; fn gen_init_state<F: SmallField>( config: &Self::Config, num_fixed: usize, init_mem: &[MemInitRecord], ) -> RowMajorMatrix<F>; fn assign_instances<F: SmallField>( config: &Self::Config, num_witin: usize, num_structural_witin: usize, final_mem: &[MemFinalRecord], ) -> Result<[RowMajorMatrix<F>; 2], CircuitBuilderError>; } /// define a non-volatile memory with init value #[derive(Clone, Debug)] pub struct NonVolatileInitTableConfig<NVRAM: NonVolatileTable + Send + Sync + Clone> { init_v: Vec<Fixed>, addr: Fixed, phantom: PhantomData<NVRAM>, params: ProgramParams, } impl<NVRAM: NonVolatileTable + Send + Sync + Clone> NonVolatileTableConfigTrait<NVRAM> for NonVolatileInitTableConfig<NVRAM> { type Config = NonVolatileInitTableConfig<NVRAM>; fn construct_circuit<E: ExtensionField>( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> 
Result<Self, CircuitBuilderError> { cb.set_omc_init_only(); assert!(NVRAM::WRITABLE); let init_v = (0..NVRAM::V_LIMBS) .map(|i| cb.create_fixed(|| format!("init_v_limb_{i}"))) .collect_vec(); let addr = cb.create_fixed(|| "addr"); let init_table = [ vec![(NVRAM::RAM_TYPE as usize).into()], vec![Expression::Fixed(addr)], init_v.iter().map(|v| v.expr()).collect_vec(), vec![Expression::ZERO], // Initial cycle. ] .concat(); cb.w_table_record( || "init_table", NVRAM::RAM_TYPE, SetTableSpec { len: Some(NVRAM::len(params)), structural_witins: vec![], }, init_table, )?; Ok(Self { init_v, addr, phantom: PhantomData, params: params.clone(), }) } fn gen_init_state<F: SmallField>( config: &Self::Config, num_fixed: usize, init_mem: &[MemInitRecord], ) -> RowMajorMatrix<F> { assert!( NVRAM::len(&config.params).is_power_of_two(), "{} len {} must be a power of 2", NVRAM::name(), NVRAM::len(&config.params) ); let mut init_table = RowMajorMatrix::<F>::new( NVRAM::len(&config.params), num_fixed, InstancePaddingStrategy::Default, ); assert_eq!(init_table.num_padding_instances(), 0); init_table .par_rows_mut() .zip_eq(init_mem) .for_each(|(row, rec)| { if config.init_v.len() == 1 { // Assign value directly. set_fixed_val!(row, config.init_v[0], (rec.value as u64).into_f()); } else { // Assign value limbs. config.init_v.iter().enumerate().for_each(|(l, limb)| { let val = (rec.value >> (l * LIMB_BITS)) & LIMB_MASK; set_fixed_val!(row, limb, (val as u64).into_f()); }); } set_fixed_val!(row, config.addr, (rec.addr as u64).into_f()); }); init_table } /// TODO consider taking RowMajorMatrix as argument to save allocations. 
fn assign_instances<F: SmallField>( config: &Self::Config, _num_witin: usize, num_structural_witin: usize, final_mem: &[MemFinalRecord], ) -> Result<[RowMajorMatrix<F>; 2], CircuitBuilderError> { if final_mem.is_empty() { return Ok([RowMajorMatrix::empty(), RowMajorMatrix::empty()]); } assert!(num_structural_witin == 0 || num_structural_witin == 1); let mut value = Vec::with_capacity(NVRAM::len(&config.params)); value.par_extend( (0..NVRAM::len(&config.params)) .into_par_iter() .map(|_| F::ONE), ); let structural_witness = RowMajorMatrix::<F>::new_by_values(value, 1, InstancePaddingStrategy::Default); Ok([RowMajorMatrix::empty(), structural_witness]) } } /// define public io /// init value set by instance #[derive(Clone, Debug)] pub struct PubIOTableInitConfig<NVRAM: NonVolatileTable + Send + Sync + Clone> { addr: Fixed, phantom: PhantomData<NVRAM>, params: ProgramParams, } impl<NVRAM: NonVolatileTable + Send + Sync + Clone> PubIOTableInitConfig<NVRAM> { pub fn construct_circuit<E: ExtensionField>( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<Self, CircuitBuilderError> { assert!(!NVRAM::WRITABLE); let init_v = cb.query_public_io()?; let addr = cb.create_fixed(|| "addr"); let init_table = [ vec![(NVRAM::RAM_TYPE as usize).into()], vec![Expression::Fixed(addr)], init_v.iter().map(|v| v.expr_as_instance()).collect_vec(), vec![Expression::ZERO], // Initial cycle. 
] .concat(); cb.w_table_record( || "init_table", NVRAM::RAM_TYPE, SetTableSpec { len: Some(NVRAM::len(params)), structural_witins: vec![], }, init_table, )?; Ok(Self { addr, phantom: PhantomData, params: params.clone(), }) } /// assign to fixed address pub fn gen_init_state<F: SmallField>( &self, num_fixed: usize, io_addrs: &[Addr], ) -> RowMajorMatrix<F> { assert!(NVRAM::len(&self.params).is_power_of_two()); let mut init_table = RowMajorMatrix::<F>::new( NVRAM::len(&self.params), num_fixed, InstancePaddingStrategy::Default, ); assert_eq!(init_table.num_padding_instances(), 0); init_table .par_rows_mut() .zip_eq(io_addrs) .for_each(|(row, addr)| { set_fixed_val!(row, self.addr, (*addr as u64).into_f()); }); init_table } /// TODO consider taking RowMajorMatrix as argument to save allocations. pub fn assign_instances<F: SmallField>( &self, _num_witin: usize, num_structural_witin: usize, final_mem: &[MemFinalRecord], ) -> Result<[RowMajorMatrix<F>; 2], CircuitBuilderError> { if final_mem.is_empty() { return Ok([RowMajorMatrix::empty(), RowMajorMatrix::empty()]); } assert!(num_structural_witin == 0 || num_structural_witin == 1); let mut value = Vec::with_capacity(NVRAM::len(&self.params)); value.par_extend( (0..NVRAM::len(&self.params)) .into_par_iter() .map(|_| F::ONE), ); let structural_witness = RowMajorMatrix::<F>::new_by_values(value, 1, InstancePaddingStrategy::Default); Ok([RowMajorMatrix::empty(), structural_witness]) } } /// volatile with all init value as 0 /// dynamic address as witin, relied on augment of knowledge to prove address form #[derive(Clone, Debug)] pub struct DynVolatileRamTableInitConfig<DVRAM: DynVolatileRamTable + Send + Sync + Clone> { addr: StructuralWitIn, init_v: Option<Vec<WitIn>>, phantom: PhantomData<DVRAM>, params: ProgramParams, } impl<DVRAM: DynVolatileRamTable + Send + Sync + Clone> DynVolatileRamTableInitConfig<DVRAM> { fn find_record_index(final_mem: &[MemFinalRecord], addr: Addr) -> Option<usize> { 
final_mem.binary_search_by_key(&addr, |rec| rec.addr).ok() } fn assign_instances<F: SmallField>( config: &Self, num_witin: usize, num_structural_witin: usize, (final_mem, _pv, _num_instances): &(&[MemFinalRecord], &PublicValues, usize), ) -> Result<[RowMajorMatrix<F>; 2], CircuitBuilderError> { if final_mem.is_empty() { return Ok([RowMajorMatrix::empty(), RowMajorMatrix::empty()]); } assert_eq!(num_structural_witin, 2); let num_instances = final_mem.len(); assert!(num_instances <= DVRAM::max_len(&config.params)); assert!(DVRAM::max_len(&config.params).is_power_of_two()); // got some duplicated code segment to simplify parallel assignment flow if let Some(init_v) = config.init_v.as_ref() { let mut witness = RowMajorMatrix::<F>::new( num_instances, num_witin, InstancePaddingStrategy::Default, ); let mut structural_witness = RowMajorMatrix::<F>::new( num_instances, num_structural_witin, InstancePaddingStrategy::Default, ); witness .par_rows_mut() .zip_eq(structural_witness.par_rows_mut()) .enumerate() .for_each(|(i, (row, structural_row))| { if cfg!(debug_assertions) && let Some(addr) = final_mem.get(i).map(|rec| rec.addr) { debug_assert_eq!( addr, DVRAM::addr(&config.params, i), "rec.addr {:x} != expected {:x}", addr, DVRAM::addr(&config.params, i), ); } if let Some(rec) = final_mem.get(i) { if init_v.len() == 1 { // Assign value directly. set_val!(row, init_v[0], rec.init_value as u64); } else { // Assign value limbs. 
init_v.iter().enumerate().for_each(|(l, limb)| { let val = (rec.init_value >> (l * LIMB_BITS)) & LIMB_MASK; set_val!(row, limb, val as u64); }); } } set_val!( structural_row, config.addr, DVRAM::addr(&config.params, i) as u64 ); if i < num_instances { *structural_row.last_mut().unwrap() = F::ONE; } }); Ok([witness, structural_witness]) } else { let mut structural_witness = RowMajorMatrix::<F>::new( num_instances, num_structural_witin, InstancePaddingStrategy::Default, ); structural_witness .par_rows_mut() .enumerate() .for_each(|(i, structural_row)| { if cfg!(debug_assertions) && let Some(addr) = final_mem.get(i).map(|rec| rec.addr) { debug_assert_eq!( addr, DVRAM::addr(&config.params, i), "rec.addr {:x} != expected {:x}", addr, DVRAM::addr(&config.params, i), ); } set_val!( structural_row, config.addr, DVRAM::addr(&config.params, i) as u64 ); if i < num_instances { *structural_row.last_mut().unwrap() = F::ONE; } }); Ok([RowMajorMatrix::empty(), structural_witness]) } } /// support offset address and length from public values fn assign_instances_dynamic<F: SmallField>( config: &Self, num_witin: usize, num_structural_witin: usize, (final_mem, pv, num_instances): &(&[MemFinalRecord], &PublicValues, usize), ) -> Result<[RowMajorMatrix<F>; 2], CircuitBuilderError> { // got some duplicated code segment to simplify parallel assignment flow let start_addr = DVRAM::dynamic_addr(&config.params, 0, pv); let start_index = Self::find_record_index(final_mem, start_addr); let fetch_rec = |entry_index: usize| -> (Addr, Option<&MemFinalRecord>) { let expected_addr = DVRAM::dynamic_addr(&config.params, entry_index, pv); let rec = start_index .and_then(|start| start.checked_add(entry_index)) .and_then(|idx| final_mem.get(idx)); (expected_addr, rec) }; if let Some(init_v) = config.init_v.as_ref() { let mut witness = RowMajorMatrix::<F>::new( *num_instances, num_witin, InstancePaddingStrategy::Default, ); let mut structural_witness = RowMajorMatrix::<F>::new( *num_instances, 
num_structural_witin, InstancePaddingStrategy::Default, ); witness .par_rows_mut() .zip_eq(structural_witness.par_rows_mut()) .enumerate() .for_each(|(i, (row, structural_row))| { let (expected_addr, rec_opt) = fetch_rec(i); if cfg!(debug_assertions) && let Some(rec) = rec_opt { debug_assert_eq!( rec.addr, expected_addr, "rec.addr {:x} != expected {:x}", rec.addr, expected_addr, ); } if let Some(rec) = rec_opt { if init_v.len() == 1 { // Assign value directly. set_val!(row, init_v[0], rec.init_value as u64); } else { // Assign value limbs. init_v.iter().enumerate().for_each(|(l, limb)| { let val = (rec.init_value >> (l * LIMB_BITS)) & LIMB_MASK; set_val!(row, limb, val as u64); }); } } set_val!(structural_row, config.addr, expected_addr as u64); if i < *num_instances { *structural_row.last_mut().unwrap() = F::ONE; } }); Ok([witness, structural_witness]) } else { let mut structural_witness = RowMajorMatrix::<F>::new( *num_instances, num_structural_witin, InstancePaddingStrategy::Default, ); structural_witness .par_rows_mut() .enumerate() .for_each(|(i, structural_row)| { let (expected_addr, rec_opt) = fetch_rec(i); if cfg!(debug_assertions) && let Some(rec) = rec_opt { debug_assert_eq!( rec.addr, expected_addr, "rec.addr {:x} != expected {:x}", rec.addr, expected_addr, ); } set_val!(structural_row, config.addr, expected_addr as u64); if i < *num_instances { *structural_row.last_mut().unwrap() = F::ONE; } }); Ok([RowMajorMatrix::empty(), structural_witness]) } } } impl<DVRAM: DynVolatileRamTable + Send + Sync + Clone> DynVolatileRamTableConfigTrait<DVRAM> for DynVolatileRamTableInitConfig<DVRAM> { type Config = DynVolatileRamTableInitConfig<DVRAM>; type WitnessInput<'a> = (&'a [MemFinalRecord], &'a PublicValues, usize); fn construct_circuit<E: ExtensionField>( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<Self, CircuitBuilderError> { if !DVRAM::DYNAMIC_OFFSET { cb.set_omc_init_only(); } let (addr_expr, addr) = DVRAM::addr_expr(cb, params)?; let 
(init_expr, init_v) = if DVRAM::ZERO_INIT { (vec![Expression::ZERO; DVRAM::V_LIMBS], None) } else { let init_v = (0..DVRAM::V_LIMBS) .map(|i| cb.create_witin(|| format!("init_v_limb_{i}"))) .collect::<Vec<WitIn>>(); (init_v.iter().map(|v| v.expr()).collect_vec(), Some(init_v)) }; let init_table = [ vec![(DVRAM::RAM_TYPE as usize).into()], vec![addr_expr.expr()], init_expr, vec![Expression::ZERO], // Initial cycle. ] .concat(); cb.w_table_record( || "init_table", DVRAM::RAM_TYPE, SetTableSpec { len: None, structural_witins: vec![addr], }, init_table, )?; Ok(Self { addr, init_v, phantom: PhantomData, params: params.clone(), }) } /// TODO consider taking RowMajorMatrix as argument to save allocations. fn assign_instances<'a, F: SmallField>( config: &Self::Config, num_witin: usize, num_structural_witin: usize, data: &(&[MemFinalRecord], &PublicValues, usize), ) -> Result<[RowMajorMatrix<F>; 2], CircuitBuilderError> { let (final_mem, _, _) = &data; if final_mem.is_empty() { return Ok([RowMajorMatrix::empty(), RowMajorMatrix::empty()]); } assert_eq!(num_structural_witin, 2); if DVRAM::DYNAMIC_OFFSET { Self::assign_instances_dynamic(config, num_witin, num_structural_witin, data) } else { Self::assign_instances(config, num_witin, num_structural_witin, data) } } } /// This table is generalized version to handle all mmio records #[derive(Clone, Debug)] pub struct LocalFinalRAMTableConfig<const V_LIMBS: usize> { addr_subset: WitIn, ram_type: WitIn, final_v: Vec<WitIn>, final_cycle: WitIn, } impl<const V_LIMBS: usize> LocalFinalRAMTableConfig<V_LIMBS> { pub fn construct_circuit<E: ExtensionField>( cb: &mut CircuitBuilder<E>, _params: &ProgramParams, ) -> Result<Self, CircuitBuilderError> { let addr_subset = cb.create_witin(|| "addr_subset"); let ram_type = cb.create_witin(|| "ram_type"); let final_v = (0..V_LIMBS) .map(|i| cb.create_witin(|| format!("final_v_limb_{i}"))) .collect::<Vec<WitIn>>(); let final_cycle = cb.create_witin(|| "final_cycle"); let final_expr = 
final_v.iter().map(|v| v.expr()).collect_vec(); let raw_final_table = [ // a v t vec![ram_type.expr()], vec![addr_subset.expr()], final_expr, vec![final_cycle.expr()], ] .concat(); let rlc_record = cb.rlc_chip_record(raw_final_table.clone()); cb.r_table_rlc_record( || "final_table", // XXX we mixed all ram type here to save column allocation ram_type.expr(), SetTableSpec { len: None, structural_witins: vec![], }, raw_final_table, rlc_record, )?; Ok(Self { addr_subset, ram_type, final_v, final_cycle, }) } /// TODO consider taking RowMajorMatrix as argument to save allocations. #[allow(clippy::type_complexity)] pub fn assign_instances<F: SmallField>( &self, shard_ctx: &ShardContext, num_witin: usize, num_structural_witin: usize, final_mem: &[(&'static str, Option<Range<Addr>>, &[MemFinalRecord])], ) -> Result<[RowMajorMatrix<F>; 2], CircuitBuilderError> { assert!(num_structural_witin == 0 || num_structural_witin == 1); let num_structural_witin = num_structural_witin.max(1); let is_current_shard_mem_record = |range: Option<&Range<Addr>>, record: &&MemFinalRecord| -> bool { shard_ctx.is_in_current_shard(record.cycle) || if let Some(range) = range { range.contains(&record.addr) && record.cycle == 0 } else { shard_ctx.is_first_shard() && record.cycle == 0 } }; // collect each raw mem belong to this shard, BEFORE padding length let mem_lens: Vec<usize> = final_mem .par_iter() .map(|(_, range, mem)| { mem.par_iter() .filter(|mem| is_current_shard_mem_record(range.as_ref(), mem)) .count() }) .collect(); // calculate mem length let total_records = mem_lens.iter().sum(); let mut witness = RowMajorMatrix::<F>::new(total_records, num_witin, InstancePaddingStrategy::Default); let mut structural_witness = RowMajorMatrix::<F>::new( total_records, num_structural_witin, InstancePaddingStrategy::Default, ); let mut witness_mut_slices = Vec::with_capacity(final_mem.len()); let mut structural_witness_mut_slices = Vec::with_capacity(final_mem.len()); let mut witness_value_rest = 
witness.values.as_mut_slice(); let mut structural_witness_value_rest = structural_witness.values.as_mut_slice(); for mem_len in mem_lens { let witness_length = mem_len * num_witin; let structural_witness_length = mem_len * num_structural_witin; assert!( witness_length <= witness_value_rest.len(), "chunk size exceeds remaining data" ); assert!( structural_witness_length <= structural_witness_value_rest.len(), "chunk size exceeds remaining data" ); let (witness_left, witness_r) = witness_value_rest.split_at_mut(witness_length); let (structural_witness_left, structural_witness_r) = structural_witness_value_rest.split_at_mut(structural_witness_length); witness_mut_slices.push(witness_left); structural_witness_mut_slices.push(structural_witness_left); witness_value_rest = witness_r; structural_witness_value_rest = structural_witness_r; } witness_mut_slices .par_iter_mut() .zip_eq(structural_witness_mut_slices.par_iter_mut()) .zip_eq(final_mem.par_iter()) .for_each(|((witness, structural_witness), (_, range, final_mem))| { witness .chunks_mut(num_witin) .zip_eq(structural_witness.chunks_mut(num_structural_witin)) .zip( final_mem .iter() .filter(|record| is_current_shard_mem_record(range.as_ref(), record)), ) .for_each(|((row, structural_row), rec)| { if self.final_v.len() == 1 { // Assign value directly. set_val!(row, self.final_v[0], rec.value as u64); } else { // Assign value limbs. 
self.final_v.iter().enumerate().for_each(|(l, limb)| { let val = (rec.value >> (l * LIMB_BITS)) & LIMB_MASK; set_val!(row, limb, val as u64); }); } let shard_cycle = shard_ctx.aligned_current_ts(rec.cycle); set_val!(row, self.final_cycle, shard_cycle); set_val!(row, self.ram_type, rec.ram_type as u64); set_val!(row, self.addr_subset, rec.addr as u64); *structural_row.last_mut().unwrap() = F::ONE; }); }); Ok([witness, structural_witness]) } } #[cfg(test)] mod tests { use std::iter::successors; use crate::{ circuit_builder::{CircuitBuilder, ConstraintSystem}, structs::ProgramParams, tables::{DynVolatileRamTable, HintsInitCircuit, HintsTable, MemFinalRecord, TableCircuit}, witness::LkMultiplicity, }; use crate::scheme::PublicValues; use ceno_emul::{CENO_PLATFORM, WORD_SIZE}; use ff_ext::GoldilocksExt2 as E; use gkr_iop::RAMType; use itertools::Itertools; use multilinear_extensions::mle::MultilinearExtension; use p3::{field::FieldAlgebra, goldilocks::Goldilocks as F}; use witness::next_pow2_instance_padding; #[test] fn test_well_formed_address_padding() { let mut cs = ConstraintSystem::<E>::new(|| "riscv"); let mut cb = CircuitBuilder::new(&mut cs); let (config, _) = HintsInitCircuit::build_gkr_iop_circuit(&mut cb, &ProgramParams::default()).unwrap(); let def_params = ProgramParams::default(); let lkm = LkMultiplicity::default().into_finalize_result(); let pv = PublicValues { hint_start_addr: CENO_PLATFORM.hints.start, ..Default::default() }; // ensure non-empty padding is required let some_non_2_pow = 26; let input = (0..some_non_2_pow) .map(|i| MemFinalRecord { ram_type: RAMType::Memory, addr: HintsTable::dynamic_addr(&def_params, i, &pv), cycle: 0, value: 0, init_value: 0, }) .collect_vec(); let [_, mut structural_witness] = HintsInitCircuit::<E>::assign_instances( &config, cb.cs.num_witin as usize, cb.cs.num_structural_witin as usize, &lkm.0, &(&input, &pv, 0), ) .unwrap(); let addr_column = cb .cs .structural_witin_namespace_map .iter() .position(|name| name == 
"riscv/HintsTable_Memory_RAM/addr") .unwrap(); structural_witness.padding_by_strategy(); let addr_padded_view: MultilinearExtension<E> = structural_witness.to_mles()[addr_column].clone(); // Expect addresses to proceed consecutively inside the padding as well let expected = successors(Some(addr_padded_view.get_base_field_vec()[0]), |idx| { Some(*idx + F::from_canonical_u64(WORD_SIZE as u64)) }) .take(next_pow2_instance_padding( structural_witness.num_instances(), )) .collect::<Vec<_>>(); assert_eq!(addr_padded_view.get_base_field_vec(), expected) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/ram/ram_circuit.rs
ceno_zkvm/src/tables/ram/ram_circuit.rs
use super::ram_impl::{ LocalFinalRAMTableConfig, NonVolatileTableConfigTrait, PubIOTableInitConfig, }; use crate::{ circuit_builder::CircuitBuilder, e2e::ShardContext, error::ZKVMError, scheme::PublicValues, structs::{ProgramParams, RAMType}, tables::{RMMCollections, TableCircuit}, }; use ceno_emul::{Addr, Cycle, GetAddr, WORD_SIZE, Word}; use ff_ext::{ExtensionField, SmallField}; use gkr_iop::{ chip::Chip, error::CircuitBuilderError, gkr::{GKRCircuit, layer::Layer}, selector::SelectorType, }; use itertools::Itertools; use multilinear_extensions::{Expression, StructuralWitIn, StructuralWitInType, ToExpr}; use std::{collections::HashMap, marker::PhantomData, ops::Range}; use witness::{InstancePaddingStrategy, RowMajorMatrix}; #[derive(Clone, Debug)] pub struct MemInitRecord { pub addr: Addr, pub value: Word, } #[derive(Clone, Debug)] pub struct MemFinalRecord { pub ram_type: RAMType, pub addr: Addr, pub cycle: Cycle, pub value: Word, // initial state value // same as `value` for read-only table // probably different for rw table pub init_value: Word, } impl GetAddr for MemInitRecord { fn get_addr(&self) -> Addr { self.addr } } impl GetAddr for MemFinalRecord { fn get_addr(&self) -> Addr { self.addr } } /// - **Non-Volatile**: The initial values can be set to any arbitrary value. /// /// **Special Note**: /// Setting `WRITABLE = false` does not strictly enforce immutability in this protocol. /// it only guarantees that the initial and final values remain invariant, /// allowing for temporary modifications within the lifecycle. pub trait NonVolatileTable { const RAM_TYPE: RAMType; const V_LIMBS: usize; const WRITABLE: bool; fn name() -> &'static str; /// Maximum number of words in the table. 
fn len(params: &ProgramParams) -> usize; } /// NonVolatileRamCircuit initializes and finalizes memory /// - at fixed addresses, /// - with fixed initial content, /// - with witnessed final content that the program wrote, if WRITABLE, /// - or final content equal to initial content, if not WRITABLE. pub struct NonVolatileRamCircuit<E, R, C>(PhantomData<(E, R, C)>); impl< E: ExtensionField, NVRAM: NonVolatileTable + Send + Sync + Clone, C: NonVolatileTableConfigTrait<NVRAM>, > TableCircuit<E> for NonVolatileRamCircuit<E, NVRAM, C> { type TableConfig = C::Config; type FixedInput = [MemInitRecord]; type WitnessInput<'a> = [MemFinalRecord]; fn name() -> String { format!("RAM_{:?}_{}", NVRAM::RAM_TYPE, NVRAM::name()) } fn construct_circuit( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<Self::TableConfig, ZKVMError> { Ok(cb.namespace(|| Self::name(), |cb| C::construct_circuit(cb, params))?) } fn generate_fixed_traces( config: &Self::TableConfig, num_fixed: usize, init_v: &Self::FixedInput, ) -> RowMajorMatrix<E::BaseField> { // assume returned table is well-formed include padding C::gen_init_state(config, num_fixed, init_v) } fn assign_instances( config: &Self::TableConfig, num_witin: usize, num_structural_witin: usize, _multiplicity: &[HashMap<u64, usize>], final_v: &Self::WitnessInput<'_>, ) -> Result<RMMCollections<E::BaseField>, ZKVMError> { // assume returned table is well-formed include padding Ok(C::assign_instances( config, num_witin, num_structural_witin, final_v, )?) } } /// PubIORamCircuit initializes and finalizes memory /// - at fixed addresses, /// - with content from the public input of proofs. /// /// This circuit does not and cannot decide whether the memory is mutable or not. /// It supports LOAD where the program reads the public input, /// or STORE where the memory content must equal the public input after execution. 
pub struct PubIORamInitCircuit<E, R>(PhantomData<(E, R)>); impl<E: ExtensionField, NVRAM: NonVolatileTable + Send + Sync + Clone> TableCircuit<E> for PubIORamInitCircuit<E, NVRAM> { type TableConfig = PubIOTableInitConfig<NVRAM>; type FixedInput = [Addr]; type WitnessInput<'a> = [MemFinalRecord]; fn name() -> String { format!("RAM_{:?}_{}", NVRAM::RAM_TYPE, NVRAM::name()) } fn construct_circuit( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<Self::TableConfig, ZKVMError> { cb.set_omc_init_only(); Ok(cb.namespace( || Self::name(), |cb| Self::TableConfig::construct_circuit(cb, params), )?) } fn generate_fixed_traces( config: &Self::TableConfig, num_fixed: usize, io_addrs: &[Addr], ) -> RowMajorMatrix<E::BaseField> { // assume returned table is well-formed including padding config.gen_init_state(num_fixed, io_addrs) } fn assign_instances( config: &Self::TableConfig, num_witin: usize, num_structural_witin: usize, _multiplicity: &[HashMap<u64, usize>], final_mem: &[MemFinalRecord], ) -> Result<RMMCollections<E::BaseField>, ZKVMError> { // assume returned table is well-formed including padding Ok(config.assign_instances(num_witin, num_structural_witin, final_mem)?) } } /// - **Dynamic**: The address space is bounded within a specific range, /// though the range itself may be dynamically determined per proof. 
/// - **Volatile**: The initial values are set to `0` pub trait DynVolatileRamTable { const RAM_TYPE: RAMType; const V_LIMBS: usize; const ZERO_INIT: bool; const DESCENDING: bool; const DYNAMIC_OFFSET: bool = false; fn addr_expr<E: ExtensionField>( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<(Expression<E>, StructuralWitIn), CircuitBuilderError> { let max_len = Self::max_len(params); let addr = cb.create_structural_witin( || "addr", StructuralWitInType::EqualDistanceSequence { max_len, offset: Self::offset_addr(params), multi_factor: WORD_SIZE, descending: Self::DESCENDING, }, ); Ok((addr.expr(), addr)) } fn offset_addr(params: &ProgramParams) -> Addr; fn dynamic_offset_addr(_params: &ProgramParams, _pv: &PublicValues) -> Addr { unimplemented!() } fn end_addr(params: &ProgramParams) -> Addr; fn name() -> &'static str; fn max_len(params: &ProgramParams) -> usize { let max_size = (if Self::DESCENDING { Self::offset_addr(params) - Self::end_addr(params) } else { Self::end_addr(params) - Self::offset_addr(params) }) .div_ceil(WORD_SIZE as u32) as Addr; 1 << (u32::BITS - 1 - max_size.leading_zeros()) // prev_power_of_2 } fn addr(params: &ProgramParams, entry_index: usize) -> Addr { if Self::DESCENDING { Self::offset_addr(params) - (entry_index * WORD_SIZE) as Addr } else { // ascending Self::offset_addr(params) + (entry_index * WORD_SIZE) as Addr } } fn dynamic_addr(_params: &ProgramParams, _entry_index: usize, _pv: &PublicValues) -> Addr { unimplemented!() } } pub trait DynVolatileRamTableConfigTrait<DVRAM>: Sized + Send + Sync { type Config: Sized + Send + Sync; type WitnessInput<'a>: Send + Sync + ?Sized; fn construct_circuit<E: ExtensionField>( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<Self::Config, CircuitBuilderError>; fn assign_instances<F: SmallField>( config: &Self::Config, num_witin: usize, num_structural_witin: usize, data: &Self::WitnessInput<'_>, ) -> Result<[RowMajorMatrix<F>; 2], CircuitBuilderError>; } /// 
DynVolatileRamCircuit initializes and finalizes memory /// - at witnessed addresses, in a contiguous range chosen by the prover, /// - with zeros as initial content if ZERO_INIT, /// - with witnessed final content that the program wrote. /// /// If not ZERO_INIT: /// - The initial content is an unconstrained prover hint. /// - The final content is equal to this initial content. pub struct DynVolatileRamCircuit<E, R, C>(PhantomData<(E, R, C)>); impl< E: ExtensionField, DVRAM: DynVolatileRamTable + Send + Sync + Clone, C: DynVolatileRamTableConfigTrait<DVRAM>, > TableCircuit<E> for DynVolatileRamCircuit<E, DVRAM, C> { type TableConfig = C::Config; type FixedInput = (); type WitnessInput<'a> = C::WitnessInput<'a>; fn name() -> String { format!("{}_{:?}_RAM", DVRAM::name(), DVRAM::RAM_TYPE,) } fn construct_circuit( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<Self::TableConfig, ZKVMError> { Ok(cb.namespace(|| Self::name(), |cb| C::construct_circuit(cb, params))?) } fn generate_fixed_traces( _config: &Self::TableConfig, _num_fixed: usize, _init_v: &Self::FixedInput, ) -> RowMajorMatrix<E::BaseField> { RowMajorMatrix::<E::BaseField>::new(0, 0, InstancePaddingStrategy::Default) } fn assign_instances( config: &Self::TableConfig, num_witin: usize, num_structural_witin: usize, _multiplicity: &[HashMap<u64, usize>], data: &Self::WitnessInput<'_>, ) -> Result<RMMCollections<E::BaseField>, ZKVMError> { // assume returned table is well-formed include padding Ok( <C as DynVolatileRamTableConfigTrait<DVRAM>>::assign_instances( config, num_witin, num_structural_witin, data, )?, ) } } /// This circuit is generalized version to handle all mmio records pub struct LocalFinalRamCircuit<const V_LIMBS: usize, E>(PhantomData<E>); impl<E: ExtensionField, const V_LIMBS: usize> TableCircuit<E> for LocalFinalRamCircuit<V_LIMBS, E> { type TableConfig = LocalFinalRAMTableConfig<V_LIMBS>; type FixedInput = (); type WitnessInput<'a> = ( &'a ShardContext<'a>, &'a [(&'static str, 
Option<Range<Addr>>, &'a [MemFinalRecord])], ); fn name() -> String { "LocalRAMTableFinal".to_string() } fn construct_circuit( cb: &mut CircuitBuilder<E>, params: &ProgramParams, ) -> Result<Self::TableConfig, ZKVMError> { Ok(cb.namespace( || Self::name(), |cb| Self::TableConfig::construct_circuit(cb, params), )?) } fn build_gkr_iop_circuit( cb: &mut CircuitBuilder<E>, param: &ProgramParams, ) -> Result<(Self::TableConfig, Option<GKRCircuit<E>>), ZKVMError> { let config = Self::construct_circuit(cb, param)?; let r_table_len = cb.cs.r_table_expressions.len(); let selector = cb.create_placeholder_structural_witin(|| "selector"); let selector_type = SelectorType::Prefix(selector.expr()); // all shared the same selector let (out_evals, mut chip) = ( [ // r_record (0..r_table_len).collect_vec(), // w_record vec![], // lk_record vec![], // zero_record vec![], ], Chip::new_from_cb(cb, 0), ); // register selector to legacy constrain system cb.cs.r_selector = Some(selector_type.clone()); let layer = Layer::from_circuit_builder(cb, Self::name(), 0, out_evals); chip.add_layer(layer); Ok((config, Some(chip.gkr_circuit()))) } fn generate_fixed_traces( _config: &Self::TableConfig, _num_fixed: usize, _init_v: &Self::FixedInput, ) -> RowMajorMatrix<E::BaseField> { RowMajorMatrix::<E::BaseField>::new(0, 0, InstancePaddingStrategy::Default) } fn assign_instances( config: &Self::TableConfig, num_witin: usize, num_structural_witin: usize, _multiplicity: &[HashMap<u64, usize>], (shard_ctx, final_mem): &Self::WitnessInput<'_>, ) -> Result<RMMCollections<E::BaseField>, ZKVMError> { // assume returned table is well-formed include padding Ok(Self::TableConfig::assign_instances( config, shard_ctx, num_witin, num_structural_witin, final_mem, )?) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/ops/ops_circuit.rs
ceno_zkvm/src/tables/ops/ops_circuit.rs
//! Ops tables as circuits with trait TableCircuit. use super::ops_impl::OpTableConfig; use std::{collections::HashMap, marker::PhantomData}; use crate::{ circuit_builder::CircuitBuilder, error::ZKVMError, structs::ProgramParams, tables::{RMMCollections, TableCircuit}, }; use ff_ext::ExtensionField; use gkr_iop::tables::OpsTable; use witness::RowMajorMatrix; pub struct OpsTableCircuit<E, R>(PhantomData<(E, R)>); impl<E: ExtensionField, OP: OpsTable> TableCircuit<E> for OpsTableCircuit<E, OP> { type TableConfig = OpTableConfig; type FixedInput = (); type WitnessInput<'a> = (); fn name() -> String { format!("{:?}_OPS_ROM_TABLE", OP::ROM_TYPE) } fn construct_circuit( cb: &mut CircuitBuilder<E>, _params: &ProgramParams, ) -> Result<OpTableConfig, ZKVMError> { Ok(cb.namespace( || Self::name(), |cb| OpTableConfig::construct_circuit(cb, OP::ROM_TYPE, OP::len()), )?) } fn generate_fixed_traces( config: &OpTableConfig, num_fixed: usize, _input: &(), ) -> RowMajorMatrix<E::BaseField> { config.generate_fixed_traces(num_fixed, OP::content()) } fn assign_instances( config: &Self::TableConfig, num_witin: usize, num_structural_witin: usize, multiplicity: &[HashMap<u64, usize>], _input: &(), ) -> Result<RMMCollections<E::BaseField>, ZKVMError> { let multiplicity = &multiplicity[OP::ROM_TYPE as usize]; Ok(config.assign_instances(num_witin, num_structural_witin, multiplicity, OP::len())?) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/tables/ops/ops_impl.rs
ceno_zkvm/src/tables/ops/ops_impl.rs
//! The implementation of ops tables. No generics. use ff_ext::{ExtensionField, SmallField}; use gkr_iop::error::CircuitBuilderError; use itertools::Itertools; use rayon::iter::{IndexedParallelIterator, ParallelIterator}; use std::collections::HashMap; use witness::{InstancePaddingStrategy, RowMajorMatrix, set_fixed_val, set_val}; use crate::{ circuit_builder::{CircuitBuilder, SetTableSpec}, structs::ROMType, tables::RMMCollections, }; use multilinear_extensions::{Expression, Fixed, ToExpr, WitIn}; #[derive(Clone, Debug)] pub struct OpTableConfig { abc: [Fixed; 3], mlt: WitIn, } impl OpTableConfig { pub fn construct_circuit<E: ExtensionField>( cb: &mut CircuitBuilder<E>, rom_type: ROMType, table_len: usize, ) -> Result<Self, CircuitBuilderError> { let abc = [ cb.create_fixed(|| "a"), cb.create_fixed(|| "b"), cb.create_fixed(|| "c"), ]; let mlt = cb.create_witin(|| "mlt"); let record_exprs = abc.into_iter().map(|f| Expression::Fixed(f)).collect_vec(); cb.lk_table_record( || "record", SetTableSpec { len: Some(table_len), structural_witins: vec![], }, rom_type, record_exprs, mlt.expr(), )?; Ok(Self { abc, mlt }) } pub fn generate_fixed_traces<F: SmallField>( &self, num_fixed: usize, content: Vec<[u64; 3]>, ) -> RowMajorMatrix<F> { let mut fixed = RowMajorMatrix::<F>::new(content.len(), num_fixed, InstancePaddingStrategy::Default); fixed.par_rows_mut().zip(content).for_each(|(row, abc)| { for (col, val) in self.abc.iter().zip(abc.iter()) { set_fixed_val!(row, *col, F::from_v(*val)); } }); fixed } pub fn assign_instances<F: SmallField>( &self, num_witin: usize, num_structural_witin: usize, multiplicity: &HashMap<u64, usize>, length: usize, ) -> Result<RMMCollections<F>, CircuitBuilderError> { assert_eq!(num_structural_witin, 1); let num_structural_witin = num_structural_witin.max(1); let mut witness = RowMajorMatrix::<F>::new(length, num_witin, InstancePaddingStrategy::Default); let mut structural_witness = RowMajorMatrix::<F>::new( length, num_structural_witin, 
InstancePaddingStrategy::Default, ); let mut mlts = vec![0; length]; for (idx, mlt) in multiplicity { mlts[*idx as usize] = *mlt; } witness .par_rows_mut() .zip_eq(structural_witness.par_rows_mut()) .zip(mlts) .for_each(|((row, structural_row), mlt)| { set_val!(row, self.mlt, F::from_v(mlt as u64)); *structural_row.last_mut().unwrap() = F::ONE; }); Ok([witness, structural_witness]) } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/benches/weierstrass_add.rs
ceno_zkvm/benches/weierstrass_add.rs
use std::time::Duration; use ceno_zkvm::precompiles::{ random_point_pairs, run_weierstrass_add, setup_weierstrass_add_circuit, }; use criterion::*; use ff_ext::BabyBearExt4; use mpcs::BasefoldDefault; use sp1_curves::weierstrass::{ SwCurve, WeierstrassParameters, bls12_381::Bls12381, bn254::Bn254, secp256k1::Secp256k1, secp256r1::Secp256r1, }; mod alloc; criterion_group!( benches, weierstrass_add_fn_bn254, weierstrass_add_fn_bls12_381, weierstrass_add_fn_secp256k1, weierstrass_add_fn_secp256r1 ); fn real_main() { benches(); Criterion::default().configure_from_args().final_summary(); } fn main() { std::thread::Builder::new() .stack_size(32 * 1024 * 1024) // 32MB .spawn(real_main) .unwrap() .join() .unwrap(); } const NUM_SAMPLES: usize = 10; fn weierstrass_add_fn_helper<WP: WeierstrassParameters>(c: &mut Criterion) { // Benchmark the proving time for log_instances in 12..14 { let num_instances = 1 << log_instances; // expand more input size once runtime is acceptable let mut group = c.benchmark_group(format!("weierstrass_add_{}", num_instances)); group.sample_size(NUM_SAMPLES); group.bench_function( BenchmarkId::new( "weierstrass_add", format!("prove_weierstrass_add_{}", num_instances), ), |b| { b.iter_custom(|iters| { let mut time = Duration::new(0, 0); for _ in 0..iters { let points = random_point_pairs::<WP>(5); let instant = std::time::Instant::now(); let circuit = setup_weierstrass_add_circuit::<_, SwCurve<WP>>() .expect("setup circuit error"); #[allow(clippy::unit_arg)] let _ = run_weierstrass_add::< BabyBearExt4, BasefoldDefault<BabyBearExt4>, SwCurve<WP>, >(circuit, black_box(points), false, false) .expect("unable to get proof"); let elapsed = instant.elapsed(); println!( "weierstrass_add::create_proof, instances = {}, time = {}", num_instances, elapsed.as_secs_f64() ); time += elapsed; } time }); }, ); group.finish(); } } fn weierstrass_add_fn_bn254(c: &mut Criterion) { weierstrass_add_fn_helper::<Bn254>(c); } fn weierstrass_add_fn_bls12_381(c: &mut 
Criterion) { weierstrass_add_fn_helper::<Bls12381>(c); } fn weierstrass_add_fn_secp256k1(c: &mut Criterion) { weierstrass_add_fn_helper::<Secp256k1>(c); } fn weierstrass_add_fn_secp256r1(c: &mut Criterion) { weierstrass_add_fn_helper::<Secp256r1>(c); }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/benches/is_prime.rs
ceno_zkvm/benches/is_prime.rs
use std::time::Duration; use ceno_emul::{Platform, Program}; use ceno_host::CenoStdin; use ceno_zkvm::{ self, e2e::{Checkpoint, Preset, run_e2e_with_checkpoint, setup_platform}, scheme::{create_backend, create_prover}, }; mod alloc; use ceno_zkvm::e2e::MultiProver; use criterion::*; use ff_ext::BabyBearExt4; use gkr_iop::cpu::default_backend_config; use mpcs::BasefoldDefault; criterion_group! { name = is_prime; config = Criterion::default().warm_up_time(Duration::from_millis(5000)); targets = is_prime_1 } criterion_main!(is_prime); const NUM_SAMPLES: usize = 10; type Pcs = BasefoldDefault<E>; type E = BabyBearExt4; // Relevant init data for fibonacci run fn setup() -> (Program, Platform) { let stack_size = 32768; let heap_size = 2097152; let pub_io_size = 16; let program = Program::load_elf(ceno_examples::is_prime, u32::MAX).unwrap(); let platform = setup_platform(Preset::Ceno, &program, stack_size, heap_size, pub_io_size); (program, platform) } fn is_prime_1(c: &mut Criterion) { let (program, platform) = setup(); let (max_num_variables, security_level) = default_backend_config(); let backend = create_backend::<E, Pcs>(max_num_variables, security_level); for n in [100u32, 10000u32, 50000u32] { let max_steps = usize::MAX; let mut hints = CenoStdin::default(); _ = hints.write(&n); let hints: Vec<u32> = (&hints).into(); let mut group = c.benchmark_group(format!("is_prime_{}", max_steps)); group.sample_size(NUM_SAMPLES); // Benchmark the proving time group.bench_function( BenchmarkId::new("is_prime", format!("is_prime_n={}", n)), |b| { b.iter_custom(|iters| { let mut time = Duration::new(0, 0); for _ in 0..iters { let result = run_e2e_with_checkpoint::<E, Pcs, _, _>( create_prover(backend.clone()), program.clone(), platform.clone(), MultiProver::default(), &hints, &[], max_steps, Checkpoint::PrepE2EProving, None, ); let instant = std::time::Instant::now(); result.next_step(); time += instant.elapsed(); } time }); }, ); group.finish(); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/benches/weierstrass_double.rs
ceno_zkvm/benches/weierstrass_double.rs
use std::time::Duration; use ceno_zkvm::precompiles::{ random_points, run_weierstrass_double, setup_weierstrass_double_circuit, }; use criterion::*; use ff_ext::BabyBearExt4; use mpcs::BasefoldDefault; use sp1_curves::weierstrass::{ SwCurve, WeierstrassParameters, bls12_381::Bls12381, bn254::Bn254, secp256k1::Secp256k1, secp256r1::Secp256r1, }; mod alloc; criterion_group!( benches, weierstrass_double_fn_bn254, weierstrass_double_fn_bls12_381, weierstrass_double_fn_secp256k1, weierstrass_double_fn_secp256r1 ); fn real_main() { benches(); Criterion::default().configure_from_args().final_summary(); } fn main() { std::thread::Builder::new() .stack_size(32 * 1024 * 1024) // 32MB .spawn(real_main) .unwrap() .join() .unwrap(); } const NUM_SAMPLES: usize = 10; fn weierstrass_double_fn_helper<WP: WeierstrassParameters>(c: &mut Criterion) { // Benchmark the proving time for log_instances in 12..14 { let num_instances = 1 << log_instances; // expand more input size once runtime is acceptable let mut group = c.benchmark_group(format!("weierstrass_double_{}", num_instances)); group.sample_size(NUM_SAMPLES); group.bench_function( BenchmarkId::new( "weierstrass_double", format!("prove_weierstrass_double_{}", num_instances), ), |b| { b.iter_custom(|iters| { let mut time = Duration::new(0, 0); for _ in 0..iters { let points = random_points::<WP>(5); let instant = std::time::Instant::now(); let circuit = setup_weierstrass_double_circuit::<_, SwCurve<WP>>() .expect("setup circuit error"); #[allow(clippy::unit_arg)] let _ = run_weierstrass_double::< BabyBearExt4, BasefoldDefault<BabyBearExt4>, SwCurve<WP>, >(circuit, black_box(points), false, false) .expect("unable to get proof"); let elapsed = instant.elapsed(); println!( "weierstrass_double::create_proof, instances = {}, time = {}", num_instances, elapsed.as_secs_f64() ); time += elapsed; } time }); }, ); group.finish(); } } fn weierstrass_double_fn_bn254(c: &mut Criterion) { weierstrass_double_fn_helper::<Bn254>(c); } fn 
weierstrass_double_fn_bls12_381(c: &mut Criterion) { weierstrass_double_fn_helper::<Bls12381>(c); } fn weierstrass_double_fn_secp256k1(c: &mut Criterion) { weierstrass_double_fn_helper::<Secp256k1>(c); } fn weierstrass_double_fn_secp256r1(c: &mut Criterion) { weierstrass_double_fn_helper::<Secp256r1>(c); }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/benches/alloc.rs
ceno_zkvm/benches/alloc.rs
// Use jemalloc as global allocator for performance #[cfg(all(feature = "jemalloc", unix))] #[global_allocator] static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/benches/lookup_keccakf.rs
ceno_zkvm/benches/lookup_keccakf.rs
use std::time::Duration; use ceno_zkvm::precompiles::{run_lookup_keccakf, setup_lookup_keccak_gkr_circuit}; use criterion::*; use ff_ext::BabyBearExt4; use itertools::Itertools; use mpcs::BasefoldDefault; use rand::{RngCore, SeedableRng}; mod alloc; criterion_group!(benches, keccak_f_fn); criterion_main!(benches); const NUM_SAMPLES: usize = 10; fn keccak_f_fn(c: &mut Criterion) { // Benchmark the proving time for log_instances in 12..14 { let num_instances = 1 << log_instances; // expand more input size once runtime is acceptable let mut group = c.benchmark_group(format!("keccak_lookup_f_{}", num_instances)); group.sample_size(NUM_SAMPLES); group.bench_function( BenchmarkId::new( "keccak_lookup_f", format!("prove_keccak_lookup_f_{}", num_instances), ), |b| { b.iter_custom(|iters| { let mut time = Duration::new(0, 0); for _ in 0..iters { // Use seeded rng for debugging convenience let mut rng = rand::rngs::StdRng::seed_from_u64(42); let states: Vec<[u64; 25]> = (0..num_instances) .map(|_| std::array::from_fn(|_| rng.next_u64())) .collect_vec(); let instant = std::time::Instant::now(); let circuit = setup_lookup_keccak_gkr_circuit().expect("setup circuit error"); #[allow(clippy::unit_arg)] let _ = run_lookup_keccakf::<BabyBearExt4, BasefoldDefault<BabyBearExt4>>( circuit, black_box(states), false, false, ) .expect("unable to get proof"); let elapsed = instant.elapsed(); println!( "keccak_f::create_proof, instances = {}, time = {}", num_instances, elapsed.as_secs_f64() ); time += elapsed; } time }); }, ); group.finish(); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/benches/riscv_add.rs
ceno_zkvm/benches/riscv_add.rs
use std::time::Duration; use ceno_zkvm::{ self, instructions::{Instruction, riscv::arith::AddInstruction}, scheme::{create_backend, create_prover, hal::ProofInput, prover::ZKVMProver}, structs::{ZKVMConstraintSystem, ZKVMFixedTraces}, }; mod alloc; use criterion::*; use ceno_zkvm::scheme::constants::MAX_NUM_VARIABLES; use mpcs::{BasefoldDefault, PolynomialCommitmentScheme, SecurityLevel}; use ff_ext::BabyBearExt4; use gkr_iop::cpu::default_backend_config; use rand::rngs::OsRng; use transcript::{BasicTranscript, Transcript}; use witness::RowMajorMatrix; #[cfg(feature = "gpu")] use gkr_iop::gpu::{MultilinearExtensionGpu, gpu_prover::*}; #[cfg(feature = "gpu")] use itertools::Itertools; #[cfg(feature = "gpu")] use std::sync::Arc; #[cfg(feature = "flamegraph")] criterion_group! { name = op_add; config = Criterion::default().warm_up_time(Duration::from_millis(3000)).with_profiler(pprof2::criterion::PProfProfiler::new(100, pprof2::criterion::Output::Flamegraph(None))); targets = bench_add } #[cfg(not(feature = "flamegraph"))] criterion_group! 
{ name = op_add; config = Criterion::default().warm_up_time(Duration::from_millis(3000)); targets = bench_add } criterion_main!(op_add); const NUM_SAMPLES: usize = 10; fn bench_add(c: &mut Criterion) { type Pcs = BasefoldDefault<E>; let mut zkvm_cs = ZKVMConstraintSystem::default(); let config = zkvm_cs.register_opcode_circuit::<AddInstruction<E>>(); let mut zkvm_fixed_traces = ZKVMFixedTraces::default(); zkvm_fixed_traces.register_opcode_circuit::<AddInstruction<E>>(&zkvm_cs, &config); let param = Pcs::setup(1 << MAX_NUM_VARIABLES, SecurityLevel::default()).unwrap(); let (pp, vp) = Pcs::trim(param, 1 << MAX_NUM_VARIABLES).unwrap(); let pk = zkvm_cs .clone() .key_gen::<Pcs>(pp, vp, 0, zkvm_fixed_traces) .expect("keygen failed"); let (max_num_variables, security_level) = default_backend_config(); let backend = create_backend::<E, Pcs>(max_num_variables, security_level); let device = create_prover(backend); let prover = ZKVMProver::new_with_single_shard(pk, device); let circuit_pk = prover .pk .circuit_pks .get(&AddInstruction::<E>::name()) .unwrap(); let num_witin = circuit_pk.get_cs().num_witin(); for instance_num_vars in 20..22 { // expand more input size once runtime is acceptable let mut group = c.benchmark_group(format!("add_op_{}", instance_num_vars)); group.sample_size(NUM_SAMPLES); // Benchmark the proving time group.bench_function( BenchmarkId::new("prove_add", format!("prove_add_log2_{}", instance_num_vars)), |b| { b.iter_custom(|iters| { let mut time = Duration::new(0, 0); for _ in 0..iters { // generate mock witness let num_instances = 1 << instance_num_vars; let rmms = vec![RowMajorMatrix::rand(&mut OsRng, num_instances, num_witin)]; let instant = std::time::Instant::now(); let num_instances = 1 << instance_num_vars; let mut transcript = BasicTranscript::new(b"riscv"); let commit = Pcs::batch_commit_and_write(&prover.pk.pp, rmms, &mut transcript) .unwrap(); let polys = Pcs::get_arc_mle_witness_from_commitment(&commit); let challenges = [ 
transcript.read_challenge().elements, transcript.read_challenge().elements, ]; // TODO: better way to handle this #[cfg(feature = "gpu")] let cuda_hal = get_cuda_hal().unwrap(); #[cfg(feature = "gpu")] let polys = polys .iter() .map(|v| Arc::new(MultilinearExtensionGpu::from_ceno(&cuda_hal, v))) .collect_vec(); let input = ProofInput { fixed: vec![], witness: polys, structural_witness: vec![], public_input: vec![], pub_io_evals: vec![], num_instances: vec![num_instances], has_ecc_ops: false, }; let _ = prover .create_chip_proof( "ADD", circuit_pk, input, &mut transcript, &challenges, ) .expect("create_proof failed"); let elapsed = instant.elapsed(); println!( "AddInstruction::create_proof, instance_num_vars = {}, time = {}", instance_num_vars, elapsed.as_secs_f64() ); time += elapsed; } time }); }, ); group.finish(); } type E = BabyBearExt4; }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/benches/bitwise_keccakf.rs
ceno_zkvm/benches/bitwise_keccakf.rs
use std::time::Duration; use ceno_zkvm::precompiles::{run_bitwise_keccakf, setup_bitwise_keccak_gkr_circuit}; use criterion::*; use ff_ext::GoldilocksExt2; use itertools::Itertools; use mpcs::BasefoldDefault; use rand::{RngCore, SeedableRng}; mod alloc; criterion_group!(benches, keccak_f_fn); criterion_main!(benches); const NUM_SAMPLES: usize = 10; fn keccak_f_fn(c: &mut Criterion) { // Benchmark the proving time for log_instances in 10..12 { let num_instances = 1 << log_instances; // expand more input size once runtime is acceptable let mut group = c.benchmark_group(format!("keccak_f_{}", num_instances)); group.sample_size(NUM_SAMPLES); group.bench_function( BenchmarkId::new("keccak_f", format!("prove_keccek_f_{}", num_instances)), |b| { b.iter_custom(|iters| { let mut time = Duration::new(0, 0); for _ in 0..iters { // Use seeded rng for debugging convenience let mut rng = rand::rngs::StdRng::seed_from_u64(42); let states: Vec<[u64; 25]> = (0..num_instances) .map(|_| std::array::from_fn(|_| rng.next_u64())) .collect_vec(); let instant = std::time::Instant::now(); #[allow(clippy::unit_arg)] run_bitwise_keccakf::<GoldilocksExt2, BasefoldDefault<GoldilocksExt2>>( setup_bitwise_keccak_gkr_circuit() .expect("setup circuit error") .1, black_box(states), false, false, ); let elapsed = instant.elapsed(); println!( "keccak_f::create_proof, instances = {}, time = {}", num_instances, elapsed.as_secs_f64() ); time += elapsed; } time }); }, ); group.finish(); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/benches/fibonacci_witness.rs
ceno_zkvm/benches/fibonacci_witness.rs
use ceno_emul::{Platform, Program}; use ceno_host::CenoStdin; use ceno_zkvm::{ self, e2e::{Checkpoint, Preset, run_e2e_with_checkpoint, setup_platform}, scheme::{create_backend, create_prover}, }; use std::{fs, path::PathBuf, time::Duration}; mod alloc; use criterion::*; use ceno_zkvm::e2e::MultiProver; use ff_ext::BabyBearExt4; use gkr_iop::cpu::default_backend_config; use mpcs::BasefoldDefault; criterion_group! { name = fibonacci; config = Criterion::default().warm_up_time(Duration::from_millis(20000)); targets = fibonacci_witness } criterion_main!(fibonacci); const NUM_SAMPLES: usize = 10; type Pcs = BasefoldDefault<E>; type E = BabyBearExt4; // Relevant init data for fibonacci run fn setup() -> (Program, Platform) { let mut file_path = PathBuf::from(env!("CARGO_MANIFEST_DIR")); file_path.push("examples/fibonacci.elf"); let stack_size = 32768; let heap_size = 2097152; let pub_io_size = 16; let elf_bytes = fs::read(&file_path).expect("read elf file"); let program = Program::load_elf(&elf_bytes, u32::MAX).unwrap(); let platform = setup_platform(Preset::Ceno, &program, stack_size, heap_size, pub_io_size); (program, platform) } fn fibonacci_witness(c: &mut Criterion) { let (program, platform) = setup(); let (max_num_variables, security_level) = default_backend_config(); let backend = create_backend::<E, Pcs>(max_num_variables, security_level); let max_steps = usize::MAX; let mut group = c.benchmark_group(format!("fib_wit_max_steps_{}", max_steps)); group.sample_size(NUM_SAMPLES); // retrive 1 << 20th fibonacci element >> max_steps let mut hints = CenoStdin::default(); let _ = hints.write(&20); // Benchmark the proving time group.bench_function( BenchmarkId::new( "fibonacci_witness", format!("fib_wit_max_steps_{}", max_steps), ), |b| { b.iter_custom(|iters| { let mut time = Duration::new(0, 0); for _ in 0..iters { let result = run_e2e_with_checkpoint::<E, Pcs, _, _>( create_prover(backend.clone()), program.clone(), platform.clone(), MultiProver::default(), 
&Vec::from(&hints), &[], max_steps, Checkpoint::PrepWitnessGen, None, ); let instant = std::time::Instant::now(); result.next_step(); let elapsed = instant.elapsed(); time += elapsed; } time }); }, ); group.finish(); type E = BabyBearExt4; }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/benches/keccak.rs
ceno_zkvm/benches/keccak.rs
use std::time::Duration; use ceno_emul::{Platform, Program}; use ceno_host::CenoStdin; use ceno_zkvm::{ self, e2e::{Checkpoint, Preset, run_e2e_with_checkpoint, setup_platform}, scheme::{create_backend, create_prover}, }; mod alloc; use ceno_zkvm::{e2e::MultiProver, scheme::verifier::ZKVMVerifier}; use criterion::*; use ff_ext::BabyBearExt4; use gkr_iop::cpu::default_backend_config; use mpcs::BasefoldDefault; use transcript::BasicTranscript; criterion_group! { name = keccak_prove_group; config = Criterion::default().warm_up_time(Duration::from_millis(20000)); targets = keccak_prove, } criterion_main!(keccak_prove_group); const NUM_SAMPLES: usize = 10; type Pcs = BasefoldDefault<E>; type E = BabyBearExt4; // Relevant init data for keccak run fn setup() -> (Program, Platform) { let stack_size = 32768; let heap_size = 2097152; let pub_io_size = 16; let program = Program::load_elf(ceno_examples::keccak_syscall, u32::MAX).unwrap(); let platform = setup_platform(Preset::Ceno, &program, stack_size, heap_size, pub_io_size); (program, platform) } fn keccak_prove(c: &mut Criterion) { let (program, platform) = setup(); let (max_num_variables, security_level) = default_backend_config(); let backend = create_backend::<E, Pcs>(max_num_variables, security_level); // retrive 1 << 20th keccak element >> max_steps let mut hints = CenoStdin::default(); let _ = hints.write(&vec![1, 2, 3]); let max_steps = usize::MAX; // estimate proof size data first let result = run_e2e_with_checkpoint::<E, Pcs, _, _>( create_prover(backend.clone()), program.clone(), platform.clone(), MultiProver::default(), &Vec::from(&hints), &[], max_steps, Checkpoint::Complete, None, ); let proof = result .proofs .expect("PrepSanityCheck do not provide proof") .remove(0); let vk = result.vk.expect("PrepSanityCheck do not provide verifier"); println!("e2e proof {}", proof); let transcript = BasicTranscript::new(b"riscv"); let verifier = ZKVMVerifier::<E, Pcs>::new(vk); assert!( verifier .verify_proof_halt(proof, 
transcript, true) .expect("verify proof return with error"), ); println!(); println!("max_steps = {}", max_steps); // expand more input size once runtime is acceptable let mut group = c.benchmark_group(format!("keccak_max_steps_{}", max_steps)); group.sample_size(NUM_SAMPLES); // Benchmark the proving time group.bench_function( BenchmarkId::new("prove_keccak", format!("keccak_max_steps_{}", max_steps)), |b| { b.iter_custom(|iters| { let mut time = Duration::new(0, 0); for _ in 0..iters { let result = run_e2e_with_checkpoint::<E, Pcs, _, _>( create_prover(backend.clone()), program.clone(), platform.clone(), MultiProver::default(), &Vec::from(&hints), &[], max_steps, Checkpoint::PrepE2EProving, None, ); let instant = std::time::Instant::now(); result.next_step(); let elapsed = instant.elapsed(); println!( "Keccak::create_proof, max_steps = {}, time = {}", max_steps, elapsed.as_secs_f64() ); time += elapsed; } time }); }, ); group.finish(); }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/benches/quadratic_sorting.rs
ceno_zkvm/benches/quadratic_sorting.rs
use std::time::Duration; use ceno_emul::{Platform, Program}; use ceno_host::CenoStdin; use ceno_zkvm::{ self, e2e::{Checkpoint, Preset, run_e2e_with_checkpoint, setup_platform}, scheme::{create_backend, create_prover}, }; mod alloc; use ceno_zkvm::e2e::MultiProver; use criterion::*; use ff_ext::BabyBearExt4; use gkr_iop::cpu::default_backend_config; use mpcs::BasefoldDefault; use rand::{RngCore, SeedableRng}; criterion_group! { name = quadratic_sorting; config = Criterion::default().warm_up_time(Duration::from_millis(5000)); targets = quadratic_sorting_1 } criterion_main!(quadratic_sorting); const NUM_SAMPLES: usize = 10; type Pcs = BasefoldDefault<E>; type E = BabyBearExt4; // Relevant init data for fibonacci run fn setup() -> (Program, Platform) { let stack_size = 32768; let heap_size = 2097152; let pub_io_size = 16; let program = Program::load_elf(ceno_examples::quadratic_sorting, u32::MAX).unwrap(); let platform = setup_platform(Preset::Ceno, &program, stack_size, heap_size, pub_io_size); (program, platform) } fn quadratic_sorting_1(c: &mut Criterion) { let (program, platform) = setup(); let (max_num_variables, security_level) = default_backend_config(); let backend = create_backend::<E, Pcs>(max_num_variables, security_level); let mut rng = rand::rngs::StdRng::seed_from_u64(42); for n in [100, 500] { let max_steps = usize::MAX; let mut hints = CenoStdin::default(); _ = hints.write(&(0..n).map(|_| rng.next_u32()).collect::<Vec<_>>()); let hints: Vec<u32> = (&hints).into(); let mut group = c.benchmark_group("quadratic_sorting".to_string()); group.sample_size(NUM_SAMPLES); // Benchmark the proving time group.bench_function( BenchmarkId::new("quadratic_sorting", format!("n = {}", n)), |b| { b.iter_custom(|iters| { let mut time = Duration::new(0, 0); for _ in 0..iters { let result = run_e2e_with_checkpoint::<E, Pcs, _, _>( create_prover(backend.clone()), program.clone(), platform.clone(), MultiProver::default(), &hints, &[], max_steps, Checkpoint::PrepE2EProving, 
None, ); let instant = std::time::Instant::now(); result.next_step(); time += instant.elapsed(); } time }); }, ); group.finish(); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/benches/fibonacci.rs
ceno_zkvm/benches/fibonacci.rs
use std::time::Duration; use ceno_emul::{Platform, Program}; use ceno_host::CenoStdin; use ceno_zkvm::{ self, e2e::{Checkpoint, Preset, run_e2e_with_checkpoint, setup_platform}, scheme::{create_backend, create_prover}, }; mod alloc; use criterion::*; use ff_ext::BabyBearExt4; use gkr_iop::cpu::default_backend_config; use ceno_zkvm::{e2e::MultiProver, scheme::verifier::ZKVMVerifier}; use mpcs::BasefoldDefault; use transcript::BasicTranscript; criterion_group! { name = fibonacci_prove_group; config = Criterion::default().warm_up_time(Duration::from_millis(20000)); targets = fibonacci_prove, } criterion_main!(fibonacci_prove_group); const NUM_SAMPLES: usize = 10; type Pcs = BasefoldDefault<E>; type E = BabyBearExt4; // Relevant init data for fibonacci run fn setup() -> (Program, Platform) { let stack_size = 32768; let heap_size = 2097152; let pub_io_size = 16; let program = Program::load_elf(ceno_examples::fibonacci, u32::MAX).unwrap(); let platform = setup_platform(Preset::Ceno, &program, stack_size, heap_size, pub_io_size); (program, platform) } fn fibonacci_prove(c: &mut Criterion) { let (program, platform) = setup(); let (max_num_variables, security_level) = default_backend_config(); let backend = create_backend::<E, Pcs>(max_num_variables, security_level); for max_steps in [1usize << 20, 1usize << 21, 1usize << 22] { // retrive 1 << 20th fibonacci element >> max_steps let mut hints = CenoStdin::default(); let _ = hints.write(&20); // estimate proof size data first let result = run_e2e_with_checkpoint::<E, Pcs, _, _>( create_prover(backend.clone()), program.clone(), platform.clone(), MultiProver::default(), &Vec::from(&hints), &[], max_steps, Checkpoint::Complete, None, ); let proof = result .proofs .expect("PrepSanityCheck do not provide proof") .remove(0); let vk = result.vk.expect("PrepSanityCheck do not provide verifier"); println!("e2e proof {}", proof); let transcript = BasicTranscript::new(b"riscv"); let verifier = ZKVMVerifier::<E, Pcs>::new(vk); assert!( 
verifier .verify_proof_halt(proof, transcript, false) .expect("verify proof return with error"), ); println!(); println!("max_steps = {}", max_steps); // expand more input size once runtime is acceptable let mut group = c.benchmark_group(format!("fibonacci_max_steps_{}", max_steps)); group.sample_size(NUM_SAMPLES); // Benchmark the proving time group.bench_function( BenchmarkId::new( "prove_fibonacci", format!("fibonacci_max_steps_{}", max_steps), ), |b| { b.iter_custom(|iters| { let mut time = Duration::new(0, 0); for _ in 0..iters { let result = run_e2e_with_checkpoint::<E, Pcs, _, _>( create_prover(backend.clone()), program.clone(), platform.clone(), MultiProver::default(), &Vec::from(&hints), &[], max_steps, Checkpoint::PrepE2EProving, None, ); let instant = std::time::Instant::now(); result.next_step(); let elapsed = instant.elapsed(); println!( "Fibonacci::create_proof, max_steps = {}, time = {}", max_steps, elapsed.as_secs_f64() ); time += elapsed; } time }); }, ); group.finish(); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_rt/build.rs
ceno_rt/build.rs
use std::{env, fs, path::PathBuf}; fn main() { let out_dir = PathBuf::from(env::var("OUT_DIR").unwrap()); // Put the linker script somewhere the linker can find it. fs::write(out_dir.join("memory.x"), include_bytes!("memory.x")).unwrap(); fs::write(out_dir.join("ceno_link.x"), include_bytes!("ceno_link.x")).unwrap(); println!("cargo:rustc-link-search={}", out_dir.display()); println!("cargo:rerun-if-changed=memory.x"); println!("cargo:rerun-if-changed=ceno_link.x"); println!("cargo:rerun-if-changed=build.rs"); }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_rt/src/lib.rs
ceno_rt/src/lib.rs
#![deny(clippy::cargo)] #![feature(linkage)] use getrandom::{Error, register_custom_getrandom}; #[cfg(target_arch = "riscv32")] use core::arch::{asm, global_asm}; use std::{ alloc::{Layout, alloc_zeroed}, ptr, }; #[cfg(target_arch = "riscv32")] mod allocator; mod mmio; pub use mmio::{commit, read, read_owned, read_slice}; mod io; #[cfg(debug_assertions)] pub use io::info_out; mod params; pub use params::*; #[unsafe(no_mangle)] #[linkage = "weak"] pub extern "C" fn sys_write(_fd: i32, _buf: *const u8, _count: usize) -> isize { 0 } #[unsafe(no_mangle)] #[linkage = "weak"] pub extern "C" fn sys_alloc_words(nwords: usize) -> *mut u32 { unsafe { alloc_zeroed(Layout::from_size_align(4 * nwords, 4).unwrap()) as *mut u32 } } #[unsafe(no_mangle)] #[linkage = "weak"] pub extern "C" fn sys_getenv(_name: *const u8) -> *const u8 { ptr::null() } /// Generates random bytes. /// /// # Safety /// /// Make sure that `buf` has at least `nwords` words. /// This generator is terrible. :) #[unsafe(no_mangle)] #[linkage = "weak"] pub unsafe extern "C" fn sys_rand(recv_buf: *mut u8, words: usize) { fn step() -> u32 { static mut X: u32 = 0xae569764; // We are stealing Borland Delphi's random number generator. // The random numbers here are only good enough to make eg // HashMap work. // // SAFETY: Used for hashing purposes so it is more or less OK to have conflicting reads // and writes. 
unsafe { X = X.wrapping_mul(134775813) + 1; X } } let mut idx = 0; let steps = words / 4; let rest = words % 4; for _ in 0..steps { let bytes = step().to_le_bytes(); // SAFETY: Up to the caller unsafe { ptr::copy_nonoverlapping(bytes.as_ptr(), recv_buf.add(idx), 4); } idx = idx.wrapping_add(4); } let [a, b, c, _] = step().to_le_bytes(); for (_, el) in (0..rest).zip([a, b, c]) { // SAFETY: Up to the caller unsafe { *recv_buf.add(idx) = el; } idx = idx.wrapping_add(1); } } /// Custom random number generator for getrandom /// /// One of sproll's dependencies uses the getrandom crate, /// and it will only build, if we provide a custom random number generator. /// /// Otherwise, it'll complain about an unsupported target. pub fn my_get_random(buf: &mut [u8]) -> Result<(), Error> { unsafe { sys_rand(buf.as_mut_ptr(), buf.len()) }; Ok(()) } register_custom_getrandom!(my_get_random); /// Custom getrandom implementation for getrandom v0.3 /// /// see also: <https://docs.rs/getrandom/0.3.3/getrandom/#custom-backend> /// /// # Safety /// - `dest` must be valid for writes of `len` bytes. #[unsafe(no_mangle)] pub unsafe extern "Rust" fn __getrandom_v03_custom( dest: *mut u8, len: usize, ) -> Result<(), getrandom_v3::Error> { unsafe { sys_rand(dest, len); } Ok(()) } pub fn halt(exit_code: u32) -> ! { #[cfg(target_arch = "riscv32")] unsafe { asm!( "ecall", in ("a0") exit_code, in ("t0") 0, ); unreachable!(); } #[cfg(not(target_arch = "riscv32"))] unimplemented!( "Halt is only implemented for RiscV, not for this target, exit_code: {}", exit_code ); } #[cfg(target_arch = "riscv32")] global_asm!( " // The entry point for the program. .section .init .global _start _start: // Set the global pointer somewhere towards the start of RAM. .option push .option norelax la gp, __global_pointer$ .option pop // Set the stack pointer and frame pointer to the top of the stack. la sp, _stack_start mv fp, sp // Call Rust's main function. 
call main // If we return from main, we halt with success: // Set the ecall code HALT. li t0, 0 // Set successful exit code, ie 0: li a0, 0 ecall ", ); unsafe extern "C" { // The address of this variable is the start of the stack (growing downwards). static _stack_start: u8; } #[cfg(test)] mod tests { use crate::sys_rand; #[test] fn fills_with_random_bytes() { let mut buf = [0u8; 65]; unsafe { sys_rand(buf.as_mut_ptr(), buf.len()); } assert_ne!(buf, [0u8; 65]); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_rt/src/io.rs
ceno_rt/src/io.rs
use crate::WORD_SIZE; use core::{cell::Cell, fmt, mem::size_of, slice}; pub struct IOWriter { cursor: Cell<*mut u32>, } // Safety: Only single-threaded programs are supported. // TODO: There may be a better way to handle this. unsafe impl Sync for IOWriter {} impl IOWriter { #[cfg(debug_assertions)] const fn new(addr: u32) -> Self { assert!(addr.is_multiple_of(WORD_SIZE as u32)); IOWriter { cursor: Cell::new(addr as *mut u32), } } // TODO docs on why design mut_from_ref // or justify this convention by citing from other place #[allow(clippy::mut_from_ref)] pub fn alloc<T>(&self, count: usize) -> &mut [T] { let byte_len = count * size_of::<T>(); let word_len = byte_len.div_ceil(WORD_SIZE); let cursor = self.cursor.get(); // Bump the cursor to the next word-aligned address. self.cursor.set(unsafe { cursor.add(word_len) }); // Return a slice of the allocated memory. unsafe { slice::from_raw_parts_mut(cursor as *mut T, count) } } pub fn write(&self, msg: &[u8]) { let buf = self.alloc(msg.len()); buf.copy_from_slice(msg); } pub fn write_frame(&self, msg: &[u8]) { let word_len = msg.len().div_ceil(WORD_SIZE); let words: &mut [u32] = self.alloc(1 + word_len); words[0] = msg.len() as u32; let bytes = unsafe { slice::from_raw_parts_mut(words[1..].as_mut_ptr() as *mut u8, msg.len()) }; bytes.copy_from_slice(msg); } } impl fmt::Write for &IOWriter { fn write_str(&mut self, s: &str) -> fmt::Result { self.write_frame(s.as_bytes()); Ok(()) } } #[cfg(debug_assertions)] use crate::INFO_OUT_ADDR; #[cfg(debug_assertions)] static INFO_OUT: IOWriter = IOWriter::new(INFO_OUT_ADDR); #[cfg(debug_assertions)] pub fn info_out() -> &'static IOWriter { &INFO_OUT } mod macros { #[macro_export] macro_rules! debug_print { ($($arg:tt)*) => { #[cfg(debug_assertions)] { let _ = core::write!($crate::info_out(), $($arg)*); } }; } #[macro_export] macro_rules! debug_println { ($($arg:tt)*) => { #[cfg(debug_assertions)] { let _ = core::writeln!($crate::info_out(), $($arg)*); } }; } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_rt/src/params.rs
ceno_rt/src/params.rs
pub const WORD_SIZE: usize = 4; /// address defined in `memory.x` under RAM section. pub const INFO_OUT_ADDR: u32 = 0x2000_0000;
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_rt/src/allocator.rs
ceno_rt/src/allocator.rs
//! A bump allocator. //! Based on https://doc.rust-lang.org/std/alloc/trait.GlobalAlloc.html use core::alloc::{GlobalAlloc, Layout}; struct SimpleAllocator { next_alloc: *mut u8, } unsafe impl GlobalAlloc for SimpleAllocator { unsafe fn alloc(&self, layout: Layout) -> *mut u8 { // SAFETY: Single threaded, so nothing else can touch this while we're working. let mut heap_pos = unsafe { HEAP.next_alloc }; let align = layout.align(); // `Layout` contract forbids making a `Layout` with align=0, or align not power of 2. unsafe { core::hint::assert_unchecked(align.is_power_of_two()); core::hint::assert_unchecked(align != 0); heap_pos = heap_pos.add(heap_pos.align_offset(align)); } let ptr = heap_pos; // We don't want to wrap around, and overwrite stack etc. // (We could also return a null pointer, but only malicious programs would ever hit this.) unsafe { heap_pos = heap_pos.add(layout.size()); HEAP.next_alloc = heap_pos; } ptr } unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 { unsafe { self.alloc(layout) } } /// Never deallocate. unsafe fn dealloc(&self, _ptr: *mut u8, _layout: Layout) {} } unsafe extern "C" { /// The address of this variable is the start of the heap (growing upwards). /// /// It is defined in the linker script. static mut _sheap: u8; } #[global_allocator] static mut HEAP: SimpleAllocator = SimpleAllocator { next_alloc: &raw mut _sheap, };
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_rt/src/mmio.rs
ceno_rt/src/mmio.rs
//! Memory-mapped I/O (MMIO) functions. use ceno_serde::from_slice; use core::{cell::UnsafeCell, ptr, slice::from_raw_parts}; use serde::de::DeserializeOwned; struct RegionState { next_len_at: *const usize, next_data_at: *const u8, alignment: usize, initialized: bool, } impl RegionState { const fn new() -> Self { Self { next_len_at: ptr::null(), next_data_at: ptr::null(), alignment: 1, initialized: false, } } unsafe fn ensure_initialized(&mut self, len_start: *const usize, data_start: *const u8) { if self.initialized { return; } let mut cursor = len_start; let data_offset = unsafe { ptr::read(cursor) }; cursor = unsafe { cursor.add(1) }; self.alignment = unsafe { ptr::read(cursor) }; cursor = unsafe { cursor.add(1) }; self.next_len_at = cursor; self.next_data_at = unsafe { data_start.add(data_offset) }; self.initialized = true; } unsafe fn take_len(&mut self, len_start: *const usize, data_start: *const u8) -> usize { unsafe { self.ensure_initialized(len_start, data_start) }; let len = unsafe { ptr::read(self.next_len_at) }; self.next_len_at = unsafe { self.next_len_at.add(1) }; len } unsafe fn take_slice<'a>( &mut self, len_start: *const usize, data_start: *const u8, ) -> &'a [u8] { let len = unsafe { self.take_len(len_start, data_start) }; let ptr = self.next_data_at; let padded = len.next_multiple_of(self.alignment); self.next_data_at = unsafe { self.next_data_at.add(padded) }; unsafe { from_raw_parts(ptr, len) } } } unsafe extern "C" { static _hints_start: u8; static _lengths_of_hints_start: usize; static _pubio_start: u8; static _lengths_of_pubio_start: usize; } struct RegionStateCell(UnsafeCell<RegionState>); impl RegionStateCell { const fn new() -> Self { Self(UnsafeCell::new(RegionState::new())) } unsafe fn with_mut<R>(&self, f: impl FnOnce(&mut RegionState) -> R) -> R { f(unsafe { &mut *self.0.get() }) } } unsafe impl Sync for RegionStateCell {} static HINT_STATE: RegionStateCell = RegionStateCell::new(); static PUBIO_STATE: RegionStateCell = 
RegionStateCell::new(); pub fn read_slice<'a>() -> &'a [u8] { unsafe { HINT_STATE .with_mut(|state| state.take_slice(&raw const _lengths_of_hints_start, &_hints_start)) } } pub fn read_owned<T>() -> T where T: DeserializeOwned, { from_slice(read_slice()).expect("Deserialised value failed.") } pub fn read<T>() -> T where T: DeserializeOwned, { read_owned() } pub fn pubio_read_slice<'a>() -> &'a [u8] { unsafe { PUBIO_STATE .with_mut(|state| state.take_slice(&raw const _lengths_of_pubio_start, &_pubio_start)) } } /// Read a value from public io, deserialize it, and assert that it matches the given value. pub fn commit<T>(v: &T) where T: DeserializeOwned + core::fmt::Debug + PartialEq, { let expected: T = from_slice(pubio_read_slice()).expect("Deserialised value failed."); assert_eq!(*v, expected); }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/sha2/src/consts.rs
guest_libs/sha2/src/consts.rs
//! Copied from <https://github.com/RustCrypto/hashes/blob/82c36a428f8d6f05f3bfccdedb243e9d1f85359d/sha2/src/consts.rs> #![allow(clippy::unreadable_literal)] pub const STATE_LEN: usize = 8; pub type State256 = [u32; STATE_LEN]; /// Constants necessary for SHA-256 family of digests. pub const K32: [u32; 64] = [ 0x428a2f98, 0x71374491, 0xb5c0fbcf, 0xe9b5dba5, 0x3956c25b, 0x59f111f1, 0x923f82a4, 0xab1c5ed5, 0xd807aa98, 0x12835b01, 0x243185be, 0x550c7dc3, 0x72be5d74, 0x80deb1fe, 0x9bdc06a7, 0xc19bf174, 0xe49b69c1, 0xefbe4786, 0x0fc19dc6, 0x240ca1cc, 0x2de92c6f, 0x4a7484aa, 0x5cb0a9dc, 0x76f988da, 0x983e5152, 0xa831c66d, 0xb00327c8, 0xbf597fc7, 0xc6e00bf3, 0xd5a79147, 0x06ca6351, 0x14292967, 0x27b70a85, 0x2e1b2138, 0x4d2c6dfc, 0x53380d13, 0x650a7354, 0x766a0abb, 0x81c2c92e, 0x92722c85, 0xa2bfe8a1, 0xa81a664b, 0xc24b8b70, 0xc76c51a3, 0xd192e819, 0xd6990624, 0xf40e3585, 0x106aa070, 0x19a4c116, 0x1e376c08, 0x2748774c, 0x34b0bcb5, 0x391c0cb3, 0x4ed8aa4a, 0x5b9cca4f, 0x682e6ff3, 0x748f82ee, 0x78a5636f, 0x84c87814, 0x8cc70208, 0x90befffa, 0xa4506ceb, 0xbef9a3f7, 0xc67178f2, ]; pub const H256_224: State256 = [ 0xc1059ed8, 0x367cd507, 0x3070dd17, 0xf70e5939, 0xffc00b31, 0x68581511, 0x64f98fa7, 0xbefa4fa4, ]; pub const H256_256: State256 = [ 0x6a09e667, 0xbb67ae85, 0x3c6ef372, 0xa54ff53a, 0x510e527f, 0x9b05688c, 0x1f83d9ab, 0x5be0cd19, ];
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/sha2/src/lib.rs
guest_libs/sha2/src/lib.rs
//! Copied from <https://github.com/RustCrypto/hashes/blob/82c36a428f8d6f05f3bfccdedb243e9d1f85359d/sha2/src/lib.rs> //! under MIT license. //! //! An implementation of the [SHA-2][1] cryptographic hash algorithms. //! //! There are 6 standard algorithms specified in the SHA-2 standard: [`Sha224`], //! [`Sha256`], [`Sha512_224`], [`Sha512_256`], [`Sha384`], and [`Sha512`]. //! //! Algorithmically, there are only 2 core algorithms: SHA-256 and SHA-512. //! All other algorithms are just applications of these with different initial //! hash values, and truncated to different digest bit lengths. The first two //! algorithms in the list are based on SHA-256, while the last four are based //! on SHA-512. //! //! # Usage //! //! ## One-shot API //! //! ```rust //! use hex_literal::hex; //! use sha2::{Sha256, Digest}; //! //! let result = Sha256::digest(b"hello world"); //! assert_eq!(result[..], hex!(" //! b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 //! ")[..]); //! ``` //! //! ## Incremental API //! //! ```rust //! use hex_literal::hex; //! use sha2::{Sha256, Sha512, Digest}; //! //! // create a Sha256 object //! let mut hasher = Sha256::new(); //! //! // write input message //! hasher.update(b"hello world"); //! //! // read hash digest and consume hasher //! let result = hasher.finalize(); //! //! assert_eq!(result[..], hex!(" //! b94d27b9934d3e08a52e52d7da7dabfac484efe37a5380ee9088f7ace2efcde9 //! ")[..]); //! //! // same for Sha512 //! let mut hasher = Sha512::new(); //! hasher.update(b"hello world"); //! let result = hasher.finalize(); //! //! assert_eq!(result[..], hex!(" //! 309ecc489c12d6eb4cc40f50c902f2b4d0ed77ee511a7c7a9bcd3ca86d4cd86f //! 989dd35bc5ff499670da34255b45b0cfd830e81f605dcf7dc5542e93ae9cd76f //! ")[..]); //! ``` //! //! Also see [RustCrypto/hashes][2] readme. //! //! [1]: https://en.wikipedia.org/wiki/SHA-2 //! 
[2]: https://github.com/RustCrypto/hashes pub use digest::{self, Digest}; #[cfg(feature = "oid")] use digest::const_oid::{AssociatedOid, ObjectIdentifier}; use digest::{ consts::{U28, U32}, core_api::{CoreWrapper, CtVariableCoreWrapper}, impl_oid_carrier, }; #[rustfmt::skip] mod consts; mod core_api; mod sha256; pub use sha256::compress256; pub use core_api::Sha256VarCore; impl_oid_carrier!(OidSha256, "2.16.840.1.101.3.4.2.1"); impl_oid_carrier!(OidSha224, "2.16.840.1.101.3.4.2.4"); /// SHA-224 hasher. pub type Sha224 = CoreWrapper<CtVariableCoreWrapper<Sha256VarCore, U28, OidSha224>>; /// SHA-256 hasher. pub type Sha256 = CoreWrapper<CtVariableCoreWrapper<Sha256VarCore, U32, OidSha256>>;
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/sha2/src/core_api.rs
guest_libs/sha2/src/core_api.rs
//! Copied from <https://github.com/RustCrypto/hashes/blob/82c36a428f8d6f05f3bfccdedb243e9d1f85359d/sha2/src/core_api.rs> use super::{consts, sha256::compress256}; use core::{fmt, slice::from_ref}; use digest::{ HashMarker, InvalidOutputSize, Output, block_buffer::Eager, core_api::{ AlgorithmName, Block, BlockSizeUser, Buffer, BufferKindUser, OutputSizeUser, TruncSide, UpdateCore, VariableOutputCore, }, typenum::{U32, U64, Unsigned}, }; /// Core block-level SHA-256 hasher with variable output size. /// /// Supports initialization only for 28 and 32 byte output sizes, /// i.e. 224 and 256 bits respectively. #[derive(Clone)] pub struct Sha256VarCore { state: consts::State256, block_len: u64, } impl HashMarker for Sha256VarCore {} impl BlockSizeUser for Sha256VarCore { type BlockSize = U64; } impl BufferKindUser for Sha256VarCore { type BufferKind = Eager; } impl UpdateCore for Sha256VarCore { #[inline] fn update_blocks(&mut self, blocks: &[Block<Self>]) { self.block_len += blocks.len() as u64; compress256(&mut self.state, blocks); } } impl OutputSizeUser for Sha256VarCore { type OutputSize = U32; } impl VariableOutputCore for Sha256VarCore { const TRUNC_SIDE: TruncSide = TruncSide::Left; #[inline] fn new(output_size: usize) -> Result<Self, InvalidOutputSize> { let state = match output_size { 28 => consts::H256_224, 32 => consts::H256_256, _ => return Err(InvalidOutputSize), }; let block_len = 0; Ok(Self { state, block_len }) } #[inline] fn finalize_variable_core(&mut self, buffer: &mut Buffer<Self>, out: &mut Output<Self>) { let bs = Self::BlockSize::U64; let bit_len = 8 * (buffer.get_pos() as u64 + bs * self.block_len); buffer.len64_padding_be(bit_len, |b| compress256(&mut self.state, from_ref(b))); for (chunk, v) in out.chunks_exact_mut(4).zip(self.state.iter()) { chunk.copy_from_slice(&v.to_be_bytes()); } } } impl AlgorithmName for Sha256VarCore { #[inline] fn write_alg_name(f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("Sha256") } } impl fmt::Debug for 
Sha256VarCore { #[inline] fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.write_str("Sha256VarCore { ... }") } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/sha2/src/sha256.rs
guest_libs/sha2/src/sha256.rs
use crate::consts::K32; use ceno_syscall::syscall_sha256_extend; use digest::{consts::U64, generic_array::GenericArray}; // Copied from <https://github.com/sp1-patches/RustCrypto-hashes/blob/bf1b2575ccc1bab0f0890f4c3064bcd1d8968a1f/sha2/src/sha256.rs#L42C1-L49C2> #[inline(always)] fn to_u32s(block: &[u8; 64]) -> [u32; 16] { core::array::from_fn(|i| { let chunk = block[4 * i..][..4].try_into().unwrap(); u32::from_be_bytes(chunk) }) } // Copied from <https://github.com/sp1-patches/RustCrypto-hashes/blob/bf1b2575ccc1bab0f0890f4c3064bcd1d8968a1f/sha2/src/sha256/soft_compact.rs> fn compress_u32(state: &mut [u32; 8], block: [u32; 16]) { let [mut a, mut b, mut c, mut d, mut e, mut f, mut g, mut h] = *state; let mut w = [0; 64]; w[..16].copy_from_slice(&block); // Replace extend with a syscall syscall_sha256_extend(&mut w); for i in 0..64 { let s1 = e.rotate_right(6) ^ e.rotate_right(11) ^ e.rotate_right(25); let ch = (e & f) ^ ((!e) & g); let t1 = s1 .wrapping_add(ch) .wrapping_add(K32[i]) .wrapping_add(w[i]) .wrapping_add(h); let s0 = a.rotate_right(2) ^ a.rotate_right(13) ^ a.rotate_right(22); let maj = (a & b) ^ (a & c) ^ (b & c); let t2 = s0.wrapping_add(maj); h = g; g = f; f = e; e = d.wrapping_add(t1); d = c; c = b; b = a; a = t1.wrapping_add(t2); } state[0] = state[0].wrapping_add(a); state[1] = state[1].wrapping_add(b); state[2] = state[2].wrapping_add(c); state[3] = state[3].wrapping_add(d); state[4] = state[4].wrapping_add(e); state[5] = state[5].wrapping_add(f); state[6] = state[6].wrapping_add(g); state[7] = state[7].wrapping_add(h); } fn compress(state: &mut [u32; 8], blocks: &[[u8; 64]]) { for block in blocks.iter() { compress_u32(state, to_u32s(block)); } } /// Raw SHA-256 compression function. /// /// This is a low-level "hazmat" API which provides direct access to the core /// functionality of SHA-256. 
pub fn compress256(state: &mut [u32; 8], blocks: &[GenericArray<u8, U64>]) { // SAFETY: GenericArray<u8, U64> and [u8; 64] have // exactly the same memory layout let p = blocks.as_ptr() as *const [u8; 64]; let blocks = unsafe { core::slice::from_raw_parts(p, blocks.len()) }; compress(state, blocks) }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/crypto/src/lib.rs
guest_libs/crypto/src/lib.rs
#![deny(missing_docs)] //! Ceno zkVM guest implementations for the revm precompile crypto interface. //! //! This crate is revm version ir-relevant, unless the signature of the precompiles change. /// BN254 elliptic curve pub mod bn254; /// secp256k1 pub mod secp256k1; /// secp256r1 pub mod secp256r1; /// sha2 hashing pub use ceno_sha2; mod macros; /// Error returned when trying to install the crypto provider more than once. #[derive(Debug, thiserror::Error)] #[error("Crypto provider has already been installed")] pub struct CenoCryptoInstallError; /// Errors that can occur during cryptographic operations. #[derive(Debug, thiserror::Error)] #[non_exhaustive] pub enum CenoCryptoError { /// Bn254 errors #[error("Bn254 field point is not a member of the field")] Bn254FieldPointNotAMember, /// Bn254 affine g failed to create #[error("Bn254 affine G failed to create")] Bn254AffineGFailedToCreate, /// Bn254 pair length #[error("Bn254 pair length error")] Bn254PairLength, /// Sepk256k1 ecrecover error #[error("Secp256k1 ecrecover error")] Secp256k1Ecrecover(#[from] k256::ecdsa::Error), } #[cfg(test)] mod test { use super::*; #[test] fn test_install() { { ceno_crypto!(name = MyCustomName); } { use revm_precompile as another_crate; ceno_crypto!(revm_precompile = another_crate); } { use alloy_consensus as another_crate; ceno_crypto!(alloy_consensus = another_crate); } { use alloy_primitives::Address as AnotherAddress; ceno_crypto!(address_type = AnotherAddress); } { use alloy_primitives::Address as AnotherAddress; ceno_crypto!(address_type = AnotherAddress); } { use alloy_consensus as another_crate; use alloy_primitives::Address as AnotherAddress; ceno_crypto!( address_type = AnotherAddress, alloy_consensus = another_crate ); } { use alloy_consensus as another_crate; use alloy_primitives::Address as AnotherAddress; ceno_crypto!( alloy_consensus = another_crate, address_type = AnotherAddress ); } ceno_crypto!(); CenoCrypto::install(); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/crypto/src/bn254.rs
guest_libs/crypto/src/bn254.rs
//! Copied from <https://github.com/bluealloy/revm/blob/10ff66da1576a3532db657d7b953abcd59ec44a3/crates/precompile/src/bn254/substrate.rs>
//! under MIT license.

use crate::CenoCryptoError;
use bn::{AffineG1, AffineG2, Fq, Fq2, G1, G2, Group, Gt};
use std::vec::Vec;

/// FQ_LEN specifies the number of bytes needed to represent an
/// Fq element. This is an element in the base field of BN254.
///
/// Note: The base field is used to define G1 and G2 elements.
const FQ_LEN: usize = 32;

/// SCALAR_LEN specifies the number of bytes needed to represent an Fr element.
/// This is an element in the scalar field of BN254.
const SCALAR_LEN: usize = 32;

/// FQ2_LEN specifies the number of bytes needed to represent an
/// Fq^2 element.
///
/// Note: This is the quadratic extension of Fq, and by definition
/// means we need 2 Fq elements.
const FQ2_LEN: usize = 2 * FQ_LEN;

/// G1_LEN specifies the number of bytes needed to represent a G1 element.
///
/// Note: A G1 element contains 2 Fq elements.
const G1_LEN: usize = 2 * FQ_LEN;

/// G2_LEN specifies the number of bytes needed to represent a G2 element.
///
/// Note: A G2 element contains 2 Fq^2 elements.
const G2_LEN: usize = 2 * FQ2_LEN;

/// Input length for the add operation.
/// `ADD` takes two uncompressed G1 points (64 bytes each).
pub const ADD_INPUT_LEN: usize = 2 * G1_LEN;

/// Input length for the multiplication operation.
/// `MUL` takes an uncompressed G1 point (64 bytes) and scalar (32 bytes).
pub const MUL_INPUT_LEN: usize = G1_LEN + SCALAR_LEN;

/// Pair element length.
/// `PAIR` elements are composed of an uncompressed G1 point (64 bytes) and an uncompressed G2 point
/// (128 bytes).
pub const PAIR_ELEMENT_LEN: usize = G1_LEN + G2_LEN;

/// Reads a single `Fq` field element from the input slice.
///
/// Takes a byte slice and attempts to interpret the first 32 bytes as an
/// elliptic curve field element. Returns an error if the bytes do not form
/// a valid field element.
///
/// # Panics
///
/// Panics if the input is not at least 32 bytes long.
#[inline]
fn read_fq(input: &[u8]) -> Result<Fq, CenoCryptoError> {
    Fq::from_slice(&input[..FQ_LEN]).map_err(|_| CenoCryptoError::Bn254FieldPointNotAMember)
}

/// Reads a Fq2 (quadratic extension field element) from the input slice.
///
/// Parses two consecutive Fq field elements as the real and imaginary parts
/// of an Fq2 element.
/// The second component is parsed before the first, i.e. if we represent an
/// element in Fq2 as (x, y) -- `y` is parsed before `x`.
///
/// # Panics
///
/// Panics if the input is not at least 64 bytes long.
#[inline]
fn read_fq2(input: &[u8]) -> Result<Fq2, CenoCryptoError> {
    // The first 32 bytes become the second constructor argument (`y`),
    // the next 32 bytes the first (`x`), as documented above.
    let y = read_fq(&input[..FQ_LEN])?;
    let x = read_fq(&input[FQ_LEN..2 * FQ_LEN])?;
    Ok(Fq2::new(x, y))
}

/// Creates a new `G1` point from the given `x` and `y` coordinates.
///
/// Constructs a point on the G1 curve from its affine coordinates.
///
/// Note: The point at infinity which is represented as (0,0) is
/// handled specifically because `AffineG1` is not capable of
/// representing such a point.
/// In particular, when we convert from `AffineG1` to `G1`, the point
/// will be (0,0,1) instead of (0,1,0)
#[inline]
fn new_g1_point(px: Fq, py: Fq) -> Result<G1, CenoCryptoError> {
    if px == Fq::zero() && py == Fq::zero() {
        Ok(G1::zero())
    } else {
        // `AffineG1::new` also performs the on-curve check.
        AffineG1::new(px, py)
            .map(Into::into)
            .map_err(|_| CenoCryptoError::Bn254AffineGFailedToCreate)
    }
}

/// Creates a new `G2` point from the given Fq2 coordinates.
///
/// G2 points in BN254 are defined over a quadratic extension field Fq2.
/// This function takes two Fq2 elements representing the x and y coordinates
/// and creates a G2 point.
///
/// Note: The point at infinity which is represented as (0,0) is
/// handled specifically because `AffineG2` is not capable of
/// representing such a point.
/// In particular, when we convert from `AffineG2` to `G2`, the point
/// will be (0,0,1) instead of (0,1,0)
#[inline]
fn new_g2_point(x: Fq2, y: Fq2) -> Result<G2, CenoCryptoError> {
    let point = if x.is_zero() && y.is_zero() {
        G2::zero()
    } else {
        // `AffineG2::new` also performs the on-curve/subgroup validation.
        G2::from(AffineG2::new(x, y).map_err(|_| CenoCryptoError::Bn254AffineGFailedToCreate)?)
    };

    Ok(point)
}

/// Reads a G1 point from the input slice.
///
/// Parses a G1 point from a byte slice by reading two consecutive field elements
/// representing the x and y coordinates.
///
/// # Panics
///
/// Panics if the input is not at least 64 bytes long.
#[inline]
pub fn read_g1_point(input: &[u8]) -> Result<G1, CenoCryptoError> {
    let px = read_fq(&input[0..FQ_LEN])?;
    let py = read_fq(&input[FQ_LEN..2 * FQ_LEN])?;
    new_g1_point(px, py)
}

/// Encodes a G1 point into a byte array.
///
/// Converts a G1 point in Jacobian coordinates to affine coordinates and
/// serializes the x and y coordinates as big-endian byte arrays.
///
/// Note: If the point is the point at infinity, this function returns
/// all zeroes.
#[inline]
pub fn encode_g1_point(point: G1) -> [u8; G1_LEN] {
    let mut output = [0u8; G1_LEN];

    if let Some(point_affine) = AffineG1::from_jacobian(point) {
        // The destination slices are exactly FQ_LEN bytes, so
        // `to_big_endian` cannot fail here.
        point_affine
            .x()
            .to_big_endian(&mut output[..FQ_LEN])
            .unwrap();
        point_affine
            .y()
            .to_big_endian(&mut output[FQ_LEN..])
            .unwrap();
    }

    output
}

/// Reads a G2 point from the input slice.
///
/// Parses a G2 point from a byte slice by reading four consecutive Fq field elements
/// representing the two Fq2 coordinates (x and y) of the G2 point.
///
/// # Panics
///
/// Panics if the input is not at least 128 bytes long.
#[inline]
pub fn read_g2_point(input: &[u8]) -> Result<G2, CenoCryptoError> {
    let ba = read_fq2(&input[0..FQ2_LEN])?;
    let bb = read_fq2(&input[FQ2_LEN..2 * FQ2_LEN])?;
    new_g2_point(ba, bb)
}

/// Reads a scalar from the input slice
///
/// Note: The scalar does not need to be canonical.
///
/// # Panics
///
/// If `input.len()` is not equal to [`SCALAR_LEN`].
#[inline]
pub fn read_scalar(input: &[u8]) -> bn::Fr {
    assert_eq!(
        input.len(),
        SCALAR_LEN,
        "unexpected scalar length. got {}, expected {SCALAR_LEN}",
        input.len()
    );
    // `Fr::from_slice` can only fail when the length is not `SCALAR_LEN`.
    bn::Fr::from_slice(input).unwrap()
}

/// Performs point addition on two G1 points.
#[inline]
pub fn g1_point_add(p1_bytes: &[u8], p2_bytes: &[u8]) -> Result<[u8; 64], CenoCryptoError> {
    let p1 = read_g1_point(p1_bytes)?;
    let p2 = read_g1_point(p2_bytes)?;

    let result = p1 + p2;

    Ok(encode_g1_point(result))
}

/// Performs a G1 scalar multiplication.
#[inline]
pub fn g1_point_mul(point_bytes: &[u8], fr_bytes: &[u8]) -> Result<[u8; 64], CenoCryptoError> {
    let p = read_g1_point(point_bytes)?;
    let fr = read_scalar(fr_bytes);

    let result = p * fr;

    Ok(encode_g1_point(result))
}

/// pairing_check performs a pairing check on a list of G1 and G2 point pairs and
/// returns true if the result is equal to the identity element.
///
/// Note: If the input is empty, this function returns true.
/// This is different to EIP2537 which disallows the empty input.
#[inline]
pub fn pairing_check(pairs: &[(&[u8], &[u8])]) -> Result<bool, CenoCryptoError> {
    let mut parsed_pairs = Vec::with_capacity(pairs.len());
    for (g1_bytes, g2_bytes) in pairs {
        let g1 = read_g1_point(g1_bytes)?;
        let g2 = read_g2_point(g2_bytes)?;

        // Skip pairs where either point is at infinity
        // (such a pair contributes only the identity factor to the product).
        if !g1.is_zero() && !g2.is_zero() {
            parsed_pairs.push((g1, g2));
        }
    }

    if parsed_pairs.is_empty() {
        return Ok(true);
    }

    Ok(bn::pairing_batch(&parsed_pairs) == Gt::one())
}
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/crypto/src/secp256r1.rs
guest_libs/crypto/src/secp256r1.rs
/// secp256r1 (P-256) signature verification.
///
/// Stub: no guest implementation exists yet; all arguments are ignored.
///
/// # Panics
///
/// Always panics with `unimplemented!()`.
#[inline]
pub fn secp256r1_verify_signature(_msg: &[u8; 32], _sig: &[u8; 64], _pk: &[u8; 64]) -> bool {
    unimplemented!()
}
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/crypto/src/macros.rs
guest_libs/crypto/src/macros.rs
/// Declare a crypto operations provider using the Ceno zkVM guest implementations.
///
/// Accepts optional `key = value` overrides (`revm_precompile`, `alloy_consensus`,
/// `address_type`, `name`) and expands to a unit struct implementing both the
/// `revm_precompile::Crypto` trait and the `alloy_consensus` `CryptoProvider` trait.
/// Options are parsed with an incremental (`@parse`) accumulator: each rule
/// replaces one field of the brace-delimited state and recurses on the rest.
#[macro_export]
macro_rules! ceno_crypto {
    // Entry point: seed the accumulator with the default values, then feed
    // the user-supplied overrides through the `@parse` rules below.
    ( $( $key:ident = $val:tt ),* $(,)? ) => {
        // default values
        ceno_crypto!(@parse
            {
                revm_precompile: ::revm_precompile,
                alloy_consensus: ::alloy_consensus,
                address_type: ::alloy_primitives::Address,
                name: CenoCrypto
            }
            $( $key = $val, )*
        );
    };

    // parse optional args
    // Each of the following rules consumes one `key = value` pair, swaps the
    // corresponding slot in the state braces, and recurses on the remainder.
    (@parse
        { revm_precompile: $r:path, alloy_consensus: $ac:path, address_type: $addr:path, name: $n:tt }
        revm_precompile = $nr:path $(, $($rest:tt)*)?
    ) => {
        ceno_crypto!(@parse
            { revm_precompile: $nr, alloy_consensus: $ac, address_type: $addr, name: $n }
            $($($rest)*)?
        );
    };
    (@parse
        { revm_precompile: $r:path, alloy_consensus: $ac:path, address_type: $addr:path, name: $n:tt }
        alloy_consensus = $nac:path $(, $($rest:tt)*)?
    ) => {
        ceno_crypto!(@parse
            { revm_precompile: $r, alloy_consensus: $nac, address_type: $addr, name: $n }
            $($($rest)*)?
        );
    };
    (@parse
        { revm_precompile: $r:path, alloy_consensus: $ac:path, address_type: $addr:path, name: $n:tt }
        address_type = $na:path $(, $($rest:tt)*)?
    ) => {
        ceno_crypto!(@parse
            { revm_precompile: $r, alloy_consensus: $ac, address_type: $na, name: $n }
            $($($rest)*)?
        );
    };
    (@parse
        { revm_precompile: $r:path, alloy_consensus: $ac:path, address_type: $addr:path, name: $n:tt }
        name = $nn:ident $(, $($rest:tt)*)?
    ) => {
        ceno_crypto!(@parse
            { revm_precompile: $r, alloy_consensus: $ac, address_type: $addr, name: $nn }
            $($($rest)*)?
        );
    };

    // unknown key
    (@parse { $($state:tt)* } $bad:ident = $($rest:tt)*) => {
        compile_error!(concat!("unknown option: ", stringify!($bad)));
    };

    // finish parsing
    // All overrides consumed: emit the provider struct and trait impls.
    (@parse
        { revm_precompile: $r:path, alloy_consensus: $ac:path, address_type: $addr:path, name: $n:ident }
        $(,)?
    ) => {
        use $ac as __ac;
        use $r as __rp;

        /// Ceno zkVM crypto operations provider
        #[derive(Debug)]
        #[allow(dead_code)]
        pub struct $n;

        impl $n {
            /// Install this as the global crypto provider.
            ///
            /// # Panics
            ///
            /// Panics if a crypto provider has already been installed.
            #[allow(dead_code)]
            pub fn install() {
                Self::try_install().unwrap();
            }

            /// Install this as the global crypto provider.
            #[allow(dead_code)]
            pub fn try_install() -> Result<(), $crate::CenoCryptoInstallError> {
                // Register with both revm-precompile and alloy-consensus;
                // fail if either registration was rejected.
                let revm_install = __rp::install_crypto(Self);
                let alloy_install =
                    __ac::crypto::install_default_provider(::std::sync::Arc::new(Self)).is_ok();
                if !(revm_install && alloy_install) {
                    Err($crate::CenoCryptoInstallError)
                } else {
                    Ok(())
                }
            }
        }

        // Translate crate-local crypto errors into revm precompile errors,
        // mapping the known bn254 variants one-to-one and stringifying the rest.
        #[allow(dead_code)]
        fn __map_err(e: $crate::CenoCryptoError) -> __rp::PrecompileError {
            match e {
                $crate::CenoCryptoError::Bn254FieldPointNotAMember => {
                    __rp::PrecompileError::Bn254FieldPointNotAMember
                }
                $crate::CenoCryptoError::Bn254AffineGFailedToCreate => {
                    __rp::PrecompileError::Bn254AffineGFailedToCreate
                }
                $crate::CenoCryptoError::Bn254PairLength => __rp::PrecompileError::Bn254PairLength,
                _ => __rp::PrecompileError::Other(e.to_string()),
            }
        }

        // Each method delegates to the corresponding guest implementation in
        // this crate (`bn254`, `secp256k1`, `secp256r1`, `ceno_sha2`).
        impl __rp::Crypto for $n {
            #[inline]
            fn sha256(&self, input: &[u8]) -> [u8; 32] {
                use $crate::ceno_sha2::{Digest, Sha256};
                let output = Sha256::digest(input);
                output.into()
            }

            #[inline]
            fn bn254_g1_add(
                &self,
                p1: &[u8],
                p2: &[u8],
            ) -> Result<[u8; 64], __rp::PrecompileError> {
                $crate::bn254::g1_point_add(p1, p2).map_err(__map_err)
            }

            #[inline]
            fn bn254_g1_mul(
                &self,
                point: &[u8],
                scalar: &[u8],
            ) -> Result<[u8; 64], __rp::PrecompileError> {
                $crate::bn254::g1_point_mul(point, scalar).map_err(__map_err)
            }

            #[inline]
            fn bn254_pairing_check(
                &self,
                pairs: &[(&[u8], &[u8])],
            ) -> Result<bool, __rp::PrecompileError> {
                $crate::bn254::pairing_check(pairs).map_err(__map_err)
            }

            #[inline]
            fn secp256k1_ecrecover(
                &self,
                sig: &[u8; 64],
                recid: u8,
                msg: &[u8; 32],
            ) -> Result<[u8; 32], __rp::PrecompileError> {
                $crate::secp256k1::secp256k1_ecrecover(sig, recid, msg).map_err(__map_err)
            }

            #[inline]
            fn secp256r1_verify_signature(
                &self,
                msg: &[u8; 32],
                sig: &[u8; 64],
                pk: &[u8; 64],
            ) -> bool {
                $crate::secp256r1::secp256r1_verify_signature(msg, sig, pk)
            }
        }

        impl __ac::crypto::backend::CryptoProvider for $n {
            #[inline]
            fn recover_signer_unchecked(
                &self,
                sig: &[u8; 65],
                msg: &[u8; 32],
            ) -> Result<$addr, __ac::crypto::RecoveryError> {
                use $addr as Address;
                // Split the 65-byte signature into `r || s` (64 bytes) plus the
                // trailing recovery id byte, recover the keccak'd key, and take
                // the low 20 bytes as the address.
                $crate::secp256k1::secp256k1_ecrecover(
                    (&sig[..64]).try_into().unwrap(),
                    sig[64],
                    msg,
                )
                .map(|res| Address::from_slice(&res[12..]))
                .map_err(__ac::crypto::RecoveryError::from_source)
            }
        }
    };
}
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/crypto/src/secp256k1.rs
guest_libs/crypto/src/secp256k1.rs
use crate::CenoCryptoError;
use ceno_keccak::{Hasher, Keccak};
#[cfg(feature = "profiling")]
use ceno_syscall::syscall_phantom_log_pc_cycle;
use k256::ecdsa::{RecoveryId, Signature, VerifyingKey};

/// secp256k1 ECDSA signature recovery.
///
/// Recovers the public key from a 64-byte `r || s` signature, a recovery id,
/// and a 32-byte message prehash, then returns the keccak256 hash of the
/// uncompressed public key with its first 12 bytes zeroed — i.e. a 32-byte
/// value whose low 20 bytes are the Ethereum address.
///
/// # Errors
///
/// Returns an error if the signature bytes cannot be parsed or if key
/// recovery fails (both converted from `k256::ecdsa::Error`).
///
/// # Panics
///
/// Panics if `recid` (after a possible low-s normalization flip) is not a
/// valid recovery id. NOTE(review): callers are presumably expected to pass a
/// pre-validated recovery byte — confirm at the call sites.
#[inline]
pub fn secp256k1_ecrecover(
    sig: &[u8; 64],
    mut recid: u8,
    msg: &[u8; 32],
) -> Result<[u8; 32], CenoCryptoError> {
    #[cfg(feature = "profiling")]
    syscall_phantom_log_pc_cycle("secp256k1_ecrecover start");

    // Copied from <https://github.com/alloy-rs/alloy/blob/8e9be40eb0e7c27618db1316989f77f1cfe3accb/crates/consensus/src/crypto.rs#L311-L334>
    // parse signature
    let mut sig = Signature::from_slice(sig.as_slice())?;

    // normalize signature and flip recovery id if needed.
    // `normalize_s` returns Some only when `s` was in the high half of the
    // order; negating `s` mirrors the point, so the recovery parity flips too.
    if let Some(sig_normalized) = sig.normalize_s() {
        sig = sig_normalized;
        recid ^= 1;
    }
    let recid = RecoveryId::from_byte(recid).expect("recovery ID is valid");

    #[cfg(feature = "profiling")]
    syscall_phantom_log_pc_cycle("recover_from_prehash start");

    // recover key
    let recovered_key = VerifyingKey::recover_from_prehash(&msg[..], &sig, recid)?;

    #[cfg(feature = "profiling")]
    syscall_phantom_log_pc_cycle("recover_from_prehash end");

    // hash it
    // Hash the uncompressed SEC1 encoding, skipping the leading tag byte.
    let mut hasher = Keccak::v256();
    let mut hash = [0u8; 32];
    hasher.update(
        &recovered_key
            .to_encoded_point(/* compress = */ false)
            .as_bytes()[1..],
    );
    hasher.finalize(&mut hash);

    // truncate to 20 bytes
    hash[..12].fill(0);

    #[cfg(feature = "profiling")]
    syscall_phantom_log_pc_cycle("secp256k1_ecrecover end");

    Ok(hash)
}
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/keccak/src/lib.rs
guest_libs/keccak/src/lib.rs
//! Ceno Keccak zkVM Guest Library #![no_std] #![deny(missing_docs)] extern crate alloc; /// Re-export the `tiny_keccak` crate's `Hasher` trait. pub use tiny_keccak::{self, Hasher}; mod vendor; pub use vendor::keccak::Keccak; pub use ceno_syscall::syscall_keccak_permute as keccakf; mod keccakf { use crate::{ keccakf, vendor::{Buffer, Permutation}, }; pub struct KeccakF; impl Permutation for KeccakF { fn execute(buffer: &mut Buffer) { keccakf(buffer.words()); } } } /// Native hook for keccak256 for use with `alloy-primitives` "native-keccak" feature. /// /// # Safety /// /// The VM accepts the preimage by pointer and length, and writes the /// 32-byte hash. /// - `bytes` must point to an input buffer at least `len` long. /// - `output` must point to a buffer that is at least 32-bytes long. /// /// [`keccak256`]: https://en.wikipedia.org/wiki/SHA-3 /// [`sha3`]: https://docs.rs/sha3/latest/sha3/ /// [`tiny_keccak`]: https://docs.rs/tiny-keccak/latest/tiny_keccak/ #[unsafe(no_mangle)] pub unsafe extern "C" fn native_keccak256(bytes: *const u8, len: usize, output: *mut u8) { use crate::{Hasher, Keccak}; unsafe { let input = core::slice::from_raw_parts(bytes, len); let out = core::slice::from_raw_parts_mut(output, 32); let mut hasher = Keccak::v256(); hasher.update(input); hasher.finalize(out); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/keccak/src/vendor.rs
guest_libs/keccak/src/vendor.rs
//! Private types and traits copied from the `tiny-keccak`.

use ceno_syscall::KECCAK_STATE_WORDS;

pub mod keccak;

/// The Keccak sponge state: `KECCAK_STATE_WORDS` 64-bit words.
#[derive(Default, Clone)]
pub struct Buffer([u64; KECCAK_STATE_WORDS]);

impl Buffer {
    /// Mutable access to the raw state words (handed to the permutation).
    pub fn words(&mut self) -> &mut [u64; KECCAK_STATE_WORDS] {
        &mut self.0
    }

    /// Run `f` over the state bytes `[offset, offset + len)`.
    #[inline]
    pub fn execute<F: FnOnce(&mut [u8])>(&mut self, offset: usize, len: usize, f: F) {
        // # Safety: little-endian target
        // (reinterprets the word array as its underlying bytes; same size,
        // and byte order matches the spec only on little-endian targets)
        let buffer: &mut [u8; KECCAK_STATE_WORDS * 8] = unsafe { core::mem::transmute(&mut self.0) };
        f(&mut buffer[offset..][..len]);
    }

    /// Copy `len` state bytes starting at `offset` into `dst` (squeeze step).
    pub fn setout(&mut self, dst: &mut [u8], offset: usize, len: usize) {
        self.execute(offset, len, |buffer| dst[..len].copy_from_slice(buffer));
    }

    /// XOR `len` bytes of `src` into the state at `offset` (absorb step).
    pub fn xorin(&mut self, src: &[u8], offset: usize, len: usize) {
        self.execute(offset, len, |dst| {
            assert!(dst.len() <= src.len());
            let len = dst.len();
            let mut dst_ptr = dst.as_mut_ptr();
            let mut src_ptr = src.as_ptr();
            // Byte-wise XOR through raw pointers; bounds guaranteed by the
            // assert above and by `execute`'s slicing.
            for _ in 0..len {
                unsafe {
                    *dst_ptr ^= *src_ptr;
                    src_ptr = src_ptr.offset(1);
                    dst_ptr = dst_ptr.offset(1);
                }
            }
        });
    }

    /// Apply padding: XOR the delimiter byte at `offset` and XOR 0x80 into
    /// the final byte of the rate block.
    pub fn pad(&mut self, offset: usize, delim: u8, rate: usize) {
        self.execute(offset, 1, |buff| buff[0] ^= delim);
        self.execute(rate - 1, 1, |buff| buff[0] ^= 0x80);
    }
}

/// A Keccak permutation applied to the sponge state.
pub trait Permutation {
    fn execute(a: &mut Buffer);
}

/// Current sponge direction: absorbing input vs. squeezing output.
#[derive(Clone, Copy)]
pub enum Mode {
    Absorbing,
    Squeezing,
}

/// Generic sponge-construction state, parameterized by the permutation `P`.
pub struct KeccakState<P> {
    buffer: Buffer,
    // Byte position inside the current rate block.
    offset: usize,
    // Rate in bytes: how many state bytes are absorbed/squeezed per permutation.
    rate: usize,
    // Domain-separation/padding delimiter byte (e.g. 0x01 for Keccak).
    delim: u8,
    mode: Mode,
    permutation: core::marker::PhantomData<P>,
}

// Manual Clone: `P` need not be `Clone` since it is only a marker.
impl<P> Clone for KeccakState<P> {
    fn clone(&self) -> Self {
        KeccakState {
            buffer: self.buffer.clone(),
            offset: self.offset,
            rate: self.rate,
            delim: self.delim,
            mode: self.mode,
            permutation: core::marker::PhantomData,
        }
    }
}

impl<P: Permutation> KeccakState<P> {
    fn new(rate: usize, delim: u8) -> Self {
        assert!(rate != 0, "rate cannot be equal 0");
        KeccakState {
            buffer: Buffer::default(),
            offset: 0,
            rate,
            delim,
            mode: Mode::Absorbing,
            permutation: core::marker::PhantomData,
        }
    }

    /// Run the permutation over the state.
    fn keccak(&mut self) {
        P::execute(&mut self.buffer);
    }

    /// Absorb `input`, permuting each time a full rate block has been XORed in.
    fn update(&mut self, input: &[u8]) {
        // Switching from squeezing back to absorbing starts a fresh block.
        if let Mode::Squeezing = self.mode {
            self.mode = Mode::Absorbing;
            self.fill_block();
        }

        // first foldp
        // Absorb whole rate blocks; the first block may be partially filled
        // already, hence the reduced initial `rate`/nonzero `offset`.
        let mut ip = 0;
        let mut l = input.len();
        let mut rate = self.rate - self.offset;
        let mut offset = self.offset;
        while l >= rate {
            self.buffer.xorin(&input[ip..], offset, rate);
            self.keccak();
            ip += rate;
            l -= rate;
            rate = self.rate;
            offset = 0;
        }

        // Absorb the trailing partial block without permuting.
        self.buffer.xorin(&input[ip..], offset, l);
        self.offset = offset + l;
    }

    /// XOR the padding into the current block.
    fn pad(&mut self) {
        self.buffer.pad(self.offset, self.delim, self.rate);
    }

    /// Squeeze bytes into `output`, permuting whenever a rate block is exhausted.
    fn squeeze(&mut self, output: &mut [u8]) {
        // First squeeze after absorbing: pad the message and permute once.
        if let Mode::Absorbing = self.mode {
            self.mode = Mode::Squeezing;
            self.pad();
            self.fill_block();
        }

        // second foldp
        let mut op = 0;
        let mut l = output.len();
        let mut rate = self.rate - self.offset;
        let mut offset = self.offset;
        while l >= rate {
            self.buffer.setout(&mut output[op..], offset, rate);
            self.keccak();
            op += rate;
            l -= rate;
            rate = self.rate;
            offset = 0;
        }

        // Copy out the trailing partial block without permuting.
        self.buffer.setout(&mut output[op..], offset, l);
        self.offset = offset + l;
    }

    /// Consume the state and squeeze the final digest into `output`.
    fn finalize(mut self, output: &mut [u8]) {
        self.squeeze(output);
    }

    /// Permute and reset the block offset to the start of a fresh block.
    fn fill_block(&mut self) {
        self.keccak();
        self.offset = 0;
    }
}

/// Rate in bytes for a Keccak variant with `bits` bits of security:
/// 200 state bytes minus the capacity `2 * (bits / 8)`.
pub const fn bits_to_rate(bits: usize) -> usize {
    200 - bits / 4
}
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/guest_libs/keccak/src/vendor/keccak.rs
guest_libs/keccak/src/vendor/keccak.rs
//! The `Keccak` hash functions. use crate::{ Hasher, keccakf::KeccakF, vendor::{KeccakState, bits_to_rate}, }; /// The `Keccak` hash functions defined in [`Keccak SHA3 submission`]. /// /// # Usage /// /// ```toml /// [dependencies] /// tiny-keccak = { version = "2.0.0", features = ["keccak"] } /// ``` /// /// [`Keccak SHA3 submission`]: https://keccak.team/files/Keccak-submission-3.pdf #[derive(Clone)] pub struct Keccak { state: KeccakState<KeccakF>, } impl Keccak { const DELIM: u8 = 0x01; /// Creates new [`Keccak`] hasher with a security level of 224 bits. /// /// [`Keccak`]: struct.Keccak.html pub fn v224() -> Keccak { Keccak::new(224) } /// Creates new [`Keccak`] hasher with a security level of 256 bits. /// /// [`Keccak`]: struct.Keccak.html pub fn v256() -> Keccak { Keccak::new(256) } /// Creates new [`Keccak`] hasher with a security level of 384 bits. /// /// [`Keccak`]: struct.Keccak.html pub fn v384() -> Keccak { Keccak::new(384) } /// Creates new [`Keccak`] hasher with a security level of 512 bits. /// /// [`Keccak`]: struct.Keccak.html pub fn v512() -> Keccak { Keccak::new(512) } fn new(bits: usize) -> Keccak { Keccak { state: KeccakState::new(bits_to_rate(bits), Self::DELIM), } } } impl Hasher for Keccak { /// Absorb additional input. Can be called multiple times. /// /// # Example /// /// ``` /// # use ceno_keccak::{Hasher, Keccak}; /// # /// # fn main() { /// # let mut keccak = Keccak::v256(); /// keccak.update(b"hello"); /// keccak.update(b" world"); /// # } /// ``` fn update(&mut self, input: &[u8]) { self.state.update(input); } /// Pad and squeeze the state to the output. /// /// # Example /// /// ``` /// # use ceno_keccak::{Hasher, Keccak}; /// # /// # fn main() { /// # let keccak = Keccak::v256(); /// # let mut output = [0u8; 32]; /// keccak.finalize(&mut output); /// # } /// # /// ``` fn finalize(self, output: &mut [u8]) { self.state.finalize(output); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/derive/src/lib.rs
derive/src/lib.rs
// The `aligned_borrow_derive` macro is taken from valida-xyz/valida under MIT license
//
// The MIT License (MIT)
//
// Copyright (c) 2023 The Valida Authors
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
extern crate proc_macro;

use proc_macro::TokenStream;
use quote::quote;
use syn::{DeriveInput, GenericParam, parse_macro_input};

/// Derives `core::borrow::Borrow`/`BorrowMut` impls that reinterpret a
/// `[T]` slice (exactly one struct's worth of elements) as a reference to
/// the annotated struct via `slice::align_to`.
///
/// The struct's first generic parameter must be a type parameter (the
/// element type `T`); this derive panics at expansion time otherwise.
#[proc_macro_derive(AlignedBorrow)]
pub fn aligned_borrow_derive(input: TokenStream) -> TokenStream {
    let ast = parse_macro_input!(input as DeriveInput);
    let name = &ast.ident;

    // Get first generic which must be type (ex. `T`) for input <T, N: NumLimbs, const M: usize>
    let type_generic = ast
        .generics
        .params
        .iter()
        .map(|param| match param {
            GenericParam::Type(type_param) => &type_param.ident,
            _ => panic!("Expected first generic to be a type"),
        })
        .next()
        .expect("Expected at least one generic");

    // Get generics after the first (ex. `N: NumLimbs, const M: usize`)
    // We need this because when we assert the size, we want to substitute u8 for T.
    let non_first_generics = ast
        .generics
        .params
        .iter()
        .skip(1)
        .filter_map(|param| match param {
            GenericParam::Type(type_param) => Some(&type_param.ident),
            GenericParam::Const(const_param) => Some(&const_param.ident),
            _ => None, // lifetimes carry no ident we need here
        })
        .collect::<Vec<_>>();

    // Get impl generics (`<T, N: NumLimbs, const M: usize>`), type generics (`<T, N>`), where
    // clause (`where T: Clone`)
    let (impl_generics, type_generics, where_clause) = ast.generics.split_for_impl();

    let methods = quote! {
        impl #impl_generics core::borrow::Borrow<#name #type_generics> for [#type_generic] #where_clause {
            fn borrow(&self) -> &#name #type_generics {
                // Instantiating with `u8` makes `size_of` count elements, so this
                // checks the slice holds exactly one struct's worth of `T`s.
                debug_assert_eq!(self.len(), std::mem::size_of::<#name<u8 #(, #non_first_generics)*>>());
                let (prefix, shorts, _suffix) = unsafe { self.align_to::<#name #type_generics>() };
                debug_assert!(prefix.is_empty(), "Alignment should match");
                debug_assert_eq!(shorts.len(), 1);
                &shorts[0]
            }
        }

        impl #impl_generics core::borrow::BorrowMut<#name #type_generics> for [#type_generic] #where_clause {
            fn borrow_mut(&mut self) -> &mut #name #type_generics {
                debug_assert_eq!(self.len(), std::mem::size_of::<#name<u8 #(, #non_first_generics)*>>());
                let (prefix, shorts, _suffix) = unsafe { self.align_to_mut::<#name #type_generics>() };
                debug_assert!(prefix.is_empty(), "Alignment should match");
                debug_assert_eq!(shorts.len(), 1);
                &mut shorts[0]
            }
        }
    };

    TokenStream::from(methods)
}
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_serde/src/lib.rs
ceno_serde/src/lib.rs
#![no_std] //! Word-addressed serialization utilities extracted from OpenVM. extern crate alloc; mod deserializer; mod err; mod serializer; pub use deserializer::{Deserializer, WordRead, from_slice}; pub use err::{Error, Result}; pub use serializer::{Serializer, WordWrite, to_vec, to_vec_with_capacity}; pub(crate) const WORD_SIZE: usize = 4; #[inline] pub(crate) const fn align_up(value: usize, alignment: usize) -> usize { if alignment == 0 { value } else { let mask = alignment - 1; (value + mask) & !mask } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_serde/src/serializer.rs
ceno_serde/src/serializer.rs
use alloc::{string::ToString, vec::Vec}; use crate::{ WORD_SIZE, err::{Error, Result}, }; /// A writer for writing streams preferring word-based data. pub trait WordWrite { /// Write the given words to the stream. fn write_words(&mut self, words: &[u32]) -> Result<()>; /// Write the given bytes to the stream, padding up to the next word /// boundary. fn write_padded_bytes(&mut self, bytes: &[u8]) -> Result<()>; } impl WordWrite for Vec<u32> { fn write_words(&mut self, words: &[u32]) -> Result<()> { self.extend_from_slice(words); Ok(()) } fn write_padded_bytes(&mut self, bytes: &[u8]) -> Result<()> { let chunks = bytes.chunks_exact(WORD_SIZE); let last_word = chunks.remainder(); self.extend(chunks.map(|word_bytes| u32::from_le_bytes(word_bytes.try_into().unwrap()))); if !last_word.is_empty() { let mut last_word_bytes = [0u8; WORD_SIZE]; last_word_bytes[..last_word.len()].clone_from_slice(last_word); self.push(u32::from_le_bytes(last_word_bytes)); } Ok(()) } } // Allow borrowed WordWrites to work transparently. impl<W: WordWrite + ?Sized> WordWrite for &mut W { #[inline] fn write_words(&mut self, words: &[u32]) -> Result<()> { (**self).write_words(words) } #[inline] fn write_padded_bytes(&mut self, bytes: &[u8]) -> Result<()> { (**self).write_padded_bytes(bytes) } } /// Serialize to a vector of u32 words pub fn to_vec<T>(value: &T) -> Result<Vec<u32>> where T: serde::Serialize + ?Sized, { // Use the in-memory size of the value as a guess for the length // of the serialized value. let mut vec: Vec<u32> = Vec::with_capacity(core::mem::size_of_val(value)); let mut serializer = Serializer::new(&mut vec); value.serialize(&mut serializer)?; Ok(vec) } /// Serialize to a vector of u32 words with size hinting /// /// Includes a caller-provided hint `cap` giving the capacity of u32 words /// necessary to serialize `value`. 
pub fn to_vec_with_capacity<T>(value: &T, cap: usize) -> Result<Vec<u32>> where T: serde::Serialize + ?Sized, { let mut vec: Vec<u32> = Vec::with_capacity(cap); let mut serializer = Serializer::new(&mut vec); value.serialize(&mut serializer)?; Ok(vec) } /// Enables serializing to a stream pub struct Serializer<W: WordWrite> { stream: W, } impl<W: WordWrite> Serializer<W> { /// Construct a Serializer /// /// Creates a serializer that writes to `stream`. pub fn new(stream: W) -> Self { Serializer { stream } } } impl<W: WordWrite> serde::ser::Serializer for &'_ mut Serializer<W> { type Ok = (); type Error = Error; type SerializeSeq = Self; type SerializeTuple = Self; type SerializeTupleStruct = Self; type SerializeTupleVariant = Self; type SerializeMap = Self; type SerializeStruct = Self; type SerializeStructVariant = Self; fn is_human_readable(&self) -> bool { false } fn collect_str<T>(self, value: &T) -> Result<()> where T: ?Sized + core::fmt::Display, { self.serialize_str(&value.to_string()) } fn serialize_bool(self, v: bool) -> Result<()> { self.serialize_u8(if v { 1 } else { 0 }) } fn serialize_i8(self, v: i8) -> Result<()> { self.serialize_i32(v as i32) } fn serialize_i16(self, v: i16) -> Result<()> { self.serialize_i32(v as i32) } fn serialize_i32(self, v: i32) -> Result<()> { self.serialize_u32(v as u32) } fn serialize_i64(self, v: i64) -> Result<()> { self.serialize_u64(v as u64) } fn serialize_i128(self, v: i128) -> Result<()> { self.serialize_u128(v as u128) } fn serialize_u8(self, v: u8) -> Result<()> { self.serialize_u32(v as u32) } fn serialize_u16(self, v: u16) -> Result<()> { self.serialize_u32(v as u32) } fn serialize_u32(self, v: u32) -> Result<()> { self.stream.write_words(&[v]) } fn serialize_u64(self, v: u64) -> Result<()> { self.serialize_u32((v & 0xFFFFFFFF) as u32)?; self.serialize_u32(((v >> 32) & 0xFFFFFFFF) as u32) } fn serialize_u128(self, v: u128) -> Result<()> { self.stream.write_padded_bytes(&v.to_le_bytes()) } fn serialize_f32(self, v: 
f32) -> Result<()> { self.serialize_u32(v.to_bits()) } fn serialize_f64(self, v: f64) -> Result<()> { self.serialize_u64(f64::to_bits(v)) } fn serialize_char(self, v: char) -> Result<()> { self.serialize_u32(v as u32) } fn serialize_str(self, v: &str) -> Result<()> { let bytes = v.as_bytes(); self.serialize_u32(bytes.len() as u32)?; self.stream.write_padded_bytes(bytes) } // NOTE: Serializing byte slices _does not_ currently call serialize_bytes. This // is because the default collect_seq implementation handles all [T] with // `collect_seq` which does not differentiate. Two options for enabling more // efficient serialization (or commit) of bytes values and // bytes-interpretable slices (e.g. [u32]) are: // A) Implement collect_seq and check at runtime whether a type could be // serialized as bytes. // B) Use the experimental Rust specialization // features. fn serialize_bytes(self, v: &[u8]) -> Result<()> { self.serialize_u32(v.len() as u32)?; self.stream.write_padded_bytes(v) } fn serialize_none(self) -> Result<()> { self.serialize_u32(0) } fn serialize_some<T>(self, value: &T) -> Result<()> where T: serde::Serialize + ?Sized, { self.serialize_u32(1)?; value.serialize(self) } fn serialize_unit(self) -> Result<()> { Ok(()) } fn serialize_unit_struct(self, _name: &'static str) -> Result<()> { Ok(()) } fn serialize_unit_variant( self, _name: &'static str, variant_index: u32, _variant: &'static str, ) -> Result<()> { self.serialize_u32(variant_index) } fn serialize_newtype_struct<T>(self, _name: &'static str, value: &T) -> Result<()> where T: serde::Serialize + ?Sized, { value.serialize(self) } fn serialize_newtype_variant<T>( self, _name: &'static str, variant_index: u32, _variant: &'static str, value: &T, ) -> Result<()> where T: serde::Serialize + ?Sized, { self.serialize_u32(variant_index)?; value.serialize(self) } fn serialize_seq(self, len: Option<usize>) -> Result<Self::SerializeSeq> { match len { Some(val) => { self.serialize_u32(val.try_into().unwrap())?; 
Ok(self) } None => Err(Error::NotSupported), } } fn serialize_tuple(self, _len: usize) -> Result<Self::SerializeTuple> { Ok(self) } fn serialize_tuple_struct( self, _name: &'static str, _len: usize, ) -> Result<Self::SerializeTupleStruct> { Ok(self) } fn serialize_tuple_variant( self, _name: &'static str, variant_index: u32, _variant: &'static str, _len: usize, ) -> Result<Self::SerializeTupleVariant> { self.serialize_u32(variant_index)?; Ok(self) } fn serialize_map(self, len: Option<usize>) -> Result<Self::SerializeMap> { match len { Some(val) => { self.serialize_u32(val.try_into().unwrap())?; Ok(self) } None => Err(Error::NotSupported), } } fn serialize_struct(self, _name: &'static str, _len: usize) -> Result<Self::SerializeStruct> { Ok(self) } fn serialize_struct_variant( self, _name: &'static str, variant_index: u32, _variant: &'static str, _len: usize, ) -> Result<Self::SerializeStructVariant> { self.serialize_u32(variant_index)?; Ok(self) } } impl<W: WordWrite> serde::ser::SerializeSeq for &'_ mut Serializer<W> { type Ok = (); type Error = Error; fn serialize_element<T>(&mut self, value: &T) -> Result<()> where T: serde::Serialize + ?Sized, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } } impl<W: WordWrite> serde::ser::SerializeTuple for &'_ mut Serializer<W> { type Ok = (); type Error = Error; fn serialize_element<T>(&mut self, value: &T) -> Result<()> where T: serde::Serialize + ?Sized, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } } impl<W: WordWrite> serde::ser::SerializeTupleStruct for &'_ mut Serializer<W> { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, value: &T) -> Result<()> where T: serde::Serialize + ?Sized, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } } impl<W: WordWrite> serde::ser::SerializeTupleVariant for &'_ mut Serializer<W> { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, value: &T) -> Result<()> where T: serde::Serialize + 
?Sized, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } } impl<W: WordWrite> serde::ser::SerializeMap for &'_ mut Serializer<W> { type Ok = (); type Error = Error; fn serialize_key<T>(&mut self, key: &T) -> Result<()> where T: serde::Serialize + ?Sized, { key.serialize(&mut **self) } fn serialize_value<T>(&mut self, value: &T) -> Result<()> where T: serde::Serialize + ?Sized, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } } impl<W: WordWrite> serde::ser::SerializeStruct for &'_ mut Serializer<W> { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, _key: &'static str, value: &T) -> Result<()> where T: serde::Serialize + ?Sized, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } } impl<W: WordWrite> serde::ser::SerializeStructVariant for &'_ mut Serializer<W> { type Ok = (); type Error = Error; fn serialize_field<T>(&mut self, _key: &'static str, value: &T) -> Result<()> where T: serde::Serialize + ?Sized, { value.serialize(&mut **self) } fn end(self) -> Result<()> { Ok(()) } } #[cfg(test)] mod tests { use alloc::string::String; use serde::Serialize; use super::*; #[test] #[allow(clippy::approx_constant)] fn test_struct() { #[derive(Serialize, PartialEq, Debug)] struct Test { bool: bool, i8: i8, u8: u8, i16: i16, u16: u16, i32: i32, u32: u32, f32: f32, i64: i64, u64: u64, f64: f64, } let expected = [ 1, -4_i32 as u32, 4, -5_i32 as u32, 5, -6_i32 as u32, 6, f32::to_bits(3.14_f32), -7_i32 as u32, 0xffffffff, 7, 0x00000000, f64::to_bits(2.71).checked_rem(0x100000000).unwrap() as u32, f64::to_bits(2.71).checked_shr(32).unwrap() as u32, ]; let input = Test { bool: true, i8: -4, u8: 4, i16: -5, u16: 5, i32: -6, u32: 6, f32: 3.14, i64: -7, u64: 7, f64: 2.71, }; assert_eq!(expected, to_vec(&input).unwrap().as_slice()); } #[test] fn test_str() { #[derive(Serialize, PartialEq, Debug)] struct Test { first: String, second: String, } let expected = [1, 0x00000061, 3, 0x00636261]; let input = Test { 
first: "a".into(), second: "abc".into(), }; assert_eq!(expected, to_vec(&input).unwrap().as_slice()); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_serde/src/err.rs
ceno_serde/src/err.rs
use alloc::string::{String, ToString}; use core::fmt::{Display, Formatter}; /// Errors used by Serde #[derive(Clone, Debug, Eq, PartialEq)] pub enum Error { /// A custom error Custom(String), /// Found a bool that wasn't 0 or 1 DeserializeBadBool, /// Found an invalid unicode char DeserializeBadChar, /// Found an Option discriminant that wasn't 0 or 1 DeserializeBadOption, /// Tried to parse invalid utf-8 DeserializeBadUtf8, /// Unexpected end during deserialization DeserializeUnexpectedEnd, /// Not supported NotSupported, /// The serialize buffer is full SerializeBufferFull, } /// A Result type for `openvm::serde` operations that can fail pub type Result<T> = core::result::Result<T, Error>; impl Display for Error { fn fmt(&self, formatter: &mut Formatter) -> core::fmt::Result { formatter.write_str(match self { Self::Custom(msg) => msg, Self::DeserializeBadBool => "Found a bool that wasn't 0 or 1", Self::DeserializeBadChar => "Found an invalid unicode char", Self::DeserializeBadOption => "Found an Option discriminant that wasn't 0 or 1", Self::DeserializeBadUtf8 => "Tried to parse invalid utf-8", Self::DeserializeUnexpectedEnd => "Unexpected end during deserialization", Self::NotSupported => "Not supported", Self::SerializeBufferFull => "The serialize buffer is full", }) } } impl serde::ser::Error for Error { fn custom<T: Display>(msg: T) -> Self { Error::Custom(msg.to_string()) } } impl serde::de::Error for Error { fn custom<T: Display>(msg: T) -> Self { Error::Custom(msg.to_string()) } } // This is an alias for either std::Error, or serde's no_std error replacement. impl serde::ser::StdError for Error {}
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_serde/src/deserializer.rs
ceno_serde/src/deserializer.rs
use alloc::{string::String, vec, vec::Vec}; use bytemuck::Pod; use serde::de::{DeserializeOwned, DeserializeSeed, IntoDeserializer, Visitor}; use crate::{ WORD_SIZE, align_up, err::{Error, Result}, }; /// A reader for reading streams with serialized word-based data pub trait WordRead { /// Fill the given buffer with words from input. Returns an error if EOF /// was encountered. fn read_words(&mut self, words: &mut [u32]) -> Result<()>; /// Fill the given buffer with bytes from input, and discard the /// padding up to the next word boundary. Returns an error if EOF was /// encountered. fn read_padded_bytes(&mut self, bytes: &mut [u8]) -> Result<()>; } // Allow borrowed WordReads to work transparently impl<R: WordRead + ?Sized> WordRead for &mut R { fn read_words(&mut self, words: &mut [u32]) -> Result<()> { (**self).read_words(words) } fn read_padded_bytes(&mut self, bytes: &mut [u8]) -> Result<()> { (**self).read_padded_bytes(bytes) } } impl WordRead for &[u32] { fn read_words(&mut self, out: &mut [u32]) -> Result<()> { if out.len() > self.len() { Err(Error::DeserializeUnexpectedEnd) } else { out.clone_from_slice(&self[..out.len()]); (_, *self) = self.split_at(out.len()); Ok(()) } } fn read_padded_bytes(&mut self, out: &mut [u8]) -> Result<()> { let bytes: &[u8] = bytemuck::cast_slice(self); if out.len() > bytes.len() { Err(Error::DeserializeUnexpectedEnd) } else { out.clone_from_slice(&bytes[..out.len()]); (_, *self) = self.split_at(align_up(out.len(), WORD_SIZE) / WORD_SIZE); Ok(()) } } } /// Deserialize a slice into the specified type. /// /// Deserialize `slice` into type `T`. Returns an `Err` if deserialization isn't /// possible, such as if `slice` is not the serialized form of an object of type /// `T`. 
pub fn from_slice<T: DeserializeOwned, P: Pod>(slice: &[P]) -> Result<T> { match bytemuck::try_cast_slice(slice) { Ok(slice) => { let mut deserializer = Deserializer::new(slice); T::deserialize(&mut deserializer) } // P is u8 or another value without word-alignment. Data must be copied. Err(bytemuck::PodCastError::TargetAlignmentGreaterAndInputNotAligned) => { let vec = bytemuck::allocation::pod_collect_to_vec::<P, u32>(slice); let mut deserializer = Deserializer::new(vec.as_slice()); T::deserialize(&mut deserializer) } Err(ref e) => panic!("failed to cast or read slice as [u32]: {}", e), } } /// Enables deserializing from a WordRead pub struct Deserializer<'de, R: WordRead + 'de> { reader: R, phantom: core::marker::PhantomData<&'de ()>, } struct SeqAccess<'a, 'de, R: WordRead + 'de> { deserializer: &'a mut Deserializer<'de, R>, len: usize, } impl<'de, R: WordRead + 'de> serde::de::SeqAccess<'de> for SeqAccess<'_, 'de, R> { type Error = Error; fn next_element_seed<T>(&mut self, seed: T) -> Result<Option<T::Value>> where T: DeserializeSeed<'de>, { if self.len > 0 { self.len -= 1; Ok(Some(DeserializeSeed::deserialize( seed, &mut *self.deserializer, )?)) } else { Ok(None) } } fn size_hint(&self) -> Option<usize> { Some(self.len) } } impl<'de, R: WordRead + 'de> serde::de::VariantAccess<'de> for &'_ mut Deserializer<'de, R> { type Error = Error; fn unit_variant(self) -> Result<()> { Ok(()) } fn newtype_variant_seed<V: DeserializeSeed<'de>>(self, seed: V) -> Result<V::Value> { DeserializeSeed::deserialize(seed, self) } fn tuple_variant<V: Visitor<'de>>(self, len: usize, visitor: V) -> Result<V::Value> { serde::de::Deserializer::deserialize_tuple(self, len, visitor) } fn struct_variant<V: Visitor<'de>>( self, fields: &'static [&'static str], visitor: V, ) -> Result<V::Value> { serde::de::Deserializer::deserialize_tuple(self, fields.len(), visitor) } } impl<'de, R: WordRead + 'de> serde::de::EnumAccess<'de> for &'_ mut Deserializer<'de, R> { type Error = Error; type 
Variant = Self; fn variant_seed<V: DeserializeSeed<'de>>(self, seed: V) -> Result<(V::Value, Self)> { let tag = self.try_take_word()?; let val = DeserializeSeed::deserialize(seed, tag.into_deserializer())?; Ok((val, self)) } } struct MapAccess<'a, 'de, R: WordRead + 'de> { deserializer: &'a mut Deserializer<'de, R>, len: usize, } impl<'a, 'de: 'a, R: WordRead + 'de> serde::de::MapAccess<'de> for MapAccess<'a, 'de, R> { type Error = Error; fn next_key_seed<K: DeserializeSeed<'de>>(&mut self, seed: K) -> Result<Option<K::Value>> { if self.len > 0 { self.len -= 1; Ok(Some(DeserializeSeed::deserialize( seed, &mut *self.deserializer, )?)) } else { Ok(None) } } fn next_value_seed<V: DeserializeSeed<'de>>(&mut self, seed: V) -> Result<V::Value> { DeserializeSeed::deserialize(seed, &mut *self.deserializer) } fn size_hint(&self) -> Option<usize> { Some(self.len) } } impl<'de, R: WordRead + 'de> Deserializer<'de, R> { /// Construct a Deserializer /// /// Creates a deserializer for deserializing from the given WordRead pub fn new(reader: R) -> Self { Deserializer { reader, phantom: core::marker::PhantomData, } } fn try_take_word(&mut self) -> Result<u32> { let mut val = 0u32; self.reader.read_words(core::slice::from_mut(&mut val))?; Ok(val) } fn try_take_dword(&mut self) -> Result<u64> { let low = self.try_take_word()? as u64; let high = self.try_take_word()? as u64; Ok(low | (high << 32)) } } impl<'de, R: WordRead + 'de> serde::Deserializer<'de> for &'_ mut Deserializer<'de, R> { type Error = Error; fn is_human_readable(&self) -> bool { false } fn deserialize_any<V>(self, _visitor: V) -> Result<V::Value> where V: Visitor<'de>, { Err(Error::NotSupported) } fn deserialize_bool<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { let val = match self.try_take_word()? 
{ 0 => false, 1 => true, _ => return Err(Error::DeserializeBadBool), }; visitor.visit_bool(val) } fn deserialize_i8<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_i32(self.try_take_word()? as i32) } fn deserialize_i16<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_i32(self.try_take_word()? as i32) } fn deserialize_i32<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_i32(self.try_take_word()? as i32) } fn deserialize_i64<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_i64(self.try_take_dword()? as i64) } fn deserialize_i128<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { let mut bytes = [0u8; 16]; self.reader.read_padded_bytes(&mut bytes)?; visitor.visit_i128(i128::from_le_bytes(bytes)) } fn deserialize_u8<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_u32(self.try_take_word()?) } fn deserialize_u16<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_u32(self.try_take_word()?) } fn deserialize_u32<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_u32(self.try_take_word()?) } fn deserialize_u64<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_u64(self.try_take_dword()?) 
} fn deserialize_u128<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { let mut bytes = [0u8; 16]; self.reader.read_padded_bytes(&mut bytes)?; visitor.visit_u128(u128::from_le_bytes(bytes)) } fn deserialize_f32<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_f32(f32::from_bits(self.try_take_word()?)) } fn deserialize_f64<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_f64(f64::from_bits(self.try_take_dword()?)) } fn deserialize_char<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { let c = char::from_u32(self.try_take_word()?).ok_or(Error::DeserializeBadChar)?; visitor.visit_char(c) } fn deserialize_str<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { let len_bytes = self.try_take_word()? as usize; // Optimization opportunity: consider using MaybeUninit let mut bytes = vec![0u8; len_bytes]; self.reader.read_padded_bytes(&mut bytes)?; visitor.visit_string(String::from_utf8(bytes).map_err(|_| Error::DeserializeBadChar)?) } fn deserialize_string<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { self.deserialize_str(visitor) } fn deserialize_bytes<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { let len_bytes = self.try_take_word()? as usize; // We always allocate vec to be word-aligned let capacity = len_bytes.div_ceil(WORD_SIZE) * WORD_SIZE; // SAFETY: read_padded_bytes **must** error if the // buffer is not fully written to. let mut bytes = Vec::with_capacity(capacity); #[allow(clippy::uninit_vec)] unsafe { bytes.set_len(len_bytes); } self.reader.read_padded_bytes(&mut bytes)?; visitor.visit_byte_buf(bytes) } fn deserialize_byte_buf<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { self.deserialize_bytes(visitor) } fn deserialize_option<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { match self.try_take_word()? 
{ 0 => visitor.visit_none(), 1 => visitor.visit_some(self), _ => Err(Error::DeserializeBadOption), } } fn deserialize_unit<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_unit() } fn deserialize_unit_struct<V>(self, _name: &'static str, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { self.deserialize_unit(visitor) } fn deserialize_newtype_struct<V>(self, _name: &'static str, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_newtype_struct(self) } fn deserialize_seq<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { let len = self.try_take_word()? as usize; visitor.visit_seq(SeqAccess { deserializer: self, len, }) } fn deserialize_tuple<V>(self, len: usize, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_seq(SeqAccess { deserializer: self, len, }) } fn deserialize_tuple_struct<V>( self, _name: &'static str, len: usize, visitor: V, ) -> Result<V::Value> where V: Visitor<'de>, { self.deserialize_tuple(len, visitor) } fn deserialize_map<V>(self, visitor: V) -> Result<V::Value> where V: Visitor<'de>, { let len = self.try_take_word()? 
as usize; visitor.visit_map(MapAccess { deserializer: self, len, }) } fn deserialize_struct<V>( self, _name: &'static str, fields: &'static [&'static str], visitor: V, ) -> Result<V::Value> where V: Visitor<'de>, { self.deserialize_tuple(fields.len(), visitor) } fn deserialize_enum<V>( self, _name: &'static str, _variants: &'static [&'static str], visitor: V, ) -> Result<V::Value> where V: Visitor<'de>, { visitor.visit_enum(self) } fn deserialize_identifier<V>(self, _visitor: V) -> Result<V::Value> where V: Visitor<'de>, { Err(Error::NotSupported) } fn deserialize_ignored_any<V>(self, _visitor: V) -> Result<V::Value> where V: Visitor<'de>, { Err(Error::NotSupported) } } #[cfg(test)] mod tests { use alloc::{string::String, vec::Vec}; use core::f32; use serde::{Deserialize, Serialize}; use super::*; #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub struct SomeStruct {} #[derive(Clone, Debug, Deserialize, Eq, PartialEq, Serialize)] pub enum MyEnum { MyUnaryConstructor(Vec<u8>), MyBinaryConstructor(Vec<u8>, SomeStruct), } #[test] fn test_enum_unary() { let a = MyEnum::MyUnaryConstructor(vec![1, 2, 3, 4, 5]); let encoded = crate::to_vec(&a).unwrap(); let decoded: MyEnum = from_slice(&encoded).unwrap(); assert_eq!(a, decoded); } #[test] fn test_enum_binary() { let a = MyEnum::MyBinaryConstructor(vec![1, 2, 3, 4, 5], SomeStruct {}); let encoded = crate::to_vec(&a).unwrap(); let decoded: MyEnum = from_slice(&encoded).unwrap(); assert_eq!(a, decoded); } #[test] fn test_struct() { use serde::Deserialize; #[derive(Deserialize, PartialEq, Debug)] struct Test { bool: bool, i8: i8, u8: u8, i16: i16, u16: u16, i32: i32, u32: u32, f32: f32, i64: i64, u64: u64, f64: f64, } let words = [ 1, -4_i32 as u32, 4, -5_i32 as u32, 5, -6_i32 as u32, 6, f32::to_bits(f32::consts::PI), -7_i32 as u32, 0xffffffff, 7, 0x00000000, f64::to_bits(2.71).checked_rem(0x100000000).unwrap() as u32, f64::to_bits(2.71).checked_shr(32).unwrap() as u32, ]; let expected = Test { bool: true, i8: 
-4, u8: 4, i16: -5, u16: 5, i32: -6, u32: 6, f32: f32::consts::PI, i64: -7, u64: 7, f64: 2.71, }; assert_eq!(expected, from_slice(&words).unwrap()); } #[test] fn test_str() { use serde::Deserialize; #[derive(Deserialize, PartialEq, Debug)] struct Test { first: String, second: String, } let words = [1, 0x00000061, 3, 0x00636261]; let expected = Test { first: "a".into(), second: "abc".into(), }; assert_eq!(expected, from_slice(&words).unwrap()); } }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_host/src/lib.rs
ceno_host/src/lib.rs
use anyhow::Result; use ceno_emul::{ IterAddresses, Platform, Program, VMState, WORD_SIZE, Word, host_utils::read_all_messages, }; use ceno_serde::to_vec; use core::mem::size_of; use itertools::Itertools; use serde::Serialize; use std::{fs, io, iter::zip, path::Path, sync::Arc}; pub const WORD_ALIGNMENT: usize = size_of::<u32>(); /// A structure for building the hints input to the Ceno emulator. /// /// Use the `write` method to add a hint to the input. /// When you are done, call `into` to convert to a `Vec<u32>` to pass to the emulator. /// /// Our guest programs have two requirements on the format: /// 1. The start of the hints buffer consists of a sequence of `usize` values, each representing the /// metadata describing the layout: first the offset where the serialized bytes begin, then the /// alignment used for each record, followed by the length of every hint in order. /// 2. hints[..current_hint_len] can deserialise into the expected type via `ceno_serde`. /// /// After the metadata we place every serialized blob back-to-back (with alignment padding), so the /// runtime can walk forward from the lowest address without needing any random access. 
#[derive(Default)] pub struct CenoStdin { pub items: Vec<Item>, } #[derive(Debug, Default, Clone)] pub struct Item { pub data: Vec<u8>, pub end_of_data: usize, } impl From<Vec<u32>> for Item { fn from(data: Vec<u32>) -> Self { let data: Vec<u8> = data.into_iter().flat_map(u32::to_le_bytes).collect(); let end_of_data = data.len(); let mut data = data; data.resize(data.len().next_multiple_of(WORD_ALIGNMENT), 0); Item { data, end_of_data } } } #[derive(Debug, Default, Clone)] pub struct Items { pub data: Vec<u8>, pub lens: Vec<usize>, } impl Items { pub fn total_length(&self) -> usize { self.data.len() } pub fn append(&mut self, item: &Item) { self.data.extend_from_slice(&item.data); self.lens.push(item.end_of_data); } /// Prepend metadata to the data buffer so that the raw /// serialized bytes live at the lowest addresses and can be /// consumed sequentially at runtime. pub fn finalise(self) -> Vec<u8> { let Items { data, lens } = self; let header_words = lens.len() + 2; let data_offset = (size_of::<u32>() * header_words).next_multiple_of(WORD_ALIGNMENT); // NOTE: serde format alignment with [`ceno_rt/src/mmio.rs`] let mut header = Vec::with_capacity(header_words); header.push(data_offset as u32); header.push(WORD_ALIGNMENT as u32); header.extend(lens.into_iter().map(|len| len as u32)); let mut bytes = header .into_iter() .flat_map(u32::to_le_bytes) .collect::<Vec<_>>(); bytes.resize(data_offset, 0); bytes.extend_from_slice(&data); bytes } } impl From<&CenoStdin> for Vec<u8> { fn from(stdin: &CenoStdin) -> Vec<u8> { let mut items = Items::default(); for item in &stdin.items { items.append(item); } items.finalise() } } impl From<&CenoStdin> for Vec<u32> { fn from(stdin: &CenoStdin) -> Vec<u32> { Vec::<u8>::from(stdin) .into_iter() .tuples() .map(|(a, b, c, d)| u32::from_le_bytes([a, b, c, d])) .collect() } } impl CenoStdin { pub fn write(&mut self, value: &impl Serialize) -> Result<&mut Self, ceno_serde::Error> { let item = Item::from(to_vec(value)?); 
self.items.push(item); Ok(self) } } pub fn run( platform: Platform, elf: &[u8], hints: &CenoStdin, public_io: Option<&CenoStdin>, ) -> Vec<Vec<u8>> { let program = Program::load_elf(elf, u32::MAX).unwrap(); let platform = Platform { prog_data: Arc::new(program.image.keys().copied().collect()), ..platform }; let hints: Vec<u32> = hints.into(); let pubio: Vec<u32> = public_io.map(|c| c.into()).unwrap_or_default(); let hints_range = platform.hints.clone(); let pubio_range = platform.public_io.clone(); let mut state = VMState::new(platform, Arc::new(program)); for (addr, value) in zip(hints_range.iter_addresses(), hints) { state.init_memory(addr.into(), value); } for (addr, value) in zip(pubio_range.iter_addresses(), pubio) { state.init_memory(addr.into(), value); } let steps = state .iter_until_halt() .collect::<Result<Vec<_>>>() .expect("Failed to run the program"); eprintln!("Emulator ran for {} steps.", steps.len()); read_all_messages(&state) } pub fn memory_from_file<P: AsRef<Path>>(path: P) -> io::Result<Vec<u32>> { let mut buf = fs::read(path)?; buf.resize(buf.len().next_multiple_of(WORD_SIZE), 0); Ok(buf .chunks_exact(WORD_SIZE) .map(|word| Word::from_le_bytes(word.try_into().unwrap())) .collect_vec()) }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
scroll-tech/ceno
https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_host/tests/test_elf.rs
ceno_host/tests/test_elf.rs
use std::{collections::BTreeSet, iter::from_fn, sync::Arc}; use anyhow::Result; use ceno_emul::{ BN254_FP_WORDS, BN254_FP2_WORDS, BN254_POINT_WORDS, CENO_PLATFORM, EmuContext, InsnKind, Platform, Program, SECP256K1_ARG_WORDS, SECP256K1_COORDINATE_WORDS, SHA_EXTEND_WORDS, StepRecord, UINT256_WORDS_FIELD_ELEMENT, VMState, WORD_SIZE, Word, WordAddr, WriteOp, host_utils::{read_all_messages, read_all_messages_as_words}, }; use ceno_host::CenoStdin; use itertools::{Itertools, enumerate, izip}; use rand::{RngCore, thread_rng}; use tiny_keccak::{Hasher, Keccak, keccakf}; #[test] fn test_ceno_rt_mini() -> Result<()> { let program_elf = ceno_examples::ceno_rt_mini; let program = Program::load_elf(program_elf, u32::MAX)?; let platform = Platform { prog_data: Arc::new(program.image.keys().copied().collect()), ..CENO_PLATFORM.clone() }; let mut state = VMState::new(platform, Arc::new(program)); let _steps = run(&mut state)?; Ok(()) } // TODO(Matthias): We are using Rust's standard library's default panic handler now, // and they are indicated with a different instruction than our ecall. (But still work, // as you can tell, because this tests panics.) However, we should adapt this test // to properly check for the conventional Rust panic. 
#[test] #[should_panic(expected = "Trap IllegalInstruction")] fn test_ceno_rt_panic() { let program_elf = ceno_examples::ceno_rt_panic; let program = Program::load_elf(program_elf, u32::MAX).unwrap(); let platform = Platform { prog_data: Arc::new(program.image.keys().copied().collect()), ..CENO_PLATFORM.clone() }; let mut state = VMState::new(platform, Arc::new(program)); let steps = run(&mut state).unwrap(); let last = steps.last().unwrap(); assert_eq!(last.insn().kind, InsnKind::ECALL); assert_eq!(last.rs1().unwrap().value, Platform::ecall_halt()); assert_eq!(last.rs2().unwrap().value, 1); // panic / halt(1) } #[test] fn test_ceno_rt_mem() -> Result<()> { let program_elf = ceno_examples::ceno_rt_mem; let program = Program::load_elf(program_elf, u32::MAX)?; let platform = Platform { prog_data: Arc::new(program.image.keys().copied().collect()), ..CENO_PLATFORM.clone() }; let sheap = program.sheap.into(); let mut state = VMState::new(platform, Arc::new(program.clone())); let _steps = run(&mut state)?; let value = state.peek_memory(sheap); assert_eq!(value, 6765, "Expected Fibonacci 20, got {}", value); Ok(()) } #[test] fn test_ceno_rt_alloc() -> Result<()> { let program_elf = ceno_examples::ceno_rt_alloc; let program = Program::load_elf(program_elf, u32::MAX)?; let platform = Platform { prog_data: Arc::new(program.image.keys().copied().collect()), ..CENO_PLATFORM.clone() }; let mut state = VMState::new(platform, Arc::new(program)); let _steps = run(&mut state)?; // Search for the RAM action of the test program. 
let mut found = (false, false); for addr in state.tracer().final_accesses().addresses() { if !CENO_PLATFORM.is_ram((*addr).into()) { continue; } let value = state.peek_memory(*addr); if value == 0xf00d { found.0 = true; } if value == 0xbeef { found.1 = true; } } assert!(found.0); assert!(found.1); Ok(()) } #[test] fn test_ceno_rt_io() -> Result<()> { let program_elf = ceno_examples::ceno_rt_io; let program = Program::load_elf(program_elf, u32::MAX)?; let platform = Platform { prog_data: Arc::new(program.image.keys().copied().collect()), ..CENO_PLATFORM.clone() }; let mut state = VMState::new(platform, Arc::new(program)); let _steps = run(&mut state)?; let all_messages = messages_to_strings(&read_all_messages(&state)); for msg in &all_messages { print!("{msg}"); } assert_eq!(&all_messages[0], "📜📜📜 Hello, World!\n"); assert_eq!(&all_messages[1], "🌏🌍🌎\n"); Ok(()) } #[test] fn test_hints() -> Result<()> { let all_messages = messages_to_strings(&ceno_host::run( CENO_PLATFORM.clone(), ceno_examples::hints, CenoStdin::default() .write(&true)? .write(&"This is my hint string.".to_string())? .write(&1997_u32)? .write(&1999_u32)?, None, )); for (i, msg) in enumerate(&all_messages) { println!("{i}: {msg}"); } assert_eq!(all_messages[3], "3992003"); Ok(()) } #[test] fn test_bubble_sorting() -> Result<()> { let mut rng = thread_rng(); let all_messages = messages_to_strings(&ceno_host::run( CENO_PLATFORM.clone(), ceno_examples::quadratic_sorting, // Provide some random numbers to sort. CenoStdin::default().write(&(0..1_000).map(|_| rng.next_u32()).collect::<Vec<_>>())?, None, )); for msg in &all_messages { print!("{msg}"); } Ok(()) } #[test] fn test_sorting() -> Result<()> { let mut rng = thread_rng(); let all_messages = messages_to_strings(&ceno_host::run( CENO_PLATFORM.clone(), ceno_examples::sorting, // Provide some random numbers to sort. 
CenoStdin::default().write(&(0..1000).map(|_| rng.next_u32()).collect::<Vec<_>>())?, None, )); for (i, msg) in enumerate(&all_messages) { println!("{i}: {msg}"); } Ok(()) } #[test] fn test_median() -> Result<()> { let mut hints = CenoStdin::default(); let mut rng = thread_rng(); // Provide some random numbers to find the median of. let mut nums = (0..1000).map(|_| rng.next_u32()).collect::<Vec<_>>(); hints.write(&nums)?; nums.sort(); hints.write(&nums[nums.len() / 2])?; let all_messages = messages_to_strings(&ceno_host::run( CENO_PLATFORM.clone(), ceno_examples::median, &hints, None, )); assert!(!all_messages.is_empty()); for (i, msg) in enumerate(&all_messages) { println!("{i}: {msg}"); } Ok(()) } #[test] #[should_panic(expected = "Trap IllegalInstruction")] fn test_hashing_fail() { let mut rng = thread_rng(); let mut nums = (0..1_000).map(|_| rng.next_u32()).collect::<Vec<_>>(); // Add a duplicate number to make uniqueness check fail: nums[211] = nums[907]; let _ = ceno_host::run( CENO_PLATFORM.clone(), ceno_examples::hashing, CenoStdin::default().write(&nums).unwrap(), None, ); } #[test] fn test_hashing() -> Result<()> { let mut rng = thread_rng(); // Provide some unique random numbers to verify: let uniques: Vec<u32> = { let mut seen_so_far = BTreeSet::default(); from_fn(move || Some(rng.next_u32())) .filter(|&item| seen_so_far.insert(item)) .take(1_000) .collect::<Vec<_>>() }; let all_messages = messages_to_strings(&ceno_host::run( CENO_PLATFORM.clone(), ceno_examples::hashing, CenoStdin::default().write(&uniques)?, None, )); assert!(!all_messages.is_empty()); for (i, msg) in enumerate(&all_messages) { println!("{i}: {msg}"); } assert_eq!(all_messages[0], "The input is a set of unique numbers.\n"); Ok(()) } #[test] fn test_keccak_syscall() -> Result<()> { let program_elf = ceno_examples::keccak_syscall; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let steps = run(&mut state)?; // Expect the program to have written successive states 
between Keccak permutations. let keccak_first_iter_outs = sample_keccak_f(1); let all_messages = read_all_messages(&state); assert_eq!(all_messages.len(), 1); for (got, expect) in izip!(&all_messages, &keccak_first_iter_outs) { let got = got .chunks_exact(8) .map(|chunk| u64::from_le_bytes(chunk.try_into().unwrap())) .collect_vec(); assert_eq!(&got, expect); } // Find the syscall records. let syscalls = steps.iter().filter_map(|step| step.syscall()).collect_vec(); assert_eq!(syscalls.len(), 100); // Check the syscall effects. for (witness, expect) in izip!(syscalls, keccak_first_iter_outs) { assert_eq!(witness.reg_ops.len(), 1); assert_eq!(witness.reg_ops[0].register_index(), Platform::reg_arg0()); assert_eq!(witness.mem_ops.len(), expect.len() * 2); let got = witness .mem_ops .chunks_exact(2) .map(|write_ops| { assert_eq!( write_ops[1].addr.baddr(), write_ops[0].addr.baddr() + WORD_SIZE as u32 ); let lo = write_ops[0].value.after as u64; let hi = write_ops[1].value.after as u64; lo | (hi << 32) }) .collect_vec(); assert_eq!(got, expect); } Ok(()) } fn bytes_to_words(bytes: [u8; 65]) -> [u32; 16] { // ignore the tag byte (specific to the secp repr.) 
let mut bytes: [u8; 64] = bytes[1..].try_into().unwrap(); // Reverse the order of bytes for each coordinate bytes[0..32].reverse(); bytes[32..].reverse(); std::array::from_fn(|i| u32::from_le_bytes(bytes[4 * i..4 * (i + 1)].try_into().unwrap())) } #[test] fn test_secp256k1() -> Result<()> { let program_elf = ceno_examples::secp256k1; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let steps = run(&mut state)?; let syscalls = steps.iter().filter_map(|step| step.syscall()).collect_vec(); assert!(!syscalls.is_empty()); Ok(()) } #[test] fn test_secp256k1_add() -> Result<()> { let program_elf = ceno_examples::secp256k1_add_syscall; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let steps = run(&mut state)?; let syscalls = steps.iter().filter_map(|step| step.syscall()).collect_vec(); assert_eq!(syscalls.len(), 1); let witness = syscalls[0]; assert_eq!(witness.reg_ops.len(), 2); assert_eq!(witness.reg_ops[0].register_index(), Platform::reg_arg0()); assert_eq!(witness.reg_ops[1].register_index(), Platform::reg_arg1()); let p_address = witness.reg_ops[0].value.after; assert_eq!(p_address, witness.reg_ops[0].value.before); let p_address: WordAddr = p_address.into(); let q_address = witness.reg_ops[1].value.after; assert_eq!(q_address, witness.reg_ops[1].value.before); let q_address: WordAddr = q_address.into(); const P_PLUS_Q: [u8; 65] = [ 4, 188, 11, 115, 232, 35, 63, 79, 186, 163, 11, 207, 165, 64, 247, 109, 81, 125, 56, 83, 131, 221, 140, 154, 19, 186, 109, 173, 9, 127, 142, 169, 219, 108, 17, 216, 218, 125, 37, 30, 87, 86, 194, 151, 20, 122, 64, 118, 123, 210, 29, 60, 209, 138, 131, 11, 247, 157, 212, 209, 123, 162, 111, 197, 70, ]; let expect = bytes_to_words(P_PLUS_Q); assert_eq!(witness.mem_ops.len(), 2 * SECP256K1_ARG_WORDS); // Expect first half to consist of read/writes on P for (i, write_op) in witness.mem_ops.iter().take(SECP256K1_ARG_WORDS).enumerate() { assert_eq!(write_op.addr, p_address + i); 
assert_eq!(write_op.value.after, expect[i]); } // Expect second half to consist of reads on Q for (i, write_op) in witness .mem_ops .iter() .skip(SECP256K1_ARG_WORDS) .take(SECP256K1_ARG_WORDS) .enumerate() { assert_eq!(write_op.addr, q_address + i); assert_eq!(write_op.value.after, write_op.value.before); } Ok(()) } #[test] fn test_secp256k1_double() -> Result<()> { let program_elf = ceno_examples::secp256k1_double_syscall; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let steps = run(&mut state)?; let syscalls = steps.iter().filter_map(|step| step.syscall()).collect_vec(); assert_eq!(syscalls.len(), 1); let witness = syscalls[0]; assert_eq!(witness.reg_ops.len(), 1); assert_eq!(witness.reg_ops[0].register_index(), Platform::reg_arg0()); let p_address = witness.reg_ops[0].value.after; assert_eq!(p_address, witness.reg_ops[0].value.before); let p_address: WordAddr = p_address.into(); // first byte is tag const DOUBLE_P: [u8; 65] = [ 1, 198, 4, 127, 148, 65, 237, 125, 109, 48, 69, 64, 110, 149, 192, 124, 216, 92, 119, 142, 75, 140, 239, 60, 167, 171, 172, 9, 185, 92, 112, 158, 229, 26, 225, 104, 254, 166, 61, 195, 57, 163, 197, 132, 25, 70, 108, 234, 238, 247, 246, 50, 101, 50, 102, 208, 225, 35, 100, 49, 169, 80, 207, 229, 42, ]; let expect = bytes_to_words(DOUBLE_P); assert_eq!(witness.mem_ops.len(), SECP256K1_ARG_WORDS); for (i, write_op) in witness.mem_ops.iter().enumerate() { assert_eq!(write_op.addr, p_address + i); assert_eq!(write_op.value.after, expect[i]); } Ok(()) } #[test] fn test_secp256k1_decompress() -> Result<()> { let program_elf = ceno_examples::secp256k1_decompress_syscall; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let steps = run(&mut state)?; let syscalls = steps.iter().filter_map(|step| step.syscall()).collect_vec(); assert_eq!(syscalls.len(), 1); let witness = syscalls[0]; assert_eq!(witness.reg_ops.len(), 2); assert_eq!(witness.reg_ops[0].register_index(), Platform::reg_arg0()); 
assert_eq!(witness.reg_ops[1].register_index(), Platform::reg_arg1()); let x_address = witness.reg_ops[0].value.after; assert_eq!(x_address, witness.reg_ops[0].value.before); let x_address: WordAddr = x_address.into(); // Y coordinate should be written immediately after X coordinate // X coordinate takes "half an argument" of words let y_address = x_address + SECP256K1_ARG_WORDS / 2; // Complete decompressed point (X and Y) let mut decompressed: [u8; 65] = [ 4, 180, 53, 9, 32, 85, 226, 220, 154, 20, 116, 218, 199, 119, 48, 44, 23, 45, 222, 10, 64, 50, 63, 8, 121, 191, 244, 141, 0, 37, 117, 182, 133, 190, 160, 239, 131, 180, 166, 242, 145, 107, 249, 24, 168, 27, 69, 86, 58, 86, 159, 10, 210, 164, 20, 152, 148, 67, 37, 222, 234, 108, 57, 84, 148, ]; decompressed[33..].reverse(); // Writes should cover the Y coordinate, i.e latter half of the repr let expect = bytes_to_words(decompressed)[8..].to_vec(); assert_eq!(witness.mem_ops.len(), 2 * SECP256K1_COORDINATE_WORDS); // Reads on X for (i, write_op) in witness .mem_ops .iter() .take(SECP256K1_COORDINATE_WORDS) .enumerate() { assert_eq!(write_op.addr, x_address + i); assert_eq!(write_op.value.after, write_op.value.before); } // Reads/writes on Y for (i, write_op) in witness .mem_ops .iter() .skip(SECP256K1_COORDINATE_WORDS) .take(SECP256K1_COORDINATE_WORDS) .enumerate() { assert_eq!(write_op.addr, y_address + i); assert_eq!(write_op.value.after, expect[i]); } Ok(()) } #[test] fn test_secp256k1_ecrecover() -> Result<()> { let program_elf = ceno_examples::secp256k1_ecrecover; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let steps = run(&mut state)?; let syscalls = steps.iter().filter_map(|step| step.syscall()).collect_vec(); assert!(!syscalls.is_empty()); Ok(()) } #[test] fn test_sha256_extend() -> Result<()> { let program_elf = ceno_examples::sha_extend_syscall; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let steps = run(&mut state)?; let syscalls = 
steps.iter().filter_map(|step| step.syscall()).collect_vec(); assert_eq!(syscalls.len(), 1); let witness = syscalls[0]; assert_eq!(witness.reg_ops.len(), 1); assert_eq!(witness.reg_ops[0].register_index(), Platform::reg_arg0()); let state_ptr = witness.reg_ops[0].value.after; assert_eq!(state_ptr, witness.reg_ops[0].value.before); let state_ptr: WordAddr = state_ptr.into(); let expected = [ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 34013193, 67559435, 1711661200, 3020350282, 1447362251, 3118632270, 4004188394, 690615167, 6070360, 1105370215, 2385558114, 2348232513, 507799627, 2098764358, 5845374, 823657968, 2969863067, 3903496557, 4274682881, 2059629362, 1849247231, 2656047431, 835162919, 2096647516, 2259195856, 1779072524, 3152121987, 4210324067, 1557957044, 376930560, 982142628, 3926566666, 4164334963, 789545383, 1028256580, 2867933222, 3843938318, 1135234440, 390334875, 2025924737, 3318322046, 3436065867, 652746999, 4261492214, 2543173532, 3334668051, 3166416553, 634956631, ]; assert_eq!(witness.mem_ops.len(), SHA_EXTEND_WORDS); for (i, write_op) in witness.mem_ops.iter().enumerate() { assert_eq!(write_op.addr, state_ptr + i); assert_eq!(write_op.value.after, expected[i]); if i < 16 { // sanity check: first 16 entries remain unchanged assert_eq!(write_op.value.before, write_op.value.after); } } Ok(()) } #[test] fn test_sha256_full() -> Result<()> { let public_io: &[u32; 8] = &[ 30689455, 3643278932, 1489987339, 1626711444, 3610619649, 1925764735, 581441152, 321290698, ]; let hints: &Vec<u32> = &vec![0u32; 10]; let all_messages = messages_to_strings(&ceno_host::run( CENO_PLATFORM.clone(), ceno_examples::sha256, CenoStdin::default().write(hints)?, Some(CenoStdin::default().write(public_io)?), )); assert_eq!(all_messages.len(), 0); Ok(()) } #[test] fn test_bn254_fptower_syscalls() -> Result<()> { let program_elf = ceno_examples::bn254_fptower_syscalls; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let steps = run(&mut state)?; 
const RUNS: usize = 10; let syscalls = steps.iter().filter_map(|step| step.syscall()).collect_vec(); assert_eq!(syscalls.len(), 4 * RUNS); for witness in syscalls.iter() { assert_eq!(witness.reg_ops.len(), 2); assert_eq!(witness.reg_ops[0].register_index(), Platform::reg_arg0()); assert_eq!(witness.reg_ops[1].register_index(), Platform::reg_arg1()); } let messages = read_all_messages_as_words(&state); let mut m_iter = messages.iter(); // just Fp syscalls for witness in syscalls.iter().take(2 * RUNS) { assert_eq!(witness.mem_ops.len(), 2 * BN254_FP_WORDS); let [a_before, b, a_after] = [ m_iter.next().unwrap(), m_iter.next().unwrap(), m_iter.next().unwrap(), ]; check_writes(&witness.mem_ops[0..BN254_FP_WORDS], a_before, a_after); check_reads(&witness.mem_ops[BN254_FP_WORDS..], b); } // just Fp2 syscalls for witness in syscalls.iter().skip(2 * RUNS) { assert_eq!(witness.mem_ops.len(), 2 * BN254_FP2_WORDS); let [a_before, b, a_after] = [ m_iter.next().unwrap(), m_iter.next().unwrap(), m_iter.next().unwrap(), ]; check_writes(&witness.mem_ops[0..BN254_FP2_WORDS], a_before, a_after); check_reads(&witness.mem_ops[BN254_FP2_WORDS..], b); } let program_elf = ceno_examples::bn254_patched_fp; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let _ = run(&mut state)?; Ok(()) } #[test] fn test_bn254_curve() -> Result<()> { let program_elf = ceno_examples::bn254_curve_syscalls; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let steps = run(&mut state)?; let syscalls = steps.iter().filter_map(|step| step.syscall()).collect_vec(); assert_eq!(syscalls.len(), 3); // add assert_eq!(syscalls[0].reg_ops.len(), 2); assert_eq!( syscalls[0].reg_ops[0].register_index(), Platform::reg_arg0() ); assert_eq!( syscalls[0].reg_ops[1].register_index(), Platform::reg_arg1() ); // double assert_eq!(syscalls[1].reg_ops.len(), 1); assert_eq!( syscalls[1].reg_ops[0].register_index(), Platform::reg_arg0() ); // add assert_eq!(syscalls[2].reg_ops.len(), 2); 
assert_eq!( syscalls[2].reg_ops[0].register_index(), Platform::reg_arg0() ); assert_eq!( syscalls[2].reg_ops[1].register_index(), Platform::reg_arg1() ); let messages = read_all_messages_as_words(&state); let [a1, b, a2, c1, c2, one, c3]: [Vec<u32>; 7] = messages.try_into().unwrap(); { assert_eq!(syscalls[0].mem_ops.len(), 2 * BN254_POINT_WORDS); check_writes(&syscalls[0].mem_ops[..BN254_POINT_WORDS], &a1, &a2); check_reads(&syscalls[0].mem_ops[BN254_POINT_WORDS..], &b); } { assert_eq!(syscalls[1].mem_ops.len(), BN254_POINT_WORDS); check_writes(&syscalls[1].mem_ops, &c1, &c2); } { assert_eq!(syscalls[2].mem_ops.len(), 2 * BN254_POINT_WORDS); check_writes(&syscalls[2].mem_ops[..BN254_POINT_WORDS], &c2, &c3); check_reads(&syscalls[2].mem_ops[BN254_POINT_WORDS..], &one); } Ok(()) } #[test] fn test_bn254_precompile() -> Result<()> { let program_elf = ceno_examples::bn254_precompile; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let _ = run(&mut state)?; Ok(()) } #[test] fn test_uint256_mul() -> Result<()> { let program_elf = ceno_examples::uint256_mul_syscall; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let steps = run(&mut state)?; let syscalls = steps.iter().filter_map(|step| step.syscall()).collect_vec(); assert_eq!(syscalls.len(), 1); let witness = syscalls[0]; assert_eq!(witness.reg_ops.len(), 2); assert_eq!(witness.reg_ops[0].register_index(), Platform::reg_arg0()); assert_eq!(witness.reg_ops[1].register_index(), Platform::reg_arg1()); let a_address = witness.reg_ops[0].value.after; assert_eq!(a_address, witness.reg_ops[0].value.before); let a_address: WordAddr = a_address.into(); let b_address = witness.reg_ops[1].value.after; assert_eq!(b_address, witness.reg_ops[1].value.before); let b_address: WordAddr = b_address.into(); let expect: [u32; 8] = [ 0xF0D2F44F, 0xF0DC2116, 0x253AB7CD, 0x3089E8F6, 0x803BED8F, 0x969E7A64, 0x610CBFFF, 0x80012A20, ]; assert_eq!(witness.mem_ops.len(), 3 * 
UINT256_WORDS_FIELD_ELEMENT); // Expect first half to consist of read/writes on x for (i, write_op) in witness .mem_ops .iter() .take(UINT256_WORDS_FIELD_ELEMENT) .enumerate() { assert_eq!(write_op.addr, a_address + i); assert_eq!(write_op.value.after, expect[i]); } // Expect second half to consist of reads on y and modulus for (i, write_op) in witness .mem_ops .iter() .skip(UINT256_WORDS_FIELD_ELEMENT) .take(UINT256_WORDS_FIELD_ELEMENT * 2) .enumerate() { assert_eq!(write_op.addr, b_address + i); assert_eq!(write_op.value.after, write_op.value.before); } Ok(()) } #[test] fn test_syscalls_compatibility() -> Result<()> { let program_elf = ceno_examples::syscalls; let mut state = VMState::new_from_elf(unsafe_platform(), program_elf)?; let _ = run(&mut state)?; Ok(()) } #[test] fn test_fibonacci() -> Result<()> { let _ = ceno_host::run( CENO_PLATFORM.clone(), ceno_examples::fibonacci, CenoStdin::default().write(&10_u32)?, Some(CenoStdin::default().write(&4191_u32)?), ); Ok(()) } #[test] fn test_keccak_no_syscall() -> Result<()> { let pre_image = vec![0xdeadbeefu32, 0xdeadbeef, 0xdeadbeef]; let all_messages = messages_to_strings(&ceno_host::run( CENO_PLATFORM.clone(), ceno_examples::keccak_no_syscall, CenoStdin::default().write(&pre_image)?, None, )); let pre_image: Vec<u8> = pre_image.iter().flat_map(|x| x.to_le_bytes()).collect(); let mut hasher = Keccak::v256(); let mut raw_output = [0u8; 32]; hasher.update(&pre_image); hasher.finalize(&mut raw_output); let output = raw_output .chunks_exact(4) .map(|chunk| u32::from_le_bytes(chunk.try_into().unwrap())) .collect::<Vec<u32>>(); for (got, expect) in izip!(&all_messages, &output) { let got = u32::from_str_radix(got, 16).expect("Invalid hex string"); assert_eq!(&got, expect); } Ok(()) } #[test] fn test_keccak_guest() -> Result<()> { let _ = ceno_host::run( CENO_PLATFORM.clone(), ceno_examples::keccak_lib, &CenoStdin::default(), None, ); let _ = ceno_host::run( CENO_PLATFORM.clone(), ceno_examples::keccak_native, 
&CenoStdin::default(), None, ); Ok(()) } fn unsafe_platform() -> Platform { let mut platform = CENO_PLATFORM.clone(); platform.unsafe_ecall_nop = true; platform } fn check_writes(ops: &[WriteOp], before: &[Word], after: &[Word]) { assert!(ops.len() == before.len() && ops.len() == after.len()); for (i, _) in ops.iter().enumerate() { assert_eq!(ops[0].addr + i, ops[i].addr); assert_eq!(ops[i].value.before, before[i]); assert_eq!(ops[i].value.after, after[i]); } } fn check_reads(ops: &[WriteOp], before: &[Word]) { check_writes(ops, before, before); } fn sample_keccak_f(count: usize) -> Vec<Vec<u64>> { let mut state = [0_u64; 25]; (0..count) .map(|_| { keccakf(&mut state); state.into() }) .collect_vec() } fn messages_to_strings(messages: &[Vec<u8>]) -> Vec<String> { messages .iter() .map(|msg| String::from_utf8_lossy(msg).to_string()) .collect() } fn run(state: &mut VMState) -> Result<Vec<StepRecord>> { let steps = state.iter_until_halt().collect::<Result<Vec<_>>>()?; eprintln!("Emulator ran for {} steps.", steps.len()); Ok(steps) }
rust
Apache-2.0
ce97cf805a131db43a6d3d56a2fd0506a6dc8431
2026-01-04T20:23:30.257242Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/source_map.rs
src/source_map.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ //! Define a disk image file "Source Map" - a tree structure that holds scalar //! data values representing fields read from the source disk image. This can //! provide some useful debugging information, or introspection into how various //! fields are being interpreted. //! The ability to mark data values as suspicious, and to insert comments, adds //! to the utility of this structure. //! //! Additionally, an [ImageSourceMap] could be provided to a file format parser //! on write, to better allow a parser to create an identical disk image from //! the parsed data. //! //! A source map is not created by a parser by default, to reduce memory usage. //! You can request source map creation by setting the CREATE_SOURCE_MAP flag //! in [ParserWriteOptions]. 
use crate::{ tree_map::{FoxTreeCursor, FoxTreeMap}, FoxHashSet, }; use std::{ any::Any, fmt::{Debug, Display}, }; /// An enum representing a data representation - either decimal, hexadecimal or binary. /// The internal value represents the number of digits to display. A value of 0 means /// no width specifier will be used when formatting. /// The default representation is decimal. #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Copy, Clone, Debug, PartialEq)] pub enum Repr { Dec(u8), Hex(u8), Bin(u8), } impl Default for Repr { fn default() -> Self { Repr::Dec(0) } } impl Repr { /// Format the provided [Scalar] with the current representation. pub fn fmt(&self, value: &Scalar) -> String { let value = value.int(); match self { Repr::Dec(0) => format!("{}", value), Repr::Dec(digits) => format!("{:0width$}", value, width = *digits as usize), Repr::Hex(0) => format!("{:#X}", value), Repr::Hex(digits) => format!("{:#0width$X}", value, width = (*digits + 2) as usize), Repr::Bin(0) => format!("{:#b}", value), Repr::Bin(digits) => format!("{:#0width$b}", value, width = (*digits + 2) as usize), } } } /// A scalar value, primarily defining simple integers and a String type. /// In theory all integers could be stored as the same type, but I suppose this might save some /// memory in some cases. #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Debug, PartialEq)] pub enum Scalar { U8(u8), U16(u16), U32(u32), String(String), } impl Scalar { pub fn int(&self) -> u64 { use Scalar::*; match self { U8(v) => *v as u64, U16(v) => *v as u64, U32(v) => *v as u64, String(_) => 0, } } } /// A state for a value, indicating whether it is good, bad, or questionable. /// Currently only Good and Bad are used - this field was a bool, but I figured it may be useful /// to have a third state if we aren't sure if a value is bad or not. 
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Copy, Clone, Default)] pub enum ValueState { #[default] Good, Bad, Questionable, } /// A [SourceValue] represents a value read from a disk image format. It has an optional [Scalar] /// value - if `None`, then we will simply display the name of the field. /// - The `repr` field can be set to indicate how the value should be formatted via the [Repr] enum. /// - The `invalid` flag can be set to indicate that the value is suspicious or invalid as determined /// by the parser. This can control special highlighting in the UI. /// - The `tip` field can be set to provide a string that will be displayed as a tooltip in the UI /// when the user hovers over the value. A good example of using this is to provide the Debug /// representation of the fluxfox enum mapped from the source scalar value. /// - The `comment` field can be set to provide a string that will be displayed as a comment in /// the UI to the right of the value. #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Default)] pub struct SourceValue { pub(crate) scalar: Option<Scalar>, pub(crate) repr: Repr, pub(crate) state: ValueState, pub(crate) tip: Option<String>, pub(crate) comment: Option<String>, } impl Display for SourceValue { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { if let Some(scalar) = &self.scalar { match scalar { Scalar::String(s) => write!(f, "{}", s), _ => write!(f, "{}{}", self.repr.fmt(scalar), if self.is_bad() { "*" } else { "" }), } } else { write!(f, "") } } } impl SourceValue { // Queries and accessors #[inline] pub fn state(&self) -> ValueState { self.state } #[inline] pub fn state_mut(&mut self) -> &mut ValueState { &mut self.state } #[inline] /// Return true if the value is not in a bad state. (Questionable will also return true) pub fn is_good(&self) -> bool { !self.is_bad() } /// Return true if the value has a bad state. 
#[inline] pub fn is_bad(&self) -> bool { matches!(self.state, ValueState::Bad) } /// Return true if the value has a tool-tip string. #[inline] pub fn has_tip(&self) -> bool { self.tip.is_some() } /// Get a reference to the tool-tip string, if present. #[inline] pub fn tip_ref(&self) -> Option<&str> { self.tip.as_ref().map(|s| s.as_str()) } /// Return true if the value has a comment. #[inline] pub fn has_comment(&self) -> bool { self.comment.is_some() } /// Return a reference to the comment string, if present. #[inline] pub fn comment_ref(&self) -> Option<&str> { self.comment.as_deref() } // Generators /// Create a u8 value with the defaults. #[inline] pub fn u8(value: u8) -> Self { Self::u8_base(value, Repr::default(), ValueState::Good, "") } /// Create a u32 with hexadecimal representation. #[inline] pub fn hex_u8(value: u8) -> Self { Self::u8_base(value, Repr::Hex(8), ValueState::Good, "") } /// Base function for creating a u8 value with different parameters. Usually not called /// directly. pub fn u8_base(value: u8, repr: Repr, state: ValueState, tip: &str) -> Self { SourceValue { scalar: Some(Scalar::U8(value)), repr, tip: (!tip.is_empty()).then_some(tip.to_string()), state, comment: None, } } /// Create a u16 value with the defaults. #[inline] pub fn u16(value: u16) -> Self { Self::u16_base(value, Repr::default(), ValueState::Good, "") } /// Create a u16 with hexadecimal representation. #[inline] pub fn hex_u16(value: u16) -> Self { Self::u16_base(value, Repr::Hex(8), ValueState::Good, "") } /// Base function for creating a u16 value with different parameters. Usually not called /// directly. pub fn u16_base(value: u16, repr: Repr, state: ValueState, tip: &str) -> Self { SourceValue { scalar: Some(Scalar::U16(value)), repr, tip: (!tip.is_empty()).then_some(tip.to_string()), state, comment: None, } } /// Create a u32 value with the defaults. 
#[inline] pub fn u32(value: u32) -> Self { Self::u32_base(value, Repr::default(), ValueState::Good, "") } /// Create a u32 with hexadecimal representation. #[inline] pub fn hex_u32(value: u32) -> Self { Self::u32_base(value, Repr::Hex(8), ValueState::Good, "") } /// Base function for creating a u32 value with different parameters. Usually not called /// directly. pub fn u32_base(value: u32, repr: Repr, state: ValueState, tip: &str) -> Self { SourceValue { scalar: Some(Scalar::U32(value)), repr, tip: (!tip.is_empty()).then_some(tip.to_string()), state, comment: None, } } /// Create a String scalar value with the defaults. pub fn string(value: &str) -> Self { SourceValue { scalar: Some(Scalar::String(value.to_string())), repr: Repr::default(), state: ValueState::Good, tip: None, comment: None, } } // Inline Modifiers /// Set the value state to Good or Bad based on the provided predicate #[inline] pub fn good_if(mut self, predicate: bool) -> Self { self.state = if predicate { ValueState::Good } else { ValueState::Bad }; self } /// Set the value state to Bad. #[inline] pub fn bad(mut self) -> Self { self.state = ValueState::Bad; self } /// Set the value state to Bad or Good based on the provided predicate #[inline] pub fn bad_if(mut self, predicate: bool) -> Self { self.state = if predicate { ValueState::Bad } else { ValueState::Good }; self } /// Set the value state to Questionable. #[inline] pub fn quest(mut self) -> Self { self.state = ValueState::Questionable; self } /// Set the value state to Questionable or Good based on the provided predicate #[inline] pub fn quest_if(mut self, predicate: bool) -> Self { self.state = if predicate { ValueState::Questionable } else { ValueState::Good }; self } /// Set a tooltip for the value. #[inline] pub fn tip(mut self, tip: &str) -> Self { self.tip = (!tip.is_empty()).then_some(tip.to_string()); self } /// Set the representation to [Repr] hexadecimal with the provided number of `digits`. /// The 0x prefix is not included. 
#[inline] pub fn hex(mut self, digits: u8) -> Self { self.repr = Repr::Hex(digits); self } /// Set the scalar representation to [Repr] binary with the provided number of `digits`. /// The 0b prefix is not included. #[inline] pub fn bin(mut self, digits: u8) -> Self { self.repr = Repr::Bin(digits); self } /// Set the comment string if `comment` is not empty. #[inline] pub fn comment(mut self, comment: &str) -> Self { self.comment = (!comment.is_empty()).then_some(comment.to_string()); self } } #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Default)] pub struct SourceMap { map: FoxTreeMap<SourceValue>, } impl SourceMap { pub fn new() -> Self { Self { map: FoxTreeMap::new(SourceValue::default()), } } pub fn for_each<F>(&self, f: F) where F: FnMut(usize, &SourceValue), { self.map.for_each(f); } pub fn root(&self) -> usize { self.map.root() } pub fn children(&self, index: usize) -> &[usize] { self.map.children(index) } pub fn node(&self, index: usize) -> (&str, &SourceValue) { let node = &self.map.node(index); (&node.name, &node.data) } } // impl FoxTree for SourceMap { // type Data = SourceValue; // // fn tree_mut(&mut self) -> &mut FoxTreeMap<Self::Data> { // &mut self.map // } // // fn tree(&self) -> &FoxTreeMap<Self::Data> { // &self.map // } // } impl Debug for SourceMap { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { self.map.debug_with(f, &|data: &SourceValue| { if let Some(scalar) = &data.scalar { match scalar { Scalar::String(s) => s.clone(), _ => format!("{}{}", data.repr.fmt(scalar), if data.is_bad() { "*" } else { "" }), } } else { "".to_string() } }) } } /// A trait for a source map that can be optionally created by a parser. /// We can create a null source map that does nothing, to avoid having a lot of conditional code /// in our parsers. 
pub trait OptionalSourceMap: Any + Send + Sync {
    /// Upcast to `&dyn Any` (e.g. for downcasting back to a concrete map type).
    fn as_any(&self) -> &dyn Any;
    /// Return `Some(&SourceMap)` for a real map, `None` for the null implementation.
    fn as_some(&self) -> Option<&SourceMap>;
    /// Add a child node under `parent` and return a cursor positioned at it.
    fn add_child(&mut self, parent: usize, name: &str, data: SourceValue) -> FoxTreeCursor<SourceValue>;
    /// Print the tree for debugging (no-op on the null implementation).
    fn debug_tree(&self);
    /// Format the map; named `fmt` so `Debug for dyn OptionalSourceMap` can delegate to it.
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result;
    /// Return a cursor at the most recently added node.
    fn last_node(&mut self) -> FoxTreeCursor<SourceValue>;
}

impl OptionalSourceMap for SourceMap {
    fn as_any(&self) -> &dyn Any {
        self
    }
    fn as_some(&self) -> Option<&SourceMap> {
        // Real map: expose ourselves.
        Some(self)
    }
    fn add_child(&mut self, parent: usize, name: &str, data: SourceValue) -> FoxTreeCursor<SourceValue> {
        // Insert into the underlying tree, then hand back a cursor at the new child.
        let child_index = self.map.add_child(parent, name, data);
        FoxTreeCursor::new(&mut self.map, parent, child_index)
    }
    fn debug_tree(&self) {
        // `visited` guards against revisiting nodes during the walk
        // (presumably cycle/duplicate protection — see FoxTreeMap::debug_tree).
        let mut visited = FoxHashSet::new();
        self.map.debug_tree(0, 0, &|_| "".to_string(), &mut visited);
    }
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Delegate to the Debug impl defined on SourceMap above.
        Debug::fmt(self, f)
    }
    fn last_node(&mut self) -> FoxTreeCursor<SourceValue> {
        let (parent, last) = self.map.last_node();
        FoxTreeCursor::new(&mut self.map, parent, last)
    }
}

// Null implementation of SourceMap that does nothing
// (lets parsers call map methods unconditionally; still owns a dummy tree so
// cursors can be handed out).
pub struct NullSourceMap {
    tree: FoxTreeMap<SourceValue>,
}

impl NullSourceMap {
    pub(crate) fn new() -> Self {
        Self {
            tree: FoxTreeMap::new(SourceValue::default()),
        }
    }
}

impl OptionalSourceMap for NullSourceMap {
    fn as_any(&self) -> &dyn Any {
        self
    }
    fn as_some(&self) -> Option<&SourceMap> {
        // Null map: there is no SourceMap to expose.
        None
    }
    fn add_child(&mut self, _parent: usize, _name: &str, _data: SourceValue) -> FoxTreeCursor<SourceValue> {
        // Always return a cursor to the root; do nothing else
        FoxTreeCursor::new(&mut self.tree, 0, 0)
    }
    fn debug_tree(&self) {
        // No-op
    }
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "NullSourceMap")
    }
    fn last_node(&mut self) -> FoxTreeCursor<SourceValue> {
        // No real tree state: the root stands in for "last node".
        FoxTreeCursor::new(&mut self.tree, 0, 0)
    }
}

impl Debug for dyn OptionalSourceMap {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        // Delegates to the trait's own `fmt` method (not recursion into Debug::fmt).
        self.fmt(f)
    }
}

impl Debug for NullSourceMap {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "NullSourceMap")
    }
}

pub trait MapDump {
    /// Writes the structure's information to the provided `OptionalSourceMap`.
    fn write_to_map(&self, map: &mut Box<dyn OptionalSourceMap>, parent: usize) -> usize;
}
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/range_check.rs
src/range_check.rs
/*
    FluxFox
    https://github.com/dbalsom/fluxfox

    Copyright 2024-2025 Daniel Balsom

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the “Software”), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

    --------------------------------------------------------------------------

    src/range_check.rs

    Implement an O(log n) range checker for detecting if a value is within a
    range.
*/
use std::ops::Range;

/// Sweep-line membership test over a set of half-open ranges.
///
/// Construction is O(n log n); [`RangeChecker::contains`] is O(log n) via
/// binary search, matching the intent stated in the file header.
#[derive(Clone, Default)]
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))]
pub(crate) struct RangeChecker {
    // Sorted `(value, active)` pairs, where `active` is the number of ranges
    // covering values in `[value, next_value)`. Built from +1/-1 boundary
    // events in `new`; the running-sum form is what lets `contains` binary
    // search instead of scanning.
    // NOTE(review): the serialized form changed from raw +1/-1 events to
    // running sums — confirm no persisted RangeChecker data predates this.
    events: Vec<(usize, i32)>,
}

impl RangeChecker {
    /// Build a checker from a slice of half-open ranges (`start..end`, `end`
    /// exclusive, matching [`Range`] semantics). Ranges may overlap; empty
    /// ranges (`start == end`) contribute nothing.
    pub fn new(ranges: &[Range<usize>]) -> Self {
        let mut events: Vec<(usize, i32)> = Vec::with_capacity(ranges.len() * 2);
        for range in ranges {
            events.push((range.start, 1)); // Start of range
            // Range is half-open, so the range stops covering values at
            // exactly `range.end` — not `range.end + 1`, which would wrongly
            // make `contains(range.end)` true.
            events.push((range.end, -1)); // End of range, exclusive
        }
        events.sort_unstable();
        // Convert the +1/-1 deltas into running totals: events[i].1 becomes
        // the number of ranges active for any value in
        // [events[i].0, events[i + 1].0).
        let mut active = 0i32;
        for event in events.iter_mut() {
            active += event.1;
            event.1 = active;
        }
        RangeChecker { events }
    }

    /// Return true if `value` lies inside at least one of the ranges supplied
    /// to [`RangeChecker::new`]. O(log n).
    pub fn contains(&self, value: usize) -> bool {
        // Index of the first event strictly past `value`; the event just
        // before it (if any) carries the active-range count at `value`.
        let idx = self.events.partition_point(|&(point, _)| point <= value);
        idx > 0 && self.events[idx - 1].1 > 0
    }
}
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/random.rs
src/random.rs
/*
    FluxFox
    https://github.com/dbalsom/fluxfox

    Copyright 2024-2025 Daniel Balsom

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the “Software”), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

    --------------------------------------------------------------------------

    src/random.rs

    Provide a simple random bit generator.
*/
#![allow(dead_code)]

/// Size of the precomputed bit table. Must stay a power of two so lookups can
/// wrap with a simple modulo/mask.
const RANDOM_BITS_SIZE: usize = 2048;

/// Table of pseudo-random bits, computed once at compile time from a fixed seed.
const PSEUDO_RANDOM_BITS: [bool; RANDOM_BITS_SIZE] = generate_pseudo_random_bits(0x57A857FA, RANDOM_BITS_SIZE);

/// Derive a single deterministic pseudo-random bit from `seed` and `index`
/// via a small integer hash (xor, wrapping multiply, xor-shift).
const fn pseudo_random_bit(seed: u32, index: usize) -> bool {
    let mut x = seed ^ (index as u32);
    x = x.wrapping_mul(0x45d9f3b);
    x ^= x >> 16;
    x & 1 == 1
}

/// Fill a fixed-size table with the first `len` pseudo-random bits for `seed`.
/// Entries past `len` remain `false`. (`while` rather than `for`: this is a
/// `const fn`.)
const fn generate_pseudo_random_bits(seed: u32, len: usize) -> [bool; RANDOM_BITS_SIZE] {
    let mut table = [false; RANDOM_BITS_SIZE];
    let mut n = 0;
    while n < len {
        table[n] = pseudo_random_bit(seed, n);
        n += 1;
    }
    table
}

/// Fetch the pseudo-random bit for `index`; indices wrap around the table.
pub fn random_bit(index: usize) -> bool {
    // RANDOM_BITS_SIZE is a power of two, so `%` compiles to the same mask
    // as `& (RANDOM_BITS_SIZE - 1)`.
    PSEUDO_RANDOM_BITS[index % RANDOM_BITS_SIZE]
}

/// Borrowing variant of [`random_bit`], for callers that need a `&'static bool`.
pub fn random_bit_ref(index: usize) -> &'static bool {
    &PSEUDO_RANDOM_BITS[index % RANDOM_BITS_SIZE]
}
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/image_loader.rs
src/image_loader.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ #![allow(dead_code)] //! A module that implements a builder pattern for [DiskImage] designed around //! creation of a [DiskImage] from an existing disk image file. //! It is possible to call DiskImage::load() directly, but this module provides //! a simpler interface for doing so which is less likely to break in future //! versions of the library. use crate::{prelude::TrackDataResolution, types::Platform, DiskImage, DiskImageError, DiskImageFileFormat}; use std::path::PathBuf; /// Implements the Builder pattern for DiskImage objects. /// Allows for creation of blank or pre-formatted DiskImages. #[derive(Default)] pub struct ImageLoader { /// Restrict loading of disk images to disks identified to belong to a /// specific platform. 
pub(crate) platform: Option<Platform>, /// Restrict loading of disk images to the specified image file format. /// This will bypass automatic format detection. pub(crate) format: Option<DiskImageFileFormat>, /// If a disk image can be resolved to different resolutions, this field /// will determine which resolution to use. If a disk image format does /// not support multiple resolutions, this field will be ignored. pub(crate) resolution: Option<TrackDataResolution>, /// Control whether to parse containers/archives while loading. /// If false, we can only handle raw disk images. /// This will disable archived disk images like IMZ and ADZ. pub(crate) parse_containers: bool, /// If an image (or image container) can contain multiple volumes, this /// field will determine which volume to load, by index. The list of /// volumes can be returned in a `ImageLoaderError::MultiVolume` error. pub(crate) volume_index: Option<usize>, /// Similar to volume_index, but allows for specifying the volume by path. /// The path is significant only to the relative archive filesystem. /// If both index and path are specified, the path will take precedence. pub(crate) volume_path: Option<PathBuf>, /// Create a source map during import, if the parser supports doing so. pub(crate) create_source_map: bool, } impl ImageLoader { pub fn new() -> ImageLoader { Default::default() } pub fn with_platform(mut self, platform: Platform) -> ImageLoader { self.platform = Some(platform); self } pub fn with_file_format(mut self, format: DiskImageFileFormat) -> ImageLoader { self.format = Some(format); self } /// Set the [`TrackDataResolution`] to use for the [`DiskImage`] to be built. 
pub fn with_resolution(mut self, resolution: TrackDataResolution) -> ImageLoader { self.resolution = Some(resolution); self } pub fn with_volume_index(mut self, volume_index: usize) -> ImageLoader { self.volume_index = Some(volume_index); self } pub fn with_volume_path(mut self, volume_path: PathBuf) -> ImageLoader { self.volume_path = Some(volume_path); self } pub fn with_source_map(mut self, state: bool) -> ImageLoader { self.create_source_map = state; self } pub fn with_container(mut self, state: bool) -> ImageLoader { self.parse_containers = state; self } #[allow(unused_mut)] pub fn load(mut self) -> Result<DiskImage, DiskImageError> { unimplemented!() } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/prelude.rs
src/prelude.rs
/*
    FluxFox
    https://github.com/dbalsom/fluxfox

    Copyright 2024-2025 Daniel Balsom

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the “Software”), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

    --------------------------------------------------------------------------
*/

//! Convenience re-exports of the most commonly used fluxfox types and traits.
//! Import everything at once with `use fluxfox::prelude::*;`.

pub use super::DiskImageError;
pub use crate::{
    diskimage::DiskImage,
    file_parsers::{
        format_from_ext, supported_extensions, ImageFormatParser, ParserReadOptions, ParserWriteCompatibility,
        ParserWriteOptions,
    },
    image_builder::ImageBuilder,
    image_writer::ImageWriter,
    platform::Platform,
    sector_view::StandardSectorView,
    types::{
        DiskCh, DiskChs, DiskChsn, DiskChsnQuery, DiskImageFileFormat, RwScope, SectorMapEntry, StandardFormat,
        StandardFormatParam, TrackDataEncoding, TrackDataRate, TrackDataResolution, TrackDensity,
    },
    SectorId, SectorIdQuery,
};
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/detect.rs
src/detect.rs
/*
    FluxFox
    https://github.com/dbalsom/fluxfox

    Copyright 2024-2025 Daniel Balsom

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the “Software”), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

    --------------------------------------------------------------------------
*/
use crate::{
    containers::{
        archive::{FileArchiveType, StatelessFileArchive},
        DiskImageContainer,
    },
    file_parsers::ImageFormatParser,
    io::ReadSeek,
    types::{chs::DiskChs, standard_format::StandardFormat},
    util::natural_sort,
    DiskImageError,
    DiskImageFileFormat,
};
use std::path::{Path, PathBuf};

#[cfg(feature = "zip")]
use crate::{containers::KryoFluxSet, file_parsers::kryoflux::KfxFormat};

use strum::IntoEnumIterator;

/// Attempt to detect the container format of an input stream implementing [Read] + [Seek]. If the
/// format cannot be determined, `[DiskImageError::UnknownFormat]` is returned.
///
/// If at least one archive format feature is enabled, we can look into and identify images in
/// archives such as `zip`, `gzip`, and `tar`.
///
/// Most common of these are WinImage's "Compressed Disk Image" format, `IMZ`, which is simply
/// a `zip` file containing a single raw sector image with `IMA` extension. Similarly, `ADZ` files
/// are Amiga `ADF` images compressed with gzip.
///
/// However, we can extend this to support simple archive containers of other file image formats.
/// if an archive contains a single file, it will be assumed to be a disk image of some sort.
///
/// Given MartyPC's feature set, it can either mount the files within a zip as a FAT filesystem,
/// or it can let fluxfox mount the image inside the zip instead. Choosing which is the correct
/// behavior desired may not be trivial; so we assume a compressed disk image will only contain
/// one file. You could override loading by adding a second dummy file.
///
/// The exception to this are Kryoflux images which span multiple files, but these are easily
/// differentiated due to their large size and regular naming conventions.
///
/// The smallest Kryoflux set I have seen is of a 160K disk, 5_741_334 bytes uncompressed.
/// Therefore, I assume a cutoff point of 5MB for Kryoflux sets. This may need to be tweaked
/// to support even older, lower capacity disks in the future.
///
pub fn detect_container_format<T: ReadSeek>(
    image_io: &mut T,
    path: Option<&Path>,
) -> Result<DiskImageContainer, DiskImageError> {
    log::debug!("Detecting container format...");

    // Archive handling is only compiled in when at least one archive feature
    // is enabled; otherwise we fall straight through to raw format detection.
    #[cfg(any(feature = "zip", feature = "gzip", feature = "tar"))]
    {
        // First of all, is the input file an archive?
        if let Some(archive) = FileArchiveType::detect_archive_type(image_io) {
            log::debug!("Archive detected: {:?}", archive);

            // Get the archive info
            let a_info = archive.info(image_io)?;
            if a_info.file_count > 0 {
                log::debug!(
                    "{:?} archive file detected with {} files, total size: {}",
                    a_info.archive_type,
                    a_info.file_count,
                    a_info.total_size
                );
            }

            // If there's only one file, we can assume it should be a disk image
            if a_info.file_count == 1 {
                let (file_buf, file_path) = archive.extract_first_file(image_io)?;

                // Wrap buffer in Cursor, and send it through all our format detectors.
                let mut file_io = std::io::Cursor::new(file_buf);
                for format in DiskImageFileFormat::iter() {
                    if format.detect(&mut file_io) {
                        // If we made a detection, we can return this single file as a ResolvedFile
                        // container. The caller doesn't even need to know it was in an archive.
                        return Ok(DiskImageContainer::ResolvedFile(
                            format,
                            file_io.into_inner(),
                            Some(file_path),
                            path.map(|p| p.to_path_buf()),
                        ));
                    }
                }
                // Single archived file matched no known format.
                return Err(DiskImageError::UnknownFormat);
            }
            else if a_info.total_size > 5_000_000 {
                // Multiple files in the zip, of at least 5MB - this assumes a Kryoflux set
                let file_listing = archive.file_listing(image_io)?;
                let path_vec: Vec<PathBuf> = file_listing
                    .files
                    .iter()
                    .map(|entry| PathBuf::from(&entry.name))
                    .collect();

                // Get all files that end in "00.0.raw" - should match the first file of any Kryoflux set.
                let mut raw_files: Vec<_> = path_vec
                    .iter()
                    .filter(|&path| {
                        path.file_name()
                            .and_then(|name| name.to_str())
                            .map_or(false, |name| name.ends_with("00.0.raw"))
                    })
                    .collect();

                // Sort the matches using alphabetic natural sort. This is intended to match the first disk if
                // a zip archive has multiple disks in it.
                raw_files.sort_by(|a: &&PathBuf, b: &&PathBuf| natural_sort(a, b));

                log::debug!("Raw files: {:?}", raw_files);

                // Expand each "…00.0.raw" anchor file into its full Kryoflux
                // stream-file set, constrained to files actually present in
                // the archive listing.
                let mut set_vec = Vec::new();
                for file in raw_files {
                    log::debug!("Found .raw file in archive: {:?}", file);
                    let kryo_set = KfxFormat::expand_kryoflux_set(file, Some(path_vec.clone()))?;
                    log::debug!(
                        "Expanded to Kryoflux set of {} files, geometry: {}",
                        kryo_set.0.len(),
                        kryo_set.1
                    );
                    let path_to_set = file.parent().unwrap_or(&PathBuf::new()).to_path_buf();
                    set_vec.push(KryoFluxSet {
                        base_path: path_to_set.clone(),
                        file_set: kryo_set.0,
                        geometry: kryo_set.1,
                    });
                }

                if !set_vec.is_empty() {
                    for (si, set) in set_vec.iter().enumerate() {
                        log::debug!(
                            "Found Kryoflux set in archive at idx {}, path : {}",
                            si,
                            set.base_path.display()
                        );
                    }
                    return Ok(DiskImageContainer::ZippedKryofluxSet(set_vec));
                }
                // Large multi-file archive with no Kryoflux sets: fall through
                // to raw detection on the archive stream itself.
            }
        }
    }

    // Format is not an archive.
    for format in DiskImageFileFormat::iter() {
        if format.detect(&mut *image_io) {
            // If this a Kryoflux stream file, we need to resolve the set of files it belongs to.
            if let DiskImageFileFormat::KryofluxStream = format {
                return Ok(DiskImageContainer::KryofluxSet);
            }
            // Otherwise this must just be a plain File container.
            return Ok(DiskImageContainer::File(format, path.map(|p| p.to_path_buf())));
        }
    }
    Err(DiskImageError::UnknownFormat)
}

/// Attempt to return a DiskChs structure representing the geometry of a disk image from the size of a raw sector image.
/// Returns None if the size does not match a known raw disk image size.
pub fn chs_from_raw_size(size: usize) -> Option<DiskChs> {
    match StandardFormat::try_from(size) {
        Ok(fmt) => Some(fmt.chs()),
        Err(_) => None,
    }
}
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/lib.rs
src/lib.rs
/*
    FluxFox
    https://github.com/dbalsom/fluxfox

    Copyright 2024-2025 Daniel Balsom

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the “Software”), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

    --------------------------------------------------------------------------
*/

//! # fluxfox
//!
//! fluxfox is a library crate for reading, writing, and manipulating floppy disk images of the
//! kind used with vintage IBM Personal Computers and compatibles.
//!
//! fluxfox is primarily designed for emulator authors who may be writing a PC emulator and would
//! like to support disk images in a variety of formats, however it can be used for visualization,
//! dumping, editing, and other disk image tasks.
//!
//! fluxfox currently supports several different disk image formats, both modern and vintage, of
//! flux, bitstream and sector-based resolution.
//!
//! The main interface to fluxfox is via a [`DiskImage`] object, which can be created by loading
//! a disk image file, or by creating a new disk image from scratch.
//!
//! It is recommended to use the [`ImageBuilder`] interface to load or create a disk image.

mod bit_ring;
pub mod bitstream_codec;
pub mod boot_sector;
mod containers;
mod copy_protection;
mod detect;
pub mod disk_lock;
mod disk_schema;
pub mod diskimage;
mod file_parsers;
pub mod file_system;
pub mod flux;
pub mod image_builder;
mod image_loader;
mod image_writer;
pub mod io;
mod platform;
pub mod prelude;
mod random;
mod range_check;
mod scripting;
mod sector_view;
pub mod source_map;
pub mod track;
pub mod track_schema;
mod tree_map;
pub mod types;
pub mod util;

#[cfg(feature = "viz")]
pub mod visualization;

use std::{hash::RandomState, sync::Arc};
use thiserror::Error;

/// Largest sector payload fluxfox will accept, in bytes.
pub const MAXIMUM_SECTOR_SIZE: usize = 8192;
/// Default sector payload size for standard PC formats, in bytes.
pub const DEFAULT_SECTOR_SIZE: usize = 512;
/// DOS end-of-file marker byte (Ctrl-Z).
pub const ASCII_EOF: u8 = 0x1A;

/// The maximum cylinder any drive can seek to or that we will ever see in an image.
/// This is used for setting safe capacities for vectors and other data structures and track-based
/// normalization logic.
/// This may need to be adjusted if we ever see a disk image with more than 85 cylinders.
pub const MAX_CYLINDER: usize = 85;

// Crate-wide hash map/set aliases; indirection allows swapping the hasher later.
#[allow(unused)]
pub type FoxHashMap<K, V, S = RandomState> = std::collections::HashMap<K, V, S>;
#[allow(unused)]
type FoxHashSet<T, S = RandomState> = std::collections::HashSet<T, S>;

/// The status of a disk image loading operation, for file parsers that support progress reporting.
pub enum LoadingStatus {
    /// Emitted by file parsers that support progress updates. This is sent before any other task
    /// is performed, to allow the caller time to prepare and display a progress bar.
    ProgressSupport,
    /// Emitted by file parsers that support progress updates to inform the caller of the current progress.
    /// The value is a floating-point number between 0.0 and 1.0, where 1.0 represents full completion.
    /// Note: The value 1.0 is not guaranteed to be emitted.
    Progress(f64),
    /// Emitted by file parsers to inform the caller that the loading operation is complete.
    Complete,
    /// Emitted by file parsers to inform the caller that an error occurred during the loading operation.
    Error,
}

/// Callback signature used to deliver [LoadingStatus] updates during image loading.
pub type LoadingCallback = Arc<dyn Fn(LoadingStatus) + Send + Sync>;

/// Unified error type for all disk image operations.
#[derive(Clone, Debug, Error)]
pub enum DiskImageError {
    #[error("An IO error occurred reading or writing the disk image: {0}")]
    IoError(String),
    #[error("A filesystem error occurred or path not found")]
    FsError,
    #[error("An error occurred reading or writing a file archive: {0}")]
    ArchiveError(FileArchiveError),
    #[error("Unknown disk image format")]
    UnknownFormat,
    #[error("Unsupported disk image format for requested operation")]
    UnsupportedFormat,
    #[error("The disk image is valid but contains incompatible disk information: {0}")]
    IncompatibleImage(String),
    #[error("The disk image format parser encountered an error")]
    FormatParseError,
    #[error("The disk image format parser reported the image was corrupt: {0}")]
    ImageCorruptError(String),
    #[error("The requested head or cylinder could not be found")]
    SeekError,
    #[error("An error occurred addressing the track bitstream")]
    BitstreamError,
    #[error("The requested sector ID could not be found")]
    IdError,
    #[error("The requested operation matched multiple sector IDs")]
    UniqueIdError,
    #[error("No sectors were found on the current track")]
    DataError,
    #[error("No schema is defined for the current track")]
    SchemaError,
    #[error("A CRC error was detected in the disk image")]
    CrcError,
    #[error("An invalid function parameter was supplied")]
    ParameterError,
    #[error("Write-protect status prevents writing to the disk image")]
    WriteProtectError,
    #[error("Flux track has not been resolved")]
    ResolveError,
    #[error("An error occurred reading a multi-disk archive: {0}")]
    MultiDiskError(String),
    #[error("An error occurred attempting to lock a resource: {0}")]
    SyncError(String),
    #[error("The disk image was not compatible with the requested platform")]
    PlatformMismatch,
    #[error("The disk image was not compatible with the requested format")]
    FormatMismatch,
}

// Manually implement `From<io::Error>` for `DiskImageError`.
// io::Error is not Clone, so only its message is retained.
impl From<io::Error> for DiskImageError {
    fn from(err: io::Error) -> Self {
        DiskImageError::IoError(err.to_string()) // You could convert in a different way
    }
}

impl From<FileArchiveError> for DiskImageError {
    fn from(err: FileArchiveError) -> Self {
        DiskImageError::ArchiveError(err)
    }
}

// Manually implement `From<binrw::Error>` for `DiskImageError`
impl From<binrw::Error> for DiskImageError {
    fn from(err: binrw::Error) -> Self {
        DiskImageError::IoError(err.to_string()) // Again, you could convert differently
    }
}

/// Errors produced by the (feature-gated) visualization module.
#[derive(Debug, Error)]
pub enum DiskVisualizationError {
    #[error("An invalid parameter was supplied: {0}")]
    InvalidParameter(String),
    #[error("No compatible tracks were found to visualize")]
    NoTracks,
    #[error("The disk image is not a valid format for visualization")]
    InvalidImage,
    #[error("The supplied parameters do not produce a visible visualization")]
    NotVisible,
}

// Re-export the most commonly used types at the crate root for convenience.
pub use crate::{
    diskimage::DiskImage,
    file_parsers::{format_from_ext, supported_extensions, ImageFormatParser, ParserWriteCompatibility},
    image_builder::ImageBuilder,
    image_writer::ImageWriter,
    types::{DiskImageFileFormat, SectorMapEntry},
};
use types::{DiskCh, DiskChs, DiskChsn, DiskChsnQuery};

// Internal imports and further convenience re-exports.
use crate::containers::archive::FileArchiveError;
pub use types::standard_format::StandardFormat;

pub type SectorId = DiskChsn;
pub type SectorIdQuery = DiskChsnQuery;
/// Per-head, per-cylinder listing of the sectors found on each track.
pub type DiskSectorMap = Vec<Vec<Vec<SectorMapEntry>>>;
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/image_writer.rs
src/image_writer.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- src/image_writer.rs Implements an output helper for writing disk images to a file. 
*/ use std::path::PathBuf; use crate::{ file_parsers::{ImageFormatParser, ParserWriteOptions}, io::Cursor, DiskImage, DiskImageError, DiskImageFileFormat, }; pub struct ImageWriter<'img> { pub image: &'img mut DiskImage, pub path: Option<PathBuf>, pub format: Option<DiskImageFileFormat>, } impl<'img> ImageWriter<'img> { pub fn new(img: &'img mut DiskImage) -> Self { Self { image: img, path: None, format: None, } } pub fn with_format(self, format: DiskImageFileFormat) -> Self { Self { format: Some(format), ..self } } pub fn with_path(self, path: PathBuf) -> Self { Self { path: Some(path), ..self } } pub fn write(self) -> Result<(), DiskImageError> { if self.path.is_none() { return Err(DiskImageError::ParameterError); } if self.format.is_none() { return Err(DiskImageError::ParameterError); } let path = self.path.unwrap(); let format = self.format.unwrap(); let mut buf = Cursor::new(Vec::with_capacity(1_000_000)); format.save_image(self.image, &ParserWriteOptions::default(), &mut buf)?; let data = buf.into_inner(); std::fs::write(path, data)?; Ok(()) } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/io.rs
src/io.rs
/*
    FluxFox
    https://github.com/dbalsom/fluxfox

    Copyright 2024-2025 Daniel Balsom

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the “Software”), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

    --------------------------------------------------------------------------
*/

/// Reexport the standard library's IO traits.
/// This gives an opportunity to implement our own versions if we wish to add no-std support.
pub use std::io::{Cursor, Error, ErrorKind, Read, Result, Seek, SeekFrom, Write};

/// A sum of `Read` and `Seek` traits.
pub trait ReadSeek: Read + Seek {}
// Blanket impl: anything that reads and seeks is a ReadSeek.
impl<T: Read + Seek> ReadSeek for T {}

#[allow(unused)]
/// A sum of `Read`, `Write` and `Seek` traits.
pub trait ReadWriteSeek: Read + Write + Seek {}
// Blanket impl: anything that reads, writes and seeks is a ReadWriteSeek.
impl<T: Read + Write + Seek> ReadWriteSeek for T {}

#[allow(dead_code)]
/// Convenience extension for reading primitive values from any [Read] source.
pub trait ReadBytesExt: Read {
    /// Read exactly one byte, failing if the source is exhausted.
    #[inline]
    fn read_u8(&mut self) -> Result<u8> {
        let mut byte = [0u8; 1];
        self.read_exact(&mut byte).map(|_| byte[0])
    }
}
// Blanket impl so every reader gets read_u8 for free.
impl<R: Read> ReadBytesExt for R {}
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/sector_view.rs
src/sector_view.rs
/*
    FluxFox
    https://github.com/dbalsom/fluxfox

    Copyright 2024-2025 Daniel Balsom

    Permission is hereby granted, free of charge, to any person obtaining a copy
    of this software and associated documentation files (the “Software”), to
    deal in the Software without restriction, including without limitation the
    rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
    sell copies of the Software, and to permit persons to whom the Software is
    furnished to do so, subject to the following conditions:

    The above copyright notice and this permission notice shall be included in
    all copies or substantial portions of the Software.

    THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
    IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
    AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
    LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
    FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
    DEALINGS IN THE SOFTWARE.

    --------------------------------------------------------------------------

    src/sector_view.rs

    Implement a sector I/O interface for reading and writing sectors.

    The StandardSectorView struct implements Read + Write + Seek and can be
    given to any function that takes a Read + Write + Seek object for direct
    reading and writing of a fluxfox DiskImage as if it were a raw sector
    image.
*/
use crate::{
    io::{Read, Seek, Write},
    types::DiskCh,
    DiskImage,
    DiskImageError,
    SectorIdQuery,
    StandardFormat,
};

use crate::{
    disk_lock::{DiskLock, NonTrackingDiskLock, NullContext},
    prelude::{DiskChs, DiskChsn},
};

/// A linear, raw-sector-image view over a [DiskImage] with a fixed
/// [StandardFormat] geometry. One sector is buffered at a time; cursors track
/// the current track, sector id, and byte offset within the buffered sector.
pub struct StandardSectorView {
    disk: NonTrackingDiskLock<DiskImage>,      // shared, lockable disk image
    disk_format: StandardFormat,               // fixed geometry of the view
    track_cursor: DiskCh,                      // current cylinder/head
    sector_id_cursor: u8,                      // current sector id on the track
    spt: u8,                                   // sectors per track for the format
    sector_buffer: Box<[u8]>,                  // buffered contents of the current sector
    sector_size: usize,                        // bytes per sector for the format
    sector_dirty: bool,                        // buffer modified and not yet committed
    sector_byte_cursor: usize,                 // byte offset within sector_buffer
    eod: bool, // End-of-disk flag. All read/write operations that exceed the end of the current sector will fail.
}

impl StandardSectorView {
    /// Create a view over `disk_lock` with geometry `format`, and pre-load the
    /// first sector (cylinder 0, head 0, sector 1) into the buffer.
    pub fn new(
        disk_lock: impl Into<NonTrackingDiskLock<DiskImage>>,
        format: StandardFormat,
    ) -> Result<Self, DiskImageError> {
        let disk = disk_lock.into();
        let mut new = StandardSectorView {
            disk,
            disk_format: format,
            track_cursor: DiskCh::new(0, 0),
            sector_id_cursor: 1,
            spt: format.layout().s(),
            sector_buffer: vec![0; format.sector_size()].into_boxed_slice(),
            sector_size: format.sector_size(),
            sector_dirty: false,
            sector_byte_cursor: 0,
            eod: false,
        };

        // Read the first sector into the buffer.
        new.read_sector(new.sector_id_cursor)?;
        Ok(new)
    }

    /// The fixed [StandardFormat] this view was created with.
    pub fn format(&self) -> StandardFormat {
        self.disk_format
    }

    /// The full CHSN address of the sector currently in the buffer.
    pub fn chsn(&self) -> DiskChsn {
        DiskChsn::from((
            DiskChs::from((self.track_cursor, self.sector_id_cursor)),
            self.disk_format.chsn().n(),
        ))
    }

    /// Seek to the specified CHS address within the sector view
    pub fn seek_to_chs(&mut self, chs: impl Into<DiskChs>) -> crate::io::Result<u64> {
        let chs = chs.into();
        // Convert the CHS address to a linear byte offset in the equivalent
        // raw sector image, then reuse the offset-based seek path.
        let offset = chs
            .to_raw_offset(&self.disk_format.layout())
            .ok_or(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Invalid CHS"))?;
        self.seek_to_offset(offset)
    }

    /// Seek to a linear byte offset, switching tracks/sectors (and committing
    /// any dirty buffer) as required.
    fn seek_to_offset(&mut self, offset: usize) -> crate::io::Result<u64> {
        let (chs, sector_offset) = match DiskChs::from_raw_offset(offset, &self.disk_format.layout()) {
            Some(chs) => chs,
            None => return Err(std::io::Error::new(std::io::ErrorKind::InvalidInput, "Invalid offset")),
        };

        if DiskChs::from((self.track_cursor, self.sector_id_cursor)) == chs {
            // We're already at the correct CHS, so just set the byte cursor.
            self.sector_byte_cursor = sector_offset;
            return Ok(offset as u64);
        }

        // Do we need to change sectors?
        // NOTE(review): this condition is always true here — the equal case
        // returned above. Harmless, but the check is redundant.
        if DiskChs::from((self.track_cursor, self.sector_id_cursor)) != chs {
            log::trace!("seek_to_offset(): Seeking to CHS: {}", chs);
            // Do we need to switch tracks?
            if chs.ch() != self.track_cursor {
                // Update the track cursor
                self.track_cursor = DiskCh::from(chs);
            }

            // Commit the current sector
            self.commit_sector()
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;

            // Read the specified sector on this track into the sector buffer.
            self.sector_id_cursor = chs.s();
            self.read_sector(self.sector_id_cursor)
                .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?;
        }

        // Set the sector byte cursor to the specified offset.
        self.sector_byte_cursor = sector_offset;
        Ok(offset as u64)
    }

    /// Linear byte offset of the current cursor position in the equivalent
    /// raw sector image.
    fn offset(&self) -> usize {
        let chs = DiskChs::from((self.track_cursor, self.sector_id_cursor));
        chs.to_raw_offset(&self.disk_format.layout()).unwrap_or(0) + self.sector_byte_cursor
    }

    /// Read sector `sector_id` on the current track into the sector buffer,
    /// normalizing the buffer to exactly `sector_size` bytes (zero-padding a
    /// short read, truncating a long one). Resets byte cursor, dirty and eod
    /// flags.
    fn read_sector(&mut self, sector_id: u8) -> Result<(), DiskImageError> {
        self.sector_id_cursor = sector_id;
        self.sector_byte_cursor = 0;
        self.sector_buffer = self
            .disk
            .read(NullContext::default())
            .unwrap()
            .read_sector_basic(self.track_cursor, SectorIdQuery::from(self.chsn()), None)?
            .into_boxed_slice();

        log::trace!("read_sector(): Reading sector: {}", self.chsn());

        // If the result is less or more than expected, extend or trim as necessary.
        #[allow(clippy::comparison_chain)]
        if self.sector_buffer.len() < self.sector_size {
            let mut new_sector_buffer = vec![0; self.sector_size].into_boxed_slice();
            new_sector_buffer[..self.sector_buffer.len()].copy_from_slice(&self.sector_buffer);
            self.sector_buffer = new_sector_buffer;
        }
        else if self.sector_buffer.len() > self.sector_size {
            let mut new_sector_buffer = vec![0; self.sector_size].into_boxed_slice();
            new_sector_buffer.copy_from_slice(&self.sector_buffer[..self.sector_size]);
            self.sector_buffer = new_sector_buffer;
        }

        // New sector means not at EOD
        self.eod = false;
        // New sector is clean.
        self.sector_dirty = false;
        Ok(())
    }

    /// Write the current sector buffer to the disk image if it is dirty.
    /// This function must be called before changing the track cursor.
fn commit_sector(&mut self) -> Result<(), DiskImageError> { if self.sector_dirty { self.disk.write(NullContext::default()).unwrap().write_sector_basic( self.track_cursor, SectorIdQuery::from(self.chsn()), None, &self.sector_buffer, )?; } self.sector_dirty = false; Ok(()) } fn next_sector(&mut self) -> Result<(), DiskImageError> { self.sector_id_cursor += 1; if self.sector_id_cursor > self.spt { // Standard sector ids are 1-indexed. self.sector_id_cursor = 1; self.eod = !self.track_cursor.seek_next_track(self.disk_format); log::trace!("next_sector(): Seek to new track: {}", self.track_cursor); } // Commit the sector if needed. self.commit_sector()?; if !self.eod { self.read_sector(self.sector_id_cursor)?; } Ok(()) } } impl Read for StandardSectorView { fn read(&mut self, buf: &mut [u8]) -> std::io::Result<usize> { let mut bytes_read = 0; let mut buf_cursor = 0; if self.eod { return Ok(0); } while !self.eod && (buf_cursor < buf.len()) { if self.sector_byte_cursor < self.sector_buffer.len() { buf[buf_cursor] = self.sector_buffer[self.sector_byte_cursor]; self.sector_byte_cursor += 1; buf_cursor += 1; bytes_read += 1; } else { // We've reached the end of the current buffered sector, so we need to read the next sector. self.next_sector() .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; } } Ok(bytes_read) } } impl Write for StandardSectorView { fn write(&mut self, buf: &[u8]) -> crate::io::Result<usize> { let mut bytes_written = 0; let mut buf_cursor = 0; if self.eod { return Ok(0); } while !self.eod && (buf_cursor < buf.len()) { if self.sector_byte_cursor < self.sector_buffer.len() { self.sector_buffer[self.sector_byte_cursor] = buf[buf_cursor]; self.sector_byte_cursor += 1; buf_cursor += 1; bytes_written += 1; self.sector_dirty = true; } else { // Move to the next sector. 
self.next_sector() .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; } } Ok(bytes_written) } fn flush(&mut self) -> std::io::Result<()> { self.commit_sector() .map_err(|e| std::io::Error::new(std::io::ErrorKind::Other, e))?; Ok(()) } } impl Seek for StandardSectorView { fn seek(&mut self, pos: crate::io::SeekFrom) -> crate::io::Result<u64> { let new_offset = match pos { std::io::SeekFrom::Start(offset) => { // Seek from start. We can directly calculate the CHS from the offset. offset as usize } std::io::SeekFrom::End(offset) => { // Get the total size and the signed offset from the end. self.disk_format.disk_size().saturating_add_signed(offset as isize) } std::io::SeekFrom::Current(offset) => { // Get the current offset and the signed offset from the current position. let current_offset = self.offset(); current_offset.saturating_add_signed(offset as isize) } }; // Very noisy. //log::trace!("seek(): Seeking to offset: {}", new_offset); self.seek_to_offset(new_offset)?; Ok(new_offset as u64) } } #[cfg(test)] mod tests { use super::*; use crate::prelude::*; use std::sync::{Arc, RwLock}; fn create_view() -> StandardSectorView { let disk_image = create_test_disk_image(); let format = StandardFormat::PcFloppy360; StandardSectorView::new(disk_image, format).unwrap() } fn create_test_disk_image() -> Arc<RwLock<DiskImage>> { // Create a mock DiskImage for testing purposes let disk = ImageBuilder::new() .with_standard_format(StandardFormat::PcFloppy360) .with_formatted(true) .with_resolution(TrackDataResolution::BitStream) .build() .unwrap(); DiskImage::into_arc(disk) } #[test] fn test_new_standard_sector_view() { _ = create_view(); } #[test] fn test_read_sector() { let mut sector_view = create_view(); let result = sector_view.read_sector(1); assert!(result.is_ok()); } #[test] fn test_write_sector() { let mut sector_view = create_view(); let data = vec![0u8; sector_view.format().sector_size()]; sector_view.seek(std::io::SeekFrom::Start(512)).unwrap(); let 
result = sector_view.write(&data); assert!(result.is_ok()); assert_eq!(result.unwrap(), data.len()); // Read sector back let mut read_data = vec![0u8; sector_view.format().sector_size()]; sector_view.seek(std::io::SeekFrom::Start(512)).unwrap(); let read_result = sector_view.read(&mut read_data); assert!(read_result.is_ok()); assert_eq!(read_result.unwrap(), data.len()); } #[test] fn test_seek_sector() { let mut sector_view = create_view(); for sector in 0..sector_view.format().chs().total_sectors() { let offset = sector * sector_view.format().sector_size(); let result = sector_view.seek(std::io::SeekFrom::Start(offset as u64)); assert!(result.is_ok()); assert_eq!(result.unwrap(), offset as u64); } } #[test] fn test_seek_logic() { let offset = 2560; // Sector 5 let mut sector_view = create_view(); sector_view.seek_to_offset(offset).unwrap(); let chs = DiskChs::from(sector_view.chsn()); assert_eq!(DiskChs::new(0, 0, 6), chs); } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/diskimage.rs
src/diskimage.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ //! The `diskimage` module defines the [DiskImage] struct which serves as the main interface to //! fluxfox. A [DiskImage] represents a single floppy disk, consisting of a collection of [Track]s //! //! ## Creating a [DiskImage] //! A [DiskImage] should not be created directly. Instead, use an [ImageBuilder] to create a new //! disk image with specified parameters. 
use crate::{bitstream_codec::mfm::MfmCodec, track::bitstream::BitStreamTrack, DiskImageFileFormat, SectorMapEntry}; use crate::{ bitstream_codec::{fm::FmCodec, gcr::GcrCodec, TrackCodec}, boot_sector::BootSector, containers::DiskImageContainer, detect::detect_container_format, file_parsers::{ filter_writable, formats_from_caps, kryoflux::KfxFormat, FormatCaps, ImageFormatParser, ParserReadOptions, }, io::ReadSeek, source_map::{NullSourceMap, OptionalSourceMap, SourceMap, SourceValue}, track::{fluxstream::FluxStreamTrack, metasector::MetaSectorTrack, DiskTrack, Track, TrackAnalysis}, track_schema::{system34::System34Standard, TrackMetadata, TrackSchema}, types::{ chs::*, standard_format::StandardFormat, BitStreamTrackParams, DiskAnalysis, DiskDescriptor, DiskImageFlags, DiskSelection, FluxStreamTrackParams, MetaSectorTrackParams, ReadSectorResult, ReadTrackResult, RwScope, SharedDiskContext, TrackDataEncoding, TrackDataRate, TrackDataResolution, WriteSectorResult, }, util, DiskImageError, FoxHashMap, FoxHashSet, LoadingCallback, LoadingStatus, }; use bit_vec::BitVec; use sha1_smol::Digest; use std::{ io::Cursor, path::Path, sync::{Arc, Mutex, RwLock}, }; pub(crate) const DEFAULT_BOOT_SECTOR: &[u8] = include_bytes!("../resources/bootsector.bin"); /// A [`DiskImage`] represents the structure of a floppy disk. It contains a pool of track data /// structures, which are indexed by head and cylinder. /// /// A [`DiskImage`] can be created from a specified disk format using an [ImageBuilder]. /// /// A [`DiskImage`] may be any of the defined [`TrackDataResolution`] levels: /// * `MetaSector`: These images are sourced from sector-based disk image formats such as /// `IMG`, `IMD`, `TD0`, `ADF`, or `PSI`. /// * `BitStream` : These images are sourced from file formats such as `HFE`, `MFM`, `86F`, or `PRI`. /// * `FluxStream`: These images are sourced from flux-based formats such as `Kryoflux`, `SCP`, or /// `MFI`. 
#[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct DiskImage { /// Flags that can be applied to a disk image. pub(crate) flags: DiskImageFlags, /// The standard format of the disk image, if it adheres to one. (Nonstandard images will be None) pub(crate) standard_format: Option<StandardFormat>, /// The image format the disk image was sourced from, if any pub(crate) source_format: Option<DiskImageFileFormat>, /// A flag indicating if this disk image is allowed to contain multiple track resolution types. /// Attempts to add tracks with a different resolution will fail if this is false. pub(crate) multires: bool, /// A set of [TrackDataResolution] representing the different resolutions of track data in the image. /// Normally this will be a set of one; but it is possible to have multi-resolution images (MOOF, WOZ). pub(crate) resolution: FoxHashSet<TrackDataResolution>, /// A [DiskDescriptor] describing this image with more thorough parameters. pub(crate) descriptor: DiskDescriptor, /// A k/v store of metadata. Keys are normalized to a standard set of strings; see the metadata /// module for key names. pub(crate) metadata: FoxHashMap<String, String>, /// A structure containing information about the disks internal consistency. Used to construct image_caps. pub(crate) analysis: DiskAnalysis, /// The boot sector of the disk image, if successfully parsed. pub(crate) boot_sector: Option<BootSector>, /// The volume name of the disk image, if any. pub(crate) volume_name: Option<String>, /// A pool of track data structures, potentially in any order. pub(crate) track_pool: Vec<DiskTrack>, /// An array of vectors containing indices into the track pool. The first index is the head /// number, the second is the cylinder number. pub(crate) track_map: [Vec<usize>; 2], /// A shared context for the disk image, accessible by Tracks. 
#[cfg_attr(feature = "serde", serde(skip))] pub(crate) shared: Option<Arc<Mutex<SharedDiskContext>>>, /// A sourcemap for the disk image. This is not serialized as it is not necessary #[cfg_attr(feature = "serde", serde(skip))] pub(crate) source_map: Option<Box<dyn OptionalSourceMap>>, } impl Default for DiskImage { fn default() -> Self { Self { flags: DiskImageFlags::empty(), standard_format: None, source_format: None, multires: false, resolution: Default::default(), descriptor: DiskDescriptor::default(), metadata: Default::default(), analysis: Default::default(), boot_sector: None, volume_name: None, track_pool: Vec::new(), track_map: [Vec::new(), Vec::new()], shared: Some(Arc::new(Mutex::new(SharedDiskContext::default()))), source_map: Some(Box::new(NullSourceMap::new())), } } } impl DiskImage { pub fn detect_format<RS: ReadSeek>( image: &mut RS, path: Option<&Path>, ) -> Result<DiskImageContainer, DiskImageError> { detect_container_format(image, path) } /// Create a new [`DiskImage`] with the specified disk format. This function should not be called /// directly - use an [ImageBuilder] if you wish to create a new [`DiskImage`] from a specified format. 
pub fn create(disk_format: StandardFormat) -> Self { Self { flags: DiskImageFlags::empty(), standard_format: Some(disk_format), descriptor: disk_format.descriptor(), metadata: FoxHashMap::new(), source_format: None, multires: false, resolution: FoxHashSet::new(), analysis: DiskAnalysis { image_caps: Default::default(), weak: false, deleted_data: false, no_dam: false, address_error: false, data_error: false, overlapped: false, consistent_sector_size: Some(2), consistent_track_length: Some(disk_format.chs().s() as u32), }, boot_sector: None, volume_name: None, track_pool: Vec::new(), track_map: [Vec::new(), Vec::new()], shared: Some(Arc::new(Mutex::new(SharedDiskContext::default()))), source_map: Some(Box::new(NullSourceMap::new())), } } pub fn track_iter(&self) -> impl Iterator<Item = &DiskTrack> { // Find the maximum number of tracks among all heads let max_tracks = self.track_map.iter().map(|tracks| tracks.len()).max().unwrap_or(0); (0..max_tracks).flat_map(move |track_idx| { self.track_map.iter().filter_map(move |head_tracks| { head_tracks .get(track_idx) .and_then(move |&track_index| self.track_pool.get(track_index)) }) }) } pub fn track_idx_iter(&self) -> impl Iterator<Item = usize> + '_ { // Find the maximum number of tracks among all heads let max_tracks = self.track_map.iter().map(|tracks| tracks.len()).max().unwrap_or(0); (0..max_tracks).flat_map(move |track_idx| { self.track_map .iter() .filter_map(move |head_tracks| head_tracks.get(track_idx).copied()) }) } pub fn track_ch_iter(&self) -> impl Iterator<Item = DiskCh> + '_ { self.track_idx_iter() .map(move |track_idx| self.track_pool[track_idx].ch()) } pub fn track(&self, ch: DiskCh) -> Option<&DiskTrack> { self.track_map[ch.h() as usize] .get(ch.c() as usize) .and_then(|&track_idx| self.track_pool.get(track_idx)) } pub fn track_mut(&mut self, ch: DiskCh) -> Option<&mut DiskTrack> { self.track_map[ch.h() as usize] .get(ch.c() as usize) .and_then(|&track_idx| self.track_pool.get_mut(track_idx)) } pub fn 
track_by_idx(&self, track_idx: usize) -> Option<&DiskTrack> { self.track_pool.get(track_idx) } pub fn track_by_idx_mut(&mut self, track_idx: usize) -> Option<&mut DiskTrack> { self.track_pool.get_mut(track_idx) } pub fn set_multires(&mut self, multires: bool) { self.multires = multires; } pub fn multires(&self) -> bool { self.multires } pub fn set_flag(&mut self, flag: DiskImageFlags) { self.flags |= flag; } pub fn clear_flag(&mut self, flag: DiskImageFlags) { self.flags &= !flag; } pub fn has_flag(&self, flag: DiskImageFlags) -> bool { self.flags.contains(flag) } pub fn required_caps(&self) -> FormatCaps { self.analysis.image_caps } pub fn load_from_file( file_path: &Path, disk_selection: Option<DiskSelection>, callback: Option<LoadingCallback>, ) -> Result<Self, DiskImageError> { let mut file_vec = std::fs::read(file_path)?; let mut cursor = Cursor::new(&mut file_vec); let image = DiskImage::load(&mut cursor, Some(file_path), disk_selection, callback)?; Ok(image) } pub fn load<RS: ReadSeek>( image_io: &mut RS, image_path: Option<&Path>, disk_selection: Option<DiskSelection>, callback: Option<LoadingCallback>, ) -> Result<Self, DiskImageError> { let container = DiskImage::detect_format(image_io, image_path.clone())?; log::debug!("load(): Detected format: {:?}", container); // TODO: DiskImage should probably not concern itself with archives or disk sets... 
// We should probably move most of this into an ImageLoader interface similar to // ImageBuilder match container { DiskImageContainer::File(format, _path) => { let mut image = DiskImage::default(); format.load_image(image_io, &mut image, &ParserReadOptions::default(), callback)?; image.post_load_process(); Ok(image) } DiskImageContainer::ResolvedFile(format, file_vec, _path, _archive_path) => { let mut cursor = Cursor::new(file_vec); let mut image = DiskImage::default(); format.load_image(&mut cursor, &mut image, &ParserReadOptions::default(), callback)?; image.post_load_process(); Ok(image) } DiskImageContainer::Archive(_archive_format, _containers, _path) => { // We should have received any single-file archives as ResolvedFiles, so this // archive contains multiple files (but wasn't detected as a FileSet). // // This isn't currently supported. // let file_vec = extract_first_file(image_io)?; // let file_cursor = Cursor::new(file_vec); // let mut image = DiskImage::default(); // format.load_image(file_cursor, &mut image, &ParserReadOptions::default(), callback)?; // image.post_load_process(); // Ok(image) Err(DiskImageError::UnknownFormat) } DiskImageContainer::ZippedKryofluxSet(disks) => { #[cfg(not(feature = "zip"))] { log::error!("Cannot load zipped KryoFlux set: zip feature not enabled!"); return Err(DiskImageError::UnknownFormat); } let disk_opt = match disk_selection { Some(DiskSelection::Index(idx)) => disks.get(idx), Some(DiskSelection::Path(ref path)) => disks.iter().find(|disk| disk.base_path == *path), _ => { if disks.len() == 1 { disks.first() } else { log::error!("Multiple disks found in Kryoflux set without a selection."); return Err(DiskImageError::MultiDiskError( "No disk selection provided.".to_string(), )); } } }; if let Some(disk) = disk_opt { // Create an empty image. We will loop through all the files in the set and // append tracks to them as we go. 
let mut image = DiskImage::default(); image.descriptor.geometry = disk.geometry; if let Some(ref callback_fn) = callback { // Let caller know to show a progress bar callback_fn(LoadingStatus::ProgressSupport); } // Enable source map. image.assign_source_map(true); for (fi, file_path) in disk.file_set.iter().enumerate() { let mut file_vec = crate::containers::zip::extract_file(image_io, &file_path.clone())?; let mut cursor = Cursor::new(&mut file_vec); log::debug!("load(): Loading Kryoflux stream file from zip: {:?}", file_path); // Add a child node to the source map for each file in the set. image.source_map_mut().add_child( 0, &*file_path.file_name().unwrap_or_default().to_string_lossy(), SourceValue::default(), ); // We won't give the callback to the kryoflux loader - instead we will call it here ourselves // updating percentage complete as a fraction of files loaded. match KfxFormat::load_image(&mut cursor, &mut image, &ParserReadOptions::default(), None) { Ok(_) => {} Err(e) => { // It's okay to fail if we have already added the standard number of tracks to an image. log::error!("load(): Error loading Kryoflux stream file: {:?}", e); //return Err(e); break; } } if let Some(ref callback_fn) = callback { let completion = (fi + 1) as f64 / disk.file_set.len() as f64; callback_fn(LoadingStatus::Progress(completion)); } } if let Some(callback_fn) = callback { callback_fn(LoadingStatus::Complete); } image.post_load_process(); Ok(image) } else { log::error!( "Disk selection {} not found in Kryoflux set.", disk_selection.clone().unwrap() ); Err(DiskImageError::MultiDiskError(format!( "Disk selection {} not found in set.", disk_selection.unwrap() ))) } } DiskImageContainer::FileSet(_format, _files, _src_file) => { // Eventually this should replace KryofluxSet, but just stub it for now. 
Err(DiskImageError::UnsupportedFormat) } DiskImageContainer::KryofluxSet => { if let Some(image_path) = image_path { let (file_set, set_ch) = KfxFormat::expand_kryoflux_set(image_path, None)?; log::debug!( "load(): Expanded Kryoflux set to {} files, ch: {}", file_set.len(), set_ch ); // Create an empty image. We will loop through all the files in the set and // append tracks to them as we go. let mut image = DiskImage::default(); // Set the geometry of the disk image to the geometry of the Kryoflux set. image.descriptor.geometry = set_ch; // Enable source map. image.assign_source_map(true); for (fi, file_path) in file_set.iter().enumerate() { // Reading the entire file in one go and wrapping in a cursor is much faster // than a BufReader. let mut file_vec = std::fs::read(file_path.clone())?; let mut cursor = Cursor::new(&mut file_vec); log::debug!("load(): Loading Kryoflux stream file: {:?}", file_path); // Add a child node to the source map for each file in the set. image.source_map_mut().add_child( 0, &*file_path.file_name().unwrap_or_default().to_string_lossy(), SourceValue::default(), ); // We won't give the callback to the kryoflux loader - instead we will call it here ourselves // updating percentage complete as a fraction of files loaded. match KfxFormat::load_image(&mut cursor, &mut image, &ParserReadOptions::default(), None) { Ok(_) => {} Err(e) => { // It's okay to fail if we have already added the standard number of tracks to an image. 
log::error!("load(): Error loading Kryoflux stream file: {:?}", e); //return Err(e); break; } } if let Some(ref callback_fn) = callback { let completion = (fi + 1) as f64 / file_set.len() as f64; callback_fn(LoadingStatus::Progress(completion)); } } //let ch = DiskCh::new(build_image.track_map[0].len() as u16, build_image.track_map.len() as u8); //build_image.descriptor.geometry = ch; if let Some(callback_fn) = callback { callback_fn(LoadingStatus::Complete); } image.post_load_process(); Ok(image) } else { log::error!("Path parameter required when loading Kryoflux set."); Err(DiskImageError::ParameterError) } } } } #[cfg(feature = "async")] pub async fn load_async<RS: ReadSeek>( image_io: &mut RS, image_path: Option<&Path>, disk_selection: Option<DiskSelection>, callback: Option<LoadingCallback>, ) -> Result<Self, DiskImageError> { let container = DiskImage::detect_format(image_io, image_path)?; match container { DiskImageContainer::File(format, _) => { let mut image = DiskImage::default(); format.load_image(image_io, &mut image, &ParserReadOptions::default(), callback)?; image.post_load_process(); Ok(image) } DiskImageContainer::ResolvedFile(format, file_vec, _path, _archive_path) => { let mut cursor = Cursor::new(file_vec); let mut image = DiskImage::default(); format.load_image(&mut cursor, &mut image, &ParserReadOptions::default(), callback)?; image.post_load_process(); Ok(image) } DiskImageContainer::Archive(_archive, _items, _) => { // We should have received any single-file archives as ResolvedFiles, so this // archive contains multiple files (but wasn't detected as a FileSet). // // This isn't currently supported. // let (file_vec, inner_path) = archive.extract_first_file(image_io)?; // let file_cursor = Cursor::new(file_vec); // // // Create a default disk image and attempt to load the image from the archive. 
// let mut image = DiskImage::default(); // format.load_image(file_cursor, &mut image, &ParserReadOptions::default(), callback)?; // image.post_load_process(); // Ok(image) Err(DiskImageError::UnknownFormat) } DiskImageContainer::ZippedKryofluxSet(disks) => { #[cfg(feature = "zip")] { let disk_opt = match disk_selection { Some(DiskSelection::Index(idx)) => disks.get(idx), Some(DiskSelection::Path(ref path)) => disks.iter().find(|disk| disk.base_path == *path), _ => { if disks.len() == 1 { disks.first() } else { log::error!("Multiple disks found in Kryoflux set without a selection."); return Err(DiskImageError::MultiDiskError( "No disk selection provided.".to_string(), )); } } }; if let Some(disk) = disk_opt { // Create an empty image. We will loop through all the files in the set and // append tracks to them as we go. let mut image = DiskImage::default(); image.descriptor.geometry = disk.geometry; if let Some(ref callback_fn) = callback { // Let caller know to show a progress bar callback_fn(LoadingStatus::ProgressSupport); } let image_arc = Arc::new(Mutex::new(image)); for (fi, file_path) in disk.file_set.iter().enumerate() { let file_vec = crate::containers::zip::extract_file(image_io, &file_path.clone())?; let cursor = Cursor::new(file_vec); log::debug!("load(): Loading Kryoflux stream file from zip: {:?}", file_path); // We won't give the callback to the kryoflux loader - instead we will call it here ourselves // updating percentage complete as a fraction of files loaded. 
let kfx_format = DiskImageFileFormat::KryofluxStream; kfx_format .load_image_async(cursor, image_arc.clone(), &ParserReadOptions::default(), None) .await?; //KfxFormat::load_image_async(&mut cursor, &mut image, None).await?; if let Some(ref callback_fn) = callback { let completion = (fi + 1) as f64 / disk.file_set.len() as f64; callback_fn(LoadingStatus::Progress(completion)); } } if let Some(callback_fn) = callback { callback_fn(LoadingStatus::Complete); } // Unwrap image from Arc let mut image = Arc::try_unwrap(image_arc) .map_err(|_| DiskImageError::SyncError("Failed to unwrap image from Arc".to_string()))? .into_inner() .map_err(|_| DiskImageError::SyncError("Failed to unlock image from Mutex".to_string()))?; image.post_load_process(); Ok(image) } else { log::error!( "Disk selection {} not found in Kryoflux set.", disk_selection.clone().unwrap() ); Err(DiskImageError::MultiDiskError(format!( "Disk selection {} not found in set.", disk_selection.unwrap() ))) } } #[cfg(not(feature = "zip"))] { Err(DiskImageError::UnknownFormat) } } #[cfg(feature = "wasm")] DiskImageContainer::FileSet(_format, _path, _) => Err(DiskImageError::UnsupportedFormat), #[cfg(not(feature = "wasm"))] DiskImageContainer::FileSet(_format, _path, _) => Err(DiskImageError::UnsupportedFormat), #[cfg(feature = "wasm")] DiskImageContainer::KryofluxSet => Err(DiskImageError::UnsupportedFormat), #[cfg(feature = "tokio-async")] DiskImageContainer::KryofluxSet => { if let Some(image_path) = image_path { let (file_set, set_ch) = KfxFormat::expand_kryoflux_set(image_path, None)?; log::debug!( "load(): Expanded Kryoflux set to {} files, ch: {}", file_set.len(), set_ch ); // Create an empty image. We will loop through all the files in the set and // append tracks to them as we go. let mut image = DiskImage::default(); // Set the geometry of the disk image to the geometry of the Kryoflux set. 
image.descriptor.geometry = set_ch; for (fi, file_path) in file_set.iter().enumerate() { // Reading the entire file in one go and wrapping in a cursor is much faster // than a BufReader. let mut file_vec = tokio::fs::read(file_path.clone()).await?; let mut cursor = Cursor::new(&mut file_vec); log::debug!("load(): Loading Kryoflux stream file: {:?}", file_path); // We won't give the callback to the kryoflux loader - instead we will call it here ourselves // updating percentage complete as a fraction of files loaded. match KfxFormat::load_image(&mut cursor, &mut image, &ParserReadOptions::default(), None) { Ok(_) => {} Err(e) => { // It's okay to fail if we have already added the standard number of tracks to an image. log::error!("load(): Error loading Kryoflux stream file: {:?}", e); //return Err(e); break; } } if let Some(ref callback_fn) = callback { let completion = (fi + 1) as f64 / file_set.len() as f64; callback_fn(LoadingStatus::Progress(completion)); } } //let ch = DiskCh::new(build_image.track_map[0].len() as u16, build_image.track_map.len() as u8); //build_image.descriptor.geometry = ch; if let Some(callback_fn) = callback { callback_fn(LoadingStatus::Complete); } image.post_load_process(); Ok(image) } else { log::error!("Path parameter required when loading Kryoflux set."); Err(DiskImageError::ParameterError) } } } } pub fn set_volume_name(&mut self, name: String) { self.volume_name = Some(name); } pub fn volume_name(&self) -> Option<&str> { self.volume_name.as_deref() } pub fn set_data_rate(&mut self, rate: TrackDataRate) { self.descriptor.data_rate = rate; } pub fn data_rate(&self) -> TrackDataRate { self.descriptor.data_rate } pub fn set_data_encoding(&mut self, encoding: TrackDataEncoding) { self.descriptor.data_encoding = encoding; } pub fn data_encoding(&self) -> TrackDataEncoding { self.descriptor.data_encoding } pub fn set_image_format(&mut self, format: DiskDescriptor) { self.descriptor = format; } pub fn image_format(&self) -> &DiskDescriptor { 
&self.descriptor } pub fn geometry(&self) -> DiskCh { self.descriptor.geometry } pub fn heads(&self) -> u8 { self.descriptor.geometry.h() } pub fn tracks(&self, head: u8) -> u16 { self.track_map[head as usize].len() as u16 } pub fn write_ct(&self) -> u64 { if let Some(shared) = &self.shared { shared.lock().unwrap().writes } else { 0 } } pub fn source_format(&self) -> Option<DiskImageFileFormat> { self.source_format } pub fn set_source_format(&mut self, format: DiskImageFileFormat) { self.source_format = Some(format); } /// Return a list of track resolutions present in the disk image. /// This will usually be a single-element vector, but multi-resolution images are possible. pub fn resolution(&self) -> Vec<TrackDataResolution> { self.resolution.iter().cloned().collect() } /// Clear resolutions from the disk image and add the specified resolution. /// This should only be called before any tracks have been added to the image. pub fn set_resolution(&mut self, resolution: TrackDataResolution) { self.resolution.clear(); self.resolution.insert(resolution); } /// Ret pub fn can_visualize(&self) -> bool { self.resolution.contains(&TrackDataResolution::FluxStream) | self.resolution.contains(&TrackDataResolution::BitStream) } /// Adds a new `FluxStream` resolution track to the disk image. /// Data of this resolution is typically sourced from flux image formats such as Kryoflux, SCP
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
true
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/platform.rs
src/platform.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ use std::fmt::{self, Display, Formatter}; use crate::StandardFormat; /// The type of computer system that a disk image is intended to be used with - not necessarily the /// system that the disk image was created on. /// /// A `Platform` may be used as a hint to a disk image format parser, or provided in a /// [BitStreamTrackParams] struct to help determine the appropriate [TrackSchema] for a track. /// A `Platform` may not be specified (or reliable) in all disk image formats, nor can it always /// be determined from a [DiskImage] (High density MFM Macintosh 3.5" diskettes look nearly /// identical to PC 3.5" diskettes, unless you examine the boot sector). /// It may be the most pragmatic option to have the user specify the platform when loading/saving a /// disk image. 
#[repr(usize)] #[derive(Copy, Clone, Debug, PartialEq, strum::EnumIter)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub enum Platform { /// IBM PC and compatibles IbmPc, /// Commodore Amiga Amiga, /// Apple Macintosh Macintosh, /// Atari ST AtariSt, /// Apple II AppleII, } impl Display for Platform { fn fmt(&self, f: &mut Formatter) -> fmt::Result { match self { Platform::IbmPc => write!(f, "IBM PC"), Platform::Amiga => write!(f, "Commodore Amiga"), Platform::Macintosh => write!(f, "Apple Macintosh"), Platform::AtariSt => write!(f, "Atari ST"), Platform::AppleII => write!(f, "Apple II"), } } } impl From<StandardFormat> for Platform { fn from(format: StandardFormat) -> Self { use crate::types::standard_format::StandardFormat::*; match format { PcFloppy160 | PcFloppy180 | PcFloppy320 | PcFloppy360 | PcFloppy720 | PcFloppy1200 | PcFloppy1440 | PcFloppy2880 => Platform::IbmPc, #[cfg(feature = "amiga")] AmigaFloppy880 | AmigaFloppy1760 => Platform::Amiga, } } } impl Platform { // Try to convert a [TrackSchema] to a [Platform] based on a list of platforms to consider. // The list is required because some track schemas may be ambiguous (e.g. System34 used by // IBM PC, Macintosh and Atari ST). 
// pub fn try_from_schema(schema: TrackSchema, platforms: &[Platform]) -> Option<Self> { // for platform in platforms { // // if platforms.contains(&Platform::from(schema)) { // // return Some(Platform::from(schema)); // // } // // match schema { // TrackSchema::System34 => { // if platforms.contains(platform) { // Some(platform) // } // else { // None // } // } // #[cfg(feature = "amiga")] // TrackSchema::Amiga => { // if platforms.contains(&Platform::Amiga) { // Some(Platform::Amiga) // } // else { // None // } // } // #[cfg(feature = "macintosh")] // TrackSchema::Macintosh => { // if platforms.contains(&Platform::Macintosh) { // Some(Platform::Macintosh) // } // else { // None // } // } // #[cfg(feature = "atari_st")] // TrackSchema::AtariSt => { // if platforms.contains(&Platform::AtariSt) { // Some(Platform::AtariSt) // } // else { // None // } // } // } // } // } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/image_builder.rs
src/image_builder.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ //! A module to implement the builder pattern for [DiskImage]. Due to the //! complexity of the [DiskImage] object, it is not advisable to attempt to //! create one directly. //! //! An [ImageBuilder] allows for creation of a [DiskImage] with the desired //! parameters, at the desired [TrackDataResolution], optionally formatted. //! //! For IBM PC disk images, a creator tag can be specified which will be //! displayed during boot if the disk is left in the drive. use crate::{ types::{DiskCh, DiskImageFlags, TrackDataResolution}, DiskImage, DiskImageError, StandardFormat, }; /// Implements the Builder pattern for DiskImage objects. /// Allows for creation of blank or pre-formatted DiskImages. 
#[derive(Default)] pub struct ImageBuilder { #[doc = "Specify the [`StandardFormat`] to use for the [`DiskImage`] to be built."] pub standard_format: Option<StandardFormat>, #[doc = "Specify the [`DiskDataResolution`] to use for the DiskImage to be built."] pub resolution: Option<TrackDataResolution>, #[doc = "Specify the creator tag to display during boot."] pub creator_tag: Option<[u8; 8]>, #[doc = "Specify whether the DiskImage should be formatted."] pub formatted: bool, } impl ImageBuilder { pub fn new() -> ImageBuilder { Default::default() } /// Set the [`StandardFormat`] to use for the [`DiskImage`] to be built. pub fn with_standard_format(mut self, standard_format: impl Into<StandardFormat>) -> ImageBuilder { self.standard_format = Some(standard_format.into()); self } /// Set the [`TrackDataResolution`] to use for the [`DiskImage`] to be built. pub fn with_resolution(mut self, resolution: TrackDataResolution) -> ImageBuilder { self.resolution = Some(resolution); self } /// Set whether the [`DiskImage`] to be built should be formatted. /// If this is not set, the DiskImage will be created as a blank image which must be formatted /// before it can be read in a disk drive or emulator. pub fn with_formatted(mut self, formatted: bool) -> ImageBuilder { self.formatted = formatted; self } /// Set the creator tag for the [`DiskImage`] to be built. This is only used if the [`DiskImage`] /// is to be formatted. pub fn with_creator_tag(mut self, creator_tag: &[u8]) -> ImageBuilder { let mut new_creator_tag = [0x20; 8]; let max_len = creator_tag.len().min(8); new_creator_tag[..max_len].copy_from_slice(&creator_tag[..max_len]); self.creator_tag = Some(new_creator_tag); self } /// Build the [`DiskImage`] using the specified parameters. 
pub fn build(self) -> Result<DiskImage, DiskImageError> { if self.resolution.is_none() { log::error!("DiskDataResolution not set"); return Err(DiskImageError::ParameterError); } if self.standard_format.is_some() { match self.resolution { Some(TrackDataResolution::BitStream) => self.build_bitstream(), Some(TrackDataResolution::MetaSector) => self.build_metasector(), _ => Err(DiskImageError::UnsupportedFormat), } } else { Err(DiskImageError::UnsupportedFormat) } } fn build_bitstream(self) -> Result<DiskImage, DiskImageError> { let format = self.standard_format.unwrap(); let mut disk_image = DiskImage::create(format); disk_image.set_resolution(TrackDataResolution::BitStream); let chsn = format.layout(); let encoding = format.encoding(); let data_rate = format.data_rate(); let bitcell_size = format.bitcell_ct(); for head in 0..chsn.h() { for cylinder in 0..chsn.c() { let ch = DiskCh::new(cylinder, head); disk_image.add_empty_track( ch, encoding, Some(TrackDataResolution::BitStream), data_rate, bitcell_size, Some(false), )?; } } if self.formatted { log::debug!("ImageBuilder::build_bitstream(): Formatting disk image as {:?}", format); disk_image.format(format, TrackDataResolution::BitStream, None, self.creator_tag.as_ref())?; } // Do post-load processing as normal disk_image.post_load_process(); // Sanity check - do we have the correct number of heads and tracks? 
if disk_image.track_map[0].len() != chsn.c() as usize { log::error!("ImageBuilder::build_bitstream(): Incorrect number of tracks in head 0 after format operation"); return Err(DiskImageError::ParameterError); } // Clear dirty flag disk_image.clear_flag(DiskImageFlags::DIRTY); Ok(disk_image) } fn build_metasector(self) -> Result<DiskImage, DiskImageError> { if self.formatted { log::error!("MetaSector formatting not yet implemented"); return Err(DiskImageError::UnsupportedFormat); } let mut disk_image = DiskImage::create(self.standard_format.unwrap()); disk_image.set_resolution(TrackDataResolution::MetaSector); // Do post-load processing as normal disk_image.post_load_process(); // Clear dirty flag disk_image.clear_flag(DiskImageFlags::DIRTY); Ok(disk_image) } } #[cfg(test)] mod tests { use super::*; use crate::types::{StandardFormat, TrackDataResolution}; #[test] fn test_with_resolution() { let resolution = TrackDataResolution::BitStream; let builder = ImageBuilder::new().with_resolution(resolution); assert_eq!(builder.resolution, Some(resolution)); } #[test] fn test_with_formatted() { let builder = ImageBuilder::new().with_formatted(true); assert!(builder.formatted); } #[test] fn test_with_creator_tag() { let tag = b"CREATOR"; let builder = ImageBuilder::new().with_creator_tag(tag); assert_eq!(builder.creator_tag, Some(*b"CREATOR ")); } #[test] fn test_build_bitstream() { let format = StandardFormat::PcFloppy360; let builder = ImageBuilder::new() .with_standard_format(format) .with_resolution(TrackDataResolution::BitStream); let result = builder.build(); assert!(result.is_ok()); } #[test] fn test_build_bitstream_formatted() { let format = StandardFormat::PcFloppy360; let builder = ImageBuilder::new() .with_standard_format(format) .with_resolution(TrackDataResolution::BitStream) .with_formatted(true); let result = builder.build(); assert!(result.is_ok()); let mut disk = result.unwrap(); for sector in format.layout().chsn_iter() { 
assert!(disk.read_sector_basic(sector.ch(), sector.into(), None).is_ok()); } let write_vec = vec![0x55; 512]; for sector in format.layout().chsn_iter() { assert!(disk .write_sector_basic(sector.ch(), sector.into(), None, &write_vec) .is_ok()); } } #[test] fn test_build_metasector() { let format = StandardFormat::PcFloppy360; let builder = ImageBuilder::new() .with_standard_format(format) .with_resolution(TrackDataResolution::MetaSector); let result = builder.build(); assert!(result.is_ok()); } /* // TODO: Enable these tests when we have implemented formatting for MetaSector disks #[test] fn test_build_metasector_formatted() { let format = StandardFormat::PcFloppy360; let builder = ImageBuilder::new() .with_standard_format(format) .with_resolution(DiskDataResolution::MetaSector) .with_formatted(true); let result = builder.build(); assert!(result.is_ok()); let mut disk = result.unwrap(); for sector in format.chsn().iter() { assert!(disk.read_sector_basic(sector.ch(), sector.into(), None).is_ok()); } let write_vec = vec![0x55; 512]; for sector in format.chsn().iter() { assert!(disk .write_sector_basic(sector.ch(), sector.into(), None, &write_vec) .is_ok()); } }*/ }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/util.rs
src/util.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ //! The `util` module provides various utility functions. use regex::Regex; use std::{cmp::Ordering, path::PathBuf}; use crate::{ io::{Read, Seek, SeekFrom}, DiskImageError, }; /// The initial seed value for CRC-CCITT and related checksums. 
pub const CRC_CCITT_INITIAL: u16 = 0xFFFF; pub(crate) fn get_length<T: Seek>(source: &mut T) -> Result<u64, crate::io::Error> { // Seek to the end of the source let length = source.seek(SeekFrom::End(0))?; // Seek back to the beginning of the source source.seek(SeekFrom::Start(0))?; Ok(length) } pub(crate) fn read_ascii<T: Read>( source: &mut T, terminator: Option<u8>, max_len: Option<usize>, ) -> (Option<String>, u8) { let mut string = String::new(); let byte_iter = source.bytes(); let terminator = terminator.unwrap_or(0); let mut terminating_byte = 0; for (i, byte) in byte_iter.enumerate() { match byte { Ok(b) => { if b == terminator || b == 0 { terminating_byte = b; break; } else if b >= 32 && b.is_ascii() { string.push(b as char); } } Err(_) => return (None, 0), } if i == max_len.unwrap_or(usize::MAX) { break; } } if string.is_empty() { (None, terminating_byte) } else { (Some(string), terminating_byte) } } /// Calculate a 16-bit checksum over a byte slice. /// Note: previously attributed to CRC-CCITT. /// See: https://reveng.sourceforge.io/crc-catalogue/16.htm pub fn crc_ibm_3740(data: &[u8], start: Option<u16>) -> u16 { const POLY: u16 = 0x1021; // Polynomial x^16 + x^12 + x^5 + 1 let mut crc: u16 = start.unwrap_or(0xFFFF); for &byte in data { crc ^= (byte as u16) << 8; for _ in 0..8 { if (crc & 0x8000) != 0 { crc = (crc << 1) ^ POLY; } else { crc <<= 1; } } } crc } /// Calculate a 16-bit checksum one byte at a time. /// Note: previously attributed to CRC-CCITT. 
/// See: https://reveng.sourceforge.io/crc-catalogue/16.htm pub fn crc_ibm_3740_byte(byte: u8, crc: u16) -> u16 { const POLY: u16 = 0x1021; // Polynomial x^16 + x^12 + x^5 + 1 let mut crc = crc; crc ^= (byte as u16) << 8; for _ in 0..8 { if (crc & 0x8000) != 0 { crc = (crc << 1) ^ POLY; } else { crc <<= 1; } } crc } pub fn dump_slice<W: crate::io::Write>( data_slice: &[u8], start_address: usize, bytes_per_row: usize, element_size: usize, mut out: W, ) -> Result<(), DiskImageError> { let rows = data_slice.len() / bytes_per_row; let last_row_size = data_slice.len() % bytes_per_row; // Print all full rows. for r in 0..rows { // Print address // TODO - calculate address width from maximum address out.write_fmt(format_args!( "{:05X} | ", r * bytes_per_row * element_size + start_address ))?; for b in 0..bytes_per_row { out.write_fmt(format_args!("{:02X} ", data_slice[r * bytes_per_row + b]))?; } out.write_fmt(format_args!("| "))?; for b in 0..bytes_per_row { let byte = data_slice[r * bytes_per_row + b]; out.write_fmt(format_args!( "{}", if (40..=126).contains(&byte) { byte as char } else { '.' } ))?; } out.write_fmt(format_args!("\n"))?; } // Print last incomplete row, if any bytes left over. if last_row_size > 0 { // Print address out.write_fmt(format_args!( "{:05X} | ", rows * bytes_per_row * element_size + start_address ))?; for b in 0..bytes_per_row { if b < last_row_size { out.write_fmt(format_args!("{:02X} ", data_slice[rows * bytes_per_row + b]))?; } else { out.write_fmt(format_args!(" "))?; } } out.write_fmt(format_args!("| "))?; for b in 0..bytes_per_row { if b < last_row_size { let byte = data_slice[rows * bytes_per_row + b]; out.write_fmt(format_args!( "{}", if (40..=126).contains(&byte) { byte as char } else { '.' 
} ))?; } else { out.write_fmt(format_args!(" "))?; } } out.write_fmt(format_args!("\n"))?; } Ok(()) } pub fn dump_string(data_slice: &[u8]) -> String { let mut out = String::new(); for &byte in data_slice { out.push(if (40..=126).contains(&byte) { byte as char } else { '.' }); } out } /// Sort `PathBuf`s in a natural order, by breaking them down into numeric and non-numeric parts. /// This function is used to sort directory names in a natural order, so that Disk11 is sorted after /// Disk2, etc. #[allow(clippy::ptr_arg)] pub fn natural_sort(a: &PathBuf, b: &PathBuf) -> Ordering { let re = Regex::new(r"(\D+)|(\d+)").expect("Invalid regex"); let a_str = a.iter().next().and_then(|s| s.to_str()).unwrap_or(""); let b_str = b.iter().next().and_then(|s| s.to_str()).unwrap_or(""); let a_parts = re.captures_iter(a_str); let b_parts = re.captures_iter(b_str); for (a_part, b_part) in a_parts.zip(b_parts) { // Handle non-numeric parts, converting to lowercase for case-insensitive comparison if let (Some(a_text), Some(b_text)) = (a_part.get(1), b_part.get(1)) { let ordering = a_text.as_str().to_lowercase().cmp(&b_text.as_str().to_lowercase()); if ordering != Ordering::Equal { return ordering; } continue; } // Handle numeric parts let a_num = a_part.get(2).and_then(|m| m.as_str().parse::<u32>().ok()); let b_num = b_part.get(2).and_then(|m| m.as_str().parse::<u32>().ok()); match (a_num, b_num) { (Some(a_num), Some(b_num)) => { let ordering = a_num.cmp(&b_num); if ordering != Ordering::Equal { return ordering; } } // Fallback to lexicographic comparison if parsing fails _ => return a_str.to_lowercase().cmp(&b_str.to_lowercase()), } } // Fallback to comparing the full path if the directory names are identical a_str.to_lowercase().cmp(&b_str.to_lowercase()) } #[cfg(test)] mod tests { use super::*; use std::path::PathBuf; #[test] fn test_natural_sort() { let mut paths = vec![ PathBuf::from("Disk1"), PathBuf::from("disk10"), PathBuf::from("Disk2"), PathBuf::from("Disk3"), 
PathBuf::from("disk11"), PathBuf::from("Disk12"), PathBuf::from("Disk9"), ]; // Sort using natural_sort function paths.sort_by(natural_sort); // Expected order: Disk1, Disk2, Disk3, Disk9, Disk10, Disk11, Disk12 let expected_order = vec![ PathBuf::from("Disk1"), PathBuf::from("Disk2"), PathBuf::from("Disk3"), PathBuf::from("Disk9"), PathBuf::from("disk10"), PathBuf::from("disk11"), PathBuf::from("Disk12"), ]; assert_eq!(paths, expected_order); } #[test] fn test_natural_sort_with_paths() { let mut paths = vec![ PathBuf::from("Disk10/track00.0.raw"), PathBuf::from("Disk11/track00.0.raw"), PathBuf::from("Disk12/track00.0.raw"), PathBuf::from("Disk13/track00.0.raw"), PathBuf::from("Disk14/track00.0.raw"), PathBuf::from("Disk15/track00.0.raw"), PathBuf::from("Disk1/track00.0.raw"), PathBuf::from("Disk2/track00.0.raw"), PathBuf::from("Disk3/track00.0.raw"), PathBuf::from("Disk4/track00.0.raw"), PathBuf::from("Disk5/track00.0.raw"), PathBuf::from("Disk6/track00.0.raw"), ]; // Sort using natural_sort function paths.sort_by(natural_sort); // Expected order: Disk1, Disk2, ..., Disk15 let expected_order = vec![ PathBuf::from("Disk1/track00.0.raw"), PathBuf::from("Disk2/track00.0.raw"), PathBuf::from("Disk3/track00.0.raw"), PathBuf::from("Disk4/track00.0.raw"), PathBuf::from("Disk5/track00.0.raw"), PathBuf::from("Disk6/track00.0.raw"), PathBuf::from("Disk10/track00.0.raw"), PathBuf::from("Disk11/track00.0.raw"), PathBuf::from("Disk12/track00.0.raw"), PathBuf::from("Disk13/track00.0.raw"), PathBuf::from("Disk14/track00.0.raw"), PathBuf::from("Disk15/track00.0.raw"), ]; assert_eq!(paths, expected_order); } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/disk_lock.rs
src/disk_lock.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ //! This module defines a [DiskLock] trait for locking disk access. This trait is primarily employed //! to enable support for custom lock strategies, such as fluxfox-egui's `TrackingLock` which is //! used to track disk access across that application's various tools. //! //! The concept of a [DiskLock] is that it can be supplied with a [LockContext] trait implementor //! which allows the tracking and querying of what has the disk image locked. This is useful for //! debugging lock contention issues. use crate::DiskImage; use std::{ fmt::{Debug, Display}, hash::Hash, ops::{Deref, DerefMut}, sync::{Arc, RwLock, RwLockReadGuard, RwLockWriteGuard}, }; /// Trait defining the context of a lock. 
At a minimum, this trait should implement `Display` and /// `Debug` so that context-specific information can be logged, and `Eq` and `Hash` so that the /// context can be used as a key in a hash map to track the lock status. pub trait LockContext: Display + Debug + Eq + Hash {} /// Trait defining the locking behavior with tracking capabilities. /// /// - `T`: The type of the data being protected. /// - `C`: The type representing the context acquiring the lock. pub trait DiskLock<T, C: LockContext> { type T; type C: LockContext; /// The guard returned by the `read` method. type ReadGuard<'a>: Deref<Target = T> + 'a where Self: 'a; /// The guard returned by the `write` method. type WriteGuard<'a>: Deref<Target = T> + DerefMut<Target = T> + 'a where Self: 'a; /// Attempts to acquire a read lock for the given tool. On failure, the method should return /// the context that is holding the write lock. fn read(&self, context: C) -> Result<Self::ReadGuard<'_>, C>; /// Attempts to acquire a write lock for the given tool. On failure, the method should return /// a vector of the contexts holding read locks. fn write(&self, context: C) -> Result<Self::WriteGuard<'_>, Vec<C>>; /// Return the number of strong references to the inner lock. fn strong_count(&self) -> usize; } /// A newtype wrapper around Arc<RwLock<T>> without tracking. #[derive(Clone)] pub struct NonTrackingDiskLock<T> { inner: Arc<RwLock<T>>, } impl<T> NonTrackingDiskLock<T> { pub fn new(inner: Arc<RwLock<T>>) -> Self { Self { inner } } } #[derive(Debug, Clone, Copy, Default, PartialEq, Eq, Hash)] pub struct NullContext { _private: u8, } impl Display for NullContext { fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { write!(f, "NullContext") } } impl LockContext for NullContext {} /// Allow coercing an Arc<RwLock<T>> into a NonTrackingDiskLock<T>. 
impl From<Arc<RwLock<DiskImage>>> for NonTrackingDiskLock<DiskImage> { fn from(arc: Arc<RwLock<DiskImage>>) -> Self { NonTrackingDiskLock { inner: arc } } } impl<T> NonTrackingDiskLock<T> { /// Returns a cloned Arc<RwLock<T>> pointing to the inner lock. pub fn as_inner(&self) -> Arc<RwLock<T>> { Arc::clone(&self.inner) } } impl<T> DiskLock<T, NullContext> for NonTrackingDiskLock<T> { type T = DiskImage; type C = NullContext; type ReadGuard<'a> = RwLockReadGuard<'a, T> where T: 'a; type WriteGuard<'a> = RwLockWriteGuard<'a, T> where T: 'a; fn read(&self, _tool: NullContext) -> Result<Self::ReadGuard<'_>, NullContext> { match self.inner.try_read() { Ok(guard) => Ok(guard), Err(_) => Err(NullContext::default()), } } fn write(&self, _tool: NullContext) -> Result<Self::WriteGuard<'_>, Vec<NullContext>> { match self.inner.try_write() { Ok(guard) => Ok(guard), Err(_) => Err(Vec::new()), } } fn strong_count(&self) -> usize { Arc::strong_count(&self.inner) } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/tree_map.rs
src/tree_map.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ //! A generic tree structure with fast key-value lookup (not collision safe!) 
use crate::{FoxHashMap, FoxHashSet}; // A generic node in a FoxTreeMap #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Debug, Clone)] pub struct FoxTreeNode<T> { pub name: String, pub index: usize, pub parent: usize, pub children: Vec<usize>, pub data: T, } #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone, Default)] pub struct FoxTreeMap<T> { nodes: Vec<FoxTreeNode<T>>, name_to_index: FoxHashMap<String, usize>, // Name-to-index map for optional lookups } impl<T> FoxTreeMap<T> { pub fn new(root_data: T) -> Self { let root = FoxTreeNode { name: "root".to_string(), index: 0, parent: 0, children: Vec::new(), data: root_data, }; let mut name_to_index = FoxHashMap::default(); name_to_index.insert("root".to_string(), 0); Self { nodes: vec![root], name_to_index, } } pub fn root(&self) -> usize { 0 // Root is always index 0 } pub fn children(&self, index: usize) -> &[usize] { if index >= self.nodes.len() { return &[]; } &self.nodes[index].children } pub fn node(&self, index: usize) -> &FoxTreeNode<T> { if index >= self.nodes.len() { panic!("Node index out of bounds: {}", index); } &self.nodes[index] } pub fn add_child(&mut self, parent: usize, name: &str, data: T) -> usize { let index = self.nodes.len(); let node = FoxTreeNode { name: name.to_string(), index, parent, children: Vec::new(), data, }; self.name_to_index.insert(name.to_string(), index); self.nodes[parent].children.push(index); self.nodes.push(node); index } /// Walks the tree and calls the callback on each node's data, immutably pub fn for_each<F>(&self, mut callback: F) where F: FnMut(usize, &T), { let mut visited = FoxHashSet::new(); self.for_each_recursive(self.root(), &mut callback, &mut visited); } /// Internal helper for recursive traversal. 
fn for_each_recursive<F>(&self, index: usize, callback: &mut F, visited: &mut FoxHashSet<usize>) where F: FnMut(usize, &T), { if visited.contains(&index) { return; // Prevent cycles } visited.insert(index); let node = &self.nodes[index]; callback(index, &node.data); for &child in &node.children { self.for_each_recursive(child, callback, visited); } } pub fn debug_tree<F>(&self, index: usize, indent: usize, display: &F, visited: &mut FoxHashSet<usize>) where F: Fn(&T) -> String, { if visited.contains(&index) { println!("{}(Cycle detected at node {})", " ".repeat(indent), index); return; } visited.insert(index); let node = &self.nodes[index]; let prefix = " ".repeat(indent); println!("{}{}: {}", prefix, &node.name, display(&node.data)); for &child in &node.children { self.debug_tree(child, indent + 2, display, visited); } } pub fn debug_with<F>(&self, f: &mut std::fmt::Formatter<'_>, display: &F) -> std::fmt::Result where F: Fn(&T) -> String, { let mut visited = FoxHashSet::default(); self.debug_fmt_node_with(f, self.root(), 0, display, &mut visited) } pub fn debug_fmt_node_with<F>( &self, f: &mut std::fmt::Formatter<'_>, index: usize, indent: usize, display: &F, visited: &mut FoxHashSet<usize>, ) -> std::fmt::Result where F: Fn(&T) -> String, { if visited.contains(&index) { writeln!(f, "{}(Cycle detected at node {})", " ".repeat(indent), index)?; return Ok(()); } visited.insert(index); let node = &self.nodes[index]; let prefix = " ".repeat(indent); writeln!(f, "{}{}: {}", prefix, node.name, display(&node.data))?; for &child in &node.children { self.debug_fmt_node_with(f, child, indent + 2, display, visited)?; } Ok(()) } pub fn last_node(&self) -> (usize, usize) { let last = self.nodes.len().saturating_sub(1); (self.nodes[last].parent, last) } } // pub trait FoxTree { // type Data; // // fn tree_mut(&mut self) -> &mut FoxTreeMap<Self::Data>; // fn tree(&self) -> &FoxTreeMap<Self::Data>; // // fn root(&self) -> usize { // 0 // } // // fn add_child<'a>(&'a mut self, 
parent: usize, name: &str, data: Self::Data) -> FoxTreeCursor<'a, Self::Data> { // let child_index = self.tree_mut().add_child(parent, name, data); // FoxTreeCursor { // tree: self.tree_mut(), // parent_index: parent, // current_index: child_index, // } // } // // fn debug_tree(&self, display: impl Fn(&Self::Data) -> String) { // let mut visited = FoxHashSet::new(); // self.tree().debug_tree(self.root(), 0, &display, &mut visited); // } // // fn last_node(&mut self) -> FoxTreeCursor<Self::Data> { // let last = self.tree().nodes.len() - 1; // FoxTreeCursor { // parent_index: self.tree().nodes[last].parent, // current_index: last, // tree: self.tree_mut(), // } // } // } // Cursor for chaining child and sibling additions pub struct FoxTreeCursor<'a, T> { tree: &'a mut FoxTreeMap<T>, parent_index: usize, current_index: usize, } impl<'a, T: Default> FoxTreeCursor<'a, T> { pub fn new(tree: &'a mut FoxTreeMap<T>, parent_index: usize, current_index: usize) -> Self { Self { tree, parent_index, current_index, } } /// Add a child to the current node. pub fn add_child(mut self, name: &str, data: T) -> Self { let child_index = self.tree.add_child(self.current_index, name, data); self.parent_index = self.current_index; self.current_index = child_index; self } /// Add a sibling to the current node. pub fn add_sibling(mut self, name: &str, data: T) -> Self { let sibling_index = self.tree.add_child(self.parent_index, name, data); self.current_index = sibling_index; self } /// Return the parent node of this node. pub fn up(mut self) -> Self { let parent_node = self.tree.node(self.parent_index); self.parent_index = parent_node.parent; self.current_index = parent_node.index; self } /// Return the index of the current node. pub fn index(&self) -> usize { self.current_index } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/copy_protection.rs
src/copy_protection.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ // A module for identifying copy protection schemes on disk images. // Currently incomplete - mostly a placeholder for future work. 
use crate::{types::chs::DiskChsnQuery, DiskImage}; use std::fmt::{Display, Formatter, Result}; #[derive(Copy, Clone, Debug)] pub enum CopyProtectionScheme { FormasterCopyLock(u8), SoftguardSuperlok(u8), EaInterlock(u8), VaultProlok, XemagXelok(u8), HlsDuplication, Undetermined, } impl Display for CopyProtectionScheme { fn fmt(&self, f: &mut Formatter<'_>) -> Result { match self { CopyProtectionScheme::FormasterCopyLock(v) => write!(f, "Formaster CopyLock v{}", v), CopyProtectionScheme::SoftguardSuperlok(_v) => write!(f, "Softguard Superlok"), CopyProtectionScheme::EaInterlock(v) => write!(f, "EA Interlock v{}", v), CopyProtectionScheme::VaultProlok => write!(f, "Vault Prolok"), CopyProtectionScheme::XemagXelok(v) => write!(f, "XEMAG Xelok v{}", v), CopyProtectionScheme::HlsDuplication => write!(f, "HLS Duplication"), CopyProtectionScheme::Undetermined => write!(f, "Likely protected, but scheme undetermined"), } } } impl DiskImage { /// Attempt to determine the copy protection scheme used on the disk image. /// Returns None if no copy protection is detected. pub fn detect_copy_protection(&self) -> Option<CopyProtectionScheme> { for track in self.track_iter() { let track_ch = track.ch(); // Check for Formaster CopyLock. // Look for Sector 1 on a track with n == 1 and bad crc. // If the address crc is also bad, it's version 2. if let Ok(scan_result) = track.scan_sector(DiskChsnQuery::new(track_ch.c(), track_ch.h(), 1, 1), None) { if scan_result.data_error { return if scan_result.address_error { Some(CopyProtectionScheme::FormasterCopyLock(2)) } else { Some(CopyProtectionScheme::FormasterCopyLock(1)) }; } } // Check for Softguard Superlok. // Look for Sector 1 on a track > 1 with n == 6 (8129 bytes) and bad crc. // Not sure how to detect v2 as the main change is in the detection code. 
if track_ch.c() > 1 { if let Ok(scan_result) = track.scan_sector(DiskChsnQuery::new(track_ch.c(), track_ch.h(), 1, 6), None) { if scan_result.data_error { return Some(CopyProtectionScheme::SoftguardSuperlok(1)); } } } // Check for EA Interlock. // If a track has 96 sectors, that's a clear indication. if track.sector_ct() == 96 { return Some(CopyProtectionScheme::EaInterlock(1)); } } None } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/codec/mod.rs
src/codec/mod.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ pub mod stream; #[derive(Copy, Clone, Debug)] pub enum Nibble { Marker(u8), Data(u8), } impl From<Nibble> for u8 { #[inline] fn from(nibble: Nibble) -> u8 { match nibble { Nibble::Marker(nibble) => nibble, Nibble::Data(nibble) => nibble, } } } use stream::StreamCodec;
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/codec/stream/mfm.rs
src/codec/stream/mfm.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------- */ const MARKER_MASK: u64 = 0xFFFF_FFFF_FFFF_0000; //const CLOCK_MASK: u64 = 0xAAAA_AAAA_AAAA_0000; pub struct MfmStreamDecoder { shift_reg: u64, nibbles: Vec<Nibble>, clock: bool, synced: bool, markers: Vec<u64>, nibble_bit_ct: u8, nibble: u8, } impl MfmStreamDecoder { pub fn with_markers(markers: &[u64]) -> Self { MfmStreamDecoder { shift_reg: 0, nibbles: Vec::with_capacity(128), clock: true, synced: false, markers: markers.to_vec(), nibble_bit_ct: 0, nibble: 0, } } } impl StreamDecoder for MfmStreamDecoder { fn reset(&mut self) { self.nibbles.clear(); self.shift_reg = 0; self.clock = true; self.synced = false; self.nibble_bit_ct = 0; self.nibble = 0; } #[inline] fn is_synced(&self) -> bool { self.synced } fn encoding(&self) -> DiskDataEncoding { DiskDataEncoding::Mfm } fn push_bit(&mut self, bit: bool) { self.shift_reg = (self.shift_reg << 1) | (bit as u64); for marker in &self.markers { if self.shift_reg & MARKER_MASK == *marker { self.synced = true; } if self.synced {} } self.clock = !self.clock; } fn bits_remaining(&self) -> usize { self.nibble_bit_ct as usize } fn has_nibble(&self) -> bool { !self.nibbles.is_empty() } fn peek_nibble(&self) -> Option<Nibble> { self.nibbles.last().copied() } fn pop_nibble(&mut self) -> Option<Nibble> { self.nibbles.pop() } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/codec/stream/mod.rs
src/codec/stream/mod.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ pub mod mfm; pub trait StreamDecoder { fn reset(&mut self); fn is_synced(&self) -> bool; fn encoding(&self) -> DiskDataEncoding; fn push_bit(&mut self, bit: bool); fn bits_remaining(&self) -> usize; fn has_nibble(&self) -> bool; fn peek_nibble(&self) -> Option<Nibble>; fn pop_nibble(&mut self) -> Option<Nibble>; }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/scripting/mod.rs
src/scripting/mod.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ #![allow(dead_code)] #[cfg(feature = "rhai")] pub mod rhai; use crate::DiskImageError; use thiserror::Error; #[derive(Clone, Debug, Error)] pub enum ScriptEngineError { #[error("A DiskImageError occurred executing the script: {0}")] DiskImageError(DiskImageError), #[error("A syntax error occurred executing the script: {0}")] SyntaxError(String), #[error("The script engine could not lock the DiskImage")] LockError, } pub trait ScriptEngine { /// Execute a script. /// # Arguments /// * `script` - A string containing the script text to execute. /// # Returns /// * `Ok(())` if the script executed successfully. /// * `Err(ScriptEngineError)` if an error occurred. fn run(&mut self, script: &str) -> Result<(), ScriptEngineError>; } pub type ScriptEngineHandle = Box<dyn ScriptEngine>;
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/scripting/rhai/interface.rs
src/scripting/rhai/interface.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ use rhai::Dynamic; /// Define interface for Rhai scripting pub(crate) trait RhaiInterface { fn list_tracks(&self) -> Dynamic; }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/scripting/rhai/script_engine.rs
src/scripting/rhai/script_engine.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------- */ #![allow(dead_code)] use crate::{ scripting::{rhai::interface::RhaiInterface, ScriptEngineError}, DiskImage, }; use rhai::Dynamic; use std::sync::{Arc, RwLock}; pub struct RhaiEngine { engine: rhai::Engine, context: Arc<RhaiContext>, } #[derive(Clone)] pub struct RhaiContext { disk: Arc<RwLock<DiskImage>>, } impl RhaiEngine { fn init(disk: Arc<RwLock<DiskImage>>) -> Self { let mut engine = rhai::Engine::new(); // Wrap context in Arc so it can be cloned let context = Arc::new(RhaiContext { disk }); // Clone context for the closure let context_clone = context.clone(); engine .register_type::<RhaiContext>() .register_fn("list_tracks", move || context_clone.list_tracks()); RhaiEngine { engine, context } } fn run(&mut self, script: &str) -> Result<(), ScriptEngineError> { match self.engine.eval::<()>(script) { Ok(_) => Ok(()), Err(e) => Err(ScriptEngineError::SyntaxError(e.to_string())), } } fn engine(&mut self) -> &mut rhai::Engine { &mut self.engine } } impl RhaiInterface for RhaiContext { fn list_tracks(&self) -> Dynamic { // Use the disk image geometry iterator to get the track list let disk = self.disk.read().unwrap(); let geometry = disk.geometry(); let tracks = geometry.iter().map(|track| Dynamic::from(track)).collect(); tracks } } #[cfg(test)] mod tests { use super::*; use crate::{prelude::*, types::DiskCh, ImageBuilder, StandardFormat}; #[test] fn create_blank_disk_and_list_tracks() { // Create a blank disk using ImageBuilder let disk = ImageBuilder::new() .with_resolution(TrackDataResolution::MetaSector) .with_standard_format(StandardFormat::PcFloppy360) .build() .expect("Failed to create disk image"); let disk = disk.into_arc(); let mut rhai = RhaiEngine::init(disk.clone()); // Script to call list_tracks let script = r#" print("Hello from Rhai"); let tracks = list_tracks(); tracks "#; // Execute the script and capture the output let result: Result<Dynamic, _> = 
rhai.engine().eval(script); //let result = engine.run(script); assert!(result.is_ok()); // Verify the tracks let tracks = result.unwrap(); assert!(tracks.is_array()); // Extract the array directly let tracks_array = tracks.cast::<rhai::Array>(); assert!(!tracks_array.is_empty()); // Iterate through the tracks and print them for item in tracks_array { let track: DiskCh = item.cast::<DiskCh>(); println!("{:?}", track); } } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/scripting/rhai/mod.rs
src/scripting/rhai/mod.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ pub(crate) mod interface; pub(crate) mod script_engine;
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/track/bitstream.rs
src/track/bitstream.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- src/track/bitstream.rs Implements the Bitstream track type and the Track trait for same. 
*/ use super::{Track, TrackAnalysis, TrackInfo, TrackSectorScanResult}; use crate::{ bitstream_codec::{fm::FmCodec, gcr::GcrCodec, mfm::MfmCodec, EncodingVariant, TrackCodec, TrackDataStream}, io::SeekFrom, source_map::SourceMap, track_schema::{ system34::{System34Element, System34Marker, System34Schema, System34Standard}, TrackElement, TrackElementInstance, TrackMetadata, TrackSchema, TrackSchemaParser, }, types::{ chs::DiskChsnQuery, AddSectorParams, BitStreamTrackParams, DiskCh, DiskChs, DiskChsn, DiskRpm, ReadSectorResult, ReadTrackResult, RwScope, ScanSectorResult, SharedDiskContext, TrackDataEncoding, TrackDataRate, TrackDataResolution, TrackDensity, WriteSectorResult, }, DiskImageError, SectorIdQuery, SectorMapEntry, }; use bit_vec::BitVec; use sha1_smol::Digest; use std::{ any::Any, sync::{Arc, Mutex}, }; use strum::IntoEnumIterator; #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] #[derive(Clone)] pub struct BitStreamTrack { pub(crate) encoding: TrackDataEncoding, pub(crate) data_rate: TrackDataRate, pub(crate) rpm: Option<DiskRpm>, pub(crate) ch: DiskCh, pub(crate) data: TrackDataStream, pub(crate) metadata: TrackMetadata, pub(crate) schema: Option<TrackSchema>, #[cfg_attr(feature = "serde", serde(skip))] pub(crate) shared: Option<Arc<Mutex<SharedDiskContext>>>, } #[cfg_attr(feature = "serde", typetag::serde)] impl Track for BitStreamTrack { fn resolution(&self) -> TrackDataResolution { TrackDataResolution::BitStream } fn as_any(&self) -> &dyn Any { self } fn as_any_mut(&mut self) -> &mut dyn Any { self } fn as_bitstream_track(&self) -> Option<&BitStreamTrack> { self.as_any().downcast_ref::<BitStreamTrack>() } fn as_bitstream_track_mut(&mut self) -> Option<&mut BitStreamTrack> { self.as_any_mut().downcast_mut::<BitStreamTrack>() } fn ch(&self) -> DiskCh { self.ch } fn set_ch(&mut self, new_ch: DiskCh) { self.ch = new_ch; } fn encoding(&self) -> TrackDataEncoding { self.encoding } fn info(&self) -> TrackInfo { TrackInfo { 
resolution: self.resolution(), encoding: self.encoding, schema: self.schema, data_rate: self.data_rate, density: Some(TrackDensity::from(self.data_rate)), rpm: self.rpm, bit_length: self.data.len(), sector_ct: self.metadata.sector_ids().len(), flux_info: None, } } fn metadata(&self) -> Option<&TrackMetadata> { Some(&self.metadata) } fn sector_ct(&self) -> usize { let mut sector_ct = 0; for item in &self.metadata.items { if item.element.is_sector_header() { sector_ct += 1; } } sector_ct } fn has_sector_id(&self, id: u8, _id_chsn: Option<DiskChsn>) -> bool { for item in &self.metadata.items { if let TrackElement::System34(System34Element::Marker(System34Marker::Idam, _)) = item.element { if let Some(chsn) = item.chsn { if chsn.s() == id { return true; } } } } false } fn sector_list(&self) -> Vec<SectorMapEntry> { self.metadata.sector_list() // if self.schema.is_none() { // log::debug!("sector_list(): No schema found for track!"); // return Vec::new(); // } // // let mut sector_list = Vec::new(); // if let Some(schema) = self.schema { // for item in &self.metadata.items { // if let TrackElement::System34(System34Element::Data { // address_crc, // data_crc, // deleted, // .. // }) = item.elem_type // { // if let Some(chsn) = item.chsn { // sector_list.push(SectorMapEntry { // chsn, // attributes: SectorAttributes { // address_crc_valid: address_crc, // data_crc_valid: data_crc, // deleted_mark: deleted, // no_dam: false, // }, // }); // } // } // } // } // sector_list } fn add_sector(&mut self, _sd: &AddSectorParams) -> Result<(), DiskImageError> { Err(DiskImageError::UnsupportedFormat) } /// Read the sector data from the sector identified by 'chs'. The data is returned within a /// [ReadSectorResult] struct which also sets some convenience metadata flags which are needed /// when handling `MetaSector` resolution images. 
/// When reading a `BitStream` resolution image, the sector data can optionally include any /// applicable metadata such as the address mark and CRC bytes, depending on the value of /// [RwScope]. /// Offsets are provided within [ReadSectorResult] so these can be skipped when processing the /// read operation. fn read_sector( &self, id: SectorIdQuery, n: Option<u8>, offset: Option<usize>, scope: RwScope, debug: bool, ) -> Result<ReadSectorResult, DiskImageError> { let mut read_vec = Vec::new(); let mut result_data_error = false; let mut result_address_error = false; let mut result_deleted_mark = false; let mut result_data_range = 0..0; let mut result_chsn = None; let mut wrong_cylinder = false; let mut bad_cylinder = false; let mut wrong_head = false; let mut data_crc = None; let schema = self.schema.ok_or(DiskImageError::SchemaError)?; // Read index first to avoid borrowing issues in next match. let bit_index = self.scan_sector_element(id, offset.unwrap_or(0))?; log::debug!("read_sector(): Bit index: {:?}", bit_index); match bit_index { TrackSectorScanResult::Found { address_error, no_dam, sector_chsn, .. } if no_dam => { // Sector id was matched, but has no associated data. // Return an empty buffer with the `no_dam` flag set. return Ok(ReadSectorResult { id_chsn: Some(sector_chsn), no_dam, address_crc_error: address_error, ..ReadSectorResult::default() }); } TrackSectorScanResult::Found { ei, sector_chsn, address_error, data_error, deleted_mark, .. } => { result_chsn = Some(sector_chsn); // If there is a bad address mark, we do not read the sector data, unless the debug // flag is set. // This allows dumping of sectors with bad address marks for debugging purposes. // So if the debug flag is not set, return our 'failure' now. 
if address_error && !debug { return Ok(ReadSectorResult { id_chsn: result_chsn, address_crc_error: true, ..ReadSectorResult::default() }); } // TODO: All this should be moved into TrackSchema logic - we shouldn't have to know // about the formatting details in Track // Should be safe to the instance let instance = self.element(ei).unwrap(); // Get the size and range of the sector data element. let element_size = instance.element.size(); let scope_range = instance.element.range(scope).unwrap_or(0..element_size); let scope_overhead = element_size - scope_range.len(); // Normally we read the contents of the sector determined by N in the sector header. // The read operation however can override the value of N if the `n` parameter // is Some. let data_len = if let Some(n_value) = n { DiskChsn::n_to_bytes(n_value) + scope_overhead } else { sector_chsn.n_size() + scope_overhead }; log::debug!( "read_sector(): Allocating {} bytes for sector {} data element of size {} at offset: {:05X}", data_len, sector_chsn, element_size, instance.start ); read_vec = vec![0u8; data_len]; let (_, crc_opt) = schema.decode_element(&self.data, instance, scope, &mut read_vec); let crc = crc_opt.unwrap(); // Sanity check: Read CRC matches metadata? 
if crc.is_error() != data_error { log::warn!( "read_sector(): CRC data/metadata mismatch for sector {}: calculated: {} metadata: {}", sector_chsn, crc, if data_error { "Invalid" } else { "Valid" } ); } result_address_error = address_error; result_data_error = data_error; result_deleted_mark = deleted_mark; result_data_range = scope_range; // Move crc into Option for return data_crc = Some(crc); // if read_vec.len() < data_len { // log::error!( // "read_sector(): Data buffer underrun, expected: {} got: {}", // data_len, // read_vec.len() // ); // return Err(DiskImageError::DataError); // } // self.data // .seek(SeekFrom::Start((element_start + scope_read_off) as u64)) // .map_err(|_| DiskImageError::BitstreamError)?; // log::trace!("read_sector(): Reading {} bytes.", read_vec.len()); // self.data // .read_exact(&mut read_vec) // .map_err(|_| DiskImageError::BitstreamError)?; } TrackSectorScanResult::NotFound { wrong_cylinder: wc, bad_cylinder: bc, wrong_head: wh, } => { log::trace!( "read_sector(): Sector ID not matched reading track. wc: {} bc: {} wh: {}", wc, bc, wh ); wrong_cylinder = wc; bad_cylinder = bc; wrong_head = wh; } _ => { unreachable!() } } Ok(ReadSectorResult { id_chsn: result_chsn, read_buf: read_vec, data_range: result_data_range, deleted_mark: result_deleted_mark, not_found: false, no_dam: false, address_crc_error: result_address_error, address_crc: None, data_crc_error: result_data_error, data_crc, wrong_cylinder, bad_cylinder, wrong_head, }) } fn scan_sector(&self, id: DiskChsnQuery, offset: Option<usize>) -> Result<ScanSectorResult, DiskImageError> { // let data_crc_error = false; // let mut address_crc_error = false; // let deleted_mark = false; // let wrong_cylinder = false; // let bad_cylinder = false; // let wrong_head = false; // Read index first to avoid borrowing issues in next match. 
let track_scan_result = self.scan_sector_element(id, offset.unwrap_or(0))?; Ok(track_scan_result.into()) // match bit_index { // TrackSectorScanResult::Found { // address_error, no_dam, .. // } if no_dam => { // // No DAM found. Return an empty buffer. // Ok(ScanSectorResult { // deleted_mark: false, // not_found: false, // no_dam: true, // address_error, // data_error: false, // wrong_cylinder: false, // bad_cylinder: false, // wrong_head: false, // }) // } // TrackSectorScanResult::Found { // address_crc_valid, // data_crc_valid, // deleted, // .. // } => { // if !address_crc_valid { // // Bad address CRC, return status. // Ok(ScanSectorResult { // deleted_mark: false, // not_found: false, // no_dam: false, // address_error: true, // data_error: false, // wrong_cylinder, // bad_cylinder, // wrong_head, // }) // } // else { // Ok(ScanSectorResult { // deleted_mark: deleted, // not_found: false, // no_dam: false, // address_error: address_crc_error, // data_error: !data_crc_valid, // wrong_cylinder, // bad_cylinder, // wrong_head, // }) // } // } // TrackSectorScanResult::NotFound { // wrong_cylinder: wc, // bad_cylinder: bc, // wrong_head: wh, // } => { // log::trace!( // "scan_sector: Sector ID not matched reading track. 
wc: {} bc: {} wh: {}", // wc, // bc, // wh // ); // Ok(ScanSectorResult { // not_found: true, // no_dam: false, // deleted_mark, // address_error: address_crc_error, // data_error: data_crc_error, // wrong_cylinder: wc, // bad_cylinder: bc, // wrong_head: wc, // }) // } // _ => { // unreachable!() // } // } } fn write_sector( &mut self, id: DiskChsnQuery, offset: Option<usize>, write_data: &[u8], _scope: RwScope, write_deleted: bool, debug: bool, ) -> Result<WriteSectorResult, DiskImageError> { let data_len; let mut wrong_cylinder = false; let bad_cylinder = false; let mut wrong_head = false; // Find the bit offset of the requested sector let bit_index = self.scan_sector_element(id, offset.unwrap_or(0))?; match bit_index { TrackSectorScanResult::Found { address_error, no_dam, .. } if no_dam => { // No DAM found. Return an empty buffer. Ok(WriteSectorResult { not_found: false, no_dam: true, address_crc_error: address_error, wrong_cylinder, bad_cylinder, wrong_head, }) } TrackSectorScanResult::Found { sector_chsn, address_error, deleted_mark, .. } => { wrong_cylinder = id.c().is_some() && sector_chsn.c() != id.c().unwrap(); wrong_head = id.h().is_some() && sector_chsn.h() != id.h().unwrap(); // If there's a bad address mark, we do not proceed to write the data, unless we're // requesting it anyway for debugging purposes. if address_error && !debug { return Ok(WriteSectorResult { not_found: false, no_dam: false, address_crc_error: address_error, wrong_cylinder, bad_cylinder, wrong_head, }); } if write_deleted != deleted_mark { log::warn!( "write_sector(): Deleted mark mismatch, expected: {} got: {}. 
Changing sector data type not implemented", write_deleted, deleted_mark ); return Err(DiskImageError::ParameterError); } data_len = write_data.len(); if sector_chsn.n_size() != data_len { log::error!( "write_sector(): Data buffer size mismatch, expected: {} got: {}", sector_chsn.n_size(), write_data.len() ); return Err(DiskImageError::ParameterError); } /* self.data .seek(SeekFrom::Start(((ei.start >> 1) + 32) as u64)) .map_err(|_| DiskImageError::SeekError)?; log::trace!( "write_sector(): Writing {} bytes to sector_id: {} at offset: {}", write_data.len(), id.s(), ei.start + 4 * MFM_BYTE_LEN ); // Write the sector data, if the write scope is the entire sector. if !matches!(scope, RwScope::CrcOnly) { self.data .write_encoded_buf(&write_data[0..data_len], ei.start + 4 * MFM_BYTE_LEN); } // Calculate the CRC of the data address mark + data. let mut crc = crc_ibm_3740(&mark_bytes, None); crc = crc_ibm_3740(&write_data[0..data_len], Some(crc)); // Write the CRC after the data. self.data .write_encoded_buf(&crc.to_be_bytes(), ei.start + (4 + data_len) * MFM_BYTE_LEN); self.add_write(data_len);*/ Ok(WriteSectorResult { not_found: false, no_dam: false, address_crc_error: false, wrong_cylinder, bad_cylinder, wrong_head, }) } TrackSectorScanResult::NotFound { wrong_cylinder: wc, bad_cylinder: bc, wrong_head: wh, } => { log::warn!( "write_sector(): Sector ID not found writing sector: {} wc: {} bc: {} wh: {}", id, wc, bc, wh ); Ok(WriteSectorResult { not_found: true, no_dam: false, address_crc_error: false, wrong_cylinder: wc, bad_cylinder: bc, wrong_head: wh, }) } _ => { unreachable!() } } } fn recalculate_sector_crc(&mut self, id: DiskChsnQuery, offset: Option<usize>) -> Result<(), DiskImageError> { // First, read the sector data. let rr = self.read_sector(id, None, offset, RwScope::DataOnly, false)?; // Write the data back to the sector, which will recalculate the CRC. 
// TODO: We may wish to optimize this in the future to just write the new CRC, but I don't expect // this function to be called heavily. self.write_sector( id, offset, &rr.read_buf[rr.data_range], RwScope::CrcOnly, rr.deleted_mark, false, )?; Ok(()) } fn hash(&mut self) -> Digest { let mut hasher = sha1_smol::Sha1::new(); hasher.update(&self.data.data_copied()); hasher.digest() } /// Read all sectors from the track. The data is returned within a [ReadTrackResult] struct /// which also sets some convenience metadata flags which are needed when handling `MetaSector` /// resolution images. /// The data returned is only the actual sector data. The address marks and CRCs are not included /// in the data. /// This function is intended for use in implementing the Read Track FDC command. fn read_all_sectors(&mut self, _ch: DiskCh, n: u8, eot: u8) -> Result<ReadTrackResult, DiskImageError> { let mut track_read_vec = Vec::with_capacity(512 * 9); let sector_data_len = DiskChsn::n_to_bytes(n); let mut sector_read_vec = vec![0u8; sector_data_len]; let mut result_data_error = false; let mut result_address_error = false; let mut result_deleted_mark = false; let mut result_not_found = true; let mut sectors_read: u16 = 0; // Read index first to avoid borrowing issues in next match. let mut bit_index = self.next_sector(0); while let TrackSectorScanResult::Found { ei, sector_chsn, address_error, data_error, deleted_mark, no_dam: _no_dam, .. } = bit_index { // We've found at least one sector. result_not_found = false; // Note any data and address integrity errors, however keep reading. result_address_error |= address_error; result_data_error |= data_error; result_deleted_mark |= deleted_mark; // Resolve the element instance offsets let TrackElementInstance { start, end, .. } = *self.element(ei).unwrap(); // In a normal Read Sector operation, we'd check the value of N in the sector header. 
// When reading all sectors in a track, we specify the value of N for all sectors in // the entire track. The value of N in the sector header is ignored. This allows us // to read data outside a sector in the case of an 'N' mismatch. log::trace!( "read_all_sectors_bitstream(): Found sector_id: {} at offset: {} read length: {}", sector_chsn.s(), start, sector_read_vec.len() ); self.read_exact_at(start + 64, &mut sector_read_vec) .map_err(|_| DiskImageError::BitstreamError)?; track_read_vec.extend(sector_read_vec.clone()); sectors_read = sectors_read.saturating_add(1); if sector_chsn.s() == eot { println!( "read_all_sectors_bitstream(): Reached EOT at sector: {} sectors_read: {}, eot: {}", sector_chsn.s(), sectors_read, eot ); break; } bit_index = self.next_sector(end); } let read_len = track_read_vec.len(); Ok(ReadTrackResult { not_found: result_not_found, sectors_read, read_buf: track_read_vec, deleted_mark: result_deleted_mark, address_crc_error: result_address_error, data_crc_error: result_data_error, read_len_bits: read_len * 16, read_len_bytes: read_len, }) } fn next_id(&self, chs: DiskChs) -> Option<DiskChsn> { if self.metadata.sector_ids.is_empty() { log::warn!("get_next_id(): No sector_id vector for track!"); } let first_sector = *self.metadata.sector_ids.first()?; let mut sector_matched = false; for sid in &self.metadata.sector_ids { if sector_matched { return Some(*sid); } if sid.s() == chs.s() { // Have matching sector id sector_matched = true; } } // If we reached here, we matched the last sector in the list, so return the first // sector as we wrap around the track. 
if sector_matched { Some(first_sector) } else { log::warn!("get_next_id(): Sector not found: {:?}", chs); None } } fn read(&self, offset: Option<isize>, overdump: Option<usize>) -> Result<ReadTrackResult, DiskImageError> { let extra_bytes = overdump.unwrap_or(0); let data_size = self.data.len() / 16 + if self.data.len() % 16 > 0 { 1 } else { 0 }; let dump_size = data_size + extra_bytes; let mut track_read_vec = vec![0u8; dump_size]; let mut track_read_index = 0; if let Some(offset) = offset { track_read_index = if offset < 0 { let (read_index, overflow) = self.data.len().overflowing_add_signed(offset); if overflow { log::error!("read(): Offset underflow."); return Err(DiskImageError::ParameterError); } read_index } else { let read_index = offset as usize; if read_index >= self.data.len() { log::error!("read(): Offset out of bounds."); return Err(DiskImageError::ParameterError); } read_index } }; self.data.read_decoded_buf(&mut track_read_vec, track_read_index); Ok(ReadTrackResult { not_found: false, sectors_read: 0, read_buf: track_read_vec, deleted_mark: false, address_crc_error: false, data_crc_error: false, read_len_bits: self.data.len(), read_len_bytes: data_size, }) } fn read_raw(&self, _overdump: Option<usize>) -> Result<ReadTrackResult, DiskImageError> { //let extra_bytes = overdump.unwrap_or(0); let data_size = self.data.len() / 8 + if self.data.len() % 8 > 0 { 1 } else { 0 }; //let dump_size = data_size + extra_bytes; let track_read_vec = self.data.data_copied(); Ok(ReadTrackResult { not_found: false, sectors_read: 0, read_buf: track_read_vec, deleted_mark: false, address_crc_error: false, data_crc_error: false, read_len_bits: self.data.len(), read_len_bytes: data_size, }) } fn has_weak_bits(&self) -> bool { self.data.has_weak_bits() } fn format( &mut self, standard: System34Standard, format_buffer: Vec<DiskChsn>, fill_pattern: &[u8], gap3: usize, ) -> Result<(), DiskImageError> { let bitcell_ct = self.data.len(); let format_result = 
System34Schema::format_track_as_bytes(standard, bitcell_ct, format_buffer, fill_pattern, gap3)?; let new_bit_vec = self .data .encode(&format_result.track_bytes, false, EncodingVariant::Data); log::trace!( "New bitstream size: {} from {} bytes", new_bit_vec.len(), format_result.track_bytes.len() ); self.data.replace(new_bit_vec); System34Schema::set_track_markers(&mut self.data, format_result.markers)?; // Scan the new track data for markers and create a clock map. let markers = System34Schema::scan_markers(&self.data); if markers.is_empty() { log::error!("TrackData::format(): No markers found in track data post-format."); } else { log::trace!("TrackData::format(): Found {} markers in track data.", markers.len()); } System34Schema::create_clock_map(&markers, self.data.clock_map_mut()); let new_metadata = TrackMetadata::new( System34Schema::scan_metadata(&mut self.data, markers), TrackSchema::System34, ); let data_ranges = new_metadata.data_ranges(); if !data_ranges.is_empty() { self.data.set_data_ranges(data_ranges); } self.metadata = new_metadata; Ok(()) } fn analysis(&self) -> Result<TrackAnalysis, DiskImageError> { let schema = self.schema.ok_or(DiskImageError::SchemaError)?; Ok(schema.analyze_elements(&self.metadata)) } fn stream(&self) -> Option<&TrackDataStream> { Some(&self.data) } fn stream_mut(&mut self) -> Option<&mut TrackDataStream> { Some(&mut self.data) } fn element_map(&self) -> Option<&SourceMap> { Some(&self.metadata.element_map) } } impl BitStreamTrack { pub(crate) fn new( params: &BitStreamTrackParams, shared: Arc<Mutex<SharedDiskContext>>, ) -> Result<BitStreamTrack, DiskImageError> { Self::new_optional_ctx(params, Some(shared)) } pub(crate) fn new_optional_ctx( params: &BitStreamTrackParams, shared: Option<Arc<Mutex<SharedDiskContext>>>, ) -> Result<BitStreamTrack, DiskImageError> { // if params.data.is_empty() { // log::error!("add_track_bitstream(): Data is empty."); // return Err(DiskImageError::ParameterError); // } if params.weak.is_some() 
&& (params.data.len() != params.weak.unwrap().len()) { log::error!("add_track_bitstream(): Data and weak bit mask lengths do not match."); return Err(DiskImageError::ParameterError); } log::debug!( "BitStreamTrack::new(): {} track {}, {} bits", params.encoding, params.ch, params.bitcell_ct.unwrap_or(params.data.len() * 8) ); // The data vec is optional if we have a bitcell count and MFM/FM encoding. let data = if params.data.is_empty() { if let Some(bitcell_ct) = params.bitcell_ct { #[allow(unreachable_patterns)] match params.encoding { TrackDataEncoding::Mfm | TrackDataEncoding::Fm => BitVec::from_fn(bitcell_ct, |i| i % 2 == 0),
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
true
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/track/metasector.rs
src/track/metasector.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- src/track/metasector.rs Implements the MetaSector track type and the Track trait for same. 
*/ use super::{Track, TrackAnalysis, TrackInfo}; use crate::types::{ AddSectorParams, ReadSectorResult, ReadTrackResult, RwScope, ScanSectorResult, SectorAttributes, SharedDiskContext, WriteSectorResult, }; use crate::track_schema::{system34::System34Standard, TrackMetadata, TrackSchema}; use crate::{ bitstream_codec::TrackDataStream, types::{chs::DiskChsnQuery, DiskCh, DiskChs, DiskChsn, TrackDataEncoding, TrackDataRate, TrackDataResolution}, DiskImageError, FoxHashSet, SectorMapEntry, }; use sha1_smol::Digest; use std::{ any::Any, sync::{Arc, Mutex}, }; struct SectorMatch<'a> { pub(crate) sectors: Vec<&'a MetaSector>, pub(crate) sizes: Vec<u8>, pub(crate) wrong_cylinder: bool, pub(crate) bad_cylinder: bool, pub(crate) wrong_head: bool, } impl SectorMatch<'_> { fn len(&'_ self) -> usize { self.sectors.len() } #[allow(dead_code)] fn iter(&'_ self) -> std::slice::Iter<&MetaSector> { self.sectors.iter() } } struct SectorMatchMut<'a> { pub(crate) sectors: Vec<&'a mut MetaSector>, #[allow(dead_code)] pub(crate) sizes: Vec<u8>, pub(crate) wrong_cylinder: bool, pub(crate) bad_cylinder: bool, pub(crate) wrong_head: bool, pub(crate) shared: Arc<Mutex<SharedDiskContext>>, } impl<'a> SectorMatchMut<'a> { fn len(&'a self) -> usize { self.sectors.len() } #[allow(dead_code)] fn iter_mut(&'a mut self) -> std::slice::IterMut<'a, &'a mut MetaSector> { self.sectors.iter_mut() } } #[derive(Clone, Default)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] struct MetaMask { has_bits: bool, mask: Vec<u8>, } impl MetaMask { fn empty(len: usize) -> MetaMask { MetaMask { has_bits: false, mask: vec![0; len], } } fn from(mask: &[u8]) -> MetaMask { let mut m = MetaMask::default(); m.set_mask(mask); m } fn set_mask(&mut self, mask: &[u8]) { self.mask = mask.to_vec(); self.has_bits = mask.iter().any(|&x| x != 0); } #[allow(dead_code)] fn or_mask(&mut self, source_mask: &MetaMask) { for (i, &m) in source_mask.iter().enumerate() { self.mask[i] |= m; } self.has_bits = 
self.mask.iter().any(|&x| x != 0); } fn or_slice(&mut self, source_mask: &[u8]) { for (i, &m) in source_mask.iter().enumerate() { self.mask[i] |= m; } self.has_bits = self.mask.iter().any(|&x| x != 0); } #[allow(dead_code)] fn clear(&mut self) { self.mask.fill(0); self.has_bits = false; } #[allow(dead_code)] fn mask(&self) -> &[u8] { &self.mask } fn has_bits(&self) -> bool { self.has_bits } fn iter(&self) -> std::slice::Iter<u8> { self.mask.iter() } #[allow(dead_code)] fn len(&self) -> usize { self.mask.len() } } #[derive(Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub(crate) struct MetaSector { id_chsn: DiskChsn, address_error: bool, data_error: bool, deleted_mark: bool, no_dam: bool, data: Vec<u8>, weak_mask: MetaMask, hole_mask: MetaMask, } impl MetaSector { pub fn read_data(&self) -> Vec<u8> { if self.no_dam { return Vec::new(); } let mut data = self.data.clone(); for (i, (weak_byte, hole_byte)) in self.weak_mask.iter().zip(self.hole_mask.iter()).enumerate() { let mask_byte = weak_byte | hole_byte; if mask_byte == 0 { continue; } let rand_byte = rand::random::<u8>(); data[i] = data[i] & !mask_byte | rand_byte & mask_byte; } data } } #[derive(Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct MetaSectorTrack { pub(crate) ch: DiskCh, pub(crate) encoding: TrackDataEncoding, pub(crate) schema: Option<TrackSchema>, pub(crate) data_rate: TrackDataRate, pub(crate) sectors: Vec<MetaSector>, #[cfg_attr(feature = "serde", serde(skip))] pub(crate) shared: Arc<Mutex<SharedDiskContext>>, } #[cfg_attr(feature = "serde", typetag::serde)] impl Track for MetaSectorTrack { fn resolution(&self) -> TrackDataResolution { TrackDataResolution::MetaSector } fn as_any(&self) -> &dyn Any { self } fn as_any_mut(&mut self) -> &mut dyn Any { self } fn as_metasector_track(&self) -> Option<&MetaSectorTrack> { self.as_any().downcast_ref::<MetaSectorTrack>() } fn as_metasector_track_mut(&mut self) -> 
Option<&mut MetaSectorTrack> { self.as_any_mut().downcast_mut::<MetaSectorTrack>() } fn ch(&self) -> DiskCh { self.ch } fn set_ch(&mut self, new_ch: DiskCh) { self.ch = new_ch; } fn encoding(&self) -> TrackDataEncoding { self.encoding } fn info(&self) -> TrackInfo { TrackInfo { resolution: self.resolution(), encoding: self.encoding, schema: self.schema, data_rate: self.data_rate, density: None, rpm: None, bit_length: 0, sector_ct: self.sectors.len(), flux_info: None, } } fn metadata(&self) -> Option<&TrackMetadata> { None } fn sector_ct(&self) -> usize { self.sectors.len() } fn has_sector_id(&self, sid: u8, id_chsn: Option<DiskChsn>) -> bool { self.sectors.iter().any(|sector| { if id_chsn.is_none() && sector.id_chsn.s() == sid { return true; } else if let Some(chsn) = id_chsn { if sector.id_chsn == chsn { return true; } } false }) } fn sector_list(&self) -> Vec<SectorMapEntry> { self.sectors .iter() .map(|s| SectorMapEntry { chsn: s.id_chsn, attributes: SectorAttributes { address_error: s.address_error, data_error: s.data_error, deleted_mark: s.deleted_mark, no_dam: false, }, }) .collect() } fn add_sector(&mut self, params: &AddSectorParams) -> Result<(), DiskImageError> { // Create an empty weak bit mask if none is provided. let weak_mask = match &params.weak_mask { Some(weak_buf) => MetaMask::from(weak_buf), None => MetaMask::empty(params.data.len()), }; let hole_mask = match &params.hole_mask { Some(hole_buf) => MetaMask::from(hole_buf), None => MetaMask::empty(params.data.len()), }; let new_sector = MetaSector { id_chsn: params.id_chsn, address_error: params.attributes.address_error, data_error: params.attributes.data_error, deleted_mark: params.attributes.deleted_mark, no_dam: params.attributes.no_dam, data: params.data.to_vec(), weak_mask, hole_mask, }; if params.alternate { // Look for existing sector. let existing_sector = self.sectors.iter_mut().find(|s| s.id_chsn == params.id_chsn); if let Some(es) = existing_sector { // Update the existing sector. 
let mut xor_vec: Vec<u8> = Vec::with_capacity(es.data.len()); // Calculate a bitmap representing the difference between the new sector data and the // existing sector data. for (i, (ns_byte, es_byte)) in new_sector.data.iter().zip(es.data.iter()).enumerate() { xor_vec[i] = ns_byte ^ es_byte; } // Update the weak bit mask for the existing sector and return. es.weak_mask.or_slice(&xor_vec); return Ok(()); } } self.sectors.push(new_sector); Ok(()) } /// Read the sector data from the sector identified by 'chs'. The data is returned within a /// ReadSectorResult struct which also sets some convenience metadata flags where are needed /// when handling MetaSector images. /// When reading a BitStream image, the sector data includes the address mark and crc. /// Offsets are provided within ReadSectorResult so these can be skipped when processing the /// read operation. fn read_sector( &self, id: DiskChsnQuery, _n: Option<u8>, _offset: Option<usize>, scope: RwScope, debug: bool, ) -> Result<ReadSectorResult, DiskImageError> { match scope { // Add 4 bytes for address mark and 2 bytes for CRC. RwScope::EntireElement => unimplemented!("DataElement scope not supported for MetaSector"), RwScope::DataOnly => {} _ => return Err(DiskImageError::ParameterError), }; let sm = self.match_sectors(id, debug); if sm.len() == 0 { log::debug!("read_sector(): No sector found for id: {}", id); Ok(ReadSectorResult { not_found: true, wrong_cylinder: sm.wrong_cylinder, bad_cylinder: sm.bad_cylinder, wrong_head: sm.wrong_head, ..ReadSectorResult::default() }) } else { if sm.len() > 1 { log::warn!( "read_sector(): Found {} sector ids matching id query: {} (with {} different sizes). Using first.", sm.len(), id, sm.sizes.len() ); } let s = sm.sectors[0]; // TODO: MetaSector doesn't have stored CRC, but we can calculate the read CRC Ok(ReadSectorResult { id_chsn: Some(s.id_chsn), data_range: 0..s.data.len(), read_buf: s.read_data(), // Calling read_data applies the weak bit and hole masks. 
deleted_mark: s.deleted_mark, not_found: false, no_dam: false, address_crc_error: s.address_error, data_crc_error: s.data_error, wrong_cylinder: sm.wrong_cylinder, bad_cylinder: sm.bad_cylinder, wrong_head: sm.wrong_head, ..ReadSectorResult::default() }) } } fn scan_sector(&self, id: DiskChsnQuery, _offset: Option<usize>) -> Result<ScanSectorResult, DiskImageError> { let sm = self.match_sectors(id, false); if sm.len() == 0 { log::debug!("scan_sector(): No sector found for id query: {}", id); Ok(ScanSectorResult { not_found: true, no_dam: false, deleted_mark: false, address_error: false, data_error: false, wrong_cylinder: sm.wrong_cylinder, bad_cylinder: sm.bad_cylinder, wrong_head: sm.wrong_head, }) } else { log::warn!( "scan_sector(): Found {} sector ids matching query: {} (with {} different sizes). Using first.", sm.len(), id, sm.sizes.len() ); let s = sm.sectors[0]; Ok(ScanSectorResult { deleted_mark: s.deleted_mark, not_found: false, no_dam: false, address_error: s.address_error, data_error: s.data_error, wrong_cylinder: sm.wrong_cylinder, bad_cylinder: sm.bad_cylinder, wrong_head: sm.wrong_head, }) } } fn write_sector( &mut self, id: DiskChsnQuery, _offset: Option<usize>, write_data: &[u8], _scope: RwScope, write_deleted: bool, debug: bool, ) -> Result<WriteSectorResult, DiskImageError> { let mut sm = self.match_sectors_mut(id, debug); if sm.len() > 1 { log::error!( "write_sector(): Could not identify unique target sector. (Found {} sector ids matching query: {})", sm.len(), id, ); return Err(DiskImageError::UniqueIdError); } else if sm.len() == 0 { log::debug!("write_sector(): No sector found for id query: {}", id); return Ok(WriteSectorResult { not_found: false, no_dam: false, address_crc_error: false, wrong_cylinder: sm.wrong_cylinder, bad_cylinder: sm.bad_cylinder, wrong_head: sm.wrong_head, }); } let write_data_len = write_data.len(); if DiskChsn::n_to_bytes(sm.sectors[0].id_chsn.n()) != write_data_len { // Caller didn't provide correct buffer size. 
log::error!( "write_sector(): Data buffer size mismatch, expected: {} got: {}", DiskChsn::n_to_bytes(sm.sectors[0].id_chsn.n()), write_data_len ); return Err(DiskImageError::ParameterError); } if sm.sectors[0].no_dam || sm.sectors[0].address_error { log::debug!( "write_sector(): Sector {} is unwritable due to no DAM or bad address CRC.", sm.sectors[0].id_chsn ); } else { sm.sectors[0].data.copy_from_slice(write_data); sm.sectors[0].deleted_mark = write_deleted; } sm.shared.lock().unwrap().writes += 1; Ok(WriteSectorResult { not_found: false, no_dam: sm.sectors[0].no_dam, address_crc_error: sm.sectors[0].address_error, wrong_cylinder: sm.wrong_cylinder, bad_cylinder: sm.bad_cylinder, wrong_head: sm.wrong_head, }) } fn recalculate_sector_crc(&mut self, id: DiskChsnQuery, offset: Option<usize>) -> Result<(), DiskImageError> { // First, read the sector data. let rr = self.read_sector(id, None, offset, RwScope::DataOnly, false)?; // Write the data back to the sector, which will recalculate the CRC. self.write_sector(id, offset, &rr.read_buf, RwScope::DataOnly, rr.deleted_mark, false)?; Ok(()) } fn hash(&mut self) -> Digest { let mut hasher = sha1_smol::Sha1::new(); let rtr = self.read_all_sectors(self.ch, 0xFF, 0xFF).unwrap(); hasher.update(&rtr.read_buf); hasher.digest() } /// Read all sectors from the track identified by 'ch'. The data is returned within a /// ReadSectorResult struct which also sets some convenience metadata flags which are needed /// when handling MetaSector images. /// Unlike read_sectors, the data returned is only the actual sector data. The address marks and /// CRCs are not included in the data. /// This function is intended for use in implementing the Read Track FDC command. 
fn read_all_sectors(&mut self, _ch: DiskCh, n: u8, track_len: u8) -> Result<ReadTrackResult, DiskImageError> { let track_len = track_len as u16; let sector_data_len = DiskChsn::n_to_bytes(n); let mut track_read_vec = Vec::with_capacity(sector_data_len * self.sectors.len()); let mut address_crc_error = false; let mut data_crc_error = false; let mut deleted_mark = false; let mut not_found = true; let mut sectors_read = 0; for s in &self.sectors { log::trace!("read_all_sectors(): Found sector_id: {}", s.id_chsn,); not_found = false; // TODO - do we stop after reading sector ID specified by EOT, or // or upon reaching it? if sectors_read >= track_len { log::trace!( "read_all_sectors(): Reached track_len at sector: {} \ sectors_read: {}, track_len: {}", s.id_chsn, sectors_read, track_len ); break; } track_read_vec.extend(&s.read_data()); sectors_read = sectors_read.saturating_add(1); if s.address_error { address_crc_error |= true; } if s.data_error { data_crc_error |= true; } if s.deleted_mark { deleted_mark |= true; } } let read_len = track_read_vec.len(); Ok(ReadTrackResult { not_found, sectors_read, read_buf: track_read_vec, deleted_mark, address_crc_error, data_crc_error, read_len_bits: read_len * 16, read_len_bytes: read_len, }) } fn next_id(&self, chs: DiskChs) -> Option<DiskChsn> { let first_sector = self.sectors.first()?; let mut sector_matched = false; for si in self.sectors.iter() { if sector_matched { return Some(DiskChsn::new(chs.c(), chs.h(), si.id_chsn.s(), si.id_chsn.n())); } if si.id_chsn.s() == chs.s() { // Have matching sector id sector_matched = true; } } // If we reached here, we matched the last sector in the list, so return the first // sector as we wrap around the track. 
if sector_matched { Some(DiskChsn::new( chs.c(), chs.h(), first_sector.id_chsn.s(), first_sector.id_chsn.n(), )) } else { None } } fn read(&self, _offset: Option<isize>, _overdump: Option<usize>) -> Result<ReadTrackResult, DiskImageError> { Err(DiskImageError::UnsupportedFormat) } fn read_raw(&self, _overdump: Option<usize>) -> Result<ReadTrackResult, DiskImageError> { Err(DiskImageError::UnsupportedFormat) } fn has_weak_bits(&self) -> bool { self.sectors.iter().any(|s| s.weak_mask.has_bits()) } fn format( &mut self, _standard: System34Standard, _format_buffer: Vec<DiskChsn>, _fill_pattern: &[u8], _gap3: usize, ) -> Result<(), DiskImageError> { // TODO: Implement format for MetaSectorTrack Err(DiskImageError::UnsupportedFormat) } fn analysis(&self) -> Result<TrackAnalysis, DiskImageError> { let sector_ct = self.sectors.len(); let mut analysis = TrackAnalysis::default(); let mut n_set: FoxHashSet<u8> = FoxHashSet::new(); let mut last_n = 0; for (si, sector) in self.sectors.iter().enumerate() { if sector.id_chsn.s() != si as u8 + 1 { analysis.nonconsecutive_sectors = true; } if sector.data_error { analysis.data_error = true; } if sector.address_error { analysis.address_error = true; } if sector.deleted_mark { analysis.deleted_data = true; } last_n = sector.id_chsn.n(); n_set.insert(sector.id_chsn.n()); } if n_set.len() > 1 { analysis.consistent_sector_size = None; } else { analysis.consistent_sector_size = Some(last_n); } analysis.sector_ct = sector_ct; Ok(analysis) } fn stream(&self) -> Option<&TrackDataStream> { None } fn stream_mut(&mut self) -> Option<&mut TrackDataStream> { None } } impl MetaSectorTrack { #[allow(dead_code)] fn add_write(&mut self, _bytes: usize) { self.shared.lock().unwrap().writes += 1; } fn match_sectors(&self, id: DiskChsnQuery, _debug: bool) -> SectorMatch { let mut wrong_cylinder = false; let mut bad_cylinder = false; let mut wrong_head = false; let mut sizes = FoxHashSet::new(); let matching_sectors: Vec<&MetaSector> = self .sectors 
.iter() .filter(|s| { if id.c().is_some() && s.id_chsn.c() != id.c().unwrap() { wrong_cylinder = true; } if s.id_chsn.c() == 0xFF { bad_cylinder = true; } if id.h().is_some() && s.id_chsn.h() != id.h().unwrap() { wrong_head = true; } sizes.insert(s.id_chsn.n()); id.matches(&s.id_chsn) }) .collect(); SectorMatch { sectors: matching_sectors, sizes: sizes.iter().cloned().collect(), wrong_cylinder, bad_cylinder, wrong_head, } } fn match_sectors_mut(&mut self, id: DiskChsnQuery, _debug: bool) -> SectorMatchMut { let mut wrong_cylinder = false; let mut bad_cylinder = false; let mut wrong_head = false; let mut sizes = FoxHashSet::new(); let matching_sectors: Vec<&mut MetaSector> = self .sectors .iter_mut() .filter(|s| { if id.c().is_some() && s.id_chsn.c() != id.c().unwrap() { wrong_cylinder = true; } if s.id_chsn.c() == 0xFF { bad_cylinder = true; } if id.h().is_some() && s.id_chsn.h() != id.h().unwrap() { wrong_head = true; } sizes.insert(s.id_chsn.n()); id.matches(&s.id_chsn) }) .collect(); SectorMatchMut { sectors: matching_sectors, sizes: sizes.iter().cloned().collect(), wrong_cylinder, bad_cylinder, wrong_head, shared: self.shared.clone(), } } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/track/fluxstream.rs
src/track/fluxstream.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- src/track/fluxstream.rs Implements the Fluxstream track type and the Track trait for same. 
*/ use std::{ any::Any, sync::{Arc, Mutex}, }; use super::{Track, TrackAnalysis, TrackInfo}; use crate::{ bitstream_codec::TrackDataStream, flux::{ flux_revolution::FluxRevolution, histogram::FluxHistogram, pll::{Pll, PllPreset}, }, format_us, track::bitstream::BitStreamTrack, track_schema::{system34::System34Standard, TrackMetadata, TrackSchema}, types::{ chs::DiskChsnQuery, AddSectorParams, BitStreamTrackParams, DiskCh, DiskChs, DiskChsn, DiskRpm, ReadSectorResult, ReadTrackResult, RwScope, ScanSectorResult, SharedDiskContext, TrackDataEncoding, TrackDataRate, TrackDataResolution, TrackDensity, WriteSectorResult, }, DiskImageError, SectorMapEntry, }; use crate::{ flux::{pll::PllMarkerEntry, FluxRevolutionType}, source_map::SourceMap, }; use sha1_smol::Digest; #[derive(Debug, Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct FluxTrackInfo { pub revolutions: usize, pub best_revolution: usize, pub transitions: Vec<usize>, pub density: TrackDensity, pub rpm: DiskRpm, pub encoding: TrackDataEncoding, } /// An iterator over the raw flux values for every revolution of a [FluxStreamTrack]. When consuming /// this iterator you are responsible for calculating where on the track you are as there is no /// index signal provided. 
pub struct RawFluxIterator<'a> { current_revolution: usize, current_delta: usize, revolutions: &'a [FluxRevolution], } impl<'a> RawFluxIterator<'a> { pub fn new(revolutions: &'a [FluxRevolution]) -> Self { Self { current_revolution: 0, current_delta: 0, revolutions, } } } impl<'a> Iterator for RawFluxIterator<'a> { type Item = f64; fn next(&mut self) -> Option<Self::Item> { while self.current_revolution < self.revolutions.len() { let deltas = &self.revolutions[self.current_revolution].flux_deltas; if self.current_delta < deltas.len() { let value = deltas[self.current_delta]; self.current_delta += 1; return Some(value); } else { self.current_revolution += 1; self.current_delta = 0; if self.current_revolution < self.revolutions.len() && matches!( self.revolutions[self.current_revolution].rev_type, FluxRevolutionType::Synthetic ) { // Skip synthetic revolutions break; } } } None } } #[derive(Clone)] #[cfg_attr(feature = "serde", derive(serde::Serialize, serde::Deserialize))] pub struct FluxStreamTrack { encoding: TrackDataEncoding, schema: Option<TrackSchema>, data_rate: TrackDataRate, ch: DiskCh, revolutions: Vec<FluxRevolution>, decoded_revolutions: Vec<Option<BitStreamTrack>>, best_revolution: usize, density: TrackDensity, rpm: DiskRpm, dirty: bool, resolved: Option<BitStreamTrack>, #[cfg_attr(feature = "serde", serde(skip))] shared: Option<Arc<Mutex<SharedDiskContext>>>, } #[cfg_attr(feature = "serde", typetag::serde)] impl Track for FluxStreamTrack { fn resolution(&self) -> TrackDataResolution { TrackDataResolution::FluxStream } fn as_any(&self) -> &dyn Any { self } fn as_any_mut(&mut self) -> &mut dyn Any { self } fn as_fluxstream_track(&self) -> Option<&FluxStreamTrack> { self.as_any().downcast_ref::<FluxStreamTrack>() } fn as_fluxstream_track_mut(&mut self) -> Option<&mut FluxStreamTrack> { self.as_any_mut().downcast_mut::<FluxStreamTrack>() } fn ch(&self) -> DiskCh { self.ch } fn set_ch(&mut self, new_ch: DiskCh) { self.ch = new_ch; } fn encoding(&self) -> 
TrackDataEncoding { self.encoding } fn info(&self) -> TrackInfo { if let Some(resolved) = self.get_bitstream() { let mut ti = resolved.info(); ti.resolution = self.resolution(); log::debug!("FluxStreamTrack::info(): Bitstream info: {:?}", ti); let fti = FluxTrackInfo { revolutions: self.revolutions.len(), best_revolution: self.best_revolution, transitions: self.revolutions.iter().map(|r| r.ft_ct()).collect(), density: self.density, rpm: self.rpm, encoding: self.encoding, }; ti.flux_info = Some(fti); return ti; } TrackInfo { resolution: self.resolution(), encoding: self.encoding, schema: self.schema, data_rate: self.data_rate, density: Some(TrackDensity::from(self.data_rate)), rpm: Some(self.rpm), bit_length: 0, sector_ct: 0, flux_info: None, } } fn metadata(&self) -> Option<&TrackMetadata> { if let Some(resolved) = self.get_bitstream() { return resolved.metadata(); } None } fn sector_ct(&self) -> usize { if let Some(resolved) = self.get_bitstream() { return resolved.sector_ct(); } 0 } fn has_sector_id(&self, id: u8, _id_chsn: Option<DiskChsn>) -> bool { if let Some(resolved) = self.get_bitstream() { return resolved.has_sector_id(id, _id_chsn); } false } fn sector_list(&self) -> Vec<SectorMapEntry> { if let Some(resolved) = self.get_bitstream() { return resolved.sector_list(); } Vec::new() } fn add_sector(&mut self, _sd: &AddSectorParams) -> Result<(), DiskImageError> { Err(DiskImageError::UnsupportedFormat) } /// Read the sector data from the sector identified by 'chs'. The data is returned within a /// ReadSectorResult struct which also sets some convenience metadata flags where are needed /// when handling MetaSector images. /// When reading a BitStream image, the sector data includes the address mark and crc. /// Offsets are provided within ReadSectorResult so these can be skipped when processing the /// read operation. 
fn read_sector( &self, id: DiskChsnQuery, n: Option<u8>, offset: Option<usize>, scope: RwScope, debug: bool, ) -> Result<ReadSectorResult, DiskImageError> { if let Some(resolved) = self.get_bitstream() { return resolved.read_sector(id, n, offset, scope, debug); } Err(DiskImageError::ResolveError) } fn scan_sector(&self, id: DiskChsnQuery, offset: Option<usize>) -> Result<ScanSectorResult, DiskImageError> { if let Some(resolved) = self.get_bitstream() { return Ok(resolved.scan_sector_element(id, offset.unwrap_or(0))?.into()); } Err(DiskImageError::ResolveError) } fn write_sector( &mut self, id: DiskChsnQuery, offset: Option<usize>, write_data: &[u8], scope: RwScope, write_deleted: bool, debug: bool, ) -> Result<WriteSectorResult, DiskImageError> { let old_dirty = self.dirty; self.dirty = true; if let Some(resolved) = self.get_bitstream_mut() { return resolved.write_sector(id, offset, write_data, scope, write_deleted, debug); } self.dirty = old_dirty; Err(DiskImageError::ResolveError) } fn recalculate_sector_crc(&mut self, id: DiskChsnQuery, offset: Option<usize>) -> Result<(), DiskImageError> { if let Some(resolved) = self.get_bitstream_mut() { return resolved.recalculate_sector_crc(id, offset); } Err(DiskImageError::ResolveError) } fn hash(&mut self) -> Digest { if let Some(resolved) = self.get_bitstream_mut() { return resolved.hash(); } Digest::default() } /// Read all sectors from the track identified by 'ch'. The data is returned within a /// ReadSectorResult struct which also sets some convenience metadata flags which are needed /// when handling MetaSector images. /// Unlike read_sectors, the data returned is only the actual sector data. The address marks and /// CRCs are not included in the data. /// This function is intended for use in implementing the Read Track FDC command. 
fn read_all_sectors(&mut self, _ch: DiskCh, n: u8, eot: u8) -> Result<ReadTrackResult, DiskImageError> { if let Some(resolved) = self.get_bitstream_mut() { return resolved.read_all_sectors(_ch, n, eot); } Err(DiskImageError::ResolveError) } fn next_id(&self, chs: DiskChs) -> Option<DiskChsn> { if let Some(resolved) = self.get_bitstream() { return resolved.next_id(chs); } None } fn read(&self, offset: Option<isize>, overdump: Option<usize>) -> Result<ReadTrackResult, DiskImageError> { if let Some(resolved) = self.get_bitstream() { return resolved.read(offset, overdump); } Err(DiskImageError::ResolveError) } fn read_raw(&self, overdump: Option<usize>) -> Result<ReadTrackResult, DiskImageError> { if let Some(resolved) = self.get_bitstream() { return resolved.read_raw(overdump); } Err(DiskImageError::ResolveError) } fn has_weak_bits(&self) -> bool { if let Some(resolved) = self.get_bitstream() { return resolved.has_weak_bits(); } false } fn format( &mut self, standard: System34Standard, format_buffer: Vec<DiskChsn>, fill_pattern: &[u8], gap3: usize, ) -> Result<(), DiskImageError> { let old_dirty = self.dirty; self.dirty = true; if let Some(resolved) = self.get_bitstream_mut() { return resolved.format(standard, format_buffer, fill_pattern, gap3); } self.dirty = old_dirty; Err(DiskImageError::ResolveError) } fn analysis(&self) -> Result<TrackAnalysis, DiskImageError> { if let Some(resolved) = self.get_bitstream() { return resolved.analysis(); } Err(DiskImageError::ResolveError) } fn stream(&self) -> Option<&TrackDataStream> { if let Some(resolved) = self.get_bitstream() { return resolved.stream(); } None } fn stream_mut(&mut self) -> Option<&mut TrackDataStream> { if let Some(resolved) = self.get_bitstream_mut() { return resolved.stream_mut(); } None } fn element_map(&self) -> Option<&SourceMap> { if let Some(resolved) = self.get_bitstream() { return resolved.element_map(); } None } } impl Default for FluxStreamTrack { fn default() -> Self { FluxStreamTrack::new() } } 
impl FluxStreamTrack { pub fn new() -> Self { FluxStreamTrack { encoding: Default::default(), schema: None, data_rate: Default::default(), ch: Default::default(), revolutions: Vec::new(), decoded_revolutions: Vec::new(), best_revolution: 0, density: TrackDensity::Double, rpm: DiskRpm::Rpm300(1.0), dirty: false, resolved: None, shared: None, } } pub fn density(&self) -> TrackDensity { self.density } pub fn set_density(&mut self, density: TrackDensity) { self.density = density; } pub fn is_empty(&self) -> bool { self.revolutions.is_empty() } #[allow(dead_code)] pub(crate) fn normalize(&mut self) { // Drop revolutions that didn't decode at least 100 bits // TODO: Can we do this while keeping the best revolution index valid? self.revolutions.retain(|r| r.bitstream.len() > 100); self.best_revolution = 0; } pub(crate) fn add_revolution(&mut self, ch: DiskCh, data: &[f64], index_time: f64) -> &mut FluxRevolution { let new_stream = FluxRevolution::from_f64(ch, data, index_time); self.revolutions.push(new_stream); self.revolutions.last_mut().unwrap() } pub fn set_revolution(&mut self, index: usize) { if index < self.revolutions.len() { self.best_revolution = index; } } pub fn revolution_ct(&self) -> usize { self.revolutions.len() } pub fn revolution(&self, index: usize) -> Option<&FluxRevolution> { self.revolutions.get(index) } pub fn revolution_mut(&mut self, index: usize) -> Option<&mut FluxRevolution> { self.revolutions.get_mut(index) } pub fn revolution_iter(&self) -> impl Iterator<Item = &FluxRevolution> { self.revolutions.iter() } pub fn revolution_iter_mut(&mut self) -> impl Iterator<Item = &mut FluxRevolution> { self.revolutions.iter_mut() } /// Decode all revolutions in the track. Use 'base_clock' to set the base clock for the PLL, /// if provided. If not provided, the base clock is estimated based on the flux transition /// count, but this can be ambiguous. If no base clock is provided, and we cannot guess, we /// will assume a double density track. 
pub(crate) fn decode_revolutions( &mut self, clock_hint: Option<f64>, rpm_hint: Option<DiskRpm>, ) -> Result<(), DiskImageError> { self.decoded_revolutions = Vec::new(); for (i, revolution) in self.revolutions.iter_mut().enumerate() { self.decoded_revolutions.push(None); let ft_ct = revolution.ft_ct(); // Use the rpm hint if provided, otherwise try to derive from the revolution's index time, // falling back to 300 RPM if neither works. let mut base_rpm = rpm_hint.unwrap_or(DiskRpm::try_from_index_time(revolution.index_time).unwrap_or(DiskRpm::Rpm300(1.0))); log::debug!("decode_revolutions:() using base rpm: {}", base_rpm); let mut base_clock; let base_clock_opt = match clock_hint { Some(hint) => { log::debug!("decode_revolutions(): Revolution {}: Using clock hint: {}", i, hint); Some(hint) } None => { // Try to estimate base clock and rpm based on flux transition count. // This is not perfect - we may need to adjust the clock later. let base_clock_opt = match ft_ct { 20_000..41_666 => Some(2e-6), 50_000.. => Some(1e-6), _ => { log::warn!( "decode_revolutions(): Revolution {} has ambiguous FT count: {}. Falling back to histogram clock detection.", i, ft_ct ); None } }; log::debug!( "decode_revolutions(): Revolution {}: Estimating clock by FT count: {} Base clock: {:?}", i, ft_ct, base_clock_opt ); base_clock_opt } }; log::debug!("Base clock after flux count check is {:?}", base_clock_opt); let index_time = revolution.index_time; let rev_rpm = 60.0 / index_time; let f_rpm = f64::from(base_rpm); // If RPM calculated from the index time seems accurate, trust it over the rpm hint. base_rpm = match rev_rpm { 255.0..345.0 => DiskRpm::Rpm300(rev_rpm / 300.0), 345.0..414.0 => DiskRpm::Rpm360(rev_rpm / 360.0), _ => { log::error!( "Revolution {} RPM is out of range ({:.2}). Assuming {}", i, rev_rpm, base_rpm ); // TODO: Fall back to calculating rpm from sum of flux times? 
base_rpm } }; log::debug!("Base RPM after index time check is {:?}", base_rpm); base_clock = if let Some(base_clock) = base_clock_opt { // Handling the case of a double-density disk imaged in a 360 RPM drive is a pain. // For now, let's assume that anything higher than a 1.5us base clock is double density, // in which case we will adjust the clock by the relative RPM. base_rpm.adjust_clock(base_clock) } else { // Try to determine the base clock and RPM based on the revolution histogram. let mut full_hist = FluxHistogram::new(&revolution.flux_deltas, 1.0); let base_transition_time_opt = full_hist.base_transition_time(); if let Some(base_transition_time) = base_transition_time_opt { let hist_period = base_transition_time / 2.0; log::debug!( "decode_revolutions(): Revolution {}: Histogram base period {:.4}", i, format_us!(hist_period) ); hist_period } else { log::warn!( "decode_revolutions(): Revolution {}: No base clock hint, and full histogram base period not found. Assuming 2us bitcell.", i ); 2e-6 } }; // Create PLL and decode revolution. 
let mut pll = Pll::from_preset(PllPreset::Aggressive); // Create histogram for start of revolution (first 2% of track) let mut hist = FluxHistogram::new(&revolution.flux_deltas, 0.02); let base_transition_time_opt = hist.base_transition_time(); if base_transition_time_opt.is_none() { log::warn!( "decode_revolutions(): Revolution {}: Unable to detect track start transition time.", i ); } if let Some(base_transition_time) = base_transition_time_opt { let hist_period = base_transition_time / 2.0; let difference_ratio = (hist_period - base_clock) / base_clock; if difference_ratio.abs() < 0.25 { log::debug!( "decode_revolutions(): Revolution {}: Histogram refined clock to {}", i, format_us!(hist_period), ); base_clock = hist_period; } else { log::warn!( "decode_revolutions(): Revolution {}: Start of track histogram clock {} is too far from base {}, not adjusting clock.", i, format_us!(hist_period), format_us!(base_clock) ); } } pll.set_clock(1.0 / base_clock, None); log::debug!( "decode_revolutions(): Decoding revolution {}: Bitrate: {:.2}, Base period {}, {:.2}rpm", i, 1.0 / base_clock, format_us!(base_clock), f_rpm ); let flux_stats = revolution.decode_direct(&mut pll); let (bitstream_data, bitcell_ct) = revolution.bitstream_data(); let params = BitStreamTrackParams { schema: self.schema, encoding: revolution.encoding, data_rate: TrackDataRate::from(revolution.data_rate.unwrap() as u32), // Data rate should be Some after decoding rpm: Some(base_rpm), ch: revolution.ch, bitcell_ct: Some(bitcell_ct), data: &bitstream_data, weak: None, hole: None, detect_weak: false, }; let bitstream_track = BitStreamTrack::new( &params, self.shared .clone() .expect("Attempted to decode track before adding it."), )?; self.decoded_revolutions[i] = Some(bitstream_track); log::debug!("decode_revolutions(): Decoded revolution {}: {}", i, flux_stats); } Ok(()) } pub fn synthesize_revolutions(&mut self) { let synthetic_revs: Vec<FluxRevolution> = self .revolutions .windows(2) // Create pairs 
of successive elements .flat_map(|pair| FluxRevolution::from_adjacent_pair(&pair[0], &pair[1])) // Call make_foo on each pair .collect(); self.revolutions.extend(synthetic_revs); } pub fn analyze_revolutions(&mut self) { let mut best_revolution = 0; let mut best_score = 0; if self.revolutions.is_empty() { log::warn!("FluxStreamTrack::analyze_revolutions(): No revolutions to analyze."); return; } for (i, bitstream) in self.decoded_revolutions.iter().enumerate() { if let Some(track) = bitstream { let score = track.calc_quality_score(); let bad_sectors = track.sector_list().iter().filter(|s| s.attributes.data_error).count(); log::debug!( "FluxStreamTrack::analyze_revolutions(): Revolution {}, ft_ct: {} bitcells: {} bad sectors: {} score: {}", i, self.revolutions[i].ft_ct(), track.info().bit_length, bad_sectors, score ); // Higher bitstream quality score = better revolution. if score > best_score { best_score = score; best_revolution = i; } } } log::debug!( "FluxStreamTrack::analyze_revolutions(): Best revolution is {}/{} with score {}", best_revolution, self.revolutions.len(), best_score ); self.best_revolution = best_revolution; let rev_ref = self .revolutions .get_mut(best_revolution) .expect("Best revolution not found."); self.encoding = rev_ref.encoding; } /// Retrieve the flux deltas for the best revolution. 
pub fn flux_deltas(&self) -> &[f64] { self.revolutions[self.best_revolution].flux_deltas.as_slice() } pub fn flux_deltas_us(&self) -> Vec<f32> { self.revolutions[self.best_revolution] .flux_deltas .iter() .map(|&f| (f * 1_000_000.0) as f32) .collect::<Vec<f32>>() } pub fn flux_deltas_revolution(&self, rev: usize) -> Option<&[f64]> { if rev < self.revolutions.len() { return Some(self.revolutions[rev].flux_deltas.as_slice()); } None } pub fn pll_markers(&self) -> &[PllMarkerEntry] { &self.revolutions[self.best_revolution].markers } pub fn pll_markers_revolution(&self, rev: usize) -> Option<&[PllMarkerEntry]> { if rev < self.revolutions.len() { Some(self.revolutions[rev].markers.as_slice()) } else { None } } fn get_bitstream(&self) -> Option<&BitStreamTrack> { if let Some(resolved) = &self.resolved { return Some(resolved); } else if self.best_revolution < self.revolutions.len() { if let Some(track) = &self.decoded_revolutions[self.best_revolution] { return Some(track); } } log::warn!( "get_bitstream(): No track resolved for {} Best: {} Revolutions: {}", self.ch, self.best_revolution, self.revolutions.len() ); None } fn get_bitstream_mut(&mut self) -> Option<&mut BitStreamTrack> { if let Some(resolved) = &mut self.resolved { return Some(resolved); } else if self.best_revolution < self.revolutions.len() { if let Some(track) = &mut self.decoded_revolutions[self.best_revolution] { return Some(track); } } log::warn!( "get_bitstream_mut(): No track resolved for {} Best: {} Revolutions: {}", self.ch, self.best_revolution, self.revolutions.len() ); None } pub(crate) fn set_shared(&mut self, shared: Arc<Mutex<SharedDiskContext>>) { self.shared = Some(shared); } pub fn raw_flux_iter(&self) -> RawFluxIterator { RawFluxIterator::new(&self.revolutions) } } #[cfg(test)] mod tests { use super::*; use bit_vec::BitVec; #[test] fn test_raw_flux_iterator() { let flux_revolutions = vec![ FluxRevolution { rev_type: FluxRevolutionType::Source, ch: DiskCh::new(0, 0), data_rate: None, 
index_time: 0.200, flux_deltas: vec![0.002, 0.004, 0.006], transitions: vec![], bitstream: BitVec::new(), biterrors: BitVec::new(), encoding: TrackDataEncoding::Mfm, markers: vec![], pll_stats: vec![], }, FluxRevolution { rev_type: FluxRevolutionType::Source, ch: DiskCh::new(0, 0), data_rate: None, index_time: 0.200, flux_deltas: vec![0.004, 0.006, 0.002], transitions: vec![], bitstream: BitVec::new(), biterrors: BitVec::new(), encoding: TrackDataEncoding::Mfm, markers: vec![], pll_stats: vec![], }, ]; let iter = RawFluxIterator::new(&flux_revolutions); let collected: Vec<f64> = iter.collect(); assert_eq!(collected, vec![0.002, 0.004, 0.006, 0.004, 0.006, 0.002]); } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/track/mod.rs
src/track/mod.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------- src/track/mod.rs Defines the Track trait */ pub mod bitstream; pub mod fluxstream; pub mod metasector; //mod sector_iterator; use crate::{ bitstream_codec::TrackDataStream, source_map::SourceMap, track::{ bitstream::BitStreamTrack, fluxstream::{FluxStreamTrack, FluxTrackInfo}, metasector::MetaSectorTrack, }, track_schema::{system34::System34Standard, TrackMetadata, TrackSchema}, types::{ chs::DiskChsnQuery, AddSectorParams, DiskCh, DiskChs, DiskChsn, DiskRpm, ReadSectorResult, ReadTrackResult, RwScope, ScanSectorResult, TrackDataEncoding, TrackDataRate, TrackDataResolution, TrackDensity, WriteSectorResult, }, DiskImageError, SectorIdQuery, SectorMapEntry, }; use dyn_clone::{clone_trait_object, DynClone}; use sha1_smol::Digest; use std::any::Any; /// A struct containing information about a track's encoding, data rate, density, RPM, bit length, /// and sector count. #[derive(Debug, Default)] pub struct TrackInfo { /// The resolution of the track as a `TrackDataResolution` enum. pub resolution: TrackDataResolution, /// The type of encoding used on the track as a `DiskDataEncoding` enum. pub encoding: TrackDataEncoding, /// The track data schema pub schema: Option<TrackSchema>, /// The data rate of the track as a `DiskDataRate` enum. pub data_rate: TrackDataRate, /// The density of the track as a `DiskDensity` enum, or `None` if density has not been determined. pub density: Option<TrackDensity>, /// The RPM of the track as an `DiskRpm`, or `None` if RPM has not been determined. pub rpm: Option<DiskRpm>, /// The bit length of the track. pub bit_length: usize, /// The number of sectors on the track. pub sector_ct: usize, /// a FluxTrackInfo struct, if the track is a FluxStreamTrack pub flux_info: Option<FluxTrackInfo>, } /// A struct representing the result of a sector scan operation on a track. 
#[derive(Debug)] pub(crate) enum TrackSectorScanResult { /// A variant indicating the specified sector ID was found on the track. Found { /// The index of the [TrackElementInstance] that was found. ei: usize, /// The matching sector ID found on the track. sector_chsn: DiskChsn, /// Whether the specified sector failed a header data integrity check. address_error: bool, /// Whether the specified sector failed a data integrity check. data_error: bool, /// Whether the specific sector has a "deleted data" address mark. deleted_mark: bool, /// A boolean flag indicating whether the sector ID was matched, but no sector data was found. no_dam: bool, }, /// A variant indicating the specified sector ID was not found on the track. NotFound { /// A sector ID with a different cylinder ID as the requested sector was found while scanning /// the track. wrong_cylinder: bool, /// A sector ID with a different head ID as the requested sector was found while scanning /// the track. bad_cylinder: bool, /// A sector ID with a different head ID as the requested sector was found while scanning /// the track. wrong_head: bool, }, #[allow(dead_code)] // use this someday (wrong track encoding?) Incompatible, } impl From<TrackSectorScanResult> for ScanSectorResult { fn from(other: TrackSectorScanResult) -> Self { match other { TrackSectorScanResult::Found { address_error, data_error, deleted_mark, no_dam, .. } => ScanSectorResult { not_found: false, no_dam, deleted_mark, address_error, data_error, ..Default::default() }, TrackSectorScanResult::NotFound { wrong_cylinder, bad_cylinder, wrong_head, } => ScanSectorResult { wrong_cylinder, bad_cylinder, wrong_head, ..Default::default() }, TrackSectorScanResult::Incompatible => Default::default(), } } } /// A structure containing information about a track's consistency vs a standard track. #[derive(Debug, Default)] pub struct TrackAnalysis { /// A boolean flag indicating whether the track contains sectors with bad data CRCs. 
pub data_error: bool, /// A boolean flag indicating whether the track contains sectors with bad address CRCs. pub address_error: bool, /// A boolean flag indicating whether the track contains sectors with deleted data. pub deleted_data: bool, /// A boolean flag indicating whether the track contains sectors with no DAM. pub no_dam: bool, /// An optional value indicating the consistent sector size of the track, or None if the track /// contains sectors of varying sizes. pub consistent_sector_size: Option<u8>, /// A boolean flag indicating whether the track contains nonconsecutive sectors. pub nonconsecutive_sectors: bool, /// A boolean flag indicating whether the track contains overlapping sectors. pub overlapping_sectors: bool, /// A boolean flag indicating whether the track contains sectors that cross the index. pub sector_crossing_index: bool, /// The number of sectors on the track. pub sector_ct: usize, } impl TrackAnalysis { /// Merge a [TrackAnalysis] struct with another, by OR'ing together their boolean values. pub fn join(&mut self, other: &TrackAnalysis) { self.data_error |= other.data_error; self.address_error |= other.address_error; self.deleted_data |= other.deleted_data; self.no_dam |= other.no_dam; self.nonconsecutive_sectors |= other.nonconsecutive_sectors; self.overlapping_sectors |= other.overlapping_sectors; self.sector_crossing_index |= other.sector_crossing_index; if other.consistent_sector_size.is_none() { self.consistent_sector_size = None; } } } #[cfg_attr(feature = "serde", typetag::serde)] pub trait Track: DynClone + Any + Send + Sync { /// Return the resolution of the track as a `DiskDataResolution`. /// This can be used to determine the track's underlying representation, especially if you wish /// to downcast the track to a specific type. fn resolution(&self) -> TrackDataResolution; /// Return a reference to the track as a `&dyn Any`, for downcasting. 
fn as_any(&self) -> &dyn Any; /// Return a mutable reference to the track as a `&mut dyn Any`, for downcasting. fn as_any_mut(&mut self) -> &mut dyn Any; /// Downcast the track to a `MetaSectorTrack` reference, if possible. fn as_metasector_track(&self) -> Option<&MetaSectorTrack> { None } /// Downcast the track to a mutable `MetaSectorTrack` reference, if possible. fn as_metasector_track_mut(&mut self) -> Option<&mut MetaSectorTrack> { None } /// Downcast the track to a `BitStreamTrack` reference, if possible. fn as_bitstream_track(&self) -> Option<&BitStreamTrack> { None } /// Downcast the track to a `BitStreamTrack` reference, if possible. fn as_bitstream_track_mut(&mut self) -> Option<&mut BitStreamTrack> { None } /// Downcast the track to a `FluxStreamTrack` reference, if possible. fn as_fluxstream_track(&self) -> Option<&FluxStreamTrack> { None } /// Downcast the track to a mutable `FluxStreamTrack` reference, if possible. fn as_fluxstream_track_mut(&mut self) -> Option<&mut FluxStreamTrack> { None } /// Return the track's physical cylinder and head as a `DiskCh`. fn ch(&self) -> DiskCh; /// Set the track's physical cylinder and head. fn set_ch(&mut self, ch: DiskCh); /// Return the encoding of the track as `DiskDataEncoding`. fn encoding(&self) -> TrackDataEncoding; /// Return information about the track as a `TrackInfo` struct. fn info(&self) -> TrackInfo; /// Return the track's metadata as a reference to [TrackMetadata], or None if the track has not /// been scanned for metadata or no metadata was found. fn metadata(&self) -> Option<&TrackMetadata>; /// Return a count of the sectors on the track. fn sector_ct(&self) -> usize; /// Returns `true` if the track contains a sector with the specified ID. /// /// # Arguments /// - `id`: The sector ID to search for. /// - `id_chsn`: An optional `DiskChsn` value. If provided, the `id` parameter is ignored and /// the entire `DiskChsn` value is used to search for the sector. 
fn has_sector_id(&self, id: u8, id_chsn: Option<DiskChsn>) -> bool; // Return a SectorIterator for the current track. // Warning: Reformatting the track will invalidate the iterator. //fn sector_iter(&self) -> SectorIterator<'a, T>; /// TODO: Rename SectorMapEntry - it's not a map, it's a list. /// Returns a vector of `SectorMapEntry` structs representing the sectors on the track. fn sector_list(&self) -> Vec<SectorMapEntry>; /// Adds a new sector to a track in the disk image, essentially 'formatting' a new sector, /// This function is only valid for tracks with `MetaSector` resolution. /// /// # Arguments /// - `sd`: A reference to a `SectorDescriptor` containing the sector data and metadata. /// - `alternate`: A boolean flag indicating whether the sector is an alternate sector. /// Alternate sectors will calculate weak bit masks for the existing sector. /// If the existing sector does not exist, the alternate flag is ignored. /// /// # Returns /// - `Ok(())` if the sector was successfully mastered. /// - `Err(DiskImageError::SeekError)` if the head value in `chs` is greater than 1 or the track map does not contain the specified cylinder. /// - `Err(DiskImageError::UnsupportedFormat)` if the track data is not of `MetaSector` resolution. fn add_sector(&mut self, sd: &AddSectorParams) -> Result<(), DiskImageError>; /// Attempts to read the sector data from the sector identified by `id`. /// /// # Arguments /// - `id`: The sector ID to read as a `SectorIdQuery`. /// - `n`: An optional override value for the sector's size parameter. If provided, the sector /// will be read as a sector of this size. /// - `offset`: An optional bit offset to start reading the sector data from. If a track /// contains multiple sectors with the same ID, the offset can be used to specify /// which sector to read. /// - `scope`: The scope of the read operation as a `RwSectorScope` enum. This can be used to /// specify whether to include the sector's address mark and CRC in the read data. 
/// - `debug`: A boolean flag controlling debug mode. When set to `true`, the read operation /// return data even if the sector has an invalid address CRC or would otherwise /// normally not be read. /// /// # Returns /// A Result containing either /// - [ReadSectorResult] struct which provides various result flags and the resulting data if /// the sector was successfully read. /// - [DiskImageError] if an error occurred while reading the sector. fn read_sector( &self, id: SectorIdQuery, n: Option<u8>, offset: Option<usize>, scope: RwScope, debug: bool, ) -> Result<ReadSectorResult, DiskImageError>; fn scan_sector(&self, id: SectorIdQuery, offset: Option<usize>) -> Result<ScanSectorResult, DiskImageError>; fn write_sector( &mut self, id: DiskChsnQuery, offset: Option<usize>, write_data: &[u8], scope: RwScope, write_deleted: bool, debug: bool, ) -> Result<WriteSectorResult, DiskImageError>; /// Recalculate the sector CRC for the first sector matching the query from the specified bit /// offset. fn recalculate_sector_crc(&mut self, id: DiskChsnQuery, offset: Option<usize>) -> Result<(), DiskImageError>; /// Return a hash that uniquely identifies the track data. Intended for use in identifying /// duplicate tracks. fn hash(&mut self) -> Digest; /// Read all sectors from the track. The data is returned within a `ReadSectorResult` struct /// which also sets some convenience metadata flags which are needed when handling `MetaSector` /// resolution images. /// Unlike `read_sector`, the data returned is only the actual sector data. The address marks /// and CRCs are not included in the data. /// This function is intended for use in implementing the µPD765 FDC's "Read Track" command. fn read_all_sectors(&mut self, ch: DiskCh, n: u8, track_len: u8) -> Result<ReadTrackResult, DiskImageError>; fn next_id(&self, chs: DiskChs) -> Option<DiskChsn>; /// Read the entire track, decoding the data within. 
/// Not valid for MetaSector resolution tracks, which will return `DiskImageError::UnsupportedFormat`. /// /// # Parameters /// - `ch`: The cylinder and head of the track to read. /// - `overdump`: An optional parameter to specify the number of bytes to read past the end of /// the track. This is useful for examining track wrapping behavior. /// # Returns /// - `Ok(ReadTrackResult)` if the track was successfully read. /// - `Err(DiskImageError)` if an error occurred while reading the track. fn read(&self, offset: Option<isize>, overdump: Option<usize>) -> Result<ReadTrackResult, DiskImageError>; /// Read the entire track without decoding. /// Not valid for MetaSector resolution tracks, which will return `DiskImageError::UnsupportedFormat`. /// /// # Parameters /// - `ch`: The cylinder and head of the track to read. /// - `overdump`: An optional parameter to specify the number of bytes to read past the end of /// the track. This is useful for examining track wrapping behavior. /// # Returns /// - `Ok(ReadTrackResult)` if the track was successfully read. /// - `Err(DiskImageError)` if an error occurred while reading the track. fn read_raw(&self, overdump: Option<usize>) -> Result<ReadTrackResult, DiskImageError>; /// Return a boolean value indicating whether the track has bits set in its weak bit mask. fn has_weak_bits(&self) -> bool; /// Format the track with the specified parameters. /// # Arguments /// - `standard`: The disk structure standard to use when formatting the track. /// - `format_buffer`: A vector of `DiskChsn` values representing the sectors to format. /// - `fill_pattern`: A slice of bytes to use as the fill pattern when formatting the track. /// - `gap3`: The GAP3 length in bytes to use when formatting the track. fn format( &mut self, standard: System34Standard, format_buffer: Vec<DiskChsn>, fill_pattern: &[u8], gap3: usize, ) -> Result<(), DiskImageError>; /// Retrieve information about a track's consistency vs a standard track. 
/// Returns a `TrackAnalysis` struct containing information about the track's formatting, /// such as bad CRCs, deleted data, and overlapping sectors. /// # Returns /// - `Ok(TrackAnalysis)` if the track was successfully analyzed /// - `Err(DiskImageError)` if an error occurred while checking the analyzing the track fn analysis(&self) -> Result<TrackAnalysis, DiskImageError>; /// Return a reference to the underlying `TrackDataStream`. fn stream(&self) -> Option<&TrackDataStream>; /// Return a mutable reference to the underlying `TrackDataStream`. fn stream_mut(&mut self) -> Option<&mut TrackDataStream>; /// Return a SourceMap containing info about the track's elements for display in a UI or /// debug output. fn element_map(&self) -> Option<&SourceMap> { None } } clone_trait_object!(Track); pub type DiskTrack = Box<dyn Track>;
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/track/sector_iterator.rs
src/track/sector_iterator.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ use crate::{track::Track, DiskChsn}; use std::marker::PhantomData; pub struct SectorSpecifier { id_chsn: DiskChsn, offset: Option<usize>, } pub struct SectorIterator<'a, T: Track> { track: &'a T, cursor: SectorSpecifier, _marker: PhantomData<&'a T>, } impl<'a, T: Track> Iterator for SectorIterator<'a, T> { type Item = SectorSpecifier; fn next(&mut self) -> Option<Self::Item> { // Logic to find the next sector in the track if let Some(current_id) = self.cursor { if let Some(sector) = self.track.get_sector(current_id) { // Update the iterator state self.cursor = self.track.next_sector_id(current_id); //self.cursor.offset = self.track.get_bit_offset(self.current_sector); return Some(sector); } } // No more sectors None } }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/prelude.rs
src/visualization/prelude.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ //! Prelude for visualization module. This module re-exports all necessary //! types and functions for visualization. pub use super::{ types::{blend::VizBlendMode, color::VizColor, shapes::*}, vectorize_disk::*, TurningDirection, *, }; pub use crate::visualization::types::display_list::*;
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/rasterize_disk.rs
src/visualization/rasterize_disk.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ //! 
Module for rendering disk images to a pixmap, requiring tiny_skia use crate::{ track_schema::{GenericTrackElement, TrackElement}, visualization::{ collect_error_maps, collect_metadata, collect_streams, collect_weak_masks, metadata, stream, types::{color::VizColor, shapes::VizPoint2d}, CommonVizParams, RenderDiskSelectionParams, RenderMaskType, RenderRasterizationParams, RenderTrackDataParams, RenderTrackMetadataParams, ResolutionType, TurningDirection, POPCOUNT_TABLE, }, DiskImage, DiskImageError, DiskVisualizationError, MAX_CYLINDER, }; use std::{ cmp::min, f32::consts::{PI, TAU}, }; use tiny_skia::{ BlendMode, Color, FillRule, GradientStop, LineCap, LineJoin, LinearGradient, Paint, PathBuilder, Pixmap, Point, PremultipliedColorU8, SpreadMode, Stroke, Transform, }; /// Rasterize a representation of a disk's data to a Pixmap. This function samples the disk image /// across U,V coordinates at the requested rendering resolution, and can be quite slow for large /// resolutions (the number of pixels grows quadratically with the image size). /// /// This technique can also result in aliasing artifacts, especially moiré. This is worst at 45 /// degree increments for reasons that are not entirely clear to me but probably due to the /// trigonometric functions used. /// /// Supersampling can help, but additional rendering cost. /// /// Still, it is possible to render things that aren't practical with a vector-based approach, /// such as rendering individual bits. pub fn rasterize_track_data( disk_image: &DiskImage, pixmap: &mut Pixmap, p: &CommonVizParams, r: &RenderTrackDataParams, rr: &RenderRasterizationParams, ) -> Result<(), DiskImageError> { // Render at the full supersampling resolution. The caller is responsible for down-sampling. let (width, height) = rr.render_size().to_tuple(); let span = pixmap.width(); // Get the offset from the RenderRasterizationParams, which defines them in pixels. 
let (x_offset, y_offset) = rr.pos_offset.unwrap_or(VizPoint2d::default()).to_tuple(); let center_x = width as f32 / 2.0; let center_y = height as f32 / 2.0; let total_radius = width.min(height) as f32 / 2.0; let mut min_radius = p.min_radius_ratio * total_radius; // Scale min_radius to pixel value let r_tracks = collect_streams(r.side, disk_image); let r_metadata = collect_metadata(r.side, disk_image); let track_limit = p.track_limit.unwrap_or(MAX_CYLINDER); let num_tracks = min(r_tracks.len(), track_limit); if num_tracks == 0 { return Err(DiskImageError::IncompatibleImage("No tracks to visualize!".to_string())); } log::trace!("collected {} track references.", num_tracks); for (ti, track) in r_tracks.iter().enumerate() { log::trace!("track {} length: {}", ti, track.len()); } // If pinning has been specified, adjust the minimum radius. // We subtract any over-dumped tracks from the radius, so that the minimum radius fraction // is consistent with the last standard track. min_radius = if p.pin_last_standard_track { let normalized_track_ct = match num_tracks { 0..50 => 40, 50.. 
=> 80, }; let track_width = (total_radius - min_radius) / normalized_track_ct as f32; log::debug!( "render_track_data(): track ct: {} normalized track ct: {}", num_tracks, normalized_track_ct ); let overdump = num_tracks.saturating_sub(normalized_track_ct); p.min_radius_ratio * total_radius - (overdump as f32 * track_width) } else { min_radius }; let track_width = (total_radius - min_radius) / num_tracks as f32; let pix_buf = pixmap.pixels_mut(); let color_black = PremultipliedColorU8::from_rgba(0, 0, 0, 255).unwrap(); let color_white = PremultipliedColorU8::from_rgba(255, 255, 255, 255).unwrap(); let skia_color = rr.image_bg_color.map(|color| Color::from(color)); let color_bg: PremultipliedColorU8 = match skia_color { Some(color) => PremultipliedColorU8::from_rgba( (color.red() * 255.0) as u8, (color.green() * 255.0) as u8, (color.blue() * 255.0) as u8, (color.alpha() * 255.0) as u8, ) .unwrap(), None => PremultipliedColorU8::from_rgba(0, 0, 0, 0).unwrap(), }; // Draw the tracks for y in 0..height { for x in 0..width { let dx = x as f32 - center_x; let dy = y as f32 - center_y; let distance = (dx * dx + dy * dy).sqrt(); let _distance_sq = dx * dx + dy * dy; let angle = (dy.atan2(dx) + PI) % TAU; //let angle = dy.atan2(dx) % TAU; if distance >= min_radius && distance <= total_radius { let track_offset = (distance - min_radius) / track_width; if track_offset.fract() < p.track_gap { continue; } let track_index = (num_tracks - 1).saturating_sub(track_offset.floor() as usize); if track_index < num_tracks { if r_tracks[track_index].is_empty() { continue; } // Adjust angle for clockwise or counter-clockwise let mut normalized_angle = match p.direction { TurningDirection::Clockwise => angle - p.index_angle, TurningDirection::CounterClockwise => TAU - (angle - p.index_angle), }; // Normalize the angle to the range 0..2π //normalized_angle = normalized_angle % TAU; normalized_angle = (normalized_angle + PI) % TAU; let bit_index = ((normalized_angle / TAU) * 
r_tracks[track_index].len() as f32) as usize; // Ensure bit_index is within bounds //let bit_index = min(bit_index, r_tracks[track_index].len() - 9); let bit_index = bit_index % r_tracks[track_index].len(); let color = match r.resolution { ResolutionType::Bit => { if r_tracks[track_index][bit_index] { color_white } else { color_black } } ResolutionType::Byte => { // Calculate the byte value // Don't decode empty tracks - there's no data to decode! let decoded_bit_idx = (bit_index) & !0xF; let decode_override = r.decode && !r_metadata[track_index].items.is_empty() && r_tracks[track_index].is_data(decoded_bit_idx, false); let byte_value = match decode_override { false => r_tracks[track_index].read_raw_u8(bit_index).unwrap_or_default(), true => { // Only render bits in 16-bit steps. r_tracks[track_index] .read_decoded_u8(decoded_bit_idx) .unwrap_or_default() } }; let gray_value = POPCOUNT_TABLE[byte_value as usize]; PremultipliedColorU8::from_rgba(gray_value, gray_value, gray_value, 255).unwrap() } }; pix_buf[((y + y_offset) * span + (x + x_offset)) as usize] = color; } } else { pix_buf[((y + y_offset) * span + (x + x_offset)) as usize] = color_bg; } } } Ok(()) } /// Render a representation of a track map to a `tiny_skia::Pixmap`. /// The destination Pixmap is usually the result of a call to `render_track_data`. /// The mask can be either a weak bit map or an error map pub fn render_track_mask( disk_image: &DiskImage, pixmap: &mut Pixmap, map: RenderMaskType, p: &CommonVizParams, r: &RenderTrackDataParams, rr: &RenderRasterizationParams, ) -> Result<(), DiskImageError> { let (width, height) = rr.image_size.to_tuple(); let span = pixmap.width(); // Get the offset from the RenderRasterizationParams, which defines them in pixels. 
let (x_offset, y_offset) = rr.pos_offset.unwrap_or(VizPoint2d::default()).to_tuple(); let center_x = width as f32 / 2.0; let center_y = height as f32 / 2.0; let total_radius = width.min(height) as f32 / 2.0; let mut min_radius = p.min_radius_ratio * total_radius; // Scale min_radius to pixel value let track_refs = match map { RenderMaskType::WeakBits => collect_weak_masks(r.side, disk_image), RenderMaskType::Errors => collect_error_maps(r.side, disk_image), }; let track_limit = p.track_limit.unwrap_or(MAX_CYLINDER); let num_tracks = min(track_refs.len(), track_limit); if num_tracks == 0 { return Err(DiskImageError::IncompatibleImage("No tracks to visualize!".to_string())); } // log::trace!("collected {} maps of type {:?}", num_tracks, map); // for (ti, track) in track_refs.iter().enumerate() { // log::debug!("map {} has {} bits", ti, track.count_ones()); // log::trace!("track {} length: {}", ti, track.len()); // } // If pinning has been specified, adjust the minimum radius. // We subtract any over-dumped tracks from the radius, so that the minimum radius fraction // is consistent with the last standard track. min_radius = if p.pin_last_standard_track { let normalized_track_ct = match num_tracks { 0..50 => 40, 50.. 
=> 80, }; let track_width = (total_radius - min_radius) / normalized_track_ct as f32; let overdump = num_tracks - normalized_track_ct; p.min_radius_ratio * total_radius - (overdump as f32 * track_width) } else { min_radius }; let track_width = (total_radius - min_radius) / num_tracks as f32; let _track_width_sq = track_width * track_width; let _render_track_width = track_width * (1.0 - p.track_gap); let pix_buf = pixmap.pixels_mut(); let skia_color = rr.mask_color.map(|color| Color::from(color)); let mask_color: PremultipliedColorU8 = match skia_color { Some(color) => PremultipliedColorU8::from_rgba( (color.red() * 255.0) as u8, (color.green() * 255.0) as u8, (color.blue() * 255.0) as u8, (color.alpha() * 255.0) as u8, ) .unwrap(), None => PremultipliedColorU8::from_rgba(0, 0, 0, 0).unwrap(), }; let color_trans: PremultipliedColorU8 = PremultipliedColorU8::from_rgba(0, 0, 0, 0).unwrap(); // Draw the tracks for y in 0..height { for x in 0..width { let dx = x as f32 - center_x; let dy = y as f32 - center_y; let distance = (dx * dx + dy * dy).sqrt(); let _distance_sq = dx * dx + dy * dy; let angle = (dy.atan2(dx) + PI) % TAU; if distance >= min_radius && distance <= total_radius { let track_offset = (distance - min_radius) / track_width; if track_offset.fract() < p.track_gap { continue; } let track_index = (num_tracks - 1).saturating_sub(track_offset.floor() as usize); if track_index < num_tracks { // Adjust angle for clockwise or counter-clockwise let normalized_angle = match p.direction { TurningDirection::Clockwise => angle - p.index_angle, TurningDirection::CounterClockwise => TAU - (angle - p.index_angle), }; let normalized_angle = (normalized_angle + PI) % TAU; let bit_index = ((normalized_angle / TAU) * track_refs[track_index].len() as f32) as usize; // Ensure bit_index is within bounds let bit_index = min(bit_index.saturating_sub(8), track_refs[track_index].len() - 17); let word_index = bit_index / 16; let word_value = if word_index < 
track_refs[track_index].len() / 16 - 1 { let mut build_word: u16 = 0; for bi in 0..16 { build_word |= if track_refs[track_index][bit_index + bi] { 1 } else { 0 }; build_word <<= 1; } build_word } else { 0 }; if word_value != 0 { pix_buf[((y + y_offset) * span + (x + x_offset)) as usize] = mask_color; } } } else { pix_buf[((y + y_offset) * span + (x + x_offset)) as usize] = color_trans; } } } Ok(()) } /// Render a representation of a disk's data to a `tiny_skia::Pixmap`, for a specific quadrant of /// the unit circle. /// Rendering is broken into quadrants to allow for multithreaded rendering of each quadrant, and /// to avoid rendering arcs longer than 90 degrees. pub fn rasterize_track_metadata_quadrant( disk_image: &DiskImage, pixmap: &mut Pixmap, p: &CommonVizParams, r: &RenderTrackMetadataParams, rr: &RenderRasterizationParams, ) -> Result<(), DiskVisualizationError> { let r_tracks = collect_streams(r.side, disk_image); let r_metadata = collect_metadata(r.side, disk_image); if r_tracks.len() != r_metadata.len() { return Err(DiskVisualizationError::InvalidImage); } let quadrant = r.quadrant.unwrap_or(0); let overlap_max = (1024 + 6) * 16; let t_params = p.track_params(r_tracks.len())?; let mut path_builder = PathBuilder::new(); let center = Point::from(t_params.quadrant_center(quadrant)); let draw_metadata_slice = |path_builder: &mut PathBuilder, paint: &mut Paint, start_angle: f32, end_angle: f32, inner_radius: f32, outer_radius: f32, sector_lookup: bool, phys_c: u16, phys_s: u8, element_type: Option<TrackElement>| -> Color { // Draw the outer curve add_arc(path_builder, center, inner_radius, start_angle, end_angle); // Draw line segment to end angle of inner curve path_builder.line_to( center.x + outer_radius * end_angle.cos(), center.y + outer_radius * end_angle.sin(), ); // Draw inner curve back to start angle add_arc(path_builder, center, outer_radius, end_angle, start_angle); // Draw line segment back to start angle of outer curve path_builder.line_to( 
center.x + inner_radius * start_angle.cos(), center.y + inner_radius * start_angle.sin(), ); path_builder.close(); // Use a predefined color for each sector let color; if let Some(element_type) = element_type { match sector_lookup { true => { // If we're drawing a sector lookup bitmap, we encode the physical head, // cylinder, and sector index as r, g, b components. // This is so that we can retrieve a mapping of physical sector from bitmap // x,y coordinates. // Alpha must remain 255 for our values to survive pre-multiplication. color = VizColor::from_rgba8(r.side, phys_c as u8, phys_s, 255); } false => { let generic_elem = GenericTrackElement::from(element_type); color = rr .palette .as_ref() .and_then(move |palette| palette.get(&generic_elem).copied()) .unwrap_or(VizColor::TRANSPARENT); } } } else { color = VizColor::BLACK; } let skia_color = Color::from(color); paint.set_color(skia_color); skia_color }; let (clip_start, clip_end) = t_params.quadrant_clip(r.quadrant.unwrap_or(0)); for draw_markers in [false, true].iter() { for (ti, track_meta) in r_metadata.iter().enumerate() { let mut has_elements = false; let outer_radius = t_params.total_radius - (ti as f32 * t_params.render_track_width); let inner_radius = outer_radius - (t_params.render_track_width * (1.0 - p.track_gap)); let mut paint = Paint { blend_mode: BlendMode::SourceOver, anti_alias: !r.draw_sector_lookup, ..Default::default() }; // Look for metadata items crossing the index, and draw them first. // We limit the maximum index overlap as an 8192 byte sector at the end of a track will // wrap the index twice. 
if !r.draw_sector_lookup && !*draw_markers { for meta_item in track_meta.items.iter() { if meta_item.end >= r_tracks[ti].len() { let meta_length = meta_item.end - meta_item.start; let meta_overlap = meta_item.end % r_tracks[ti].len(); let overlap_long = meta_length > overlap_max; log::trace!( "render_track_metadata_quadrant(): Overlapping metadata item at {}-{} len: {} max: {} long: {}", meta_item.start, meta_item.end, meta_length, overlap_max, overlap_long, ); has_elements = true; let mut start_angle; let mut end_angle; if overlap_long { start_angle = p.index_angle; end_angle = p.index_angle + ((((meta_item.start + overlap_max) % r_tracks[ti].len()) as f32 / r_tracks[ti].len() as f32) * TAU); } else { start_angle = p.index_angle; end_angle = p.index_angle + ((meta_overlap as f32 / r_tracks[ti].len() as f32) * TAU); } if start_angle > end_angle { std::mem::swap(&mut start_angle, &mut end_angle); } (start_angle, end_angle) = match p.direction { TurningDirection::Clockwise => (start_angle, end_angle), TurningDirection::CounterClockwise => (TAU - start_angle, TAU - end_angle), }; if start_angle > end_angle { std::mem::swap(&mut start_angle, &mut end_angle); } // Skip sectors that are outside the current quadrant if end_angle <= clip_start || start_angle >= clip_end { continue; } // Clamp start and end angle to quadrant boundaries if start_angle < clip_start { start_angle = clip_start; } if end_angle > clip_end { end_angle = clip_end; } let start_color = draw_metadata_slice( &mut path_builder, &mut paint, start_angle, end_angle, inner_radius, outer_radius, false, 0, 0, Some(meta_item.element), ); //let overlap_long = false; if overlap_long { // Long elements are gradually faded out across the index to imply they continue. 
let end_color = Color::from_rgba(start_color.red(), start_color.green(), start_color.blue(), 0.0) .unwrap(); let (start_pt, end_pt) = match p.direction { TurningDirection::CounterClockwise => ( Point::from_xy(center.x, 0.0), Point::from_xy(center.x, t_params.total_radius / 8.0), ), TurningDirection::Clockwise => ( Point::from_xy(center.x, center.y), Point::from_xy(center.x, center.y - t_params.total_radius / 8.0), ), }; // Set up a vertical gradient (top to bottom) let gradient = LinearGradient::new( start_pt, //Point::from_xy(center.x, 0.0), end_pt, //Point::from_xy(center.x, total_radius / 8.0), vec![GradientStop::new(0.0, start_color), GradientStop::new(1.0, end_color)], SpreadMode::Pad, Transform::identity(), ) .unwrap(); paint.shader = gradient; } if let Some(path) = path_builder.finish() { pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); } path_builder = PathBuilder::new(); // Reset the path builder for the next sector } } } let mut phys_s: u8 = 0; // Physical sector index, 0-indexed from first sector on track // Draw non-overlapping metadata. for (_mi, meta_item) in track_meta.items.iter().enumerate() { let generic_elem = GenericTrackElement::from(meta_item.element); if let GenericTrackElement::Marker = generic_elem { if !*draw_markers { continue; } } else if *draw_markers { continue; } // Advance physical sector number for each sector header encountered. 
if meta_item.element.is_sector_header() { phys_s = phys_s.wrapping_add(1); } has_elements = true; let mut start_angle = ((meta_item.start as f32 / r_tracks[ti].len() as f32) * TAU) + p.index_angle; let mut end_angle = ((meta_item.end as f32 / r_tracks[ti].len() as f32) * TAU) + p.index_angle; if start_angle > end_angle { std::mem::swap(&mut start_angle, &mut end_angle); } (start_angle, end_angle) = p.direction.adjust_angles((start_angle, end_angle)); // Normalize the angle to the range 0..2π // start_angle = (start_angle % TAU).abs(); // end_angle = (end_angle % TAU).abs(); // Exchange start and end if reversed if start_angle > end_angle { std::mem::swap(&mut start_angle, &mut end_angle); } // Skip sectors that are outside the current quadrant let (hit, (start_angle, end_angle)) = t_params.quadrant_hit_test(quadrant, (start_angle, end_angle)); if !hit { continue; } draw_metadata_slice( &mut path_builder, &mut paint, start_angle, end_angle, inner_radius, outer_radius, r.draw_sector_lookup, ti as u16, phys_s, Some(meta_item.element), ); if let Some(path) = path_builder.finish() { pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); } path_builder = PathBuilder::new(); // Reset the path builder for the next sector } // If a track contained no elements, draw a black ring if !has_elements && r.draw_empty_tracks { draw_metadata_slice( &mut path_builder, &mut paint, clip_start, clip_end, inner_radius, outer_radius, true, 0, 0, None, ); if let Some(path) = path_builder.finish() { pixmap.fill_path(&path, &paint, FillRule::Winding, Transform::identity(), None); } path_builder = PathBuilder::new(); // Reset the path builder for the next sector } } } Ok(()) } /// Rasterize a representation of a specific sector to a `tiny_skia::Pixmap`. /// Unlike other metadata rendering functions, this does not operate per quadrant, but should be /// given a composited pixmap. 
pub fn rasterize_disk_selection( disk_image: &DiskImage, pixmap: &mut Pixmap, p: &CommonVizParams, r: &RenderDiskSelectionParams, ) -> Result<(), DiskVisualizationError> { let track = stream(r.ch, disk_image); let track_len = track.len(); let r_metadata = metadata(r.ch, disk_image); let track_limit = p.track_limit.unwrap_or(MAX_CYLINDER); let num_tracks = min(disk_image.tracks(r.ch.h()) as usize, track_limit); if r.ch.c() >= num_tracks as u16 { return Err(DiskVisualizationError::NoTracks); } let image_size = pixmap.width() as f32; let total_radius = image_size / 2.0; let mut min_radius = p.min_radius_ratio * total_radius; // Scale min_radius to pixel value // If pinning has been specified, adjust the minimum radius. // We subtract any over-dumped tracks from the radius, so that the minimum radius fraction // is consistent with the last standard track. min_radius = if p.pin_last_standard_track { let normalized_track_ct = match num_tracks { 0..50 => 40, 50.. => 80, }; let track_width = (total_radius - min_radius) / normalized_track_ct as f32; let overdump = num_tracks.saturating_sub(normalized_track_ct); p.min_radius_ratio * total_radius - (overdump as f32 * track_width) } else { min_radius }; let track_width = (total_radius - min_radius) / num_tracks as f32; let center = Point::from_xy(image_size / 2.0, image_size / 2.0); let draw_sector_slice = |path_builder: &mut PathBuilder, paint: &mut Paint, start_angle: f32, end_angle: f32, inner_radius: f32, outer_radius: f32, color: Color| -> Color { // Draw the outer curve add_arc(path_builder, center, inner_radius, start_angle, end_angle); // Draw line segment to end angle of inner curve path_builder.line_to( center.x + outer_radius * end_angle.cos(), center.y + outer_radius * end_angle.sin(), ); // Draw inner curve back to start angle add_arc(path_builder, center, outer_radius, end_angle, start_angle); // Draw line segment back to start angle of outer curve path_builder.line_to( center.x + inner_radius * 
start_angle.cos(), center.y + inner_radius * start_angle.sin(), ); path_builder.close(); paint.set_color(color); color }; let (clip_start, clip_end) = match p.direction { TurningDirection::CounterClockwise => (0.0, TAU), TurningDirection::Clockwise => (TAU, 0.0), }; for draw_markers in [false, true].iter() { let ti = r.ch.c() as usize; let track_meta = r_metadata; let outer_radius = total_radius - (ti as f32 * track_width); let inner_radius = outer_radius - (track_width * (1.0 - p.track_gap)); let mut paint = Paint { blend_mode: BlendMode::SourceOver, anti_alias: true, ..Default::default() }; let mut phys_s: u8 = 0; // Physical sector index, 0-indexed from first sector on track // Draw non-overlapping metadata. for (_mi, meta_item) in track_meta.items.iter().enumerate() { let generic_elem = GenericTrackElement::from(meta_item.element); if let GenericTrackElement::Marker = generic_elem { if !*draw_markers { continue; } } else if *draw_markers { continue; } // Advance physical sector number for each sector header encountered. if meta_item.element.is_sector_header() { phys_s = phys_s.wrapping_add(1); } if !meta_item.element.is_sector_data() || ((phys_s as usize) < r.sector_idx) { continue; } let mut path_builder = PathBuilder::new(); let mut start_angle = ((meta_item.start as f32 / track_len as f32) * TAU) + p.index_angle; let mut end_angle = ((meta_item.end as f32 / track_len as f32) * TAU) + p.index_angle; if start_angle > end_angle { std::mem::swap(&mut start_angle, &mut end_angle); } (start_angle, end_angle) = match p.direction { TurningDirection::Clockwise => (start_angle, end_angle), TurningDirection::CounterClockwise => (TAU - start_angle, TAU - end_angle), };
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
true
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/pixmap_to_disk.rs
src/visualization/pixmap_to_disk.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
-------------------------------------------------------------------------- */ use std::{ cmp::min, f32::consts::{PI, TAU}, }; use crate::{ visualization::{RenderTrackDataParams, TurningDirection}, DiskImage, DiskImageError, MAX_CYLINDER, }; use crate::visualization::{CommonVizParams, PixmapToDiskParams}; use tiny_skia::Pixmap; const MFM_GRAYSCALE_RAMP: [u64; 16] = [ 0x8888888888888888, // popcount: 16 0x888888888888888A, // popcount: 17 0x888A88888888888A, // popcount: 18 0x888A88A88888888A, // popcount: 19 0x8A8A88A88888888A, // popcount: 20 0xAA8A88A88888888A, // popcount: 21 0xAA8A88A88888A88A, // popcount: 22 0xAA8A8AA88888A88A, // popcount: 23 0xAA8A8AA88888AA8A, // popcount: 24 0xAAAA8AA88888AA8A, // popcount: 25 0xAAAA8AAA8888AA8A, // popcount: 26 0xAAAA8AAAA888AA8A, // popcount: 27 0xAAAA8AAAA888AAAA, // popcount: 28 0xAAAA8AAAAA88AAAA, // popcount: 29 0xAAAAAAAAAA88AAAA, // popcount: 30 0xAAAAAAAAAAAAAAAA, // popcount: 32 ]; /// We can't collect mutable references to the track streams, so we collect the indices into the /// track pool instead. fn collect_stream_indices(head: u8, disk_image: &mut DiskImage) -> Vec<usize> { disk_image.track_map[head as usize].iter().copied().collect() } /// The reverse of the normal visualization logic, this function takes a pixmap and writes it to /// the disk image. This is completely useless other than for novelty purposes. 
pub fn render_pixmap_to_disk( pixmap: &Pixmap, disk_image: &mut DiskImage, p: &CommonVizParams, r: &RenderTrackDataParams, p2d: &PixmapToDiskParams, ) -> Result<(), DiskImageError> { let (sample_width, sample_height) = p2d.sample_size; let (img_width, img_height) = p2d.img_dimensions.to_tuple(); let span = pixmap.width(); let (x_offset, y_offset) = p2d.img_pos.to_tuple(); if p2d.mask_resolution < 1 || p2d.mask_resolution > 8 { return Err(DiskImageError::ParameterError); } let index_mask: usize = !((1 << p2d.mask_resolution) - 1); log::debug!("render_pixmap_to_disk(): using bit index_mask: {:#08b}", index_mask); // We work in sampling coordinates, so we need to adjust the center and radius to match the // sampling resolution. let center_x = sample_width as f32 / 2.0; let center_y = sample_height as f32 / 2.0; let total_radius = sample_width.min(sample_height) as f32 / 2.0; let mut min_radius = p.min_radius_ratio * total_radius; // Scale min_radius to pixel value let track_indices = collect_stream_indices(r.side, disk_image); let track_limit = p.track_limit.unwrap_or(MAX_CYLINDER); let num_tracks = min(track_indices.len(), track_limit); log::trace!("collected {} track references.", num_tracks); // If pinning has been specified, adjust the minimum radius. // We subtract any over-dumped tracks from the radius, so that the minimum radius fraction // is consistent with the last standard track. min_radius = if p.pin_last_standard_track { let normalized_track_ct = match num_tracks { 0..50 => 40, 50.. 
=> 80, }; let track_width = (total_radius - min_radius) / normalized_track_ct as f32; log::debug!( "render_track_data(): track ct: {} normalized track ct: {}", num_tracks, normalized_track_ct ); let overdump = num_tracks.saturating_sub(normalized_track_ct); p.min_radius_ratio * total_radius - (overdump as f32 * track_width) } else { min_radius }; let track_width = (total_radius - min_radius) / num_tracks as f32; let pix_buf = pixmap.pixels(); let map_sample_uv = |uv: (u32, u32)| -> (u32, u32) { let (x, y) = uv; let x = x as f32 * (img_width as f32 / sample_width as f32); let y = y as f32 * (img_height as f32 / sample_height as f32); (x as u32, y as u32) }; // Sample the image and write sampled pixels to the disk image. // The sampling resolution needs to be quite high, at least 4096x4096, to get a good result // without gaps between pixels on the track which will introduce MFM errors. for v in 0..sample_height { for u in 0..sample_width { let dx = u as f32 - center_x; let dy = v as f32 - center_y; let distance = (dx * dx + dy * dy).sqrt(); let angle = (dy.atan2(dx) + PI) % TAU; if distance >= min_radius && distance <= total_radius { let track_offset = (distance - min_radius) / track_width; if track_offset.fract() < p.track_gap { continue; } let track_index = (num_tracks - 1).saturating_sub(track_offset.floor() as usize); if track_index < p2d.skip_tracks as usize { continue; } if track_index < num_tracks { // Adjust angle via input angle parameter, for clockwise or counter-clockwise turning let mut normalized_angle = match p.direction { TurningDirection::Clockwise => angle - p.index_angle, TurningDirection::CounterClockwise => TAU - (angle - p.index_angle), }; // Normalize the angle to the range 0..2π while normalized_angle < 0.0 { normalized_angle += TAU; } normalized_angle = (normalized_angle + PI) % TAU; if let Some(track) = disk_image.track_pool[track_indices[track_index]].stream_mut() { let bit_index = ((normalized_angle / TAU) * track.len() as f32) as usize; 
let mut render_enable = true; // Control rendering based on metadata if sector masking is enabled. if r.sector_mask && !track.is_data(bit_index, false) { render_enable = false; } if render_enable { // Mask the bit index to the resolution specified let bit_index = bit_index & index_mask; // Ensure bit_index is within bounds let bit_index = min(bit_index, track.len() - 9); // We ignore resolution here - we can only render bytes. let (img_x, img_y) = map_sample_uv((u, v)); let offset = ((img_y + y_offset) * span + (img_x + x_offset)) as usize; if offset < pix_buf.len() { let color = pix_buf[offset]; // We work in monochrome so just take the red channel... let color_value = color.red(); let alpha_value = color.alpha(); // We might want to implement support for grayscale images in the future, but // for now do a simple threshold. let mfm_data: u8 = match color_value { //0..40 => 0x88, 0..128 => p2d.black_byte, _ => p2d.white_byte, }; // Alpha channel controls whether we write the pixel or not if alpha_value >= 128 { track.write_raw_u8(bit_index, mfm_data); } } } } } } } } Ok(()) } /// Produce a 64-bit MFM pattern representing an 8-bit flux density ramp. pub fn gen_ramp_64(value: u8) -> u64 { let base_pattern = 0x8888_8888_8888_8888u64; // Base MFM pattern mapping to 0 (darkest color) let mut result = base_pattern; // Slot positions in the 64-bit base pattern (middle of each run of 3 zeros) const SLOTS: [u8; 16] = [61, 57, 53, 49, 45, 41, 37, 33, 29, 25, 21, 17, 13, 9, 5, 1]; // Iterate through the 8 bits of the input value for i in 0..8 { if value & (1 << i) != 0 { // Set a bit in the corresponding slot pair result |= 1 << SLOTS[i * 2]; // First slot in the pair result |= 1 << SLOTS[i * 2 + 1]; // Second slot in the pair } } result } /// Render a grayscale pixmap to disk. /// Applesauce renders an 8-bit grayscale SVG image using 0.25 degree arcs (1440 slices per track) /// Therefore we write 64-bit values to the disk image at a time. 
pub fn render_pixmap_to_disk_grayscale( pixmap: &Pixmap, disk_image: &mut DiskImage, p: &CommonVizParams, r: &RenderTrackDataParams, p2d: &PixmapToDiskParams, ) -> Result<(), DiskImageError> { let (sample_width, sample_height) = p2d.sample_size; let (img_width, img_height) = p2d.img_dimensions.to_tuple(); let span = pixmap.width(); let (x_offset, y_offset) = p2d.img_pos.to_tuple(); // if p2d.mask_resolution < 1 || p2d.mask_resolution > 8 { // return Err(DiskImageError::ParameterError); // } let color_ramp_bytes = (0..256) .map(|v| MFM_GRAYSCALE_RAMP[v / 16].to_be_bytes()) .collect::<Vec<[u8; 8]>>(); // Ignore the mask resolution and use 6 bits for now. let index_mask: usize = !((1 << 6) - 1); log::debug!( "render_pixmap_to_disk_grayscale(): using bit index_mask: {:#08b}", index_mask ); // We work in sampling coordinates, so we need to adjust the center and radius to match the // sampling resolution. let center_x = sample_width as f32 / 2.0; let center_y = sample_height as f32 / 2.0; let total_radius = sample_width.min(sample_height) as f32 / 2.0; let mut min_radius = p.min_radius_ratio * total_radius; // Scale min_radius to pixel value let track_indices = collect_stream_indices(r.side, disk_image); let track_limit = p.track_limit.unwrap_or(MAX_CYLINDER); let num_tracks = min(track_indices.len(), track_limit); log::trace!("collected {} track references.", num_tracks); // If pinning has been specified, adjust the minimum radius. // We subtract any over-dumped tracks from the radius, so that the minimum radius fraction // is consistent with the last standard track. min_radius = if p.pin_last_standard_track { let normalized_track_ct = match num_tracks { 0..50 => 40, 50.. 
=> 80, }; let track_width = (total_radius - min_radius) / normalized_track_ct as f32; log::debug!( "render_track_data(): track ct: {} normalized track ct: {}", num_tracks, normalized_track_ct ); let overdump = num_tracks.saturating_sub(normalized_track_ct); p.min_radius_ratio * total_radius - (overdump as f32 * track_width) } else { min_radius }; let track_width = (total_radius - min_radius) / num_tracks as f32; let pix_buf = pixmap.pixels(); let map_sample_uv = |uv: (u32, u32)| -> (u32, u32) { let (x, y) = uv; let x = x as f32 * (img_width as f32 / sample_width as f32); let y = y as f32 * (img_height as f32 / sample_height as f32); (x as u32, y as u32) }; // Sample the image and write sampled pixels to the disk image. // The sampling resolution needs to be quite high, at least 4096x4096, to get a good result // without gaps between pixels on the track which will introduce MFM errors. for v in 0..sample_height { for u in 0..sample_width { let dx = u as f32 - center_x; let dy = v as f32 - center_y; let distance = (dx * dx + dy * dy).sqrt(); let angle = (dy.atan2(dx) + PI) % TAU; if distance >= min_radius && distance <= total_radius { let track_offset = (distance - min_radius) / track_width; if track_offset.fract() < p.track_gap { continue; } let track_index = (num_tracks - 1).saturating_sub(track_offset.floor() as usize); if track_index < p2d.skip_tracks as usize { continue; } if track_index < num_tracks { // Adjust angle via input angle parameter, for clockwise or counter-clockwise turning let mut normalized_angle = match p.direction { TurningDirection::Clockwise => angle - p.index_angle, TurningDirection::CounterClockwise => TAU - (angle - p.index_angle), }; // Normalize the angle to the range 0..2π while normalized_angle < 0.0 { normalized_angle += TAU; } normalized_angle = (normalized_angle + PI) % TAU; if let Some(track) = disk_image.track_pool[track_indices[track_index]].stream_mut() { let bit_index = ((normalized_angle / TAU) * track.len() as f32) as usize; 
// Mask the bit index to the resolution specified let bit_index = bit_index & index_mask; // Ensure bit_index is within bounds let bit_index = min(bit_index, track.len() - 64); //let bit_index = (bit_index % track.len()) - 64; let mut render_enable = true; // Control rendering based on metadata if sector masking is enabled. if r.sector_mask && !track.is_data(bit_index, false) { render_enable = false; } if render_enable { //log::debug!("rendering enabled..."); let (img_x, img_y) = map_sample_uv((u, v)); let offset = ((img_y + y_offset) * span + (img_x + x_offset)) as usize; if offset < pix_buf.len() { let color = pix_buf[offset]; // We work in monochrome so just take the green channel... let color_value = color.green(); let alpha_value = color.alpha(); // Alpha channel controls whether we write the pixel or not if alpha_value >= 128 { track.write_raw_buf(&color_ramp_bytes[color_value as usize], bit_index); } } } } } } } } Ok(()) }
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false
dbalsom/fluxfox
https://github.com/dbalsom/fluxfox/blob/b4c04b51746e5fe7769f49a1b32b8caad426fc81/src/visualization/mod.rs
src/visualization/mod.rs
/* FluxFox https://github.com/dbalsom/fluxfox Copyright 2024-2025 Daniel Balsom Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. -------------------------------------------------------------------------- */ //! # FluxFox Visualization //! The `visualization` module provides rendering functions for disk images. //! This module requires the `viz` feature to be enabled. //! //! The general principle is that a disk image is rendered on the unit circle, where angle //! 0 in radians corresponds to the index position of the disk. The angle increases either //! clockwise or counter-clockwise from 0 to 2π, depending on the [TurningDirection] specified. //! //! [TurningDirection] specifies the way the data is mapped to the unit circle, and is the inverse //! of the actual physical rotation of the disk. The default turning is clockwise for side 0, //! and counter-clockwise for side 1. This gives an accurate depiction of how the data would appear //! looking top-down at each side of a standard PC floppy disk. //! //! 
## Visualization layers //! Visualizations can be constructed of several different layers. //! //! - The `data layer` is a visualization of the data on the disk, and can be optionally decoded //! for MFM tracks in rasterization mode. //! - The `metadata layer` is a visualization of track elements such as markers, sector headers, //! and sector data. //! - `Mask layers` are visualizations of bit masks, including weak bit masks and error maps. //! The mask type may be specified by [RenderMaskType]. //! //! Layers are typically rendered in order with some sort of blend operation, with mask layers on //! top. //! //! ## Visualization types //! //! Two primary rendering modes are supported, `rasterization` and `vectorization`. //! //! - `Rasterization` is much slower but can resolve much higher levels of detail for the data //! layer. It is subject to moiré patterns and aliasing artifacts, but good results can be //! achieved by supersampling. //! //! - `Vectorization` can be much faster, especially for rendering the data layer of a disk image. //! Vectorization methods return display lists, which may either be rasterized directly, or //! converted to SVG, or triangulated and displayed by a GPU. The main advantage of vectorization //! is that rasterizing a high resolution vector image is not subject to the same quadratic time //! complexity as rasterizing a high resolution raster image, and produces crisper results at //! lower resolutions without supersampling. //! //! ## Helper crates //! //! - The `fluxfox_tiny_skia` crate provides a `tiny_skia` backend for fluxfox's rasterization //! functions. //! - The `fluxfox_svg` crate provides a backend for fluxfox's vectorization functions capable of //! saving display lists to SVG files with many configurable options. //! //! ## Examples //! See the `imgviz` example for a demonstration of how to use the visualization functions, //! `fluxfox_tiny_skia`, and `fluxfox_svg`. //! 
pub mod data_segmenter;
#[cfg(feature = "tiny_skia")]
pub mod pixmap_to_disk;
pub mod prelude;
#[cfg(feature = "tiny_skia")]
pub mod rasterize_disk;
pub mod types;
pub mod vectorize_disk;

use crate::{
    bitstream_codec::TrackDataStream,
    track_schema::{GenericTrackElement, TrackMetadata},
    visualization::types::{
        color::VizColor,
        shapes::{VizDimensions, VizPoint2d, VizRect, VizRotation},
    },
    DiskCh,
    DiskImage,
    DiskVisualizationError,
    FoxHashMap,
    MAX_CYLINDER,
};
use std::{
    cmp::min,
    f32::consts::{PI, TAU},
};

use bit_vec::BitVec;

/// A vector data visualization is broken up into 1440 slices, representing four slices for
/// each degree angle. This is designed to roughly fit the popcnt of each slice into a u8, even
/// for ED disks (400_000/1440 = 277.78, but popcnt cannot reach that maximum value).
/// This is a compromise to allow for a simple lookup table to map popcnt to a grayscale value.
/// Changing this value would require adjusting the table.
pub(crate) const VIZ_SLICES: usize = 1440;

pub(crate) const DEFAULT_INNER_RADIUS_RATIO: f32 = 0.30; // Matches HxC default for 'dummy' disk viz

/// Create a lookup table to map a u8 value to a grayscale gradient value based on the number of
/// bits set in the u8 value (popcount).
/// Built at compile time: index `i`'s popcount (0..=8) selects one of nine evenly spaced
/// grayscale levels from 0 to 255.
const POPCOUNT_TABLE: [u8; 256] = {
    let values: [u8; 9] = [0, 32, 64, 96, 128, 160, 192, 224, 255];
    let mut table = [0; 256];
    let mut i = 0;
    while i < 256 {
        table[i] = values[i.count_ones() as usize];
        i += 1;
    }
    table
};

/// A simple trait to allow for rotation of visualization elements
pub trait VizRotate {
    /// Produce a rotated copy of the element
    fn rotate(self, rotation: &VizRotation) -> Self;
}

use crate::visualization::prelude::VizElementDisplayList;

#[cfg(feature = "tiny_skia")]
pub use pixmap_to_disk::{render_pixmap_to_disk, render_pixmap_to_disk_grayscale};
#[cfg(feature = "tiny_skia")]
pub use rasterize_disk::rasterize_track_data;
#[cfg(feature = "tiny_skia")]
pub use rasterize_disk::render_track_mask;

/// A map type selector for
visualization functions. #[derive(Copy, Clone, Debug)] pub enum RenderMaskType { /// Select the weak bit mask for rendering WeakBits, /// Select the bitstream codec error mask for rendering Errors, } #[derive(Copy, Clone, Default)] pub enum RenderWinding { #[default] Clockwise, CounterClockwise, } /// A [RenderGeometry] enum specifies what geometry to generate for metadata element sectors. /// This is useful if your rasterizer cannot fill concave paths - you can use an arc and stroke /// it at the track width instead. #[derive(Copy, Clone, Default)] pub enum RenderGeometry { #[default] Sector, Arc, } /// Parameter struct for use with display list rasterization functions #[derive(Clone, Default)] pub struct RenderVectorizationParams { /// View box dimensions to use for the visualization. pub view_box: VizRect<f32>, /// Image background color to use for the visualization. If None, background will be transparent. pub image_bg_color: Option<VizColor>, /// Background color to use for the disk surface, in absence of any rendered elements. /// If None, the disk surface will be transparent between tracks (determined by track_gap). pub disk_bg_color: Option<VizColor>, /// Color to use when rendering a track bit mask. pub mask_color: Option<VizColor>, /// Offset for the output of the rasterization within the destination pixmap, in pixels. If /// None, the offset will be set to (0, 0) (no offset). pub pos_offset: Option<VizPoint2d<f32>>, } /// Parameter struct for use with display list rasterization functions #[derive(Clone)] pub struct RenderRasterizationParams { /// Dimensions of the image to be rendered. pub image_size: VizDimensions, /// Supersampling factor to use. pub supersample: u32, /// Background color to use for area outside of disk ring. If None, the image will have a /// transparent background outside the disk surfaces. pub image_bg_color: Option<VizColor>, /// Background color to use for the disk surface, in absence of any rendered elements. 
/// If None, the disk surface will be transparent where elements are not rendered.
    pub disk_bg_color: Option<VizColor>,
    /// Color to use when rendering a track bit mask.
    pub mask_color: Option<VizColor>,
    /// Palette to use for rasterizing metadata elements. Can be set to None if not rendering
    /// metadata.
    pub palette: Option<FoxHashMap<GenericTrackElement, VizColor>>,
    /// Offset for the output of the rasterization within the destination pixmap, in pixels. If
    /// None, the offset will be set to (0, 0) (no offset).
    pub pos_offset: Option<VizPoint2d<u32>>,
}

impl RenderRasterizationParams {
    /// Return the full resolution of the image to be rendered, taking into account the
    /// supersampling factor.
    pub fn render_size(&self) -> VizDimensions {
        self.image_size.scale(self.supersample)
    }
}

/// Common parameters for all rendering functions
#[derive(Clone)]
pub struct CommonVizParams {
    /// Outer radius of the visualization in pixels. This should equal the width of a square
    /// destination pixmap, divided by two. Pixmap dimensions must be square, and ideally a power
    /// of two. If `None`, the radius will be set to 0.5, to create a rendering with normalized
    /// coordinates from (0.0, 0.0) to (1.0, 1.0). You can then translate the image yourself using
    /// a transformation matrix before rendering.
    pub radius: Option<f32>,
    /// Maximum outer radius as a fraction of total radius
    /// The outside of the first track will be rendered at this radius.
    pub max_radius_ratio: f32,
    /// Minimum inner radius as a fraction of total radius (0.333) == 1/3 of total radius
    /// If `pin_last_track` is false, the inside of the last track will be rendered at this radius.
    /// If `pin_last_track` is true, the inside of the last standard track will be rendered at this
    /// radius, but non-standard or over-dumped tracks will be rendered at a smaller radius within.
/// This is useful for keeping proportions consistent between disks with different track counts, /// if for example, you are rendering a slideshow of various disk images. pub min_radius_ratio: f32, /// Offset for points produced by the rendering function. This is useful for rendering a /// visualization off-center. If `None`, the offset will be set to (0.0, 0.0) (no offset). /// Note: If you are intending to rasterize the resulting display list and wish to say, place /// two visualizations side by side, you should set this value to None and use the `pos_offset` /// field of the [RenderRasterizationParams] struct instead. pub pos_offset: Option<VizPoint2d<f32>>, /// Angle of index position / start of track, in radians. The default value is 0 which will /// render the disk with the index position at the 3 o'clock position. pub index_angle: f32, /// Maximum number of tracks to render. If None, no limit will be enforced. pub track_limit: Option<usize>, /// Set the inner radius to the last standard track instead of last track /// This keeps proportions consistent between disks with different track counts pub pin_last_standard_track: bool, /// Width of the gap between tracks as a fraction of total track width (0.0 to 1.0) /// Track width itself is determined by the track count and the inner and outer radii. pub track_gap: f32, /// Whether to allow tracks to overlap. If true, the track width will be increased slightly to /// avoid rendering artifacts between tracks. pub track_overlap: bool, /// How the data should visually turn on the disk surface, starting from the index position. /// Note: this is the logical reverse of the physical rotation of the disk. 
pub direction: TurningDirection, } impl Default for CommonVizParams { fn default() -> Self { Self { radius: Some(0.5), max_radius_ratio: 1.0, min_radius_ratio: DEFAULT_INNER_RADIUS_RATIO, pos_offset: Some(VizPoint2d::new(0.0, 0.0)), index_angle: 0.0, track_limit: None, pin_last_standard_track: true, track_gap: 0.1, track_overlap: true, direction: TurningDirection::CounterClockwise, } } } impl CommonVizParams { pub(crate) fn track_params(&self, num_tracks: usize) -> Result<InternalTrackParams, DiskVisualizationError> { let mut tp = InternalTrackParams::default(); let track_limit = self.track_limit.unwrap_or(MAX_CYLINDER); tp.track_gap = self.track_gap; tp.num_tracks = min(num_tracks, track_limit); if tp.num_tracks == 0 { return Err(DiskVisualizationError::NoTracks); } tp.total_radius = self.radius.unwrap_or(0.5); tp.center = VizPoint2d::new(tp.total_radius, tp.total_radius); tp.min_radius = self.min_radius_ratio * tp.total_radius; tp.max_radius = self.max_radius_ratio * tp.total_radius; // If pinning has been specified, adjust the minimum radius. // We subtract any over-dumped tracks from the radius, so that the minimum radius fraction // is consistent with the last standard track. tp.min_radius = if self.pin_last_standard_track { let normalized_track_ct = match num_tracks { 0..50 => 40, 50.. => 80, }; let track_width = (tp.total_radius - tp.min_radius) / normalized_track_ct as f32; let overdump = num_tracks.saturating_sub(normalized_track_ct); self.min_radius_ratio * tp.total_radius - (overdump as f32 * track_width) } else { tp.min_radius }; if tp.max_radius <= tp.min_radius { return Err(DiskVisualizationError::InvalidParameter( "max_radius must be greater than min_radius".to_string(), )); } // Calculate the rendered width of each track, excluding the track gap. 
tp.total_track_width = (tp.max_radius - tp.min_radius) / num_tracks as f32; tp.render_track_width = tp.total_track_width * (1.0 - self.track_gap); if self.track_gap == 0.0 { // slightly increase the track width to avoid rendering sparkles between tracks if there's // 0 gap specified, due to floating point errors tp.radius_adjust = -(tp.total_track_width * 0.01) / 2.0; tp.render_track_width = tp.total_track_width * 1.01; } else { tp.radius_adjust = tp.total_track_width * (tp.track_gap / 2.0); tp.render_track_width = tp.total_track_width * (1.0 - self.track_gap); } if tp.total_track_width <= 0.0 { // Nothing to render! return Err(DiskVisualizationError::NotVisible); } Ok(tp) } } #[derive(Clone, Default)] pub(crate) struct InternalTrackParams { pub(crate) total_radius: f32, pub(crate) min_radius: f32, pub(crate) max_radius: f32, pub(crate) num_tracks: usize, pub(crate) center: VizPoint2d<f32>, pub(crate) total_track_width: f32, pub(crate) render_track_width: f32, pub(crate) radius_adjust: f32, pub(crate) track_gap: f32, pub(crate) track_overlap: f32, } impl InternalTrackParams { const QUADRANT_ANGLES: [(f32, f32); 4] = [ (0.0, PI / 2.0), (PI / 2.0, PI), (PI, 3.0 * PI / 2.0), (3.0 * PI / 2.0, TAU), ]; // Coordinate offset factors for each quadrant, when rendering quadrant pixmaps. // These offsets are scaled by the radius of the disk. // For example, and offset of (1.0, 1.0) and a radius of 512 will push all coordinates // to the right and down by 512 pixels (rendering the lower-right quadrant). const QUADRANT_OFFSETS: [(f32, f32); 4] = [ // Lower-right quadrant (1.0, 1.0), // Lower-left quadrant (0.0, 1.0), // Upper-right quadrant (1.0, 0.0), // Upper-left quadrant (0.0, 0.0), ]; /// Return the center point in pixels for a given quadrant, for rendering a quadrant pixmap. 
#[inline] pub(crate) fn quadrant_center(&self, quadrant: u8) -> VizPoint2d<f32> { let (x, y) = Self::QUADRANT_OFFSETS[quadrant as usize & 0x03]; VizPoint2d::new(x * self.total_radius, y * self.total_radius) } /// Return the start and end angles in radians for a given quadrant #[inline] pub(crate) fn quadrant_clip(&self, quadrant: u8) -> (f32, f32) { Self::QUADRANT_ANGLES[quadrant as usize & 0x03] } // Return a tuple of (bool, (f32, f32)) where the bool is true if the start and end angles // overlap the quadrant, and the tuple parameter contains the clipped start and end angles. // The angles are not clipped if false is returned. #[inline] pub(crate) fn quadrant_hit_test(&self, quadrant: u8, angles: (f32, f32)) -> (bool, (f32, f32)) { let (clip_start, clip_end) = self.quadrant_clip(quadrant); if angles.1 <= clip_start || angles.0 >= clip_end { // No overlap (false, (angles.0, angles.1)) } else { // Overlap - clip the angles to the quadrant (true, (angles.0.max(clip_start), angles.1.min(clip_end))) } } /// Return the calculated outer, middle and inner radii for the given cylinder pub(crate) fn radii(&self, cylinder: usize, vector: bool) -> (f32, f32, f32) { match vector { true => { let outer = self.total_radius - (cylinder as f32 * self.total_track_width); let inner = outer - self.total_track_width; let middle = outer - self.total_track_width / 2.0; (outer - self.radius_adjust, middle, inner + self.radius_adjust) } false => { let outer = self.total_radius - (cylinder as f32 * self.total_track_width); let inner = outer - self.total_track_width; let middle = outer - self.total_track_width / 2.0; (outer - self.radius_adjust, middle, inner + self.radius_adjust) } } } } /// Parameter struct for use with disk surface rendering functions pub struct RenderTrackDataParams { /// Which side of disk to render. This may seem superfluous as we render one head at a time, /// but the data is stored within the [VizElement] of the resulting display list. 
pub side: u8, /// Attempt to decode data on a track for more visual contrast. This will only work if the /// encoding and track schema supports random-access decoding. GCR encoding is not supported. /// A request to decode an incompatible track will be ignored. pub decode: bool, /// Mask decoding or encoding operations to sector data regions. This will only work if the /// track defines sector data elements. A request to mask an incompatible track will be ignored. /// The main advantage of using this flag with `decode` is to avoid visualizing write splices /// outside of sector data regions that cause ugly flips in contrast. pub sector_mask: bool, /// Resolution to render data at (Bit or Byte). Bit resolution requires extremely high /// resolution rasterized output to be legible - it's fun but not really practical. pub resolution: ResolutionType, /// Number of slices to use to segment the data. This is only used during vector-based rendering. pub slices: usize, /// Factor to overlap slices by. This can avoid rendering artifacts at full opacity, but can /// cause artifacts if fractional opacity is used. pub overlap: f32, } impl Default for RenderTrackDataParams { fn default() -> Self { Self { side: 0, decode: false, sector_mask: false, resolution: ResolutionType::Byte, slices: VIZ_SLICES, // Default 10% overlap overlap: 0.1, } } } /// Parameter struct for use with disk metadata rendering functions pub struct RenderTrackMetadataParams { /// Which quadrant to render (0-3) if Some. If None, all quadrants will be rendered. 
pub quadrant: Option<u8>, /// Which side of disk to render pub side: u8, /// The type of geometry to generate for metadata elements pub geometry: RenderGeometry, /// Which point winding to use when creating sectors and other closed paths pub winding: RenderWinding, /// Whether to draw empty tracks as black rings pub draw_empty_tracks: bool, /// Draw a sector lookup bitmap instead of color information pub draw_sector_lookup: bool, } impl Default for RenderTrackMetadataParams { fn default() -> Self { Self { quadrant: None, side: 0, geometry: RenderGeometry::default(), winding: RenderWinding::default(), draw_empty_tracks: false, draw_sector_lookup: false, } } } #[derive(Default, Copy, Clone)] pub enum RenderDiskSelectionType { #[default] Sector, Track, } /// Parameter struct for use with disk selection rendering functions /// This is useful for rendering a single sector or track on a disk image. /// Note: more than one VizElement may be emitted for a single sector, depending on the size /// of the sector. Arcs are split at quadrant boundaries to avoid rendering artifacts. 
pub struct RenderDiskSelectionParams { /// The selection type (Sector or Track) pub selection_type: RenderDiskSelectionType, /// The physical cylinder and head to render pub ch: DiskCh, /// The physical sector index to render, 1-offset pub sector_idx: usize, /// Color to use to draw sector arc pub color: VizColor, } impl Default for RenderDiskSelectionParams { fn default() -> Self { Self { ch: DiskCh::new(0, 0), selection_type: RenderDiskSelectionType::default(), sector_idx: 1, color: VizColor::WHITE, } } } /// Parameter struct for use with disk hit test rendering functions pub struct RenderDiskHitTestParams { pub side: u8, /// The hit test selection type (Sector or Track) pub selection_type: RenderDiskSelectionType, /// The type of geometry to generate for the selection pub geometry: RenderGeometry, /// The coordinate to hit test pub point: VizPoint2d<f32>, } impl Default for RenderDiskHitTestParams { fn default() -> Self { Self { side: 0, selection_type: RenderDiskSelectionType::default(), geometry: RenderGeometry::Arc, point: VizPoint2d::new(0.0, 0.0), } } } #[derive(Default)] pub struct DiskHitTestResult { pub display_list: Option<VizElementDisplayList>, pub angle: f32, pub bit_index: usize, pub track: u16, } /// Determines the direction that the linear track data is mapped to the disk surface during /// rendering, starting from the index position, either clockwise or counter-clockwise. /// This is not the physical rotation of the disk, as they are essentially opposites. /// /// Typically, Side 0, the bottom-facing side of a disk, rotates counter-clockwise when viewed /// from the bottom, and Side 1, the top-facing side, rotates clockwise, and the turning will be /// the opposite of the physical rotation. 
#[derive(Copy, Clone, Debug, Default)]
pub enum TurningDirection {
    #[default]
    Clockwise,
    CounterClockwise,
}

impl TurningDirection {
    /// Return the opposite turning direction.
    pub fn opposite(&self) -> Self {
        match self {
            TurningDirection::Clockwise => TurningDirection::CounterClockwise,
            TurningDirection::CounterClockwise => TurningDirection::Clockwise,
        }
    }

    /// Map an angle (radians) into this turning direction: identity for clockwise,
    /// mirrored (TAU - angle) for counter-clockwise.
    pub fn adjust_angle(&self, angle: f32) -> f32 {
        match self {
            TurningDirection::Clockwise => angle,
            TurningDirection::CounterClockwise => TAU - angle,
        }
    }

    /// Map a (start, end) angle pair into this turning direction.
    /// NOTE(review): for counter-clockwise this mirrors each angle without swapping the pair,
    /// so the result may have start > end — confirm callers expect this ordering.
    pub fn adjust_angles(&self, angles: (f32, f32)) -> (f32, f32) {
        match self {
            TurningDirection::Clockwise => (angles.0, angles.1),
            TurningDirection::CounterClockwise => (TAU - angles.0, TAU - angles.1),
        }
    }
}

impl From<u8> for TurningDirection {
    /// 0 maps to Clockwise; any other value maps to CounterClockwise.
    fn from(val: u8) -> Self {
        match val {
            0 => TurningDirection::Clockwise,
            _ => TurningDirection::CounterClockwise,
        }
    }
}

/// Determines the visualization resolution - either byte resolution or bit resolution.
/// Bit resolution requires extremely high resolution output to be legible.
/// NOTE(review): this doc comment appears to describe [ResolutionType] below rather than
/// this parameter struct — consider moving it.
pub struct PixmapToDiskParams {
    /// Dimensions of the source pixmap to sample from.
    pub img_dimensions: VizDimensions,
    /// Position within the source pixmap to begin sampling at.
    pub img_pos: VizPoint2d<u32>,
    /// Sampling grid dimensions.
    pub sample_size: (u32, u32),
    /// Number of outer tracks to skip when writing data.
    pub skip_tracks: u16,
    /// Byte value written for "black" samples.
    pub black_byte: u8,
    /// Byte value written for "white" samples.
    pub white_byte: u8,
    /// Mask resolution in bits.
    pub mask_resolution: u8,
}

impl Default for PixmapToDiskParams {
    fn default() -> Self {
        Self {
            img_dimensions: VizDimensions::default(),
            img_pos: VizPoint2d::default(),
            sample_size: (4096, 4096),
            skip_tracks: 0,
            black_byte: 0x88, // Represents a valid MFM pattern with low flux density (0b1000_1000)
            white_byte: 0x66, // Represents a valid MFM pattern with high flux density (0b0110_0110)
            mask_resolution: 3, // 3 bits = 0b0111 or 8 bit mask
        }
    }
}

/// Visualization sampling resolution: one element per bit, or one per byte (default).
#[derive(Copy, Clone, Default, Debug)]
pub enum ResolutionType {
    Bit,
    #[default]
    Byte,
}

/// Look up the track data stream for the given cylinder/head.
/// Panics if the track does not exist or has no stream (internal use only;
/// callers are expected to pass a valid `ch`).
fn stream(ch: DiskCh, disk_image: &DiskImage) -> &TrackDataStream {
    disk_image.track_map[ch.h() as usize]
        .get(ch.c() as usize)
        .map(|track_i| disk_image.track_pool[*track_i].stream().unwrap())
        .unwrap()
}

/// Look up the track metadata for the given cylinder/head.
/// Panics if the track does not exist or has no metadata (internal use only).
fn metadata(ch: DiskCh, disk_image: &DiskImage) -> &TrackMetadata {
    disk_image.track_map[ch.h() as usize]
        .get(ch.c() as usize)
        .map(|track_i| disk_image.track_pool[*track_i].metadata().unwrap())
        .unwrap()
}

/// Collect the data streams for every track on the given head, in track order.
/// Tracks without a stream are silently skipped.
fn collect_streams(head: u8, disk_image: &DiskImage) -> Vec<&TrackDataStream> {
    disk_image.track_map[head as usize]
        .iter()
        .filter_map(|track_i| disk_image.track_pool[*track_i].stream())
        .collect()
}

/// Collect the weak-bit masks for every track on the given head, in track order.
/// Tracks without a stream are silently skipped.
fn collect_weak_masks(head: u8, disk_image: &DiskImage) -> Vec<&BitVec> {
    disk_image.track_map[head as usize]
        .iter()
        .filter_map(|track_i| disk_image.track_pool[*track_i].stream().map(|track| track.weak_mask()))
        .collect()
}

/// Collect the codec error maps for every track on the given head, in track order.
/// Tracks without a stream are silently skipped.
fn collect_error_maps(head: u8, disk_image: &DiskImage) -> Vec<&BitVec> {
    disk_image.track_map[head as usize]
        .iter()
        .filter_map(|track_i| disk_image.track_pool[*track_i].stream().map(|track| track.error_map()))
        .collect()
}

/// Collect the metadata for every track on the given head, in track order.
/// Tracks without metadata are silently skipped.
fn collect_metadata(head: u8, disk_image: &DiskImage) -> Vec<&TrackMetadata> {
    disk_image.track_map[head as usize]
        .iter()
        .filter_map(|track_i| disk_image.track_pool[*track_i].metadata())
        .collect()
}
rust
MIT
b4c04b51746e5fe7769f49a1b32b8caad426fc81
2026-01-04T20:24:04.021295Z
false