repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/aggregation/root.rs | ceno_recursion/src/aggregation/root.rs | // TODO: assert that the shard ram ec point is `PointAtInfinity`
// let is_sum_x_zero = ec_sum.x.is_zero(builder);
// let is_sum_y_zero = ec_sum.y.is_zero(builder);
// builder.assert_usize_eq(is_sum_x_zero, Usize::from(1));
// builder.assert_usize_eq(is_sum_y_zero, Usize::from(1));
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/aggregation/types.rs | ceno_recursion/src/aggregation/types.rs | // TODO: enable this
// #[derive(Debug, Clone, AlignedBorrow)]
// pub struct ContinuationPvs<T> {
// pub sum: SepticPoint<T>,
// }
// impl<C: Config> ContinuationPvs<Felt<C::F>> {
// pub fn uninit(builder: &mut Builder<C>) -> Self {
// todo!()
// }
// }
// #[derive(Debug, Clone, AlignedBorrow)]
// #[repr(C)]
// pub struct VmVerifierPvs<T> {
// /// The merged execution state of all the segments this circuit aggregates.
// pub connector: VmConnectorPvs<T>,
// /// The state before/after all the segments this circuit aggregates.
// // (TODO) pub shard_ram_connector: ContinuationPvs<T>,
// /// The merkle root of all public values. This is only meaningful when the last segment is
// /// aggregated by this circuit.
// pub public_values_commit: [T; DIGEST_SIZE],
// }
// impl<C: Config> VmVerifierPvs<Felt<C::F>> {
// pub fn uninit(builder: &mut Builder<C>) -> Self {
// VmVerifierPvs {
// connector: VmConnectorPvs {
// initial_pc: builder.uninit(),
// final_pc: builder.uninit(),
// exit_code: builder.uninit(),
// is_terminate: builder.uninit(),
// },
// // shard_ram_connector: builder.uninit(),
// public_values_commit: array::from_fn(|_| builder.uninit()),
// }
// }
// }
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/aggregation/mod.rs | ceno_recursion/src/aggregation/mod.rs | use crate::zkvm_verifier::{
binding::{E, F, ZKVMProofInput, ZKVMProofInputVariable},
verifier::verify_zkvm_proof,
};
use ceno_zkvm::{
instructions::riscv::constants::{END_PC_IDX, EXIT_CODE_IDX, INIT_PC_IDX},
scheme::ZKVMProof,
structs::ZKVMVerifyingKey,
};
use ff_ext::BabyBearExt4;
use mpcs::{Basefold, BasefoldRSParams};
use openvm_circuit::{
arch::{
MemoryConfig, SystemConfig, VirtualMachine, VmInstance, instructions::program::Program,
},
system::program::trace::VmCommittedExe,
utils::air_test_impl,
};
use openvm_stark_backend::config::{PcsProverData, Val};
use internal::InternalVmVerifierConfig;
use openvm_continuations::{
C,
verifier::{
common::types::VmVerifierPvs,
internal::types::{InternalVmVerifierInput, InternalVmVerifierPvs, VmStarkProof},
},
};
#[cfg(feature = "gpu")]
use openvm_cuda_backend::engine::GpuBabyBearPoseidon2Engine as BabyBearPoseidon2Engine;
use openvm_native_circuit::{NativeBuilder, NativeConfig};
use openvm_native_compiler::{
asm::AsmBuilder,
conversion::{CompilerOptions, convert_program},
prelude::*,
};
use openvm_native_recursion::hints::Hintable;
use openvm_sdk::{
SC,
commit::AppExecutionCommit,
config::DEFAULT_NUM_CHILDREN_INTERNAL,
prover::vm::{new_local_prover, types::VmProvingKey},
};
use openvm_stark_backend::{
config::{Com, StarkGenericConfig},
engine::StarkEngine,
};
#[cfg(not(feature = "gpu"))]
use openvm_stark_sdk::config::baby_bear_poseidon2::BabyBearPoseidon2Engine;
use openvm_stark_sdk::{
config::{
FriParameters, baby_bear_poseidon2::BabyBearPoseidon2Config,
fri_params::standard_fri_params_with_100_bits_conjectured_security,
},
engine::StarkFriEngine,
openvm_stark_backend::keygen::types::MultiStarkVerifyingKey,
p3_bn254_fr::Bn254Fr,
};
use p3::field::FieldAlgebra;
use serde::{Deserialize, Serialize};
use std::{borrow::Borrow, sync::Arc, time::Instant};
pub type RecPcs = Basefold<E, BasefoldRSParams>;
use openvm_circuit::{
arch::{
CONNECTOR_AIR_ID, PROGRAM_AIR_ID, PROGRAM_CACHED_TRACE_INDEX, PUBLIC_VALUES_AIR_ID,
SingleSegmentVmProver,
hasher::{Hasher, poseidon2::vm_poseidon2_hasher},
instructions::exe::VmExe,
},
system::{memory::CHUNK, program::trace::compute_exe_commit},
};
use openvm_native_compiler::{
asm::AsmConfig,
ir::{Builder, Config, Felt},
};
use openvm_sdk::util::check_max_constraint_degrees;
use openvm_stark_backend::proof::Proof;
mod internal;
mod root;
mod types;
pub type InnerConfig = AsmConfig<F, E>;
pub const LEAF_LOG_BLOWUP: usize = 1;
pub const INTERNAL_LOG_BLOWUP: usize = 2;
pub const ROOT_LOG_BLOWUP: usize = 3;
pub const SBOX_SIZE: usize = 7;
const VM_MAX_TRACE_HEIGHTS: &[u32] = &[
4194304, 4, 128, 2097152, 8388608, 4194304, 262144, 8388608, 16777216, 2097152, 16777216,
2097152, 8388608, 262144, 2097152, 1048576, 4194304, 1048576, 262144,
];
pub struct CenoAggregationProver {
pub leaf_prover: VmInstance<BabyBearPoseidon2Engine, NativeBuilder>,
pub internal_prover: VmInstance<BabyBearPoseidon2Engine, NativeBuilder>,
pub vk: CenoRecursionVerifierKeys<BabyBearPoseidon2Config>,
pub pk: CenoRecursionProvingKeys<BabyBearPoseidon2Config, NativeConfig>,
}
impl CenoAggregationProver {
pub fn new(
leaf_prover: VmInstance<BabyBearPoseidon2Engine, NativeBuilder>,
internal_prover: VmInstance<BabyBearPoseidon2Engine, NativeBuilder>,
pk: CenoRecursionProvingKeys<BabyBearPoseidon2Config, NativeConfig>,
) -> Self {
Self {
leaf_prover,
internal_prover,
vk: pk.get_vk(),
pk,
}
}
pub fn from_base_vk(vk: ZKVMVerifyingKey<E, Basefold<E, BasefoldRSParams>>) -> Self {
let vb = NativeBuilder::default();
let [leaf_fri_params, internal_fri_params, _root_fri_params] =
[LEAF_LOG_BLOWUP, INTERNAL_LOG_BLOWUP, ROOT_LOG_BLOWUP]
.map(FriParameters::standard_with_100_bits_conjectured_security);
// Configure vm for the leaf layer
let leaf_vm_config = NativeConfig {
system: SystemConfig::new(
SBOX_SIZE.min(leaf_fri_params.max_constraint_degree()),
MemoryConfig {
max_access_adapter_n: 16,
..Default::default()
},
VmVerifierPvs::<u8>::width(),
)
.with_max_segment_len((1 << 24) - 100)
.with_profiling()
.without_continuations(),
native: Default::default(),
};
// Leaf layer keygen
let leaf_vm_pk = {
let leaf_engine = BabyBearPoseidon2Engine::new(leaf_fri_params);
let (_, vm_pk) =
VirtualMachine::new_with_keygen(leaf_engine, vb.clone(), leaf_vm_config.clone())
.expect("leaf keygen");
assert!(vm_pk.max_constraint_degree <= leaf_fri_params.max_constraint_degree());
check_max_constraint_degrees(&leaf_vm_config.system, &leaf_fri_params);
Arc::new(VmProvingKey {
fri_params: leaf_fri_params,
vm_config: leaf_vm_config,
vm_pk,
})
};
let leaf_vm_vk = leaf_vm_pk.vm_pk.get_vk();
// Leaf layer program
let leaf_engine = BabyBearPoseidon2Engine::new(leaf_fri_params);
let leaf_program = CenoLeafVmVerifierConfig {
vk,
compiler_options: CompilerOptions::default().with_cycle_tracker(),
}
.build_program();
let leaf_committed_exe = Arc::new(VmCommittedExe::<SC>::commit(
leaf_program.into(),
leaf_engine.config().pcs(),
));
let leaf_prover = new_local_prover::<BabyBearPoseidon2Engine, NativeBuilder>(
vb.clone(),
&leaf_vm_pk,
leaf_committed_exe.exe.clone(),
)
.expect("leaf prover");
// Configure vm for internal layers
// needs to be a multiple of DIGEST_SIZE
let num_public_values =
InternalVmVerifierPvs::<u8>::width().div_ceil(DIGEST_SIZE) * DIGEST_SIZE;
let internal_vm_config = NativeConfig {
system: SystemConfig::new(
SBOX_SIZE.min(internal_fri_params.max_constraint_degree()),
MemoryConfig {
max_access_adapter_n: 16,
..Default::default()
},
num_public_values,
)
.with_max_segment_len((1 << 24) - 100)
.with_profiling()
.without_continuations(),
native: Default::default(),
};
// Internal keygen
let internal_engine = BabyBearPoseidon2Engine::new(internal_fri_params);
let (internal_vm, vm_pk) = VirtualMachine::new_with_keygen(
internal_engine,
vb.clone(),
internal_vm_config.clone(),
)
.expect("internal keygen");
check_max_constraint_degrees(&internal_vm_config.system, &internal_fri_params);
assert!(vm_pk.max_constraint_degree <= internal_fri_params.max_constraint_degree());
let internal_vm_pk = Arc::new(VmProvingKey {
fri_params: internal_fri_params,
vm_config: internal_vm_config,
vm_pk,
});
let internal_vm_vk = internal_vm_pk.vm_pk.get_vk();
// Internal program
let internal_program = InternalVmVerifierConfig {
leaf_fri_params,
internal_fri_params,
compiler_options: CompilerOptions::default().with_cycle_tracker(),
}
.build_program(&leaf_vm_vk, &internal_vm_vk);
let internal_committed_exe = Arc::new(VmCommittedExe::<SC>::commit(
internal_program.into(),
internal_vm.engine.config().pcs(),
));
let internal_prover = new_local_prover::<BabyBearPoseidon2Engine, NativeBuilder>(
vb.clone(),
&internal_vm_pk,
internal_committed_exe.exe.clone(),
)
.expect("internal prover");
// TODO: build root program (requires shard ram ec point is zero)
// TODO: add root prover
let vk = CenoRecursionVerifierKeys {
leaf_vm_vk,
leaf_fri_params: leaf_vm_pk.fri_params,
internal_vm_vk,
internal_fri_params: internal_vm_pk.fri_params,
internal_commit: internal_committed_exe.get_program_commit(),
};
let pk = CenoRecursionProvingKeys {
leaf_vm_pk,
leaf_committed_exe,
internal_vm_pk,
internal_committed_exe,
};
Self {
leaf_prover,
internal_prover,
vk,
pk,
}
}
pub fn generate_root_proof(
&mut self,
base_proofs: Vec<ZKVMProof<BabyBearExt4, Basefold<E, BasefoldRSParams>>>,
) -> VmStarkProof<SC> {
let aggregation_start_timestamp = Instant::now();
// Construct zkvm proof input
let zkvm_proof_inputs: Vec<ZKVMProofInput> = base_proofs
.into_iter()
.enumerate()
.map(|(shard_id, p)| ZKVMProofInput::from((shard_id, p)))
.collect();
let user_public_values: Vec<F> = zkvm_proof_inputs
.iter()
.flat_map(|p| p.raw_pi.iter().flat_map(|v| v.clone()).collect::<Vec<F>>())
.collect();
let leaf_inputs = chunk_ceno_leaf_proof_inputs(zkvm_proof_inputs);
let leaf_proofs = leaf_inputs
.iter()
.enumerate()
.map(|(proof_idx, p)| {
println!(
"Aggregation - Start leaf proof (idx: {:?}) at: {:?}",
proof_idx,
aggregation_start_timestamp.elapsed()
);
let mut witness_stream: Vec<Vec<F>> = Vec::new();
witness_stream.extend(p.write());
let leaf_proof = SingleSegmentVmProver::prove(
&mut self.leaf_prover,
witness_stream,
VM_MAX_TRACE_HEIGHTS,
)
.expect("leaf proof generation failed");
// _debug: export
// let file =
// File::create(format!("leaf_proof_{:?}.bin", proof_idx)).expect("Create export proof file");
// bincode::serialize_into(file, &leaf_proof).expect("failed to serialize leaf proof");
println!(
"Aggregation - Completed leaf proof (idx: {:?}) at: {:?}, public values: {:?}",
proof_idx,
aggregation_start_timestamp.elapsed(),
leaf_proof.per_air[PUBLIC_VALUES_AIR_ID].public_values,
);
leaf_proof
})
.collect::<Vec<_>>();
// Aggregate tree to root proof
let mut internal_node_idx = -1;
let mut internal_node_height = 0;
let mut proofs = leaf_proofs;
println!(
"Aggregation - Start internal aggregation at: {:?}",
aggregation_start_timestamp.elapsed()
);
// We will always generate at least one internal proof, even if there is only one leaf
// proof, in order to shrink the proof size
while proofs.len() > 1 || internal_node_height == 0 {
let internal_inputs = InternalVmVerifierInput::chunk_leaf_or_internal_proofs(
(*self.internal_prover.program_commitment()).into(),
&proofs,
DEFAULT_NUM_CHILDREN_INTERNAL,
);
let layer_proofs: Vec<Proof<_>> = internal_inputs
.into_iter()
.map(|input| {
internal_node_idx += 1;
let internal_proof = SingleSegmentVmProver::prove(
&mut self.internal_prover,
input.write(),
VM_MAX_TRACE_HEIGHTS,
)
.expect("internal proof generation failed");
println!(
"Aggregation - Completed internal node (idx: {:?}) at height {:?}: {:?}",
internal_node_idx,
internal_node_height,
aggregation_start_timestamp.elapsed()
);
// _debug: export
// let file = File::create(format!(
// "internal_proof_{:?}_height_{:?}.bin",
// internal_node_idx, internal_node_height
// ))
// .expect("Create export proof file");
// bincode::serialize_into(file, &internal_proof).expect("failed to serialize internal proof");
internal_proof
})
.collect();
proofs = layer_proofs;
internal_node_height += 1;
}
println!(
"Aggregation - Completed internal aggregation at: {:?}",
aggregation_start_timestamp.elapsed()
);
println!("Aggregation - Final height: {:?}", internal_node_height);
// TODO: generate root proof from last internal proof
// Export e2e stark proof (used in verify_e2e_stark_proof)
VmStarkProof {
inner: proofs.pop().unwrap(),
user_public_values,
}
}
}
/// Config to generate leaf VM verifier program.
pub struct CenoLeafVmVerifierConfig {
pub vk: ZKVMVerifyingKey<E, Basefold<E, BasefoldRSParams>>,
pub compiler_options: CompilerOptions,
}
impl CenoLeafVmVerifierConfig {
pub fn build_program(&self) -> Program<F> {
let mut builder = Builder::<C>::default();
{
let ceno_leaf_input = CenoLeafVmVerifierInput::read(&mut builder);
let stark_pvs = VmVerifierPvs::<Felt<F>>::uninit(&mut builder);
builder.cycle_tracker_start("Verify Ceno ZKVM Proof");
let zkvm_proof = ceno_leaf_input.proof;
let raw_pi = zkvm_proof.raw_pi.clone();
let _calculated_shard_ec_sum = verify_zkvm_proof(&mut builder, zkvm_proof, &self.vk);
builder.cycle_tracker_end("Verify Ceno ZKVM Proof");
builder.cycle_tracker_start("PV Operations");
// TODO: define our own VmVerifierPvs
for i in 0..DIGEST_SIZE {
builder.assign(&stark_pvs.app_commit[i], F::ZERO);
}
let pv = &raw_pi;
let init_pc = {
let arr = builder.get(pv, INIT_PC_IDX);
builder.get(&arr, 0)
};
let end_pc = {
let arr = builder.get(pv, END_PC_IDX);
builder.get(&arr, 0)
};
let exit_code = {
let arr = builder.get(pv, EXIT_CODE_IDX);
builder.get(&arr, 0)
};
builder.assign(&stark_pvs.connector.initial_pc, init_pc);
builder.assign(&stark_pvs.connector.final_pc, end_pc);
builder.assign(&stark_pvs.connector.exit_code, exit_code);
// TODO: assign shard_ec_sum to stark_pvs.shard_ec_sum
// builder
// .if_eq(ceno_leaf_input.is_last, Usize::from(1))
// .then(|builder| {
// builder.assert_nonzero(&pv.len());
// // PC and cycle checks
// let prev_pc: Ext<_, _> = builder.uninit();
// builder.range(0, pv.len()).for_each(|idx_vec, builder| {
// let shard_pi = builder.get(&pv, idx_vec[0]);
// let init_cycle = builder.get(&shard_pi, INIT_CYCLE_IDX);
// let tracer_default: Ext<_, _> =
// builder.constant(E::from_canonical_u64(Tracer::SUBCYCLES_PER_INSN));
// builder.assert_ext_eq(init_cycle, tracer_default);
// let end_pc = builder.get(&shard_pi, END_PC_IDX);
// let init_pc = builder.get(&shard_pi, INIT_PC_IDX);
// builder.if_eq(idx_vec[0], Usize::from(0)).then_or_else(
// |builder| {
// let entry_point: Ext<_, _> =
// builder.constant(E::from_canonical_u32(self.vk.entry_pc));
// builder.assert_ext_eq(init_pc, entry_point);
// },
// |builder| {
// builder.assert_ext_eq(init_pc, prev_pc);
// },
// );
// builder.assign(&prev_pc, end_pc);
// });
// });
for pv in stark_pvs.flatten() {
builder.commit_public_value(pv);
}
builder.cycle_tracker_end("PV Operations");
builder.halt();
}
builder.compile_isa_with_options(self.compiler_options)
}
}
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
serialize = "Com<SC>: Serialize",
deserialize = "Com<SC>: Deserialize<'de>"
))]
pub struct CenoRecursionVerifierKeys<SC: StarkGenericConfig> {
pub leaf_vm_vk: MultiStarkVerifyingKey<SC>,
pub leaf_fri_params: FriParameters,
pub internal_vm_vk: MultiStarkVerifyingKey<SC>,
pub internal_fri_params: FriParameters,
pub internal_commit: Com<SC>,
}
#[derive(Serialize, Deserialize)]
#[serde(bound(
serialize = "VmExe<Val<SC>>: Serialize, PcsProverData<SC>: Serialize, VC: Serialize",
deserialize = "VmExe<Val<SC>>: Deserialize<'de>, PcsProverData<SC>: Deserialize<'de>, VC: Deserialize<'de>"
))]
pub struct CenoRecursionProvingKeys<SC: StarkGenericConfig, VC> {
pub leaf_vm_pk: Arc<VmProvingKey<SC, VC>>,
pub leaf_committed_exe: Arc<VmCommittedExe<SC>>,
pub internal_vm_pk: Arc<VmProvingKey<SC, VC>>,
pub internal_committed_exe: Arc<VmCommittedExe<SC>>,
}
impl<SC: StarkGenericConfig, VC> Clone for CenoRecursionProvingKeys<SC, VC> {
fn clone(&self) -> Self {
Self {
leaf_vm_pk: self.leaf_vm_pk.clone(),
leaf_committed_exe: self.leaf_committed_exe.clone(),
internal_vm_pk: self.internal_vm_pk.clone(),
internal_committed_exe: self.internal_committed_exe.clone(),
}
}
}
impl<SC: StarkGenericConfig, VC> CenoRecursionProvingKeys<SC, VC> {
pub fn get_vk(&self) -> CenoRecursionVerifierKeys<SC> {
CenoRecursionVerifierKeys {
leaf_vm_vk: self.leaf_vm_pk.vm_pk.get_vk(),
leaf_fri_params: self.leaf_vm_pk.fri_params,
internal_vm_vk: self.internal_vm_pk.vm_pk.get_vk(),
internal_fri_params: self.internal_vm_pk.fri_params,
internal_commit: self.internal_committed_exe.get_program_commit(),
}
}
}
pub(crate) struct CenoLeafVmVerifierInput {
pub proof: ZKVMProofInput,
pub is_last: usize,
}
#[derive(DslVariable, Clone)]
pub(crate) struct CenoLeafVmVerifierInputVariable<C: Config> {
pub proof: ZKVMProofInputVariable<C>,
pub is_last: Usize<C::N>,
}
impl Hintable<InnerConfig> for CenoLeafVmVerifierInput {
type HintVariable = CenoLeafVmVerifierInputVariable<InnerConfig>;
fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
let proof = ZKVMProofInput::read(builder);
let is_last = Usize::Var(usize::read(builder));
Self::HintVariable { proof, is_last }
}
fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
let mut stream = Vec::new();
stream.extend(self.proof.write());
stream.extend(<usize as Hintable<InnerConfig>>::write(&self.is_last));
stream
}
}
pub(crate) fn chunk_ceno_leaf_proof_inputs(
zkvm_proofs: Vec<ZKVMProofInput>,
) -> Vec<CenoLeafVmVerifierInput> {
let mut ret: Vec<CenoLeafVmVerifierInput> = zkvm_proofs
.into_iter()
.map(|p| CenoLeafVmVerifierInput {
proof: p,
is_last: 0,
})
.collect();
let last = ret.last_mut().unwrap();
last.is_last = 1;
ret
}
// Source from OpenVm SDK::verify_e2e_stark_proof with abridged key
// See: https://github.com/openvm-org/openvm
pub fn verify_e2e_stark_proof(
k: &CenoRecursionVerifierKeys<SC>,
proof: &VmStarkProof<SC>,
_expected_exe_commit: &Bn254Fr,
_expected_vm_commit: &Bn254Fr,
) -> Result<AppExecutionCommit, String> {
if proof.inner.per_air.len() < 3 {
return Err("Invalid number of AIRs: expected at least 3".into());
} else if proof.inner.per_air[0].air_id != PROGRAM_AIR_ID {
return Err("Missing program AIR".into());
} else if proof.inner.per_air[1].air_id != CONNECTOR_AIR_ID {
return Err("Missing connector AIR".into());
} else if proof.inner.per_air[2].air_id != PUBLIC_VALUES_AIR_ID {
return Err("Missing public values AIR".into());
}
let public_values_air_proof_data = &proof.inner.per_air[2];
let program_commit = proof.inner.commitments.main_trace[PROGRAM_CACHED_TRACE_INDEX].as_ref();
let internal_commit: &[_; CHUNK] = &k.internal_commit.into();
let (vm_vk, fri_params, vm_commit) = if program_commit == internal_commit {
let internal_pvs: &InternalVmVerifierPvs<_> = public_values_air_proof_data
.public_values
.as_slice()
.borrow();
if internal_commit != &internal_pvs.extra_pvs.internal_program_commit {
return Err(format!(
"Invalid internal program commit: expected {:?}, got {:?}",
internal_commit, internal_pvs.extra_pvs.internal_program_commit
));
}
(
&k.internal_vm_vk,
k.internal_fri_params,
internal_pvs.extra_pvs.leaf_verifier_commit,
)
} else {
(&k.leaf_vm_vk, k.leaf_fri_params, *program_commit)
};
let e = BabyBearPoseidon2Engine::new(fri_params);
e.verify(vm_vk, &proof.inner)
.expect("stark e2e proof verification should pass");
let pvs: &VmVerifierPvs<_> =
public_values_air_proof_data.public_values[..VmVerifierPvs::<u8>::width()].borrow();
// _debug: AIR ordering
// if let Some(exit_code) = pvs.connector.exit_code() {
// if exit_code != 0 {
// return Err(format!(
// "Invalid exit code: expected 0, got {}",
// exit_code
// ));
// }
// } else {
// return Err(format!("Program did not terminate"));
// }
let hasher = vm_poseidon2_hasher();
let _public_values_root = hasher.merkle_root(&proof.user_public_values);
// _debug: Public value commitment
// if public_values_root != pvs.public_values_commit {
// return Err(format!(
// "Invalid public values root: expected {:?}, got {:?}",
// pvs.public_values_commit,
// public_values_root
// ));
// }
let exe_commit = compute_exe_commit(
&hasher,
&pvs.app_commit,
&pvs.memory.initial_root,
pvs.connector.initial_pc,
);
let app_commit = AppExecutionCommit::from_field_commit(exe_commit, vm_commit);
let _exe_commit_bn254 = app_commit.app_exe_commit.to_bn254();
let _vm_commit_bn254 = app_commit.app_vm_commit.to_bn254();
// _debug: execution commit checks
// if exe_commit_bn254 != *expected_exe_commit {
// return Err(eyre::eyre!(
// "Invalid app exe commit: expected {:?}, got {:?}",
// expected_exe_commit,
// exe_commit_bn254
// ));
// } else if vm_commit_bn254 != *expected_vm_commit {
// return Err(eyre::eyre!(
// "Invalid app vm commit: expected {:?}, got {:?}",
// expected_vm_commit,
// vm_commit_bn254
// ));
// }
Ok(app_commit)
}
/// Build Ceno's zkVM verifier program from vk in OpenVM's eDSL
pub fn build_zkvm_verifier_program(
vk: &ZKVMVerifyingKey<E, Basefold<E, BasefoldRSParams>>,
) -> Program<F> {
let mut builder = AsmBuilder::<F, E>::default();
let zkvm_proof_input_variables = ZKVMProofInput::read(&mut builder);
verify_zkvm_proof(&mut builder, zkvm_proof_input_variables, vk);
builder.halt();
// Compile program
#[cfg(feature = "bench-metrics")]
let options = CompilerOptions::default().with_cycle_tracker();
#[cfg(not(feature = "bench-metrics"))]
let options = CompilerOptions::default();
let mut compiler = AsmCompiler::new(options.word_size);
compiler.build(builder.operations);
let asm_code = compiler.code();
let program: Program<F> = convert_program(asm_code, options);
program
}
pub fn verify_proofs(
zkvm_proofs: Vec<ZKVMProof<E, RecPcs>>,
vk: ZKVMVerifyingKey<E, Basefold<E, BasefoldRSParams>>,
) {
let program = build_zkvm_verifier_program(&vk);
if !zkvm_proofs.is_empty() {
let zkvm_proof_input = ZKVMProofInput::from((0usize, zkvm_proofs[0].clone()));
// Pass in witness stream
let mut witness_stream: Vec<Vec<F>> = Vec::new();
witness_stream.extend(zkvm_proof_input.write());
let poseidon2_max_constraint_degree: usize = 3;
let mut config = NativeConfig::aggregation(0, poseidon2_max_constraint_degree);
config.system.memory_config.max_access_adapter_n = 16;
let exe = VmExe::new(program);
let fri_params = standard_fri_params_with_100_bits_conjectured_security(1);
let vb = NativeBuilder::default();
air_test_impl::<BabyBearPoseidon2Engine, _>(
fri_params,
vb,
config,
exe,
witness_stream,
1,
true,
)
.unwrap();
}
}
#[cfg(test)]
mod tests {
use super::verify_e2e_stark_proof;
use crate::{
aggregation::{CenoAggregationProver, verify_proofs},
zkvm_verifier::binding::E,
};
use ceno_zkvm::{
e2e::verify,
scheme::{ZKVMProof, verifier::ZKVMVerifier},
structs::ZKVMVerifyingKey,
};
use mpcs::{Basefold, BasefoldRSParams};
use openvm_stark_sdk::{config::setup_tracing_with_log_level, p3_bn254_fr::Bn254Fr};
use p3::field::FieldAlgebra;
use std::fs::File;
pub fn aggregation_inner_thread() {
setup_tracing_with_log_level(tracing::Level::WARN);
let proof_path = "./src/imported/proof.bin";
let vk_path = "./src/imported/vk.bin";
let zkvm_proofs: Vec<ZKVMProof<E, Basefold<E, BasefoldRSParams>>> =
bincode::deserialize_from(File::open(proof_path).expect("Failed to open proof file"))
.expect("Failed to deserialize proof file");
let vk: ZKVMVerifyingKey<E, Basefold<E, BasefoldRSParams>> =
bincode::deserialize_from(File::open(vk_path).expect("Failed to open vk file"))
.expect("Failed to deserialize vk file");
let mut agg_prover = CenoAggregationProver::from_base_vk(vk);
let root_stark_proof = agg_prover.generate_root_proof(zkvm_proofs);
// _debug
verify_e2e_stark_proof(
&agg_prover.vk,
&root_stark_proof,
&Bn254Fr::ZERO,
&Bn254Fr::ZERO,
)
.expect("Verify e2e stark proof should pass");
}
pub fn verify_single_inner_thread() {
setup_tracing_with_log_level(tracing::Level::WARN);
let proof_path = "./src/imported/proof.bin";
let vk_path = "./src/imported/vk.bin";
let zkvm_proofs: Vec<ZKVMProof<E, Basefold<E, BasefoldRSParams>>> =
bincode::deserialize_from(File::open(proof_path).expect("Failed to open proof file"))
.expect("Failed to deserialize proof file");
let vk: ZKVMVerifyingKey<E, Basefold<E, BasefoldRSParams>> =
bincode::deserialize_from(File::open(vk_path).expect("Failed to open vk file"))
.expect("Failed to deserialize vk file");
verify_proofs(zkvm_proofs, vk);
}
pub fn verify_single_rust_verifier_inner_thread() {
setup_tracing_with_log_level(tracing::Level::WARN);
let proof_path = "./src/imported/proof.bin";
let vk_path = "./src/imported/vk.bin";
let zkvm_proofs: Vec<ZKVMProof<E, Basefold<E, BasefoldRSParams>>> =
bincode::deserialize_from(File::open(proof_path).expect("Failed to open proof file"))
.expect("Failed to deserialize proof file");
let vk: ZKVMVerifyingKey<E, Basefold<E, BasefoldRSParams>> =
bincode::deserialize_from(File::open(vk_path).expect("Failed to open vk file"))
.expect("Failed to deserialize vk file");
let verifier = ZKVMVerifier::new(vk);
verify(zkvm_proofs.clone(), &verifier).expect("Verification failed");
}
#[test]
#[ignore = "need to generate proof first"]
pub fn test_aggregation() {
let stack_size = 256 * 1024 * 1024; // 64 MB
let handler = std::thread::Builder::new()
.stack_size(stack_size)
.spawn(aggregation_inner_thread)
.expect("Failed to spawn thread");
handler.join().expect("Thread panicked");
}
#[test]
#[ignore = "need to generate proof first"]
pub fn test_single() {
let stack_size = 256 * 1024 * 1024; // 64 MB
let handler = std::thread::Builder::new()
.stack_size(stack_size)
.spawn(verify_single_inner_thread)
.expect("Failed to spawn thread");
handler.join().expect("Thread panicked");
}
#[test]
#[ignore = "need to generate proof first"]
pub fn test_single_rust_verifier() {
let stack_size = 256 * 1024 * 1024; // 64 MB
let handler = std::thread::Builder::new()
.stack_size(stack_size)
.spawn(verify_single_rust_verifier_inner_thread)
.expect("Failed to spawn thread");
handler.join().expect("Thread panicked");
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/basefold_verifier/field.rs | ceno_recursion/src/basefold_verifier/field.rs | const TWO_ADICITY: usize = 32;
const TWO_ADIC_GENERATORS: [usize; 33] = [
0x0000000000000001,
0xffffffff00000000,
0x0001000000000000,
0xfffffffeff000001,
0xefffffff00000001,
0x00003fffffffc000,
0x0000008000000000,
0xf80007ff08000001,
0xbf79143ce60ca966,
0x1905d02a5c411f4e,
0x9d8f2ad78bfed972,
0x0653b4801da1c8cf,
0xf2c35199959dfcb6,
0x1544ef2335d17997,
0xe0ee099310bba1e2,
0xf6b2cffe2306baac,
0x54df9630bf79450e,
0xabd0a6e8aa3d8a0e,
0x81281a7b05f9beac,
0xfbd41c6b8caa3302,
0x30ba2ecd5e93e76d,
0xf502aef532322654,
0x4b2a18ade67246b5,
0xea9d5a1336fbc98b,
0x86cdcc31c307e171,
0x4bbaf5976ecfefd8,
0xed41d05b78d6e286,
0x10d78dd8915a171d,
0x59049500004a4485,
0xdfa8c93ba46d2666,
0x7e9bd009b86a0845,
0x400a7f755588e659,
0x185629dcda58878c,
];
use openvm_native_compiler::prelude::*;
use p3_field::FieldAlgebra;
fn two_adic_generator<C: Config>(
builder: &mut Builder<C>,
bits: Var<C::N>,
) -> Var<C::F> {
let bits_limit = builder.eval(Usize::from(TWO_ADICITY) + Usize::from(1));
builder.assert_less_than_slow_small_rhs(bits, bits_limit);
let two_adic_generator: Array<C, Var<<C as Config>::F>> = builder.dyn_array(TWO_ADICITY + 1);
builder.range(0, TWO_ADICITY + 1).for_each(|i_vec, builder| {
let i = i_vec[0];
builder.set_value(&two_adic_generator, i, C::F::from_canonical_usize(TWO_ADIC_GENERATORS[i.value()]));
});
builder.get(&two_adic_generator, bits)
} | rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/basefold_verifier/query_phase.rs | ceno_recursion/src/basefold_verifier/query_phase.rs | use ff_ext::{BabyBearExt4, ExtensionField, PoseidonField};
use mpcs::basefold::structure::QueryOpeningProof as InnerQueryOpeningProof;
use openvm_native_compiler::{asm::AsmConfig, prelude::*};
use openvm_native_compiler_derive::iter_zip;
use openvm_native_recursion::{
hints::{Hintable, VecAutoHintable},
vars::HintSlice,
};
use openvm_stark_sdk::p3_baby_bear::BabyBear;
use p3::{
commit::ExtensionMmcs,
field::{Field, FieldAlgebra},
};
use serde::Deserialize;
use super::{basefold::*, extension_mmcs::*, mmcs::*, rs::*, utils::*};
use crate::{
arithmetics::eq_eval_with_index,
tower_verifier::{binding::*, program::interpolate_uni_poly},
};
pub type F = BabyBear;
pub type E = BabyBearExt4;
pub type InnerConfig = AsmConfig<F, E>;
use p3::fri::{
BatchOpening as InnerBatchOpening, CommitPhaseProofStep as InnerCommitPhaseProofStep,
};
/// We have to define a struct similar to p3_fri::BatchOpening as
/// the trait `Hintable` is defined in another crate inside OpenVM
#[derive(Deserialize)]
pub struct BatchOpening {
pub opened_values: Vec<Vec<F>>,
pub opening_proof: MmcsProof,
}
impl
From<
InnerBatchOpening<
<E as ExtensionField>::BaseField,
<<E as ExtensionField>::BaseField as PoseidonField>::MMCS,
>,
> for BatchOpening
{
fn from(
inner: InnerBatchOpening<
<E as ExtensionField>::BaseField,
<<E as ExtensionField>::BaseField as PoseidonField>::MMCS,
>,
) -> Self {
Self {
opened_values: inner.opened_values,
opening_proof: inner.opening_proof,
}
}
}
#[derive(DslVariable, Clone)]
pub struct BatchOpeningVariable<C: Config> {
pub opened_values: HintSlice<C>,
pub opening_proof: HintSlice<C>,
}
impl Hintable<InnerConfig> for BatchOpening {
type HintVariable = BatchOpeningVariable<InnerConfig>;
fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
let opened_values = read_hint_slice(builder);
let opening_proof = read_hint_slice(builder);
BatchOpeningVariable {
opened_values,
opening_proof,
}
}
fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
let mut stream = Vec::new();
stream.extend(vec![
vec![F::from_canonical_usize(self.opened_values.len())],
self.opened_values
.iter()
.flatten()
.copied()
.collect::<Vec<_>>(),
]);
stream.extend(vec![
vec![F::from_canonical_usize(self.opening_proof.len())],
self.opening_proof
.iter()
.flatten()
.copied()
.collect::<Vec<_>>(),
]);
stream
}
}
impl VecAutoHintable for BatchOpening {}
/// TODO: use `openvm_native_recursion::fri::types::FriCommitPhaseProofStepVariable` instead
#[derive(Deserialize)]
pub struct CommitPhaseProofStep {
pub sibling_value: E,
pub opening_proof: MmcsProof,
}
pub type ExtMmcs<E> = ExtensionMmcs<
<E as ExtensionField>::BaseField,
E,
<<E as ExtensionField>::BaseField as PoseidonField>::MMCS,
>;
impl From<InnerCommitPhaseProofStep<E, ExtMmcs<E>>> for CommitPhaseProofStep {
fn from(inner: InnerCommitPhaseProofStep<E, ExtMmcs<E>>) -> Self {
Self {
sibling_value: inner.sibling_value,
opening_proof: inner.opening_proof,
}
}
}
#[derive(DslVariable, Clone)]
pub struct CommitPhaseProofStepVariable<C: Config> {
pub sibling_value: Ext<C::F, C::EF>,
pub opening_proof: HintSlice<C>,
}
impl VecAutoHintable for CommitPhaseProofStep {}
impl Hintable<InnerConfig> for CommitPhaseProofStep {
    type HintVariable = CommitPhaseProofStepVariable<InnerConfig>;
    // Read order mirrors `write`: sibling value first, then the
    // length-prefixed opening proof.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let sibling_value = E::read(builder);
        let opening_proof = read_hint_slice(builder);
        CommitPhaseProofStepVariable {
            sibling_value,
            opening_proof,
        }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(self.sibling_value.write());
        // Opening proof: [length] followed by the flattened digests.
        stream.extend(vec![
            vec![F::from_canonical_usize(self.opening_proof.len())],
            self.opening_proof
                .iter()
                .flatten()
                .copied()
                .collect::<Vec<_>>(),
        ]);
        stream
    }
}
/// One verifier query's opening data: per-round input openings plus the
/// per-level commit-phase openings.
#[derive(Deserialize)]
pub struct QueryOpeningProof {
    /// One batch opening per round (one per committed input batch).
    pub input_proofs: Vec<BatchOpening>,
    /// One sibling/opening per commit-phase (folding) level.
    pub commit_phase_openings: Vec<CommitPhaseProofStep>,
}
// Conversion from the native proof type, mapping each component through its
// own `From` impl.
impl From<InnerQueryOpeningProof<E>> for QueryOpeningProof {
    fn from(proof: InnerQueryOpeningProof<E>) -> Self {
        Self {
            input_proofs: proof
                .input_proofs
                .into_iter()
                .map(|proof| proof.into())
                .collect(),
            commit_phase_openings: proof
                .commit_phase_openings
                .into_iter()
                .map(|proof| proof.into())
                .collect(),
        }
    }
}
/// In-circuit counterpart of [`QueryOpeningProof`].
#[derive(DslVariable, Clone)]
pub struct QueryOpeningProofVariable<C: Config> {
    pub input_proofs: Array<C, BatchOpeningVariable<C>>,
    pub commit_phase_openings: Array<C, CommitPhaseProofStepVariable<C>>,
}
pub(crate) type QueryOpeningProofs = Vec<QueryOpeningProof>;
pub(crate) type QueryOpeningProofsVariable<C> = Array<C, QueryOpeningProofVariable<C>>;
// Enables `Vec<QueryOpeningProof>` hints.
impl VecAutoHintable for QueryOpeningProof {}
impl Hintable<InnerConfig> for QueryOpeningProof {
    type HintVariable = QueryOpeningProofVariable<InnerConfig>;
    // Read order mirrors `write`: input proofs first, then commit-phase
    // openings.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let input_proofs = Vec::<BatchOpening>::read(builder);
        let commit_phase_openings = Vec::<CommitPhaseProofStep>::read(builder);
        QueryOpeningProofVariable {
            input_proofs,
            commit_phase_openings,
        }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(self.input_proofs.write());
        stream.extend(self.commit_phase_openings.write());
        stream
    }
}
/// An evaluation point together with the claimed evaluations at it.
#[derive(Deserialize)]
// NOTE: Different from PointAndEval in tower_verifier!
pub struct PointAndEvals {
    /// The evaluation point.
    pub point: Point,
    /// Claimed evaluations at `point`, one per opened polynomial.
    pub evals: Vec<E>,
}
impl Hintable<InnerConfig> for PointAndEvals {
    type HintVariable = PointAndEvalsVariable<InnerConfig>;
    // Point first, then evals — same order as `write`.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let point = Point::read(builder);
        let evals = Vec::<E>::read(builder);
        PointAndEvalsVariable { point, evals }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(self.point.write());
        stream.extend(self.evals.write());
        stream
    }
}
impl VecAutoHintable for PointAndEvals {}
/// In-circuit counterpart of [`PointAndEvals`].
#[derive(DslVariable, Clone)]
pub struct PointAndEvalsVariable<C: Config> {
    pub point: PointVariable<C>,
    pub evals: Array<C, Ext<C::F, C::EF>>,
}
/// Host-side input for the Basefold/FRI query-phase verifier.
#[derive(Deserialize)]
pub struct QueryPhaseVerifierInput {
    // pub t_inv_halves: Vec<Vec<<E as ExtensionField>::BaseField>>,
    /// Maximum number of variables over all openings; together with the
    /// rate log this fixes the largest codeword size.
    pub max_num_var: usize,
    // This is the maximum width of all the opened values in the query openings. The reason
    // for providing this information is to allow us to allocate a single all-zero buffer that is
    // sufficiently large to pass to the `fri_single_reduced_opening_eval` chip.
    pub max_width: usize,
    /// Random coefficients used to batch all opened columns into one claim.
    pub batch_coeffs: Vec<E>,
    /// One folding challenge per commit-phase level, plus the final one
    /// (the verifier asserts `commits.len() + 1 == fold_challenges.len()`).
    pub fold_challenges: Vec<E>,
    /// Queried codeword indices, one per query.
    pub indices: Vec<usize>,
    /// The Basefold proof being verified.
    pub proof: BasefoldProof,
    /// The committed rounds whose openings are being checked.
    pub rounds: Vec<Round>,
}
impl Hintable<InnerConfig> for QueryPhaseVerifierInput {
    type HintVariable = QueryPhaseVerifierInputVariable<InnerConfig>;
    // Hints are consumed positionally: the field order here must match
    // `write` below exactly (the commented-out `t_inv_halves` lines are
    // kept in both places for the same reason).
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        // let t_inv_halves = Vec::<Vec<F>>::read(builder);
        let max_num_var = Usize::Var(usize::read(builder));
        let max_width = Usize::Var(usize::read(builder));
        let batch_coeffs = Vec::<E>::read(builder);
        let fold_challenges = Vec::<E>::read(builder);
        let indices = Vec::<usize>::read(builder);
        let proof = BasefoldProof::read(builder);
        let rounds = Vec::<Round>::read(builder);
        QueryPhaseVerifierInputVariable {
            // t_inv_halves,
            max_num_var,
            max_width,
            batch_coeffs,
            fold_challenges,
            indices,
            proof,
            rounds,
        }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        // stream.extend(self.t_inv_halves.write());
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.max_num_var));
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.max_width));
        stream.extend(self.batch_coeffs.write());
        stream.extend(self.fold_challenges.write());
        stream.extend(self.indices.write());
        stream.extend(self.proof.write());
        stream.extend(self.rounds.write());
        stream
    }
}
/// In-circuit counterpart of [`QueryPhaseVerifierInput`].
#[derive(DslVariable, Clone)]
pub struct QueryPhaseVerifierInputVariable<C: Config> {
    // pub t_inv_halves: Array<C, Array<C, Felt<C::F>>>,
    pub max_num_var: Usize<C::N>,
    // See `QueryPhaseVerifierInput` for explaining the purpose of this field.
    pub max_width: Usize<C::N>,
    pub batch_coeffs: Array<C, Ext<C::F, C::EF>>,
    pub fold_challenges: Array<C, Ext<C::F, C::EF>>,
    pub indices: Array<C, Var<C::N>>,
    pub proof: BasefoldProofVariable<C>,
    pub rounds: Array<C, RoundVariable<C>>,
}
/// The two halves of a codeword pair that are folded together by one FRI
/// folding step (see `codeword_fold_with_challenge`).
#[derive(DslVariable, Clone)]
pub struct PackedCodeword<C: Config> {
    pub low: Ext<C::F, C::EF>,
    pub high: Ext<C::F, C::EF>,
}
/// Per-round data precomputed once before the per-query loop of
/// `batch_verifier_query_phase`.
#[derive(DslVariable, Clone)]
pub struct RoundContextVariable<C: Config> {
    /// Scratch buffers for opened values; (re)filled for every query by the
    /// `fri_single_reduced_opening_eval` path.
    pub(crate) opened_values_buffer: Array<C, Array<C, Felt<C::F>>>,
    /// Per-opening log2 height (`num_var + rate_log - 1`).
    pub(crate) log2_heights: Array<C, Var<C::N>>,
    /// Negated `alpha^offset` batching factors, one per opening.
    pub(crate) minus_alpha_offsets: Array<C, Ext<C::F, C::EF>>,
    /// log2 heights permuted into commitment order, used as the MMCS
    /// dimensions when verifying the batch opening.
    pub(crate) dimensions: Array<C, Var<C::N>>,
}
pub(crate) fn batch_verifier_query_phase<C: Config>(
builder: &mut Builder<C>,
input: QueryPhaseVerifierInputVariable<C>,
) {
let inv_2 = builder.constant(C::F::from_canonical_u32(0x3c000001));
let two_adic_generators_inverses: Array<C, Felt<C::F>> = builder.dyn_array(28);
for (index, val) in [
0x1usize, 0x78000000, 0x67055c21, 0x5ee99486, 0xbb4c4e4, 0x2d4cc4da, 0x669d6090,
0x17b56c64, 0x67456167, 0x688442f9, 0x145e952d, 0x4fe61226, 0x4c734715, 0x11c33e2a,
0x62c3d2b1, 0x77cad399, 0x54c131f4, 0x4cabd6a6, 0x5cf5713f, 0x3e9430e8, 0xba067a3,
0x18adc27d, 0x21fd55bc, 0x4b859b3d, 0x3bd57996, 0x4483d85a, 0x3a26eef8, 0x1a427a41,
]
.iter()
.enumerate()
{
let generator_inverse = builder.constant(C::F::from_canonical_usize(*val).inverse());
builder.set_value(&two_adic_generators_inverses, index, generator_inverse);
}
let zero: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
let zero_flag = builder.constant(C::N::ZERO);
let two: Var<C::N> = builder.constant(C::N::TWO);
let two_felt: Felt<C::F> = builder.constant(C::F::TWO);
// encode_small
let final_message = &input.proof.final_message;
let final_rmm_values_len = builder.get(final_message, 0).len();
let final_rmm_values = builder.dyn_array(final_rmm_values_len.clone());
let all_zeros = builder.dyn_array(input.max_width.clone());
iter_zip!(builder, all_zeros).for_each(|ptr_vec, builder| {
builder.set_value(&all_zeros, ptr_vec[0], zero);
});
builder
.range(0, final_rmm_values_len.clone())
.for_each(|i_vec, builder| {
let i = i_vec[0];
let row_len = final_message.len();
let sum = builder.constant(C::EF::ZERO);
builder.range(0, row_len).for_each(|j_vec, builder| {
let j = j_vec[0];
let row = builder.get(final_message, j);
let row_j = builder.get(&row, i);
builder.assign(&sum, sum + row_j);
});
builder.set_value(&final_rmm_values, i, sum);
});
let final_rmm = RowMajorMatrixVariable {
values: final_rmm_values,
width: builder.eval(Usize::from(1)),
};
let final_codeword = encode_small(builder, final_rmm);
let log2_max_codeword_size: Var<C::N> =
builder.eval(input.max_num_var.clone() + Usize::from(get_rate_log()));
let alpha: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
builder
.if_ne(input.batch_coeffs.len(), C::N::ONE)
.then(|builder| {
let batch_coeff = builder.get(&input.batch_coeffs, 1);
builder.assign(&alpha, batch_coeff);
});
let initial_cur_num_var: Var<C::N> = builder.eval(input.max_num_var.clone());
let initial_log2_height: Var<C::N> =
builder.eval(initial_cur_num_var + Usize::from(get_rate_log() - 1));
builder.assert_eq::<Var<C::N>>(
input.proof.commits.len() + Usize::from(1),
input.fold_challenges.len(),
);
let rounds_context: Array<C, RoundContextVariable<C>> = builder.dyn_array(input.rounds.len());
let batch_coeffs_offset: Var<C::N> = builder.constant(C::N::ZERO);
builder.cycle_tracker_start("Construct round context");
iter_zip!(builder, input.rounds, rounds_context).for_each(|ptr_vec, builder| {
let round = builder.iter_ptr_get(&input.rounds, ptr_vec[0]);
// This buffer is not initialized here in providing the context.
// It will be initialized later (once for each query) in the loop over queries,
// by the `fri_single_reduced_opening_eval` chip.
let opened_values_buffer: Array<C, Array<C, Felt<C::F>>> =
builder.dyn_array(round.openings.len());
let log2_heights = builder.dyn_array(round.openings.len());
let minus_alpha_offsets = builder.dyn_array(round.openings.len());
let dimensions = builder.dyn_array(round.openings.len());
iter_zip!(
builder,
opened_values_buffer,
log2_heights,
round.openings,
minus_alpha_offsets,
)
.for_each(|ptr_vec, builder| {
let opening = builder.iter_ptr_get(&round.openings, ptr_vec[2]);
let log2_height: Var<C::N> =
builder.eval(opening.num_var + Usize::from(get_rate_log() - 1));
builder.iter_ptr_set(&log2_heights, ptr_vec[1], log2_height);
let width = opening.point_and_evals.evals.len();
let opened_value_len: Var<C::N> = builder.eval(width.clone() * two);
let opened_value_buffer = builder.dyn_array(opened_value_len);
builder.iter_ptr_set(
&opened_values_buffer,
ptr_vec[0],
opened_value_buffer.clone(),
);
let alpha_offset = builder.get(&input.batch_coeffs, batch_coeffs_offset);
// Will need to negate the values of low and high
// because `fri_single_reduced_opening_eval` is
// computing \sum_i alpha^i (0 - opened_value[i]).
// We want \sum_i alpha^(i + offset) opened_value[i]
// Let's negate it here.
builder.assign(&alpha_offset, -alpha_offset);
builder.iter_ptr_set(&minus_alpha_offsets, ptr_vec[3], alpha_offset);
builder.assign(&batch_coeffs_offset, batch_coeffs_offset + width.clone());
});
// TODO: ensure that perm is indeed a permutation of 0, ..., opened_values.len()-1
// Note that this should be done outside the loop over queries
// reorder (opened values, dimension) according to the permutation
builder
.range(0, round.openings.len())
.for_each(|j_vec, builder| {
let height_j = builder.get(&log2_heights, j_vec[0]);
let permuted_j = builder.get(&round.perm, j_vec[0]);
// let permuted_j = j;
builder.set_value(&dimensions, permuted_j, height_j);
});
// TODO: ensure that dimensions is indeed sorted decreasingly
// Note that this should be done outside the loop over queries
let round_context = RoundContextVariable {
opened_values_buffer,
log2_heights,
minus_alpha_offsets,
dimensions,
};
builder.iter_ptr_set(&rounds_context, ptr_vec[1], round_context);
});
builder.cycle_tracker_end("Construct round context");
iter_zip!(builder, input.indices, input.proof.query_opening_proof).for_each(
|ptr_vec, builder| {
// TODO: change type of input.indices to be `Array<C, Array<C, Var<C::N>>>`
let idx = builder.iter_ptr_get(&input.indices, ptr_vec[0]);
let idx = builder.unsafe_cast_var_to_felt(idx);
let idx_bits = builder.num2bits_f(idx, C::N::bits() as u32);
// assert idx_bits[log2_max_codeword_size..] == 0
builder
.range(log2_max_codeword_size, idx_bits.len())
.for_each(|i_vec, builder| {
let bit = builder.get(&idx_bits, i_vec[0]);
builder.assert_eq::<Var<_>>(bit, Usize::from(0));
});
let idx_bits = idx_bits.slice(builder, 1, log2_max_codeword_size);
let reduced_codeword_by_height: Array<C, PackedCodeword<C>> =
builder.dyn_array(log2_max_codeword_size);
// initialize reduced_codeword_by_height with zeroes
iter_zip!(builder, reduced_codeword_by_height).for_each(|ptr_vec, builder| {
let zero_codeword = PackedCodeword {
low: zero,
high: zero,
};
builder.set_value(&reduced_codeword_by_height, ptr_vec[0], zero_codeword);
});
let query = builder.iter_ptr_get(&input.proof.query_opening_proof, ptr_vec[1]);
builder.cycle_tracker_start("Batching and first FRI round");
iter_zip!(builder, query.input_proofs, input.rounds, rounds_context).for_each(
|ptr_vec, builder| {
let batch_opening = builder.iter_ptr_get(&query.input_proofs, ptr_vec[0]);
let round = builder.iter_ptr_get(&input.rounds, ptr_vec[1]);
let opened_values = batch_opening.opened_values;
let perm_opened_values = builder.dyn_array(opened_values.length.clone());
let opening_proof = batch_opening.opening_proof;
let round_context = builder.iter_ptr_get(&rounds_context, ptr_vec[2]);
iter_zip!(
builder,
round_context.log2_heights,
round_context.minus_alpha_offsets,
round_context.opened_values_buffer,
round.perm,
round.openings,
)
.for_each(|ptr_vec, builder| {
let opened_values_buffer =
builder.iter_ptr_get(&round_context.opened_values_buffer, ptr_vec[2]);
let log2_height: Var<C::N> =
builder.iter_ptr_get(&round_context.log2_heights, ptr_vec[0]);
// The linear combination is by (alpha^offset, ..., alpha^(offset+width-1)), which is equal to
// alpha^offset * (1, ..., alpha^(width-1))
let minus_alpha_offset =
builder.iter_ptr_get(&round_context.minus_alpha_offsets, ptr_vec[1]);
let opening = builder.iter_ptr_get(&round.openings, ptr_vec[4]);
let width = opening.point_and_evals.evals.len();
let low_values = opened_values_buffer.slice(builder, 0, width.clone());
let high_values = opened_values_buffer.slice(
builder,
width.clone(),
opened_values_buffer.len(),
);
let all_zeros_slice = all_zeros.slice(builder, 0, width.clone());
let low = builder.fri_single_reduced_opening_eval(
alpha,
opened_values.id.get_var(),
zero_flag,
&low_values,
&all_zeros_slice,
);
let high = builder.fri_single_reduced_opening_eval(
alpha,
opened_values.id.get_var(),
zero_flag,
&high_values,
&all_zeros_slice,
);
builder.assign(&low, low * minus_alpha_offset);
builder.assign(&high, high * minus_alpha_offset);
let codeword: PackedCodeword<C> = PackedCodeword { low, high };
let codeword_acc = builder.get(&reduced_codeword_by_height, log2_height);
// reduced_openings[log2_height] += codeword
builder.assign(&codeword_acc.low, codeword_acc.low + codeword.low);
builder.assign(&codeword_acc.high, codeword_acc.high + codeword.high);
builder.set_value(&reduced_codeword_by_height, log2_height, codeword_acc);
// reorder opened values according to the permutation
let mat_j =
builder.iter_ptr_get(&round_context.opened_values_buffer, ptr_vec[2]);
let permuted_j = builder.iter_ptr_get(&round.perm, ptr_vec[3]);
// let permuted_j = j;
builder.set_value(&perm_opened_values, permuted_j, mat_j);
});
// i >>= (log2_max_codeword_size - commit.log2_max_codeword_size);
let bits_shift: Var<C::N> =
builder.eval(log2_max_codeword_size - round.commit.log2_max_codeword_size);
let reduced_idx_bits = idx_bits.slice(builder, bits_shift, idx_bits.len());
// verify input mmcs
let mmcs_verifier_input = MmcsVerifierInputVariable {
commit: round.commit.commit.clone(),
dimensions: round_context.dimensions,
index_bits: reduced_idx_bits,
opened_values: perm_opened_values,
proof: opening_proof,
};
mmcs_verify_batch(builder, mmcs_verifier_input);
},
);
builder.cycle_tracker_end("Batching and first FRI round");
let opening_ext = query.commit_phase_openings;
// fold 1st codeword
let cur_num_var: Var<C::N> = builder.eval(initial_cur_num_var);
let log2_height: Var<C::N> = builder.eval(initial_log2_height);
let r = builder.get(&input.fold_challenges, 0);
let codeword = builder.get(&reduced_codeword_by_height, log2_height);
let coeff = verifier_folding_coeffs_level(
builder,
&two_adic_generators_inverses,
log2_height,
&idx_bits,
inv_2,
);
let folded = codeword_fold_with_challenge::<C>(
builder,
codeword.low,
codeword.high,
r,
coeff,
inv_2,
);
// check commit phases
let commits = &input.proof.commits;
builder.assert_eq::<Var<C::N>>(commits.len(), opening_ext.len());
builder.cycle_tracker_start("FRI rounds");
let i: Var<C::N> = builder.constant(C::N::ZERO);
iter_zip!(builder, commits, opening_ext).for_each(|ptr_vec, builder| {
let commit = builder.iter_ptr_get(commits, ptr_vec[0]);
let commit_phase_step = builder.iter_ptr_get(&opening_ext, ptr_vec[1]);
let i_plus_one = builder.eval_expr(i + Usize::from(1));
let sibling_value = commit_phase_step.sibling_value;
let proof = commit_phase_step.opening_proof;
builder.assign(&cur_num_var, cur_num_var - Usize::from(1));
builder.assign(&log2_height, log2_height - Usize::from(1));
let folded_idx = builder.get(&idx_bits, i);
let new_involved_packed_codeword =
builder.get(&reduced_codeword_by_height, log2_height);
// Note that previous coeff is
// 1/2 * generator_of_order(2^{level + 2})^{-prev_index}
// = 1/2 * generator_of_order(2^{level + 2})^{-(index+2^level*prev_bit)}
// = 1/2 * omega^{-2^(MAX - (level + 2)) * (index+2^level*prev_bit)}
// where generator_of_order(2^k) returns the generator of order 2^k, which is omega^{2^(MAX - k)}
// since omega is the generator of order 2^MAX, where MAX is the two-adicity of this field.
// Here prev_bit is the most significant bit of prev_index, and index is removing this bit
// from prev_index.
//
// So prev ^ 2 = 1/4 * omega^{-2^(MAX - (level + 2)) * 2 * (index+2^level*prev_bit)}
// = 1/4 * omega^{-2^(MAX - (level + 1)) * (index+2^level*prev_bit)}
// = 1/4 * omega^{-2^(MAX - (level + 1)) * index - 2^(MAX - (level + 1)) * 2^level*prev_bit}
// = 1/2 * curr * omeag^{- 2^(MAX - 1) * prev_bit}
// = 1/2 * curr * generator_of_order(2)^{-prev_bit}
// Note that generator_of_order(2) is exactly -1, so
// prev ^ 2 = 1/2 * curr * (-1)^{-prev_bit}
// which gives us
// curr = 2 * prev^2 * (-1)^{prev_bit}
// Note that we haven't multplied the (-1)^{prev_bit} in the following line.
// Because `folded_idx` is just the `prev_bit`, so we reuse the following `if_eq` and multiply -1
// in the branch where `folded_idx` is != 0.
builder.assign(&coeff, coeff * coeff * two_felt);
builder.if_eq(folded_idx, Usize::from(0)).then_or_else(
|builder| {
builder.assign(&folded, folded + new_involved_packed_codeword.low);
},
|builder| {
builder.assign(&folded, folded + new_involved_packed_codeword.high);
builder.assign(&coeff, -coeff);
},
);
// leafs
let leafs = builder.dyn_array(2);
let sibling_idx = builder.eval_expr(RVar::from(1) - folded_idx);
builder.set_value(&leafs, folded_idx, folded);
builder.set_value(&leafs, sibling_idx, sibling_value);
// idx >>= 1
let idx_pair = idx_bits.slice(builder, i_plus_one, idx_bits.len());
// mmcs_ext.verify_batch
let dimensions = builder.dyn_array(1);
let opened_values = builder.dyn_array(1);
builder.set_value(&opened_values, 0, leafs.clone());
builder.set_value(&dimensions, 0, log2_height);
let ext_mmcs_verifier_input = ExtMmcsVerifierInputVariable {
commit: commit.clone(),
dimensions,
index_bits: idx_pair.clone(),
opened_values,
proof,
};
ext_mmcs_verify_batch::<C>(builder, ext_mmcs_verifier_input);
let r = builder.get(&input.fold_challenges, i_plus_one);
let left = builder.get(&leafs, 0);
let right = builder.get(&leafs, 1);
let new_folded =
codeword_fold_with_challenge(builder, left, right, r, coeff, inv_2);
builder.assign(&folded, new_folded);
builder.assign(&i, i_plus_one);
});
builder.cycle_tracker_end("FRI rounds");
// assert that final_value[i] = folded
let final_idx: Var<C::N> = builder.constant(C::N::ZERO);
builder
.range(commits.len(), idx_bits.len())
.for_each(|i_vec, builder| {
let i = i_vec[0];
let bit = builder.get(&idx_bits, i);
builder.assign(
&final_idx,
final_idx * SymbolicVar::from(C::N::from_canonical_u16(2)) + bit,
);
});
let final_value = builder.get(&final_codeword.values, final_idx);
builder.assert_eq::<Ext<C::F, C::EF>>(final_value, folded);
},
);
// 1. check initial claim match with first round sumcheck value
let batch_coeffs_offset: Var<C::N> = builder.constant(C::N::ZERO);
let expected_sum: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
iter_zip!(builder, input.rounds).for_each(|ptr_vec, builder| {
let round = builder.iter_ptr_get(&input.rounds, ptr_vec[0]);
iter_zip!(builder, round.openings).for_each(|ptr_vec, builder| {
let opening = builder.iter_ptr_get(&round.openings, ptr_vec[0]);
// TODO: filter out openings with num_var >= get_basecode_msg_size_log::<C>()
let var_diff: Var<C::N> = builder.eval(input.max_num_var.get_var() - opening.num_var);
let scalar_var = pow_2(builder, var_diff);
let scalar = builder.unsafe_cast_var_to_felt(scalar_var);
iter_zip!(builder, opening.point_and_evals.evals).for_each(|ptr_vec, builder| {
let eval = builder.iter_ptr_get(&opening.point_and_evals.evals, ptr_vec[0]);
let coeff = builder.get(&input.batch_coeffs, batch_coeffs_offset);
let val: Ext<C::F, C::EF> = builder.eval(eval * coeff * scalar);
builder.assign(&expected_sum, expected_sum + val);
builder.assign(&batch_coeffs_offset, batch_coeffs_offset + Usize::from(1));
});
});
});
let sum: Ext<C::F, C::EF> = {
let sumcheck_evals = builder.get(&input.proof.sumcheck_proof, 0).evaluations;
let eval0 = builder.get(&sumcheck_evals, 0);
let eval1 = builder.get(&sumcheck_evals, 1);
builder.eval(eval0 + eval1)
};
builder.assert_eq::<Ext<C::F, C::EF>>(expected_sum, sum);
// 2. check every round of sumcheck match with prev claims
let fold_len_minus_one: Var<C::N> = builder.eval(input.fold_challenges.len() - Usize::from(1));
builder
.range(0, fold_len_minus_one)
.for_each(|i_vec, builder| {
let i = i_vec[0];
let evals = builder.get(&input.proof.sumcheck_proof, i).evaluations;
let challenge = builder.get(&input.fold_challenges, i);
let left = interpolate_uni_poly(builder, &evals, challenge);
let i_plus_one = builder.eval_expr(i + Usize::from(1));
let next_evals = builder
.get(&input.proof.sumcheck_proof, i_plus_one)
.evaluations;
let eval0 = builder.get(&next_evals, 0);
let eval1 = builder.get(&next_evals, 1);
let right: Ext<C::F, C::EF> = builder.eval(eval0 + eval1);
builder.assert_eq::<Ext<C::F, C::EF>>(left, right);
});
// 3. check final evaluation are correct
let final_evals = builder
.get(&input.proof.sumcheck_proof, fold_len_minus_one)
.evaluations;
let final_challenge = builder.get(&input.fold_challenges, fold_len_minus_one);
let left = interpolate_uni_poly(builder, &final_evals, final_challenge);
let right: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
let one: Var<C::N> = builder.constant(C::N::ONE);
let j: Var<C::N> = builder.constant(C::N::ZERO);
// \sum_i eq(p, [r,i]) * f(r,i)
iter_zip!(builder, input.rounds,).for_each(|ptr_vec, builder| {
let round = builder.iter_ptr_get(&input.rounds, ptr_vec[0]);
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/basefold_verifier/rs.rs | ceno_recursion/src/basefold_verifier/rs.rs | // Note: check all XXX comments!
use std::{cell::RefCell, collections::BTreeMap};
use openvm_native_compiler::{asm::AsmConfig, prelude::*};
use openvm_native_recursion::hints::Hintable;
use openvm_stark_sdk::p3_baby_bear::BabyBear;
use p3::field::{FieldAlgebra, extension::BinomialExtensionField};
use serde::Deserialize;
use super::{structs::*, utils::pow_felt_bits};
pub type F = BabyBear;
pub type E = BinomialExtensionField<F, DEGREE>;
pub type InnerConfig = AsmConfig<F, E>;
/// Host-side row-major matrix of extension-field values; mirror of
/// `DenseMatrixVariable`.
pub struct DenseMatrix {
    /// All entries in row-major order; `values.len()` is expected to be a
    /// multiple of `width` (enforced in-circuit by `height`).
    pub values: Vec<E>,
    /// Number of columns per row.
    pub width: usize,
}
impl Hintable<InnerConfig> for DenseMatrix {
    type HintVariable = DenseMatrixVariable<InnerConfig>;
    // Values first, then width — matching the order `write` emits them.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let values = Vec::<E>::read(builder);
        let width = usize::read(builder);
        DenseMatrixVariable { values, width }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(self.values.write());
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.width));
        stream
    }
}
/// In-circuit row-major matrix: a flat array of extension elements plus a
/// column count.
#[derive(DslVariable, Clone)]
pub struct DenseMatrixVariable<C: Config> {
    /// Entries in row-major order.
    pub values: Array<C, Ext<C::F, C::EF>>,
    /// Number of columns per row.
    pub width: Var<C::N>,
}
pub type RowMajorMatrixVariable<C> = DenseMatrixVariable<C>;
impl<C: Config> DenseMatrixVariable<C> {
    /// Returns the number of rows.
    ///
    /// The height is nondeterministically supplied as a hint and then
    /// constrained: when `width == 0` the height must be 0, otherwise
    /// `width * height` must equal `values.len()`.
    pub fn height(&self, builder: &mut Builder<C>) -> Var<C::N> {
        // Supply height as hint
        let height = builder.hint_var();
        builder.if_eq(self.width, Usize::from(0)).then(|builder| {
            builder.assert_usize_eq(height, Usize::from(0));
        });
        builder.if_ne(self.width, Usize::from(0)).then(|builder| {
            // XXX: check that width * height is not a field multiplication
            builder.assert_usize_eq(self.width * height, self.values.len());
        });
        height
    }
    // XXX: Find better ways to handle this without cloning
    /// Pads the matrix with `fill` rows up to `new_height`, asserting that
    /// the current height does not exceed `new_height`, then rebinds
    /// `self.values` to the new (larger) backing array.
    pub fn pad_to_height(
        &self,
        builder: &mut Builder<C>,
        new_height: Var<C::N>,
        fill: Ext<C::F, C::EF>,
    ) {
        // XXX: Not necessary, only for testing purpose
        let old_height = self.height(builder);
        // old_height < new_height + 1, i.e. old_height <= new_height.
        builder.assert_less_than_slow_small_rhs(old_height, new_height + RVar::from(1));
        let new_size = builder.eval_expr(self.width * new_height);
        let evals: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(new_size);
        // Copy the existing entries...
        builder
            .range(0, self.values.len())
            .for_each(|i_vec, builder| {
                let i = i_vec[0];
                let tmp: Ext<C::F, C::EF> = builder.get(&self.values, i);
                builder.set(&evals, i, tmp);
            });
        // ...and fill the remainder with `fill`.
        builder
            .range(self.values.len(), evals.len())
            .for_each(|i_vec, builder| {
                let i = i_vec[0];
                builder.set(&evals, i, fill);
            });
        builder.assign(&self.values, evals);
    }
}
/// Log2 of the code expansion: the codeword is `1 << get_rate_log()` times
/// longer than the message (used as `max_num_var + rate_log` when computing
/// `log2_max_codeword_size`).
///
/// Declared `const fn` so it can also be evaluated in constant contexts;
/// runtime callers are unaffected.
pub const fn get_rate_log() -> usize {
    1
}
/// Log2 of the message size at which the commit phase stops folding (the
/// base-code message size). Currently 0, i.e. messages are folded all the
/// way down to a single element (see `encode_small`).
///
/// Declared `const fn` so it can also be evaluated in constant contexts;
/// runtime callers are unaffected.
pub const fn get_basecode_msg_size_log() -> usize {
    0
}
/// Number of verifier queries in the query phase. Currently fixed at 100;
/// presumably chosen for the target soundness level — confirm against the
/// security analysis before changing.
///
/// Declared `const fn` so it can also be evaluated in constant contexts;
/// runtime callers are unaffected.
pub const fn get_num_queries() -> usize {
    100
}
/// Computes the FRI folding coefficient for one level:
/// `1/2 * g^{-index}`, where `g` is the two-adic generator of order
/// `2^(level + 1)` (looked up as its inverse in the table) and `index` is
/// given by the low `level` bits of `index_bits`.
pub fn verifier_folding_coeffs_level<C: Config>(
    builder: &mut Builder<C>,
    two_adic_generators_inverses: &Array<C, Felt<C::F>>,
    level: Var<C::N>,
    index_bits: &Array<C, Var<C::N>>,
    two_inv: Felt<C::F>,
) -> Felt<C::F> {
    // The table is indexed by log2(order), so the inverse generator for
    // order 2^(level + 1) lives at slot `level + 1`.
    let level_plus_one = builder.eval::<Var<C::N>, _>(level + C::N::ONE);
    let g_inv = builder.get(two_adic_generators_inverses, level_plus_one);
    // Raise g^{-1} to the index encoded by the low `level` bits.
    let g_inv_index = pow_felt_bits(builder, g_inv, index_bits, level.into());
    builder.eval(g_inv_index * two_inv)
}
/// The DIT FFT algorithm.
#[derive(Deserialize)]
pub struct Radix2Dit {
    /// Memoized twiddle factors keyed by log2 of the FFT length.
    pub twiddles: RefCell<BTreeMap<usize, Vec<F>>>,
}
impl Hintable<InnerConfig> for Radix2Dit {
    type HintVariable = Radix2DitVariable<InnerConfig>;
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let twiddles = Vec::<E>::read(builder);
        Radix2DitVariable { twiddles }
    }
    // NOTE(review): this currently serializes an empty vector and ignores
    // `self.twiddles` entirely (the XXX below marks the missing BTreeMap
    // flattening) — the variable side will always see zero twiddles.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        // XXX: process BTreeMap
        let twiddles_vec: Vec<E> = Vec::new();
        stream.extend(twiddles_vec.write());
        stream
    }
}
#[derive(DslVariable, Clone)]
pub struct Radix2DitVariable<C: Config> {
    /// Memoized twiddle factors for each length log_n.
    /// Precise definition is a map from usize to E
    pub twiddles: Array<C, Ext<C::F, C::EF>>,
}
// impl<C: Config> Radix2DitVariable<C> {
// fn dft_batch(
// &self,
// builder: &mut Builder<C>,
// mat: RowMajorMatrixVariable<C>
// ) -> RowMajorMatrixVariable<C> {
// let h = mat.height(builder);
// let log_h = builder.hint_var();
// let log_h_minus_1: Var<C::N> = builder.eval(log_h - Usize::from(1));
// let purported_h_lower_bound = pow_2(builder, log_h_minus_1);
// let purported_h_upper_bound = pow_2(builder, log_h);
// builder.assert_less_than_slow_small_rhs(purported_h_lower_bound, h);
// builder.assert_less_than_slow_small_rhs(h, purported_h_upper_bound);
//
// TODO: support memoization
// Compute twiddle factors, or take memoized ones if already available.
// let twiddles = {
// let root = F::two_adic_generator(log_h);
// root.powers().take(1 << log_h).collect()
// };
//
// DIT butterfly
// reverse_matrix_index_bits(&mut mat);
// for layer in 0..log_h {
// dit_layer(&mut mat.as_view_mut(), layer, twiddles);
// }
// mat
// }
// }
/// Host-side verifier parameters for the Reed–Solomon code.
#[derive(Deserialize)]
pub struct RSCodeVerifierParameters {
    /// Log2 of the full message size — presumably the maximum supported
    /// message length; confirm against the prover-side parameters.
    pub full_message_size_log: usize,
}
/// In-circuit counterpart of [`RSCodeVerifierParameters`].
#[derive(DslVariable, Clone)]
pub struct RSCodeVerifierParametersVariable<C: Config> {
    pub full_message_size_log: Usize<C::N>,
}
// pub(crate) fn encode_small<C: Config>(
// builder: &mut Builder<C>,
// vp: RSCodeVerifierParametersVariable<C>,
// rmm: RowMajorMatrixVariable<C>,
// ) -> RowMajorMatrixVariable<C> {
// let m = rmm;
// Add current setup this is unnecessary
// let old_height = m.height(builder);
// let new_height = builder.eval_expr(
// old_height * Usize::from(1 << get_rate_log())
// );
// m.pad_to_height(builder, new_height, Ext::new(0));
// m
// }
/// Encode the last message sent from the prover to the verifier
/// in the commit phase. Currently, for simplicity, we drop the
/// early stopping strategy so the last message has just one
/// element, and the encoding is simply repeating this element
/// by the expansion rate.
pub(crate) fn encode_small<C: Config>(
    builder: &mut Builder<C>,
    rmm: RowMajorMatrixVariable<C>, // Assumed to have only one row and one column
) -> RowMajorMatrixVariable<C> {
    // XXX: nondeterministically supply the results for now
    // NOTE(review): the expansion rate 2 here equals `1 << get_rate_log()`;
    // keep the two in sync if the rate ever changes.
    let result = builder.array(2); // Assume the expansion rate is fixed to 2 by now
    let value = builder.get(&rmm.values, 0);
    // Repetition code: every output slot holds the single message element.
    builder.range(0, 2).for_each(|i_vec, builder| {
        let i = i_vec[0];
        builder.set_value(&result, i, value);
    });
    // Single-column result, like the input.
    DenseMatrixVariable {
        values: result,
        width: builder.eval(Usize::from(1)),
    }
}
/// Tests and test-program builders for the RS-code helpers.
///
/// Gated behind `cfg(test)` so the test programs and their dependencies are
/// not compiled into non-test builds, consistent with the sibling `mmcs`
/// module's `#[cfg(test)] pub mod tests`.
#[cfg(test)]
pub mod tests {
    use openvm_circuit::arch::{SystemConfig, VmExecutor, instructions::program::Program};
    use openvm_instructions::exe::VmExe;
    use openvm_native_circuit::{Native, NativeConfig};
    use openvm_native_compiler::{asm::AsmBuilder, prelude::*};
    use openvm_native_recursion::hints::Hintable;
    use openvm_stark_backend::config::StarkGenericConfig;
    use openvm_stark_sdk::{
        config::baby_bear_poseidon2::BabyBearPoseidon2Config, p3_baby_bear::BabyBear,
    };
    use p3::field::{FieldAlgebra, extension::BinomialExtensionField};
    type SC = BabyBearPoseidon2Config;
    type F = BabyBear;
    type E = BinomialExtensionField<F, 4>;
    type EF = <SC as StarkGenericConfig>::Challenge;
    use super::{DenseMatrix, InnerConfig};
    /// Builds a native program that reads a 5x5 matrix from the witness and
    /// pads it to 8 rows, together with the matching witness stream.
    #[allow(dead_code)]
    pub fn build_test_dense_matrix_pad() -> (Program<BabyBear>, Vec<Vec<BabyBear>>) {
        // OpenVM DSL
        let mut builder = AsmBuilder::<F, EF>::default();
        // Witness inputs
        let dense_matrix_variable = DenseMatrix::read(&mut builder);
        let new_height = builder.eval(Usize::from(8));
        let fill = Ext::new(0);
        dense_matrix_variable.pad_to_height(&mut builder, new_height, fill);
        builder.halt();
        // Pass in witness stream
        let mut witness_stream: Vec<Vec<F>> = Vec::new();
        let verifier_input = DenseMatrix {
            values: vec![E::ONE; 25],
            width: 5,
        };
        witness_stream.extend(verifier_input.write());
        // Hint for height (see `DenseMatrixVariable::height`)
        witness_stream.extend(<usize as Hintable<InnerConfig>>::write(&5));
        let program: Program<F> = builder.compile_isa();
        (program, witness_stream)
    }
    #[test]
    fn test_dense_matrix_pad() {
        let (program, witness) = build_test_dense_matrix_pad();
        let system_config = SystemConfig::default()
            .with_public_values(4)
            .with_max_segment_len((1 << 25) - 100);
        let config = NativeConfig::new(system_config, Native);
        let executor = VmExecutor::<BabyBear, NativeConfig>::new(config).unwrap();
        let exe = VmExe::new(program);
        let interpreter = executor.instance(&exe).unwrap();
        interpreter
            .execute(witness, None)
            .expect("test_dense_matrix_pad should not fail");
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/basefold_verifier/mmcs.rs | ceno_recursion/src/basefold_verifier/mmcs.rs | use openvm_native_compiler::{asm::AsmConfig, prelude::*};
use openvm_native_recursion::{hints::Hintable, vars::HintSlice};
use openvm_stark_sdk::p3_baby_bear::BabyBear;
use p3::field::extension::BinomialExtensionField;
use crate::basefold_verifier::utils::{read_hint_slice, write_mmcs_proof};
use super::{hash::*, structs::*};
pub type F = BabyBear;
pub type E = BinomialExtensionField<F, DEGREE>;
pub type InnerConfig = AsmConfig<F, E>;
pub type MmcsCommitment = Hash;
pub type MmcsProof = Vec<[F; DIGEST_ELEMS]>;
/// Host-side input for a batched MMCS (Merkle) opening verification.
pub struct MmcsVerifierInput {
    /// The Merkle root being opened against.
    pub commit: MmcsCommitment,
    /// Per-matrix dimensions, passed through to the batch verifier.
    /// NOTE(review): the in-circuit caller populates the variable-side
    /// dimensions with log2 heights — confirm the same convention here.
    pub dimensions: Vec<usize>,
    /// The queried leaf index; serialized as little-endian bits by `write`.
    pub index: usize,
    /// Opened row values, one inner vector per matrix.
    pub opened_values: Vec<Vec<F>>,
    /// Merkle authentication path, one digest per tree level.
    pub proof: MmcsProof,
}
/// In-circuit commitments are plain hash digests.
pub type MmcsCommitmentVariable<C> = HashVariable<C>;
/// In-circuit counterpart of [`MmcsVerifierInput`]; the index arrives
/// already decomposed into bits and the proof as a flat hint slice.
#[derive(DslVariable, Clone)]
pub struct MmcsVerifierInputVariable<C: Config> {
    pub commit: MmcsCommitmentVariable<C>,
    pub dimensions: Array<C, Var<C::N>>,
    /// Little-endian bits of the queried leaf index.
    pub index_bits: Array<C, Var<C::N>>,
    pub opened_values: Array<C, Array<C, Felt<C::F>>>,
    pub proof: HintSlice<C>,
}
impl Hintable<InnerConfig> for MmcsVerifierInput {
    type HintVariable = MmcsVerifierInputVariable<InnerConfig>;
    // Read order mirrors `write`: commit, dimensions, index bits, opened
    // values, proof.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let commit = MmcsCommitment::read(builder);
        let dimensions = Vec::<usize>::read(builder);
        let index_bits = Vec::<usize>::read(builder);
        let opened_values = Vec::<Vec<F>>::read(builder);
        let proof = read_hint_slice(builder);
        MmcsVerifierInputVariable {
            commit,
            dimensions,
            index_bits,
            opened_values,
            proof,
        }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        // Decompose `index` into little-endian bits, one per Merkle level
        // (`proof.len()` levels) — the circuit side consumes bits, not the
        // raw index.
        let idx_bits = (0..self.proof.len())
            .scan(self.index, |acc, _| {
                let bit = *acc & 0x01;
                *acc >>= 1;
                Some(bit)
            })
            .collect::<Vec<_>>();
        stream.extend(self.commit.write());
        stream.extend(self.dimensions.write());
        stream.extend(idx_bits.write());
        stream.extend(self.opened_values.write());
        stream.extend(write_mmcs_proof(&self.proof));
        stream
    }
}
/// Emits in-circuit constraints checking one MMCS batch opening against
/// `input.commit`.
pub fn mmcs_verify_batch<C: Config>(builder: &mut Builder<C>, input: MmcsVerifierInputVariable<C>) {
    // `verify_batch_felt` requires a dynamic array; reject the fixed variant early.
    let Array::Dyn(ptr, len) = input.dimensions else {
        panic!("Expected a dynamic array of felts");
    };
    let dimensions = Array::Dyn(ptr, len);
    builder.verify_batch_felt(
        &dimensions,
        &input.opened_values,
        input.proof.id.get_var(),
        &input.index_bits,
        &input.commit.value,
    );
}
#[cfg(test)]
pub mod tests {
use openvm_circuit::arch::{SystemConfig, VmExecutor, instructions::program::Program};
use openvm_instructions::exe::VmExe;
use openvm_native_circuit::{Native, NativeConfig};
use openvm_native_compiler::asm::AsmBuilder;
use openvm_native_recursion::hints::Hintable;
use p3::field::FieldAlgebra;
use super::{E, F, MmcsCommitment, MmcsVerifierInput, mmcs_verify_batch};
/// The witness in this test is produced by:
/// https://github.com/Jiangkm3/Plonky3 branch cyte/mmcs-poseidon2-constants
/// cargo test --package p3-merkle-tree --lib -- mmcs::tests::size_gaps --exact --show-output
#[allow(dead_code)]
pub fn build_mmcs_verify_batch() -> (Program<F>, Vec<Vec<F>>) {
    // --- Circuit: read the hinted input and verify the batch opening. ---
    let mut builder = AsmBuilder::<F, E>::default();
    let mmcs_input = MmcsVerifierInput::read(&mut builder);
    mmcs_verify_batch(&mut builder, mmcs_input);
    builder.halt();

    // --- Witness: hard-coded opening produced by the Plonky3 test referenced
    //     in the doc comment on this function. ---
    let f = |n: usize| F::from_canonical_usize(n);

    let commit = MmcsCommitment {
        value: [
            414821839, 366064801, 76927727, 1054874897, 522043147, 638338172, 1583746438,
            941156703,
        ]
        .map(f),
    };
    let dimensions = vec![7, 0, 0];
    let index = 6;
    let opened_values: Vec<Vec<F>> = [
        [
            783379538, 1083745632, 1297755122, 739705382, 1249630435, 1794480926, 706129135,
            51286871,
        ],
        [
            1782820525, 487690259, 1939320991, 1236615939, 1149125220, 1681169264, 418636771,
            1198975790,
        ],
        [
            1782820525, 487690259, 1939320991, 1236615939, 1149125220, 1681169264, 418636771,
            1198975790,
        ],
    ]
    .into_iter()
    .map(|row| row.map(f).to_vec())
    .collect();
    // Sibling digests bottom-up along the 7-level Merkle path.
    let proof = [
        [
            709175359, 862600965, 21724453, 1644204827, 1122851899, 902491334, 187250228,
            766400688,
        ],
        [
            1500388444, 788589576, 699109303, 1804289606, 295155621, 328080503, 198482491,
            1942550078,
        ],
        [
            132120813, 362247724, 635527855, 709381234, 1331884835, 1016275827, 962247980,
            1772849136,
        ],
        [
            1707124288, 1917010688, 261076785, 346295418, 1637246858, 1607442625, 777235843,
            194556598,
        ],
        [
            1410853257, 1598063795, 1111574219, 1465562989, 1102456901, 1433687377, 1376477958,
            1087266135,
        ],
        [
            278709284, 1823086849, 1969802325, 633552560, 1780238760, 297873878, 421105965,
            1357131680,
        ],
        [
            883611536, 685305811, 56966874, 170904280, 1353579462, 1357636937, 1565241058,
            209109553,
        ],
    ]
    .into_iter()
    .map(|row| row.map(f))
    .collect::<Vec<_>>();

    let mut witness_stream: Vec<Vec<F>> = Vec::new();
    witness_stream.extend(
        MmcsVerifierInput {
            commit,
            dimensions,
            index,
            opened_values,
            proof,
        }
        .write(),
    );

    // Compile the DSL program down to the native ISA.
    (builder.compile_isa(), witness_stream)
}
#[test]
fn test_mmcs_verify_batch() {
    let (program, witness) = build_mmcs_verify_batch();

    // Same VM settings used across the basefold tests.
    let vm_config = NativeConfig::new(
        SystemConfig::default()
            .with_public_values(4)
            .with_max_segment_len((1 << 25) - 100),
        Native,
    );
    let executor = VmExecutor::<F, NativeConfig>::new(vm_config).unwrap();
    let exe = VmExe::new(program);
    executor
        .instance(&exe)
        .unwrap()
        .execute(witness, None)
        .expect("test_mmcs_verify_batch should not fail");
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/basefold_verifier/extension_mmcs.rs | ceno_recursion/src/basefold_verifier/extension_mmcs.rs | use openvm_native_compiler::{asm::AsmConfig, prelude::*};
use openvm_native_recursion::{hints::Hintable, vars::HintSlice};
use openvm_stark_sdk::p3_baby_bear::BabyBear;
use p3::field::extension::BinomialExtensionField;

use crate::basefold_verifier::utils::{read_hint_slice, write_mmcs_proof};

use super::{mmcs::*, structs::*};
pub type F = BabyBear;
pub type E = BinomialExtensionField<F, DEGREE>;
pub type InnerConfig = AsmConfig<F, E>;
/// Host-side input for a single extension-field MMCS batch-opening check.
/// Mirror of `MmcsVerifierInput`, with opened values in the extension field `E`.
pub struct ExtMmcsVerifierInput {
    /// Merkle root the opening is verified against.
    pub commit: MmcsCommitment,
    /// Per-matrix dimension values (same convention as the base-field verifier).
    pub dimensions: Vec<usize>,
    /// Leaf index being opened; bit-decomposed during serialization.
    pub index: usize,
    /// Opened rows, one `Vec<E>` per committed matrix.
    pub opened_values: Vec<Vec<E>>,
    /// Sibling digests along the Merkle path.
    pub proof: MmcsProof,
}

/// In-circuit counterpart of [`ExtMmcsVerifierInput`].
#[derive(DslVariable, Clone)]
pub struct ExtMmcsVerifierInputVariable<C: Config> {
    pub commit: MmcsCommitmentVariable<C>,
    pub dimensions: Array<C, Var<C::N>>,
    /// Little-endian bits of the opened leaf index.
    pub index_bits: Array<C, Var<C::N>>,
    /// Opened rows as extension-field elements.
    pub opened_values: Array<C, Array<C, Ext<C::F, C::EF>>>,
    /// Merkle path digests as an opaque hint slice.
    pub proof: HintSlice<C>,
}
impl Hintable<InnerConfig> for ExtMmcsVerifierInput {
type HintVariable = ExtMmcsVerifierInputVariable<InnerConfig>;
fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
let commit = MmcsCommitment::read(builder);
let dimensions = Vec::<usize>::read(builder);
let index_bits = Vec::<usize>::read(builder);
let opened_values = Vec::<Vec<E>>::read(builder);
let length = Usize::from(builder.hint_var());
let id = Usize::from(builder.hint_load());
let proof = HintSlice { length, id };
ExtMmcsVerifierInputVariable {
commit,
dimensions,
index_bits,
opened_values,
proof,
}
}
fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
let mut stream = Vec::new();
stream.extend(self.commit.write());
stream.extend(self.dimensions.write());
let mut index_bits = Vec::new();
let mut index = self.index;
while index > 0 {
index_bits.push(index % 2);
index /= 2;
}
stream.extend(<Vec<usize> as Hintable<InnerConfig>>::write(&index_bits));
stream.extend(self.opened_values.write());
stream.extend(
self.proof
.iter()
.map(|p| p.to_vec())
.collect::<Vec<_>>()
.write(),
);
stream
}
}
/// Emits in-circuit constraints checking one extension-field MMCS batch
/// opening against `input.commit`.
pub(crate) fn ext_mmcs_verify_batch<C: Config>(
    builder: &mut Builder<C>,
    input: ExtMmcsVerifierInputVariable<C>,
) {
    // `verify_batch_ext` requires a dynamic array; reject the fixed variant early.
    let Array::Dyn(ptr, len) = input.dimensions else {
        panic!("Expected a dynamic array of felts");
    };
    let dimensions = Array::Dyn(ptr, len);
    builder.verify_batch_ext(
        &dimensions,
        &input.opened_values,
        input.proof.id.get_var(),
        &input.index_bits,
        &input.commit.value,
    );
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/basefold_verifier/structs.rs | ceno_recursion/src/basefold_verifier/structs.rs | use openvm_native_compiler::{asm::AsmConfig, ir::*};
use openvm_native_compiler_derive::DslVariable;
use openvm_native_recursion::hints::{Hintable, VecAutoHintable};
use openvm_stark_sdk::p3_baby_bear::BabyBear;
use p3::field::extension::BinomialExtensionField;
pub const DEGREE: usize = 4;
pub type F = BabyBear;
pub type E = BinomialExtensionField<F, DEGREE>;
pub type InnerConfig = AsmConfig<F, E>;
/// In-circuit matrix dimensions (width, height).
#[derive(DslVariable, Clone)]
pub struct DimensionsVariable<C: Config> {
    pub width: Var<C::N>,
    pub height: Var<C::N>,
}

/// Host-side matrix dimensions, hinted into the circuit as two usizes.
pub struct Dimensions {
    pub width: usize,
    pub height: usize,
}
// Allow `Vec<Dimensions>` to be hinted element-wise.
impl VecAutoHintable for Dimensions {}

impl Hintable<InnerConfig> for Dimensions {
    type HintVariable = DimensionsVariable<InnerConfig>;

    /// Reads `width` then `height`, matching the order emitted by `write`.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        DimensionsVariable {
            width: usize::read(builder),
            height: usize::read(builder),
        }
    }

    /// Serializes `width` followed by `height`.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = <usize as Hintable<InnerConfig>>::write(&self.width);
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.height));
        stream
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/basefold_verifier/basefold.rs | ceno_recursion/src/basefold_verifier/basefold.rs | use itertools::Itertools;
use mpcs::basefold::structure::BasefoldProof as InnerBasefoldProof;
use openvm_native_compiler::{asm::AsmConfig, prelude::*};
use openvm_native_recursion::hints::{Hintable, VecAutoHintable};
use openvm_stark_sdk::p3_baby_bear::BabyBear;
use p3::field::extension::BinomialExtensionField;
use serde::Deserialize;
use crate::{
basefold_verifier::{
hash::{Hash, HashVariable},
query_phase::{
PointAndEvals, PointAndEvalsVariable, QueryOpeningProofs, QueryOpeningProofsVariable,
},
},
tower_verifier::binding::{IOPProverMessage, IOPProverMessageVariable},
};
use super::{mmcs::*, structs::DEGREE};
pub type F = BabyBear;
pub type E = BinomialExtensionField<F, DEGREE>;
pub type InnerConfig = AsmConfig<F, E>;
pub type HashDigest = MmcsCommitment;
/// Commitment to a batch of polynomials in the Basefold PCS: a hash digest
/// plus the log2 size of the largest committed codeword.
#[derive(Deserialize, Debug)]
pub struct BasefoldCommitment {
    pub commit: HashDigest,
    pub log2_max_codeword_size: usize,
}

use mpcs::BasefoldCommitment as InnerBasefoldCommitment;

// Converts the prover-side (mpcs) commitment into this verifier's representation.
impl From<InnerBasefoldCommitment<E>> for BasefoldCommitment {
    fn from(value: InnerBasefoldCommitment<E>) -> Self {
        Self {
            commit: Hash {
                value: value.commit().into(),
            },
            log2_max_codeword_size: value.log2_max_codeword_size,
        }
    }
}
impl Hintable<InnerConfig> for BasefoldCommitment {
    type HintVariable = BasefoldCommitmentVariable<InnerConfig>;

    /// Reads the digest, then the codeword-size log, matching `write`.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let commit = HashDigest::read(builder);
        let log2 = usize::read(builder);
        BasefoldCommitmentVariable {
            commit,
            log2_max_codeword_size: Usize::Var(log2),
        }
    }

    /// Serializes the digest followed by the codeword-size log.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = self.commit.write();
        stream.extend(<usize as Hintable<InnerConfig>>::write(
            &self.log2_max_codeword_size,
        ));
        stream
    }
}
/// In-circuit hash digest (same layout as an MMCS commitment).
pub type HashDigestVariable<C> = MmcsCommitmentVariable<C>;

/// In-circuit counterpart of [`BasefoldCommitment`].
#[derive(DslVariable, Clone)]
pub struct BasefoldCommitmentVariable<C: Config> {
    pub commit: HashDigestVariable<C>,
    pub log2_max_codeword_size: Usize<C::N>,
}

/// Host-side Basefold opening proof.
#[derive(Deserialize)]
pub struct BasefoldProof {
    /// Folding-round commitments (observed round by round during verification).
    pub commits: Vec<Hash>,
    /// Final (fully folded) messages, one vector per batch.
    pub final_message: Vec<Vec<E>>,
    /// Per-query opening proofs.
    pub query_opening_proof: QueryOpeningProofs,
    /// Sumcheck prover messages; empty when the inner proof carried none.
    pub sumcheck_proof: Vec<IOPProverMessage>,
    /// Witness for the proof-of-work (grinding) check.
    pub pow_witness: F,
}

/// In-circuit counterpart of [`BasefoldProof`].
#[derive(DslVariable, Clone)]
pub struct BasefoldProofVariable<C: Config> {
    pub commits: Array<C, HashVariable<C>>,
    pub final_message: Array<C, Array<C, Ext<C::F, C::EF>>>,
    pub query_opening_proof: QueryOpeningProofsVariable<C>,
    pub sumcheck_proof: Array<C, IOPProverMessageVariable<C>>,
    pub pow_witness: Felt<C::F>,
}
impl Hintable<InnerConfig> for BasefoldProof {
    type HintVariable = BasefoldProofVariable<InnerConfig>;

    /// Reads all proof components in the order emitted by `write`.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        BasefoldProofVariable {
            commits: Vec::<Hash>::read(builder),
            final_message: Vec::<Vec<E>>::read(builder),
            query_opening_proof: QueryOpeningProofs::read(builder),
            sumcheck_proof: Vec::<IOPProverMessage>::read(builder),
            pow_witness: F::read(builder),
        }
    }

    /// Serializes all proof components; order must match `read`.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = self.commits.write();
        stream.extend(self.final_message.write());
        stream.extend(self.query_opening_proof.write());
        stream.extend(self.sumcheck_proof.write());
        stream.extend(self.pow_witness.write());
        stream
    }
}
/// Converts the prover-side (mpcs) proof into this verifier's representation.
/// A missing sumcheck proof is mapped to an empty vector.
impl From<InnerBasefoldProof<E>> for BasefoldProof {
    fn from(proof: InnerBasefoldProof<E>) -> Self {
        let commits = proof.commits.iter().map(|c| (*c).into()).collect();
        let query_opening_proof = proof
            .query_opening_proof
            .iter()
            .cloned()
            .map(Into::into)
            .collect();
        let sumcheck_proof = match proof.sumcheck_proof {
            Some(messages) => messages.into_iter().map(Into::into).collect(),
            None => vec![],
        };
        BasefoldProof {
            commits,
            final_message: proof.final_message,
            query_opening_proof,
            sumcheck_proof,
            pow_witness: proof.pow_witness,
        }
    }
}
/// One claimed opening within a round: the polynomial's variable count plus
/// the evaluation point and claimed evaluations.
#[derive(Deserialize)]
pub struct RoundOpening {
    pub num_var: usize,
    pub point_and_evals: PointAndEvals,
}

/// In-circuit counterpart of [`RoundOpening`].
#[derive(DslVariable, Clone)]
pub struct RoundOpeningVariable<C: Config> {
    pub num_var: Var<C::N>,
    pub point_and_evals: PointAndEvalsVariable<C>,
}
impl Hintable<InnerConfig> for RoundOpening {
    type HintVariable = RoundOpeningVariable<InnerConfig>;

    /// Reads `num_var` then the point/evals, matching `write`.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        RoundOpeningVariable {
            num_var: usize::read(builder),
            point_and_evals: PointAndEvals::read(builder),
        }
    }

    /// Serializes `num_var` followed by the point/evals.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = <usize as Hintable<InnerConfig>>::write(&self.num_var);
        stream.extend(self.point_and_evals.write());
        stream
    }
}

// Allow `Vec<RoundOpening>` to be hinted element-wise.
impl VecAutoHintable for RoundOpening {}
/// One PCS round: a commitment plus the openings claimed against it.
#[derive(Deserialize)]
pub struct Round {
    pub commit: BasefoldCommitment,
    pub openings: Vec<RoundOpening>,
}

/// In-circuit counterpart of [`Round`].
#[derive(DslVariable, Clone)]
pub struct RoundVariable<C: Config> {
    pub commit: BasefoldCommitmentVariable<C>,
    pub openings: Array<C, RoundOpeningVariable<C>>,
    /// For each opening (original order), its position after a stable sort by
    /// decreasing `num_var`; computed host-side in `Round::write`.
    pub perm: Array<C, Var<C::N>>,
}
impl Hintable<InnerConfig> for Round {
    type HintVariable = RoundVariable<InnerConfig>;

    /// Reads commitment, openings, and the sort permutation, matching `write`.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        RoundVariable {
            commit: BasefoldCommitment::read(builder),
            openings: Vec::<RoundOpening>::read(builder),
            perm: Vec::<usize>::read(builder),
        }
    }

    /// Serializes the round and appends `perm`, where `perm[i]` is the rank of
    /// opening `i` after a stable sort by decreasing `num_var`.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        // Stable-sort the original indices by decreasing num_var, then invert
        // the ordering: perm[original index] = sorted position.
        let mut order: Vec<usize> = (0..self.openings.len()).collect();
        order.sort_by(|&a, &b| self.openings[b].num_var.cmp(&self.openings[a].num_var));
        let mut perm = vec![0; self.openings.len()];
        for (rank, &original) in order.iter().enumerate() {
            perm[original] = rank;
        }

        let mut stream = self.commit.write();
        stream.extend(self.openings.write());
        stream.extend(perm.write());
        stream
    }
}

// Allow `Vec<Round>` to be hinted element-wise.
impl VecAutoHintable for Round {}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/basefold_verifier/utils.rs | ceno_recursion/src/basefold_verifier/utils.rs | use openvm_native_compiler::ir::*;
use openvm_native_recursion::vars::HintSlice;
use p3::{babybear::BabyBear, field::FieldAlgebra};
use crate::basefold_verifier::mmcs::MmcsProof;
// XXX: more efficient pow implementation
/// Computes `base^exponent` in-circuit by repeated multiplication
/// (`exponent` loop iterations).
pub fn pow<C: Config>(builder: &mut Builder<C>, base: Var<C::N>, exponent: Var<C::N>) -> Var<C::N> {
    let acc: Var<C::N> = builder.constant(C::N::ONE);
    builder.range(0, exponent).for_each(|_, builder| {
        builder.assign(&acc, acc * base);
    });
    acc
}
// XXX: more efficient pow implementation
/// Computes `base^exponent` for a felt base by repeated multiplication.
pub fn pow_felt<C: Config>(
    builder: &mut Builder<C>,
    base: Felt<C::F>,
    exponent: Var<C::N>,
) -> Felt<C::F> {
    let acc: Felt<C::F> = builder.constant(C::F::ONE);
    builder.range(0, exponent).for_each(|_, builder| {
        builder.assign(&acc, acc * base);
    });
    acc
}
// XXX: more efficient pow implementation
/// Computes `base^e` where `e` is given by the first `exponent_len` entries of
/// `exponent_bits`, interpreted in BIG-endian order (most significant first).
pub fn pow_felt_bits<C: Config>(
    builder: &mut Builder<C>,
    base: Felt<C::F>,
    exponent_bits: &Array<C, Var<C::N>>,
    exponent_len: Usize<C::N>, // This is not exponent_bits.len(), possibly smaller than it
) -> Felt<C::F> {
    // Big-endian square-and-multiply: square on every step, multiply in `base`
    // whenever the current bit is set.
    let acc: Felt<C::F> = builder.constant(C::F::ONE);
    builder.range(0, exponent_len).for_each(|idx_vec, builder| {
        let idx = idx_vec[0];
        builder.assign(&acc, acc * acc);
        let bit = builder.get(exponent_bits, idx);
        builder.if_eq(bit, C::N::ONE).then(|builder| {
            builder.assign(&acc, acc * base);
        });
    });
    acc
}
/// Computes `2^exponent` in-circuit.
pub fn pow_2<C: Config>(builder: &mut Builder<C>, exponent: Var<C::N>) -> Var<C::N> {
    let base: Var<C::N> = builder.constant(C::N::from_canonical_usize(2));
    pow(builder, base, exponent)
}
// XXX: Equally outrageously inefficient
/// Returns the smallest power of two >= `value` (`value == 1` maps to 1).
///
/// The exponent is supplied non-deterministically as a hint and then verified
/// in-circuit, so a malicious hint cannot change the result.
pub fn next_power_of_two<C: Config>(builder: &mut Builder<C>, value: Var<C::N>) -> Var<C::N> {
    // Non-deterministically supply the exponent n such that
    // 2^n < v <= 2^{n+1}
    // Ignore if v == 1
    let n: Var<C::N> = builder.hint_var();
    let ret = pow_2(builder, n);
    builder.if_eq(value, Usize::from(1)).then(|builder| {
        builder.assign(&ret, Usize::from(1));
    });
    builder.if_ne(value, Usize::from(1)).then(|builder| {
        // Enforce 2^n < value <= 2^{n+1}; `ret` ends up as 2^{n+1}.
        builder.assert_less_than_slow_bit_decomp(ret, value);
        let two: Var<C::N> = builder.constant(C::N::from_canonical_usize(2));
        builder.assign(&ret, ret * two);
        let ret_plus_one = builder.eval(ret + Usize::from(1));
        builder.assert_less_than_slow_bit_decomp(value, ret_plus_one);
    });
    ret
}
// Dot product: li * ri
/// Computes the in-circuit dot product of all of `li` against the prefix of
/// `ri` of the same length (both starting at offset 0).
pub fn dot_product<C: Config, F>(
    builder: &mut Builder<C>,
    li: &Array<C, Ext<C::F, C::EF>>,
    ri: &Array<C, F>,
) -> Ext<C::F, C::EF>
where
    F: openvm_native_compiler::ir::MemVariable<C> + 'static,
{
    dot_product_with_index::<C, F>(builder, li, ri, Usize::from(0), Usize::from(0), li.len())
}
/// Computes `sum_{i < len} li[llo + i] * ri[rlo + i]` in-circuit.
pub fn dot_product_with_index<C: Config, F>(
    builder: &mut Builder<C>,
    li: &Array<C, Ext<C::F, C::EF>>,
    ri: &Array<C, F>,
    llo: Usize<C::N>,
    rlo: Usize<C::N>,
    len: Usize<C::N>,
) -> Ext<C::F, C::EF>
where
    F: openvm_native_compiler::ir::MemVariable<C> + 'static,
{
    let acc: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
    builder.range(0, len).for_each(|idx_vec, builder| {
        let offset = idx_vec[0];
        let left_idx: Var<C::N> = builder.eval(llo.clone() + offset);
        let right_idx: Var<C::N> = builder.eval(rlo.clone() + offset);
        let left = builder.get(li, left_idx);
        let right = builder.get(ri, right_idx);
        builder.assign(&acc, acc + left * right);
    });
    acc
}
/// Interprets the first `len` entries of `bin` as a BIG-endian bit string and
/// returns its integer value (Horner's rule: acc = 2 * acc + bit).
pub fn bin_to_dec<C: Config>(
    builder: &mut Builder<C>,
    bin: &Array<C, Var<C::N>>,
    len: Var<C::N>,
) -> Var<C::N> {
    let acc: Var<C::N> = builder.constant(C::N::ZERO);
    let two: Var<C::N> = builder.constant(C::N::TWO);
    builder.range(0, len).for_each(|idx_vec, builder| {
        let bit = builder.get(bin, idx_vec[0]);
        builder.assign(&acc, acc * two);
        builder.assign(&acc, acc + bit);
    });
    acc
}
/// Interprets `bin[start..end]` as a LITTLE-endian bit string and returns its
/// integer value (accumulating bit * 2^position).
pub fn bin_to_dec_le<C: Config>(
    builder: &mut Builder<C>,
    bin: &Array<C, Var<C::N>>,
    start: Var<C::N>,
    end: Var<C::N>,
) -> Var<C::N> {
    let acc: Var<C::N> = builder.constant(C::N::ZERO);
    let two: Var<C::N> = builder.constant(C::N::TWO);
    let weight: Var<C::N> = builder.constant(C::N::ONE);
    builder.range(start, end).for_each(|idx_vec, builder| {
        let bit = builder.get(bin, idx_vec[0]);
        builder.assign(&acc, acc + weight * bit);
        builder.assign(&weight, weight * two);
    });
    acc
}
// Sort a list in decreasing order (via non-deterministic hints), returns:
// 1. entries_order: the original index of each sorted entry
// 2. num_unique_entries: number of distinct entry values
// 3. count_per_unique_entry: for each distinct value (sorted order), how many
//    entries have that value
// 4. sorted_unique_num_vars: the distinct values themselves, decreasing
pub fn sort_with_count<C: Config, E, N, Ind>(
    builder: &mut Builder<C>,
    list: &Array<C, E>,
    ind: Ind, // Convert loaded out entries into comparable ones
) -> (
    Array<C, Var<C::N>>,
    Var<C::N>,
    Array<C, Var<C::N>>,
    Array<C, Var<C::N>>,
)
where
    E: openvm_native_compiler::ir::MemVariable<C>,
    N: Into<SymbolicVar<<C as openvm_native_compiler::ir::Config>::N>>
        + openvm_native_compiler::ir::Variable<C>,
    Ind: Fn(E) -> N,
{
    let len = list.len();
    // Nondeterministically supplies:
    // 1. num_unique_entries: number of different entries
    // 2. entry_order: after sorting by decreasing order, the original index of each entry
    // To ensure that entry_order represents sorted index, assert that
    // 1. It has the same length as list (checked by requesting list.len() hints)
    // 2. It does not contain the same index twice (checked via a correspondence array)
    // 3. Sorted entries are in decreasing order
    // While checking, record:
    // 1. count_per_unique_entry: for each unique entry value, count of entries of that value
    let num_unique_entries = builder.hint_var();
    let count_per_unique_entry = builder.dyn_array(num_unique_entries);
    let sorted_unique_num_vars = builder.dyn_array(num_unique_entries);
    let zero: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
    let one: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    // Marker array: flips 0 -> 1 once index i has been consumed, so a
    // duplicated hint index trips the assert below.
    let entries_sort_surjective: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(len.clone());
    builder.range(0, len.clone()).for_each(|i_vec, builder| {
        let i = i_vec[0];
        builder.set(&entries_sort_surjective, i, zero);
    });
    let entries_order = builder.dyn_array(len.clone());
    // The first (largest) entry is handled outside the loop to seed the
    // running "last entry" state.
    let next_order = builder.hint_var();
    // Check surjection
    let surjective = builder.get(&entries_sort_surjective, next_order);
    builder.assert_ext_eq(surjective, zero);
    builder.set(&entries_sort_surjective, next_order, one);
    builder.set_value(&entries_order, 0, next_order);
    let last_entry = ind(builder.get(list, next_order));
    let last_unique_entry_index: Var<C::N> = builder.eval(Usize::from(0));
    let last_count_per_unique_entry: Var<C::N> = builder.eval(Usize::from(1));
    builder.range(1, len).for_each(|i_vec, builder| {
        let i = i_vec[0];
        let next_order = builder.hint_var();
        // Check surjection
        let surjective = builder.get(&entries_sort_surjective, next_order);
        builder.assert_ext_eq(surjective, zero);
        builder.set(&entries_sort_surjective, next_order, one);
        // Check entries
        let next_entry = ind(builder.get(list, next_order));
        builder
            .if_eq(last_entry.clone(), next_entry.clone())
            .then(|builder| {
                // next_entry == last_entry: extend the current run
                builder.assign(
                    &last_count_per_unique_entry,
                    last_count_per_unique_entry + Usize::from(1),
                );
            });
        builder
            .if_ne(last_entry.clone(), next_entry.clone())
            .then(|builder| {
                // next_entry < last_entry (enforced): close the current run
                builder.assert_less_than_slow_small_rhs(next_entry.clone(), last_entry.clone());
                // Update count_per_unique_entry
                builder.set(
                    &count_per_unique_entry,
                    last_unique_entry_index,
                    last_count_per_unique_entry,
                );
                builder.set(
                    &sorted_unique_num_vars,
                    last_unique_entry_index,
                    last_entry.clone(),
                );
                builder.assign(&last_entry, next_entry.clone());
                builder.assign(
                    &last_unique_entry_index,
                    last_unique_entry_index + Usize::from(1),
                );
                builder.assign(&last_count_per_unique_entry, Usize::from(1));
            });
        builder.set_value(&entries_order, i, next_order);
    });
    // Final check on num_unique_entries and count_per_unique_entry
    // (flushes the counts for the trailing run of equal entries).
    builder.set(
        &count_per_unique_entry,
        last_unique_entry_index,
        last_count_per_unique_entry,
    );
    builder.set(
        &sorted_unique_num_vars,
        last_unique_entry_index,
        last_entry.clone(),
    );
    builder.assign(
        &last_unique_entry_index,
        last_unique_entry_index + Usize::from(1),
    );
    builder.assert_var_eq(last_unique_entry_index, num_unique_entries);
    (
        entries_order,
        num_unique_entries,
        count_per_unique_entry,
        sorted_unique_num_vars,
    )
}
/// Folds a pair of codeword entries with a folding challenge.
///
/// The pair is (left, right) = (lo + hi*x, lo - hi*x). We recover
/// (lo, hi) = ((left + right)/2, (left - right)/(2x)) — `inv_2` is 1/2 and
/// `coeff` is expected to be (2x)^(-1), e.g. (2 * dit_butterfly)^(-1) for RS
/// code — and return the multilinear fold (1 - challenge)*lo + challenge*hi,
/// which matches fixing a variable of the raw message in the Lagrange domain.
pub fn codeword_fold_with_challenge<C: Config>(
    builder: &mut Builder<C>,
    left: Ext<C::F, C::EF>,
    right: Ext<C::F, C::EF>,
    challenge: Ext<C::F, C::EF>,
    coeff: Felt<C::F>,
    inv_2: Felt<C::F>,
) -> Ext<C::F, C::EF> {
    let lo: Ext<C::F, C::EF> = builder.eval((left + right) * inv_2);
    let hi: Ext<C::F, C::EF> = builder.eval((left - right) * coeff);
    // lo + challenge * (hi - lo) == (1 - challenge) * lo + challenge * hi
    let folded: Ext<C::F, C::EF> = builder.eval(lo + challenge * (hi - lo));
    folded
}
/// Reads one length-prefixed hint slice: a length felt followed by an opaque
/// stream handle (loaded wholesale rather than element by element).
pub(crate) fn read_hint_slice<C: Config>(builder: &mut Builder<C>) -> HintSlice<C> {
    HintSlice {
        length: Usize::from(builder.hint_var()),
        id: Usize::from(builder.hint_load()),
    }
}
/// Serializes a Merkle path for `read_hint_slice`: one stream holding the
/// number of levels, then one stream with all digest elements flattened.
pub(crate) fn write_mmcs_proof(proof: &MmcsProof) -> Vec<Vec<BabyBear>> {
    let header = vec![BabyBear::from_canonical_usize(proof.len())];
    let digests = proof.iter().flat_map(|d| d.iter().copied()).collect();
    vec![header, digests]
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/basefold_verifier/mod.rs | ceno_recursion/src/basefold_verifier/mod.rs | #![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unused_imports)]
pub(crate) mod basefold;
pub(crate) mod extension_mmcs;
pub(crate) mod hash;
pub(crate) mod mmcs;
pub(crate) mod query_phase;
pub(crate) mod rs;
pub(crate) mod structs;
// pub(crate) mod field;
pub(crate) mod utils;
pub(crate) mod verifier;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/basefold_verifier/hash.rs | ceno_recursion/src/basefold_verifier/hash.rs | use openvm_native_compiler::{asm::AsmConfig, prelude::*};
use openvm_native_recursion::hints::{Hintable, VecAutoHintable};
use openvm_stark_sdk::p3_baby_bear::BabyBear;
use p3::field::extension::BinomialExtensionField;
use serde::Deserialize;
use super::structs::DEGREE;
pub const DIGEST_ELEMS: usize = 8;
pub type F = BabyBear;
pub type E = BinomialExtensionField<F, DEGREE>;
pub type InnerConfig = AsmConfig<F, E>;
/// Host-side hash digest: `DIGEST_ELEMS` base-field elements.
#[derive(Deserialize, Default, Debug)]
pub struct Hash {
    pub value: [F; DIGEST_ELEMS],
}

// Converts a plonky3 digest into this crate's representation.
impl From<p3::symmetric::Hash<F, F, DIGEST_ELEMS>> for Hash {
    fn from(hash: p3::symmetric::Hash<F, F, DIGEST_ELEMS>) -> Self {
        Hash { value: hash.into() }
    }
}

/// In-circuit counterpart of [`Hash`].
#[derive(DslVariable, Clone)]
pub struct HashVariable<C: Config> {
    pub value: Array<C, Felt<C::F>>,
}
// Allow `Vec<Hash>` to be hinted element-wise.
impl VecAutoHintable for Hash {}

impl Hintable<InnerConfig> for Hash {
    type HintVariable = HashVariable<InnerConfig>;

    /// Reads `DIGEST_ELEMS` felts from the hint stream.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        HashVariable {
            value: builder.hint_felts_fixed(DIGEST_ELEMS),
        }
    }

    /// Serializes the digest as one single-element stream per felt, matching
    /// `hint_felts_fixed`.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        self.value.iter().map(|&felt| vec![felt]).collect()
    }
}
#[cfg(test)]
mod tests {
use openvm_circuit::arch::{SystemConfig, VmExecutor};
use openvm_instructions::exe::VmExe;
use openvm_native_circuit::{Native, NativeConfig};
use openvm_native_compiler::asm::AsmBuilder;
use crate::basefold_verifier::basefold::HashDigest;
use super::*;
#[test]
fn test_read_to_hash_variable() {
    // Program: read a single digest from the hint stream, then halt.
    let mut builder = AsmBuilder::<F, E>::default();
    let _digest = HashDigest::read(&mut builder);
    builder.halt();

    // Hint stream: a default (all-zero) digest.
    let mut input = Vec::new();
    input.extend(Hash::default().write());

    // Execute on the native VM; a malformed hint layout would abort here.
    let config = NativeConfig::new(
        SystemConfig::default().with_max_segment_len(1 << 20),
        Native,
    );
    let executor = VmExecutor::new(config).unwrap();
    let exe = VmExe::new(builder.compile_isa());
    executor.instance(&exe).unwrap().execute(input, None).unwrap();
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/basefold_verifier/verifier.rs | ceno_recursion/src/basefold_verifier/verifier.rs | use crate::{
basefold_verifier::query_phase::{QueryPhaseVerifierInputVariable, batch_verifier_query_phase},
transcript::{transcript_check_pow_witness, transcript_observe_label},
};
use super::{basefold::*, rs::*, utils::*};
use ff_ext::BabyBearExt4;
use openvm_native_compiler::{asm::AsmConfig, ir::FromConstant, prelude::*};
use openvm_native_compiler_derive::iter_zip;
use openvm_native_recursion::challenger::{
CanObserveDigest, CanObserveVariable, CanSampleBitsVariable, FeltChallenger,
duplex::DuplexChallengerVariable,
};
use openvm_stark_sdk::p3_baby_bear::BabyBear;
use p3::field::FieldAlgebra;
pub type F = BabyBear;
pub type E = BabyBearExt4;
pub type InnerConfig = AsmConfig<F, E>;
pub fn batch_verify<C: Config>(
builder: &mut Builder<C>,
max_num_var: Var<C::N>,
max_width: Var<C::N>,
rounds: Array<C, RoundVariable<C>>,
proof: BasefoldProofVariable<C>,
challenger: &mut DuplexChallengerVariable<C>,
) {
builder.cycle_tracker_start("prior query phase");
builder.assert_nonzero(&proof.final_message.len());
builder.assert_nonzero(&proof.sumcheck_proof.len());
// we don't support early stopping for now
iter_zip!(builder, proof.final_message).for_each(|ptr_vec, builder| {
let final_message_len = builder.iter_ptr_get(&proof.final_message, ptr_vec[0]).len();
builder.assert_eq::<Usize<C::N>>(
final_message_len,
Usize::from(1 << get_basecode_msg_size_log()),
);
});
builder.assert_eq::<Usize<C::N>>(
proof.query_opening_proof.len(),
Usize::from(get_num_queries()),
);
// Compute the total number of polynomials across all rounds
let total_num_polys: Var<C::N> = builder.constant(C::N::ZERO);
iter_zip!(builder, rounds).for_each(|ptr_vec, builder| {
let openings = builder.iter_ptr_get(&rounds, ptr_vec[0]).openings;
iter_zip!(builder, openings).for_each(|ptr_vec_openings, builder| {
let evals_num = builder
.iter_ptr_get(&openings, ptr_vec_openings[0])
.point_and_evals
.evals
.len();
builder.assign(&total_num_polys, total_num_polys + evals_num);
});
});
// get batch coeffs
transcript_observe_label(builder, challenger, b"batch coeffs");
let batch_coeff = challenger.sample_ext(builder);
let running_coeff =
<Ext<C::F, C::EF> as FromConstant<C>>::constant(C::EF::from_canonical_usize(1), builder);
let batch_coeffs: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(total_num_polys);
iter_zip!(builder, batch_coeffs).for_each(|ptr_vec_batch_coeffs, builder| {
builder.iter_ptr_set(&batch_coeffs, ptr_vec_batch_coeffs[0], running_coeff);
builder.assign(&running_coeff, running_coeff * batch_coeff);
});
// The max num var and max width are provided by the prover and not guaranteed to be correct.
// Check that
// 1. max_num_var is greater than or equal to every num var (same for width);
// 2. it is equal to at least one of the num vars by multiplying all the differences
// together and assert the product is zero (same for width).
let diff_product_num_var: Var<C::N> = builder.eval(Usize::from(1));
let diff_product_width: Var<C::N> = builder.eval(Usize::from(1));
iter_zip!(builder, rounds).for_each(|ptr_vec, builder| {
let round = builder.iter_ptr_get(&rounds, ptr_vec[0]);
iter_zip!(builder, round.openings).for_each(|ptr_vec_opening, builder| {
let opening = builder.iter_ptr_get(&round.openings, ptr_vec_opening[0]);
let diff: Var<C::N> = builder.eval(max_num_var - opening.num_var);
// num_var is always smaller than 32.
builder.range_check_var(diff, 5);
builder.assign(&diff_product_num_var, diff_product_num_var * diff);
let diff: Var<C::N> = builder.eval(max_width - opening.point_and_evals.evals.len());
// width is always smaller than 2^14.
builder.range_check_var(diff, 14);
builder.assign(&diff_product_width, diff_product_width * diff);
});
});
// Check that at least one num_var is equal to max_num_var
let zero: Var<C::N> = builder.eval(C::N::ZERO);
builder.assert_eq::<Var<C::N>>(diff_product_num_var, zero);
builder.assert_eq::<Var<C::N>>(diff_product_width, zero);
let num_rounds: Var<C::N> =
builder.eval(max_num_var - Usize::from(get_basecode_msg_size_log()));
let fold_challenges: Array<C, Ext<_, _>> = builder.dyn_array(max_num_var);
builder.range(0, num_rounds).for_each(|index_vec, builder| {
let sumcheck_message = builder.get(&proof.sumcheck_proof, index_vec[0]).evaluations;
iter_zip!(builder, sumcheck_message).for_each(|ptr_vec_sumcheck_message, builder| {
let elem = builder.iter_ptr_get(&sumcheck_message, ptr_vec_sumcheck_message[0]);
let elem_felts = builder.ext2felt(elem);
challenger.observe_slice(builder, elem_felts);
});
transcript_observe_label(builder, challenger, b"commit round");
let challenge = challenger.sample_ext(builder);
builder.set(&fold_challenges, index_vec[0], challenge);
builder
.if_ne(index_vec[0], num_rounds - Usize::from(1))
.then(|builder| {
let commit = builder.get(&proof.commits, index_vec[0]);
challenger.observe_digest(builder, commit.value.into());
});
});
iter_zip!(builder, proof.final_message).for_each(|ptr_vec_sumcheck_message, builder| {
// Each final message should contain a single element, since the final
// message size log is assumed to be zero
let elems = builder.iter_ptr_get(&proof.final_message, ptr_vec_sumcheck_message[0]);
let elem = builder.get(&elems, 0);
let elem_felts = builder.ext2felt(elem);
challenger.observe_slice(builder, elem_felts);
});
transcript_check_pow_witness(builder, challenger, 16, proof.pow_witness); // TODO: avoid hardcoding pow bits
transcript_observe_label(builder, challenger, b"query indices");
let queries: Array<C, Var<C::N>> = builder.dyn_array(get_num_queries());
builder
.range(0, get_num_queries())
.for_each(|index_vec, builder| {
let number_of_bits = builder.eval_expr(max_num_var + Usize::from(get_rate_log()));
let query = challenger.sample_bits(builder, number_of_bits);
// TODO: the index will need to be split back to bits in query phase, so it's
// probably better to avoid converting bits to integer altogether
let number_of_bits = builder.eval(max_num_var + Usize::from(get_rate_log()));
let query = bin_to_dec_le(builder, &query, zero, number_of_bits);
builder.set(&queries, index_vec[0], query);
});
let input = QueryPhaseVerifierInputVariable {
max_num_var: builder.eval(max_num_var),
max_width: builder.eval(max_width),
batch_coeffs,
fold_challenges,
indices: queries,
proof,
rounds,
};
builder.cycle_tracker_end("prior query phase");
builder.cycle_tracker_start("query phase");
batch_verifier_query_phase(builder, input);
builder.cycle_tracker_end("query phase");
}
#[cfg(test)]
pub mod tests {
use std::{cmp::Reverse, collections::BTreeMap, iter::once};
use ff_ext::{BabyBearExt4, FromUniformBytes};
use itertools::Itertools;
use mpcs::{
BasefoldDefault, BasefoldRSParams, BasefoldSpec, PCSFriParam, PolynomialCommitmentScheme,
pcs_batch_commit, pcs_setup, pcs_trim, util::hash::write_digest_to_transcript,
};
use multilinear_extensions::mle::MultilinearExtension;
use openvm_circuit::arch::{SystemConfig, VmExecutor, instructions::program::Program};
use openvm_native_circuit::{Native, NativeConfig};
use openvm_native_compiler::{asm::AsmBuilder, conversion::CompilerOptions};
use openvm_native_recursion::{challenger::duplex::DuplexChallengerVariable, hints::Hintable};
use openvm_stark_backend::p3_challenger::GrindingChallenger;
use openvm_stark_sdk::{config::baby_bear_poseidon2::Challenger, p3_baby_bear::BabyBear};
use p3::field::{Field, FieldAlgebra};
use rand::thread_rng;
use serde::Deserialize;
use transcript::{BasicTranscript, Transcript};
type F = BabyBear;
type E = BabyBearExt4;
type Pcs = BasefoldDefault<E>;
use super::{BasefoldProof, BasefoldProofVariable, InnerConfig, RoundVariable, batch_verify};
use crate::{
basefold_verifier::{
basefold::{BasefoldCommitment, Round, RoundOpening},
query_phase::{BatchOpening, CommitPhaseProofStep, PointAndEvals, QueryOpeningProof},
},
tower_verifier::binding::{Point, PointAndEval},
};
use openvm_native_compiler::{asm::AsmConfig, prelude::*};
/// Host-side input for the recursive Basefold batch verifier, serialized into
/// the hint stream via `Hintable::write` below.
#[derive(Deserialize)]
pub struct VerifierInput {
    // Prover-supplied upper bounds over all openings; `batch_verify` re-checks
    // them in-circuit, so they need not be trusted here.
    pub max_num_var: usize,
    pub max_width: usize,
    pub proof: BasefoldProof,
    pub rounds: Vec<Round>,
}
impl Hintable<InnerConfig> for VerifierInput {
    type HintVariable = VerifierInputVariable<InnerConfig>;

    /// Reads a `VerifierInput` from the hint stream. Fields are read in the
    /// same order that `write` emits them (struct-literal fields evaluate in
    /// declaration order).
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        VerifierInputVariable {
            max_num_var: usize::read(builder),
            max_width: usize::read(builder),
            proof: BasefoldProof::read(builder),
            rounds: Vec::<Round>::read(builder),
        }
    }

    /// Serializes the input into hint words, mirroring `read` field order.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut buf = Vec::new();
        buf.extend(<usize as Hintable<InnerConfig>>::write(&self.max_num_var));
        buf.extend(<usize as Hintable<InnerConfig>>::write(&self.max_width));
        buf.extend(self.proof.write());
        buf.extend(self.rounds.write());
        buf
    }
}
/// In-circuit (DSL) mirror of [`VerifierInput`], produced by `Hintable::read`.
#[derive(DslVariable, Clone)]
pub struct VerifierInputVariable<C: Config> {
    pub max_num_var: Var<C::N>,
    pub max_width: Var<C::N>,
    pub proof: BasefoldProofVariable<C>,
    pub rounds: Array<C, RoundVariable<C>>,
}
#[allow(dead_code)]
/// Compiles the recursive batch-verifier program and serializes `input` into
/// the witness (hint) stream it expects.
pub fn build_batch_verifier(input: VerifierInput) -> (Program<BabyBear>, Vec<Vec<BabyBear>>) {
    let mut asm_builder = AsmBuilder::<F, E>::default();

    // Hint in the verifier input, then run the in-circuit verifier on it.
    asm_builder.cycle_tracker_start("Prepare data");
    let mut duplex_challenger = DuplexChallengerVariable::new(&mut asm_builder);
    let hinted = VerifierInput::read(&mut asm_builder);
    asm_builder.cycle_tracker_end("Prepare data");

    batch_verify(
        &mut asm_builder,
        hinted.max_num_var,
        hinted.max_width,
        hinted.rounds,
        hinted.proof,
        &mut duplex_challenger,
    );
    asm_builder.halt();

    let program = asm_builder.compile_isa_with_options(CompilerOptions {
        enable_cycle_tracker: true,
        ..Default::default()
    });

    // The witness stream carries the serialized `VerifierInput` hints.
    let mut hint_stream: Vec<Vec<F>> = Vec::new();
    hint_stream.extend(input.write());
    (program, hint_stream)
}
/// End-to-end fixture: for each round of `dimensions` (a list of
/// `(num_vars, width)` matrix shapes), commits to random matrices, opens them
/// at random points, checks the native verifier accepts, then builds the
/// recursive verifier program for the same input.
///
/// NOTE(review): the VM execution of `program`/`witness` is commented out
/// below, so `config`, `program` and `witness` are currently unused — this
/// test only exercises native verification plus program compilation.
fn construct_test(dimensions: Vec<Vec<(usize, usize)>>) {
    let mut rng = thread_rng();

    // setup PCS
    let pp = Pcs::setup(1 << 22, mpcs::SecurityLevel::Conjecture100bits).unwrap();
    let (pp, vp) = pcs_trim::<E, Pcs>(pp, 1 << 22).unwrap();

    // For each round: sample random matrices, commit, and evaluate every MLE
    // at a fresh random point of matching arity.
    let rounds = dimensions
        .iter()
        .map(|dimensions| {
            let mut num_total_polys = 0;
            let (matrices, mles): (Vec<_>, Vec<_>) = dimensions
                .iter()
                .map(|(num_vars, width)| {
                    let m = witness::RowMajorMatrix::<F>::rand(&mut rng, 1 << num_vars, *width);
                    let mles = m.to_mles();
                    num_total_polys += width;
                    (m, mles)
                })
                .unzip();
            // commit to matrices
            let pcs_data = pcs_batch_commit::<E, Pcs>(&pp, matrices).unwrap();
            let point_and_evals = mles
                .iter()
                .map(|mles| {
                    let point = E::random_vec(mles[0].num_vars(), &mut rng);
                    let evals = mles.iter().map(|mle| mle.evaluate(&point)).collect_vec();
                    (point, evals)
                })
                .collect_vec();
            (pcs_data, point_and_evals.clone())
        })
        .collect_vec();

    // Prover view: full commitment data; verifier view: pure commitments plus
    // (num_var, (point, evals)) per opening.
    let prover_rounds = rounds
        .iter()
        .map(|(comm, other)| (comm, other.clone()))
        .collect_vec();
    let max_num_var = rounds
        .iter()
        .map(|round| round.1.iter().map(|(point, _)| point.len()).max().unwrap())
        .max()
        .unwrap();
    let max_width = rounds
        .iter()
        .map(|round| round.1.iter().map(|(_, evals)| evals.len()).max().unwrap())
        .max()
        .unwrap();
    let verifier_rounds = rounds
        .iter()
        .map(|round| {
            (
                Pcs::get_pure_commitment(&round.0),
                round
                    .1
                    .iter()
                    .map(|(point, evals)| (point.len(), (point.clone(), evals.clone())))
                    .collect_vec(),
            )
        })
        .collect_vec();

    // batch open
    let mut transcript = BasicTranscript::<E>::new(&[]);
    let opening_proof = Pcs::batch_open(&pp, prover_rounds, &mut transcript).unwrap();

    // batch verify
    let mut transcript = BasicTranscript::<E>::new(&[]);
    Pcs::batch_verify(
        &vp,
        verifier_rounds.clone(),
        &opening_proof,
        &mut transcript,
    )
    .expect("Native verification failed");

    // Repackage the verifier view into the recursive circuit's input types.
    let verifier_input = VerifierInput {
        max_num_var,
        max_width,
        rounds: verifier_rounds
            .into_iter()
            .map(|(commit, openings)| Round {
                commit: commit.into(),
                openings: openings
                    .into_iter()
                    .map(|(num_var, (point, evals))| RoundOpening {
                        num_var,
                        point_and_evals: PointAndEvals {
                            point: Point { fs: point },
                            evals,
                        },
                    })
                    .collect(),
            })
            .collect(),
        proof: opening_proof.into(),
    };
    let (program, witness) = build_batch_verifier(verifier_input);
    let system_config = SystemConfig::default()
        .with_public_values(4)
        .with_max_segment_len((1 << 25) - 100)
        .with_profiling();
    let config = NativeConfig::new(system_config, Native);

    // _debug
    // let executor = VmExecutor::<BabyBear, NativeConfig>::new(config);
    // executor.execute(program.clone(), witness.clone()).unwrap();
    //
    // let results = executor.execute_segments(program, witness).unwrap();
    // for seg in results {
    //     println!("=> cycle count: {:?}", seg.metrics.cycle_count);
    // }
}
#[test]
fn test_simple_batch() {
    // Single matrix of width 20, sweeping the number of variables.
    (5..20).for_each(|num_var| construct_test(vec![vec![(num_var, 20)]]));
}
#[test]
fn test_decreasing_batch() {
    // One round of (num_vars, width) pairs with non-increasing num_vars.
    let dims: Vec<(usize, usize)> = vec![
        (14, 20),
        (14, 40),
        (13, 30),
        (12, 30),
        (11, 10),
        (10, 15),
    ];
    construct_test(vec![dims]);
}
#[test]
fn test_random_batch() {
    // One round with mixed, unsorted matrix shapes.
    let dims = [(10, 20), (12, 30), (11, 10), (12, 15)];
    construct_test(vec![dims.to_vec()]);
}
#[test]
fn test_e2e_fibonacci_batch() {
    // Matrix shapes captured from an end-to-end fibonacci run: two commit
    // rounds with realistic, highly non-uniform dimensions.
    let first_round: Vec<(usize, usize)> = vec![
        (22, 22),
        (22, 18),
        (1, 28),
        (2, 24),
        (3, 18),
        (1, 21),
        (4, 19),
        (21, 18),
        (1, 8),
        (1, 11),
        (4, 22),
        (3, 27),
        (5, 22),
        (16, 1),
        (16, 1),
        (16, 1),
        (5, 1),
        (16, 1),
        (1, 28),
        (9, 1),
        (3, 2),
        (3, 1),
        (5, 2),
        (10, 2),
        (6, 3),
        (14, 1),
        (16, 1),
        (5, 1),
        (8, 1),
        (4, 29),
        (1, 29),
        (1, 18),
        (1, 23),
        (21, 20),
        (21, 22),
        (5, 22),
    ];
    let second_round: Vec<(usize, usize)> = vec![
        (16, 3),
        (16, 3),
        (16, 3),
        (5, 3),
        (16, 3),
        (9, 6),
        (3, 1),
        (10, 2),
        (6, 3),
    ];
    construct_test(vec![first_round, second_round]);
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/tower_verifier/program.rs | ceno_recursion/src/tower_verifier/program.rs | use super::binding::{PointAndEvalVariable, PointVariable};
use crate::{
arithmetics::{
UniPolyExtrapolator, challenger_multi_observe, eq_eval, evaluate_at_point_degree_1, extend,
exts_to_felts, reverse,
},
tower_verifier::binding::IOPProverMessageVecVariable,
transcript::transcript_observe_label,
zkvm_verifier::binding::TowerProofInputVariable,
};
use openvm_native_compiler::prelude::*;
use openvm_native_compiler_derive::iter_zip;
use openvm_native_recursion::challenger::{
CanObserveVariable, FeltChallenger, duplex::DuplexChallengerVariable,
};
use openvm_stark_backend::p3_field::FieldAlgebra;
/// Number of entries in the context array handed to the native
/// `sumcheck_layer_eval` opcode (see the `input_ctx` slots set in
/// `verify_tower_proof`: round index, spec counts, eval-array strides,
/// in-round flag, and num_variables).
const NATIVE_SUMCHECK_CTX_LEN: usize = 9;
/// Evaluates at `eval_at` the unique degree-`(len - 1)` polynomial whose
/// values on the consecutive-integer domain `j = 0..len` are `p_i[j]`,
/// via the Lagrange formula with incrementally-updated factorial weights:
///   res = prod * sum_j p_i[j] / ((eval_at - j) * w_j),
/// where `prod = \prod_j (eval_at - j)` and `w_j = \prod_{k != j} (j - k)`.
pub(crate) fn interpolate_uni_poly<C: Config>(
    builder: &mut Builder<C>,
    p_i: &Array<C, Ext<C::F, C::EF>>,
    eval_at: Ext<C::F, C::EF>,
) -> Ext<C::F, C::EF> {
    let len = p_i.len();
    // evals[j] = eval_at - j (distance from the target point to node j).
    let evals: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(len.clone());
    let prod: Ext<C::F, C::EF> = builder.eval(eval_at);
    builder.set(&evals, 0, eval_at);
    // `prod = \prod_{j} (eval_at - j)`
    let e: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    let one: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    builder.range(1, len.clone()).for_each(|i_vec, builder| {
        let i = i_vec[0];
        let tmp: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
        builder.assign(&tmp, eval_at - e);
        builder.set(&evals, i, tmp);
        builder.assign(&prod, prod * tmp);
        builder.assign(&e, e + one);
    });
    // denom_up starts as (len - 1)! — the "upper" weight for the last node —
    // and is updated per node in the reverse walk below.
    let denom_up: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    let i: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    builder.assign(&i, i + one);
    builder.range(2, len.clone()).for_each(|_i_vec, builder| {
        builder.assign(&denom_up, denom_up * i);
        builder.assign(&i, i + one);
    });
    let denom_down: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    // idx_vec = [1, 2, ..., len - 1] as extension-field elements.
    let idx_vec_len: RVar<C::N> = builder.eval_expr(len.clone() - RVar::from(1));
    let idx_vec: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(idx_vec_len);
    let idx_val: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    builder.range(0, idx_vec.len()).for_each(|i_vec, builder| {
        builder.set(&idx_vec, i_vec[0], idx_val);
        builder.assign(&idx_val, idx_val + one);
    });
    // Walk nodes from last to first, updating the numerator/denominator
    // factorial products incrementally instead of recomputing them per node.
    let idx_rev = reverse(builder, &idx_vec);
    let res = builder.constant(C::EF::ZERO);
    // After the fill loop above, idx_val == len as a field element.
    let len_f = idx_val;
    let neg_one: Ext<C::F, C::EF> = builder.constant(C::EF::NEG_ONE);
    let evals_rev = reverse(builder, &evals);
    let p_i_rev = reverse(builder, p_i);
    // NOTE(review): `idx_pos` is decremented each iteration but never read —
    // it looks like dead code; confirm before removing.
    let mut idx_pos: RVar<C::N> = builder.eval_expr(len.clone() - RVar::from(1));
    iter_zip!(builder, idx_rev, evals_rev, p_i_rev).for_each(|ptr_vec, builder| {
        let idx = builder.iter_ptr_get(&idx_rev, ptr_vec[0]);
        let eval = builder.iter_ptr_get(&evals_rev, ptr_vec[1]);
        // up_eval_inv = 1 / (denom_up * (eval_at - idx))
        let up_eval_inv: Ext<C::F, C::EF> = builder.eval(denom_up * eval);
        builder.assign(&up_eval_inv, up_eval_inv.inverse());
        let p = builder.iter_ptr_get(&p_i_rev, ptr_vec[2]);
        builder.assign(&res, res + p * prod * denom_down * up_eval_inv);
        // Shift the weights from node `idx` to node `idx - 1`.
        builder.assign(&denom_up, denom_up * (len_f - idx) * neg_one);
        builder.assign(&denom_down, denom_down * idx);
        idx_pos = builder.eval_expr(idx_pos - RVar::from(1));
    });
    // Node 0 contribution (not covered by the reverse walk above).
    let p_i_0 = builder.get(p_i, 0);
    let eval_0 = builder.get(&evals, 0);
    let up_eval_inv: Ext<C::F, C::EF> = builder.eval(denom_up * eval_0);
    builder.assign(&up_eval_inv, up_eval_inv.inverse());
    builder.assign(&res, res + p_i_0 * prod * denom_down * up_eval_inv);
    res
}
/// Verifies a sequence of sumcheck round messages against `out_claim`.
///
/// For each of the `max_num_variables` rounds: observes the prover message,
/// samples a challenge, checks `p(0) + p(1)` equals the running claim, and
/// folds the claim to `p(challenge)` by univariate extrapolation.
///
/// Returns `(challenges, expected)` where `challenges` are the per-round
/// sampled points and `expected` is the final claim the caller must check
/// against the underlying polynomial evaluation.
pub fn iop_verifier_state_verify<C: Config>(
    builder: &mut Builder<C>,
    challenger: &mut DuplexChallengerVariable<C>,
    out_claim: &Ext<C::F, C::EF>,
    prover_messages: &IOPProverMessageVecVariable<C>,
    max_num_variables: Felt<C::F>,
    max_degree: Felt<C::F>,
    unipoly_extrapolator: &mut UniPolyExtrapolator<C>,
) -> (
    Array<C, Ext<<C as Config>::F, <C as Config>::EF>>,
    Ext<<C as Config>::F, <C as Config>::EF>,
) {
    // TODO: either store it in a global cache or pass them as parameters
    let zero: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
    let zero_f: Felt<C::F> = builder.constant(C::F::ZERO);
    let max_num_variables_usize: Usize<C::N> =
        Usize::from(builder.cast_felt_to_var(max_num_variables));
    // Bind the sumcheck parameters into the transcript. Each value is followed
    // by a zero felt — presumably matching the prover's two-limb encoding;
    // TODO(review): confirm against the native transcript implementation.
    challenger.observe(builder, max_num_variables);
    challenger.observe(builder, zero_f);
    challenger.observe(builder, max_degree);
    challenger.observe(builder, zero_f);
    // Exactly one prover message per variable.
    builder.assert_var_eq(max_num_variables_usize.get_var(), prover_messages.len());
    let challenges: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(max_num_variables_usize.clone());
    let expected: Ext<C::F, C::EF> = builder.eval(*out_claim + zero);
    builder.cycle_tracker_start("IOPVerifierState::verify_round_and_update_state");
    builder
        .range(0, max_num_variables_usize.clone())
        .for_each(|i_vec, builder| {
            let i = i_vec[0];
            // TODO: this takes 7 cycles, can we optimize it?
            let prover_msg = prover_messages.get(builder, i.variable());
            unsafe {
                // SAFETY contract lives in `exts_to_felts`: reinterpret the
                // ext elements as felts so they can be absorbed in bulk.
                let prover_msg_felts = exts_to_felts(builder, &prover_msg);
                challenger_multi_observe(builder, challenger, &prover_msg_felts);
            }
            transcript_observe_label(builder, challenger, b"Internal round");
            let challenge = challenger.sample_ext(builder);
            // Round check: p(0) + p(1) must equal the running claim.
            let e1 = builder.get(&prover_msg, 0);
            let e2 = builder.get(&prover_msg, 1);
            let target: Ext<<C as Config>::F, <C as Config>::EF> = builder.eval(e1 + e2);
            builder.assert_ext_eq(expected, target);
            // Fold: the next round's claim is p evaluated at the challenge.
            let p_r = unipoly_extrapolator.extrapolate_uni_poly(builder, &prover_msg, challenge);
            builder.assign(&expected, p_r + zero);
            builder.set_value(&challenges, i, challenge);
        });
    builder.cycle_tracker_end("IOPVerifierState::verify_round_and_update_state");
    (challenges, expected)
}
pub fn verify_tower_proof<C: Config>(
builder: &mut Builder<C>,
challenger: &mut DuplexChallengerVariable<C>,
prod_out_evals: Array<C, Array<C, Ext<C::F, C::EF>>>,
logup_out_evals: &Array<C, Array<C, Ext<C::F, C::EF>>>,
num_variables: Array<C, Usize<C::N>>,
num_fanin: Usize<C::N>,
// TowerProofVariable
max_num_variables: Usize<C::N>,
proof: &TowerProofInputVariable<C>,
unipoly_extrapolator: &mut UniPolyExtrapolator<C>,
) -> (
PointVariable<C>,
Array<C, PointAndEvalVariable<C>>,
Array<C, PointAndEvalVariable<C>>,
Array<C, PointAndEvalVariable<C>>,
) {
let num_prod_spec = prod_out_evals.len();
let num_logup_spec = logup_out_evals.len();
let one: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
let zero: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
builder.assert_usize_eq(proof.prod_specs_eval.len(), num_prod_spec.clone());
iter_zip!(builder, prod_out_evals).for_each(|ptr_vec, builder| {
let ptr = ptr_vec[0];
let evals = builder.iter_ptr_get(&prod_out_evals, ptr);
builder.assert_usize_eq(evals.len(), num_fanin.clone());
});
builder.assert_usize_eq(proof.logup_specs_eval.len(), num_logup_spec.clone());
iter_zip!(builder, logup_out_evals).for_each(|ptr_vec, builder| {
let ptr = ptr_vec[0];
let evals = builder.iter_ptr_get(logup_out_evals, ptr);
builder.assert_usize_eq(evals.len(), RVar::from(4));
});
builder.assert_usize_eq(
num_variables.len(),
num_prod_spec.clone() + num_logup_spec.clone(),
);
let var_zero: Var<C::N> = builder.constant(C::N::ZERO);
let num_specs: Var<C::N> = builder.eval(num_prod_spec.get_var() + num_logup_spec.get_var());
let should_skip: Array<C, Var<C::N>> = builder.dyn_array(num_specs);
builder.range(0, num_specs).for_each(|i_vec, builder| {
let i = i_vec[0];
// all specs should not be skipped initially
builder.set_value(&should_skip, i, var_zero);
});
transcript_observe_label(builder, challenger, b"combine subset evals");
let alpha = challenger.sample_ext(builder);
let alpha_acc: Ext<C::F, C::EF> = builder.eval(zero + one);
// initial_claim = \sum_j alpha^j * out_j[rt]
// out_j[rt] := (record_{j}[rt])
// out_j[rt] := (logup_p{j}[rt])
// out_j[rt] := (logup_q{j}[rt])
let log2_num_fanin = 1usize;
builder.cycle_tracker_start("initial sum");
let initial_rt: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(log2_num_fanin);
transcript_observe_label(builder, challenger, b"product_sum");
builder
.range(0, initial_rt.len())
.for_each(|idx_vec, builder| {
let idx = idx_vec[0];
let c = challenger.sample_ext(builder);
builder.set_value(&initial_rt, idx, c);
});
let prod_spec_point_n_eval: Array<C, PointAndEvalVariable<C>> =
builder.dyn_array(num_prod_spec.clone());
iter_zip!(builder, prod_out_evals, prod_spec_point_n_eval).for_each(|ptr_vec, builder| {
let ptr = ptr_vec[0];
let evals = builder.iter_ptr_get(&prod_out_evals, ptr);
let e = evaluate_at_point_degree_1(builder, &evals, &initial_rt);
let p_ptr = ptr_vec[1];
builder.iter_ptr_set(
&prod_spec_point_n_eval,
p_ptr,
PointAndEvalVariable {
point: PointVariable {
fs: initial_rt.clone(),
},
eval: e,
},
);
});
let logup_spec_p_point_n_eval: Array<C, PointAndEvalVariable<C>> =
builder.dyn_array(num_logup_spec.clone());
let logup_spec_q_point_n_eval: Array<C, PointAndEvalVariable<C>> =
builder.dyn_array(num_logup_spec.clone());
iter_zip!(
builder,
logup_out_evals,
logup_spec_p_point_n_eval,
logup_spec_q_point_n_eval
)
.for_each(|ptr_vec, builder| {
let ptr = ptr_vec[0];
let evals = builder.iter_ptr_get(&prod_out_evals, ptr);
let p_slice = evals.slice(builder, 0, 2);
let q_slice = evals.slice(builder, 2, 4);
let e1 = evaluate_at_point_degree_1(builder, &p_slice, &initial_rt);
let e2 = evaluate_at_point_degree_1(builder, &q_slice, &initial_rt);
let p_ptr = ptr_vec[1];
let q_ptr = ptr_vec[2];
builder.iter_ptr_set(
&logup_spec_p_point_n_eval,
p_ptr,
PointAndEvalVariable {
point: PointVariable {
fs: initial_rt.clone(),
},
eval: e1,
},
);
builder.iter_ptr_set(
&logup_spec_q_point_n_eval,
q_ptr,
PointAndEvalVariable {
point: PointVariable {
fs: initial_rt.clone(),
},
eval: e2,
},
);
});
let initial_claim: Ext<C::F, C::EF> = builder.eval(zero + zero);
iter_zip!(builder, prod_spec_point_n_eval).for_each(|ptr_vec, builder| {
let ptr = ptr_vec[0];
let prod_eval = builder.iter_ptr_get(&prod_spec_point_n_eval, ptr);
builder.assign(&initial_claim, initial_claim + prod_eval.eval * alpha_acc);
builder.assign(&alpha_acc, alpha_acc * alpha);
});
builder
.range(0, num_logup_spec.clone())
.for_each(|i_vec, builder| {
let p = builder.get(&logup_spec_p_point_n_eval, i_vec[0]);
builder.assign(&initial_claim, initial_claim + p.eval * alpha_acc);
builder.assign(&alpha_acc, alpha_acc * alpha);
let q = builder.get(&logup_spec_q_point_n_eval, i_vec[0]);
builder.assign(&initial_claim, initial_claim + q.eval * alpha_acc);
builder.assign(&alpha_acc, alpha_acc * alpha);
});
builder.cycle_tracker_end("initial sum");
let curr_pt = initial_rt.clone();
let curr_eval = initial_claim;
let op_range: RVar<C::N> = builder.eval_expr(max_num_variables - Usize::from(1));
let round: Felt<C::F> = builder.constant(C::F::ZERO);
let next_rt = PointAndEvalVariable {
point: PointVariable { fs: initial_rt },
eval: initial_claim,
};
let next_layer_evals_output_len: Usize<C::N> = builder
.eval(Usize::from(1) + num_prod_spec.clone() + Usize::from(2) * num_logup_spec.clone());
let next_layer_evals: Array<C, Ext<C::F, C::EF>> =
builder.dyn_array(next_layer_evals_output_len);
builder.range(0, op_range).for_each(|i_vec, builder| {
let round_var = i_vec[0];
let out_rt = &curr_pt;
let out_claim = &curr_eval;
let prover_messages = builder.get(&proof.proofs, round_var);
let max_num_variables: Felt<C::F> = builder.constant(C::F::ONE);
builder.assign(&max_num_variables, max_num_variables + round);
let max_degree = builder.constant(C::F::from_canonical_usize(3));
builder.cycle_tracker_start("sumcheck verify");
let (sub_rt, sub_e) = iop_verifier_state_verify(
builder,
challenger,
out_claim,
&prover_messages,
max_num_variables,
max_degree,
unipoly_extrapolator,
);
builder.cycle_tracker_end("sumcheck verify");
builder.cycle_tracker_start("check expected evaluation");
let eq_e = eq_eval(builder, out_rt, &sub_rt, one, zero);
let input_ctx: Array<C, Usize<C::N>> = builder.dyn_array(NATIVE_SUMCHECK_CTX_LEN);
builder.set(&input_ctx, 0, round_var);
builder.set(&input_ctx, 1, num_prod_spec.clone());
builder.set(&input_ctx, 2, num_logup_spec.clone());
builder.set(
&input_ctx,
3,
Usize::from(proof.prod_specs_eval.inner_length),
);
builder.set(
&input_ctx,
4,
Usize::from(proof.prod_specs_eval.inner_inner_length),
);
builder.set(
&input_ctx,
5,
Usize::from(proof.logup_specs_eval.inner_length),
);
builder.set(
&input_ctx,
6,
Usize::from(proof.logup_specs_eval.inner_inner_length),
);
builder.set(&input_ctx, 7, Usize::from(1));
let n_v = builder.get(&num_variables, 0);
builder.set(&input_ctx, 8, n_v);
let challenges: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(3);
builder.set(&challenges, 0, alpha);
builder.sumcheck_layer_eval(
&input_ctx,
&challenges,
&proof.prod_specs_eval.data,
&proof.logup_specs_eval.data,
&next_layer_evals,
);
let expected_evaluation = builder.get(&next_layer_evals, 0);
builder.assign(&expected_evaluation, expected_evaluation * eq_e);
builder.assert_ext_eq(expected_evaluation, sub_e);
builder.cycle_tracker_end("check expected evaluation");
builder.cycle_tracker_start("derive next layer's expected sum");
// derive single eval
// rt' = r_merge || rt
// r_merge.len() == ceil_log2(num_product_fanin)
transcript_observe_label(builder, challenger, b"merge");
builder.cycle_tracker_start("derive rt_prime");
let r_merge = challenger.sample_ext(builder);
let c1: Ext<<C as Config>::F, <C as Config>::EF> = builder.eval(one - r_merge);
let c2: Ext<<C as Config>::F, <C as Config>::EF> = builder.eval(r_merge);
let rt_prime = extend(builder, &sub_rt, &r_merge);
builder.cycle_tracker_end("derive rt_prime");
// generate next round challenge
transcript_observe_label(builder, challenger, b"combine subset evals");
let new_alpha = challenger.sample_ext(builder);
builder.assign(&alpha, new_alpha);
// Use native opcode
builder.set(&input_ctx, 7, Usize::from(0)); // Turn `in_round` off
builder.set(&challenges, 0, new_alpha);
builder.set(&challenges, 1, c1);
builder.set(&challenges, 2, c2);
builder.sumcheck_layer_eval(
&input_ctx,
&challenges,
&proof.prod_specs_eval.data,
&proof.logup_specs_eval.data,
&next_layer_evals,
);
let next_round = builder.eval_expr(round_var + RVar::from(1));
builder
.range(0, num_prod_spec.clone())
.for_each(|i_vec, builder| {
let spec_index = i_vec[0];
let skip = builder.get(&should_skip, spec_index);
let max_round = builder.get(&num_variables, spec_index);
let round_limit: RVar<C::N> = builder.eval_expr(max_round - RVar::from(1));
// now skip is 0 if and only if current round_var is smaller than round_limit.
builder.if_eq(skip, var_zero).then(|builder| {
builder.if_eq(next_round, round_limit).then(|builder| {
let evals_idx: Usize<C::N> = builder.eval(spec_index + Usize::from(1));
let evals = builder.get(&next_layer_evals, evals_idx);
let point_and_eval: PointAndEvalVariable<C> =
builder.eval(PointAndEvalVariable {
point: PointVariable {
fs: rt_prime.clone(),
},
eval: evals,
});
builder.set_value(&prod_spec_point_n_eval, spec_index, point_and_eval);
});
});
});
let num_variables_len = num_variables.len();
let logup_num_variables_slice =
num_variables.slice(builder, num_prod_spec.clone(), num_variables_len.clone());
builder
.range(0, num_logup_spec.clone())
.for_each(|i_vec, builder| {
let spec_index = i_vec[0];
let max_round = builder.get(&logup_num_variables_slice, spec_index);
let round_limit: RVar<C::N> = builder.eval_expr(max_round - RVar::from(1));
let idx: Var<C::N> = builder.eval(spec_index.variable() + num_prod_spec.get_var());
let skip = builder.get(&should_skip, idx);
// now skip is 0 if and only if current round_var is smaller than round_limit.
builder.if_eq(skip, var_zero).then(|builder| {
builder.if_eq(next_round, round_limit).then(|builder| {
let p_idx: Usize<C::N> = builder.eval(idx + Usize::from(1));
let q_idx: Usize<C::N> =
builder.eval(idx + Usize::from(1) + num_logup_spec.clone());
let p_eval = builder.get(&next_layer_evals, p_idx);
let q_eval = builder.get(&next_layer_evals, q_idx);
let p_eval: PointAndEvalVariable<C> = builder.eval(PointAndEvalVariable {
point: PointVariable {
fs: rt_prime.clone(),
},
eval: p_eval,
});
let q_eval: PointAndEvalVariable<C> = builder.eval(PointAndEvalVariable {
point: PointVariable {
fs: rt_prime.clone(),
},
eval: q_eval,
});
builder.set_value(&logup_spec_p_point_n_eval, spec_index, p_eval);
builder.set_value(&logup_spec_q_point_n_eval, spec_index, q_eval);
});
});
});
let output_eval = builder.get(&next_layer_evals, 0);
builder.assign(&curr_pt, rt_prime.clone());
builder.assign(&curr_eval, output_eval);
builder.assign(&round, round + C::F::ONE);
builder.cycle_tracker_end("derive next layer's expected sum");
builder.assign(
&next_rt,
PointAndEvalVariable {
point: PointVariable {
fs: rt_prime.clone(),
},
eval: curr_eval,
},
);
});
(
next_rt.point,
prod_spec_point_n_eval,
logup_spec_p_point_n_eval,
logup_spec_q_point_n_eval,
)
}
// #[cfg(test)]
// mod tests {
// use crate::arithmetics::UniPolyExtrapolator;
// use crate::tower_verifier::binding::IOPProverMessage;
// use crate::tower_verifier::binding::TowerVerifierInput;
// use crate::tower_verifier::program::iop_verifier_state_verify;
// use crate::tower_verifier::program::verify_tower_proof;
// use ceno_mle::mle::ArcMultilinearExtension;
// use ceno_mle::mle::{IntoMLE, MultilinearExtension};
// use ceno_mle::virtual_polys::VirtualPolynomials;
// use ceno_sumcheck::structs::IOPProverState;
// use ceno_transcript::BasicTranscript;
// use ceno_zkvm::scheme::constants::NUM_FANIN;
// use ceno_zkvm::scheme::hal::TowerProver;
// use ceno_zkvm::scheme::hal::TowerProverSpec;
// use ff_ext::BabyBearExt4;
// use ff_ext::FieldFrom;
// use ff_ext::FromUniformBytes;
// use itertools::Itertools;
// use openvm_circuit::arch::SystemConfig;
// use openvm_circuit::arch::VmExecutor;
// use openvm_native_circuit::Native;
// use openvm_native_circuit::NativeConfig;
// use openvm_native_compiler::asm::AsmCompiler;
// use openvm_native_compiler::asm::{AsmBuilder, AsmConfig};
// use openvm_native_compiler::conversion::convert_program;
// use openvm_native_compiler::conversion::CompilerOptions;
// use openvm_native_compiler::ir::Array;
// use openvm_native_compiler::ir::Ext;
// use openvm_native_compiler::prelude::Felt;
// use openvm_native_recursion::challenger::duplex::DuplexChallengerVariable;
// use openvm_native_recursion::hints::Hintable;
// use openvm_stark_sdk::config::setup_tracing_with_log_level;
// use p3_baby_bear::BabyBear;
// use p3_field::extension::BinomialExtensionField;
// use p3_field::Field;
// use p3_field::FieldAlgebra;
// use rand::thread_rng;
//
// type F = BabyBear;
// type E = BabyBearExt4;
// type EF = BinomialExtensionField<BabyBear, 4>;
// type C = AsmConfig<F, EF>;
//
// #[test]
// fn test_simple_sumcheck() {
// setup_tracing_with_log_level(tracing::Level::WARN);
//
// let nv = 5;
// let degree = 3;
//
// let mut builder = AsmBuilder::<F, EF>::default();
//
// let out_claim = EF::read(&mut builder);
// let prover_msgs = Vec::<IOPProverMessage>::read(&mut builder);
//
// let max_num_variables: Felt<F> = builder.constant(F::from_canonical_u32(nv as u32));
// let max_degree: Felt<F> = builder.constant(F::from_canonical_u32(degree as u32));
//
// let mut challenger: DuplexChallengerVariable<C> =
// DuplexChallengerVariable::new(&mut builder);
//
// let mut uni_p = UniPolyExtrapolator::new(&mut builder);
//
// builder.cycle_tracker_start("sumcheck verify");
// iop_verifier_state_verify(
// &mut builder,
// &mut challenger,
// &out_claim,
// &prover_msgs,
// max_num_variables,
// max_degree,
// &mut uni_p,
// );
// builder.cycle_tracker_end("sumcheck verify");
//
// builder.halt();
//
// get the assembly code
// let options = CompilerOptions::default().with_cycle_tracker();
// let mut compiler = AsmCompiler::new(options.word_size);
// compiler.build(builder.operations);
// let asm_code = compiler.code();
// println!("asm code");
// println!("{asm_code}");
//
// run sumcheck prover to get sumcheck proof
// let mut rng = thread_rng();
// let (mles, expected_sum) = MultilinearExtension::<E>::random_mle_list(nv, degree, &mut rng);
// let mles: Vec<ceno_mle::mle::ArcMultilinearExtension<E>> =
// mles.into_iter().map(|mle| mle as _).collect_vec();
// let mut virtual_poly: VirtualPolynomials<'_, E> = VirtualPolynomials::new(1, nv);
// virtual_poly.add_mle_list(mles.iter().collect_vec(), E::from_v(1));
//
// let mut transcript = BasicTranscript::new(&[]);
// let (sumcheck_proof, _) = IOPProverState::prove(virtual_poly, &mut transcript);
// let mut input_stream = Vec::new();
//
// hacky way: convert E to EF but actually they are the same
// let expected_sum: EF = cast_vec(vec![expected_sum])[0];
// input_stream.extend(expected_sum.write());
// input_stream.extend(
// sumcheck_proof
// .proofs
// .into_iter()
// .map(|msg| {
// let evaluations: Vec<EF> = cast_vec(msg.evaluations);
// IOPProverMessage { evaluations }
// })
// .collect_vec()
// .write(),
// );
//
// get execution result
// let program = convert_program(asm_code, options);
// let system_config = SystemConfig::default()
// .with_public_values(4)
// .with_max_segment_len((1 << 25) - 100)
// .with_profiling();
// let config = NativeConfig::new(system_config, Native);
// let executor = VmExecutor::<BabyBear, NativeConfig>::new(config);
//
// let res = executor
// .execute_and_then(program, input_stream, |_, seg| Ok(seg), |err| err)
// .unwrap();
//
// for (i, seg) in res.iter().enumerate() {
// #[cfg(feature = "bench-metrics")]
// {
// println!(
// "=> segment {} metrics.cycle_count: {:?}",
// i, seg.metrics.cycle_count
// );
// for (insn, count) in seg.metrics.counts.iter() {
// println!("insn: {:?}, count: {:?}", insn, count);
// }
// println!(
// "=> segment {} #(insns): {}",
// i,
// seg.metrics
// .counts
// .values()
// .copied()
// .into_iter()
// .sum::<usize>()
// );
// }
// }
// }
//
// #[test]
// fn test_prod_tower() {
// let nv = 5;
// let num_prod_specs = 2;
// let num_logup_specs = 1;
// let mut rng = thread_rng();
//
// setup_tracing_with_log_level(tracing::Level::WARN);
//
// let records: Vec<MultilinearExtension<E>> = (0..num_prod_specs)
// .map(|_| {
// MultilinearExtension::from_evaluations_ext_vec(
// nv - 1,
// E::random_vec(1 << (nv - 1), &mut rng),
// )
// })
// .collect_vec();
// let denom_records = (0..num_logup_specs)
// .map(|_| {
// MultilinearExtension::from_evaluations_ext_vec(nv, E::random_vec(1 << nv, &mut rng))
// })
// .collect_vec();
//
// let prod_specs = records
// .into_iter()
// .map(|record| {
// let (first, second) = record
// .get_ext_field_vec()
// .split_at(record.evaluations().len() / 2);
// let last_layer: Vec<ArcMultilinearExtension<E>> = vec![
// first.to_vec().into_mle().into(),
// second.to_vec().into_mle().into(),
// ];
// assert_eq!(last_layer.len(), NUM_FANIN);
// ceno_zkvm::structs::TowerProverSpec {
// witness: infer_tower_product_witness(nv - 1, last_layer, NUM_FANIN),
// }
// })
// .collect_vec();
//
// let prod_out_evals = prod_specs
// .iter()
// .map(|spec| {
// spec.witness[0]
// .iter()
// .map(|mle| cast_vec(mle.get_ext_field_vec().to_vec())[0])
// .collect_vec()
// })
// .collect_vec();
//
// let logup_specs = denom_records
// .into_iter()
// .map(|record| {
// let (first, second) = record
// .get_ext_field_vec()
// .split_at(record.evaluations().len() / 2);
// let last_layer: Vec<ArcMultilinearExtension<E>> = vec![
// first.to_vec().into_mle().into(),
// second.to_vec().into_mle().into(),
// ];
// TowerProverSpec {
// witness: infer_tower_logup_witness(None, last_layer),
// }
// })
// .collect_vec();
//
// let logup_out_evals = logup_specs
// .iter()
// .map(|spec| {
// spec.witness[0]
// .iter()
// .map(|mle| cast_vec(mle.get_ext_field_vec().to_vec())[0])
// .collect_vec()
// })
// .collect_vec();
//
// let num_variables = prod_specs
// .iter()
// .chain(logup_specs.iter())
// .map(|spec| spec.witness.len())
// .collect_vec();
//
// let mut transcript = BasicTranscript::new(&[]);
// let (_, tower_proof) =
// TowerProver::create_proof(prod_specs, logup_specs, NUM_FANIN, &mut transcript);
//
// build program
// let mut builder = AsmBuilder::<F, EF>::default();
//
// let mut challenger: DuplexChallengerVariable<C> =
// DuplexChallengerVariable::new(&mut builder);
//
// construct extrapolation weights
// let mut uni_p = UniPolyExtrapolator::new(&mut builder);
//
// assert_eq!(tower_proof.proofs.len(), nv - 1);
// let tower_verifier_input_var = TowerVerifierInput::read(&mut builder);
// let tower_verifier_input = TowerVerifierInput {
// prod_out_evals,
// logup_out_evals,
// num_variables,
// num_fanin: NUM_FANIN,
// num_proofs: nv - 1,
// num_prod_specs,
// num_logup_specs,
// _max_num_variables: nv,
// proofs: tower_proof
// .proofs
// .iter()
// .map(|layer| {
// layer
// .iter()
// .map(|round| IOPProverMessage {
// evaluations: cast_vec(round.evaluations.clone()),
// })
// .collect_vec()
// })
// .collect_vec(),
// prod_specs_eval: tower_proof
// .prod_specs_eval
// .iter()
// .map(|spec| {
// spec.iter()
// .map(|layer| cast_vec(layer.clone()))
// .collect_vec()
// })
// .collect_vec(),
// logup_specs_eval: tower_proof
// .logup_specs_eval
// .iter()
// .map(|spec| {
// spec.iter()
// .map(|layer| cast_vec(layer.clone()))
// .collect_vec()
// })
// .collect_vec(),
// };
// verify_tower_proof(
// &mut builder,
// &mut challenger,
// tower_verifier_input_var,
// &mut uni_p,
// );
//
// builder.halt();
//
// prepare input
// let mut input_stream = Vec::new();
// input_stream.extend(tower_verifier_input.write());
//
// get the assembly code
// let options = CompilerOptions::default().with_cycle_tracker();
// let program = builder.compile_isa_with_options(options);
// let system_config = SystemConfig::default()
// .with_public_values(4)
// .with_max_segment_len((1 << 25) - 100)
// .with_profiling();
// let config = NativeConfig::new(system_config, Native);
// let executor = VmExecutor::<BabyBear, NativeConfig>::new(config);
// executor
// .execute_and_then(program, input_stream, |_, seg| Ok(seg), |err| err)
// .unwrap();
// }
//
// fn cast_vec<A, B>(mut vec: Vec<A>) -> Vec<B> {
// let length = vec.len();
// let capacity = vec.capacity();
// let ptr = vec.as_mut_ptr();
// Prevent `vec` from dropping its contents
// std::mem::forget(vec);
//
// Convert the pointer to the new type
// let new_ptr = ptr as *mut B;
//
// Create a new vector with the same length and capacity, but different type
// unsafe { Vec::from_raw_parts(new_ptr, length, capacity) }
// }
// }
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/tower_verifier/mod.rs | ceno_recursion/src/tower_verifier/mod.rs | pub(crate) mod binding;
pub(crate) mod program;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/tower_verifier/binding.rs | ceno_recursion/src/tower_verifier/binding.rs | use itertools::Itertools;
use openvm_native_compiler::{
asm::AsmConfig,
ir::{Array, Builder, Config},
prelude::*,
};
use openvm_native_recursion::hints::{Hintable, VecAutoHintable};
/// Base field used by the recursion circuit.
pub type F = BabyBear;
/// Degree-4 binomial extension field over `F`.
pub type E = BinomialExtensionField<F, 4>;
/// Compiler config for the native recursion DSL (base field `F`, extension `E`).
pub type InnerConfig = AsmConfig<F, E>;
use openvm_stark_backend::p3_field::extension::BinomialExtensionField;
use openvm_stark_sdk::p3_baby_bear::BabyBear;
use serde::Deserialize;
/// DSL-side point: one extension-field coordinate per entry of `fs`.
#[derive(DslVariable, Clone)]
pub struct PointVariable<C: Config> {
    pub fs: Array<C, Ext<C::F, C::EF>>,
}
/// A point together with a claimed evaluation at that point.
#[derive(DslVariable, Clone)]
pub struct PointAndEvalVariable<C: Config> {
    pub point: PointVariable<C>,
    pub eval: Ext<C::F, C::EF>,
}
/// DSL-side mirror of a single sumcheck round message (its evaluations).
#[derive(DslVariable, Clone)]
pub struct IOPProverMessageVariable<C: Config> {
    pub evaluations: Array<C, Ext<C::F, C::EF>>,
}
/// Flattened list of equally-sized prover messages: `evaluations` stores
/// `length` messages of `prover_message_size` values each, back to back.
#[derive(DslVariable, Clone)]
pub struct IOPProverMessageVecVariable<C: Config> {
    pub prover_message_size: Var<C::N>,
    pub length: Var<C::N>,
    pub evaluations: Array<C, Ext<C::F, C::EF>>,
}
impl<C: Config> IOPProverMessageVecVariable<C> {
    /// Returns the `index`-th message as a slice of the flattened buffer,
    /// i.e. elements `[index * size, (index + 1) * size)`.
    pub fn get(&self, builder: &mut Builder<C>, index: Var<C::N>) -> Array<C, Ext<C::F, C::EF>> {
        let start: Var<C::N> = builder.eval(self.prover_message_size * index);
        let end: Var<C::N> = builder.eval(start + self.prover_message_size);
        self.evaluations.slice(builder, start, end)
    }
    /// Number of messages stored (not the number of field elements).
    pub fn len(&self) -> Var<C::N> {
        self.length
    }
}
/// DSL-side mirror of `ThreeDimensionalVector`: a
/// `length x inner_length x inner_inner_length` tensor flattened row-major
/// into `data`.
#[derive(DslVariable, Clone)]
pub struct ThreeDimensionalVecVariable<C: Config> {
    pub inner_inner_length: Var<C::N>,
    pub inner_length: Var<C::N>,
    pub length: Var<C::N>,
    pub data: Array<C, Ext<C::F, C::EF>>,
}
impl<C: Config> ThreeDimensionalVecVariable<C> {
    /// Returns the whole 2-D slab at `index` (all inner rows, still flattened).
    pub fn get(&self, builder: &mut Builder<C>, index: Var<C::N>) -> Array<C, Ext<C::F, C::EF>> {
        let start: Var<C::N> = builder.eval(self.inner_inner_length * self.inner_length * index);
        let end: Var<C::N> = builder.eval(start + self.inner_inner_length * self.inner_length);
        self.data.slice(builder, start, end)
    }
    /// Returns the single innermost row at `(outer_index, inner_index)`.
    pub fn get_inner(
        &self,
        builder: &mut Builder<C>,
        outer_index: Var<C::N>,
        inner_index: Var<C::N>,
    ) -> Array<C, Ext<C::F, C::EF>> {
        let start: Var<C::N> = builder.eval(
            self.inner_inner_length * self.inner_length * outer_index
                + self.inner_inner_length * inner_index,
        );
        let end: Var<C::N> = builder.eval(start + self.inner_inner_length);
        self.data.slice(builder, start, end)
    }
    /// Outer dimension (number of slabs), not the flattened element count.
    pub fn len(&self) -> Var<C::N> {
        self.length
    }
}
/// Host-side point: a vector of extension-field coordinates.
#[derive(Clone, Deserialize)]
pub struct Point {
    pub fs: Vec<E>,
}
impl Hintable<InnerConfig> for Point {
    type HintVariable = PointVariable<InnerConfig>;

    /// Reads the point coordinates back as one extension-field hint array.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let fs = builder.hint_exts();
        PointVariable { fs }
    }

    /// Serializes the point as exactly the hint words of its coordinates.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        self.fs.write()
    }
}
impl VecAutoHintable for Point {}
/// Host-side (point, claimed evaluation) pair.
#[allow(dead_code)]
pub struct PointAndEval {
    pub point: Point,
    pub eval: E,
}
impl Hintable<InnerConfig> for PointAndEval {
    type HintVariable = PointAndEvalVariable<InnerConfig>;
    // Read order must mirror `write` exactly: point first, then evaluation.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let point = Point::read(builder);
        let eval = E::read(builder);
        PointAndEvalVariable { point, eval }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(self.point.write());
        stream.extend(self.eval.write());
        stream
    }
}
impl VecAutoHintable for PointAndEval {}
/// Host-side sumcheck round message: the round polynomial's evaluations.
#[derive(Debug, Deserialize)]
pub struct IOPProverMessage {
    pub evaluations: Vec<E>,
}
use sumcheck::structs::IOPProverMessage as InnerIOPProverMessage;
impl From<InnerIOPProverMessage<E>> for IOPProverMessage {
    // Plain field move from the sumcheck crate's message type.
    fn from(value: InnerIOPProverMessage<E>) -> Self {
        IOPProverMessage {
            evaluations: value.evaluations,
        }
    }
}
impl Hintable<InnerConfig> for IOPProverMessage {
    type HintVariable = IOPProverMessageVariable<InnerConfig>;

    /// Reads one round's evaluations back as an extension-field hint array.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let evaluations = builder.hint_exts();
        IOPProverMessageVariable { evaluations }
    }

    /// Serializes the message as exactly the hint words of its evaluations.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        self.evaluations.write()
    }
}
impl VecAutoHintable for IOPProverMessage {}
/// Assume that all the prover messages have the same size.
#[derive(Debug, Deserialize, Default)]
pub struct IOPProverMessageVec {
    /// Number of evaluations per message; 0 for the empty default.
    pub prover_message_size: usize,
    /// All messages' evaluations concatenated in message order.
    pub data: Vec<E>,
}
impl IOPProverMessageVec {
    /// Returns the `index`-th message's evaluations.
    /// Panics if the computed range is out of bounds (slice indexing).
    pub fn get(&self, index: usize) -> &[E] {
        let start = index * self.prover_message_size;
        let end = start + self.prover_message_size;
        &self.data[start..end]
    }
}
impl From<Vec<IOPProverMessage>> for IOPProverMessageVec {
    /// Flattens uniformly-sized round messages into one contiguous buffer.
    ///
    /// Panics (via the uniformity assert) if rounds disagree on their
    /// evaluation count; an empty input yields the empty default value.
    fn from(messages: Vec<IOPProverMessage>) -> Self {
        // Guard clause: nothing to flatten.
        if messages.is_empty() {
            return Self::default();
        }
        let prover_message_size = messages[0].evaluations.len();
        // Every round must carry the same number of evaluations, otherwise
        // the flattened indexing in `get` would be invalid.
        assert!(
            messages
                .iter()
                .map(|message| message.evaluations.len())
                .all_equal()
        );
        let data = messages
            .into_iter()
            .flat_map(|msg| msg.evaluations)
            .collect();
        IOPProverMessageVec {
            prover_message_size,
            data,
        }
    }
}
impl Hintable<InnerConfig> for IOPProverMessageVec {
    type HintVariable = IOPProverMessageVecVariable<InnerConfig>;
    // Read order mirrors `write`: size, count, then the flattened data.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let prover_message_size: Var<F> = usize::read(builder);
        let length: Var<F> = usize::read(builder);
        let evaluations = Vec::<E>::read(builder);
        // Hints are untrusted: enforce internal consistency
        // (total evaluations == size * count) inside the circuit.
        builder.assert_eq::<Var<F>>(evaluations.len(), prover_message_size * length);
        IOPProverMessageVecVariable {
            prover_message_size,
            length,
            evaluations,
        }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(<usize as Hintable<InnerConfig>>::write(
            &self.prover_message_size,
        ));
        // Message count derived from the flat data; the empty case is
        // special-cased so the default (size == 0) value does not divide by 0.
        // NOTE(review): non-empty `data` with `prover_message_size == 0` would
        // still divide by zero here — confirm constructors rule that out.
        stream.extend(<usize as Hintable<InnerConfig>>::write(
            &if self.data.is_empty() {
                0
            } else {
                self.data.len() / self.prover_message_size
            },
        ));
        stream.extend(self.data.write());
        stream
    }
}
/// Host-side rectangular 3-D tensor of extension-field values, stored
/// flattened row-major (`outer -> inner -> innermost`).
#[derive(Debug, Default, Deserialize)]
pub struct ThreeDimensionalVector {
    pub inner_inner_length: usize,
    pub inner_length: usize,
    pub outer_length: usize,
    /// Flattened data
    pub data: Vec<E>,
}
impl ThreeDimensionalVector {
    /// Returns the 2-D slab at `outer_index`, still flattened.
    /// Panics if the computed range is out of bounds (slice indexing).
    pub fn get(&self, outer_index: usize) -> &[E] {
        let start = outer_index * self.inner_length * self.inner_inner_length;
        let end = start + self.inner_length * self.inner_inner_length;
        &self.data[start..end]
    }
    /// Returns the innermost row at `(outer_index, inner_index)`.
    pub fn get_inner(&self, outer_index: usize, inner_index: usize) -> &[E] {
        let start = outer_index * self.inner_length * self.inner_inner_length
            + inner_index * self.inner_inner_length;
        let end = start + self.inner_inner_length;
        &self.data[start..end]
    }
}
impl From<Vec<Vec<Vec<E>>>> for ThreeDimensionalVector {
    /// Flattens a nested `outer -> inner -> evals` vector into one contiguous
    /// buffer, recording the three dimensions from the first entries.
    ///
    /// The flattened layout is only addressable via `get`/`get_inner` when the
    /// nested vector is rectangular (every inner vector the same length).
    /// That was previously assumed silently; it is now checked in debug
    /// builds, since a ragged input would silently corrupt all indexing.
    fn from(data: Vec<Vec<Vec<E>>>) -> Self {
        let inner_inner_length = if data.is_empty() || data[0].is_empty() {
            0
        } else {
            data[0][0].len()
        };
        let inner_length = if data.is_empty() { 0 } else { data[0].len() };
        let outer_length = data.len();
        // Robustness: reject ragged input in debug builds.
        debug_assert!(
            data.iter().all(|inner| inner.len() == inner_length),
            "ThreeDimensionalVector: ragged inner dimension"
        );
        debug_assert!(
            data.iter()
                .flatten()
                .all(|evals| evals.len() == inner_inner_length),
            "ThreeDimensionalVector: ragged innermost dimension"
        );
        let flattened_data = data.into_iter().flatten().flatten().collect();
        ThreeDimensionalVector {
            inner_inner_length,
            inner_length,
            outer_length,
            data: flattened_data,
        }
    }
}
impl Hintable<InnerConfig> for ThreeDimensionalVector {
    type HintVariable = ThreeDimensionalVecVariable<InnerConfig>;
    // Read order mirrors `write`: the three dimensions, then the flat data.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let inner_inner_length: Var<F> = usize::read(builder);
        let inner_length: Var<F> = usize::read(builder);
        let length: Var<F> = usize::read(builder);
        let data = Vec::<E>::read(builder);
        // Hints are untrusted: enforce that the dimensions multiply out to
        // the flattened length inside the circuit.
        builder.assert_eq::<Var<F>>(data.len(), inner_inner_length * inner_length * length);
        ThreeDimensionalVecVariable {
            inner_inner_length,
            inner_length,
            length,
            data,
        }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(<usize as Hintable<InnerConfig>>::write(
            &self.inner_inner_length,
        ));
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.inner_length));
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.outer_length));
        stream.extend(self.data.write());
        stream
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/zkvm_verifier/mod.rs | ceno_recursion/src/zkvm_verifier/mod.rs | pub mod binding;
pub mod verifier;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/zkvm_verifier/binding.rs | ceno_recursion/src/zkvm_verifier/binding.rs | use std::collections::BTreeMap;
use crate::{
arithmetics::{ceil_log2, next_pow2_instance_padding},
basefold_verifier::basefold::{
BasefoldCommitment, BasefoldCommitmentVariable, BasefoldProof, BasefoldProofVariable,
},
tower_verifier::binding::{
IOPProverMessage, IOPProverMessageVec, IOPProverMessageVecVariable, PointVariable,
ThreeDimensionalVecVariable, ThreeDimensionalVector,
},
};
use ceno_zkvm::{
scheme::{ZKVMChipProof, ZKVMProof},
structs::{EccQuarkProof, TowerProofs},
};
use gkr_iop::gkr::{GKRProof, layer::sumcheck_layer::LayerProof};
use itertools::Itertools;
use mpcs::{Basefold, BasefoldRSParams};
use multilinear_extensions::mle::Point;
use openvm_native_compiler::{
asm::AsmConfig,
ir::{Array, Builder, Config, Felt},
prelude::*,
};
use openvm_native_compiler_derive::iter_zip;
use openvm_native_recursion::hints::{Hintable, VecAutoHintable};
use openvm_stark_backend::p3_field::{FieldAlgebra, extension::BinomialExtensionField};
use openvm_stark_sdk::p3_baby_bear::BabyBear;
use p3::field::FieldExtensionAlgebra;
use sumcheck::structs::IOPProof;
/// Base field used by the recursion circuit.
pub type F = BabyBear;
/// Degree-4 binomial extension field over `F`.
pub type E = BinomialExtensionField<F, 4>;
/// Polynomial commitment scheme used by the proofs being verified.
pub type RecPcs = Basefold<E, BasefoldRSParams>;
/// Compiler config for the native recursion DSL (base field `F`, extension `E`).
pub type InnerConfig = AsmConfig<F, E>;
/// Returns the 32 low bits of `n - 1`, little-endian, one field element per
/// bit. `n == 0` is clamped to 0 instead of underflowing.
pub fn decompose_minus_one_bits(n: usize) -> Vec<F> {
    // `saturating_sub` is exactly `if n > 0 { n - 1 } else { 0 }`.
    let value = n.saturating_sub(1);
    (0..32usize)
        .map(|bit| F::from_canonical_usize((value >> bit) & 1))
        .collect()
}
/// Builds the sequence of layer sizes obtained by repeatedly halving `n`,
/// together with the 32-bit decomposition of `(size - 1)` for each layer.
///
/// The first entry is `n` itself; each subsequent entry is `m / 2` while the
/// iterate advances by `m.div_ceil(2)`.
/// NOTE(review): the pushed size (`m / 2`) and the next iterate
/// (`m.div_ceil(2)`) differ for odd `m` — presumably intentional for the
/// layer layout, but worth confirming against the prover's layer sizes.
pub fn decompose_prefixed_layer_bits(n: usize) -> (Vec<usize>, Vec<Vec<F>>) {
    let mut m = n;
    let mut r = vec![];
    let mut r_bits = vec![];
    r.push(m);
    r_bits.push(decompose_minus_one_bits(m));
    while m > 1 {
        let cur = m / 2;
        r.push(cur);
        r_bits.push(decompose_minus_one_bits(cur));
        m = m.div_ceil(2);
    }
    (r, r_bits)
}
/// DSL-side mirror of `ZKVMProofInput`; fields are read back in exactly the
/// order `ZKVMProofInput::write` emits them.
#[derive(DslVariable, Clone)]
pub struct ZKVMProofInputVariable<C: Config> {
    pub shard_id: Usize<C::N>,
    pub raw_pi: Array<C, Array<C, Felt<C::F>>>,
    pub raw_pi_num_variables: Array<C, Var<C::N>>,
    pub pi_evals: Array<C, Ext<C::F, C::EF>>,
    pub chip_proofs: Array<C, Array<C, ZKVMChipProofInputVariable<C>>>,
    pub max_num_var: Var<C::N>,
    pub max_width: Var<C::N>,
    pub witin_commit: BasefoldCommitmentVariable<C>,
    pub witin_perm: Array<C, Var<C::N>>,
    pub fixed_perm: Array<C, Var<C::N>>,
    pub pcs_proof: BasefoldProofVariable<C>,
}
/// DSL-side mirror of `TowerProofInput`; each explicit length accompanies its
/// variable-length payload.
#[derive(DslVariable, Clone)]
pub struct TowerProofInputVariable<C: Config> {
    pub num_proofs: Usize<C::N>,
    pub proofs: Array<C, IOPProverMessageVecVariable<C>>,
    pub num_prod_specs: Usize<C::N>,
    pub prod_specs_eval: ThreeDimensionalVecVariable<C>,
    pub num_logup_specs: Usize<C::N>,
    pub logup_specs_eval: ThreeDimensionalVecVariable<C>,
}
/// Host-side input for the recursive zkVM verifier: one shard's proof,
/// repackaged into hint-stream-friendly types.
pub(crate) struct ZKVMProofInput {
    pub shard_id: usize,
    pub raw_pi: Vec<Vec<F>>,
    // Evaluation of raw_pi.
    pub pi_evals: Vec<E>,
    /// Per-chip proofs, keyed by chip index.
    pub chip_proofs: BTreeMap<usize, ZKVMChipProofs>,
    pub witin_commit: BasefoldCommitment,
    pub opening_proof: BasefoldProof,
}
impl From<(usize, ZKVMProof<E, RecPcs>)> for ZKVMProofInput {
fn from(d: (usize, ZKVMProof<E, RecPcs>)) -> Self {
ZKVMProofInput {
shard_id: d.0,
raw_pi: d.1.raw_pi,
pi_evals: d.1.pi_evals,
chip_proofs: d
.1
.chip_proofs
.into_iter()
.map(|(chip_idx, proofs)| {
(
chip_idx,
proofs
.into_iter()
.map(|proof| ZKVMChipProofInput::from((chip_idx, proof)))
.collect::<Vec<ZKVMChipProofInput>>()
.into(),
)
})
.collect::<BTreeMap<usize, ZKVMChipProofs>>(),
witin_commit: d.1.witin_commit.into(),
opening_proof: d.1.opening_proof.into(),
}
}
}
impl Hintable<InnerConfig> for ZKVMProofInput {
    type HintVariable = ZKVMProofInputVariable<InnerConfig>;
    // Read order must mirror `write` below exactly; several fields
    // (num-variable lists, max sizes, permutations) exist only in the stream
    // and are derived in `write`, not stored on the struct.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let shard_id = Usize::Var(usize::read(builder));
        let raw_pi = Vec::<Vec<F>>::read(builder);
        let raw_pi_num_variables = Vec::<usize>::read(builder);
        let pi_evals = Vec::<E>::read(builder);
        let chip_proofs = Vec::<ZKVMChipProofs>::read(builder);
        let max_num_var = usize::read(builder);
        let max_width = usize::read(builder);
        let witin_commit = BasefoldCommitment::read(builder);
        let witin_perm: Array<AsmConfig<F, BinomialExtensionField<F, 4>>, Var<F>> =
            Vec::<usize>::read(builder);
        let fixed_perm = Vec::<usize>::read(builder);
        let pcs_proof = BasefoldProof::read(builder);
        ZKVMProofInputVariable {
            shard_id,
            raw_pi,
            raw_pi_num_variables,
            pi_evals,
            chip_proofs,
            max_num_var,
            max_width,
            witin_commit,
            witin_perm,
            fixed_perm,
            pcs_proof,
        }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        // log2 of the padded length of each public-input vector.
        let raw_pi_num_variables: Vec<usize> = self
            .raw_pi
            .iter()
            .map(|v| ceil_log2(v.len().next_power_of_two()))
            .collect();
        // Per-chip instance counts / widths, flattened across all chips in
        // BTreeMap (= chip index) order.
        let witin_num_vars = self
            .chip_proofs
            .iter()
            .flat_map(|(_, proofs)| proofs.iter())
            .map(|proof| proof.sum_num_instances)
            .collect::<Vec<_>>();
        let witin_max_widths = self
            .chip_proofs
            .iter()
            .flat_map(|(_, proofs)| proofs.iter())
            .map(|proof| proof.wits_in_evals.len().max(1))
            .collect::<Vec<_>>();
        // Fixed-column entries only exist for chips with fixed evaluations.
        let fixed_num_vars = self
            .chip_proofs
            .iter()
            .flat_map(|(_, proofs)| proofs.iter())
            .filter(|proof| !proof.fixed_in_evals.is_empty())
            .map(|proof| proof.sum_num_instances)
            .collect::<Vec<_>>();
        let fixed_max_widths = self
            .chip_proofs
            .iter()
            .flat_map(|(_, proofs)| proofs.iter())
            .filter(|proof| !proof.fixed_in_evals.is_empty())
            .map(|proof| proof.fixed_in_evals.len())
            .collect::<Vec<_>>();
        let max_num_var = witin_num_vars.iter().copied().max().unwrap_or(0);
        let max_width = witin_max_widths
            .iter()
            .chain(fixed_max_widths.iter())
            .copied()
            .max()
            .unwrap_or(0);
        // Permutation mapping original position -> rank when sorted by
        // descending num_vars (stable by original index for ties).
        let get_perm = |v: Vec<usize>| {
            let mut perm = vec![0; v.len()];
            v.into_iter()
                // the original order
                .enumerate()
                .sorted_by(|(_, nv_a), (_, nv_b)| Ord::cmp(nv_b, nv_a))
                .enumerate()
                // j is the new index where i is the original index
                .map(|(j, (i, _))| (i, j))
                .for_each(|(i, j)| {
                    perm[i] = j;
                });
            perm
        };
        let witin_perm = get_perm(witin_num_vars);
        let fixed_perm = get_perm(fixed_num_vars);
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.shard_id));
        stream.extend(self.raw_pi.write());
        stream.extend(raw_pi_num_variables.write());
        stream.extend(self.pi_evals.write());
        // Outer length of `chip_proofs`, emitted by hand (one felt word).
        stream.extend(vec![vec![F::from_canonical_usize(self.chip_proofs.len())]]);
        for proofs in self.chip_proofs.values() {
            stream.extend(proofs.write());
        }
        stream.extend(<usize as Hintable<InnerConfig>>::write(&max_num_var));
        stream.extend(<usize as Hintable<InnerConfig>>::write(&max_width));
        stream.extend(self.witin_commit.write());
        stream.extend(witin_perm.write());
        stream.extend(fixed_perm.write());
        stream.extend(self.opening_proof.write());
        stream
    }
}
/// Host-side tower proof, flattened for the hint stream: per-layer sumcheck
/// messages plus the product/logup spec evaluation tables.
#[derive(Default, Debug)]
pub struct TowerProofInput {
    pub num_proofs: usize,
    pub proofs: Vec<IOPProverMessageVec>,
    // specs -> layers -> evals
    pub num_prod_specs: usize,
    pub prod_specs_eval: ThreeDimensionalVector,
    // specs -> layers -> evals
    pub num_logup_specs: usize,
    pub logup_specs_eval: ThreeDimensionalVector,
}
impl From<TowerProofs<E>> for TowerProofInput {
    /// Flattens a prover-side `TowerProofs` into the hint-stream input form.
    fn from(p: TowerProofs<E>) -> Self {
        // Per-layer sumcheck messages, each flattened into one contiguous vec.
        let proofs: Vec<IOPProverMessageVec> = p
            .proofs
            .iter()
            .map(|vec| {
                IOPProverMessageVec::from(
                    vec.iter()
                        .map(|p| IOPProverMessage {
                            evaluations: p.evaluations.clone(),
                        })
                        .collect::<Vec<IOPProverMessage>>(),
                )
            })
            .collect();
        // Capture the counts (which borrow `p`) before moving fields out of
        // it, so both eval tables can be moved instead of cloned. The
        // original cloned `prod_specs_eval` only because these borrows came
        // after the move inside the struct literal.
        let num_proofs = p.proofs.len();
        let num_prod_specs = p.prod_spec_size();
        let num_logup_specs = p.logup_spec_size();
        Self {
            num_proofs,
            proofs,
            num_prod_specs,
            prod_specs_eval: ThreeDimensionalVector::from(p.prod_specs_eval),
            num_logup_specs,
            logup_specs_eval: ThreeDimensionalVector::from(p.logup_specs_eval),
        }
    }
}
impl Hintable<InnerConfig> for TowerProofInput {
    type HintVariable = TowerProofInputVariable<InnerConfig>;
    // Read order must mirror `write`: proof count, then one message-vec per
    // proof, then each spec count followed by its eval tensor.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let num_proofs = Usize::Var(usize::read(builder));
        let proofs = builder.dyn_array(num_proofs.clone());
        // Hint one IOPProverMessageVec per slot of the freshly-sized array.
        iter_zip!(builder, proofs).for_each(|idx_vec, builder| {
            let ptr = idx_vec[0];
            let proof = IOPProverMessageVec::read(builder);
            builder.iter_ptr_set(&proofs, ptr, proof);
        });
        let num_prod_specs = Usize::Var(usize::read(builder));
        let prod_specs_eval = ThreeDimensionalVector::read(builder);
        let num_logup_specs = Usize::Var(usize::read(builder));
        let logup_specs_eval = ThreeDimensionalVector::read(builder);
        TowerProofInputVariable {
            num_proofs,
            proofs,
            num_prod_specs,
            prod_specs_eval,
            num_logup_specs,
            logup_specs_eval,
        }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.num_proofs));
        for p in &self.proofs {
            stream.extend(p.write());
        }
        stream.extend(<usize as Hintable<InnerConfig>>::write(
            &self.num_prod_specs,
        ));
        stream.extend(self.prod_specs_eval.write());
        stream.extend(<usize as Hintable<InnerConfig>>::write(
            &self.num_logup_specs,
        ));
        stream.extend(self.logup_specs_eval.write());
        stream
    }
}
/// Host-side form of one chip's proof. Optional sub-proofs are encoded as a
/// `has_*` 0/1 flag plus a (possibly default) payload, since the DSL side
/// cannot hint `Option`s directly.
pub struct ZKVMChipProofInput {
    pub idx: usize,
    pub sum_num_instances: usize,
    // product constraints
    pub r_out_evals_len: usize,
    pub w_out_evals_len: usize,
    pub lk_out_evals_len: usize,
    pub r_out_evals: Vec<Vec<E>>,
    pub w_out_evals: Vec<Vec<E>>,
    pub lk_out_evals: Vec<Vec<E>>,
    pub tower_proof: TowerProofInput,
    // main constraint and select sumcheck proof
    pub has_main_sumcheck_proofs: usize,
    pub main_sumcheck_proofs: IOPProverMessageVec,
    // gkr proof
    pub has_gkr_proof: usize,
    pub gkr_iop_proof: GKRProofInput,
    // ecc proof
    pub has_ecc_proof: usize,
    pub ecc_proof: EccQuarkProofInput,
    pub num_instances: Vec<usize>,
    pub wits_in_evals: Vec<E>,
    pub fixed_in_evals: Vec<E>,
}
impl VecAutoHintable for ZKVMChipProofInput {}
/// wrapper struct to allow us implement VecAutoHintable
pub struct ZKVMChipProofs(Vec<ZKVMChipProofInput>);
impl From<Vec<ZKVMChipProofInput>> for ZKVMChipProofs {
    fn from(v: Vec<ZKVMChipProofInput>) -> Self {
        Self(v)
    }
}
impl VecAutoHintable for ZKVMChipProofs {}
impl ZKVMChipProofs {
    /// Borrowing iterator over the wrapped proofs.
    pub fn iter(&self) -> std::slice::Iter<'_, ZKVMChipProofInput> {
        self.0.iter()
    }
}
impl Hintable<InnerConfig> for ZKVMChipProofs {
    type HintVariable = Array<InnerConfig, ZKVMChipProofInputVariable<InnerConfig>>;
    // Both directions simply delegate to the inner Vec's hint encoding.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        Vec::<ZKVMChipProofInput>::read(builder)
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        self.0.write()
    }
}
impl From<(usize, ZKVMChipProof<E>)> for ZKVMChipProofInput {
    /// Converts a chip proof (plus its chip index) into the hint-stream input
    /// form, encoding each optional sub-proof as a (flag, payload) pair since
    /// the DSL side cannot hint `Option`s directly.
    fn from(d: (usize, ZKVMChipProof<E>)) -> Self {
        let (idx, p) = d;
        let sum_num_instances = p.num_instances.iter().sum();
        // Each Option is inspected exactly once via `match`, replacing the
        // original `is_some()` followed by `unwrap()` pairs.
        let (has_main_sumcheck_proofs, main_sumcheck_proofs) = match p.main_sumcheck_proofs {
            Some(r) => (
                1,
                r.iter()
                    .map(|p| IOPProverMessage {
                        evaluations: p.evaluations.clone(),
                    })
                    .collect::<Vec<IOPProverMessage>>()
                    .into(),
            ),
            None => (0, IOPProverMessageVec::default()),
        };
        let (has_gkr_proof, gkr_iop_proof) = match p.gkr_iop_proof {
            Some(proof) => (1, proof.into()),
            None => (0, GKRProofInput::default()),
        };
        let (has_ecc_proof, ecc_proof) = match p.ecc_proof {
            Some(proof) => (1, proof.into()),
            None => (0, EccQuarkProofInput::dummy()),
        };
        Self {
            idx,
            sum_num_instances,
            r_out_evals_len: p.r_out_evals.len(),
            w_out_evals_len: p.w_out_evals.len(),
            lk_out_evals_len: p.lk_out_evals.len(),
            r_out_evals: p.r_out_evals,
            w_out_evals: p.w_out_evals,
            lk_out_evals: p.lk_out_evals,
            tower_proof: p.tower_proof.into(),
            has_main_sumcheck_proofs,
            main_sumcheck_proofs,
            has_gkr_proof,
            gkr_iop_proof,
            has_ecc_proof,
            ecc_proof,
            num_instances: p.num_instances,
            wits_in_evals: p.wits_in_evals,
            fixed_in_evals: p.fixed_in_evals,
        }
    }
}
/// DSL-side mirror of `ZKVMChipProofInput`, plus stream-only derived fields
/// (bit decompositions, log2 sizes) that `write` computes on the host.
#[derive(DslVariable, Clone)]
pub struct ZKVMChipProofInputVariable<C: Config> {
    pub idx: Usize<C::N>,
    pub idx_felt: Felt<C::F>,
    pub sum_num_instances: Usize<C::N>,
    pub sum_num_instances_minus_one_bit_decomposition: Array<C, Felt<C::F>>,
    pub log2_num_instances: Usize<C::N>,
    pub r_out_evals_len: Usize<C::N>,
    pub w_out_evals_len: Usize<C::N>,
    pub lk_out_evals_len: Usize<C::N>,
    pub r_out_evals: Array<C, Array<C, Ext<C::F, C::EF>>>,
    pub w_out_evals: Array<C, Array<C, Ext<C::F, C::EF>>>,
    pub lk_out_evals: Array<C, Array<C, Ext<C::F, C::EF>>>,
    pub has_main_sumcheck_proofs: Usize<C::N>,
    pub main_sumcheck_proofs: IOPProverMessageVecVariable<C>,
    pub has_gkr_iop_proof: Usize<C::N>,
    pub gkr_iop_proof: GKRProofVariable<C>,
    pub tower_proof: TowerProofInputVariable<C>,
    pub has_ecc_proof: Usize<C::N>,
    pub ecc_proof: EccQuarkProofVariable<C>,
    pub num_instances: Array<C, Var<C::N>>,
    pub n_inst_0_bit_decomps: Array<C, Felt<C::F>>,
    pub n_inst_1_bit_decomps: Array<C, Felt<C::F>>,
    pub fixed_in_evals: Array<C, Ext<C::F, C::EF>>,
    pub wits_in_evals: Array<C, Ext<C::F, C::EF>>,
}
impl Hintable<InnerConfig> for ZKVMChipProofInput {
    type HintVariable = ZKVMChipProofInputVariable<InnerConfig>;
    // Read order must mirror `write` below exactly; several values (felt copy
    // of idx, bit decompositions, log2 size) are derived in `write` and exist
    // only in the stream.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let idx = Usize::Var(usize::read(builder));
        let idx_felt = F::read(builder);
        let sum_num_instances = Usize::Var(usize::read(builder));
        let sum_num_instances_minus_one_bit_decomposition = Vec::<F>::read(builder);
        let log2_num_instances = Usize::Var(usize::read(builder));
        let r_out_evals_len = Usize::Var(usize::read(builder));
        let w_out_evals_len = Usize::Var(usize::read(builder));
        let lk_out_evals_len = Usize::Var(usize::read(builder));
        let r_out_evals = Vec::<Vec<E>>::read(builder);
        let w_out_evals = Vec::<Vec<E>>::read(builder);
        let lk_out_evals = Vec::<Vec<E>>::read(builder);
        let tower_proof = TowerProofInput::read(builder);
        let has_main_sumcheck_proofs = Usize::Var(usize::read(builder));
        let main_sumcheck_proofs = IOPProverMessageVec::read(builder);
        let has_gkr_iop_proof = Usize::Var(usize::read(builder));
        let gkr_iop_proof = GKRProofInput::read(builder);
        let has_ecc_proof = Usize::Var(usize::read(builder));
        let ecc_proof = EccQuarkProofInput::read(builder);
        let num_instances = Vec::<usize>::read(builder);
        let n_inst_0_bit_decomps = Vec::<F>::read(builder);
        let n_inst_1_bit_decomps = Vec::<F>::read(builder);
        let fixed_in_evals = Vec::<E>::read(builder);
        let wits_in_evals = Vec::<E>::read(builder);
        ZKVMChipProofInputVariable {
            idx,
            idx_felt,
            sum_num_instances,
            sum_num_instances_minus_one_bit_decomposition,
            log2_num_instances,
            r_out_evals_len,
            w_out_evals_len,
            lk_out_evals_len,
            r_out_evals,
            w_out_evals,
            lk_out_evals,
            has_main_sumcheck_proofs,
            main_sumcheck_proofs,
            has_gkr_iop_proof,
            gkr_iop_proof,
            tower_proof,
            has_ecc_proof,
            ecc_proof,
            num_instances,
            n_inst_0_bit_decomps,
            n_inst_1_bit_decomps,
            fixed_in_evals,
            wits_in_evals,
        }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.idx));
        // idx is also hinted as a felt for direct in-circuit arithmetic.
        let idx_u32: F = F::from_canonical_u32(self.idx as u32);
        stream.extend(idx_u32.write());
        let sum_num_instances = self.num_instances.iter().sum();
        stream.extend(<usize as Hintable<InnerConfig>>::write(&sum_num_instances));
        let sum_num_instance_bit_decomp = decompose_minus_one_bits(sum_num_instances);
        stream.extend(sum_num_instance_bit_decomp.write());
        let next_pow2_instance = next_pow2_instance_padding(sum_num_instances);
        let log2_num_instances = ceil_log2(next_pow2_instance);
        stream.extend(<usize as Hintable<InnerConfig>>::write(&log2_num_instances));
        let r_out_evals_len = self.r_out_evals.len();
        let w_out_evals_len = self.w_out_evals.len();
        let lk_out_evals_len = self.lk_out_evals.len();
        stream.extend(<usize as Hintable<InnerConfig>>::write(&r_out_evals_len));
        stream.extend(<usize as Hintable<InnerConfig>>::write(&w_out_evals_len));
        stream.extend(<usize as Hintable<InnerConfig>>::write(&lk_out_evals_len));
        stream.extend(self.r_out_evals.write());
        stream.extend(self.w_out_evals.write());
        stream.extend(self.lk_out_evals.write());
        stream.extend(self.tower_proof.write());
        stream.extend(<usize as Hintable<InnerConfig>>::write(
            &self.has_main_sumcheck_proofs,
        ));
        stream.extend(self.main_sumcheck_proofs.write());
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.has_gkr_proof));
        stream.extend(self.gkr_iop_proof.write());
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.has_ecc_proof));
        stream.extend(self.ecc_proof.write());
        stream.extend(<Vec<usize> as Hintable<InnerConfig>>::write(
            &self.num_instances,
        ));
        // NOTE(review): indexing panics if `num_instances` is empty — confirm
        // every chip proof carries at least one instance count.
        let n_inst_0 = self.num_instances[0];
        let n_inst_0_bit_decomps = decompose_minus_one_bits(n_inst_0);
        // A missing second count defaults to 1 (decomposes to all-zero bits).
        let n_inst_1 = if self.num_instances.len() > 1 {
            self.num_instances[1]
        } else {
            1usize
        };
        let n_inst_1_bit_decomps = decompose_minus_one_bits(n_inst_1);
        stream.extend(n_inst_0_bit_decomps.write());
        stream.extend(n_inst_1_bit_decomps.write());
        stream.extend(self.fixed_in_evals.write());
        stream.extend(self.wits_in_evals.write());
        stream
    }
}
/// Host-side sumcheck proof for one GKR layer: round messages plus the final
/// claimed evaluations.
#[derive(Default)]
pub struct SumcheckLayerProofInput {
    pub proof: IOPProverMessageVec,
    pub evals: Vec<E>,
}
/// DSL-side mirror; `evals_len_div_3` is hinted separately (computed by the
/// host in `write`) instead of derived in-circuit.
#[derive(DslVariable, Clone)]
pub struct SumcheckLayerProofVariable<C: Config> {
    pub proof: IOPProverMessageVecVariable<C>,
    pub evals: Array<C, Ext<C::F, C::EF>>,
    pub evals_len_div_3: Var<C::N>,
}
impl VecAutoHintable for SumcheckLayerProofInput {}
impl Hintable<InnerConfig> for SumcheckLayerProofInput {
    type HintVariable = SumcheckLayerProofVariable<InnerConfig>;
    // Read order mirrors `write`: proof, evals, then the precomputed third.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let proof = IOPProverMessageVec::read(builder);
        let evals = Vec::<E>::read(builder);
        let evals_len_div_3 = usize::read(builder);
        Self::HintVariable {
            proof,
            evals,
            evals_len_div_3,
        }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(self.proof.write());
        stream.extend(self.evals.write());
        // Integer division: truncates when `evals.len()` is not a multiple
        // of 3 — presumably the length always is; not checked here.
        let evals_len_div_3 = self.evals.len() / 3;
        stream.extend(<usize as Hintable<InnerConfig>>::write(&evals_len_div_3));
        stream
    }
}
/// Host-side GKR layer proof: the main sumcheck plus an optional rotation
/// sumcheck encoded as a (`has_rotation` 0/1 flag, payload) pair.
pub struct LayerProofInput {
    pub has_rotation: usize,
    pub rotation: SumcheckLayerProofInput,
    pub main: SumcheckLayerProofInput,
}
impl From<LayerProof<E>> for LayerProofInput {
    /// Converts a GKR layer proof into hint-stream form. The optional
    /// rotation sub-proof is encoded as a (flag, payload) pair because the
    /// DSL side cannot hint an `Option` directly.
    fn from(p: LayerProof<E>) -> Self {
        // Inspect the Option once via `match` instead of the original
        // `is_some()` followed by `unwrap()`.
        let (has_rotation, rotation) = match p.rotation {
            Some(r) => (
                1,
                SumcheckLayerProofInput {
                    proof: IOPProverMessageVec::from(
                        r.proof
                            .proofs
                            .iter()
                            .map(|p| IOPProverMessage {
                                evaluations: p.evaluations.clone(),
                            })
                            .collect::<Vec<IOPProverMessage>>(),
                    ),
                    evals: r.evals,
                },
            ),
            None => (0, SumcheckLayerProofInput::default()),
        };
        Self {
            has_rotation,
            rotation,
            main: SumcheckLayerProofInput {
                proof: IOPProverMessageVec::from(
                    p.main
                        .proof
                        .proofs
                        .iter()
                        .map(|p| IOPProverMessage {
                            evaluations: p.evaluations.clone(),
                        })
                        .collect::<Vec<IOPProverMessage>>(),
                ),
                evals: p.main.evals,
            },
        }
    }
}
/// DSL-side mirror of `LayerProofInput`.
#[derive(DslVariable, Clone)]
pub struct LayerProofVariable<C: Config> {
    pub has_rotation: Usize<C::N>,
    pub rotation: SumcheckLayerProofVariable<C>,
    pub main: SumcheckLayerProofVariable<C>,
}
impl VecAutoHintable for LayerProofInput {}
impl Hintable<InnerConfig> for LayerProofInput {
    type HintVariable = LayerProofVariable<InnerConfig>;
    // Read order mirrors `write`: flag, rotation payload, then main proof.
    // Note the rotation payload is hinted even when the flag is 0 (it is the
    // default value in that case).
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let has_rotation = Usize::Var(usize::read(builder));
        let rotation = SumcheckLayerProofInput::read(builder);
        let main = SumcheckLayerProofInput::read(builder);
        Self::HintVariable {
            has_rotation,
            rotation,
            main,
        }
    }
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.has_rotation));
        stream.extend(self.rotation.write());
        stream.extend(self.main.write());
        stream
    }
}
/// Host-side GKR proof: one layer proof per circuit layer, in order.
#[derive(Default)]
pub struct GKRProofInput {
    pub layer_proofs: Vec<LayerProofInput>,
}
impl From<GKRProof<E>> for GKRProofInput {
fn from(p: GKRProof<E>) -> Self {
Self {
layer_proofs: p
.0
.into_iter()
.map(LayerProofInput::from)
.collect::<Vec<LayerProofInput>>(),
}
}
}
/// In-circuit counterpart of `GKRProofInput`: the hinted per-layer proofs.
#[derive(DslVariable, Clone)]
pub struct GKRProofVariable<C: Config> {
    pub layer_proofs: Array<C, LayerProofVariable<C>>,
}
impl Hintable<InnerConfig> for GKRProofInput {
    type HintVariable = GKRProofVariable<InnerConfig>;

    /// Reads the per-layer proofs back as one hinted vector.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        Self::HintVariable {
            layer_proofs: Vec::<LayerProofInput>::read(builder),
        }
    }

    /// The stream is exactly the serialized layer-proof vector.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        self.layer_proofs.write()
    }
}
/// A claim (vector of evaluations) together with an optional evaluation
/// point; `has_point` is a 0/1 flag because the DSL has no `Option`.
#[derive(DslVariable, Clone)]
pub struct ClaimAndPoint<C: Config> {
    pub evals: Array<C, Ext<C::F, C::EF>>,
    pub has_point: Usize<C::N>,
    pub point: PointVariable<C>,
}
/// Claim bundle for a rotation argument: evaluations of the left/right
/// rotated components and the rotation target, plus the points they are
/// claimed at. Field semantics follow `gkr_iop`'s rotation layer protocol —
/// confirm against that crate when modifying.
#[derive(DslVariable, Clone)]
pub struct RotationClaim<C: Config> {
    pub left_evals: Array<C, Ext<C::F, C::EF>>,
    pub right_evals: Array<C, Ext<C::F, C::EF>>,
    pub target_evals: Array<C, Ext<C::F, C::EF>>,
    pub left_point: Array<C, Ext<C::F, C::EF>>,
    pub right_point: Array<C, Ext<C::F, C::EF>>,
    pub origin_point: Array<C, Ext<C::F, C::EF>>,
}
/// One GKR output claim: polynomial index `poly`, evaluated at `point`,
/// is claimed to equal `value`.
#[derive(DslVariable, Clone)]
pub struct GKRClaimEvaluation<C: Config> {
    pub value: Ext<C::F, C::EF>,
    pub point: PointVariable<C>,
    pub poly: Usize<C::N>,
}
/// In-circuit degree-7 ("septic") extension element, stored limb-wise as an
/// array of extension values.
#[derive(DslVariable, Clone)]
pub struct SepticExtensionVariable<C: Config> {
    pub vs: Array<C, Ext<C::F, C::EF>>,
}
impl<C: Config> From<Array<C, Ext<C::F, C::EF>>> for SepticExtensionVariable<C> {
    /// Wraps an existing limb array (expected length 7) without copying.
    fn from(slice: Array<C, Ext<C::F, C::EF>>) -> Self {
        Self { vs: slice }
    }
}
impl<C: Config> SepticExtensionVariable<C> {
    /// Returns 1 if every limb of this septic extension element is zero,
    /// otherwise 0.
    ///
    /// Each limb is decomposed into its base-field felts (`ext2felt`); the
    /// flag is cleared as soon as any felt is non-zero. There is no early
    /// exit — every limb and felt is always scanned.
    pub fn is_zero(&self, builder: &mut Builder<C>) -> Usize<C::N> {
        // Start from "assume zero" (r = 1) and falsify on any non-zero felt.
        let r = Usize::uninit(builder);
        builder.assign(&r, Usize::from(1));
        let zero = Usize::from(0);
        iter_zip!(builder, self.vs).for_each(|ptr_vec, builder| {
            let e = builder.iter_ptr_get(&self.vs, ptr_vec[0]);
            // Compare at the felt level; there is no direct ext != 0 test here.
            let fs = builder.ext2felt(e);
            builder.range(0, fs.len()).for_each(|idx_vec, builder| {
                let f = builder.get(&fs, idx_vec[0]);
                let u = Usize::Var(builder.cast_felt_to_var(f));
                builder.if_ne(u, zero.clone()).then(|builder| {
                    builder.assign(&r, Usize::from(0));
                });
            });
        });
        r
    }
}
/// Host-side form of a point over the septic extension: coordinate limbs
/// plus an explicit point-at-infinity flag.
pub struct SepticPointInput {
    x: SepticExtensionInput,
    y: SepticExtensionInput,
    is_infinity: bool,
}
/// In-circuit counterpart of `SepticPointInput`; `is_infinity` is a 0/1 flag.
#[derive(DslVariable, Clone)]
pub struct SepticPointVariable<C: Config> {
    pub x: SepticExtensionVariable<C>,
    pub y: SepticExtensionVariable<C>,
    pub is_infinity: Usize<C::N>,
}
/// Host-side form of an ECC "quark" proof: the zerocheck sumcheck messages,
/// instance count, final evaluations, the sumcheck point `rt`, and the
/// claimed septic-curve sum.
pub struct EccQuarkProofInput {
    pub zerocheck_proof: IOPProof<E>,
    pub num_instances: usize,
    pub evals: Vec<E>, // x[rt,0], x[rt,1], y[rt,0], y[rt,1], x[0,rt], y[0,rt], s[0,rt]
    pub rt: Point<E>,
    pub sum: SepticPointInput,
}
impl EccQuarkProofInput {
pub fn dummy() -> Self {
Self {
zerocheck_proof: IOPProof { proofs: Vec::new() },
num_instances: 0,
evals: Vec::new(),
rt: Vec::new(),
sum: SepticPointInput {
x: SepticExtensionInput { v: [F::ZERO; 7] },
y: SepticExtensionInput { v: [F::ZERO; 7] },
is_infinity: false,
},
}
}
}
impl From<EccQuarkProof<E>> for EccQuarkProofInput {
    /// Moves the scalar fields across verbatim and unpacks the septic sum
    /// into the raw base-field limb arrays expected by the hint stream.
    fn from(proof: EccQuarkProof<E>) -> Self {
        let sum = SepticPointInput {
            x: SepticExtensionInput { v: proof.sum.x.0 },
            y: SepticExtensionInput { v: proof.sum.y.0 },
            is_infinity: proof.sum.is_infinity,
        };
        Self {
            zerocheck_proof: proof.zerocheck_proof,
            num_instances: proof.num_instances,
            evals: proof.evals,
            rt: proof.rt,
            sum,
        }
    }
}
/// In-circuit counterpart of `EccQuarkProofInput`.
///
/// The extra fields (`num_instances_layered_ns`, `num_instances_bit_decomps`,
/// `num_vars`) are auxiliary hints the host derives from `num_instances`
/// while writing the stream (see `Hintable::write`).
#[derive(DslVariable, Clone)]
pub struct EccQuarkProofVariable<C: Config> {
    pub zerocheck_proof: IOPProverMessageVecVariable<C>,
    pub num_instances: Usize<C::N>,
    pub num_instances_layered_ns: Array<C, Var<C::N>>,
    pub num_instances_bit_decomps: Array<C, Array<C, Felt<C::F>>>,
    pub num_vars: Usize<C::N>, // next_pow2_instance_padding(proof.num_instances).ilog2()
    pub evals: Array<C, Ext<C::F, C::EF>>,
    pub rt: PointVariable<C>,
    pub sum: SepticPointVariable<C>,
}
impl Hintable<InnerConfig> for EccQuarkProofInput {
    type HintVariable = EccQuarkProofVariable<InnerConfig>;
    /// Reads the proof fields in exactly the order `write` emits them,
    /// including the auxiliary hints derived host-side from `num_instances`.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let zerocheck_proof = IOPProverMessageVec::read(builder);
        let num_instances = Usize::Var(usize::read(builder));
        // Auxiliary hints — derived by the writer, not independent data.
        let num_instances_layered_ns = Vec::<usize>::read(builder);
        let num_instances_bit_decomps = Vec::<Vec<F>>::read(builder);
        let num_vars = Usize::Var(usize::read(builder));
        let evals = Vec::<E>::read(builder);
        let rt_vec = Vec::<E>::read(builder);
        let rt = PointVariable { fs: rt_vec };
        let sum = SepticPointInput::read(builder);
        EccQuarkProofVariable {
            zerocheck_proof,
            num_instances,
            num_instances_layered_ns,
            num_instances_bit_decomps,
            num_vars,
            evals,
            rt,
            sum,
        }
    }
    /// Serializes the proof plus hints derived from `num_instances`:
    /// layered counts with bit decompositions, and the padded variable count.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        // Re-wrap the sumcheck messages into the hintable message vector.
        let p_vec = IOPProverMessageVec::from(
            self.zerocheck_proof
                .proofs
                .clone()
                .into_iter()
                .map(|p| IOPProverMessage {
                    evaluations: p.evaluations,
                })
                .collect::<Vec<IOPProverMessage>>(),
        );
        stream.extend(p_vec.write());
        stream.extend(<usize as Hintable<InnerConfig>>::write(&self.num_instances));
        // Derived hints so the circuit need not recompute them.
        let (ns, n_bits) = decompose_prefixed_layer_bits(self.num_instances);
        stream.extend(<Vec<usize> as Hintable<InnerConfig>>::write(&ns));
        stream.extend(<Vec<Vec<F>> as Hintable<InnerConfig>>::write(&n_bits));
        let num_vars = next_pow2_instance_padding(self.num_instances).ilog2() as usize;
        stream.extend(<usize as Hintable<InnerConfig>>::write(&num_vars));
        stream.extend(self.evals.write());
        stream.extend(self.rt.write());
        stream.extend(self.sum.write());
        stream
    }
}
/// Host-side septic extension element as seven raw base-field limbs.
pub struct SepticExtensionInput {
    v: [F; 7],
}
impl Hintable<InnerConfig> for SepticExtensionInput {
    type HintVariable = SepticExtensionVariable<InnerConfig>;

    /// The seven limbs are streamed as one extension-element vector.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        SepticExtensionVariable {
            vs: Vec::<E>::read(builder),
        }
    }

    /// Lifts each base-field limb into the extension field before
    /// serializing, mirroring the `Vec::<E>::read` on the circuit side.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let lifted: Vec<E> = self.v.iter().copied().map(E::from_base).collect();
        lifted.write()
    }
}
impl Hintable<InnerConfig> for SepticPointInput {
    type HintVariable = SepticPointVariable<InnerConfig>;

    /// Reads x, y, then the infinity flag — the same order `write` emits.
    fn read(builder: &mut Builder<InnerConfig>) -> Self::HintVariable {
        let x = SepticExtensionInput::read(builder);
        let y = SepticExtensionInput::read(builder);
        let is_infinity = Usize::Var(usize::read(builder));
        SepticPointVariable { x, y, is_infinity }
    }

    /// Serializes x, y, and the infinity flag encoded as 0/1.
    fn write(&self) -> Vec<Vec<<InnerConfig as Config>::N>> {
        let mut stream = Vec::new();
        stream.extend(self.x.write());
        stream.extend(self.y.write());
        // `bool as usize` is exactly the 0/1 encoding; no branch needed.
        stream.extend(<usize as Hintable<InnerConfig>>::write(
            &(self.is_infinity as usize),
        ));
        stream
    }
}
#[derive(DslVariable, Clone)]
pub struct SelectorContextVariable<C: Config> {
pub offset: Usize<C::N>,
pub offset_bit_decomps: Array<C, Felt<C::F>>,
pub num_instances: Usize<C::N>,
pub num_instances_layered_ns: Array<C, Var<C::N>>,
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/zkvm_verifier/verifier.rs | ceno_recursion/src/zkvm_verifier/verifier.rs | use super::binding::{
ClaimAndPoint, GKRClaimEvaluation, RotationClaim, ZKVMChipProofInputVariable,
ZKVMProofInputVariable,
};
use crate::{
arithmetics::{
PolyEvaluator, UniPolyExtrapolator, assert_ext_arr_eq, challenger_multi_observe, eq_eval,
eval_ceno_expr_with_instance, eval_wellform_address_vec, mask_arr, reverse,
},
basefold_verifier::{
basefold::{BasefoldCommitmentVariable, RoundOpeningVariable, RoundVariable},
mmcs::MmcsCommitmentVariable,
query_phase::PointAndEvalsVariable,
utils::pow_2,
},
};
// use crate::basefold_verifier::verifier::batch_verify;
use crate::{
arithmetics::{
arr_product, build_eq_x_r_vec_sequential, concat, dot_product as ext_dot_product,
eq_eval_less_or_equal_than, gen_alpha_pows, nested_product,
},
tower_verifier::{
binding::{PointAndEvalVariable, PointVariable},
program::{iop_verifier_state_verify, verify_tower_proof},
},
transcript::transcript_observe_label,
zkvm_verifier::binding::{
EccQuarkProofVariable, GKRProofVariable, LayerProofVariable, SelectorContextVariable,
SepticExtensionVariable, SepticPointVariable, SumcheckLayerProofVariable,
},
};
use ceno_zkvm::structs::{ComposedConstrainSystem, VerifyingKey, ZKVMVerifyingKey};
use ff_ext::BabyBearExt4;
use gkr_iop::{
evaluation::EvalExpression,
gkr::{
GKRCircuit,
booleanhypercube::BooleanHypercube,
layer::{Layer, ROTATION_OPENING_COUNT},
},
selector::SelectorType,
};
use itertools::{Itertools, izip};
use mpcs::{Basefold, BasefoldRSParams};
use multilinear_extensions::{
StructuralWitInType, StructuralWitInType::StackedConstantSequence, expression::Expression,
};
use openvm_native_compiler::prelude::*;
use openvm_native_compiler_derive::iter_zip;
use openvm_native_recursion::challenger::{
CanObserveVariable, FeltChallenger, duplex::DuplexChallengerVariable,
};
use openvm_stark_backend::p3_field::FieldAlgebra;
use p3::babybear::BabyBear;
type F = BabyBear;
type E = BabyBearExt4;
type Pcs = Basefold<E, BasefoldRSParams>;
const NUM_FANIN: usize = 2;
const SEPTIC_EXTENSION_DEGREE: usize = 7;
/// Observes `label` on every transcript in the group, keeping them in sync.
///
/// Takes a slice instead of `&mut Vec` (consistent with
/// `transcript_group_sample_ext`); `&mut Vec<_>` call sites still coerce.
pub fn transcript_group_observe_label<C: Config>(
    builder: &mut Builder<C>,
    challenger_group: &mut [DuplexChallengerVariable<C>],
    label: &[u8],
) {
    for t in challenger_group.iter_mut() {
        transcript_observe_label(builder, t, label);
    }
}
/// Observes the field element `f` on every transcript in the group,
/// keeping them in sync.
///
/// Takes a slice instead of `&mut Vec` (consistent with
/// `transcript_group_sample_ext`); `&mut Vec<_>` call sites still coerce.
pub fn transcript_group_observe_f<C: Config>(
    builder: &mut Builder<C>,
    challenger_group: &mut [DuplexChallengerVariable<C>],
    f: Felt<C::F>,
) {
    for t in challenger_group.iter_mut() {
        t.observe(builder, f);
    }
}
/// Samples an extension-field challenge from every transcript in the group
/// so all of them stay synchronized, returning only the first transcript's
/// value. Panics if the group is empty (as the original indexing did).
pub fn transcript_group_sample_ext<C: Config>(
    builder: &mut Builder<C>,
    challenger_group: &mut [DuplexChallengerVariable<C>],
) -> Ext<C::F, C::EF> {
    let sampled: Ext<C::F, C::EF> = challenger_group[0].sample_ext(builder);
    // The remaining transcripts squeeze too; their outputs are discarded.
    for challenger in challenger_group[1..].iter_mut() {
        challenger.sample_ext(builder);
    }
    sampled
}
/// Verifies a complete ZKVM shard proof inside the recursion circuit.
///
/// Replays the prover's Fiat-Shamir transcript (public inputs, fixed and
/// witness commitments, per-chip indices and instance counts), verifies each
/// chip proof against its verifying key, and enforces the shard-wide checks:
/// the product of all read records equals the product of all write records,
/// and the accumulated logup sum is zero.
///
/// Returns the septic elliptic-curve sum accumulated over all chips that
/// carried an ECC proof (stays uninitialized except via those chips).
pub fn verify_zkvm_proof<C: Config<F = F>>(
    builder: &mut Builder<C>,
    zkvm_proof_input: ZKVMProofInputVariable<C>,
    vk: &ZKVMVerifyingKey<E, Pcs>,
) -> SepticPointVariable<C> {
    // Fresh transcript, domain-separated by the "riscv" label.
    let mut challenger = DuplexChallengerVariable::new(builder);
    transcript_observe_label(builder, &mut challenger, b"riscv");
    // Shard-wide accumulators: read/write record products and the logup sum.
    let prod_r: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    let prod_w: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    let logup_sum: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
    // Absorb every raw public-input value into the transcript.
    iter_zip!(builder, zkvm_proof_input.raw_pi).for_each(|ptr_vec, builder| {
        let v = builder.iter_ptr_get(&zkvm_proof_input.raw_pi, ptr_vec[0]);
        challenger_multi_observe(builder, &mut challenger, &v);
    });
    // A single-felt public input is a constant polynomial, so its claimed
    // MLE evaluation must equal the raw value itself.
    iter_zip!(builder, zkvm_proof_input.raw_pi, zkvm_proof_input.pi_evals).for_each(
        |ptr_vec, builder| {
            let raw = builder.iter_ptr_get(&zkvm_proof_input.raw_pi, ptr_vec[0]);
            let eval = builder.iter_ptr_get(&zkvm_proof_input.pi_evals, ptr_vec[1]);
            let raw0 = builder.get(&raw, 0);
            builder.if_eq(raw.len(), Usize::from(1)).then(|builder| {
                let raw0_ext = builder.ext_from_base_slice(&[raw0]);
                builder.assert_ext_eq(raw0_ext, eval);
            });
        },
    );
    // Observe the fixed commitment. Shard 0 uses `vk.fixed_commit`; every
    // other shard uses `vk.fixed_no_omc_init_commit` (per the vk field names).
    builder
        .if_eq(zkvm_proof_input.shard_id.clone(), Usize::from(0))
        .then(|builder| {
            if let Some(fixed_commit) = vk.fixed_commit.as_ref() {
                let commit: crate::basefold_verifier::hash::Hash = fixed_commit.commit().into();
                let commit_array: Array<C, Felt<C::F>> = builder.dyn_array(commit.value.len());
                commit.value.into_iter().enumerate().for_each(|(i, v)| {
                    let v = builder.constant(v);
                    // TODO: put fixed commit to public values
                    // builder.commit_public_value(v);
                    builder.set_value(&commit_array, i, v);
                });
                challenger_multi_observe(builder, &mut challenger, &commit_array);
                let log2_max_codeword_size_felt = builder.constant(C::F::from_canonical_usize(
                    fixed_commit.log2_max_codeword_size,
                ));
                challenger.observe(builder, log2_max_codeword_size_felt);
            }
        });
    builder
        .if_ne(zkvm_proof_input.shard_id.clone(), Usize::from(0))
        .then(|builder| {
            if let Some(fixed_commit) = vk.fixed_no_omc_init_commit.as_ref() {
                let commit: crate::basefold_verifier::hash::Hash = fixed_commit.commit().into();
                let commit_array: Array<C, Felt<C::F>> = builder.dyn_array(commit.value.len());
                commit.value.into_iter().enumerate().for_each(|(i, v)| {
                    let v = builder.constant(v);
                    // TODO: put fixed commit to public values
                    // builder.commit_public_value(v);
                    builder.set_value(&commit_array, i, v);
                });
                challenger_multi_observe(builder, &mut challenger, &commit_array);
                let log2_max_codeword_size_felt = builder.constant(C::F::from_canonical_usize(
                    fixed_commit.log2_max_codeword_size,
                ));
                challenger.observe(builder, log2_max_codeword_size_felt);
            }
        });
    // Observe, for each chip: its index felt and every instance count.
    iter_zip!(builder, zkvm_proof_input.chip_proofs).for_each(|ptr_vec, builder| {
        let chip_proofs = builder.iter_ptr_get(&zkvm_proof_input.chip_proofs, ptr_vec[0]);
        let chip_idx = builder.get(&chip_proofs, 0).idx_felt;
        challenger.observe(builder, chip_idx);
        iter_zip!(builder, chip_proofs).for_each(|ptr_vec, builder| {
            let chip_proof = builder.iter_ptr_get(&chip_proofs, ptr_vec[0]);
            iter_zip!(builder, chip_proof.num_instances).for_each(|ptr_vec, builder| {
                let num_instance = builder.iter_ptr_get(&chip_proof.num_instances, ptr_vec[0]);
                let num_instance = builder.unsafe_cast_var_to_felt(num_instance);
                challenger.observe(builder, num_instance);
            });
        });
    });
    // Observe the witness commitment and its codeword-size bound.
    challenger_multi_observe(
        builder,
        &mut challenger,
        &zkvm_proof_input.witin_commit.commit.value,
    );
    let log2_max_codeword_size_felt = builder.unsafe_cast_var_to_felt(
        zkvm_proof_input
            .witin_commit
            .log2_max_codeword_size
            .get_var(),
    );
    challenger.observe(builder, log2_max_codeword_size_felt);
    // Only after everything above is absorbed, sample the record challenges.
    let alpha = challenger.sample_ext(builder);
    let beta = challenger.sample_ext(builder);
    let challenges: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(2);
    builder.set(&challenges, 0, alpha);
    builder.set(&challenges, 1, beta);
    // Number of chips contributing fixed-column openings (drives the PCS
    // round count below).
    let num_fixed_opening = vk
        .circuit_vks
        .values()
        .filter(|c| c.get_cs().num_fixed() > 0)
        .count();
    let mut unipoly_extrapolator = UniPolyExtrapolator::new(builder);
    let mut poly_evaluator = PolyEvaluator::new(builder);
    // Padded opcode rows contribute the always-satisfied lookup item `alpha`;
    // its total multiplicity is accumulated while verifying opcode chips and
    // subtracted from the logup sum after the loop.
    let dummy_table_item = alpha;
    let dummy_table_item_multiplicity: Var<C::N> = builder.constant(C::N::ZERO);
    let witin_openings: Array<C, RoundOpeningVariable<C>> =
        builder.dyn_array(zkvm_proof_input.chip_proofs.len());
    let fixed_openings: Array<C, RoundOpeningVariable<C>> =
        builder.dyn_array(zkvm_proof_input.chip_proofs.len());
    // Accumulated EC sum over chips that carry an ECC proof.
    let shard_ec_sum = SepticPointVariable {
        x: SepticExtensionVariable {
            vs: builder.dyn_array(7),
        },
        y: SepticExtensionVariable {
            vs: builder.dyn_array(7),
        },
        is_infinity: Usize::uninit(builder),
    };
    let num_chips_verified: Usize<C::N> = builder.eval(C::N::ZERO);
    let num_chips_have_fixed: Usize<C::N> = builder.eval(C::N::ZERO);
    // chip_indices[i] = vk index claimed by the i-th proof batch.
    let chip_indices: Array<C, Var<C::N>> = builder.dyn_array(zkvm_proof_input.chip_proofs.len());
    builder
        .range(0, chip_indices.len())
        .for_each(|idx_vec, builder| {
            let i = idx_vec[0];
            let chip_proofs = builder.get(&zkvm_proof_input.chip_proofs, i);
            let chip_idx = builder.get(&chip_proofs, 0).idx;
            builder.set(&chip_indices, i, chip_idx);
        });
    // Walk the verifying keys in order; a vk whose index matches the next
    // unverified proof batch gets verified, all other vks are skipped.
    for (i, (circuit_name, chip_vk)) in vk.circuit_vks.iter().enumerate() {
        // NOTE(review): `circuit_vk` re-looks-up the same vk as `chip_vk`.
        let circuit_vk = &vk.circuit_vks[circuit_name];
        let chip_id: Var<C::N> = builder.get(&chip_indices, num_chips_verified.get_var());
        builder.if_eq(chip_id, RVar::from(i)).then(|builder| {
            let chip_proofs =
                builder.get(&zkvm_proof_input.chip_proofs, num_chips_verified.get_var());
            iter_zip!(builder, chip_proofs).for_each(|ptr_vec, builder| {
                let chip_proof = builder.iter_ptr_get(&chip_proofs, ptr_vec[0]);
                // Shape checks: every evaluation list must match the vk's
                // declared column/record counts.
                builder.assert_usize_eq(
                    chip_proof.wits_in_evals.len(),
                    Usize::from(circuit_vk.get_cs().num_witin()),
                );
                builder.assert_usize_eq(
                    chip_proof.fixed_in_evals.len(),
                    Usize::from(circuit_vk.get_cs().num_fixed()),
                );
                builder.assert_usize_eq(
                    chip_proof.r_out_evals.len(),
                    Usize::from(circuit_vk.get_cs().num_reads()),
                );
                builder.assert_usize_eq(
                    chip_proof.w_out_evals.len(),
                    Usize::from(circuit_vk.get_cs().num_writes()),
                );
                builder.assert_usize_eq(
                    chip_proof.lk_out_evals.len(),
                    Usize::from(circuit_vk.get_cs().num_lks()),
                );
                // Fold this chip's logup output claims (two p/q pairs per
                // lookup) into a per-chip sum of p/q fractions.
                let chip_logup_sum: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
                iter_zip!(builder, chip_proof.lk_out_evals).for_each(|ptr_vec, builder| {
                    let evals = builder.iter_ptr_get(&chip_proof.lk_out_evals, ptr_vec[0]);
                    let p1 = builder.get(&evals, 0);
                    let p2 = builder.get(&evals, 1);
                    let q1 = builder.get(&evals, 2);
                    let q2 = builder.get(&evals, 3);
                    builder.assign(&chip_logup_sum, chip_logup_sum + p1 * q1.inverse());
                    builder.assign(&chip_logup_sum, chip_logup_sum + p2 * q2.inverse());
                });
                challenger.observe(builder, chip_proof.idx_felt);
                // Table chips subtract from the global sum; opcode chips add,
                // and additionally account for dummy lookup padding below.
                if circuit_vk.get_cs().is_with_lk_table() {
                    builder.assign(&logup_sum, logup_sum - chip_logup_sum);
                } else {
                    // getting the number of dummy padding item that we used in this opcode circuit
                    let num_lks: Var<C::N> =
                        builder.eval(C::N::from_canonical_usize(chip_vk.get_cs().num_lks()));
                    // each padding instance contribute to (2^rotation_vars) dummy lookup padding
                    let next_pow2_instance: Var<C::N> =
                        pow_2(builder, chip_proof.log2_num_instances.get_var());
                    let num_padded_instance: Var<C::N> =
                        builder.eval(next_pow2_instance - chip_proof.sum_num_instances.clone());
                    let rotation_var: Var<C::N> = builder.constant(C::N::from_canonical_usize(
                        1 << circuit_vk.get_cs().rotation_vars().unwrap_or(0),
                    ));
                    let rotation_subgroup_size: Var<C::N> =
                        builder.constant(C::N::from_canonical_usize(
                            circuit_vk.get_cs().rotation_subgroup_size().unwrap_or(0),
                        ));
                    builder.assign(&num_padded_instance, num_padded_instance * rotation_var);
                    // each instance contribute to (2^rotation_vars - rotated) dummy lookup padding
                    let num_instance_non_selected: Var<C::N> = builder.eval(
                        chip_proof.sum_num_instances.clone()
                            * (rotation_var - rotation_subgroup_size - C::N::ONE),
                    );
                    let new_multiplicity: Var<C::N> =
                        builder.eval(num_lks * (num_padded_instance + num_instance_non_selected));
                    builder.assign(
                        &dummy_table_item_multiplicity,
                        dummy_table_item_multiplicity + new_multiplicity,
                    );
                    builder.assign(&logup_sum, logup_sum + chip_logup_sum);
                }
                builder.cycle_tracker_start("Verify chip proof");
                let (input_opening_point, chip_shard_ec_sum) = verify_chip_proof(
                    circuit_name,
                    builder,
                    &mut challenger,
                    &chip_proof,
                    &zkvm_proof_input.pi_evals,
                    &zkvm_proof_input.raw_pi,
                    &zkvm_proof_input.raw_pi_num_variables,
                    &challenges,
                    chip_vk,
                    &mut unipoly_extrapolator,
                    &mut poly_evaluator,
                );
                builder.cycle_tracker_end("Verify chip proof");
                // Record the witness / fixed openings at the chip's input
                // opening point for the (batched) PCS verification.
                let point_clone: Array<C, Ext<C::F, C::EF>> =
                    builder.eval(input_opening_point.clone());
                if circuit_vk.get_cs().num_witin() > 0 {
                    let witin_round: RoundOpeningVariable<C> = builder.eval(RoundOpeningVariable {
                        num_var: input_opening_point.len().get_var(),
                        point_and_evals: PointAndEvalsVariable {
                            point: PointVariable { fs: point_clone },
                            evals: chip_proof.wits_in_evals,
                        },
                    });
                    builder.set_value(&witin_openings, num_chips_verified.get_var(), witin_round);
                }
                if circuit_vk.get_cs().num_fixed() > 0 {
                    let fixed_round: RoundOpeningVariable<C> = builder.eval(RoundOpeningVariable {
                        num_var: input_opening_point.len().get_var(),
                        point_and_evals: PointAndEvalsVariable {
                            point: PointVariable {
                                fs: input_opening_point,
                            },
                            evals: chip_proof.fixed_in_evals,
                        },
                    });
                    builder.set_value(&fixed_openings, num_chips_have_fixed.get_var(), fixed_round);
                    builder.inc(&num_chips_have_fixed);
                }
                // Fold this chip's read/write products into the shard totals.
                let r_out_evals_prod = nested_product(builder, &chip_proof.r_out_evals);
                builder.assign(&prod_r, prod_r * r_out_evals_prod);
                let w_out_evals_prod = nested_product(builder, &chip_proof.w_out_evals);
                builder.assign(&prod_w, prod_w * w_out_evals_prod);
                // Accumulate the chip's EC sum unless it is the point at
                // infinity (chips without ECC ops).
                builder
                    .if_ne(chip_shard_ec_sum.is_infinity.clone(), Usize::from(1))
                    .then(|builder| {
                        add_septic_points_in_place(builder, &shard_ec_sum, &chip_shard_ec_sum);
                    });
            });
            builder.inc(&num_chips_verified);
        });
    }
    // Every supplied proof batch must have been matched to a vk above.
    builder.assert_eq::<Usize<_>>(num_chips_verified, chip_indices.len());
    // Remove the dummy lookup contributions: multiplicity * alpha^{-1}.
    let dummy_table_item_multiplicity =
        builder.unsafe_cast_var_to_felt(dummy_table_item_multiplicity);
    builder.assign(
        &logup_sum,
        logup_sum - dummy_table_item_multiplicity * dummy_table_item.inverse(),
    );
    // Assemble the PCS opening rounds: witness openings always, plus a fixed
    // round when any chip declares fixed columns.
    let rounds: Array<C, RoundVariable<C>> = if num_fixed_opening > 0 {
        builder.dyn_array(2)
    } else {
        builder.dyn_array(1)
    };
    builder.set(
        &rounds,
        0,
        RoundVariable {
            commit: zkvm_proof_input.witin_commit,
            openings: witin_openings,
            perm: zkvm_proof_input.witin_perm.clone(),
        },
    );
    if let Some(fixed_commit) = vk.fixed_commit.as_ref() {
        builder
            .if_eq(zkvm_proof_input.shard_id.clone(), Usize::from(0))
            .then(|builder| {
                let commit: crate::basefold_verifier::hash::Hash = fixed_commit.commit().into();
                let commit_array: Array<C, Felt<C::F>> = builder.dyn_array(commit.value.len());
                let log2_max_codeword_size: Var<C::N> = builder.constant(
                    C::N::from_canonical_usize(fixed_commit.log2_max_codeword_size),
                );
                builder.set(
                    &rounds,
                    1,
                    RoundVariable {
                        commit: BasefoldCommitmentVariable {
                            commit: MmcsCommitmentVariable {
                                value: commit_array,
                            },
                            log2_max_codeword_size: log2_max_codeword_size.into(),
                        },
                        openings: fixed_openings.clone(),
                        perm: zkvm_proof_input.fixed_perm.clone(),
                    },
                );
            });
    } else if let Some(fixed_commit) = vk.fixed_no_omc_init_commit.as_ref() {
        builder
            .if_ne(zkvm_proof_input.shard_id.clone(), Usize::from(0))
            .then(|builder| {
                let commit: crate::basefold_verifier::hash::Hash = fixed_commit.commit().into();
                let commit_array: Array<C, Felt<C::F>> = builder.dyn_array(commit.value.len());
                let log2_max_codeword_size: Var<C::N> = builder.constant(
                    C::N::from_canonical_usize(fixed_commit.log2_max_codeword_size),
                );
                builder.set(
                    &rounds,
                    1,
                    RoundVariable {
                        commit: BasefoldCommitmentVariable {
                            commit: MmcsCommitmentVariable {
                                value: commit_array,
                            },
                            log2_max_codeword_size: log2_max_codeword_size.into(),
                        },
                        openings: fixed_openings.clone(),
                        perm: zkvm_proof_input.fixed_perm.clone(),
                    },
                );
            });
    }
    // NOTE(review): the PCS batch verification below is currently disabled;
    // `rounds` is assembled but not checked here — confirm this is temporary.
    // _debug
    // batch_verify(
    //     builder,
    //     zkvm_proof_input.max_num_var,
    //     zkvm_proof_input.max_width,
    //     rounds,
    //     zkvm_proof_input.pcs_proof,
    //     &mut challenger,
    // );
    // Fold the initial/finalize global-state records into the write/read
    // products before the final equality check.
    let empty_arr: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(0);
    let initial_global_state = eval_ceno_expr_with_instance(
        builder,
        &empty_arr,
        &empty_arr,
        &empty_arr,
        &zkvm_proof_input.pi_evals,
        &challenges,
        &vk.initial_global_state_expr,
    );
    builder.assign(&prod_w, prod_w * initial_global_state);
    let finalize_global_state = eval_ceno_expr_with_instance(
        builder,
        &empty_arr,
        &empty_arr,
        &empty_arr,
        &zkvm_proof_input.pi_evals,
        &challenges,
        &vk.finalize_global_state_expr,
    );
    builder.assign(&prod_r, prod_r * finalize_global_state);
    // memory consistency check
    builder.assert_ext_eq(prod_r, prod_w);
    // logup check
    let zero: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
    builder.assert_ext_eq(logup_sum, zero);
    shard_ec_sum
}
/// Verifies a single chip's proof: the optional ECC "quark" proof, the tower
/// proof for its read/write/lookup record products, and the chip's GKR
/// circuit down to its witness/fixed input openings.
///
/// Returns the input opening point (at which `wits_in_evals` /
/// `fixed_in_evals` are claimed) together with the chip's septic EC sum
/// (point at infinity when the chip has no ECC ops).
pub fn verify_chip_proof<C: Config>(
    circuit_name: &str,
    builder: &mut Builder<C>,
    challenger: &mut DuplexChallengerVariable<C>,
    chip_proof: &ZKVMChipProofInputVariable<C>,
    pi_evals: &Array<C, Ext<C::F, C::EF>>,
    raw_pi: &Array<C, Array<C, Felt<C::F>>>,
    raw_pi_num_variables: &Array<C, Var<C::N>>,
    challenges: &Array<C, Ext<C::F, C::EF>>,
    vk: &VerifyingKey<E>,
    unipoly_extrapolator: &mut UniPolyExtrapolator<C>,
    poly_evaluator: &mut PolyEvaluator<C>,
) -> (Array<C, Ext<C::F, C::EF>>, SepticPointVariable<C>) {
    let composed_cs = vk.get_cs();
    let ComposedConstrainSystem {
        zkvm_v1_css: cs,
        gkr_circuit,
    } = &composed_cs;
    let one: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    // Record counts combine the opcode and table expression lists.
    let r_len = cs.r_expressions.len() + cs.r_table_expressions.len();
    let w_len = cs.w_expressions.len() + cs.w_table_expressions.len();
    let lk_len = cs.lk_expressions.len() + cs.lk_table_expressions.len();
    let num_batched = r_len + w_len + lk_len;
    let r_counts_per_instance: Usize<C::N> = Usize::from(r_len);
    let w_counts_per_instance: Usize<C::N> = Usize::from(w_len);
    let lk_counts_per_instance: Usize<C::N> = Usize::from(lk_len);
    let num_batched: Usize<C::N> = Usize::from(num_batched);
    // NOTE(review): `clone()` of a `Usize::Var` still refers to the same
    // underlying DSL variable, so the `assign` below also updates
    // `chip_proof.log2_num_instances` — confirm that is intended.
    let log2_num_instances = chip_proof.log2_num_instances.clone();
    if composed_cs.has_ecc_ops() {
        // Chips with ECC ops use one additional variable.
        builder.assign(
            &log2_num_instances,
            log2_num_instances.clone() + Usize::from(1),
        );
    }
    // Total variable count including the rotation variables (if any).
    let num_var_with_rotation: Usize<C::N> = Usize::Var(Var::uninit(builder));
    builder.assign(
        &num_var_with_rotation,
        log2_num_instances.clone() + Usize::from(composed_cs.rotation_vars().unwrap_or(0)),
    );
    // Defaults to the point at infinity; overwritten when an ECC proof exists.
    let shard_ec_sum = SepticPointVariable {
        x: SepticExtensionVariable {
            vs: builder.dyn_array(7),
        },
        y: SepticExtensionVariable {
            vs: builder.dyn_array(7),
        },
        is_infinity: Usize::uninit(builder),
    };
    if composed_cs.has_ecc_ops() {
        // An ECC chip must supply an ECC proof whose sum is a finite point.
        builder.assert_nonzero(&chip_proof.has_ecc_proof);
        let ecc_proof = &chip_proof.ecc_proof;
        builder.assert_usize_eq(ecc_proof.sum.is_infinity.clone(), Usize::from(0));
        verify_ecc_proof(builder, challenger, ecc_proof, unipoly_extrapolator);
        builder.assign(&shard_ec_sum, ecc_proof.sum.clone());
    } else {
        builder.assign(&shard_ec_sum.is_infinity, Usize::from(1));
    }
    // Tower proof: reduces the r/w products and logup p/q outputs to point
    // evaluations at a common depth.
    let tower_proof = &chip_proof.tower_proof;
    let num_variables: Array<C, Usize<C::N>> = builder.dyn_array(num_batched);
    builder
        .range(0, num_variables.len())
        .for_each(|idx_vec, builder| {
            builder.set(&num_variables, idx_vec[0], num_var_with_rotation.clone());
        });
    let prod_out_evals: Array<C, Array<C, Ext<C::F, C::EF>>> =
        concat(builder, &chip_proof.r_out_evals, &chip_proof.w_out_evals);
    let num_fanin: Usize<C::N> = Usize::from(NUM_FANIN);
    builder.cycle_tracker_start(format!("verify tower proof for opcode {circuit_name}",).as_str());
    let (_, record_evals, logup_p_evals, logup_q_evals) = verify_tower_proof(
        builder,
        challenger,
        prod_out_evals,
        &chip_proof.lk_out_evals,
        num_variables,
        num_fanin,
        num_var_with_rotation.clone(),
        tower_proof,
        unipoly_extrapolator,
    );
    builder.cycle_tracker_end(format!("verify tower proof for opcode {circuit_name}",).as_str());
    // Opcode chips (no lookup table) must have all logup numerators equal 1.
    if cs.lk_table_expressions.is_empty() {
        builder
            .range(0, logup_p_evals.len())
            .for_each(|idx_vec, builder| {
                let eval = builder.get(&logup_p_evals, idx_vec[0]).eval;
                builder.assert_ext_eq(eval, one);
            });
    }
    let num_rw_records: Usize<C::N> = builder.eval(r_counts_per_instance + w_counts_per_instance);
    builder.assert_usize_eq(record_evals.len(), num_rw_records.clone());
    builder.assert_usize_eq(logup_p_evals.len(), lk_counts_per_instance.clone());
    builder.assert_usize_eq(logup_q_evals.len(), lk_counts_per_instance.clone());
    // GKR circuit
    // Lay out the GKR output claims: record evals, then (table chips only)
    // logup p evals, then logup q evals.
    let out_evals_len: Usize<C::N> = if cs.lk_table_expressions.is_empty() {
        builder.eval(record_evals.len() + logup_q_evals.len())
    } else {
        builder.eval(record_evals.len() + logup_p_evals.len() + logup_q_evals.len())
    };
    let out_evals: Array<C, PointAndEvalVariable<C>> = builder.dyn_array(out_evals_len.clone());
    builder
        .range(0, record_evals.len())
        .for_each(|idx_vec, builder| {
            let cpt = builder.get(&record_evals, idx_vec[0]);
            builder.set(&out_evals, idx_vec[0], cpt);
        });
    let end: Usize<C::N> = Usize::uninit(builder);
    if !cs.lk_table_expressions.is_empty() {
        builder.assign(&end, record_evals.len() + logup_p_evals.len());
        let p_slice = out_evals.slice(builder, record_evals.len(), end.clone());
        builder
            .range(0, logup_p_evals.len())
            .for_each(|idx_vec, builder| {
                let cpt = builder.get(&logup_p_evals, idx_vec[0]);
                builder.set(&p_slice, idx_vec[0], cpt);
            });
    } else {
        builder.assign(&end, record_evals.len());
    }
    let q_slice = out_evals.slice(builder, end, out_evals_len);
    builder
        .range(0, logup_q_evals.len())
        .for_each(|idx_vec, builder| {
            let cpt = builder.get(&logup_q_evals, idx_vec[0]);
            builder.set(&q_slice, idx_vec[0], cpt);
        });
    let gkr_circuit = gkr_circuit.clone().unwrap();
    let zero_bit_decomps: Array<C, Felt<C::F>> = builder.dyn_array(32);
    // Selector contexts for the first GKR layer: one shared context per
    // output selector when there is no EC final sum, otherwise three
    // contexts (first half, second half, combined).
    let selector_ctxs: Vec<SelectorContextVariable<C>> = if cs.ec_final_sum.is_empty() {
        builder.assert_usize_eq(chip_proof.num_instances.len(), Usize::from(1));
        let num_instances_bit_decomps: Array<C, Array<C, Felt<C::F>>> = builder.dyn_array(1);
        builder.set(
            &num_instances_bit_decomps,
            0,
            chip_proof
                .sum_num_instances_minus_one_bit_decomposition
                .clone(),
        );
        vec![
            SelectorContextVariable {
                offset: Usize::from(0),
                offset_bit_decomps: zero_bit_decomps,
                num_instances: chip_proof.sum_num_instances.clone(),
                num_instances_layered_ns: builder.dyn_array(0), /* Only used in QuarkBinaryTreeLessThan(Expression<E>) */
                num_instances_bit_decomps,
                offset_instance_sum_bit_decomps: chip_proof
                    .sum_num_instances_minus_one_bit_decomposition
                    .clone(),
                num_vars: num_var_with_rotation.clone(),
            };
            gkr_circuit
                .layers
                .first()
                .map(|layer| layer.out_sel_and_eval_exprs.len())
                .unwrap_or(0)
        ]
    } else {
        builder.assert_usize_eq(chip_proof.num_instances.len(), Usize::from(2));
        let num_inst_0_bit_decomps: Array<C, Array<C, Felt<C::F>>> = builder.dyn_array(1);
        let num_inst_1_bit_decomps: Array<C, Array<C, Felt<C::F>>> = builder.dyn_array(1);
        let num_inst_sum_bit_decomps: Array<C, Array<C, Felt<C::F>>> = builder.dyn_array(1);
        builder.set(
            &num_inst_0_bit_decomps,
            0,
            chip_proof.n_inst_0_bit_decomps.clone(),
        );
        builder.set(
            &num_inst_1_bit_decomps,
            0,
            chip_proof.n_inst_1_bit_decomps.clone(),
        );
        builder.set(
            &num_inst_sum_bit_decomps,
            0,
            chip_proof
                .sum_num_instances_minus_one_bit_decomposition
                .clone(),
        );
        vec![
            SelectorContextVariable {
                offset: Usize::from(0),
                offset_bit_decomps: zero_bit_decomps.clone(),
                num_instances: Usize::Var(builder.get(&chip_proof.num_instances, 0)),
                num_instances_layered_ns: builder.dyn_array(0), /* Only used in QuarkBinaryTreeLessThan(Expression<E>) */
                num_instances_bit_decomps: num_inst_0_bit_decomps,
                offset_instance_sum_bit_decomps: chip_proof.n_inst_0_bit_decomps.clone(),
                num_vars: num_var_with_rotation.clone(),
            },
            SelectorContextVariable {
                offset: Usize::Var(builder.get(&chip_proof.num_instances, 0)),
                offset_bit_decomps: chip_proof.n_inst_0_bit_decomps.clone(),
                num_instances: Usize::Var(builder.get(&chip_proof.num_instances, 1)),
                num_instances_layered_ns: builder.dyn_array(0), /* Only used in QuarkBinaryTreeLessThan(Expression<E>) */
                num_instances_bit_decomps: num_inst_1_bit_decomps,
                offset_instance_sum_bit_decomps: chip_proof
                    .sum_num_instances_minus_one_bit_decomposition
                    .clone(),
                num_vars: num_var_with_rotation.clone(),
            },
            SelectorContextVariable {
                offset: Usize::from(0),
                offset_bit_decomps: zero_bit_decomps,
                num_instances: chip_proof.sum_num_instances.clone(),
                num_instances_layered_ns: builder.dyn_array(0), /* Only used in QuarkBinaryTreeLessThan(Expression<E>) */
                num_instances_bit_decomps: num_inst_sum_bit_decomps,
                offset_instance_sum_bit_decomps: chip_proof
                    .sum_num_instances_minus_one_bit_decomposition
                    .clone(),
                num_vars: num_var_with_rotation.clone(),
            },
        ]
    };
    // Walk the GKR circuit from its output claims down to the input
    // opening point returned to the caller.
    builder.cycle_tracker_start("Verify GKR Circuit");
    let rt = verify_gkr_circuit(
        builder,
        challenger,
        num_var_with_rotation,
        gkr_circuit,
        &chip_proof.gkr_iop_proof,
        challenges,
        pi_evals,
        raw_pi,
        raw_pi_num_variables,
        &out_evals,
        chip_proof,
        selector_ctxs,
        unipoly_extrapolator,
        poly_evaluator,
    );
    builder.cycle_tracker_end("Verify GKR Circuit");
    (rt.fs, shard_ec_sum)
}
pub fn verify_gkr_circuit<C: Config>(
builder: &mut Builder<C>,
challenger: &mut DuplexChallengerVariable<C>,
max_num_variables: Usize<C::N>,
gkr_circuit: GKRCircuit<E>,
gkr_proof: &GKRProofVariable<C>,
challenges: &Array<C, Ext<C::F, C::EF>>,
pub_io_evals: &Array<C, Ext<C::F, C::EF>>,
raw_pi: &Array<C, Array<C, Felt<C::F>>>,
raw_pi_num_variables: &Array<C, Var<C::N>>,
claims: &Array<C, PointAndEvalVariable<C>>,
_chip_proof: &ZKVMChipProofInputVariable<C>,
selector_ctxs: Vec<SelectorContextVariable<C>>,
unipoly_extrapolator: &mut UniPolyExtrapolator<C>,
poly_evaluator: &mut PolyEvaluator<C>,
) -> PointVariable<C> {
let rt = PointVariable {
fs: builder.dyn_array(0),
};
for (i, layer) in gkr_circuit.layers.iter().enumerate() {
let layer_proof = builder.get(&gkr_proof.layer_proofs, i);
let layer_challenges: Array<C, Ext<C::F, C::EF>> =
generate_layer_challenges(builder, challenger, challenges, layer.n_challenges);
let eval_and_dedup_points: Array<C, ClaimAndPoint<C>> = extract_claim_and_point(
builder,
layer,
claims,
&layer_challenges,
&layer_proof.has_rotation,
);
if layer.rotation_sumcheck_expression.is_some() {
builder.assert_usize_eq(
Usize::from(layer.out_sel_and_eval_exprs.len() + 3),
eval_and_dedup_points.len(),
);
} else {
builder.assert_usize_eq(
Usize::from(layer.out_sel_and_eval_exprs.len()),
eval_and_dedup_points.len(),
);
}
// ZeroCheckLayer verification (might include other layer types in the future)
let LayerProofVariable {
main:
SumcheckLayerProofVariable {
proof,
evals: main_evals,
evals_len_div_3: _main_evals_len_div_3,
},
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/arithmetics/mod.rs | ceno_recursion/src/arithmetics/mod.rs | #![allow(dead_code)]
#![allow(unused_variables)]
#![allow(unused_imports)]
use ceno_zkvm::structs::{ChallengeId, WitnessId};
use ff_ext::{BabyBearExt4, ExtensionField, SmallField};
use itertools::Either;
use multilinear_extensions::{Expression, Fixed, Instance};
use openvm_native_circuit::EXT_DEG;
use openvm_native_compiler::prelude::*;
use openvm_native_compiler_derive::iter_zip;
use openvm_native_recursion::challenger::{FeltChallenger, duplex::DuplexChallengerVariable};
use openvm_stark_backend::p3_field::{FieldAlgebra, FieldExtensionAlgebra};
type E = BabyBearExt4;
const MAX_NUM_VARS: usize = 25;
/// Debug helper: emits a `print_e` instruction for every extension-field
/// element of `arr` (printing happens when the generated program runs).
pub fn _print_ext_arr<C: Config>(builder: &mut Builder<C>, arr: &Array<C, Ext<C::F, C::EF>>) {
    iter_zip!(builder, arr).for_each(|ptr_vec, builder| {
        let e = builder.iter_ptr_get(arr, ptr_vec[0]);
        builder.print_e(e);
    });
}
/// Debug helper: emits a `print_f` instruction for every base-field element of `arr`.
pub fn _print_felt_arr<C: Config>(builder: &mut Builder<C>, arr: &Array<C, Felt<C::F>>) {
    iter_zip!(builder, arr).for_each(|ptr_vec, builder| {
        let f = builder.iter_ptr_get(arr, ptr_vec[0]);
        builder.print_f(f);
    });
}
/// Debug helper: emits a `print_v` instruction for every `Usize` element of `arr`
/// (each element is lowered to its underlying var first).
pub fn _print_usize_arr<C: Config>(builder: &mut Builder<C>, arr: &Array<C, Usize<C::N>>) {
    iter_zip!(builder, arr).for_each(|ptr_vec, builder| {
        let n = builder.iter_ptr_get(arr, ptr_vec[0]);
        builder.print_v(n.get_var());
    });
}
/// Emits circuit constraints asserting `arr1` and `arr2` are equal
/// elementwise, by checking `arr1[i] - arr2[i] == 0` for each zipped pair.
///
/// Bug fix: `arr2` was previously read through `ptr_vec[0]`, the iteration
/// pointer belonging to `arr1`. Every other `iter_zip!` site in this module
/// (e.g. `dot_product`, `evaluate_base_poly_at_point`) reads the second
/// zipped array through its own pointer `ptr_vec[1]`; reading `arr2` through
/// `arr1`'s pointer would compare `arr1` against itself.
pub fn assert_ext_arr_eq<C: Config>(
    builder: &mut Builder<C>,
    arr1: &Array<C, Ext<C::F, C::EF>>,
    arr2: &Array<C, Ext<C::F, C::EF>>,
) {
    let zero: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
    iter_zip!(builder, arr1, arr2).for_each(|ptr_vec, builder| {
        let e1 = builder.iter_ptr_get(arr1, ptr_vec[0]);
        let e2 = builder.iter_ptr_get(arr2, ptr_vec[1]);
        let diff: Ext<C::F, C::EF> = builder.eval(e1 - e2);
        builder.assert_ext_eq(diff, zero);
    });
}
/// Reinterprets an array of extension-field elements as a flat base-field
/// array of length `exts.len() * EF::D`, without copying — the returned array
/// aliases `exts`' backing memory.
///
/// # Safety
/// Sound only if each `Ext` occupies exactly `C::EF::D` consecutive `Felt`
/// slots starting at `exts.ptr()` — assumed from the `Array::Dyn` construction
/// here; TODO confirm this layout invariant for all callers.
pub unsafe fn exts_to_felts<C: Config>(
    builder: &mut Builder<C>,
    exts: &Array<C, Ext<C::F, C::EF>>,
) -> Array<C, Felt<C::F>> {
    let f_len: Usize<C::N> = builder.eval(exts.len() * Usize::from(C::EF::D));
    let f_arr: Array<C, Felt<C::F>> = Array::Dyn(exts.ptr(), f_len);
    f_arr
}
/// Absorbs the whole of `arr` into the duplex challenger via a single batched
/// Poseidon2 observe, then fixes up the challenger's input/output pointers.
pub fn challenger_multi_observe<C: Config>(
    builder: &mut Builder<C>,
    challenger: &mut DuplexChallengerVariable<C>,
    arr: &Array<C, Felt<C::F>>,
) {
    // `poseidon2_multi_observe` returns the offset of the next free input slot
    // relative to the empty marker.
    let next_input_ptr =
        builder.poseidon2_multi_observe(&challenger.sponge_state, challenger.input_ptr, arr);
    builder.assign(
        &challenger.input_ptr,
        challenger.io_empty_ptr + next_input_ptr.clone(),
    );
    // Non-zero offset: the input buffer is partially filled, so any previously
    // squeezed output is stale -> mark output empty. Zero offset: the last
    // permutation consumed the buffer exactly -> output buffer is full.
    // NOTE(review): inferred from the pointer bookkeeping; confirm against
    // `poseidon2_multi_observe`'s contract.
    builder.if_ne(next_input_ptr, Usize::from(0)).then_or_else(
        |builder| {
            builder.assign(&challenger.output_ptr, challenger.io_empty_ptr);
        },
        |builder| {
            builder.assign(&challenger.output_ptr, challenger.io_full_ptr);
        },
    );
}
/// Returns 1 if `left < right`, otherwise 0, as an `RVar`.
///
/// Implemented without a native comparison: scan `0..right` and count how many
/// indices equal `left` — exactly one match occurs iff `left < right`.
/// Cost is linear in `right`.
pub fn is_smaller_than<C: Config>(
    builder: &mut Builder<C>,
    left: RVar<C::N>,
    right: RVar<C::N>,
) -> RVar<C::N> {
    let res: Felt<C::F> = builder.constant(C::F::ZERO);
    let one: Felt<C::F> = builder.constant(C::F::ONE);
    builder.range(0, right).for_each(|idx_vec, builder| {
        builder.if_eq(left, idx_vec[0]).then(|builder| {
            builder.assign(&res, res + one);
        });
    });
    // Accumulated in the base field, then cast back to a var for the caller.
    let v = builder.cast_felt_to_var(res);
    RVar::from(v)
}
/// Evaluates a single-variable multilinear polynomial at `point[0]`, given its
/// evaluations at 0 and 1: returns `evals[0] + r * (evals[1] - evals[0])`.
pub fn evaluate_at_point_degree_1<C: Config>(
    builder: &mut Builder<C>,
    evals: &Array<C, Ext<C::F, C::EF>>,
    point: &Array<C, Ext<C::F, C::EF>>,
) -> Ext<C::F, C::EF> {
    let left = builder.get(evals, 0);
    let right = builder.get(evals, 1);
    let r = builder.get(point, 0);
    builder.eval(r * (right - left) + left)
}
/// Dot product of a host-side slice `a` with a circuit array `b`:
/// `sum_i a[i] * b[i]`. The loop is unrolled at build time over `a`, so `b`
/// must have at least `a.len()` elements.
pub fn _fixed_dot_product<C: Config>(
    builder: &mut Builder<C>,
    a: &[Ext<C::F, C::EF>],
    b: &Array<C, Ext<C::F, C::EF>>,
    zero: Ext<C::F, C::EF>,
) -> Ext<<C as Config>::F, <C as Config>::EF> {
    // simple trick to prefer AddE(1 cycle) than AddEI(4 cycles)
    let acc: Ext<C::F, C::EF> = builder.eval(zero + zero);
    for (i, va) in a.iter().enumerate() {
        let vb = builder.get(b, i);
        builder.assign(&acc, acc + *va * vb);
    }
    acc
}
/// Evaluates multilinear polynomials (given by their full evaluation tables)
/// at a point, using a precomputed table of fold sizes.
pub struct PolyEvaluator<C: Config> {
    // Descending powers of two used as per-round fold sizes; built in `new`,
    // consumed by `evaluate_base_poly_at_point`.
    powers_of_2: Array<C, Usize<C::N>>,
}
impl<C: Config> PolyEvaluator<C> {
    /// Builds the fold-size table `powers_of_2[i] = 2^(MAX_NUM_VARS - 1 - i)`,
    /// i.e. 2^24 at index 0 descending to 2^0 at index 24.
    ///
    /// Bug fix: the previous hand-written table had entries 3 and 4
    /// transposed (1048576 = 2^20 at index 3, 2097152 = 2^21 at index 4),
    /// breaking the strictly descending pattern that every other entry and
    /// the slice arithmetic in `evaluate_base_poly_at_point` rely on.
    /// Generating the table in a host-side loop removes the transcription
    /// error while emitting the same sequence of `set` instructions.
    pub fn new(builder: &mut Builder<C>) -> Self {
        let powers_of_2: Array<C, Usize<C::N>> = builder.dyn_array(MAX_NUM_VARS);
        for i in 0..MAX_NUM_VARS {
            builder.set(&powers_of_2, i, Usize::from(1usize << (MAX_NUM_VARS - 1 - i)));
        }
        Self { powers_of_2 }
    }

    /// Evaluates a base-field multilinear polynomial — given as its evaluation
    /// table `evals` of length 2^num_var — at the extension-field `point`.
    ///
    /// Lifts `evals` into the extension field, then folds the table in half
    /// once per variable: processing `point[j]` combines entry pairs
    /// `(2k, 2k+1)` into entry `k` via `left + pt * (right - left)`.
    pub fn evaluate_base_poly_at_point(
        &self,
        builder: &mut Builder<C>,
        evals: &Array<C, Felt<C::F>>,
        point: &Array<C, Ext<C::F, C::EF>>,
    ) -> Ext<C::F, C::EF> {
        let num_var = point.len();
        // Lift the base-field table into the extension field (working buffer).
        let evals_ext: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(evals.len());
        iter_zip!(builder, evals, evals_ext).for_each(|ptr_vec, builder| {
            let f = builder.iter_ptr_get(evals, ptr_vec[0]);
            let e = builder.ext_from_base_slice(&[f]);
            builder.iter_ptr_set(&evals_ext, ptr_vec[1], e);
        });
        // Slice the table so the first fold round iterates 2^(num_var - 1)
        // times, the next 2^(num_var - 2), and so on down to 1.
        let pwr_slice_idx: Usize<C::N> = builder.eval(Usize::from(MAX_NUM_VARS) - num_var);
        let pwrs = self.powers_of_2.slice(builder, pwr_slice_idx, MAX_NUM_VARS);
        iter_zip!(builder, point, pwrs).for_each(|ptr_vec, builder| {
            let pt = builder.iter_ptr_get(point, ptr_vec[0]);
            let idx_bound = builder.iter_ptr_get(&pwrs, ptr_vec[1]);
            builder.range(0, idx_bound).for_each(|idx_vec, builder| {
                let left_idx: Usize<C::N> = builder.eval(idx_vec[0] * Usize::from(2));
                let right_idx: Usize<C::N> =
                    builder.eval(idx_vec[0] * Usize::from(2) + Usize::from(1));
                let left = builder.get(&evals_ext, left_idx);
                let right = builder.get(&evals_ext, right_idx);
                // Fold the pair along the current variable.
                let e: Ext<C::F, C::EF> = builder.eval(pt * (right - left) + left);
                builder.set(&evals_ext, idx_vec[0], e);
            });
        });
        // After all rounds, slot 0 holds the full evaluation.
        builder.get(&evals_ext, 0)
    }
}
/// Dot product of two circuit arrays: `sum_i a[i] * b[i]`, iterating over the
/// zipped length of `a` and `b`.
pub fn dot_product<C: Config>(
    builder: &mut Builder<C>,
    a: &Array<C, Ext<C::F, C::EF>>,
    b: &Array<C, Ext<C::F, C::EF>>,
) -> Ext<<C as Config>::F, <C as Config>::EF> {
    let acc: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
    iter_zip!(builder, a, b).for_each(|ptr_vec, builder| {
        let v_a = builder.iter_ptr_get(a, ptr_vec[0]);
        let v_b = builder.iter_ptr_get(b, ptr_vec[1]);
        builder.assign(&acc, acc + v_a * v_b);
    });
    acc
}
/// In-place masking of a value array with randomness:
/// sets `a[i] *= mask[i]` for every index of `a` (so `mask` must be at least
/// as long as `a`).
pub fn mask_arr<C: Config>(
    builder: &mut Builder<C>,
    a: &Array<C, Ext<C::F, C::EF>>,
    mask: &Array<C, Ext<C::F, C::EF>>,
) {
    builder.range(0, a.len()).for_each(|idx_vec, builder| {
        let a_i = builder.get(a, idx_vec[0]);
        let m_i = builder.get(mask, idx_vec[0]);
        builder.set(a, idx_vec[0], a_i * m_i);
    });
}
/// Returns a freshly allocated array holding the elements of `arr` in reverse
/// order; `arr` itself is left untouched.
pub fn reverse<C: Config, T: MemVariable<C>>(
    builder: &mut Builder<C>,
    arr: &Array<C, T>,
) -> Array<C, T> {
    let len = arr.len();
    let res: Array<C, T> = builder.dyn_array(len.clone());
    builder.range(0, len.clone()).for_each(|i_vec, builder| {
        let i = i_vec[0];
        // Element i lands at mirrored index len - 1 - i.
        let rev_i: RVar<_> = builder.eval_expr(len.clone() - i - RVar::from(1));
        let el = builder.get(arr, i);
        builder.set(&res, rev_i, el);
    });
    res
}
/// Returns a new array containing the elements of `a` followed by those of `b`.
pub fn concat<C: Config, T: MemVariable<C>>(
    builder: &mut Builder<C>,
    a: &Array<C, T>,
    b: &Array<C, T>,
) -> Array<C, T> {
    let res_len: Usize<C::N> = builder.eval(a.len() + b.len());
    let res: Array<C, T> = builder.dyn_array(res_len);
    // Copy `a` into the prefix.
    builder.range(0, a.len()).for_each(|idx_vec, builder| {
        let a_v = builder.get(a, idx_vec[0]);
        builder.set(&res, idx_vec[0], a_v);
    });
    // Copy `b` into the suffix, offset by a.len().
    builder.range(0, b.len()).for_each(|idx_vec, builder| {
        let b_v = builder.get(b, idx_vec[0]);
        let res_idx: Usize<C::N> = builder.eval(a.len() + idx_vec[0]);
        builder.set(&res, res_idx, b_v);
    });
    res
}
/// Returns the index array `[0, 1, ..., len - 1]` as runtime `Usize` values.
pub fn gen_idx_arr<C: Config>(builder: &mut Builder<C>, len: Usize<C::N>) -> Array<C, Usize<C::N>> {
    let res: Array<C, Usize<C::N>> = builder.dyn_array(len.clone());
    builder.range(0, len).for_each(|idx_vec, builder| {
        let u: Usize<C::N> = builder.eval(idx_vec[0]);
        builder.set(&res, idx_vec[0], u);
    });
    res
}
/// Evaluates the multilinear eq polynomial
/// `eq(x, y) = prod_i (x_i*y_i + (1 - x_i)(1 - y_i))`,
/// with each factor computed in the equivalent form `2*x_i*y_i - x_i - y_i + 1`.
/// `one`/`zero` are caller-supplied constants so this function allocates none.
pub fn eq_eval<C: Config>(
    builder: &mut Builder<C>,
    x: &Array<C, Ext<C::F, C::EF>>,
    y: &Array<C, Ext<C::F, C::EF>>,
    one: Ext<C::F, C::EF>,
    zero: Ext<C::F, C::EF>,
) -> Ext<C::F, C::EF> {
    let acc: Ext<C::F, C::EF> = builder.eval(zero + one); // simple trick to use AddE
    iter_zip!(builder, x, y).for_each(|idx_vec, builder| {
        let ptr_x = idx_vec[0];
        let ptr_y = idx_vec[1];
        let v_x = builder.iter_ptr_get(x, ptr_x);
        let v_y = builder.iter_ptr_get(y, ptr_y);
        // x*y + (1-x)*(1-y)  ==  2xy - x - y + 1
        let xi_yi: Ext<C::F, C::EF> = builder.eval(v_x * v_y);
        let new_acc: Ext<C::F, C::EF> = builder.eval(acc * (xi_yi + xi_yi - v_x - v_y + one));
        builder.assign(&acc, new_acc);
    });
    acc
}
/// Evaluates the eq polynomial over aligned sub-slices:
/// `prod_{i < len} eq(x[xlo + i], y[ylo + i])`, using the same
/// `2xy - x - y + 1` factor form as `eq_eval`.
pub fn eq_eval_with_index<C: Config>(
    builder: &mut Builder<C>,
    x: &Array<C, Ext<C::F, C::EF>>,
    y: &Array<C, Ext<C::F, C::EF>>,
    xlo: Usize<C::N>,
    ylo: Usize<C::N>,
    len: Usize<C::N>,
) -> Ext<C::F, C::EF> {
    let acc: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    builder.range(0, len).for_each(|i_vec, builder| {
        let i = i_vec[0];
        let ptr_x: Var<C::N> = builder.eval(xlo.clone() + i);
        let ptr_y: Var<C::N> = builder.eval(ylo.clone() + i);
        let v_x = builder.get(x, ptr_x);
        let v_y = builder.get(y, ptr_y);
        let xi_yi: Ext<C::F, C::EF> = builder.eval(v_x * v_y);
        let one: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
        let new_acc: Ext<C::F, C::EF> = builder.eval(acc * (xi_yi + xi_yi - v_x - v_y + one));
        builder.assign(&acc, new_acc);
    });
    acc
}
/// Product of every element across a nested array (all inner arrays flattened).
pub fn nested_product<C: Config>(
    builder: &mut Builder<C>,
    arr: &Array<C, Array<C, Ext<C::F, C::EF>>>,
) -> Ext<C::F, C::EF> {
    let acc = builder.constant(C::EF::ONE);
    iter_zip!(builder, arr).for_each(|ptr_vec, builder| {
        let inner_arr = builder.iter_ptr_get(arr, ptr_vec[0]);
        iter_zip!(builder, inner_arr).for_each(|ptr_vec, builder| {
            let el = builder.iter_ptr_get(&inner_arr, ptr_vec[0]);
            builder.assign(&acc, acc * el);
        });
    });
    acc
}
/// Product of all elements of `arr` (returns 1 for an empty array).
pub fn arr_product<C: Config>(
    builder: &mut Builder<C>,
    arr: &Array<C, Ext<C::F, C::EF>>,
) -> Ext<C::F, C::EF> {
    let acc = builder.constant(C::EF::ONE);
    iter_zip!(builder, arr).for_each(|ptr_vec, builder| {
        let el = builder.iter_ptr_get(arr, ptr_vec[0]);
        builder.assign(&acc, acc * el);
    });
    acc
}
/// Samples one challenge `alpha` from the transcript and returns its powers
/// `[alpha^0, alpha^1, ..., alpha^(alpha_len - 1)]`.
pub fn gen_alpha_pows<C: Config>(
    builder: &mut Builder<C>,
    challenger: &mut DuplexChallengerVariable<C>,
    alpha_len: Usize<<C as Config>::N>,
) -> Array<C, Ext<C::F, C::EF>> {
    let alpha = challenger.sample_ext(builder);
    // let alpha_felts = builder.ext2felt(alpha);
    // challenger.observe_slice(builder, alpha_felts);
    let alpha_pows: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(alpha_len);
    // `prev` carries the running power; slot i receives alpha^i.
    let prev: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    iter_zip!(builder, alpha_pows).for_each(|ptr_vec: Vec<RVar<<C as Config>::N>>, builder| {
        let ptr = ptr_vec[0];
        builder.iter_ptr_set(&alpha_pows, ptr, prev);
        builder.assign(&prev, prev * alpha);
    });
    alpha_pows
}
/// This is to compute a variant of eq(\mathbf{x}, \mathbf{y}) for indices in
/// [0..=max_idx]. Specifically, it is an MLE of the following vector:
///     partial_eq_{\mathbf{x}}(\mathbf{y})
///     = \sum_{\mathbf{b}=0}^{max_idx} \prod_{i=0}^{n-1} (x_i y_i b_i + (1 - x_i)(1 - y_i)(1 - b_i))
///
/// `eq_bit_decomp` supplies the bits of `max_idx`; only the first `b.len()`
/// entries are used, scanned from index `b.len() - 1` down to 0 (so the array
/// is presumably low-bit-first — TODO confirm against the prover side).
/// Strategy: start from the full `eq(a, b)` and subtract the contribution of
/// every index range above `max_idx`, one correction per zero bit.
pub fn eq_eval_less_or_equal_than<C: Config>(
    builder: &mut Builder<C>,
    // opcode_proof: &ZKVMChipProofInputVariable<C>,
    eq_bit_decomp: &Array<C, Felt<C::F>>,
    a: &Array<C, Ext<C::F, C::EF>>,
    b: &Array<C, Ext<C::F, C::EF>>,
) -> Ext<C::F, C::EF> {
    builder.cycle_tracker_start("Compute eq_eval_less_or_equal_than");
    let eq_bit_decomp: Array<C, Felt<C::F>> = eq_bit_decomp.slice(builder, 0, b.len());
    let one_ext: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    // running_product[i] = prod_{j < i} eq(a_j, b_j)  (prefix products).
    let rp_len = builder.eval_expr(b.len() + C::N::ONE);
    let running_product: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(rp_len);
    builder.set(&running_product, 0, one_ext);
    builder.range(0, b.len()).for_each(|idx_vec, builder| {
        let a_i = builder.get(a, idx_vec[0]);
        let b_i = builder.get(b, idx_vec[0]);
        let v = builder.get(&running_product, idx_vec[0]);
        let next_v: Ext<C::F, C::EF> =
            builder.eval(v * (a_i * b_i + (one_ext - a_i) * (one_ext - b_i)));
        let next_idx = builder.eval_expr(idx_vec[0] + RVar::from(1));
        builder.set(&running_product, next_idx, next_v);
    });
    // Full eq(a[..b.len()], b); corrections are subtracted below.
    let ans = builder.get(&running_product, b.len());
    // running_product2 accumulates eq over the already-visited high bits,
    // matching each a_i/b_i against the corresponding bit of max_idx.
    let running_product2: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    let idx: Var<C::N> = builder.uninit();
    builder.assign(&idx, b.len() - C::N::ONE);
    builder.range(0, b.len()).for_each(|_, builder| {
        let bit = builder.get(&eq_bit_decomp, idx);
        let bit_rvar = RVar::from(builder.cast_felt_to_var(bit));
        // Lift the felt bit into the extension field.
        let bit_ext: Ext<C::F, C::EF> = builder.eval(bit * SymbolicExt::from_f(C::EF::ONE));
        let a_i = builder.get(a, idx);
        let b_i = builder.get(b, idx);
        // Suppose max_idx = (110101)_2
        // Then ans = eq(a, b)
        // - eq(11011, a[1..6], b[1..6])eq(a[0..1], b[0..1])
        // - eq(111, a[3..6], b[3..6])eq(a[0..3], b[0..3])
        builder.if_ne(bit_rvar, RVar::from(1)).then(|builder| {
            let v1 = builder.get(&running_product, idx);
            builder.assign(&ans, ans - v1 * running_product2 * a_i * b_i);
        });
        builder.assign(
            &running_product2,
            running_product2
                * (a_i * b_i * bit_ext + (one_ext - a_i) * (one_ext - b_i) * (one_ext - bit_ext)),
        );
        builder.assign(&idx, idx - C::N::ONE);
    });
    // Variables of `a` beyond b.len() must all select the 0 branch.
    let a_remainder_arr: Array<C, Ext<C::F, C::EF>> = a.slice(builder, b.len(), a.len());
    iter_zip!(builder, a_remainder_arr).for_each(|ptr_vec, builder| {
        let a = builder.iter_ptr_get(&a_remainder_arr, ptr_vec[0]);
        builder.assign(&ans, ans * (one_ext - a));
    });
    builder.cycle_tracker_end("Compute eq_eval_less_or_equal_than");
    ans
}
/// Expands a point `r` into the full eq table: returns an array of length
/// `2^|r|` whose entry at index `b` is `eq(bits(b), r)`.
///
/// Built iteratively: each round consumes one coordinate of `r` (in reverse
/// order) and splits every existing entry `v` into the pair
/// `(v - r*v, r*v)`; entries are processed from the highest index downward so
/// a value is never overwritten before it is read.
pub fn build_eq_x_r_vec_sequential<C: Config>(
    builder: &mut Builder<C>,
    r: &Array<C, Ext<C::F, C::EF>>,
) -> Array<C, Ext<C::F, C::EF>> {
    let evals_len = pow_of_2(builder, RVar::from(r.len()));
    let evals: Array<C, Ext<C::F, C::EF>> = builder.dyn_array(evals_len);
    let one: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    builder.set(&evals, 0, one);
    let r_rev = reverse(builder, r);
    builder.range(0, r_rev.len()).for_each(|idx_vec, builder| {
        let r = builder.get(&r_rev, idx_vec[0]);
        // Round k doubles the populated prefix from 2^k to 2^(k+1) entries.
        let next_size = Usize::Var(pow_of_2(builder, idx_vec[0]).variable());
        let idx_arr = gen_idx_arr(builder, next_size);
        let idx_arr_rev = reverse(builder, &idx_arr);
        iter_zip!(builder, idx_arr_rev).for_each(|ptr_vec, builder| {
            let index = builder.iter_ptr_get(&idx_arr_rev, ptr_vec[0]);
            let prev_val = builder.get(&evals, index.clone());
            let tmp: Ext<C::F, C::EF> = builder.eval(r * prev_val);
            let left_i: Usize<C::N> = builder.eval(index.clone() * Usize::from(2) + Usize::from(1));
            let right_i: Usize<C::N> = builder.eval(index.clone() * Usize::from(2));
            builder.set(&evals, left_i, tmp);
            let right_v: Ext<C::F, C::EF> = builder.eval(prev_val - tmp);
            builder.set(&evals, right_i, right_v);
        });
    });
    evals
}
/// Returns `ceil(log2(x))` — the smallest `k` with `2^k >= x`.
///
/// # Panics
/// Panics if `x == 0`.
pub fn ceil_log2(x: usize) -> usize {
    assert!(x > 0, "ceil_log2: x must be positive");
    // 2^k >= x  <=>  k >= bit length of (x - 1).
    (usize::BITS - (x - 1).leading_zeros()) as usize
}
/// Computes `2^log_n` as a runtime value by repeated doubling in the base
/// field, then casting the result back to a var.
pub fn pow_of_2<C: Config>(builder: &mut Builder<C>, log_n: RVar<C::N>) -> RVar<C::N> {
    let res: Felt<C::F> = builder.constant(C::F::ONE);
    let two: Felt<C::F> = builder.constant(C::F::TWO);
    builder.range(0, log_n).for_each(|_idx_vec, builder| {
        builder.assign(&res, res * two);
    });
    let v = builder.cast_felt_to_var(res);
    RVar::from(v)
}
/// Rounds `num_instance` up to the next power of two, with a floor of 2
/// (so 0 and 1 both pad to 2).
pub fn next_pow2_instance_padding(num_instance: usize) -> usize {
    2usize.max(num_instance.next_power_of_two())
}
/// Raises `base` to the host-side-constant `exponent` by repeated
/// multiplication; returns 1 when `exponent == 0`.
pub fn ext_pow<C: Config>(
    builder: &mut Builder<C>,
    base: Ext<C::F, C::EF>,
    exponent: usize,
) -> Ext<C::F, C::EF> {
    let res = builder.constant(C::EF::ONE);
    builder
        .range(0, Usize::from(exponent))
        .for_each(|_, builder| {
            builder.assign(&res, res * base);
        });
    res
}
/// Evaluates a Ceno `Expression` over circuit values by supplying concrete
/// handlers to `evaluate_ceno_expr`: fixed columns, witnesses, structural
/// witnesses and instance values are looked up in the given arrays; constants
/// are lifted from `E` into the circuit extension field via their base-field
/// limbs; a `Challenge` node evaluates to `challenge^pow * scalar + offset`.
pub fn eval_ceno_expr_with_instance<C: Config>(
    builder: &mut Builder<C>,
    fixed: &Array<C, Ext<C::F, C::EF>>,
    witnesses: &Array<C, Ext<C::F, C::EF>>,
    structural_witnesses: &Array<C, Ext<C::F, C::EF>>,
    instance: &Array<C, Ext<C::F, C::EF>>,
    challenges: &Array<C, Ext<C::F, C::EF>>,
    expr: &Expression<E>,
) -> Ext<C::F, C::EF> {
    evaluate_ceno_expr::<C, Ext<C::F, C::EF>>(
        builder,
        expr,
        &|builder, f: &Fixed| builder.get(fixed, f.0),
        &|builder, witness_id: WitnessId| builder.get(witnesses, witness_id as usize),
        &|builder, witness_id, _, _, _| builder.get(structural_witnesses, witness_id as usize),
        &|builder, i| builder.get(instance, i.0),
        // Constant: re-encode E's base limbs as circuit base-field elements.
        &|builder, scalar| {
            let scalar_base_slice = scalar
                .as_bases()
                .iter()
                .map(|b| C::F::from_canonical_u64(b.to_canonical_u64()))
                .collect::<Vec<C::F>>();
            let scalar_ext: Ext<C::F, C::EF> =
                builder.constant(C::EF::from_base_slice(&scalar_base_slice));
            scalar_ext
        },
        // Challenge: challenge^pow * scalar + offset.
        &|builder, challenge_id, pow, scalar, offset| {
            let challenge = builder.get(challenges, challenge_id as usize);
            let challenge_exp = ext_pow(builder, challenge, pow);
            let scalar_base_slice = scalar
                .as_bases()
                .iter()
                .map(|b| C::F::from_canonical_u64(b.to_canonical_u64()))
                .collect::<Vec<C::F>>();
            let scalar_ext: Ext<C::F, C::EF> =
                builder.constant(C::EF::from_base_slice(&scalar_base_slice));
            let offset_base_slice = offset
                .as_bases()
                .iter()
                .map(|b| C::F::from_canonical_u64(b.to_canonical_u64()))
                .collect::<Vec<C::F>>();
            let offset_ext: Ext<C::F, C::EF> =
                builder.constant(C::EF::from_base_slice(&offset_base_slice))
;
            builder.eval(challenge_exp * scalar_ext + offset_ext)
        },
        // sum, product, and scaled-sum (x*a + b) combiners.
        &|builder, a, b| builder.eval(a + b),
        &|builder, a, b| builder.eval(a * b),
        &|builder, x, a, b| builder.eval(a * x + b),
    )
}
/// Generic fold over a Ceno `Expression` tree.
///
/// Each leaf kind is handled by the corresponding caller-supplied closure;
/// composite nodes (`Sum`, `Product`, `ScaledSum`) first recurse into their
/// children and then combine the results with `sum` / `product` / `scaled`.
///
/// Refactor: the recursive call previously repeated the full 11-argument call
/// site five times (~90 lines of duplication); a local macro keeps exactly one
/// canonical call, with behavior unchanged.
pub fn evaluate_ceno_expr<C: Config, T>(
    builder: &mut Builder<C>,
    expr: &Expression<E>,
    fixed_in: &impl Fn(&mut Builder<C>, &Fixed) -> T,
    wit_in: &impl Fn(&mut Builder<C>, WitnessId) -> T, // witin id
    structural_wit_in: &impl Fn(&mut Builder<C>, WitnessId, usize, u32, usize) -> T,
    instance: &impl Fn(&mut Builder<C>, Instance) -> T,
    constant: &impl Fn(&mut Builder<C>, E) -> T,
    challenge: &impl Fn(&mut Builder<C>, ChallengeId, usize, E, E) -> T,
    sum: &impl Fn(&mut Builder<C>, T, T) -> T,
    product: &impl Fn(&mut Builder<C>, T, T) -> T,
    scaled: &impl Fn(&mut Builder<C>, T, T, T) -> T,
) -> T {
    // Recurse into a sub-expression with the same handler set.
    macro_rules! recurse {
        ($sub:expr) => {
            evaluate_ceno_expr(
                builder,
                $sub,
                fixed_in,
                wit_in,
                structural_wit_in,
                instance,
                constant,
                challenge,
                sum,
                product,
                scaled,
            )
        };
    }
    match expr {
        Expression::Fixed(f) => fixed_in(builder, f),
        Expression::WitIn(witness_id) => wit_in(builder, *witness_id),
        Expression::StructuralWitIn(witness_id, _) => {
            structural_wit_in(builder, *witness_id, 0, 0, 0)
        }
        Expression::Instance(i) => instance(builder, *i),
        Expression::InstanceScalar(i) => instance(builder, *i),
        Expression::Constant(scalar) => match scalar {
            // Base-field constants are lifted into E before dispatch.
            Either::Left(s) => constant(builder, E::from_base(*s)),
            Either::Right(s) => constant(builder, *s),
        },
        Expression::Sum(a, b) => {
            let a = recurse!(a);
            let b = recurse!(b);
            sum(builder, a, b)
        }
        Expression::Product(a, b) => {
            let a = recurse!(a);
            let b = recurse!(b);
            product(builder, a, b)
        }
        Expression::ScaledSum(x, a, b) => {
            let x = recurse!(x);
            let a = recurse!(a);
            let b = recurse!(b);
            scaled(builder, x, a, b)
        }
        Expression::Challenge(challenge_id, pow, scalar, offset) => {
            challenge(builder, *challenge_id, *pow, *scalar, *offset)
        }
    }
}
/// evaluate MLE M(x0, x1, x2, ..., xn) address vector with it evaluation format a*[0, 1, 2, 3, ....2^n-1] + b
/// on r = [r0, r1, r2, ...rn] succinctly
/// a (= `scaled`) and b (= `offset`) are constants; the closed form is
/// M(r) = offset ± scaled * (r0 + 2*r1 + 4*r2 + ... + 2^n*rn),
/// with the negative sign when `descending` is set.
pub fn eval_wellform_address_vec<C: Config>(
    builder: &mut Builder<C>,
    offset: impl Into<<Ext<C::F, C::EF> as Variable<C>>::Expression>,
    scaled: u32,
    r: &Array<C, Ext<C::F, C::EF>>,
    descending: bool,
) -> Ext<C::F, C::EF> {
    let scaled: Ext<C::F, C::EF> = builder.constant(C::EF::from_canonical_u32(scaled));
    let r_sum: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
    let two: Ext<C::F, C::EF> = builder.constant(C::EF::TWO);
    // `state` walks through 1, 2, 4, ... = 2^i for coordinate i.
    let state: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    iter_zip!(builder, r).for_each(|ptr_vec, builder| {
        let x = builder.iter_ptr_get(r, ptr_vec[0]);
        builder.assign(&r_sum, r_sum + x * state);
        builder.assign(&state, state * two);
    });
    let shift: Ext<C::F, C::EF> = builder.eval(scaled * r_sum);
    if descending {
        // Negate the shift so addresses decrease from `offset`.
        let zero: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
        builder.assign(&shift, zero - shift);
    }
    let res: Ext<C::F, C::EF> = builder.eval(offset.into() + shift);
    res
}
/// Evaluate MLE M(x0, x1, ..., xn) whose evaluations are [0, 0, 1, 1, 2, 2, 2, 2, ...]
/// on r = [r0, r1, r2, ... rn] succinctly
///
/// The loop starts at variable 1: adjacent evaluation pairs share a value, so
/// M does not depend on x0 (presumed from the evaluation pattern — confirm).
pub fn eval_stacked_constant<C: Config>(
    builder: &mut Builder<C>,
    r: &Array<C, Ext<C::F, C::EF>>,
) -> Ext<C::F, C::EF> {
    let one: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    let res: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO),;
    let loop_i: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    builder.range(1, r.len()).for_each(|i_vec, builder| {
        let i: Var<C::N> = builder.eval(i_vec[0]);
        let ri = builder.get(r, i);
        // res = res * (1-ri) + ri * i
        builder.assign(&res, res * (one - ri) + ri * loop_i);
        builder.assign(&loop_i, loop_i + one);
    });
    res
}
/// Evaluate MLE M(x0, x1, ..., xn) whose evaluations are [0, 0, 0, 1, 0, 1, 2, 3, ...]
/// on r = [r0, r1, r2, ... rn] succinctly
pub fn eval_stacked_wellform_address_vec<C: Config>(
    builder: &mut Builder<C>,
    r: &Array<C, Ext<C::F, C::EF>>,
) -> Ext<C::F, C::EF> {
    let one: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    let two: Ext<C::F, C::EF> = builder.constant(C::EF::TWO);
    // pow_two = 2^(i-1) at the start of iteration i.
    let pow_two: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
    // compute \sum_j r_j * 2^j in an incremental way
    let well_formed_inc: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
    let res: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
    builder.range(1, r.len()).for_each(|i_vec, builder| {
        let i: Var<C::N> = builder.eval(i_vec[0]);
        let i_minus_1: Var<C::N> = builder.eval(i - RVar::from(1));
        let ri = builder.get(r, i);
        let r_i_minus_1 = builder.get(r, i_minus_1);
        // well_formed_inc += 2^{i-1} * r_{i-1}
        builder.assign(&well_formed_inc, well_formed_inc + pow_two * r_i_minus_1);
        builder.assign(&pow_two, pow_two * two);
        // res = res * (1-ri) + ri * (\sum_{j < i} 2^j * rj)
        builder.assign(&res, res * (one - ri) + ri * well_formed_inc);
    });
    res
}
/// Maximum of a non-empty host-side `Vec` of runtime `Usize` values.
///
/// NOTE(review): `res` is a clone of `vec[0]`, which for a `Usize::Var` shares
/// the underlying variable — the `assign` below therefore also updates
/// `vec[0]`'s variable. Confirm callers do not reuse `vec[0]` afterwards.
pub fn max_usize_vec<C: Config>(builder: &mut Builder<C>, vec: Vec<Usize<C::N>>) -> Usize<C::N> {
    assert!(!vec.is_empty());
    let res = vec[0].clone();
    vec.iter().skip(1).for_each(|n| {
        let is_less = is_smaller_than(builder, RVar::from(res.clone()), RVar::from(n.clone()));
        builder.if_eq(is_less, Usize::from(1)).then(|builder| {
            builder.assign(&res, n.clone());
        });
    });
    res
}
/// Maximum of a runtime array of `Usize` values (array must be non-empty, as
/// index 0 is read unconditionally).
pub fn max_usize_arr<C: Config>(
    builder: &mut Builder<C>,
    arr: &Array<C, Usize<C::N>>,
) -> Usize<C::N> {
    let max_var = builder.get(arr, 0).get_var();
    // The loop revisits index 0; that comparison is a no-op since x < x is false.
    builder.range(0, arr.len()).for_each(|idx_vec, builder| {
        let n = RVar::from(builder.get(arr, idx_vec[0]).clone());
        let is_less = is_smaller_than(builder, RVar::from(max_var), n);
        builder.if_eq(is_less, Usize::from(1)).then(|builder| {
            builder.assign(&max_var, n.variable());
        });
    });
    Usize::from(max_var)
}
/// Extrapolates low-degree univariate polynomials (given by their evaluations
/// at small integer points) to an arbitrary evaluation point, reusing a fixed
/// set of precomputed field constants.
pub struct UniPolyExtrapolator<C: Config> {
    constants: [Ext<C::F, C::EF>; 12], // 0, 1, 2, 3, 4, -1, 1/2, -1/2, 1/6, -1/6, 1/4, 1/24
}
impl<C: Config> UniPolyExtrapolator<C> {
pub fn new(builder: &mut Builder<C>) -> Self {
let zero: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
let one: Ext<C::F, C::EF> = builder.constant(C::EF::ONE);
let two: Ext<C::F, C::EF> = builder.constant(C::EF::TWO);
let three: Ext<C::F, C::EF> = builder.constant(C::EF::from_canonical_u32(3));
let four: Ext<C::F, C::EF> = builder.constant(C::EF::from_canonical_u32(4));
let six: Ext<C::F, C::EF> = builder.constant(C::EF::from_canonical_u32(6));
let twenty_four: Ext<C::F, C::EF> = builder.constant(C::EF::from_canonical_u32(24));
let neg_one: Ext<C::F, C::EF> = builder.eval(zero - one);
let two_inverse: Ext<C::F, C::EF> = builder.eval(two.inverse());
let neg_two_inverse: Ext<C::F, C::EF> = builder.eval(zero - two_inverse);
let six_inverse: Ext<C::F, C::EF> = builder.eval(six.inverse());
let neg_six_inverse: Ext<C::F, C::EF> = builder.eval(zero - six_inverse);
let four_inverse: Ext<C::F, C::EF> = builder.eval(four.inverse());
let twenty_four_inverse: Ext<C::F, C::EF> = builder.eval(twenty_four.inverse());
Self {
constants: [
zero,
one,
two,
three,
four,
neg_one,
two_inverse,
neg_two_inverse,
six_inverse,
neg_six_inverse,
four_inverse,
twenty_four_inverse,
],
}
}
pub fn extrapolate_uni_poly(
&mut self,
builder: &mut Builder<C>,
p_i: &Array<C, Ext<C::F, C::EF>>,
eval_at: Ext<C::F, C::EF>,
) -> Ext<C::F, C::EF> {
let res: Ext<C::F, C::EF> = builder.constant(C::EF::ZERO);
builder.if_eq(p_i.len(), Usize::from(4)).then_or_else(
|builder| {
let ext = self.extrapolate_uni_poly_deg_3(builder, p_i, eval_at);
builder.assign(&res, ext);
},
|builder| {
builder.if_eq(p_i.len(), Usize::from(3)).then_or_else(
|builder| {
let ext = self.extrapolate_uni_poly_deg_2(builder, p_i, eval_at);
builder.assign(&res, ext);
},
|builder| {
builder.if_eq(p_i.len(), Usize::from(2)).then_or_else(
|builder| {
let ext = self.extrapolate_uni_poly_deg_1(builder, p_i, eval_at);
builder.assign(&res, ext);
},
|builder| {
builder.if_eq(p_i.len(), Usize::from(5)).then_or_else(
|builder| {
let ext =
self.extrapolate_uni_poly_deg_4(builder, p_i, eval_at);
builder.assign(&res, ext);
},
|builder| {
builder.error();
},
);
},
);
},
);
},
);
res
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_recursion/src/constants/mod.rs | ceno_recursion/src/constants/mod.rs | pub const OPCODE_KEYS: [(usize, usize, &str); 14] = [
    // Tuples are (sparse id, dense index 0..14, mnemonic); the dense index is
    // contiguous while the first id has gaps — exact id semantics not visible
    // here, confirm against the circuit registry.
    (0, 0, "ADD"),
    (1, 1, "ADDI"),
    (3, 2, "ANDI"),
    (4, 3, "BEQ"),
    (8, 4, "BLTU"),
    (15, 5, "BNE"),
    (19, 6, "JALR"),
    (25, 7, "LW"),
    (32, 8, "ORI"),
    (42, 9, "SB"),
    (55, 10, "SRAI"),
    (57, 11, "SRLI"),
    (58, 12, "SUB"),
    (59, 13, "SW"),
];
// Number of opcode entries; matches the lengths of OPCODE_KEYS and OPCODE_CS_COUNTS.
pub const OPCODE_COUNTS: usize = 14;
// One row of 7 counts per OPCODE_KEYS entry (same order). Column meanings are
// not documented in this file — confirm against the table generator.
pub const OPCODE_CS_COUNTS: [[usize; 7]; 14] = [
    [4, 4, 9, 2, 2, 2, 17847],
    [3, 3, 9, 2, 2, 2, 11941],
    [3, 3, 9, 0, 0, 0, 4],
    [3, 3, 5, 3, 2, 3, 1],
    [3, 3, 7, 2, 2, 2, 7],
    [3, 3, 5, 3, 2, 3, 5947],
    [3, 3, 9, 2, 2, 2, 4],
    [4, 4, 11, 0, 0, 0, 1],
    [3, 3, 9, 0, 0, 0, 1],
    [4, 4, 15, 5, 2, 5, 8],
    [3, 3, 10, 2, 2, 2, 5948],
    [3, 3, 9, 1, 2, 1, 5948],
    [4, 4, 11, 2, 2, 2, 5950],
    [4, 4, 11, 0, 0, 0, 33],
];
// Table circuits, same (sparse id, dense index, name) shape as OPCODE_KEYS;
// dense indices continue from the opcode range (14..27).
pub const TABLE_KEYS: [(usize, usize, &str); 13] = [
    (26, 14, "OPS_And"),
    (27, 15, "OPS_Ltu"),
    (28, 16, "OPS_Or"),
    (29, 17, "OPS_Pow"),
    (30, 18, "OPS_Xor"),
    (33, 19, "PROGRAM"),
    (35, 20, "RAM_Memory_PubIOTable"),
    (36, 21, "RAM_Memory_StaticMemTable"),
    (37, 22, "RAM_Register_RegTable"),
    (38, 23, "RANGE_U14"),
    (39, 24, "RANGE_U16"),
    (40, 25, "RANGE_U5"),
    (41, 26, "RANGE_U8"),
];
// Per-table constants, one row per TABLE_KEYS entry (same order). In every row
// the second element equals the largest value in the slice.
pub const TABLE_CONSTANTS: [(&[usize], usize); 13] = [
    (&[16], 16),
    (&[16], 16),
    (&[16], 16),
    (&[5], 5),
    (&[16], 16),
    (&[14], 14),
    (&[2, 2], 2),
    (&[16, 16], 16),
    (&[6, 6], 6),
    (&[14], 14),
    (&[16], 16),
    (&[5], 5),
    (&[8], 8),
];
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/dense_addr_space.rs | ceno_emul/src/dense_addr_space.rs | use crate::addr::WordAddr;
/// Dense storage for addresses between `[base, end)`, addressed at word granularity.
///
/// The region is pre-allocated up-front so lookups become simple index operations.
#[derive(Debug)]
pub(crate) struct DenseAddrSpace<T> {
    // Inclusive lower bound of the covered word range.
    base: WordAddr,
    // Exclusive upper bound of the covered word range.
    end: WordAddr,
    // One cell per word in [base, end); cell index = addr - base.
    cells: Vec<T>,
}
impl<T: Copy + Default> DenseAddrSpace<T> {
    /// Allocates storage covering the word range `[base, end)`, with every
    /// cell initialized to `T::default()`.
    ///
    /// Panics if `end` precedes `base`.
    pub(crate) fn new(base: WordAddr, end: WordAddr) -> Self {
        assert!(
            end.0 >= base.0,
            "dense address space end must be >= base ({:?} !>= {:?})",
            end,
            base
        );
        let word_count = (end.0 - base.0) as usize;
        let cells = vec![T::default(); word_count];
        Self { base, end, cells }
    }

    /// Returns the word stored at `addr`, or `None` if `addr` is out of range.
    pub(crate) fn read(&self, addr: WordAddr) -> Option<T> {
        let idx = self.index(addr)?;
        Some(self.cells[idx])
    }

    /// Overwrites the word at `addr`; `None` if `addr` is out of range.
    pub(crate) fn write(&mut self, addr: WordAddr, value: T) -> Option<()> {
        let idx = self.index(addr)?;
        self.cells[idx] = value;
        Some(())
    }

    /// Stores `value` at `addr` and returns the previous word;
    /// `None` if `addr` is out of range.
    pub(crate) fn replace(&mut self, addr: WordAddr, value: T) -> Option<T> {
        let idx = self.index(addr)?;
        Some(std::mem::replace(&mut self.cells[idx], value))
    }

    /// Borrows the word at `addr`; `None` if `addr` is out of range.
    pub(crate) fn get_ref(&self, addr: WordAddr) -> Option<&T> {
        let idx = self.index(addr)?;
        Some(&self.cells[idx])
    }

    /// Maps an in-range address to its offset within `cells`.
    fn index(&self, addr: WordAddr) -> Option<usize> {
        let in_range = addr.0 >= self.base.0 && addr.0 < self.end.0;
        in_range.then(|| (addr.0 - self.base.0) as usize)
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/rv32im.rs | ceno_emul/src/rv32im.rs | // Based on: https://github.com/risc0/risc0/blob/aeea62f0c8f4223abfba17d4c78cb7e15c513de2/risc0/circuit/rv32im/src/prove/emu/rv32im.rs
//
// Copyright 2024 RISC Zero, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use anyhow::{Result, anyhow};
use ff_ext::ExtensionField;
use itertools::Either;
use multilinear_extensions::{Expression, impl_expr_from_unsigned};
use num_derive::ToPrimitive;
use strum_macros::{Display, EnumCount, EnumIter};
use super::addr::{ByteAddr, RegIdx, WORD_SIZE, Word, WordAddr};
/// Convenience function to create an `Instruction` with the given fields.
///
/// Pass 0 for unused fields.
///
/// `raw` is left at 0 because the instruction is built directly rather than
/// decoded from a machine word (see the note on `Instruction::raw`).
pub const fn encode_rv32(kind: InsnKind, rs1: u32, rs2: u32, rd: u32, imm: i32) -> Instruction {
    Instruction {
        kind,
        rs1: rs1 as usize,
        rs2: rs2 as usize,
        rd: rd as usize,
        imm,
        raw: 0,
    }
}
/// Convenience function to create an `Instruction` with the given fields.
///
/// Pass 0 for unused fields.
///
/// Variant of `encode_rv32` taking an unsigned immediate; the `u32 -> i32`
/// cast reinterprets the bits unchanged.
pub const fn encode_rv32u(kind: InsnKind, rs1: u32, rs2: u32, rd: u32, imm: u32) -> Instruction {
    Instruction {
        kind,
        rs1: rs1 as usize,
        rs2: rs2 as usize,
        rd: rd as usize,
        imm: imm as i32,
        raw: 0,
    }
}
/// Execution context required by the RV32IM emulator: program counter,
/// register and memory access, plus trap / ecall hooks.
pub trait EmuContext {
    /// Handle environment call
    fn ecall(&mut self) -> Result<bool>;
    /// Handle a trap
    fn trap(&self, cause: TrapCause) -> Result<bool>;
    /// Callback when instructions end normally
    fn on_normal_end(&mut self, _decoded: &Instruction) {}
    /// Get the program counter
    fn get_pc(&self) -> ByteAddr;
    /// Set the program counter
    fn set_pc(&mut self, addr: ByteAddr);
    /// Load from a register
    fn load_register(&mut self, idx: RegIdx) -> Result<Word>;
    /// Store to a register
    fn store_register(&mut self, idx: RegIdx, data: Word) -> Result<()>;
    /// Load from memory
    fn load_memory(&mut self, addr: WordAddr) -> Result<Word>;
    /// Store to memory
    fn store_memory(&mut self, addr: WordAddr, data: Word) -> Result<()>;
    /// Get the value of a register without side-effects.
    fn peek_register(&self, idx: RegIdx) -> Word;
    /// Get the value of a memory word without side-effects.
    fn peek_memory(&self, addr: WordAddr) -> Word;
    /// Load from instruction cache
    fn fetch(&mut self, pc: WordAddr) -> Option<Instruction>;
    /// Check access for data load (default: always permitted)
    fn check_data_load(&self, _addr: ByteAddr) -> bool {
        true
    }
    /// Check access for data store (default: always permitted)
    fn check_data_store(&self, _addr: ByteAddr) -> bool {
        true
    }
}
/// Cause of an emulation trap; handed to [`EmuContext::trap`].
///
/// Some variants carry the faulting address or the raw instruction word
/// to produce better diagnostics.
#[derive(Debug)]
pub enum TrapCause {
    InstructionAddressMisaligned,
    InstructionAccessFault,
    /// Carries the raw 32-bit word that failed to decode.
    IllegalInstruction(u32),
    Breakpoint,
    LoadAddressMisaligned,
    /// Carries the byte address whose load was rejected by the platform.
    LoadAccessFault(ByteAddr),
    /// Carries the misaligned store address.
    StoreAddressMisaligned(ByteAddr),
    StoreAccessFault,
    EcallError,
}
/// A decoded instruction: kind, register operands, and immediate.
///
/// Operand fields an instruction does not use are left at 0 by the
/// `encode_rv32*` helpers.
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
pub struct Instruction {
    pub kind: InsnKind,
    pub rs1: RegIdx,
    pub rs2: RegIdx,
    pub rd: RegIdx,
    pub imm: i32,
    /// `raw` is there only to produce better logging and error messages.
    ///
    /// Set to 0, if you are creating an instruction directly,
    /// instead of decoding it from a raw 32-bit `Word`.
    pub raw: Word,
}
/// Coarse instruction category used by [`step`] to dispatch to the
/// matching `step_*` handler.
#[derive(Clone, Copy, Debug)]
pub enum InsnCategory {
    Compute,
    Branch,
    Load,
    Store,
    System,
    Invalid,
}
use InsnCategory::*;
/// RISC-V instruction encoding format (R/I/S/B/U/J); used when encoding,
/// e.g. by the `rd_internal`/`rs*_or_zero` helpers and in tests.
#[derive(Clone, Copy, Debug)]
pub enum InsnFormat {
    R,
    I,
    S,
    B,
    U,
    J,
}
use InsnFormat::*;
/// Every instruction kind this emulator understands (an RV32IM subset),
/// plus `INVALID` (the default) for words that do not decode.
///
/// NOTE(review): `LUI`/`AUIPC` exist as distinct kinds only under the
/// `u16limb_circuit` feature; without it they are presumably lowered
/// elsewhere — the decoder is not visible in this file, confirm.
#[derive(
    Clone,
    Copy,
    Display,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    EnumIter,
    EnumCount,
    ToPrimitive,
    Default,
)]
#[allow(clippy::upper_case_acronyms)]
pub enum InsnKind {
    #[default]
    INVALID,
    ADD,
    SUB,
    XOR,
    OR,
    AND,
    SLL,
    SRL,
    SRA,
    SLT,
    SLTU,
    ADDI,
    XORI,
    ORI,
    ANDI,
    SLLI,
    SRLI,
    SRAI,
    SLTI,
    SLTIU,
    BEQ,
    BNE,
    BLT,
    BGE,
    BLTU,
    BGEU,
    JAL,
    JALR,
    MUL,
    MULH,
    MULHSU,
    MULHU,
    DIV,
    DIVU,
    REM,
    REMU,
    LB,
    LH,
    LW,
    LBU,
    LHU,
    #[cfg(feature = "u16limb_circuit")]
    LUI,
    #[cfg(feature = "u16limb_circuit")]
    AUIPC,
    SB,
    SH,
    SW,
    ECALL,
}
use InsnKind::*;
impl_expr_from_unsigned!(InsnKind);
impl From<InsnKind> for InsnCategory {
    /// Map an instruction kind to the category that drives [`step`]'s
    /// dispatch.
    fn from(kind: InsnKind) -> Self {
        match kind {
            INVALID => Invalid,
            // Register-register ALU/M-extension ops, immediate ALU ops, and
            // jumps all retire through the compute path.
            ADD | SUB | XOR | OR | AND | SLL | SRL | SRA | SLT | SLTU | MUL | MULH | MULHSU
            | MULHU | DIV | DIVU | REM | REMU | ADDI | XORI | ORI | ANDI | SLLI | SRLI | SRAI
            | SLTI | SLTIU | JAL | JALR => Compute,
            BEQ | BNE | BLT | BGE | BLTU | BGEU => Branch,
            LB | LH | LW | LBU | LHU => Load,
            SB | SH | SW => Store,
            ECALL => System,
            #[cfg(feature = "u16limb_circuit")]
            LUI | AUIPC => Compute,
        }
    }
}
// For encoding, which is useful for tests.
impl From<InsnKind> for InsnFormat {
fn from(kind: InsnKind) -> Self {
match kind {
ADD | SUB | XOR | OR | AND | SLL | SRL | SRA | SLT | SLTU | MUL | MULH | MULHSU
| MULHU | DIV | DIVU | REM | REMU => R,
ADDI | XORI | ORI | ANDI | SLLI | SRLI | SRAI | SLTI | SLTIU => I,
BEQ | BNE | BLT | BGE | BLTU | BGEU => B,
JAL => J,
JALR => I,
LB | LH | LW | LBU | LHU => I,
SB | SH | SW => S,
ECALL => I,
INVALID => I,
#[cfg(feature = "u16limb_circuit")]
LUI | AUIPC => U,
}
}
}
impl Instruction {
    /// Index of the extra "null" destination register used as a sink for
    /// writes that would otherwise target x0 (see [`Instruction::rd_internal`]).
    pub const RD_NULL: u32 = 32;
    /// Destination register as seen by the trace layer: the real `rd` for
    /// formats that have one, or `RD_NULL` when the format has no destination
    /// (S/B) or the destination is x0.
    pub fn rd_internal(&self) -> u32 {
        match InsnFormat::from(self.kind) {
            R | I | U | J if self.rd != 0 => self.rd as u32,
            _ => Self::RD_NULL,
        }
    }
    /// Get the register source 1, or zero if the instruction does not use rs1.
    pub fn rs1_or_zero(&self) -> u32 {
        match InsnFormat::from(self.kind) {
            R | I | S | B => self.rs1 as u32,
            _ => 0,
        }
    }
    /// Get the register source 2, or zero if the instruction does not use rs2.
    pub fn rs2_or_zero(&self) -> u32 {
        match InsnFormat::from(self.kind) {
            R | S | B => self.rs2 as u32,
            _ => 0,
        }
    }
}
/// Fetch, dispatch, and execute a single instruction on `ctx`.
///
/// Errors out if no instruction exists at the current PC. When the executed
/// instruction reports normal completion (`true`), the context's
/// `on_normal_end` callback is invoked.
pub fn step<C: EmuContext>(ctx: &mut C) -> Result<()> {
    let pc = ctx.get_pc();
    let Some(insn) = ctx.fetch(pc.waddr()) else {
        // Let the context record the access fault before aborting emulation.
        ctx.trap(TrapCause::InstructionAccessFault)?;
        return Err(anyhow!(
            "Fatal: could not fetch instruction at pc={pc:?}, ELF does not have instructions there."
        ));
    };
    tracing::trace!("pc: {:x}, kind: {:?}", pc.0, insn.kind);
    if match InsnCategory::from(insn.kind) {
        InsnCategory::Compute => step_compute(ctx, insn.kind, &insn)?,
        InsnCategory::Branch => step_branch(ctx, insn.kind, &insn)?,
        InsnCategory::Load => step_load(ctx, insn.kind, &insn)?,
        InsnCategory::Store => step_store(ctx, insn.kind, &insn)?,
        InsnCategory::System => step_system(ctx, insn.kind, &insn)?,
        InsnCategory::Invalid => ctx.trap(TrapCause::IllegalInstruction(insn.raw))?,
    } {
        ctx.on_normal_end(&insn);
    };
    Ok(())
}
/// Execute a compute-category instruction: ALU ops, M-extension ops, and
/// jumps (JAL/JALR), plus LUI/AUIPC under the `u16limb_circuit` feature.
///
/// The match is nested so registers are read only when the instruction
/// actually uses them — `load_register` records each access with the
/// context, so a spurious read would pollute the trace.
fn step_compute<M: EmuContext>(ctx: &mut M, kind: InsnKind, insn: &Instruction) -> Result<bool> {
    use super::InsnKind::*;
    let pc = ctx.get_pc();
    // Default next PC is the fall-through; jumps overwrite it below.
    let mut new_pc = pc + WORD_SIZE;
    let imm_i = insn.imm as u32;
    let out = match kind {
        // Instructions that do not read rs1 nor rs2.
        JAL => {
            new_pc = pc.wrapping_add(insn.imm as u32);
            // Link value: address of the instruction after the jump.
            (pc + WORD_SIZE).0
        }
        _ => {
            // Instructions that read rs1 but not rs2.
            let rs1 = ctx.load_register(insn.rs1)?;
            match kind {
                ADDI => rs1.wrapping_add(imm_i),
                #[cfg(feature = "u16limb_circuit")]
                LUI => imm_i,
                #[cfg(feature = "u16limb_circuit")]
                AUIPC => pc.wrapping_add(imm_i).0,
                XORI => rs1 ^ imm_i,
                ORI => rs1 | imm_i,
                ANDI => rs1 & imm_i,
                SLLI => rs1 << (imm_i & 0x1f),
                SRLI => rs1 >> (imm_i & 0x1f),
                SRAI => ((rs1 as i32) >> (imm_i & 0x1f)) as u32,
                SLTI => {
                    if (rs1 as i32) < (imm_i as i32) {
                        1
                    } else {
                        0
                    }
                }
                SLTIU => {
                    if rs1 < imm_i {
                        1
                    } else {
                        0
                    }
                }
                JALR => {
                    // Target is rs1 + imm with the least-significant bit
                    // cleared, as the RISC-V spec requires.
                    new_pc = ByteAddr(rs1.wrapping_add(imm_i) & !1);
                    (pc + WORD_SIZE).0
                }
                _ => {
                    // Instructions that use rs1 and rs2.
                    let rs2 = ctx.load_register(insn.rs2)?;
                    match kind {
                        ADD => rs1.wrapping_add(rs2),
                        SUB => rs1.wrapping_sub(rs2),
                        XOR => rs1 ^ rs2,
                        OR => rs1 | rs2,
                        AND => rs1 & rs2,
                        SLL => rs1 << (rs2 & 0x1f),
                        SRL => rs1 >> (rs2 & 0x1f),
                        SRA => ((rs1 as i32) >> (rs2 & 0x1f)) as u32,
                        SLT => {
                            if (rs1 as i32) < (rs2 as i32) {
                                1
                            } else {
                                0
                            }
                        }
                        SLTU => {
                            if rs1 < rs2 {
                                1
                            } else {
                                0
                            }
                        }
                        MUL => rs1.wrapping_mul(rs2),
                        // The MULH* family returns the upper 32 bits of the
                        // 64-bit product, with the appropriate signedness.
                        MULH => {
                            (sign_extend_u32(rs1).wrapping_mul(sign_extend_u32(rs2)) >> 32) as u32
                        }
                        MULHSU => (sign_extend_u32(rs1).wrapping_mul(rs2 as i64) >> 32) as u32,
                        MULHU => (((rs1 as u64).wrapping_mul(rs2 as u64)) >> 32) as u32,
                        // Division by zero yields all-ones (no trap), per the
                        // RISC-V spec.
                        DIV => {
                            if rs2 == 0 {
                                u32::MAX
                            } else {
                                ((rs1 as i32).wrapping_div(rs2 as i32)) as u32
                            }
                        }
                        DIVU => {
                            if rs2 == 0 {
                                u32::MAX
                            } else {
                                rs1 / rs2
                            }
                        }
                        // Remainder of a division by zero is the dividend,
                        // per the RISC-V spec.
                        REM => {
                            if rs2 == 0 {
                                rs1
                            } else {
                                ((rs1 as i32).wrapping_rem(rs2 as i32)) as u32
                            }
                        }
                        REMU => {
                            if rs2 == 0 {
                                rs1
                            } else {
                                rs1 % rs2
                            }
                        }
                        _ => unreachable!("Illegal compute instruction: {:?}", kind),
                    }
                }
            }
        }
    };
    if !new_pc.is_aligned() {
        return ctx.trap(TrapCause::InstructionAddressMisaligned);
    }
    ctx.store_register(insn.rd_internal() as usize, out)?;
    ctx.set_pc(new_pc);
    Ok(true)
}
/// Execute a branch-category instruction: compare rs1/rs2 and redirect the
/// PC by the (PC-relative) immediate when the condition holds.
fn step_branch<M: EmuContext>(ctx: &mut M, kind: InsnKind, decoded: &Instruction) -> Result<bool> {
    use super::InsnKind::*;
    let pc = ctx.get_pc();
    let rs1 = ctx.load_register(decoded.rs1 as RegIdx)?;
    let rs2 = ctx.load_register(decoded.rs2 as RegIdx)?;
    let taken = match kind {
        BEQ => rs1 == rs2,
        BNE => rs1 != rs2,
        BLT => (rs1 as i32) < (rs2 as i32),
        BGE => (rs1 as i32) >= (rs2 as i32),
        BLTU => rs1 < rs2,
        BGEU => rs1 >= rs2,
        _ => unreachable!("Illegal branch instruction: {:?}", kind),
    };
    // Taken branches are PC-relative; otherwise fall through to the next word.
    let new_pc = if taken {
        pc.wrapping_add(decoded.imm as u32)
    } else {
        pc + WORD_SIZE
    };
    if !new_pc.is_aligned() {
        return ctx.trap(TrapCause::InstructionAddressMisaligned);
    }
    ctx.set_pc(new_pc);
    Ok(true)
}
/// Execute a load-category instruction (LB/LH/LW/LBU/LHU).
///
/// Memory is word-granular, so sub-word loads read the containing word and
/// extract the addressed byte/halfword with a shift; signed variants then
/// sign-extend the result.
fn step_load<M: EmuContext>(ctx: &mut M, kind: InsnKind, decoded: &Instruction) -> Result<bool> {
    let rs1 = ctx.load_register(decoded.rs1)?;
    // LOAD instructions do not read rs2.
    let addr = ByteAddr(rs1.wrapping_add_signed(decoded.imm));
    if !ctx.check_data_load(addr) {
        return ctx.trap(TrapCause::LoadAccessFault(addr));
    }
    let data = ctx.load_memory(addr.waddr())?;
    // Bit offset of the addressed byte inside its containing word.
    let shift = 8 * (addr.0 & 3);
    let out = match kind {
        InsnKind::LB => {
            let mut out = (data >> shift) & 0xff;
            // Sign-extend from bit 7.
            if out & 0x80 != 0 {
                out |= 0xffffff00;
            }
            out
        }
        InsnKind::LH => {
            if addr.0 & 0x01 != 0 {
                return ctx.trap(TrapCause::LoadAddressMisaligned);
            }
            let mut out = (data >> shift) & 0xffff;
            // Sign-extend from bit 15.
            if out & 0x8000 != 0 {
                out |= 0xffff0000;
            }
            out
        }
        InsnKind::LW => {
            if addr.0 & 0x03 != 0 {
                return ctx.trap(TrapCause::LoadAddressMisaligned);
            }
            data
        }
        InsnKind::LBU => (data >> shift) & 0xff,
        InsnKind::LHU => {
            if addr.0 & 0x01 != 0 {
                return ctx.trap(TrapCause::LoadAddressMisaligned);
            }
            (data >> shift) & 0xffff
        }
        _ => unreachable!(),
    };
    ctx.store_register(decoded.rd_internal() as usize, out)?;
    ctx.set_pc(ctx.get_pc() + WORD_SIZE);
    Ok(true)
}
/// Execute a store-category instruction (SB/SH/SW).
///
/// Memory is word-granular: sub-word stores peek the current word (no trace
/// side-effects), splice in the new byte/halfword, and write the whole word
/// back through `store_memory`.
fn step_store<M: EmuContext>(ctx: &mut M, kind: InsnKind, decoded: &Instruction) -> Result<bool> {
    let rs1 = ctx.load_register(decoded.rs1)?;
    let rs2 = ctx.load_register(decoded.rs2)?;
    let addr = ByteAddr(rs1.wrapping_add(decoded.imm as u32));
    // Bit offset of the addressed byte inside its containing word.
    let shift = 8 * (addr.0 & 3);
    if !ctx.check_data_store(addr) {
        tracing::error!("mstore: addr={:x?},rs1={:x}", addr, rs1);
        return ctx.trap(TrapCause::StoreAccessFault);
    }
    let mut data = ctx.peek_memory(addr.waddr());
    match kind {
        InsnKind::SB => {
            // Clear the target byte, then insert the low byte of rs2.
            data ^= data & (0xff << shift);
            data |= (rs2 & 0xff) << shift;
        }
        InsnKind::SH => {
            if addr.0 & 0x01 != 0 {
                tracing::debug!("Misaligned SH");
                return ctx.trap(TrapCause::StoreAddressMisaligned(addr));
            }
            // Clear the target halfword, then insert the low half of rs2.
            data ^= data & (0xffff << shift);
            data |= (rs2 & 0xffff) << shift;
        }
        InsnKind::SW => {
            if addr.0 & 0x03 != 0 {
                tracing::debug!("Misaligned SW");
                return ctx.trap(TrapCause::StoreAddressMisaligned(addr));
            }
            data = rs2;
        }
        _ => unreachable!(),
    }
    ctx.store_memory(addr.waddr(), data)?;
    ctx.set_pc(ctx.get_pc() + WORD_SIZE);
    Ok(true)
}
/// Execute a system-category instruction. Only ECALL is recognized; any
/// other kind traps as an illegal instruction.
fn step_system<M: EmuContext>(ctx: &mut M, kind: InsnKind, decoded: &Instruction) -> Result<bool> {
    if kind == InsnKind::ECALL {
        ctx.ecall()
    } else {
        ctx.trap(TrapCause::IllegalInstruction(decoded.raw))
    }
}
/// Reinterpret a 32-bit word as signed and widen it losslessly to 64 bits.
fn sign_extend_u32(x: u32) -> i64 {
    i64::from(x as i32)
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/vm_state.rs | ceno_emul/src/vm_state.rs | use super::rv32im::EmuContext;
use crate::{
PC_STEP_SIZE, Program, WORD_SIZE,
addr::{ByteAddr, RegIdx, Word, WordAddr},
dense_addr_space::DenseAddrSpace,
platform::Platform,
rv32im::{Instruction, TrapCause},
syscalls::{SyscallEffects, handle_syscall},
tracer::{Change, FullTracer, Tracer},
};
use anyhow::{Result, anyhow};
use std::{iter::from_fn, ops::Deref, sync::Arc};
/// Terminal state reached after an ecall HALT; carries the guest's exit code.
pub struct HaltState {
    pub exit_code: u32,
}
/// The number of registers the VM models: 32 architectural registers plus one
/// extra slot (`Instruction::RD_NULL`) used as a sink for writes to x0.
pub const VM_REG_COUNT: usize = 32 + 1;
/// An implementation of the machine state and of the side-effects of operations.
pub struct VMState<T: Tracer = FullTracer> {
    program: Arc<Program>,
    platform: Platform,
    // Current program counter, stored as a plain word.
    pc: Word,
    /// Emulated main memory backed by a pre-allocated vector covering the
    /// platform layout in `memory.x`.
    memory: DenseAddrSpace<Word>,
    registers: [Word; VM_REG_COUNT],
    // Termination.
    halt_state: Option<HaltState>,
    tracer: T,
}
impl VMState<FullTracer> {
    /// Create a VM with the default [`FullTracer`].
    pub fn new(platform: Platform, program: Arc<Program>) -> Self {
        Self::new_with_tracer(platform, program)
    }
    /// Load an ELF image and create a VM with the default [`FullTracer`].
    pub fn new_from_elf(platform: Platform, elf: &[u8]) -> Result<Self> {
        VMState::<FullTracer>::new_from_elf_with_tracer(platform, elf)
    }
}
impl<T: Tracer> VMState<T> {
    /// The number of registers that the VM uses.
    /// 32 architectural registers + 1 register RD_NULL for dark writes to x0.
    pub const REG_COUNT: usize = VM_REG_COUNT;
    /// Build a VM over `program`, allocating dense memory covering the
    /// platform layout (ROM start through heap end) and copying the program
    /// image into it.
    pub fn new_with_tracer(platform: Platform, program: Arc<Program>) -> Self {
        let pc = program.entry;
        let mut vm = Self {
            pc,
            platform: platform.clone(),
            program: program.clone(),
            memory: DenseAddrSpace::new(
                ByteAddr::from(platform.rom.start).waddr(),
                ByteAddr::from(platform.heap.end).waddr(),
            ),
            registers: [0; VM_REG_COUNT],
            halt_state: None,
            tracer: T::new(&platform),
        };
        for (&addr, &value) in &program.image {
            vm.init_memory(ByteAddr(addr).waddr(), value);
        }
        vm
    }
    /// Parse an ELF image and build a VM for it; the platform's `prog_data`
    /// is derived from the addresses of the image.
    pub fn new_from_elf_with_tracer(platform: Platform, elf: &[u8]) -> Result<Self> {
        let program = Arc::new(Program::load_elf(elf, u32::MAX)?);
        let platform = Platform {
            prog_data: Arc::new(program.image.keys().copied().collect()),
            ..platform
        };
        Ok(Self::new_with_tracer(platform, program))
    }
    /// True once the guest has executed an ecall HALT.
    pub fn halted(&self) -> bool {
        self.halt_state.is_some()
    }
    /// The halt state (exit code), if the guest has halted.
    pub fn halted_state(&self) -> Option<&HaltState> {
        self.halt_state.as_ref()
    }
    pub fn tracer(&self) -> &T {
        &self.tracer
    }
    /// Consume the VM and return its tracer.
    pub fn take_tracer(self) -> T {
        self.tracer
    }
    pub fn platform(&self) -> &Platform {
        &self.platform
    }
    pub fn program(&self) -> &Program {
        self.program.deref()
    }
    /// Set a word in memory without side effects.
    pub fn init_memory(&mut self, addr: WordAddr, value: Word) {
        self.memory
            .write(addr, value)
            .unwrap_or_else(|| panic!("addr {addr:?} outside dense memory layout"));
    }
    /// Yield one step record per instruction until the guest halts or a step
    /// fails.
    pub fn iter_until_halt(&mut self) -> impl Iterator<Item = Result<T::Record>> + '_ {
        from_fn(move || {
            if self.halted() {
                None
            } else {
                Some(self.step())
            }
        })
    }
    /// Execute one step, returning `Ok(None)` once the guest has halted.
    pub fn next_step_record(&mut self) -> Result<Option<T::Record>> {
        if self.halted() {
            return Ok(None);
        }
        self.step().map(Some)
    }
    fn step(&mut self) -> Result<T::Record> {
        crate::rv32im::step(self)?;
        let step = self.tracer.advance();
        if self.tracer.is_busy_loop(&step) && !self.halted() {
            // Fix: the previous message formatted a literal "{}" into its
            // placeholder ("Stuck in loop {}"); report the pc instead.
            Err(anyhow!("Stuck in loop at pc {:#x}", self.pc))
        } else {
            Ok(step)
        }
    }
    /// Set a register directly, bypassing the tracer. For initialization only.
    pub fn init_register_unsafe(&mut self, idx: RegIdx, value: Word) {
        self.registers[idx] = value;
    }
    fn halt(&mut self, exit_code: u32) {
        self.set_pc(0.into());
        self.halt_state = Some(HaltState { exit_code });
    }
    /// Apply the memory/register/PC effects of a syscall and record it with
    /// the tracer.
    fn apply_syscall(&mut self, effects: SyscallEffects) -> Result<()> {
        for (addr, value) in effects.iter_mem_values() {
            self.memory
                .write(addr, value)
                .unwrap_or_else(|| panic!("addr {addr:?} outside dense memory layout"));
        }
        for (idx, value) in effects.iter_reg_values() {
            self.registers[idx] = value;
        }
        // Default continuation: the instruction right after the ecall.
        let next_pc = effects.next_pc.unwrap_or(self.pc + PC_STEP_SIZE as u32);
        self.set_pc(next_pc.into());
        self.tracer.track_syscall(effects);
        Ok(())
    }
}
impl<T: Tracer> EmuContext for VMState<T> {
    // Expect an ecall to terminate the program: function HALT with argument exit_code.
    fn ecall(&mut self) -> Result<bool> {
        let function = self.load_register(Platform::reg_ecall())?;
        if function == Platform::ecall_halt() {
            let exit_code = self.load_register(Platform::reg_arg0())?;
            tracing::debug!("halt with exit_code={}", exit_code);
            self.halt(exit_code);
            Ok(true)
        } else {
            match handle_syscall(self, function) {
                Ok(effects) => {
                    self.apply_syscall(effects)?;
                    Ok(true)
                }
                Err(err) if self.platform.unsafe_ecall_nop => {
                    tracing::warn!("ecall ignored with unsafe_ecall_nop: {:?}", err);
                    // TODO: remove this example.
                    // Treat unknown ecalls as all powerful instructions:
                    // Read two registers, write one register, write one memory word, and branch.
                    let _arg0 = self.load_register(Platform::reg_arg0())?;
                    self.store_register(Instruction::RD_NULL as RegIdx, 0)?;
                    // Example ecall effect - any writable address will do.
                    let addr = (self.platform.stack.end - WORD_SIZE as u32).into();
                    self.store_memory(addr, self.peek_memory(addr))?;
                    self.set_pc(ByteAddr(self.pc) + PC_STEP_SIZE);
                    Ok(true)
                }
                Err(err) => {
                    tracing::error!("ecall error: {:?}", err);
                    self.trap(TrapCause::EcallError)
                }
            }
        }
    }
    /// Traps are not recoverable in this VM: every cause becomes an error.
    fn trap(&self, cause: TrapCause) -> Result<bool> {
        // Crash.
        match cause {
            TrapCause::IllegalInstruction(raw) => {
                Err(anyhow!("Trap IllegalInstruction({:#x})", raw))
            }
            _ => Err(anyhow!("Trap {:?}", cause)),
        }
    }
    fn on_normal_end(&mut self, _decoded: &Instruction) {
        self.tracer.store_pc(ByteAddr(self.pc));
        self.tracer.track_mmu_maxtouch_after();
    }
    fn get_pc(&self) -> ByteAddr {
        ByteAddr(self.pc)
    }
    fn set_pc(&mut self, after: ByteAddr) {
        self.pc = after.0;
    }
    /// Load a register and record this operation.
    fn load_register(&mut self, idx: RegIdx) -> Result<Word> {
        self.tracer.load_register(idx, self.peek_register(idx));
        Ok(self.peek_register(idx))
    }
    /// Store a register and record this operation.
    fn store_register(&mut self, idx: RegIdx, after: Word) -> Result<()> {
        // Writes to x0 are dropped: x0 is hard-wired to zero.
        if idx != 0 {
            let before = self.peek_register(idx);
            self.tracer.store_register(idx, Change { before, after });
            self.registers[idx] = after;
        }
        Ok(())
    }
    /// Load a memory word and record this operation.
    fn load_memory(&mut self, addr: WordAddr) -> Result<Word> {
        let value = self.peek_memory(addr);
        self.tracer.load_memory(addr, value);
        Ok(value)
    }
    /// Store a memory word and record this operation.
    fn store_memory(&mut self, addr: WordAddr, after: Word) -> Result<()> {
        let before = self.peek_memory(addr);
        self.tracer.store_memory(addr, Change { after, before });
        self.memory
            .write(addr, after)
            .unwrap_or_else(|| panic!("addr {addr:?} outside dense memory layout"));
        Ok(())
    }
    /// Get the value of a register without side-effects.
    fn peek_register(&self, idx: RegIdx) -> Word {
        self.registers[idx]
    }
    /// Get the value of a memory word without side-effects.
    fn peek_memory(&self, addr: WordAddr) -> Word {
        self.memory
            .read(addr)
            .unwrap_or_else(|| panic!("addr {addr:?} outside dense memory layout"))
    }
    /// Fetch the instruction at `pc` from the preloaded program, indexed by
    /// the word offset from the program's base address.
    fn fetch(&mut self, pc: WordAddr) -> Option<Instruction> {
        let byte_pc: ByteAddr = pc.into();
        let relative_pc = byte_pc.0.wrapping_sub(self.program.base_address);
        let idx = (relative_pc / WORD_SIZE as u32) as usize;
        let word = self.program.instructions.get(idx).copied()?;
        self.tracer.fetch(pc, word);
        self.tracer.track_mmu_maxtouch_before();
        Some(word)
    }
    fn check_data_load(&self, addr: ByteAddr) -> bool {
        self.platform.can_read(addr.0)
    }
    fn check_data_store(&self, addr: ByteAddr) -> bool {
        self.platform.can_write(addr.0)
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/lib.rs | ceno_emul/src/lib.rs | #![deny(clippy::cargo)]
#![feature(step_trait)]
mod addr;
pub use addr::*;
mod dense_addr_space;
mod platform;
pub use platform::{CENO_PLATFORM, Platform};
mod tracer;
pub use tracer::{
Change, FullTracer, LatestAccesses, MemOp, NextAccessPair, NextCycleAccess, PreflightTracer,
ReadOp, StepRecord, Tracer, WriteOp,
};
mod vm_state;
pub use vm_state::{HaltState, VM_REG_COUNT, VMState};
mod rv32im;
pub use rv32im::{
EmuContext, InsnCategory, InsnFormat, InsnKind, Instruction, encode_rv32, encode_rv32u,
};
mod elf;
pub use elf::Program;
pub mod disassemble;
mod syscalls;
pub use syscalls::{
BLS12381_ADD, BLS12381_DECOMPRESS, BLS12381_DOUBLE, BN254_ADD, BN254_DOUBLE, BN254_FP_ADD,
BN254_FP_MUL, BN254_FP2_ADD, BN254_FP2_MUL, KECCAK_PERMUTE, SECP256K1_ADD,
SECP256K1_DECOMPRESS, SECP256K1_DOUBLE, SECP256K1_SCALAR_INVERT, SECP256R1_ADD,
SECP256R1_DECOMPRESS, SECP256R1_DOUBLE, SHA_EXTEND, SyscallSpec, UINT256_MUL,
bn254::{
BN254_FP_WORDS, BN254_FP2_WORDS, BN254_POINT_WORDS, Bn254AddSpec, Bn254DoubleSpec,
Bn254Fp2AddSpec, Bn254Fp2MulSpec, Bn254FpAddSpec, Bn254FpMulSpec,
},
keccak_permute::{KECCAK_WORDS, KeccakSpec},
phantom::LogPcCycleSpec,
secp256k1::{
COORDINATE_WORDS as SECP256K1_COORDINATE_WORDS, SECP256K1_ARG_WORDS, Secp256k1AddSpec,
Secp256k1DecompressSpec, Secp256k1DoubleSpec, Secp256k1ScalarInvertSpec,
},
sha256::{SHA_EXTEND_WORDS, Sha256ExtendSpec},
uint256::{UINT256_WORDS_FIELD_ELEMENT, Uint256MulSpec},
};
pub mod utils;
pub mod test_utils;
mod chunked_vec;
pub mod host_utils;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/syscalls.rs | ceno_emul/src/syscalls.rs | use crate::{RegIdx, Tracer, VMState, Word, WordAddr, WriteOp};
use anyhow::Result;
pub mod bn254;
pub mod keccak_permute;
pub mod phantom;
pub mod secp256k1;
pub mod sha256;
pub mod uint256;
// Using the same function codes as sp1:
// https://github.com/succinctlabs/sp1/blob/013c24ea2fa15a0e7ed94f7d11a7ada4baa39ab9/crates/core/executor/src/syscalls/code.rs
pub use ceno_syscall::{
BLS12381_ADD, BLS12381_DECOMPRESS, BLS12381_DOUBLE, BN254_ADD, BN254_DOUBLE, BN254_FP_ADD,
BN254_FP_MUL, BN254_FP2_ADD, BN254_FP2_MUL, KECCAK_PERMUTE, PHANTOM_LOG_PC_CYCLE,
SECP256K1_ADD, SECP256K1_DECOMPRESS, SECP256K1_DOUBLE, SECP256K1_SCALAR_INVERT, SECP256R1_ADD,
SECP256R1_DECOMPRESS, SECP256R1_DOUBLE, SHA_EXTEND, UINT256_MUL,
};
/// Compile-time description of a syscall: its name, function code, and the
/// number of register/memory operations its witness contains.
pub trait SyscallSpec {
    const NAME: &'static str;
    /// Number of register operations recorded in the syscall witness.
    const REG_OPS_COUNT: usize;
    /// Number of memory operations recorded in the syscall witness.
    const MEM_OPS_COUNT: usize;
    /// The syscall function code (sp1-compatible numbering; see module docs).
    const CODE: u32;
    const HAS_LOOKUPS: bool = false;
    const GKR_OUTPUTS: usize = 0;
}
/// Trace the inputs and effects of a syscall.
///
/// Dispatches on `function_code` without mutating `vm`; the returned
/// [`SyscallEffects`] are applied separately by the caller.
pub fn handle_syscall<T: Tracer>(vm: &VMState<T>, function_code: u32) -> Result<SyscallEffects> {
    match function_code {
        KECCAK_PERMUTE => Ok(keccak_permute::keccak_permute(vm)),
        SECP256K1_ADD => Ok(secp256k1::secp256k1_add(vm)),
        SECP256K1_DOUBLE => Ok(secp256k1::secp256k1_double(vm)),
        SECP256K1_DECOMPRESS => Ok(secp256k1::secp256k1_decompress(vm)),
        SECP256K1_SCALAR_INVERT => Ok(secp256k1::secp256k1_invert(vm)),
        SHA_EXTEND => Ok(sha256::extend(vm)),
        BN254_ADD => Ok(bn254::bn254_add(vm)),
        BN254_DOUBLE => Ok(bn254::bn254_double(vm)),
        BN254_FP_ADD => Ok(bn254::bn254_fp_add(vm)),
        BN254_FP_MUL => Ok(bn254::bn254_fp_mul(vm)),
        BN254_FP2_ADD => Ok(bn254::bn254_fp2_add(vm)),
        BN254_FP2_MUL => Ok(bn254::bn254_fp2_mul(vm)),
        UINT256_MUL => Ok(uint256::uint256_mul(vm)),
        // phantom syscall
        PHANTOM_LOG_PC_CYCLE => Ok(phantom::log_pc_cycle(vm)),
        // TODO: introduce error types.
        _ => Err(anyhow::anyhow!("Unknown syscall: {}", function_code)),
    }
}
/// A syscall event, available to the circuit witness generators.
/// TODO: separate mem_ops into two stages: reads-and-writes
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct SyscallWitness {
    pub mem_ops: Vec<WriteOp>,
    pub reg_ops: Vec<WriteOp>,
    // Private zero-sized marker: outside this module the struct can only be
    // obtained through `SyscallWitness::new` / `SyscallEffects::finalize`.
    _marker: (),
}
impl SyscallWitness {
    /// Assemble a witness from recorded memory and register operations.
    fn new(mem_ops: Vec<WriteOp>, reg_ops: Vec<WriteOp>) -> SyscallWitness {
        SyscallWitness {
            mem_ops,
            reg_ops,
            _marker: (),
        }
    }
}
/// The effects of a syscall to apply on the VM.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct SyscallEffects {
    /// The witness being built. Get it with `finalize`.
    witness: SyscallWitness,
    /// The next PC after the syscall. Defaults to the next instruction.
    pub next_pc: Option<u32>,
}
impl SyscallEffects {
    /// Iterate over the register values after the syscall.
    pub fn iter_reg_values(&self) -> impl Iterator<Item = (RegIdx, Word)> + '_ {
        self.witness
            .reg_ops
            .iter()
            .map(|op| (op.register_index(), op.value.after))
    }
    /// Iterate over the memory values after the syscall.
    pub fn iter_mem_values(&self) -> impl Iterator<Item = (WordAddr, Word)> + '_ {
        self.witness
            .mem_ops
            .iter()
            .map(|op| (op.addr, op.value.after))
    }
    /// Keep track of the cycles of registers and memory accesses.
    pub fn finalize<T: Tracer>(mut self, tracer: &mut T) -> SyscallWitness {
        // Stamp every operation with the cycle of its previous access, as
        // recorded by the tracer.
        for op in &mut self.witness.reg_ops {
            op.previous_cycle = tracer.track_access(op.addr, T::SUBCYCLE_RD);
        }
        for op in &mut self.witness.mem_ops {
            op.previous_cycle = tracer.track_access(op.addr, T::SUBCYCLE_MEM);
        }
        self.witness
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/platform.rs | ceno_emul/src/platform.rs | use core::fmt::{self, Formatter};
use once_cell::sync::Lazy;
use std::{collections::BTreeSet, fmt::Display, ops::Range, sync::Arc};
use crate::addr::{Addr, RegIdx};
/// The Platform struct holds the parameters of the VM.
/// It defines:
/// - the layout of virtual memory,
/// - special addresses, such as the initial PC,
/// - codes of environment calls.
#[derive(Clone, Debug)]
pub struct Platform {
    pub rom: Range<Addr>,
    /// Word-aligned addresses of the program's static data (taken from the
    /// ELF image; see `VMState::new_from_elf_with_tracer`).
    pub prog_data: Arc<BTreeSet<Addr>>,
    pub public_io: Range<Addr>,
    pub stack: Range<Addr>,
    pub heap: Range<Addr>,
    pub hints: Range<Addr>,
    /// If true, ecall instructions are no-op instead of trap. Testing only.
    pub unsafe_ecall_nop: bool,
    pub is_debug: bool,
}
impl Display for Platform {
    fn fmt(&self, f: &mut Formatter) -> fmt::Result {
        // Summarize the prog_data address set as its min..max span.
        // NOTE(review): `*first..*last` is half-open, so the printed end
        // excludes the last prog_data address — confirm whether the display
        // should be inclusive.
        let prog_data: Option<Range<Addr>> = match (self.prog_data.first(), self.prog_data.last()) {
            (Some(first), Some(last)) => Some(*first..*last),
            _ => None,
        };
        write!(
            f,
            "Platform {{ rom: {:#x}..{:#x}, prog_data: {:#x}..{:#x}, stack: {:#x}..{:#x}, heap: {:#x}..{:#x}, \
            public_io: {:#x}..{:#x}, hints: {:#x}..{:#x}, unsafe_ecall_nop: {} }}",
            self.rom.start,
            self.rom.end,
            prog_data
                .as_ref()
                .map(|prog_data| prog_data.start)
                .unwrap_or_default(),
            prog_data
                .as_ref()
                .map(|prog_data| prog_data.end)
                .unwrap_or_default(),
            self.stack.start,
            self.stack.end,
            self.heap.start,
            self.heap.end,
            self.public_io.start,
            self.public_io.end,
            self.hints.start,
            self.hints.end,
            self.unsafe_ecall_nop
        )
    }
}
/// Aligned with [`memory.x`].
// ┌───────────────────────────── 0x4000_0000 (end of _sheap, or heap)
// │
// │   HEAP (128 MB, grows upward)
// │   0x3800_0000 .. 0x4000_0000
// │
// ├───────────────────────────── 0x3800_0000 (_sheap, align 0x800_0000)
// │   RAM (128 MB)
// │   0x3000_0000 .. 0x3800_0000
// ├───────────────────────────── 0x3000_0000 (RAM base / hints end)
// │
// │   HINTS (128 MB)
// │   0x2800_0000 .. 0x3000_0000
// │
// │───────────────────────────── 0x2800_0000 (hint base / gap end)
// │
// │   [Reserved gap: 128 MB for debug I/O]
// │   0x2000_0000 .. 0x2800_0000
// │───────────────────────────── 0x2000_0000 (gap / stack end)
// │
// │   STACK (≈128 MB, grows downward)
// │   0x1800_0000 .. 0x2000_0000
// │   (NOTE: the code below extends the stack range to 0x2000_4000,
// │    reserving 0x4000 bytes for debug I/O — see the field comment.)
// │
// ├───────────────────────────── 0x1800_0000 (stack base / pubio end)
// │
// │   PUBLIC I/O (128 MB)
// │   0x1000_0000 .. 0x1800_0000
// │
// ├───────────────────────────── 0x1000_0000 (pubio base / rom end)
// │
// │   ROM / TEXT / RODATA (128 MB)
// │   0x0800_0000 .. 0x1000_0000
// │
// └───────────────────────────── 0x0800_0000 (rom base)
pub static CENO_PLATFORM: Lazy<Platform> = Lazy::new(|| Platform {
    rom: 0x0800_0000..0x1000_0000,       // 128 MB
    public_io: 0x1000_0000..0x1800_0000, // 128 MB
    stack: 0x1800_0000..0x2000_4000, // stack grows downward 128MB, 0x4000 reserved for debug io.
    // we make hints start from 0x2800_0000 thus reserve a 128MB gap for debug io
    // at the end of stack
    hints: 0x2800_0000..0x3000_0000, // 128 MB
    // heap grows upward, reserved 128 MB for it
    // the beginning of heap address got bss/sbss data
    // and the real heap start from 0x3800_0000
    heap: 0x3000_0000..0x4000_0000,
    unsafe_ecall_nop: false,
    prog_data: Arc::new(BTreeSet::new()),
    is_debug: false,
});
impl Platform {
    // Virtual memory layout.
    pub fn is_rom(&self, addr: Addr) -> bool {
        self.rom.contains(&addr)
    }
    /// True if `addr` falls in the program's static data.
    pub fn is_prog_data(&self, addr: Addr) -> bool {
        // Mask to the containing word: prog_data stores word-aligned addresses.
        self.prog_data.contains(&(addr & !0x3))
    }
    pub fn is_ram(&self, addr: Addr) -> bool {
        self.stack.contains(&addr) || self.heap.contains(&addr) || self.is_prog_data(addr)
    }
    pub fn is_pub_io(&self, addr: Addr) -> bool {
        self.public_io.contains(&addr)
    }
    pub fn is_hints(&self, addr: Addr) -> bool {
        self.hints.contains(&addr)
    }
    /// Virtual address of a register.
    pub const fn register_vma(index: RegIdx) -> Addr {
        // Register VMAs are aligned, cannot be confused with indices, and readable in hex.
        (index << 8) as Addr
    }
    /// Register index from a virtual address (unchecked).
    pub const fn register_index(vma: Addr) -> RegIdx {
        (vma >> 8) as RegIdx
    }
    // Startup.
    pub const fn pc_base(&self) -> Addr {
        self.rom.start
    }
    // Permissions.
    /// Readable addresses are exactly the writable ones in this platform.
    pub fn can_read(&self, addr: Addr) -> bool {
        self.can_write(addr)
    }
    pub fn can_write(&self, addr: Addr) -> bool {
        self.is_ram(addr) || self.is_pub_io(addr) || self.is_hints(addr)
    }
    // Environment calls.
    /// Register containing the ecall function code. (x5, t0)
    pub const fn reg_ecall() -> RegIdx {
        5
    }
    /// Register containing the first function argument. (x10, a0)
    pub const fn reg_arg0() -> RegIdx {
        10
    }
    /// Register containing the 2nd function argument. (x11, a1)
    pub const fn reg_arg1() -> RegIdx {
        11
    }
    /// The code of ecall HALT.
    pub const fn ecall_halt() -> u32 {
        0
    }
    /// The code of success.
    pub const fn code_success() -> u32 {
        0
    }
    /// Validate the platform configuration, range shall not overlap.
    pub fn validate(&self) -> bool {
        // Sort the regions by start address, then check that every region
        // ends before the next one begins.
        let mut ranges = [
            &self.rom,
            &self.stack,
            &self.heap,
            &self.public_io,
            &self.hints,
        ];
        ranges.sort_by_key(|r| r.start);
        for i in 0..ranges.len() - 1 {
            if ranges[i].end > ranges[i + 1].start {
                return false;
            }
        }
        true
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::{PreflightTracer, VMState, WORD_SIZE};
    /// Sanity-check that ROM, RAM, and register VMAs occupy disjoint regions
    /// in the default platform.
    #[test]
    fn test_no_overlap() {
        let p = CENO_PLATFORM.clone();
        // ROM and RAM do not overlap.
        assert!(!p.is_rom(p.heap.start));
        assert!(!p.is_rom(p.heap.end - WORD_SIZE as Addr));
        assert!(!p.is_ram(p.rom.start));
        assert!(!p.is_ram(p.rom.end - WORD_SIZE as Addr));
        // Registers do not overlap with ROM or RAM.
        for reg in [
            Platform::register_vma(0),
            Platform::register_vma(VMState::<PreflightTracer>::REG_COUNT - 1),
        ] {
            assert!(!p.is_rom(reg));
            assert!(!p.is_ram(reg));
        }
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/host_utils.rs | ceno_emul/src/host_utils.rs | use std::iter::from_fn;
use ceno_rt::INFO_OUT_ADDR as CENO_RT_INFO_OUT_ADDR;
use itertools::Itertools;
use crate::{ByteAddr, EmuContext, Tracer, VMState, Word, WordAddr};
const WORD_SIZE: usize = 4;
const INFO_OUT_ADDR: WordAddr = ByteAddr(CENO_RT_INFO_OUT_ADDR).waddr();
/// Read every message the guest wrote to the info-out region starting at
/// `INFO_OUT_ADDR`.
///
/// Each message is one word holding the byte length, followed by the payload
/// packed into little-endian words; a zero-length message terminates the
/// sequence.
pub fn read_all_messages<T: Tracer>(state: &VMState<T>) -> Vec<Vec<u8>> {
    let mut offset: WordAddr = WordAddr::from(0);
    from_fn(move || match read_message(state, offset) {
        out if out.is_empty() => None,
        out => {
            // Advance past the payload words plus the length-prefix word.
            offset += out.len().div_ceil(WORD_SIZE) as u32 + 1;
            Some(out)
        }
    })
    .collect()
}
/// Like [`read_all_messages`], but reinterpret each message's bytes as
/// little-endian words; panics if a message length is not word-aligned.
pub fn read_all_messages_as_words<T: Tracer>(state: &VMState<T>) -> Vec<Vec<Word>> {
    read_all_messages(state)
        .iter()
        .map(|message| {
            assert_eq!(message.len() % WORD_SIZE, 0);
            message
                .chunks_exact(WORD_SIZE)
                .map(|chunk| Word::from_le_bytes(chunk.try_into().unwrap()))
                .collect_vec()
        })
        .collect_vec()
}
/// Read a single length-prefixed message at `INFO_OUT_ADDR + offset`.
fn read_message<T: Tracer>(state: &VMState<T>, offset: WordAddr) -> Vec<u8> {
    let out_addr = INFO_OUT_ADDR + offset;
    // The first word is the message length in bytes.
    let byte_len = state.peek_memory(out_addr) as usize;
    (out_addr + 1_usize..)
        .map(|address| state.peek_memory(address))
        .flat_map(u32::to_le_bytes)
        .take(byte_len)
        .collect::<Vec<_>>()
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/test_utils.rs | ceno_emul/src/test_utils.rs | use crate::{
CENO_PLATFORM, InsnKind, Instruction, Platform, Program, StepRecord, VMState, encode_rv32,
encode_rv32u, syscalls::KECCAK_PERMUTE,
};
use anyhow::Result;
/// Build and run a tiny program that invokes the Keccak-f permutation
/// syscall and halts; returns the ECALL step record and the program's
/// instructions.
pub fn keccak_step() -> (StepRecord, Vec<Instruction>) {
    let instructions = vec![
        // Call Keccak-f.
        load_immediate(Platform::reg_arg0() as u32, CENO_PLATFORM.heap.start),
        load_immediate(Platform::reg_ecall() as u32, KECCAK_PERMUTE),
        encode_rv32(InsnKind::ECALL, 0, 0, 0, 0),
        // Halt.
        load_immediate(Platform::reg_ecall() as u32, Platform::ecall_halt()),
        encode_rv32(InsnKind::ECALL, 0, 0, 0, 0),
    ];
    let pc = CENO_PLATFORM.pc_base();
    let program = Program::new(
        pc,
        pc,
        CENO_PLATFORM.heap.start,
        instructions.clone(),
        Default::default(),
    );
    let mut vm = VMState::new(CENO_PLATFORM.clone(), program.into());
    let steps = vm.iter_until_halt().collect::<Result<Vec<_>>>().unwrap();
    // steps[2] is the Keccak ECALL (the two immediate loads precede it).
    (steps[2].clone(), instructions)
}
/// Encode `ADDI rd, x0, imm`: load an immediate bit-pattern into `rd`.
const fn load_immediate(rd: u32, imm: u32) -> Instruction {
    encode_rv32u(InsnKind::ADDI, 0, 0, rd, imm)
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/chunked_vec.rs | ceno_emul/src/chunked_vec.rs | use rayon::iter::{IntoParallelIterator, ParallelIterator};
use std::ops::{Index, IndexMut};
/// A growable vector divided into fixed-size chunks.
///
/// This structure behaves similarly to a `Vec`, but allocates memory
/// in discrete chunks of a fixed size rather than continuously.
/// It is especially useful when the total number of elements is large
/// or not known in advance, as it avoids repeated reallocations.
///
/// Conceptually, it can be seen as a "DENSE" map-like container where accessed
/// keys are comparable indices and values are stored in chunked segments.
///
/// This layout is more cache-friendly when keys are accessed in increasing order.
#[derive(Default, Debug, Clone)]
pub struct ChunkedVec<T> {
    // Fixed-size segments; only indices below `len` are considered live.
    chunks: Vec<Vec<T>>,
    // Elements per chunk. NOTE(review): `derive(Default)` yields chunk_size == 0,
    // which would divide-by-zero in `get`/`get_or_create`; construct via `new` instead.
    chunk_size: usize,
    // Logical length: one past the highest index ever touched.
    len: usize,
}
impl<T: Default + Send> ChunkedVec<T> {
    /// Create a new `ChunkedVec` with the given chunk size.
    ///
    /// # Panics
    /// Panics if `chunk_size == 0`.
    pub fn new(chunk_size: usize) -> Self {
        assert!(chunk_size > 0, "chunk_size must be > 0");
        Self {
            chunks: Vec::new(),
            chunk_size,
            len: 0,
        }
    }
    /// Current number of (logical) elements: one past the highest index accessed.
    pub fn len(&self) -> usize {
        self.len
    }
    /// Returns true if no element has been accessed yet.
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
    /// Access element by index (immutable); `None` when `index >= len`.
    pub fn get(&self, index: usize) -> Option<&T> {
        if index >= self.len {
            return None;
        }
        let chunk_idx = index / self.chunk_size;
        let within_idx = index % self.chunk_size;
        self.chunks.get(chunk_idx)?.get(within_idx)
    }
    /// Mutable access to the element at `index`, allocating default-initialized
    /// chunks on demand and growing `len` to cover `index`.
    pub fn get_or_create(&mut self, index: usize) -> &mut T {
        let chunk_idx = index / self.chunk_size;
        let within_idx = index % self.chunk_size;
        // Allocate any missing chunks in place. `resize_with` avoids the
        // intermediate Vec + append of the previous implementation. Each
        // chunk's elements are default-initialized in parallel with rayon,
        // since chunks can be large (e.g. 1 << 20 entries).
        if chunk_idx >= self.chunks.len() {
            let chunk_size = self.chunk_size;
            self.chunks.resize_with(chunk_idx + 1, || {
                (0..chunk_size)
                    .into_par_iter()
                    .map(|_| Default::default())
                    .collect()
            });
        }
        // Track the logical length as one past the highest index touched.
        self.len = self.len.max(index + 1);
        &mut self.chunks[chunk_idx][within_idx]
    }
}
impl<T: Default + Send> Index<usize> for ChunkedVec<T> {
    type Output = T;
    // Read-only indexing: panics on any index >= len (no auto-creation).
    fn index(&self, index: usize) -> &Self::Output {
        self.get(index).expect("index out of bounds")
    }
}
impl<T: Default + Send> IndexMut<usize> for ChunkedVec<T> {
    // Unlike `index`, mutable indexing never panics: it transparently
    // allocates chunks and extends `len` (map-like auto-vivification).
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        self.get_or_create(index)
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/tracer.rs | ceno_emul/src/tracer.rs | use crate::{
CENO_PLATFORM, InsnKind, Instruction, PC_STEP_SIZE, Platform,
addr::{ByteAddr, Cycle, RegIdx, Word, WordAddr},
chunked_vec::ChunkedVec,
dense_addr_space::DenseAddrSpace,
encode_rv32,
syscalls::{SyscallEffects, SyscallWitness},
};
use ceno_rt::WORD_SIZE;
use smallvec::SmallVec;
use std::{collections::BTreeMap, fmt, mem};
/// An instruction and its context in an execution trace. That is concrete values of registers and memory.
///
/// - Each instruction is divided into 4 subcycles with the operations on: rs1, rs2, rd, memory. Each op is assigned a unique `cycle + subcycle`.
///
/// - `cycle = 0` means initialization; that is all the special startup logic we are going to have. The RISC-V program starts at `cycle = 4` and each instruction increments `cycle += 4`.
///
/// - Registers are assigned a VMA (virtual memory address, u32). This way they can be unified with other kinds of memory ops.
///
/// - Any of `rs1 / rs2 / rd` **may be `x0`**. The trace handles this like any register, including the value that was _supposed_ to be stored. The circuits must handle this case: either **store `0` or skip `x0` operations**.
///
/// - Any pair of `rs1 / rs2 / rd` **may be the same**. Then, one op will point to the other op in the same instruction but a different subcycle. The circuits may follow the operations **without special handling** of repeated registers.
#[derive(Clone, Debug, Default, PartialEq, Eq)]
pub struct StepRecord {
    // Cycle at which this instruction starts (a multiple of SUBCYCLES_PER_INSN).
    cycle: Cycle,
    // Program counter before/after this instruction.
    pc: Change<ByteAddr>,
    // Heap access high-water mark before/after this instruction.
    pub heap_maxtouch_addr: Change<ByteAddr>,
    // Hint-region access high-water mark before/after this instruction.
    pub hint_maxtouch_addr: Change<ByteAddr>,
    pub insn: Instruction,
    // Register/memory operands; `None` when the instruction does not use them.
    rs1: Option<ReadOp>,
    rs2: Option<ReadOp>,
    rd: Option<WriteOp>,
    memory_op: Option<WriteOp>,
    // Witness of the syscall performed by this instruction, if any.
    syscall: Option<SyscallWitness>,
}
pub type NextAccessPair = SmallVec<[(WordAddr, Cycle); 1]>;
pub type NextCycleAccess = ChunkedVec<NextAccessPair>;
const ACCESSED_CHUNK_SIZE: usize = 1 << 20;
/// Initialize per-region min/max access tracking for the heap, stack, and hint
/// MMIO regions.
///
/// The map is keyed by the region's start word address; the value tuple is
/// `(start, end, min_accessed, max_accessed)`. `min` is seeded with `end` and
/// `max` with `start`, so the first real access always tightens both bounds.
fn init_mmio_min_max_access(
    platform: &Platform,
) -> BTreeMap<WordAddr, (WordAddr, WordAddr, WordAddr, WordAddr)> {
    // One entry per tracked region; the previous version copy-pasted the
    // identical insert three times.
    [
        (platform.heap.start, platform.heap.end),
        (platform.stack.start, platform.stack.end),
        (platform.hints.start, platform.hints.end),
    ]
    .into_iter()
    .map(|(start, end)| {
        let start = ByteAddr::from(start).waddr();
        let end = ByteAddr::from(end).waddr();
        (start, (start, end, end, start))
    })
    .collect()
}
/// Interface for recording an execution trace while the VM runs.
///
/// Implemented by `FullTracer` (full per-step witnesses) and `PreflightTracer`
/// (lightweight access bookkeeping only).
pub trait Tracer {
    /// What `advance` yields per completed instruction (e.g. `StepRecord`, or `()`).
    type Record;
    // Each instruction occupies 4 consecutive subcycles, in this order.
    const SUBCYCLE_RS1: Cycle = 0;
    const SUBCYCLE_RS2: Cycle = 1;
    const SUBCYCLE_RD: Cycle = 2;
    const SUBCYCLE_MEM: Cycle = 3;
    const SUBCYCLES_PER_INSN: Cycle = 4;
    /// Create a tracer configured for `platform`'s memory layout.
    fn new(platform: &Platform) -> Self;
    /// Finish the current instruction, return its record, move to the next cycle.
    fn advance(&mut self) -> Self::Record;
    /// True when the step leaves the PC unchanged (self-jump / halt loop).
    fn is_busy_loop(&self, record: &Self::Record) -> bool;
    /// Record the post-instruction PC.
    fn store_pc(&mut self, pc: ByteAddr);
    /// Record the fetched instruction and its (pre) PC.
    fn fetch(&mut self, pc: WordAddr, value: Instruction);
    /// Snapshot heap/hint high-water marks before executing the instruction.
    fn track_mmu_maxtouch_before(&mut self);
    /// Snapshot heap/hint high-water marks after executing the instruction.
    fn track_mmu_maxtouch_after(&mut self);
    /// Record a register read (at most two per instruction).
    fn load_register(&mut self, idx: RegIdx, value: Word);
    /// Record a register write (at most one per instruction).
    fn store_register(&mut self, idx: RegIdx, value: Change<Word>);
    /// Record a memory read (shares the single memory-op slot with stores).
    fn load_memory(&mut self, addr: WordAddr, value: Word);
    /// Record a memory write (at most one memory op per instruction).
    fn store_memory(&mut self, addr: WordAddr, value: Change<Word>);
    /// Record the effects of a syscall performed by this instruction.
    fn track_syscall(&mut self, effects: SyscallEffects);
    /// Mark `addr` accessed at current cycle + `subcycle`; returns the previous
    /// access cycle (0 on first access).
    fn track_access(&mut self, addr: WordAddr, subcycle: Cycle) -> Cycle;
    /// Latest-access table accumulated so far.
    fn final_accesses(&self) -> &LatestAccesses;
    /// Consume the tracer, yielding the cycle -> future-accesses map.
    fn into_next_accesses(self) -> NextCycleAccess
    where
        Self: Sized;
    /// Cycle of the pending (not yet completed) instruction.
    fn cycle(&self) -> Cycle;
    /// Number of instructions executed so far.
    fn executed_insts(&self) -> usize;
    /// Given a region's start address, return the (min, max) accessed addresses
    /// within that region, if any access occurred.
    fn probe_min_max_address_by_start_addr(
        &self,
        start_addr: WordAddr,
    ) -> Option<(WordAddr, WordAddr)>;
}
#[derive(Clone, Debug, PartialEq, Eq, Hash)]
pub struct MemOp<T> {
/// Virtual Memory Address.
/// For registers, get it from `Platform::register_vma(idx)`.
pub addr: WordAddr,
/// The Word read, or the Change<Word> to be written.
pub value: T,
/// The cycle when this memory address was last accessed before this operation.
pub previous_cycle: Cycle,
}
impl<T> MemOp<T> {
    /// Build a register operation: register `idx` is mapped to its virtual
    /// memory address so register and memory ops share one representation.
    pub fn new_register_op(idx: RegIdx, value: T, previous_cycle: Cycle) -> MemOp<T> {
        MemOp {
            addr: Platform::register_vma(idx).into(),
            value,
            previous_cycle,
        }
    }
    /// Get the register index of this operation.
    /// Only meaningful when `addr` was produced by `Platform::register_vma`.
    pub fn register_index(&self) -> RegIdx {
        Platform::register_index(self.addr.into())
    }
}
pub type ReadOp = MemOp<Word>;
pub type WriteOp = MemOp<Change<Word>>;
/// Dense map from word address to the cycle of its most recent access.
#[derive(Debug)]
pub struct LatestAccesses {
    // Last-access cycle per address; Cycle::default() (0) means "never accessed".
    store: DenseAddrSpace<Cycle>,
    // Number of distinct addresses touched at least once.
    len: usize,
    // First-touch-ordered list of touched addresses; kept only in test/debug
    // builds to support iteration without scanning the dense store.
    #[cfg(any(test, debug_assertions))]
    touched: Vec<WordAddr>,
}
impl LatestAccesses {
    fn new(platform: &Platform) -> Self {
        Self {
            // Track every word address from 0 up to the end of the heap;
            // register VMAs are low addresses, so they are covered too.
            store: DenseAddrSpace::new(
                WordAddr::from(0u32),
                ByteAddr::from(platform.heap.end).waddr(),
            ),
            len: 0,
            #[cfg(any(test, debug_assertions))]
            touched: Vec::new(),
        }
    }
    // Record an access to `addr` at `cycle`; returns the previous access cycle
    // (Cycle::default(), i.e. 0, on the first access). Panics if `addr` lies
    // outside the tracked address space.
    fn track(&mut self, addr: WordAddr, cycle: Cycle) -> Cycle {
        let prev = self
            .store
            .replace(addr, cycle)
            .unwrap_or_else(|| panic!("addr {addr:?} outside tracked address space"));
        // A previous cycle of 0 marks a first-time access; count it.
        if prev == Cycle::default() {
            self.len += 1;
            #[cfg(any(test, debug_assertions))]
            {
                self.touched.push(addr);
            }
        }
        prev
    }
    /// Cycle of the latest access to `addr` (0 if never accessed).
    ///
    /// # Panics
    /// Panics if `addr` lies outside the tracked address space.
    pub fn cycle(&self, addr: WordAddr) -> Cycle {
        *self
            .store
            .get_ref(addr)
            .expect("address must lie within tracked range")
    }
    /// Number of distinct addresses accessed at least once.
    pub fn len(&self) -> usize {
        self.len
    }
    pub fn is_empty(&self) -> bool {
        self.len == 0
    }
    /// Iterate over (address, last-access cycle) pairs in first-touch order.
    /// Test/debug builds only.
    #[cfg(any(test, debug_assertions))]
    pub fn iter(&self) -> LatestAccessIter<'_> {
        LatestAccessIter {
            accesses: self,
            idx: 0,
        }
    }
    /// Touched addresses in first-touch order (test/debug builds only).
    #[cfg(any(test, debug_assertions))]
    pub fn addresses(&self) -> impl Iterator<Item = &WordAddr> + '_ {
        self.touched.iter()
    }
    // Release builds do not record touched addresses, so this is unsupported.
    #[cfg(not(any(test, debug_assertions)))]
    pub fn addresses(&self) -> std::iter::Empty<&WordAddr> {
        unimplemented!("no track touched address in release build")
    }
}
#[cfg(any(test, debug_assertions))]
pub struct LatestAccessIter<'a> {
accesses: &'a LatestAccesses,
idx: usize,
}
#[cfg(any(test, debug_assertions))]
impl<'a> Iterator for LatestAccessIter<'a> {
type Item = (&'a WordAddr, &'a Cycle);
fn next(&mut self) -> Option<Self::Item> {
let addr = self.accesses.touched.get(self.idx)?;
self.idx += 1;
let cycle = self
.accesses
.store
.get_ref(*addr)
.expect("tracked address must exist");
Some((addr, cycle))
}
}
#[cfg(any(test, debug_assertions))]
impl<'a> IntoIterator for &'a LatestAccesses {
type Item = (&'a WordAddr, &'a Cycle);
type IntoIter = LatestAccessIter<'a>;
fn into_iter(self) -> Self::IntoIter {
self.iter()
}
}
impl StepRecord {
pub fn new_r_instruction(
cycle: Cycle,
pc: ByteAddr,
insn_code: Instruction,
rs1_read: Word,
rs2_read: Word,
rd: Change<Word>,
prev_cycle: Cycle,
) -> StepRecord {
let pc = Change::new(pc, pc + PC_STEP_SIZE);
StepRecord::new_insn(
cycle,
pc,
insn_code,
Some(rs1_read),
Some(rs2_read),
Some(rd),
None,
prev_cycle,
Change::default(),
Change::default(),
)
}
pub fn new_b_instruction(
cycle: Cycle,
pc: Change<ByteAddr>,
insn_code: Instruction,
rs1_read: Word,
rs2_read: Word,
prev_cycle: Cycle,
) -> StepRecord {
StepRecord::new_insn(
cycle,
pc,
insn_code,
Some(rs1_read),
Some(rs2_read),
None,
None,
prev_cycle,
Change::default(),
Change::default(),
)
}
pub fn new_i_instruction(
cycle: Cycle,
pc: Change<ByteAddr>,
insn_code: Instruction,
rs1_read: Word,
rd: Change<Word>,
prev_cycle: Cycle,
) -> StepRecord {
StepRecord::new_insn(
cycle,
pc,
insn_code,
Some(rs1_read),
None,
Some(rd),
None,
prev_cycle,
Change::default(),
Change::default(),
)
}
pub fn new_im_instruction(
cycle: Cycle,
pc: ByteAddr,
insn_code: Instruction,
rs1_read: Word,
rd: Change<Word>,
mem_op: ReadOp,
prev_cycle: Cycle,
) -> StepRecord {
let pc = Change::new(pc, pc + PC_STEP_SIZE);
StepRecord::new_insn(
cycle,
pc,
insn_code,
Some(rs1_read),
None,
Some(rd),
Some(WriteOp {
addr: mem_op.addr,
value: Change {
before: mem_op.value,
after: mem_op.value,
},
previous_cycle: mem_op.previous_cycle,
}),
prev_cycle,
Change::default(),
Change::default(),
)
}
pub fn new_u_instruction(
cycle: Cycle,
pc: ByteAddr,
insn_code: Instruction,
rd: Change<Word>,
prev_cycle: Cycle,
) -> StepRecord {
let pc = Change::new(pc, pc + PC_STEP_SIZE);
StepRecord::new_insn(
cycle,
pc,
insn_code,
None,
None,
Some(rd),
None,
prev_cycle,
Change::default(),
Change::default(),
)
}
pub fn new_j_instruction(
cycle: Cycle,
pc: Change<ByteAddr>,
insn_code: Instruction,
rd: Change<Word>,
prev_cycle: Cycle,
) -> StepRecord {
StepRecord::new_insn(
cycle,
pc,
insn_code,
None,
None,
Some(rd),
None,
prev_cycle,
Change::default(),
Change::default(),
)
}
pub fn new_s_instruction(
cycle: Cycle,
pc: ByteAddr,
insn_code: Instruction,
rs1_read: Word,
rs2_read: Word,
memory_op: WriteOp,
prev_cycle: Cycle,
) -> StepRecord {
let pc = Change::new(pc, pc + PC_STEP_SIZE);
StepRecord::new_insn(
cycle,
pc,
insn_code,
Some(rs1_read),
Some(rs2_read),
None,
Some(memory_op),
prev_cycle,
Change::default(),
Change::default(),
)
}
/// Create a test record for an ECALL instruction that can do anything.
pub fn new_ecall_any(cycle: Cycle, pc: ByteAddr) -> StepRecord {
let value = 1234;
Self::new_insn(
cycle,
Change::new(pc, pc + PC_STEP_SIZE),
encode_rv32(InsnKind::ECALL, 0, 0, 0, 0),
Some(value),
Some(value),
Some(Change::new(value, value)),
Some(WriteOp {
addr: CENO_PLATFORM.heap.start.into(),
value: Change {
before: value,
after: value,
},
previous_cycle: 0,
}),
0,
Change::default(),
Change::default(),
)
}
#[allow(clippy::too_many_arguments)]
fn new_insn(
cycle: Cycle,
pc: Change<ByteAddr>,
insn: Instruction,
rs1_read: Option<Word>,
rs2_read: Option<Word>,
rd: Option<Change<Word>>,
memory_op: Option<WriteOp>,
previous_cycle: Cycle,
heap_maxtouch_addr: Change<ByteAddr>,
hint_maxtouch_addr: Change<ByteAddr>,
) -> StepRecord {
StepRecord {
cycle,
pc,
rs1: rs1_read.map(|rs1| ReadOp {
addr: Platform::register_vma(insn.rs1).into(),
value: rs1,
previous_cycle,
}),
rs2: rs2_read.map(|rs2| ReadOp {
addr: Platform::register_vma(insn.rs2).into(),
value: rs2,
previous_cycle,
}),
rd: rd.map(|rd| WriteOp {
addr: Platform::register_vma(insn.rd_internal() as RegIdx).into(),
value: rd,
previous_cycle,
}),
insn,
memory_op,
syscall: None,
heap_maxtouch_addr,
hint_maxtouch_addr,
}
}
pub fn cycle(&self) -> Cycle {
self.cycle
}
pub fn pc(&self) -> Change<ByteAddr> {
self.pc
}
/// The instruction as a decoded structure.
pub fn insn(&self) -> Instruction {
self.insn
}
pub fn rs1(&self) -> Option<ReadOp> {
self.rs1.clone()
}
pub fn rs2(&self) -> Option<ReadOp> {
self.rs2.clone()
}
pub fn rd(&self) -> Option<WriteOp> {
self.rd.clone()
}
pub fn memory_op(&self) -> Option<WriteOp> {
self.memory_op.clone()
}
#[inline(always)]
pub fn is_busy_loop(&self) -> bool {
self.pc.before == self.pc.after
}
pub fn syscall(&self) -> Option<&SyscallWitness> {
self.syscall.as_ref()
}
}
#[derive(Debug)]
pub struct FullTracer {
record: StepRecord,
// record each section max access address
// (start_addr -> (start_addr, end_addr, min_access_addr, max_access_addr))
mmio_min_max_access: Option<BTreeMap<WordAddr, (WordAddr, WordAddr, WordAddr, WordAddr)>>,
max_heap_addr_access: ByteAddr,
max_hint_addr_access: ByteAddr,
platform: Platform,
// keep track of each address that the cycle when they were last accessed.
latest_accesses: LatestAccesses,
// keep track of each cycle that accessed addresses in the future with respective future cycles.
// format: [current cycle -> Vec<(WordAddr, Cycle)>]
next_accesses: NextCycleAccess,
}
impl FullTracer {
pub const SUBCYCLE_RS1: Cycle = <Self as Tracer>::SUBCYCLE_RS1;
pub const SUBCYCLE_RS2: Cycle = <Self as Tracer>::SUBCYCLE_RS2;
pub const SUBCYCLE_RD: Cycle = <Self as Tracer>::SUBCYCLE_RD;
pub const SUBCYCLE_MEM: Cycle = <Self as Tracer>::SUBCYCLE_MEM;
pub const SUBCYCLES_PER_INSN: Cycle = <Self as Tracer>::SUBCYCLES_PER_INSN;
pub fn new(platform: &Platform) -> FullTracer {
let mmio_max_access = init_mmio_min_max_access(platform);
FullTracer {
mmio_min_max_access: Some(mmio_max_access),
record: StepRecord {
cycle: Self::SUBCYCLES_PER_INSN,
..StepRecord::default()
},
platform: platform.clone(),
latest_accesses: LatestAccesses::new(platform),
next_accesses: NextCycleAccess::new(ACCESSED_CHUNK_SIZE),
max_heap_addr_access: ByteAddr::from(platform.heap.start),
max_hint_addr_access: ByteAddr::from(platform.hints.start),
}
}
/// Return the completed step and advance to the next cycle.
#[inline(always)]
pub fn advance(&mut self) -> StepRecord {
let next_cycle = self.record.cycle + Self::SUBCYCLES_PER_INSN;
mem::replace(
&mut self.record,
StepRecord {
cycle: next_cycle,
..StepRecord::default()
},
)
}
#[inline(always)]
pub fn store_pc(&mut self, pc: ByteAddr) {
self.record.pc.after = pc;
}
#[inline(always)]
pub fn fetch(&mut self, pc: WordAddr, value: Instruction) {
self.record.pc.before = pc.baddr();
self.record.insn = value;
}
#[inline(always)]
pub fn track_mmu_maxtouch_before(&mut self) {
self.record.heap_maxtouch_addr.before = self.max_heap_addr_access;
self.record.hint_maxtouch_addr.before = self.max_hint_addr_access;
}
#[inline(always)]
pub fn track_mmu_maxtouch_after(&mut self) {
self.record.heap_maxtouch_addr.after = self.max_heap_addr_access;
self.record.hint_maxtouch_addr.after = self.max_hint_addr_access;
}
#[inline(always)]
pub fn load_register(&mut self, idx: RegIdx, value: Word) {
let addr = Platform::register_vma(idx).into();
match (&self.record.rs1, &self.record.rs2) {
(None, None) => {
self.record.rs1 = Some(ReadOp {
addr,
value,
previous_cycle: self.track_access(addr, Self::SUBCYCLE_RS1),
});
}
(Some(_), None) => {
self.record.rs2 = Some(ReadOp {
addr,
value,
previous_cycle: self.track_access(addr, Self::SUBCYCLE_RS2),
});
}
_ => unimplemented!("Only two register reads are supported"),
}
}
#[inline(always)]
pub fn store_register(&mut self, idx: RegIdx, value: Change<Word>) {
if self.record.rd.is_some() {
unimplemented!("Only one register write is supported");
}
let addr = Platform::register_vma(idx).into();
self.record.rd = Some(WriteOp {
addr,
value,
previous_cycle: self.track_access(addr, Self::SUBCYCLE_RD),
});
}
#[inline(always)]
pub fn load_memory(&mut self, addr: WordAddr, value: Word) {
self.store_memory(addr, Change::new(value, value));
}
#[inline(always)]
pub fn store_memory(&mut self, addr: WordAddr, value: Change<Word>) {
if self.record.memory_op.is_some() {
unimplemented!("Only one memory access is supported");
}
// update min/max mmio access
if let Some((start_addr, (_, end_addr, min_addr, max_addr))) = self
.mmio_min_max_access
.as_mut()
// find the MMIO region whose start address is less than or equal to the target address
.and_then(|mmio_max_access| mmio_max_access.range_mut(..=addr).next_back())
{
// skip if the target address is not within the range tracked by this MMIO region
// this condition ensures the address is within the MMIO region's end address
if addr < *end_addr {
// expand the max bound if the address exceeds the current max
if addr >= *max_addr {
*max_addr = addr + WordAddr::from(WORD_SIZE as u32); // end is exclusive
}
// shrink the min bound if the address is below the current min
if addr < *min_addr {
*min_addr = addr; // start is inclusive
}
if start_addr.baddr().0 == self.platform.heap.start {
let access_end = addr + WordAddr::from(WORD_SIZE as u32);
let access_end_baddr = access_end.baddr();
if access_end_baddr > self.max_heap_addr_access {
self.max_heap_addr_access = access_end_baddr;
}
} else if start_addr.baddr().0 == self.platform.hints.start {
let access_end = addr + WordAddr::from(WORD_SIZE as u32);
let access_end_baddr = access_end.baddr();
if access_end_baddr > self.max_hint_addr_access {
self.max_hint_addr_access = access_end_baddr;
}
}
}
}
self.record.memory_op = Some(WriteOp {
addr,
value,
previous_cycle: self.track_access(addr, Self::SUBCYCLE_MEM),
});
}
#[inline(always)]
pub fn track_syscall(&mut self, effects: SyscallEffects) {
let witness = effects.finalize(self);
assert!(self.record.syscall.is_none(), "Only one syscall per step");
self.record.syscall = Some(witness);
}
/// - Return the cycle when an address was last accessed.
/// - Return 0 if this is the first access.
/// - Record the current instruction as the origin of the latest access.
/// - Accesses within the same instruction are distinguished by `subcycle ∈ [0, 3]`.
#[inline(always)]
pub fn track_access(&mut self, addr: WordAddr, subcycle: Cycle) -> Cycle {
let cur_cycle = self.record.cycle + subcycle;
let prev_cycle = self.latest_accesses.track(addr, cur_cycle);
self.next_accesses
.get_or_create(prev_cycle as usize)
.push((addr, cur_cycle));
prev_cycle
}
pub fn final_accesses(&self) -> &LatestAccesses {
&self.latest_accesses
}
pub fn next_accesses(self) -> NextCycleAccess {
self.next_accesses
}
/// Return the cycle of the pending instruction (after the last completed step).
pub fn cycle(&self) -> Cycle {
self.record.cycle
}
/// Return the number of instruction executed til this moment
/// minus 1 since cycle start from Self::SUBCYCLES_PER_INSN
pub fn executed_insts(&self) -> usize {
(self.record.cycle / Self::SUBCYCLES_PER_INSN)
.saturating_sub(1)
.try_into()
.unwrap()
}
/// giving a start address, return (min, max) accessed address within section
pub fn probe_min_max_address_by_start_addr(
&self,
start_addr: WordAddr,
) -> Option<(WordAddr, WordAddr)> {
self.mmio_min_max_access
.as_ref()
.and_then(|mmio_max_access| {
mmio_max_access.range(..=start_addr).next_back().and_then(
|(_, &(expected_start_addr, _, min, max))| {
assert_eq!(
start_addr, expected_start_addr,
"please use section start for searching"
);
if start_addr == expected_start_addr && min < max {
Some((min, max))
} else {
None
}
},
)
})
}
}
#[derive(Debug)]
pub struct PreflightTracer {
cycle: Cycle,
pc: Change<ByteAddr>,
mmio_min_max_access: Option<BTreeMap<WordAddr, (WordAddr, WordAddr, WordAddr, WordAddr)>>,
latest_accesses: LatestAccesses,
next_accesses: NextCycleAccess,
register_reads_tracked: u8,
}
impl PreflightTracer {
pub const SUBCYCLE_RS1: Cycle = <Self as Tracer>::SUBCYCLE_RS1;
pub const SUBCYCLE_RS2: Cycle = <Self as Tracer>::SUBCYCLE_RS2;
pub const SUBCYCLE_RD: Cycle = <Self as Tracer>::SUBCYCLE_RD;
pub const SUBCYCLE_MEM: Cycle = <Self as Tracer>::SUBCYCLE_MEM;
pub const SUBCYCLES_PER_INSN: Cycle = <Self as Tracer>::SUBCYCLES_PER_INSN;
pub fn new(platform: &Platform) -> Self {
let mut tracer = PreflightTracer {
cycle: <Self as Tracer>::SUBCYCLES_PER_INSN,
pc: Default::default(),
mmio_min_max_access: Some(init_mmio_min_max_access(platform)),
latest_accesses: LatestAccesses::new(platform),
next_accesses: NextCycleAccess::new(ACCESSED_CHUNK_SIZE),
register_reads_tracked: 0,
};
tracer.reset_register_tracking();
tracer
}
#[inline(always)]
fn update_mmio_bounds(&mut self, addr: WordAddr) {
if let Some((_, (_, end_addr, min_addr, max_addr))) = self
.mmio_min_max_access
.as_mut()
.and_then(|mmio_max_access| mmio_max_access.range_mut(..=addr).next_back())
&& addr < *end_addr
{
// skip if the target address is not within the range tracked by this MMIO region
// this condition ensures the address is within the MMIO region's end address
if addr >= *max_addr {
*max_addr = addr + WordAddr::from(WORD_SIZE as u32);
}
if addr < *min_addr {
*min_addr = addr;
}
}
}
#[inline(always)]
fn reset_register_tracking(&mut self) {
self.register_reads_tracked = 0;
}
}
impl Tracer for PreflightTracer {
type Record = ();
fn new(platform: &Platform) -> Self {
PreflightTracer::new(platform)
}
#[inline(always)]
fn advance(&mut self) -> Self::Record {
self.cycle += Self::SUBCYCLES_PER_INSN;
self.reset_register_tracking();
}
fn is_busy_loop(&self, _: &Self::Record) -> bool {
self.pc.before == self.pc.after
}
#[inline(always)]
fn store_pc(&mut self, pc: ByteAddr) {
self.pc.after = pc;
}
#[inline(always)]
fn fetch(&mut self, pc: WordAddr, _value: Instruction) {
self.pc.before = pc.baddr();
}
#[inline(always)]
fn track_mmu_maxtouch_before(&mut self) {}
#[inline(always)]
fn track_mmu_maxtouch_after(&mut self) {}
#[inline(always)]
fn load_register(&mut self, idx: RegIdx, _value: Word) {
let addr = Platform::register_vma(idx).into();
let subcycle = match self.register_reads_tracked {
0 => Self::SUBCYCLE_RS1,
1 => Self::SUBCYCLE_RS2,
_ => unimplemented!("Only two register reads are supported"),
};
self.register_reads_tracked += 1;
self.track_access(addr, subcycle);
}
#[inline(always)]
fn store_register(&mut self, idx: RegIdx, _value: Change<Word>) {
let addr = Platform::register_vma(idx).into();
self.track_access(addr, Self::SUBCYCLE_RD);
}
#[inline(always)]
fn load_memory(&mut self, addr: WordAddr, value: Word) {
self.store_memory(addr, Change::new(value, value));
}
#[inline(always)]
fn store_memory(&mut self, addr: WordAddr, _value: Change<Word>) {
self.update_mmio_bounds(addr);
self.track_access(addr, Self::SUBCYCLE_MEM);
}
#[inline(always)]
fn track_syscall(&mut self, effects: SyscallEffects) {
let _ = effects.finalize(self);
}
#[inline(always)]
fn track_access(&mut self, addr: WordAddr, subcycle: Cycle) -> Cycle {
let cur_cycle = self.cycle + subcycle;
let prev_cycle = self.latest_accesses.track(addr, cur_cycle);
self.next_accesses
.get_or_create(prev_cycle as usize)
.push((addr, cur_cycle));
prev_cycle
}
fn final_accesses(&self) -> &LatestAccesses {
&self.latest_accesses
}
fn into_next_accesses(self) -> NextCycleAccess {
self.next_accesses
}
fn cycle(&self) -> Cycle {
self.cycle
}
fn executed_insts(&self) -> usize {
(self.cycle / Self::SUBCYCLES_PER_INSN)
.saturating_sub(1)
.try_into()
.unwrap()
}
fn probe_min_max_address_by_start_addr(
&self,
start_addr: WordAddr,
) -> Option<(WordAddr, WordAddr)> {
self.mmio_min_max_access
.as_ref()
.and_then(|mmio_max_access| {
mmio_max_access.range(..=start_addr).next_back().and_then(
|(_, &(expected_start_addr, _, min, max))| {
assert_eq!(
start_addr, expected_start_addr,
"please use section start for searching"
);
if start_addr == expected_start_addr && min < max {
Some((min, max))
} else {
None
}
},
)
})
}
}
impl Tracer for FullTracer {
type Record = StepRecord;
fn new(platform: &Platform) -> Self {
FullTracer::new(platform)
}
#[inline(always)]
fn advance(&mut self) -> Self::Record {
FullTracer::advance(self)
}
#[inline(always)]
fn is_busy_loop(&self, record: &Self::Record) -> bool {
record.is_busy_loop()
}
#[inline(always)]
fn store_pc(&mut self, pc: ByteAddr) {
FullTracer::store_pc(self, pc)
}
#[inline(always)]
fn fetch(&mut self, pc: WordAddr, value: Instruction) {
FullTracer::fetch(self, pc, value)
}
fn track_mmu_maxtouch_before(&mut self) {
FullTracer::track_mmu_maxtouch_before(self)
}
fn track_mmu_maxtouch_after(&mut self) {
FullTracer::track_mmu_maxtouch_after(self)
}
#[inline(always)]
fn load_register(&mut self, idx: RegIdx, value: Word) {
FullTracer::load_register(self, idx, value)
}
#[inline(always)]
fn store_register(&mut self, idx: RegIdx, value: Change<Word>) {
FullTracer::store_register(self, idx, value)
}
#[inline(always)]
fn load_memory(&mut self, addr: WordAddr, value: Word) {
FullTracer::load_memory(self, addr, value)
}
#[inline(always)]
fn store_memory(&mut self, addr: WordAddr, value: Change<Word>) {
FullTracer::store_memory(self, addr, value)
}
#[inline(always)]
fn track_syscall(&mut self, effects: SyscallEffects) {
FullTracer::track_syscall(self, effects)
}
#[inline(always)]
fn track_access(&mut self, addr: WordAddr, subcycle: Cycle) -> Cycle {
FullTracer::track_access(self, addr, subcycle)
}
fn final_accesses(&self) -> &LatestAccesses {
FullTracer::final_accesses(self)
}
fn into_next_accesses(self) -> NextCycleAccess {
self.next_accesses()
}
fn cycle(&self) -> Cycle {
FullTracer::cycle(self)
}
fn executed_insts(&self) -> usize {
FullTracer::executed_insts(self)
}
fn probe_min_max_address_by_start_addr(
&self,
start_addr: WordAddr,
) -> Option<(WordAddr, WordAddr)> {
FullTracer::probe_min_max_address_by_start_addr(self, start_addr)
}
}
/// A before/after pair for a value mutated during one instruction
/// (PC, register, or memory word).
#[derive(Copy, Clone, Default, PartialEq, Eq)]
pub struct Change<T> {
    pub before: T,
    pub after: T,
}
impl<T> Change<T> {
    /// Construct a change from its before/after values.
    pub fn new(before: T, after: T) -> Change<T> {
        Change { before, after }
    }
}
impl<T: fmt::Debug> fmt::Debug for Change<T> {
    // Render as "before -> after" for compact trace dumps.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{:?} -> {:?}", self.before, self.after)
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/utils.rs | ceno_emul/src/utils.rs | use itertools::{Itertools, izip};
use crate::{Change, EmuContext, Tracer, VMState, WORD_SIZE, Word, WordAddr, WriteOp};
/// Utilities for reading/manipulating a memory segment of fixed length
pub struct MemoryView<'a, T: Tracer, const LENGTH: usize> {
vm: &'a VMState<T>,
start: WordAddr,
writes: Option<[Word; LENGTH]>,
}
impl<'a, T: Tracer, const LENGTH: usize> MemoryView<'a, T, LENGTH> {
    /// Creates a new view over `LENGTH` consecutive words starting at `start`.
    ///
    /// # Panics
    /// Panics unless `start` is a multiple of `WORD_SIZE`.
    pub fn new(vm: &'a VMState<T>, start: u32) -> Self {
        assert!(start.is_multiple_of(WORD_SIZE as u32));
        // TODO: do we need stricter alignment requirements for keccak (u64 array)
        MemoryView {
            vm,
            start: WordAddr::from(start),
            writes: None,
        }
    }
    /// Word addresses covered by this view, in ascending order.
    pub fn iter_addrs(&self) -> impl Iterator<Item = WordAddr> {
        (self.start..).take(LENGTH)
    }
    pub fn addrs(&self) -> [WordAddr; LENGTH] {
        self.iter_addrs().collect_vec().try_into().unwrap()
    }
    /// Current memory contents of the view, word by word.
    pub fn iter_words(&self) -> impl Iterator<Item = Word> + '_ {
        self.iter_addrs().map(|addr| self.vm.peek_memory(addr))
    }
    pub fn words(&self) -> [Word; LENGTH] {
        self.iter_words().collect_vec().try_into().unwrap()
    }
    /// Little-endian byte view of the segment.
    pub fn iter_bytes(&self) -> impl Iterator<Item = u8> + '_ {
        self.iter_words().flat_map(|word| word.to_le_bytes())
    }
    pub fn bytes(&self) -> Vec<u8> {
        self.iter_bytes().collect_vec()
    }
    /// Stage words to be written to this segment.
    ///
    /// # Panics
    /// Panics if called more than once on the same view.
    pub fn write(&mut self, writes: [Word; LENGTH]) {
        assert!(self.writes.is_none(), "view can only be written once");
        self.writes = Some(writes);
    }
    /// Build the per-word write operations for this view: `before` is the
    /// current memory content, `after` is the staged write (or unchanged when
    /// nothing was staged).
    pub fn mem_ops(&self) -> [WriteOp; LENGTH] {
        izip!(
            self.addrs(),
            self.words(),
            // `unwrap_or_else` so the fallback read of all LENGTH words only
            // happens when no writes were staged (eager `unwrap_or` always read).
            self.writes.unwrap_or_else(|| self.words())
        )
        .map(|(addr, before, after)| WriteOp {
            addr,
            value: Change { before, after },
            previous_cycle: 0, // Cycle set later in finalize().
        })
        .collect_vec()
        .try_into()
        .unwrap()
    }
    /// Dump the view's addresses and contents via `dbg!` for debugging.
    pub fn debug(&self) {
        dbg!(self.start, LENGTH);
        dbg!(self.addrs());
        dbg!(self.words());
        dbg!(self.bytes());
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/addr.rs | ceno_emul/src/addr.rs | // Based on: https://github.com/risc0/risc0/blob/aeea62f0c8f4223abfba17d4c78cb7e15c513de2/risc0/circuit/rv32im/src/prove/emu/addr.rs
//
// Copyright 2024 RISC Zero, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::{Deserialize, Serialize};
use std::{
fmt,
iter::Step,
ops::{self, Range},
};
pub const WORD_SIZE: usize = 4;
pub const PC_WORD_SIZE: usize = 4;
pub const PC_STEP_SIZE: usize = 4;
// Type aliases to clarify the code without wrapper types.
pub type Word = u32;
pub type SWord = i32;
pub type Addr = u32;
pub type Cycle = u64;
pub type RegIdx = usize;
#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub struct ByteAddr(pub u32);
#[derive(Clone, Copy, Default, PartialEq, Eq, PartialOrd, Ord, Hash)]
pub struct WordAddr(pub u32);
impl From<ByteAddr> for WordAddr {
fn from(addr: ByteAddr) -> Self {
addr.waddr()
}
}
impl From<WordAddr> for ByteAddr {
fn from(addr: WordAddr) -> Self {
addr.baddr()
}
}
impl From<u32> for ByteAddr {
fn from(addr: u32) -> ByteAddr {
ByteAddr(addr)
}
}
impl From<u32> for WordAddr {
fn from(addr: u32) -> WordAddr {
ByteAddr(addr).waddr()
}
}
impl From<ByteAddr> for u32 {
fn from(addr: ByteAddr) -> Self {
addr.0
}
}
impl From<WordAddr> for u32 {
fn from(addr: WordAddr) -> Self {
addr.baddr().0
}
}
impl From<WordAddr> for u64 {
fn from(addr: WordAddr) -> Self {
addr.baddr().0 as u64
}
}
impl ByteAddr {
    /// Convert to a word address (drops the low 2 bits).
    pub const fn waddr(self) -> WordAddr {
        WordAddr(self.0 / WORD_SIZE as u32)
    }
    /// Byte offset within the containing word (0..WORD_SIZE).
    pub const fn shift(self) -> u32 {
        self.0 & 0x03
    }
    /// True when the address is word-aligned.
    pub const fn is_aligned(&self) -> bool {
        self.0.is_multiple_of(WORD_SIZE as u32)
    }
    /// True for the null (zero) address.
    pub const fn is_null(&self) -> bool {
        self.0 == 0
    }
    /// Byte-address addition with wraparound on u32 overflow.
    pub fn wrapping_add(self, rhs: u32) -> Self {
        Self(self.0.wrapping_add(rhs))
    }
}
impl WordAddr {
pub const fn baddr(self) -> ByteAddr {
ByteAddr(self.0 * WORD_SIZE as u32)
}
}
// Delegate `Step` to the underlying u32 so `WordAddr` works in range
// expressions (e.g. `(start..).take(n)` over consecutive word addresses).
impl Step for WordAddr {
    fn steps_between(start: &Self, end: &Self) -> (usize, Option<usize>) {
        u32::steps_between(&start.0, &end.0)
    }
    fn forward_checked(start: Self, count: usize) -> Option<Self> {
        u32::forward_checked(start.0, count).map(Self)
    }
    fn backward_checked(start: Self, count: usize) -> Option<Self> {
        u32::backward_checked(start.0, count).map(Self)
    }
}
impl fmt::Debug for ByteAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0x{:08x}", self.0)
}
}
impl fmt::Debug for WordAddr {
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
write!(f, "0x{:08x}", self.baddr().0)
}
}
impl ops::Add for WordAddr {
type Output = WordAddr;
fn add(self, rhs: Self) -> Self::Output {
Self(self.0 + rhs.0)
}
}
impl ops::Add<usize> for WordAddr {
type Output = WordAddr;
fn add(self, rhs: usize) -> Self::Output {
Self(self.0 + rhs as u32)
}
}
impl ops::Add<u32> for WordAddr {
type Output = WordAddr;
fn add(self, rhs: u32) -> Self::Output {
Self(self.0 + rhs)
}
}
impl ops::AddAssign<usize> for WordAddr {
fn add_assign(&mut self, rhs: usize) {
self.0 += rhs as u32;
}
}
impl ops::AddAssign<u32> for WordAddr {
fn add_assign(&mut self, rhs: u32) {
self.0 += rhs;
}
}
impl ops::Add for ByteAddr {
type Output = ByteAddr;
fn add(self, rhs: Self) -> Self::Output {
Self(self.0 + rhs.0)
}
}
impl ops::Add<usize> for ByteAddr {
type Output = ByteAddr;
fn add(self, rhs: usize) -> Self::Output {
Self(self.0 + rhs as u32)
}
}
impl ops::Add<u32> for ByteAddr {
type Output = ByteAddr;
fn add(self, rhs: u32) -> Self::Output {
Self(self.0 + rhs)
}
}
impl ops::AddAssign for ByteAddr {
fn add_assign(&mut self, rhs: Self) {
self.0 += rhs.0
}
}
impl ops::AddAssign<usize> for ByteAddr {
fn add_assign(&mut self, rhs: usize) {
self.0 += rhs as u32;
}
}
impl ops::AddAssign<u32> for ByteAddr {
fn add_assign(&mut self, rhs: u32) {
self.0 += rhs;
}
}
pub trait IterAddresses {
fn iter_addresses(&self) -> impl ExactSizeIterator<Item = Addr>;
}
impl IterAddresses for Range<Addr> {
fn iter_addresses(&self) -> impl ExactSizeIterator<Item = Addr> {
self.clone().step_by(WORD_SIZE)
}
}
impl<T: GetAddr> IterAddresses for &[T] {
fn iter_addresses(&self) -> impl ExactSizeIterator<Item = Addr> {
self.iter().map(T::get_addr)
}
}
pub trait GetAddr {
fn get_addr(&self) -> Addr;
}
impl GetAddr for Addr {
fn get_addr(&self) -> Addr {
*self
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/elf.rs | ceno_emul/src/elf.rs | // Based on: https://github.com/risc0/risc0/blob/6b6daeafa1545984aa28581fca56d9ef13dcbae6/risc0/binfmt/src/elf.rs
//
// Copyright 2024 RISC Zero, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
extern crate alloc;
use std::iter::successors;
use alloc::collections::BTreeMap;
use itertools::Itertools;
use crate::{CENO_PLATFORM, addr::WORD_SIZE, disassemble::transpile, rv32im::Instruction};
use anyhow::{Context, Result, anyhow, bail};
use elf::{
ElfBytes,
abi::{PF_R, PF_W, PF_X},
endian::LittleEndian,
file::Class,
};
/// A RISC Zero program
#[derive(Clone, Debug)]
pub struct Program {
    /// The entrypoint of the program (initial PC value).
    pub entry: u32,
    /// This is the lowest address of the program's executable code
    pub base_address: u32,
    /// This is the heap start address, match with _sheap retrieve from elf
    pub sheap: u32,
    /// The instructions of the program
    pub instructions: Vec<Instruction>,
    /// The initial memory image, keyed by word-aligned byte address.
    pub image: BTreeMap<u32, u32>,
}
impl From<&[Instruction]> for Program {
fn from(insn_codes: &[Instruction]) -> Program {
Self {
entry: CENO_PLATFORM.pc_base(),
base_address: CENO_PLATFORM.pc_base(),
sheap: CENO_PLATFORM.heap.start,
instructions: insn_codes.to_vec(),
image: Default::default(),
}
}
}
impl Program {
    /// Create program from already-decoded parts (no validation performed).
    pub fn new(
        entry: u32,
        base_address: u32,
        sheap: u32,
        instructions: Vec<Instruction>,
        image: BTreeMap<u32, u32>,
    ) -> Program {
        Self {
            entry,
            base_address,
            sheap,
            instructions,
            image,
        }
    }
    /// Initialize a RISC Zero Program from an appropriate ELF file.
    ///
    /// Expects a 32-bit little-endian RISC-V executable with exactly one
    /// executable PT_LOAD segment. Loads all PT_LOAD segments into a
    /// word-granular memory image, zero-fills uninitialized (bss) words up
    /// to the highest symbol in each segment, transpiles the executable
    /// segment into `Instruction`s, pads the static image to a power-of-two
    /// entry count, and resolves the `_sheap` symbol as the heap start.
    /// Any address reaching `max_mem` is rejected.
    pub fn load_elf(input: &[u8], max_mem: u32) -> Result<Program> {
        let mut instructions: Vec<u32> = Vec::new();
        let mut image: BTreeMap<u32, u32> = BTreeMap::new();
        let mut base_address = None;
        let elf = ElfBytes::<LittleEndian>::minimal_parse(input)
            .map_err(|err| anyhow!("Elf parse error: {err}"))?;
        if elf.ehdr.class != Class::ELF32 {
            bail!("Not a 32-bit ELF");
        }
        if elf.ehdr.e_machine != elf::abi::EM_RISCV {
            bail!("Invalid machine type, must be RISC-V");
        }
        if elf.ehdr.e_type != elf::abi::ET_EXEC {
            bail!("Invalid ELF type, must be executable");
        }
        let entry: u32 = elf
            .ehdr
            .e_entry
            .try_into()
            .map_err(|err| anyhow!("e_entry was larger than 32 bits. {err}"))?;
        // The entrypoint must be in range and word-aligned.
        if entry >= max_mem || !entry.is_multiple_of(WORD_SIZE as u32) {
            bail!("Invalid entrypoint");
        }
        let segments = elf.segments().ok_or(anyhow!("Missing segment table"))?;
        if segments.len() > 256 {
            bail!("Too many program headers");
        }
        let symbols = collect_addr_symbols_mapping(&elf)?;
        for (idx, segment) in segments
            .iter()
            .filter(|x| x.p_type == elf::abi::PT_LOAD)
            .enumerate()
        {
            let file_size: u32 = segment
                .p_filesz
                .try_into()
                .map_err(|err| anyhow!("filesize was larger than 32 bits. {err}"))?;
            if file_size >= max_mem {
                bail!("Invalid segment file_size");
            }
            let mem_size: u32 = segment
                .p_memsz
                .try_into()
                .map_err(|err| anyhow!("mem_size was larger than 32 bits {err}"))?;
            if mem_size >= max_mem {
                bail!("Invalid segment mem_size");
            }
            let vaddr: u32 = segment
                .p_vaddr
                .try_into()
                .map_err(|err| anyhow!("vaddr is larger than 32 bits. {err}"))?;
            let p_flags = segment.p_flags;
            // Remember where the (single) executable segment starts; it is
            // the base address used for transpiling the instructions below.
            if (p_flags & PF_X) != 0 {
                if base_address.is_none() {
                    base_address = Some(vaddr);
                } else {
                    return Err(anyhow!("only support one executable segment"));
                }
            }
            if !vaddr.is_multiple_of(WORD_SIZE as u32) {
                bail!("vaddr {vaddr:08x} is unaligned");
            }
            tracing::debug!(
                "ELF segment {idx}: {}{}{} vaddr=0x{vaddr:08x} file_size={file_size} mem_size={mem_size}",
                if p_flags & PF_R != 0 { "R" } else { "-" },
                if p_flags & PF_W != 0 { "W" } else { "-" },
                if p_flags & PF_X != 0 { "X" } else { "-" },
            );
            let offset: u32 = segment
                .p_offset
                .try_into()
                .map_err(|err| anyhow!("offset is larger than 32 bits. {err}"))?;
            // process initialized data
            (0..file_size).step_by(WORD_SIZE).try_for_each(|i| {
                let addr = vaddr.checked_add(i).context("Invalid segment vaddr")?;
                if addr >= max_mem {
                    bail!("Address [0x{addr:x}] exceeds max [0x{max_mem:x}]");
                }
                // Assemble a little-endian word; `take` truncates the final
                // partial word, and bytes past the file read as zero.
                let word = (0..WORD_SIZE as u32)
                    .take((file_size - i) as usize)
                    .enumerate()
                    .fold(0u32, |acc, (j, _)| {
                        let offset = (offset + i + j as u32) as usize;
                        let byte = *input.get(offset).unwrap_or(&0);
                        acc | ((byte as u32) << (j * 8))
                    });
                image.insert(addr, word);
                if (segment.p_flags & PF_X) != 0 {
                    instructions.push(word);
                }
                Ok(())
            })?;
            // only pad uninitialized region if a symbol exists in the range
            if let Some((max_addr, _)) = find_max_symbol_in_range(
                &symbols,
                vaddr as u64,
                vaddr.checked_add(mem_size).context("Invalid mem_size")? as u64,
            ) {
                // Zero-fill from the end of the file-backed data up to (and
                // including) the word holding the highest symbol address.
                let zero_upper = (*max_addr as u32).saturating_sub(vaddr);
                (file_size..=zero_upper)
                    .step_by(WORD_SIZE)
                    .try_for_each(|i| {
                        let addr = vaddr.checked_add(i).context("Invalid segment vaddr")?;
                        if addr >= max_mem {
                            bail!("zero-fill addr [0x{addr:x}] exceeds max [0x{max_mem:x}]");
                        }
                        image.insert(addr, 0);
                        Ok(())
                    })?;
            }
        }
        if base_address.is_none() {
            return Err(anyhow!("does not have executable segment"));
        }
        let base_address = base_address.unwrap();
        // The entrypoint must fall inside the executable segment.
        assert!(entry >= base_address);
        assert!((entry - base_address) as usize <= instructions.len() * WORD_SIZE);
        let instructions = transpile(base_address, &instructions);
        // program data include text/rodata/data/bss
        // truncate padding 0 section after bss
        let mut program_data = image
            .into_iter()
            .sorted_by_key(|(addr, _)| *addr)
            .collect_vec();
        // record current max address of bss
        // as later when we do static program data padding, it must cover max bss section and assure it's well constrained
        let bss_max_addr = program_data.last().cloned();
        // padding program_data to next power of 2 from last addr
        let padding_size = program_data.len().next_power_of_two() - program_data.len();
        if padding_size > 0 {
            // Append zero words at consecutive addresses after the last entry.
            program_data.extend(
                successors(
                    program_data.last().map(|d| (d.0 + WORD_SIZE as u32, 0)),
                    |(prev_addr, _)| Some((prev_addr + WORD_SIZE as u32, 0)),
                )
                .take(padding_size)
                .collect_vec(),
            );
        }
        let Some(((padded_max_static_addr, _), (bss_max_addr, _))) =
            program_data.last().zip(bss_max_addr)
        else {
            return Err(anyhow!("invalid size of data"));
        };
        if *padded_max_static_addr < bss_max_addr {
            return Err(anyhow!(
                "padded_max_static_addr should larger than bss_max_addr"
            ));
        }
        // retrieve _sheap from elf
        let sheap = symbols
            .iter()
            .find(|(_, v)| *v == "_sheap")
            .map(|(k, _)| *k)
            .ok_or_else(|| anyhow!("unable to find _sheap symbol"))? as u32;
        // there should be no overlap between the padded static data and the heap
        if *padded_max_static_addr >= sheap {
            return Err(anyhow!(
                "padded_max_static_addr overlap with _sheap heap start address"
            ));
        }
        Ok(Program {
            entry,
            base_address,
            sheap,
            image: program_data.into_iter().collect::<BTreeMap<u32, u32>>(),
            instructions,
        })
    }
}
/// Build an address → symbol-name map from the ELF symbol table.
///
/// Unnamed symbols and symbols with a null address are skipped; a missing
/// symbol table yields an empty map.
fn collect_addr_symbols_mapping<'data>(
    elf: &ElfBytes<'data, LittleEndian>,
) -> Result<BTreeMap<u64, String>> {
    let mut mapping = BTreeMap::new();
    if let Some((symtab, strtab)) = elf.symbol_table()? {
        for sym in symtab.iter() {
            // A zero st_value carries no usable address.
            if sym.st_value == 0 {
                continue;
            }
            match strtab.get(sym.st_name as usize) {
                Ok(name) if !name.is_empty() => {
                    mapping.insert(sym.st_value, name.to_string());
                }
                _ => {}
            }
        }
    }
    Ok(mapping)
}
/// Return the symbol with the greatest address in `start..end`
/// (end-exclusive), or `None` if the range holds no symbol.
///
/// `BTreeMap::range` iterates in ascending key order, so the maximum is the
/// last element of the range iterator; `next_back()` retrieves it directly
/// instead of scanning the whole range with `max_by_key`.
fn find_max_symbol_in_range(
    symbols: &BTreeMap<u64, String>,
    start: u64,
    end: u64,
) -> Option<(&u64, &String)> {
    symbols.range(start..end).next_back()
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/syscalls/keccak_permute.rs | ceno_emul/src/syscalls/keccak_permute.rs | use itertools::Itertools;
use tiny_keccak::keccakf;
use crate::{Change, EmuContext, Platform, Tracer, VMState, Word, WriteOp, utils::MemoryView};
use super::{SyscallEffects, SyscallSpec, SyscallWitness};
const KECCAK_CELLS: usize = 25; // u64 cells
pub const KECCAK_WORDS: usize = KECCAK_CELLS * 2; // u32 words
/// Syscall metadata for the Keccak-f permutation precompile.
pub struct KeccakSpec;
impl SyscallSpec for KeccakSpec {
    const NAME: &'static str = "KECCAK";
    // One register op: the state pointer in arg0.
    const REG_OPS_COUNT: usize = 1;
    // The full state buffer is read and written back.
    const MEM_OPS_COUNT: usize = KECCAK_WORDS;
    const CODE: u32 = ceno_syscall::KECCAK_PERMUTE;
    const HAS_LOOKUPS: bool = true;
}
/// Wrapper type for the keccak_permute argument that implements conversions
/// from and to VM word-representations according to the syscall spec.
pub struct KeccakState(pub [u64; KECCAK_CELLS]);
impl From<[Word; KECCAK_WORDS]> for KeccakState {
    fn from(words: [Word; KECCAK_WORDS]) -> Self {
        // Each u64 lane is assembled from two consecutive u32 words,
        // low half first.
        let cells = words
            .chunks_exact(2)
            .map(|pair| pair[0] as u64 | ((pair[1] as u64) << 32))
            .collect_vec();
        KeccakState(cells.try_into().expect("failed to parse words into [u64; 25]"))
    }
}
impl From<KeccakState> for [Word; KECCAK_WORDS] {
    fn from(state: KeccakState) -> [Word; KECCAK_WORDS] {
        // Inverse of the conversion above: split each lane into (low, high).
        let mut words = Vec::with_capacity(KECCAK_WORDS);
        for cell in state.0 {
            words.push(cell as Word);
            words.push((cell >> 32) as Word);
        }
        words.try_into().unwrap()
    }
}
/// Trace the execution of a Keccak permutation.
///
/// Reads the 50-word state pointed to by arg0, applies keccak-f, and writes
/// the permuted state back in place.
///
/// Compatible with:
/// https://github.com/succinctlabs/sp1/blob/013c24ea2fa15a0e7ed94f7d11a7ada4baa39ab9/crates/core/executor/src/syscalls/precompiles/keccak256/permute.rs
pub fn keccak_permute<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    let state_ptr = vm.peek_register(Platform::reg_arg0());
    // Read the argument `state_ptr`.
    let reg_ops = vec![WriteOp::new_register_op(
        Platform::reg_arg0(),
        Change::new(state_ptr, state_ptr), // read-only: value unchanged
        0, // Cycle set later in finalize().
    )];
    // View the state buffer in guest memory, permute, write back in place.
    let mut state_view = MemoryView::<_, KECCAK_WORDS>::new(vm, state_ptr);
    let mut state = KeccakState::from(state_view.words());
    keccakf(&mut state.0);
    let output_words: [Word; KECCAK_WORDS] = state.into();
    state_view.write(output_words);
    let mem_ops: Vec<WriteOp> = state_view.mem_ops().to_vec();
    assert_eq!(mem_ops.len(), KECCAK_WORDS);
    SyscallEffects {
        witness: SyscallWitness::new(mem_ops, reg_ops),
        next_pc: None, // fall through to the next instruction
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/syscalls/sha256.rs | ceno_emul/src/syscalls/sha256.rs | use crate::{Change, EmuContext, Platform, Tracer, VMState, Word, WriteOp, utils::MemoryView};
use super::{SyscallEffects, SyscallSpec, SyscallWitness};
/// Number of u32 words in the full SHA-256 message schedule (w[0..64]).
pub const SHA_EXTEND_WORDS: usize = 64; // u32 words
pub struct Sha256ExtendSpec;
impl SyscallSpec for Sha256ExtendSpec {
    const NAME: &'static str = "SHA256_EXTEND";
    // One register op: the schedule pointer in arg0.
    const REG_OPS_COUNT: usize = 1;
    const MEM_OPS_COUNT: usize = SHA_EXTEND_WORDS;
    const CODE: u32 = ceno_syscall::SHA_EXTEND;
}
/// Wrapper type for the sha_extend argument that implements conversions
/// from and to VM word-representations according to the syscall spec
/// (here the VM words and the schedule words coincide, so both
/// conversions are identity moves).
pub struct ShaExtendWords(pub [Word; SHA_EXTEND_WORDS]);
impl From<[Word; SHA_EXTEND_WORDS]> for ShaExtendWords {
    fn from(value: [Word; SHA_EXTEND_WORDS]) -> Self {
        ShaExtendWords(value)
    }
}
impl From<ShaExtendWords> for [Word; SHA_EXTEND_WORDS] {
    fn from(state: ShaExtendWords) -> [Word; SHA_EXTEND_WORDS] {
        state.0
    }
}
/// Based on: https://github.com/succinctlabs/sp1/blob/2aed8fea16a67a5b2983ffc471b2942c2f2512c8/crates/core/machine/src/syscall/precompiles/sha256/extend/mod.rs#L22
///
/// Run the SHA-256 message-schedule extension in place: `w[0..16]` must hold
/// the initial message words, and `w[16..64]` is filled from them using the
/// σ0/σ1 small-sigma functions with wrapping 32-bit addition.
/// Panics if `w.len() < 64`.
pub fn sha_extend(w: &mut [u32]) {
    for i in 16..64 {
        let w15 = w[i - 15];
        let w2 = w[i - 2];
        let s0 = w15.rotate_right(7) ^ w15.rotate_right(18) ^ (w15 >> 3);
        let s1 = w2.rotate_right(17) ^ w2.rotate_right(19) ^ (w2 >> 10);
        // Wrapping addition is commutative, so the summation order here is
        // equivalent to the reference implementation.
        w[i] = s1
            .wrapping_add(w[i - 7])
            .wrapping_add(s0)
            .wrapping_add(w[i - 16]);
    }
}
/// Trace the execution of a SHA-256 message-schedule extension: the 64-word
/// schedule pointed to by arg0 is read, extended in place, and written back.
pub fn extend<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    let state_ptr = vm.peek_register(Platform::reg_arg0());
    // Read the argument `state_ptr`.
    let reg_ops = vec![WriteOp::new_register_op(
        Platform::reg_arg0(),
        Change::new(state_ptr, state_ptr), // read-only: value unchanged
        0, // Cycle set later in finalize().
    )];
    // View the schedule in guest memory, extend, write back in place.
    let mut state_view = MemoryView::<_, SHA_EXTEND_WORDS>::new(vm, state_ptr);
    let mut sha_extend_words = ShaExtendWords::from(state_view.words());
    sha_extend(&mut sha_extend_words.0);
    let output_words: [Word; SHA_EXTEND_WORDS] = sha_extend_words.into();
    state_view.write(output_words);
    let mem_ops = state_view.mem_ops().to_vec();
    assert_eq!(mem_ops.len(), SHA_EXTEND_WORDS);
    SyscallEffects {
        witness: SyscallWitness::new(mem_ops, reg_ops),
        next_pc: None, // fall through to the next instruction
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/syscalls/secp256k1.rs | ceno_emul/src/syscalls/secp256k1.rs | use super::{SyscallEffects, SyscallSpec, SyscallWitness};
use crate::{
Change, EmuContext, Platform, Tracer, VMState, WORD_SIZE, Word, WriteOp, utils::MemoryView,
};
use itertools::Itertools;
use k256::{FieldBytes, elliptic_curve::PrimeField};
use std::iter;
pub struct Secp256k1AddSpec;
pub struct Secp256k1DoubleSpec;
pub struct Secp256k1ScalarInvertSpec;
pub struct Secp256k1DecompressSpec;
impl SyscallSpec for Secp256k1AddSpec {
    const NAME: &'static str = "SECP256K1_ADD";
    const REG_OPS_COUNT: usize = 2; // two point pointers (arg0, arg1)
    const MEM_OPS_COUNT: usize = 2 * SECP256K1_ARG_WORDS;
    const CODE: u32 = ceno_syscall::SECP256K1_ADD;
}
impl SyscallSpec for Secp256k1DoubleSpec {
    const NAME: &'static str = "SECP256K1_DOUBLE";
    const REG_OPS_COUNT: usize = 1; // single point pointer (arg0)
    const MEM_OPS_COUNT: usize = SECP256K1_ARG_WORDS;
    const CODE: u32 = ceno_syscall::SECP256K1_DOUBLE;
}
impl SyscallSpec for Secp256k1ScalarInvertSpec {
    const NAME: &'static str = "SECP256K1_SCALAR_INVERT";
    const REG_OPS_COUNT: usize = 1; // single scalar pointer (arg0)
    const MEM_OPS_COUNT: usize = COORDINATE_WORDS;
    const CODE: u32 = ceno_syscall::SECP256K1_SCALAR_INVERT;
}
impl SyscallSpec for Secp256k1DecompressSpec {
    const NAME: &'static str = "SECP256K1_DECOMPRESS";
    const REG_OPS_COUNT: usize = 2; // X pointer (arg0) + parity flag (arg1)
    const MEM_OPS_COUNT: usize = 2 * COORDINATE_WORDS;
    const CODE: u32 = ceno_syscall::SECP256K1_DECOMPRESS;
}
// A secp256k1 point in uncompressed form takes 64 bytes
pub const SECP256K1_ARG_WORDS: usize = 16;
/// Wrapper type for a point on the secp256k1 curve that implements conversions
/// from and to VM word-representations according to the syscall spec
pub struct SecpPoint(pub secp::Point);
impl From<[Word; SECP256K1_ARG_WORDS]> for SecpPoint {
    fn from(words: [Word; SECP256K1_ARG_WORDS]) -> Self {
        // Prepend the "tag" byte as expected by secp
        // (0x04 marks an uncompressed point).
        let mut bytes = iter::once(4u8)
            .chain(words.iter().flat_map(|word| word.to_le_bytes()))
            .collect_vec();
        // The call-site uses "little endian", while secp uses "big endian"
        // We need to reverse the coordinate representations
        // Reverse X coordinate
        bytes[1..33].reverse();
        // Reverse Y coordinate
        bytes[33..].reverse();
        // Panics if the bytes do not encode a valid curve point.
        SecpPoint(secp::Point::from_slice(&bytes).unwrap())
    }
}
impl From<SecpPoint> for [Word; SECP256K1_ARG_WORDS] {
    fn from(point: SecpPoint) -> [Word; SECP256K1_ARG_WORDS] {
        // reuse MaybePoint implementation
        SecpMaybePoint(point.0.into()).into()
    }
}
/// Wrapper type for a maybe-point on the secp256k1 curve that implements conversions
/// from and to VM word-representations according to the syscall spec
pub struct SecpMaybePoint(pub secp::MaybePoint);
impl From<SecpMaybePoint> for [Word; SECP256K1_ARG_WORDS] {
    fn from(maybe_point: SecpMaybePoint) -> [Word; SECP256K1_ARG_WORDS] {
        // Drop the leading "tag" byte; keep the 64 coordinate bytes.
        // NOTE(review): assumes `MaybePoint::serialize_uncompressed` yields a
        // 65-byte buffer even for the point at infinity — confirm against the
        // secp crate.
        let mut bytes: [u8; 64] = maybe_point.0.serialize_uncompressed()[1..]
            .try_into()
            .unwrap();
        // The call-site expects "little endian", while secp uses "big endian"
        // We need to reverse the coordinate representations
        // Reverse X coordinate
        bytes[..32].reverse();
        // Reverse Y coordinate
        bytes[32..].reverse();
        bytes
            .chunks_exact(4)
            .map(|chunk| Word::from_le_bytes(chunk.try_into().unwrap()))
            .collect_vec()
            .try_into()
            .unwrap()
    }
}
/// Trace the execution of a secp256k1_add call.
///
/// Reads points P (arg0) and Q (arg1), computes P + Q, and writes the
/// result over P; Q is only read.
pub fn secp256k1_add<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    let p_ptr = vm.peek_register(Platform::reg_arg0());
    let q_ptr = vm.peek_register(Platform::reg_arg1());
    // Read the argument pointers
    let reg_ops = vec![
        WriteOp::new_register_op(
            Platform::reg_arg0(),
            Change::new(p_ptr, p_ptr),
            0, // Cycle set later in finalize().
        ),
        WriteOp::new_register_op(
            Platform::reg_arg1(),
            Change::new(q_ptr, q_ptr),
            0, // Cycle set later in finalize().
        ),
    ];
    // Memory segments of P and Q
    let [mut p_view, q_view] =
        [p_ptr, q_ptr].map(|start| MemoryView::<_, SECP256K1_ARG_WORDS>::new(vm, start));
    // Read P and Q from words via wrapper type
    let [p, q] = [&p_view, &q_view].map(|view| SecpPoint::from(view.words()));
    // Compute the sum and convert back to words
    // (the sum may be the point at infinity, hence `SecpMaybePoint`).
    let sum = SecpMaybePoint(p.0 + q.0);
    let output_words: [Word; SECP256K1_ARG_WORDS] = sum.into();
    p_view.write(output_words);
    let mem_ops = p_view
        .mem_ops()
        .into_iter()
        .chain(q_view.mem_ops())
        .collect_vec();
    assert_eq!(mem_ops.len(), 2 * SECP256K1_ARG_WORDS);
    SyscallEffects {
        witness: SyscallWitness::new(mem_ops, reg_ops),
        next_pc: None,
    }
}
/// Trace the execution of a secp256k1_double call.
///
/// Reads point P (arg0), computes 2·P, and writes the result over P.
pub fn secp256k1_double<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    let p_ptr = vm.peek_register(Platform::reg_arg0());
    // Read the argument pointers
    let reg_ops = vec![WriteOp::new_register_op(
        Platform::reg_arg0(),
        Change::new(p_ptr, p_ptr),
        0, // Cycle set later in finalize().
    )];
    // P's memory segment
    let mut p_view = MemoryView::<_, SECP256K1_ARG_WORDS>::new(vm, p_ptr);
    // Create point from words via wrapper type
    let p = SecpPoint::from(p_view.words());
    // Compute result and convert back into words. Doubling is done as a
    // scalar multiplication by 2; since secp256k1's group order is odd, a
    // non-infinity input never doubles to infinity, so a plain `SecpPoint`
    // result suffices here.
    let result = SecpPoint(secp::Scalar::two() * p.0);
    let output_words: [Word; SECP256K1_ARG_WORDS] = result.into();
    p_view.write(output_words);
    let mem_ops = p_view.mem_ops().to_vec();
    assert_eq!(mem_ops.len(), SECP256K1_ARG_WORDS);
    SyscallEffects {
        witness: SyscallWitness::new(mem_ops, reg_ops),
        next_pc: None,
    }
}
/// Trace the execution of a secp256k1 scalar inversion: reads a scalar from
/// arg0's memory segment and overwrites it with its inverse in the curve's
/// scalar field.
pub fn secp256k1_invert<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    let p_ptr = vm.peek_register(Platform::reg_arg0());
    // Read the argument pointers
    let reg_ops = vec![WriteOp::new_register_op(
        Platform::reg_arg0(),
        Change::new(p_ptr, p_ptr),
        0, // Cycle set later in finalize().
    )];
    // P's memory segment
    let mut p_view = MemoryView::<_, COORDINATE_WORDS>::new(vm, p_ptr);
    // NOTE(review): `from_repr` takes k256's big-endian byte representation,
    // whereas the sibling syscalls in this file convert guest memory via
    // little-endian words — confirm the intended byte order with the
    // guest-side caller.
    let p = k256::Scalar::from_repr(*FieldBytes::from_slice(&p_view.bytes())).expect("illegal p");
    // Panics if p == 0 (zero has no inverse).
    let p_inv = p.invert().unwrap();
    let bytes: [u8; 32] = p_inv.to_bytes().into();
    // SAFETY: [u8; 32] and [u32; 8] have identical size, and any bit pattern
    // is valid for u32, so the reinterpretation itself is sound.
    // NOTE(review): unlike the explicit `from_le_bytes` conversions used
    // elsewhere, this transmute is endianness-dependent — verify the intended
    // word layout on the target.
    let output_words: [Word; COORDINATE_WORDS] = unsafe { std::mem::transmute(bytes) };
    p_view.write(output_words);
    let mem_ops = p_view.mem_ops().to_vec();
    assert_eq!(mem_ops.len(), COORDINATE_WORDS);
    SyscallEffects {
        witness: SyscallWitness::new(mem_ops, reg_ops),
        next_pc: None,
    }
}
pub const COORDINATE_WORDS: usize = SECP256K1_ARG_WORDS / 2;
/// Wrapper type for a single coordinate of a point on the secp256k1 curve.
/// It implements conversions from and to VM word-representations according
/// to the spec of syscall: each 32-bit word is serialized little-endian.
pub struct SecpCoordinate(pub [u8; COORDINATE_WORDS * WORD_SIZE]);
impl From<[Word; COORDINATE_WORDS]> for SecpCoordinate {
    fn from(words: [Word; COORDINATE_WORDS]) -> Self {
        let mut bytes = [0u8; COORDINATE_WORDS * WORD_SIZE];
        for (chunk, word) in bytes.chunks_exact_mut(WORD_SIZE).zip(words) {
            chunk.copy_from_slice(&word.to_le_bytes());
        }
        SecpCoordinate(bytes)
    }
}
impl From<SecpCoordinate> for [Word; COORDINATE_WORDS] {
    fn from(coord: SecpCoordinate) -> [Word; COORDINATE_WORDS] {
        let mut words = [0; COORDINATE_WORDS];
        for (word, chunk) in words.iter_mut().zip(coord.0.chunks_exact(WORD_SIZE)) {
            *word = Word::from_le_bytes(chunk.try_into().unwrap());
        }
        words
    }
}
/// Trace the execution of a secp256k1_decompress call.
///
/// Reads the X coordinate at arg0 and the Y-parity flag in arg1 (0 = even,
/// 1 = odd), recovers the matching Y coordinate, and writes it to the
/// memory segment immediately following the X coordinate.
pub fn secp256k1_decompress<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    let ptr = vm.peek_register(Platform::reg_arg0());
    let y_is_odd = vm.peek_register(Platform::reg_arg1());
    // Read the argument pointers
    let reg_ops = vec![
        WriteOp::new_register_op(
            Platform::reg_arg0(),
            Change::new(ptr, ptr),
            0, // Cycle set later in finalize().
        ),
        WriteOp::new_register_op(
            Platform::reg_arg1(),
            Change::new(y_is_odd, y_is_odd),
            0, // Cycle set later in finalize().
        ),
    ];
    // Memory segment of X coordinate
    let input_view = MemoryView::<_, COORDINATE_WORDS>::new(vm, ptr);
    // Memory segment where Y coordinate will be written
    let mut output_view =
        MemoryView::<_, COORDINATE_WORDS>::new(vm, ptr + (COORDINATE_WORDS * WORD_SIZE) as u32);
    let point = {
        // Encode parity byte according to secp spec
        // (0x02 = even Y, 0x03 = odd Y in SEC1 compressed form).
        let parity_byte = match y_is_odd {
            0 => 2,
            1 => 3,
            _ => panic!("y_is_odd should be 0/1"),
        };
        // Read bytes of the X coordinate
        let coordinate_bytes = SecpCoordinate::from(input_view.words()).0;
        // Prepend parity byte to complete compressed repr.
        let bytes = iter::once(parity_byte)
            .chain(coordinate_bytes.iter().cloned())
            .collect::<Vec<u8>>();
        // Panics if X is not the abscissa of a curve point.
        secp::Point::from_slice(&bytes).unwrap()
    };
    // Get uncompressed repr. of the point and extract the Y-coordinate bytes
    // Y-coordinate is the second half after eliminating the "tag" byte
    let y_bytes: [u8; 32] = point.serialize_uncompressed()[1..][32..]
        .try_into()
        .unwrap();
    // Convert into words via the internal wrapper type
    let output_words: [Word; COORDINATE_WORDS] = SecpCoordinate(y_bytes).into();
    output_view.write(output_words);
    let y_mem_ops = output_view.mem_ops();
    let x_mem_ops = input_view.mem_ops();
    // X ops first, then Y ops, matching the spec's mem-op ordering.
    let mem_ops = x_mem_ops.into_iter().chain(y_mem_ops).collect_vec();
    assert_eq!(mem_ops.len(), 2 * COORDINATE_WORDS);
    SyscallEffects {
        witness: SyscallWitness::new(mem_ops, reg_ops),
        next_pc: None,
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/syscalls/uint256.rs | ceno_emul/src/syscalls/uint256.rs | use crate::{
Change, EmuContext, Platform, SyscallSpec, Tracer, VMState, WriteOp,
syscalls::{SyscallEffects, SyscallWitness},
utils::MemoryView,
};
use itertools::Itertools;
use num::{BigUint, One, Zero};
use sp1_curves::{
params::NumWords,
uint256::U256Field,
utils::{biguint_from_le_words, biguint_to_words},
};
use typenum::marker_traits::Unsigned;
type WordsFieldElement = <U256Field as NumWords>::WordsFieldElement;
/// Number of u32 words in a 256-bit operand.
pub const UINT256_WORDS_FIELD_ELEMENT: usize = WordsFieldElement::USIZE;
pub struct Uint256MulSpec;
impl SyscallSpec for Uint256MulSpec {
    const NAME: &'static str = "UINT256_MUL";
    const REG_OPS_COUNT: usize = 2; // x pointer (arg0) + y/modulus pointer (arg1)
    const MEM_OPS_COUNT: usize = 3 * UINT256_WORDS_FIELD_ELEMENT; // x, y, modulus
    const CODE: u32 = ceno_syscall::UINT256_MUL;
}
/// Trace the execution of a 256-bit modular multiplication.
///
/// arg0 points at x; arg1 points at y immediately followed by the modulus.
/// Computes (x * y) mod modulus and writes the result over x; a zero
/// modulus is interpreted as 2^256 (plain wrapping 256-bit multiply).
pub fn uint256_mul<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    let x_ptr = vm.peek_register(Platform::reg_arg0());
    let y_ptr = vm.peek_register(Platform::reg_arg1());
    // Read the argument pointers
    let reg_ops = vec![
        WriteOp::new_register_op(
            Platform::reg_arg0(),
            Change::new(x_ptr, x_ptr),
            0, // Cycle set later in finalize().
        ),
        WriteOp::new_register_op(
            Platform::reg_arg1(),
            Change::new(y_ptr, y_ptr),
            0, // Cycle set later in finalize().
        ),
    ];
    // Memory segments of x, y, and modulus
    let mut x_view = MemoryView::<_, UINT256_WORDS_FIELD_ELEMENT>::new(vm, x_ptr);
    let y_and_modulus_view = MemoryView::<_, { UINT256_WORDS_FIELD_ELEMENT * 2 }>::new(vm, y_ptr);
    // Read x, y, and modulus from words via wrapper type
    let x = biguint_from_le_words(&x_view.words());
    let y = biguint_from_le_words(&y_and_modulus_view.words()[..UINT256_WORDS_FIELD_ELEMENT]);
    let modulus = biguint_from_le_words(&y_and_modulus_view.words()[UINT256_WORDS_FIELD_ELEMENT..]);
    // Perform the multiplication and take the result modulo the modulus.
    let result: BigUint = if modulus.is_zero() {
        // Zero modulus means "reduce modulo 2^256" (truncate to 256 bits).
        let modulus = BigUint::one() << 256;
        (x * y) % modulus
    } else {
        (x * y) % modulus
    };
    // Convert the result to little endian u32 words.
    let result_words = biguint_to_words(&result, UINT256_WORDS_FIELD_ELEMENT)
        .try_into()
        .unwrap();
    x_view.write(result_words);
    let mem_ops = x_view
        .mem_ops()
        .into_iter()
        .chain(y_and_modulus_view.mem_ops())
        .collect_vec();
    SyscallEffects {
        witness: SyscallWitness::new(mem_ops, reg_ops),
        next_pc: None,
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/syscalls/phantom/mod.rs | ceno_emul/src/syscalls/phantom/mod.rs | use crate::{
Change, EmuContext, Platform, SyscallSpec, Tracer, VMState, WordAddr, WriteOp,
syscalls::{SyscallEffects, SyscallWitness},
};
use itertools::Itertools;
/// Syscall metadata for the phantom "log pc/cycle" debugging syscall.
pub struct LogPcCycleSpec;
impl SyscallSpec for LogPcCycleSpec {
    const NAME: &'static str = "LOG_PC_CYCLE";
    const REG_OPS_COUNT: usize = 2; // label pointer (arg0) + label length (arg1)
    const MEM_OPS_COUNT: usize = 0; // the label reads are peeked, not traced
    const CODE: u32 = ceno_syscall::PHANTOM_LOG_PC_CYCLE;
}
/// Phantom syscall: log the current PC and cycle together with a label
/// string read from guest memory. Apart from tracing the two (unchanged)
/// argument registers, the VM state is left untouched.
pub fn log_pc_cycle<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    let label_ptr = vm.peek_register(Platform::reg_arg0());
    let label_len = vm.peek_register(Platform::reg_arg1());
    // Trace reads of the two argument registers (values unchanged).
    let reg_ops = vec![
        WriteOp::new_register_op(
            Platform::reg_arg0(),
            Change::new(label_ptr, label_ptr),
            0, // Cycle set later in finalize().
        ),
        WriteOp::new_register_op(
            Platform::reg_arg1(),
            Change::new(label_len, label_len),
            0, // Cycle set later in finalize().
        ),
    ];
    // Reassemble the label byte-by-byte: each byte is extracted from the
    // word that contains it, which also handles unaligned start addresses.
    let label_bytes: Vec<u8> = (label_ptr..label_ptr + label_len)
        .map(|addr| {
            let word = vm.peek_memory(WordAddr::from(addr));
            let shift = (addr % 4) * 8;
            ((word >> shift) & 0xFF) as u8
        })
        .collect_vec();
    tracing::debug!(
        "PHANTOM_SYSCALL_LOG_PC_CYCLE: label={},pc={:x},cycle={}",
        String::from_utf8_lossy(&label_bytes),
        vm.get_pc().0,
        vm.tracer().cycle()
    );
    SyscallEffects {
        witness: SyscallWitness::new(vec![], reg_ops),
        next_pc: None,
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/syscalls/bn254/types.rs | ceno_emul/src/syscalls/bn254/types.rs | use itertools::Itertools;
use substrate_bn::{AffineG1, Fq, Fq2, Fr, G1};
use crate::Word;
/// Words per Bn254 base-field element (8 × 32 bits = 256 bits).
pub const BN254_FP_WORDS: usize = 8;
pub const BN254_FP2_WORDS: usize = 2 * BN254_FP_WORDS;
pub const BN254_POINT_WORDS: usize = 2 * BN254_FP_WORDS;
/// Wrapper around a Bn254 base-field element; converts between the guest's
/// little-endian word layout and substrate_bn's big-endian byte layout.
pub struct Bn254Fp(substrate_bn::Fq);
impl From<[Word; BN254_FP_WORDS]> for Bn254Fp {
    fn from(value: [Word; BN254_FP_WORDS]) -> Self {
        // Little-endian words flattened then reversed give the big-endian
        // byte string that `Fq::from_slice` expects.
        let bytes_be = value
            .iter()
            .flat_map(|word| word.to_le_bytes())
            .rev()
            .collect_vec();
        // Panics on a non-canonical field element.
        Bn254Fp(Fq::from_slice(&bytes_be).expect("cannot parse Fq"))
    }
}
impl From<Bn254Fp> for [Word; BN254_FP_WORDS] {
    fn from(value: Bn254Fp) -> Self {
        let mut bytes_be = [0u8; 32];
        value
            .0
            .to_big_endian(&mut bytes_be)
            .expect("cannot serialize Fq");
        // Reverse back to little-endian before packing into words.
        bytes_be.reverse();
        bytes_be
            .chunks_exact(4)
            .map(|chunk| Word::from_le_bytes(chunk.try_into().unwrap()))
            .collect_vec()
            .try_into()
            .unwrap()
    }
}
impl std::ops::Add for Bn254Fp {
    type Output = Bn254Fp;
    fn add(self, rhs: Self) -> Self::Output {
        Bn254Fp(self.0 + rhs.0)
    }
}
impl std::ops::Mul for Bn254Fp {
    type Output = Bn254Fp;
    fn mul(self, rhs: Self) -> Self::Output {
        Bn254Fp(self.0 * rhs.0)
    }
}
/// Wrapper around a Bn254 quadratic-extension element; the word layout is
/// the real part followed by the imaginary part.
pub struct Bn254Fp2(substrate_bn::Fq2);
impl From<[Word; BN254_FP2_WORDS]> for Bn254Fp2 {
    fn from(value: [Word; BN254_FP2_WORDS]) -> Self {
        let first_half: [Word; BN254_FP_WORDS] = value[..BN254_FP_WORDS].try_into().unwrap();
        let second_half: [Word; BN254_FP_WORDS] = value[BN254_FP_WORDS..].try_into().unwrap();
        // notation: Fq2 is a + bi (a real and b imaginary)
        let a = Bn254Fp::from(first_half).0;
        let b = Bn254Fp::from(second_half).0;
        Bn254Fp2(Fq2::new(a, b))
    }
}
impl From<Bn254Fp2> for [Word; BN254_FP2_WORDS] {
    fn from(value: Bn254Fp2) -> Self {
        // notation: Fq2 is a + bi (a real and b imaginary)
        let first_half: [Word; BN254_FP_WORDS] = Bn254Fp(value.0.real()).into();
        let second_half: [Word; BN254_FP_WORDS] = Bn254Fp(value.0.imaginary()).into();
        [first_half, second_half].concat().try_into().unwrap()
    }
}
impl std::ops::Add for Bn254Fp2 {
    type Output = Bn254Fp2;
    fn add(self, rhs: Self) -> Self::Output {
        Bn254Fp2(self.0 + rhs.0)
    }
}
impl std::ops::Mul for Bn254Fp2 {
    type Output = Bn254Fp2;
    fn mul(self, rhs: Self) -> Self::Output {
        Bn254Fp2(self.0 * rhs.0)
    }
}
/// Wrapper around a Bn254 G1 point; the word layout is the affine X
/// coordinate followed by the affine Y coordinate.
#[derive(Debug)]
pub struct Bn254Point(substrate_bn::G1);
impl From<[Word; BN254_POINT_WORDS]> for Bn254Point {
    fn from(value: [Word; BN254_POINT_WORDS]) -> Self {
        let first_half: [Word; BN254_FP_WORDS] = value[..BN254_FP_WORDS].try_into().unwrap();
        let second_half: [Word; BN254_FP_WORDS] = value[BN254_FP_WORDS..].try_into().unwrap();
        let a = Bn254Fp::from(first_half).0;
        let b = Bn254Fp::from(second_half).0;
        // Interprets (a, b) as affine coordinates via a Jacobian point with
        // z = 1. NOTE(review): no on-curve check is performed here — confirm
        // the caller guarantees (a, b) lies on the curve.
        Bn254Point(G1::new(a, b, Fq::one()))
    }
}
impl From<Bn254Point> for [Word; BN254_POINT_WORDS] {
    fn from(value: Bn254Point) -> Self {
        // Panics if the point cannot be normalized to affine form
        // (e.g. the point at infinity).
        let affine = AffineG1::from_jacobian(value.0).expect("cannot unpack affine");
        let first_half: [Word; BN254_FP_WORDS] = Bn254Fp(affine.x()).into();
        let second_half: [Word; BN254_FP_WORDS] = Bn254Fp(affine.y()).into();
        [first_half, second_half].concat().try_into().unwrap()
    }
}
impl std::ops::Add for Bn254Point {
    type Output = Bn254Point;
    fn add(self, rhs: Self) -> Self::Output {
        Bn254Point(self.0 + rhs.0)
    }
}
impl Bn254Point {
    /// Double the point, implemented as scalar multiplication by 2.
    pub fn double(&self) -> Self {
        let two = Fr::from_str("2").unwrap();
        Bn254Point(self.0 * two)
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/syscalls/bn254/mod.rs | ceno_emul/src/syscalls/bn254/mod.rs | mod bn254_curve;
mod bn254_fptower;
mod types;
pub use bn254_curve::{Bn254AddSpec, Bn254DoubleSpec, bn254_add, bn254_double};
pub use bn254_fptower::{
Bn254Fp2AddSpec, Bn254Fp2MulSpec, Bn254FpAddSpec, Bn254FpMulSpec, bn254_fp_add, bn254_fp_mul,
bn254_fp2_add, bn254_fp2_mul,
};
pub use types::{BN254_FP_WORDS, BN254_FP2_WORDS, BN254_POINT_WORDS};
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/syscalls/bn254/bn254_fptower.rs | ceno_emul/src/syscalls/bn254/bn254_fptower.rs | use itertools::Itertools;
use crate::{
Change, EmuContext, Platform, SyscallSpec, Tracer, VMState, Word, WriteOp,
syscalls::{
SyscallEffects, SyscallWitness,
bn254::types::{Bn254Fp, Bn254Fp2},
},
utils::MemoryView,
};
use super::types::{BN254_FP_WORDS, BN254_FP2_WORDS};
/// Syscall metadata for BN254 base-field (Fp) addition: two pointer
/// registers, one read-modify-write pass over two Fp operands.
pub struct Bn254FpAddSpec;
impl SyscallSpec for Bn254FpAddSpec {
    const NAME: &'static str = "BN254_FP_ADD";
    const REG_OPS_COUNT: usize = 2;
    const MEM_OPS_COUNT: usize = 2 * BN254_FP_WORDS;
    const CODE: u32 = ceno_syscall::BN254_FP_ADD;
}
/// Syscall metadata for BN254 quadratic-extension (Fp2) addition.
pub struct Bn254Fp2AddSpec;
impl SyscallSpec for Bn254Fp2AddSpec {
    const NAME: &'static str = "BN254_FP2_ADD";
    const REG_OPS_COUNT: usize = 2;
    const MEM_OPS_COUNT: usize = 2 * BN254_FP2_WORDS;
    const CODE: u32 = ceno_syscall::BN254_FP2_ADD;
}
/// Syscall metadata for BN254 base-field (Fp) multiplication.
pub struct Bn254FpMulSpec;
impl SyscallSpec for Bn254FpMulSpec {
    const NAME: &'static str = "BN254_FP_MUL";
    const REG_OPS_COUNT: usize = 2;
    const MEM_OPS_COUNT: usize = 2 * BN254_FP_WORDS;
    const CODE: u32 = ceno_syscall::BN254_FP_MUL;
}
/// Syscall metadata for BN254 quadratic-extension (Fp2) multiplication.
pub struct Bn254Fp2MulSpec;
impl SyscallSpec for Bn254Fp2MulSpec {
    const NAME: &'static str = "BN254_FP2_MUL";
    const REG_OPS_COUNT: usize = 2;
    const MEM_OPS_COUNT: usize = 2 * BN254_FP2_WORDS;
    const CODE: u32 = ceno_syscall::BN254_FP2_MUL;
}
/// Shared implementation of the four Fp/Fp2 binary-operation syscalls.
///
/// Reads two `WORDS`-word operands from the addresses held in the two
/// argument registers, combines them with `+` (when `IS_ADD`) or `*`
/// (otherwise), and writes the result back over the first operand.
fn bn254_fptower_binary_op<
    const WORDS: usize,
    const IS_ADD: bool,
    F: From<[Word; WORDS]>
        + Into<[Word; WORDS]>
        + std::ops::Add<Output = F>
        + std::ops::Mul<Output = F>,
    T: Tracer,
>(
    vm: &VMState<T>,
) -> SyscallEffects {
    let lhs_ptr = vm.peek_register(Platform::reg_arg0());
    let rhs_ptr = vm.peek_register(Platform::reg_arg1());
    // Record the (unchanged) argument registers; the cycle field is filled in
    // later by finalize().
    let reg_ops = vec![
        WriteOp::new_register_op(Platform::reg_arg0(), Change::new(lhs_ptr, lhs_ptr), 0),
        WriteOp::new_register_op(Platform::reg_arg1(), Change::new(rhs_ptr, rhs_ptr), 0),
    ];
    // Memory segments of the two operands.
    let mut lhs_view = MemoryView::<_, WORDS>::new(vm, lhs_ptr);
    let rhs_view = MemoryView::<_, WORDS>::new(vm, rhs_ptr);
    let lhs = F::from(lhs_view.words());
    let rhs = F::from(rhs_view.words());
    let result = if IS_ADD { lhs + rhs } else { lhs * rhs };
    // The result overwrites the first operand in place.
    lhs_view.write(result.into());
    let mem_ops: Vec<_> = lhs_view
        .mem_ops()
        .into_iter()
        .chain(rhs_view.mem_ops())
        .collect();
    assert_eq!(mem_ops.len(), 2 * WORDS);
    SyscallEffects {
        witness: SyscallWitness::new(mem_ops, reg_ops),
        next_pc: None,
    }
}
/// Syscall handler: `p += q` over the BN254 base field Fp (result overwrites
/// the operand pointed to by arg0).
pub fn bn254_fp_add<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    bn254_fptower_binary_op::<BN254_FP_WORDS, true, Bn254Fp, T>(vm)
}
/// Syscall handler: `p *= q` over the BN254 base field Fp.
pub fn bn254_fp_mul<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    bn254_fptower_binary_op::<BN254_FP_WORDS, false, Bn254Fp, T>(vm)
}
/// Syscall handler: `p += q` over the quadratic extension Fp2.
pub fn bn254_fp2_add<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    bn254_fptower_binary_op::<BN254_FP2_WORDS, true, Bn254Fp2, T>(vm)
}
/// Syscall handler: `p *= q` over the quadratic extension Fp2.
pub fn bn254_fp2_mul<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    bn254_fptower_binary_op::<BN254_FP2_WORDS, false, Bn254Fp2, T>(vm)
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/syscalls/bn254/bn254_curve.rs | ceno_emul/src/syscalls/bn254/bn254_curve.rs | use crate::{
Change, EmuContext, Platform, SyscallSpec, Tracer, VMState, Word, WriteOp,
syscalls::{SyscallEffects, SyscallWitness, bn254::types::Bn254Point},
utils::MemoryView,
};
use super::types::BN254_POINT_WORDS;
use itertools::Itertools;
/// Syscall metadata for BN254 point addition (`P += Q`): two pointer
/// registers, one read/write pass over two full points.
pub struct Bn254AddSpec;
impl SyscallSpec for Bn254AddSpec {
    const NAME: &'static str = "BN254_ADD";
    const REG_OPS_COUNT: usize = 2;
    const MEM_OPS_COUNT: usize = 2 * BN254_POINT_WORDS;
    const CODE: u32 = ceno_syscall::BN254_ADD;
}
/// Syscall metadata for BN254 point doubling (`P = 2P`), in place over one
/// point.
///
/// NOTE(review): the `bn254_double` handler below records only ONE register
/// op (arg0; arg1 is peeked and asserted zero but not recorded), yet this
/// spec declares REG_OPS_COUNT = 2 — confirm which side is intended.
pub struct Bn254DoubleSpec;
impl SyscallSpec for Bn254DoubleSpec {
    const NAME: &'static str = "BN254_DOUBLE";
    const REG_OPS_COUNT: usize = 2;
    const MEM_OPS_COUNT: usize = BN254_POINT_WORDS;
    const CODE: u32 = ceno_syscall::BN254_DOUBLE;
}
/// Syscall handler: elliptic-curve point addition `P += Q` on BN254.
///
/// Reads one point each from the addresses in arg0/arg1, adds them, and
/// writes the sum back over P. Register and memory accesses are captured as
/// witness ops for the proof; cycles are assigned later in finalize().
pub fn bn254_add<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    let p_ptr = vm.peek_register(Platform::reg_arg0());
    let q_ptr = vm.peek_register(Platform::reg_arg1());
    // Read the argument pointers (registers are read but left unchanged).
    let reg_ops = vec![
        WriteOp::new_register_op(
            Platform::reg_arg0(),
            Change::new(p_ptr, p_ptr),
            0, // Cycle set later in finalize().
        ),
        WriteOp::new_register_op(
            Platform::reg_arg1(),
            Change::new(q_ptr, q_ptr),
            0, // Cycle set later in finalize().
        ),
    ];
    // Memory segments of P and Q
    let [mut p_view, q_view] =
        [p_ptr, q_ptr].map(|start| MemoryView::<_, BN254_POINT_WORDS>::new(vm, start));
    // Read P and Q from words via wrapper type.
    // NOTE(review): no on-curve or distinctness check happens here — the
    // wrapper's `From` trusts the words as-is; see the open question below.
    let [p, q] = [&p_view, &q_view].map(|view| Bn254Point::from(view.words()));
    // TODO: what does sp1 do with invalid points? equal points?
    // Compute the sum and convert back to words
    let output_words: [Word; BN254_POINT_WORDS] = (p + q).into();
    p_view.write(output_words);
    // Witness order: all of P's ops first, then Q's.
    let mem_ops = p_view
        .mem_ops()
        .into_iter()
        .chain(q_view.mem_ops())
        .collect_vec();
    assert_eq!(mem_ops.len(), 2 * BN254_POINT_WORDS);
    SyscallEffects {
        witness: SyscallWitness::new(mem_ops, reg_ops),
        next_pc: None,
    }
}
/// Syscall handler: elliptic-curve point doubling `P = 2P` on BN254,
/// in place at the address held in arg0.
pub fn bn254_double<T: Tracer>(vm: &VMState<T>) -> SyscallEffects {
    let p_ptr = vm.peek_register(Platform::reg_arg0());
    // for compatibility with sp1 spec: the second argument must be null.
    assert_eq!(vm.peek_register(Platform::reg_arg1()), 0);
    // Read the argument pointer (only arg0 is recorded as a register op).
    let reg_ops = vec![WriteOp::new_register_op(
        Platform::reg_arg0(),
        Change::new(p_ptr, p_ptr),
        0, // Cycle set later in finalize().
    )];
    // P's memory segment
    let mut p_view = MemoryView::<_, BN254_POINT_WORDS>::new(vm, p_ptr);
    // Create point from words via wrapper type
    let p = Bn254Point::from(p_view.words());
    let result = p.double();
    let output_words: [Word; BN254_POINT_WORDS] = result.into();
    p_view.write(output_words);
    let mem_ops = p_view.mem_ops().to_vec();
    assert_eq!(mem_ops.len(), BN254_POINT_WORDS);
    SyscallEffects {
        witness: SyscallWitness::new(mem_ops, reg_ops),
        next_pc: None,
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/src/disassemble/mod.rs | ceno_emul/src/disassemble/mod.rs | use crate::rv32im::{InsnKind, Instruction};
use itertools::izip;
use rrs_lib::{
InstructionProcessor,
instruction_formats::{BType, IType, ITypeCSR, ITypeShamt, JType, RType, SType, UType},
process_instruction,
};
/// A transpiler that converts the 32-bit encoded instructions into instructions.
pub(crate) struct InstructionTranspiler {
    /// Program counter of the instruction being transpiled; read by
    /// `process_auipc` when the `u16limb_circuit` feature is disabled
    /// (hence dead_code under that feature).
    #[allow(dead_code)]
    pc: u32,
    /// Raw 32-bit instruction word, carried verbatim into every decoded
    /// `Instruction`.
    word: u32,
}
impl Instruction {
    /// Create a new [`Instruction`] from an R-type instruction.
    /// R-type has no immediate, so `imm` is zeroed.
    #[must_use]
    pub const fn from_r_type(kind: InsnKind, dec_insn: &RType, raw: u32) -> Self {
        Self {
            kind,
            rd: dec_insn.rd,
            rs1: dec_insn.rs1,
            rs2: dec_insn.rs2,
            imm: 0,
            raw,
        }
    }
    /// Create a new [`Instruction`] from an I-type instruction.
    /// I-type has no second source register, so `rs2` is zeroed.
    #[must_use]
    pub const fn from_i_type(kind: InsnKind, dec_insn: &IType, raw: u32) -> Self {
        Self {
            kind,
            rd: dec_insn.rd,
            rs1: dec_insn.rs1,
            imm: dec_insn.imm,
            rs2: 0,
            raw,
        }
    }
    /// Create a new [`Instruction`] from an I-type instruction with a shamt.
    /// The shift amount is stored in `imm`.
    #[must_use]
    pub const fn from_i_type_shamt(kind: InsnKind, dec_insn: &ITypeShamt, raw: u32) -> Self {
        Self {
            kind,
            rd: dec_insn.rd,
            rs1: dec_insn.rs1,
            imm: dec_insn.shamt as i32,
            rs2: 0,
            raw,
        }
    }
    /// Create a new [`Instruction`] from an S-type instruction.
    /// Stores have no destination register, so `rd` is zeroed.
    #[must_use]
    pub const fn from_s_type(kind: InsnKind, dec_insn: &SType, raw: u32) -> Self {
        Self {
            kind,
            rd: 0,
            rs1: dec_insn.rs1,
            rs2: dec_insn.rs2,
            imm: dec_insn.imm,
            raw,
        }
    }
    /// Create a new [`Instruction`] from a B-type instruction.
    /// Branches have no destination register, so `rd` is zeroed.
    #[must_use]
    pub const fn from_b_type(kind: InsnKind, dec_insn: &BType, raw: u32) -> Self {
        Self {
            kind,
            rd: 0,
            rs1: dec_insn.rs1,
            rs2: dec_insn.rs2,
            imm: dec_insn.imm,
            raw,
        }
    }
    /// Create a new [`Instruction`] that is not implemented: everything is
    /// zeroed except the raw word, and the kind is INVALID.
    #[must_use]
    pub const fn unimp(raw: u32) -> Self {
        Self {
            kind: InsnKind::INVALID,
            rd: 0,
            rs1: 0,
            rs2: 0,
            imm: 0,
            raw,
        }
    }
}
// Lowers each rrs_lib-decoded RISC-V instruction into the internal
// `Instruction` representation. Unsupported instructions (CSR, fence, mret,
// wfi, ebreak) become `Instruction::unimp`.
impl InstructionProcessor for InstructionTranspiler {
    type InstructionResult = Instruction;
    // --- RV32I integer register/immediate ALU ---
    fn process_add(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::ADD, &dec_insn, self.word)
    }
    fn process_addi(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction::from_i_type(InsnKind::ADDI, &dec_insn, self.word)
    }
    fn process_sub(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::SUB, &dec_insn, self.word)
    }
    fn process_xor(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::XOR, &dec_insn, self.word)
    }
    fn process_xori(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction::from_i_type(InsnKind::XORI, &dec_insn, self.word)
    }
    fn process_or(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::OR, &dec_insn, self.word)
    }
    fn process_ori(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction::from_i_type(InsnKind::ORI, &dec_insn, self.word)
    }
    fn process_and(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::AND, &dec_insn, self.word)
    }
    fn process_andi(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction::from_i_type(InsnKind::ANDI, &dec_insn, self.word)
    }
    // --- Shifts (immediate forms carry the shamt in `imm`) ---
    fn process_sll(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::SLL, &dec_insn, self.word)
    }
    fn process_slli(&mut self, dec_insn: ITypeShamt) -> Self::InstructionResult {
        Instruction::from_i_type_shamt(InsnKind::SLLI, &dec_insn, self.word)
    }
    fn process_srl(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::SRL, &dec_insn, self.word)
    }
    fn process_srli(&mut self, dec_insn: ITypeShamt) -> Self::InstructionResult {
        Instruction::from_i_type_shamt(InsnKind::SRLI, &dec_insn, self.word)
    }
    fn process_sra(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::SRA, &dec_insn, self.word)
    }
    fn process_srai(&mut self, dec_insn: ITypeShamt) -> Self::InstructionResult {
        Instruction::from_i_type_shamt(InsnKind::SRAI, &dec_insn, self.word)
    }
    // --- Comparisons ---
    fn process_slt(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::SLT, &dec_insn, self.word)
    }
    fn process_slti(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction::from_i_type(InsnKind::SLTI, &dec_insn, self.word)
    }
    fn process_sltu(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::SLTU, &dec_insn, self.word)
    }
    // Note: rrs_lib spells this hook `sltui`; the ISA mnemonic is SLTIU.
    fn process_sltui(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction::from_i_type(InsnKind::SLTIU, &dec_insn, self.word)
    }
    // --- Loads ---
    fn process_lb(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction::from_i_type(InsnKind::LB, &dec_insn, self.word)
    }
    fn process_lh(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction::from_i_type(InsnKind::LH, &dec_insn, self.word)
    }
    fn process_lw(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction::from_i_type(InsnKind::LW, &dec_insn, self.word)
    }
    fn process_lbu(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction::from_i_type(InsnKind::LBU, &dec_insn, self.word)
    }
    fn process_lhu(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction::from_i_type(InsnKind::LHU, &dec_insn, self.word)
    }
    // --- Stores ---
    fn process_sb(&mut self, dec_insn: SType) -> Self::InstructionResult {
        Instruction::from_s_type(InsnKind::SB, &dec_insn, self.word)
    }
    fn process_sh(&mut self, dec_insn: SType) -> Self::InstructionResult {
        Instruction::from_s_type(InsnKind::SH, &dec_insn, self.word)
    }
    fn process_sw(&mut self, dec_insn: SType) -> Self::InstructionResult {
        Instruction::from_s_type(InsnKind::SW, &dec_insn, self.word)
    }
    // --- Branches ---
    fn process_beq(&mut self, dec_insn: BType) -> Self::InstructionResult {
        Instruction::from_b_type(InsnKind::BEQ, &dec_insn, self.word)
    }
    fn process_bne(&mut self, dec_insn: BType) -> Self::InstructionResult {
        Instruction::from_b_type(InsnKind::BNE, &dec_insn, self.word)
    }
    fn process_blt(&mut self, dec_insn: BType) -> Self::InstructionResult {
        Instruction::from_b_type(InsnKind::BLT, &dec_insn, self.word)
    }
    fn process_bge(&mut self, dec_insn: BType) -> Self::InstructionResult {
        Instruction::from_b_type(InsnKind::BGE, &dec_insn, self.word)
    }
    fn process_bltu(&mut self, dec_insn: BType) -> Self::InstructionResult {
        Instruction::from_b_type(InsnKind::BLTU, &dec_insn, self.word)
    }
    fn process_bgeu(&mut self, dec_insn: BType) -> Self::InstructionResult {
        Instruction::from_b_type(InsnKind::BGEU, &dec_insn, self.word)
    }
    // --- Jumps ---
    fn process_jal(&mut self, dec_insn: JType) -> Self::InstructionResult {
        Instruction {
            kind: InsnKind::JAL,
            rd: dec_insn.rd,
            rs1: 0,
            rs2: 0,
            imm: dec_insn.imm,
            raw: self.word,
        }
    }
    fn process_jalr(&mut self, dec_insn: IType) -> Self::InstructionResult {
        Instruction {
            kind: InsnKind::JALR,
            rd: dec_insn.rd,
            rs1: dec_insn.rs1,
            rs2: 0,
            imm: dec_insn.imm,
            raw: self.word,
        }
    }
    fn process_lui(&mut self, dec_insn: UType) -> Self::InstructionResult {
        // Verify assumption that the immediate is already shifted left by 12 bits.
        assert_eq!(dec_insn.imm & 0xfff, 0);
        #[cfg(not(feature = "u16limb_circuit"))]
        {
            // Convert LUI to ADDI.
            //
            // RiscV's load-upper-immediate instruction is necessary to build arbitrary constants,
            // because its ADDI can only have a relatively small immediate value: there's just not
            // enough space in the 32 bits for more.
            //
            // Our internal ADDI does not have this limitation, so we can convert LUI to ADDI.
            // See [`InstructionTranspiler::process_auipc`] for more background on the conversion.
            Instruction {
                kind: InsnKind::ADDI,
                rd: dec_insn.rd,
                rs1: 0,
                rs2: 0,
                imm: dec_insn.imm,
                raw: self.word,
            }
        }
        #[cfg(feature = "u16limb_circuit")]
        {
            // With u16limb circuits, LUI is kept as a first-class instruction.
            Instruction {
                kind: InsnKind::LUI,
                rd: dec_insn.rd,
                rs1: 0,
                rs2: 0,
                imm: dec_insn.imm,
                raw: self.word,
            }
        }
    }
    fn process_auipc(&mut self, dec_insn: UType) -> Self::InstructionResult {
        // Verify our assumption that the immediate is already shifted left by 12 bits.
        assert_eq!(dec_insn.imm & 0xfff, 0);
        #[cfg(not(feature = "u16limb_circuit"))]
        {
            let pc = self.pc;
            // Convert AUIPC to ADDI.
            //
            // RiscV's instructions are designed to be (mosty) position-independent. AUIPC is used
            // to get access to the current program counter, even if the code has been moved around
            // by the linker.
            //
            // Our conversion here happens after the linker has done its job, so we can safely hardcode
            // the current program counter into the immediate value of our internal ADDI.
            //
            // Note that our internal ADDI can have arbitrary intermediate values, not just 12 bits.
            //
            // ADDI is slightly more general than LUI or AUIPC, because you can also specify an
            // input register rs1. That generality might cost us sligthtly in the non-recursive proof,
            // but we suspect decreasing the total number of different instruction kinds will speed up
            // the recursive proof.
            //
            // In any case, AUIPC and LUI together make up ~0.1% of instructions executed in typical
            // real world scenarios like a `reth` run.
            Instruction {
                kind: InsnKind::ADDI,
                rd: dec_insn.rd,
                rs1: 0,
                rs2: 0,
                imm: dec_insn.imm.wrapping_add(pc as i32),
                raw: self.word,
            }
        }
        #[cfg(feature = "u16limb_circuit")]
        {
            // With u16limb circuits, AUIPC is kept as a first-class instruction.
            Instruction {
                kind: InsnKind::AUIPC,
                rd: dec_insn.rd,
                rs1: 0,
                rs2: 0,
                imm: dec_insn.imm,
                raw: self.word,
            }
        }
    }
    // --- System ---
    fn process_ecall(&mut self) -> Self::InstructionResult {
        Instruction {
            kind: InsnKind::ECALL,
            rd: 0,
            rs1: 0,
            rs2: 0,
            imm: 0,
            raw: self.word,
        }
    }
    fn process_ebreak(&mut self) -> Self::InstructionResult {
        Instruction::unimp(self.word)
    }
    // --- M extension ---
    fn process_mul(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::MUL, &dec_insn, self.word)
    }
    fn process_mulh(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::MULH, &dec_insn, self.word)
    }
    fn process_mulhu(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::MULHU, &dec_insn, self.word)
    }
    fn process_mulhsu(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::MULHSU, &dec_insn, self.word)
    }
    fn process_div(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::DIV, &dec_insn, self.word)
    }
    fn process_divu(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::DIVU, &dec_insn, self.word)
    }
    fn process_rem(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::REM, &dec_insn, self.word)
    }
    fn process_remu(&mut self, dec_insn: RType) -> Self::InstructionResult {
        Instruction::from_r_type(InsnKind::REMU, &dec_insn, self.word)
    }
    // --- Unsupported: CSR, fence, mret, wfi all lower to `unimp` ---
    fn process_csrrc(&mut self, _: ITypeCSR) -> Self::InstructionResult {
        Instruction::unimp(self.word)
    }
    fn process_csrrci(&mut self, _: ITypeCSR) -> Self::InstructionResult {
        Instruction::unimp(self.word)
    }
    fn process_csrrs(&mut self, _: ITypeCSR) -> Self::InstructionResult {
        Instruction::unimp(self.word)
    }
    fn process_csrrsi(&mut self, _: ITypeCSR) -> Self::InstructionResult {
        Instruction::unimp(self.word)
    }
    fn process_csrrw(&mut self, _: ITypeCSR) -> Self::InstructionResult {
        Instruction::unimp(self.word)
    }
    fn process_csrrwi(&mut self, _: ITypeCSR) -> Self::InstructionResult {
        Instruction::unimp(self.word)
    }
    fn process_fence(&mut self, _: IType) -> Self::InstructionResult {
        Instruction::unimp(self.word)
    }
    fn process_mret(&mut self) -> Self::InstructionResult {
        Instruction::unimp(self.word)
    }
    fn process_wfi(&mut self) -> Self::InstructionResult {
        Instruction::unimp(self.word)
    }
}
/// Transpile the [`Instruction`]s from the 32-bit encoded instructions.
#[must_use]
pub fn transpile(base: u32, instructions_u32: &[u32]) -> Vec<Instruction> {
izip!(enumerate(base, 4), instructions_u32)
.map(|(pc, &word)| {
process_instruction(&mut InstructionTranspiler { pc, word }, word)
.unwrap_or(Instruction::unimp(word))
})
.collect()
}
/// Yield `start`, `start + step`, `start + 2*step`, …
///
/// Uses `checked_add` so the sequence simply ends at the top of the `u32`
/// range instead of panicking in debug builds (or wrapping around — and thus
/// cycling forever — in release builds) when a PC near `u32::MAX` is reached.
fn enumerate(start: u32, step: u32) -> impl Iterator<Item = u32> {
    std::iter::successors(Some(start), move |&i| i.checked_add(step))
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_emul/tests/test_vm_trace.rs | ceno_emul/tests/test_vm_trace.rs | #![allow(clippy::unusual_byte_groupings)]
use anyhow::Result;
use rustc_hash::FxHashMap;
use std::{collections::BTreeMap, sync::Arc};
use ceno_emul::{
CENO_PLATFORM, Cycle, EmuContext, FullTracer as Tracer, InsnKind, Instruction, Platform,
Program, StepRecord, VMState, WordAddr, encode_rv32,
};
/// End-to-end check: run the Fibonacci demo program to completion, then
/// verify final register values, the executed opcode sequence, and the
/// cycle of the last access to each touched register.
#[test]
fn test_vm_trace() -> Result<()> {
    let program = Program::new(
        CENO_PLATFORM.pc_base(),
        CENO_PLATFORM.pc_base(),
        CENO_PLATFORM.heap.start,
        program_fibonacci_20(),
        Default::default(),
    );
    let mut ctx = VMState::new(CENO_PLATFORM.clone(), Arc::new(program));
    let steps = run(&mut ctx)?;
    // Registers x1..x3 must match the pure-Rust reconstruction.
    let (x1, x2, x3) = expected_fibonacci_20();
    assert_eq!(ctx.peek_register(1), x1);
    assert_eq!(ctx.peek_register(2), x2);
    assert_eq!(ctx.peek_register(3), x3);
    // The trace must contain exactly the expected instruction kinds, in order.
    let ops: Vec<InsnKind> = steps.iter().map(|step| step.insn().kind).collect();
    assert_eq!(ops, expected_ops_fibonacci_20());
    // Every tracked address must have been last touched at the predicted cycle.
    let final_accesses = ctx.tracer().final_accesses();
    let expected = expected_final_accesses_fibonacci_20();
    assert_eq!(final_accesses.len(), expected.len());
    for (addr, cycle) in expected {
        assert_eq!(
            final_accesses.cycle(addr),
            cycle,
            "mismatch at addr {:?}",
            addr
        );
    }
    Ok(())
}
/// Running a program with no instructions must fault on the first fetch
/// rather than hang or silently halt.
#[test]
fn test_empty_program() -> Result<()> {
    let empty_program = Program::new(
        CENO_PLATFORM.pc_base(),
        CENO_PLATFORM.pc_base(),
        CENO_PLATFORM.heap.start,
        vec![],
        BTreeMap::new(),
    );
    let mut ctx = VMState::new(CENO_PLATFORM.clone(), Arc::new(empty_program));
    let res = run(&mut ctx);
    // The error is matched by message substring, mirroring the emulator's
    // InstructionAccessFault variant.
    assert!(matches!(res, Err(e) if e.to_string().contains("InstructionAccessFault")),);
    Ok(())
}
/// Drive the VM until it halts, collecting one `StepRecord` per executed
/// instruction; short-circuits with an error if execution faults.
fn run(state: &mut VMState) -> Result<Vec<StepRecord>> {
    state.iter_until_halt().collect()
}
/// Example in RISC-V bytecode and assembly: ten iterations of a Fibonacci
/// loop leaving Fib(20) in x2 and Fib(21) in x3, then a HALT ecall.
pub fn program_fibonacci_20() -> Vec<Instruction> {
    vec![
        // x1 = 10;  (loop counter)
        // x3 = 1;
        encode_rv32(InsnKind::ADDI, 0, 0, 1, 10),
        encode_rv32(InsnKind::ADDI, 0, 0, 3, 1),
        // loop {
        //     x1 -= 1;
        encode_rv32(InsnKind::ADDI, 1, 0, 1, -1),
        //     x2 += x3;
        //     x3 += x2;
        encode_rv32(InsnKind::ADD, 2, 3, 2, 0),
        encode_rv32(InsnKind::ADD, 2, 3, 3, 0),
        //     if x1 == 0 { break }  (branch back -12 bytes = 3 instructions)
        encode_rv32(InsnKind::BNE, 1, 0, 0, -12),
        // ecall HALT, SUCCESS
        encode_rv32(InsnKind::ECALL, 0, 0, 0, 0),
    ]
}
/// Rust version of the guest program; reconstructs its final (x1, x2, x3)
/// register values.
fn expected_fibonacci_20() -> (u32, u32, u32) {
    // Mirror the guest: ten iterations of the add/add loop starting from
    // (even, odd) = (0, 1).
    let mut even: u32 = 0; // ends as Fibonacci 20
    let mut odd: u32 = 1; // ends as Fibonacci 21
    for _ in 0..10 {
        even += odd;
        odd += even;
    }
    assert_eq!(even, 6765); // Fibonacci 20.
    assert_eq!(odd, 10946); // Fibonacci 21.
    // The guest's x1 loop counter has counted down to zero by this point.
    (0, even, odd)
}
/// Reconstruct the sequence of opcodes executed by the Fibonacci program:
/// two setup ADDIs, ten loop bodies, then the final ECALL.
fn expected_ops_fibonacci_20() -> Vec<InsnKind> {
    use InsnKind::*;
    [ADDI, ADDI]
        .into_iter()
        .chain((0..10).flat_map(|_| [ADDI, ADD, ADD, BNE]))
        .chain([ECALL])
        .collect()
}
/// Reconstruct the last access of each register: walk the program's cycle
/// timeline and record, per register address, the subcycle of its final
/// read or write.
fn expected_final_accesses_fibonacci_20() -> FxHashMap<WordAddr, Cycle> {
    let mut accesses = FxHashMap::default();
    // Map a register index to its word address in the VM's memory space.
    let x = |i| WordAddr::from(Platform::register_vma(i));
    const C: Cycle = Tracer::SUBCYCLES_PER_INSN;
    let mut cycle = C; // First cycle.
    cycle += 2 * C; // Set x1 and x3.
    for _ in 0..9 {
        // Loop except the last iteration.
        cycle += 4 * C; // ADDI, ADD, ADD, BNE.
    }
    cycle += 2 * C; // Last iteration ADDI and ADD.
    // Last ADD: reads x2 (rs1), writes x3 (rd).
    accesses.insert(x(2), cycle + Tracer::SUBCYCLE_RS1);
    accesses.insert(x(3), cycle + Tracer::SUBCYCLE_RD);
    cycle += C;
    // Last BNE: reads x1 (rs1) and x0 (rs2).
    accesses.insert(x(1), cycle + Tracer::SUBCYCLE_RS1);
    accesses.insert(x(0), cycle + Tracer::SUBCYCLE_RS2);
    cycle += C;
    // Now at the final ECALL cycle: reads the ecall number and its argument.
    accesses.insert(x(Platform::reg_ecall()), cycle + Tracer::SUBCYCLE_RS1);
    accesses.insert(x(Platform::reg_arg0()), cycle + Tracer::SUBCYCLE_RS2);
    accesses
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/bn254_curve_syscalls.rs | examples/examples/bn254_curve_syscalls.rs | // Test addition of two curve points. Assert result inside the guest
extern crate ceno_rt;
use ceno_syscall::{syscall_bn254_add, syscall_bn254_double};
use substrate_bn::{AffineG1, Fr, G1, Group};
/// Convert a 64-byte (x, y) pair, each coordinate big-endian, into the
/// little-endian `u32` word layout expected by the bn254 syscalls.
fn bytes_to_words(bytes: [u8; 64]) -> [u32; 16] {
    let mut buf = bytes;
    // Flip each 32-byte coordinate from big-endian to little-endian.
    for coordinate in buf.chunks_mut(32) {
        coordinate.reverse();
    }
    let mut words = [0u32; 16];
    for (word, chunk) in words.iter_mut().zip(buf.chunks_exact(4)) {
        *word = u32::from_le_bytes(chunk.try_into().unwrap());
    }
    words
}
/// Serialize a Jacobian G1 point into the syscall word layout: normalize to
/// affine, write x then y as big-endian bytes, and convert via
/// `bytes_to_words` (which flips each coordinate to little-endian words).
/// Panics (via unwrap) on the point at infinity.
fn g1_to_words(elem: G1) -> [u32; 16] {
    let elem = AffineG1::from_jacobian(elem).unwrap();
    let mut x_bytes = [0u8; 32];
    elem.x().to_big_endian(&mut x_bytes).unwrap();
    let mut y_bytes = [0u8; 32];
    elem.y().to_big_endian(&mut y_bytes).unwrap();
    let mut bytes = [0u8; 64];
    bytes[..32].copy_from_slice(&x_bytes);
    bytes[32..].copy_from_slice(&y_bytes);
    bytes_to_words(bytes)
}
/// Guest test: check syscall point addition and doubling against each other
/// by verifying 2*343*G + G == 237*G + 450*G (both equal 687*G).
fn main() {
    let a = G1::one() * Fr::from_str("237").unwrap();
    let b = G1::one() * Fr::from_str("450").unwrap();
    let mut a = g1_to_words(a);
    let b = g1_to_words(b);
    log_state(&a);
    log_state(&b);
    // a := 237*G + 450*G via the syscall; compare against a precomputed
    // word encoding of 687*G.
    syscall_bn254_add(&mut a, &b);
    assert_eq!(
        a,
        [
            3533671058, 384027398, 1667527989, 405931240, 1244739547, 3008185164, 3438692308,
            533547881, 4111479971, 1966599592, 1118334819, 3045025257, 3188923637, 1210932908,
            947531184, 656119894
        ]
    );
    log_state(&a);
    let c = G1::one() * Fr::from_str("343").unwrap();
    let mut c = g1_to_words(c);
    log_state(&c);
    syscall_bn254_double(&mut c);
    log_state(&c);
    let one = g1_to_words(G1::one());
    log_state(&one);
    syscall_bn254_add(&mut c, &one);
    log_state(&c);
    // 2 * 343 + 1 == 237 + 450, one hopes
    assert_eq!(a, c);
}
/// Debug-only: emit the raw bytes of the word state to the guest's info
/// stream; compiled to a no-op in release builds.
#[cfg(debug_assertions)]
fn log_state(state: &[u32]) {
    use ceno_rt::info_out;
    info_out().write_frame(unsafe {
        // SAFETY: a &[u32] is valid, initialized memory; viewing it as
        // `size_of_val(state)` bytes stays within the same allocation.
        core::slice::from_raw_parts(state.as_ptr() as *const u8, size_of_val(state))
    });
}
#[cfg(not(debug_assertions))]
fn log_state(_state: &[u32]) {}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/keccak_syscall.rs | examples/examples/keccak_syscall.rs | //! Compute the Keccak permutation using a syscall.
//!
//! Iterate multiple times and log the state after each iteration.
extern crate ceno_rt;
use ceno_syscall::syscall_keccak_permute;
// Number of permutation rounds to run; only the first result is logged.
const ITERATIONS: usize = 100;
/// Repeatedly apply the Keccak-f permutation syscall to an all-zero state,
/// logging the state after the first application only.
fn main() {
    let mut state = [0_u64; 25];
    for i in 0..ITERATIONS {
        syscall_keccak_permute(&mut state);
        if i == 0 {
            log_state(&state);
        }
    }
}
/// Debug-only: emit the raw bytes of the Keccak state to the guest's info
/// stream; compiled to a no-op in release builds.
#[cfg(debug_assertions)]
fn log_state(state: &[u64; 25]) {
    use ceno_rt::info_out;
    info_out().write_frame(unsafe {
        // SAFETY: the array is valid, initialized memory; the byte length
        // (25 * 8) matches the allocation exactly.
        core::slice::from_raw_parts(state.as_ptr() as *const u8, state.len() * size_of::<u64>())
    });
}
#[cfg(not(debug_assertions))]
fn log_state(_state: &[u64; 25]) {}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/bn254_patched_fp.rs | examples/examples/bn254_patched_fp.rs | extern crate ceno_rt;
use bn::{Fq, Fq2};
use rand::{SeedableRng, rngs::StdRng};
/// Convert an Fq element from the syscall-patched `bn` crate into the
/// unpatched `substrate_bn` representation by round-tripping its U256 limbs.
fn to_unpatched_fq(val: Fq) -> substrate_bn::Fq {
    substrate_bn::Fq::from_u256(substrate_bn::arith::U256(val.into_u256().0)).unwrap()
}
/// Convert an Fq2 element from the patched crate into the unpatched
/// `substrate_bn` representation, component-wise.
fn to_unpatched_fq2(val: Fq2) -> substrate_bn::Fq2 {
    substrate_bn::Fq2::new(
        to_unpatched_fq(val.real()),
        to_unpatched_fq(val.imaginary()),
    )
}
/// Guest test: the syscall-patched `bn` crate's Fq and Fq2 addition must
/// agree with the unpatched `substrate_bn` reference on random inputs
/// (deterministic seed, 10 runs each).
fn main() {
    let mut a = Fq::one();
    let mut b = Fq::one();
    let seed = [0u8; 32];
    let mut rng = StdRng::from_seed(seed);
    const RUNS: usize = 10;
    for _ in 0..RUNS {
        let sum = a + b;
        // Compare against the unpatched implementation of the same sum.
        let expected_sum = to_unpatched_fq(a) + to_unpatched_fq(b);
        assert_eq!(to_unpatched_fq(sum), expected_sum);
        a = Fq::random(&mut rng);
        b = Fq::random(&mut rng);
    }
    let mut a = Fq2::one();
    let mut b = Fq2::one();
    for _ in 0..RUNS {
        let sum = a + b;
        let expected_sum = to_unpatched_fq2(a) + to_unpatched_fq2(b);
        assert_eq!(to_unpatched_fq2(sum), expected_sum);
        a = Fq2::new(Fq::random(&mut rng), Fq::random(&mut rng));
        b = Fq2::new(Fq::random(&mut rng), Fq::random(&mut rng));
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/bn254_fptower_syscalls.rs | examples/examples/bn254_fptower_syscalls.rs | extern crate ceno_rt;
use ceno_syscall::{
syscall_bn254_fp_addmod, syscall_bn254_fp_mulmod, syscall_bn254_fp2_addmod,
syscall_bn254_fp2_mulmod,
};
use rand::{SeedableRng, rngs::StdRng};
use substrate_bn::{Fq, Fq2};
/// Reinterpret 32 little-endian bytes as eight little-endian `u32` words.
fn bytes_to_words(bytes: [u8; 32]) -> [u32; 8] {
    let mut words = [0u32; 8];
    for (word, chunk) in words.iter_mut().zip(bytes.chunks_exact(4)) {
        *word = u32::from_le_bytes(chunk.try_into().unwrap());
    }
    words
}
/// Serialize an Fq element into the syscall word layout: big-endian bytes,
/// reversed to little-endian, then packed into `u32` words.
fn fq_to_words(val: Fq) -> [u32; 8] {
    let mut bytes = [0u8; 32];
    val.to_big_endian(&mut bytes).unwrap();
    bytes.reverse();
    bytes_to_words(bytes)
}
/// Serialize an Fq2 element: real component words followed by imaginary
/// component words.
fn fq2_to_words(val: Fq2) -> [u32; 16] {
    [fq_to_words(val.real()), fq_to_words(val.imaginary())]
        .concat()
        .try_into()
        .unwrap()
}
/// Guest test: the Fp and Fp2 add/mul syscalls must agree with host-side
/// `substrate_bn` arithmetic on random inputs (deterministic seed, 10 runs
/// each). The first operand buffer is restored between add and mul since the
/// syscalls overwrite it in place.
fn main() {
    let mut a = Fq::one();
    let mut b = Fq::one();
    let seed = [0u8; 32];
    let mut rng = StdRng::from_seed(seed);
    const RUNS: usize = 10;
    for _ in 0..RUNS {
        let mut a_words = fq_to_words(a);
        let a_backup = a_words; // syscall clobbers a_words; keep a copy
        let b_words = fq_to_words(b);
        log_state(&a_words);
        log_state(&b_words);
        syscall_bn254_fp_addmod(&mut a_words, &b_words);
        let sum_words = fq_to_words(a + b);
        assert_eq!(a_words, sum_words);
        log_state(&a_words);
        a_words.copy_from_slice(&a_backup); // restore before testing mul
        log_state(&a_words);
        log_state(&b_words);
        syscall_bn254_fp_mulmod(&mut a_words, &b_words);
        let prod_words = fq_to_words(a * b);
        assert_eq!(a_words, prod_words);
        log_state(&a_words);
        a = Fq::random(&mut rng);
        b = Fq::random(&mut rng);
    }
    // Same routine over the quadratic extension Fp2.
    let mut a = Fq2::one();
    let mut b = Fq2::one();
    for _ in 0..RUNS {
        let mut a_words = fq2_to_words(a);
        let a_backup = a_words;
        let b_words = fq2_to_words(b);
        log_state(&a_words);
        log_state(&b_words);
        syscall_bn254_fp2_addmod(&mut a_words, &b_words);
        let sum_words = fq2_to_words(a + b);
        assert_eq!(a_words, sum_words);
        log_state(&a_words);
        a_words.copy_from_slice(&a_backup);
        log_state(&a_words);
        log_state(&b_words);
        syscall_bn254_fp2_mulmod(&mut a_words, &b_words);
        let prod_words = fq2_to_words(a * b);
        assert_eq!(a_words, prod_words);
        log_state(&a_words);
        a = Fq2::new(Fq::random(&mut rng), Fq::random(&mut rng));
        b = Fq2::new(Fq::random(&mut rng), Fq::random(&mut rng));
    }
}
/// Debug-only: emit the raw bytes of the word state to the guest's info
/// stream; compiled to a no-op in release builds.
#[cfg(debug_assertions)]
fn log_state(state: &[u32]) {
    use ceno_rt::info_out;
    info_out().write_frame(unsafe {
        // SAFETY: a &[u32] is valid, initialized memory; viewing it as
        // `size_of_val(state)` bytes stays within the same allocation.
        core::slice::from_raw_parts(state.as_ptr() as *const u8, size_of_val(state))
    });
}
#[cfg(not(debug_assertions))]
fn log_state(_state: &[u32]) {}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/ceno_rt_panic.rs | examples/examples/ceno_rt_panic.rs | extern crate ceno_rt;
/// Guest program that panics immediately — exercises the runtime's panic
/// path and message reporting.
fn main() {
    panic!("This is a panic message!");
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/is_prime.rs | examples/examples/is_prime.rs | extern crate ceno_rt;
/// Trial-division primality test: `n` is prime iff it is at least 2 and has
/// no divisor `d` with `d * d <= n`.
fn is_prime(n: u32) -> bool {
    if n < 2 {
        return false;
    }
    (2u32..).take_while(|d| d * d <= n).all(|d| n % d != 0)
}
/// Guest program: count primes up to `n` (read from the host) and panic if
/// the count exceeds one million — a simple CPU-bound workload whose
/// success/failure depends on the input.
fn main() {
    let n: u32 = ceno_rt::read();
    let mut cnt_primes = 0;
    for i in 0..=n {
        cnt_primes += is_prime(i) as u32;
    }
    if cnt_primes > 1000 * 1000 {
        panic!();
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/sha_extend_syscall.rs | examples/examples/sha_extend_syscall.rs | // Test addition of two curve points. Assert result inside the guest
extern crate ceno_rt;
use std::array;
use ceno_syscall::syscall_sha256_extend;
/// Guest test: run the SHA-256 message-schedule extension syscall on the
/// words 0..16 (the remaining 48 are overwritten) and compare against a
/// precomputed reference schedule.
fn main() {
    let mut words: [u32; 64] = array::from_fn(|i| i as u32);
    let expected = [
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 34013193, 67559435, 1711661200,
        3020350282, 1447362251, 3118632270, 4004188394, 690615167, 6070360, 1105370215, 2385558114,
        2348232513, 507799627, 2098764358, 5845374, 823657968, 2969863067, 3903496557, 4274682881,
        2059629362, 1849247231, 2656047431, 835162919, 2096647516, 2259195856, 1779072524,
        3152121987, 4210324067, 1557957044, 376930560, 982142628, 3926566666, 4164334963,
        789545383, 1028256580, 2867933222, 3843938318, 1135234440, 390334875, 2025924737,
        3318322046, 3436065867, 652746999, 4261492214, 2543173532, 3334668051, 3166416553,
        634956631,
    ];
    syscall_sha256_extend(&mut words);
    assert_eq!(words, expected);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/median.rs | examples/examples/median.rs | //! Find the median of a collection of numbers.
//!
//! Of course, we are asking our good friend, the host, for help, but we still need to verify the answer.
extern crate ceno_rt;
use ceno_rt::debug_println;
#[cfg(debug_assertions)]
use core::fmt::Write;
/// Guest program: verify a host-proposed median candidate by checking that
/// exactly half of the numbers are strictly smaller than it.
///
/// NOTE(review): counting strictly-smaller == len/2 does not uniquely pin
/// down the median — any value in the gap above the true lower half passes,
/// and the candidate is not required to occur in `numbers`. Confirm this is
/// the intended strength of the check.
fn main() {
    let numbers: Vec<u32> = ceno_rt::read();
    let median_candidate: u32 = ceno_rt::read();
    let smaller = numbers.iter().filter(|x| **x < median_candidate).count();
    assert_eq!(smaller, numbers.len() / 2);
    debug_println!("{}", median_candidate);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/ceno_rt_io.rs | examples/examples/ceno_rt_io.rs | extern crate ceno_rt;
use ceno_rt::debug_println;
#[cfg(debug_assertions)]
use core::fmt::Write;
/// Smoke test for guest-side console output. `debug_println!` only emits
/// text in builds with debug assertions; in release this is a no-op.
fn main() {
    debug_println!("📜📜📜 Hello, World!");
    debug_println!("🌏🌍🌎");
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/syscalls.rs | examples/examples/syscalls.rs | extern crate ceno_rt;
use std::array;
use ceno_syscall::{
syscall_keccak_permute, syscall_secp256k1_add, syscall_secp256k1_decompress,
syscall_secp256k1_double, syscall_sha256_extend, syscall_uint256_mul,
};
/// One unit test for each implemented syscall
/// Meant to be used identically in a sp1 guest to confirm compatibility
pub fn test_syscalls() {
    /// `bytes` is expected to contain the uncompressed representation of
    /// a curve point, as described in https://docs.rs/secp/latest/secp/struct.Point.html
    ///
    /// The return value is an array of words compatible with the sp1 syscall for `add` and `double`
    /// Notably, these words should encode the X and Y coordinates of the point
    /// in "little endian" and not "big endian" as is the case of secp
    fn bytes_to_words(bytes: [u8; 65]) -> [u32; 16] {
        // ignore the tag byte (specific to the secp repr.)
        let mut bytes: [u8; 64] = bytes[1..].try_into().unwrap();
        // Reverse the order of bytes for each coordinate
        bytes[0..32].reverse();
        bytes[32..].reverse();
        std::array::from_fn(|i| u32::from_le_bytes(bytes[4 * i..4 * (i + 1)].try_into().unwrap()))
    }
    // secp256k1 point addition and doubling against fixed uncompressed-point vectors.
    {
        const P: [u8; 65] = [
            4, 180, 53, 9, 32, 85, 226, 220, 154, 20, 116, 218, 199, 119, 48, 44, 23, 45, 222, 10,
            64, 50, 63, 8, 121, 191, 244, 141, 0, 37, 117, 182, 133, 190, 160, 239, 131, 180, 166,
            242, 145, 107, 249, 24, 168, 27, 69, 86, 58, 86, 159, 10, 210, 164, 20, 152, 148, 67,
            37, 222, 234, 108, 57, 84, 148,
        ];
        const Q: [u8; 65] = [
            4, 117, 102, 61, 142, 169, 5, 99, 112, 146, 4, 241, 177, 255, 72, 34, 34, 12, 251, 37,
            126, 213, 96, 38, 9, 40, 35, 20, 186, 78, 125, 73, 44, 215, 29, 243, 127, 197, 147,
            216, 206, 110, 116, 63, 96, 72, 143, 182, 205, 11, 234, 96, 127, 206, 19, 1, 103, 103,
            219, 255, 25, 229, 210, 4, 141,
        ];
        const P_PLUS_Q: [u8; 65] = [
            4, 188, 11, 115, 232, 35, 63, 79, 186, 163, 11, 207, 165, 64, 247, 109, 81, 125, 56,
            83, 131, 221, 140, 154, 19, 186, 109, 173, 9, 127, 142, 169, 219, 108, 17, 216, 218,
            125, 37, 30, 87, 86, 194, 151, 20, 122, 64, 118, 123, 210, 29, 60, 209, 138, 131, 11,
            247, 157, 212, 209, 123, 162, 111, 197, 70,
        ];
        const DOUBLE_P: [u8; 65] = [
            4, 111, 137, 182, 244, 228, 50, 13, 91, 93, 34, 231, 93, 191, 248, 105, 28, 226, 251,
            23, 66, 192, 188, 66, 140, 44, 218, 130, 239, 101, 255, 164, 76, 202, 170, 134, 48,
            127, 46, 14, 9, 192, 64, 102, 67, 163, 33, 48, 157, 140, 217, 10, 97, 231, 183, 28,
            129, 177, 185, 253, 179, 135, 182, 253, 203,
        ];
        // P + Q via the add syscall (result written in place over P's words).
        {
            let mut p = bytes_to_words(P);
            let q = bytes_to_words(Q);
            let p_plus_q = bytes_to_words(P_PLUS_Q);
            syscall_secp256k1_add(&mut p, &q);
            assert!(p == p_plus_q);
        }
        // 2P via the double syscall.
        {
            let mut p = bytes_to_words(P);
            let double_p = bytes_to_words(DOUBLE_P);
            syscall_secp256k1_double(&mut p);
            assert!(p == double_p);
        }
    }
    // secp256k1 point decompression: recover Y from X plus a parity bit.
    {
        const COMPRESSED: [u8; 33] = [
            2, 180, 53, 9, 32, 85, 226, 220, 154, 20, 116, 218, 199, 119, 48, 44, 23, 45, 222, 10,
            64, 50, 63, 8, 121, 191, 244, 141, 0, 37, 117, 182, 133,
        ];
        const DECOMPRESSED: [u8; 64] = [
            180, 53, 9, 32, 85, 226, 220, 154, 20, 116, 218, 199, 119, 48, 44, 23, 45, 222, 10, 64,
            50, 63, 8, 121, 191, 244, 141, 0, 37, 117, 182, 133, 190, 160, 239, 131, 180, 166, 242,
            145, 107, 249, 24, 168, 27, 69, 86, 58, 86, 159, 10, 210, 164, 20, 152, 148, 67, 37,
            222, 234, 108, 57, 84, 148,
        ];
        // Leading byte encodes the parity of Y (2 = even, 3 = odd).
        let is_odd = match COMPRESSED[0] {
            2 => false,
            3 => true,
            _ => panic!("parity byte should be 2 or 3"),
        };
        // ignore parity byte, append 32 zero bytes for writing Y
        let mut compressed_with_space: [u8; 64] = [COMPRESSED[1..].to_vec(), vec![0; 32]]
            .concat()
            .try_into()
            .unwrap();
        // Note that in the case of the `decompress` syscall the X-coordinate which is part of
        // the compressed representation has type [u8; 64] and expects the bytes
        // to be "big-endian".
        //
        // Contrast with the format used for `add` and `double`, where arrays of words are used
        // and "little-endian" ordering is expected.
        syscall_secp256k1_decompress(&mut compressed_with_space, is_odd);
        assert!(compressed_with_space == DECOMPRESSED);
    }
    // keccak-f[1600] permutation applied to the all-zero state.
    {
        let mut state = [0u64; 25];
        syscall_keccak_permute(&mut state);
        // Known output of one keccak-f permutation over a zeroed state.
        const KECCAK_ON_ZEROS: [u64; 25] = [
            17376452488221285863,
            9571781953733019530,
            15391093639620504046,
            13624874521033984333,
            10027350355371872343,
            18417369716475457492,
            10448040663659726788,
            10113917136857017974,
            12479658147685402012,
            3500241080921619556,
            16959053435453822517,
            12224711289652453635,
            9342009439668884831,
            4879704952849025062,
            140226327413610143,
            424854978622500449,
            7259519967065370866,
            7004910057750291985,
            13293599522548616907,
            10105770293752443592,
            10668034807192757780,
            1747952066141424100,
            1654286879329379778,
            8500057116360352059,
            16929593379567477321,
        ];
        assert!(state == KECCAK_ON_ZEROS);
    }
    // SHA-256 message-schedule extension: 16 seed words -> 64 schedule words.
    {
        let mut words: [u32; 64] = array::from_fn(|i| i as u32);
        let expected = [
            0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 34013193, 67559435, 1711661200,
            3020350282, 1447362251, 3118632270, 4004188394, 690615167, 6070360, 1105370215,
            2385558114, 2348232513, 507799627, 2098764358, 5845374, 823657968, 2969863067,
            3903496557, 4274682881, 2059629362, 1849247231, 2656047431, 835162919, 2096647516,
            2259195856, 1779072524, 3152121987, 4210324067, 1557957044, 376930560, 982142628,
            3926566666, 4164334963, 789545383, 1028256580, 2867933222, 3843938318, 1135234440,
            390334875, 2025924737, 3318322046, 3436065867, 652746999, 4261492214, 2543173532,
            3334668051, 3166416553, 634956631,
        ];
        syscall_sha256_extend(&mut words);
        assert_eq!(words, expected);
    }
    // 256-bit modular multiplication; `a_words` is overwritten with the result.
    {
        let mut a_words: [u32; 8] = [
            0xF8EF7F4B, 0x16980341, 0x6044835, 0xD5CE47D3, 0xF33351FC, 0x74FCA157, 0xE35749FD,
            0x9418A94B,
        ];
        // First 8 words: multiplier b; last 8 words: the modulus (all-ones here).
        let b_and_modulus: [u32; 16] = [
            0xC8653C55, 0x9C14580B, 0xFFCFBEA7, 0xD04DA9F6, 0xF2F5282D, 0xA3DACD28, 0x51A162ED,
            0x0264BEB1, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
            0xFFFFFFFF, 0xFFFFFFFF,
        ];
        syscall_uint256_mul(&mut a_words, &b_and_modulus);
        let expected: [u32; 8] = [
            0xF0D2F44F, 0xF0DC2116, 0x253AB7CD, 0x3089E8F6, 0x803BED8F, 0x969E7A64, 0x610CBFFF,
            0x80012A20,
        ];
        assert_eq!(a_words, expected);
    }
}
/// Guest entry point: runs the full syscall compatibility suite.
fn main() {
    test_syscalls();
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/keccak_lib.rs | examples/examples/keccak_lib.rs | //! Compute the Keccak-256.
extern crate ceno_rt;
use ceno_keccak::{Hasher, Keccak};
/// Checks ceno_keccak's Keccak-256 against known digests: first the empty
/// input, then the 5-byte message [1, 2, 3, 4, 5] hashed into the same
/// buffer that held the input.
fn main() {
    // Digest of the empty message.
    let mut digest = [0u8; 32];
    let expected = b"\
        \xc5\xd2\x46\x01\x86\xf7\x23\x3c\x92\x7e\x7d\xb2\xdc\xc7\x03\xc0\
        \xe5\x00\xb6\x53\xca\x82\x27\x3b\x7b\xfa\xd8\x04\x5d\x85\xa4\x70\
    ";
    Keccak::v256().finalize(&mut digest);
    assert_eq!(expected, &digest);

    // Digest of [1, 2, 3, 4, 5]; the buffer doubles as input and output.
    let mut in_and_out: [u8; 32] = [0; 32];
    for (slot, value) in in_and_out.iter_mut().zip(1u8..=5) {
        *slot = value;
    }
    let expected = b"\
        \x7d\x87\xc5\xea\x75\xf7\x37\x8b\xb7\x01\xe4\x04\xc5\x06\x39\x16\
        \x1a\xf3\xef\xf6\x62\x93\xe9\xf3\x75\xb5\xf1\x7e\xb5\x04\x76\xf4\
    ";
    let mut keccak = Keccak::v256();
    keccak.update(&in_and_out[0..5]);
    keccak.finalize(&mut in_and_out);
    assert_eq!(expected, &in_and_out);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/ceno_rt_mem.rs | examples/examples/ceno_rt_mem.rs | // Use volatile functions to prevent compiler optimizations.
use core::ptr::{read_volatile, write_volatile};
extern crate ceno_rt;
const OUTPUT_ADDRESS: u32 = 0x3800_0000;
/// Entry point: exercises the .data section, the stack (via recursion), and
/// a fixed output address the emulator tests inspect.
#[inline(never)]
fn main() {
    test_data_section();
    let out = fibonacci_recurse(20, 0, 1);
    test_output(out);
}
/// Test the .data section is loaded and read/write works.
#[inline(never)]
fn test_data_section() {
    // Use X[1] to be sure it is not the same as *OUTPUT_ADDRESS.
    static mut X: [u32; 2] = [0, 42];
    // NOTE(review): assumes a single-threaded guest, so the `static mut`
    // accesses cannot race — confirm against the platform's threading model.
    unsafe {
        assert_eq!(read_volatile(&X[1]), 42); // initial value from the loaded image
        write_volatile(&mut X[1], 99);
        assert_eq!(read_volatile(&X[1]), 99); // read back the stored value
    }
}
// A sufficiently complicated function to test the stack: computes `count`
// Fibonacci steps from seeds (a, b) by tail recursion. `black_box` keeps the
// compiler from collapsing the recursion at compile time.
#[inline(never)]
fn fibonacci_recurse(count: u32, a: u32, b: u32) -> u32 {
    match black_box(count) {
        0 => a,
        remaining => fibonacci_recurse(remaining - 1, b, a + b),
    }
}
// Store the output to a specific memory location so the emulator tests can find it.
#[inline(never)]
fn test_output(out: u32) {
    // SAFETY: OUTPUT_ADDRESS (0x3800_0000) is presumed to be a valid, writable
    // guest address reserved for test output — confirm against the platform's
    // memory map.
    unsafe {
        write_volatile(OUTPUT_ADDRESS as *mut u32, out);
    }
}
/// Identity function that defeats constant folding: the value is forced
/// through a volatile read, so the compiler must materialize it.
fn black_box<T>(x: T) -> T {
    // SAFETY: `&x` points at a live, initialized local for the duration of
    // the read.
    unsafe { core::ptr::read_volatile(&x) }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/sorting.rs | examples/examples/sorting.rs | extern crate ceno_rt;
use ceno_rt::debug_println;
#[cfg(debug_assertions)]
use core::fmt::Write;
/// Reads a vector from the host, sorts it ascending, and (in debug builds)
/// prints the smallest element.
fn main() {
    let mut values: Vec<u32> = ceno_rt::read();
    values.sort();
    // Print any output you feel like, eg the first element of the sorted vector:
    debug_println!("{}", values[0]);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/ceno_rt_alloc.rs | examples/examples/ceno_rt_alloc.rs | use core::ptr::{addr_of, read_volatile};
extern crate ceno_rt;
extern crate alloc;
use alloc::{vec, vec::Vec};
// Global written below to test .bss/global writes; read back via black_box.
static mut OUTPUT: u32 = 0;
/// Exercises global, heap, and cross-shard memory writes, with phantom-log
/// syscalls marking phase boundaries for profiling.
fn main() {
    // Test writing to a global variable.
    unsafe {
        OUTPUT = 0xf00d;
        black_box(addr_of!(OUTPUT));
    }
    // Test writing to the heap.
    let v: Vec<u32> = vec![0xbeef];
    black_box(&v[0]);
    // Test writing to a larger vector on the heap
    let mut v: Vec<u32> = vec![0; 128 * 1024];
    ceno_syscall::syscall_phantom_log_pc_cycle("finish allocation");
    v[999] = 0xdead_beef;
    black_box(&v[0]);
    ceno_syscall::syscall_phantom_log_pc_cycle("start fibonacci");
    // Burn cycles so the execution spans multiple shards (2^12 iterations).
    let log_n: u32 = 12;
    let mut a = 0_u32;
    let mut b = 1_u32;
    let n = 1 << log_n;
    for _ in 0..n {
        let mut c = a + b;
        c %= 7919; // Modulus to prevent overflow.
        a = b;
        b = c;
    }
    ceno_syscall::syscall_phantom_log_pc_cycle("end fibonacci");
    // Write to heap memory that was allocated in an earlier shard.
    v[999] = 0xbeef_dead;
    let mut v: Vec<u32> = vec![0; 128 * 1024];
    // Write to heap memory allocated in the current (non-first) shard.
    v[0] = 0xdead_beef;
}
/// Prevent compiler optimizations: forces a genuine load through `x`.
fn black_box<T>(x: *const T) -> T {
    // SAFETY: callers must pass a pointer to a live, initialized value.
    unsafe { core::ptr::read_volatile(x) }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/hints.rs | examples/examples/hints.rs | extern crate alloc;
extern crate ceno_rt;
use ceno_rt::debug_println;
#[cfg(debug_assertions)]
use core::fmt::Write;
/// Demonstrates host-provided hints: a boolean, an optional debug-only
/// string, and two factors whose product is pinned by an assertion.
///
/// The reads must stay in this order — hints are consumed sequentially.
fn main() {
    let flag: bool = ceno_rt::read();
    assert!(flag);
    #[cfg(debug_assertions)]
    {
        use alloc::string::String;
        // Only consumed in debug builds; release builds skip this hint.
        let msg: String = ceno_rt::read();
        debug_println!("This message is a hint: {msg}");
    }
    let lhs: u32 = ceno_rt::read();
    let rhs: u32 = ceno_rt::read();
    let product: u32 = lhs * rhs;
    assert_eq!(product, 3992003);
    debug_println!("{product}");
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/sha256.rs | examples/examples/sha256.rs | extern crate ceno_rt;
use ceno_sha2::{Digest, Sha256};
// Example run (private input: 10 zero bytes)
// RUST_LOG=info cargo run --release --package ceno_zkvm --bin e2e -- --profiling=3 --platform=ceno --public-io=30689455,3643278932,1489987339,1626711444,3610619649,1925764735,581441152,321290698 examples/target/riscv32im-ceno-zkvm-elf/release/examples/sha256 --hints=0,0,0,0,0,0,0,0,0,0
/// Hashes host-provided bytes with SHA-256 and commits the digest as eight
/// big-endian u32 words of public output.
fn main() {
    // Read input data from the host
    let input: Vec<u8> = ceno_rt::read();
    let digest: [u8; 32] = Sha256::digest(&input).into();
    // Repack the 32 digest bytes into eight big-endian words.
    let words: [u32; 8] = core::array::from_fn(|i| {
        u32::from_be_bytes(digest[4 * i..4 * i + 4].try_into().unwrap())
    });
    // Output the final hash values one by one
    ceno_rt::commit(&words);
    // debug_print!("{:x}", words[0]);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/secp256k1_scalar_algebra.rs | examples/examples/secp256k1_scalar_algebra.rs | extern crate ceno_rt;
use k256::{Scalar, elliptic_curve::Field};
/// Property check for scalar inversion: a random nonzero scalar multiplied
/// by its inverse must equal one.
fn main() {
    let s = Scalar::random(rand::thread_rng());
    let inverse = s.invert().unwrap();
    assert_eq!(s * inverse, Scalar::ONE);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/secp256k1_ecrecover.rs | examples/examples/secp256k1_ecrecover.rs | // Test ecrecover of real world signatures from scroll mainnet. Assert result inside the guest.
extern crate ceno_rt;
use alloy_primitives::{Address, B256, address, b256, hex};
use ceno_crypto::secp256k1::secp256k1_ecrecover;
/// Signature fixtures sampled from Scroll mainnet transactions:
/// (64-byte signature, recovery id, signed tx hash, expected signer address).
const TEST_CASES: [(&[u8], u8, B256, Address); 5] = [
    // (sig, recid, tx_hash, signer)
    (
        &hex!(
            "15a7bb615483f66a697431cd414294b6bd1e1b9b9d6d163cfd97290ea77b53061810c4d228e424087ad77ee75bb25e77c832ad9038b89f7e573a34b574648348"
        ),
        0,
        b256!("b329f831352e37f4426583986465b065d9c867901b42f576f00ef36dfac1cfdf"),
        address!("ca585e09df67e83106c9bcd839c989ace537bf95"),
    ),
    (
        &hex!(
            "870077f742ca34760810033caf13c99e90e207db6f820124b827907e9658d7d04f302d6675c8625c02fc95c131a3ce77e7f90dba10dbda368efeaaba9be60916"
        ),
        0,
        b256!("4e13990772a9454712c7560ad8a64b845fd472b913b90d680867ab3dad56a18d"),
        address!("a79c12bcf11133af01b6b20f16f8aafaecdebc93"),
    ),
    (
        &hex!(
            "455a6249244154e8f5d516a3036e26576449bef05171657dbf3a5d7b9c02fe96629f7eb0aa2a006ff4ac6fc0523a6f5a365cf375240f5a560b1972eb21cec087"
        ),
        1,
        b256!("4dedbd995fc79db979c6484132568fe30fdf6bfa8b64ac74ba844cc30e764b0c"),
        address!("c623f214c8eefc771147c5806be250db39555555"),
    ),
    (
        &hex!(
            "854c4656c421158b4e5d8c29ccc3adcaee329587cee630398f3ce2e32745e45b67b1fc40e3206c70a75bcdf3c877c26874c75c2fabd5566c85b58c7c7d872e00"
        ),
        0,
        b256!("e4559e37c72fb3df0349df42b3aa0e94607287ecb3e6530b7c50ed984e0428a2"),
        address!("b82def35c814584d3d929cfb3a1fb1b886b6e57b"),
    ),
    (
        &hex!(
            "004a0ac1306d096c06fb77f82b76f43fb2459638826f4846444686b3036b9a4b3d6bf124bf22f23b851adfa2c4bdc670b4ecb5129186a4e89032916a77a56b90"
        ),
        0,
        b256!("83e5e11daa2d14736ab1d578c41250c6f6445782c215684a18f67b44686ccb90"),
        address!("0a6f0ed4896be1caa9e37047578e7519481f22ea"),
    ),
];
/// Runs ecrecover on every fixture and checks the recovered signer.
fn main() {
    for (signature, recovery_id, digest, expected) in TEST_CASES {
        // The last 20 bytes of the recovered value are compared against the
        // known signer address.
        let recovered =
            secp256k1_ecrecover(signature.try_into().unwrap(), recovery_id, &digest.0).unwrap();
        assert_eq!(&recovered[12..], &expected.0);
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/secp256k1.rs | examples/examples/secp256k1.rs | extern crate ceno_rt;
#[allow(unused_imports)]
use k256::{
ProjectivePoint, Scalar,
elliptic_curve::{Group, ops::MulByGenerator},
};
/// Drives the secp256k1 double and add syscalls through k256's point API.
/// The operation order is deliberate: double first, then add.
fn main() {
    let a = ProjectivePoint::mul_by_generator(&Scalar::from(5u64));
    let _ = a.double(); // -> syscall_secp256k1_double
    let b = ProjectivePoint::mul_by_generator(&Scalar::from(6u64));
    let _ = a + b; // -> syscall_secp256k1_add
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/bn254_precompile.rs | examples/examples/bn254_precompile.rs | //! bn254 evm precompile from revm:
//! https://github.com/bluealloy/revm/blob/10ff66da1576a3532db657d7b953abcd59ec44a3/crates/precompile/src/bn254.rs
extern crate ceno_rt;
use alloy_primitives::hex;
use ceno_crypto::ceno_crypto;
use revm_precompile::{
PrecompileError,
bn254::{add::*, mul::*, pair::*, *},
};
ceno_crypto!();
/// Installs the Ceno crypto backend for revm's precompiles, then runs the
/// bn254 add/mul/pairing precompile test vectors.
fn main() {
    CenoCrypto::install();
    test_bn254_add();
    test_bn254_mul();
    test_bn254_pair();
}
/// Vectors for the bn254 point-addition precompile (Byzantium gas pricing):
/// a valid sum, the zero sum, out-of-gas, empty input, and a point that is
/// not on the curve.
fn test_bn254_add() {
    // Valid addition of two curve points (two 64-byte G1 points in).
    let input = hex::decode(
        "\
        18b18acfb4c2c30276db5411368e7185b311dd124691610c5d3b74034e093dc9\
        063c909c4720840cb5134cb9f59fa749755796819658d32efc0d288198f37266\
        07c2b7f58a84bd6145f00c9c2bc0bb1a187f20ff2c92963a88019e7c6a014eed\
        06614e20c147e940f2d70da3f74c9a17df361706a4485c742bd6788478fa17d7",
    )
    .unwrap();
    let expected = hex::decode(
        "\
        2243525c5efd4b9c3d3c45ac0ca3fe4dd85e830a4ce6b65fa1eeaee202839703\
        301d1d33be6da8e509df21cc35964723180eed7532537db9ae5e7d48f195c915",
    )
    .unwrap();
    let outcome = run_add(&input, BYZANTIUM_ADD_GAS_COST, 500).unwrap();
    assert_eq!(outcome.bytes, expected);
    // Zero sum test
    let input = hex::decode(
        "\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();
    let expected = hex::decode(
        "\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();
    let outcome = run_add(&input, BYZANTIUM_ADD_GAS_COST, 500).unwrap();
    assert_eq!(outcome.bytes, expected);
    // Out of gas test (gas limit 499 < 500 required)
    let input = hex::decode(
        "\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();
    let res = run_add(&input, BYZANTIUM_ADD_GAS_COST, 499);
    assert!(matches!(res, Err(PrecompileError::OutOfGas)));
    // No input test (input is zero-padded, so this is the zero sum again)
    let input = [0u8; 0];
    let expected = hex::decode(
        "\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();
    let outcome = run_add(&input, BYZANTIUM_ADD_GAS_COST, 500).unwrap();
    assert_eq!(outcome.bytes, expected);
    // Point not on curve fail
    let input = hex::decode(
        "\
        1111111111111111111111111111111111111111111111111111111111111111\
        1111111111111111111111111111111111111111111111111111111111111111\
        1111111111111111111111111111111111111111111111111111111111111111\
        1111111111111111111111111111111111111111111111111111111111111111",
    )
    .unwrap();
    let res = run_add(&input, BYZANTIUM_ADD_GAS_COST, 500);
    assert!(matches!(
        res,
        Err(PrecompileError::Bn254AffineGFailedToCreate)
    ));
}
/// Vectors for the bn254 scalar-multiplication precompile (Byzantium gas
/// pricing): a valid product, out-of-gas, multiplication by zero... point,
/// empty input, and a point that is not on the curve.
fn test_bn254_mul() {
    // Valid scalar multiplication (64-byte G1 point, 32-byte scalar).
    let input = hex::decode(
        "\
        2bd3e6d0f3b142924f5ca7b49ce5b9d54c4703d7ae5648e61d02268b1a0a9fb7\
        21611ce0a6af85915e2f1d70300909ce2e49dfad4a4619c8390cae66cefdb204\
        00000000000000000000000000000000000000000000000011138ce750fa15c2",
    )
    .unwrap();
    let expected = hex::decode(
        "\
        070a8d6a982153cae4be29d434e8faef8a47b274a053f5a4ee2a6c9c13c31e5c\
        031b8ce914eba3a9ffb989f9cdd5b0f01943074bf4f0f315690ec3cec6981afc",
    )
    .unwrap();
    let outcome = run_mul(&input, BYZANTIUM_MUL_GAS_COST, 40_000).unwrap();
    assert_eq!(outcome.bytes, expected);
    // Out of gas test (gas limit 39_999 < 40_000 required)
    let input = hex::decode(
        "\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000\
        0200000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();
    let res = run_mul(&input, BYZANTIUM_MUL_GAS_COST, 39_999);
    assert!(matches!(res, Err(PrecompileError::OutOfGas)));
    // Zero multiplication test
    let input = hex::decode(
        "\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000\
        0200000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();
    let expected = hex::decode(
        "\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();
    let outcome = run_mul(&input, BYZANTIUM_MUL_GAS_COST, 40_000).unwrap();
    assert_eq!(outcome.bytes, expected);
    // No input test
    let input = [0u8; 0];
    let expected = hex::decode(
        "\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();
    let outcome = run_mul(&input, BYZANTIUM_MUL_GAS_COST, 40_000).unwrap();
    assert_eq!(outcome.bytes, expected);
    // Point not on curve fail
    let input = hex::decode(
        "\
        1111111111111111111111111111111111111111111111111111111111111111\
        1111111111111111111111111111111111111111111111111111111111111111\
        0f00000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();
    let res = run_mul(&input, BYZANTIUM_MUL_GAS_COST, 40_000);
    assert!(matches!(
        res,
        Err(PrecompileError::Bn254AffineGFailedToCreate)
    ));
}
/// Vectors for the bn254 pairing-check precompile (Byzantium gas pricing).
/// Each pair is a 64-byte G1 point followed by a 128-byte G2 point; the
/// precompile returns a 32-byte boolean (1 when the product of pairings is
/// the identity).
fn test_bn254_pair() {
    // Valid pairing check over two (G1, G2) pairs — expected true.
    let input = hex::decode(
        "\
        1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f59\
        3034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41\
        209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf7\
        04bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a41678\
        2bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d\
        120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550\
        111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c\
        2032c61a830e3c17286de9462bf242fca2883585b93870a73853face6a6bf411\
        198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2\
        1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed\
        090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b\
        12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
    )
    .unwrap();
    let expected =
        hex::decode("0000000000000000000000000000000000000000000000000000000000000001").unwrap();
    let outcome = run_pair(
        &input,
        BYZANTIUM_PAIR_PER_POINT,
        BYZANTIUM_PAIR_BASE,
        260_000,
    )
    .unwrap();
    assert_eq!(outcome.bytes, expected);
    // Out of gas test (gas limit 259_999 is one below the required amount)
    let input = hex::decode(
        "\
        1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f59\
        3034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41\
        209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf7\
        04bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a41678\
        2bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d\
        120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550\
        111e129f1cf1097710d41c4ac70fcdfa5ba2023c6ff1cbeac322de49d1b6df7c\
        2032c61a830e3c17286de9462bf242fca2883585b93870a73853face6a6bf411\
        198e9393920d483a7260bfb731fb5d25f1aa493335a9e71297e485b7aef312c2\
        1800deef121f1e76426a00665e5c4479674322d4f75edadd46debd5cd992f6ed\
        090689d0585ff075ec9e99ad690c3395bc4b313370b38ef355acdadcd122975b\
        12c85ea5db8c6deb4aab71808dcb408fe3d1e7690c43d37b4ce6cc0166fa7daa",
    )
    .unwrap();
    let res = run_pair(
        &input,
        BYZANTIUM_PAIR_PER_POINT,
        BYZANTIUM_PAIR_BASE,
        259_999,
    );
    assert!(matches!(res, Err(PrecompileError::OutOfGas)));
    // No input test — the empty product of pairings is the identity, so true.
    let input = [0u8; 0];
    let expected =
        hex::decode("0000000000000000000000000000000000000000000000000000000000000001").unwrap();
    let outcome = run_pair(
        &input,
        BYZANTIUM_PAIR_PER_POINT,
        BYZANTIUM_PAIR_BASE,
        260_000,
    )
    .unwrap();
    assert_eq!(outcome.bytes, expected);
    // Point not on curve fail
    let input = hex::decode(
        "\
        1111111111111111111111111111111111111111111111111111111111111111\
        1111111111111111111111111111111111111111111111111111111111111111\
        1111111111111111111111111111111111111111111111111111111111111111\
        1111111111111111111111111111111111111111111111111111111111111111\
        1111111111111111111111111111111111111111111111111111111111111111\
        1111111111111111111111111111111111111111111111111111111111111111",
    )
    .unwrap();
    let res = run_pair(
        &input,
        BYZANTIUM_PAIR_PER_POINT,
        BYZANTIUM_PAIR_BASE,
        260_000,
    );
    assert!(matches!(
        res,
        Err(PrecompileError::Bn254AffineGFailedToCreate)
    ));
    // Invalid input length (not a multiple of the 192-byte pair size)
    let input = hex::decode(
        "\
        1111111111111111111111111111111111111111111111111111111111111111\
        1111111111111111111111111111111111111111111111111111111111111111\
        111111111111111111111111111111\
        ",
    )
    .unwrap();
    let res = run_pair(
        &input,
        BYZANTIUM_PAIR_PER_POINT,
        BYZANTIUM_PAIR_BASE,
        260_000,
    );
    assert!(matches!(res, Err(PrecompileError::Bn254PairLength)));
    // Test with point at infinity - should return true (identity element)
    // G1 point at infinity (0,0) followed by a valid G2 point
    let input = hex::decode(
        "\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000\
        209dd15ebff5d46c4bd888e51a93cf99a7329636c63514396b4a452003a35bf7\
        04bf11ca01483bfa8b34b43561848d28905960114c8ac04049af4b6315a41678\
        2bb8324af6cfc93537a2ad1a445cfd0ca2a71acd7ac41fadbf933c2a51be344d\
        120a2a4cf30c1bf9845f20c6fe39e07ea2cce61f0c9bb048165fe5e4de877550",
    )
    .unwrap();
    let expected =
        hex::decode("0000000000000000000000000000000000000000000000000000000000000001").unwrap();
    let outcome = run_pair(
        &input,
        BYZANTIUM_PAIR_PER_POINT,
        BYZANTIUM_PAIR_BASE,
        260_000,
    )
    .unwrap();
    assert_eq!(outcome.bytes, expected);
    // Test with G2 point at infinity - should also return true
    // Valid G1 point followed by G2 point at infinity (0,0,0,0)
    let input = hex::decode(
        "\
        1c76476f4def4bb94541d57ebba1193381ffa7aa76ada664dd31c16024c43f59\
        3034dd2920f673e204fee2811c678745fc819b55d3e9d294e45c9b03a76aef41\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000\
        0000000000000000000000000000000000000000000000000000000000000000",
    )
    .unwrap();
    let outcome = run_pair(
        &input,
        BYZANTIUM_PAIR_PER_POINT,
        BYZANTIUM_PAIR_BASE,
        260_000,
    )
    .unwrap();
    assert_eq!(outcome.bytes, expected);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/quadratic_sorting.rs | examples/examples/quadratic_sorting.rs | extern crate ceno_rt;
/// Sorts `slice` in ascending order, in place.
///
/// This is the deliberately quadratic "exchange every pair" algorithm (the
/// example benchmarks O(n^2) work): for each target position, every probe
/// position is compared and swapped whenever the probe element is larger.
/// Despite its unusual shape this provably sorts ascending.
fn sort<T: Ord>(slice: &mut [T]) {
    let n = slice.len();
    for target in 0..n {
        for probe in 0..n {
            if slice[probe] > slice[target] {
                slice.swap(probe, target);
            }
        }
    }
}
/// Reads a vector from the host and runs the quadratic sort over it.
fn main() {
    let mut data: Vec<u32> = ceno_rt::read();
    sort(&mut data);
    // Print any output you feel like, eg the first element of the sorted vector:
    // println!("{}", data[0]);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/secp256k1_add_syscall.rs | examples/examples/secp256k1_add_syscall.rs | // Test addition of two curve points. Assert result inside the guest
extern crate ceno_rt;
use ceno_syscall::syscall_secp256k1_add;
// Byte repr. of points from https://docs.rs/secp/latest/secp/#arithmetic-1
// Uncompressed encoding: tag byte 4, then 32-byte X and 32-byte Y (big-endian).
const P: [u8; 65] = [
    4, 180, 53, 9, 32, 85, 226, 220, 154, 20, 116, 218, 199, 119, 48, 44, 23, 45, 222, 10, 64, 50,
    63, 8, 121, 191, 244, 141, 0, 37, 117, 182, 133, 190, 160, 239, 131, 180, 166, 242, 145, 107,
    249, 24, 168, 27, 69, 86, 58, 86, 159, 10, 210, 164, 20, 152, 148, 67, 37, 222, 234, 108, 57,
    84, 148,
];
const Q: [u8; 65] = [
    4, 117, 102, 61, 142, 169, 5, 99, 112, 146, 4, 241, 177, 255, 72, 34, 34, 12, 251, 37, 126,
    213, 96, 38, 9, 40, 35, 20, 186, 78, 125, 73, 44, 215, 29, 243, 127, 197, 147, 216, 206, 110,
    116, 63, 96, 72, 143, 182, 205, 11, 234, 96, 127, 206, 19, 1, 103, 103, 219, 255, 25, 229, 210,
    4, 141,
];
// Expected result of P + Q, used to check the syscall's output.
const P_PLUS_Q: [u8; 65] = [
    4, 188, 11, 115, 232, 35, 63, 79, 186, 163, 11, 207, 165, 64, 247, 109, 81, 125, 56, 83, 131,
    221, 140, 154, 19, 186, 109, 173, 9, 127, 142, 169, 219, 108, 17, 216, 218, 125, 37, 30, 87,
    86, 194, 151, 20, 122, 64, 118, 123, 210, 29, 60, 209, 138, 131, 11, 247, 157, 212, 209, 123,
    162, 111, 197, 70,
];
// 16 little-endian u32 words holding a point's X and Y coordinates
// (the layout produced by `bytes_to_words`).
type DecompressedPoint = [u32; 16];
/// `bytes` is expected to contain the uncompressed representation of
/// a curve point, as described in https://docs.rs/secp/latest/secp/struct.Point.html
///
/// The return value is an array of words compatible with the sp1 syscall for `add` and `double`
/// Notably, these words should encode the X and Y coordinates of the point
/// in "little endian" and not "big endian" as is the case of secp
fn bytes_to_words(bytes: [u8; 65]) -> [u32; 16] {
    // Drop the leading tag byte (specific to the secp repr.), keeping the
    // two 32-byte coordinates.
    let mut coords: [u8; 64] = bytes[1..].try_into().unwrap();
    // Flip each coordinate from big-endian to little-endian byte order.
    let (x, y) = coords.split_at_mut(32);
    x.reverse();
    y.reverse();
    // Pack the 64 bytes into 16 little-endian u32 words.
    let mut words = [0u32; 16];
    for (word, chunk) in words.iter_mut().zip(coords.chunks_exact(4)) {
        *word = u32::from_le_bytes(chunk.try_into().unwrap());
    }
    words
}
/// Adds P and Q through the secp256k1 add syscall and checks the result
/// against the known sum.
fn main() {
    let mut lhs: DecompressedPoint = bytes_to_words(P);
    let rhs: DecompressedPoint = bytes_to_words(Q);
    syscall_secp256k1_add(&mut lhs, &rhs);
    assert_eq!(lhs, bytes_to_words(P_PLUS_Q));
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/secp256k1_decompress_syscall.rs | examples/examples/secp256k1_decompress_syscall.rs | // Test decompression of curve point. Assert result inside the guest
extern crate ceno_rt;
use ceno_syscall::syscall_secp256k1_decompress;
// Byte repr. of point P1 from https://docs.rs/secp/latest/secp/#arithmetic-1
// Compressed encoding: parity tag (2 = even Y, 3 = odd Y) followed by X.
const COMPRESSED: [u8; 33] = [
    2, 180, 53, 9, 32, 85, 226, 220, 154, 20, 116, 218, 199, 119, 48, 44, 23, 45, 222, 10, 64, 50,
    63, 8, 121, 191, 244, 141, 0, 37, 117, 182, 133,
];
// Expected X || Y (no tag byte) after decompression.
const DECOMPRESSED: [u8; 64] = [
    180, 53, 9, 32, 85, 226, 220, 154, 20, 116, 218, 199, 119, 48, 44, 23, 45, 222, 10, 64, 50, 63,
    8, 121, 191, 244, 141, 0, 37, 117, 182, 133, 190, 160, 239, 131, 180, 166, 242, 145, 107, 249,
    24, 168, 27, 69, 86, 58, 86, 159, 10, 210, 164, 20, 152, 148, 67, 37, 222, 234, 108, 57, 84,
    148,
];
/// Decompresses the fixture point via the syscall and compares against the
/// known uncompressed encoding.
fn main() {
    // The leading byte encodes the parity of the Y coordinate.
    let is_odd = match COMPRESSED[0] {
        2 => false,
        3 => true,
        _ => panic!("parity byte should be 2 or 3"),
    };
    // First 32 bytes: the X coordinate (parity byte dropped); last 32 bytes:
    // scratch space the syscall fills with the recovered Y coordinate.
    let mut buffer = [0u8; 64];
    buffer[..32].copy_from_slice(&COMPRESSED[1..]);
    syscall_secp256k1_decompress(&mut buffer, is_odd);
    assert_eq!(buffer, DECOMPRESSED);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/fibonacci.rs | examples/examples/fibonacci.rs | extern crate ceno_rt;
/// Computes the (1 << log_n)'th Fibonacci number modulo 7919 in plain Rust
/// and commits the result as public output. `log_n` comes from the host.
fn main() {
    let log_n: u32 = ceno_rt::read();
    let iterations = 1u32 << log_n;
    let (mut prev, mut curr) = (0u32, 1u32);
    for _ in 0..iterations {
        // Both operands stay below 7919, so the sum cannot overflow u32.
        let next = (prev + curr) % 7919;
        prev = curr;
        curr = next;
    }
    // Constrain with public io
    ceno_rt::commit(&curr);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/uint256_mul_syscall.rs | examples/examples/uint256_mul_syscall.rs | extern crate ceno_rt;
use ceno_syscall::syscall_uint256_mul;
/// Exercises `syscall_uint256_mul` against a precomputed result.
fn main() {
    // Multiplicand; overwritten in place with the syscall's result.
    let mut a_words: [u32; 8] = [
        0xF8EF7F4B, 0x16980341, 0x6044835, 0xD5CE47D3, 0xF33351FC, 0x74FCA157, 0xE35749FD,
        0x9418A94B,
    ];
    // First 8 words: the multiplier b; last 8 words: the modulus (all-ones).
    let b_and_modulus: [u32; 16] = [
        0xC8653C55, 0x9C14580B, 0xFFCFBEA7, 0xD04DA9F6, 0xF2F5282D, 0xA3DACD28, 0x51A162ED,
        0x0264BEB1, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF, 0xFFFFFFFF,
        0xFFFFFFFF, 0xFFFFFFFF,
    ];
    syscall_uint256_mul(&mut a_words, &b_and_modulus);
    // Expected value computed offline — presumably a * b mod m per the
    // sp1-compatible uint256 syscall; confirm against the syscall spec.
    let expected: [u32; 8] = [
        0xF0D2F44F, 0xF0DC2116, 0x253AB7CD, 0x3089E8F6, 0x803BED8F, 0x969E7A64, 0x610CBFFF,
        0x80012A20,
    ];
    assert_eq!(a_words, expected);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/secp256k1_double_syscall.rs | examples/examples/secp256k1_double_syscall.rs | // Test double of a curve point via syscall
extern crate ceno_rt;
#[allow(unused_imports)]
use k256::{ProjectivePoint, elliptic_curve::Group};
fn main() {
    // `generator()` is deprecated upstream, but is fine for this smoke test.
    #[allow(deprecated)]
    let g = ProjectivePoint::generator();
    // Exercise the point-doubling path; the result itself is unused.
    let _doubled = g.double();
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/keccak_no_syscall.rs | examples/examples/keccak_no_syscall.rs | use tiny_keccak::{Hasher, Keccak};
pub fn main() {
    let iterations = 100;
    // Input arrives as u32 words; flatten into the underlying byte stream.
    let raw_preimage: Vec<u32> = ceno_rt::read();
    let preimage: Vec<u8> = raw_preimage
        .iter()
        .flat_map(|word| word.to_le_bytes())
        .collect();
    for round in 0..iterations {
        // Re-hash every round to exercise the hasher repeatedly.
        let digest: Vec<u32> = keccak256(&preimage)
            .chunks_exact(4)
            .map(|chunk| u32::from_le_bytes(chunk.try_into().unwrap()))
            .collect();
        // Every round produces the same digest, so only log the first one.
        if round == 0 {
            log_digest(digest);
        }
        // TODO define serializable struct
        // ceno_rt::commit::<ArchivedVec<u32>, Vec<u32>>(&result);
    }
}
/// Simple interface to the [`keccak256`] hash function.
///
/// [`keccak256`]: https://en.wikipedia.org/wiki/SHA-3
pub fn keccak256<T: AsRef<[u8]>>(bytes: T) -> [u8; 32] {
    let mut hasher = Keccak::v256();
    hasher.update(bytes.as_ref());
    let mut digest = [0u8; 32];
    hasher.finalize(&mut digest);
    digest
}
// Debug builds: print each digest word as hex through the runtime's debug sink.
#[cfg(debug_assertions)]
fn log_digest(digest: Vec<u32>) {
    use ceno_rt::debug_print;
    use core::fmt::Write;
    for d in digest {
        debug_print!("{:x}", d)
    }
}
// Release builds: logging compiles away entirely.
#[cfg(not(debug_assertions))]
fn log_digest(_digest: Vec<u32>) {}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/hashing.rs | examples/examples/hashing.rs | //! Here's an example that really makes use of the standard library and couldn't be done without.
//!
//! I mean `HashSet` really lives only in the proper standard library, and not in `alloc` or `core`.
//! You could, of course, rerwite the example to use `alloc::collections::btree_set::BTreeSet`
//! instead of `HashSet`.
extern crate ceno_rt;
use ceno_rt::debug_println;
#[cfg(debug_assertions)]
use core::fmt::Write;
use std::collections::HashSet;
/// Check that the input is a set of unique numbers.
fn main() {
    let values: Vec<u32> = ceno_rt::read();
    let mut seen = HashSet::new();
    for v in values {
        // `insert` returns false on a duplicate, which trips the assert.
        assert!(seen.insert(v));
    }
    debug_println!("The input is a set of unique numbers.");
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/keccak_native.rs | examples/examples/keccak_native.rs | //! Compute the Keccak-256 using alloy-primitives with native-keccak hook.
extern crate ceno_keccak;
extern crate ceno_rt; // Make sure the native keccak hook is linked in.
use alloy_primitives::keccak256;
fn main() {
    // Known-answer test: Keccak-256 of the empty input.
    let empty_digest = keccak256(b"");
    let expected = b"\
    \xc5\xd2\x46\x01\x86\xf7\x23\x3c\x92\x7e\x7d\xb2\xdc\xc7\x03\xc0\
    \xe5\x00\xb6\x53\xca\x82\x27\x3b\x7b\xfa\xd8\x04\x5d\x85\xa4\x70\
    ";
    assert_eq!(expected, &empty_digest);
    // Known-answer test: Keccak-256 of the bytes [1, 2, 3, 4, 5].
    let input: [u8; 5] = [1, 2, 3, 4, 5];
    let five_byte_digest = keccak256(input);
    let expected = b"\
    \x7d\x87\xc5\xea\x75\xf7\x37\x8b\xb7\x01\xe4\x04\xc5\x06\x39\x16\
    \x1a\xf3\xef\xf6\x62\x93\xe9\xf3\x75\xb5\xf1\x7e\xb5\x04\x76\xf4\
    ";
    assert_eq!(expected, &five_byte_digest);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/examples/examples/ceno_rt_mini.rs | examples/examples/ceno_rt_mini.rs | extern crate ceno_rt;
// Intentionally empty: minimal guest program exercising bare runtime startup/teardown.
fn main() {}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/build.rs | ceno_zkvm/build.rs | use glob::glob;
use std::{fs, path::Path};
fn main() -> Result<(), Box<dyn std::error::Error>> {
// Path to the file that, when changed, should trigger a cache cleanup
let watched_file = "src/scheme/mock_prover.rs";
// Path to the cache file to remove when the watched file changes
let cache_file = "table_cache_dev_*";
// Instruct Cargo to rerun this script if the watched file changes
println!("cargo:rerun-if-changed={}", watched_file);
// Use the glob crate to find files matching the pattern
for entry in glob(cache_file)? {
match entry {
Ok(path) => {
// Check if the path exists and delete it
if Path::new(&path).exists() {
fs::remove_file(&path)?;
println!("Deleted file: {}", path.display());
}
}
Err(e) => println!("Error reading file: {:?}", e),
}
}
Ok(())
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/stats.rs | ceno_zkvm/src/stats.rs | use crate::{
circuit_builder::{ConstraintSystem, NameSpace},
structs::{ComposedConstrainSystem, ZKVMConstraintSystem, ZKVMWitnesses},
utils,
};
use ff_ext::ExtensionField;
use itertools::Itertools;
use multilinear_extensions::Expression;
use prettytable::{Table, row};
use serde_json::json;
use std::{
collections::{BTreeMap, HashMap},
fs::File,
io::Write,
};
#[derive(Clone, Debug, serde::Serialize, Default)]
pub struct OpCodeStats {
    // Namespace of the constraint system this summary was taken from;
    // reset to the default when summaries are aggregated (see `Add`).
    namespace: NameSpace,
    // Number of witness columns.
    witnesses: usize,
    // Counts of read / write / lookup expressions.
    reads: usize,
    writes: usize,
    lookups: usize,
    // store degrees as frequency maps
    assert_zero_expr_degrees: HashMap<usize, usize>,
    assert_zero_sumcheck_expr_degrees: HashMap<usize, usize>,
}
impl std::ops::Add for OpCodeStats {
    type Output = OpCodeStats;

    /// Aggregate two opcode summaries field-by-field. The namespace is
    /// meaningless for an aggregate, so it is reset to the default.
    fn add(self, rhs: Self) -> Self::Output {
        let assert_zero_expr_degrees = utils::merge_frequency_tables(
            self.assert_zero_expr_degrees,
            rhs.assert_zero_expr_degrees,
        );
        let assert_zero_sumcheck_expr_degrees = utils::merge_frequency_tables(
            self.assert_zero_sumcheck_expr_degrees,
            rhs.assert_zero_sumcheck_expr_degrees,
        );
        OpCodeStats {
            namespace: NameSpace::default(),
            witnesses: self.witnesses + rhs.witnesses,
            reads: self.reads + rhs.reads,
            writes: self.writes + rhs.writes,
            lookups: self.lookups + rhs.lookups,
            assert_zero_expr_degrees,
            assert_zero_sumcheck_expr_degrees,
        }
    }
}
#[derive(Clone, Debug, serde::Serialize)]
pub struct TableStats {
    // Declared length of the first lookup table, 0 when unknown (see `CircuitStats::new`).
    table_len: usize,
}
// Per-circuit summary: a circuit is either an opcode circuit or a table circuit.
#[derive(Clone, Debug, serde::Serialize)]
pub enum CircuitStats {
    OpCode(OpCodeStats),
    Table(TableStats),
}
impl Default for CircuitStats {
    /// An empty opcode aggregate is the neutral default.
    fn default() -> Self {
        Self::OpCode(OpCodeStats::default())
    }
}
// logic to aggregate two circuit stats; ignore tables
impl std::ops::Add for CircuitStats {
type Output = CircuitStats;
fn add(self, rhs: Self) -> Self::Output {
match (self, rhs) {
(CircuitStats::Table(_), CircuitStats::Table(_)) => {
CircuitStats::OpCode(OpCodeStats::default())
}
(CircuitStats::Table(_), rhs) => rhs,
(lhs, CircuitStats::Table(_)) => lhs,
(CircuitStats::OpCode(lhs), CircuitStats::OpCode(rhs)) => {
CircuitStats::OpCode(lhs + rhs)
}
}
}
}
impl CircuitStats {
    /// Summarize a single constraint system.
    ///
    /// A system with no table expressions (lookup/read/write) is classified
    /// as an opcode circuit, otherwise as a table circuit — the same
    /// distinction made in `ZKVMProver::create_proof`.
    pub fn new<E: ExtensionField>(system: &ConstraintSystem<E>) -> Self {
        // Build a degree -> frequency histogram for a set of expressions.
        // Takes a slice instead of `&Vec<_>` (clippy::ptr_arg idiom).
        let just_degrees_grouped = |exprs: &[Expression<E>]| {
            let mut counter = HashMap::new();
            for expr in exprs {
                *counter.entry(expr.degree()).or_insert(0) += 1;
            }
            counter
        };
        let is_opcode = system.lk_table_expressions.is_empty()
            && system.r_table_expressions.is_empty()
            && system.w_table_expressions.is_empty();
        // distinguishing opcodes from tables as done in ZKVMProver::create_proof
        if is_opcode {
            CircuitStats::OpCode(OpCodeStats {
                namespace: system.ns.clone(),
                witnesses: system.num_witin as usize,
                reads: system.r_expressions.len(),
                writes: system.w_expressions.len(),
                lookups: system.lk_expressions.len(),
                assert_zero_expr_degrees: just_degrees_grouped(&system.assert_zero_expressions),
                assert_zero_sumcheck_expr_degrees: just_degrees_grouped(
                    &system.assert_zero_sumcheck_expressions,
                ),
            })
        } else {
            // Table circuit: report the declared length of the first lookup
            // table when present, 0 otherwise (panic-free via `first()`).
            let table_len = system
                .lk_table_expressions
                .first()
                .and_then(|expr| expr.table_spec.len)
                .unwrap_or(0);
            CircuitStats::Table(TableStats { table_len })
        }
    }
}
// A report pairs free-form metadata with named per-circuit info entries.
pub struct Report<INFO> {
    // Key/value metadata (e.g. program name) carried into the JSON output.
    metadata: BTreeMap<String, String>,
    // (circuit name, info) pairs; order is preserved in the output.
    circuits: Vec<(String, INFO)>,
}
impl<INFO> Report<INFO>
where
    INFO: serde::Serialize,
{
    /// Look up a circuit's info by name, if present.
    pub fn get(&self, circuit_name: &str) -> Option<&INFO> {
        self.circuits
            .iter()
            .find(|(name, _)| name == circuit_name)
            .map(|(_, info)| info)
    }

    /// Write metadata plus per-circuit info to `filename` as pretty JSON.
    /// Panics if the file cannot be created or written.
    pub fn save_json(&self, filename: &str) {
        let json_data = json!({
            "metadata": self.metadata,
            "circuits": self.circuits,
        });
        let pretty = serde_json::to_string_pretty(&json_data).unwrap();
        File::create(filename)
            .expect("Unable to create file")
            .write_all(pretty.as_bytes())
            .expect("Unable to write data");
    }
}
// A trace-independent report built purely from circuit definitions.
pub type StaticReport = Report<CircuitStats>;
impl Report<CircuitStats> {
    /// Build a static report covering every registered constraint system.
    pub fn new<E: ExtensionField>(zkvm_system: &ZKVMConstraintSystem<E>) -> Self {
        Report {
            metadata: BTreeMap::default(),
            circuits: zkvm_system
                .get_css()
                .iter()
                .map(
                    |(
                        k,
                        ComposedConstrainSystem {
                            zkvm_v1_css: cs, ..
                        },
                    )| { (k.clone(), CircuitStats::new(cs)) },
                )
                .collect_vec(),
        }
    }
}
// Static circuit summary plus the number of instances observed in a trace.
#[derive(Clone, Debug, serde::Serialize, Default)]
pub struct CircuitStatsTrace {
    static_stats: CircuitStats,
    num_instances: usize,
}
impl CircuitStatsTrace {
    pub fn new(static_stats: CircuitStats, num_instances: usize) -> Self {
        CircuitStatsTrace {
            static_stats,
            num_instances,
        }
    }
}
// A report enriched with per-circuit instance counts from an execution trace.
pub type TraceReport = Report<CircuitStatsTrace>;
impl Report<CircuitStatsTrace> {
    /// Combine a static report with per-circuit instance counts.
    ///
    /// Panics if `num_instances` names a circuit absent from `static_report`.
    /// Circuits are sorted by descending instance count, and an
    /// "OPCODES TOTAL" aggregate row (opcode circuits only) is prepended.
    pub fn new(
        static_report: &Report<CircuitStats>,
        num_instances: BTreeMap<String, usize>,
        program_name: &str,
    ) -> Self {
        let mut metadata = static_report.metadata.clone();
        // Note where the num_instances are extracted from
        metadata.insert("PROGRAM_NAME".to_owned(), program_name.to_owned());
        // Ensure we recognize all circuits from the num_instances map
        num_instances.keys().for_each(|key| {
            assert!(static_report.get(key).is_some(), r"unrecognized key {key}.");
        });
        // Stitch num instances to corresponding entries. Sort by num instances
        let mut circuits = static_report
            .circuits
            .iter()
            .map(|(key, value)| {
                (
                    key.to_owned(),
                    CircuitStatsTrace::new(value.clone(), *num_instances.get(key).unwrap_or(&0)),
                )
            })
            .sorted_by(|lhs, rhs| rhs.1.num_instances.cmp(&lhs.1.num_instances))
            .collect_vec();
        // aggregate results (for opcode circuits only)
        let mut total = CircuitStatsTrace::default();
        for (_, circuit) in &circuits {
            if let CircuitStats::OpCode(_) = &circuit.static_stats {
                total = CircuitStatsTrace {
                    num_instances: total.num_instances + circuit.num_instances,
                    static_stats: total.static_stats + circuit.static_stats.clone(),
                }
            }
        }
        circuits.insert(0, ("OPCODES TOTAL".to_owned(), total));
        Report { metadata, circuits }
    }
    // Extract num_instances from witness data
    // (only the first segment's count is used per chip).
    pub fn new_via_witnesses<E: ExtensionField>(
        static_report: &Report<CircuitStats>,
        zkvm_witnesses: &ZKVMWitnesses<E>,
        program_name: &str,
    ) -> Self {
        let num_instances = zkvm_witnesses
            .clone()
            .into_iter_sorted()
            .map(|chip_input| (chip_input.name, chip_input.num_instances[0]))
            .collect::<BTreeMap<_, _>>();
        Self::new(static_report, num_instances, program_name)
    }
    /// Render the report as two pretty-printed tables (opcodes, then tables)
    /// written to `filename`; print errors are deliberately ignored.
    pub fn save_table(&self, filename: &str) {
        let mut opcodes_table = Table::new();
        opcodes_table.add_row(row![
            "opcode_name",
            "num_instances",
            "lookups",
            "reads",
            "witnesses",
            "writes",
            "0_expr_deg",
            "0_expr_sumcheck_deg"
        ]);
        let mut tables_table = Table::new();
        tables_table.add_row(row!["table_name", "num_instances", "table_len"]);
        for (name, circuit) in &self.circuits {
            match &circuit.static_stats {
                CircuitStats::OpCode(opstats) => {
                    opcodes_table.add_row(row![
                        name.to_owned(),
                        circuit.num_instances,
                        opstats.lookups,
                        opstats.reads,
                        opstats.witnesses,
                        opstats.writes,
                        utils::display_hashmap(&opstats.assert_zero_expr_degrees),
                        utils::display_hashmap(&opstats.assert_zero_sumcheck_expr_degrees)
                    ]);
                }
                CircuitStats::Table(tablestats) => {
                    tables_table.add_row(row![
                        name.to_owned(),
                        circuit.num_instances,
                        tablestats.table_len
                    ]);
                }
            }
        }
        let mut file = File::create(filename).expect("Unable to create file");
        _ = opcodes_table.print(&mut file);
        _ = tables_table.print(&mut file);
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/lib.rs | ceno_zkvm/src/lib.rs | #![deny(clippy::cargo)]
#![feature(box_patterns)]
#![feature(stmt_expr_attributes)]
#![feature(variant_count)]
pub mod error;
pub mod instructions;
pub mod scheme;
pub mod tables;
pub use utils::u64vec;
mod chip_handler;
pub mod circuit_builder;
pub mod e2e;
pub mod gadgets;
mod keygen;
pub mod precompiles;
pub mod state;
pub mod stats;
pub mod structs;
mod uint;
mod utils;
#[cfg(all(feature = "jemalloc", unix, not(test)))]
pub use utils::print_allocated_bytes;
mod witness;
pub use structs::ROMType;
pub use uint::Value;
pub use utils::with_panic_hook;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/keygen.rs | ceno_zkvm/src/keygen.rs | use std::collections::BTreeMap;
use crate::{
error::ZKVMError,
structs::{ZKVMConstraintSystem, ZKVMFixedTraces, ZKVMProvingKey},
};
use ff_ext::ExtensionField;
use mpcs::PolynomialCommitmentScheme;
impl<E: ExtensionField> ZKVMConstraintSystem<E> {
    /// Consume the registered constraint systems and produce a proving key.
    ///
    /// For every circuit (in BTreeMap name order) this records its index,
    /// collects and commits the fixed traces, and generates the per-circuit
    /// proving key. Returns `ZKVMError::FixedTraceNotFound` when a circuit
    /// declares fixed columns but no fixed trace was registered for it.
    pub fn key_gen<PCS: PolynomialCommitmentScheme<E>>(
        self,
        pp: PCS::ProverParam,
        vp: PCS::VerifierParam,
        entry_pc: u32,
        mut vm_fixed_traces: ZKVMFixedTraces<E>,
    ) -> Result<ZKVMProvingKey<E, PCS>, ZKVMError> {
        let mut vm_pk = ZKVMProvingKey::new(pp.clone(), vp);
        let mut fixed_traces = BTreeMap::new();
        let mut fixed_traces_no_omc_init = BTreeMap::new();
        for (circuit_index, (c_name, cs)) in self.circuit_css.into_iter().enumerate() {
            // fixed_traces is optional
            // verifier will check it existent if cs.num_fixed > 0
            if cs.num_fixed() > 0 {
                let fixed_trace_rmm = vm_fixed_traces
                    .circuit_fixed_traces
                    .remove(&c_name)
                    .flatten()
                    .ok_or(ZKVMError::FixedTraceNotFound(c_name.clone().into()))?;
                vm_pk
                    .circuit_index_fixed_num_instances
                    .insert(circuit_index, fixed_trace_rmm.num_instances());
                // Keep a second trace collection that excludes circuits
                // marked as OMC-init-only.
                if !cs.with_omc_init_only() {
                    fixed_traces_no_omc_init.insert(circuit_index, fixed_trace_rmm.clone());
                }
                fixed_traces.insert(circuit_index, fixed_trace_rmm);
            }
            vm_pk
                .circuit_name_to_index
                .insert(c_name.clone(), circuit_index);
            let circuit_pk = cs.key_gen();
            // A duplicate circuit name indicates a registration bug.
            assert!(vm_pk.circuit_pks.insert(c_name, circuit_pk).is_none());
        }
        vm_pk.commit_fixed(fixed_traces, fixed_traces_no_omc_init)?;
        vm_pk.initial_global_state_expr = self.initial_global_state_expr;
        vm_pk.finalize_global_state_expr = self.finalize_global_state_expr;
        vm_pk.set_program_entry_pc(entry_pc);
        Ok(vm_pk)
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/witness.rs | ceno_zkvm/src/witness.rs | pub use witness::set_val;
pub type LkMultiplicity = gkr_iop::utils::lk_multiplicity::LkMultiplicity;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/circuit_builder.rs | ceno_zkvm/src/circuit_builder.rs | pub type ConstraintSystem<E> = gkr_iop::circuit_builder::ConstraintSystem<E>;
pub type NameSpace = gkr_iop::circuit_builder::NameSpace;
pub type SetTableSpec = gkr_iop::circuit_builder::SetTableSpec;
pub type CircuitBuilder<'a, E> = gkr_iop::circuit_builder::CircuitBuilder<'a, E>;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/structs.rs | ceno_zkvm/src/structs.rs | use crate::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::{E2EProgramCtx, ShardContext},
error::ZKVMError,
instructions::Instruction,
scheme::septic_curve::SepticPoint,
state::StateCircuit,
tables::{
ECPoint, MemFinalRecord, RMMCollections, ShardRamCircuit, ShardRamInput, ShardRamRecord,
TableCircuit,
},
};
use ceno_emul::{Addr, CENO_PLATFORM, Platform, RegIdx, StepRecord, WordAddr};
use ff_ext::{ExtensionField, PoseidonField};
use gkr_iop::{gkr::GKRCircuit, tables::LookupTable, utils::lk_multiplicity::Multiplicity};
use itertools::Itertools;
use mpcs::{Point, PolynomialCommitmentScheme};
use multilinear_extensions::{Expression, Instance};
use rayon::{
iter::{IntoParallelIterator, IntoParallelRefIterator, ParallelIterator},
prelude::ParallelSlice,
};
use rustc_hash::FxHashSet;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::{
collections::{BTreeMap, HashMap},
ops::Range,
sync::Arc,
};
use sumcheck::structs::{IOPProof, IOPProverMessage};
use tracing::Level;
use witness::RowMajorMatrix;
/// proof that the sum of N=2^n EC points is equal to `sum`
/// in one layer instead of GKR layered circuit approach
/// note that this one layer IOP borrowed ideas from
/// [Quark paper](https://eprint.iacr.org/2020/1275.pdf)
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize",
    deserialize = "E::BaseField: DeserializeOwned"
))]
pub struct EccQuarkProof<E: ExtensionField> {
    // Zerocheck (sumcheck) transcript for the one-layer argument.
    pub zerocheck_proof: IOPProof<E>,
    // Number of EC points being summed (N = 2^n).
    pub num_instances: usize,
    pub evals: Vec<E>, // x[rt,0], x[rt,1], y[rt,0], y[rt,1], x[0,rt], y[0,rt], s[0,rt]
    // Evaluation point for the claimed openings in `evals`.
    pub rt: Point<E>,
    // Claimed sum of all N points.
    pub sum: SepticPoint<E::BaseField>,
}
// Prover messages and per-spec evaluations for the tower argument.
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize",
    deserialize = "E::BaseField: DeserializeOwned"
))]
pub struct TowerProofs<E: ExtensionField> {
    pub proofs: Vec<Vec<IOPProverMessage<E>>>,
    // specs -> layers -> evals
    pub prod_specs_eval: Vec<Vec<Vec<E>>>,
    // specs -> layers -> point
    #[serde(skip)] // verifier can derive points itself
    pub prod_specs_points: Vec<Vec<Point<E>>>,
    // specs -> layers -> evals
    pub logup_specs_eval: Vec<Vec<Vec<E>>>,
    // specs -> layers -> point
    #[serde(skip)] // verifier can derive points itself
    pub logup_specs_points: Vec<Vec<Point<E>>>,
}
pub type WitnessId = u16;
pub type ChallengeId = u16;
pub type ROMType = LookupTable;
pub type RAMType = gkr_iop::RAMType;
pub type PointAndEval<F> = multilinear_extensions::mle::PointAndEval<F>;
#[derive(Clone)]
pub struct ProvingKey<E: ExtensionField> {
    // A proving key currently carries only its verifying key.
    pub vk: VerifyingKey<E>,
}
impl<E: ExtensionField> ProvingKey<E> {
    /// Constraint system backing this key (delegates to the verifying key).
    pub fn get_cs(&self) -> &ComposedConstrainSystem<E> {
        self.vk.get_cs()
    }
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[serde(bound = "E: ExtensionField + DeserializeOwned")]
pub struct VerifyingKey<E: ExtensionField> {
    // The composed constraint system this key verifies against.
    pub cs: ComposedConstrainSystem<E>,
}
impl<E: ExtensionField> VerifyingKey<E> {
    /// Borrow the underlying composed constraint system.
    pub fn get_cs(&self) -> &ComposedConstrainSystem<E> {
        &self.cs
    }
}
// Sizing parameters for a guest program's circuits.
// NOTE(review): field meanings inferred from names/defaults — confirm against
// the table circuits that consume them.
#[derive(Clone, Debug)]
pub struct ProgramParams {
    pub platform: Platform,
    pub program_size: usize,
    pub pubio_len: usize,
    pub static_memory_len: usize,
}
impl Default for ProgramParams {
    fn default() -> Self {
        ProgramParams {
            platform: CENO_PLATFORM.clone(),
            program_size: (1 << 14),
            pubio_len: (1 << 2),
            static_memory_len: (1 << 16),
        }
    }
}
#[derive(Clone, Debug, serde::Serialize, serde::Deserialize)]
#[serde(bound = "E: ExtensionField + DeserializeOwned")]
pub struct ComposedConstrainSystem<E: ExtensionField> {
    // TODO combine zkvm_v1_css to `GKRCircuit<E>`
    // right now both co-exist because gkr_circuit couldn't cope with dynamic layers features which required by tower argument
    pub zkvm_v1_css: ConstraintSystem<E>,
    // Optional GKR circuit; opcode circuits set this, table circuits may not.
    pub gkr_circuit: Option<GKRCircuit<E>>,
}
impl<E: ExtensionField> ComposedConstrainSystem<E> {
    /// Wrap this constraint system into a proving key.
    pub fn key_gen(self) -> ProvingKey<E> {
        ProvingKey {
            vk: VerifyingKey { cs: self },
        }
    }
    /// Number of witness columns.
    pub fn num_witin(&self) -> usize {
        self.zkvm_v1_css.num_witin.into()
    }
    /// Number of structural witness columns.
    pub fn num_structural_witin(&self) -> usize {
        self.zkvm_v1_css.num_structural_witin.into()
    }
    /// Number of fixed columns.
    pub fn num_fixed(&self) -> usize {
        self.zkvm_v1_css.num_fixed
    }
    /// Total read expressions (opcode-style plus table-style).
    pub fn num_reads(&self) -> usize {
        self.zkvm_v1_css.r_expressions.len() + self.zkvm_v1_css.r_table_expressions.len()
    }
    /// Total write expressions (opcode-style plus table-style).
    pub fn num_writes(&self) -> usize {
        self.zkvm_v1_css.w_expressions.len() + self.zkvm_v1_css.w_table_expressions.len()
    }
    pub fn instance_openings(&self) -> &[Instance] {
        &self.zkvm_v1_css.instance_openings
    }
    /// Whether the circuit accumulates an elliptic-curve final sum.
    pub fn has_ecc_ops(&self) -> bool {
        !self.zkvm_v1_css.ec_final_sum.is_empty()
    }
    /// Whether the circuit declares lookup-table expressions.
    pub fn is_with_lk_table(&self) -> bool {
        !self.zkvm_v1_css.lk_table_expressions.is_empty()
    }
    /// return number of lookup operation
    pub fn num_lks(&self) -> usize {
        self.zkvm_v1_css.lk_expressions.len() + self.zkvm_v1_css.lk_table_expressions.len()
    }
    /// return num_vars belongs to rotation
    pub fn rotation_vars(&self) -> Option<usize> {
        self.zkvm_v1_css
            .rotation_params
            .as_ref()
            .map(|param| param.rotation_cyclic_group_log2)
    }
    /// return rotation sub_group size
    pub fn rotation_subgroup_size(&self) -> Option<usize> {
        self.zkvm_v1_css
            .rotation_params
            .as_ref()
            .map(|param| param.rotation_cyclic_subgroup_size)
    }
    /// Whether the circuit is flagged as OMC-init-only.
    pub fn with_omc_init_only(&self) -> bool {
        self.zkvm_v1_css.with_omc_init_only
    }
}
#[derive(Clone)]
pub struct ZKVMConstraintSystem<E: ExtensionField> {
    // One composed constraint system per circuit, keyed by circuit name.
    pub(crate) circuit_css: BTreeMap<String, ComposedConstrainSystem<E>>,
    // Global-state expressions populated by `register_global_state`.
    pub(crate) initial_global_state_expr: Expression<E>,
    pub(crate) finalize_global_state_expr: Expression<E>,
    // pub keccak_gkr_iop: Option<KeccakGKRIOP<E>>,
    pub params: ProgramParams,
}
impl<E: ExtensionField> Default for ZKVMConstraintSystem<E> {
    fn default() -> Self {
        ZKVMConstraintSystem {
            circuit_css: BTreeMap::new(),
            initial_global_state_expr: Expression::ZERO,
            finalize_global_state_expr: Expression::ZERO,
            params: ProgramParams::default(),
            // keccak_gkr_iop: None,
        }
    }
}
impl<E: ExtensionField> ZKVMConstraintSystem<E> {
    /// Create an empty system with the given program parameters.
    pub fn new_with_platform(params: ProgramParams) -> Self {
        ZKVMConstraintSystem {
            params,
            ..Default::default()
        }
    }
    /// Build and register the constraint system for one opcode circuit.
    /// Panics if a circuit with the same name was already registered.
    pub fn register_opcode_circuit<OC: Instruction<E>>(&mut self) -> OC::InstructionConfig {
        let mut cs = ConstraintSystem::new(|| format!("riscv_opcode/{}", OC::name()));
        let mut circuit_builder = CircuitBuilder::<E>::new(&mut cs);
        let (config, gkr_iop_circuit) =
            OC::build_gkr_iop_circuit(&mut circuit_builder, &self.params).unwrap();
        let cs = ComposedConstrainSystem {
            zkvm_v1_css: cs,
            gkr_circuit: Some(gkr_iop_circuit),
        };
        tracing::trace!(
            "opcode circuit {} has {} witnesses, {} reads, {} writes, {} lookups",
            OC::name(),
            cs.num_witin(),
            cs.zkvm_v1_css.r_expressions.len(),
            cs.zkvm_v1_css.w_expressions.len(),
            cs.zkvm_v1_css.lk_expressions.len(),
        );
        assert!(
            self.circuit_css.insert(OC::name(), cs).is_none(),
            "opcode circuit {} already registered",
            OC::name()
        );
        config
    }
    /// Build and register the constraint system for one table circuit.
    /// Unlike opcode circuits, the GKR circuit here is optional.
    pub fn register_table_circuit<TC: TableCircuit<E>>(&mut self) -> TC::TableConfig {
        let mut cs = ConstraintSystem::new(|| format!("riscv_table/{}", TC::name()));
        let mut circuit_builder = CircuitBuilder::<E>::new(&mut cs);
        let (config, gkr_iop_circuit) =
            TC::build_gkr_iop_circuit(&mut circuit_builder, &self.params).unwrap();
        let cs = ComposedConstrainSystem {
            zkvm_v1_css: cs,
            gkr_circuit: gkr_iop_circuit,
        };
        assert!(self.circuit_css.insert(TC::name(), cs).is_none());
        config
    }
    /// Populate the global-state boundary expressions from a state circuit.
    pub fn register_global_state<SC: StateCircuit<E>>(&mut self) {
        let mut cs = ConstraintSystem::new(|| "riscv_state");
        let mut circuit_builder = CircuitBuilder::<E>::new(&mut cs);
        self.initial_global_state_expr =
            SC::initial_global_state(&mut circuit_builder).expect("global_state_in failed");
        self.finalize_global_state_expr =
            SC::finalize_global_state(&mut circuit_builder).expect("global_state_out failed");
    }
    /// All registered constraint systems, keyed by circuit name.
    pub fn get_css(&self) -> &BTreeMap<String, ComposedConstrainSystem<E>> {
        &self.circuit_css
    }
    /// One registered constraint system by name, if present.
    pub fn get_cs(&self, name: &String) -> Option<&ComposedConstrainSystem<E>> {
        self.circuit_css.get(name)
    }
}
#[derive(Default, Clone)]
pub struct ZKVMFixedTraces<E: ExtensionField> {
    // Fixed trace per circuit name; `None` when the circuit has no fixed columns.
    pub circuit_fixed_traces: BTreeMap<String, Option<RowMajorMatrix<E::BaseField>>>,
}
impl<E: ExtensionField> ZKVMFixedTraces<E> {
    /// Record the fixed trace for an opcode circuit (may be `None`).
    /// Panics if the circuit was registered twice.
    pub fn register_opcode_circuit<OC: Instruction<E>>(
        &mut self,
        cs: &ZKVMConstraintSystem<E>,
        config: &OC::InstructionConfig,
    ) {
        let cs = cs.get_cs(&OC::name()).expect("cs not found");
        assert!(
            self.circuit_fixed_traces
                .insert(
                    OC::name(),
                    OC::generate_fixed_traces(config, cs.zkvm_v1_css.num_fixed,)
                )
                .is_none()
        );
    }
    /// Record the fixed trace for a table circuit (always present).
    /// Panics if the circuit was registered twice.
    pub fn register_table_circuit<TC: TableCircuit<E>>(
        &mut self,
        cs: &ZKVMConstraintSystem<E>,
        config: &TC::TableConfig,
        input: &TC::FixedInput,
    ) {
        let cs = cs.get_cs(&TC::name()).expect("cs not found");
        assert!(
            self.circuit_fixed_traces
                .insert(
                    TC::name(),
                    Some(TC::generate_fixed_traces(
                        config,
                        cs.zkvm_v1_css.num_fixed,
                        input
                    )),
                )
                .is_none()
        );
    }
}
// Witness matrices for one chip, plus its per-segment instance counts.
#[derive(Clone)]
pub struct ChipInput<E: ExtensionField> {
    pub name: String,
    pub witness_rmms: RMMCollections<E::BaseField>,
    // in shard ram chip, num_instances length would be > 1
    pub num_instances: Vec<usize>,
}
impl<E: ExtensionField> ChipInput<E> {
pub fn new(
name: String,
witness_rmms: RMMCollections<E::BaseField>,
num_instances: Vec<usize>,
) -> Self {
Self {
name,
witness_rmms,
num_instances,
}
}
pub fn num_instances(&self) -> usize {
self.num_instances.iter().sum()
}
}
#[derive(Default, Clone)]
pub struct ZKVMWitnesses<E: ExtensionField> {
    // Assigned chip inputs per circuit name.
    pub witnesses: BTreeMap<String, Vec<ChipInput<E>>>,
    // Per-circuit lookup multiplicities, merged by `finalize_lk_multiplicities`.
    lk_mlts: BTreeMap<String, Multiplicity<u64>>,
    // Merged multiplicity tables; `Some` once finalized.
    combined_lk_mlt: Option<Vec<HashMap<u64, usize>>>,
}
impl<E: ExtensionField> ZKVMWitnesses<E> {
pub fn get_witness(&self, name: &String) -> Option<&Vec<ChipInput<E>>> {
self.witnesses.get(name)
}
pub fn get_lk_mlt(&self, name: &String) -> Option<&Multiplicity<u64>> {
self.lk_mlts.get(name)
}
    /// Assign witnesses for one opcode circuit from its step records.
    ///
    /// Must run before `finalize_lk_multiplicities` (asserts the combined
    /// multiplicity has not been built yet). Panics if this circuit was
    /// already assigned.
    pub fn assign_opcode_circuit<OC: Instruction<E>>(
        &mut self,
        cs: &ZKVMConstraintSystem<E>,
        shard_ctx: &mut ShardContext,
        config: &OC::InstructionConfig,
        records: Vec<&StepRecord>,
    ) -> Result<(), ZKVMError> {
        assert!(self.combined_lk_mlt.is_none());
        let cs = cs.get_cs(&OC::name()).unwrap();
        let (witness, logup_multiplicity) = OC::assign_instances(
            config,
            shard_ctx,
            cs.zkvm_v1_css.num_witin as usize,
            cs.zkvm_v1_css.num_structural_witin as usize,
            records,
        )?;
        let num_instances = vec![witness[0].num_instances()];
        // An empty instance vector (rather than vec![0]) marks an unused circuit.
        let input = ChipInput::new(
            OC::name(),
            witness,
            if num_instances[0] > 0 {
                num_instances
            } else {
                vec![]
            },
        );
        assert!(self.witnesses.insert(OC::name(), vec![input]).is_none());
        assert!(
            self.lk_mlts
                .insert(OC::name(), logup_multiplicity)
                .is_none()
        );
        Ok(())
    }
// merge the multiplicities in each opcode circuit into one
pub fn finalize_lk_multiplicities(&mut self) {
assert!(self.combined_lk_mlt.is_none());
assert!(!self.lk_mlts.is_empty());
let mut combined_lk_mlt = vec![];
let keys = self.lk_mlts.keys().cloned().collect_vec();
for name in keys {
let lk_mlt = self.lk_mlts.get(&name).unwrap();
if combined_lk_mlt.is_empty() {
combined_lk_mlt = lk_mlt.to_vec();
} else {
combined_lk_mlt
.iter_mut()
.zip_eq(lk_mlt.iter())
.for_each(|(m1, m2)| {
for (key, value) in m2 {
*m1.entry(*key).or_insert(0) += value;
}
});
}
}
self.combined_lk_mlt = Some(combined_lk_mlt);
}
    /// Assign witnesses for one table circuit.
    ///
    /// Requires `finalize_lk_multiplicities` to have run first (asserted).
    /// Panics if this circuit was already assigned.
    pub fn assign_table_circuit<TC: TableCircuit<E>>(
        &mut self,
        cs: &ZKVMConstraintSystem<E>,
        config: &TC::TableConfig,
        input: &TC::WitnessInput<'_>,
    ) -> Result<(), ZKVMError> {
        assert!(self.combined_lk_mlt.is_some());
        let cs = cs.get_cs(&TC::name()).unwrap();
        let witness = TC::assign_instances(
            config,
            cs.zkvm_v1_css.num_witin as usize,
            cs.zkvm_v1_css.num_structural_witin as usize,
            self.combined_lk_mlt.as_ref().unwrap(),
            input,
        )?;
        // Two witness matrices are produced; take the larger instance count.
        let num_instances = std::cmp::max(witness[0].num_instances(), witness[1].num_instances());
        let input = ChipInput::new(
            TC::name(),
            witness,
            if num_instances > 0 {
                vec![num_instances]
            } else {
                vec![]
            },
        );
        assert!(self.witnesses.insert(TC::name(), vec![input]).is_none());
        Ok(())
    }
    /// Assign the shard-RAM circuit from cross-shard memory traffic.
    ///
    /// Collects, in parallel: (1) first-shard-initialized records consumed by
    /// later shards, (2) records initialized in this shard but accessed only
    /// later, (3) this shard's external writes and reads. The records are
    /// then chunked by `max_num_cross_shard_accesses` and assigned one chip
    /// input per chunk, with per-chunk (num_reads, num_writes) instance counts.
    #[allow(clippy::type_complexity)]
    pub fn assign_shared_circuit(
        &mut self,
        cs: &ZKVMConstraintSystem<E>,
        (shard_ctx, final_mem): &(
            &ShardContext,
            &[(&'static str, Option<Range<Addr>>, &[MemFinalRecord])],
        ),
        config: &<ShardRamCircuit<E> as TableCircuit<E>>::TableConfig,
    ) -> Result<(), ZKVMError> {
        let perm = <E::BaseField as PoseidonField>::get_default_perm();
        let addr_accessed = shard_ctx.get_addr_accessed();
        // future shard needed records := shard_ctx.write_records ∪ //
        // (shard_ctx.after_current_shard_cycle(mem_record.cycle) && !addr_accessed.contains(&waddr))
        // 1. process final mem which
        // 1.1 init in first shard
        // 1.2 not accessed in first shard
        // 1.3 accessed in future shard
        let first_shard_access_later_records = if shard_ctx.is_first_shard() {
            final_mem
                .par_iter()
                // only process no range restriction memory record
                // for range specified it means dynamic init across different shard
                .filter(|(_, range, _)| range.is_none())
                .flat_map(|(mem_name, _, final_mem)| {
                    final_mem.par_iter().filter_map(|mem_record| {
                        let (waddr, addr) = Self::mem_addresses(mem_record);
                        Self::make_cross_shard_input(
                            mem_name,
                            mem_record,
                            waddr,
                            addr,
                            shard_ctx,
                            &addr_accessed,
                            &perm,
                        )
                    })
                })
                .collect()
        } else {
            vec![]
        };
        // 2. process records which
        // 2.1 init within current shard
        // 2.2 not accessed in current shard
        // 2.3 access by later shards.
        let current_shard_access_later = final_mem
            .par_iter()
            // only process range-restricted memory record
            // for range specified it means dynamic init across different shard
            .filter(|(_, range, _)| range.is_some())
            .flat_map(|(mem_name, range, final_mem)| {
                let range = range.as_ref().unwrap();
                final_mem.par_iter().filter_map(|mem_record| {
                    let (waddr, addr) = Self::mem_addresses(mem_record);
                    if !range.contains(&addr) {
                        return None;
                    }
                    Self::make_cross_shard_input(
                        mem_name,
                        mem_record,
                        waddr,
                        addr,
                        shard_ctx,
                        &addr_accessed,
                        &perm,
                    )
                })
            })
            .collect::<Vec<_>>();
        let global_input = shard_ctx
            .write_records()
            .par_iter()
            .flat_map(|records| {
                // global write -> local reads
                records.par_iter().map(|(vma, record)| {
                    let global_write: ShardRamRecord = (vma, record, true).into();
                    let ec_point: ECPoint<E> = global_write.to_ec_point(&perm);
                    ShardRamInput {
                        name: "current_shard_external_write",
                        record: global_write,
                        ec_point,
                    }
                })
            })
            .chain(first_shard_access_later_records.into_par_iter())
            .chain(current_shard_access_later.into_par_iter())
            .chain(shard_ctx.read_records().par_iter().flat_map(|records| {
                // global read -> local write
                records.par_iter().map(|(vma, record)| {
                    let global_read: ShardRamRecord = (vma, record, false).into();
                    let ec_point: ECPoint<E> = global_read.to_ec_point(&perm);
                    ShardRamInput {
                        name: "current_shard_external_read",
                        record: global_read,
                        ec_point,
                    }
                })
            }))
            .collect::<Vec<_>>();
        if tracing::enabled!(Level::DEBUG) {
            let total = global_input.len() as f64;
            // log global input stats
            let record_stats = global_input
                .par_iter()
                .fold(HashMap::new, |mut local, d| {
                    *local.entry(d.name).or_insert(0) += 1;
                    local
                })
                .reduce(HashMap::new, |mut a, b| {
                    for (k, v) in b {
                        *a.entry(k).or_insert(0) += v;
                    }
                    a
                });
            for (mem_name, count) in record_stats {
                let pct = (count as f64 / total) * 100.0;
                tracing::debug!(
                    "{}th-shard shard ram circuit records: mem_name={} count={} ({:.2}%)",
                    shard_ctx.shard_id,
                    mem_name,
                    count,
                    pct
                );
            }
        }
        assert!(self.combined_lk_mlt.is_some());
        let cs = cs.get_cs(&ShardRamCircuit::<E>::name()).unwrap();
        let circuit_inputs = global_input
            .par_chunks(shard_ctx.max_num_cross_shard_accesses)
            .map(|shard_accesses| {
                let witness = ShardRamCircuit::assign_instances(
                    config,
                    cs.zkvm_v1_css.num_witin as usize,
                    cs.zkvm_v1_css.num_structural_witin as usize,
                    self.combined_lk_mlt.as_ref().unwrap(),
                    shard_accesses,
                )?;
                // NOTE(review): write-set entries correspond to local reads
                // (see the "global write -> local reads" mapping above),
                // hence the apparently inverted filter — confirm intent.
                let num_reads = shard_accesses
                    .par_iter()
                    .filter(|access| access.record.is_to_write_set)
                    .count();
                let num_writes = shard_accesses.len() - num_reads;
                Ok(ChipInput::new(
                    ShardRamCircuit::<E>::name(),
                    witness,
                    vec![num_reads, num_writes],
                ))
            })
            .collect::<Result<Vec<_>, ZKVMError>>()?;
        // set num_read, num_write as separate instance
        assert!(
            self.witnesses
                .insert(ShardRamCircuit::<E>::name(), circuit_inputs)
                .is_none()
        );
        Ok(())
    }
/// Collect `(circuit name, instance counts)` pairs for every chip input
/// currently stored, in the backing map's iteration order.
pub fn get_witnesses_name_instance(&self) -> Vec<(String, Vec<usize>)> {
    let mut result = Vec::new();
    for chip_inputs in self.witnesses.values() {
        for chip_input in chip_inputs {
            result.push((chip_input.name.clone(), chip_input.num_instances.clone()));
        }
    }
    result
}
/// Consume `self` and iterate all opcode/table chip inputs, sorted by
/// circuit name (the backing map iterates keys in alphabetical order).
pub fn into_iter_sorted(self) -> impl Iterator<Item = ChipInput<E>> {
    self.witnesses.into_iter().flat_map(|(_, inputs)| inputs)
}
/// Map a final memory record to its `(word address, raw address)` pair:
/// register records are translated through `Platform::register_vma`, plain
/// memory records use their address directly.
///
/// # Panics
/// `unimplemented!` for any `RAMType` other than `Register` / `Memory`.
#[inline(always)]
fn mem_addresses(mem_record: &MemFinalRecord) -> (WordAddr, Addr) {
    match mem_record.ram_type {
        RAMType::Register => (
            // Register index -> virtual memory address -> word address.
            Platform::register_vma(mem_record.addr as RegIdx).into(),
            mem_record.addr,
        ),
        RAMType::Memory => (mem_record.addr.into(), mem_record.addr),
        _ => unimplemented!(),
    }
}
/// Build a `ShardRamInput` (tagged for the write set) for a memory record
/// that this shard did not touch locally but that is accessed after this
/// shard's cycle range — i.e. publish the record's initial value cross-shard.
///
/// Returns `None` when the word address was already accessed locally, or when
/// the record's cycle is not after the current shard.
#[inline(always)]
fn make_cross_shard_input(
    mem_name: &'static str,
    mem_record: &MemFinalRecord,
    waddr: WordAddr,
    addr: u32,
    shard_ctx: &ShardContext,
    addr_accessed: &FxHashSet<WordAddr>,
    perm: &<<E as ExtensionField>::BaseField as PoseidonField>::P,
) -> Option<ShardRamInput<E>> {
    if addr_accessed.contains(&waddr) || !shard_ctx.after_current_shard_cycle(mem_record.cycle)
    {
        return None;
    }
    let global_write = ShardRamRecord {
        // Registers are keyed by register index, memory by word address.
        addr: match mem_record.ram_type {
            RAMType::Register => addr,
            RAMType::Memory => waddr.into(),
            _ => unimplemented!(),
        },
        ram_type: mem_record.ram_type,
        value: mem_record.init_value,
        shard: shard_ctx.shard_id as u64,
        // NOTE(review): both clocks are zeroed here — presumably because the
        // value is untouched in this shard; confirm against the shard-ram
        // protocol's convention for never-written addresses.
        local_clk: 0,
        global_clk: 0,
        is_to_write_set: true,
    };
    // Hash the record into an EC point for the cross-shard accumulator.
    let ec_point: ECPoint<E> = global_write.to_ec_point(perm);
    Some(ShardRamInput {
        name: mem_name,
        record: global_write,
        ec_point,
    })
}
}
/// Proving key for the whole ZKVM: PCS parameters, per-circuit proving keys,
/// fixed-trace commitments, and the global-state in/out expressions.
pub struct ZKVMProvingKey<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> {
    /// PCS prover parameters.
    pub pp: PCS::ProverParam,
    /// PCS verifier parameters (cloned into the verifying key).
    pub vp: PCS::VerifierParam,
    /// Optional end-to-end program context, set via `set_program_ctx`.
    pub program_ctx: Option<E2EProgramCtx<E>>,
    // entry program counter
    pub entry_pc: u32,
    // pk for opcode and table circuits
    pub circuit_pks: BTreeMap<String, ProvingKey<E>>,
    /// Lookup from circuit name to its index.
    pub circuit_name_to_index: BTreeMap<String, usize>,
    // Fixed commitments are separated into two groups:
    //
    // 1. `fixed_commit_*`
    //    - Used by the *main circuit* for offline memory check (OMC) table initialization.
    //    - This initialization occurs **only in the first shard** (`shard_id = 0`).
    //
    // 2. `fixed_no_omc_init_commit_*`
    //    - Used by subsequent shards (`shard_id > 0`), which **omit** OMC table initialization.
    //    - All circuit components related to OMC init are skipped in these shards.
    pub fixed_commit_wd: Option<Arc<<PCS as PolynomialCommitmentScheme<E>>::CommitmentWithWitness>>,
    pub fixed_commit: Option<<PCS as PolynomialCommitmentScheme<E>>::Commitment>,
    pub fixed_no_omc_init_commit_wd:
        Option<Arc<<PCS as PolynomialCommitmentScheme<E>>::CommitmentWithWitness>>,
    pub fixed_no_omc_init_commit: Option<<PCS as PolynomialCommitmentScheme<E>>::Commitment>,
    /// Per-circuit-index number of fixed instances.
    pub circuit_index_fixed_num_instances: BTreeMap<usize, usize>,
    // expression for global state in/out
    pub initial_global_state_expr: Expression<E>,
    pub finalize_global_state_expr: Expression<E>,
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> ZKVMProvingKey<E, PCS> {
    /// Create an empty proving key holding only the PCS parameters; all other
    /// fields start unset/default and are filled in by later setup steps.
    pub(crate) fn new(pp: PCS::ProverParam, vp: PCS::VerifierParam) -> Self {
        Self {
            pp,
            vp,
            program_ctx: None,
            entry_pc: 0,
            circuit_pks: BTreeMap::new(),
            initial_global_state_expr: Expression::ZERO,
            finalize_global_state_expr: Expression::ZERO,
            circuit_index_fixed_num_instances: BTreeMap::new(),
            circuit_name_to_index: BTreeMap::new(),
            fixed_commit_wd: None,
            fixed_commit: None,
            fixed_no_omc_init_commit_wd: None,
            fixed_no_omc_init_commit: None,
        }
    }

    /// Batch-commit a single group of fixed traces.
    ///
    /// Returns `(None, None)` for an empty group, otherwise the commitment
    /// with witness (wrapped in `Arc`) together with its pure commitment.
    /// Factored out so `commit_fixed` does not duplicate this logic per group.
    #[allow(clippy::type_complexity)]
    fn commit_trace_group(
        pp: &PCS::ProverParam,
        traces: BTreeMap<usize, RowMajorMatrix<<E as ExtensionField>::BaseField>>,
    ) -> Result<
        (
            Option<Arc<<PCS as PolynomialCommitmentScheme<E>>::CommitmentWithWitness>>,
            Option<<PCS as PolynomialCommitmentScheme<E>>::Commitment>,
        ),
        ZKVMError,
    > {
        if traces.is_empty() {
            return Ok((None, None));
        }
        let commit_wd = PCS::batch_commit(pp, traces.into_values().collect_vec())
            .map_err(ZKVMError::PCSError)?;
        let commit = PCS::get_pure_commitment(&commit_wd);
        Ok((Some(Arc::new(commit_wd)), Some(commit)))
    }

    /// Commit to both groups of fixed traces: the full group (with OMC table
    /// init, used by the first shard) and the no-OMC-init group (used by
    /// later shards). An empty group clears the corresponding commitments.
    pub(crate) fn commit_fixed(
        &mut self,
        fixed_traces: BTreeMap<usize, RowMajorMatrix<<E as ExtensionField>::BaseField>>,
        fixed_traces_no_omc_init: BTreeMap<usize, RowMajorMatrix<<E as ExtensionField>::BaseField>>,
    ) -> Result<(), ZKVMError> {
        let (wd, commit) = Self::commit_trace_group(&self.pp, fixed_traces)?;
        self.fixed_commit_wd = wd;
        self.fixed_commit = commit;

        let (wd, commit) = Self::commit_trace_group(&self.pp, fixed_traces_no_omc_init)?;
        self.fixed_no_omc_init_commit_wd = wd;
        self.fixed_no_omc_init_commit = commit;
        Ok(())
    }

    /// Record the program's entry program counter.
    pub(crate) fn set_program_entry_pc(&mut self, entry_pc: u32) {
        self.entry_pc = entry_pc;
    }

    /// Whether either fixed-trace group has been committed.
    pub fn has_fixed_commitment(&self) -> bool {
        self.fixed_commit_wd.is_some() || self.fixed_no_omc_init_commit_wd.is_some()
    }
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> ZKVMProvingKey<E, PCS> {
    /// Derive the verifying key from this proving key by cloning every
    /// verifier-visible component ("slow" because of the deep clones).
    pub fn get_vk_slow(&self) -> ZKVMVerifyingKey<E, PCS> {
        let circuit_vks = self
            .circuit_pks
            .iter()
            .map(|(name, pk)| (name.clone(), pk.vk.clone()))
            .collect();
        // Index assignment follows the sorted key order of `circuit_pks`.
        let circuit_index_to_name = self
            .circuit_pks
            .keys()
            .cloned()
            .enumerate()
            .collect();
        ZKVMVerifyingKey {
            vp: self.vp.clone(),
            entry_pc: self.entry_pc,
            circuit_vks,
            fixed_commit: self.fixed_commit.clone(),
            fixed_no_omc_init_commit: self.fixed_no_omc_init_commit.clone(),
            // expression for global state in/out
            initial_global_state_expr: self.initial_global_state_expr.clone(),
            finalize_global_state_expr: self.finalize_global_state_expr.clone(),
            circuit_index_to_name,
        }
    }

    /// Attach the end-to-end program context.
    pub fn set_program_ctx(&mut self, ctx: E2EProgramCtx<E>) {
        self.program_ctx = Some(ctx);
    }
}
/// Verifying key for the whole ZKVM: per-circuit verifying keys, fixed-trace
/// commitments, and the global-state in/out expressions.
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize",
    deserialize = "E::BaseField: DeserializeOwned",
))]
pub struct ZKVMVerifyingKey<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>
where
    PCS::VerifierParam: Sized,
{
    /// PCS verifier parameters.
    pub vp: PCS::VerifierParam,
    // entry program counter
    pub entry_pc: u32,
    // vk for opcode and table circuits
    pub circuit_vks: BTreeMap<String, VerifyingKey<E>>,
    /// Commitment to the fixed traces with OMC table init (first shard).
    pub fixed_commit: Option<<PCS as PolynomialCommitmentScheme<E>>::Commitment>,
    /// Commitment to the fixed traces without OMC init (later shards).
    pub fixed_no_omc_init_commit: Option<<PCS as PolynomialCommitmentScheme<E>>::Commitment>,
    // expression for global state in/out
    pub initial_global_state_expr: Expression<E>,
    pub finalize_global_state_expr: Expression<E>,
    // circuit index -> circuit name
    // mainly used for debugging
    pub circuit_index_to_name: BTreeMap<usize, String>,
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/state.rs | ceno_zkvm/src/state.rs | use ff_ext::ExtensionField;
use crate::{
chip_handler::general::PublicValuesQuery, circuit_builder::CircuitBuilder, error::ZKVMError,
structs::RAMType,
};
use multilinear_extensions::{Expression, ToExpr};
use p3::field::FieldAlgebra;
/// Expressions a circuit contributes to the global-state in/out consistency
/// argument.
pub trait StateCircuit<E: ExtensionField> {
    /// Expression representing the global state before execution.
    fn initial_global_state(
        circuit_builder: &mut CircuitBuilder<E>,
    ) -> Result<Expression<E>, ZKVMError>;

    /// Expression representing the global state after execution.
    fn finalize_global_state(
        circuit_builder: &mut CircuitBuilder<E>,
    ) -> Result<Expression<E>, ZKVMError>;
}
/// Marker type implementing [`StateCircuit`] for the VM-wide global state.
pub struct GlobalState;
impl<E: ExtensionField> StateCircuit<E> for GlobalState {
fn initial_global_state(
circuit_builder: &mut crate::circuit_builder::CircuitBuilder<E>,
) -> Result<Expression<E>, ZKVMError> {
let states: Vec<Expression<E>> = vec![
E::BaseField::from_canonical_u64(RAMType::GlobalState as u64).expr(),
circuit_builder.query_init_pc()?.expr(),
circuit_builder.query_init_cycle()?.expr(),
];
Ok(circuit_builder.rlc_chip_record(states))
}
fn finalize_global_state(
circuit_builder: &mut crate::circuit_builder::CircuitBuilder<E>,
) -> Result<Expression<E>, ZKVMError> {
let states: Vec<Expression<E>> = vec![
E::BaseField::from_canonical_u64(RAMType::GlobalState as u64).expr(),
circuit_builder.query_end_pc()?.expr(),
circuit_builder.query_end_cycle()?.expr(),
];
Ok(circuit_builder.rlc_chip_record(states))
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/uint.rs | ceno_zkvm/src/uint.rs | mod arithmetic;
pub mod constants;
mod logic;
pub mod util;
use crate::{
chip_handler::{AddressExpr, MemoryExpr, RegisterExpr},
circuit_builder::CircuitBuilder,
error::UtilError,
gadgets::{AssertLtConfig, SignedExtendConfig},
instructions::riscv::constants::UInt,
utils::add_one_to_big_num,
witness::LkMultiplicity,
};
use ff_ext::{ExtensionField, SmallField};
use gkr_iop::error::CircuitBuilderError;
use itertools::{Itertools, enumerate};
use multilinear_extensions::{Expression, ToExpr, WitIn, util::ceil_log2};
use p3::field::FieldAlgebra;
use std::{
borrow::Cow,
mem::{self},
ops::Index,
};
pub use strum::IntoEnumIterator;
use strum_macros::EnumIter;
use util::max_carry_word_for_multiplication;
/// Storage for the limbs of a `UIntLimbs`: either freshly allocated
/// witnesses or precomputed expressions.
#[derive(Clone, EnumIter, Debug)]
pub enum UintLimb<E: ExtensionField> {
    /// Limbs backed by witness columns.
    WitIn(Vec<WitIn>),
    /// Limbs backed by arbitrary expressions.
    Expression(Vec<Expression<E>>),
}
/// Owned iteration over witness limbs.
///
/// # Panics
/// `unimplemented!` when the limbs are stored as expressions.
impl<E: ExtensionField> IntoIterator for UintLimb<E> {
    type Item = WitIn;
    type IntoIter = std::vec::IntoIter<WitIn>;

    fn into_iter(self) -> Self::IntoIter {
        match self {
            UintLimb::WitIn(wits) => wits.into_iter(),
            _ => unimplemented!(),
        }
    }
}
/// Borrowed iteration over witness limbs.
///
/// # Panics
/// `unimplemented!` when the limbs are stored as expressions.
impl<'a, E: ExtensionField> IntoIterator for &'a UintLimb<E> {
    type Item = &'a WitIn;
    type IntoIter = std::slice::Iter<'a, WitIn>;

    fn into_iter(self) -> Self::IntoIter {
        match self {
            UintLimb::WitIn(wits) => wits.iter(),
            _ => unimplemented!(),
        }
    }
}
impl<E: ExtensionField> UintLimb<E> {
    /// Convenience borrowing iterator; panics for expression-backed limbs
    /// (see the `IntoIterator` impl for `&UintLimb`).
    pub fn iter(&self) -> impl Iterator<Item = &WitIn> {
        self.into_iter()
    }
}
/// Index into witness limbs.
///
/// # Panics
/// `unimplemented!` for expression-backed limbs; out-of-range indices panic
/// like any slice index.
impl<E: ExtensionField> Index<usize> for UintLimb<E> {
    type Output = WitIn;

    fn index(&self, index: usize) -> &Self::Output {
        match self {
            UintLimb::WitIn(vec) => &vec[index],
            _ => unimplemented!(),
        }
    }
}
#[derive(Clone, Debug)]
/// Unsigned integer with `M` total bits. `C` denotes the cell bit width.
/// Represented in little endian form.
pub struct UIntLimbs<const M: usize, const C: usize, E: ExtensionField> {
    /// Little-endian limbs, either witnesses or precomputed expressions.
    pub limbs: UintLimb<E>,
    // We don't need `overflow` witness since the last element of `carries` represents it.
    pub carries: Option<Vec<WitIn>>,
    // for carry range check using lt tricks
    pub carries_auxiliary_lt_config: Option<Vec<AssertLtConfig>>,
}
impl<const M: usize, const C: usize, E: ExtensionField> UIntLimbs<M, C, E> {
    /// Allocate fresh witness limbs, each range-checked to `C` bits.
    pub fn new<NR: Into<String>, N: FnOnce() -> NR>(
        name_fn: N,
        circuit_builder: &mut CircuitBuilder<E>,
    ) -> Result<Self, CircuitBuilderError> {
        Self::new_maybe_unchecked(name_fn, circuit_builder, true)
    }

    /// Allocate fresh witness limbs WITHOUT range checks; the caller is
    /// responsible for constraining them.
    pub fn new_unchecked<NR: Into<String>, N: FnOnce() -> NR>(
        name_fn: N,
        circuit_builder: &mut CircuitBuilder<E>,
    ) -> Result<Self, CircuitBuilderError> {
        Self::new_maybe_unchecked(name_fn, circuit_builder, false)
    }

    /// Shared implementation for `new`/`new_unchecked`: create `NUM_LIMBS`
    /// witnesses and, when `is_check` is set, range-check each limb. Byte
    /// limbs (`C == 8`) are checked two-at-a-time via the double-u8 lookup;
    /// a trailing odd limb falls back to a constant range check.
    fn new_maybe_unchecked<NR: Into<String>, N: FnOnce() -> NR>(
        name_fn: N,
        circuit_builder: &mut CircuitBuilder<E>,
        is_check: bool,
    ) -> Result<Self, CircuitBuilderError> {
        circuit_builder.namespace(name_fn, |cb| {
            let witins = (0..Self::NUM_LIMBS)
                .map(|i| cb.create_witin(|| format!("limb_{i}")))
                .collect::<Vec<_>>();
            if is_check {
                match C {
                    8 => {
                        // NOTE(review): the per-chunk `Result`s are collected
                        // and discarded via `let _ = ...`, so lookup errors
                        // are silently swallowed; consider propagating them.
                        let _ = witins
                            .chunks(2)
                            .enumerate()
                            .map(|(i, chunk)| {
                                if chunk.len() == 2 {
                                    cb.assert_double_u8(
                                        || format!("limbs_{}_{}_in_{C}", i * 2, i * 2 + 1),
                                        chunk[0].expr(),
                                        chunk[1].expr(),
                                    )?;
                                } else {
                                    cb.assert_const_range(
                                        || format!("limb_{i}_in_{C}"),
                                        chunk[0].expr(),
                                        C,
                                    )?;
                                }
                                Ok::<_, CircuitBuilderError>(())
                            })
                            .collect::<Vec<_>>();
                    }
                    _ => {
                        // NOTE(review): errors discarded here as well.
                        let _ = witins
                            .iter()
                            .enumerate()
                            .map(|(i, witin)| {
                                cb.assert_ux::<_, _, C>(
                                    || format!("limb_{i}_in_{C}"),
                                    witin.expr(),
                                )?;
                                Ok::<_, CircuitBuilderError>(())
                            })
                            .collect::<Vec<_>>();
                    }
                }
            }
            Ok(UIntLimbs {
                limbs: UintLimb::WitIn(witins),
                carries: None,
                carries_auxiliary_lt_config: None,
            })
        })
    }

    /// accepts a vector of externally instantiated witnesses and carries,
    /// delegating the responsibility for range checking to the caller.
    pub fn from_witins_unchecked(
        limbs: Vec<WitIn>,
        carries: Option<Vec<WitIn>>,
        carries_auxiliary_lt_config: Option<Vec<AssertLtConfig>>,
    ) -> Self {
        assert_eq!(limbs.len(), Self::NUM_LIMBS);
        if let Some(carries) = &carries {
            let diff = limbs.len() - carries.len();
            assert!(
                diff == 0 || diff == 1, // diff = 1 imply no overflow
                "invalid witness: limb.len() {}, carries.len() {}",
                limbs.len(),
                carries.len()
            );
        }
        UIntLimbs {
            limbs: UintLimb::WitIn(limbs),
            carries,
            carries_auxiliary_lt_config,
        }
    }

    /// take vector of primitive type and instantiate constant-expression limbs
    pub fn from_const_unchecked<T: Into<u64>>(limbs: Vec<T>) -> Self {
        assert_eq!(limbs.len(), Self::NUM_LIMBS);
        UIntLimbs {
            limbs: UintLimb::Expression(
                limbs
                    .into_iter()
                    .take(Self::NUM_LIMBS)
                    .map(|limb| E::BaseField::from_canonical_u64(limb.into()).expr())
                    .collect::<Vec<Expression<E>>>(),
            ),
            carries: None,
            carries_auxiliary_lt_config: None,
        }
    }

    /// expr_limbs is little endian order
    pub fn new_as_empty() -> Self {
        Self {
            limbs: UintLimb::Expression(vec![]),
            carries: None,
            carries_auxiliary_lt_config: None,
        }
    }

    /// expr_limbs is little endian order; allocate witnesses constrained to
    /// equal the given expressions, range-checking each to `C` bits.
    pub fn create_witin_from_exprs(
        circuit_builder: &mut CircuitBuilder<E>,
        expr_limbs: Vec<Expression<E>>,
    ) -> Self {
        assert_eq!(expr_limbs.len(), Self::NUM_LIMBS);
        let limbs = (0..Self::NUM_LIMBS)
            .map(|i| {
                let w = circuit_builder.create_witin(|| "wit for limb");
                circuit_builder
                    .assert_ux::<_, _, C>(|| "range check", w.expr())
                    .unwrap();
                // Bind the witness to the provided expression.
                circuit_builder
                    .require_zero(|| "create_witin_from_expr", w.expr() - &expr_limbs[i])
                    .unwrap();
                w
            })
            .collect_vec();
        Self {
            limbs: UintLimb::WitIn(limbs),
            carries: None,
            carries_auxiliary_lt_config: None,
        }
    }

    /// Assign a `Value`'s u16 limbs into this UInt's witness columns.
    pub fn assign_value<T: Into<u64> + Default + From<u32> + Copy>(
        &self,
        instance: &mut [E::BaseField],
        value: Value<T>,
    ) {
        self.assign_limbs(instance, value.as_u16_limbs())
    }

    /// Assign the limbs and carries of an addition outcome.
    pub fn assign_add_outcome(&self, instance: &mut [E::BaseField], value: &ValueAdd) {
        self.assign_limbs(instance, &value.limbs);
        self.assign_carries(instance, &value.carries);
    }

    /// Assign the limbs, carries, and carry range-check auxiliaries of a
    /// multiplication outcome.
    pub fn assign_mul_outcome(
        &self,
        instance: &mut [E::BaseField],
        lkm: &mut LkMultiplicity,
        value: &ValueMul,
    ) -> Result<(), CircuitBuilderError> {
        self.assign_limbs(instance, &value.limbs);
        self.assign_carries(instance, &value.carries);
        self.assign_carries_auxiliary(instance, lkm, &value.carries, value.max_carry_value)
    }

    /// Write limb values into the instance; missing trailing limbs are
    /// zero-filled. No-op for expression-backed limbs.
    pub fn assign_limbs(&self, instance: &mut [E::BaseField], limbs_values: &[u16]) {
        assert!(
            limbs_values.len() <= Self::NUM_LIMBS,
            "assign input length mismatch. input_len={}, NUM_CELLS={}",
            limbs_values.len(),
            Self::NUM_LIMBS
        );
        if let UintLimb::WitIn(wires) = &self.limbs {
            for (wire, limb) in wires.iter().zip(
                limbs_values
                    .iter()
                    .map(|v| E::BaseField::from_canonical_u64(*v as u64))
                    .chain(std::iter::repeat(E::BaseField::ZERO)),
            ) {
                instance[wire.id as usize] = limb;
            }
        }
    }

    /// Write carry values into the instance; missing trailing carries are
    /// zero-filled. No-op when no carry witnesses were allocated.
    pub fn assign_carries<T: Into<u64> + Copy>(
        &self,
        instance: &mut [E::BaseField],
        carry_values: &[T],
    ) {
        assert!(
            carry_values.len()
                <= self
                    .carries
                    .as_ref()
                    .map(|carries| carries.len())
                    .unwrap_or_default(),
            "assign input length mismatch",
        );
        if let Some(carries) = &self.carries {
            for (wire, carry) in carries.iter().zip(
                carry_values
                    .iter()
                    .map(|v| E::BaseField::from_canonical_u64(Into::<u64>::into(*v)))
                    .chain(std::iter::repeat(E::BaseField::ZERO)),
            ) {
                instance[wire.id as usize] = carry;
            }
        }
    }

    /// Assign the `AssertLtConfig` auxiliaries proving each carry is below
    /// `max_carry`. No-op when the configs were not allocated.
    pub fn assign_carries_auxiliary<T: Into<u64> + Copy>(
        &self,
        instance: &mut [E::BaseField],
        lkm: &mut LkMultiplicity,
        carry_values: &[T],
        max_carry: u64,
    ) -> Result<(), CircuitBuilderError> {
        assert!(
            carry_values.len()
                <= self
                    .carries
                    .as_ref()
                    .map(|carries| carries.len())
                    .unwrap_or_default(),
            "assign input length mismatch",
        );
        if let Some(carries_auxiliary_lt_config) = &self.carries_auxiliary_lt_config {
            // constrain carry range
            for (lt_config, carry) in carries_auxiliary_lt_config.iter().zip_eq(carry_values) {
                lt_config.assign_instance(instance, lkm, Into::<u64>::into(*carry), max_carry)?;
            }
        }
        Ok(())
    }

    /// conversion is needed for lt/ltu
    /// Combine groups of `C/8` byte limbs into `C`-bit expression limbs.
    /// TODO: add general conversion between any two limb sizes C1 <-> C2
    pub fn from_u8_limbs(
        x: &UIntLimbs<M, 8, E>,
    ) -> Result<UIntLimbs<M, C, E>, CircuitBuilderError> {
        assert!(
            C.is_multiple_of(8),
            "we only support multiple of 8 limb sizes"
        );
        assert!(x.carries.is_none());
        let k = C / 8;
        // Powers of 2^8 used as shifts when recombining bytes.
        let shift_pows = {
            let mut shift_pows = Vec::with_capacity(k);
            shift_pows.push(E::BaseField::ONE.expr());
            (0..k - 1).for_each(|_| shift_pows.push(shift_pows.last().unwrap() << 8));
            shift_pows
        };
        let combined_limbs = x
            .limbs
            .iter()
            .collect_vec()
            .chunks(k)
            .map(|chunk| {
                chunk
                    .iter()
                    .zip(shift_pows.iter())
                    .map(|(limb, shift)| shift * limb.expr())
                    .reduce(|a, b| a + b)
                    .unwrap()
            })
            .collect_vec();
        Ok(UIntLimbs::<M, C, E>::from_exprs_unchecked(combined_limbs))
    }

    /// Split each `C`-bit limb into `C/8` freshly-allocated byte witnesses,
    /// constraining the recombination to equal the original limb.
    pub fn to_u8_limbs(
        circuit_builder: &mut CircuitBuilder<E>,
        x: UIntLimbs<M, C, E>,
    ) -> UIntLimbs<M, 8, E> {
        assert!(
            C.is_multiple_of(8),
            "we only support multiple of 8 limb sizes"
        );
        assert!(x.carries.is_none());
        let k = C / 8;
        let shift_pows = {
            let mut shift_pows = Vec::with_capacity(k);
            shift_pows.push(E::BaseField::ONE.expr());
            (0..k - 1).for_each(|_| shift_pows.push(shift_pows.last().unwrap() << 8));
            shift_pows
        };
        let split_limbs = x
            .limbs
            .iter()
            .flat_map(|large_limb| {
                let limbs = (0..k)
                    .map(|_| {
                        let w = circuit_builder.create_witin(|| "");
                        circuit_builder.assert_byte(|| "", w.expr()).unwrap();
                        w.expr()
                    })
                    .collect_vec();
                let combined_limb = limbs
                    .iter()
                    .zip(shift_pows.iter())
                    .map(|(limb, shift)| shift * limb)
                    .reduce(|a, b| a + b)
                    .unwrap();
                // The byte decomposition must reconstruct the original limb.
                circuit_builder
                    .require_zero(|| "zero check", large_limb.expr() - combined_limb)
                    .unwrap();
                limbs
            })
            .collect_vec();
        UIntLimbs::<M, 8, E>::create_witin_from_exprs(circuit_builder, split_limbs)
    }

    /// Wrap little-endian expression limbs without any range checks;
    /// missing trailing limbs are zero-padded to `NUM_LIMBS`.
    pub fn from_exprs_unchecked(expr_limbs: Vec<Expression<E>>) -> Self {
        Self {
            limbs: UintLimb::Expression(
                expr_limbs
                    .into_iter()
                    .chain(std::iter::repeat(Expression::ZERO))
                    .take(Self::NUM_LIMBS)
                    .collect_vec(),
            ),
            carries: None,
            carries_auxiliary_lt_config: None,
        }
    }

    /// If current limbs are Expression, this function will create witIn and replace the limbs
    pub fn replace_limbs_with_witin<NR: Into<String>, N: FnOnce() -> NR>(
        &mut self,
        name_fn: N,
        circuit_builder: &mut CircuitBuilder<E>,
    ) -> Result<(), CircuitBuilderError> {
        if let UintLimb::Expression(_) = self.limbs {
            circuit_builder.namespace(name_fn, |cb| {
                self.limbs = UintLimb::WitIn(
                    (0..Self::NUM_LIMBS)
                        .map(|i| {
                            let w = cb.create_witin(|| format!("limb_{i}"));
                            cb.assert_ux::<_, _, C>(|| format!("limb_{i}_in_{C}"), w.expr())?;
                            Ok(w)
                        })
                        .collect::<Result<Vec<WitIn>, CircuitBuilderError>>()?,
                );
                Ok(())
            })?;
        }
        Ok(())
    }

    // Create witIn for carries; `with_overflow` keeps the final (overflow)
    // carry, otherwise it is dropped. No-op if carries already exist.
    fn alloc_carry_unchecked<NR: Into<String>, N: FnOnce() -> NR>(
        &mut self,
        name_fn: N,
        circuit_builder: &mut CircuitBuilder<E>,
        with_overflow: bool,
        num_carries: usize,
    ) -> Result<(), CircuitBuilderError> {
        if self.carries.is_none() {
            circuit_builder.namespace(name_fn, |cb| {
                let carries_len = if with_overflow {
                    num_carries
                } else {
                    num_carries - 1
                };
                self.carries = Some(
                    (0..carries_len)
                        .map(|i| {
                            let c = cb.create_witin(|| format!("carry_{i}"));
                            Ok(c)
                        })
                        .collect::<Result<Vec<WitIn>, CircuitBuilderError>>()?,
                );
                Ok(())
            })?;
        }
        Ok(())
    }

    /// Return if the limbs are in Expression form or not.
    pub fn is_expr(&self) -> bool {
        matches!(&self.limbs, UintLimb::Expression(_))
    }

    /// Return the `UIntLimbs` underlying cell id's
    pub fn wits_in(&self) -> Option<&[WitIn]> {
        match &self.limbs {
            UintLimb::WitIn(c) => Some(c),
            _ => None,
        }
    }

    /// Generate ((0)_{2^C}, (1)_{2^C}, ..., (size - 1)_{2^C})
    pub fn counter_vector<F: SmallField>(size: usize) -> Vec<Vec<F>> {
        let num_vars = ceil_log2(size);
        let number_of_limbs = num_vars.div_ceil(C);
        let cell_modulo = F::from_canonical_u64(1 << C);
        let mut res = vec![vec![F::ZERO; number_of_limbs]];
        for i in 1..size {
            // Each row is the previous row plus one, in base 2^C.
            res.push(add_one_to_big_num(cell_modulo, &res[i - 1]));
        }
        res
    }

    /// Get an Expression<E> from the limbs, unsafe if Uint value exceeds field limit
    pub fn value(&self) -> Expression<E> {
        // Horner evaluation from the most-significant limb downward.
        self.expr()
            .into_iter()
            .rev()
            .reduce(|sum, limb| (sum << C) + limb)
            .unwrap()
    }

    /// split into 2 UIntLimbs with each taking half size of limbs
    pub fn as_lo_hi<const M2: usize>(
        &self,
    ) -> Result<(UIntLimbs<M2, C, E>, UIntLimbs<M2, C, E>), CircuitBuilderError> {
        assert_eq!(M, 2 * M2);
        let mut self_lo = self.expr();
        let self_hi = self_lo.split_off(self_lo.len() / 2);
        Ok((
            UIntLimbs::from_exprs_unchecked(self_lo),
            UIntLimbs::from_exprs_unchecked(self_hi),
        ))
    }

    /// Interpret the limbs as a signed value: subtract 2^32 when `is_neg`.
    pub fn to_field_expr(&self, is_neg: Expression<E>) -> Expression<E> {
        // Convert two's complement representation into field arithmetic.
        // Example: 0xFFFF_FFFF = 2^32 - 1 --> shift --> -1
        self.value() - is_neg * (1_u64 << 32)
    }
}
impl<E: ExtensionField> UInt<E> {
    /// Determine whether a UInt is negative (as 2s complement)
    ///
    /// Also called Most Significant Bit extraction, when
    /// interpreted as an unsigned int.
    pub fn is_negative(
        &self,
        cb: &mut CircuitBuilder<E>,
    ) -> Result<SignedExtendConfig<E>, CircuitBuilderError> {
        // The sign lives in the top bit of the most-significant limb.
        SignedExtendConfig::<E>::construct_limb(cb, self.limbs.iter().last().unwrap().expr())
    }
}
/// Construct `UIntLimbs` from an owned `Vec<WitIn>`; fails unless exactly
/// `Self::NUM_LIMBS` witnesses are supplied.
impl<const M: usize, const C: usize, E: ExtensionField> TryFrom<Vec<WitIn>> for UIntLimbs<M, C, E> {
    type Error = UtilError;

    fn try_from(limbs: Vec<WitIn>) -> Result<Self, Self::Error> {
        if limbs.len() == Self::NUM_LIMBS {
            Ok(Self {
                limbs: UintLimb::WitIn(limbs),
                carries: None,
                carries_auxiliary_lt_config: None,
            })
        } else {
            let msg = format!(
                "cannot construct UIntLimbs<{}, {}> from {} cells, requires {} cells",
                M,
                C,
                limbs.len(),
                Self::NUM_LIMBS
            );
            Err(UtilError::UIntError(msg.into()))
        }
    }
}
/// Construct `UIntLimbs` from a `&[WitIn]` slice by delegating to the
/// owned-`Vec` conversion after copying the slice.
impl<const M: usize, const C: usize, E: ExtensionField> TryFrom<&[WitIn]> for UIntLimbs<M, C, E> {
    type Error = UtilError;

    fn try_from(values: &[WitIn]) -> Result<Self, Self::Error> {
        Self::try_from(values.to_vec())
    }
}
/// Expose the limbs as expressions: stored expressions are cloned, stored
/// witnesses are converted through `ToExpr`.
impl<E: ExtensionField, const M: usize, const C: usize> ToExpr<E> for UIntLimbs<M, C, E> {
    type Output = Vec<Expression<E>>;

    fn expr(&self) -> Vec<Expression<E>> {
        match &self.limbs {
            UintLimb::Expression(exprs) => exprs.to_vec(),
            UintLimb::WitIn(wits) => wits.iter().map(|w| w.expr()).collect(),
        }
    }
}
impl<E: ExtensionField> UIntLimbs<32, 16, E> {
    /// Return a value suitable for register read/write. From [u16; 2] limbs.
    pub fn register_expr(&self) -> RegisterExpr<E> {
        let u16_limbs = self.expr();
        u16_limbs.try_into().expect("two limbs with M=32 and C=16")
    }

    /// Interpret this UInt as a memory address.
    pub fn address_expr(&self) -> AddressExpr<E> {
        self.value()
    }

    /// Return a value suitable for memory read/write. From [u16; 2] limbs
    pub fn memory_expr(&self) -> MemoryExpr<E> {
        let u16_limbs = self.expr();
        u16_limbs.try_into().expect("two limbs with M=32 and C=16")
    }
}
impl<E: ExtensionField> UIntLimbs<32, 8, E> {
    /// Return a value suitable for register read/write. From [u8; 4] limbs.
    pub fn register_expr(&self) -> RegisterExpr<E> {
        let u8_limbs = self.expr();
        // Combine byte pairs (little endian) into u16 limbs: lo + hi * 256.
        let u16_limbs = u8_limbs
            .chunks(2)
            .map(|chunk| {
                let (a, b) = (&chunk[0], &chunk[1]);
                a + b * 256
            })
            .collect_vec();
        u16_limbs.try_into().expect("four limbs with M=32 and C=8")
    }
}
/// A struct holding intermediate results of arithmetic add operations from Value
pub struct ValueAdd {
    /// Little-endian u16 result limbs.
    pub limbs: Vec<u16>,
    /// Per-limb carries (0 or 1).
    pub carries: Vec<u16>,
}
/// A struct holding intermediate results of arithmetic mul operations from Value
pub struct ValueMul {
    /// Little-endian u16 result limbs.
    pub limbs: Vec<u16>,
    /// Per-limb carries; may exceed u16 for multiplication.
    pub carries: Vec<u64>,
    /// Upper bound used to range-check every carry.
    pub max_carry_value: u64,
}
impl ValueMul {
    /// View the high half of the product limbs as a borrowed `Value`.
    pub fn as_hi_value<T: Into<u64> + From<u32> + Copy + Default>(&self) -> Value<'_, T> {
        Value::<T>::from_limb_slice_unchecked(self.as_hi_limb_slice())
    }

    /// The upper half of the limbs (the "hi" part of a widening multiply).
    pub fn as_hi_limb_slice(&self) -> &[u16] {
        &self.limbs[self.limbs.len() / 2..]
    }
}
#[derive(Clone, Debug)]
/// A primitive value paired with its little-endian u16 limb decomposition
/// (limbs may be owned or borrowed via `Cow`).
pub struct Value<'a, T: Into<u64> + From<u32> + Copy + Default> {
    // Original primitive value.
    val: T,
    /// Little-endian u16 limbs of `val`.
    pub limbs: Cow<'a, [u16]>,
}
/// Borrow a `Value`'s little-endian u16 limbs.
impl<'a, T: Into<u64> + From<u32> + Copy + Default> From<&'a Value<'a, T>> for &'a [u16] {
    fn from(v: &'a Value<'a, T>) -> Self {
        v.as_u16_limbs()
    }
}

/// Convert a `Value` to its u64 representation.
impl<'a, T: Into<u64> + From<u32> + Copy + Default> From<&Value<'a, T>> for u64 {
    fn from(v: &Value<'a, T>) -> Self {
        v.as_u64()
    }
}

/// Convert a `Value` to its u32 representation (truncating).
impl<'a, T: Into<u64> + From<u32> + Copy + Default> From<&Value<'a, T>> for u32 {
    fn from(v: &Value<'a, T>) -> Self {
        v.as_u32()
    }
}

/// Reinterpret a `Value`'s low 32 bits as a signed i32.
impl<'a, T: Into<u64> + From<u32> + Copy + Default> From<&Value<'a, T>> for i32 {
    fn from(v: &Value<'a, T>) -> Self {
        v.as_i32()
    }
}
// TODO generalize to support non 16 bit limbs
// TODO optimize api with fixed size array
impl<'a, T: Into<u64> + From<u32> + Copy + Default> Value<'a, T> {
    // Total bit width of T.
    const M: usize = { mem::size_of::<T>() * 8 };
    // Bits per limb.
    const C: usize = 16;
    // Number of u16 limbs needed to hold T.
    const LIMBS: usize = Self::M.div_ceil(16);

    /// Build a value and record a u16 range-check lookup for every limb.
    pub fn new(val: T, lkm: &mut LkMultiplicity) -> Self {
        let uint = Self::new_unchecked(val);
        Self::assert_u16(&uint.limbs, lkm);
        uint
    }

    /// Build a value without recording range-check lookups.
    pub fn new_unchecked(val: T) -> Self {
        Value::<T> {
            val,
            limbs: Cow::Owned(Self::split_to_u16(val)),
        }
    }

    /// Rebuild a value from owned limbs without range checks.
    /// NOTE(review): the fold accumulates into `u32`, so values wider than
    /// 32 bits (more than two limbs) would overflow here — confirm callers
    /// only use this for <=32-bit `T`.
    pub fn from_limb_unchecked(limbs: Vec<u16>) -> Self {
        Value::<T> {
            val: limbs
                .iter()
                .rev()
                .fold(0u32, |acc, &v| (acc << 16) + v as u32)
                .into(),
            limbs: Cow::Owned(limbs),
        }
    }

    /// Rebuild a value from borrowed limbs without range checks.
    /// NOTE(review): same u32-fold limitation as `from_limb_unchecked`.
    pub fn from_limb_slice_unchecked(limbs: &'a [u16]) -> Self {
        Value::<T> {
            val: limbs
                .iter()
                .rev()
                .fold(0u32, |acc, &v| (acc << 16) + v as u32)
                .into(),
            limbs: Cow::Borrowed(limbs),
        }
    }

    // Record a 16-bit range-check lookup for each limb.
    fn assert_u16(v: &[u16], lkm: &mut LkMultiplicity) {
        v.iter().for_each(|v| {
            lkm.assert_ux::<16>(*v as u64);
        })
    }

    // Split into `LIMBS` little-endian u16 limbs.
    fn split_to_u16(value: T) -> Vec<u16> {
        let value: u64 = value.into(); // Convert to u64 for generality
        (0..Self::LIMBS)
            .scan(value, |acc, _| {
                let limb = (*acc & 0xFFFF) as u16;
                *acc >>= 16;
                Some(limb)
            })
            .collect_vec()
    }

    /// Borrow the little-endian u16 limbs.
    pub fn as_u16_limbs(&self) -> &[u16] {
        &self.limbs
    }

    /// Convert the limbs to a u64 value
    pub fn as_u64(&self) -> u64 {
        self.val.into()
    }

    /// Convert the limbs to a u32 value
    pub fn as_u32(&self) -> u32 {
        self.as_u64() as u32
    }

    /// Convert the limbs to an i32 value
    pub fn as_i32(&self) -> i32 {
        self.as_u32() as i32
    }

    /// Limbs as field elements.
    pub fn u16_fields<F: SmallField>(&self) -> Vec<F> {
        self.limbs
            .iter()
            .map(|v| F::from_canonical_u64(*v as u64))
            .collect_vec()
    }

    /// Limb-wise addition with carry propagation; records a u16 range check
    /// per result limb. When `with_overflow` is false the final carry is
    /// dropped (caller asserts it cannot occur).
    pub fn add(&self, rhs: &Self, lkm: &mut LkMultiplicity, with_overflow: bool) -> ValueAdd {
        let res = self.as_u16_limbs().iter().zip(rhs.as_u16_limbs()).fold(
            vec![],
            |mut acc, (a_limb, b_limb)| {
                let (a, b) = a_limb.overflowing_add(*b_limb);
                if let Some((_, prev_carry)) = acc.last() {
                    // Fold in the carry from the previous limb.
                    let (e, d) = a.overflowing_add(*prev_carry);
                    acc.push((e, (b || d) as u16));
                } else {
                    acc.push((a, b as u16));
                }
                // range check
                if let Some((limb, _)) = acc.last() {
                    lkm.assert_ux::<16>(*limb as u64);
                };
                acc
            },
        );
        let (limbs, mut carries): (Vec<u16>, Vec<u16>) = res.into_iter().unzip();
        if !with_overflow {
            carries.resize(carries.len() - 1, 0);
        }
        ValueAdd { limbs, carries }
    }

    /// Low-half multiplication (result truncated to `self`'s width).
    pub fn mul(&self, rhs: &Self, lkm: &mut LkMultiplicity, with_overflow: bool) -> ValueMul {
        self.internal_mul(rhs, lkm, with_overflow, false)
    }

    /// Full-width multiplication (result has twice the limbs).
    pub fn mul_hi(&self, rhs: &Self, lkm: &mut LkMultiplicity, with_overflow: bool) -> ValueMul {
        self.internal_mul(rhs, lkm, with_overflow, true)
    }

    /// Fused multiply-add: `(addend + self * mul, self * mul)`.
    #[allow(clippy::type_complexity)]
    pub fn mul_add(
        &self,
        mul: &Self,
        addend: &Self,
        lkm: &mut LkMultiplicity,
        with_overflow: bool,
    ) -> (ValueAdd, ValueMul) {
        let mul_result = self.internal_mul(mul, lkm, with_overflow, false);
        let add_result = addend.add(
            &Self::from_limb_unchecked(mul_result.limbs.clone()),
            lkm,
            with_overflow,
        );
        (add_result, mul_result)
    }

    // Schoolbook limb multiplication with carry normalization. Records a u16
    // range check per result limb. Asserts no overflow when `with_overflow`
    // is false; `with_hi_limbs` doubles the result width.
    fn internal_mul(
        &self,
        mul: &Self,
        lkm: &mut LkMultiplicity,
        with_overflow: bool,
        with_hi_limbs: bool,
    ) -> ValueMul {
        let a_limbs = self.as_u16_limbs();
        let b_limbs = mul.as_u16_limbs();
        let num_limbs = if !with_hi_limbs {
            a_limbs.len()
        } else {
            2 * a_limbs.len()
        };
        let mut c_limbs = vec![0u16; num_limbs];
        let mut carries = vec![0u64; num_limbs];
        let mut tmp = vec![0u64; num_limbs];
        // Accumulate cross products into per-position sums.
        enumerate(a_limbs).for_each(|(i, &a_limb)| {
            enumerate(b_limbs).for_each(|(j, &b_limb)| {
                let idx = i + j;
                if idx < num_limbs {
                    tmp[idx] += a_limb as u64 * b_limb as u64;
                }
            })
        });
        // Normalize: each position keeps its low 16 bits, the rest carries up.
        tmp.iter()
            .zip(c_limbs.iter_mut())
            .enumerate()
            .for_each(|(i, (tmp, limb))| {
                // tmp + prev_carry - carry * Self::LIMB_BASE_MUL
                let mut tmp = *tmp;
                if i > 0 {
                    tmp += carries[i - 1];
                }
                // update carry
                carries[i] = tmp >> Self::C;
                // update limb with only lsb 16 bit
                *limb = tmp as u16;
            });
        if !with_overflow {
            // If the outcome overflows, `with_overflow` can't be false
            assert_eq!(carries[carries.len() - 1], 0, "incorrect overflow flag");
            carries.resize(carries.len() - 1, 0);
        }
        // range check
        c_limbs.iter().for_each(|c| lkm.assert_ux::<16>(*c as u64));
        ValueMul {
            limbs: c_limbs,
            carries,
            max_carry_value: max_carry_word_for_multiplication(2, Self::M, Self::C),
        }
    }
}
#[cfg(test)]
mod tests {
    /// Unit tests for `Value` limb arithmetic (add/mul with carries).
    mod value {
        use crate::{Value, witness::LkMultiplicity};

        #[test]
        fn test_add() {
            let a = Value::new_unchecked(1u32);
            let b = Value::new_unchecked(2u32);
            let mut lkm = LkMultiplicity::default();
            let ret = a.add(&b, &mut lkm, true);
            assert_eq!(ret.limbs[0], 3);
            assert_eq!(ret.limbs[1], 0);
            assert_eq!(ret.carries[0], 0);
            assert_eq!(ret.carries[1], 0);
        }

        #[test]
        fn test_add_carry() {
            // u16::MAX + 2 overflows the low limb, producing a carry.
            let a = Value::new_unchecked(u16::MAX as u32);
            let b = Value::new_unchecked(2u32);
            let mut lkm = LkMultiplicity::default();
            let ret = a.add(&b, &mut lkm, true);
            assert_eq!(ret.limbs[0], 1);
            assert_eq!(ret.limbs[1], 1);
            assert_eq!(ret.carries[0], 1);
            assert_eq!(ret.carries[1], 0);
        }

        #[test]
        fn test_mul() {
            let a = Value::new_unchecked(1u32);
            let b = Value::new_unchecked(2u32);
            let mut lkm = LkMultiplicity::default();
            let ret = a.mul(&b, &mut lkm, true);
            assert_eq!(ret.limbs[0], 2);
            assert_eq!(ret.limbs[1], 0);
            assert_eq!(ret.carries[0], 0);
            assert_eq!(ret.carries[1], 0);
        }

        #[test]
        fn test_mul_carry() {
            // u16::MAX * 2 overflows the low limb, producing a carry.
            let a = Value::new_unchecked(u16::MAX as u32);
            let b = Value::new_unchecked(2u32);
            let mut lkm = LkMultiplicity::default();
            let ret = a.mul(&b, &mut lkm, true);
            assert_eq!(ret.limbs[0], u16::MAX - 1);
            assert_eq!(ret.limbs[1], 1);
            assert_eq!(ret.carries[0], 1);
            assert_eq!(ret.carries[1], 0);
        }

        #[test]
        fn test_mul_overflow() {
            // (u32::MAX/2 + 1) * 2 == 2^32: wraps to 0 with the top carry set.
            let a = Value::new_unchecked(u32::MAX / 2 + 1);
            let b = Value::new_unchecked(2u32);
            let mut lkm = LkMultiplicity::default();
            let ret = a.mul(&b, &mut lkm, true);
            assert_eq!(ret.limbs[0], 0);
            assert_eq!(ret.limbs[1], 0);
            assert_eq!(ret.carries[0], 0);
            assert_eq!(ret.carries[1], 1);
        }
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/error.rs | ceno_zkvm/src/error.rs | use gkr_iop::error::{BackendError, CircuitBuilderError};
use mpcs::Error;
/// Errors produced by low-level utility helpers.
#[derive(Debug)]
pub enum UtilError {
    /// Invalid `UIntLimbs` construction/usage; boxed message keeps the enum small.
    UIntError(Box<str>),
}
/// Top-level error type for ZKVM setup, proving, and verification.
#[derive(Debug)]
pub enum ZKVMError {
    /// Generic circuit failure with no further detail.
    CircuitError,
    /// Error raised while building circuit constraints.
    CircuitBuilderError(CircuitBuilderError),
    /// Error from the GKR-IOP backend.
    BackendError(BackendError),
    /// Wrapped utility error (e.g. `UIntLimbs` misuse).
    UtilError(UtilError),
    /// Named witness was not found.
    WitnessNotFound(Box<str>),
    /// Witness exists but fails validation.
    InvalidWitness(Box<str>),
    /// Proof is malformed or inconsistent.
    InvalidProof(Box<str>),
    /// Named verifying key was not found.
    VKNotFound(Box<str>),
    /// Fixed trace for a named circuit was not found.
    FixedTraceNotFound(Box<str>),
    /// Verification failed with the given message.
    VerifyError(Box<str>),
    /// Error from the polynomial commitment scheme.
    PCSError(Error),
}
/// Wrap utility errors into the top-level error type (enables `?`).
impl From<UtilError> for ZKVMError {
    fn from(error: UtilError) -> Self {
        Self::UtilError(error)
    }
}

/// Wrap circuit-builder errors into the top-level error type (enables `?`).
impl From<CircuitBuilderError> for ZKVMError {
    fn from(e: CircuitBuilderError) -> Self {
        ZKVMError::CircuitBuilderError(e)
    }
}

/// Wrap backend errors into the top-level error type (enables `?`).
impl From<BackendError> for ZKVMError {
    fn from(e: BackendError) -> Self {
        ZKVMError::BackendError(e)
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/utils.rs | ceno_zkvm/src/utils.rs | use std::{
collections::HashMap,
fmt::Display,
hash::Hash,
panic::{self, PanicHookInfo},
};
use ff_ext::ExtensionField;
pub use gkr_iop::utils::i64_to_base;
use itertools::Itertools;
use p3::field::Field;
#[cfg(feature = "u16limb_circuit")]
use crate::instructions::riscv::constants::UINT_LIMBS;
#[cfg(feature = "u16limb_circuit")]
use multilinear_extensions::Expression;
#[cfg(feature = "u16limb_circuit")]
use multilinear_extensions::ToExpr;
#[cfg(feature = "u16limb_circuit")]
use p3::field::FieldAlgebra;
/// Decompose `value` into its four little-endian bytes, converting each into `T`.
pub fn split_to_u8<T: From<u8>>(value: u32) -> Vec<T> {
    let byte_count = (u32::BITS / 8) as usize;
    let mut limbs = Vec::with_capacity(byte_count);
    let mut rest = value;
    for _ in 0..byte_count {
        limbs.push(((rest & 0xFF) as u8).into());
        rest >>= 8;
    }
    limbs
}
/// Decompose `value` into `32 / LIMB_BITS` little-endian limbs of `LIMB_BITS` bits each.
///
/// NOTE(review): each limb is routed through an `as u8` cast before conversion,
/// so this is only correct for `LIMB_BITS <= 8` — confirm with callers.
#[allow(dead_code)]
pub fn split_to_limb<T: From<u8>, const LIMB_BITS: usize>(value: u32) -> Vec<T> {
    let limb_count = u32::BITS as usize / LIMB_BITS;
    let mut limbs = Vec::with_capacity(limb_count);
    let mut rest = value;
    for _ in 0..limb_count {
        limbs.push(((rest & ((1 << LIMB_BITS) - 1)) as u8).into());
        rest >>= LIMB_BITS;
    }
    limbs
}
/// Compile-time-usable minimum of two `usize` values.
pub(crate) const fn const_min(a: usize, b: usize) -> usize {
    if b < a { b } else { a }
}
/// Adds 1 to a little-endian big number whose limbs are all `< limb_modulo`,
/// propagating the carry and preserving that invariant.
pub(crate) fn add_one_to_big_num<F: Field>(limb_modulo: F, limbs: &[F]) -> Vec<F> {
    let mut carry = true;
    limbs
        .iter()
        .map(|limb| {
            if !carry {
                return *limb;
            }
            let bumped = *limb + F::ONE;
            if bumped == limb_modulo {
                // limb wrapped around; the carry keeps propagating
                F::ZERO
            } else {
                carry = false;
                bumped
            }
        })
        .collect()
}
// split single u64 value into W slices, each slice got C bits.
// all the rest slices will be filled with 0 if W x C > 64
pub fn u64vec<const W: usize, const C: usize>(x: u64) -> [u64; W] {
    assert!(C <= 64);
    // Fix: the assert admits C == 64, but `1u64 << 64` (and `x >> 64`) overflow
    // the shift width and panic in debug builds. Special-case C == 64: a single
    // slice captures the entire value and the remainder is fully consumed.
    let mask = if C == 64 { u64::MAX } else { (1u64 << C) - 1 };
    let mut rest = x;
    let mut ret = [0; W];
    for slot in ret.iter_mut() {
        *slot = rest & mask;
        rest = if C == 64 { 0 } else { rest >> C };
    }
    ret
}
/// Render a map as `[k1: v1,k2: v2,…]`; entry order follows the map's iteration order.
pub fn display_hashmap<K: Display, V: Display>(map: &HashMap<K, V>) -> String {
    let entries: Vec<String> = map.iter().map(|(k, v)| format!("{k}: {v}")).collect();
    format!("[{}]", entries.join(","))
}
/// Merge two frequency tables, summing the counts of keys present in both.
pub fn merge_frequency_tables<K: Hash + std::cmp::Eq>(
    lhs: HashMap<K, usize>,
    rhs: HashMap<K, usize>,
) -> HashMap<K, usize> {
    let mut merged = lhs;
    for (key, count) in rhs {
        *merged.entry(key).or_default() += count;
    }
    merged
}
/// Temporarily override the panic hook
///
/// We restore the original hook after we are done.
pub fn with_panic_hook<F, R>(
hook: Box<dyn Fn(&PanicHookInfo<'_>) + Sync + Send + 'static>,
f: F,
) -> R
where
F: FnOnce() -> R,
{
// Save the current panic hook
let original_hook = panic::take_hook();
// Set the new panic hook
panic::set_hook(hook);
let result = f();
// Restore the original panic hook
panic::set_hook(original_hook);
result
}
/// Build the two-limb expression form of an immediate. When sign extension is
/// required, the high limb is `is_signed * 0xffff`; otherwise it is zero.
#[cfg(feature = "u16limb_circuit")]
pub fn imm_sign_extend_circuit<E: ExtensionField>(
    require_signed: bool,
    is_signed: Expression<E>,
    imm: Expression<E>,
) -> [Expression<E>; UINT_LIMBS] {
    let high_limb = if require_signed {
        // a negative immediate contributes 0xffff to the high limb, gated by `is_signed`
        is_signed * E::BaseField::from_canonical_u16(0xffff).expr()
    } else {
        E::BaseField::ZERO.expr()
    };
    [imm, high_limb]
}
/// Sign-extend a 16-bit immediate into two 16-bit limbs.
///
/// The high limb is `0xffff` exactly when sign extension is requested and the
/// immediate is negative; otherwise it is `0`. This collapses the original
/// duplicated branches (and drops the `clippy::if_same_then_else` allow).
#[cfg(feature = "u16limb_circuit")]
#[inline(always)]
pub fn imm_sign_extend(is_signed_extension: bool, imm: i16) -> [u16; UINT_LIMBS] {
    let high_limb = if is_signed_extension && imm < 0 {
        0xffff
    } else {
        0
    };
    [imm as u16, high_limb]
}
/// Log jemalloc's current total of allocated bytes at `info` level.
#[cfg(all(feature = "jemalloc", unix, not(test)))]
pub fn print_allocated_bytes() {
    use tikv_jemalloc_ctl::{epoch, stats};
    // jemalloc caches stats per epoch; advance it so the read below is fresh.
    epoch::mib().unwrap().advance().unwrap();
    let allocated_bytes = stats::allocated::read().unwrap();
    tracing::info!("jemalloc total allocated bytes: {}", allocated_bytes);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/e2e.rs | ceno_zkvm/src/e2e.rs | use crate::{
error::ZKVMError,
instructions::riscv::{DummyExtraConfig, MemPadder, MmuConfig, Rv32imConfig},
scheme::{
PublicValues, ZKVMProof,
constants::SEPTIC_EXTENSION_DEGREE,
hal::ProverDevice,
mock_prover::{LkMultiplicityKey, MockProver},
prover::ZKVMProver,
septic_curve::SepticPoint,
verifier::ZKVMVerifier,
},
state::GlobalState,
structs::{
ProgramParams, ZKVMConstraintSystem, ZKVMFixedTraces, ZKVMProvingKey, ZKVMVerifyingKey,
ZKVMWitnesses,
},
tables::{
MemFinalRecord, MemInitRecord, ProgramTableCircuit, ProgramTableConfig, ShardRamCircuit,
TableCircuit,
},
};
use ceno_emul::{
Addr, ByteAddr, CENO_PLATFORM, Cycle, EmuContext, FullTracer, IterAddresses, NextCycleAccess,
Platform, PreflightTracer, Program, StepRecord, Tracer, VM_REG_COUNT, VMState, WORD_SIZE, Word,
WordAddr, host_utils::read_all_messages,
};
use clap::ValueEnum;
use either::Either;
use ff_ext::{ExtensionField, SmallField};
#[cfg(debug_assertions)]
use ff_ext::{Instrumented, PoseidonField};
use gkr_iop::{RAMType, hal::ProverBackend};
#[cfg(debug_assertions)]
use itertools::MinMaxResult;
use itertools::{Itertools, chain};
use mpcs::{PolynomialCommitmentScheme, SecurityLevel};
use multilinear_extensions::util::max_usable_threads;
use rustc_hash::FxHashSet;
use serde::Serialize;
#[cfg(debug_assertions)]
use std::collections::{HashMap, HashSet};
use std::{
collections::{BTreeMap, BTreeSet},
marker::PhantomData,
ops::Range,
sync::Arc,
};
use tracing::info_span;
use transcript::BasicTranscript as Transcript;
use witness::next_pow2_instance_padding;
// default value: 16GB VRAM, each cell 4 byte, log explosion 2
pub const DEFAULT_MAX_CELLS_PER_SHARDS: u64 = (1 << 30) * 16 / 4 / 2;
// hard cap on emulated cycles per shard (2^29)
pub const DEFAULT_MAX_CYCLE_PER_SHARDS: Cycle = 1 << 29;
// default cap on cross-shard RAM accesses; overridable via the
// CENO_CROSS_SHARD_LIMIT env var (see ShardContext::default)
pub const DEFAULT_CROSS_SHARD_ACCESS_LIMIT: usize = 1 << 20;
// define a relative small number to make first shard handle much less instruction
pub const DEFAULT_MAX_CELL_FIRST_SHARD: u64 = 1 << 20;
/// The polynomial commitment scheme kind
#[derive(
    Default,
    Copy,
    Clone,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    ValueEnum,
    strum_macros::AsRefStr,
    strum_macros::Display,
    strum_macros::IntoStaticStr,
)]
pub enum PcsKind {
    /// BaseFold commitment scheme (the default).
    #[default]
    Basefold,
    /// WHIR commitment scheme.
    Whir,
}
/// The field type
#[derive(
    Default,
    Copy,
    Clone,
    Debug,
    PartialEq,
    Eq,
    PartialOrd,
    Ord,
    ValueEnum,
    strum_macros::AsRefStr,
    strum_macros::Display,
    strum_macros::IntoStaticStr,
)]
pub enum FieldType {
    /// BabyBear prime field (the default).
    #[default]
    BabyBear,
    /// Goldilocks prime field.
    Goldilocks,
}
/// Snapshot of every tracked memory region, generic over the record type
/// (initial records vs. final records — see the aliases below).
#[derive(Clone)]
pub struct FullMemState<Record> {
    pub mem: Vec<Record>,
    // public io region
    pub io: Vec<Record>,
    // register file
    pub reg: Vec<Record>,
    pub hints: Vec<Record>,
    pub stack: Vec<Record>,
    pub heap: Vec<Record>,
}
// memory regions populated with their initial records
pub(crate) type InitMemState = FullMemState<MemInitRecord>;
// memory regions populated with their final records
type FinalMemState = FullMemState<MemFinalRecord>;
/// Output of the pre-flight emulation pass, consumed by later proving stages.
pub struct EmulationResult<'a> {
    /// Exit code reported by the guest if it halted within the step budget.
    pub exit_code: Option<u32>,
    pub final_mem_state: FinalMemState,
    pub pi: PublicValues,
    pub shard_ctx_builder: ShardContextBuilder,
    /// Number of steps actually executed during emulation.
    pub executed_steps: usize,
    // ties the struct to the (currently unused) lifetime parameter
    pub phantom: PhantomData<&'a ()>,
    // pub shard_ctxs: Vec<ShardContext<'a>>,
}
/// One RAM access record exchanged across the shard boundary
/// (populated by `ShardContext::send`).
pub struct RAMRecord {
    pub ram_type: RAMType,
    // reg_id is the raw id of register, e.g. in riv32 it's range from [0, 32)
    // meaningful when RAMType::Register
    pub reg_id: u64,
    pub addr: WordAddr,
    // prev_cycle and cycle are global cycle
    pub prev_cycle: Cycle,
    pub cycle: Cycle,
    // shard_cycle is cycle in current local shard, which already offset by start cycle
    pub shard_cycle: Cycle,
    pub prev_value: Option<Word>,
    pub value: Word,
    // for global reads, `shard_id` refers to the shard that previously produced this value.
    // for global write, `shard_id` refers to current shard.
    pub shard_id: usize,
}
/// Physical-prover configuration: this prover's identity within the cluster
/// plus the per-shard cell/cycle budgets.
#[derive(Clone, Debug)]
pub struct MultiProver {
    pub prover_id: usize,
    pub max_provers: usize,
    pub max_cell_per_shard: u64,
    pub max_cycle_per_shard: Cycle,
}
impl MultiProver {
    /// Construct a prover descriptor.
    ///
    /// # Panics
    /// Panics unless `prover_id < max_provers`.
    pub fn new(
        prover_id: usize,
        max_provers: usize,
        max_cell_per_shard: u64,
        max_cycle_per_shard: Cycle,
    ) -> Self {
        assert!(prover_id < max_provers);
        Self {
            prover_id,
            max_provers,
            max_cell_per_shard,
            max_cycle_per_shard,
        }
    }
}
impl Default for MultiProver {
    /// Single-prover configuration: effectively unbounded cells per shard,
    /// default cycle cap.
    fn default() -> Self {
        Self {
            prover_id: 0,
            max_provers: 1,
            max_cell_per_shard: u64::MAX,
            max_cycle_per_shard: DEFAULT_MAX_CYCLE_PER_SHARDS,
        }
    }
}
/// Per-shard bookkeeping used while replaying the trace: tracks cross-shard
/// RAM reads/writes and the cycle / heap / hint ranges owned by this shard.
pub struct ShardContext<'a> {
    pub shard_id: usize,
    num_shards: usize,
    max_cycle: Cycle,
    /// For each cycle, (addr, next-access-cycle) pairs recorded during preflight.
    pub addr_future_accesses: Arc<NextCycleAccess>,
    // For the three `*_tbs` fields: `Left` is the owner holding per-thread
    // storage; `Right` is a single thread's borrowed slot (see `get_forked`).
    addr_accessed_tbs: Either<Vec<Vec<WordAddr>>, &'a mut Vec<WordAddr>>,
    read_records_tbs:
        Either<Vec<BTreeMap<WordAddr, RAMRecord>>, &'a mut BTreeMap<WordAddr, RAMRecord>>,
    write_records_tbs:
        Either<Vec<BTreeMap<WordAddr, RAMRecord>>, &'a mut BTreeMap<WordAddr, RAMRecord>>,
    // global cycle window [start, end) covered by this shard
    pub cur_shard_cycle_range: std::ops::Range<usize>,
    pub expected_inst_per_shard: usize,
    pub max_num_cross_shard_accesses: usize,
    // shard 0: [v[0], v[1]), shard 1: [v[1], v[2]), shard 2: [v[2], v[3])
    pub prev_shard_cycle_range: Vec<Cycle>,
    pub prev_shard_heap_range: Vec<Addr>,
    pub prev_shard_hint_range: Vec<Addr>,
    pub platform: Platform,
    pub shard_heap_addr_range: Range<Addr>,
    pub shard_hint_addr_range: Range<Addr>,
}
impl<'a> Default for ShardContext<'a> {
    fn default() -> Self {
        let max_threads = max_usable_threads();
        // Cross-shard access cap; CENO_CROSS_SHARD_LIMIT overrides the default
        // (unparsable values silently fall back to the default).
        let max_num_cross_shard_accesses = std::env::var("CENO_CROSS_SHARD_LIMIT")
            .map(|v| v.parse().unwrap_or(DEFAULT_CROSS_SHARD_ACCESS_LIMIT))
            .unwrap_or(DEFAULT_CROSS_SHARD_ACCESS_LIMIT);
        Self {
            shard_id: 0,
            num_shards: 1,
            max_cycle: Cycle::MAX,
            addr_future_accesses: Arc::new(Default::default()),
            // one storage slot per worker thread (owner side of the Either)
            addr_accessed_tbs: Either::Left(vec![Vec::new(); max_threads]),
            read_records_tbs: Either::Left(
                (0..max_threads)
                    .map(|_| BTreeMap::new())
                    .collect::<Vec<_>>(),
            ),
            write_records_tbs: Either::Left(
                (0..max_threads)
                    .map(|_| BTreeMap::new())
                    .collect::<Vec<_>>(),
            ),
            // local shard cycles start after the reserved first-instruction subcycles
            cur_shard_cycle_range: FullTracer::SUBCYCLES_PER_INSN as usize..usize::MAX,
            expected_inst_per_shard: usize::MAX,
            max_num_cross_shard_accesses,
            prev_shard_cycle_range: vec![],
            prev_shard_heap_range: vec![],
            prev_shard_hint_range: vec![],
            platform: CENO_PLATFORM.clone(),
            shard_heap_addr_range: CENO_PLATFORM.heap.clone(),
            shard_hint_addr_range: CENO_PLATFORM.hints.clone(),
        }
    }
}
/// `prover_id` and `num_provers` in MultiProver are exposed as arguments
/// to specify the number of physical provers in a cluster,
/// each marked with a prover_id.
/// The overall trace data is divided into shards, which are distributed evenly among the provers.
/// The number of shards is in general agnostic to the number of provers.
/// Each prover is assigned n shards, where n can even be zero.
///
/// Shard distribution follows a balanced allocation strategy:
/// for example, if there are 10 shards and 3 provers,
/// the shard counts will be distributed as 3, 3, and 4, ensuring an even workload across all provers.
impl<'a> ShardContext<'a> {
    /// Fork this owner context into one child per worker thread, each child
    /// borrowing its own slot of the per-thread record storage.
    ///
    /// # Panics
    /// Panics if called on an already-forked (`Either::Right`) context.
    pub fn get_forked(&mut self) -> Vec<ShardContext<'_>> {
        match (
            &mut self.read_records_tbs,
            &mut self.write_records_tbs,
            &mut self.addr_accessed_tbs,
        ) {
            (
                Either::Left(read_thread_based_record_storage),
                Either::Left(write_thread_based_record_storage),
                Either::Left(addr_accessed_tbs),
            ) => read_thread_based_record_storage
                .iter_mut()
                .zip(write_thread_based_record_storage.iter_mut())
                .zip(addr_accessed_tbs.iter_mut())
                .map(|((read, write), addr_accessed_tbs)| ShardContext {
                    shard_id: self.shard_id,
                    num_shards: self.num_shards,
                    max_cycle: self.max_cycle,
                    addr_future_accesses: self.addr_future_accesses.clone(),
                    addr_accessed_tbs: Either::Right(addr_accessed_tbs),
                    read_records_tbs: Either::Right(read),
                    write_records_tbs: Either::Right(write),
                    cur_shard_cycle_range: self.cur_shard_cycle_range.clone(),
                    expected_inst_per_shard: self.expected_inst_per_shard,
                    max_num_cross_shard_accesses: self.max_num_cross_shard_accesses,
                    prev_shard_cycle_range: self.prev_shard_cycle_range.clone(),
                    prev_shard_heap_range: self.prev_shard_heap_range.clone(),
                    prev_shard_hint_range: self.prev_shard_hint_range.clone(),
                    platform: self.platform.clone(),
                    shard_heap_addr_range: self.shard_heap_addr_range.clone(),
                    shard_hint_addr_range: self.shard_hint_addr_range.clone(),
                })
                .collect_vec(),
            _ => panic!("invalid type"),
        }
    }

    /// Owner-side view of the per-thread read records; panics on a forked context.
    pub fn read_records(&self) -> &[BTreeMap<WordAddr, RAMRecord>] {
        match &self.read_records_tbs {
            Either::Left(m) => m,
            Either::Right(_) => panic!("undefined behaviour"),
        }
    }

    /// Owner-side view of the per-thread write records; panics on a forked context.
    pub fn write_records(&self) -> &[BTreeMap<WordAddr, RAMRecord>] {
        match &self.write_records_tbs {
            Either::Left(m) => m,
            Either::Right(_) => panic!("undefined behaviour"),
        }
    }

    #[inline(always)]
    pub fn is_first_shard(&self) -> bool {
        self.shard_id == 0
    }

    #[inline(always)]
    pub fn is_last_shard(&self) -> bool {
        self.shard_id == self.num_shards - 1
    }

    /// Whether the global `cycle` falls inside this shard's cycle window.
    #[inline(always)]
    pub fn is_in_current_shard(&self, cycle: Cycle) -> bool {
        self.cur_shard_cycle_range.contains(&(cycle as usize))
    }

    /// Whether the global `cycle` precedes this shard's cycle window.
    #[inline(always)]
    pub fn before_current_shard_cycle(&self, cycle: Cycle) -> bool {
        (cycle as usize) < self.cur_shard_cycle_range.start
    }

    /// Whether the global `cycle` lands at or after the end of this shard's window.
    #[inline(always)]
    pub fn after_current_shard_cycle(&self, cycle: Cycle) -> bool {
        (cycle as usize) >= self.cur_shard_cycle_range.end
    }

    /// Extract shard_id which produce this record by cycle
    /// NOTE prev_shard_cycle_range[0] should be 0 otherwise it will panic with subtract-overflow
    #[inline(always)]
    pub fn extract_shard_id_by_cycle(&self, cycle: Cycle) -> usize {
        self.prev_shard_cycle_range.partition_point(|&t| t <= cycle) - 1
    }

    /// Extract shard_id which produce this record by heap addr
    /// NOTE prev_shard_heap_range[0] should be 0 otherwise it will panic with subtract-overflow
    #[inline(always)]
    pub fn extract_shard_id_by_heap_addr(&self, addr: Addr) -> usize {
        self.prev_shard_heap_range.partition_point(|&a| a <= addr) - 1
    }

    /// Extract shard_id which produce this record by hint addr
    /// NOTE prev_shard_hint_range[0] should be 0 otherwise it will panic with subtract-overflow
    #[inline(always)]
    pub fn extract_shard_id_by_hint_addr(&self, addr: Addr) -> usize {
        self.prev_shard_hint_range.partition_point(|&a| a <= addr) - 1
    }

    /// Convert a global `prev_cycle` into this shard's local timestamp,
    /// clamping anything before the shard's first instruction to 0.
    #[inline(always)]
    pub fn aligned_prev_ts(&self, prev_cycle: Cycle) -> Cycle {
        let mut ts = prev_cycle.saturating_sub(self.current_shard_offset_cycle());
        if ts < FullTracer::SUBCYCLES_PER_INSN {
            ts = 0
        }
        ts
    }

    /// Convert a global `cycle` into this shard's local timestamp.
    #[inline(always)]
    pub fn aligned_current_ts(&self, cycle: Cycle) -> Cycle {
        cycle.saturating_sub(self.current_shard_offset_cycle())
    }

    /// Global-cycle offset of this shard's local clock.
    pub fn current_shard_offset_cycle(&self) -> Cycle {
        // cycle of each local shard start from Tracer::SUBCYCLES_PER_INSN
        (self.cur_shard_cycle_range.start as Cycle) - FullTracer::SUBCYCLES_PER_INSN
    }

    /// Finds the **next** future access cycle for the given address, starting from
    /// the specified current cycle.
    ///
    /// Note that the returned cycle is simply the *next* access, not necessarily
    /// the final (last) access of the address.
    ///
    /// For example, if address `0xabc` is accessed at cycles `4` and `8`,
    /// then `find_future_next_access(0xabc, 4)` returns `8`.
    #[inline(always)]
    pub fn find_future_next_access(&self, cycle: Cycle, addr: WordAddr) -> Option<Cycle> {
        self.addr_future_accesses
            .get(cycle as usize)
            .and_then(|res| {
                if res.len() == 1 {
                    // single entry: assumed to belong to `addr` — the address is not re-checked
                    Some(res[0].1)
                } else if res.len() > 1 {
                    res.iter()
                        .find(|(m_addr, _)| *m_addr == addr)
                        .map(|(_, cycle)| *cycle)
                } else {
                    None
                }
            })
    }

    /// Record one RAM access for cross-shard accounting:
    /// - a read whose previous access happened before this shard goes into the read set,
    /// - a write whose next access happens after this shard goes into the write set,
    /// - every access is appended to this thread's touched-address list.
    ///
    /// # Panics
    /// Must be called on a forked (`Either::Right`) context; panics otherwise.
    #[inline(always)]
    #[allow(clippy::too_many_arguments)]
    pub fn send(
        &mut self,
        ram_type: crate::structs::RAMType,
        addr: WordAddr,
        id: u64,
        cycle: Cycle,
        prev_cycle: Cycle,
        value: Word,
        prev_value: Option<Word>,
    ) {
        if !self.is_first_shard()
            && self.is_in_current_shard(cycle)
            && self.before_current_shard_cycle(prev_cycle)
        {
            let addr_raw = addr.baddr().0;
            let is_heap = self.platform.heap.contains(&addr_raw);
            let is_hint = self.platform.hints.contains(&addr_raw);
            // 1. checking reads from the external bus
            if prev_cycle > 0 || (prev_cycle == 0 && (!is_heap && !is_hint)) {
                let prev_shard_id = self.extract_shard_id_by_cycle(prev_cycle);
                let ram_record = self
                    .read_records_tbs
                    .as_mut()
                    .right()
                    .expect("illegal type");
                ram_record.insert(
                    addr,
                    RAMRecord {
                        ram_type,
                        reg_id: id,
                        addr,
                        prev_cycle,
                        cycle,
                        shard_cycle: 0,
                        prev_value,
                        value,
                        shard_id: prev_shard_id,
                    },
                );
            } else {
                assert!(
                    prev_cycle == 0 && (is_heap || is_hint),
                    "addr {addr_raw:x} prev_cycle {prev_cycle}, is_heap {is_heap}, is_hint {is_hint}",
                );
                // 2. handle heap/hint initial reads outside the shard range.
                if !self.shard_heap_addr_range.contains(&addr_raw) {
                    // the producing shard is resolved by address range instead of cycle
                    let prev_shard_id = if is_heap {
                        self.extract_shard_id_by_heap_addr(addr_raw)
                    } else if is_hint {
                        self.extract_shard_id_by_hint_addr(addr_raw)
                    } else {
                        unreachable!()
                    };
                    let ram_record = self
                        .read_records_tbs
                        .as_mut()
                        .right()
                        .expect("illegal type");
                    ram_record.insert(
                        addr,
                        RAMRecord {
                            ram_type,
                            reg_id: id,
                            addr,
                            prev_cycle,
                            cycle,
                            shard_cycle: 0,
                            prev_value,
                            value,
                            shard_id: prev_shard_id,
                        },
                    );
                }
            }
        }
        // check write to external mem bus
        if let Some(future_touch_cycle) = self.find_future_next_access(cycle, addr)
            && self.after_current_shard_cycle(future_touch_cycle)
            && self.is_in_current_shard(cycle)
        {
            let shard_cycle = self.aligned_current_ts(cycle);
            let ram_record = self
                .write_records_tbs
                .as_mut()
                .right()
                .expect("illegal type");
            ram_record.insert(
                addr,
                RAMRecord {
                    ram_type,
                    reg_id: id,
                    addr,
                    prev_cycle,
                    cycle,
                    shard_cycle,
                    prev_value,
                    value,
                    shard_id: self.shard_id,
                },
            );
        }
        let addr_accessed = self
            .addr_accessed_tbs
            .as_mut()
            .right()
            .expect("illegal type");
        addr_accessed.push(addr);
    }

    /// merge addr accessed in different threads
    pub fn get_addr_accessed(&self) -> FxHashSet<WordAddr> {
        let mut merged = FxHashSet::default();
        if let Either::Left(addr_accessed_tbs) = &self.addr_accessed_tbs {
            for addrs in addr_accessed_tbs {
                merged.extend(addrs.iter().copied());
            }
        } else {
            // only the owner (un-forked) context can merge
            panic!("invalid type");
        }
        merged
    }

    /// Splits a total count `num_shards` into up to `num_provers` non-empty parts, distributing as evenly as possible.
    ///
    /// # Behavior
    ///
    /// - If `num_shards == 0` or `num_provers == 0`, returns an empty vector `[]`.
    /// - If `num_shards <= num_provers`, each part will have size `1`, and the total number of parts equals `num_shards`.
    /// - Otherwise, divides `num_shards` evenly across `num_provers` parts so that:
    ///   - The first `num_shards % num_provers` parts get `base + 1` elements,
    ///   - The rest get `base` elements,
    ///     where `base = num_shards / num_provers`.
    ///
    /// This ensures that:
    /// - Every part is non-zero in size.
    /// - The sum of all parts equals `num_shards`.
    /// - The distribution is as balanced as possible (difference <= 1).
    ///
    /// # Examples
    ///
    /// ```
    /// # fn main() {
    /// use ceno_zkvm::e2e::ShardContext;
    /// assert_eq!(ShardContext::distribute_shards_into_provers(3, 2), vec![2, 1]);
    /// assert_eq!(ShardContext::distribute_shards_into_provers(4, 2), vec![2, 2]);
    /// assert_eq!(ShardContext::distribute_shards_into_provers(5, 2), vec![3, 2]);
    /// assert_eq!(ShardContext::distribute_shards_into_provers(10, 3), vec![4, 3, 3]);
    ///
    /// // When n <= m, each item gets its own shard.
    /// assert_eq!(ShardContext::distribute_shards_into_provers(1, 2), vec![1]);
    /// assert_eq!(ShardContext::distribute_shards_into_provers(2, 3), vec![1, 1]);
    /// assert_eq!(ShardContext::distribute_shards_into_provers(3, 4), vec![1, 1, 1]);
    ///
    /// // Edge cases
    /// assert_eq!(ShardContext::distribute_shards_into_provers(0, 3), Vec::<usize>::new());
    /// assert_eq!(ShardContext::distribute_shards_into_provers(5, 0), Vec::<usize>::new());
    /// # }
    /// ```
    /// # Returns
    ///
    /// A `Vec<usize>` representing the size of each part, whose total sum equals `n`.
    pub fn distribute_shards_into_provers(num_shards: usize, num_provers: usize) -> Vec<usize> {
        if num_shards == 0 || num_provers == 0 {
            return vec![];
        }
        // If there are more shards than items, just give each item its own shard
        if num_shards <= num_provers {
            return vec![1; num_shards];
        }
        let base = num_shards / num_provers;
        let remainder = num_shards % num_provers;
        (0..num_provers)
            .map(|i| if i < remainder { base + 1 } else { base })
            .collect()
    }
}
/// Maps a step record to its cell cost; used to size shards
/// (see `ShardContextBuilder::position_next_shard`).
pub trait StepCellExtractor {
    fn extract_cells(&self, step: &StepRecord) -> u64;
}
/// Incrementally splits the execution trace into shards, enforcing per-shard
/// cell/cycle budgets and recording the cycle/heap/hint boundaries of
/// already-finished shards.
pub struct ShardContextBuilder {
    pub cur_shard_id: usize,
    addr_future_accesses: Arc<NextCycleAccess>,
    // budget accumulated for the shard currently being filled
    cur_cells: u64,
    cur_acc_cycle: Cycle,
    max_cell_per_shard: u64,
    max_cycle_per_shard: Cycle,
    // cell budget used for shard 0 (may differ from the steady-state budget)
    target_cell_first_shard: u64,
    // shard 0: [v[0], v[1]), shard 1: [v[1], v[2]), ... (same layout as ShardContext)
    prev_shard_cycle_range: Vec<Cycle>,
    prev_shard_heap_range: Vec<Addr>,
    prev_shard_hint_range: Vec<Addr>,
    // holds the first step for the next shard once the current shard hits its limit
    pending_step: Option<StepRecord>,
    platform: Platform,
}
impl Default for ShardContextBuilder {
    /// Zero/empty builder state with the default platform.
    fn default() -> Self {
        ShardContextBuilder {
            cur_shard_id: 0,
            addr_future_accesses: Arc::new(Default::default()),
            cur_cells: 0,
            cur_acc_cycle: 0,
            max_cell_per_shard: 0,
            max_cycle_per_shard: 0,
            target_cell_first_shard: 0,
            prev_shard_cycle_range: vec![],
            prev_shard_heap_range: vec![],
            prev_shard_hint_range: vec![],
            pending_step: None,
            platform: CENO_PLATFORM.clone(),
        }
    }
}
impl ShardContextBuilder {
    /// set max_cell_per_shard == u64::MAX if target for single shard
    pub fn new(
        multi_prover: &MultiProver,
        platform: Platform,
        addr_future_accesses: NextCycleAccess,
    ) -> Self {
        // only the single-prover configuration is supported by this builder
        assert_eq!(multi_prover.max_provers, 1);
        assert_eq!(multi_prover.prover_id, 0);
        ShardContextBuilder {
            cur_shard_id: 0,
            cur_cells: 0,
            cur_acc_cycle: 0,
            max_cell_per_shard: multi_prover.max_cell_per_shard,
            max_cycle_per_shard: multi_prover.max_cycle_per_shard,
            target_cell_first_shard: {
                if multi_prover.max_cell_per_shard == u64::MAX {
                    u64::MAX
                } else {
                    multi_prover.max_cell_per_shard
                }
            },
            addr_future_accesses: Arc::new(addr_future_accesses),
            // index 0 of each range vector is the base value so shard-id
            // extraction can subtract 1 without underflow
            prev_shard_cycle_range: vec![0],
            prev_shard_heap_range: vec![0],
            prev_shard_hint_range: vec![0],
            pending_step: None,
            platform,
        }
    }

    /// Consume steps from `steps_iter` until the cell or cycle budget of the
    /// next shard would be exceeded, collecting that shard's steps into `steps`.
    ///
    /// Returns `None` once the iterator is exhausted (no steps collected);
    /// otherwise returns the `ShardContext` describing the collected shard.
    ///
    /// # Panics
    /// Panics if a single step already exceeds the shard budget, or if a
    /// non-first shard does not start exactly where the previous one ended.
    pub fn position_next_shard<'a>(
        &mut self,
        steps_iter: &mut impl Iterator<Item = StepRecord>,
        step_cell_extractor: impl StepCellExtractor,
        steps: &mut Vec<StepRecord>,
    ) -> Option<ShardContext<'a>> {
        steps.clear();
        let target_cost_current_shard = if self.cur_shard_id == 0 {
            self.target_cell_first_shard
        } else {
            self.max_cell_per_shard
        };
        loop {
            // a step that overflowed the previous shard's budget is consumed first
            let step = if let Some(step) = self.pending_step.take() {
                step
            } else {
                match steps_iter.next() {
                    Some(step) => step,
                    None => break,
                }
            };
            let next_cells = self.cur_cells + step_cell_extractor.extract_cells(&step);
            let next_cycle = self.cur_acc_cycle + FullTracer::SUBCYCLES_PER_INSN;
            if next_cells >= target_cost_current_shard || next_cycle >= self.max_cycle_per_shard {
                assert!(
                    !steps.is_empty(),
                    "empty record match when splitting shards"
                );
                // stash the overflowing step; it becomes the next shard's first step
                self.pending_step = Some(step);
                break;
            }
            self.cur_cells = next_cells;
            self.cur_acc_cycle = next_cycle;
            steps.push(step);
        }
        if steps.is_empty() {
            return None;
        }
        if self.cur_shard_id > 0 {
            // sanity: this shard must start exactly at the previous shard's
            // cycle / heap / hint end boundaries
            assert_eq!(
                steps.first().map(|step| step.cycle()).unwrap_or_default(),
                self.prev_shard_cycle_range
                    .last()
                    .copied()
                    .unwrap_or(FullTracer::SUBCYCLES_PER_INSN)
            );
            assert_eq!(
                steps
                    .first()
                    .map(|step| step.heap_maxtouch_addr.before)
                    .unwrap_or_default(),
                self.prev_shard_heap_range
                    .last()
                    .copied()
                    .unwrap_or(self.platform.heap.start)
                    .into()
            );
            assert_eq!(
                steps
                    .first()
                    .map(|step| step.hint_maxtouch_addr.before)
                    .unwrap_or_default(),
                self.prev_shard_hint_range
                    .last()
                    .copied()
                    .unwrap_or(self.platform.hints.start)
                    .into()
            );
        }
        let shard_ctx = ShardContext {
            shard_id: self.cur_shard_id,
            cur_shard_cycle_range: steps.first().map(|step| step.cycle() as usize).unwrap()
                ..(steps.last().unwrap().cycle() + FullTracer::SUBCYCLES_PER_INSN) as usize,
            addr_future_accesses: self.addr_future_accesses.clone(),
            prev_shard_cycle_range: self.prev_shard_cycle_range.clone(),
            prev_shard_heap_range: self.prev_shard_heap_range.clone(),
            prev_shard_hint_range: self.prev_shard_hint_range.clone(),
            platform: self.platform.clone(),
            shard_heap_addr_range: steps
                .first()
                .map(|step| step.heap_maxtouch_addr.before.0)
                .unwrap_or_default()
                ..steps
                    .last()
                    .map(|step| step.heap_maxtouch_addr.after.0)
                    .unwrap_or_default(),
            shard_hint_addr_range: steps
                .first()
                .map(|step| step.hint_maxtouch_addr.before.0)
                .unwrap_or_default()
                ..steps
                    .last()
                    .map(|step| step.hint_maxtouch_addr.after.0)
                    .unwrap_or_default(),
            ..Default::default()
        };
        // extend the per-shard boundary tables so later shards can resolve
        // which shard produced a value
        self.prev_shard_cycle_range
            .push(shard_ctx.cur_shard_cycle_range.end as u64);
        self.prev_shard_heap_range
            .push(shard_ctx.shard_heap_addr_range.end);
        self.prev_shard_hint_range
            .push(shard_ctx.shard_hint_addr_range.end);
        // reset the accumulators for the next shard
        self.cur_cells = 0;
        self.cur_acc_cycle = 0;
        self.cur_shard_id += 1;
        Some(shard_ctx)
    }
}
/// Lazily replays `StepRecord`s by re-running the VM up to the number of steps
/// recorded during the preflight execution. This keeps shard generation memory
/// usage bounded without storing the entire trace.
struct StepReplay {
    vm: VMState,
    // steps that may still be produced before the iterator permanently ends
    remaining_steps: usize,
}
impl StepReplay {
    /// Build a fresh VM for replay, seeding hint and public-io memory from the
    /// initial memory state and bounding the replay to `remaining_steps`.
    fn new(
        platform: Platform,
        program: Arc<Program>,
        init_mem_state: &InitMemState,
        remaining_steps: usize,
    ) -> Self {
        let mut vm = VMState::new(platform, program);
        for record in chain!(init_mem_state.hints.iter(), init_mem_state.io.iter()) {
            vm.init_memory(record.addr.into(), record.value);
        }
        StepReplay {
            vm,
            remaining_steps,
        }
    }
}
impl Iterator for StepReplay {
    type Item = StepRecord;

    /// Advance the VM by one step. Ends permanently once the step budget is
    /// spent or the VM produces no further step; panics on a VM error.
    fn next(&mut self) -> Option<Self::Item> {
        if self.remaining_steps == 0 {
            return None;
        }
        let stepped = self
            .vm
            .next_step_record()
            .unwrap_or_else(|err| panic!("vm exec failed during witness replay: {err:?}"));
        match stepped {
            Some(step) => {
                self.remaining_steps -= 1;
                Some(step)
            }
            None => {
                self.remaining_steps = 0;
                None
            }
        }
    }
}
pub fn emulate_program<'a>(
program: Arc<Program>,
max_steps: usize,
init_mem_state: &InitMemState,
platform: &Platform,
multi_prover: &MultiProver,
) -> EmulationResult<'a> {
let InitMemState {
mem: mem_init,
io: io_init,
reg: reg_init,
hints: hints_init,
stack: _,
heap: _,
} = init_mem_state;
let mut vm: VMState<PreflightTracer> = VMState::new_with_tracer(platform.clone(), program);
for record in chain!(hints_init, io_init) {
vm.init_memory(record.addr.into(), record.value);
}
let exit_code = info_span!("[ceno] emulator.preflight-execute").in_scope(|| {
vm.iter_until_halt()
.take(max_steps)
.try_for_each(|step| step.map(|_| ()))
.unwrap_or_else(|err| panic!("emulator trapped before halt: {err}"));
vm.halted_state().map(|halt_state| halt_state.exit_code)
});
if platform.is_debug {
let all_messages = read_all_messages(&vm)
.iter()
.map(|msg| String::from_utf8_lossy(msg).to_string())
.collect::<Vec<_>>();
if !all_messages.is_empty() {
tracing::info!("========= BEGIN: I/O from guest =========");
for msg in &all_messages {
tracing::info!("│ {}", msg);
}
tracing::info!("========= END: I/O from guest =========");
}
}
let final_access = vm.tracer().final_accesses();
let end_cycle = vm.tracer().cycle();
let insts = vm.tracer().executed_insts();
tracing::info!("program executed {insts} instructions in {end_cycle} cycles");
metrics::gauge!("cycles").set(insts as f64);
// Find the final register values and cycles.
let reg_final = reg_init
.iter()
.map(|rec| {
let index = rec.addr as usize;
if index < VM_REG_COUNT {
let vma: WordAddr = Platform::register_vma(index).into();
MemFinalRecord {
ram_type: RAMType::Register,
addr: rec.addr,
value: vm.peek_register(index),
init_value: rec.value,
cycle: final_access.cycle(vma),
}
} else {
// The table is padded beyond the number of registers.
MemFinalRecord {
ram_type: RAMType::Register,
addr: rec.addr,
value: 0,
init_value: 0,
cycle: 0,
}
}
})
.collect_vec();
// Find the final memory values and cycles.
let mem_final = mem_init
.iter()
.map(|rec| {
let vma: WordAddr = rec.addr.into();
MemFinalRecord {
ram_type: RAMType::Memory,
addr: rec.addr,
value: vm.peek_memory(vma),
init_value: rec.value,
cycle: final_access.cycle(vma),
}
})
.collect_vec();
// Find the final public IO cycles.
let io_final = io_init
.iter()
.map(|rec| MemFinalRecord {
ram_type: RAMType::Memory,
addr: rec.addr,
value: rec.value,
init_value: rec.value,
cycle: final_access.cycle(rec.addr.into()),
})
.collect_vec();
// Find the final hints IO cycles.
let hints_final = hints_init
.iter()
.map(|rec| MemFinalRecord {
ram_type: RAMType::Memory,
addr: rec.addr,
value: rec.value,
init_value: rec.value,
cycle: final_access.cycle(rec.addr.into()),
})
.collect_vec();
// get stack access by min/max range
let stack_final = if let Some((min_waddr, _)) = vm
.tracer()
.probe_min_max_address_by_start_addr(ByteAddr::from(platform.stack.start).waddr())
{
(min_waddr..ByteAddr::from(platform.stack.end).waddr())
// stack record collect in reverse order
.rev()
.map(|vma| {
let byte_addr = vma.baddr();
MemFinalRecord {
ram_type: RAMType::Memory,
addr: byte_addr.0,
value: vm.peek_memory(vma),
init_value: 0,
cycle: final_access.cycle(vma),
}
})
.collect_vec()
} else {
vec![]
};
// get heap access by min/max range
let heap_start_waddr = ByteAddr::from(platform.heap.start).waddr();
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/scheme.rs | ceno_zkvm/src/scheme.rs | use crate::structs::EccQuarkProof;
use ff_ext::ExtensionField;
use gkr_iop::gkr::GKRProof;
use itertools::Itertools;
use mpcs::PolynomialCommitmentScheme;
use p3::field::FieldAlgebra;
use serde::{Deserialize, Serialize, de::DeserializeOwned};
use std::{
collections::{BTreeMap, HashMap},
fmt::{self, Debug},
iter,
ops::Div,
rc::Rc,
};
use sumcheck::structs::IOPProverMessage;
use crate::{
instructions::{
Instruction,
riscv::{
constants::{LIMB_BITS, LIMB_MASK, UINT_LIMBS},
ecall::HaltInstruction,
},
},
structs::{TowerProofs, ZKVMVerifyingKey},
};
pub mod constants;
pub mod cpu;
#[cfg(feature = "gpu")]
pub mod gpu;
pub mod hal;
pub mod prover;
pub mod septic_curve;
pub mod utils;
pub mod verifier;
pub mod mock_prover;
#[cfg(test)]
mod tests;
/// Newtype wrapper around a GKR proof for a single opcode circuit.
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize",
    deserialize = "E::BaseField: DeserializeOwned"
))]
pub struct GKROpcodeProof<E: ExtensionField>(pub GKRProof<E>);
/// Proof produced for a single chip (opcode or table circuit).
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize",
    deserialize = "E::BaseField: DeserializeOwned"
))]
pub struct ZKVMChipProof<E: ExtensionField> {
    // tower evaluation at layer 1
    pub r_out_evals: Vec<Vec<E>>,
    pub w_out_evals: Vec<Vec<E>>,
    pub lk_out_evals: Vec<Vec<E>>,
    pub main_sumcheck_proofs: Option<Vec<IOPProverMessage<E>>>,
    pub gkr_iop_proof: Option<GKRProof<E>>,
    pub tower_proof: TowerProofs<E>,
    pub ecc_proof: Option<EccQuarkProof<E>>,
    pub num_instances: Vec<usize>,
    // evaluations of the fixed and witness input polynomials
    pub fixed_in_evals: Vec<E>,
    pub wits_in_evals: Vec<E>,
}
/// Public values of one shard; each scalar field is interpreted as a
/// (constant) polynomial — see [`PublicValues::to_vec`] for the encoding.
#[derive(Default, Clone, Debug)]
pub struct PublicValues {
    pub exit_code: u32,
    pub init_pc: u32,
    pub init_cycle: u64,
    pub end_pc: u32,
    pub end_cycle: u64,
    pub shard_id: u32,
    pub heap_start_addr: u32,
    pub heap_shard_len: u32,
    pub hint_start_addr: u32,
    pub hint_shard_len: u32,
    // non-scalar: one entry per public-io word
    pub public_io: Vec<u32>,
    pub shard_rw_sum: Vec<u32>,
}
impl PublicValues {
    /// Plain constructor; field meanings match the struct definition.
    #[allow(clippy::too_many_arguments)]
    pub fn new(
        exit_code: u32,
        init_pc: u32,
        init_cycle: u64,
        end_pc: u32,
        end_cycle: u64,
        shard_id: u32,
        heap_start_addr: u32,
        heap_shard_len: u32,
        hint_start_addr: u32,
        hint_shard_len: u32,
        public_io: Vec<u32>,
        shard_rw_sum: Vec<u32>,
    ) -> Self {
        Self {
            exit_code,
            init_pc,
            init_cycle,
            end_pc,
            end_cycle,
            shard_id,
            heap_start_addr,
            heap_shard_len,
            hint_start_addr,
            hint_shard_len,
            public_io,
            shard_rw_sum,
        }
    }

    /// Flatten into per-column base-field vectors:
    /// - each scalar field becomes a length-1 (constant) column, with
    ///   `exit_code` split into its low and high 16-bit halves;
    /// - `public_io` is split into `UINT_LIMBS` limb columns;
    /// - each `shard_rw_sum` entry becomes its own constant column.
    pub fn to_vec<E: ExtensionField>(&self) -> Vec<Vec<E::BaseField>> {
        vec![
            vec![E::BaseField::from_canonical_u32(self.exit_code & 0xffff)],
            vec![E::BaseField::from_canonical_u32(
                (self.exit_code >> 16) & 0xffff,
            )],
            vec![E::BaseField::from_canonical_u32(self.init_pc)],
            vec![E::BaseField::from_canonical_u64(self.init_cycle)],
            vec![E::BaseField::from_canonical_u32(self.end_pc)],
            vec![E::BaseField::from_canonical_u64(self.end_cycle)],
            vec![E::BaseField::from_canonical_u32(self.shard_id)],
            vec![E::BaseField::from_canonical_u32(self.heap_start_addr)],
            vec![E::BaseField::from_canonical_u32(self.heap_shard_len)],
            vec![E::BaseField::from_canonical_u32(self.hint_start_addr)],
            vec![E::BaseField::from_canonical_u32(self.hint_shard_len)],
        ]
        .into_iter()
        .chain(
            // public io processed into UINT_LIMBS column
            (0..UINT_LIMBS)
                .map(|limb_index| {
                    self.public_io
                        .iter()
                        .map(|value| {
                            E::BaseField::from_canonical_u16(
                                ((value >> (limb_index * LIMB_BITS)) & LIMB_MASK) as u16,
                            )
                        })
                        .collect_vec()
                })
                .collect_vec(),
        )
        .chain(
            self.shard_rw_sum
                .iter()
                .map(|value| vec![E::BaseField::from_canonical_u32(*value)])
                .collect_vec(),
        )
        .collect::<Vec<_>>()
    }
}
/// The full ZKVM proof: the raw/evaluated public input, a map from circuit
/// index (unique across opcode and table circuits) to that circuit's chip
/// proofs, the witness commitment and the PCS opening proof.
#[derive(Clone, Serialize, Deserialize)]
#[serde(bound(
    serialize = "E::BaseField: Serialize",
    deserialize = "E::BaseField: DeserializeOwned"
))]
pub struct ZKVMProof<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> {
    // TODO preserve in serde only for auxiliary public input
    // other raw value can be construct by verifier directly.
    pub raw_pi: Vec<Vec<E::BaseField>>,
    // the evaluation of raw_pi.
    pub pi_evals: Vec<E>,
    // each circuit may have multiple proof instances
    pub chip_proofs: BTreeMap<usize, Vec<ZKVMChipProof<E>>>,
    pub witin_commit: <PCS as PolynomialCommitmentScheme<E>>::Commitment,
    pub opening_proof: PCS::Proof,
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>> ZKVMProof<E, PCS> {
    /// Bundle the prover outputs into a `ZKVMProof`.
    pub fn new(
        raw_pi: Vec<Vec<E::BaseField>>,
        pi_evals: Vec<E>,
        chip_proofs: BTreeMap<usize, Vec<ZKVMChipProof<E>>>,
        witin_commit: <PCS as PolynomialCommitmentScheme<E>>::Commitment,
        opening_proof: PCS::Proof,
    ) -> Self {
        Self {
            raw_pi,
            pi_evals,
            chip_proofs,
            witin_commit,
            opening_proof,
        }
    }

    /// Initial public-input evaluations derived from the raw public input.
    /// Length-1 columns are constant polynomials and evaluate to that
    /// constant; all other columns get a `ZERO` placeholder that is filled
    /// in later via `update_pi_eval`.
    pub fn pi_evals(raw_pi: &[Vec<E::BaseField>]) -> Vec<E> {
        raw_pi
            .iter()
            .map(|pv| {
                if pv.len() == 1 {
                    // this is constant poly, and always evaluate to same constant value
                    E::from(pv[0])
                } else {
                    // set 0 as placeholder. will be evaluate lazily
                    // Or the vector is empty, i.e. the constant 0 polynomial.
                    E::ZERO
                }
            })
            .collect_vec()
    }

    /// Overwrite the cached evaluation of public-input column `idx`.
    /// Panics if `idx` is out of range.
    pub fn update_pi_eval(&mut self, idx: usize, v: E) {
        self.pi_evals[idx] = v;
    }

    /// Number of circuits that contributed at least one chip proof.
    pub fn num_circuits(&self) -> usize {
        self.chip_proofs.len()
    }

    /// Whether this proof contains exactly one halt-instruction instance,
    /// i.e. the execution terminated in this shard.
    /// Panics if more than one halt instance is present.
    pub fn has_halt(&self, vk: &ZKVMVerifyingKey<E, PCS>) -> bool {
        // the circuit index is the position of the halt circuit in the vk's key order
        let halt_circuit_index = vk
            .circuit_vks
            .keys()
            .position(|circuit_name| *circuit_name == HaltInstruction::<E>::name())
            .expect("halt circuit not exist");
        let halt_instance_count = self
            .chip_proofs
            .get(&halt_circuit_index)
            .map_or(0, |proofs| {
                proofs
                    .iter()
                    .flat_map(|proof| &proof.num_instances)
                    .copied()
                    .sum()
            });
        if halt_instance_count > 0 {
            assert_eq!(
                halt_instance_count, 1,
                "abnormal halt instance count {halt_instance_count} != 1"
            );
        }
        halt_instance_count == 1
    }
}
impl<E: ExtensionField, PCS: PolynomialCommitmentScheme<E> + Serialize> fmt::Display
    for ZKVMProof<E, PCS>
{
    /// Render a serialized-size breakdown of the proof: overall size in MiB,
    /// the share taken by the commitment, opening, tower proofs and main
    /// sumcheck proofs, and a per-circuit summary sorted by size.
    /// Percentages use integer division, so they are truncated.
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        // break down zkvm proof size
        // also provide by-circuit stats
        let mut by_circuitname_stats = HashMap::new();
        // opcode circuit mpcs size
        let mpcs_opcode_commitment =
            bincode::serialized_size(&self.witin_commit).expect("serialization error");
        let mpcs_opcode_opening =
            bincode::serialized_size(&self.opening_proof).expect("serialization error");
        // tower proof size, accumulated per circuit index as a side effect
        let tower_proof = self
            .chip_proofs
            .iter()
            .flat_map(|(circuit_index, proofs)| {
                iter::repeat_n(circuit_index, proofs.len()).zip(proofs)
            })
            .map(|(circuit_index, proof)| {
                let size = bincode::serialized_size(&proof.tower_proof);
                size.inspect(|size| {
                    *by_circuitname_stats.entry(circuit_index).or_insert(0) += size;
                })
            })
            .collect::<Result<Vec<u64>, _>>()
            .expect("serialization error")
            .iter()
            .sum::<u64>();
        // main sumcheck, accumulated into the same per-circuit stats
        let main_sumcheck = self
            .chip_proofs
            .iter()
            .flat_map(|(circuit_index, proofs)| {
                iter::repeat_n(circuit_index, proofs.len()).zip(proofs)
            })
            .map(|(circuit_index, proof)| {
                let size = bincode::serialized_size(&proof.main_sumcheck_proofs);
                size.inspect(|size| {
                    *by_circuitname_stats.entry(circuit_index).or_insert(0) += size;
                })
            })
            .collect::<Result<Vec<u64>, _>>()
            .expect("serialization error")
            .iter()
            .sum::<u64>();
        // overall size
        let overall_size = bincode::serialized_size(&self).expect("serialization error");
        // break down by circuit name, largest contribution first
        let by_circuitname_stats = by_circuitname_stats
            .iter()
            .sorted_by(|(_, size1), (_, size2)| size1.cmp(size2).reverse())
            .map(|(key, size)| {
                format!(
                    "{}: {:.2}mb({}%)",
                    key,
                    byte_to_mb(*size),
                    (size * 100).div(overall_size)
                )
            })
            .collect::<Vec<String>>()
            .join("\n");
        // let mpcs_size = bincode::serialized_size(&proof.).unwrap().len();
        write!(
            f,
            "overall_size {:.2}mb. \n\
            mpcs commitment {:?}% \n\
            mpcs opening {:?}% \n\
            tower proof {:?}% \n\
            main sumcheck proof {:?}% \n\
            by circuit_name break down: \n\
            {}
            ",
            byte_to_mb(overall_size),
            (mpcs_opcode_commitment * 100).div(overall_size),
            (mpcs_opcode_opening * 100).div(overall_size),
            (tower_proof * 100).div(overall_size),
            (main_sumcheck * 100).div(overall_size),
            by_circuitname_stats,
        )
    }
}
/// Convert a byte count to mebibytes (MiB) as a floating-point value.
fn byte_to_mb(byte_size: u64) -> f64 {
    const BYTES_PER_MIB: f64 = 1024.0 * 1024.0;
    byte_size as f64 / BYTES_PER_MIB
}
/// Build the CPU proving backend (used when the `gpu` feature is disabled).
#[cfg(not(feature = "gpu"))]
pub fn create_backend<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>(
    max_num_variables: usize,
    security_level: mpcs::SecurityLevel,
) -> Rc<gkr_iop::cpu::CpuBackend<E, PCS>> {
    gkr_iop::cpu::CpuBackend::<E, PCS>::new(max_num_variables, security_level).into()
}

/// Build a CPU prover on top of the given backend.
#[cfg(not(feature = "gpu"))]
pub fn create_prover<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>(
    backend: Rc<gkr_iop::cpu::CpuBackend<E, PCS>>,
) -> gkr_iop::cpu::CpuProver<gkr_iop::cpu::CpuBackend<E, PCS>> {
    gkr_iop::cpu::CpuProver::new(backend)
}
/// Build the GPU proving backend (used when the `gpu` feature is enabled).
#[cfg(feature = "gpu")]
pub fn create_backend<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>(
    max_num_variables: usize,
    security_level: mpcs::SecurityLevel,
) -> Rc<gkr_iop::gpu::GpuBackend<E, PCS>> {
    gkr_iop::gpu::GpuBackend::<E, PCS>::new(max_num_variables, security_level).into()
}

/// Build a GPU prover on top of the given backend.
#[cfg(feature = "gpu")]
pub fn create_prover<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>>(
    backend: Rc<gkr_iop::gpu::GpuBackend<E, PCS>>,
) -> gkr_iop::gpu::GpuProver<gkr_iop::gpu::GpuBackend<E, PCS>> {
    gkr_iop::gpu::GpuProver::new(backend)
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions.rs | ceno_zkvm/src/instructions.rs | use crate::{
circuit_builder::CircuitBuilder, e2e::ShardContext, error::ZKVMError, structs::ProgramParams,
tables::RMMCollections, witness::LkMultiplicity,
};
use ceno_emul::StepRecord;
use ff_ext::ExtensionField;
use gkr_iop::{
chip::Chip,
gkr::{GKRCircuit, layer::Layer},
selector::SelectorType,
utils::lk_multiplicity::Multiplicity,
};
use itertools::Itertools;
use multilinear_extensions::{ToExpr, util::max_usable_threads};
use p3::field::FieldAlgebra;
use rayon::{
iter::{IndexedParallelIterator, ParallelIterator},
slice::ParallelSlice,
};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
pub mod riscv;
/// Interface implemented by every opcode circuit: circuit construction plus
/// witness assignment from execution-trace `StepRecord`s.
pub trait Instruction<E: ExtensionField> {
    type InstructionConfig: Send + Sync;

    /// How padding rows beyond the real instances are filled.
    fn padding_strategy() -> InstancePaddingStrategy {
        InstancePaddingStrategy::Default
    }

    /// Unique circuit name (also used to locate the circuit in the vk).
    fn name() -> String;

    /// construct circuit and manipulate circuit builder, then return the respective config
    fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        param: &ProgramParams,
    ) -> Result<Self::InstructionConfig, ZKVMError>;

    /// Wrap `construct_circuit` into a single-layer GKR IOP circuit, wiring
    /// the read/write/lookup/zero records to one shared prefix selector.
    fn build_gkr_iop_circuit(
        cb: &mut CircuitBuilder<E>,
        param: &ProgramParams,
    ) -> Result<(Self::InstructionConfig, GKRCircuit<E>), ZKVMError> {
        let config = Self::construct_circuit(cb, param)?;
        let w_len = cb.cs.w_expressions.len();
        let r_len = cb.cs.r_expressions.len();
        let lk_len = cb.cs.lk_expressions.len();
        let zero_len =
            cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
        let selector = cb.create_placeholder_structural_witin(|| "selector");
        let selector_type = SelectorType::Prefix(selector.expr());
        // all shared the same selector
        let (out_evals, mut chip) = (
            [
                // r_record
                (0..r_len).collect_vec(),
                // w_record
                (r_len..r_len + w_len).collect_vec(),
                // lk_record
                (r_len + w_len..r_len + w_len + lk_len).collect_vec(),
                // zero_record
                (0..zero_len).collect_vec(),
            ],
            Chip::new_from_cb(cb, 0),
        );
        // register selector to legacy constrain system
        cb.cs.r_selector = Some(selector_type.clone());
        cb.cs.w_selector = Some(selector_type.clone());
        cb.cs.lk_selector = Some(selector_type.clone());
        cb.cs.zero_selector = Some(selector_type.clone());
        let layer = Layer::from_circuit_builder(cb, format!("{}_main", Self::name()), 0, out_evals);
        chip.add_layer(layer);
        Ok((config, chip.gkr_circuit()))
    }

    /// Fixed (preprocessed) trace for this circuit; `None` when it has no
    /// fixed columns (the default).
    fn generate_fixed_traces(
        _config: &Self::InstructionConfig,
        _num_fixed: usize,
    ) -> Option<RowMajorMatrix<E::BaseField>> {
        None
    }

    // assign single instance giving step from trace
    fn assign_instance<'a>(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext<'a>,
        instance: &mut [E::BaseField],
        lk_multiplicity: &mut LkMultiplicity,
        step: &StepRecord,
    ) -> Result<(), ZKVMError>;

    /// Assign all instances in parallel batches, returning the witness and
    /// structural-witness matrices plus the finalized lookup multiplicities.
    fn assign_instances(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        num_witin: usize,
        num_structural_witin: usize,
        steps: Vec<&StepRecord>,
    ) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
        // TODO: selector is the only structural witness
        // this is workaround, as call `construct_circuit` will not initialized selector
        // we can remove this one all opcode unittest migrate to call `build_gkr_iop_circuit`
        assert!(num_structural_witin == 0 || num_structural_witin == 1);
        let num_structural_witin = num_structural_witin.max(1);
        let nthreads = max_usable_threads();
        // batch size trades thread utilization against per-batch overhead:
        // small traces are assigned in a single batch
        let num_instance_per_batch = if steps.len() > 256 {
            steps.len().div_ceil(nthreads)
        } else {
            steps.len()
        }
        .max(1);
        let lk_multiplicity = LkMultiplicity::default();
        let mut raw_witin =
            RowMajorMatrix::<E::BaseField>::new(steps.len(), num_witin, Self::padding_strategy());
        let mut raw_structual_witin = RowMajorMatrix::<E::BaseField>::new(
            steps.len(),
            num_structural_witin,
            Self::padding_strategy(),
        );
        let raw_witin_iter = raw_witin.par_batch_iter_mut(num_instance_per_batch);
        let raw_structual_witin_iter =
            raw_structual_witin.par_batch_iter_mut(num_instance_per_batch);
        let shard_ctx_vec = shard_ctx.get_forked();
        raw_witin_iter
            .zip_eq(raw_structual_witin_iter)
            .zip_eq(steps.par_chunks(num_instance_per_batch))
            .zip(shard_ctx_vec)
            .flat_map(
                |(((instances, structural_instance), steps), mut shard_ctx)| {
                    let mut lk_multiplicity = lk_multiplicity.clone();
                    instances
                        .chunks_mut(num_witin)
                        .zip_eq(structural_instance.chunks_mut(num_structural_witin))
                        .zip_eq(steps)
                        .map(|((instance, structural_instance), step)| {
                            // mark the selector column active for this real row
                            *structural_instance.last_mut().unwrap() = E::BaseField::ONE;
                            Self::assign_instance(
                                config,
                                &mut shard_ctx,
                                instance,
                                &mut lk_multiplicity,
                                step,
                            )
                        })
                        .collect::<Vec<_>>()
                },
            )
            .collect::<Result<(), ZKVMError>>()?;
        raw_witin.padding_by_strategy();
        raw_structual_witin.padding_by_strategy();
        Ok((
            [raw_witin, raw_structual_witin],
            lk_multiplicity.into_finalize_result(),
        ))
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/chip_handler.rs | ceno_zkvm/src/chip_handler.rs | use ff_ext::ExtensionField;
use gkr_iop::{error::CircuitBuilderError, gadgets::AssertLtConfig};
use crate::instructions::riscv::constants::UINT_LIMBS;
use multilinear_extensions::{Expression, ToExpr};
pub mod general;
pub mod global_state;
pub mod memory;
pub mod register;
/// Records the machine's global state (pc, timestamp) at shard entry/exit.
pub trait GlobalStateRegisterMachineChipOperations<E: ExtensionField> {
    /// Record the (pc, ts) state the shard starts from.
    fn state_in(&mut self, pc: Expression<E>, ts: Expression<E>)
    -> Result<(), CircuitBuilderError>;

    /// Record the (pc, ts) state the shard ends at.
    fn state_out(
        &mut self,
        pc: Expression<E>,
        ts: Expression<E>,
    ) -> Result<(), CircuitBuilderError>;
}
/// The common representation of a register value.
/// Format: `[u16; UINT_LIMBS]`, least-significant-first.
pub type RegisterExpr<E> = [Expression<E>; UINT_LIMBS];

/// Timestamped read/write constraints against the register file.
/// Both operations return the previous-timestamp expression together with
/// the less-than gadget config asserting `prev_ts < ts`.
pub trait RegisterChipOperations<E: ExtensionField, NR: Into<String>, N: FnOnce() -> NR> {
    fn register_read(
        &mut self,
        name_fn: N,
        register_id: impl ToExpr<E, Output = Expression<E>>,
        prev_ts: Expression<E>,
        ts: Expression<E>,
        value: RegisterExpr<E>,
    ) -> Result<(Expression<E>, AssertLtConfig), CircuitBuilderError>;

    #[allow(clippy::too_many_arguments)]
    fn register_write(
        &mut self,
        name_fn: N,
        register_id: impl ToExpr<E, Output = Expression<E>>,
        prev_ts: Expression<E>,
        ts: Expression<E>,
        prev_values: RegisterExpr<E>,
        value: RegisterExpr<E>,
    ) -> Result<(Expression<E>, AssertLtConfig), CircuitBuilderError>;
}
/// The common representation of a memory address.
pub type AddressExpr<E> = Expression<E>;

/// The common representation of a memory word.
/// Format: `[u16; UINT_LIMBS]`, least-significant-first.
pub type MemoryExpr<E> = [Expression<E>; UINT_LIMBS];

/// Timestamped read/write constraints against memory; mirrors
/// `RegisterChipOperations` but is keyed by address instead of register id.
pub trait MemoryChipOperations<E: ExtensionField, NR: Into<String>, N: FnOnce() -> NR> {
    fn memory_read(
        &mut self,
        name_fn: N,
        memory_addr: &AddressExpr<E>,
        prev_ts: Expression<E>,
        ts: Expression<E>,
        value: MemoryExpr<E>,
    ) -> Result<(Expression<E>, AssertLtConfig), CircuitBuilderError>;

    #[allow(clippy::too_many_arguments)]
    fn memory_write(
        &mut self,
        name_fn: N,
        memory_addr: &AddressExpr<E>,
        prev_ts: Expression<E>,
        ts: Expression<E>,
        prev_values: MemoryExpr<E>,
        value: MemoryExpr<E>,
    ) -> Result<(Expression<E>, AssertLtConfig), CircuitBuilderError>;
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/uint/arithmetic.rs | ceno_zkvm/src/uint/arithmetic.rs | use ff_ext::{ExtensionField, SmallField};
use gkr_iop::error::CircuitBuilderError;
use itertools::{Itertools, izip};
use super::{UIntLimbs, UintLimb};
use crate::{
circuit_builder::CircuitBuilder, gadgets::AssertLtConfig,
instructions::riscv::config::IsEqualConfig,
};
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::FieldAlgebra;
impl<const M: usize, const C: usize, E: ExtensionField> UIntLimbs<M, C, E> {
    /// 2^C, the radix of a single limb.
    const POW_OF_C: usize = 2_usize.pow(C as u32);
    /// Mask selecting the low C bits of a value.
    const LIMB_BIT_MASK: u64 = (1 << C) - 1;

    /// Core little-endian limb addition `self + addend`, allocating boolean
    /// carry witnesses and range-checking each output limb to C bits.
    /// When `with_overflow` is false the top carry is dropped, i.e. the
    /// addition is modulo 2^M.
    fn internal_add(
        &self,
        circuit_builder: &mut CircuitBuilder<E>,
        addend: &Vec<Expression<E>>,
        with_overflow: bool,
    ) -> Result<UIntLimbs<M, C, E>, CircuitBuilderError> {
        let mut c = UIntLimbs::<M, C, E>::new_as_empty();
        // allocate witness cells and do range checks for carries
        c.alloc_carry_unchecked(
            || "add_carry",
            circuit_builder,
            with_overflow,
            Self::NUM_LIMBS,
        )?;
        let Some(carries) = &c.carries else {
            return Err(CircuitBuilderError::CircuitError(
                "empty carry".to_string().into(),
            ));
        };
        // every carry of an addition is 0 or 1
        carries.iter().enumerate().try_for_each(|(i, carry)| {
            circuit_builder.assert_bit(|| format!("carry_{i}_in_as_bit"), carry.expr())
        })?;
        // perform add operation
        // c[i] = a[i] + b[i] + carry[i-1] - carry[i] * 2 ^ C
        c.limbs = UintLimb::Expression(
            (self.expr())
                .iter()
                .zip((*addend).iter())
                .enumerate()
                .map(|(i, (a, b))| {
                    let carries = c.carries.as_ref().unwrap();
                    let carry = if i > 0 { carries.get(i - 1) } else { None };
                    let next_carry = carries.get(i);
                    let mut limb_expr = a.clone() + b.clone();
                    if let Some(carry) = carry {
                        limb_expr = limb_expr.clone() + carry.expr();
                    }
                    if let Some(next_carry) = next_carry {
                        limb_expr = limb_expr.clone() - next_carry.expr() * Self::POW_OF_C;
                    }
                    circuit_builder
                        .assert_ux::<_, _, C>(|| format!("limb_{i}_in_{C}"), limb_expr.clone())?;
                    Ok(limb_expr)
                })
                .collect::<Result<Vec<Expression<E>>, CircuitBuilderError>>()?,
        );
        Ok(c)
    }

    /// Add a compile-time constant: the constant is decomposed into C-bit
    /// limbs and fed through `internal_add`.
    /// Panics if `constant` is not `Expression::Constant`.
    pub fn add_const<NR: Into<String>, N: FnOnce() -> NR>(
        &self,
        name_fn: N,
        circuit_builder: &mut CircuitBuilder<E>,
        constant: Expression<E>,
        with_overflow: bool,
    ) -> Result<Self, CircuitBuilderError> {
        circuit_builder.namespace(name_fn, |cb| {
            let Expression::Constant(c) = constant else {
                panic!("addend is not a constant type");
            };
            let b = c
                .left()
                .expect("do not support extension field here")
                .to_canonical_u64();
            // convert Expression::Constant to limbs
            let b_limbs = (0..Self::NUM_LIMBS)
                .map(|i| {
                    E::BaseField::from_canonical_u64((b >> (C * i)) & Self::LIMB_BIT_MASK).expr()
                })
                .collect_vec();
            self.internal_add(cb, &b_limbs, with_overflow)
        })
    }

    /// Little-endian addition.
    pub fn add<NR: Into<String>, N: FnOnce() -> NR>(
        &self,
        name_fn: N,
        circuit_builder: &mut CircuitBuilder<E>,
        addend: &UIntLimbs<M, C, E>,
        with_overflow: bool,
    ) -> Result<UIntLimbs<M, C, E>, CircuitBuilderError> {
        circuit_builder.namespace(name_fn, |cb| {
            self.internal_add(cb, &addend.expr(), with_overflow)
        })
    }

    /// Core schoolbook limb multiplication. `M2 == M` keeps only the low
    /// half of the product; `M2 == 2 * M` keeps the full double-width
    /// result. Output limbs are range-checked to C bits and carries are
    /// bounded by the degree-2 max-carry constant.
    fn internal_mul<const M2: usize>(
        &mut self,
        circuit_builder: &mut CircuitBuilder<E>,
        multiplier: &mut UIntLimbs<M, C, E>,
        with_overflow: bool,
    ) -> Result<UIntLimbs<M2, C, E>, CircuitBuilderError> {
        debug_assert!(M2 == M || M2 == 2 * M, "illegal M2 {M2} and M {M}");
        let is_hi_limb = M2 == 2 * M;
        let num_limbs = if is_hi_limb {
            2 * Self::NUM_LIMBS
        } else {
            Self::NUM_LIMBS
        };
        // with high limb, overall cell will be double
        let c_limbs: Vec<WitIn> = (0..num_limbs).try_fold(vec![], |mut c_limbs, i| {
            let limb = circuit_builder.create_witin(|| format!("limb_{i}"));
            circuit_builder.assert_ux::<_, _, C>(|| format!("limb_{i}_in_{C}"), limb.expr())?;
            c_limbs.push(limb);
            Result::<Vec<WitIn>, CircuitBuilderError>::Ok(c_limbs)
        })?;
        let c_carries: Vec<WitIn> = (0..num_limbs).try_fold(vec![], |mut c_carries, i| {
            // skip last carry if with_overflow == false
            if i != num_limbs - 1 || with_overflow {
                let carry = circuit_builder.create_witin(|| format!("carry_{i}"));
                c_carries.push(carry);
            }
            Result::<Vec<WitIn>, CircuitBuilderError>::Ok(c_carries)
        })?;
        // assert carry range less than max carry value constant
        let carries_auxiliary_lt_config = c_carries
            .iter()
            .enumerate()
            .map(|(i, carry)| {
                AssertLtConfig::construct_circuit(
                    circuit_builder,
                    || format!("carry_{i}_in_less_than"),
                    carry.expr(),
                    (Self::MAX_DEGREE_2_MUL_CARRY_VALUE as usize).into(),
                    Self::MAX_DEGREE_2_MUL_CARRY_BITS,
                )
            })
            .collect::<Result<Vec<AssertLtConfig>, CircuitBuilderError>>()?;
        // creating a witness constrained as expression to reduce overall degree
        let mut swap_witin = |name: &str,
                              u: &mut UIntLimbs<M, C, E>|
         -> Result<Vec<Expression<E>>, CircuitBuilderError> {
            if u.is_expr() {
                circuit_builder.namespace(
                    || name.to_owned(),
                    |cb| {
                        let existing_expr = u.expr();
                        // this will overwrite existing expressions
                        u.replace_limbs_with_witin(|| "replace_limbs_with_witin".to_string(), cb)?;
                        // check if the new witness equals the existing expression
                        izip!(u.expr(), existing_expr).try_for_each(|(lhs, rhs)| {
                            cb.require_equal(|| "new_witin_equal_expr".to_string(), lhs, rhs)
                        })?;
                        Ok(())
                    },
                )?;
            }
            Ok(u.expr())
        };
        let a_expr = swap_witin("lhs", self)?;
        let b_expr = swap_witin("rhs", multiplier)?;
        // compute the result: schoolbook cross products, truncated to num_limbs
        let mut result_c: Vec<Expression<E>> = Vec::<Expression<E>>::with_capacity(c_limbs.len());
        a_expr.iter().enumerate().for_each(|(i, a)| {
            b_expr.iter().enumerate().for_each(|(j, b)| {
                let idx = i + j;
                if idx < c_limbs.len() {
                    if result_c.get(idx).is_none() {
                        result_c.push(a * b);
                    } else {
                        result_c[idx] += a * b;
                    }
                }
            });
        });
        result_c.resize(c_limbs.len(), Expression::ZERO);
        // constrain each limb with carry:
        // result[i] - c[i] + carry[i-1] - carry[i] * 2^C == 0
        c_limbs.iter().enumerate().try_for_each(|(i, c_limb)| {
            let carry = if i > 0 { c_carries.get(i - 1) } else { None };
            let next_carry = c_carries.get(i);
            result_c[i] = result_c[i].clone() - c_limb.expr();
            if let Some(carry) = carry {
                result_c[i] = result_c[i].clone() + carry.expr();
            }
            if let Some(next_carry) = next_carry {
                result_c[i] = result_c[i].clone() - next_carry.expr() * Self::POW_OF_C;
            }
            circuit_builder.require_zero(|| format!("mul_zero_{i}"), result_c[i].clone())?;
            Ok::<(), CircuitBuilderError>(())
        })?;
        Ok(UIntLimbs::from_witins_unchecked(
            c_limbs,
            Some(c_carries),
            Some(carries_auxiliary_lt_config),
        ))
    }

    /// Little-endian multiplication; see `internal_mul` for the meaning of
    /// the `M2` output width.
    pub fn mul<const M2: usize, NR: Into<String>, N: FnOnce() -> NR>(
        &mut self,
        name_fn: N,
        circuit_builder: &mut CircuitBuilder<E>,
        multiplier: &mut UIntLimbs<M, C, E>,
        with_overflow: bool,
    ) -> Result<UIntLimbs<M2, C, E>, CircuitBuilderError> {
        circuit_builder.namespace(name_fn, |cb| {
            self.internal_mul(cb, multiplier, with_overflow)
        })
    }

    /// Fused multiply-add: returns `(self * multiplier + addend, self * multiplier)`.
    /// The addend is added to the high half of the product when `M2 == 2 * M`,
    /// otherwise to the (low-width) product itself.
    pub fn mul_add<const M2: usize, NR: Into<String>, N: FnOnce() -> NR>(
        &mut self,
        name_fn: N,
        circuit_builder: &mut CircuitBuilder<E>,
        multiplier: &mut UIntLimbs<M, C, E>,
        addend: &UIntLimbs<M, C, E>,
        with_overflow: bool,
    ) -> Result<(UIntLimbs<M, C, E>, UIntLimbs<M2, C, E>), CircuitBuilderError> {
        circuit_builder.namespace(name_fn, |cb| {
            let mul = cb.namespace(
                || "mul",
                |cb| self.internal_mul::<M2>(cb, multiplier, with_overflow),
            )?;
            let mul_lo_or_hi = if M2 == 2 * M {
                // hi limb
                let (_, mul_hi) = mul.as_lo_hi()?;
                mul_hi
            } else {
                // lo limb
                UIntLimbs::from_exprs_unchecked(mul.expr())
            };
            let add = cb.namespace(
                || "add",
                |cb| mul_lo_or_hi.internal_add(cb, &addend.expr(), with_overflow),
            )?;
            Ok((add, mul))
        })
    }

    /// Check two UIntLimbs are equal
    pub fn require_equal<NR: Into<String>, N: FnOnce() -> NR>(
        &self,
        name_fn: N,
        circuit_builder: &mut CircuitBuilder<E>,
        rhs: &UIntLimbs<M, C, E>,
    ) -> Result<(), CircuitBuilderError> {
        circuit_builder.namespace(name_fn, |cb| {
            for (i, (limb_lhs, limb_rhs)) in self.expr().into_iter().zip_eq(rhs.expr()).enumerate()
            {
                // skip when both expression are constant
                if matches!(limb_lhs, Expression::Constant(_))
                    && matches!(limb_rhs, Expression::Constant(_))
                {
                    continue;
                }
                cb.require_equal(
                    || format!("lhs_limb[{i}] == rhs_limb[{i}]"),
                    limb_lhs,
                    limb_rhs,
                )?;
            }
            Ok(())
        })
    }

    /// Equality flag: builds per-limb equality witnesses, then checks that
    /// the number of equal limbs equals `NUM_LIMBS`.
    pub fn is_equal(
        &self,
        circuit_builder: &mut CircuitBuilder<E>,
        rhs: &UIntLimbs<M, C, E>,
    ) -> Result<IsEqualConfig, CircuitBuilderError> {
        let n_limbs = Self::NUM_LIMBS;
        let (is_equal_per_limb, diff_inv_per_limb): (Vec<WitIn>, Vec<WitIn>) =
            izip!(&self.limbs, &rhs.limbs)
                .map(|(a, b)| circuit_builder.is_equal(a.expr(), b.expr()))
                .collect::<Result<Vec<(WitIn, WitIn)>, CircuitBuilderError>>()?
                .into_iter()
                .unzip();
        let sum_expr = is_equal_per_limb.iter().map(ToExpr::expr).sum();
        let sum_flag = circuit_builder.create_witin_from_exprs(|| "sum_flag", sum_expr, false)?;
        let (is_equal, diff_inv) =
            circuit_builder.is_equal(sum_flag.expr(), Expression::from(n_limbs))?;
        Ok(IsEqualConfig {
            is_equal_per_limb,
            diff_inv_per_limb,
            is_equal,
            diff_inv,
        })
    }
}
#[cfg(test)]
mod tests {
mod add {
use crate::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
uint::UIntLimbs,
};
use ff_ext::{ExtensionField, GoldilocksExt2};
use itertools::Itertools;
use multilinear_extensions::{ToExpr, utils::eval_by_expr};
use p3::field::FieldAlgebra;
type E = GoldilocksExt2;
#[test]
fn test_add64_16_no_carries() {
// a = 1 + 1 * 2^16
// b = 2 + 1 * 2^16
// c = 3 + 2 * 2^16 with 0 carries
let a = vec![1, 1, 0, 0];
let b = vec![2, 1, 0, 0];
let carries = vec![0; 3]; // no overflow
let witness_values = [a, b, carries].concat();
verify::<64, 16, E>(witness_values, None, false);
}
#[test]
fn test_add64_16_w_carry() {
// a = 65535 + 1 * 2^16
// b = 2 + 1 * 2^16
// c = 1 + 3 * 2^16 with carries [1, 0, 0, 0]
let a = vec![0xFFFF, 1, 0, 0];
let b = vec![2, 1, 0, 0];
let carries = vec![1, 0, 0]; // no overflow
let witness_values = [a, b, carries].concat();
verify::<64, 16, E>(witness_values, None, false);
}
#[test]
fn test_add64_16_w_carries() {
// a = 65535 + 65534 * 2^16
// b = 2 + 1 * 2^16
// c = 1 + 0 * 2^16 + 1 * 2^32 with carries [1, 1, 0, 0]
let a = vec![0xFFFF, 0xFFFE, 0, 0];
let b = vec![2, 1, 0, 0];
let carries = vec![1, 1, 0]; // no overflow
let witness_values = [a, b, carries].concat();
verify::<64, 16, E>(witness_values, None, false);
}
#[test]
fn test_add64_16_w_overflow() {
// a = 1 + 1 * 2^16 + 0 + 65535 * 2^48
// b = 2 + 1 * 2^16 + 0 + 2 * 2^48
// c = 3 + 2 * 2^16 + 0 + 1 * 2^48 with carries [0, 0, 0, 1]
let a = vec![1, 1, 0, 0xFFFF];
let b = vec![2, 1, 0, 2];
let carries = vec![0, 0, 0, 1];
let witness_values = [a, b, carries].concat();
verify::<64, 16, E>(witness_values, None, false);
}
#[test]
fn test_add32_16_w_carry() {
// a = 65535 + 1 * 2^16
// b = 2 + 1 * 2^16
// c = 1 + 3 * 2^16 with carries [1]
let a = vec![0xFFFF, 1];
let b = vec![2, 1];
let carries = vec![1]; // no overflow
let witness_values = [a, b, carries].concat();
verify::<32, 16, E>(witness_values, None, false);
}
#[test]
fn test_add32_5_w_carry() {
// a = 31
// b = 2 + 1 * 2^5
// c = 1 + 1 * 2^5 with carries [1, 0, 0, 0]
let a = vec![31, 1, 0, 0, 0, 0, 0];
let b = vec![2, 1, 0, 0, 0, 0, 0];
let carries = vec![1, 0, 0, 0, 0, 0]; // no overflow
let witness_values = [a, b, carries].concat();
verify::<32, 5, E>(witness_values, None, false);
}
#[test]
fn test_add_const64_16_no_carries() {
// a = 1 + 1 * 2^16
// const b = 2
// c = 3 + 1 * 2^16 with 0 carries
let a = vec![1, 1, 0, 0];
let carries = vec![0; 3]; // no overflow
let witness_values = [a, carries].concat();
verify::<64, 16, E>(witness_values, Some(2), false);
}
#[test]
fn test_add_const64_16_w_carries() {
// a = 65535 + 65534 * 2^16
// const b = 2 + 1 * 2^16 = 65,538
// c = 1 + 0 * 2^16 + 1 * 2^32 with carries [1, 1, 0, 0]
let a = vec![0xFFFF, 0xFFFE, 0, 0];
let carries = vec![1, 1, 0]; // no overflow
let witness_values = [a, carries].concat();
verify::<64, 16, E>(witness_values, Some(65538), false);
}
#[test]
fn test_add_const32_16_w_carry() {
// a = 65535 + 1 * 2^16
// const b = 2 + 1 * 2^16 = 65,538
// c = 1 + 3 * 2^16 with carries [1]
let a = vec![0xFFFF, 1];
let carries = vec![1]; // no overflow
let witness_values = [a, carries].concat();
verify::<32, 16, E>(witness_values, Some(65538), false);
}
#[test]
fn test_add_const32_5_w_carry() {
// a = 31
// const b = 2 + 1 * 2^5 = 34
// c = 1 + 1 * 2^5 with carries [1, 0, 0, 0]
let a = vec![31, 1, 0, 0, 0, 0, 0];
let carries = vec![1, 0, 0, 0, 0, 0]; // no overflow
let witness_values = [a, carries].concat();
verify::<32, 5, E>(witness_values, Some(34), false);
}
fn verify<const M: usize, const C: usize, E: ExtensionField>(
witness_values: Vec<u64>,
const_b: Option<u64>,
overflow: bool,
) {
let mut cs = ConstraintSystem::new(|| "test_add");
let mut cb = CircuitBuilder::<E>::new(&mut cs);
let challenges = vec![E::ONE; witness_values.len()];
let uint_a = UIntLimbs::<M, C, E>::new(|| "uint_a", &mut cb).unwrap();
let uint_c = if let Some(const_b) = const_b {
let const_b = E::BaseField::from_canonical_u64(const_b).expr();
uint_a
.add_const(|| "uint_c", &mut cb, const_b, overflow)
.unwrap()
} else {
let uint_b = UIntLimbs::<M, C, E>::new(|| "uint_b", &mut cb).unwrap();
uint_a.add(|| "uint_c", &mut cb, &uint_b, overflow).unwrap()
};
let pow_of_c: u64 = 2_usize.pow(UIntLimbs::<M, C, E>::MAX_LIMB_BIT_WIDTH as u32) as u64;
let single_wit_size = UIntLimbs::<M, C, E>::NUM_LIMBS;
let a = &witness_values[0..single_wit_size];
let mut const_b_pre_allocated = vec![0u64; single_wit_size];
let b = if let Some(b) = const_b {
let limb_bit_mask: u64 = (1 << C) - 1;
const_b_pre_allocated
.iter_mut()
.enumerate()
.for_each(|(i, limb)| *limb = (b >> (C * i)) & limb_bit_mask);
&const_b_pre_allocated
} else {
&witness_values[single_wit_size..2 * single_wit_size]
};
// the num of witness is 3, a, b and c_carries if it's a `add`
// only the num is 2 if it's a `add_const` bcs there is no `b`
let num_witness = if const_b.is_none() { 3 } else { 2 };
let wit_end_idx = if overflow {
num_witness * single_wit_size
} else {
num_witness * single_wit_size - 1
};
let carries = &witness_values[(num_witness - 1) * single_wit_size..wit_end_idx];
// limbs cal.
let mut result = vec![0u64; single_wit_size];
a.iter()
.zip(b)
.enumerate()
.for_each(|(i, (&limb_a, &limb_b))| {
let carry = carries.get(i);
result[i] = limb_a + limb_b;
if i != 0 {
result[i] += carries[i - 1];
}
if let Some(carry) = carry
&& !overflow
{
result[i] -= carry * pow_of_c;
}
});
// verify
let wit: Vec<E> = witness_values
.iter()
.cloned()
.map(E::from_canonical_u64)
.collect_vec();
uint_c.expr().iter().zip(result).for_each(|(c, ret)| {
assert_eq!(
eval_by_expr(&wit, &[], &challenges, c),
E::from_canonical_u64(ret)
);
});
// overflow
if overflow {
let carries = uint_c.carries.unwrap().last().unwrap().expr();
assert_eq!(eval_by_expr(&wit, &[], &challenges, &carries), E::ONE);
} else {
// non-overflow case, the len of carries should be (NUM_CELLS - 1)
assert_eq!(uint_c.carries.unwrap().len(), single_wit_size - 1)
}
}
}
mod mul {
use crate::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
uint::UIntLimbs,
};
use ff_ext::{ExtensionField, GoldilocksExt2};
use itertools::Itertools;
use multilinear_extensions::{ToExpr, utils::eval_by_expr};
type E = GoldilocksExt2; // 18446744069414584321
#[test]
fn test_mul64_16_no_carries() {
// a = 1 + 1 * 2^16
// b = 2 + 1 * 2^16
// c = 2 + 3 * 2^16 + 1 * 2^32 = 4,295,163,906
let wit_a = vec![1, 1, 0, 0];
let wit_b = vec![2, 1, 0, 0];
let wit_c = vec![2, 3, 1, 0];
let wit_carries = vec![0, 0, 0];
let witness_values = [wit_a, wit_b, wit_c, wit_carries].concat();
verify::<64, 16, E>(witness_values, false);
}
#[test]
fn test_mul64_16_w_carry() {
// a = 256 + 1 * 2^16
// b = 257 + 1 * 2^16
// c = 256 + 514 * 2^16 + 1 * 2^32 = 4,328,653,056
let wit_a = vec![256, 1, 0, 0];
let wit_b = vec![257, 1, 0, 0];
let wit_c = vec![256, 514, 1, 0];
let wit_carries = vec![1, 0, 0];
let witness_values = [wit_a, wit_b, wit_c, wit_carries].concat();
verify::<64, 16, E>(witness_values, false);
}
#[test]
fn test_mul64_16_w_carries() {
// a = 256 + 256 * 2^16 = 16,777,472
// b = 257 + 256 * 2^16 = 16,777,473
// c = 256 + 257 * 2^16 + 2 * 2^32 + 1 * 2^48 = 281,483,583,488,256
let wit_a = vec![256, 256, 0, 0];
let wit_b = vec![257, 256, 0, 0];
// result = [256 * 257, 256*256 + 256*257, 256*256, 0]
// ==> [256 + 1 * (2^16), 256 + 2 * (2^16), 0 + 1 * (2^16), 0]
// so we get wit_c = [256, 256, 0, 0] and carries = [1, 2, 1, 0]
let wit_c = vec![256, 257, 2, 1];
let wit_carries = vec![1, 2, 1];
let witness_values = [wit_a, wit_b, wit_c, wit_carries].concat();
verify::<64, 16, E>(witness_values, false);
}
#[test]
fn test_mul64_16_w_overflow() {
// 18,446,744,073,709,551,616
// a = 1 * 2^16 + 1 * 2^32 = 4,295,032,832
// b = 1 * 2^32 = 4,294,967,296
// c = 1 * 2^48 + 1 * 2^64 = 18,447,025,548,686,262,272
let wit_a = vec![0, 1, 1, 0];
let wit_b = vec![0, 0, 1, 0];
let wit_c = vec![0, 0, 0, 1];
let wit_carries = vec![0, 0, 0, 1];
let witness_values = [wit_a, wit_b, wit_c, wit_carries].concat();
verify::<64, 16, E>(witness_values, true);
}
#[test]
fn test_mul64_8_w_carries() {
// a = 256
// b = 257
// c = 254 + 1 * 2^16 = 510
let wit_a = vec![255, 0, 0, 0, 0, 0, 0, 0];
let wit_b = vec![2, 0, 0, 0, 0, 0, 0, 0];
let wit_c = vec![254, 1, 0, 0, 0, 0, 0, 0];
let wit_carries = vec![1, 0, 0, 0, 0, 0, 0];
let witness_values = [wit_a, wit_b, wit_c, wit_carries].concat();
verify::<64, 8, E>(witness_values, false);
}
#[test]
fn test_mul32_16_w_carries() {
let wit_a = vec![48683, 2621];
let wit_b = vec![7, 0];
let wit_c = vec![13101, 18352];
let wit_carries = vec![5, 0];
let witness_values = [wit_a, wit_b, wit_c, wit_carries].concat();
verify::<32, 16, E>(witness_values, false);
}
#[test]
fn test_mul32_5_w_carries() {
// a = 31
// b = 2
// c = 30 + 1 * 2^8 = 62
let wit_a = vec![31, 0, 0, 0, 0, 0, 0];
let wit_b = vec![2, 0, 0, 0, 0, 0, 0];
let wit_c = vec![30, 1, 0, 0, 0, 0, 0];
let wit_carries = vec![1, 0, 0, 0, 0, 0];
let witness_values = [wit_a, wit_b, wit_c, wit_carries].concat();
verify::<32, 5, E>(witness_values, false);
}
// Builds a `UIntLimbs::mul` circuit for the first two witness sections and
// checks each evaluated product-limb expression against a schoolbook
// multiplication recomputed directly from `witness_values`.
//
// `witness_values` layout (little-endian limbs): [a | b | c | carries],
// each section `NUM_LIMBS` wide; the carry section is one limb shorter
// when `overflow` is false.
fn verify<const M: usize, const C: usize, E: ExtensionField>(
witness_values: Vec<u64>,
overflow: bool,
) {
// 2^limb_width: used to subtract the carried-out part from each limb sum
let pow_of_c: u64 = 2_usize.pow(UIntLimbs::<M, C, E>::MAX_LIMB_BIT_WIDTH as u32) as u64;
let single_wit_size = UIntLimbs::<M, C, E>::NUM_LIMBS;
// with overflow the witness is exactly 4 equal sections, so check divisibility
if overflow {
assert_eq!(
witness_values.len() % single_wit_size,
0,
"witness len is incorrect"
)
}
let mut cs = ConstraintSystem::new(|| "test_mul");
let mut cb = CircuitBuilder::<E>::new(&mut cs);
// dummy challenges; expressions below are evaluated with eval_by_expr
let challenges = vec![E::ONE; witness_values.len()];
let mut uint_a = UIntLimbs::<M, C, E>::new(|| "uint_a", &mut cb).unwrap();
let mut uint_b = UIntLimbs::<M, C, E>::new(|| "uint_b", &mut cb).unwrap();
let uint_c: UIntLimbs<M, C, E> = uint_a
.mul(|| "uint_c", &mut cb, &mut uint_b, overflow)
.unwrap();
// non-overflow drops the final (overflow) carry limb
let wit_end_idx = if overflow {
4 * single_wit_size
} else {
4 * single_wit_size - 1
};
let a = &witness_values[0..single_wit_size];
let b = &witness_values[single_wit_size..2 * single_wit_size];
let carries = &witness_values[3 * single_wit_size..wit_end_idx];
// schoolbook multiplication: accumulate cross products per result limb,
// truncating terms that fall beyond the limb count
let mut result = vec![0u64; single_wit_size];
a.iter().enumerate().for_each(|(i, a_limb)| {
b.iter().enumerate().for_each(|(j, b_limb)| {
let idx = i + j;
if idx < single_wit_size {
result[idx] += a_limb * b_limb;
}
});
});
// fold carries in: add the carry coming from the previous limb, then
// remove the part carried out of this limb (skipped in overflow mode,
// where the raw sums are compared)
result.iter_mut().enumerate().for_each(|(i, ret)| {
if i != 0 {
*ret += carries[i - 1];
}
if !overflow && carries.get(i).is_some() {
*ret -= carries[i] * pow_of_c;
}
});
// evaluate each product-limb expression against the recomputed limbs
let wit: Vec<E> = witness_values
.iter()
.cloned()
.map(E::from_canonical_u64)
.collect_vec();
uint_c.expr().iter().zip(result).for_each(|(c, ret)| {
assert_eq!(
eval_by_expr(&wit, &[], &challenges, c),
E::from_canonical_u64(ret)
);
});
// overflow: last carry limb is the overflow bit and must evaluate to 1
if overflow {
let overflow = uint_c.carries.unwrap().last().unwrap().expr();
assert_eq!(eval_by_expr(&wit, &[], &challenges, &overflow), E::ONE);
} else {
// non-overflow case, the len of carries should be (NUM_CELLS - 1)
assert_eq!(uint_c.carries.unwrap().len(), single_wit_size - 1)
}
}
}
mod mul_add {
use crate::{
Value,
circuit_builder::{CircuitBuilder, ConstraintSystem},
gadgets::cal_lt_diff,
scheme::mock_prover::MockProver,
uint::UIntLimbs,
witness::LkMultiplicity,
};
use ff_ext::{ExtensionField, GoldilocksExt2};
use itertools::Itertools;
use multilinear_extensions::mle::{ArcMultilinearExtension, MultilinearExtension};
use p3::field::FieldAlgebra;
type E = GoldilocksExt2; // 18446744069414584321
// Test helper: lift each u64 witness value into its own constant
// (0-variable) multilinear extension.
trait ValueToArcMle<E: ExtensionField> {
#[allow(clippy::wrong_self_convention)]
fn into_arc_mle<'a>(&self) -> Vec<ArcMultilinearExtension<'a, E>>;
}
impl<E: ExtensionField> ValueToArcMle<E> for Vec<u64> {
    /// Turn every u64 in the vector into a constant 0-variable MLE over
    /// the base field, returned as shared (`Arc`) extensions.
    fn into_arc_mle<'a>(&self) -> Vec<ArcMultilinearExtension<'a, E>> {
        let mut mles = Vec::with_capacity(self.len());
        for &value in self {
            let evals = vec![E::BaseField::from_canonical_u64(value)];
            mles.push(MultilinearExtension::from_evaluation_vec_smart(0, evals).into());
        }
        mles
    }
}
// For each mul carry, compute the witness limbs of the "less-than"
// difference against the maximum degree-2 carry value, zero-padded to the
// fixed number of u16 limbs, and return them flattened into one vector.
fn calculate_carry_diff<const M: usize, const C: usize>(carries: Vec<u64>) -> Vec<u64> {
    // these bounds are associated consts, identical for every carry
    let max_carry_value = UIntLimbs::<M, C, E>::MAX_DEGREE_2_MUL_CARRY_VALUE;
    let max_carry_u16_limb = UIntLimbs::<M, C, E>::MAX_DEGREE_2_MUL_CARRY_U16_LIMB;
    let max_carry_bit = UIntLimbs::<M, C, E>::MAX_DEGREE_2_MUL_CARRY_BITS;
    let mut diffs = Vec::with_capacity(carries.len() * max_carry_u16_limb);
    for carry in carries {
        let diff = cal_lt_diff(true, max_carry_bit, carry, max_carry_value);
        let mut diff_limbs = Value::new_unchecked(diff).as_u16_limbs().to_vec();
        diff_limbs.resize(max_carry_u16_limb, 0);
        diffs.extend(diff_limbs.into_iter().map(|v| v as u64));
    }
    diffs
}
// End-to-end composition check: c = a + b, then e = c * d, all carries
// zero. The expected product limbs are pinned via `from_const_unchecked`
// + `require_equal`, and the full constraint system is checked by
// `MockProver::assert_satisfied`.
#[test]
fn test_add_mul() {
let witness_values: Vec<ArcMultilinearExtension<E>> = [
// alloc a = 1 + 1 * 2^16
vec![1, 1, 0, 0],
// alloc b = 2 + 1 * 2^16
vec![2, 1, 0, 0],
// c = a + b = 3 + 2 * 2^16 with 0 carries, no overflow bit,
vec![0; 3],
// alloc d = 1 + 1 * 2^16
vec![1, 1, 0, 0],
// e = c * d = 3 + 5 * 2^16 + 2 * 2^32
// alloc e
vec![3, 5, 2, 0],
// alloc e carry
vec![0; 3],
// each carry alloc with diff
calculate_carry_diff::<64, 16>(vec![0; 3]),
// alloc c limb
vec![3, 2, 0, 0],
]
.concat()
.into_arc_mle();
let mut cs = ConstraintSystem::new(|| "test_add_mul");
let mut cb = CircuitBuilder::<E>::new(&mut cs);
let uint_a = UIntLimbs::<64, 16, E>::new(|| "uint_a", &mut cb).unwrap();
let uint_b = UIntLimbs::<64, 16, E>::new(|| "uint_b", &mut cb).unwrap();
let mut uint_c = uint_a.add(|| "uint_c", &mut cb, &uint_b, false).unwrap();
let mut uint_d = UIntLimbs::<64, 16, E>::new(|| "uint_d", &mut cb).unwrap();
let uint_e: UIntLimbs<64, 16, E> = uint_c
.mul(|| "uint_e", &mut cb, &mut uint_d, false)
.unwrap();
let expected_e = UIntLimbs::<64, 16, E>::from_const_unchecked(vec![3u64, 5, 2, 0]);
expected_e
.require_equal(|| "assert_g", &mut cb, &uint_e)
.unwrap();
MockProver::assert_satisfied(&cb, &witness_values, &[], &[], None, None);
}
#[test]
fn test_add_mul2() {
let witness_values: Vec<ArcMultilinearExtension<E>> = vec![
// alloc a = 1 + 1 * 2^16
vec![1, 1, 0, 0],
// alloc b = 2 + 1 * 2^16
vec![2, 1, 0, 0],
// c = a + b = 3 + 2 * 2^16 with 0 carries, no overflow bit
vec![0; 3],
// alloc d
vec![1, 1, 0, 0],
// alloc e
vec![2, 1, 0, 0],
// f = d + e = 3 + 2 * 2^16 with 0 carries, no overflow bit
vec![0; 3],
// g = c * f
// alloc g
vec![9, 12, 4, 0],
// alloc g carry
vec![0; 3],
// each carry alloc with diff
calculate_carry_diff::<64, 16>(vec![0; 3]),
// alloc c limb
vec![3, 2, 0, 0],
// alloc f limb
vec![3, 2, 0, 0],
]
.concat()
.into_arc_mle();
let mut cs = ConstraintSystem::new(|| "test_add_mul2");
let mut cb = CircuitBuilder::<E>::new(&mut cs);
let uint_a = UIntLimbs::<64, 16, E>::new(|| "uint_a", &mut cb).unwrap();
let uint_b = UIntLimbs::<64, 16, E>::new(|| "uint_b", &mut cb).unwrap();
let mut uint_c = uint_a.add(|| "uint_c", &mut cb, &uint_b, false).unwrap();
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/uint/util.rs | ceno_zkvm/src/uint/util.rs | // calculate the maximum number of combinations for stars and bars formula
// calculate the maximum number of cross-term combinations for one result
// limb via the stars-and-bars formula C(n + degree - 1, degree - 1),
// where n = num_cells - 1 (the largest limb index each factor can take)
const fn max_combinations(degree: usize, num_cells: usize) -> usize {
    // compute binomial coefficient "n choose k" multiplicatively.
    // C(n, i + 1) = C(n, i) * (n - i) / (i + 1) divides exactly at every
    // step, so this stays within usize wherever the final result does —
    // unlike the factorial-ratio form, which overflows for n > 20.
    const fn binomial(n: usize, k: usize) -> usize {
        if k > n {
            return 0;
        }
        // C(n, k) == C(n, n - k); use the smaller k to shorten the loop
        let k = if k > n - k { n - k } else { k };
        let mut result = 1usize;
        let mut i = 0;
        while i < k {
            result = result * (n - i) / (i + 1);
            i += 1;
        }
        result
    }
    // Here we consider the sum as num_cells - 1 (max value each degree can take)
    let n = num_cells - 1;
    binomial(n + degree - 1, degree - 1)
}
// compute the max_word (max value of carry) for n limbs with each m overall bit, c limb bit multiplication
// for example, n = 2 means u1*u2, while n = 3 means u1*u2*u3
pub(crate) const fn max_carry_word_for_multiplication(n: usize, m: usize, c: usize) -> u64 {
// degree-1 has no cross terms, so a carry bound is meaningless
assert!(n > 1);
assert!(m <= u64::BITS as usize);
// NOTE(review): assumes c < 64 so `1 << c` below does not overflow — confirm at call sites
let num_cells = m.div_ceil(c);
// calculate maximum multiplication value max_limb^(n)
let mut max_mul_value = 1u128;
let max_val = (1 << c) - 1;
let mut i = 0;
while i < n {
max_mul_value *= max_val as u128;
i += 1;
}
// worst-case limb sum: one maximal product per cross-term combination
let max_mul_sum_value: u128 = max_mul_value * (max_combinations(n, num_cells)) as u128;
// a carry fed in from the previous limb is at most the sum shifted down by one limb
let estimated_max_prev_carry_bound = max_mul_sum_value >> c;
let max_carry_value = (max_mul_sum_value + estimated_max_prev_carry_bound) >> c;
let max_carry_value_gt = max_carry_value + 1; // + 1 for less than comparison
assert!(max_carry_value_gt <= u64::MAX as u128);
max_carry_value_gt as u64
}
#[cfg(test)]
mod tests {
use crate::uint::util::{max_carry_word_for_multiplication, max_combinations};
#[test]
fn test_max_combinations_degree() {
// degree=1 is pure add, therefore only one term
assert_eq!(1, max_combinations(1, 4));
// for degree=2 mul, we have u[0]*v[3], u[1]*v[2], u[2]*v[1], u[3]*v[0]
// thus 4 terms
assert_eq!(4, max_combinations(2, 4));
}
#[test]
fn test_max_word_of_limb_degree() {
// pinned regression value for a u32 value split into two 16-bit limbs
assert_eq!(131070, max_carry_word_for_multiplication(2, 32, 16));
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/uint/constants.rs | ceno_zkvm/src/uint/constants.rs | use crate::utils::const_min;
use super::{UIntLimbs, util::max_carry_word_for_multiplication};
use ff_ext::ExtensionField;
// Compile-time layout constants for a UIntLimbs<TOTAL_BITS, CAPACITY> value.
impl<const TOTAL_BITS: usize, const CAPACITY: usize, E: ExtensionField>
UIntLimbs<TOTAL_BITS, CAPACITY, E>
{
pub const TOTAL_BITS: usize = TOTAL_BITS;
pub const LIMB_BITS: usize = CAPACITY;
/// Determines the maximum number of bits that should be represented in each limb
/// independent of the limb capacity.
/// If total bits < limb capacity, the maximum_usable_limb_capacity
/// is actually 'total bits'.
/// but if total bits >= limb capacity then maximum_usable_limb_capacity = 'limb capacity'.
pub const MAX_LIMB_BIT_WIDTH: usize = const_min(TOTAL_BITS, CAPACITY);
/// `NUM_LIMBS` represent the minimum number of limbs needed
/// to hold total bits
pub const NUM_LIMBS: usize = TOTAL_BITS.div_ceil(CAPACITY);
/// Max carry value during degree 2 limb multiplication
pub const MAX_DEGREE_2_MUL_CARRY_VALUE: u64 =
max_carry_word_for_multiplication(2, Self::TOTAL_BITS, Self::LIMB_BITS);
/// Min bits to cover MAX_DEGREE_2_MUL_CARRY_VALUE
pub const MAX_DEGREE_2_MUL_CARRY_BITS: usize = {
// bit length = position of the highest set bit
let max_bit_of_carry = u64::BITS - Self::MAX_DEGREE_2_MUL_CARRY_VALUE.leading_zeros();
max_bit_of_carry as usize
};
/// Min number of u16 limb to cover max carry value
pub const MAX_DEGREE_2_MUL_CARRY_U16_LIMB: usize =
Self::MAX_DEGREE_2_MUL_CARRY_BITS.div_ceil(16);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/uint/logic.rs | ceno_zkvm/src/uint/logic.rs | use ff_ext::ExtensionField;
use gkr_iop::tables::OpsTable;
use itertools::izip;
use super::UIntLimbs;
use crate::{ROMType, circuit_builder::CircuitBuilder, error::ZKVMError, witness::LkMultiplicity};
use multilinear_extensions::ToExpr;
// Only implemented for u8 limbs.
impl<const M: usize, E: ExtensionField> UIntLimbs<M, 8, E> {
    /// Assert `rom_type(a, b) = c` and range-check `a, b, c`,
    /// issuing one u8 lookup per aligned limb triple.
    pub fn logic(
        cb: &mut CircuitBuilder<E>,
        rom_type: ROMType,
        a: &Self,
        b: &Self,
        c: &Self,
    ) -> Result<(), ZKVMError> {
        let limb_triples = a.expr().into_iter().zip(b.expr()).zip(c.expr());
        for ((a_byte_expr, b_byte_expr), c_byte_expr) in limb_triples {
            cb.logic_u8(rom_type, a_byte_expr, b_byte_expr, c_byte_expr)?;
        }
        Ok(())
    }

    /// Record one u8-table lookup per byte of `a`/`b` in the
    /// lookup-multiplicity counter (witness-side counterpart of `logic`).
    pub fn logic_assign<OP: OpsTable>(lk_multiplicity: &mut LkMultiplicity, a: u64, b: u64) {
        let num_bytes = M.div_ceil(8);
        for shift in (0..num_bytes).map(|i| i * 8) {
            let a_byte = (a >> shift) & 0xff;
            let b_byte = (b >> shift) & 0xff;
            lk_multiplicity.logic_u8::<OP>(a_byte, b_byte);
        }
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/chip_handler/general.rs | ceno_zkvm/src/chip_handler/general.rs | use ff_ext::ExtensionField;
use gkr_iop::{error::CircuitBuilderError, tables::LookupTable};
use crate::{
circuit_builder::CircuitBuilder,
instructions::riscv::constants::{
END_CYCLE_IDX, END_PC_IDX, EXIT_CODE_IDX, HEAP_LENGTH_IDX, HEAP_START_ADDR_IDX,
HINT_LENGTH_IDX, HINT_START_ADDR_IDX, INIT_CYCLE_IDX, INIT_PC_IDX, PUBLIC_IO_IDX,
SHARD_ID_IDX, SHARD_RW_SUM_IDX, UINT_LIMBS,
},
scheme::constants::SEPTIC_EXTENSION_DEGREE,
tables::InsnRecord,
};
use multilinear_extensions::{Expression, Instance};
/// Instruction-fetch lookup against the Program table.
pub trait InstFetch<E: ExtensionField> {
    /// Record a lookup asserting that `record` (pc, opcode, operands, …)
    /// is a row of the loaded program.
    fn lk_fetch(&mut self, record: &InsnRecord<Expression<E>>) -> Result<(), CircuitBuilderError>;
}
/// Accessors for the VM's public-value instance columns
/// (exit code, entry/exit pc and cycle, shard bookkeeping, hints).
pub trait PublicValuesQuery {
    fn query_exit_code(&mut self) -> Result<[Instance; UINT_LIMBS], CircuitBuilderError>;
    fn query_init_pc(&mut self) -> Result<Instance, CircuitBuilderError>;
    fn query_init_cycle(&mut self) -> Result<Instance, CircuitBuilderError>;
    fn query_end_pc(&mut self) -> Result<Instance, CircuitBuilderError>;
    fn query_end_cycle(&mut self) -> Result<Instance, CircuitBuilderError>;
    fn query_global_rw_sum(&mut self) -> Result<Vec<Instance>, CircuitBuilderError>;
    fn query_public_io(&mut self) -> Result<[Instance; UINT_LIMBS], CircuitBuilderError>;
    #[allow(dead_code)]
    fn query_shard_id(&mut self) -> Result<Instance, CircuitBuilderError>;
    fn query_heap_start_addr(&self) -> Result<Instance, CircuitBuilderError>;
    #[allow(dead_code)]
    fn query_heap_shard_len(&self) -> Result<Instance, CircuitBuilderError>;
    fn query_hint_start_addr(&self) -> Result<Instance, CircuitBuilderError>;
    #[allow(dead_code)]
    fn query_hint_shard_len(&self) -> Result<Instance, CircuitBuilderError>;
}
impl<'a, E: ExtensionField> InstFetch<E> for CircuitBuilder<'a, E> {
    /// Fetch an instruction at a given PC from the Program table.
    fn lk_fetch(&mut self, record: &InsnRecord<Expression<E>>) -> Result<(), CircuitBuilderError> {
        self.lk_record(
            || "fetch",
            LookupTable::Instruction,
            record.as_slice().to_vec(),
        )
    }
}
impl<'a, E: ExtensionField> PublicValuesQuery for CircuitBuilder<'a, E> {
    /// Exit code occupies two consecutive limb slots starting at EXIT_CODE_IDX.
    fn query_exit_code(&mut self) -> Result<[Instance; UINT_LIMBS], CircuitBuilderError> {
        let lo = self.cs.query_instance(EXIT_CODE_IDX)?;
        let hi = self.cs.query_instance(EXIT_CODE_IDX + 1)?;
        Ok([lo, hi])
    }
    fn query_init_pc(&mut self) -> Result<Instance, CircuitBuilderError> {
        self.cs.query_instance(INIT_PC_IDX)
    }
    fn query_init_cycle(&mut self) -> Result<Instance, CircuitBuilderError> {
        self.cs.query_instance(INIT_CYCLE_IDX)
    }
    fn query_end_pc(&mut self) -> Result<Instance, CircuitBuilderError> {
        self.cs.query_instance(END_PC_IDX)
    }
    fn query_end_cycle(&mut self) -> Result<Instance, CircuitBuilderError> {
        self.cs.query_instance(END_CYCLE_IDX)
    }
    /// The x coordinate limbs sit at SHARD_RW_SUM_IDX.., immediately
    /// followed by the y coordinate limbs, so one contiguous scan over
    /// 2 * SEPTIC_EXTENSION_DEGREE slots yields [x limbs, y limbs].
    fn query_global_rw_sum(&mut self) -> Result<Vec<Instance>, CircuitBuilderError> {
        (0..2 * SEPTIC_EXTENSION_DEGREE)
            .map(|i| self.cs.query_instance(SHARD_RW_SUM_IDX + i))
            .collect()
    }
    /// Public IO limbs are queried via the openings-aware accessor.
    fn query_public_io(&mut self) -> Result<[Instance; UINT_LIMBS], CircuitBuilderError> {
        let lo = self.cs.query_instance_for_openings(PUBLIC_IO_IDX)?;
        let hi = self.cs.query_instance_for_openings(PUBLIC_IO_IDX + 1)?;
        Ok([lo, hi])
    }
    fn query_shard_id(&mut self) -> Result<Instance, CircuitBuilderError> {
        self.cs.query_instance(SHARD_ID_IDX)
    }
    fn query_heap_start_addr(&self) -> Result<Instance, CircuitBuilderError> {
        self.cs.query_instance(HEAP_START_ADDR_IDX)
    }
    fn query_heap_shard_len(&self) -> Result<Instance, CircuitBuilderError> {
        self.cs.query_instance(HEAP_LENGTH_IDX)
    }
    fn query_hint_start_addr(&self) -> Result<Instance, CircuitBuilderError> {
        self.cs.query_instance(HINT_START_ADDR_IDX)
    }
    fn query_hint_shard_len(&self) -> Result<Instance, CircuitBuilderError> {
        self.cs.query_instance(HINT_LENGTH_IDX)
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/chip_handler/global_state.rs | ceno_zkvm/src/chip_handler/global_state.rs | use ff_ext::ExtensionField;
use gkr_iop::error::CircuitBuilderError;
use super::GlobalStateRegisterMachineChipOperations;
use crate::{circuit_builder::CircuitBuilder, structs::RAMType};
use multilinear_extensions::{Expression, ToExpr};
use p3::field::FieldAlgebra;
impl<E: ExtensionField> GlobalStateRegisterMachineChipOperations<E> for CircuitBuilder<'_, E> {
    /// Read the machine's global state (pc, ts) at circuit entry.
    /// The record is tagged with the GlobalState RAM type discriminant.
    fn state_in(
        &mut self,
        pc: Expression<E>,
        ts: Expression<E>,
    ) -> Result<(), CircuitBuilderError> {
        let tag = E::BaseField::from_canonical_u64(RAMType::GlobalState as u64).expr();
        self.read_record(|| "state_in", RAMType::GlobalState, vec![tag, pc, ts])
    }

    /// Write the machine's global state (pc, ts) at circuit exit,
    /// mirroring `state_in` with a write record.
    fn state_out(
        &mut self,
        pc: Expression<E>,
        ts: Expression<E>,
    ) -> Result<(), CircuitBuilderError> {
        let tag = E::BaseField::from_canonical_u64(RAMType::GlobalState as u64).expr();
        self.write_record(|| "state_out", RAMType::GlobalState, vec![tag, pc, ts])
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/chip_handler/register.rs | ceno_zkvm/src/chip_handler/register.rs | use ff_ext::ExtensionField;
use gkr_iop::error::CircuitBuilderError;
use crate::{circuit_builder::CircuitBuilder, gadgets::AssertLtConfig, structs::RAMType};
use multilinear_extensions::{Expression, ToExpr};
use super::{RegisterChipOperations, RegisterExpr};
// Register file access: thin adapters that route reads/writes through the
// generic RAM-type record helpers with RAMType::Register.
impl<E: ExtensionField, NR: Into<String>, N: FnOnce() -> NR> RegisterChipOperations<E, NR, N>
for CircuitBuilder<'_, E>
{
// Read `value` from `register_id`, advancing its timestamp from
// `prev_ts` to `ts`; returns the next timestamp expression plus the
// timestamp-ordering assertion config.
fn register_read(
&mut self,
name_fn: N,
register_id: impl ToExpr<E, Output = Expression<E>>,
prev_ts: Expression<E>,
ts: Expression<E>,
value: RegisterExpr<E>,
) -> Result<(Expression<E>, AssertLtConfig), CircuitBuilderError> {
self.ram_type_read(name_fn, RAMType::Register, register_id, prev_ts, ts, value)
}
// Write `value` over `prev_values` at `register_id`; same timestamp
// bookkeeping as `register_read`.
fn register_write(
&mut self,
name_fn: N,
register_id: impl ToExpr<E, Output = Expression<E>>,
prev_ts: Expression<E>,
ts: Expression<E>,
prev_values: RegisterExpr<E>,
value: RegisterExpr<E>,
) -> Result<(Expression<E>, AssertLtConfig), CircuitBuilderError> {
self.ram_type_write(
name_fn,
RAMType::Register,
register_id,
prev_ts,
ts,
prev_values,
value,
)
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/chip_handler/memory.rs | ceno_zkvm/src/chip_handler/memory.rs | use crate::{
chip_handler::{AddressExpr, MemoryChipOperations, MemoryExpr},
circuit_builder::CircuitBuilder,
gadgets::AssertLtConfig,
structs::RAMType,
};
use ff_ext::ExtensionField;
use gkr_iop::error::CircuitBuilderError;
use multilinear_extensions::Expression;
// Memory access: thin adapters that route reads/writes through the generic
// RAM-type record helpers with RAMType::Memory, keyed by address expression.
impl<E: ExtensionField, NR: Into<String>, N: FnOnce() -> NR> MemoryChipOperations<E, NR, N>
for CircuitBuilder<'_, E>
{
// Read `value` at `memory_addr`, advancing its timestamp from `prev_ts`
// to `ts`; returns the next timestamp expression plus the
// timestamp-ordering assertion config.
fn memory_read(
&mut self,
name_fn: N,
memory_addr: &AddressExpr<E>,
prev_ts: Expression<E>,
ts: Expression<E>,
value: MemoryExpr<E>,
) -> Result<(Expression<E>, AssertLtConfig), CircuitBuilderError> {
self.ram_type_read(
name_fn,
RAMType::Memory,
memory_addr.clone(),
prev_ts,
ts,
value,
)
}
// Write `value` over `prev_values` at `memory_addr`; same timestamp
// bookkeeping as `memory_read`.
fn memory_write(
&mut self,
name_fn: N,
memory_addr: &AddressExpr<E>,
prev_ts: Expression<E>,
ts: Expression<E>,
prev_values: MemoryExpr<E>,
value: MemoryExpr<E>,
) -> Result<(Expression<E>, AssertLtConfig), CircuitBuilderError> {
self.ram_type_write(
name_fn,
RAMType::Memory,
memory_addr.clone(),
prev_ts,
ts,
prev_values,
value,
)
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv.rs | ceno_zkvm/src/instructions/riscv.rs | use ceno_emul::InsnKind;
mod rv32im;
pub use rv32im::{
DummyExtraConfig, Rv32imConfig,
mmu::{MemPadder, MmuConfig},
};
pub mod arith;
pub mod arith_imm;
pub mod branch;
pub mod config;
pub mod constants;
pub mod div;
pub mod dummy;
pub mod ecall;
pub mod ecall_base;
pub mod jump;
pub mod logic;
pub mod logic_imm;
pub mod mulh;
pub mod shift;
pub mod shift_imm;
pub mod slt;
pub mod slti;
mod b_insn;
mod i_insn;
pub mod insn_base;
mod j_insn;
mod r_insn;
mod ecall_insn;
#[cfg(feature = "u16limb_circuit")]
mod auipc;
mod im_insn;
#[cfg(feature = "u16limb_circuit")]
mod lui;
mod memory;
mod s_insn;
#[cfg(test)]
mod test;
#[cfg(test)]
mod test_utils;
/// Associates an instruction circuit with the RISC-V instruction kind it
/// implements, so dispatch tables can be built from the const.
pub trait RIVInstruction {
    const INST_KIND: InsnKind;
}
pub use arith::{AddInstruction, SubInstruction};
pub use jump::{JalInstruction, JalrInstruction};
pub use memory::{
LbInstruction, LbuInstruction, LhInstruction, LhuInstruction, LwInstruction, SbInstruction,
ShInstruction, SwInstruction,
};
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.