repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1 value | license stringclasses 7 values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2 classes |
|---|---|---|---|---|---|---|---|---|
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/slt.rs | ceno_zkvm/src/instructions/riscv/slt.rs | #[cfg(not(feature = "u16limb_circuit"))]
mod slt_circuit;
#[cfg(feature = "u16limb_circuit")]
mod slt_circuit_v2;
use ceno_emul::InsnKind;
use super::RIVInstruction;
pub struct SltOp;
impl RIVInstruction for SltOp {
const INST_KIND: InsnKind = InsnKind::SLT;
}
#[cfg(feature = "u16limb_circuit")]
pub type SltInstruction<E> = slt_circuit_v2::SetLessThanInstruction<E, SltOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type SltInstruction<E> = slt_circuit::SetLessThanInstruction<E, SltOp>;
pub struct SltuOp;
impl RIVInstruction for SltuOp {
const INST_KIND: InsnKind = InsnKind::SLTU;
}
#[cfg(feature = "u16limb_circuit")]
pub type SltuInstruction<E> = slt_circuit_v2::SetLessThanInstruction<E, SltuOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type SltuInstruction<E> = slt_circuit::SetLessThanInstruction<E, SltuOp>;
#[cfg(test)]
mod test {
use ceno_emul::{Change, StepRecord, Word, encode_rv32};
#[cfg(feature = "u16limb_circuit")]
use ff_ext::BabyBearExt4;
use ff_ext::{ExtensionField, GoldilocksExt2};
use rand::RngCore;
use super::*;
use crate::{
Value,
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::ShardContext,
instructions::{Instruction, riscv::constants::UInt},
scheme::mock_prover::{MOCK_PC_START, MockProver},
structs::ProgramParams,
};
#[cfg(not(feature = "u16limb_circuit"))]
use slt_circuit::SetLessThanInstruction;
#[cfg(feature = "u16limb_circuit")]
use slt_circuit_v2::SetLessThanInstruction;
fn verify<E: ExtensionField, I: RIVInstruction>(
name: &'static str,
rs1: Word,
rs2: Word,
rd: Word,
) {
let mut cs = ConstraintSystem::<E>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let config = cb
.namespace(
|| format!("{}/{name}", I::INST_KIND),
|cb| {
let config = SetLessThanInstruction::<_, I>::construct_circuit(
cb,
&ProgramParams::default(),
);
Ok(config)
},
)
.unwrap()
.unwrap();
let insn_code = encode_rv32(I::INST_KIND, 2, 3, 4, 0);
let (raw_witin, lkm) = SetLessThanInstruction::<_, I>::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&StepRecord::new_r_instruction(
3,
MOCK_PC_START,
insn_code,
rs1,
rs2,
Change::new(0, rd),
0,
)],
)
.unwrap();
let expected_rd_written =
UInt::from_const_unchecked(Value::new_unchecked(rd).as_u16_limbs().to_vec());
config
.rd_written
.require_equal(|| "assert_rd_written", &mut cb, &expected_rd_written)
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
#[test]
fn test_slt_true() {
let cases = vec![
("lt = true, 0 < 1", 0, 1, 1),
("lt = true, 1 < 2", 1, 2, 1),
("lt = true, -1 < 0", -1i32 as Word, 0, 1),
("lt = true, -1 < 1", -1i32 as Word, 1, 1),
("lt = true, -2 < -1", -2i32 as Word, -1i32 as Word, 1),
(
"lt = true, large number",
i32::MIN as Word,
i32::MAX as Word,
1,
),
];
for &(name, a, b, expected) in &cases {
verify::<GoldilocksExt2, SltOp>(name, a, b, expected);
#[cfg(feature = "u16limb_circuit")]
verify::<BabyBearExt4, SltOp>(name, a, b, expected);
}
}
#[test]
fn test_slt_false() {
let cases = vec![
("lt = false, 1 < 0", 1, 0, 0),
("lt = false, 2 < 1", 2, 1, 0),
("lt = false, 0 < -1", 0, -1i32 as Word, 0),
("lt = false, 1 < -1", 1, -1i32 as Word, 0),
("lt = false, -1 < -2", -1i32 as Word, -2i32 as Word, 0),
("lt = false, 0 == 0", 0, 0, 0),
("lt = false, 1 == 1", 1, 1, 0),
("lt = false, -1 == -1", -1i32 as Word, -1i32 as Word, 0),
// This case causes subtract overflow in `assign_instance_signed`
(
"lt = false, large number",
i32::MAX as Word,
i32::MIN as Word,
0,
),
];
for &(name, a, b, expected) in &cases {
verify::<GoldilocksExt2, SltOp>(name, a, b, expected);
#[cfg(feature = "u16limb_circuit")]
verify::<BabyBearExt4, SltOp>(name, a, b, expected);
}
}
#[test]
fn test_slt_random() {
let mut rng = rand::thread_rng();
let a: i32 = rng.next_u32() as i32;
let b: i32 = rng.next_u32() as i32;
verify::<GoldilocksExt2, SltOp>("random 1", a as Word, b as Word, (a < b) as u32);
verify::<GoldilocksExt2, SltOp>("random 2", b as Word, a as Word, (a >= b) as u32);
#[cfg(feature = "u16limb_circuit")]
verify::<BabyBearExt4, SltOp>("random 1", a as Word, b as Word, (a < b) as u32);
#[cfg(feature = "u16limb_circuit")]
verify::<BabyBearExt4, SltOp>("random 2", b as Word, a as Word, (a >= b) as u32);
}
#[test]
fn test_sltu_simple() {
let cases = vec![
("lt = true, 0 < 1", 0, 1, 1),
("lt = true, 1 < 2", 1, 2, 1),
("lt = true, 0 < u32::MAX", 0, u32::MAX, 1),
("lt = true, u32::MAX - 1", u32::MAX - 1, u32::MAX, 1),
("lt = false, u32::MAX", u32::MAX, u32::MAX, 0),
("lt = false, u32::MAX - 1", u32::MAX, u32::MAX - 1, 0),
("lt = false, u32::MAX > 0", u32::MAX, 0, 0),
("lt = false, 2 > 1", 2, 1, 0),
];
for &(name, a, b, expected) in &cases {
verify::<GoldilocksExt2, SltuOp>(name, a, b, expected);
#[cfg(feature = "u16limb_circuit")]
verify::<BabyBearExt4, SltuOp>(name, a, b, expected);
}
}
#[test]
fn test_sltu_random() {
let mut rng = rand::thread_rng();
let a: u32 = rng.next_u32();
let b: u32 = rng.next_u32();
verify::<GoldilocksExt2, SltuOp>("random 1", a, b, (a < b) as u32);
verify::<GoldilocksExt2, SltuOp>("random 2", b, a, (a >= b) as u32);
#[cfg(feature = "u16limb_circuit")]
verify::<BabyBearExt4, SltuOp>("random 1", a, b, (a < b) as u32);
#[cfg(feature = "u16limb_circuit")]
verify::<BabyBearExt4, SltuOp>("random 2", b, a, (a >= b) as u32);
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall_insn.rs | ceno_zkvm/src/instructions/riscv/ecall_insn.rs | use crate::{
chip_handler::{
GlobalStateRegisterMachineChipOperations, RegisterChipOperations, RegisterExpr,
general::InstFetch,
},
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::AssertLtConfig,
tables::InsnRecord,
witness::{LkMultiplicity, set_val},
};
use ceno_emul::{FullTracer as Tracer, InsnKind::ECALL, PC_STEP_SIZE, Platform, StepRecord};
use ff_ext::{ExtensionField, FieldInto};
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::FieldAlgebra;
pub struct EcallInstructionConfig {
pub pc: WitIn,
pub ts: WitIn,
prev_x5_ts: WitIn,
lt_x5_cfg: AssertLtConfig,
}
impl EcallInstructionConfig {
pub fn construct_circuit<E: ExtensionField>(
cb: &mut CircuitBuilder<E>,
syscall_id: RegisterExpr<E>,
syscall_ret_value: Option<RegisterExpr<E>>,
next_pc: Option<Expression<E>>,
) -> Result<Self, ZKVMError> {
let pc = cb.create_witin(|| "pc");
let ts = cb.create_witin(|| "cur_ts");
cb.state_in(pc.expr(), ts.expr())?;
cb.state_out(
next_pc.map_or(pc.expr() + PC_STEP_SIZE, |next_pc| next_pc),
ts.expr() + (Tracer::SUBCYCLES_PER_INSN as usize),
)?;
cb.lk_fetch(&InsnRecord::new(
pc.expr(),
ECALL.into(),
None,
0.into(),
0.into(),
0.into(), // imm = 0
#[cfg(feature = "u16limb_circuit")]
0.into(), // imm_sign = 0
))?;
let prev_x5_ts = cb.create_witin(|| "prev_x5_ts");
// read syscall_id from x5 and write return value to x5
let (_, lt_x5_cfg) = cb.register_write(
|| "write x5",
E::BaseField::from_canonical_u64(Platform::reg_ecall() as u64),
prev_x5_ts.expr(),
ts.expr() + Tracer::SUBCYCLE_RS1,
syscall_id.clone(),
syscall_ret_value.map_or(syscall_id, |v| v),
)?;
Ok(Self {
pc,
ts,
prev_x5_ts,
lt_x5_cfg,
})
}
pub fn assign_instance<E: ExtensionField>(
&self,
instance: &mut [E::BaseField],
shard_ctx: &mut ShardContext,
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let current_shard_offset_cycle = shard_ctx.current_shard_offset_cycle();
let shard_prev_cycle = shard_ctx.aligned_prev_ts(step.rs1().unwrap().previous_cycle);
let shard_cycle = step.cycle() - current_shard_offset_cycle;
set_val!(instance, self.pc, step.pc().before.0 as u64);
set_val!(instance, self.ts, shard_cycle);
lk_multiplicity.fetch(step.pc().before.0);
// the access of X5 register is stored in rs1()
set_val!(instance, self.prev_x5_ts, shard_prev_cycle);
self.lt_x5_cfg.assign_instance(
instance,
lk_multiplicity,
shard_prev_cycle,
shard_cycle + Tracer::SUBCYCLE_RS1,
)?;
// skip shard_ctx.send() as ecall_halt is the last instruction
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/config.rs | ceno_zkvm/src/instructions/riscv/config.rs | use crate::{utils::i64_to_base, witness::LkMultiplicity};
use ff_ext::{FieldInto, SmallField};
use itertools::Itertools;
use multilinear_extensions::WitIn;
use witness::set_val;
#[derive(Clone)]
pub struct IsEqualConfig {
pub is_equal_per_limb: Vec<WitIn>,
pub diff_inv_per_limb: Vec<WitIn>,
pub diff_inv: WitIn,
pub is_equal: WitIn,
}
#[derive(Clone)]
pub struct MsbConfig {
pub msb: WitIn,
pub high_limb_no_msb: WitIn,
}
pub struct MsbInput<'a> {
pub limbs: &'a [u8],
}
impl MsbInput<'_> {
pub fn assign<F: SmallField>(
&self,
instance: &mut [F],
config: &MsbConfig,
lk_multiplicity: &mut LkMultiplicity,
) -> (u8, u8) {
let n_limbs = self.limbs.len();
assert!(n_limbs > 0);
let mut high_limb = self.limbs[n_limbs - 1];
let msb = (high_limb >> 7) & 1;
set_val!(instance, config.msb, { i64_to_base::<F>(msb as i64) });
high_limb &= 0b0111_1111;
set_val!(instance, config.high_limb_no_msb, {
i64_to_base::<F>(high_limb as i64)
});
lk_multiplicity.lookup_and_byte(high_limb as u64, 0b0111_1111);
(msb, high_limb)
}
}
#[derive(Clone)]
pub struct UIntLtuConfig {
pub indexes: Vec<WitIn>,
pub acc_indexes: Vec<WitIn>,
pub byte_diff_inv: WitIn,
pub lhs_ne_byte: WitIn,
pub rhs_ne_byte: WitIn,
pub is_ltu: WitIn,
}
pub struct UIntLtuInput<'a> {
pub lhs_limbs: &'a [u8],
pub rhs_limbs: &'a [u8],
}
impl UIntLtuInput<'_> {
pub fn assign<F: SmallField>(
&self,
instance: &mut [F],
config: &UIntLtuConfig,
lk_multiplicity: &mut LkMultiplicity,
) -> bool {
let mut idx = 0;
let mut flag: bool = false;
for (i, (&lhs, &rhs)) in self
.lhs_limbs
.iter()
.zip(self.rhs_limbs.iter())
.enumerate()
.rev()
{
if lhs != rhs {
idx = i;
flag = true;
break;
}
}
config.indexes.iter().for_each(|witin| {
set_val!(instance, witin, { i64_to_base::<F>(0) });
});
set_val!(instance, config.indexes[idx], {
i64_to_base::<F>(flag as i64)
});
// (0..config.indexes.len()).for_each(|i| {
// if i == idx {
// lk_multiplicity.assert_ux::<1>(0);
// } else {
// lk_multiplicity.assert_ux::<1>(flag as u64);
// }
// });
// this corresponds to assert_bit of index_sum
// lk_multiplicity.assert_ux::<1>(flag as u64);
config.acc_indexes.iter().enumerate().for_each(|(id, wit)| {
if id <= idx {
set_val!(instance, wit, { i64_to_base::<F>(flag as i64) });
} else {
set_val!(instance, wit, 0);
}
});
let lhs_ne_byte = i64_to_base::<F>(self.lhs_limbs[idx] as i64);
let rhs_ne_byte = i64_to_base::<F>(self.rhs_limbs[idx] as i64);
set_val!(instance, config.lhs_ne_byte, lhs_ne_byte);
set_val!(instance, config.rhs_ne_byte, rhs_ne_byte);
set_val!(instance, config.byte_diff_inv, {
if flag {
(lhs_ne_byte - rhs_ne_byte).inverse()
} else {
F::ONE
}
});
let is_ltu = self.lhs_limbs[idx] < self.rhs_limbs[idx];
lk_multiplicity.lookup_ltu_byte(self.lhs_limbs[idx] as u64, self.rhs_limbs[idx] as u64);
set_val!(instance, config.is_ltu, { i64_to_base::<F>(is_ltu as i64) });
is_ltu
}
}
#[derive(Clone)]
pub struct UIntLtConfig {
pub lhs_msb: MsbConfig,
pub rhs_msb: MsbConfig,
pub msb_is_equal: WitIn,
pub msb_diff_inv: WitIn,
pub is_ltu: UIntLtuConfig,
pub is_lt: WitIn,
}
pub struct UIntLtInput<'a> {
pub lhs_limbs: &'a [u8],
pub rhs_limbs: &'a [u8],
}
impl UIntLtInput<'_> {
pub fn assign<F: SmallField>(
&self,
instance: &mut [F],
config: &UIntLtConfig,
lk_multiplicity: &mut LkMultiplicity,
) -> bool {
let n_limbs = self.lhs_limbs.len();
let lhs_msb_input = MsbInput {
limbs: self.lhs_limbs,
};
let (lhs_msb, lhs_high_limb_no_msb) =
lhs_msb_input.assign(instance, &config.lhs_msb, lk_multiplicity);
let rhs_msb_input = MsbInput {
limbs: self.rhs_limbs,
};
let (rhs_msb, rhs_high_limb_no_msb) =
rhs_msb_input.assign(instance, &config.rhs_msb, lk_multiplicity);
let mut lhs_limbs_no_msb = self.lhs_limbs.iter().copied().collect_vec();
lhs_limbs_no_msb[n_limbs - 1] = lhs_high_limb_no_msb;
let mut rhs_limbs_no_msb = self.rhs_limbs.iter().copied().collect_vec();
rhs_limbs_no_msb[n_limbs - 1] = rhs_high_limb_no_msb;
let ltu_input = UIntLtuInput {
lhs_limbs: &lhs_limbs_no_msb,
rhs_limbs: &rhs_limbs_no_msb,
};
let is_ltu = ltu_input.assign::<F>(instance, &config.is_ltu, lk_multiplicity);
let msb_is_equal = lhs_msb == rhs_msb;
let msb_diff_inv = if msb_is_equal {
0
} else {
lhs_msb as i64 - rhs_msb as i64
};
set_val!(instance, config.msb_is_equal, {
i64_to_base::<F>(msb_is_equal as i64)
});
set_val!(instance, config.msb_diff_inv, {
i64_to_base::<F>(msb_diff_inv)
});
// is_lt = a_s\cdot (1-b_s)+eq(a_s,b_s)\cdot ltu(a_{<s},b_{<s})$
let is_lt = lhs_msb * (1 - rhs_msb) + msb_is_equal as u8 * is_ltu as u8;
set_val!(instance, config.is_lt, { i64_to_base::<F>(is_lt as i64) });
// lk_multiplicity.assert_ux::<1>(is_lt as u64);
assert!(is_lt == 0 || is_lt == 1);
is_lt > 0
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/slti.rs | ceno_zkvm/src/instructions/riscv/slti.rs | #[cfg(feature = "u16limb_circuit")]
mod slti_circuit_v2;
#[cfg(not(feature = "u16limb_circuit"))]
mod slti_circuit;
#[cfg(feature = "u16limb_circuit")]
use crate::instructions::riscv::slti::slti_circuit_v2::SetLessThanImmInstruction;
#[cfg(not(feature = "u16limb_circuit"))]
use crate::instructions::riscv::slti::slti_circuit::SetLessThanImmInstruction;
use super::RIVInstruction;
pub struct SltiOp;
impl RIVInstruction for SltiOp {
const INST_KIND: ceno_emul::InsnKind = ceno_emul::InsnKind::SLTI;
}
pub type SltiInstruction<E> = SetLessThanImmInstruction<E, SltiOp>;
pub struct SltiuOp;
impl RIVInstruction for SltiuOp {
const INST_KIND: ceno_emul::InsnKind = ceno_emul::InsnKind::SLTIU;
}
pub type SltiuInstruction<E> = SetLessThanImmInstruction<E, SltiuOp>;
#[cfg(test)]
mod test {
use ceno_emul::{Change, PC_STEP_SIZE, StepRecord, encode_rv32};
use ff_ext::{ExtensionField, GoldilocksExt2};
use proptest::proptest;
use super::*;
use crate::{
Value,
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::ShardContext,
instructions::{
Instruction,
riscv::{
constants::UInt,
test_utils::{i32_extra, imm_extra, immu_extra, u32_extra},
},
},
scheme::mock_prover::{MOCK_PC_START, MockProver},
structs::ProgramParams,
};
#[cfg(feature = "u16limb_circuit")]
use ff_ext::BabyBearExt4;
#[test]
fn test_sltiu_true() {
let cases = vec![
("lt = true, 0 < 1", 0, 1i32),
("lt = true, 1 < 2", 1, 2),
("lt = true, 10 < 20", 10, 20),
("lt = true, 0 < imm upper boundary", 0, 2047),
// negative imm is treated as positive
("lt = true, 0 < u32::MAX-1", 0, -1),
("lt = true, 1 < u32::MAX-1", 1, -1),
("lt = true, 0 < imm lower boundary", 0, -2048),
("lt = true, 65535 < imm lower boundary", 65535, -1),
];
for &(name, a, imm) in &cases {
verify::<SltiuOp, GoldilocksExt2>(name, a, imm, true);
#[cfg(feature = "u16limb_circuit")]
verify::<SltiuOp, BabyBearExt4>(name, a, imm, true);
}
}
#[test]
fn test_sltiu_false() {
let cases = vec![
("lt = false, 1 < 0", 1, 0i32),
("lt = false, 2 < 1", 2, 1),
("lt = false, 100 < 50", 100, 50),
("lt = false, 500 < 100", 500, 100),
("lt = false, 100000 < 2047", 100_000, 2047),
("lt = false, 100000 < 0", 100_000, 0),
("lt = false, 0 == 0", 0, 0),
("lt = false, 1 == 1", 1, 1),
("lt = false, imm upper boundary", u32::MAX, 2047),
("lt = false, imm lower boundary", u32::MAX, -2048), /* negative imm treated as positive */
];
for &(name, a, imm) in &cases {
verify::<SltiuOp, GoldilocksExt2>(name, a, imm, false);
#[cfg(feature = "u16limb_circuit")]
verify::<SltiuOp, BabyBearExt4>(name, a, imm, false);
}
}
proptest! {
#[test]
fn test_sltiu_prop(
a in u32_extra(),
imm in immu_extra(12),
) {
verify::<SltiuOp, GoldilocksExt2>("random SltiuOp", a, imm as i32, a < imm);
#[cfg(feature = "u16limb_circuit")]
verify::<SltiuOp, BabyBearExt4>("random SltiuOp", a, imm as i32, a < imm);
}
}
#[test]
fn test_slti_true() {
let cases = vec![
("lt = true, 0 < 1", 0, 1),
("lt = true, 1 < 2", 1, 2),
("lt = true, -1 < 0", -1, 0),
("lt = true, -1 < 1", -1, 1),
("lt = true, -2 < -1", -2, -1),
// -2048 <= imm <= 2047
("lt = true, imm upper boundary", i32::MIN, 2047),
("lt = true, imm lower boundary", i32::MIN, -2048),
];
for &(name, a, imm) in &cases {
verify::<SltiOp, GoldilocksExt2>(name, a as u32, imm, true);
#[cfg(feature = "u16limb_circuit")]
verify::<SltiOp, BabyBearExt4>(name, a as u32, imm, true);
}
}
#[test]
fn test_slti_false() {
let cases = vec![
("lt = false, 1 < 0", 1, 0),
("lt = false, 2 < 1", 2, 1),
("lt = false, 0 < -1", 0, -1),
("lt = false, 1 < -1", 1, -1),
("lt = false, -1 < -2", -1, -2),
("lt = false, 0 == 0", 0, 0),
("lt = false, 1 == 1", 1, 1),
("lt = false, -1 == -1", -1, -1),
// -2048 <= imm <= 2047
("lt = false, imm upper boundary", i32::MAX, 2047),
("lt = false, imm lower boundary", i32::MAX, -2048),
];
for &(name, a, imm) in &cases {
verify::<SltiOp, GoldilocksExt2>(name, a as u32, imm, false);
#[cfg(feature = "u16limb_circuit")]
verify::<SltiOp, BabyBearExt4>(name, a as u32, imm, false);
}
}
proptest! {
#[test]
fn test_slti_prop(
a in i32_extra(),
imm in imm_extra(12),
) {
verify::<SltiOp, GoldilocksExt2>("random SltiOp", a as u32, imm, a < imm);
#[cfg(feature = "u16limb_circuit")]
verify::<SltiOp, BabyBearExt4>("random SltiOp", a as u32, imm, a < imm);
}
}
fn verify<I: RIVInstruction, E: ExtensionField>(
name: &'static str,
rs1_read: u32,
imm: i32,
expected_rd: bool,
) {
let expected_rd = expected_rd as u32;
let mut cs = ConstraintSystem::<E>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let insn_code = encode_rv32(I::INST_KIND, 2, 0, 4, imm);
let config = cb
.namespace(
|| format!("{:?}_({name})", I::INST_KIND),
|cb| {
Ok(SetLessThanImmInstruction::<E, I>::construct_circuit(
cb,
&ProgramParams::default(),
))
},
)
.unwrap()
.unwrap();
let (raw_witin, lkm) = SetLessThanImmInstruction::<E, I>::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&StepRecord::new_i_instruction(
3,
Change::new(MOCK_PC_START, MOCK_PC_START + PC_STEP_SIZE),
insn_code,
rs1_read,
Change::new(0, expected_rd),
0,
)],
)
.unwrap();
let expected_rd =
UInt::from_const_unchecked(Value::new_unchecked(expected_rd).as_u16_limbs().to_vec());
config
.rd_written
.require_equal(
|| format!("{:?}_({name})_assert_rd_written", I::INST_KIND),
&mut cb,
&expected_rd,
)
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/insn_base.rs | ceno_zkvm/src/instructions/riscv/insn_base.rs | use ceno_emul::{Cycle, StepRecord, Word, WriteOp};
use ff_ext::{ExtensionField, FieldInto, SmallField};
use itertools::Itertools;
use p3::field::{Field, FieldAlgebra};
use super::constants::{BIT_WIDTH, PC_STEP_SIZE, UINT_LIMBS, UInt};
use crate::{
chip_handler::{
AddressExpr, GlobalStateRegisterMachineChipOperations, MemoryChipOperations, MemoryExpr,
RegisterChipOperations, RegisterExpr,
},
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::AssertLtConfig,
structs::RAMType,
uint::Value,
witness::{LkMultiplicity, set_val},
};
use ceno_emul::FullTracer as Tracer;
use multilinear_extensions::{Expression, ToExpr, WitIn};
use std::{iter, marker::PhantomData};
#[derive(Debug)]
pub struct StateInOut<E: ExtensionField> {
pub pc: WitIn,
pub next_pc: Option<WitIn>,
pub ts: WitIn,
_field_type: PhantomData<E>,
}
impl<E: ExtensionField> StateInOut<E> {
/// If circuit is branching, leave witness for next_pc free and return in
/// configuration so that calling circuit can constrain its value.
/// Otherwise, internally increment by PC_STEP_SIZE
pub fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
branching: bool,
) -> Result<Self, ZKVMError> {
let pc = circuit_builder.create_witin(|| "pc");
let (next_pc_opt, next_pc_expr) = if branching {
let next_pc = circuit_builder.create_witin(|| "next_pc");
(Some(next_pc), next_pc.expr())
} else {
(None, pc.expr() + PC_STEP_SIZE)
};
let ts = circuit_builder.create_witin(|| "ts");
let next_ts = ts.expr() + Tracer::SUBCYCLES_PER_INSN;
circuit_builder.state_in(pc.expr(), ts.expr())?;
circuit_builder.state_out(next_pc_expr, next_ts)?;
Ok(StateInOut {
pc,
next_pc: next_pc_opt,
ts,
_field_type: PhantomData,
})
}
pub fn assign_instance(
&self,
instance: &mut [<E as ExtensionField>::BaseField],
shard_ctx: &ShardContext,
// lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let current_shard_offset_cycle = shard_ctx.current_shard_offset_cycle();
set_val!(instance, self.pc, step.pc().before.0 as u64);
if let Some(n_pc) = self.next_pc {
set_val!(instance, n_pc, step.pc().after.0 as u64);
}
set_val!(instance, self.ts, step.cycle() - current_shard_offset_cycle);
Ok(())
}
}
#[derive(Debug)]
pub struct ReadRS1<E: ExtensionField> {
pub id: WitIn,
pub prev_ts: WitIn,
pub lt_cfg: AssertLtConfig,
_field_type: PhantomData<E>,
}
impl<E: ExtensionField> ReadRS1<E> {
pub fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
rs1_read: RegisterExpr<E>,
cur_ts: WitIn,
) -> Result<Self, ZKVMError> {
let id = circuit_builder.create_witin(|| "rs1_id");
let prev_ts = circuit_builder.create_witin(|| "prev_rs1_ts");
let (_, lt_cfg) = circuit_builder.register_read(
|| "read_rs1",
id,
prev_ts.expr(),
cur_ts.expr() + Tracer::SUBCYCLE_RS1,
rs1_read,
)?;
Ok(ReadRS1 {
id,
prev_ts,
lt_cfg,
_field_type: PhantomData,
})
}
pub fn assign_instance(
&self,
instance: &mut [<E as ExtensionField>::BaseField],
shard_ctx: &mut ShardContext,
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let op = step.rs1().expect("rs1 op");
let shard_prev_cycle = shard_ctx.aligned_prev_ts(op.previous_cycle);
let current_shard_offset_cycle = shard_ctx.current_shard_offset_cycle();
let shard_cycle = step.cycle() - current_shard_offset_cycle;
set_val!(instance, self.id, op.register_index() as u64);
set_val!(instance, self.prev_ts, shard_prev_cycle);
// Register read
self.lt_cfg.assign_instance(
instance,
lk_multiplicity,
shard_prev_cycle,
shard_cycle + Tracer::SUBCYCLE_RS1,
)?;
shard_ctx.send(
RAMType::Register,
op.addr,
op.register_index() as u64,
step.cycle() + Tracer::SUBCYCLE_RS1,
op.previous_cycle,
op.value,
None,
);
Ok(())
}
}
#[derive(Debug)]
pub struct ReadRS2<E: ExtensionField> {
pub id: WitIn,
pub prev_ts: WitIn,
pub lt_cfg: AssertLtConfig,
_field_type: PhantomData<E>,
}
impl<E: ExtensionField> ReadRS2<E> {
pub fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
rs2_read: RegisterExpr<E>,
cur_ts: WitIn,
) -> Result<Self, ZKVMError> {
let id = circuit_builder.create_witin(|| "rs2_id");
let prev_ts = circuit_builder.create_witin(|| "prev_rs2_ts");
let (_, lt_cfg) = circuit_builder.register_read(
|| "read_rs2",
id,
prev_ts.expr(),
cur_ts.expr() + Tracer::SUBCYCLE_RS2,
rs2_read,
)?;
Ok(ReadRS2 {
id,
prev_ts,
lt_cfg,
_field_type: PhantomData,
})
}
pub fn assign_instance(
&self,
instance: &mut [<E as ExtensionField>::BaseField],
shard_ctx: &mut ShardContext,
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let op = step.rs2().expect("rs2 op");
let shard_prev_cycle = shard_ctx.aligned_prev_ts(op.previous_cycle);
let current_shard_offset_cycle = shard_ctx.current_shard_offset_cycle();
let shard_cycle = step.cycle() - current_shard_offset_cycle;
set_val!(instance, self.id, op.register_index() as u64);
set_val!(instance, self.prev_ts, shard_prev_cycle);
// Register read
self.lt_cfg.assign_instance(
instance,
lk_multiplicity,
shard_prev_cycle,
shard_cycle + Tracer::SUBCYCLE_RS2,
)?;
shard_ctx.send(
RAMType::Register,
op.addr,
op.register_index() as u64,
step.cycle() + Tracer::SUBCYCLE_RS2,
op.previous_cycle,
op.value,
None,
);
Ok(())
}
}
#[derive(Debug)]
pub struct WriteRD<E: ExtensionField> {
pub id: WitIn,
pub prev_ts: WitIn,
pub prev_value: UInt<E>,
pub lt_cfg: AssertLtConfig,
}
impl<E: ExtensionField> WriteRD<E> {
pub fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
rd_written: RegisterExpr<E>,
cur_ts: WitIn,
) -> Result<Self, ZKVMError> {
let id = circuit_builder.create_witin(|| "rd_id");
let prev_ts = circuit_builder.create_witin(|| "prev_rd_ts");
let prev_value = UInt::new_unchecked(|| "prev_rd_value", circuit_builder)?;
let (_, lt_cfg) = circuit_builder.register_write(
|| "write_rd",
id,
prev_ts.expr(),
cur_ts.expr() + Tracer::SUBCYCLE_RD,
prev_value.register_expr(),
rd_written,
)?;
Ok(WriteRD {
id,
prev_ts,
prev_value,
lt_cfg,
})
}
pub fn assign_instance(
&self,
instance: &mut [<E as ExtensionField>::BaseField],
shard_ctx: &mut ShardContext,
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let op = step.rd().expect("rd op");
self.assign_op(instance, shard_ctx, lk_multiplicity, step.cycle(), &op)
}
pub fn assign_op(
&self,
instance: &mut [E::BaseField],
shard_ctx: &mut ShardContext,
lk_multiplicity: &mut LkMultiplicity,
cycle: Cycle,
op: &WriteOp,
) -> Result<(), ZKVMError> {
let shard_prev_cycle = shard_ctx.aligned_prev_ts(op.previous_cycle);
let current_shard_offset_cycle = shard_ctx.current_shard_offset_cycle();
let shard_cycle = cycle - current_shard_offset_cycle;
set_val!(instance, self.id, op.register_index() as u64);
set_val!(instance, self.prev_ts, shard_prev_cycle);
// Register state
self.prev_value.assign_limbs(
instance,
Value::new_unchecked(op.value.before).as_u16_limbs(),
);
// Register write
self.lt_cfg.assign_instance(
instance,
lk_multiplicity,
shard_prev_cycle,
shard_cycle + Tracer::SUBCYCLE_RD,
)?;
shard_ctx.send(
RAMType::Register,
op.addr,
op.register_index() as u64,
cycle + Tracer::SUBCYCLE_RD,
op.previous_cycle,
op.value.after,
Some(op.value.before),
);
Ok(())
}
}
#[derive(Debug)]
pub struct ReadMEM<E: ExtensionField> {
pub prev_ts: WitIn,
pub lt_cfg: AssertLtConfig,
_field_type: PhantomData<E>,
}
impl<E: ExtensionField> ReadMEM<E> {
pub fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
mem_addr: AddressExpr<E>,
mem_read: MemoryExpr<E>,
cur_ts: WitIn,
) -> Result<Self, ZKVMError> {
let prev_ts = circuit_builder.create_witin(|| "prev_ts");
let (_, lt_cfg) = circuit_builder.memory_read(
|| "read_memory",
&mem_addr,
prev_ts.expr(),
cur_ts.expr() + Tracer::SUBCYCLE_MEM,
mem_read,
)?;
Ok(ReadMEM {
prev_ts,
lt_cfg,
_field_type: PhantomData,
})
}
pub fn assign_instance(
&self,
instance: &mut [<E as ExtensionField>::BaseField],
shard_ctx: &mut ShardContext,
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let op = step.memory_op().unwrap();
let shard_prev_cycle = shard_ctx.aligned_prev_ts(op.previous_cycle);
let current_shard_offset_cycle = shard_ctx.current_shard_offset_cycle();
let shard_cycle = step.cycle() - current_shard_offset_cycle;
// Memory state
set_val!(instance, self.prev_ts, shard_prev_cycle);
// Memory read
self.lt_cfg.assign_instance(
instance,
lk_multiplicity,
shard_prev_cycle,
shard_cycle + Tracer::SUBCYCLE_MEM,
)?;
shard_ctx.send(
RAMType::Memory,
op.addr,
op.addr.baddr().0 as u64,
step.cycle() + Tracer::SUBCYCLE_MEM,
op.previous_cycle,
op.value.after,
None,
);
Ok(())
}
}
#[derive(Debug)]
pub struct WriteMEM {
pub prev_ts: WitIn,
pub lt_cfg: AssertLtConfig,
}
impl WriteMEM {
pub fn construct_circuit<E: ExtensionField>(
circuit_builder: &mut CircuitBuilder<E>,
mem_addr: AddressExpr<E>,
prev_value: MemoryExpr<E>,
new_value: MemoryExpr<E>,
cur_ts: WitIn,
) -> Result<Self, ZKVMError> {
let prev_ts = circuit_builder.create_witin(|| "prev_ts");
let (_, lt_cfg) = circuit_builder.memory_write(
|| "write_memory",
&mem_addr,
prev_ts.expr(),
cur_ts.expr() + Tracer::SUBCYCLE_MEM,
prev_value,
new_value,
)?;
Ok(WriteMEM { prev_ts, lt_cfg })
}
pub fn assign_instance<E: ExtensionField>(
&self,
instance: &mut [<E as ExtensionField>::BaseField],
shard_ctx: &mut ShardContext,
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let op = step.memory_op().unwrap();
self.assign_op(instance, shard_ctx, lk_multiplicity, step.cycle(), &op)
}
pub fn assign_op<F: SmallField>(
&self,
instance: &mut [F],
shard_ctx: &mut ShardContext,
lk_multiplicity: &mut LkMultiplicity,
cycle: Cycle,
op: &WriteOp,
) -> Result<(), ZKVMError> {
let shard_prev_cycle = shard_ctx.aligned_prev_ts(op.previous_cycle);
let current_shard_offset_cycle = shard_ctx.current_shard_offset_cycle();
let shard_cycle = cycle - current_shard_offset_cycle;
set_val!(instance, self.prev_ts, shard_prev_cycle);
self.lt_cfg.assign_instance(
instance,
lk_multiplicity,
shard_prev_cycle,
shard_cycle + Tracer::SUBCYCLE_MEM,
)?;
shard_ctx.send(
RAMType::Memory,
op.addr,
op.addr.baddr().0 as u64,
cycle + Tracer::SUBCYCLE_MEM,
op.previous_cycle,
op.value.after,
Some(op.value.before),
);
Ok(())
}
}
#[derive(Debug)]
pub struct MemAddr<E: ExtensionField> {
addr: UInt<E>,
low_bits: Vec<WitIn>,
max_bits: usize,
}
impl<E: ExtensionField> MemAddr<E> {
const N_LOW_BITS: usize = 2;
/// An address which is range-checked, and not aligned. Bits 0 and 1 are variables.
pub fn construct_unaligned(cb: &mut CircuitBuilder<E>) -> Result<Self, ZKVMError> {
Self::construct(cb, 0)
}
/// An address which is range-checked, and aligned to 2 bytes. Bit 0 is constant 0. Bit 1 is variable.
pub fn construct_align2(cb: &mut CircuitBuilder<E>) -> Result<Self, ZKVMError> {
Self::construct(cb, 1)
}
/// An address which is range-checked, and aligned to 4 bytes. Bits 0 and 1 are constant 0.
pub fn construct_align4(cb: &mut CircuitBuilder<E>) -> Result<Self, ZKVMError> {
Self::construct(cb, 2)
}
/// Represent the address as an expression.
pub fn expr_unaligned(&self) -> AddressExpr<E> {
self.addr.address_expr()
}
pub fn uint_unaligned(&self) -> UInt<E> {
UInt::from_exprs_unchecked(self.addr.expr())
}
pub fn uint_align2(&self) -> UInt<E> {
UInt::from_exprs_unchecked(vec![
self.addr.limbs[0].expr() - &self.low_bit_exprs()[0],
self.addr.limbs[1].expr(),
])
}
/// Represent the address aligned to 2 bytes.
pub fn expr_align2(&self) -> AddressExpr<E> {
self.addr.address_expr() - &self.low_bit_exprs()[0]
}
/// Represent the address aligned to 4 bytes.
pub fn expr_align4(&self) -> AddressExpr<E> {
let low_bits = self.low_bit_exprs();
self.addr.address_expr() - &low_bits[1] * 2 - &low_bits[0]
}
pub fn uint_align4(&self) -> UInt<E> {
let low_bits = self.low_bit_exprs();
UInt::from_exprs_unchecked(vec![
self.addr.limbs[0].expr() - &low_bits[1] * 2 - &low_bits[0],
self.addr.limbs[1].expr(),
])
}
/// Expressions of the low bits of the address, LSB-first: [bit_0, bit_1].
pub fn low_bit_exprs(&self) -> Vec<Expression<E>> {
iter::repeat_n(Expression::ZERO, self.n_zeros())
.chain(self.low_bits.iter().map(ToExpr::expr))
.collect()
}
fn construct(cb: &mut CircuitBuilder<E>, n_zeros: usize) -> Result<Self, ZKVMError> {
Self::construct_with_max_bits(cb, n_zeros, BIT_WIDTH)
}
pub fn construct_with_max_bits(
cb: &mut CircuitBuilder<E>,
n_zeros: usize,
max_bits: usize,
) -> Result<Self, ZKVMError> {
assert!(n_zeros <= Self::N_LOW_BITS);
// The address as two u16 limbs.
// Soundness: This does not use the UInt range-check but specialized checks instead.
let addr = UInt::new_unchecked(|| "memory_addr", cb)?;
let limbs = addr.expr();
// Witness and constrain the non-zero low bits.
let low_bits = (n_zeros..Self::N_LOW_BITS)
.map(|i| {
let bit = cb.create_witin(|| format!("addr_bit_{}", i));
cb.assert_bit(|| format!("addr_bit_{}", i), bit.expr())?;
Ok(bit)
})
.collect::<Result<Vec<WitIn>, ZKVMError>>()?;
// Express the value of the low bits.
let low_sum: Expression<E> = (n_zeros..Self::N_LOW_BITS)
.zip_eq(low_bits.iter())
.map(|(pos, bit)| bit.expr() << pos)
.sum();
// Range check the middle bits, that is the low limb excluding the low bits.
let shift_right = E::BaseField::from_canonical_u64(1 << Self::N_LOW_BITS)
.inverse()
.expr();
let mid_u14 = (&limbs[0] - low_sum) * shift_right;
cb.assert_ux::<_, _, 14>(|| "mid_u14", mid_u14)?;
// Range check the high limb.
for (i, high_limb) in limbs.iter().enumerate().skip(1) {
cb.assert_const_range(
|| "high_limb",
high_limb.clone(),
(max_bits - i * 16).min(16),
)?;
}
Ok(MemAddr {
addr,
low_bits,
max_bits,
})
}
pub fn assign_instance(
&self,
instance: &mut [<E as ExtensionField>::BaseField],
lkm: &mut LkMultiplicity,
addr: Word,
) -> Result<(), ZKVMError> {
self.addr.assign_value(instance, Value::new_unchecked(addr));
// Witness the non-zero low bits.
for (pos, bit) in (self.n_zeros()..Self::N_LOW_BITS).zip_eq(&self.low_bits) {
let b = (addr >> pos) & 1;
set_val!(instance, bit, b as u64);
}
// Range check the low limb besides the low bits.
let mid_u14 = (addr & 0xffff) >> Self::N_LOW_BITS;
lkm.assert_ux::<14>(mid_u14 as u64);
// Range check the high limb.
for i in 1..UINT_LIMBS {
let high_u16 = (addr >> (i * 16)) & 0xffff;
lkm.assert_const_range(high_u16 as u64, (self.max_bits - i * 16).min(16));
}
Ok(())
}
fn n_zeros(&self) -> usize {
Self::N_LOW_BITS - self.low_bits.len()
}
}
#[cfg(test)]
mod test {
use ff_ext::GoldilocksExt2 as E;
use itertools::Itertools;
use p3::goldilocks::Goldilocks as F;
use std::collections::HashSet;
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
ROMType,
circuit_builder::{CircuitBuilder, ConstraintSystem},
error::ZKVMError,
scheme::mock_prover::MockProver,
witness::LkMultiplicity,
};
use super::MemAddr;
#[test]
fn test_mem_addr() -> Result<(), ZKVMError> {
let aligned_1 = 0xbeadbeef;
let aligned_2 = 0xbeadbeee;
let aligned_4 = 0xbeadbeec;
impl_test_mem_addr(1, aligned_1, true)?;
impl_test_mem_addr(1, aligned_2, true)?;
impl_test_mem_addr(1, aligned_4, true)?;
impl_test_mem_addr(2, aligned_1, false)?;
impl_test_mem_addr(2, aligned_2, true)?;
impl_test_mem_addr(2, aligned_4, true)?;
impl_test_mem_addr(4, aligned_1, false)?;
impl_test_mem_addr(4, aligned_2, false)?;
impl_test_mem_addr(4, aligned_4, true)?;
Ok(())
}
fn impl_test_mem_addr(align: u32, addr: u32, is_ok: bool) -> Result<(), ZKVMError> {
let mut cs = ConstraintSystem::<E>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let mem_addr = match align {
1 => MemAddr::construct_unaligned(&mut cb)?,
2 => MemAddr::construct_align2(&mut cb)?,
4 => MemAddr::construct_align4(&mut cb)?,
_ => unreachable!(),
};
let mut lkm = LkMultiplicity::default();
let num_rows = 2;
let mut raw_witin = RowMajorMatrix::<F>::new(
num_rows,
cb.cs.num_witin as usize,
InstancePaddingStrategy::Default,
);
for instance in raw_witin.iter_mut() {
mem_addr.assign_instance(instance, &mut lkm, addr)?;
}
// Check the range lookups.
let lkm = lkm.into_finalize_result();
let expected = vec![
// 14 bits range
((1u64 << 14) + (0xbeef >> 2), num_rows),
// 16 bits range
((1 << 16) + 0xbead, num_rows),
]
.into_iter()
.collect::<HashSet<(u64, usize)>>();
let result = lkm[ROMType::Dynamic as usize]
.iter()
.map(|(k, v)| (*k, *v))
.collect::<HashSet<(u64, usize)>>();
assert_eq!(expected, result);
assert_eq!(lkm[ROMType::Dynamic as usize].len(), 2);
if is_ok {
cb.require_equal(|| "", mem_addr.expr_unaligned(), addr.into())?;
cb.require_equal(|| "", mem_addr.expr_align2(), (addr & !1).into())?;
cb.require_equal(|| "", mem_addr.expr_align4(), (addr & !3).into())?;
}
MockProver::assert_with_expected_errors(
&cb,
&[],
&raw_witin
.to_mles()
.into_iter()
.map(|v| v.into())
.collect_vec(),
&[],
&[],
if is_ok { &[] } else { &["mid_u14"] },
None,
None,
);
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/test.rs | ceno_zkvm/src/instructions/riscv/test.rs | use ff_ext::GoldilocksExt2;
use mpcs::{BasefoldDefault, PolynomialCommitmentScheme, SecurityLevel};
use crate::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
instructions::Instruction,
structs::{ComposedConstrainSystem, ProgramParams},
};
use super::arith::{AddInstruction, SubInstruction};
#[test]
fn test_multiple_opcode() {
type E = GoldilocksExt2;
type Pcs = BasefoldDefault<E>;
let params = ProgramParams::default();
let mut cs = ConstraintSystem::new(|| "riscv");
let _add_config = cs.namespace(
|| "add",
|cs| AddInstruction::construct_circuit(&mut CircuitBuilder::<E>::new(cs), ¶ms),
);
let _sub_config = cs.namespace(
|| "sub",
|cs| SubInstruction::construct_circuit(&mut CircuitBuilder::<E>::new(cs), ¶ms),
);
let param = Pcs::setup(1 << 10, SecurityLevel::default()).unwrap();
let (_, _) = Pcs::trim(param, 1 << 10).unwrap();
let cs = ComposedConstrainSystem {
zkvm_v1_css: cs,
gkr_circuit: None,
};
cs.key_gen();
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/rv32im.rs | ceno_zkvm/src/instructions/riscv/rv32im.rs | use super::{
arith::AddInstruction, branch::BltuInstruction, ecall::HaltInstruction, jump::JalInstruction,
memory::LwInstruction,
};
#[cfg(feature = "u16limb_circuit")]
use crate::instructions::riscv::auipc::AuipcInstruction;
#[cfg(feature = "u16limb_circuit")]
use crate::instructions::riscv::lui::LuiInstruction;
#[cfg(not(feature = "u16limb_circuit"))]
use crate::tables::PowTableCircuit;
use crate::{
e2e::{ShardContext, StepCellExtractor},
error::ZKVMError,
instructions::{
Instruction,
riscv::{
arith_imm::AddiInstruction,
branch::{
BeqInstruction, BgeInstruction, BgeuInstruction, BltInstruction, BneInstruction,
},
div::{DivInstruction, DivuInstruction, RemInstruction, RemuInstruction},
ecall::{
Fp2AddInstruction, Fp2MulInstruction, FpAddInstruction, FpMulInstruction,
KeccakInstruction, Secp256k1InvInstruction, Uint256MulInstruction,
WeierstrassAddAssignInstruction, WeierstrassDecompressInstruction,
WeierstrassDoubleAssignInstruction,
},
logic::{AndInstruction, OrInstruction, XorInstruction},
logic_imm::{AndiInstruction, OriInstruction, XoriInstruction},
mulh::MulhuInstruction,
shift::{SllInstruction, SrlInstruction},
shift_imm::{SlliInstruction, SraiInstruction, SrliInstruction},
slti::SltiInstruction,
*,
},
},
scheme::constants::DYNAMIC_RANGE_MAX_BITS,
structs::{ZKVMConstraintSystem, ZKVMFixedTraces, ZKVMWitnesses},
tables::{
AndTableCircuit, DoubleU8TableCircuit, DynamicRangeTableCircuit, LtuTableCircuit,
OrTableCircuit, TableCircuit, XorTableCircuit,
},
};
use ceno_emul::{
Bn254AddSpec, Bn254DoubleSpec, Bn254Fp2AddSpec, Bn254Fp2MulSpec, Bn254FpAddSpec,
Bn254FpMulSpec,
InsnKind::{self, *},
KeccakSpec, LogPcCycleSpec, Platform, Secp256k1AddSpec, Secp256k1DecompressSpec,
Secp256k1DoubleSpec, Secp256k1ScalarInvertSpec, Sha256ExtendSpec, StepRecord, SyscallSpec,
Uint256MulSpec,
};
use dummy::LargeEcallDummy;
use ecall::EcallDummy;
use ff_ext::ExtensionField;
use itertools::{Itertools, izip};
use mulh::{MulInstruction, MulhInstruction, MulhsuInstruction};
use shift::SraInstruction;
use slt::{SltInstruction, SltuInstruction};
use slti::SltiuInstruction;
use sp1_curves::weierstrass::{
SwCurve,
bn254::{Bn254, Bn254BaseField},
secp256k1::Secp256k1,
};
use std::{
cmp::Reverse,
collections::{BTreeMap, BTreeSet, HashMap},
};
use strum::{EnumCount, IntoEnumIterator};
pub mod mmu;
const ECALL_HALT: u32 = Platform::ecall_halt();
pub struct Rv32imConfig<E: ExtensionField> {
// ALU Opcodes.
pub add_config: <AddInstruction<E> as Instruction<E>>::InstructionConfig,
pub sub_config: <SubInstruction<E> as Instruction<E>>::InstructionConfig,
pub and_config: <AndInstruction<E> as Instruction<E>>::InstructionConfig,
pub or_config: <OrInstruction<E> as Instruction<E>>::InstructionConfig,
pub xor_config: <XorInstruction<E> as Instruction<E>>::InstructionConfig,
pub sll_config: <SllInstruction<E> as Instruction<E>>::InstructionConfig,
pub srl_config: <SrlInstruction<E> as Instruction<E>>::InstructionConfig,
pub sra_config: <SraInstruction<E> as Instruction<E>>::InstructionConfig,
pub slt_config: <SltInstruction<E> as Instruction<E>>::InstructionConfig,
pub sltu_config: <SltuInstruction<E> as Instruction<E>>::InstructionConfig,
pub mul_config: <MulInstruction<E> as Instruction<E>>::InstructionConfig,
pub mulh_config: <MulhInstruction<E> as Instruction<E>>::InstructionConfig,
pub mulhsu_config: <MulhsuInstruction<E> as Instruction<E>>::InstructionConfig,
pub mulhu_config: <MulhuInstruction<E> as Instruction<E>>::InstructionConfig,
pub divu_config: <DivuInstruction<E> as Instruction<E>>::InstructionConfig,
pub remu_config: <RemuInstruction<E> as Instruction<E>>::InstructionConfig,
pub div_config: <DivInstruction<E> as Instruction<E>>::InstructionConfig,
pub rem_config: <RemInstruction<E> as Instruction<E>>::InstructionConfig,
// ALU with imm
pub addi_config: <AddiInstruction<E> as Instruction<E>>::InstructionConfig,
pub andi_config: <AndiInstruction<E> as Instruction<E>>::InstructionConfig,
pub ori_config: <OriInstruction<E> as Instruction<E>>::InstructionConfig,
pub xori_config: <XoriInstruction<E> as Instruction<E>>::InstructionConfig,
pub slli_config: <SlliInstruction<E> as Instruction<E>>::InstructionConfig,
pub srli_config: <SrliInstruction<E> as Instruction<E>>::InstructionConfig,
pub srai_config: <SraiInstruction<E> as Instruction<E>>::InstructionConfig,
pub slti_config: <SltiInstruction<E> as Instruction<E>>::InstructionConfig,
pub sltiu_config: <SltiuInstruction<E> as Instruction<E>>::InstructionConfig,
#[cfg(feature = "u16limb_circuit")]
pub lui_config: <LuiInstruction<E> as Instruction<E>>::InstructionConfig,
#[cfg(feature = "u16limb_circuit")]
pub auipc_config: <AuipcInstruction<E> as Instruction<E>>::InstructionConfig,
// Branching Opcodes
pub beq_config: <BeqInstruction<E> as Instruction<E>>::InstructionConfig,
pub bne_config: <BneInstruction<E> as Instruction<E>>::InstructionConfig,
pub blt_config: <BltInstruction<E> as Instruction<E>>::InstructionConfig,
pub bltu_config: <BltuInstruction<E> as Instruction<E>>::InstructionConfig,
pub bge_config: <BgeInstruction<E> as Instruction<E>>::InstructionConfig,
pub bgeu_config: <BgeuInstruction<E> as Instruction<E>>::InstructionConfig,
// Jump Opcodes
pub jal_config: <JalInstruction<E> as Instruction<E>>::InstructionConfig,
pub jalr_config: <JalrInstruction<E> as Instruction<E>>::InstructionConfig,
// Memory Opcodes
pub lw_config: <LwInstruction<E> as Instruction<E>>::InstructionConfig,
pub lhu_config: <LhuInstruction<E> as Instruction<E>>::InstructionConfig,
pub lh_config: <LhInstruction<E> as Instruction<E>>::InstructionConfig,
pub lbu_config: <LbuInstruction<E> as Instruction<E>>::InstructionConfig,
pub lb_config: <LbInstruction<E> as Instruction<E>>::InstructionConfig,
pub sw_config: <SwInstruction<E> as Instruction<E>>::InstructionConfig,
pub sh_config: <ShInstruction<E> as Instruction<E>>::InstructionConfig,
pub sb_config: <SbInstruction<E> as Instruction<E>>::InstructionConfig,
// Ecall Opcodes
pub halt_config: <HaltInstruction<E> as Instruction<E>>::InstructionConfig,
pub keccak_config: <KeccakInstruction<E> as Instruction<E>>::InstructionConfig,
pub bn254_add_config:
<WeierstrassAddAssignInstruction<E, SwCurve<Bn254>> as Instruction<E>>::InstructionConfig,
pub bn254_double_config:
<WeierstrassDoubleAssignInstruction<E, SwCurve<Bn254>> as Instruction<E>>::InstructionConfig,
pub bn254_fp_add_config:
<FpAddInstruction<E, Bn254BaseField> as Instruction<E>>::InstructionConfig,
pub bn254_fp_mul_config:
<FpMulInstruction<E, Bn254BaseField> as Instruction<E>>::InstructionConfig,
pub bn254_fp2_add_config:
<Fp2AddInstruction<E, Bn254BaseField> as Instruction<E>>::InstructionConfig,
pub bn254_fp2_mul_config:
<Fp2MulInstruction<E, Bn254BaseField> as Instruction<E>>::InstructionConfig,
pub secp256k1_add_config:
<WeierstrassAddAssignInstruction<E, SwCurve<Secp256k1>> as Instruction<E>>::InstructionConfig,
pub secp256k1_double_config:
<WeierstrassDoubleAssignInstruction<E, SwCurve<Secp256k1>> as Instruction<E>>::InstructionConfig,
pub secp256k1_scalar_invert:
<Secp256k1InvInstruction<E> as Instruction<E>>::InstructionConfig,
pub secp256k1_decompress_config:
<WeierstrassDecompressInstruction<E, SwCurve<Secp256k1>> as Instruction<E>>::InstructionConfig,
pub uint256_mul_config:
<Uint256MulInstruction<E> as Instruction<E>>::InstructionConfig,
// Tables.
pub dynamic_range_config: <DynamicRangeTableCircuit<E, 18> as TableCircuit<E>>::TableConfig,
pub double_u8_range_config: <DoubleU8TableCircuit<E> as TableCircuit<E>>::TableConfig,
pub and_table_config: <AndTableCircuit<E> as TableCircuit<E>>::TableConfig,
pub or_table_config: <OrTableCircuit<E> as TableCircuit<E>>::TableConfig,
pub xor_table_config: <XorTableCircuit<E> as TableCircuit<E>>::TableConfig,
pub ltu_config: <LtuTableCircuit<E> as TableCircuit<E>>::TableConfig,
#[cfg(not(feature = "u16limb_circuit"))]
pub pow_config: <PowTableCircuit<E> as TableCircuit<E>>::TableConfig,
// record InsnKind -> cells
pub inst_cells_map: Vec<u64>,
// record opcode name -> cells
// serve ecall/table for no InsnKind
pub ecall_cells_map: HashMap<String, u64>,
}
const KECCAK_CELL_BLOWUP_FACTOR: u64 = 2;
impl<E: ExtensionField> Rv32imConfig<E> {
pub fn construct_circuits(cs: &mut ZKVMConstraintSystem<E>) -> Self {
let mut inst_cells_map = vec![0; InsnKind::COUNT];
let mut ecall_cells_map = HashMap::new();
macro_rules! register_opcode_circuit {
($insn_kind:ident, $instruction:ty, $inst_cells_map:ident) => {{
let config = cs.register_opcode_circuit::<$instruction>();
// update estimated cell
$inst_cells_map[$insn_kind as usize] = cs
.get_cs(&<$instruction>::name())
.as_ref()
.map(|cs| {
(cs.zkvm_v1_css.num_witin as u64
+ cs.zkvm_v1_css.num_structural_witin as u64
+ cs.zkvm_v1_css.num_fixed as u64)
* (1 << cs.rotation_vars().unwrap_or(0))
})
.unwrap_or_default();
config
}};
}
// opcode circuits
// alu opcodes
let add_config = register_opcode_circuit!(ADD, AddInstruction<E>, inst_cells_map);
let sub_config = register_opcode_circuit!(SUB, SubInstruction<E>, inst_cells_map);
let and_config = register_opcode_circuit!(AND, AndInstruction<E>, inst_cells_map);
let or_config = register_opcode_circuit!(OR, OrInstruction<E>, inst_cells_map);
let xor_config = register_opcode_circuit!(XOR, XorInstruction<E>, inst_cells_map);
let sll_config = register_opcode_circuit!(SLL, SllInstruction<E>, inst_cells_map);
let srl_config = register_opcode_circuit!(SRL, SrlInstruction<E>, inst_cells_map);
let sra_config = register_opcode_circuit!(SRA, SraInstruction<E>, inst_cells_map);
let slt_config = register_opcode_circuit!(SLT, SltInstruction<E>, inst_cells_map);
let sltu_config = register_opcode_circuit!(SLTU, SltuInstruction<E>, inst_cells_map);
let mul_config = register_opcode_circuit!(MUL, MulInstruction<E>, inst_cells_map);
let mulh_config = register_opcode_circuit!(MULH, MulhInstruction<E>, inst_cells_map);
let mulhsu_config = register_opcode_circuit!(MULHSU, MulhsuInstruction<E>, inst_cells_map);
let mulhu_config = register_opcode_circuit!(MULHU, MulhuInstruction<E>, inst_cells_map);
let divu_config = register_opcode_circuit!(DIVU, DivuInstruction<E>, inst_cells_map);
let remu_config = register_opcode_circuit!(REMU, RemuInstruction<E>, inst_cells_map);
let div_config = register_opcode_circuit!(DIV, DivInstruction<E>, inst_cells_map);
let rem_config = register_opcode_circuit!(REM, RemInstruction<E>, inst_cells_map);
// alu with imm opcodes
let addi_config = register_opcode_circuit!(ADDI, AddiInstruction<E>, inst_cells_map);
let andi_config = register_opcode_circuit!(ANDI, AndiInstruction<E>, inst_cells_map);
let ori_config = register_opcode_circuit!(ORI, OriInstruction<E>, inst_cells_map);
let xori_config = register_opcode_circuit!(XORI, XoriInstruction<E>, inst_cells_map);
let slli_config = register_opcode_circuit!(SLLI, SlliInstruction<E>, inst_cells_map);
let srli_config = register_opcode_circuit!(SRLI, SrliInstruction<E>, inst_cells_map);
let srai_config = register_opcode_circuit!(SRAI, SraiInstruction<E>, inst_cells_map);
let slti_config = register_opcode_circuit!(SLTI, SltiInstruction<E>, inst_cells_map);
let sltiu_config = register_opcode_circuit!(SLTIU, SltiuInstruction<E>, inst_cells_map);
#[cfg(feature = "u16limb_circuit")]
let lui_config = register_opcode_circuit!(LUI, LuiInstruction<E>, inst_cells_map);
#[cfg(feature = "u16limb_circuit")]
let auipc_config = register_opcode_circuit!(AUIPC, AuipcInstruction<E>, inst_cells_map);
// branching opcodes
let beq_config = register_opcode_circuit!(BEQ, BeqInstruction<E>, inst_cells_map);
let bne_config = register_opcode_circuit!(BNE, BneInstruction<E>, inst_cells_map);
let blt_config = register_opcode_circuit!(BLT, BltInstruction<E>, inst_cells_map);
let bltu_config = register_opcode_circuit!(BLTU, BltuInstruction<E>, inst_cells_map);
let bge_config = register_opcode_circuit!(BGE, BgeInstruction<E>, inst_cells_map);
let bgeu_config = register_opcode_circuit!(BGEU, BgeuInstruction<E>, inst_cells_map);
// jump opcodes
let jal_config = register_opcode_circuit!(JAL, JalInstruction<E>, inst_cells_map);
let jalr_config = register_opcode_circuit!(JALR, JalrInstruction<E>, inst_cells_map);
// memory opcodes
let lw_config = register_opcode_circuit!(LW, LwInstruction<E>, inst_cells_map);
let lhu_config = register_opcode_circuit!(LHU, LhuInstruction<E>, inst_cells_map);
let lh_config = register_opcode_circuit!(LH, LhInstruction<E>, inst_cells_map);
let lbu_config = register_opcode_circuit!(LBU, LbuInstruction<E>, inst_cells_map);
let lb_config = register_opcode_circuit!(LB, LbInstruction<E>, inst_cells_map);
let sw_config = register_opcode_circuit!(SW, SwInstruction<E>, inst_cells_map);
let sh_config = register_opcode_circuit!(SH, ShInstruction<E>, inst_cells_map);
let sb_config = register_opcode_circuit!(SB, SbInstruction<E>, inst_cells_map);
// ecall opcodes
macro_rules! register_ecall_circuit {
($instruction:ty, $ecall_cells_map:ident) => {{
let config = cs.register_opcode_circuit::<$instruction>();
// update estimated cell
assert!(
$ecall_cells_map
.insert(
<$instruction>::name(),
cs.get_cs(&<$instruction>::name())
.as_ref()
.map(|cs| {
(cs.zkvm_v1_css.num_witin as u64
+ cs.zkvm_v1_css.num_structural_witin as u64
+ cs.zkvm_v1_css.num_fixed as u64)
* (1 << cs.rotation_vars().unwrap_or(0))
})
.unwrap_or_default(),
)
.is_none()
);
config
}};
}
let halt_config = register_ecall_circuit!(HaltInstruction<E>, ecall_cells_map);
// Keccak precompile is a known hotspot for peak memory.
// Its heavy read/write/LK activity inflates tower-witness usage, causing
// substantial memory overhead which not reflected on basic column count.
//
// We estimate this effect by applying an extra scaling factor that models
// tower-witness blowup proportional to the number of base columns.
let keccak_config = cs.register_opcode_circuit::<KeccakInstruction<E>>();
assert!(
ecall_cells_map
.insert(
<KeccakInstruction<E>>::name(),
cs.get_cs(&<KeccakInstruction<E>>::name())
.as_ref()
.map(|cs| {
(cs.zkvm_v1_css.num_witin as u64
+ cs.zkvm_v1_css.num_structural_witin as u64
+ cs.zkvm_v1_css.num_fixed as u64)
* (1 << cs.rotation_vars().unwrap_or(0))
* KECCAK_CELL_BLOWUP_FACTOR
})
.unwrap_or_default(),
)
.is_none()
);
let bn254_add_config = register_ecall_circuit!(WeierstrassAddAssignInstruction<E, SwCurve<Bn254>>, ecall_cells_map);
let bn254_double_config = register_ecall_circuit!(WeierstrassDoubleAssignInstruction<E, SwCurve<Bn254>>, ecall_cells_map);
let bn254_fp_add_config =
register_ecall_circuit!(FpAddInstruction<E, Bn254BaseField>, ecall_cells_map);
let bn254_fp_mul_config =
register_ecall_circuit!(FpMulInstruction<E, Bn254BaseField>, ecall_cells_map);
let bn254_fp2_add_config =
register_ecall_circuit!(Fp2AddInstruction<E, Bn254BaseField>, ecall_cells_map);
let bn254_fp2_mul_config =
register_ecall_circuit!(Fp2MulInstruction<E, Bn254BaseField>, ecall_cells_map);
let secp256k1_add_config = register_ecall_circuit!(WeierstrassAddAssignInstruction<E, SwCurve<Secp256k1>>, ecall_cells_map);
let secp256k1_double_config = register_ecall_circuit!(WeierstrassDoubleAssignInstruction<E, SwCurve<Secp256k1>>, ecall_cells_map);
let secp256k1_decompress_config = register_ecall_circuit!(WeierstrassDecompressInstruction<E, SwCurve<Secp256k1>>, ecall_cells_map);
let secp256k1_scalar_invert =
register_ecall_circuit!(Secp256k1InvInstruction<E>, ecall_cells_map);
let uint256_mul_config = register_ecall_circuit!(Uint256MulInstruction<E>, ecall_cells_map);
// tables
let dynamic_range_config =
cs.register_table_circuit::<DynamicRangeTableCircuit<E, DYNAMIC_RANGE_MAX_BITS>>();
let double_u8_range_config = cs.register_table_circuit::<DoubleU8TableCircuit<E>>();
let and_table_config = cs.register_table_circuit::<AndTableCircuit<E>>();
let or_table_config = cs.register_table_circuit::<OrTableCircuit<E>>();
let xor_table_config = cs.register_table_circuit::<XorTableCircuit<E>>();
let ltu_config = cs.register_table_circuit::<LtuTableCircuit<E>>();
#[cfg(not(feature = "u16limb_circuit"))]
let pow_config = cs.register_table_circuit::<PowTableCircuit<E>>();
Self {
// alu opcodes
add_config,
sub_config,
and_config,
or_config,
xor_config,
sll_config,
srl_config,
sra_config,
slt_config,
sltu_config,
mul_config,
mulh_config,
mulhsu_config,
mulhu_config,
divu_config,
remu_config,
div_config,
rem_config,
// alu with imm
addi_config,
andi_config,
ori_config,
xori_config,
slli_config,
srli_config,
srai_config,
slti_config,
sltiu_config,
#[cfg(feature = "u16limb_circuit")]
lui_config,
#[cfg(feature = "u16limb_circuit")]
auipc_config,
// branching opcodes
beq_config,
bne_config,
blt_config,
bltu_config,
bge_config,
bgeu_config,
// jump opcodes
jal_config,
jalr_config,
// memory opcodes
sw_config,
sh_config,
sb_config,
lw_config,
lhu_config,
lh_config,
lbu_config,
lb_config,
// ecall opcodes
halt_config,
keccak_config,
bn254_add_config,
bn254_double_config,
bn254_fp_add_config,
bn254_fp_mul_config,
bn254_fp2_add_config,
bn254_fp2_mul_config,
secp256k1_add_config,
secp256k1_double_config,
secp256k1_scalar_invert,
secp256k1_decompress_config,
uint256_mul_config,
// tables
dynamic_range_config,
double_u8_range_config,
and_table_config,
or_table_config,
xor_table_config,
ltu_config,
#[cfg(not(feature = "u16limb_circuit"))]
pow_config,
inst_cells_map,
ecall_cells_map,
}
}
pub fn generate_fixed_traces(
&self,
cs: &ZKVMConstraintSystem<E>,
fixed: &mut ZKVMFixedTraces<E>,
) {
// alu
fixed.register_opcode_circuit::<AddInstruction<E>>(cs, &self.add_config);
fixed.register_opcode_circuit::<SubInstruction<E>>(cs, &self.sub_config);
fixed.register_opcode_circuit::<AndInstruction<E>>(cs, &self.and_config);
fixed.register_opcode_circuit::<OrInstruction<E>>(cs, &self.or_config);
fixed.register_opcode_circuit::<XorInstruction<E>>(cs, &self.xor_config);
fixed.register_opcode_circuit::<SllInstruction<E>>(cs, &self.sll_config);
fixed.register_opcode_circuit::<SrlInstruction<E>>(cs, &self.srl_config);
fixed.register_opcode_circuit::<SraInstruction<E>>(cs, &self.sra_config);
fixed.register_opcode_circuit::<SltInstruction<E>>(cs, &self.slt_config);
fixed.register_opcode_circuit::<SltuInstruction<E>>(cs, &self.sltu_config);
fixed.register_opcode_circuit::<MulInstruction<E>>(cs, &self.mul_config);
fixed.register_opcode_circuit::<MulhInstruction<E>>(cs, &self.mulh_config);
fixed.register_opcode_circuit::<MulhsuInstruction<E>>(cs, &self.mulhsu_config);
fixed.register_opcode_circuit::<MulhuInstruction<E>>(cs, &self.mulhu_config);
fixed.register_opcode_circuit::<DivuInstruction<E>>(cs, &self.divu_config);
fixed.register_opcode_circuit::<RemuInstruction<E>>(cs, &self.remu_config);
fixed.register_opcode_circuit::<DivInstruction<E>>(cs, &self.div_config);
fixed.register_opcode_circuit::<RemInstruction<E>>(cs, &self.rem_config);
// alu with imm
fixed.register_opcode_circuit::<AddiInstruction<E>>(cs, &self.addi_config);
fixed.register_opcode_circuit::<AndiInstruction<E>>(cs, &self.andi_config);
fixed.register_opcode_circuit::<OriInstruction<E>>(cs, &self.ori_config);
fixed.register_opcode_circuit::<XoriInstruction<E>>(cs, &self.xori_config);
fixed.register_opcode_circuit::<SlliInstruction<E>>(cs, &self.slli_config);
fixed.register_opcode_circuit::<SrliInstruction<E>>(cs, &self.srli_config);
fixed.register_opcode_circuit::<SraiInstruction<E>>(cs, &self.srai_config);
fixed.register_opcode_circuit::<SltiInstruction<E>>(cs, &self.slti_config);
fixed.register_opcode_circuit::<SltiuInstruction<E>>(cs, &self.sltiu_config);
#[cfg(feature = "u16limb_circuit")]
fixed.register_opcode_circuit::<LuiInstruction<E>>(cs, &self.lui_config);
#[cfg(feature = "u16limb_circuit")]
fixed.register_opcode_circuit::<AuipcInstruction<E>>(cs, &self.auipc_config);
// branching
fixed.register_opcode_circuit::<BeqInstruction<E>>(cs, &self.beq_config);
fixed.register_opcode_circuit::<BneInstruction<E>>(cs, &self.bne_config);
fixed.register_opcode_circuit::<BltInstruction<E>>(cs, &self.blt_config);
fixed.register_opcode_circuit::<BltuInstruction<E>>(cs, &self.bltu_config);
fixed.register_opcode_circuit::<BgeInstruction<E>>(cs, &self.bge_config);
fixed.register_opcode_circuit::<BgeuInstruction<E>>(cs, &self.bgeu_config);
// jump
fixed.register_opcode_circuit::<JalInstruction<E>>(cs, &self.jal_config);
fixed.register_opcode_circuit::<JalrInstruction<E>>(cs, &self.jalr_config);
// memory
fixed.register_opcode_circuit::<SwInstruction<E>>(cs, &self.sw_config);
fixed.register_opcode_circuit::<ShInstruction<E>>(cs, &self.sh_config);
fixed.register_opcode_circuit::<SbInstruction<E>>(cs, &self.sb_config);
fixed.register_opcode_circuit::<LwInstruction<E>>(cs, &self.lw_config);
fixed.register_opcode_circuit::<LhuInstruction<E>>(cs, &self.lhu_config);
fixed.register_opcode_circuit::<LhInstruction<E>>(cs, &self.lh_config);
fixed.register_opcode_circuit::<LbuInstruction<E>>(cs, &self.lbu_config);
fixed.register_opcode_circuit::<LbInstruction<E>>(cs, &self.lb_config);
// system
fixed.register_opcode_circuit::<HaltInstruction<E>>(cs, &self.halt_config);
fixed.register_opcode_circuit::<KeccakInstruction<E>>(cs, &self.keccak_config);
fixed.register_opcode_circuit::<WeierstrassAddAssignInstruction<E, SwCurve<Bn254>>>(
cs,
&self.bn254_add_config,
);
fixed.register_opcode_circuit::<WeierstrassDoubleAssignInstruction<E, SwCurve<Bn254>>>(
cs,
&self.bn254_double_config,
);
fixed.register_opcode_circuit::<FpAddInstruction<E, Bn254BaseField>>(
cs,
&self.bn254_fp_add_config,
);
fixed.register_opcode_circuit::<FpMulInstruction<E, Bn254BaseField>>(
cs,
&self.bn254_fp_mul_config,
);
fixed.register_opcode_circuit::<Fp2AddInstruction<E, Bn254BaseField>>(
cs,
&self.bn254_fp2_add_config,
);
fixed.register_opcode_circuit::<Fp2MulInstruction<E, Bn254BaseField>>(
cs,
&self.bn254_fp2_mul_config,
);
fixed.register_opcode_circuit::<WeierstrassAddAssignInstruction<E, SwCurve<Secp256k1>>>(
cs,
&self.secp256k1_add_config,
);
fixed.register_opcode_circuit::<WeierstrassDoubleAssignInstruction<E, SwCurve<Secp256k1>>>(
cs,
&self.secp256k1_double_config,
);
fixed.register_opcode_circuit::<WeierstrassDecompressInstruction<E, SwCurve<Secp256k1>>>(
cs,
&self.secp256k1_decompress_config,
);
fixed.register_opcode_circuit::<Uint256MulInstruction<E>>(cs, &self.uint256_mul_config);
// table
fixed.register_table_circuit::<DynamicRangeTableCircuit<E, DYNAMIC_RANGE_MAX_BITS>>(
cs,
&self.dynamic_range_config,
&(),
);
fixed.register_table_circuit::<DoubleU8TableCircuit<E>>(
cs,
&self.double_u8_range_config,
&(),
);
fixed.register_table_circuit::<AndTableCircuit<E>>(cs, &self.and_table_config, &());
fixed.register_table_circuit::<OrTableCircuit<E>>(cs, &self.or_table_config, &());
fixed.register_table_circuit::<XorTableCircuit<E>>(cs, &self.xor_table_config, &());
fixed.register_table_circuit::<LtuTableCircuit<E>>(cs, &self.ltu_config, &());
#[cfg(not(feature = "u16limb_circuit"))]
fixed.register_table_circuit::<PowTableCircuit<E>>(cs, &self.pow_config, &());
}
pub fn assign_opcode_circuit<'a>(
&self,
cs: &ZKVMConstraintSystem<E>,
shard_ctx: &mut ShardContext,
witness: &mut ZKVMWitnesses<E>,
steps: &'a [StepRecord],
) -> Result<GroupedSteps<'a>, ZKVMError> {
let mut all_records: BTreeMap<InsnKind, Vec<&StepRecord>> = InsnKind::iter()
.map(|insn_kind| (insn_kind, Vec::new()))
.collect();
let mut halt_records = Vec::new();
let mut keccak_records = Vec::new();
let mut bn254_add_records = Vec::new();
let mut bn254_double_records = Vec::new();
let mut bn254_fp_add_records = Vec::new();
let mut bn254_fp_mul_records = Vec::new();
let mut bn254_fp2_add_records = Vec::new();
let mut bn254_fp2_mul_records = Vec::new();
let mut secp256k1_add_records = Vec::new();
let mut secp256k1_double_records = Vec::new();
let mut secp256k1_decompress_records = Vec::new();
let mut uint256_mul_records = Vec::new();
let mut secp256k1_scalar_invert_records = Vec::new();
steps.iter().for_each(|record| {
let insn_kind = record.insn.kind;
match insn_kind {
// ecall / halt
InsnKind::ECALL if record.rs1().unwrap().value == Platform::ecall_halt() => {
halt_records.push(record);
}
InsnKind::ECALL if record.rs1().unwrap().value == KeccakSpec::CODE => {
keccak_records.push(record);
}
InsnKind::ECALL if record.rs1().unwrap().value == Bn254AddSpec::CODE => {
bn254_add_records.push(record);
}
InsnKind::ECALL if record.rs1().unwrap().value == Bn254DoubleSpec::CODE => {
bn254_double_records.push(record);
}
InsnKind::ECALL if record.rs1().unwrap().value == Bn254FpAddSpec::CODE => {
bn254_fp_add_records.push(record);
}
InsnKind::ECALL if record.rs1().unwrap().value == Bn254FpMulSpec::CODE => {
bn254_fp_mul_records.push(record);
}
InsnKind::ECALL if record.rs1().unwrap().value == Bn254Fp2AddSpec::CODE => {
bn254_fp2_add_records.push(record);
}
InsnKind::ECALL if record.rs1().unwrap().value == Bn254Fp2MulSpec::CODE => {
bn254_fp2_mul_records.push(record);
}
InsnKind::ECALL if record.rs1().unwrap().value == Secp256k1AddSpec::CODE => {
secp256k1_add_records.push(record);
}
InsnKind::ECALL if record.rs1().unwrap().value == Secp256k1DoubleSpec::CODE => {
secp256k1_double_records.push(record);
}
InsnKind::ECALL
if record.rs1().unwrap().value == Secp256k1ScalarInvertSpec::CODE =>
{
secp256k1_scalar_invert_records.push(record);
}
InsnKind::ECALL if record.rs1().unwrap().value == Secp256k1DecompressSpec::CODE => {
secp256k1_decompress_records.push(record);
}
InsnKind::ECALL if record.rs1().unwrap().value == Uint256MulSpec::CODE => {
uint256_mul_records.push(record);
}
// other type of ecalls are handled by dummy ecall instruction
_ => {
// it's safe to unwrap as all_records are initialized with Vec::new()
all_records.get_mut(&insn_kind).unwrap().push(record);
}
}
});
for (insn_kind, (_, records)) in
izip!(InsnKind::iter(), &all_records).sorted_by_key(|(_, (_, a))| Reverse(a.len()))
{
tracing::debug!("tracer generated {:?} {} records", insn_kind, records.len());
}
tracing::debug!("tracer generated HALT {} records", halt_records.len());
tracing::debug!("tracer generated KECCAK {} records", keccak_records.len());
tracing::debug!(
"tracer generated bn254_add_records {} records",
bn254_add_records.len()
);
tracing::debug!(
"tracer generated bn254_double_records {} records",
bn254_double_records.len()
);
tracing::debug!(
"tracer generated bn254_fp_add_records {} records",
bn254_fp_add_records.len()
);
tracing::debug!(
"tracer generated bn254_fp_mul_records {} records",
bn254_fp_mul_records.len()
);
tracing::debug!(
"tracer generated bn254_fp2_add_records {} records",
bn254_fp2_add_records.len()
);
tracing::debug!(
"tracer generated bn254_fp2_mul_records {} records",
bn254_fp2_mul_records.len()
);
tracing::debug!(
"tracer generated secp256k1_add_records {} records",
secp256k1_add_records.len()
);
tracing::debug!(
"tracer generated secp256k1_double_records {} records",
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/branch.rs | ceno_zkvm/src/instructions/riscv/branch.rs | use super::RIVInstruction;
use ceno_emul::InsnKind;
#[cfg(not(feature = "u16limb_circuit"))]
mod branch_circuit;
#[cfg(feature = "u16limb_circuit")]
mod branch_circuit_v2;
#[cfg(test)]
mod test;
// Marker types binding each RISC-V branch opcode to the shared `BranchCircuit`.
// The `u16limb_circuit` feature switches every alias between the v1
// (`branch_circuit`) and v2 (`branch_circuit_v2`) circuit implementations.

/// BEQ: branch if rs1 == rs2.
pub struct BeqOp;
impl RIVInstruction for BeqOp {
    const INST_KIND: InsnKind = InsnKind::BEQ;
}
#[cfg(feature = "u16limb_circuit")]
pub type BeqInstruction<E> = branch_circuit_v2::BranchCircuit<E, BeqOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type BeqInstruction<E> = branch_circuit::BranchCircuit<E, BeqOp>;

/// BNE: branch if rs1 != rs2.
pub struct BneOp;
impl RIVInstruction for BneOp {
    const INST_KIND: InsnKind = InsnKind::BNE;
}
#[cfg(feature = "u16limb_circuit")]
pub type BneInstruction<E> = branch_circuit_v2::BranchCircuit<E, BneOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type BneInstruction<E> = branch_circuit::BranchCircuit<E, BneOp>;

/// BLTU: branch if rs1 < rs2 (unsigned compare).
pub struct BltuOp;
impl RIVInstruction for BltuOp {
    const INST_KIND: InsnKind = InsnKind::BLTU;
}
#[cfg(feature = "u16limb_circuit")]
pub type BltuInstruction<E> = branch_circuit_v2::BranchCircuit<E, BltuOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type BltuInstruction<E> = branch_circuit::BranchCircuit<E, BltuOp>;

/// BGEU: branch if rs1 >= rs2 (unsigned compare).
pub struct BgeuOp;
impl RIVInstruction for BgeuOp {
    const INST_KIND: InsnKind = InsnKind::BGEU;
}
#[cfg(feature = "u16limb_circuit")]
pub type BgeuInstruction<E> = branch_circuit_v2::BranchCircuit<E, BgeuOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type BgeuInstruction<E> = branch_circuit::BranchCircuit<E, BgeuOp>;

/// BLT: branch if rs1 < rs2 (signed compare).
pub struct BltOp;
impl RIVInstruction for BltOp {
    const INST_KIND: InsnKind = InsnKind::BLT;
}
#[cfg(feature = "u16limb_circuit")]
pub type BltInstruction<E> = branch_circuit_v2::BranchCircuit<E, BltOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type BltInstruction<E> = branch_circuit::BranchCircuit<E, BltOp>;

/// BGE: branch if rs1 >= rs2 (signed compare).
pub struct BgeOp;
impl RIVInstruction for BgeOp {
    const INST_KIND: InsnKind = InsnKind::BGE;
}
#[cfg(feature = "u16limb_circuit")]
pub type BgeInstruction<E> = branch_circuit_v2::BranchCircuit<E, BgeOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type BgeInstruction<E> = branch_circuit::BranchCircuit<E, BgeOp>;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/arith.rs | ceno_zkvm/src/instructions/riscv/arith.rs | use std::marker::PhantomData;
use super::{RIVInstruction, constants::UInt, r_insn::RInstructionConfig};
use crate::{
circuit_builder::CircuitBuilder, e2e::ShardContext, error::ZKVMError,
instructions::Instruction, structs::ProgramParams, uint::Value, witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, StepRecord};
use ff_ext::ExtensionField;
/// This config handles R-Instructions that represent registers values as 2 * u16.
#[derive(Debug)]
pub struct ArithConfig<E: ExtensionField> {
    // Shared R-type plumbing: state in/out, fetch lookup, register reads/writes.
    r_insn: RInstructionConfig<E>,

    // Operand and result limbs (each 32-bit register value as 2 x u16).
    rs1_read: UInt<E>,
    rs2_read: UInt<E>,
    rd_written: UInt<E>,
}

/// ADD/SUB circuit, parameterized by the op marker type `I`.
pub struct ArithInstruction<E, I>(PhantomData<(E, I)>);

/// Marker type for the ADD opcode.
pub struct AddOp;
impl RIVInstruction for AddOp {
    const INST_KIND: InsnKind = InsnKind::ADD;
}
pub type AddInstruction<E> = ArithInstruction<E, AddOp>;

/// Marker type for the SUB opcode.
pub struct SubOp;
impl RIVInstruction for SubOp {
    const INST_KIND: InsnKind = InsnKind::SUB;
}
pub type SubInstruction<E> = ArithInstruction<E, SubOp>;
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for ArithInstruction<E, I> {
    type InstructionConfig = ArithConfig<E>;

    /// Circuit name derived from the opcode, e.g. "ADD" / "SUB".
    fn name() -> String {
        format!("{:?}", I::INST_KIND)
    }

    /// Lays out the limb-level add/sub constraints plus the shared R-type
    /// plumbing (state transition, fetch, register reads/writes).
    fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<Self::InstructionConfig, ZKVMError> {
        let (rs1_read, rs2_read, rd_written) = match I::INST_KIND {
            InsnKind::ADD => {
                // rd_written = rs1_read + rs2_read (wrapping; carries witnessed).
                let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?;
                let rs2_read = UInt::new_unchecked(|| "rs2_read", circuit_builder)?;
                let rd_written = rs1_read.add(|| "rd_written", circuit_builder, &rs2_read, true)?;
                (rs1_read, rs2_read, rd_written)
            }
            InsnKind::SUB => {
                // SUB is rewritten as an addition: rd_written + rs2_read = rs1_read.
                // rd_written is the new value to be updated in register so we need
                // to constrain its range (`UInt::new` vs the unchecked variants).
                let rd_written = UInt::new(|| "rd_written", circuit_builder)?;
                let rs2_read = UInt::new_unchecked(|| "rs2_read", circuit_builder)?;
                let rs1_read = rs2_read.clone().add(
                    || "rs1_read",
                    circuit_builder,
                    &rd_written.clone(),
                    true,
                )?;
                (rs1_read, rs2_read, rd_written)
            }
            _ => unreachable!("Unsupported instruction kind"),
        };

        // Wire the operand/result expressions into the common R-type constraints.
        let r_insn = RInstructionConfig::construct_circuit(
            circuit_builder,
            I::INST_KIND,
            rs1_read.register_expr(),
            rs2_read.register_expr(),
            rd_written.register_expr(),
        )?;

        Ok(ArithConfig {
            r_insn,
            rs1_read,
            rs2_read,
            rd_written,
        })
    }

    /// Fills one witness row from an executed `StepRecord`.
    fn assign_instance(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        instance: &mut [<E as ExtensionField>::BaseField],
        lk_multiplicity: &mut LkMultiplicity,
        step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        config
            .r_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;

        // rs2 is an operand on both the ADD and SUB paths; assign it once.
        let rs2_read = Value::new_unchecked(step.rs2().unwrap().value);
        config
            .rs2_read
            .assign_limbs(instance, rs2_read.as_u16_limbs());

        match I::INST_KIND {
            InsnKind::ADD => {
                // rs1_read + rs2_read = rd_written; carries are stored on rd_written.
                let rs1_read = Value::new_unchecked(step.rs1().unwrap().value);
                config
                    .rs1_read
                    .assign_limbs(instance, rs1_read.as_u16_limbs());
                let result = rs1_read.add(&rs2_read, lk_multiplicity, true);
                config.rd_written.assign_carries(instance, &result.carries);
            }
            InsnKind::SUB => {
                // rs1_read = rd_written + rs2_read; carries are stored on rs1_read.
                let rd_written = Value::new(step.rd().unwrap().value.after, lk_multiplicity);
                config
                    .rd_written
                    .assign_limbs(instance, rd_written.as_u16_limbs());
                let result = rs2_read.add(&rd_written, lk_multiplicity, true);
                config.rs1_read.assign_carries(instance, &result.carries);
            }
            _ => unreachable!("Unsupported instruction kind"),
        };
        Ok(())
    }
}
#[cfg(test)]
mod test {
    use ceno_emul::{Change, StepRecord, encode_rv32};
    use ff_ext::GoldilocksExt2;
    use gkr_iop::circuit_builder::DebugIndex;

    use super::*;
    use crate::{
        circuit_builder::{CircuitBuilder, ConstraintSystem},
        instructions::Instruction,
        scheme::mock_prover::{MOCK_PC_START, MockProver},
    };

    #[test]
    fn test_opcode_add() {
        verify::<AddOp>("basic", 11, 2);
        verify::<AddOp>("0 + 0", 0, 0);
        verify::<AddOp>("0 + 1", 0, 1);
        // carry across the low u16 limb
        verify::<AddOp>("u16::MAX", u16::MAX as u32, 2);
        // 32-bit wrap-around cases
        verify::<AddOp>("overflow: u32::MAX", u32::MAX - 1, 2);
        verify::<AddOp>("overflow: u32::MAX x 2", u32::MAX - 1, u32::MAX - 1);
    }

    #[test]
    fn test_opcode_sub() {
        verify::<SubOp>("basic", 11, 2);
        verify::<SubOp>("0 - 0", 0, 0);
        verify::<SubOp>("1 - 0", 1, 0);
        verify::<SubOp>("1 - 1", 1, 1);
        // borrow across the full width (wrapping semantics)
        verify::<SubOp>("underflow", 3, 11);
    }

    /// Builds the circuit, assigns one R-type step with the given operands,
    /// pins rd_written to the wrapping result, and mock-proves the witness.
    fn verify<I: RIVInstruction>(name: &'static str, rs1: u32, rs2: u32) {
        let mut cs = ConstraintSystem::<GoldilocksExt2>::new(|| "riscv");
        let mut cb = CircuitBuilder::new(&mut cs);
        let config = cb
            .namespace(
                || format!("{:?}_({name})", I::INST_KIND),
                |cb| {
                    Ok(ArithInstruction::<GoldilocksExt2, I>::construct_circuit(
                        cb,
                        &ProgramParams::default(),
                    ))
                },
            )
            .unwrap()
            .unwrap();

        // Expected architectural result (wrapping 32-bit arithmetic).
        let outcome = match I::INST_KIND {
            InsnKind::ADD => rs1.wrapping_add(rs2),
            InsnKind::SUB => rs1.wrapping_sub(rs2),
            _ => unreachable!("Unsupported instruction kind"),
        };

        // values assignment
        let insn_code = encode_rv32(I::INST_KIND, 2, 3, 4, 0);
        let (raw_witin, lkm) = ArithInstruction::<GoldilocksExt2, I>::assign_instances(
            &config,
            &mut ShardContext::default(),
            cb.cs.num_witin as usize,
            cb.cs.num_structural_witin as usize,
            vec![&StepRecord::new_r_instruction(
                3,
                MOCK_PC_START,
                insn_code,
                rs1,
                rs2,
                Change::new(0, outcome),
                0,
            )],
        )
        .unwrap();

        // verify rd_written matches the expected outcome limb-by-limb
        let expected_rd_written =
            UInt::from_const_unchecked(Value::new_unchecked(outcome).as_u16_limbs().to_vec());
        let rd_written_expr = cb.get_debug_expr(DebugIndex::RdWrite as usize)[0].clone();
        cb.require_equal(
            || "assert_rd_written",
            rd_written_expr,
            expected_rd_written.value(),
        )
        .unwrap();

        MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/auipc.rs | ceno_zkvm/src/instructions/riscv/auipc.rs | use ff_ext::{ExtensionField, FieldInto};
use itertools::izip;
use std::marker::PhantomData;
use crate::{
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
constants::{PC_BITS, UINT_BYTE_LIMBS, UInt8},
i_insn::IInstructionConfig,
},
},
structs::ProgramParams,
tables::InsnRecord,
utils::split_to_u8,
witness::LkMultiplicity,
};
use ceno_emul::InsnKind;
use gkr_iop::tables::{LookupTable, ops::XorTable};
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::{Field, FieldAlgebra};
use witness::set_val;
/// Witness layout for AUIPC (rd = pc + upper immediate, over u8 limbs).
pub struct AuipcConfig<E: ExtensionField> {
    // Shared I-type plumbing: state in/out, fetch lookup, register access.
    pub i_insn: IInstructionConfig<E>,
    // The limbs of the immediate except the least significant limb since it is always 0
    pub imm_limbs: [WitIn; UINT_BYTE_LIMBS - 1],
    // The limbs of the PC except the most significant and the least significant limbs
    // (the LS limb is shared with rd_written[0]; the MS limb is derived in-circuit).
    pub pc_limbs: [WitIn; UINT_BYTE_LIMBS - 2],
    // rd value decomposed into range-checked u8 limbs.
    pub rd_written: UInt8<E>,
}

/// AUIPC circuit entry point.
pub struct AuipcInstruction<E>(PhantomData<E>);
impl<E: ExtensionField> Instruction<E> for AuipcInstruction<E> {
    type InstructionConfig = AuipcConfig<E>;

    fn name() -> String {
        format!("{:?}", InsnKind::AUIPC)
    }

    /// Builds the AUIPC constraints: recompose imm and pc from u8 limbs,
    /// enforce the limb-wise addition rd = pc + imm with binary carries,
    /// and range-check the derived most significant PC limb.
    fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<AuipcConfig<E>, ZKVMError> {
        let rd_written = UInt8::<E>::new(|| "rd_written", circuit_builder)?;
        let rd_exprs = rd_written.expr();

        // TODO: use double u8 for these limbs
        let pc_limbs = std::array::from_fn(|i| {
            circuit_builder
                .create_u8(|| format!("pc_limbs_{}", i))
                .unwrap()
        });
        let imm_limbs = std::array::from_fn(|i| {
            circuit_builder
                .create_u8(|| format!("imm_limbs_{}", i))
                .unwrap()
        });

        // Recompose the immediate from its witnessed limbs (little endian).
        let imm = imm_limbs
            .iter()
            .enumerate()
            .fold(E::BaseField::ZERO.expr(), |acc, (i, &val)| {
                acc + val.expr()
                    * E::BaseField::from_canonical_u32(1 << (i * UInt8::<E>::LIMB_BITS)).expr()
            });

        let i_insn = IInstructionConfig::<E>::construct_circuit(
            circuit_builder,
            InsnKind::AUIPC,
            imm.expr(),
            0.into(),
            [0.into(), 0.into()],
            UInt8::from_exprs_unchecked(rd_exprs.clone()).register_expr(),
            false,
        )?;

        // Partial PC recomposition: rd_exprs[0] serves as PC's least
        // significant limb, followed by the witnessed middle limbs.
        let intermed_val = rd_exprs[0].expr()
            + pc_limbs
                .iter()
                .enumerate()
                .fold(E::BaseField::ZERO.expr(), |acc, (i, val)| {
                    acc + val.expr()
                        * E::BaseField::from_canonical_u32(1 << ((i + 1) * UInt8::<E>::LIMB_BITS))
                            .expr()
                });

        // Compute the most significant limb of PC
        let pc_msl = (i_insn.vm_state.pc.expr() - intermed_val.expr())
            * (E::BaseField::from_canonical_usize(
                1 << (UInt8::<E>::LIMB_BITS * (UINT_BYTE_LIMBS - 1)),
            )
            .inverse())
            .expr();

        // The vector pc_limbs contains the actual limbs of PC in little endian order
        let pc_limbs_expr = [rd_exprs[0].expr()]
            .into_iter()
            .chain(pc_limbs.iter().map(|w| w.expr()))
            .map(|x| x.expr())
            .chain([pc_msl.expr()])
            .collect::<Vec<_>>();
        assert_eq!(pc_limbs_expr.len(), UINT_BYTE_LIMBS);

        // Range check the most significant limb of pc to be in [0, 2^{PC_BITS-(RV32_REGISTER_NUM_LIMBS-1)*RV32_CELL_BITS})
        // via the XOR table: x XOR mask == x + mask iff x and mask share no set bits.
        let last_limb_bits = PC_BITS - UInt8::<E>::LIMB_BITS * (UINT_BYTE_LIMBS - 1);
        let additional_bits =
            (last_limb_bits..UInt8::<E>::LIMB_BITS).fold(0, |acc, x| acc + (1 << x));
        let additional_bits = E::BaseField::from_canonical_u32(additional_bits);
        circuit_builder.logic_u8(
            LookupTable::Xor,
            pc_limbs_expr[3].expr(),
            additional_bits.expr(),
            pc_limbs_expr[3].expr() + additional_bits.expr(),
        )?;

        let mut carry: [Expression<E>; UINT_BYTE_LIMBS] =
            std::array::from_fn(|_| E::BaseField::ZERO.expr());
        let carry_divide = E::BaseField::from_canonical_usize(1 << UInt8::<E>::LIMB_BITS)
            .inverse()
            .expr();

        // Don't need to constrain the least significant limb of the addition
        // since we already know that rd_data[0] = pc_limbs[0] and the least significant limb of imm is 0
        // Note: imm_limbs doesn't include the least significant limb so imm_limbs[i - 1] means the i-th limb of imm
        for i in 1..UINT_BYTE_LIMBS {
            carry[i] = carry_divide.expr()
                * (pc_limbs_expr[i].expr() + imm_limbs[i - 1].expr() - rd_exprs[i].expr()
                    + carry[i - 1].expr());
            // carry[i] * 2^(UInt8::LIMB_BITS) + rd_exprs[i].expr() = pc_limbs_expr[i] + imm_limbs[i].expr() + carry[i - 1].expr()
            circuit_builder.assert_bit(|| format!("carry_bit_{i}"), carry[i].expr())?;
        }

        Ok(AuipcConfig {
            i_insn,
            imm_limbs,
            pc_limbs,
            rd_written,
        })
    }

    /// Fills one witness row from the trace and bumps the range/XOR lookup
    /// multiplicities that `construct_circuit` consumes.
    fn assign_instance(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        instance: &mut [E::BaseField],
        lk_multiplicity: &mut LkMultiplicity,
        step: &ceno_emul::StepRecord,
    ) -> Result<(), ZKVMError> {
        config
            .i_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;

        let rd_written = split_to_u8(step.rd().unwrap().value.after);
        config.rd_written.assign_limbs(instance, &rd_written);
        // rd limbs are range-checked pairwise (double-u8 table), with a
        // single-u8 check for an odd trailing limb.
        for chunk in rd_written.chunks(2) {
            if chunk.len() == 2 {
                lk_multiplicity.assert_double_u8(chunk[0] as u64, chunk[1] as u64)
            } else {
                lk_multiplicity.assert_const_range(chunk[0] as u64, 8);
            }
        }

        // Middle PC limbs (LS limb comes from rd_written[0], MS limb is derived).
        let pc = split_to_u8(step.pc().before.0);
        for (val, witin) in izip!(pc.iter().skip(1), config.pc_limbs) {
            lk_multiplicity.assert_ux::<8>(*val as u64);
            set_val!(instance, witin, E::BaseField::from_canonical_u8(*val));
        }

        let imm = InsnRecord::<E::BaseField>::imm_internal(&step.insn()).0 as u32;
        let imm = split_to_u8(imm);
        for (val, witin) in izip!(imm.iter(), config.imm_limbs) {
            lk_multiplicity.assert_ux::<8>(*val as u64);
            set_val!(instance, witin, E::BaseField::from_canonical_u8(*val));
        }

        // constrain pc msb limb range via xor
        let last_limb_bits = PC_BITS - UInt8::<E>::LIMB_BITS * (UINT_BYTE_LIMBS - 1);
        let additional_bits =
            (last_limb_bits..UInt8::<E>::LIMB_BITS).fold(0, |acc, x| acc + (1 << x));
        lk_multiplicity.logic_u8::<XorTable>(pc[3] as u64, additional_bits as u64);
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    use ceno_emul::{Change, InsnKind, PC_STEP_SIZE, StepRecord, encode_rv32};
    use ff_ext::{BabyBearExt4, ExtensionField, GoldilocksExt2};
    use gkr_iop::circuit_builder::DebugIndex;

    use crate::{
        Value,
        circuit_builder::{CircuitBuilder, ConstraintSystem},
        e2e::ShardContext,
        instructions::{
            Instruction,
            riscv::{auipc::AuipcInstruction, constants::UInt},
        },
        scheme::mock_prover::{MOCK_PC_START, MockProver},
        structs::ProgramParams,
    };

    #[test]
    fn test_auipc() {
        // 20-bit upper-immediate values; each is shifted left by 12 before
        // being added to PC, matching AUIPC encoding.
        let cases = vec![
            // zero and smallest nonzero imm
            0, 0x1,
            // imm = -1 -> all 1's in the 20-bit imm field; rd = PC - 0x1000
            -1i32, 0x12345, // imm = 0x12345
            // max positive imm
            0xfffff,
        ];
        for imm in &cases {
            test_opcode_auipc::<GoldilocksExt2>(
                MOCK_PC_START.0.wrapping_add((*imm as u32) << 12),
                imm << 12,
            );
            #[cfg(feature = "u16limb_circuit")]
            test_opcode_auipc::<BabyBearExt4>(
                MOCK_PC_START.0.wrapping_add((*imm as u32) << 12),
                imm << 12,
            );
        }
    }

    /// Builds the AUIPC circuit, assigns one step with the given rd/imm,
    /// pins rd_written, and mock-proves the witness.
    fn test_opcode_auipc<E: ExtensionField>(rd: u32, imm: i32) {
        let mut cs = ConstraintSystem::<E>::new(|| "riscv");
        let mut cb = CircuitBuilder::new(&mut cs);
        let config = cb
            .namespace(
                || "auipc",
                |cb| {
                    let config =
                        AuipcInstruction::<E>::construct_circuit(cb, &ProgramParams::default());
                    Ok(config)
                },
            )
            .unwrap()
            .unwrap();

        let insn_code = encode_rv32(InsnKind::AUIPC, 0, 0, 4, imm);
        let (raw_witin, lkm) = AuipcInstruction::<E>::assign_instances(
            &config,
            &mut ShardContext::default(),
            cb.cs.num_witin as usize,
            cb.cs.num_structural_witin as usize,
            vec![&StepRecord::new_i_instruction(
                3,
                Change::new(MOCK_PC_START, MOCK_PC_START + PC_STEP_SIZE),
                insn_code,
                0,
                Change::new(0, rd),
                0,
            )],
        )
        .unwrap();

        // verify rd_written matches the expected value
        let expected_rd_written =
            UInt::from_const_unchecked(Value::new_unchecked(rd).as_u16_limbs().to_vec());
        let rd_written_expr = cb.get_debug_expr(DebugIndex::RdWrite as usize)[0].clone();
        cb.require_equal(
            || "assert_rd_written",
            rd_written_expr,
            expected_rd_written.value(),
        )
        .unwrap();

        MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/im_insn.rs | ceno_zkvm/src/instructions/riscv/im_insn.rs | use crate::{
chip_handler::{AddressExpr, MemoryExpr, RegisterExpr, general::InstFetch},
circuit_builder::CircuitBuilder,
error::ZKVMError,
instructions::riscv::insn_base::{ReadMEM, ReadRS1, StateInOut, WriteRD},
tables::InsnRecord,
witness::LkMultiplicity,
};
use crate::e2e::ShardContext;
use ceno_emul::{InsnKind, StepRecord};
use ff_ext::ExtensionField;
use multilinear_extensions::{Expression, ToExpr};
/// This config handles the common part of I-type Instructions (memory variant):
/// - PC, cycle, fetch
/// - Register reads and writes
/// - Memory reads
pub struct IMInstructionConfig<E: ExtensionField> {
    // PC/timestamp transition for this step.
    vm_state: StateInOut<E>,
    // Source register (rs1) read.
    rs1: ReadRS1<E>,
    // Destination register (rd) write.
    rd: WriteRD<E>,
    // Single memory read at the caller-supplied address expression.
    mem_read: ReadMEM<E>,
}
impl<E: ExtensionField> IMInstructionConfig<E> {
    /// Builds the shared memory-variant I-type constraints: state transition,
    /// rs1 read, rd write, one memory read, and the instruction-fetch lookup
    /// (the rs2 field of the fetched record is fixed to 0).
    #[allow(clippy::too_many_arguments)]
    pub fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        insn_kind: InsnKind,
        imm: &Expression<E>,
        #[cfg(feature = "u16limb_circuit")] imm_sign: &Expression<E>,
        rs1_read: RegisterExpr<E>,
        memory_read: MemoryExpr<E>,
        memory_addr: AddressExpr<E>,
        rd_written: RegisterExpr<E>,
    ) -> Result<Self, ZKVMError> {
        let vm_state = StateInOut::construct_circuit(circuit_builder, false)?;

        // Registers
        let rs1 = ReadRS1::construct_circuit(circuit_builder, rs1_read, vm_state.ts)?;
        let rd = WriteRD::construct_circuit(circuit_builder, rd_written, vm_state.ts)?;

        // Memory
        let mem_read =
            ReadMEM::construct_circuit(circuit_builder, memory_addr, memory_read, vm_state.ts)?;

        // Fetch the instruction
        circuit_builder.lk_fetch(&InsnRecord::new(
            vm_state.pc.expr(),
            insn_kind.into(),
            Some(rd.id.expr()),
            rs1.id.expr(),
            0.into(),
            imm.clone(),
            #[cfg(feature = "u16limb_circuit")]
            imm_sign.expr(),
        ))?;

        Ok(IMInstructionConfig {
            vm_state,
            rs1,
            rd,
            mem_read,
        })
    }

    /// Fills the shared witness columns for one step and bumps the fetch
    /// lookup multiplicity.
    pub fn assign_instance(
        &self,
        instance: &mut [<E as ExtensionField>::BaseField],
        shard_ctx: &mut ShardContext,
        lk_multiplicity: &mut LkMultiplicity,
        step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        self.vm_state.assign_instance(instance, shard_ctx, step)?;
        self.rs1
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        self.rd
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        self.mem_read
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;

        // Fetch instruction
        lk_multiplicity.fetch(step.pc().before.0);
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall.rs | ceno_zkvm/src/instructions/riscv/ecall.rs | mod fptower_fp;
mod fptower_fp2_add;
mod fptower_fp2_mul;
mod halt;
mod keccak;
mod uint256;
mod weierstrass_add;
mod weierstrass_decompress;
mod weierstrass_double;
pub use fptower_fp::{FpAddInstruction, FpMulInstruction};
pub use fptower_fp2_add::Fp2AddInstruction;
pub use fptower_fp2_mul::Fp2MulInstruction;
pub use keccak::KeccakInstruction;
pub use uint256::{Secp256k1InvInstruction, Uint256MulInstruction};
pub use weierstrass_add::WeierstrassAddAssignInstruction;
pub use weierstrass_decompress::WeierstrassDecompressInstruction;
pub use weierstrass_double::WeierstrassDoubleAssignInstruction;
use ceno_emul::InsnKind;
pub use halt::HaltInstruction;
use super::{RIVInstruction, dummy::DummyInstruction};
/// Marker type for the ECALL opcode.
pub struct EcallOp;
impl RIVInstruction for EcallOp {
    const INST_KIND: InsnKind = InsnKind::ECALL;
}

/// Unsafe. A dummy ecall circuit that ignores unimplemented functions.
pub type EcallDummy<E> = DummyInstruction<E, EcallOp>;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/r_insn.rs | ceno_zkvm/src/instructions/riscv/r_insn.rs | use ceno_emul::{InsnKind, StepRecord};
use ff_ext::ExtensionField;
use crate::{
chip_handler::{RegisterExpr, general::InstFetch},
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::riscv::insn_base::{ReadRS1, ReadRS2, StateInOut, WriteRD},
tables::InsnRecord,
witness::LkMultiplicity,
};
use multilinear_extensions::ToExpr;
/// This config handles the common part of R-type instructions:
/// - PC, cycle, fetch.
/// - Registers read and write.
///
/// It does not witness of the register values, nor the actual function (e.g. add, sub, etc).
#[derive(Debug)]
pub struct RInstructionConfig<E: ExtensionField> {
    // PC/timestamp transition for this step.
    pub vm_state: StateInOut<E>,
    // Source register reads.
    pub rs1: ReadRS1<E>,
    pub rs2: ReadRS2<E>,
    // Destination register write.
    pub rd: WriteRD<E>,
}
impl<E: ExtensionField> RInstructionConfig<E> {
    /// Builds the shared R-type constraints: state transition, rs1/rs2 reads,
    /// rd write, and the instruction-fetch lookup (R-type has no immediate,
    /// so the imm field of the fetched record is fixed to 0).
    pub fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        insn_kind: InsnKind,
        rs1_read: RegisterExpr<E>,
        rs2_read: RegisterExpr<E>,
        rd_written: RegisterExpr<E>,
    ) -> Result<Self, ZKVMError> {
        // State in and out
        let vm_state = StateInOut::construct_circuit(circuit_builder, false)?;

        // Registers
        let rs1 = ReadRS1::construct_circuit(circuit_builder, rs1_read, vm_state.ts)?;
        let rs2 = ReadRS2::construct_circuit(circuit_builder, rs2_read, vm_state.ts)?;
        let rd = WriteRD::construct_circuit(circuit_builder, rd_written, vm_state.ts)?;

        // Fetch instruction
        circuit_builder.lk_fetch(&InsnRecord::new(
            vm_state.pc.expr(),
            insn_kind.into(),
            Some(rd.id.expr()),
            rs1.id.expr(),
            rs2.id.expr(),
            0.into(),
            #[cfg(feature = "u16limb_circuit")]
            0.into(),
        ))?;

        Ok(RInstructionConfig {
            vm_state,
            rs1,
            rs2,
            rd,
        })
    }

    /// Fills the shared witness columns for one step and bumps the fetch
    /// lookup multiplicity.
    pub fn assign_instance(
        &self,
        instance: &mut [<E as ExtensionField>::BaseField],
        shard_ctx: &mut ShardContext,
        lk_multiplicity: &mut LkMultiplicity,
        step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        self.vm_state.assign_instance(instance, shard_ctx, step)?;
        self.rs1
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        self.rs2
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        self.rd
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;

        // Fetch instruction
        lk_multiplicity.fetch(step.pc().before.0);
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall_base.rs | ceno_zkvm/src/instructions/riscv/ecall_base.rs | use ceno_emul::{Cycle, WriteOp};
use ff_ext::{ExtensionField, FieldInto};
use p3::field::FieldAlgebra;
use witness::set_val;
use super::constants::UInt;
use crate::{
chip_handler::{RegisterChipOperations, RegisterExpr},
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::AssertLtConfig,
structs::RAMType,
uint::Value,
witness::LkMultiplicity,
};
use ceno_emul::FullTracer as Tracer;
use multilinear_extensions::{ToExpr, WitIn};
/// Access to a register with a compile-time-fixed id `REG_ID`.
/// `RW == true` models a write (rd-style); `RW == false` a read (rs-style).
#[derive(Debug)]
pub struct OpFixedRS<E: ExtensionField, const REG_ID: usize, const RW: bool> {
    // Timestamp of the previous access to this register.
    pub prev_ts: WitIn,
    // Previous register value; only witnessed on the write path (RW == true).
    pub prev_value: Option<UInt<E>>,
    // Enforces prev_ts < current access timestamp.
    pub lt_cfg: AssertLtConfig,
}
impl<E: ExtensionField, const REG_ID: usize, const RW: bool> OpFixedRS<E, REG_ID, RW> {
    /// Lays out the register read/write constraint against the fixed REG_ID.
    pub fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        rd_written: RegisterExpr<E>,
        cur_ts: WitIn,
    ) -> Result<Self, ZKVMError> {
        let prev_ts = circuit_builder.create_witin(|| "prev_rd_ts");
        let (prev_value, lt_cfg) = if RW {
            // Write path: witness the previous value and timestamp the access
            // at the RD subcycle.
            let prev_value = UInt::new_unchecked(|| "prev_rd_value", circuit_builder)?;
            let (_, lt_cfg) = circuit_builder.register_write(
                || "write_rd",
                E::BaseField::from_canonical_u64(REG_ID as u64),
                prev_ts.expr(),
                cur_ts.expr() + Tracer::SUBCYCLE_RD,
                prev_value.register_expr(),
                rd_written,
            )?;
            (Some(prev_value), lt_cfg)
        } else {
            // Read path: no previous value is needed.
            let (_, lt_cfg) = circuit_builder.register_read(
                || "read_rs",
                E::BaseField::from_canonical_u64(REG_ID as u64),
                prev_ts.expr(),
                // share same ts with RS1
                cur_ts.expr() + Tracer::SUBCYCLE_RS1,
                rd_written,
            )?;
            (None, lt_cfg)
        };
        Ok(Self {
            prev_ts,
            prev_value,
            lt_cfg,
        })
    }

    /// Fills the witness for one access from the trace's `WriteOp`, and
    /// forwards the access to the shard context for cross-shard bookkeeping.
    /// Timestamps are rebased to the current shard before assignment.
    pub fn assign_op(
        &self,
        instance: &mut [E::BaseField],
        shard_ctx: &mut ShardContext,
        lk_multiplicity: &mut LkMultiplicity,
        cycle: Cycle,
        op: &WriteOp,
    ) -> Result<(), ZKVMError> {
        let shard_prev_cycle = shard_ctx.aligned_prev_ts(op.previous_cycle);
        let current_shard_offset_cycle = shard_ctx.current_shard_offset_cycle();
        let shard_cycle = cycle - current_shard_offset_cycle;
        set_val!(instance, self.prev_ts, shard_prev_cycle);

        // Register state
        if let Some(prev_value) = self.prev_value.as_ref() {
            prev_value.assign_limbs(
                instance,
                Value::new_unchecked(op.value.before).as_u16_limbs(),
            );
        }

        // Offset both cycles by the same subcycle used in construct_circuit.
        let (shard_cycle, cycle) = if RW {
            (
                shard_cycle + Tracer::SUBCYCLE_RD,
                cycle + Tracer::SUBCYCLE_RD,
            )
        } else {
            (
                shard_cycle + Tracer::SUBCYCLE_RS1,
                cycle + Tracer::SUBCYCLE_RS1,
            )
        };

        // Register write
        self.lt_cfg
            .assign_instance(instance, lk_multiplicity, shard_prev_cycle, shard_cycle)?;

        shard_ctx.send(
            RAMType::Register,
            op.addr,
            REG_ID as u64,
            cycle,
            op.previous_cycle,
            op.value.after,
            None,
        );
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/memory.rs | ceno_zkvm/src/instructions/riscv/memory.rs | mod gadget;
#[cfg(not(feature = "u16limb_circuit"))]
pub mod load;
#[cfg(not(feature = "u16limb_circuit"))]
pub mod store;
#[cfg(feature = "u16limb_circuit")]
mod load_v2;
#[cfg(feature = "u16limb_circuit")]
mod store_v2;
#[cfg(test)]
mod test;
use crate::instructions::riscv::RIVInstruction;
#[cfg(not(feature = "u16limb_circuit"))]
pub use crate::instructions::riscv::memory::load::LoadInstruction;
#[cfg(feature = "u16limb_circuit")]
pub use crate::instructions::riscv::memory::load_v2::LoadInstruction;
#[cfg(not(feature = "u16limb_circuit"))]
pub use crate::instructions::riscv::memory::store::StoreInstruction;
#[cfg(feature = "u16limb_circuit")]
pub use crate::instructions::riscv::memory::store_v2::StoreInstruction;
use ceno_emul::InsnKind;
// Marker types binding each load/store opcode to the shared circuits.
// Loads share `LoadInstruction`; stores share `StoreInstruction`, whose
// const parameter selects the access width.
// NOTE(review): the store width parameter appears to be log2(bytes)
// (SW=2, SH=1, SB=0) — confirm against StoreInstruction's definition.

/// LW: load word.
pub struct LwOp;
impl RIVInstruction for LwOp {
    const INST_KIND: InsnKind = InsnKind::LW;
}
pub type LwInstruction<E> = LoadInstruction<E, LwOp>;

/// LH: load halfword (sign-extended).
pub struct LhOp;
impl RIVInstruction for LhOp {
    const INST_KIND: InsnKind = InsnKind::LH;
}
pub type LhInstruction<E> = LoadInstruction<E, LhOp>;

/// LHU: load halfword (zero-extended).
pub struct LhuOp;
impl RIVInstruction for LhuOp {
    const INST_KIND: InsnKind = InsnKind::LHU;
}
pub type LhuInstruction<E> = LoadInstruction<E, LhuOp>;

/// LB: load byte (sign-extended).
pub struct LbOp;
impl RIVInstruction for LbOp {
    const INST_KIND: InsnKind = InsnKind::LB;
}
pub type LbInstruction<E> = LoadInstruction<E, LbOp>;

/// LBU: load byte (zero-extended).
pub struct LbuOp;
impl RIVInstruction for LbuOp {
    const INST_KIND: InsnKind = InsnKind::LBU;
}
pub type LbuInstruction<E> = LoadInstruction<E, LbuOp>;

/// SW: store word.
pub struct SWOp;
impl RIVInstruction for SWOp {
    const INST_KIND: InsnKind = InsnKind::SW;
}
pub type SwInstruction<E> = StoreInstruction<E, SWOp, 2>;

/// SH: store halfword.
pub struct SHOp;
impl RIVInstruction for SHOp {
    const INST_KIND: InsnKind = InsnKind::SH;
}
pub type ShInstruction<E> = StoreInstruction<E, SHOp, 1>;

/// SB: store byte.
pub struct SBOp;
impl RIVInstruction for SBOp {
    const INST_KIND: InsnKind = InsnKind::SB;
}
pub type SbInstruction<E> = StoreInstruction<E, SBOp, 0>;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/jump.rs | ceno_zkvm/src/instructions/riscv/jump.rs | #[cfg(not(feature = "u16limb_circuit"))]
mod jal;
#[cfg(feature = "u16limb_circuit")]
mod jal_v2;
#[cfg(not(feature = "u16limb_circuit"))]
mod jalr;
#[cfg(feature = "u16limb_circuit")]
mod jalr_v2;
#[cfg(not(feature = "u16limb_circuit"))]
pub use jal::JalInstruction;
#[cfg(feature = "u16limb_circuit")]
pub use jal_v2::JalInstruction;
#[cfg(not(feature = "u16limb_circuit"))]
pub use jalr::JalrInstruction;
#[cfg(feature = "u16limb_circuit")]
pub use jalr_v2::JalrInstruction;
#[cfg(test)]
mod test;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/test_utils.rs | ceno_zkvm/src/instructions/riscv/test_utils.rs | use proptest::{
prelude::any,
prop_oneof,
strategy::{Just, Strategy},
};
/// Sign-extends the low `bits` bits of `imm` and returns the raw bit
/// pattern as a `u32` (how a `bits`-wide immediate appears in a register).
fn imm_with_max_valid_bits(imm: i32, bits: u32) -> u32 {
    // Shift the kept field up to the top of the word, then arithmetically
    // shift it back down: the field's own sign bit is replicated across
    // the upper `32 - bits` positions.
    let discard = 32 - bits;
    ((imm << discard) >> discard) as u32
}
/// Proptest strategy producing "interesting" u32 values: the boundary
/// constants 0, 1, u32::MAX and the bit patterns of i32::MIN / i32::MAX,
/// mixed with fully arbitrary values.
#[allow(clippy::cast_sign_loss)]
pub fn u32_extra() -> impl Strategy<Value = u32> {
    prop_oneof![
        Just(0_u32),
        Just(1_u32),
        Just(u32::MAX),
        any::<u32>(),
        Just(i32::MIN as u32),
        Just(i32::MAX as u32),
    ]
}
/// Same value distribution as `u32_extra`, reinterpreted as i32 bit patterns.
#[allow(clippy::cast_possible_wrap)]
pub fn i32_extra() -> impl Strategy<Value = i32> {
    u32_extra().prop_map(|x| x as i32)
}

/// Signed immediates representable in `bits` bits (sign-extended to i32).
pub fn imm_extra(bits: u32) -> impl Strategy<Value = i32> {
    i32_extra().prop_map(move |x| imm_with_max_valid_bits(x, bits) as i32)
}

/// Like `imm_extra`, but yields the raw sign-extended bit pattern as u32.
pub fn immu_extra(bits: u32) -> impl Strategy<Value = u32> {
    i32_extra().prop_map(move |x| imm_with_max_valid_bits(x, bits))
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/shift_imm.rs | ceno_zkvm/src/instructions/riscv/shift_imm.rs | #[cfg(not(feature = "u16limb_circuit"))]
mod shift_imm_circuit;
use super::RIVInstruction;
use ceno_emul::InsnKind;
#[cfg(feature = "u16limb_circuit")]
use crate::instructions::riscv::shift::shift_circuit_v2::ShiftImmInstruction;
#[cfg(not(feature = "u16limb_circuit"))]
use crate::instructions::riscv::shift_imm::shift_imm_circuit::ShiftImmInstruction;
// Marker types binding each immediate-shift opcode to the shared
// ShiftImmInstruction circuit (v1 or v2, selected by `u16limb_circuit`).
// All three impls use the imported `InsnKind` consistently (previously
// SraiOp/SrliOp mixed in fully-qualified `ceno_emul::InsnKind` paths).

/// SLLI: logical left shift by immediate.
pub struct SlliOp;
impl RIVInstruction for SlliOp {
    const INST_KIND: InsnKind = InsnKind::SLLI;
}
pub type SlliInstruction<E> = ShiftImmInstruction<E, SlliOp>;

/// SRAI: arithmetic right shift by immediate.
pub struct SraiOp;
impl RIVInstruction for SraiOp {
    const INST_KIND: InsnKind = InsnKind::SRAI;
}
pub type SraiInstruction<E> = ShiftImmInstruction<E, SraiOp>;

/// SRLI: logical right shift by immediate.
pub struct SrliOp;
impl RIVInstruction for SrliOp {
    const INST_KIND: InsnKind = InsnKind::SRLI;
}
pub type SrliInstruction<E> = ShiftImmInstruction<E, SrliOp>;
#[cfg(test)]
mod test {
    //! Tests for the shift-immediate circuits (SLLI / SRAI / SRLI): build the
    //! circuit, pin its rd output to the expected constant, assign one step
    //! record, and check everything with the mock prover.
    use ceno_emul::{Change, InsnKind, PC_STEP_SIZE, StepRecord, encode_rv32u};
    use ff_ext::{ExtensionField, GoldilocksExt2};
    use super::{ShiftImmInstruction, SlliOp, SraiOp, SrliOp};
    #[cfg(not(feature = "u16limb_circuit"))]
    use crate::Value;
    #[cfg(not(feature = "u16limb_circuit"))]
    use crate::instructions::riscv::constants::UInt;
    #[cfg(feature = "u16limb_circuit")]
    use crate::instructions::riscv::constants::UInt8;
    #[cfg(feature = "u16limb_circuit")]
    use crate::utils::split_to_u8;
    use crate::{
        circuit_builder::{CircuitBuilder, ConstraintSystem},
        e2e::ShardContext,
        instructions::{Instruction, riscv::RIVInstruction},
        scheme::mock_prover::{MOCK_PC_START, MockProver},
        structs::ProgramParams,
    };
    #[cfg(feature = "u16limb_circuit")]
    use ff_ext::BabyBearExt4;
    #[test]
    fn test_opcode_slli() {
        // (case name, rs1 value, shift amount, expected rd)
        let cases = [
            // imm = 3
            ("32 << 3", 32, 3, 32 << 3),
            ("33 << 3", 33, 3, 33 << 3),
            // imm = 31
            ("32 << 31", 32, 31, 32 << 31),
            ("33 << 31", 33, 31, 33 << 31),
        ];
        for (name, lhs, imm, expected) in cases {
            verify::<GoldilocksExt2, SlliOp>(name, lhs, imm, expected);
            // the u16-limb (v2) circuit is additionally exercised over BabyBear
            #[cfg(feature = "u16limb_circuit")]
            verify::<BabyBearExt4, SlliOp>(name, lhs, imm, expected);
        }
    }
    #[test]
    fn test_opcode_srai() {
        let cases = [
            // positive rs1
            ("32 >> 3", 32, 3, 32 >> 3),
            ("33 >> 3", 33, 3, 33 >> 3),
            ("32 >> 31", 32, 31, 32 >> 31),
            ("33 >> 31", 33, 31, 33 >> 31),
            // negative rs1 (sign bit must be replicated by the arithmetic shift)
            ("-32 >> 3", (-32_i32) as u32, 3, (-32_i32 >> 3) as u32),
            ("-33 >> 3", (-33_i32) as u32, 3, (-33_i32 >> 3) as u32),
            ("-32 >> 31", (-32_i32) as u32, 31, (-32_i32 >> 31) as u32),
            ("-33 >> 31", (-33_i32) as u32, 31, (-33_i32 >> 31) as u32),
        ];
        for (name, lhs, imm, expected) in cases {
            verify::<GoldilocksExt2, SraiOp>(name, lhs, imm, expected);
            #[cfg(feature = "u16limb_circuit")]
            verify::<BabyBearExt4, SraiOp>(name, lhs, imm, expected);
        }
    }
    #[test]
    fn test_opcode_srli() {
        let cases = [
            // imm = 3
            ("32 >> 3", 32, 3, 32 >> 3),
            ("33 >> 3", 33, 3, 33 >> 3),
            // imm = 31
            ("32 >> 31", 32, 31, 32 >> 31),
            ("33 >> 31", 33, 31, 33 >> 31),
            // rs1 top bit is 1 (logical shift must NOT sign-extend)
            ("-32 >> 3", (-32_i32) as u32, 3, ((-32_i32) as u32) >> 3),
        ];
        for (name, lhs, imm, expected) in cases {
            verify::<GoldilocksExt2, SrliOp>(name, lhs, imm, expected);
            #[cfg(feature = "u16limb_circuit")]
            verify::<BabyBearExt4, SrliOp>(name, lhs, imm, expected);
        }
    }
    /// Builds the circuit for `I`, constrains `rd_written` to equal
    /// `expected_rd_written`, assigns one I-type step, and runs the mock prover.
    fn verify<E: ExtensionField, I: RIVInstruction>(
        name: &'static str,
        rs1_read: u32,
        imm: u32,
        expected_rd_written: u32,
    ) {
        let mut cs = ConstraintSystem::<E>::new(|| "riscv");
        let mut cb = CircuitBuilder::new(&mut cs);
        // per-opcode: label, encoded instruction word, and reference rd value
        let (prefix, insn_code, rd_written) = match I::INST_KIND {
            InsnKind::SLLI => (
                "SLLI",
                encode_rv32u(InsnKind::SLLI, 2, 0, 4, imm),
                rs1_read << imm,
            ),
            InsnKind::SRAI => (
                "SRAI",
                encode_rv32u(InsnKind::SRAI, 2, 0, 4, imm),
                (rs1_read as i32 >> imm as i32) as u32,
            ),
            InsnKind::SRLI => (
                "SRLI",
                encode_rv32u(InsnKind::SRLI, 2, 0, 4, imm),
                rs1_read >> imm,
            ),
            _ => unreachable!(),
        };
        let config = cb
            .namespace(
                || format!("{prefix}_({name})"),
                |cb| {
                    let config = ShiftImmInstruction::<E, I>::construct_circuit(
                        cb,
                        &ProgramParams::default(),
                    );
                    Ok(config)
                },
            )
            .unwrap()
            .unwrap();
        // pin the circuit's rd output to the expected constant; the limb
        // representation differs between the legacy (u16) and v2 (u8) circuits
        config
            .rd_written
            .require_equal(
                || format!("{prefix}_({name})_assert_rd_written"),
                &mut cb,
                #[cfg(not(feature = "u16limb_circuit"))]
                &UInt::from_const_unchecked(
                    Value::new_unchecked(expected_rd_written)
                        .as_u16_limbs()
                        .to_vec(),
                ),
                #[cfg(feature = "u16limb_circuit")]
                &UInt8::from_const_unchecked(split_to_u8::<u8>(expected_rd_written)),
            )
            .unwrap();
        let (raw_witin, lkm) = ShiftImmInstruction::<E, I>::assign_instances(
            &config,
            &mut ShardContext::default(),
            cb.cs.num_witin as usize,
            cb.cs.num_structural_witin as usize,
            vec![&StepRecord::new_i_instruction(
                3,
                Change::new(MOCK_PC_START, MOCK_PC_START + PC_STEP_SIZE),
                insn_code,
                rs1_read,
                Change::new(0, rd_written),
                0,
            )],
        )
        .unwrap();
        MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/b_insn.rs | ceno_zkvm/src/instructions/riscv/b_insn.rs | use ceno_emul::{InsnKind, StepRecord};
use ff_ext::ExtensionField;
use super::constants::PC_STEP_SIZE;
use crate::{
chip_handler::{RegisterExpr, general::InstFetch},
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::riscv::insn_base::{ReadRS1, ReadRS2, StateInOut},
tables::InsnRecord,
witness::{LkMultiplicity, set_val},
};
use ff_ext::FieldInto;
use multilinear_extensions::{Expression, ToExpr, WitIn};
// Opcode: 1100011
// Funct3:
// 000 BEQ
// 001 BNE
// 100 BLT
// 101 BGE
// 110 BLTU
// 111 BGEU
//
/// This config handles the common part of B-type instructions (branches):
/// - PC, cycle, fetch.
/// - Registers read.
/// - Jump based on the immediate and the given `branch_taken_bit`.
///
/// It does _not_ range-check the `branch_taken_bit`.
/// It does not witness of the register values, nor the actual function (e.g. BNE).
#[derive(Debug)]
pub struct BInstructionConfig<E: ExtensionField> {
    /// PC / timestamp state transition (branching variant: carries a next_pc).
    pub vm_state: StateInOut<E>,
    /// First source register read.
    pub rs1: ReadRS1<E>,
    /// Second source register read.
    pub rs2: ReadRS2<E>,
    /// Branch immediate witness (pc-relative offset), tied to the fetched
    /// instruction via the fetch lookup.
    pub imm: WitIn,
}
impl<E: ExtensionField> BInstructionConfig<E> {
    /// Lays out the shared B-type constraints: state transition, rs1/rs2
    /// reads, instruction fetch, and the branch PC update driven by
    /// `branch_taken_bit` (assumed boolean by the caller; not range-checked
    /// here, as noted on the struct docs).
    pub fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        insn_kind: InsnKind,
        rs1_read: RegisterExpr<E>,
        rs2_read: RegisterExpr<E>,
        branch_taken_bit: Expression<E>,
    ) -> Result<Self, ZKVMError> {
        // State in and out
        let vm_state = StateInOut::construct_circuit(circuit_builder, true)?;
        // Registers
        let rs1 = ReadRS1::construct_circuit(circuit_builder, rs1_read, vm_state.ts)?;
        let rs2 = ReadRS2::construct_circuit(circuit_builder, rs2_read, vm_state.ts)?;
        // Immediate
        let imm = circuit_builder.create_witin(|| "imm");
        // Fetch instruction
        circuit_builder.lk_fetch(&InsnRecord::new(
            vm_state.pc.expr(),
            insn_kind.into(),
            None,
            rs1.id.expr(),
            rs2.id.expr(),
            imm.expr(),
            #[cfg(feature = "u16limb_circuit")]
            0.into(),
        ))?;
        // Branch program counter:
        // taken -> pc + imm, not taken -> pc + PC_STEP_SIZE
        let pc_offset =
            branch_taken_bit.clone() * imm.expr() - branch_taken_bit * PC_STEP_SIZE + PC_STEP_SIZE;
        let next_pc = vm_state.next_pc.unwrap();
        circuit_builder.require_equal(
            || "pc_branch",
            next_pc.expr(),
            vm_state.pc.expr() + pc_offset,
        )?;
        Ok(BInstructionConfig {
            vm_state,
            rs1,
            rs2,
            imm,
        })
    }
    /// Fills one step's witness values for the shared B-type columns.
    pub fn assign_instance(
        &self,
        instance: &mut [<E as ExtensionField>::BaseField],
        shard_ctx: &mut ShardContext,
        lk_multiplicity: &mut LkMultiplicity,
        step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        self.vm_state.assign_instance(instance, shard_ctx, step)?;
        self.rs1
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        self.rs2
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        // Immediate (internal field-encoded representation of the insn imm)
        set_val!(
            instance,
            self.imm,
            InsnRecord::<E::BaseField>::imm_internal(&step.insn()).1
        );
        // Fetch the instruction.
        lk_multiplicity.fetch(step.pc().before.0);
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/div.rs | ceno_zkvm/src/instructions/riscv/div.rs | use ceno_emul::InsnKind;
#[cfg(not(feature = "u16limb_circuit"))]
mod div_circuit;
#[cfg(feature = "u16limb_circuit")]
mod div_circuit_v2;
use super::RIVInstruction;
/// Marker type selecting the DIVU (unsigned divide) opcode.
pub struct DivuOp;
impl RIVInstruction for DivuOp {
    const INST_KIND: InsnKind = InsnKind::DIVU;
}
#[cfg(feature = "u16limb_circuit")]
pub type DivuInstruction<E> = div_circuit_v2::ArithInstruction<E, DivuOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type DivuInstruction<E> = div_circuit::ArithInstruction<E, DivuOp>;
/// Marker type selecting the REMU (unsigned remainder) opcode.
pub struct RemuOp;
impl RIVInstruction for RemuOp {
    const INST_KIND: InsnKind = InsnKind::REMU;
}
#[cfg(feature = "u16limb_circuit")]
pub type RemuInstruction<E> = div_circuit_v2::ArithInstruction<E, RemuOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type RemuInstruction<E> = div_circuit::ArithInstruction<E, RemuOp>;
/// Marker type selecting the REM (signed remainder) opcode.
pub struct RemOp;
impl RIVInstruction for RemOp {
    const INST_KIND: InsnKind = InsnKind::REM;
}
#[cfg(feature = "u16limb_circuit")]
pub type RemInstruction<E> = div_circuit_v2::ArithInstruction<E, RemOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type RemInstruction<E> = div_circuit::ArithInstruction<E, RemOp>;
/// Marker type selecting the DIV (signed divide) opcode.
pub struct DivOp;
impl RIVInstruction for DivOp {
    const INST_KIND: InsnKind = InsnKind::DIV;
}
#[cfg(feature = "u16limb_circuit")]
pub type DivInstruction<E> = div_circuit_v2::ArithInstruction<E, DivOp>;
#[cfg(not(feature = "u16limb_circuit"))]
pub type DivInstruction<E> = div_circuit::ArithInstruction<E, DivOp>;
#[cfg(test)]
mod test {
    //! Tests for the DIV / REM / DIVU / REMU circuits, including the RISC-V
    //! special cases: division by zero and the signed overflow MIN / -1.
    #[cfg(not(feature = "u16limb_circuit"))]
    use super::div_circuit::DivRemConfig;
    #[cfg(feature = "u16limb_circuit")]
    use super::div_circuit_v2::DivRemConfig;
    use crate::{
        Value,
        circuit_builder::{CircuitBuilder, ConstraintSystem},
        e2e::ShardContext,
        instructions::{
            Instruction,
            riscv::{
                constants::UInt,
                div::{DivInstruction, DivuInstruction, RemInstruction, RemuInstruction},
            },
        },
        scheme::mock_prover::{MOCK_PC_START, MockProver},
        structs::ProgramParams,
    };
    use ceno_emul::{Change, InsnKind, StepRecord, encode_rv32};
    #[cfg(feature = "u16limb_circuit")]
    use ff_ext::BabyBearExt4 as BE;
    use ff_ext::{ExtensionField, GoldilocksExt2 as GE};
    use itertools::Itertools;
    use rand::RngCore;
    // unifies DIV/REM/DIVU/REMU interface for testing purposes
    trait TestInstance<E: ExtensionField>
    where
        Self: Instruction<E>,
    {
        // type the instruction works with (i32 or u32)
        type NumType: Copy;
        // conv to register necessary due to lack of native "as" trait
        fn as_u32(val: Self::NumType) -> u32;
        // designates output value of the circuit that is under scrutiny
        fn output(config: Self::InstructionConfig) -> UInt<E>;
        // the correct/expected value for given parameters
        fn correct(dividend: Self::NumType, divisor: Self::NumType) -> Self::NumType;
        const INSN_KIND: InsnKind;
    }
    impl<E: ExtensionField> TestInstance<E> for DivInstruction<E> {
        type NumType = i32;
        fn as_u32(val: Self::NumType) -> u32 {
            val as u32
        }
        fn output(config: DivRemConfig<E>) -> UInt<E> {
            config.quotient
        }
        // DIV: x / 0 == -1; MIN / -1 wraps, hence wrapping_div
        fn correct(dividend: i32, divisor: i32) -> i32 {
            if divisor == 0 {
                -1i32
            } else {
                dividend.wrapping_div(divisor)
            }
        }
        const INSN_KIND: InsnKind = InsnKind::DIV;
    }
    impl<E: ExtensionField> TestInstance<E> for RemInstruction<E> {
        type NumType = i32;
        fn as_u32(val: Self::NumType) -> u32 {
            val as u32
        }
        fn output(config: DivRemConfig<E>) -> UInt<E> {
            config.remainder
        }
        // REM: x % 0 == x; MIN % -1 wraps to 0, hence wrapping_rem
        fn correct(dividend: i32, divisor: i32) -> i32 {
            if divisor == 0 {
                dividend
            } else {
                dividend.wrapping_rem(divisor)
            }
        }
        const INSN_KIND: InsnKind = InsnKind::REM;
    }
    impl<E: ExtensionField> TestInstance<E> for DivuInstruction<E> {
        type NumType = u32;
        fn as_u32(val: Self::NumType) -> u32 {
            val
        }
        fn output(config: DivRemConfig<E>) -> UInt<E> {
            config.quotient
        }
        // DIVU: x / 0 == 2^32 - 1
        fn correct(dividend: u32, divisor: u32) -> u32 {
            if divisor == 0 {
                u32::MAX
            } else {
                dividend / divisor
            }
        }
        const INSN_KIND: InsnKind = InsnKind::DIVU;
    }
    impl<E: ExtensionField> TestInstance<E> for RemuInstruction<E> {
        type NumType = u32;
        fn as_u32(val: Self::NumType) -> u32 {
            val
        }
        fn output(config: DivRemConfig<E>) -> UInt<E> {
            config.remainder
        }
        // REMU: x % 0 == x
        fn correct(dividend: u32, divisor: u32) -> u32 {
            if divisor == 0 {
                dividend
            } else {
                dividend % divisor
            }
        }
        const INSN_KIND: InsnKind = InsnKind::REMU;
    }
    /// Builds the circuit, assigns one R-type step with the given operands,
    /// pins the scrutinized output to `exp_outcome`, and checks that the mock
    /// prover result matches `is_ok` (a failure is expected to be reported
    /// under the name of the violated assert, e.g. "assert_outcome").
    fn verify<E: ExtensionField, Insn: Instruction<E> + TestInstance<E>>(
        name: &str,
        dividend: <Insn as TestInstance<E>>::NumType,
        divisor: <Insn as TestInstance<E>>::NumType,
        exp_outcome: <Insn as TestInstance<E>>::NumType,
        is_ok: bool,
    ) {
        let mut cs = ConstraintSystem::<E>::new(|| "riscv");
        let mut cb = CircuitBuilder::new(&mut cs);
        let config = cb
            .namespace(
                || format!("{}_({})", Insn::name(), name),
                |cb| Ok(Insn::construct_circuit(cb, &ProgramParams::default())),
            )
            .unwrap()
            .unwrap();
        let outcome = Insn::correct(dividend, divisor);
        let insn_code = encode_rv32(Insn::INSN_KIND, 2, 3, 4, 0);
        // values assignment
        let ([raw_witin, _], lkm) = Insn::assign_instances(
            &config,
            &mut ShardContext::default(),
            cb.cs.num_witin as usize,
            cb.cs.num_structural_witin as usize,
            vec![&StepRecord::new_r_instruction(
                3,
                MOCK_PC_START,
                insn_code,
                Insn::as_u32(dividend),
                Insn::as_u32(divisor),
                Change::new(0, Insn::as_u32(outcome)),
                0,
            )],
        )
        .unwrap();
        let expected_rd_written = UInt::from_const_unchecked(
            Value::new_unchecked(Insn::as_u32(exp_outcome))
                .as_u16_limbs()
                .to_vec(),
        );
        Insn::output(config)
            .require_equal(|| "assert_outcome", &mut cb, &expected_rd_written)
            .unwrap();
        let expected_errors: &[_] = if is_ok { &[] } else { &[name] };
        MockProver::assert_with_expected_errors(
            &cb,
            &[],
            &raw_witin
                .to_mles()
                .into_iter()
                .map(|v| v.into())
                .collect_vec(),
            &[],
            &[insn_code],
            expected_errors,
            None,
            Some(lkm),
        );
    }
    // shortcut to verify given pair produces correct output
    fn verify_positive<E: ExtensionField, Insn: Instruction<E> + TestInstance<E>>(
        name: &str,
        dividend: <Insn as TestInstance<E>>::NumType,
        divisor: <Insn as TestInstance<E>>::NumType,
    ) {
        verify::<E, Insn>(
            name,
            dividend,
            divisor,
            Insn::correct(dividend, divisor),
            true,
        );
    }
    // Test unsigned opcodes
    type DivuG = DivuInstruction<GE>;
    type RemuG = RemuInstruction<GE>;
    #[cfg(feature = "u16limb_circuit")]
    type DivuB = DivuInstruction<BE>;
    #[cfg(feature = "u16limb_circuit")]
    type RemuB = RemuInstruction<BE>;
    #[test]
    fn test_divrem_unsigned_handmade() {
        let test_cases = [
            ("10 / 2", 10, 2),
            ("10 / 11", 10, 11),
            ("11 / 2", 11, 2),
            ("large values 1", 1234 * 5678 * 100, 1234),
            ("large values 2", 1202729773, 171818539),
        ];
        for (name, dividend, divisor) in test_cases.into_iter() {
            verify_positive::<GE, DivuG>(name, dividend, divisor);
            verify_positive::<GE, RemuG>(name, dividend, divisor);
            #[cfg(feature = "u16limb_circuit")]
            {
                verify_positive::<BE, DivuB>(name, dividend, divisor);
                verify_positive::<BE, RemuB>(name, dividend, divisor);
            }
        }
    }
    #[test]
    fn test_divrem_unsigned_edges() {
        // full cross-product of boundary operands, including divisor == 0
        let interesting_values = [u32::MAX, u32::MAX - 1, 0, 1, 2];
        for dividend in interesting_values {
            for divisor in interesting_values {
                let name = format!("dividend = {}, divisor = {}", dividend, divisor);
                verify_positive::<GE, DivuG>(&name, dividend, divisor);
                verify_positive::<GE, RemuG>(&name, dividend, divisor);
                #[cfg(feature = "u16limb_circuit")]
                {
                    verify_positive::<BE, DivuB>(&name, dividend, divisor);
                    verify_positive::<BE, RemuB>(&name, dividend, divisor);
                }
            }
        }
    }
    #[test]
    fn test_divrem_unsigned_unsatisfied() {
        // a wrong outcome (10 / 2 != 3) must be rejected by "assert_outcome"
        verify::<GE, DivuG>("assert_outcome", 10, 2, 3, false);
    }
    #[test]
    fn test_divrem_unsigned_random() {
        for _ in 0..10 {
            let mut rng = rand::thread_rng();
            let dividend: u32 = rng.next_u32();
            let divisor: u32 = rng.next_u32();
            let name = format!("random: dividend = {}, divisor = {}", dividend, divisor);
            verify_positive::<GE, DivuG>(&name, dividend, divisor);
            verify_positive::<GE, RemuG>(&name, dividend, divisor);
            #[cfg(feature = "u16limb_circuit")]
            {
                verify_positive::<BE, DivuB>(&name, dividend, divisor);
                verify_positive::<BE, RemuB>(&name, dividend, divisor);
            }
        }
    }
    // Test signed opcodes
    type DivG = DivInstruction<GE>;
    type RemG = RemInstruction<GE>;
    #[cfg(feature = "u16limb_circuit")]
    type DivB = DivInstruction<BE>;
    #[cfg(feature = "u16limb_circuit")]
    type RemB = RemInstruction<BE>;
    #[test]
    fn test_divrem_signed_handmade() {
        let test_cases = [
            ("10 / 2", 10, 2),
            ("10 / 11", 10, 11),
            ("11 / 2", 11, 2),
            ("-10 / 3", -10, 3),
            ("-10 / -3", -10, -3),
            ("large values 1", -1234 * 5678 * 100, 5678),
            ("large values 2", 1234 * 5678 * 100, 1234),
            ("large values 3", 1202729773, 171818539),
        ];
        for (name, dividend, divisor) in test_cases.into_iter() {
            verify_positive::<GE, DivG>(name, dividend, divisor);
            verify_positive::<GE, RemG>(name, dividend, divisor);
            #[cfg(feature = "u16limb_circuit")]
            {
                verify_positive::<BE, DivB>(name, dividend, divisor);
                verify_positive::<BE, RemB>(name, dividend, divisor);
            }
        }
    }
    #[test]
    fn test_divrem_signed_edges() {
        // includes (MIN, -1), the signed-overflow case, and divisor == 0
        let interesting_values = [i32::MIN, i32::MAX, i32::MIN + 1, i32::MAX - 1, 0, -1, 1, 2];
        for dividend in interesting_values {
            for divisor in interesting_values {
                let name = format!("dividend = {}, divisor = {}", dividend, divisor);
                verify_positive::<GE, DivG>(&name, dividend, divisor);
                verify_positive::<GE, RemG>(&name, dividend, divisor);
                #[cfg(feature = "u16limb_circuit")]
                {
                    verify_positive::<BE, DivB>(&name, dividend, divisor);
                    verify_positive::<BE, RemB>(&name, dividend, divisor);
                }
            }
        }
    }
    #[test]
    fn test_divrem_signed_random() {
        for _ in 0..10 {
            let mut rng = rand::thread_rng();
            let dividend: i32 = rng.next_u32() as i32;
            let divisor: i32 = rng.next_u32() as i32;
            let name = format!("random: dividend = {}, divisor = {}", dividend, divisor);
            verify_positive::<GE, DivG>(&name, dividend, divisor);
            verify_positive::<GE, RemG>(&name, dividend, divisor);
            #[cfg(feature = "u16limb_circuit")]
            {
                verify_positive::<BE, DivB>(&name, dividend, divisor);
                verify_positive::<BE, RemB>(&name, dividend, divisor);
            }
        }
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/shift.rs | ceno_zkvm/src/instructions/riscv/shift.rs | #[cfg(not(feature = "u16limb_circuit"))]
pub mod shift_circuit;
#[cfg(feature = "u16limb_circuit")]
pub mod shift_circuit_v2;
use ceno_emul::InsnKind;
use super::RIVInstruction;
#[cfg(not(feature = "u16limb_circuit"))]
use crate::instructions::riscv::shift::shift_circuit::ShiftLogicalInstruction;
#[cfg(feature = "u16limb_circuit")]
use crate::instructions::riscv::shift::shift_circuit_v2::ShiftLogicalInstruction;
/// Marker type selecting the SLL (shift left logical) opcode.
pub struct SllOp;
impl RIVInstruction for SllOp {
    const INST_KIND: InsnKind = InsnKind::SLL;
}
/// SLL circuit, specialized from the feature-selected shift implementation.
pub type SllInstruction<E> = ShiftLogicalInstruction<E, SllOp>;
/// Marker type selecting the SRL (shift right logical) opcode.
pub struct SrlOp;
impl RIVInstruction for SrlOp {
    const INST_KIND: InsnKind = InsnKind::SRL;
}
/// SRL circuit, specialized from the feature-selected shift implementation.
pub type SrlInstruction<E> = ShiftLogicalInstruction<E, SrlOp>;
/// Marker type selecting the SRA (shift right arithmetic) opcode.
pub struct SraOp;
impl RIVInstruction for SraOp {
    const INST_KIND: InsnKind = InsnKind::SRA;
}
/// SRA circuit, specialized from the feature-selected shift implementation.
pub type SraInstruction<E> = ShiftLogicalInstruction<E, SraOp>;
#[cfg(test)]
mod tests {
    //! Tests for the register-shift circuits (SLL / SRL / SRA). Only the low
    //! 5 bits of rs2 act as the effective shift amount.
    use ceno_emul::{Change, InsnKind, StepRecord, encode_rv32};
    use ff_ext::{ExtensionField, GoldilocksExt2};
    use super::{ShiftLogicalInstruction, SllOp, SraOp, SrlOp};
    #[cfg(not(feature = "u16limb_circuit"))]
    use crate::Value;
    #[cfg(not(feature = "u16limb_circuit"))]
    use crate::instructions::riscv::constants::UInt;
    #[cfg(feature = "u16limb_circuit")]
    use crate::instructions::riscv::constants::UInt8;
    #[cfg(feature = "u16limb_circuit")]
    use crate::utils::split_to_u8;
    use crate::{
        circuit_builder::{CircuitBuilder, ConstraintSystem},
        e2e::ShardContext,
        instructions::{Instruction, riscv::RIVInstruction},
        scheme::mock_prover::{MOCK_PC_START, MockProver},
        structs::ProgramParams,
    };
    #[cfg(feature = "u16limb_circuit")]
    use ff_ext::BabyBearExt4;
    #[test]
    fn test_opcode_sll() {
        // (case name, rs1 value, rs2 value, expected rd)
        let cases = [
            ("basic 1", 32, 3, 32 << 3),
            ("basic 2", 0b_0001, 3, 0b_1000),
            // 33 << 33 === 33 << 1
            ("rs2 over 5-bits", 0b_0001, 33, 0b_0010),
            ("bit loss", (1 << 31) | 1, 1, 0b_0010),
            ("zero shift", 0b_0001, 0, 0b_0001),
            ("all zeros", 0b_0000, 0, 0b_0000),
            ("base is zero", 0b_0000, 1, 0b_0000),
        ];
        for (name, lhs, rhs, expected) in cases {
            verify::<GoldilocksExt2, SllOp>(name, lhs, rhs, expected);
            // the u16-limb (v2) circuit is additionally exercised over BabyBear
            #[cfg(feature = "u16limb_circuit")]
            verify::<BabyBearExt4, SllOp>(name, lhs, rhs, expected);
        }
    }
    #[test]
    fn test_opcode_srl() {
        let cases = [
            ("basic", 0b_1000, 3, 0b_0001),
            // 33 >> 33 === 33 >> 1
            ("rs2 over 5-bits", 0b_1010, 33, 0b_0101),
            ("bit loss", 0b_1001, 1, 0b_0100),
            ("zero shift", 0b_1000, 0, 0b_1000),
            ("all zeros", 0b_0000, 0, 0b_0000),
            ("base is zero", 0b_0000, 1, 0b_0000),
        ];
        for (name, lhs, rhs, expected) in cases {
            verify::<GoldilocksExt2, SrlOp>(name, lhs, rhs, expected);
            #[cfg(feature = "u16limb_circuit")]
            verify::<BabyBearExt4, SrlOp>(name, lhs, rhs, expected);
        }
    }
    #[test]
    fn test_opcode_sra() {
        let cases = [
            // positive rs1
            ("32 >> 3", 32, 3, 32 >> 3),
            ("33 >> 3", 33, 3, 33 >> 3),
            ("32 >> 31", 32, 31, 32 >> 31),
            ("33 >> 31", 33, 31, 33 >> 31),
            // negative rs1 (sign bit must be replicated)
            ("-32 >> 3", (-32_i32) as u32, 3, (-32_i32 >> 3) as u32),
            ("-33 >> 3", (-33_i32) as u32, 3, (-33_i32 >> 3) as u32),
            ("-32 >> 31", (-32_i32) as u32, 31, (-32_i32 >> 31) as u32),
            ("-33 >> 31", (-33_i32) as u32, 31, (-33_i32 >> 31) as u32),
        ];
        for (name, lhs, rhs, expected) in cases {
            verify::<GoldilocksExt2, SraOp>(name, lhs, rhs, expected);
            #[cfg(feature = "u16limb_circuit")]
            verify::<BabyBearExt4, SraOp>(name, lhs, rhs, expected);
        }
    }
    /// Builds the circuit for `I`, constrains rd_written to equal
    /// `expected_rd_written`, assigns one R-type step, and runs the mock prover.
    fn verify<E: ExtensionField, I: RIVInstruction>(
        name: &'static str,
        rs1_read: u32,
        rs2_read: u32,
        expected_rd_written: u32,
    ) {
        let mut cs = ConstraintSystem::<E>::new(|| "riscv");
        let mut cb = CircuitBuilder::new(&mut cs);
        // only the low 5 bits of rs2 are the effective shift amount
        let shift = rs2_read & 0b11111;
        let (prefix, insn_code, rd_written) = match I::INST_KIND {
            InsnKind::SLL => (
                "SLL",
                encode_rv32(InsnKind::SLL, 2, 3, 4, 0),
                rs1_read << shift,
            ),
            InsnKind::SRL => (
                "SRL",
                encode_rv32(InsnKind::SRL, 2, 3, 4, 0),
                rs1_read >> shift,
            ),
            InsnKind::SRA => (
                "SRA",
                encode_rv32(InsnKind::SRA, 2, 3, 4, 0),
                (rs1_read as i32 >> shift) as u32,
            ),
            _ => unreachable!(),
        };
        let config = cb
            .namespace(
                || format!("{prefix}_({name})"),
                |cb| {
                    Ok(ShiftLogicalInstruction::<E, I>::construct_circuit(
                        cb,
                        &ProgramParams::default(),
                    ))
                },
            )
            .unwrap()
            .unwrap();
        // limb representation differs between legacy (u16) and v2 (u8) circuits
        config
            .rd_written
            .require_equal(
                || format!("{prefix}_({name})_assert_rd_written"),
                &mut cb,
                #[cfg(not(feature = "u16limb_circuit"))]
                &UInt::from_const_unchecked(
                    Value::new_unchecked(expected_rd_written)
                        .as_u16_limbs()
                        .to_vec(),
                ),
                #[cfg(feature = "u16limb_circuit")]
                &UInt8::from_const_unchecked(split_to_u8::<u8>(expected_rd_written)),
            )
            .unwrap();
        let (raw_witin, lkm) = ShiftLogicalInstruction::<E, I>::assign_instances(
            &config,
            &mut ShardContext::default(),
            cb.cs.num_witin as usize,
            cb.cs.num_structural_witin as usize,
            vec![&StepRecord::new_r_instruction(
                3,
                MOCK_PC_START,
                insn_code,
                rs1_read,
                rs2_read,
                Change::new(0, rd_written),
                0,
            )],
        )
        .unwrap();
        MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/lui.rs | ceno_zkvm/src/instructions/riscv/lui.rs | use ff_ext::{ExtensionField, FieldInto};
use itertools::{Itertools, izip};
use std::marker::PhantomData;
use crate::{
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
constants::{UINT_BYTE_LIMBS, UInt8},
i_insn::IInstructionConfig,
},
},
structs::ProgramParams,
tables::InsnRecord,
utils::split_to_u8,
witness::LkMultiplicity,
};
use ceno_emul::InsnKind;
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::FieldAlgebra;
use witness::set_val;
/// Circuit configuration for LUI (load upper immediate).
pub struct LuiConfig<E: ExtensionField> {
    /// Shared I-type layout: state transition, register access, fetch.
    pub i_insn: IInstructionConfig<E>,
    /// U-type immediate witness, tied to the fetch record.
    pub imm: WitIn,
    // for rd, we skip lsb byte as it's always zero
    pub rd_written: [WitIn; UINT_BYTE_LIMBS - 1],
}
/// Zero-sized instruction type for LUI, generic over the extension field.
pub struct LuiInstruction<E>(PhantomData<E>);
impl<E: ExtensionField> Instruction<E> for LuiInstruction<E> {
    type InstructionConfig = LuiConfig<E>;
    fn name() -> String {
        format!("{:?}", InsnKind::LUI)
    }
    /// LUI writes `imm << 12` into rd. The rd low byte is a constant zero;
    /// the remaining byte limbs are u8-range-checked witnesses tied to `imm`
    /// by a single linear recomposition constraint.
    fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<LuiConfig<E>, ZKVMError> {
        // one u8-range-checked witness per non-lsb byte limb of rd
        let rd_written = std::array::from_fn(|i| {
            circuit_builder
                .create_u8(|| format!("rd_written_limb_{}", i))
                .unwrap()
        });
        // rd lsb byte is always zero
        let rd_exprs = std::iter::once(0.into())
            .chain(rd_written.map(|w| w.expr()))
            .collect_vec();
        let imm = circuit_builder.create_witin(|| "imm");
        let i_insn = IInstructionConfig::<E>::construct_circuit(
            circuit_builder,
            InsnKind::LUI,
            imm.expr(),
            0.into(),
            [0.into(), 0.into()],
            UInt8::from_exprs_unchecked(rd_exprs.clone()).register_expr(),
            false,
        )?;
        // recompose rd's bytes above the lsb into one field element
        let intermed_val =
            rd_exprs
                .iter()
                .skip(1)
                .enumerate()
                .fold(Expression::ZERO, |acc, (i, val)| {
                    acc + val.expr()
                        * E::BaseField::from_canonical_u32(1 << (i * UInt8::<E>::LIMB_BITS)).expr()
                });
        // imm * 2^4 is the correct composition of intermed_val in case of LUI
        // (rd = imm << 12, and intermed_val == rd >> 8, so intermed_val == imm << 4)
        circuit_builder.require_equal(
            || "imm * 2^4 is the correct composition of intermed_val in case of LUI",
            intermed_val.expr(),
            imm.expr() * E::BaseField::from_canonical_u32(1 << (12 - UInt8::<E>::LIMB_BITS)).expr(),
        )?;
        Ok(LuiConfig {
            i_insn,
            imm,
            rd_written,
        })
    }
    /// Fills one LUI step's witness: the written rd value's byte limbs
    /// (lsb skipped) and the internal immediate encoding.
    fn assign_instance(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        instance: &mut [E::BaseField],
        lk_multiplicity: &mut LkMultiplicity,
        step: &ceno_emul::StepRecord,
    ) -> Result<(), ZKVMError> {
        config
            .i_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        let rd_written = split_to_u8(step.rd().unwrap().value.after);
        for (val, witin) in izip!(rd_written.iter().skip(1), config.rd_written) {
            // record the u8 range-check lookup for each witnessed limb
            lk_multiplicity.assert_ux::<8>(*val as u64);
            set_val!(instance, witin, E::BaseField::from_canonical_u8(*val));
        }
        let imm = InsnRecord::<E::BaseField>::imm_internal(&step.insn()).0 as u64;
        set_val!(instance, config.imm, imm);
        Ok(())
    }
}
#[cfg(test)]
mod tests {
    //! Tests for the LUI circuit: rd must equal imm << 12.
    use ceno_emul::{Change, InsnKind, PC_STEP_SIZE, StepRecord, encode_rv32};
    // Gate the BabyBear import on the feature that uses it, matching the other
    // instruction test modules; otherwise it is an unused import (and a
    // warning) whenever `u16limb_circuit` is disabled.
    #[cfg(feature = "u16limb_circuit")]
    use ff_ext::BabyBearExt4;
    use ff_ext::{ExtensionField, GoldilocksExt2};
    use gkr_iop::circuit_builder::DebugIndex;
    use crate::{
        Value,
        circuit_builder::{CircuitBuilder, ConstraintSystem},
        e2e::ShardContext,
        instructions::{
            Instruction,
            riscv::{constants::UInt, lui::LuiInstruction},
        },
        scheme::mock_prover::{MOCK_PC_START, MockProver},
        structs::ProgramParams,
    };
    #[test]
    fn test_lui() {
        // 20-bit immediates (min, small, max); rd is the immediate shifted up
        let cases = vec![0, 0x1, 0xfffff];
        for imm in &cases {
            test_opcode_lui::<GoldilocksExt2>((*imm as u32) << 12, imm << 12);
            #[cfg(feature = "u16limb_circuit")]
            test_opcode_lui::<BabyBearExt4>((*imm as u32) << 12, imm << 12);
        }
    }
    /// Builds the LUI circuit, assigns one step writing `rd`, pins the debug
    /// rd-write expression to `rd`, and runs the mock prover.
    fn test_opcode_lui<E: ExtensionField>(rd: u32, imm: i32) {
        let mut cs = ConstraintSystem::<E>::new(|| "riscv");
        let mut cb = CircuitBuilder::new(&mut cs);
        let config = cb
            .namespace(
                || "lui",
                |cb| {
                    let config =
                        LuiInstruction::<E>::construct_circuit(cb, &ProgramParams::default());
                    Ok(config)
                },
            )
            .unwrap()
            .unwrap();
        let insn_code = encode_rv32(InsnKind::LUI, 0, 0, 4, imm);
        let (raw_witin, lkm) = LuiInstruction::<E>::assign_instances(
            &config,
            &mut ShardContext::default(),
            cb.cs.num_witin as usize,
            cb.cs.num_structural_witin as usize,
            vec![&StepRecord::new_i_instruction(
                3,
                Change::new(MOCK_PC_START, MOCK_PC_START + PC_STEP_SIZE),
                insn_code,
                0,
                Change::new(0, rd),
                0,
            )],
        )
        .unwrap();
        // verify rd_written
        let expected_rd_written =
            UInt::from_const_unchecked(Value::new_unchecked(rd).as_u16_limbs().to_vec());
        let rd_written_expr = cb.get_debug_expr(DebugIndex::RdWrite as usize)[0].clone();
        cb.require_equal(
            || "assert_rd_written",
            rd_written_expr,
            expected_rd_written.value(),
        )
        .unwrap();
        MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/i_insn.rs | ceno_zkvm/src/instructions/riscv/i_insn.rs | use ceno_emul::{InsnKind, StepRecord};
use ff_ext::ExtensionField;
use crate::{
chip_handler::{RegisterExpr, general::InstFetch},
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::riscv::insn_base::{ReadRS1, StateInOut, WriteRD},
tables::InsnRecord,
witness::LkMultiplicity,
};
use multilinear_extensions::{Expression, ToExpr};
/// This config handles the common part of I-type instructions:
/// - PC, cycle, fetch.
/// - Registers read and write.
///
/// It does not witness of the register values, nor the actual function (e.g. srli, addi, etc).
#[derive(Debug)]
pub struct IInstructionConfig<E: ExtensionField> {
    /// PC / timestamp state transition.
    pub vm_state: StateInOut<E>,
    /// Source register read.
    pub rs1: ReadRS1<E>,
    /// Destination register write.
    pub rd: WriteRD<E>,
}
impl<E: ExtensionField> IInstructionConfig<E> {
    /// Lays out the shared I-type constraints: state transition, rs1 read,
    /// rd write, and the instruction-fetch lookup (rs2 slot fixed to 0).
    /// `branching` selects whether the state carries an explicit next_pc.
    pub fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        insn_kind: InsnKind,
        imm: Expression<E>,
        #[cfg(feature = "u16limb_circuit")] imm_sign: Expression<E>,
        rs1_read: RegisterExpr<E>,
        rd_written: RegisterExpr<E>,
        branching: bool,
    ) -> Result<Self, ZKVMError> {
        // State in and out
        let vm_state = StateInOut::construct_circuit(circuit_builder, branching)?;
        // Registers
        let rs1 = ReadRS1::construct_circuit(circuit_builder, rs1_read, vm_state.ts)?;
        let rd = WriteRD::construct_circuit(circuit_builder, rd_written, vm_state.ts)?;
        // TODO make imm representation consistent between instruction types
        // Fetch the instruction.
        circuit_builder.lk_fetch(&InsnRecord::new(
            vm_state.pc.expr(),
            insn_kind.into(),
            Some(rd.id.expr()),
            rs1.id.expr(),
            0.into(),
            imm.clone(),
            #[cfg(feature = "u16limb_circuit")]
            imm_sign,
        ))?;
        Ok(IInstructionConfig { vm_state, rs1, rd })
    }
    /// Fills one step's witness values for the shared I-type columns.
    pub fn assign_instance(
        &self,
        instance: &mut [<E as ExtensionField>::BaseField],
        shard_ctx: &mut ShardContext,
        lk_multiplicity: &mut LkMultiplicity,
        step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        self.vm_state.assign_instance(instance, shard_ctx, step)?;
        self.rs1
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        self.rd
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        // Fetch instruction
        lk_multiplicity.fetch(step.pc().before.0);
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/arith_imm.rs | ceno_zkvm/src/instructions/riscv/arith_imm.rs | #[cfg(not(feature = "u16limb_circuit"))]
mod arith_imm_circuit;
#[cfg(feature = "u16limb_circuit")]
mod arith_imm_circuit_v2;
#[cfg(feature = "u16limb_circuit")]
pub use crate::instructions::riscv::arith_imm::arith_imm_circuit_v2::AddiInstruction;
#[cfg(not(feature = "u16limb_circuit"))]
pub use crate::instructions::riscv::arith_imm::arith_imm_circuit::AddiInstruction;
use super::RIVInstruction;
impl<E> RIVInstruction for AddiInstruction<E> {
const INST_KIND: ceno_emul::InsnKind = ceno_emul::InsnKind::ADDI;
}
#[cfg(test)]
mod test {
    //! Tests for the ADDI circuit with positive and negative immediates.
    use super::AddiInstruction;
    use crate::{
        Value,
        circuit_builder::{CircuitBuilder, ConstraintSystem},
        e2e::ShardContext,
        instructions::{Instruction, riscv::constants::UInt},
        scheme::mock_prover::{MOCK_PC_START, MockProver},
        structs::ProgramParams,
    };
    use ceno_emul::{Change, InsnKind, PC_STEP_SIZE, StepRecord, encode_rv32};
    #[cfg(feature = "u16limb_circuit")]
    use ff_ext::BabyBearExt4;
    use ff_ext::{ExtensionField, GoldilocksExt2};
    use gkr_iop::circuit_builder::DebugIndex;
    #[test]
    fn test_opcode_addi() {
        // (rs1 value, expected rd, immediate)
        let cases = vec![
            (1000, 1003, 3), // positive immediate
            (1000, 997, -3), // negative immediate
        ];
        for &(rs1, expected, imm) in &cases {
            test_opcode_addi_internal::<GoldilocksExt2>(rs1, expected, imm);
            #[cfg(feature = "u16limb_circuit")]
            test_opcode_addi_internal::<BabyBearExt4>(rs1, expected, imm);
        }
    }
    /// Builds the ADDI circuit, assigns one step, pins the debug rd-write
    /// expression to `rd`, and runs the mock prover.
    fn test_opcode_addi_internal<E: ExtensionField>(rs1: u32, rd: u32, imm: i32) {
        let mut cs = ConstraintSystem::<E>::new(|| "riscv");
        let mut cb = CircuitBuilder::new(&mut cs);
        let config = cb
            .namespace(
                || "addi",
                |cb| {
                    let config =
                        AddiInstruction::<E>::construct_circuit(cb, &ProgramParams::default());
                    Ok(config)
                },
            )
            .unwrap()
            .unwrap();
        let insn_code = encode_rv32(InsnKind::ADDI, 2, 0, 4, imm);
        let (raw_witin, lkm) = AddiInstruction::<E>::assign_instances(
            &config,
            &mut ShardContext::default(),
            cb.cs.num_witin as usize,
            cb.cs.num_structural_witin as usize,
            vec![&StepRecord::new_i_instruction(
                3,
                Change::new(MOCK_PC_START, MOCK_PC_START + PC_STEP_SIZE),
                insn_code,
                rs1,
                Change::new(0, rd),
                0,
            )],
        )
        .unwrap();
        // verify rd_written
        let expected_rd_written =
            UInt::from_const_unchecked(Value::new_unchecked(rd).as_u16_limbs().to_vec());
        let rd_written_expr = cb.get_debug_expr(DebugIndex::RdWrite as usize)[0].clone();
        cb.require_equal(
            || "assert_rd_written",
            rd_written_expr,
            expected_rd_written.value(),
        )
        .unwrap();
        MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/s_insn.rs | ceno_zkvm/src/instructions/riscv/s_insn.rs | use crate::{
chip_handler::{AddressExpr, MemoryExpr, RegisterExpr, general::InstFetch},
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::riscv::insn_base::{ReadRS1, ReadRS2, StateInOut, WriteMEM},
tables::InsnRecord,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, StepRecord};
use ff_ext::ExtensionField;
use multilinear_extensions::{Expression, ToExpr};
/// This config handles the common part of S-type instructions:
/// - PC, cycle, fetch.
/// - Registers reads.
/// - Memory write
pub struct SInstructionConfig<E: ExtensionField> {
    // PC / timestamp state transition (non-branching)
    vm_state: StateInOut<E>,
    // first source register read
    rs1: ReadRS1<E>,
    // second source register read
    rs2: ReadRS2<E>,
    // memory write at the caller-supplied address
    mem_write: WriteMEM,
}
impl<E: ExtensionField> SInstructionConfig<E> {
    /// Lays out the shared S-type (store) constraints: state transition,
    /// rs1/rs2 reads, instruction fetch, and a memory write replacing
    /// `prev_memory_value` with `new_memory_value` at `memory_addr`.
    #[allow(clippy::too_many_arguments)]
    pub fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        insn_kind: InsnKind,
        imm: &Expression<E>,
        #[cfg(feature = "u16limb_circuit")] imm_sign: &Expression<E>,
        rs1_read: RegisterExpr<E>,
        rs2_read: RegisterExpr<E>,
        memory_addr: AddressExpr<E>,
        prev_memory_value: MemoryExpr<E>,
        new_memory_value: MemoryExpr<E>,
    ) -> Result<Self, ZKVMError> {
        // State in and out
        let vm_state = StateInOut::construct_circuit(circuit_builder, false)?;
        // Registers
        let rs1 = ReadRS1::construct_circuit(circuit_builder, rs1_read, vm_state.ts)?;
        let rs2 = ReadRS2::construct_circuit(circuit_builder, rs2_read, vm_state.ts)?;
        // Fetch instruction
        circuit_builder.lk_fetch(&InsnRecord::new(
            vm_state.pc.expr(),
            insn_kind.into(),
            None,
            rs1.id.expr(),
            rs2.id.expr(),
            imm.clone(),
            #[cfg(feature = "u16limb_circuit")]
            imm_sign.expr(),
        ))?;
        // Memory
        let mem_write = WriteMEM::construct_circuit(
            circuit_builder,
            memory_addr,
            prev_memory_value,
            new_memory_value,
            vm_state.ts,
        )?;
        Ok(SInstructionConfig {
            vm_state,
            rs1,
            rs2,
            mem_write,
        })
    }
    /// Fills one step's witness values for the shared S-type columns.
    pub fn assign_instance(
        &self,
        instance: &mut [<E as ExtensionField>::BaseField],
        shard_ctx: &mut ShardContext,
        lk_multiplicity: &mut LkMultiplicity,
        step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        self.vm_state.assign_instance(instance, shard_ctx, step)?;
        self.rs1
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        self.rs2
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        self.mem_write
            .assign_instance::<E>(instance, shard_ctx, lk_multiplicity, step)?;
        // Fetch instruction
        lk_multiplicity.fetch(step.pc().before.0);
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/j_insn.rs | ceno_zkvm/src/instructions/riscv/j_insn.rs | use ceno_emul::{InsnKind, StepRecord};
use ff_ext::ExtensionField;
use crate::{
chip_handler::{RegisterExpr, general::InstFetch},
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::riscv::insn_base::{StateInOut, WriteRD},
tables::InsnRecord,
witness::LkMultiplicity,
};
use multilinear_extensions::ToExpr;
// Opcode: 1101111
/// This config handles the common part of the J-type instruction (JAL):
/// - PC, cycle, fetch
/// - Register access
///
/// It does not witness the output rd value produced by the JAL opcode, but
/// does constrain next_pc = pc + imm using the instruction table lookup
#[derive(Debug)]
pub struct JInstructionConfig<E: ExtensionField> {
pub vm_state: StateInOut<E>,
pub rd: WriteRD<E>,
}
impl<E: ExtensionField> JInstructionConfig<E> {
pub fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
insn_kind: InsnKind,
rd_written: RegisterExpr<E>,
) -> Result<Self, ZKVMError> {
// State in and out
let vm_state = StateInOut::construct_circuit(circuit_builder, true)?;
// Registers
let rd = WriteRD::construct_circuit(circuit_builder, rd_written, vm_state.ts)?;
// Fetch instruction
circuit_builder.lk_fetch(&InsnRecord::new(
vm_state.pc.expr(),
insn_kind.into(),
Some(rd.id.expr()),
0.into(),
0.into(),
vm_state.next_pc.unwrap().expr() - vm_state.pc.expr(),
#[cfg(feature = "u16limb_circuit")]
0.into(),
))?;
Ok(JInstructionConfig { vm_state, rd })
}
pub fn assign_instance(
&self,
instance: &mut [<E as ExtensionField>::BaseField],
shard_ctx: &mut ShardContext,
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
self.vm_state.assign_instance(instance, shard_ctx, step)?;
self.rd
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
// Fetch the instruction.
lk_multiplicity.fetch(step.pc().before.0);
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/mulh.rs | ceno_zkvm/src/instructions/riscv/mulh.rs | use crate::instructions::riscv::RIVInstruction;
use ceno_emul::InsnKind;
#[cfg(not(feature = "u16limb_circuit"))]
mod mulh_circuit;
#[cfg(feature = "u16limb_circuit")]
mod mulh_circuit_v2;
#[cfg(not(feature = "u16limb_circuit"))]
use mulh_circuit::MulhInstructionBase;
#[cfg(feature = "u16limb_circuit")]
use mulh_circuit_v2::MulhInstructionBase;
pub struct MulOp;
impl RIVInstruction for MulOp {
const INST_KIND: InsnKind = InsnKind::MUL;
}
pub type MulInstruction<E> = MulhInstructionBase<E, MulOp>;
pub struct MulhOp;
impl RIVInstruction for MulhOp {
const INST_KIND: InsnKind = InsnKind::MULH;
}
pub type MulhInstruction<E> = MulhInstructionBase<E, MulhOp>;
pub struct MulhuOp;
impl RIVInstruction for MulhuOp {
const INST_KIND: InsnKind = InsnKind::MULHU;
}
pub type MulhuInstruction<E> = MulhInstructionBase<E, MulhuOp>;
pub struct MulhsuOp;
impl RIVInstruction for MulhsuOp {
const INST_KIND: InsnKind = InsnKind::MULHSU;
}
pub type MulhsuInstruction<E> = MulhInstructionBase<E, MulhsuOp>;
#[cfg(test)]
mod test {
use crate::{
Value,
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::ShardContext,
instructions::{
Instruction,
riscv::{
RIVInstruction,
constants::UInt,
mulh::{MulOp, MulhInstruction, MulhsuInstruction, MulhuOp},
},
},
scheme::mock_prover::{MOCK_PC_START, MockProver},
structs::ProgramParams,
witness::LkMultiplicity,
};
use ceno_emul::{Change, InsnKind, StepRecord, encode_rv32};
#[cfg(feature = "u16limb_circuit")]
use ff_ext::BabyBearExt4;
use ff_ext::{ExtensionField, GoldilocksExt2};
use gkr_iop::circuit_builder::DebugIndex;
#[test]
fn test_opcode_mul() {
verify_mulu::<MulOp, GoldilocksExt2>("basic", 2, 11);
verify_mulu::<MulOp, GoldilocksExt2>("2 * 0", 2, 0);
verify_mulu::<MulOp, GoldilocksExt2>("0 * 0", 0, 0);
verify_mulu::<MulOp, GoldilocksExt2>("0 * 2", 0, 2);
verify_mulu::<MulOp, GoldilocksExt2>("0 * u32::MAX", 0, u32::MAX);
verify_mulu::<MulOp, GoldilocksExt2>("u32::MAX", u32::MAX, u32::MAX);
verify_mulu::<MulOp, GoldilocksExt2>("u16::MAX", u16::MAX as u32, u16::MAX as u32);
#[cfg(feature = "u16limb_circuit")]
{
verify_mulu::<MulOp, BabyBearExt4>("basic", 2, 11);
verify_mulu::<MulOp, BabyBearExt4>("2 * 0", 2, 0);
verify_mulu::<MulOp, BabyBearExt4>("0 * 0", 0, 0);
verify_mulu::<MulOp, BabyBearExt4>("0 * 2", 0, 2);
verify_mulu::<MulOp, BabyBearExt4>("0 * u32::MAX", 0, u32::MAX);
verify_mulu::<MulOp, BabyBearExt4>("u32::MAX", u32::MAX, u32::MAX);
verify_mulu::<MulOp, BabyBearExt4>("u16::MAX", u16::MAX as u32, u16::MAX as u32);
}
}
#[test]
fn test_opcode_mulhu() {
verify_mulu::<MulhuOp, GoldilocksExt2>("basic", 2, 11);
verify_mulu::<MulhuOp, GoldilocksExt2>("2 * 0", 2, 0);
verify_mulu::<MulhuOp, GoldilocksExt2>("0 * 0", 0, 0);
verify_mulu::<MulhuOp, GoldilocksExt2>("0 * 2", 0, 2);
verify_mulu::<MulhuOp, GoldilocksExt2>("0 * u32::MAX", 0, u32::MAX);
verify_mulu::<MulhuOp, GoldilocksExt2>("u32::MAX", u32::MAX, u32::MAX);
verify_mulu::<MulhuOp, GoldilocksExt2>("u16::MAX", u16::MAX as u32, u16::MAX as u32);
#[cfg(feature = "u16limb_circuit")]
{
verify_mulu::<MulhuOp, BabyBearExt4>("basic", 2, 11);
verify_mulu::<MulhuOp, BabyBearExt4>("2 * 0", 2, 0);
verify_mulu::<MulhuOp, BabyBearExt4>("0 * 0", 0, 0);
verify_mulu::<MulhuOp, BabyBearExt4>("0 * 2", 0, 2);
verify_mulu::<MulhuOp, BabyBearExt4>("0 * u32::MAX", 0, u32::MAX);
verify_mulu::<MulhuOp, BabyBearExt4>("u32::MAX", u32::MAX, u32::MAX);
verify_mulu::<MulhuOp, BabyBearExt4>("u16::MAX", u16::MAX as u32, u16::MAX as u32);
}
}
fn verify_mulu<I: RIVInstruction, E: ExtensionField>(name: &'static str, rs1: u32, rs2: u32) {
#[cfg(not(feature = "u16limb_circuit"))]
use super::mulh_circuit::MulhInstructionBase;
#[cfg(feature = "u16limb_circuit")]
use super::mulh_circuit_v2::MulhInstructionBase;
let mut cs = ConstraintSystem::<E>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let config = cb
.namespace(
|| format!("{:?}_({name})", I::INST_KIND),
|cb| {
Ok(MulhInstructionBase::<E, I>::construct_circuit(
cb,
&ProgramParams::default(),
))
},
)
.unwrap()
.unwrap();
let outcome = match I::INST_KIND {
InsnKind::MUL => rs1.wrapping_mul(rs2),
InsnKind::MULHU => {
let a = Value::<'_, u32>::new_unchecked(rs1);
let b = Value::<'_, u32>::new_unchecked(rs2);
let value_mul = a.mul_hi(&b, &mut LkMultiplicity::default(), true);
value_mul.as_hi_value::<u32>().as_u32()
}
_ => unreachable!("Unsupported instruction kind"),
};
// values assignment
let insn_code = encode_rv32(I::INST_KIND, 2, 3, 4, 0);
let (raw_witin, lkm) = MulhInstructionBase::<E, I>::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&StepRecord::new_r_instruction(
3,
MOCK_PC_START,
insn_code,
rs1,
rs2,
Change::new(0, outcome),
0,
)],
)
.unwrap();
// verify value write to register, which is only hi
let expected_rd_written =
UInt::from_const_unchecked(Value::new_unchecked(outcome).as_u16_limbs().to_vec());
let rd_written_expr = cb.get_debug_expr(DebugIndex::RdWrite as usize)[0].clone();
cb.require_equal(
|| "assert_rd_written",
rd_written_expr,
expected_rd_written.value(),
)
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
#[test]
fn test_opcode_mulh() {
let test_cases = [
(2, 11),
(7, 0),
(0, 5),
(0, -3),
(-19, 0),
(0, 0),
(-12, -31),
(2, -1),
(1, i32::MIN),
(i32::MAX, -1),
(i32::MAX, i32::MIN),
(i32::MAX, i32::MAX),
(i32::MIN, i32::MIN),
];
test_cases
.iter()
.for_each(|(rs1, rs2)| verify_mulh::<GoldilocksExt2>(*rs1, *rs2));
#[cfg(feature = "u16limb_circuit")]
{
test_cases
.iter()
.for_each(|(rs1, rs2)| verify_mulh::<BabyBearExt4>(*rs1, *rs2));
}
}
fn verify_mulh<E: ExtensionField>(rs1: i32, rs2: i32) {
let mut cs = ConstraintSystem::<E>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let config = cb
.namespace(
|| "mulh",
|cb| {
Ok(MulhInstruction::construct_circuit(
cb,
&ProgramParams::default(),
))
},
)
.unwrap()
.unwrap();
let signed_prod_high = ((rs1 as i64).wrapping_mul(rs2 as i64) >> 32) as u32;
// values assignment
let insn_code = encode_rv32(InsnKind::MULH, 2, 3, 4, 0);
let (raw_witin, lkm) = MulhInstruction::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&StepRecord::new_r_instruction(
3,
MOCK_PC_START,
insn_code,
rs1 as u32,
rs2 as u32,
Change::new(0, signed_prod_high),
0,
)],
)
.unwrap();
// verify value written to register
let expected_rd_written = UInt::from_const_unchecked(
Value::new_unchecked(signed_prod_high)
.as_u16_limbs()
.to_vec(),
);
let rd_written_expr = cb.get_debug_expr(DebugIndex::RdWrite as usize)[0].clone();
cb.require_equal(
|| "assert_rd_written",
rd_written_expr,
expected_rd_written.value(),
)
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
#[test]
fn test_opcode_mulhsu() {
let test_cases = [
(0, 0),
(0, 5),
(0, u32::MAX),
(7, 0),
(2, 11),
(91, u32::MAX),
(i32::MAX, 0),
(i32::MAX, 2),
(i32::MAX, u32::MAX),
(-4, 0),
(-1, 3),
(-1000, u32::MAX),
(i32::MIN, 0),
(i32::MIN, 21),
(i32::MIN, u32::MAX),
];
test_cases
.iter()
.for_each(|(rs1, rs2)| verify_mulhsu::<GoldilocksExt2>(*rs1, *rs2));
#[cfg(feature = "u16limb_circuit")]
{
test_cases
.iter()
.for_each(|(rs1, rs2)| verify_mulhsu::<BabyBearExt4>(*rs1, *rs2));
}
}
fn verify_mulhsu<E: ExtensionField>(rs1: i32, rs2: u32) {
let mut cs = ConstraintSystem::<E>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let config = cb
.namespace(
|| "mulhsu",
|cb| {
Ok(MulhsuInstruction::construct_circuit(
cb,
&ProgramParams::default(),
))
},
)
.unwrap()
.unwrap();
let signed_unsigned_prod_high = ((rs1 as i64).wrapping_mul(rs2 as i64) >> 32) as u32;
// values assignment
let insn_code = encode_rv32(InsnKind::MULHSU, 2, 3, 4, 0);
let (raw_witin, lkm) = MulhsuInstruction::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&StepRecord::new_r_instruction(
3,
MOCK_PC_START,
insn_code,
rs1 as u32,
rs2,
Change::new(0, signed_unsigned_prod_high),
0,
)],
)
.unwrap();
// verify value written to register
let expected_rd_written = UInt::from_const_unchecked(
Value::new_unchecked(signed_unsigned_prod_high)
.as_u16_limbs()
.to_vec(),
);
let rd_written_expr = cb.get_debug_expr(DebugIndex::RdWrite as usize)[0].clone();
cb.require_equal(
|| "assert_rd_written",
rd_written_expr,
expected_rd_written.value(),
)
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/logic_imm.rs | ceno_zkvm/src/instructions/riscv/logic_imm.rs | #[cfg(not(feature = "u16limb_circuit"))]
mod logic_imm_circuit;
#[cfg(feature = "u16limb_circuit")]
mod logic_imm_circuit_v2;
#[cfg(not(feature = "u16limb_circuit"))]
pub use crate::instructions::riscv::logic_imm::logic_imm_circuit::LogicInstruction;
#[cfg(feature = "u16limb_circuit")]
pub use crate::instructions::riscv::logic_imm::logic_imm_circuit_v2::LogicInstruction;
#[cfg(test)]
mod test;
/// This trait defines a logic instruction, connecting an instruction type to a lookup table.
pub trait LogicOp {
const INST_KIND: InsnKind;
type OpsTable: OpsTable;
}
use gkr_iop::tables::ops::{AndTable, OrTable, XorTable};
use ceno_emul::InsnKind;
use gkr_iop::tables::OpsTable;
pub struct AndiOp;
impl LogicOp for AndiOp {
const INST_KIND: InsnKind = InsnKind::ANDI;
type OpsTable = AndTable;
}
pub type AndiInstruction<E> = LogicInstruction<E, AndiOp>;
pub struct OriOp;
impl LogicOp for OriOp {
const INST_KIND: InsnKind = InsnKind::ORI;
type OpsTable = OrTable;
}
pub type OriInstruction<E> = LogicInstruction<E, OriOp>;
pub struct XoriOp;
impl LogicOp for XoriOp {
const INST_KIND: InsnKind = InsnKind::XORI;
type OpsTable = XorTable;
}
pub type XoriInstruction<E> = LogicInstruction<E, XoriOp>;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/constants.rs | ceno_zkvm/src/instructions/riscv/constants.rs | use crate::uint::UIntLimbs;
pub use ceno_emul::PC_STEP_SIZE;
pub const ECALL_HALT_OPCODE: [usize; 2] = [0x00_00, 0x00_00];
pub const EXIT_PC: usize = 0;
pub const EXIT_CODE_IDX: usize = 0; // exit code u32 occupied 2 limb, each with 16
pub const INIT_PC_IDX: usize = EXIT_CODE_IDX + 2;
pub const INIT_CYCLE_IDX: usize = INIT_PC_IDX + 1;
pub const END_PC_IDX: usize = INIT_CYCLE_IDX + 1;
pub const END_CYCLE_IDX: usize = END_PC_IDX + 1;
pub const SHARD_ID_IDX: usize = END_CYCLE_IDX + 1;
pub const HEAP_START_ADDR_IDX: usize = SHARD_ID_IDX + 1;
pub const HEAP_LENGTH_IDX: usize = HEAP_START_ADDR_IDX + 1;
pub const HINT_START_ADDR_IDX: usize = HEAP_LENGTH_IDX + 1;
pub const HINT_LENGTH_IDX: usize = HINT_START_ADDR_IDX + 1;
pub const PUBLIC_IO_IDX: usize = HINT_LENGTH_IDX + 1;
pub const SHARD_RW_SUM_IDX: usize = PUBLIC_IO_IDX + 2;
pub const LIMB_BITS: usize = 16;
pub const LIMB_MASK: u32 = 0xFFFF;
pub const BIT_WIDTH: usize = 32usize;
pub const PC_BITS: usize = 30;
pub const MEM_BITS: usize = 30;
pub type UInt<E> = UIntLimbs<BIT_WIDTH, LIMB_BITS, E>;
pub type UIntMul<E> = UIntLimbs<{ 2 * BIT_WIDTH }, LIMB_BITS, E>;
/// use UInt<x> for x bits limb size
pub type UInt8<E> = UIntLimbs<BIT_WIDTH, 8, E>;
pub const UINT_LIMBS: usize = BIT_WIDTH.div_ceil(LIMB_BITS);
pub const UINT_BYTE_LIMBS: usize = BIT_WIDTH.div_ceil(8);
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/logic.rs | ceno_zkvm/src/instructions/riscv/logic.rs | mod logic_circuit;
use gkr_iop::tables::ops::{AndTable, OrTable, XorTable};
use logic_circuit::{LogicInstruction, LogicOp};
#[cfg(test)]
mod test;
use ceno_emul::InsnKind;
pub struct AndOp;
impl LogicOp for AndOp {
const INST_KIND: InsnKind = InsnKind::AND;
type OpsTable = AndTable;
}
pub type AndInstruction<E> = LogicInstruction<E, AndOp>;
pub struct OrOp;
impl LogicOp for OrOp {
const INST_KIND: InsnKind = InsnKind::OR;
type OpsTable = OrTable;
}
pub type OrInstruction<E> = LogicInstruction<E, OrOp>;
pub struct XorOp;
impl LogicOp for XorOp {
const INST_KIND: InsnKind = InsnKind::XOR;
type OpsTable = XorTable;
}
pub type XorInstruction<E> = LogicInstruction<E, XorOp>;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/slti/slti_circuit.rs | ceno_zkvm/src/instructions/riscv/slti/slti_circuit.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::SignedExtendConfig,
instructions::{
Instruction,
riscv::{
RIVInstruction,
constants::{LIMB_BITS, UINT_LIMBS, UInt},
i_insn::IInstructionConfig,
},
},
structs::ProgramParams,
tables::InsnRecord,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, SWord, StepRecord, Word};
use ff_ext::{ExtensionField, FieldInto};
use gkr_iop::gadgets::IsLtConfig;
use multilinear_extensions::{ToExpr, WitIn};
use std::marker::PhantomData;
use witness::set_val;
#[derive(Debug)]
pub struct SetLessThanImmConfig<E: ExtensionField> {
i_insn: IInstructionConfig<E>,
rs1_read: UInt<E>,
imm: WitIn,
#[allow(dead_code)]
pub(crate) rd_written: UInt<E>,
lt: IsLtConfig,
// SLTI
is_rs1_neg: Option<SignedExtendConfig<E>>,
}
pub struct SetLessThanImmInstruction<E, I>(PhantomData<(E, I)>);
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for SetLessThanImmInstruction<E, I> {
type InstructionConfig = SetLessThanImmConfig<E>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
cb: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
// If rs1_read < imm, rd_written = 1. Otherwise rd_written = 0
let rs1_read = UInt::new_unchecked(|| "rs1_read", cb)?;
let imm = cb.create_witin(|| "imm");
let (value_expr, is_rs1_neg) = match I::INST_KIND {
InsnKind::SLTIU => (rs1_read.value(), None),
InsnKind::SLTI => {
let is_rs1_neg = rs1_read.is_negative(cb)?;
(rs1_read.to_field_expr(is_rs1_neg.expr()), Some(is_rs1_neg))
}
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
};
let lt = IsLtConfig::construct_circuit(
cb,
|| "rs1 < imm",
value_expr,
imm.expr(),
UINT_LIMBS * LIMB_BITS,
)?;
let rd_written = UInt::from_exprs_unchecked(vec![lt.expr()]);
let i_insn = IInstructionConfig::<E>::construct_circuit(
cb,
I::INST_KIND,
imm.expr(),
#[cfg(feature = "u16limb_circuit")]
E::BaseField::ZERO.expr(),
rs1_read.register_expr(),
rd_written.register_expr(),
false,
)?;
Ok(SetLessThanImmConfig {
i_insn,
rs1_read,
imm,
rd_written,
is_rs1_neg,
lt,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [E::BaseField],
lkm: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
config
.i_insn
.assign_instance(instance, shard_ctx, lkm, step)?;
let rs1 = step.rs1().unwrap().value;
let rs1_value = Value::new_unchecked(rs1 as Word);
config
.rs1_read
.assign_value(instance, Value::new_unchecked(rs1));
let imm = InsnRecord::<E::BaseField>::imm_internal(&step.insn());
set_val!(instance, config.imm, imm.1);
match I::INST_KIND {
InsnKind::SLTIU => {
config
.lt
.assign_instance(instance, lkm, rs1 as u64, imm.0 as u64)?;
}
InsnKind::SLTI => {
config.is_rs1_neg.as_ref().unwrap().assign_instance(
instance,
lkm,
*rs1_value.as_u16_limbs().last().unwrap() as u64,
)?;
let (rs1, imm) = (rs1 as SWord, imm.0 as SWord);
config
.lt
.assign_instance_signed(instance, lkm, rs1 as i64, imm as i64)?;
}
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
}
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/slti/slti_circuit_v2.rs | ceno_zkvm/src/instructions/riscv/slti/slti_circuit_v2.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::{UIntLimbsLT, UIntLimbsLTConfig},
instructions::{
Instruction,
riscv::{
RIVInstruction,
constants::{UINT_LIMBS, UInt},
i_insn::IInstructionConfig,
},
},
structs::ProgramParams,
utils::{imm_sign_extend, imm_sign_extend_circuit},
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, StepRecord, Word};
use ff_ext::{ExtensionField, FieldInto};
use multilinear_extensions::{ToExpr, WitIn};
use p3::field::FieldAlgebra;
use std::marker::PhantomData;
use witness::set_val;
#[derive(Debug)]
pub struct SetLessThanImmConfig<E: ExtensionField> {
i_insn: IInstructionConfig<E>,
rs1_read: UInt<E>,
imm: WitIn,
// 0 positive, 1 negative
imm_sign: WitIn,
#[allow(dead_code)]
pub(crate) rd_written: UInt<E>,
uint_lt_config: UIntLimbsLTConfig<E>,
}
pub struct SetLessThanImmInstruction<E, I>(PhantomData<(E, I)>);
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for SetLessThanImmInstruction<E, I> {
type InstructionConfig = SetLessThanImmConfig<E>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
cb: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
assert_eq!(UINT_LIMBS, 2);
// If rs1_read < imm, rd_written = 1. Otherwise rd_written = 0
let rs1_read = UInt::new_unchecked(|| "rs1_read", cb)?;
let imm = cb.create_witin(|| "imm");
// a bool witness to mark sign extend of imm no matter sign/unsign
let imm_sign = cb.create_witin(|| "imm_sign");
let imm_sign_extend = UInt::from_exprs_unchecked(
imm_sign_extend_circuit::<E>(true, imm_sign.expr(), imm.expr()).to_vec(),
);
let uint_lt_config = match I::INST_KIND {
InsnKind::SLTIU => {
UIntLimbsLT::construct_circuit(cb, &rs1_read, &imm_sign_extend, false)?
}
InsnKind::SLTI => {
UIntLimbsLT::construct_circuit(cb, &rs1_read, &imm_sign_extend, true)?
}
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
};
let rd_written = UInt::from_exprs_unchecked(vec![uint_lt_config.is_lt()]);
let i_insn = IInstructionConfig::<E>::construct_circuit(
cb,
I::INST_KIND,
imm_sign_extend.expr().remove(0),
imm_sign.expr(),
rs1_read.register_expr(),
rd_written.register_expr(),
false,
)?;
Ok(SetLessThanImmConfig {
i_insn,
rs1_read,
imm,
imm_sign,
rd_written,
uint_lt_config,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [E::BaseField],
lkm: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
config
.i_insn
.assign_instance(instance, shard_ctx, lkm, step)?;
let rs1 = step.rs1().unwrap().value;
let rs1_value = Value::new_unchecked(rs1 as Word);
config
.rs1_read
.assign_value(instance, Value::new_unchecked(rs1));
let imm = step.insn().imm as i16 as u16;
set_val!(instance, config.imm, E::BaseField::from_canonical_u16(imm));
// according to riscvim32 spec, imm always do signed extension
let imm_sign_extend = imm_sign_extend(true, step.insn().imm as i16);
set_val!(
instance,
config.imm_sign,
E::BaseField::from_bool(imm_sign_extend[1] > 0)
);
UIntLimbsLT::<E>::assign(
&config.uint_lt_config,
instance,
lkm,
rs1_value.as_u16_limbs(),
&imm_sign_extend,
matches!(step.insn().kind, InsnKind::SLTI),
)?;
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall/fptower_fp2_add.rs | ceno_zkvm/src/instructions/riscv/ecall/fptower_fp2_add.rs | use std::marker::PhantomData;
use ceno_emul::{
BN254_FP2_ADD, ByteAddr, Change, InsnKind, Platform, StepRecord, WORD_SIZE, WriteOp,
};
use ff_ext::ExtensionField;
use generic_array::typenum::Unsigned;
use gkr_iop::{
ProtocolBuilder, ProtocolWitnessGenerator,
gkr::{GKRCircuit, layer::Layer},
utils::lk_multiplicity::Multiplicity,
};
use itertools::{Itertools, izip};
use multilinear_extensions::{ToExpr, util::max_usable_threads};
use p3::{field::FieldAlgebra, matrix::Matrix};
use rayon::{
iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator},
slice::ParallelSlice,
};
use sp1_curves::{
params::NumWords,
utils::biguint_from_le_words,
weierstrass::{FpOpField, bn254::Bn254BaseField},
};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::general::InstFetch,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::FieldOperation,
instructions::{
Instruction,
riscv::{
constants::{LIMB_BITS, LIMB_MASK, MEM_BITS, UInt},
ecall_base::OpFixedRS,
insn_base::{MemAddr, StateInOut, WriteMEM},
},
},
precompiles::{Fp2AddSubAssignLayout, Fp2AddSubInstance, Fp2AddSubTrace},
structs::ProgramParams,
tables::{InsnRecord, RMMCollections},
witness::LkMultiplicity,
};
pub trait Fp2AddSpec: FpOpField {
const SYSCALL_CODE: u32;
}
impl Fp2AddSpec for Bn254BaseField {
const SYSCALL_CODE: u32 = BN254_FP2_ADD;
}
#[derive(Debug)]
pub struct EcallFp2AddConfig<E: ExtensionField, P: FpOpField> {
pub layout: Fp2AddSubAssignLayout<E, P>,
vm_state: StateInOut<E>,
ecall_id: OpFixedRS<E, { Platform::reg_ecall() }, false>,
value_ptr_0: (OpFixedRS<E, { Platform::reg_arg0() }, true>, MemAddr<E>),
value_ptr_1: (OpFixedRS<E, { Platform::reg_arg1() }, true>, MemAddr<E>),
mem_rw: Vec<WriteMEM>,
}
pub struct Fp2AddInstruction<E, P>(PhantomData<(E, P)>);
impl<E: ExtensionField, P: FpOpField + Fp2AddSpec + NumWords> Instruction<E>
for Fp2AddInstruction<E, P>
{
type InstructionConfig = EcallFp2AddConfig<E, P>;
fn name() -> String {
"Ecall_Fp2Add".to_string()
}
fn construct_circuit(
_circuit_builder: &mut CircuitBuilder<E>,
_param: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
unimplemented!()
}
fn build_gkr_iop_circuit(
cb: &mut CircuitBuilder<E>,
_param: &ProgramParams,
) -> Result<(Self::InstructionConfig, GKRCircuit<E>), ZKVMError> {
build_fp2_add_circuit::<E, P>(cb)
}
fn generate_fixed_traces(
config: &Self::InstructionConfig,
num_fixed: usize,
) -> Option<RowMajorMatrix<E::BaseField>> {
let fixed = config.layout.fixed_witness_group();
assert_eq!(fixed.width(), num_fixed);
Some(fixed)
}
fn assign_instance(
_config: &Self::InstructionConfig,
_shard_ctx: &mut ShardContext,
_instance: &mut [<E as ExtensionField>::BaseField],
_lk_multiplicity: &mut LkMultiplicity,
_step: &StepRecord,
) -> Result<(), ZKVMError> {
unreachable!("we override logic in assign_instances")
}
fn assign_instances(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
num_witin: usize,
num_structural_witin: usize,
steps: Vec<&StepRecord>,
) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
assign_fp2_add_instances::<E, P>(config, shard_ctx, num_witin, num_structural_witin, steps)
}
}
fn build_fp2_add_circuit<E: ExtensionField, P: FpOpField + Fp2AddSpec + NumWords>(
cb: &mut CircuitBuilder<E>,
) -> Result<(EcallFp2AddConfig<E, P>, GKRCircuit<E>), ZKVMError> {
let vm_state = StateInOut::construct_circuit(cb, false)?;
let ecall_id = OpFixedRS::<_, { Platform::reg_ecall() }, false>::construct_circuit(
cb,
UInt::from_const_unchecked(vec![
P::SYSCALL_CODE & LIMB_MASK,
(P::SYSCALL_CODE >> LIMB_BITS) & LIMB_MASK,
])
.register_expr(),
vm_state.ts,
)?;
let value_ptr_value_0 = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
let value_ptr_value_1 = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
let value_ptr_0 = OpFixedRS::<_, { Platform::reg_arg0() }, true>::construct_circuit(
cb,
value_ptr_value_0.uint_unaligned().register_expr(),
vm_state.ts,
)?;
let value_ptr_1 = OpFixedRS::<_, { Platform::reg_arg1() }, true>::construct_circuit(
cb,
value_ptr_value_1.uint_unaligned().register_expr(),
vm_state.ts,
)?;
cb.lk_fetch(&InsnRecord::new(
vm_state.pc.expr(),
InsnKind::ECALL.into(),
None,
0.into(),
0.into(),
0.into(),
#[cfg(feature = "u16limb_circuit")]
0.into(),
))?;
let mut layout =
<Fp2AddSubAssignLayout<E, P> as ProtocolBuilder<E>>::build_layer_logic(cb, ())?;
let mut mem_rw = izip!(&layout.input32_exprs[0], &layout.output32_exprs)
.enumerate()
.map(|(i, (val_before, val_after))| {
WriteMEM::construct_circuit(
cb,
value_ptr_0.prev_value.as_ref().unwrap().value()
+ E::BaseField::from_canonical_u32(ByteAddr::from((i * WORD_SIZE) as u32).0)
.expr(),
val_before.clone(),
val_after.clone(),
vm_state.ts,
)
})
.collect::<Result<Vec<WriteMEM>, _>>()?;
mem_rw.extend(
layout.input32_exprs[1]
.iter()
.enumerate()
.map(|(i, val_before)| {
WriteMEM::construct_circuit(
cb,
value_ptr_1.prev_value.as_ref().unwrap().value()
+ E::BaseField::from_canonical_u32(
ByteAddr::from((i * WORD_SIZE) as u32).0,
)
.expr(),
val_before.clone(),
val_before.clone(),
vm_state.ts,
)
})
.collect::<Result<Vec<WriteMEM>, _>>()?,
);
let (out_evals, mut chip) = layout.finalize(cb);
let layer =
Layer::from_circuit_builder(cb, "fp2_add".to_string(), layout.n_challenges, out_evals);
chip.add_layer(layer);
Ok((
EcallFp2AddConfig {
layout,
vm_state,
ecall_id,
value_ptr_0: (value_ptr_0, value_ptr_value_0),
value_ptr_1: (value_ptr_1, value_ptr_value_1),
mem_rw,
},
chip.gkr_circuit(),
))
}
fn assign_fp2_add_instances<E: ExtensionField, P: FpOpField + Fp2AddSpec + NumWords>(
config: &EcallFp2AddConfig<E, P>,
shard_ctx: &mut ShardContext,
num_witin: usize,
num_structural_witin: usize,
steps: Vec<&StepRecord>,
) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
let mut lk_multiplicity = LkMultiplicity::default();
if steps.is_empty() {
return Ok((
[
RowMajorMatrix::new(0, num_witin, InstancePaddingStrategy::Default),
RowMajorMatrix::new(0, num_structural_witin, InstancePaddingStrategy::Default),
],
lk_multiplicity.into_finalize_result(),
));
}
let nthreads = max_usable_threads();
let num_instance_per_batch = steps.len().div_ceil(nthreads).max(1);
let mut raw_witin = RowMajorMatrix::<E::BaseField>::new(
steps.len(),
num_witin,
InstancePaddingStrategy::Default,
);
let mut raw_structural_witin = RowMajorMatrix::<E::BaseField>::new(
steps.len(),
num_structural_witin,
InstancePaddingStrategy::Default,
);
let raw_witin_iter = raw_witin.par_batch_iter_mut(num_instance_per_batch);
let shard_ctx_vec = shard_ctx.get_forked();
raw_witin_iter
.zip_eq(steps.par_chunks(num_instance_per_batch))
.zip(shard_ctx_vec)
.flat_map(|((instances, steps), mut shard_ctx)| {
let mut lk_multiplicity = lk_multiplicity.clone();
instances
.chunks_mut(num_witin)
.zip_eq(steps)
.map(|(instance, step)| {
let ops = &step.syscall().expect("syscall step");
config
.vm_state
.assign_instance(instance, &shard_ctx, step)?;
config.ecall_id.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&WriteOp::new_register_op(
Platform::reg_ecall(),
Change::new(P::SYSCALL_CODE, P::SYSCALL_CODE),
step.rs1().unwrap().previous_cycle,
),
)?;
config.value_ptr_0.1.assign_instance(
instance,
&mut lk_multiplicity,
ops.reg_ops[0].value.after,
)?;
config.value_ptr_0.0.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&ops.reg_ops[0],
)?;
config.value_ptr_1.1.assign_instance(
instance,
&mut lk_multiplicity,
ops.reg_ops[1].value.after,
)?;
config.value_ptr_1.0.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&ops.reg_ops[1],
)?;
for (writer, op) in config.mem_rw.iter().zip_eq(&ops.mem_ops) {
writer.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
op,
)?;
}
lk_multiplicity.fetch(step.pc().before.0);
Ok(())
})
.collect::<Vec<_>>()
})
.collect::<Result<(), ZKVMError>>()?;
let words = <P as NumWords>::WordsFieldElement::USIZE;
let words_fp2 = <P as NumWords>::WordsCurvePoint::USIZE;
let instances: Vec<Fp2AddSubInstance<P>> = steps
.par_iter()
.map(|step| {
let values: Vec<u32> = step
.syscall()
.unwrap()
.mem_ops
.iter()
.map(|op| op.value.before)
.collect();
let x_c0 = biguint_from_le_words(&values[0..words]);
let x_c1 = biguint_from_le_words(&values[words..2 * words]);
let y_base = words_fp2;
let y_c0 = biguint_from_le_words(&values[y_base..y_base + words]);
let y_c1 = biguint_from_le_words(&values[y_base + words..y_base + 2 * words]);
Fp2AddSubInstance::new(x_c0, x_c1, y_c0, y_c1, FieldOperation::Add)
})
.collect();
config.layout.phase1_witness_group(
Fp2AddSubTrace { instances },
[&mut raw_witin, &mut raw_structural_witin],
&mut lk_multiplicity,
);
raw_witin.padding_by_strategy();
raw_structural_witin.padding_by_strategy();
Ok((
[raw_witin, raw_structural_witin],
lk_multiplicity.into_finalize_result(),
))
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall/weierstrass_add.rs | ceno_zkvm/src/instructions/riscv/ecall/weierstrass_add.rs | use std::marker::PhantomData;
use ceno_emul::{
BLS12381_ADD, BN254_ADD, ByteAddr, Change, Cycle, InsnKind, Platform, SECP256K1_ADD,
SECP256R1_ADD, StepRecord, WORD_SIZE, WriteOp,
};
use ff_ext::ExtensionField;
use generic_array::{GenericArray, typenum::Unsigned};
use gkr_iop::{
ProtocolBuilder, ProtocolWitnessGenerator,
gkr::{GKRCircuit, layer::Layer},
utils::lk_multiplicity::Multiplicity,
};
use itertools::{Itertools, izip};
use multilinear_extensions::{ToExpr, util::max_usable_threads};
use p3::{field::FieldAlgebra, matrix::Matrix};
use rayon::{
iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator},
slice::ParallelSlice,
};
use sp1_curves::{CurveType, EllipticCurve, params::NumWords};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::general::InstFetch,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
constants::{LIMB_BITS, LIMB_MASK, MEM_BITS, UInt},
ecall_base::OpFixedRS,
insn_base::{MemAddr, StateInOut, WriteMEM},
},
},
precompiles::{
EllipticCurveAddInstance, WeierstrassAddAssignLayout, WeierstrassAddAssignTrace,
},
structs::ProgramParams,
tables::{InsnRecord, RMMCollections},
witness::LkMultiplicity,
};
/// Circuit configuration for the Weierstrass point-addition ecall.
///
/// Bundles the GKR-IOP layout with the RISC-V side-effect gadgets that every
/// ecall must carry: the VM state transition, the syscall-id register check,
/// the two point-pointer register reads, and the memory traffic for both
/// input points.
#[derive(Debug)]
pub struct EcallWeierstrassAddAssignConfig<E: ExtensionField, EC: EllipticCurve> {
    pub layout: WeierstrassAddAssignLayout<E, EC>,
    // pc / timestamp in-out state of this instruction.
    vm_state: StateInOut<E>,
    // Read of the ecall register holding the syscall code (read-only: `false`).
    ecall_id: OpFixedRS<E, { Platform::reg_ecall() }, false>,
    // arg0 register read paired with the decoded, alignment-checked memory
    // address of the first input point (also the output location).
    point_ptr_0: (OpFixedRS<E, { Platform::reg_arg0() }, true>, MemAddr<E>),
    // arg1 register read paired with the decoded address of the second point.
    point_ptr_1: (OpFixedRS<E, { Platform::reg_arg1() }, true>, MemAddr<E>),
    // Memory ops: result words overwrite point 0; point 1 is rewritten unchanged.
    mem_rw: Vec<WriteMEM>,
}
/// zkVM instruction for the Weierstrass point-addition precompile ecall:
/// proves the curve addition itself plus all of its VM side-effects
/// (register reads, memory writes, fetch, state transition).
pub struct WeierstrassAddAssignInstruction<E, EC>(PhantomData<(E, EC)>);
impl<E: ExtensionField, EC: EllipticCurve> Instruction<E>
    for WeierstrassAddAssignInstruction<E, EC>
{
    type InstructionConfig = EcallWeierstrassAddAssignConfig<E, EC>;

    /// Circuit name, e.g. `Ecall_WeierstrassAddAssign_Bn254`.
    fn name() -> String {
        "Ecall_WeierstrassAddAssign_".to_string() + format!("{:?}", EC::CURVE_TYPE).as_str()
    }

    /// Unused: this precompile is only built via `build_gkr_iop_circuit`.
    fn construct_circuit(
        _circuit_builder: &mut CircuitBuilder<E>,
        _param: &ProgramParams,
    ) -> Result<Self::InstructionConfig, ZKVMError> {
        unimplemented!()
    }

    /// Builds the full GKR-IOP circuit: VM-state transition, syscall-id and
    /// pointer register reads, instruction fetch, the curve-addition layer
    /// logic, and one memory write per word of both input points.
    fn build_gkr_iop_circuit(
        cb: &mut CircuitBuilder<E>,
        _param: &ProgramParams,
    ) -> Result<(Self::InstructionConfig, GKRCircuit<E>), ZKVMError> {
        // constrain vmstate
        let vm_state = StateInOut::construct_circuit(cb, false)?;
        // Select the syscall code that matches the concrete curve type.
        let syscall_code = match EC::CURVE_TYPE {
            CurveType::Secp256k1 => SECP256K1_ADD,
            CurveType::Secp256r1 => SECP256R1_ADD,
            CurveType::Bn254 => BN254_ADD,
            CurveType::Bls12381 => BLS12381_ADD,
            CurveType::Ed25519 => {
                unreachable!("WeierstrassAddAssign is not supported for Ed25519")
            }
        };
        // The ecall register must hold exactly `syscall_code`, expressed as
        // two LIMB_BITS-wide limbs (low limb first).
        let ecall_id = OpFixedRS::<_, { Platform::reg_ecall() }, false>::construct_circuit(
            cb,
            UInt::from_const_unchecked(vec![
                syscall_code & LIMB_MASK,
                (syscall_code >> LIMB_BITS) & LIMB_MASK,
            ])
            .register_expr(),
            vm_state.ts,
        )?;
        // Word-aligned (2 low bits) pointers to the two input points,
        // sourced from the arg0/arg1 registers.
        let point_ptr_value_0 = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
        let point_ptr_value_1 = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
        let point_ptr_0 = OpFixedRS::<_, { Platform::reg_arg0() }, true>::construct_circuit(
            cb,
            point_ptr_value_0.uint_unaligned().register_expr(),
            vm_state.ts,
        )?;
        let point_ptr_1 = OpFixedRS::<_, { Platform::reg_arg1() }, true>::construct_circuit(
            cb,
            point_ptr_value_1.uint_unaligned().register_expr(),
            vm_state.ts,
        )?;
        // fetch the ECALL instruction at the current pc
        cb.lk_fetch(&InsnRecord::new(
            vm_state.pc.expr(),
            InsnKind::ECALL.into(),
            None,
            0.into(),
            0.into(),
            0.into(),
            #[cfg(feature = "u16limb_circuit")]
            0.into(),
        ))?;
        // Build the curve-addition layer; exposes input/output word expressions.
        let mut layout =
            <WeierstrassAddAssignLayout<E, EC> as gkr_iop::ProtocolBuilder<E>>::build_layer_logic(
                cb,
                (),
            )?;
        // Write the result to the same address of the first input point.
        let mut mem_rw = izip!(&layout.input32_exprs[0], &layout.output32_exprs)
            .enumerate()
            .map(|(i, (val_before, val_after))| {
                WriteMEM::construct_circuit(
                    cb,
                    // mem address := point_ptr_0 + i * WORD_SIZE
                    point_ptr_0.prev_value.as_ref().unwrap().value()
                        + E::BaseField::from_canonical_u32(
                            ByteAddr::from((i * WORD_SIZE) as u32).0,
                        )
                        .expr(),
                    val_before.clone(),
                    val_after.clone(),
                    vm_state.ts,
                )
            })
            .collect::<Result<Vec<WriteMEM>, _>>()?;
        // Keep the second input point unchanged in memory.
        mem_rw.extend(
            layout.input32_exprs[1]
                .iter()
                .enumerate()
                .map(|(i, val_before)| {
                    WriteMEM::construct_circuit(
                        cb,
                        // mem address := point_ptr_1 + i * WORD_SIZE
                        point_ptr_1.prev_value.as_ref().unwrap().value()
                            + E::BaseField::from_canonical_u32(
                                ByteAddr::from((i * WORD_SIZE) as u32).0,
                            )
                            .expr(),
                        val_before.clone(),
                        // write back the value that was read: a no-op write
                        val_before.clone(),
                        vm_state.ts,
                    )
                })
                .collect::<Result<Vec<WriteMEM>, _>>()?,
        );
        let (out_evals, mut chip) = layout.finalize(cb);
        let layer = Layer::from_circuit_builder(
            cb,
            "weierstrass_add".to_string(),
            layout.n_challenges,
            out_evals,
        );
        chip.add_layer(layer);
        let circuit = chip.gkr_circuit();
        Ok((
            EcallWeierstrassAddAssignConfig {
                layout,
                vm_state,
                ecall_id,
                point_ptr_0: (point_ptr_0, point_ptr_value_0),
                point_ptr_1: (point_ptr_1, point_ptr_value_1),
                mem_rw,
            },
            circuit,
        ))
    }

    /// Returns the layout's fixed columns; width must match the circuit's
    /// registered fixed-column count.
    fn generate_fixed_traces(
        config: &Self::InstructionConfig,
        num_fixed: usize,
    ) -> Option<RowMajorMatrix<E::BaseField>> {
        let fixed = config.layout.fixed_witness_group();
        assert_eq!(fixed.width(), num_fixed);
        Some(fixed)
    }

    /// Per-row assignment is bypassed; see `assign_instances`.
    fn assign_instance(
        _config: &Self::InstructionConfig,
        _shard_ctx: &mut ShardContext,
        _instance: &mut [<E as ExtensionField>::BaseField],
        _lk_multiplicity: &mut LkMultiplicity,
        _step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        unreachable!("we override logic in assign_instances")
    }

    /// Two-pass witness assignment over all syscall steps:
    /// pass 1 (parallel, one forked shard context per batch) fills the RISC-V
    /// side-effect columns; pass 2 decodes both curve points from the
    /// pre-state memory words and hands them to the GKR-IOP layout.
    fn assign_instances(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        num_witin: usize,
        num_structural_witin: usize,
        steps: Vec<&StepRecord>,
    ) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
        let syscall_code = match EC::CURVE_TYPE {
            CurveType::Secp256k1 => SECP256K1_ADD,
            CurveType::Secp256r1 => SECP256R1_ADD,
            CurveType::Bn254 => BN254_ADD,
            CurveType::Bls12381 => BLS12381_ADD,
            CurveType::Ed25519 => {
                unreachable!("WeierstrassAddAssign is not supported for Ed25519")
            }
        };
        let mut lk_multiplicity = LkMultiplicity::default();
        // No steps: return empty matrices of the expected widths.
        if steps.is_empty() {
            return Ok((
                [
                    RowMajorMatrix::new(0, num_witin, InstancePaddingStrategy::Default),
                    RowMajorMatrix::new(0, num_structural_witin, InstancePaddingStrategy::Default),
                ],
                lk_multiplicity.into_finalize_result(),
            ));
        }
        let nthreads = max_usable_threads();
        let num_instance_per_batch = steps.len().div_ceil(nthreads).max(1);
        let mut raw_witin = RowMajorMatrix::<E::BaseField>::new(
            steps.len(),
            num_witin,
            InstancePaddingStrategy::Default,
        );
        let mut raw_structural_witin = RowMajorMatrix::<E::BaseField>::new(
            steps.len(),
            num_structural_witin,
            InstancePaddingStrategy::Default,
        );
        let raw_witin_iter = raw_witin.par_batch_iter_mut(num_instance_per_batch);
        let shard_ctx_vec = shard_ctx.get_forked();
        // 1st pass: assign witness outside of gkr-iop scope
        raw_witin_iter
            .zip_eq(steps.par_chunks(num_instance_per_batch))
            .zip(shard_ctx_vec)
            .flat_map(|((instances, steps), mut shard_ctx)| {
                // NOTE(review): per-batch clone of the multiplicity tracker —
                // assumed to share underlying counters with the original.
                let mut lk_multiplicity = lk_multiplicity.clone();
                instances
                    .chunks_mut(num_witin)
                    .zip_eq(steps)
                    .map(|(instance, step)| {
                        let ops = &step.syscall().expect("syscall step");
                        // vm_state
                        config
                            .vm_state
                            .assign_instance(instance, &shard_ctx, step)?;
                        // ecall register is read-only: before == after == syscall_code
                        config.ecall_id.assign_op(
                            instance,
                            &mut shard_ctx,
                            &mut lk_multiplicity,
                            step.cycle(),
                            &WriteOp::new_register_op(
                                Platform::reg_ecall(),
                                Change::new(syscall_code, syscall_code),
                                step.rs1().unwrap().previous_cycle,
                            ),
                        )?;
                        // assign point_ptr_0
                        config.point_ptr_0.1.assign_instance(
                            instance,
                            &mut lk_multiplicity,
                            ops.reg_ops[0].value.after,
                        )?;
                        config.point_ptr_0.0.assign_op(
                            instance,
                            &mut shard_ctx,
                            &mut lk_multiplicity,
                            step.cycle(),
                            &ops.reg_ops[0],
                        )?;
                        // assign point_ptr_1
                        config.point_ptr_1.1.assign_instance(
                            instance,
                            &mut lk_multiplicity,
                            ops.reg_ops[1].value.after,
                        )?;
                        config.point_ptr_1.0.assign_op(
                            instance,
                            &mut shard_ctx,
                            &mut lk_multiplicity,
                            step.cycle(),
                            &ops.reg_ops[1],
                        )?;
                        // One writer per recorded memory op, in matching order
                        // (zip_eq panics if counts ever diverge).
                        for (writer, op) in config.mem_rw.iter().zip_eq(&ops.mem_ops) {
                            writer.assign_op(
                                instance,
                                &mut shard_ctx,
                                &mut lk_multiplicity,
                                step.cycle(),
                                op,
                            )?;
                        }
                        // fetch
                        lk_multiplicity.fetch(step.pc().before.0);
                        Ok(())
                    })
                    .collect::<Vec<_>>()
            })
            .collect::<Result<(), ZKVMError>>()?;
        // second pass
        let instances: Vec<EllipticCurveAddInstance<EC::BaseField>> = steps
            .par_iter()
            .map(|step| {
                // Points are reconstructed from the *pre-state* memory words.
                let (instance, _prev_ts): (Vec<u32>, Vec<Cycle>) = step
                    .syscall()
                    .unwrap()
                    .mem_ops
                    .iter()
                    .map(|op| (op.value.before, op.previous_cycle))
                    .unzip();
                // First WordsCurvePoint words: point p; remainder: point q.
                let p = GenericArray::try_from(
                    instance[0..<EC::BaseField as NumWords>::WordsCurvePoint::USIZE].to_vec(),
                );
                let q = GenericArray::try_from(
                    instance[<EC::BaseField as NumWords>::WordsCurvePoint::USIZE..].to_vec(),
                );
                p.and_then(|p| q.map(|q| EllipticCurveAddInstance::<EC::BaseField> { p, q }))
                    .map_err(|_| {
                        ZKVMError::InvalidWitness("Failed to parse EllipticCurveAddInstance".into())
                    })
            })
            .collect::<Result<_, _>>()?;
        config.layout.phase1_witness_group(
            WeierstrassAddAssignTrace { instances },
            [&mut raw_witin, &mut raw_structural_witin],
            &mut lk_multiplicity,
        );
        raw_witin.padding_by_strategy();
        raw_structural_witin.padding_by_strategy();
        Ok((
            [raw_witin, raw_structural_witin],
            lk_multiplicity.into_finalize_result(),
        ))
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall/halt.rs | ceno_zkvm/src/instructions/riscv/ecall/halt.rs | use crate::{
chip_handler::{RegisterChipOperations, general::PublicValuesQuery},
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::AssertLtConfig,
instructions::{
Instruction,
riscv::{
constants::{ECALL_HALT_OPCODE, EXIT_PC},
ecall_insn::EcallInstructionConfig,
},
},
structs::{ProgramParams, RAMType},
witness::LkMultiplicity,
};
use ceno_emul::{FullTracer as Tracer, StepRecord};
use ff_ext::{ExtensionField, FieldInto};
use multilinear_extensions::{ToExpr, WitIn};
use p3::field::FieldAlgebra;
use std::marker::PhantomData;
use witness::set_val;
/// Circuit configuration for the `ecall halt` instruction.
pub struct HaltConfig {
    // Common ecall gadget: syscall-id check, pc transition, timestamps.
    ecall_cfg: EcallInstructionConfig,
    // Previous access timestamp of the x10 (arg0) register read.
    prev_x10_ts: WitIn,
    // Asserts prev_x10_ts < the current read timestamp of x10.
    lt_x10_cfg: AssertLtConfig,
}
/// zkVM instruction for `ecall halt`: reads the exit code from x10 and
/// forces the post-state pc to the exit sentinel.
pub struct HaltInstruction<E>(PhantomData<E>);
impl<E: ExtensionField> Instruction<E> for HaltInstruction<E> {
    type InstructionConfig = HaltConfig;

    fn name() -> String {
        "ECALL_HALT".into()
    }

    /// Constrains the halt ecall: base ecall checks with next-pc pinned to
    /// `EXIT_PC`, plus a read of the exit code from x10 that is tied to the
    /// public exit-code values.
    fn construct_circuit(
        cb: &mut CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<Self::InstructionConfig, ZKVMError> {
        let prev_x10_ts = cb.create_witin(|| "prev_x10_ts");
        // The exit code exposed as public values (two limbs).
        let exit_code = {
            let exit_code = cb.query_exit_code()?;
            [exit_code[0].expr(), exit_code[1].expr()]
        };
        let ecall_cfg = EcallInstructionConfig::construct_circuit(
            cb,
            [ECALL_HALT_OPCODE[0].into(), ECALL_HALT_OPCODE[1].into()],
            None,
            // halt jumps to the sentinel exit pc rather than pc + 4
            Some(EXIT_PC.into()),
        )?;
        // read exit_code from arg0 (X10 register)
        let (_, lt_x10_cfg) = cb.register_read(
            || "read x10",
            E::BaseField::from_canonical_u64(ceno_emul::Platform::reg_arg0() as u64),
            prev_x10_ts.expr(),
            ecall_cfg.ts.expr() + Tracer::SUBCYCLE_RS2,
            exit_code,
        )?;
        Ok(HaltConfig {
            ecall_cfg,
            prev_x10_ts,
            lt_x10_cfg,
        })
    }

    /// Fills one trace row from an executed halt step. Panics (debug
    /// assertion style) if the step is not actually an `ecall halt`.
    fn assign_instance(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        instance: &mut [E::BaseField],
        lk_multiplicity: &mut LkMultiplicity,
        step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        // rs1 must carry the halt syscall code (two 16-bit halves packed).
        assert_eq!(
            step.rs1().unwrap().value,
            (ECALL_HALT_OPCODE[0] + (ECALL_HALT_OPCODE[1] << 16)) as u32
        );
        assert_eq!(
            step.pc().after.0,
            0,
            "pc after ecall/halt {:x}",
            step.pc().after.0
        );
        // Rebase global cycles onto this shard's local timeline.
        let current_shard_offset_cycle = shard_ctx.current_shard_offset_cycle();
        let shard_cycle = step.cycle() - current_shard_offset_cycle;
        let rs2_prev_cycle = shard_ctx.aligned_prev_ts(step.rs2().unwrap().previous_cycle);
        // the access of X10 register is stored in rs2()
        set_val!(instance, config.prev_x10_ts, rs2_prev_cycle);
        // Report the x10 register access to the shard context.
        shard_ctx.send(
            RAMType::Register,
            step.rs2().unwrap().addr,
            ceno_emul::Platform::reg_arg0() as u64,
            step.cycle() + Tracer::SUBCYCLE_RS2,
            step.rs2().unwrap().previous_cycle,
            step.rs2().unwrap().value,
            None,
        );
        // prev_ts < current access ts for x10 (shard-local cycle basis).
        config.lt_x10_cfg.assign_instance(
            instance,
            lk_multiplicity,
            rs2_prev_cycle,
            shard_cycle + Tracer::SUBCYCLE_RS2,
        )?;
        config
            .ecall_cfg
            .assign_instance::<E>(instance, shard_ctx, lk_multiplicity, step)?;
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall/fptower_fp.rs | ceno_zkvm/src/instructions/riscv/ecall/fptower_fp.rs | use std::marker::PhantomData;
use ceno_emul::{
BN254_FP_ADD, BN254_FP_MUL, ByteAddr, Change, InsnKind, Platform, StepRecord, WORD_SIZE,
WriteOp,
};
use ff_ext::ExtensionField;
use generic_array::typenum::Unsigned;
use gkr_iop::{
ProtocolBuilder, ProtocolWitnessGenerator,
gkr::{GKRCircuit, layer::Layer},
utils::lk_multiplicity::Multiplicity,
};
use itertools::{Itertools, izip};
use multilinear_extensions::{ToExpr, util::max_usable_threads};
use p3::{field::FieldAlgebra, matrix::Matrix};
use rayon::{
iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator},
slice::ParallelSlice,
};
use sp1_curves::{
params::NumWords,
utils::biguint_from_le_words,
weierstrass::{FpOpField, bn254::Bn254BaseField},
};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::general::InstFetch,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::FieldOperation,
instructions::{
Instruction,
riscv::{
constants::{LIMB_BITS, LIMB_MASK, MEM_BITS, UInt},
ecall_base::OpFixedRS,
insn_base::{MemAddr, StateInOut, WriteMEM},
},
},
precompiles::{FpOpInstance, FpOpLayout, FpOpTrace},
structs::ProgramParams,
tables::{InsnRecord, RMMCollections},
witness::LkMultiplicity,
};
/// Associates a base field with its `fp_add` syscall code.
pub trait FpAddSpec: FpOpField {
    const SYSCALL_CODE: u32;
}

/// Associates a base field with its `fp_mul` syscall code.
pub trait FpMulSpec: FpOpField {
    const SYSCALL_CODE: u32;
}

impl FpAddSpec for Bn254BaseField {
    const SYSCALL_CODE: u32 = BN254_FP_ADD;
}

impl FpMulSpec for Bn254BaseField {
    const SYSCALL_CODE: u32 = BN254_FP_MUL;
}
/// Circuit configuration shared by the `fp_add` / `fp_mul` ecalls.
///
/// Mirrors the Weierstrass configs: GKR-IOP layout plus the RISC-V
/// side-effect gadgets (state transition, syscall-id check, operand
/// pointers, memory traffic).
#[derive(Debug)]
pub struct EcallFpOpConfig<E: ExtensionField, P: FpOpField> {
    pub layout: FpOpLayout<E, P>,
    // pc / timestamp in-out state of this instruction.
    vm_state: StateInOut<E>,
    // Read of the ecall register holding the syscall code (read-only).
    ecall_id: OpFixedRS<E, { Platform::reg_ecall() }, false>,
    // arg0 register read + decoded address of the first operand (and result).
    value_ptr_0: (OpFixedRS<E, { Platform::reg_arg0() }, true>, MemAddr<E>),
    // arg1 register read + decoded address of the second operand.
    value_ptr_1: (OpFixedRS<E, { Platform::reg_arg1() }, true>, MemAddr<E>),
    // Memory ops: result overwrites operand 0; operand 1 rewritten unchanged.
    mem_rw: Vec<WriteMEM>,
}
/// zkVM instruction for the field-addition precompile ecall.
pub struct FpAddInstruction<E, P>(PhantomData<(E, P)>);
impl<E: ExtensionField, P: FpOpField + FpAddSpec + NumWords> Instruction<E>
    for FpAddInstruction<E, P>
{
    type InstructionConfig = EcallFpOpConfig<E, P>;

    fn name() -> String {
        "Ecall_FpAdd".to_string()
    }

    /// Unused: this precompile is only built via `build_gkr_iop_circuit`.
    fn construct_circuit(
        _circuit_builder: &mut CircuitBuilder<E>,
        _param: &ProgramParams,
    ) -> Result<Self::InstructionConfig, ZKVMError> {
        unimplemented!()
    }

    /// Delegates to the shared fp-op builder with this field's add syscall code.
    fn build_gkr_iop_circuit(
        cb: &mut CircuitBuilder<E>,
        _param: &ProgramParams,
    ) -> Result<(Self::InstructionConfig, GKRCircuit<E>), ZKVMError> {
        build_fp_op_circuit::<E, P>(cb, P::SYSCALL_CODE, "fp_add")
    }

    /// Returns the layout's fixed columns; width must match `num_fixed`.
    fn generate_fixed_traces(
        config: &Self::InstructionConfig,
        num_fixed: usize,
    ) -> Option<RowMajorMatrix<E::BaseField>> {
        let fixed = config.layout.fixed_witness_group();
        assert_eq!(fixed.width(), num_fixed);
        Some(fixed)
    }

    /// Per-row assignment is bypassed; see `assign_instances`.
    fn assign_instance(
        _config: &Self::InstructionConfig,
        _shard_ctx: &mut ShardContext,
        _instance: &mut [<E as ExtensionField>::BaseField],
        _lk_multiplicity: &mut LkMultiplicity,
        _step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        unreachable!("we override logic in assign_instances")
    }

    /// Delegates to the shared fp-op assigner with `FieldOperation::Add`.
    fn assign_instances(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        num_witin: usize,
        num_structural_witin: usize,
        steps: Vec<&StepRecord>,
    ) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
        assign_fp_op_instances::<E, P>(
            config,
            shard_ctx,
            num_witin,
            num_structural_witin,
            steps,
            P::SYSCALL_CODE,
            FieldOperation::Add,
        )
    }
}
/// zkVM instruction for the field-multiplication precompile ecall.
pub struct FpMulInstruction<E, P>(PhantomData<(E, P)>);
impl<E: ExtensionField, P: FpOpField + FpMulSpec + NumWords> Instruction<E>
    for FpMulInstruction<E, P>
{
    type InstructionConfig = EcallFpOpConfig<E, P>;

    fn name() -> String {
        "Ecall_FpMul".to_string()
    }

    /// Unused: this precompile is only built via `build_gkr_iop_circuit`.
    fn construct_circuit(
        _circuit_builder: &mut CircuitBuilder<E>,
        _param: &ProgramParams,
    ) -> Result<Self::InstructionConfig, ZKVMError> {
        unimplemented!()
    }

    /// Delegates to the shared fp-op builder with this field's mul syscall code.
    fn build_gkr_iop_circuit(
        cb: &mut CircuitBuilder<E>,
        _param: &ProgramParams,
    ) -> Result<(Self::InstructionConfig, GKRCircuit<E>), ZKVMError> {
        build_fp_op_circuit::<E, P>(cb, P::SYSCALL_CODE, "fp_mul")
    }

    /// Returns the layout's fixed columns; width must match `num_fixed`.
    fn generate_fixed_traces(
        config: &Self::InstructionConfig,
        num_fixed: usize,
    ) -> Option<RowMajorMatrix<E::BaseField>> {
        let fixed = config.layout.fixed_witness_group();
        assert_eq!(fixed.width(), num_fixed);
        Some(fixed)
    }

    /// Per-row assignment is bypassed; see `assign_instances`.
    fn assign_instance(
        _config: &Self::InstructionConfig,
        _shard_ctx: &mut ShardContext,
        _instance: &mut [<E as ExtensionField>::BaseField],
        _lk_multiplicity: &mut LkMultiplicity,
        _step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        unreachable!("we override logic in assign_instances")
    }

    /// Delegates to the shared fp-op assigner with `FieldOperation::Mul`.
    fn assign_instances(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        num_witin: usize,
        num_structural_witin: usize,
        steps: Vec<&StepRecord>,
    ) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
        assign_fp_op_instances::<E, P>(
            config,
            shard_ctx,
            num_witin,
            num_structural_witin,
            steps,
            P::SYSCALL_CODE,
            FieldOperation::Mul,
        )
    }
}
/// Shared circuit builder for the `fp_add` / `fp_mul` ecalls.
///
/// Constrains the VM state transition, the syscall-id register check, the
/// two operand-pointer register reads (arg0/arg1), the ECALL fetch, the
/// field-op layer logic, and the memory traffic: the result overwrites the
/// first operand while the second operand is written back unchanged.
fn build_fp_op_circuit<E: ExtensionField, P: FpOpField + NumWords>(
    cb: &mut CircuitBuilder<E>,
    syscall_code: u32,
    layer_name: &str,
) -> Result<(EcallFpOpConfig<E, P>, GKRCircuit<E>), ZKVMError> {
    let vm_state = StateInOut::construct_circuit(cb, false)?;
    // The ecall register must hold `syscall_code`, split into two limbs.
    let ecall_id = OpFixedRS::<_, { Platform::reg_ecall() }, false>::construct_circuit(
        cb,
        UInt::from_const_unchecked(vec![
            syscall_code & LIMB_MASK,
            (syscall_code >> LIMB_BITS) & LIMB_MASK,
        ])
        .register_expr(),
        vm_state.ts,
    )?;
    // Word-aligned pointers to the two field-element operands.
    let value_ptr_value_0 = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
    let value_ptr_value_1 = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
    let value_ptr_0 = OpFixedRS::<_, { Platform::reg_arg0() }, true>::construct_circuit(
        cb,
        value_ptr_value_0.uint_unaligned().register_expr(),
        vm_state.ts,
    )?;
    let value_ptr_1 = OpFixedRS::<_, { Platform::reg_arg1() }, true>::construct_circuit(
        cb,
        value_ptr_value_1.uint_unaligned().register_expr(),
        vm_state.ts,
    )?;
    // Fetch the ECALL instruction at the current pc.
    cb.lk_fetch(&InsnRecord::new(
        vm_state.pc.expr(),
        InsnKind::ECALL.into(),
        None,
        0.into(),
        0.into(),
        0.into(),
        #[cfg(feature = "u16limb_circuit")]
        0.into(),
    ))?;
    let mut layout = <FpOpLayout<E, P> as ProtocolBuilder<E>>::build_layer_logic(cb, ())?;
    // Result words overwrite the first operand in place.
    let mut mem_rw = izip!(&layout.input32_exprs[0], &layout.output32_exprs)
        .enumerate()
        .map(|(i, (val_before, val_after))| {
            WriteMEM::construct_circuit(
                cb,
                // mem address := value_ptr_0 + i * WORD_SIZE
                value_ptr_0.prev_value.as_ref().unwrap().value()
                    + E::BaseField::from_canonical_u32(ByteAddr::from((i * WORD_SIZE) as u32).0)
                        .expr(),
                val_before.clone(),
                val_after.clone(),
                vm_state.ts,
            )
        })
        .collect::<Result<Vec<WriteMEM>, _>>()?;
    // The second operand is written back unchanged (no-op writes).
    mem_rw.extend(
        layout.input32_exprs[1]
            .iter()
            .enumerate()
            .map(|(i, val_before)| {
                WriteMEM::construct_circuit(
                    cb,
                    // mem address := value_ptr_1 + i * WORD_SIZE
                    value_ptr_1.prev_value.as_ref().unwrap().value()
                        + E::BaseField::from_canonical_u32(
                            ByteAddr::from((i * WORD_SIZE) as u32).0,
                        )
                        .expr(),
                    val_before.clone(),
                    val_before.clone(),
                    vm_state.ts,
                )
            })
            .collect::<Result<Vec<WriteMEM>, _>>()?,
    );
    let (out_evals, mut chip) = layout.finalize(cb);
    let layer =
        Layer::from_circuit_builder(cb, layer_name.to_string(), layout.n_challenges, out_evals);
    chip.add_layer(layer);
    Ok((
        EcallFpOpConfig {
            layout,
            vm_state,
            ecall_id,
            value_ptr_0: (value_ptr_0, value_ptr_value_0),
            value_ptr_1: (value_ptr_1, value_ptr_value_1),
            mem_rw,
        },
        chip.gkr_circuit(),
    ))
}
/// Shared witness assignment for the `fp_add` / `fp_mul` ecalls.
///
/// Pass 1 (parallel, batched, one forked shard context per batch) fills the
/// RISC-V side-effect columns for every step; pass 2 decodes the two field
/// operands from the pre-state memory words and hands them to the GKR-IOP
/// layout via `phase1_witness_group`.
fn assign_fp_op_instances<E: ExtensionField, P: FpOpField + NumWords>(
    config: &EcallFpOpConfig<E, P>,
    shard_ctx: &mut ShardContext,
    num_witin: usize,
    num_structural_witin: usize,
    steps: Vec<&StepRecord>,
    syscall_code: u32,
    op: FieldOperation,
) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
    let mut lk_multiplicity = LkMultiplicity::default();
    // No steps: return empty matrices of the expected widths.
    if steps.is_empty() {
        return Ok((
            [
                RowMajorMatrix::new(0, num_witin, InstancePaddingStrategy::Default),
                RowMajorMatrix::new(0, num_structural_witin, InstancePaddingStrategy::Default),
            ],
            lk_multiplicity.into_finalize_result(),
        ));
    }
    let nthreads = max_usable_threads();
    let num_instance_per_batch = steps.len().div_ceil(nthreads).max(1);
    let mut raw_witin = RowMajorMatrix::<E::BaseField>::new(
        steps.len(),
        num_witin,
        InstancePaddingStrategy::Default,
    );
    let mut raw_structural_witin = RowMajorMatrix::<E::BaseField>::new(
        steps.len(),
        num_structural_witin,
        InstancePaddingStrategy::Default,
    );
    let raw_witin_iter = raw_witin.par_batch_iter_mut(num_instance_per_batch);
    let shard_ctx_vec = shard_ctx.get_forked();
    raw_witin_iter
        .zip_eq(steps.par_chunks(num_instance_per_batch))
        .zip(shard_ctx_vec)
        .flat_map(|((instances, steps), mut shard_ctx)| {
            // NOTE(review): per-batch clone of the multiplicity tracker —
            // assumed to share underlying counters with the original.
            let mut lk_multiplicity = lk_multiplicity.clone();
            instances
                .chunks_mut(num_witin)
                .zip_eq(steps)
                .map(|(instance, step)| {
                    let ops = &step.syscall().expect("syscall step");
                    config
                        .vm_state
                        .assign_instance(instance, &shard_ctx, step)?;
                    // ecall register is read-only: before == after == syscall_code
                    config.ecall_id.assign_op(
                        instance,
                        &mut shard_ctx,
                        &mut lk_multiplicity,
                        step.cycle(),
                        &WriteOp::new_register_op(
                            Platform::reg_ecall(),
                            Change::new(syscall_code, syscall_code),
                            step.rs1().unwrap().previous_cycle,
                        ),
                    )?;
                    // arg0: decoded memory address, then the register op itself.
                    config.value_ptr_0.1.assign_instance(
                        instance,
                        &mut lk_multiplicity,
                        ops.reg_ops[0].value.after,
                    )?;
                    config.value_ptr_0.0.assign_op(
                        instance,
                        &mut shard_ctx,
                        &mut lk_multiplicity,
                        step.cycle(),
                        &ops.reg_ops[0],
                    )?;
                    // arg1: same for the second operand pointer.
                    config.value_ptr_1.1.assign_instance(
                        instance,
                        &mut lk_multiplicity,
                        ops.reg_ops[1].value.after,
                    )?;
                    config.value_ptr_1.0.assign_op(
                        instance,
                        &mut shard_ctx,
                        &mut lk_multiplicity,
                        step.cycle(),
                        &ops.reg_ops[1],
                    )?;
                    // One writer per recorded memory op, in matching order.
                    for (writer, op) in config.mem_rw.iter().zip_eq(&ops.mem_ops) {
                        writer.assign_op(
                            instance,
                            &mut shard_ctx,
                            &mut lk_multiplicity,
                            step.cycle(),
                            op,
                        )?;
                    }
                    // Count the instruction-fetch lookup at the pre-state pc.
                    lk_multiplicity.fetch(step.pc().before.0);
                    Ok(())
                })
                .collect::<Vec<_>>()
        })
        .collect::<Result<(), ZKVMError>>()?;
    // 2nd pass: rebuild the big-integer operands from pre-state memory words.
    let words = <P as NumWords>::WordsFieldElement::USIZE;
    let instances: Vec<FpOpInstance<P>> = steps
        .par_iter()
        .map(|step| {
            let values: Vec<u32> = step
                .syscall()
                .unwrap()
                .mem_ops
                .iter()
                .map(|op| op.value.before)
                .collect();
            // First `words` words: operand x; next `words` words: operand y.
            let x = biguint_from_le_words(&values[0..words]);
            let y = biguint_from_le_words(&values[words..2 * words]);
            FpOpInstance::new(x, y, op)
        })
        .collect();
    config.layout.phase1_witness_group(
        FpOpTrace { instances },
        [&mut raw_witin, &mut raw_structural_witin],
        &mut lk_multiplicity,
    );
    raw_witin.padding_by_strategy();
    raw_structural_witin.padding_by_strategy();
    Ok((
        [raw_witin, raw_structural_witin],
        lk_multiplicity.into_finalize_result(),
    ))
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall/weierstrass_double.rs | ceno_zkvm/src/instructions/riscv/ecall/weierstrass_double.rs | use std::marker::PhantomData;
use ceno_emul::{
BLS12381_DOUBLE, BN254_DOUBLE, ByteAddr, Change, Cycle, InsnKind, Platform, SECP256K1_DOUBLE,
SECP256R1_DOUBLE, StepRecord, WORD_SIZE, WriteOp,
};
use ff_ext::ExtensionField;
use generic_array::{GenericArray, typenum::Unsigned};
use gkr_iop::{
ProtocolBuilder, ProtocolWitnessGenerator,
gkr::{GKRCircuit, layer::Layer},
utils::lk_multiplicity::Multiplicity,
};
use itertools::{Itertools, izip};
use multilinear_extensions::{ToExpr, util::max_usable_threads};
use p3::{field::FieldAlgebra, matrix::Matrix};
use rayon::{
iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator},
slice::ParallelSlice,
};
use sp1_curves::{CurveType, EllipticCurve, params::NumWords, weierstrass::WeierstrassParameters};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::general::InstFetch,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
constants::{LIMB_BITS, LIMB_MASK, MEM_BITS, UInt},
ecall_base::OpFixedRS,
insn_base::{MemAddr, StateInOut, WriteMEM},
},
},
precompiles::{
EllipticCurveDoubleInstance, WeierstrassDoubleAssignLayout, WeierstrassDoubleAssignTrace,
},
structs::ProgramParams,
tables::{InsnRecord, RMMCollections},
witness::LkMultiplicity,
};
/// Circuit configuration for the Weierstrass point-doubling ecall.
///
/// Same shape as the addition config, but with a single input point:
/// arg0 points at the point to double, and the result overwrites it.
#[derive(Debug)]
pub struct EcallWeierstrassDoubleAssignConfig<
    E: ExtensionField,
    EC: EllipticCurve + WeierstrassParameters,
> {
    pub layout: WeierstrassDoubleAssignLayout<E, EC>,
    // pc / timestamp in-out state of this instruction.
    vm_state: StateInOut<E>,
    // Read of the ecall register holding the syscall code (read-only).
    ecall_id: OpFixedRS<E, { Platform::reg_ecall() }, false>,
    // arg0 register read paired with the decoded address of the point.
    point_ptr: (OpFixedRS<E, { Platform::reg_arg0() }, true>, MemAddr<E>),
    // Memory ops writing the doubled point over the input point's words.
    mem_rw: Vec<WriteMEM>,
}
/// zkVM instruction for the Weierstrass point-doubling precompile ecall:
/// proves the doubling itself plus all of its VM side-effects.
pub struct WeierstrassDoubleAssignInstruction<E, EC>(PhantomData<(E, EC)>);
impl<E: ExtensionField, EC: EllipticCurve + WeierstrassParameters> Instruction<E>
    for WeierstrassDoubleAssignInstruction<E, EC>
{
    type InstructionConfig = EcallWeierstrassDoubleAssignConfig<E, EC>;

    /// Circuit name, e.g. `Ecall_WeierstrassDoubleAssign_Bn254`.
    fn name() -> String {
        "Ecall_WeierstrassDoubleAssign_".to_string() + format!("{:?}", EC::CURVE_TYPE).as_str()
    }

    /// Unused: this precompile is only built via `build_gkr_iop_circuit`.
    fn construct_circuit(
        _circuit_builder: &mut CircuitBuilder<E>,
        _param: &ProgramParams,
    ) -> Result<Self::InstructionConfig, ZKVMError> {
        unimplemented!()
    }

    /// Builds the full GKR-IOP circuit: VM-state transition, syscall-id and
    /// pointer register reads, ECALL fetch, the doubling layer logic, and
    /// one memory write per word of the input point.
    fn build_gkr_iop_circuit(
        cb: &mut CircuitBuilder<E>,
        _param: &ProgramParams,
    ) -> Result<(Self::InstructionConfig, GKRCircuit<E>), ZKVMError> {
        // constrain vmstate
        let vm_state = StateInOut::construct_circuit(cb, false)?;
        // Select the syscall code that matches the concrete curve type.
        let syscall_code = match EC::CURVE_TYPE {
            CurveType::Secp256k1 => SECP256K1_DOUBLE,
            CurveType::Secp256r1 => SECP256R1_DOUBLE,
            CurveType::Bn254 => BN254_DOUBLE,
            CurveType::Bls12381 => BLS12381_DOUBLE,
            CurveType::Ed25519 => {
                unreachable!("WeierstrassDoubleAssign is not supported for Ed25519")
            }
        };
        // The ecall register must hold exactly `syscall_code` (two limbs).
        let ecall_id = OpFixedRS::<_, { Platform::reg_ecall() }, false>::construct_circuit(
            cb,
            UInt::from_const_unchecked(vec![
                syscall_code & LIMB_MASK,
                (syscall_code >> LIMB_BITS) & LIMB_MASK,
            ])
            .register_expr(),
            vm_state.ts,
        )?;
        // Word-aligned pointer to the input point, sourced from arg0.
        let point_ptr_value = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
        let point_ptr = OpFixedRS::<_, { Platform::reg_arg0() }, true>::construct_circuit(
            cb,
            point_ptr_value.uint_unaligned().register_expr(),
            vm_state.ts,
        )?;
        // fetch the ECALL instruction at the current pc
        cb.lk_fetch(&InsnRecord::new(
            vm_state.pc.expr(),
            InsnKind::ECALL.into(),
            None,
            0.into(),
            0.into(),
            0.into(),
            #[cfg(feature = "u16limb_circuit")]
            0.into(),
        ))?;
        let mut layout =
            <WeierstrassDoubleAssignLayout<E, EC> as gkr_iop::ProtocolBuilder<E>>::build_layer_logic(
                cb,
                (),
            )?;
        // Write the result to the same address of the first input point.
        let mem_rw = izip!(&layout.input32_exprs, &layout.output32_exprs)
            .enumerate()
            .map(|(i, (val_before, val_after))| {
                WriteMEM::construct_circuit(
                    cb,
                    // mem address := point_ptr + i * WORD_SIZE
                    point_ptr.prev_value.as_ref().unwrap().value()
                        + E::BaseField::from_canonical_u32(
                            ByteAddr::from((i * WORD_SIZE) as u32).0,
                        )
                        .expr(),
                    val_before.clone(),
                    val_after.clone(),
                    vm_state.ts,
                )
            })
            .collect::<Result<Vec<WriteMEM>, _>>()?;
        let (out_evals, mut chip) = layout.finalize(cb);
        let layer = Layer::from_circuit_builder(
            cb,
            "weierstrass_double".to_string(),
            layout.n_challenges,
            out_evals,
        );
        chip.add_layer(layer);
        let circuit = chip.gkr_circuit();
        Ok((
            EcallWeierstrassDoubleAssignConfig {
                layout,
                vm_state,
                ecall_id,
                point_ptr: (point_ptr, point_ptr_value),
                mem_rw,
            },
            circuit,
        ))
    }

    /// Returns the layout's fixed columns; width must match `num_fixed`.
    fn generate_fixed_traces(
        config: &Self::InstructionConfig,
        num_fixed: usize,
    ) -> Option<RowMajorMatrix<E::BaseField>> {
        let fixed = config.layout.fixed_witness_group();
        assert_eq!(fixed.width(), num_fixed);
        Some(fixed)
    }

    /// Per-row assignment is bypassed; see `assign_instances`.
    fn assign_instance(
        _config: &Self::InstructionConfig,
        _shard_ctx: &mut ShardContext,
        _instance: &mut [<E as ExtensionField>::BaseField],
        _lk_multiplicity: &mut LkMultiplicity,
        _step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        unreachable!("we override logic in assign_instances")
    }

    /// Two-pass witness assignment over all syscall steps:
    /// pass 1 (parallel, batched) fills the RISC-V side-effect columns;
    /// pass 2 decodes the input point from pre-state memory words and hands
    /// it to the GKR-IOP layout.
    fn assign_instances(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        num_witin: usize,
        num_structural_witin: usize,
        steps: Vec<&StepRecord>,
    ) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
        let syscall_code = match EC::CURVE_TYPE {
            CurveType::Secp256k1 => SECP256K1_DOUBLE,
            CurveType::Secp256r1 => SECP256R1_DOUBLE,
            CurveType::Bn254 => BN254_DOUBLE,
            CurveType::Bls12381 => BLS12381_DOUBLE,
            CurveType::Ed25519 => {
                unreachable!("WeierstrassDoubleAssign is not supported for Ed25519")
            }
        };
        let mut lk_multiplicity = LkMultiplicity::default();
        // No steps: return empty matrices of the expected widths.
        if steps.is_empty() {
            return Ok((
                [
                    RowMajorMatrix::new(0, num_witin, InstancePaddingStrategy::Default),
                    RowMajorMatrix::new(0, num_structural_witin, InstancePaddingStrategy::Default),
                ],
                lk_multiplicity.into_finalize_result(),
            ));
        }
        let nthreads = max_usable_threads();
        let num_instance_per_batch = steps.len().div_ceil(nthreads).max(1);
        let mut raw_witin = RowMajorMatrix::<E::BaseField>::new(
            steps.len(),
            num_witin,
            InstancePaddingStrategy::Default,
        );
        let mut raw_structural_witin = RowMajorMatrix::<E::BaseField>::new(
            steps.len(),
            num_structural_witin,
            InstancePaddingStrategy::Default,
        );
        let raw_witin_iter = raw_witin.par_batch_iter_mut(num_instance_per_batch);
        let shard_ctx_vec = shard_ctx.get_forked();
        // 1st pass: assign witness outside of gkr-iop scope
        raw_witin_iter
            .zip_eq(steps.par_chunks(num_instance_per_batch))
            .zip(shard_ctx_vec)
            .flat_map(|((instances, steps), mut shard_ctx)| {
                // NOTE(review): per-batch clone of the multiplicity tracker —
                // assumed to share underlying counters with the original.
                let mut lk_multiplicity = lk_multiplicity.clone();
                instances
                    .chunks_mut(num_witin)
                    .zip_eq(steps)
                    .map(|(instance, step)| {
                        let ops = &step.syscall().expect("syscall step");
                        // vm_state
                        config
                            .vm_state
                            .assign_instance(instance, &shard_ctx, step)?;
                        // ecall register is read-only: before == after == syscall_code
                        config.ecall_id.assign_op(
                            instance,
                            &mut shard_ctx,
                            &mut lk_multiplicity,
                            step.cycle(),
                            &WriteOp::new_register_op(
                                Platform::reg_ecall(),
                                Change::new(syscall_code, syscall_code),
                                step.rs1().unwrap().previous_cycle,
                            ),
                        )?;
                        // assign point_ptr_0
                        config.point_ptr.1.assign_instance(
                            instance,
                            &mut lk_multiplicity,
                            ops.reg_ops[0].value.after,
                        )?;
                        config.point_ptr.0.assign_op(
                            instance,
                            &mut shard_ctx,
                            &mut lk_multiplicity,
                            step.cycle(),
                            &ops.reg_ops[0],
                        )?;
                        // One writer per recorded memory op, in matching order.
                        for (writer, op) in config.mem_rw.iter().zip_eq(&ops.mem_ops) {
                            writer.assign_op(
                                instance,
                                &mut shard_ctx,
                                &mut lk_multiplicity,
                                step.cycle(),
                                op,
                            )?;
                        }
                        // fetch
                        lk_multiplicity.fetch(step.pc().before.0);
                        Ok(())
                    })
                    .collect::<Vec<_>>()
            })
            .collect::<Result<(), ZKVMError>>()?;
        // second pass
        let instances: Vec<EllipticCurveDoubleInstance<EC::BaseField>> = steps
            .par_iter()
            .map(|step| {
                // Point is reconstructed from the *pre-state* memory words.
                let (instance, _prev_ts): (Vec<u32>, Vec<Cycle>) = step
                    .syscall()
                    .unwrap()
                    .mem_ops
                    .iter()
                    .map(|op| (op.value.before, op.previous_cycle))
                    .unzip();
                let p = GenericArray::try_from(
                    instance[0..<EC::BaseField as NumWords>::WordsCurvePoint::USIZE].to_vec(),
                );
                p.map(|p| EllipticCurveDoubleInstance::<EC::BaseField> { p })
                    .map_err(|_| {
                        ZKVMError::InvalidWitness(
                            "Failed to parse EllipticCurveDoubleInstance".into(),
                        )
                    })
            })
            .collect::<Result<_, _>>()?;
        config.layout.phase1_witness_group(
            WeierstrassDoubleAssignTrace { instances },
            [&mut raw_witin, &mut raw_structural_witin],
            &mut lk_multiplicity,
        );
        raw_witin.padding_by_strategy();
        raw_structural_witin.padding_by_strategy();
        Ok((
            [raw_witin, raw_structural_witin],
            lk_multiplicity.into_finalize_result(),
        ))
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall/weierstrass_decompress.rs | ceno_zkvm/src/instructions/riscv/ecall/weierstrass_decompress.rs | use std::marker::PhantomData;
use ceno_emul::{
Change, Cycle, InsnKind, Platform, SECP256K1_DECOMPRESS, SECP256R1_DECOMPRESS, StepRecord,
WriteOp,
};
use ff_ext::ExtensionField;
use generic_array::{GenericArray, typenum::Unsigned};
use gkr_iop::{
ProtocolBuilder, ProtocolWitnessGenerator,
gkr::{GKRCircuit, layer::Layer},
utils::lk_multiplicity::Multiplicity,
};
use itertools::{Itertools, izip};
use multilinear_extensions::{Expression, ToExpr, util::max_usable_threads};
use num::BigUint;
use p3::matrix::Matrix;
use rayon::{
iter::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator, ParallelIterator,
},
slice::ParallelSlice,
};
use sp1_curves::{
CurveType, EllipticCurve,
params::{NumLimbs, NumWords},
weierstrass::WeierstrassParameters,
};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::general::InstFetch,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
constants::{LIMB_BITS, LIMB_MASK, MEM_BITS, UInt},
ecall_base::OpFixedRS,
insn_base::{MemAddr, StateInOut, WriteMEM},
},
},
precompiles::{
EllipticCurveDecompressInstance, WeierstrassDecompressLayout, WeierstrassDecompressTrace,
},
structs::ProgramParams,
tables::{InsnRecord, RMMCollections},
witness::LkMultiplicity,
};
#[derive(Debug)]
pub struct EcallWeierstrassDecompressConfig<E: ExtensionField, EC: EllipticCurve> {
pub layout: WeierstrassDecompressLayout<E, EC>,
vm_state: StateInOut<E>,
ecall_id: OpFixedRS<E, { Platform::reg_ecall() }, false>,
field_ptr: (OpFixedRS<E, { Platform::reg_arg0() }, true>, MemAddr<E>),
sign_bit: OpFixedRS<E, { Platform::reg_arg1() }, true>,
mem_rw: Vec<WriteMEM>,
}
/// WeierstrassDecompressInstruction can handle any instruction and produce its side-effects.
pub struct WeierstrassDecompressInstruction<E, EC>(PhantomData<(E, EC)>);
impl<E: ExtensionField, EC: EllipticCurve + WeierstrassParameters> Instruction<E>
for WeierstrassDecompressInstruction<E, EC>
{
type InstructionConfig = EcallWeierstrassDecompressConfig<E, EC>;
fn name() -> String {
"Ecall_WeierstrassDecompress_".to_string() + format!("{:?}", EC::CURVE_TYPE).as_str()
}
fn construct_circuit(
_circuit_builder: &mut CircuitBuilder<E>,
_param: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
unimplemented!()
}
fn build_gkr_iop_circuit(
cb: &mut CircuitBuilder<E>,
_param: &ProgramParams,
) -> Result<(Self::InstructionConfig, GKRCircuit<E>), ZKVMError> {
// constrain vmstate
let mut layout =
<WeierstrassDecompressLayout<E, EC> as gkr_iop::ProtocolBuilder<E>>::build_layer_logic(
cb,
(),
)?;
let vm_state = StateInOut::construct_circuit(cb, false)?;
let syscall_code = match EC::CURVE_TYPE {
CurveType::Secp256k1 => SECP256K1_DECOMPRESS,
CurveType::Secp256r1 => SECP256R1_DECOMPRESS,
_ => {
unreachable!("WeierstrassDecompress is not supported for this curve")
}
};
let ecall_id = OpFixedRS::<_, { Platform::reg_ecall() }, false>::construct_circuit(
cb,
UInt::from_const_unchecked(vec![
syscall_code & LIMB_MASK,
(syscall_code >> LIMB_BITS) & LIMB_MASK,
])
.register_expr(),
vm_state.ts,
)?;
let field_ptr_value = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
let field_ptr = OpFixedRS::<_, { Platform::reg_arg0() }, true>::construct_circuit(
cb,
field_ptr_value.uint_unaligned().register_expr(),
vm_state.ts,
)?;
let sign_bit_value = layout.layer_exprs.wits.sign_bit;
let sign_bit = OpFixedRS::<_, { Platform::reg_arg1() }, true>::construct_circuit(
cb,
[sign_bit_value.expr(), Expression::ZERO],
vm_state.ts,
)?;
// fetch
cb.lk_fetch(&InsnRecord::new(
vm_state.pc.expr(),
InsnKind::ECALL.into(),
None,
0.into(),
0.into(),
0.into(),
#[cfg(feature = "u16limb_circuit")]
0.into(),
))?;
let num_limbs = <EC::BaseField as NumLimbs>::Limbs::U32;
assert_eq!(num_limbs, 32);
let field_ptr_expr = field_ptr.prev_value.as_ref().unwrap().value();
let mut mem_rw = layout
.input32_exprs
.iter()
.enumerate()
.map(|(i, val)| {
WriteMEM::construct_circuit(
cb,
// mem address := field_ptr + i * 4
field_ptr_expr.expr() + (i as u32) * 4,
val.clone(),
val.clone(),
vm_state.ts,
)
})
.collect::<Result<Vec<WriteMEM>, _>>()?;
mem_rw.extend(
izip!(
layout.old_output32_exprs.iter(),
layout.output32_exprs.iter()
)
.enumerate()
.map(|(i, (val_before, val_after))| {
WriteMEM::construct_circuit(
cb,
// mem address := field_ptr + i * 4 + num_limbs
field_ptr_expr.expr() + (i as u32) * 4 + num_limbs,
val_before.clone(),
val_after.clone(),
vm_state.ts,
)
})
.collect::<Result<Vec<WriteMEM>, _>>()?,
);
let (out_evals, mut chip) = layout.finalize(cb);
let layer = Layer::from_circuit_builder(
cb,
"weierstrass_decompress".to_string(),
layout.n_challenges,
out_evals,
);
chip.add_layer(layer);
let circuit = chip.gkr_circuit();
Ok((
EcallWeierstrassDecompressConfig {
layout,
vm_state,
ecall_id,
field_ptr: (field_ptr, field_ptr_value),
sign_bit,
mem_rw,
},
circuit,
))
}
fn generate_fixed_traces(
config: &Self::InstructionConfig,
num_fixed: usize,
) -> Option<RowMajorMatrix<E::BaseField>> {
let fixed = config.layout.fixed_witness_group();
assert_eq!(fixed.width(), num_fixed);
Some(fixed)
}
fn assign_instance(
_config: &Self::InstructionConfig,
_shard_ctx: &mut ShardContext,
_instance: &mut [<E as ExtensionField>::BaseField],
_lk_multiplicity: &mut LkMultiplicity,
_step: &StepRecord,
) -> Result<(), ZKVMError> {
unreachable!("we override logic in assign_instances")
}
fn assign_instances(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
num_witin: usize,
num_structural_witin: usize,
steps: Vec<&StepRecord>,
) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
let syscall_code = match EC::CURVE_TYPE {
CurveType::Secp256k1 => SECP256K1_DECOMPRESS,
CurveType::Secp256r1 => SECP256R1_DECOMPRESS,
_ => {
unreachable!("WeierstrassDecompress is not supported for this curve")
}
};
let mut lk_multiplicity = LkMultiplicity::default();
if steps.is_empty() {
return Ok((
[
RowMajorMatrix::new(0, num_witin, InstancePaddingStrategy::Default),
RowMajorMatrix::new(0, num_structural_witin, InstancePaddingStrategy::Default),
],
lk_multiplicity.into_finalize_result(),
));
}
let nthreads = max_usable_threads();
let num_instance_per_batch = steps.len().div_ceil(nthreads).max(1);
let mut raw_witin = RowMajorMatrix::<E::BaseField>::new(
steps.len(),
num_witin,
InstancePaddingStrategy::Default,
);
let mut raw_structural_witin = RowMajorMatrix::<E::BaseField>::new(
steps.len(),
num_structural_witin,
InstancePaddingStrategy::Default,
);
let raw_witin_iter = raw_witin.par_batch_iter_mut(num_instance_per_batch);
let shard_ctx_vec = shard_ctx.get_forked();
let ec_field_num_words = <EC::BaseField as NumWords>::WordsFieldElement::USIZE;
// 1st pass: assign witness outside of gkr-iop scope
let sign_bit_and_y_words = raw_witin_iter
.zip_eq(steps.par_chunks(num_instance_per_batch))
.zip(shard_ctx_vec)
.flat_map(|((instances, steps), mut shard_ctx)| {
let mut lk_multiplicity = lk_multiplicity.clone();
instances
.chunks_mut(num_witin)
.zip_eq(steps)
.map(|(instance, step)| {
let ops = &step.syscall().expect("syscall step");
// vm_state
config
.vm_state
.assign_instance(instance, &shard_ctx, step)?;
config.ecall_id.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&WriteOp::new_register_op(
Platform::reg_ecall(),
Change::new(syscall_code, syscall_code),
step.rs1().unwrap().previous_cycle,
),
)?;
// assign field_ptr
config.field_ptr.1.assign_instance(
instance,
&mut lk_multiplicity,
ops.reg_ops[0].value.after,
)?;
config.field_ptr.0.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&ops.reg_ops[0],
)?;
// register read for sign_bit
config.sign_bit.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&ops.reg_ops[1],
)?;
for (writer, op) in config.mem_rw.iter().zip_eq(&ops.mem_ops) {
writer.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
op,
)?;
}
// fetch
lk_multiplicity.fetch(step.pc().before.0);
let old_output32: Vec<_> = ops
.mem_ops
.iter()
.skip(ec_field_num_words)
.map(|op| op.value.before)
.collect();
Ok((
ops.reg_ops[1].value.before != 0,
old_output32.try_into().unwrap(),
))
})
.collect::<Vec<_>>()
})
.collect::<Result<Vec<_>, ZKVMError>>()?;
// second pass
let instances = steps
.par_iter()
.zip(sign_bit_and_y_words.into_par_iter())
.map(|(step, (sign_bit, old_output32))| {
let (instance, _prev_ts): (Vec<u32>, Vec<Cycle>) = step
.syscall()
.unwrap()
.mem_ops
.iter()
.take(ec_field_num_words)
.map(|op| (op.value.before, op.previous_cycle))
.unzip();
let x_words =
GenericArray::<_, <EC::BaseField as NumWords>::WordsFieldElement>::try_from(
instance[0..<EC::BaseField as NumWords>::WordsFieldElement::USIZE].to_vec(),
);
x_words
.map(|x_words: GenericArray<u32, _>| {
let x = BigUint::from_bytes_be(
&x_words
.iter()
.flat_map(|n| n.to_le_bytes())
.collect::<Vec<_>>(),
);
EllipticCurveDecompressInstance {
x,
sign_bit,
old_y_words: old_output32,
}
})
.map_err(|_| {
ZKVMError::InvalidWitness(
"Failed to parse EllipticCurveDecompressInstance".into(),
)
})
})
.collect::<Result<_, _>>()?;
config.layout.phase1_witness_group(
WeierstrassDecompressTrace {
instances,
_phantom: PhantomData,
},
[&mut raw_witin, &mut raw_structural_witin],
&mut lk_multiplicity,
);
raw_witin.padding_by_strategy();
raw_structural_witin.padding_by_strategy();
Ok((
[raw_witin, raw_structural_witin],
lk_multiplicity.into_finalize_result(),
))
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall/keccak.rs | ceno_zkvm/src/instructions/riscv/ecall/keccak.rs | use std::marker::PhantomData;
use ceno_emul::{
ByteAddr, Change, Cycle, InsnKind, KECCAK_PERMUTE, Platform, StepRecord, WORD_SIZE, WriteOp,
};
use ff_ext::ExtensionField;
use gkr_iop::{
ProtocolBuilder, ProtocolWitnessGenerator,
gkr::{GKRCircuit, booleanhypercube::BooleanHypercube, layer::Layer},
utils::lk_multiplicity::Multiplicity,
};
use itertools::{Itertools, izip};
use multilinear_extensions::{ToExpr, util::max_usable_threads};
use p3::{field::FieldAlgebra, matrix::Matrix};
use rayon::{
iter::{IndexedParallelIterator, ParallelIterator},
slice::ParallelSlice,
};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::general::InstFetch,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
constants::{LIMB_BITS, LIMB_MASK, MEM_BITS, UInt},
ecall_base::OpFixedRS,
insn_base::{MemAddr, StateInOut, WriteMEM},
},
},
precompiles::{
KECCAK_ROUNDS, KECCAK_ROUNDS_CEIL_LOG2, KeccakInstance, KeccakLayout, KeccakParams,
KeccakStateInstance, KeccakTrace, KeccakWitInstance,
},
structs::ProgramParams,
tables::{InsnRecord, RMMCollections},
witness::LkMultiplicity,
};
#[derive(Debug)]
pub struct EcallKeccakConfig<E: ExtensionField> {
pub layout: KeccakLayout<E>,
vm_state: StateInOut<E>,
ecall_id: OpFixedRS<E, { Platform::reg_ecall() }, false>,
state_ptr: (OpFixedRS<E, { Platform::reg_arg0() }, true>, MemAddr<E>),
mem_rw: Vec<WriteMEM>,
}
/// KeccakInstruction can handle any instruction and produce its side-effects.
pub struct KeccakInstruction<E>(PhantomData<E>);
impl<E: ExtensionField> Instruction<E> for KeccakInstruction<E> {
type InstructionConfig = EcallKeccakConfig<E>;
fn name() -> String {
"Ecall_Keccak".to_string()
}
fn construct_circuit(
_circuit_builder: &mut CircuitBuilder<E>,
_param: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
unimplemented!()
}
fn build_gkr_iop_circuit(
cb: &mut CircuitBuilder<E>,
_param: &ProgramParams,
) -> Result<(Self::InstructionConfig, GKRCircuit<E>), ZKVMError> {
// constrain vmstate
let vm_state = StateInOut::construct_circuit(cb, false)?;
let ecall_id = OpFixedRS::<_, { Platform::reg_ecall() }, false>::construct_circuit(
cb,
UInt::from_const_unchecked(vec![
KECCAK_PERMUTE & LIMB_MASK,
(KECCAK_PERMUTE >> LIMB_BITS) & LIMB_MASK,
])
.register_expr(),
vm_state.ts,
)?;
let state_ptr_value = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
let state_ptr = OpFixedRS::<_, { Platform::reg_arg0() }, true>::construct_circuit(
cb,
state_ptr_value.uint_unaligned().register_expr(),
vm_state.ts,
)?;
// fetch
cb.lk_fetch(&InsnRecord::new(
vm_state.pc.expr(),
InsnKind::ECALL.into(),
None,
0.into(),
0.into(),
0.into(),
#[cfg(feature = "u16limb_circuit")]
0.into(),
))?;
let mut layout = <KeccakLayout<E> as gkr_iop::ProtocolBuilder<E>>::build_layer_logic(
cb,
KeccakParams {},
)?;
// memory rw, for we in-place update
let mem_rw = izip!(&layout.input32_exprs, &layout.output32_exprs)
.enumerate()
.map(|(i, (val_before, val_after))| {
WriteMEM::construct_circuit(
cb,
state_ptr.prev_value.as_ref().unwrap().value()
+ E::BaseField::from_canonical_u32(
ByteAddr::from((i * WORD_SIZE) as u32).0,
)
.expr(),
val_before.clone(),
val_after.clone(),
vm_state.ts,
)
})
.collect::<Result<Vec<WriteMEM>, _>>()?;
let (out_evals, mut chip) = layout.finalize(cb);
let layer = Layer::from_circuit_builder(cb, Self::name(), layout.n_challenges, out_evals);
chip.add_layer(layer);
let circuit = chip.gkr_circuit();
Ok((
EcallKeccakConfig {
layout,
vm_state,
ecall_id,
state_ptr: (state_ptr, state_ptr_value),
mem_rw,
},
circuit,
))
}
fn generate_fixed_traces(
config: &Self::InstructionConfig,
num_fixed: usize,
) -> Option<RowMajorMatrix<E::BaseField>> {
let fixed = config.layout.fixed_witness_group();
assert_eq!(fixed.width(), num_fixed);
Some(fixed)
}
fn assign_instance(
_config: &Self::InstructionConfig,
_shard_ctx: &mut ShardContext,
_instance: &mut [<E as ExtensionField>::BaseField],
_lk_multiplicity: &mut LkMultiplicity,
_step: &StepRecord,
) -> Result<(), ZKVMError> {
unreachable!("we override logic in assign_instances")
}
fn assign_instances(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
num_witin: usize,
num_structural_witin: usize,
steps: Vec<&StepRecord>,
) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
let mut lk_multiplicity = LkMultiplicity::default();
if steps.is_empty() {
return Ok((
[
RowMajorMatrix::new(0, num_witin, InstancePaddingStrategy::Default),
RowMajorMatrix::new(0, num_structural_witin, InstancePaddingStrategy::Default),
],
lk_multiplicity.into_finalize_result(),
));
}
let nthreads = max_usable_threads();
let num_instance_per_batch = steps.len().div_ceil(nthreads).max(1);
let mut raw_witin = RowMajorMatrix::<E::BaseField>::new_by_rotation(
steps.len(),
KECCAK_ROUNDS.next_power_of_two().ilog2() as usize,
num_witin,
InstancePaddingStrategy::Default,
);
let mut raw_structural_witin = RowMajorMatrix::<E::BaseField>::new_by_rotation(
steps.len(),
KECCAK_ROUNDS.next_power_of_two().ilog2() as usize,
num_structural_witin,
InstancePaddingStrategy::Default,
);
// each instance are composed of KECCAK_ROUNDS.next_power_of_two()
let raw_witin_iter = raw_witin.par_batch_iter_mut(num_instance_per_batch);
let shard_ctx_vec = shard_ctx.get_forked();
// 1st pass: assign witness outside of gkr-iop scope
raw_witin_iter
.zip_eq(steps.par_chunks(num_instance_per_batch))
.zip(shard_ctx_vec)
.flat_map(|((instances, steps), mut shard_ctx)| {
let mut lk_multiplicity = lk_multiplicity.clone();
instances
.chunks_mut(num_witin * KECCAK_ROUNDS.next_power_of_two())
.zip_eq(steps)
.map(|(instance_with_rotation, step)| {
let ops = &step.syscall().expect("syscall step");
let bh = BooleanHypercube::new(KECCAK_ROUNDS_CEIL_LOG2);
let mut cyclic_group = bh.into_iter();
for _ in 0..KECCAK_ROUNDS {
let round_index = cyclic_group.next().unwrap();
let instance = &mut instance_with_rotation
[round_index as usize * num_witin..][..num_witin];
// vm_state
config
.vm_state
.assign_instance(instance, &shard_ctx, step)?;
config.ecall_id.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&WriteOp::new_register_op(
Platform::reg_ecall(),
Change::new(KECCAK_PERMUTE, KECCAK_PERMUTE),
step.rs1().unwrap().previous_cycle,
),
)?;
// assign state_ptr
config.state_ptr.1.assign_instance(
instance,
&mut lk_multiplicity,
ops.reg_ops[0].value.after,
)?;
config.state_ptr.0.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&ops.reg_ops[0],
)?;
// assign mem_rw
for (writer, op) in config.mem_rw.iter().zip_eq(&ops.mem_ops) {
writer.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
op,
)?;
}
// fetch
lk_multiplicity.fetch(step.pc().before.0);
}
Ok(())
})
.collect::<Vec<_>>()
})
.collect::<Result<(), ZKVMError>>()?;
// second pass
let instances: Vec<KeccakInstance> = steps
.iter()
.map(|step| -> KeccakInstance {
let (instance, prev_ts): (Vec<u32>, Vec<Cycle>) = step
.syscall()
.unwrap()
.mem_ops
.iter()
.map(|op| (op.value.before, op.previous_cycle))
.unzip();
KeccakInstance {
state: KeccakStateInstance {
state_ptr_address: ByteAddr::from(step.rs1().unwrap().value),
cur_ts: step.cycle(),
read_ts: prev_ts.try_into().unwrap(),
},
witin: KeccakWitInstance {
instance: instance.try_into().unwrap(),
},
}
})
.collect_vec();
config.layout.phase1_witness_group(
KeccakTrace { instances },
[&mut raw_witin, &mut raw_structural_witin],
&mut lk_multiplicity,
);
raw_witin.padding_by_strategy();
raw_structural_witin.padding_by_strategy();
Ok((
[raw_witin, raw_structural_witin],
lk_multiplicity.into_finalize_result(),
))
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall/uint256.rs | ceno_zkvm/src/instructions/riscv/ecall/uint256.rs | use std::marker::PhantomData;
use ceno_emul::{
ByteAddr, Change, Cycle, InsnKind, Platform, SECP256K1_SCALAR_INVERT, StepRecord, UINT256_MUL,
WORD_SIZE, WriteOp,
};
use ff_ext::ExtensionField;
use generic_array::typenum::Unsigned;
use gkr_iop::{
ProtocolBuilder, ProtocolWitnessGenerator,
gkr::{GKRCircuit, layer::Layer},
utils::lk_multiplicity::Multiplicity,
};
use itertools::{Itertools, chain, izip};
use multilinear_extensions::{ToExpr, util::max_usable_threads};
use num_bigint::BigUint;
use p3::{field::FieldAlgebra, matrix::Matrix};
use rayon::{
iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator},
slice::ParallelSlice,
};
use sp1_curves::{
params::NumWords,
uint256::U256Field,
utils::{biguint_from_be_words, biguint_from_le_words},
weierstrass::{
WeierstrassParameters,
secp256k1::{Secp256k1, Secp256k1BaseField},
},
};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::general::InstFetch,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
constants::{LIMB_BITS, LIMB_MASK, MEM_BITS, UInt},
ecall_base::OpFixedRS,
insn_base::{MemAddr, StateInOut, WriteMEM},
},
},
precompiles::{
Uint256InvLayout, Uint256InvSpec, Uint256InvTrace, Uint256MulInstance, Uint256MulLayout,
Uint256MulTrace,
},
structs::ProgramParams,
tables::{InsnRecord, RMMCollections},
witness::LkMultiplicity,
};
#[derive(Debug)]
pub struct EcallUint256MulConfig<E: ExtensionField> {
pub layout: Uint256MulLayout<E>,
vm_state: StateInOut<E>,
ecall_id: OpFixedRS<E, { Platform::reg_ecall() }, false>,
word_ptr_0: (OpFixedRS<E, { Platform::reg_arg0() }, true>, MemAddr<E>),
word_ptr_1: (OpFixedRS<E, { Platform::reg_arg1() }, true>, MemAddr<E>),
mem_rw: Vec<WriteMEM>,
}
/// Uint256MulInstruction can handle any instruction and produce its side-effects.
pub struct Uint256MulInstruction<E>(PhantomData<E>);
impl<E: ExtensionField> Instruction<E> for Uint256MulInstruction<E> {
type InstructionConfig = EcallUint256MulConfig<E>;
fn name() -> String {
"Ecall_Uint256Mul".to_string()
}
fn construct_circuit(
_circuit_builder: &mut CircuitBuilder<E>,
_param: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
unimplemented!()
}
fn build_gkr_iop_circuit(
cb: &mut CircuitBuilder<E>,
_param: &ProgramParams,
) -> Result<(Self::InstructionConfig, GKRCircuit<E>), ZKVMError> {
// constrain vmstate
let vm_state = StateInOut::construct_circuit(cb, false)?;
let syscall_code = UINT256_MUL;
let ecall_id = OpFixedRS::<_, { Platform::reg_ecall() }, false>::construct_circuit(
cb,
UInt::from_const_unchecked(vec![
syscall_code & LIMB_MASK,
(syscall_code >> LIMB_BITS) & LIMB_MASK,
])
.register_expr(),
vm_state.ts,
)?;
let word_ptr_value_0 = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
let word_ptr_value_1 = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
let word_ptr_0 = OpFixedRS::<_, { Platform::reg_arg0() }, true>::construct_circuit(
cb,
word_ptr_value_0.uint_unaligned().register_expr(),
vm_state.ts,
)?;
let word_ptr_1 = OpFixedRS::<_, { Platform::reg_arg1() }, true>::construct_circuit(
cb,
word_ptr_value_1.uint_unaligned().register_expr(),
vm_state.ts,
)?;
// fetch
cb.lk_fetch(&InsnRecord::new(
vm_state.pc.expr(),
InsnKind::ECALL.into(),
None,
0.into(),
0.into(),
0.into(),
#[cfg(feature = "u16limb_circuit")]
0.into(),
))?;
let mut layout =
<Uint256MulLayout<E> as gkr_iop::ProtocolBuilder<E>>::build_layer_logic(cb, ())?;
// Write the result to the same address of the first input point.
let mut mem_rw = izip!(&layout.input32_exprs[0], &layout.output32_exprs)
.enumerate()
.map(|(i, (val_before, val_after))| {
WriteMEM::construct_circuit(
cb,
// mem address := word_ptr_0 + i
word_ptr_0.prev_value.as_ref().unwrap().value()
+ E::BaseField::from_canonical_u32(
ByteAddr::from((i * WORD_SIZE) as u32).0,
)
.expr(),
val_before.clone(),
val_after.clone(),
vm_state.ts,
)
})
.collect::<Result<Vec<WriteMEM>, _>>()?;
// Keep the second input point unchanged in memory.
mem_rw.extend(
chain![
layout.input32_exprs[1].iter(),
layout.input32_exprs[2].iter()
]
.enumerate()
.map(|(i, val_before)| {
WriteMEM::construct_circuit(
cb,
// mem address := word_ptr_1 + i
word_ptr_1.prev_value.as_ref().unwrap().value()
+ E::BaseField::from_canonical_u32(
ByteAddr::from((i * WORD_SIZE) as u32).0,
)
.expr(),
val_before.clone(),
val_before.clone(),
vm_state.ts,
)
})
.collect::<Result<Vec<WriteMEM>, _>>()?,
);
let (out_evals, mut chip) = layout.finalize(cb);
let layer = Layer::from_circuit_builder(
cb,
"uint256_mul".to_string(),
layout.n_challenges,
out_evals,
);
chip.add_layer(layer);
let circuit = chip.gkr_circuit();
Ok((
EcallUint256MulConfig {
layout,
vm_state,
ecall_id,
word_ptr_0: (word_ptr_0, word_ptr_value_0),
word_ptr_1: (word_ptr_1, word_ptr_value_1),
mem_rw,
},
circuit,
))
}
fn generate_fixed_traces(
config: &Self::InstructionConfig,
num_fixed: usize,
) -> Option<RowMajorMatrix<E::BaseField>> {
let fixed = config.layout.fixed_witness_group();
assert_eq!(fixed.width(), num_fixed);
Some(fixed)
}
fn assign_instance(
_config: &Self::InstructionConfig,
_shard_ctx: &mut ShardContext,
_instance: &mut [<E as ExtensionField>::BaseField],
_lk_multiplicity: &mut LkMultiplicity,
_step: &StepRecord,
) -> Result<(), ZKVMError> {
unreachable!("we override logic in assign_instances")
}
fn assign_instances(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
num_witin: usize,
num_structural_witin: usize,
steps: Vec<&StepRecord>,
) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
let syscall_code = UINT256_MUL;
let mut lk_multiplicity = LkMultiplicity::default();
if steps.is_empty() {
return Ok((
[
RowMajorMatrix::new(0, num_witin, InstancePaddingStrategy::Default),
RowMajorMatrix::new(0, num_structural_witin, InstancePaddingStrategy::Default),
],
lk_multiplicity.into_finalize_result(),
));
}
let nthreads = max_usable_threads();
let num_instance_per_batch = steps.len().div_ceil(nthreads).max(1);
let mut raw_witin = RowMajorMatrix::<E::BaseField>::new(
steps.len(),
num_witin,
InstancePaddingStrategy::Default,
);
let mut raw_structural_witin = RowMajorMatrix::<E::BaseField>::new(
steps.len(),
num_structural_witin,
InstancePaddingStrategy::Default,
);
let raw_witin_iter = raw_witin.par_batch_iter_mut(num_instance_per_batch);
let shard_ctx_vec = shard_ctx.get_forked();
// 1st pass: assign witness outside of gkr-iop scope
raw_witin_iter
.zip_eq(steps.par_chunks(num_instance_per_batch))
.zip(shard_ctx_vec)
.flat_map(|((instances, steps), mut shard_ctx)| {
let mut lk_multiplicity = lk_multiplicity.clone();
instances
.chunks_mut(num_witin)
.zip_eq(steps)
.map(|(instance, step)| {
let ops = &step.syscall().expect("syscall step");
// vm_state
config
.vm_state
.assign_instance(instance, &shard_ctx, step)?;
config.ecall_id.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&WriteOp::new_register_op(
Platform::reg_ecall(),
Change::new(syscall_code, syscall_code),
step.rs1().unwrap().previous_cycle,
),
)?;
// assign word_ptr_0
config.word_ptr_0.1.assign_instance(
instance,
&mut lk_multiplicity,
ops.reg_ops[0].value.after,
)?;
config.word_ptr_0.0.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&ops.reg_ops[0],
)?;
// assign word_ptr_1
config.word_ptr_1.1.assign_instance(
instance,
&mut lk_multiplicity,
ops.reg_ops[1].value.after,
)?;
config.word_ptr_1.0.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&ops.reg_ops[1],
)?;
for (writer, op) in config.mem_rw.iter().zip_eq(&ops.mem_ops) {
writer.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
op,
)?;
}
// fetch
lk_multiplicity.fetch(step.pc().before.0);
Ok(())
})
.collect::<Vec<_>>()
})
.collect::<Result<(), ZKVMError>>()?;
// second pass
let instances: Vec<Uint256MulInstance> = steps
.par_iter()
.map(|step| {
let (instance, _prev_ts): (Vec<u32>, Vec<Cycle>) = step
.syscall()
.unwrap()
.mem_ops
.iter()
.map(|op| (op.value.before, op.previous_cycle))
.unzip();
let x = biguint_from_le_words(
&instance[0..<U256Field as NumWords>::WordsFieldElement::USIZE],
);
let y = biguint_from_le_words(
&instance[<U256Field as NumWords>::WordsFieldElement::USIZE..],
);
let modulus = biguint_from_le_words(
&instance[2 * <U256Field as NumWords>::WordsFieldElement::USIZE..],
);
Uint256MulInstance { x, y, modulus }
})
.collect();
config.layout.phase1_witness_group(
Uint256MulTrace { instances },
[&mut raw_witin, &mut raw_structural_witin],
&mut lk_multiplicity,
);
raw_witin.padding_by_strategy();
raw_structural_witin.padding_by_strategy();
Ok((
[raw_witin, raw_structural_witin],
lk_multiplicity.into_finalize_result(),
))
}
}
/// Uint256InvInstruction can handle any instruction and produce its side-effects.
pub struct Uint256InvInstruction<E, P>(PhantomData<(E, P)>);
pub struct Secp256K1EcallSpec;
impl Uint256InvSpec for Secp256K1EcallSpec {
type P = Secp256k1BaseField;
fn syscall() -> u32 {
SECP256K1_SCALAR_INVERT
}
fn name() -> String {
"secp256k1_scalar_invert".to_string()
}
fn modulus() -> BigUint {
Secp256k1::prime_group_order()
}
}
pub type Secp256k1InvInstruction<E> = Uint256InvInstruction<E, Secp256K1EcallSpec>;
#[derive(Debug)]
pub struct EcallUint256InvConfig<E: ExtensionField, Spec: Uint256InvSpec> {
pub layout: Uint256InvLayout<E, Spec>,
vm_state: StateInOut<E>,
ecall_id: OpFixedRS<E, { Platform::reg_ecall() }, false>,
word_ptr_0: (OpFixedRS<E, { Platform::reg_arg0() }, true>, MemAddr<E>),
mem_rw: Vec<WriteMEM>,
}
impl<E: ExtensionField, Spec: Uint256InvSpec> Instruction<E> for Uint256InvInstruction<E, Spec> {
type InstructionConfig = EcallUint256InvConfig<E, Spec>;
fn name() -> String {
Spec::name()
}
fn construct_circuit(
_circuit_builder: &mut CircuitBuilder<E>,
_param: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
unimplemented!()
}
fn build_gkr_iop_circuit(
cb: &mut CircuitBuilder<E>,
_param: &ProgramParams,
) -> Result<(Self::InstructionConfig, GKRCircuit<E>), ZKVMError> {
// constrain vmstate
let vm_state = StateInOut::construct_circuit(cb, false)?;
let syscall_code = Spec::syscall();
let ecall_id = OpFixedRS::<_, { Platform::reg_ecall() }, false>::construct_circuit(
cb,
UInt::from_const_unchecked(vec![
syscall_code & LIMB_MASK,
(syscall_code >> LIMB_BITS) & LIMB_MASK,
])
.register_expr(),
vm_state.ts,
)?;
let word_ptr_value_0 = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
let word_ptr_0 = OpFixedRS::<_, { Platform::reg_arg0() }, true>::construct_circuit(
cb,
word_ptr_value_0.uint_unaligned().register_expr(),
vm_state.ts,
)?;
// fetch
cb.lk_fetch(&InsnRecord::new(
vm_state.pc.expr(),
InsnKind::ECALL.into(),
None,
0.into(),
0.into(),
0.into(),
#[cfg(feature = "u16limb_circuit")]
0.into(),
))?;
let mut layout =
<Uint256InvLayout<E, Spec> as ProtocolBuilder<E>>::build_layer_logic(cb, ())?;
// Write the result to the same address of the first input point.
let mem_rw = layout
.input32_exprs
.iter()
.zip_eq(&layout.output32_exprs)
.enumerate()
.map(|(i, (val_before, val_after))| {
WriteMEM::construct_circuit(
cb,
// mem address := word_ptr_0 + i
word_ptr_0.prev_value.as_ref().unwrap().value()
+ E::BaseField::from_canonical_u32(
ByteAddr::from((i * WORD_SIZE) as u32).0,
)
.expr(),
val_before.clone(),
val_after.clone(),
vm_state.ts,
)
})
.collect::<Result<Vec<WriteMEM>, _>>()?;
let (out_evals, mut chip) = layout.finalize(cb);
let layer = Layer::from_circuit_builder(cb, Spec::name(), layout.n_challenges, out_evals);
chip.add_layer(layer);
let circuit = chip.gkr_circuit();
Ok((
EcallUint256InvConfig {
layout,
vm_state,
ecall_id,
word_ptr_0: (word_ptr_0, word_ptr_value_0),
mem_rw,
},
circuit,
))
}
fn generate_fixed_traces(
config: &Self::InstructionConfig,
num_fixed: usize,
) -> Option<RowMajorMatrix<E::BaseField>> {
let fixed = config.layout.fixed_witness_group();
assert_eq!(fixed.width(), num_fixed);
Some(fixed)
}
fn assign_instance(
_config: &Self::InstructionConfig,
_shard_ctx: &mut ShardContext,
_instance: &mut [<E as ExtensionField>::BaseField],
_lk_multiplicity: &mut LkMultiplicity,
_step: &StepRecord,
) -> Result<(), ZKVMError> {
unreachable!("we override logic in assign_instances")
}
fn assign_instances(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
num_witin: usize,
num_structural_witin: usize,
steps: Vec<&StepRecord>,
) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
let syscall_code = Spec::syscall();
let mut lk_multiplicity = LkMultiplicity::default();
if steps.is_empty() {
return Ok((
[
RowMajorMatrix::new(0, num_witin, InstancePaddingStrategy::Default),
RowMajorMatrix::new(0, num_structural_witin, InstancePaddingStrategy::Default),
],
lk_multiplicity.into_finalize_result(),
));
}
let nthreads = max_usable_threads();
let num_instance_per_batch = steps.len().div_ceil(nthreads).max(1);
let mut raw_witin = RowMajorMatrix::<E::BaseField>::new(
steps.len(),
num_witin,
InstancePaddingStrategy::Default,
);
let mut raw_structural_witin = RowMajorMatrix::<E::BaseField>::new(
steps.len(),
num_structural_witin,
InstancePaddingStrategy::Default,
);
let raw_witin_iter = raw_witin.par_batch_iter_mut(num_instance_per_batch);
let shard_ctx_vec = shard_ctx.get_forked();
// 1st pass: assign witness outside of gkr-iop scope
raw_witin_iter
.zip_eq(steps.par_chunks(num_instance_per_batch))
.zip(shard_ctx_vec)
.flat_map(|((instances, steps), mut shard_ctx)| {
let mut lk_multiplicity = lk_multiplicity.clone();
instances
.chunks_mut(num_witin)
.zip_eq(steps)
.map(|(instance, step)| {
let ops = &step.syscall().expect("syscall step");
// vm_state
config
.vm_state
.assign_instance(instance, &shard_ctx, step)?;
config.ecall_id.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&WriteOp::new_register_op(
Platform::reg_ecall(),
Change::new(syscall_code, syscall_code),
step.rs1().unwrap().previous_cycle,
),
)?;
// assign word_ptr_0
config.word_ptr_0.1.assign_instance(
instance,
&mut lk_multiplicity,
ops.reg_ops[0].value.after,
)?;
config.word_ptr_0.0.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
&ops.reg_ops[0],
)?;
for (writer, op) in config.mem_rw.iter().zip_eq(&ops.mem_ops) {
writer.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
step.cycle(),
op,
)?;
}
// fetch
lk_multiplicity.fetch(step.pc().before.0);
Ok(())
})
.collect::<Vec<_>>()
})
.collect::<Result<(), ZKVMError>>()?;
// second pass
let instances: Vec<BigUint> = steps
.par_iter()
.map(|step| {
let (instance, _): (Vec<u32>, Vec<Cycle>) = step
.syscall()
.unwrap()
.mem_ops
.iter()
.map(|op| (op.value.before, op.previous_cycle))
.unzip();
biguint_from_be_words(&instance)
})
.collect();
config.layout.phase1_witness_group(
Uint256InvTrace { instances },
[&mut raw_witin, &mut raw_structural_witin],
&mut lk_multiplicity,
);
raw_witin.padding_by_strategy();
raw_structural_witin.padding_by_strategy();
Ok((
[raw_witin, raw_structural_witin],
lk_multiplicity.into_finalize_result(),
))
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/ecall/fptower_fp2_mul.rs | ceno_zkvm/src/instructions/riscv/ecall/fptower_fp2_mul.rs | use std::marker::PhantomData;
use ceno_emul::{
BN254_FP2_MUL, ByteAddr, Change, InsnKind, Platform, StepRecord, WORD_SIZE, WriteOp,
};
use ff_ext::ExtensionField;
use generic_array::typenum::Unsigned;
use gkr_iop::{
ProtocolBuilder, ProtocolWitnessGenerator,
gkr::{GKRCircuit, layer::Layer},
utils::lk_multiplicity::Multiplicity,
};
use itertools::{Itertools, izip};
use multilinear_extensions::{ToExpr, util::max_usable_threads};
use p3::{field::FieldAlgebra, matrix::Matrix};
use rayon::{
iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator},
slice::ParallelSlice,
};
use sp1_curves::{
params::NumWords,
utils::biguint_from_le_words,
weierstrass::{FpOpField, bn254::Bn254BaseField},
};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::general::InstFetch,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
constants::{LIMB_BITS, LIMB_MASK, MEM_BITS, UInt},
ecall_base::OpFixedRS,
insn_base::{MemAddr, StateInOut, WriteMEM},
},
},
precompiles::{Fp2MulAssignLayout, Fp2MulInstance, Fp2MulTrace},
structs::ProgramParams,
tables::{InsnRecord, RMMCollections},
witness::LkMultiplicity,
};
/// Ties a base-field implementation to the syscall code of its Fp2
/// multiplication precompile.
pub trait Fp2MulSpec: FpOpField {
    /// RISC-V ecall/syscall code that selects this field's Fp2 multiplication.
    const SYSCALL_CODE: u32;
}
/// BN254 dispatches Fp2 multiplication via the `BN254_FP2_MUL` syscall.
impl Fp2MulSpec for Bn254BaseField {
    const SYSCALL_CODE: u32 = BN254_FP2_MUL;
}
/// Circuit configuration for the Fp2-mul ecall precompile.
///
/// Holds the GKR-IOP assignment layout plus the per-instruction plumbing
/// (VM state transition, ecall-id register read, the two operand pointer
/// registers with their decomposed memory addresses, and the memory
/// read/write columns).
#[derive(Debug)]
pub struct EcallFp2MulConfig<E: ExtensionField, P: FpOpField> {
    // GKR-IOP layout driving phase-1 witness generation for the Fp2 mul itself.
    pub layout: Fp2MulAssignLayout<E, P>,
    // pc/timestamp state in and out of this instruction.
    vm_state: StateInOut<E>,
    // Read of the ecall register; pinned to `P::SYSCALL_CODE` in the circuit.
    ecall_id: OpFixedRS<E, { Platform::reg_ecall() }, false>,
    // arg0 register read paired with its range-checked memory address (x operand).
    value_ptr_0: (OpFixedRS<E, { Platform::reg_arg0() }, true>, MemAddr<E>),
    // arg1 register read paired with its range-checked memory address (y operand).
    value_ptr_1: (OpFixedRS<E, { Platform::reg_arg1() }, true>, MemAddr<E>),
    // One writer per touched word: x-region words are overwritten with the
    // result, y-region words are written back unchanged (read-as-write).
    mem_rw: Vec<WriteMEM>,
}
pub struct Fp2MulInstruction<E, P>(PhantomData<(E, P)>);
impl<E: ExtensionField, P: FpOpField + Fp2MulSpec + NumWords> Instruction<E>
    for Fp2MulInstruction<E, P>
{
    type InstructionConfig = EcallFp2MulConfig<E, P>;
    fn name() -> String {
        "Ecall_Fp2Mul".to_string()
    }
    /// This precompile is only realized as a GKR-IOP circuit; the plain
    /// constraint-builder path is intentionally unsupported.
    fn construct_circuit(
        _circuit_builder: &mut CircuitBuilder<E>,
        _param: &ProgramParams,
    ) -> Result<Self::InstructionConfig, ZKVMError> {
        unimplemented!()
    }
    /// Builds the GKR-IOP circuit; delegates to the free function below.
    fn build_gkr_iop_circuit(
        cb: &mut CircuitBuilder<E>,
        _param: &ProgramParams,
    ) -> Result<(Self::InstructionConfig, GKRCircuit<E>), ZKVMError> {
        build_fp2_mul_circuit::<E, P>(cb)
    }
    /// Emits the fixed-column trace from the layout and sanity-checks its width.
    fn generate_fixed_traces(
        config: &Self::InstructionConfig,
        num_fixed: usize,
    ) -> Option<RowMajorMatrix<E::BaseField>> {
        let fixed = config.layout.fixed_witness_group();
        assert_eq!(fixed.width(), num_fixed);
        Some(fixed)
    }
    /// Per-instance assignment is not used: this circuit assigns all
    /// instances at once (see `assign_instances`).
    fn assign_instance(
        _config: &Self::InstructionConfig,
        _shard_ctx: &mut ShardContext,
        _instance: &mut [<E as ExtensionField>::BaseField],
        _lk_multiplicity: &mut LkMultiplicity,
        _step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        unreachable!("we override logic in assign_instances")
    }
    /// Batch witness assignment; delegates to the free function below.
    fn assign_instances(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        num_witin: usize,
        num_structural_witin: usize,
        steps: Vec<&StepRecord>,
    ) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
        assign_fp2_mul_instances::<E, P>(config, shard_ctx, num_witin, num_structural_witin, steps)
    }
}
/// Builds the full Fp2-mul ecall circuit: VM state transition, register
/// reads (ecall id pinned to `P::SYSCALL_CODE`, arg0/arg1 operand pointers),
/// instruction fetch, the GKR-IOP layer logic, and one memory writer per
/// touched word.
///
/// NOTE: the order of `construct_circuit` calls below determines witness
/// column allocation and must match the assignment order in
/// `assign_fp2_mul_instances`.
fn build_fp2_mul_circuit<E: ExtensionField, P: FpOpField + Fp2MulSpec + NumWords>(
    cb: &mut CircuitBuilder<E>,
) -> Result<(EcallFp2MulConfig<E, P>, GKRCircuit<E>), ZKVMError> {
    let vm_state = StateInOut::construct_circuit(cb, false)?;
    // Ecall register must hold the syscall code, split into 16-bit limbs.
    let ecall_id = OpFixedRS::<_, { Platform::reg_ecall() }, false>::construct_circuit(
        cb,
        UInt::from_const_unchecked(vec![
            P::SYSCALL_CODE & LIMB_MASK,
            (P::SYSCALL_CODE >> LIMB_BITS) & LIMB_MASK,
        ])
        .register_expr(),
        vm_state.ts,
    )?;
    // Operand base addresses, word-aligned (2 low zero bits) within MEM_BITS.
    let value_ptr_value_0 = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
    let value_ptr_value_1 = MemAddr::construct_with_max_bits(cb, 2, MEM_BITS)?;
    let value_ptr_0 = OpFixedRS::<_, { Platform::reg_arg0() }, true>::construct_circuit(
        cb,
        value_ptr_value_0.uint_unaligned().register_expr(),
        vm_state.ts,
    )?;
    let value_ptr_1 = OpFixedRS::<_, { Platform::reg_arg1() }, true>::construct_circuit(
        cb,
        value_ptr_value_1.uint_unaligned().register_expr(),
        vm_state.ts,
    )?;
    // Fetch: an ECALL has no rd/rs/imm operands, hence the zero fields.
    cb.lk_fetch(&InsnRecord::new(
        vm_state.pc.expr(),
        InsnKind::ECALL.into(),
        None,
        0.into(),
        0.into(),
        0.into(),
        #[cfg(feature = "u16limb_circuit")]
        0.into(),
    ))?;
    // GKR-IOP layer computing the Fp2 product from input/output 32-bit words.
    let mut layout = <Fp2MulAssignLayout<E, P> as ProtocolBuilder<E>>::build_layer_logic(cb, ())?;
    // x-operand region: each word is overwritten with the corresponding
    // result word at address value_ptr_0 + i*WORD_SIZE.
    let mut mem_rw = izip!(&layout.input32_exprs[0], &layout.output32_exprs)
        .enumerate()
        .map(|(i, (val_before, val_after))| {
            WriteMEM::construct_circuit(
                cb,
                value_ptr_0.prev_value.as_ref().unwrap().value()
                    + E::BaseField::from_canonical_u32(ByteAddr::from((i * WORD_SIZE) as u32).0)
                        .expr(),
                val_before.clone(),
                val_after.clone(),
                vm_state.ts,
            )
        })
        .collect::<Result<Vec<WriteMEM>, _>>()?;
    // y-operand region: written back unchanged (before == after), so the
    // memory argument records a read without modifying the value.
    mem_rw.extend(
        layout.input32_exprs[1]
            .iter()
            .enumerate()
            .map(|(i, val_before)| {
                WriteMEM::construct_circuit(
                    cb,
                    value_ptr_1.prev_value.as_ref().unwrap().value()
                        + E::BaseField::from_canonical_u32(
                            ByteAddr::from((i * WORD_SIZE) as u32).0,
                        )
                        .expr(),
                    val_before.clone(),
                    val_before.clone(),
                    vm_state.ts,
                )
            })
            .collect::<Result<Vec<WriteMEM>, _>>()?,
    );
    // Finalize the layout into a single GKR layer and package the circuit.
    let (out_evals, mut chip) = layout.finalize(cb);
    let layer =
        Layer::from_circuit_builder(cb, "fp2_mul".to_string(), layout.n_challenges, out_evals);
    chip.add_layer(layer);
    Ok((
        EcallFp2MulConfig {
            layout,
            vm_state,
            ecall_id,
            value_ptr_0: (value_ptr_0, value_ptr_value_0),
            value_ptr_1: (value_ptr_1, value_ptr_value_1),
            mem_rw,
        },
        chip.gkr_circuit(),
    ))
}
/// Assigns witness values for all Fp2-mul ecall steps in two passes:
/// 1. a parallel pass over batches of steps filling the per-instruction
///    columns (VM state, registers, memory ops, fetch lookup), and
/// 2. a pass reconstructing each call's (x_c0, x_c1, y_c0, y_c1) big
///    integers from the pre-state memory words and handing them to the
///    GKR-IOP layout for phase-1 witness generation.
///
/// Returns the main and structural witness matrices (padded) plus the
/// finalized lookup multiplicities.
fn assign_fp2_mul_instances<E: ExtensionField, P: FpOpField + Fp2MulSpec + NumWords>(
    config: &EcallFp2MulConfig<E, P>,
    shard_ctx: &mut ShardContext,
    num_witin: usize,
    num_structural_witin: usize,
    steps: Vec<&StepRecord>,
) -> Result<(RMMCollections<E::BaseField>, Multiplicity<u64>), ZKVMError> {
    let mut lk_multiplicity = LkMultiplicity::default();
    // No steps: return empty (zero-row) matrices of the expected widths.
    if steps.is_empty() {
        return Ok((
            [
                RowMajorMatrix::new(0, num_witin, InstancePaddingStrategy::Default),
                RowMajorMatrix::new(0, num_structural_witin, InstancePaddingStrategy::Default),
            ],
            lk_multiplicity.into_finalize_result(),
        ));
    }
    // Batch size so each rayon worker gets a contiguous chunk of rows.
    let nthreads = max_usable_threads();
    let num_instance_per_batch = steps.len().div_ceil(nthreads).max(1);
    let mut raw_witin = RowMajorMatrix::<E::BaseField>::new(
        steps.len(),
        num_witin,
        InstancePaddingStrategy::Default,
    );
    let mut raw_structural_witin = RowMajorMatrix::<E::BaseField>::new(
        steps.len(),
        num_structural_witin,
        InstancePaddingStrategy::Default,
    );
    let raw_witin_iter = raw_witin.par_batch_iter_mut(num_instance_per_batch);
    // Fork the shard context so each batch mutates its own copy.
    let shard_ctx_vec = shard_ctx.get_forked();
    raw_witin_iter
        .zip_eq(steps.par_chunks(num_instance_per_batch))
        .zip(shard_ctx_vec)
        .flat_map(|((instances, steps), mut shard_ctx)| {
            // Thread-local multiplicity clone; merged on finalize.
            let mut lk_multiplicity = lk_multiplicity.clone();
            instances
                .chunks_mut(num_witin)
                .zip_eq(steps)
                .map(|(instance, step)| {
                    let ops = &step.syscall().expect("syscall step");
                    config
                        .vm_state
                        .assign_instance(instance, &shard_ctx, step)?;
                    // Ecall register read: value is the (unchanged) syscall code.
                    config.ecall_id.assign_op(
                        instance,
                        &mut shard_ctx,
                        &mut lk_multiplicity,
                        step.cycle(),
                        &WriteOp::new_register_op(
                            Platform::reg_ecall(),
                            Change::new(P::SYSCALL_CODE, P::SYSCALL_CODE),
                            step.rs1().unwrap().previous_cycle,
                        ),
                    )?;
                    // arg0 pointer: MemAddr limbs first, then the register op.
                    config.value_ptr_0.1.assign_instance(
                        instance,
                        &mut lk_multiplicity,
                        ops.reg_ops[0].value.after,
                    )?;
                    config.value_ptr_0.0.assign_op(
                        instance,
                        &mut shard_ctx,
                        &mut lk_multiplicity,
                        step.cycle(),
                        &ops.reg_ops[0],
                    )?;
                    // arg1 pointer, same pattern.
                    config.value_ptr_1.1.assign_instance(
                        instance,
                        &mut lk_multiplicity,
                        ops.reg_ops[1].value.after,
                    )?;
                    config.value_ptr_1.0.assign_op(
                        instance,
                        &mut shard_ctx,
                        &mut lk_multiplicity,
                        step.cycle(),
                        &ops.reg_ops[1],
                    )?;
                    // One recorded memory op per WriteMEM column, in order.
                    for (writer, op) in config.mem_rw.iter().zip_eq(&ops.mem_ops) {
                        writer.assign_op(
                            instance,
                            &mut shard_ctx,
                            &mut lk_multiplicity,
                            step.cycle(),
                            op,
                        )?;
                    }
                    // Count the instruction-fetch lookup.
                    lk_multiplicity.fetch(step.pc().before.0);
                    Ok(())
                })
                .collect::<Vec<_>>()
        })
        .collect::<Result<(), ZKVMError>>()?;
    // Second pass: rebuild the precompile inputs from pre-state memory.
    // Memory layout per call: [x_c0 | x_c1 | y_c0 | y_c1], each `words` long;
    // the y region starts after one full Fp2 element (`words_fp2` words).
    let words = <P as NumWords>::WordsFieldElement::USIZE;
    let words_fp2 = <P as NumWords>::WordsCurvePoint::USIZE;
    let instances: Vec<Fp2MulInstance<P>> = steps
        .par_iter()
        .map(|step| {
            let values: Vec<u32> = step
                .syscall()
                .unwrap()
                .mem_ops
                .iter()
                .map(|op| op.value.before)
                .collect();
            let x_c0 = biguint_from_le_words(&values[0..words]);
            let x_c1 = biguint_from_le_words(&values[words..2 * words]);
            let y_base = words_fp2;
            let y_c0 = biguint_from_le_words(&values[y_base..y_base + words]);
            let y_c1 = biguint_from_le_words(&values[y_base + words..y_base + 2 * words]);
            Fp2MulInstance::new(x_c0, x_c1, y_c0, y_c1)
        })
        .collect();
    config.layout.phase1_witness_group(
        Fp2MulTrace { instances },
        [&mut raw_witin, &mut raw_structural_witin],
        &mut lk_multiplicity,
    );
    // Pad both matrices up to their target height per the chosen strategy.
    raw_witin.padding_by_strategy();
    raw_structural_witin.padding_by_strategy();
    Ok((
        [raw_witin, raw_structural_witin],
        lk_multiplicity.into_finalize_result(),
    ))
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/jump/jal.rs | ceno_zkvm/src/instructions/riscv/jump/jal.rs | use std::marker::PhantomData;
use ff_ext::ExtensionField;
use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{constants::UInt, j_insn::JInstructionConfig},
},
structs::ProgramParams,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, PC_STEP_SIZE};
use multilinear_extensions::ToExpr;
/// Witness configuration for the JAL circuit (16-bit-limb variant).
pub struct JalConfig<E: ExtensionField> {
    // Shared J-type instruction sub-circuit configuration.
    pub j_insn: JInstructionConfig<E>,
    // Link value written to rd; constrained below to pc + PC_STEP_SIZE.
    pub rd_written: UInt<E>,
}
pub struct JalInstruction<E>(PhantomData<E>);
/// JAL instruction circuit
///
/// Note: does not validate that next_pc is aligned by 4-byte increments, which
/// should be verified by lookup argument of the next execution step against
/// the program table
///
/// Assumption: values for valid initial program counter must lie between
/// 2^20 and 2^32 - 2^20 + 2 inclusive, probably enforced by the static
/// program lookup table. If this assumption does not hold, then resulting
/// value for next_pc may not correctly wrap mod 2^32 because of the use
/// of native WitIn values for address space arithmetic.
impl<E: ExtensionField> Instruction<E> for JalInstruction<E> {
    type InstructionConfig = JalConfig<E>;
    fn name() -> String {
        format!("{:?}", InsnKind::JAL)
    }
    /// Builds the JAL constraints: the J-type common plumbing plus the single
    /// instruction-specific constraint rd = pc + 4 (the link address).
    fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<JalConfig<E>, ZKVMError> {
        let rd_written = UInt::new(|| "rd_written", circuit_builder)?;
        let j_insn = JInstructionConfig::construct_circuit(
            circuit_builder,
            InsnKind::JAL,
            rd_written.register_expr(),
        )?;
        // JAL writes the return (link) address pc + 4 into rd.
        circuit_builder.require_equal(
            || "jal rd_written",
            rd_written.value(),
            j_insn.vm_state.pc.expr() + PC_STEP_SIZE,
        )?;
        Ok(JalConfig { j_insn, rd_written })
    }
    /// Fills one witness row from an executed JAL step record.
    fn assign_instance(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        instance: &mut [E::BaseField],
        lk_multiplicity: &mut LkMultiplicity,
        step: &ceno_emul::StepRecord,
    ) -> Result<(), ZKVMError> {
        config
            .j_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        // The recorded rd post-value is the link address pc + 4.
        let rd_written = Value::new(step.rd().unwrap().value.after, lk_multiplicity);
        config.rd_written.assign_value(instance, rd_written);
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/jump/test.rs | ceno_zkvm/src/instructions/riscv/jump/test.rs | use super::{JalInstruction, JalrInstruction};
use crate::{
Value,
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::ShardContext,
instructions::{Instruction, riscv::constants::UInt},
scheme::mock_prover::{MOCK_PC_START, MockProver},
structs::ProgramParams,
};
use ceno_emul::{ByteAddr, Change, InsnKind, PC_STEP_SIZE, StepRecord, Word, encode_rv32};
#[cfg(feature = "u16limb_circuit")]
use ff_ext::BabyBearExt4;
use ff_ext::{ExtensionField, GoldilocksExt2};
use gkr_iop::circuit_builder::DebugIndex;
#[test]
fn test_opcode_jal() {
    // Cover a backward (-8) and a forward (+8) jump offset for each field.
    for pc_offset in [-8, 8] {
        verify_test_opcode_jal::<GoldilocksExt2>(pc_offset);
    }
    #[cfg(feature = "u16limb_circuit")]
    for pc_offset in [-8, 8] {
        verify_test_opcode_jal::<BabyBearExt4>(pc_offset);
    }
}
/// End-to-end check of the JAL circuit for a single jump offset:
/// constructs the circuit, assigns a witness from a synthetic step record,
/// pins rd to the expected link address pc + 4, and runs the mock prover.
fn verify_test_opcode_jal<E: ExtensionField>(pc_offset: i32) {
    let mut cs = ConstraintSystem::<E>::new(|| "riscv");
    let mut cb = CircuitBuilder::new(&mut cs);
    let config = cb
        .namespace(
            || "jal",
            |cb| {
                let config = JalInstruction::<E>::construct_circuit(cb, &ProgramParams::default());
                Ok(config)
            },
        )
        .unwrap()
        .unwrap();
    // Target pc wraps modulo 2^32, matching the emulator's address arithmetic.
    let new_pc: ByteAddr = ByteAddr(MOCK_PC_START.0.wrapping_add_signed(pc_offset));
    let insn_code = encode_rv32(InsnKind::JAL, 0, 0, 4, pc_offset);
    let (raw_witin, lkm) = JalInstruction::<E>::assign_instances(
        &config,
        &mut ShardContext::default(),
        cb.cs.num_witin as usize,
        cb.cs.num_structural_witin as usize,
        vec![&StepRecord::new_j_instruction(
            4,
            Change::new(MOCK_PC_START, new_pc),
            insn_code,
            Change::new(0, (MOCK_PC_START + PC_STEP_SIZE).into()),
            0,
        )],
    )
    .unwrap();
    // verify rd_written: the link value must equal pc + 4.
    let expected_rd_written = UInt::from_const_unchecked(
        Value::new_unchecked(MOCK_PC_START.0 + PC_STEP_SIZE as u32)
            .as_u16_limbs()
            .to_vec(),
    );
    let rd_written_expr = cb.get_debug_expr(DebugIndex::RdWrite as usize)[0].clone();
    cb.require_equal(
        || "assert_rd_written",
        rd_written_expr,
        expected_rd_written.value(),
    )
    .unwrap();
    MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
#[test]
fn test_opcode_jalr() {
    // rs1 = 100 combined with a positive and a negative immediate.
    for imm in [3, -3] {
        verify_test_opcode_jalr::<GoldilocksExt2>(100, imm);
    }
    #[cfg(feature = "u16limb_circuit")]
    for imm in [3, -3] {
        verify_test_opcode_jalr::<BabyBearExt4>(100, imm);
    }
}
/// End-to-end check of the JALR circuit for one (rs1, imm) pair:
/// the expected next pc is (rs1 + imm) with the least-significant bit
/// cleared, and rd must receive the link address pc + 4.
fn verify_test_opcode_jalr<E: ExtensionField>(rs1_read: Word, imm: i32) {
    let mut cs = ConstraintSystem::<E>::new(|| "riscv");
    let mut cb = CircuitBuilder::new(&mut cs);
    let config = cb
        .namespace(
            || "jalr",
            |cb| {
                let config = JalrInstruction::<E>::construct_circuit(cb, &ProgramParams::default());
                Ok(config)
            },
        )
        .unwrap()
        .unwrap();
    // trim lower bit to 0 (JALR rounds the target down to an even address)
    let new_pc: ByteAddr = ByteAddr(rs1_read.wrapping_add_signed(imm) & (!1));
    let insn_code = encode_rv32(InsnKind::JALR, 2, 0, 4, imm);
    // verify rd_written: the link value must equal pc + 4.
    let expected_rd_written = UInt::from_const_unchecked(
        Value::new_unchecked(MOCK_PC_START.0 + PC_STEP_SIZE as u32)
            .as_u16_limbs()
            .to_vec(),
    );
    let rd_written_expr = cb.get_debug_expr(DebugIndex::RdWrite as usize)[0].clone();
    cb.require_equal(
        || "assert_rd_written",
        rd_written_expr,
        expected_rd_written.value(),
    )
    .unwrap();
    let (raw_witin, lkm) = JalrInstruction::<E>::assign_instances(
        &config,
        &mut ShardContext::default(),
        cb.cs.num_witin as usize,
        cb.cs.num_structural_witin as usize,
        vec![&StepRecord::new_i_instruction(
            4,
            Change::new(MOCK_PC_START, new_pc),
            insn_code,
            rs1_read,
            Change::new(0, (MOCK_PC_START + PC_STEP_SIZE).into()),
            0,
        )],
    )
    .unwrap();
    MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/jump/jal_v2.rs | ceno_zkvm/src/instructions/riscv/jump/jal_v2.rs | use std::marker::PhantomData;
use ff_ext::ExtensionField;
use crate::{
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
constants::{PC_BITS, UINT_BYTE_LIMBS, UInt8},
j_insn::JInstructionConfig,
},
},
structs::ProgramParams,
utils::split_to_u8,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, PC_STEP_SIZE};
use gkr_iop::tables::{LookupTable, ops::XorTable};
use multilinear_extensions::{Expression, ToExpr};
use p3::field::FieldAlgebra;
/// Witness configuration for the JAL circuit (8-bit-limb `u16limb_circuit` variant).
pub struct JalConfig<E: ExtensionField> {
    // Shared J-type instruction sub-circuit configuration.
    pub j_insn: JInstructionConfig<E>,
    // Link value written to rd, decomposed into byte limbs.
    pub rd_written: UInt8<E>,
}
pub struct JalInstruction<E>(PhantomData<E>);
/// JAL instruction circuit
///
/// Note: does not validate that next_pc is aligned by 4-byte increments, which
/// should be verified by lookup argument of the next execution step against
/// the program table
///
/// Assumption: values for valid initial program counter must lie between
/// 2^20 and 2^32 - 2^20 + 2 inclusive, probably enforced by the static
/// program lookup table. If this assumption does not hold, then resulting
/// value for next_pc may not correctly wrap mod 2^32 because of the use
/// of native WitIn values for address space arithmetic.
impl<E: ExtensionField> Instruction<E> for JalInstruction<E> {
    type InstructionConfig = JalConfig<E>;
    fn name() -> String {
        format!("{:?}", InsnKind::JAL)
    }
    /// Builds the byte-limb JAL constraints: J-type plumbing, a range check
    /// that rd's top limb fits PC_BITS (via an XOR-table trick), and the
    /// constraint that the recomposed rd equals pc + 4.
    fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<JalConfig<E>, ZKVMError> {
        let rd_written = UInt8::new(|| "rd_written", circuit_builder)?;
        let rd_exprs = rd_written.expr();
        let j_insn = JInstructionConfig::construct_circuit(
            circuit_builder,
            InsnKind::JAL,
            rd_written.register_expr(),
        )?;
        // constrain rd_exprs [PC_BITS .. u32::BITS] are all 0 via xor:
        // `additional_bits` sets exactly the bits above PC_BITS in the top
        // limb; x XOR mask == x + mask holds iff x has none of those bits set.
        let last_limb_bits = PC_BITS - UInt8::<E>::LIMB_BITS * (UInt8::<E>::NUM_LIMBS - 1);
        let additional_bits =
            (last_limb_bits..UInt8::<E>::LIMB_BITS).fold(0, |acc, x| acc + (1 << x));
        let additional_bits = E::BaseField::from_canonical_u32(additional_bits);
        circuit_builder.logic_u8(
            LookupTable::Xor,
            rd_exprs[3].expr(),
            additional_bits.expr(),
            rd_exprs[3].expr() + additional_bits.expr(),
        )?;
        // Recompose the byte limbs into a single value and pin it to pc + 4.
        circuit_builder.require_equal(
            || "jal rd_written",
            rd_exprs
                .iter()
                .enumerate()
                .fold(Expression::ZERO, |acc, (i, val)| {
                    acc + val.expr()
                        * E::BaseField::from_canonical_u32(1 << (i * UInt8::<E>::LIMB_BITS)).expr()
                }),
            j_insn.vm_state.pc.expr() + PC_STEP_SIZE,
        )?;
        Ok(JalConfig { j_insn, rd_written })
    }
    /// Fills one witness row and records the matching lookup multiplicities
    /// (paired/byte range checks plus the top-limb XOR used in the circuit).
    fn assign_instance(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        instance: &mut [E::BaseField],
        lk_multiplicity: &mut LkMultiplicity,
        step: &ceno_emul::StepRecord,
    ) -> Result<(), ZKVMError> {
        config
            .j_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        let rd_written = split_to_u8(step.rd().unwrap().value.after);
        config.rd_written.assign_limbs(instance, &rd_written);
        // Byte range checks are batched in pairs; an odd trailing limb gets
        // a single 8-bit range check.
        for chunk in rd_written.chunks(2) {
            if chunk.len() == 2 {
                lk_multiplicity.assert_double_u8(chunk[0] as u64, chunk[1] as u64)
            } else {
                lk_multiplicity.assert_const_range(chunk[0] as u64, 8);
            }
        }
        // constrain pc msb limb range via xor (mirrors the circuit-side lookup)
        let last_limb_bits = PC_BITS - UInt8::<E>::LIMB_BITS * (UINT_BYTE_LIMBS - 1);
        let additional_bits =
            (last_limb_bits..UInt8::<E>::LIMB_BITS).fold(0, |acc, x| acc + (1 << x));
        lk_multiplicity.logic_u8::<XorTable>(rd_written[3] as u64, additional_bits as u64);
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/jump/jalr.rs | ceno_zkvm/src/instructions/riscv/jump/jalr.rs | use std::marker::PhantomData;
use ff_ext::ExtensionField;
use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{constants::UInt, i_insn::IInstructionConfig, insn_base::MemAddr},
},
structs::ProgramParams,
tables::InsnRecord,
witness::{LkMultiplicity, set_val},
};
use ceno_emul::{InsnKind, PC_STEP_SIZE};
use ff_ext::FieldInto;
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::FieldAlgebra;
/// Witness configuration for the JALR circuit (16-bit-limb variant).
pub struct JalrConfig<E: ExtensionField> {
    // Shared I-type instruction sub-circuit configuration.
    pub i_insn: IInstructionConfig<E>,
    // rs1 operand (unchecked 32-bit value; trusted from the register file).
    pub rs1_read: UInt<E>,
    // Signed 12-bit immediate as a single witness.
    pub imm: WitIn,
    // rs1 + imm before 2-alignment; next_pc is this rounded down to even.
    pub next_pc_addr: MemAddr<E>,
    // (overflow, tmp) witnesses for the mod-2^32 wrap; `None` when the
    // `forbid_overflow` feature statically disallows wrapping.
    pub overflow: Option<(WitIn, WitIn)>,
    // Link value written to rd; constrained to pc + 4.
    pub rd_written: UInt<E>,
}
pub struct JalrInstruction<E>(PhantomData<E>);
/// JALR instruction circuit
/// NOTE: does not validate that next_pc is aligned by 4-byte increments, which
/// should be verified by lookup argument of the next execution step against
/// the program table
impl<E: ExtensionField> Instruction<E> for JalrInstruction<E> {
    type InstructionConfig = JalrConfig<E>;
    fn name() -> String {
        format!("{:?}", InsnKind::JALR)
    }
    /// Builds the JALR constraints: next_pc = (rs1 + imm) mod 2^32 rounded
    /// down to an even address, and rd = pc + 4.
    fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<JalrConfig<E>, ZKVMError> {
        let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?; // unsigned 32-bit value
        let imm = circuit_builder.create_witin(|| "imm"); // signed 12-bit value
        let rd_written = UInt::new(|| "rd_written", circuit_builder)?;
        let i_insn = IInstructionConfig::construct_circuit(
            circuit_builder,
            InsnKind::JALR,
            imm.expr(),
            rs1_read.register_expr(),
            rd_written.register_expr(),
            true,
        )?;
        // Next pc is obtained by rounding rs1+imm down to an even value.
        // To implement this, check three conditions:
        // 1. rs1 + imm = next_pc_addr + overflow*2^32
        // 2. overflow in {-1, 0, 1}
        // 3. next_pc = next_pc_addr aligned to even value (round down)
        let next_pc_addr = MemAddr::<E>::construct_unaligned(circuit_builder)?;
        let (overflow_expr, overflow) = if cfg!(feature = "forbid_overflow") {
            // Wrapping is statically forbidden: no overflow witnesses needed.
            (Expression::ZERO, None)
        } else {
            // Enforce overflow ∈ {-1, 0, 1} via overflow * tmp = 0 with
            // tmp = (1 - overflow)(1 + overflow).
            let overflow = circuit_builder.create_witin(|| "overflow");
            let tmp = circuit_builder.create_witin(|| "overflow1");
            circuit_builder.require_zero(|| "overflow_0_or_pm1", overflow.expr() * tmp.expr())?;
            circuit_builder.require_equal(
                || "overflow_tmp",
                tmp.expr(),
                (1 - overflow.expr()) * (1 + overflow.expr()),
            )?;
            (overflow.expr(), Some((overflow, tmp)))
        };
        circuit_builder.require_equal(
            || "rs1+imm = next_pc_unrounded + overflow*2^32",
            rs1_read.value() + imm.expr(),
            next_pc_addr.expr_unaligned() + overflow_expr * (1u64 << 32),
        )?;
        circuit_builder.require_equal(
            || "next_pc_addr = next_pc",
            next_pc_addr.expr_align2(),
            i_insn.vm_state.next_pc.unwrap().expr(),
        )?;
        // write pc+4 to rd
        circuit_builder.require_equal(
            || "rd_written = pc+4",
            rd_written.value(),
            i_insn.vm_state.pc.expr() + PC_STEP_SIZE,
        )?;
        Ok(JalrConfig {
            i_insn,
            rs1_read,
            imm,
            next_pc_addr,
            overflow,
            rd_written,
        })
    }
    /// Fills one witness row, including the overflow witnesses' sign logic
    /// (a wrap with a negative immediate borrows, i.e. overflow = -1).
    fn assign_instance(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        instance: &mut [E::BaseField],
        lk_multiplicity: &mut LkMultiplicity,
        step: &ceno_emul::StepRecord,
    ) -> Result<(), ZKVMError> {
        let insn = step.insn();
        let rs1 = step.rs1().unwrap().value;
        let imm = InsnRecord::<E::BaseField>::imm_internal(&insn);
        let rd = step.rd().unwrap().value.after;
        let (sum, overflowing) = rs1.overflowing_add_signed(imm.0 as i32);
        config
            .rs1_read
            .assign_value(instance, Value::new_unchecked(rs1));
        config
            .rd_written
            .assign_value(instance, Value::new(rd, lk_multiplicity));
        set_val!(instance, config.imm, imm.1);
        config
            .next_pc_addr
            .assign_instance(instance, lk_multiplicity, sum)?;
        if let Some((overflow_cfg, tmp_cfg)) = &config.overflow {
            // tmp = (1 - overflow)(1 + overflow): 1 when no wrap, 0 otherwise.
            let (overflow, tmp) = match (overflowing, imm.0 < 0) {
                (false, _) => (E::BaseField::ZERO, E::BaseField::ONE),
                (true, false) => (E::BaseField::ONE, E::BaseField::ZERO),
                (true, true) => (-E::BaseField::ONE, E::BaseField::ZERO),
            };
            set_val!(instance, overflow_cfg, overflow);
            set_val!(instance, tmp_cfg, tmp);
        } else {
            assert!(!overflowing, "overflow not allowed in JALR");
        }
        config
            .i_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/jump/jalr_v2.rs | ceno_zkvm/src/instructions/riscv/jump/jalr_v2.rs | use ff_ext::ExtensionField;
use std::marker::PhantomData;
use crate::{
Value,
chip_handler::general::InstFetch,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
constants::{PC_BITS, UINT_LIMBS, UInt},
i_insn::IInstructionConfig,
insn_base::{MemAddr, ReadRS1, StateInOut, WriteRD},
},
},
structs::ProgramParams,
tables::InsnRecord,
utils::imm_sign_extend,
witness::{LkMultiplicity, set_val},
};
use ceno_emul::{InsnKind, PC_STEP_SIZE};
use ff_ext::FieldInto;
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::{Field, FieldAlgebra};
/// Witness configuration for the JALR circuit (`u16limb_circuit` variant).
pub struct JalrConfig<E: ExtensionField> {
    // Shared I-type instruction sub-circuit configuration.
    pub i_insn: IInstructionConfig<E>,
    // rs1 operand (unchecked 32-bit value; trusted from the register file).
    pub rs1_read: UInt<E>,
    // Immediate as a single witness.
    pub imm: WitIn,
    // Sign bit of the immediate, used for limb-wise sign extension.
    pub imm_sign: WitIn,
    // rs1 + imm before 2-alignment; next_pc is this rounded down to even.
    pub jump_pc_addr: MemAddr<E>,
    // High limb of rd; the low limb is derived from pc + 4 in-circuit.
    pub rd_high: WitIn,
}
pub struct JalrInstruction<E>(PhantomData<E>);
/// JALR instruction circuit
/// NOTE: does not validate that next_pc is aligned by 4-byte increments, which
/// should be verified by lookup argument of the next execution step against
/// the program table
impl<E: ExtensionField> Instruction<E> for JalrInstruction<E> {
    type InstructionConfig = JalrConfig<E>;
    fn name() -> String {
        format!("{:?}", InsnKind::JALR)
    }
    /// Builds the u16-limb JALR constraints: rd = pc + 4 (decomposed into a
    /// range-checked low/high limb pair), and next_pc = (rs1 + sign-extended
    /// imm) mod 2^32 rounded down to an even address, verified limb-by-limb
    /// with carry bits instead of a single 2^32-overflow witness.
    fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<JalrConfig<E>, ZKVMError> {
        // The limb arithmetic below is written for exactly two 16-bit limbs.
        assert_eq!(UINT_LIMBS, 2);
        let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?; // unsigned 32-bit value
        let imm = circuit_builder.create_witin(|| "imm"); // signed 12-bit value
        let imm_sign = circuit_builder.create_witin(|| "imm_sign");
        // State in and out
        let vm_state = StateInOut::construct_circuit(circuit_builder, true)?;
        // rd = pc + 4 split into limbs: rd_high is a witness, rd_low is
        // derived so that rd_low + rd_high * 2^LIMB_BITS = pc + 4.
        let rd_high = circuit_builder.create_witin(|| "rd_high");
        let rd_low: Expression<_> = vm_state.pc.expr()
            + E::BaseField::from_canonical_usize(PC_STEP_SIZE).expr()
            - rd_high.expr() * E::BaseField::from_canonical_u32(1 << UInt::<E>::LIMB_BITS).expr();
        // rd range check
        // rd_low
        circuit_builder.assert_const_range(|| "rd_low_u16", rd_low.expr(), UInt::<E>::LIMB_BITS)?;
        // rd_high
        circuit_builder.assert_const_range(
            || "rd_high_range",
            rd_high.expr(),
            PC_BITS - UInt::<E>::LIMB_BITS,
        )?;
        let rd_uint = UInt::from_exprs_unchecked(vec![rd_low.expr(), rd_high.expr()]);
        let jump_pc_addr = MemAddr::construct_with_max_bits(circuit_builder, 0, PC_BITS)?;
        // Registers
        let rs1 =
            ReadRS1::construct_circuit(circuit_builder, rs1_read.register_expr(), vm_state.ts)?;
        let rd = WriteRD::construct_circuit(circuit_builder, rd_uint.register_expr(), vm_state.ts)?;
        // Fetch the instruction.
        circuit_builder.lk_fetch(&InsnRecord::new(
            vm_state.pc.expr(),
            InsnKind::JALR.into(),
            Some(rd.id.expr()),
            rs1.id.expr(),
            0.into(),
            imm.expr(),
            imm_sign.expr(),
        ))?;
        let i_insn = IInstructionConfig { vm_state, rs1, rd };
        // Next pc is obtained by rounding rs1+imm down to an even value.
        // To implement this, check three conditions:
        // 1. rs1 + imm = jump_pc_addr + overflow*2^32
        // 3. next_pc = jump_pc_addr aligned to even value (round down)
        // Low-limb carry: (rs1_lo + imm - jump_lo) must be 0 or 2^LIMB_BITS,
        // so multiplying by 2^-LIMB_BITS yields a boolean carry.
        let inv = E::BaseField::from_canonical_u32(1 << UInt::<E>::LIMB_BITS).inverse();
        let carry = (rs1_read.expr()[0].expr() + imm.expr()
            - jump_pc_addr.uint_unaligned().expr()[0].expr())
            * inv.expr();
        circuit_builder.assert_bit(|| "carry_lo_bit", carry.expr())?;
        // High limb: sign-extend imm by a limb of all-ones when imm_sign = 1.
        let imm_extend_limb = imm_sign.expr()
            * E::BaseField::from_canonical_u32((1 << UInt::<E>::LIMB_BITS) - 1).expr();
        let carry = (rs1_read.expr()[1].expr() + imm_extend_limb.expr() + carry
            - jump_pc_addr.uint_unaligned().expr()[1].expr())
            * inv.expr();
        // The top carry is the discarded mod-2^32 overflow; it only needs to be a bit.
        circuit_builder.assert_bit(|| "overflow_bit", carry)?;
        circuit_builder.require_equal(
            || "jump_pc_addr = next_pc",
            jump_pc_addr.expr_align2(),
            i_insn.vm_state.next_pc.unwrap().expr(),
        )?;
        // write pc+4 to rd
        circuit_builder.require_equal(
            || "rd_written = pc+4",
            rd_uint.value(), // this operation is safe
            i_insn.vm_state.pc.expr() + PC_STEP_SIZE,
        )?;
        Ok(JalrConfig {
            i_insn,
            rs1_read,
            imm,
            imm_sign,
            jump_pc_addr,
            rd_high,
        })
    }
    /// Fills one witness row, mirroring the circuit's limb decomposition and
    /// registering the rd limb range-check lookups.
    fn assign_instance(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        instance: &mut [E::BaseField],
        lk_multiplicity: &mut LkMultiplicity,
        step: &ceno_emul::StepRecord,
    ) -> Result<(), ZKVMError> {
        let insn = step.insn();
        let rs1 = step.rs1().unwrap().value;
        let imm = InsnRecord::<E::BaseField>::imm_internal(&insn);
        set_val!(instance, config.imm, imm.1);
        // Per the RV32IM spec, the JALR immediate is always sign-extended.
        let imm_sign_extend = imm_sign_extend(true, step.insn().imm as i16);
        set_val!(
            instance,
            config.imm_sign,
            E::BaseField::from_bool(imm_sign_extend[1] > 0)
        );
        let rd = Value::new_unchecked(step.rd().unwrap().value.after);
        let rd_limb = rd.as_u16_limbs();
        // Range-check lookups matching the circuit's rd_low/rd_high asserts.
        lk_multiplicity.assert_const_range(rd_limb[0] as u64, 16);
        lk_multiplicity.assert_const_range(rd_limb[1] as u64, PC_BITS - 16);
        config
            .rs1_read
            .assign_value(instance, Value::new_unchecked(rs1));
        set_val!(
            instance,
            config.rd_high,
            E::BaseField::from_canonical_u16(rd_limb[1])
        );
        // Reassemble the sign-extended 32-bit immediate from its two u16 limbs.
        let (sum, _) = rs1.overflowing_add_signed(i32::from_ne_bytes([
            imm_sign_extend[0] as u8,
            (imm_sign_extend[0] >> 8) as u8,
            imm_sign_extend[1] as u8,
            (imm_sign_extend[1] >> 8) as u8,
        ]));
        config
            .jump_pc_addr
            .assign_instance(instance, lk_multiplicity, sum)?;
        config
            .i_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/rv32im/mmu.rs | ceno_zkvm/src/instructions/riscv/rv32im/mmu.rs | use crate::{
e2e::ShardContext,
error::ZKVMError,
scheme::PublicValues,
structs::{ProgramParams, ZKVMConstraintSystem, ZKVMFixedTraces, ZKVMWitnesses},
tables::{
DynVolatileRamTable, HeapInitCircuit, HeapTable, HintsInitCircuit, HintsTable,
LocalFinalCircuit, MemFinalRecord, MemInitRecord, NonVolatileTable, PubIOInitCircuit,
PubIOTable, RegTable, RegTableInitCircuit, ShardRamCircuit, StackInitCircuit, StackTable,
StaticMemInitCircuit, StaticMemTable, TableCircuit,
},
};
use ceno_emul::{Addr, IterAddresses, WORD_SIZE, Word};
use ff_ext::ExtensionField;
use itertools::{Itertools, chain};
use std::{collections::HashSet, iter::zip, ops::Range};
/// Aggregates the table-circuit configurations that make up the zkVM's
/// memory-management unit: register file, static/dynamic memory regions,
/// public IO, hints, finalization, and the cross-shard RAM bus.
pub struct MmuConfig<E: ExtensionField> {
    /// Initialization of registers.
    pub reg_init_config: <RegTableInitCircuit<E> as TableCircuit<E>>::TableConfig,
    /// Initialization of memory with static addresses.
    pub static_mem_init_config: <StaticMemInitCircuit<E> as TableCircuit<E>>::TableConfig,
    /// Initialization of public IO.
    pub public_io_init_config: <PubIOInitCircuit<E> as TableCircuit<E>>::TableConfig,
    /// Initialization of hints.
    pub hints_init_config: <HintsInitCircuit<E> as TableCircuit<E>>::TableConfig,
    /// Initialization of heap.
    pub heap_init_config: <HeapInitCircuit<E> as TableCircuit<E>>::TableConfig,
    /// Initialization of stack.
    pub stack_init_config: <StackInitCircuit<E> as TableCircuit<E>>::TableConfig,
    /// Finalization circuit covering all MMIO regions.
    pub local_final_circuit: <LocalFinalCircuit<E> as TableCircuit<E>>::TableConfig,
    /// RAM bus handling cross-shard reads/writes.
    pub ram_bus_circuit: <ShardRamCircuit<E> as TableCircuit<E>>::TableConfig,
    /// Program parameters captured from the constraint system at build time.
    pub params: ProgramParams,
}
impl<E: ExtensionField> MmuConfig<E> {
    /// Registers every MMU table circuit on the constraint system and
    /// collects the resulting configurations (plus a clone of the program
    /// parameters) into an `MmuConfig`.
    pub fn construct_circuits(cs: &mut ZKVMConstraintSystem<E>) -> Self {
        let reg_init_config = cs.register_table_circuit::<RegTableInitCircuit<E>>();
        let static_mem_init_config = cs.register_table_circuit::<StaticMemInitCircuit<E>>();
        let public_io_init_config = cs.register_table_circuit::<PubIOInitCircuit<E>>();
        let hints_init_config = cs.register_table_circuit::<HintsInitCircuit<E>>();
        let stack_init_config = cs.register_table_circuit::<StackInitCircuit<E>>();
        let heap_init_config = cs.register_table_circuit::<HeapInitCircuit<E>>();
        let local_final_circuit = cs.register_table_circuit::<LocalFinalCircuit<E>>();
        let ram_bus_circuit = cs.register_table_circuit::<ShardRamCircuit<E>>();
        Self {
            reg_init_config,
            static_mem_init_config,
            public_io_init_config,
            hints_init_config,
            stack_init_config,
            heap_init_config,
            local_final_circuit,
            ram_bus_circuit,
            params: cs.params.clone(),
        }
    }
pub fn generate_fixed_traces(
&self,
cs: &ZKVMConstraintSystem<E>,
fixed: &mut ZKVMFixedTraces<E>,
reg_init: &[MemInitRecord],
static_mem_init: &[MemInitRecord],
io_addrs: &[Addr],
) {
assert!(
chain!(
static_mem_init.iter_addresses(),
io_addrs.iter_addresses(),
// TODO: optimize with min_max and Range.
self.params.platform.hints.iter_addresses(),
)
.all_unique(),
"memory addresses must be unique"
);
fixed.register_table_circuit::<RegTableInitCircuit<E>>(cs, &self.reg_init_config, reg_init);
fixed.register_table_circuit::<StaticMemInitCircuit<E>>(
cs,
&self.static_mem_init_config,
static_mem_init,
);
fixed.register_table_circuit::<PubIOInitCircuit<E>>(
cs,
&self.public_io_init_config,
io_addrs,
);
fixed.register_table_circuit::<HintsInitCircuit<E>>(cs, &self.hints_init_config, &());
fixed.register_table_circuit::<StackInitCircuit<E>>(cs, &self.stack_init_config, &());
fixed.register_table_circuit::<HeapInitCircuit<E>>(cs, &self.heap_init_config, &());
fixed.register_table_circuit::<LocalFinalCircuit<E>>(cs, &self.local_final_circuit, &());
// fixed.register_table_circuit::<RBCircuit<E>>(cs, &self.ram_bus_circuit, &());
}
pub fn assign_dynamic_init_table_circuit(
&self,
cs: &ZKVMConstraintSystem<E>,
witness: &mut ZKVMWitnesses<E>,
pv: &PublicValues,
hints_final: &[MemFinalRecord],
heap_final: &[MemFinalRecord],
) -> Result<(), ZKVMError> {
witness.assign_table_circuit::<HeapInitCircuit<E>>(
cs,
&self.heap_init_config,
&(heap_final, pv, pv.heap_shard_len as usize),
)?;
witness.assign_table_circuit::<HintsInitCircuit<E>>(
cs,
&self.hints_init_config,
&(hints_final, pv, pv.hint_shard_len as usize),
)?;
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn assign_init_table_circuit(
&self,
cs: &ZKVMConstraintSystem<E>,
witness: &mut ZKVMWitnesses<E>,
pv: &PublicValues,
reg_final: &[MemFinalRecord],
static_mem_final: &[MemFinalRecord],
io_final: &[MemFinalRecord],
stack_final: &[MemFinalRecord],
) -> Result<(), ZKVMError> {
witness.assign_table_circuit::<RegTableInitCircuit<E>>(
cs,
&self.reg_init_config,
reg_final,
)?;
witness.assign_table_circuit::<StaticMemInitCircuit<E>>(
cs,
&self.static_mem_init_config,
static_mem_final,
)?;
witness.assign_table_circuit::<PubIOInitCircuit<E>>(
cs,
&self.public_io_init_config,
io_final,
)?;
witness.assign_table_circuit::<StackInitCircuit<E>>(
cs,
&self.stack_init_config,
&(stack_final, pv, stack_final.len()),
)?;
Ok(())
}
#[allow(clippy::too_many_arguments)]
pub fn assign_continuation_circuit(
&self,
cs: &ZKVMConstraintSystem<E>,
shard_ctx: &ShardContext,
witness: &mut ZKVMWitnesses<E>,
pv: &PublicValues,
reg_final: &[MemFinalRecord],
static_mem_final: &[MemFinalRecord],
io_final: &[MemFinalRecord],
hints_final: &[MemFinalRecord],
stack_final: &[MemFinalRecord],
heap_final: &[MemFinalRecord],
) -> Result<(), ZKVMError> {
let all_records = vec![
(PubIOTable::name(), None, io_final),
(RegTable::name(), None, reg_final),
(StaticMemTable::name(), None, static_mem_final),
(StackTable::name(), None, stack_final),
(
HintsTable::name(),
Some(
pv.hint_start_addr
..(pv.hint_start_addr + pv.hint_shard_len * (WORD_SIZE as u32)),
),
hints_final,
),
(
HeapTable::name(),
Some(
pv.heap_start_addr
..(pv.heap_start_addr + pv.heap_shard_len * (WORD_SIZE as u32)),
),
heap_final,
),
]
.into_iter()
.filter(|(_, _, record)| !record.is_empty())
.collect_vec();
witness.assign_table_circuit::<LocalFinalCircuit<E>>(
cs,
&self.local_final_circuit,
&(shard_ctx, all_records.as_slice()),
)?;
witness.assign_shared_circuit(
cs,
&(shard_ctx, all_records.as_slice()),
&self.ram_bus_circuit,
)?;
Ok(())
}
pub fn initial_registers(&self) -> Vec<MemInitRecord> {
(0..<RegTable as NonVolatileTable>::len(&self.params))
.map(|index| MemInitRecord {
addr: index as Addr,
value: 0,
})
.collect()
}
pub fn static_mem_len(&self) -> usize {
<StaticMemTable as NonVolatileTable>::len(&self.params)
}
pub fn public_io_len(&self) -> usize {
<PubIOTable as NonVolatileTable>::len(&self.params)
}
}
pub struct MemPadder {
valid_addresses: Range<Addr>,
used_addresses: HashSet<Addr>,
}
impl MemPadder {
/// Create memory records with uninitialized values.
pub fn new_mem_records_uninit(
address_range: Range<Addr>,
padded_len: usize,
) -> Vec<MemInitRecord> {
Self::new(address_range).padded_sorted(padded_len, vec![])
}
/// Create initial memory records.
/// Store `values` at the start of `address_range`, in order.
/// Pad with zero values up to `padded_len`.
///
/// Require: `values.len() <= padded_len <= address_range.len()`
pub fn new_mem_records(
address_range: Range<Addr>,
padded_len: usize,
values: &[Word],
) -> Vec<MemInitRecord> {
assert!(
values.len() <= padded_len,
"values.len() {} exceeds padded_len {}",
values.len(),
padded_len
);
let address_capacity = address_range.iter_addresses().len();
assert!(
padded_len <= address_capacity,
"padded_len {} exceeds address_range capacity {}",
padded_len,
address_capacity
);
let mut records = Self::new_mem_records_uninit(address_range, padded_len);
for (record, &value) in zip(&mut records, values) {
record.value = value;
}
records
}
/// Initialize memory records created `new_mem_records_uninit` with values.
///
/// Require: `values.len() <= padded_len <= address_range.len()`
///
/// See `new_mem_records` for more details.
pub fn init_mem_records(records: &mut Vec<MemInitRecord>, values: &[Word]) {
assert!(
values.len() <= records.len(),
"values.len() {} exceeds records.len() {}",
values.len(),
records.len()
);
for (record, &value) in zip(records, values) {
record.value = value;
}
}
pub fn new(valid_addresses: Range<Addr>) -> Self {
Self {
valid_addresses,
used_addresses: HashSet::new(),
}
}
/// Pad `records` to `new_len` with valid records.
/// The padding uses fresh addresses not yet seen by this `MemPadder`.
/// Sort the records by address.
pub fn padded_sorted(
&mut self,
new_len: usize,
records: Vec<MemInitRecord>,
) -> Vec<MemInitRecord> {
if records.is_empty() {
self.padded(new_len, records)
} else {
self.padded(new_len, records)
.into_iter()
.sorted_by_key(|record| record.addr)
.collect()
}
}
/// Pad `records` to `new_len` using unused addresses.
fn padded(&mut self, new_len: usize, mut records: Vec<MemInitRecord>) -> Vec<MemInitRecord> {
let old_len = records.len();
assert!(
old_len <= new_len,
"cannot fit {old_len} memory records in {new_len} space"
);
// Keep track of addresses that were explicitly used.
self.used_addresses
.extend(records.iter().map(|record| record.addr));
records.extend(
// Search for some addresses in the given range.
(&mut self.valid_addresses)
.step_by(WORD_SIZE)
// Exclude addresses already used.
.filter(|addr| !self.used_addresses.contains(addr))
// Create the padding records.
.take(new_len - old_len)
.map(|addr| MemInitRecord { addr, value: 0 }),
);
assert_eq!(
records.len(),
new_len,
"not enough addresses to pad memory records from {old_len} to {new_len}"
);
records
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/logic_imm/test.rs | ceno_zkvm/src/instructions/riscv/logic_imm/test.rs | use ceno_emul::{Change, InsnKind, PC_STEP_SIZE, StepRecord, encode_rv32u};
use ff_ext::GoldilocksExt2;
use gkr_iop::circuit_builder::DebugIndex;
use crate::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::ShardContext,
instructions::{
Instruction,
riscv::{
constants::UInt8,
logic_imm::{AndiOp, LogicInstruction, LogicOp, OriOp, XoriOp},
},
},
scheme::mock_prover::{MOCK_PC_START, MockProver},
structs::ProgramParams,
utils::split_to_u8,
};
/// An arbitrary test value.
const TEST: u32 = 0xabed_5eff;
/// An example of a sign-extended negative immediate value.
const NEG: u32 = 0xffff_ff55;
#[test]
fn test_opcode_andi() {
verify::<AndiOp>("basic", 0x0000_0011, 3, 0x0000_0011 & 3);
verify::<AndiOp>("zero result", 0x0000_0100, 3, 0x0000_0100 & 3);
verify::<AndiOp>("negative imm", TEST, NEG, TEST & NEG);
}
#[test]
fn test_opcode_ori() {
verify::<OriOp>("basic", 0x0000_0011, 3, 0x0000_0011 | 3);
verify::<OriOp>("basic2", 0x0000_0100, 3, 0x0000_0100 | 3);
verify::<OriOp>("negative imm", TEST, NEG, TEST | NEG);
}
#[test]
fn test_opcode_xori() {
verify::<XoriOp>("basic", 0x0000_0011, 3, 0x0000_0011 ^ 3);
verify::<XoriOp>("non-overlap", 0x0000_0100, 3, 0x0000_0100 ^ 3);
verify::<XoriOp>("negative imm", TEST, NEG, TEST ^ NEG);
}
fn verify<I: LogicOp>(name: &'static str, rs1_read: u32, imm: u32, expected_rd_written: u32) {
let mut cs = ConstraintSystem::<GoldilocksExt2>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let (prefix, rd_written) = match I::INST_KIND {
InsnKind::ANDI => ("ANDI", rs1_read & imm),
InsnKind::ORI => ("ORI", rs1_read | imm),
InsnKind::XORI => ("XORI", rs1_read ^ imm),
_ => unreachable!(),
};
let config = cb
.namespace(
|| format!("{prefix}_({name})"),
|cb| {
let config = LogicInstruction::<GoldilocksExt2, I>::construct_circuit(
cb,
&ProgramParams::default(),
);
Ok(config)
},
)
.unwrap()
.unwrap();
let insn_code = encode_rv32u(I::INST_KIND, 2, 0, 4, imm);
let (raw_witin, lkm) = LogicInstruction::<GoldilocksExt2, I>::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&StepRecord::new_i_instruction(
3,
Change::new(MOCK_PC_START, MOCK_PC_START + PC_STEP_SIZE),
insn_code,
rs1_read,
Change::new(0, rd_written),
0,
)],
)
.unwrap();
let expected = UInt8::from_const_unchecked(split_to_u8::<u64>(expected_rd_written));
let rd_written_expr = cb.get_debug_expr(DebugIndex::RdWrite as usize)[0].clone();
cb.require_equal(|| "assert_rd_written", rd_written_expr, expected.value())
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/logic_imm/logic_imm_circuit.rs | ceno_zkvm/src/instructions/riscv/logic_imm/logic_imm_circuit.rs | //! The circuit implementation of logic instructions.
use ff_ext::ExtensionField;
use gkr_iop::tables::OpsTable;
use std::marker::PhantomData;
use crate::{
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{constants::UInt8, i_insn::IInstructionConfig, logic_imm::LogicOp},
},
structs::ProgramParams,
tables::InsnRecord,
utils::split_to_u8,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, StepRecord};
/// The Instruction circuit for a given LogicOp.
pub struct LogicInstruction<E, I>(PhantomData<(E, I)>);
impl<E: ExtensionField, I: LogicOp> Instruction<E> for LogicInstruction<E, I> {
type InstructionConfig = LogicConfig<E>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
cb: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
let config = LogicConfig::construct_circuit(cb, I::INST_KIND)?;
// Constrain the registers based on the given lookup table.
UInt8::logic(
cb,
I::OpsTable::ROM_TYPE,
&config.rs1_read,
&config.imm,
&config.rd_written,
)?;
Ok(config)
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [<E as ExtensionField>::BaseField],
lkm: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
UInt8::<E>::logic_assign::<I::OpsTable>(
lkm,
step.rs1().unwrap().value.into(),
InsnRecord::<E::BaseField>::imm_internal(&step.insn()).0 as u64,
);
config.assign_instance(instance, shard_ctx, lkm, step)
}
}
/// This config implements I-Instructions that represent registers values as 4 * u8.
/// Non-generic code shared by several circuits.
#[derive(Debug)]
pub struct LogicConfig<E: ExtensionField> {
i_insn: IInstructionConfig<E>,
rs1_read: UInt8<E>,
pub(crate) rd_written: UInt8<E>,
imm: UInt8<E>,
}
impl<E: ExtensionField> LogicConfig<E> {
fn construct_circuit(
cb: &mut CircuitBuilder<E>,
insn_kind: InsnKind,
) -> Result<Self, ZKVMError> {
let rs1_read = UInt8::new_unchecked(|| "rs1_read", cb)?;
let rd_written = UInt8::new_unchecked(|| "rd_written", cb)?;
let imm = UInt8::new_unchecked(|| "imm", cb)?;
let i_insn = IInstructionConfig::<E>::construct_circuit(
cb,
insn_kind,
imm.value(),
rs1_read.register_expr(),
rd_written.register_expr(),
false,
)?;
Ok(Self {
i_insn,
rs1_read,
imm,
rd_written,
})
}
fn assign_instance(
&self,
instance: &mut [<E as ExtensionField>::BaseField],
shard_ctx: &mut ShardContext,
lkm: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
self.i_insn
.assign_instance(instance, shard_ctx, lkm, step)?;
let rs1_read = split_to_u8(step.rs1().unwrap().value);
self.rs1_read.assign_limbs(instance, &rs1_read);
let imm =
split_to_u8::<u16>(InsnRecord::<E::BaseField>::imm_internal(&step.insn()).0 as u32);
self.imm.assign_limbs(instance, &imm);
let rd_written = split_to_u8(step.rd().unwrap().value.after);
self.rd_written.assign_limbs(instance, &rd_written);
Ok(())
}
}
#[cfg(test)]
mod test {
use ceno_emul::{Change, InsnKind, PC_STEP_SIZE, StepRecord, encode_rv32u};
#[cfg(feature = "u16limb_circuit")]
use ff_ext::BabyBearExt4;
use ff_ext::{ExtensionField, GoldilocksExt2};
use gkr_iop::circuit_builder::DebugIndex;
use crate::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
instructions::{
Instruction,
riscv::{
constants::UInt8,
logic_imm::{AndiOp, OriOp, XoriOp, logic_imm_circuit::LogicInstruction},
},
},
scheme::mock_prover::{MOCK_PC_START, MockProver},
structs::ProgramParams,
utils::split_to_u8,
};
use super::LogicOp;
/// An arbitrary test value.
const TEST: u32 = 0xabed_5eff;
/// An example of a sign-extended negative immediate value.
const NEG: u32 = 0xffff_ff55;
#[test]
fn test_opcode_andi() {
let cases = vec![
("basic", 0x0000_0011, 3, 0x0000_0011 & 3),
("zero result", 0x0000_0100, 3, 0x0000_0100 & 3),
("negative imm", TEST, NEG, TEST & NEG),
];
for &(name, rs1, imm, expected) in &cases {
verify::<AndiOp, GoldilocksExt2>(name, rs1, imm, expected);
#[cfg(feature = "u16limb_circuit")]
verify::<AndiOp, BabyBearExt4>(name, rs1, imm, expected);
}
}
#[test]
fn test_opcode_ori() {
let cases = vec![
("basic", 0x0000_0011, 3, 0x0000_0011 | 3),
("basic2", 0x0000_0100, 3, 0x0000_0100 | 3),
("negative imm", TEST, NEG, TEST | NEG),
];
for &(name, rs1, imm, expected) in &cases {
verify::<OriOp, GoldilocksExt2>(name, rs1, imm, expected);
#[cfg(feature = "u16limb_circuit")]
verify::<OriOp, BabyBearExt4>(name, rs1, imm, expected);
}
}
#[test]
fn test_opcode_xori() {
let cases = vec![
("basic", 0x0000_0011, 3, 0x0000_0011 ^ 3),
("non-overlap", 0x0000_0100, 3, 0x0000_0100 ^ 3),
("negative imm", TEST, NEG, TEST ^ NEG),
];
for &(name, rs1, imm, expected) in &cases {
verify::<XoriOp, GoldilocksExt2>(name, rs1, imm, expected);
#[cfg(feature = "u16limb_circuit")]
verify::<XoriOp, BabyBearExt4>(name, rs1, imm, expected);
}
}
fn verify<I: LogicOp, E: ExtensionField>(
name: &'static str,
rs1_read: u32,
imm: u32,
expected_rd_written: u32,
) {
let mut cs = ConstraintSystem::<E>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let (prefix, rd_written) = match I::INST_KIND {
InsnKind::ANDI => ("ANDI", rs1_read & imm),
InsnKind::ORI => ("ORI", rs1_read | imm),
InsnKind::XORI => ("XORI", rs1_read ^ imm),
_ => unreachable!(),
};
let config = cb
.namespace(
|| format!("{prefix}_({name})"),
|cb| {
let config =
LogicInstruction::<E, I>::construct_circuit(cb, &ProgramParams::default());
Ok(config)
},
)
.unwrap()
.unwrap();
let insn_code = encode_rv32u(I::INST_KIND, 2, 0, 4, imm);
let (raw_witin, lkm) = LogicInstruction::<E, I>::assign_instances(
&config,
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&StepRecord::new_i_instruction(
3,
Change::new(MOCK_PC_START, MOCK_PC_START + PC_STEP_SIZE),
insn_code,
rs1_read,
Change::new(0, rd_written),
0,
)],
)
.unwrap();
let expected = UInt8::from_const_unchecked(split_to_u8::<u64>(expected_rd_written));
let rd_written_expr = cb.get_debug_expr(DebugIndex::RdWrite as usize)[0].clone();
cb.require_equal(|| "assert_rd_written", rd_written_expr, expected.value())
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/logic_imm/logic_imm_circuit_v2.rs | ceno_zkvm/src/instructions/riscv/logic_imm/logic_imm_circuit_v2.rs | //! The circuit implementation of logic instructions.
use ff_ext::ExtensionField;
use gkr_iop::tables::OpsTable;
use itertools::Itertools;
use std::marker::PhantomData;
use crate::{
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
constants::{LIMB_BITS, LIMB_MASK, UInt8},
i_insn::IInstructionConfig,
logic_imm::LogicOp,
},
},
structs::ProgramParams,
tables::InsnRecord,
uint::UIntLimbs,
utils::split_to_u8,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, StepRecord};
use multilinear_extensions::ToExpr;
/// The Instruction circuit for a given LogicOp.
pub struct LogicInstruction<E, I>(PhantomData<(E, I)>);
impl<E: ExtensionField, I: LogicOp> Instruction<E> for LogicInstruction<E, I> {
type InstructionConfig = LogicConfig<E>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
cb: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
let num_limbs = LIMB_BITS / 8;
let config = LogicConfig::construct_circuit(cb, I::INST_KIND)?;
// Constrain the registers based on the given lookup table.
// lo
UIntLimbs::<{ LIMB_BITS }, 8, E>::logic(
cb,
I::OpsTable::ROM_TYPE,
&UIntLimbs::from_exprs_unchecked(
config
.rs1_read
.expr()
.into_iter()
.take(num_limbs)
.collect_vec(),
),
&config.imm_lo,
&UIntLimbs::from_exprs_unchecked(
config
.rd_written
.expr()
.into_iter()
.take(num_limbs)
.collect_vec(),
),
)?;
// hi
UIntLimbs::<{ LIMB_BITS }, 8, E>::logic(
cb,
I::OpsTable::ROM_TYPE,
&UIntLimbs::from_exprs_unchecked(
config
.rs1_read
.expr()
.into_iter()
.skip(num_limbs)
.take(num_limbs)
.collect_vec(),
),
&config.imm_hi,
&UIntLimbs::from_exprs_unchecked(
config
.rd_written
.expr()
.into_iter()
.skip(num_limbs)
.take(num_limbs)
.collect_vec(),
),
)?;
Ok(config)
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [<E as ExtensionField>::BaseField],
lkm: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let rs1_lo = step.rs1().unwrap().value & LIMB_MASK;
let rs1_hi = (step.rs1().unwrap().value >> LIMB_BITS) & LIMB_MASK;
let imm_lo = InsnRecord::<E::BaseField>::imm_internal(&step.insn()).0 as u32 & LIMB_MASK;
let imm_hi = (InsnRecord::<E::BaseField>::imm_signed_internal(&step.insn()).0 as u32
>> LIMB_BITS)
& LIMB_MASK;
UIntLimbs::<{ LIMB_BITS }, 8, E>::logic_assign::<I::OpsTable>(
lkm,
rs1_lo.into(),
imm_lo.into(),
);
UIntLimbs::<{ LIMB_BITS }, 8, E>::logic_assign::<I::OpsTable>(
lkm,
rs1_hi.into(),
imm_hi.into(),
);
config.assign_instance(instance, shard_ctx, lkm, step)
}
}
/// This config implements I-Instructions that represent registers values as 4 * u8.
/// Non-generic code shared by several circuits.
#[derive(Debug)]
pub struct LogicConfig<E: ExtensionField> {
i_insn: IInstructionConfig<E>,
rs1_read: UInt8<E>,
pub(crate) rd_written: UInt8<E>,
imm_lo: UIntLimbs<{ LIMB_BITS }, 8, E>,
imm_hi: UIntLimbs<{ LIMB_BITS }, 8, E>,
}
impl<E: ExtensionField> LogicConfig<E> {
fn construct_circuit(
cb: &mut CircuitBuilder<E>,
insn_kind: InsnKind,
) -> Result<Self, ZKVMError> {
let rs1_read = UInt8::new_unchecked(|| "rs1_read", cb)?;
let rd_written = UInt8::new_unchecked(|| "rd_written", cb)?;
let imm_lo = UIntLimbs::<{ LIMB_BITS }, 8, E>::new_unchecked(|| "imm_lo", cb)?;
let imm_hi = UIntLimbs::<{ LIMB_BITS }, 8, E>::new_unchecked(|| "imm_hi", cb)?;
let i_insn = IInstructionConfig::<E>::construct_circuit(
cb,
insn_kind,
imm_lo.value(),
imm_hi.value(),
rs1_read.register_expr(),
rd_written.register_expr(),
false,
)?;
Ok(Self {
i_insn,
rs1_read,
imm_lo,
imm_hi,
rd_written,
})
}
fn assign_instance(
&self,
instance: &mut [<E as ExtensionField>::BaseField],
shard_ctx: &mut ShardContext,
lkm: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let num_limbs = LIMB_BITS / 8;
self.i_insn
.assign_instance(instance, shard_ctx, lkm, step)?;
let rs1_read = split_to_u8(step.rs1().unwrap().value);
self.rs1_read.assign_limbs(instance, &rs1_read);
let imm_lo =
split_to_u8::<u16>(InsnRecord::<E::BaseField>::imm_internal(&step.insn()).0 as u32)
[..num_limbs]
.to_vec();
let imm_hi = split_to_u8::<u16>(
InsnRecord::<E::BaseField>::imm_signed_internal(&step.insn()).0 as u32,
)[2..]
.to_vec();
self.imm_lo.assign_limbs(instance, &imm_lo);
self.imm_hi.assign_limbs(instance, &imm_hi);
let rd_written = split_to_u8(step.rd().unwrap().value.after);
self.rd_written.assign_limbs(instance, &rd_written);
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/arith_imm/arith_imm_circuit.rs | ceno_zkvm/src/instructions/riscv/arith_imm/arith_imm_circuit.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{RIVInstruction, constants::UInt, i_insn::IInstructionConfig},
},
structs::ProgramParams,
tables::InsnRecord,
witness::LkMultiplicity,
};
use ceno_emul::StepRecord;
use ff_ext::ExtensionField;
use std::marker::PhantomData;
pub struct AddiInstruction<E>(PhantomData<E>);
pub struct InstructionConfig<E: ExtensionField> {
i_insn: IInstructionConfig<E>,
rs1_read: UInt<E>,
imm: UInt<E>,
rd_written: UInt<E>,
}
impl<E: ExtensionField> Instruction<E> for AddiInstruction<E> {
type InstructionConfig = InstructionConfig<E>;
fn name() -> String {
format!("{:?}", Self::INST_KIND)
}
fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?;
let imm = UInt::new(|| "imm", circuit_builder)?;
let rd_written = rs1_read.add(|| "rs1_read + imm", circuit_builder, &imm, true)?;
let i_insn = IInstructionConfig::<E>::construct_circuit(
circuit_builder,
Self::INST_KIND,
imm.value(),
rs1_read.register_expr(),
rd_written.register_expr(),
false,
)?;
Ok(InstructionConfig {
i_insn,
rs1_read,
imm,
rd_written,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [<E as ExtensionField>::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let rs1_read = Value::new_unchecked(step.rs1().unwrap().value);
let imm = Value::new(
InsnRecord::<E::BaseField>::imm_internal(&step.insn()).0 as u32,
lk_multiplicity,
);
let result = rs1_read.add(&imm, lk_multiplicity, true);
config.rs1_read.assign_value(instance, rs1_read);
config.imm.assign_value(instance, imm);
config.rd_written.assign_add_outcome(instance, &result);
config
.i_insn
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/arith_imm/arith_imm_circuit_v2.rs | ceno_zkvm/src/instructions/riscv/arith_imm/arith_imm_circuit_v2.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{RIVInstruction, constants::UInt, i_insn::IInstructionConfig},
},
structs::ProgramParams,
utils::{imm_sign_extend, imm_sign_extend_circuit},
witness::LkMultiplicity,
};
use ceno_emul::StepRecord;
use ff_ext::{ExtensionField, FieldInto};
use multilinear_extensions::{ToExpr, WitIn};
use p3::field::FieldAlgebra;
use std::marker::PhantomData;
use witness::set_val;
pub struct AddiInstruction<E>(PhantomData<E>);
pub struct InstructionConfig<E: ExtensionField> {
i_insn: IInstructionConfig<E>,
rs1_read: UInt<E>,
imm: WitIn,
// 0 positive, 1 negative
imm_sign: WitIn,
rd_written: UInt<E>,
}
impl<E: ExtensionField> Instruction<E> for AddiInstruction<E> {
type InstructionConfig = InstructionConfig<E>;
fn name() -> String {
format!("{:?}", Self::INST_KIND)
}
fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?;
let imm = circuit_builder.create_witin(|| "imm");
let imm_sign = circuit_builder.create_witin(|| "imm_sign");
let imm_sign_extend = UInt::from_exprs_unchecked(
imm_sign_extend_circuit::<E>(true, imm_sign.expr(), imm.expr()).to_vec(),
);
let rd_written =
rs1_read.add(|| "rs1_read + imm", circuit_builder, &imm_sign_extend, true)?;
let i_insn = IInstructionConfig::<E>::construct_circuit(
circuit_builder,
Self::INST_KIND,
imm_sign_extend.expr().remove(0),
imm_sign.expr(),
rs1_read.register_expr(),
rd_written.register_expr(),
false,
)?;
Ok(InstructionConfig {
i_insn,
rs1_read,
imm,
imm_sign,
rd_written,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [<E as ExtensionField>::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let rs1_read = Value::new_unchecked(step.rs1().unwrap().value);
let imm = step.insn().imm as i16 as u16;
set_val!(instance, config.imm, E::BaseField::from_canonical_u16(imm));
let imm_sign_extend = imm_sign_extend(true, step.insn().imm as i16);
set_val!(
instance,
config.imm_sign,
E::BaseField::from_bool(imm_sign_extend[1] > 0)
);
let imm_sign_extend = Value::from_limb_slice_unchecked(&imm_sign_extend);
let result = rs1_read.add(&imm_sign_extend, lk_multiplicity, true);
config.rs1_read.assign_value(instance, rs1_read);
config.rd_written.assign_add_outcome(instance, &result);
config
.i_insn
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/branch/branch_circuit.rs | ceno_zkvm/src/instructions/riscv/branch/branch_circuit.rs | use std::marker::PhantomData;
use ceno_emul::{InsnKind, SWord, StepRecord};
use ff_ext::ExtensionField;
use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::{IsEqualConfig, IsLtConfig, SignedLtConfig},
instructions::{
Instruction,
riscv::{
RIVInstruction,
b_insn::BInstructionConfig,
constants::{LIMB_BITS, UINT_LIMBS, UInt},
},
},
structs::ProgramParams,
witness::LkMultiplicity,
};
use multilinear_extensions::Expression;
pub use p3::field::FieldAlgebra;
pub struct BranchCircuit<E, I>(PhantomData<(E, I)>);
pub struct BranchConfig<E: ExtensionField> {
pub b_insn: BInstructionConfig<E>,
pub read_rs1: UInt<E>,
pub read_rs2: UInt<E>,
pub is_equal: Option<IsEqualConfig>, // For equality comparisons
pub is_signed_lt: Option<SignedLtConfig<E>>, // For signed comparisons
pub is_unsigned_lt: Option<IsLtConfig>, // For unsigned comparisons
}
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for BranchCircuit<E, I> {
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
type InstructionConfig = BranchConfig<E>;
fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<BranchConfig<E>, ZKVMError> {
let read_rs1 = UInt::new_unchecked(|| "rs1_limbs", circuit_builder)?;
let read_rs2 = UInt::new_unchecked(|| "rs2_limbs", circuit_builder)?;
let (branch_taken_bit, is_equal, is_signed_lt, is_unsigned_lt) = match I::INST_KIND {
InsnKind::BEQ => {
let equal = IsEqualConfig::construct_circuit(
circuit_builder,
|| "rs1!=rs2",
read_rs2.value(),
read_rs1.value(),
)?;
(equal.expr(), Some(equal), None, None)
}
InsnKind::BNE => {
let equal = IsEqualConfig::construct_circuit(
circuit_builder,
|| "rs1==rs2",
read_rs2.value(),
read_rs1.value(),
)?;
(Expression::ONE - equal.expr(), Some(equal), None, None)
}
InsnKind::BLT => {
let signed_lt = SignedLtConfig::construct_circuit(
circuit_builder,
|| "rs1<rs2",
&read_rs1,
&read_rs2,
)?;
(signed_lt.expr(), None, Some(signed_lt), None)
}
InsnKind::BGE => {
let signed_lt = SignedLtConfig::construct_circuit(
circuit_builder,
|| "rs1>=rs2",
&read_rs1,
&read_rs2,
)?;
(
Expression::ONE - signed_lt.expr(),
None,
Some(signed_lt),
None,
)
}
InsnKind::BLTU => {
let unsigned_lt = IsLtConfig::construct_circuit(
circuit_builder,
|| "rs1<rs2",
read_rs1.value(),
read_rs2.value(),
UINT_LIMBS * LIMB_BITS,
)?;
(unsigned_lt.expr(), None, None, Some(unsigned_lt))
}
InsnKind::BGEU => {
let unsigned_lt = IsLtConfig::construct_circuit(
circuit_builder,
|| "rs1 >= rs2",
read_rs1.value(),
read_rs2.value(),
UINT_LIMBS * LIMB_BITS,
)?;
(
Expression::ONE - unsigned_lt.expr(),
None,
None,
Some(unsigned_lt),
)
}
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
};
let b_insn = BInstructionConfig::construct_circuit(
circuit_builder,
I::INST_KIND,
read_rs1.register_expr(),
read_rs2.register_expr(),
branch_taken_bit,
)?;
Ok(BranchConfig {
b_insn,
read_rs1,
read_rs2,
is_equal,
is_signed_lt,
is_unsigned_lt,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [E::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
config
.b_insn
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
let rs1 = Value::new_unchecked(step.rs1().unwrap().value);
let rs2 = Value::new_unchecked(step.rs2().unwrap().value);
config.read_rs1.assign_limbs(instance, rs1.as_u16_limbs());
config.read_rs2.assign_limbs(instance, rs2.as_u16_limbs());
if let Some(equal) = &config.is_equal {
equal.assign_instance(
instance,
E::BaseField::from_canonical_u64(rs2.as_u64()),
E::BaseField::from_canonical_u64(rs1.as_u64()),
)?;
}
if let Some(signed_lt) = &config.is_signed_lt {
signed_lt.assign_instance(
instance,
lk_multiplicity,
step.rs1().unwrap().value as SWord,
step.rs2().unwrap().value as SWord,
)?;
}
if let Some(unsigned_lt) = &config.is_unsigned_lt {
unsigned_lt.assign_instance(
instance,
lk_multiplicity,
step.rs1().unwrap().value as u64,
step.rs2().unwrap().value as u64,
)?;
}
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/branch/test.rs | ceno_zkvm/src/instructions/riscv/branch/test.rs | use ceno_emul::{ByteAddr, Change, PC_STEP_SIZE, StepRecord, Word, encode_rv32};
#[cfg(feature = "u16limb_circuit")]
use ff_ext::BabyBearExt4;
use ff_ext::{ExtensionField, GoldilocksExt2};
use super::*;
use crate::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::ShardContext,
error::ZKVMError,
instructions::Instruction,
scheme::mock_prover::{MOCK_PC_START, MockProver},
structs::ProgramParams,
};
#[test]
fn test_opcode_beq() {
impl_opcode_beq(false, 0xbead1010, 0xef552020);
impl_opcode_beq(true, 0xef552020, 0xef552020);
impl_opcode_beq(true, 0xffffffff, 0xffffffff);
}
/// Builds the BEQ circuit, assigns a one-step witness for operands `a` (rs1)
/// and `b` (rs2), and checks every constraint with the mock prover.
/// `take_branch` drives the post-PC, cross-checking the branch-taken bit.
fn impl_opcode_beq(take_branch: bool, a: u32, b: u32) {
    let mut cs = ConstraintSystem::<GoldilocksExt2>::new(|| "riscv");
    let mut builder = CircuitBuilder::new(&mut cs);
    let config = builder
        .namespace(
            || "beq",
            |cb| Ok(BeqInstruction::construct_circuit(cb, &ProgramParams::default())),
        )
        .unwrap()
        .unwrap();
    // BEQ x2, x3, +8
    let insn_code = encode_rv32(InsnKind::BEQ, 2, 3, 0, 8);
    // Taken branch jumps +8; fall-through advances one instruction.
    let pc_offset = if take_branch { 8 } else { PC_STEP_SIZE };
    let step = StepRecord::new_b_instruction(
        3,
        Change::new(MOCK_PC_START, MOCK_PC_START + pc_offset),
        insn_code,
        a as Word,
        b as Word,
        0,
    );
    let (raw_witin, lkm) = BeqInstruction::assign_instances(
        &config,
        &mut ShardContext::default(),
        builder.cs.num_witin as usize,
        builder.cs.num_structural_witin as usize,
        vec![&step],
    )
    .unwrap();
    MockProver::assert_satisfied_raw(&builder, raw_witin, &[insn_code], None, Some(lkm));
}
#[test]
fn test_opcode_bne() {
    // Each case: (expected branch-taken?, rs1 value, rs2 value).
    let cases = [
        (true, 0xbead1010u32, 0xef552020u32),
        (false, 0xef552020, 0xef552020),
        (false, 0xffffffff, 0xffffffff),
    ];
    for &(taken, a, b) in cases.iter() {
        impl_opcode_bne(taken, a, b);
    }
}
/// Builds the BNE circuit for a single step and verifies it with the mock prover.
///
/// `take_branch` is the expected branch decision for operands `a` (rs1) and
/// `b` (rs2); the step's post-PC is derived from it so the circuit's
/// branch-taken bit is cross-checked against the PC update.
fn impl_opcode_bne(take_branch: bool, a: u32, b: u32) {
    let mut cs = ConstraintSystem::<GoldilocksExt2>::new(|| "riscv");
    let mut cb = CircuitBuilder::new(&mut cs);
    let config = cb
        .namespace(
            || "bne",
            |cb| {
                let config = BneInstruction::construct_circuit(cb, &ProgramParams::default());
                Ok(config)
            },
        )
        .unwrap()
        .unwrap();
    // BNE x2, x3, +8
    let insn_code = encode_rv32(InsnKind::BNE, 2, 3, 0, 8);
    // Taken branch jumps +8; fall-through advances one instruction.
    let pc_offset = if take_branch { 8 } else { PC_STEP_SIZE };
    let (raw_witin, lkm) = BneInstruction::assign_instances(
        &config,
        &mut ShardContext::default(),
        cb.cs.num_witin as usize,
        cb.cs.num_structural_witin as usize,
        vec![&StepRecord::new_b_instruction(
            3,
            Change::new(MOCK_PC_START, MOCK_PC_START + pc_offset),
            insn_code,
            a as Word,
            b as Word,
            0,
        )],
    )
    .unwrap();
    MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
#[test]
fn test_bltu_circuit() -> Result<(), ZKVMError> {
    // Each case: (expected taken?, rs1, rs2) for unsigned rs1 < rs2.
    let cases = [
        (false, 1u32, 0u32),
        (false, 0, 0),
        (false, 0xFFFF_FFFF, 0xFFFF_FFFF),
        (true, 0, 1),
        (true, 0xFFFF_FFFE, 0xFFFF_FFFF),
        (true, 0xEFFF_FFFF, 0xFFFF_FFFF),
    ];
    for &(taken, a, b) in cases.iter() {
        impl_bltu_circuit(taken, a, b)?;
    }
    Ok(())
}
/// Builds the BLTU circuit for one step and verifies it with the mock prover.
///
/// `taken` is the expected unsigned rs1 < rs2 outcome.  The encoded branch
/// offset is -8, so a taken branch jumps backwards while fall-through
/// advances by one instruction.
fn impl_bltu_circuit(taken: bool, a: u32, b: u32) -> Result<(), ZKVMError> {
    let mut cs = ConstraintSystem::new(|| "riscv");
    let mut circuit_builder = CircuitBuilder::<GoldilocksExt2>::new(&mut cs);
    let config =
        BltuInstruction::construct_circuit(&mut circuit_builder, &ProgramParams::default())?;
    let pc_after = if taken {
        ByteAddr(MOCK_PC_START.0 - 8)
    } else {
        MOCK_PC_START + PC_STEP_SIZE
    };
    // BLTU x2, x3, -8 (backward branch)
    let insn_code = encode_rv32(InsnKind::BLTU, 2, 3, 0, -8);
    let (raw_witin, lkm) = BltuInstruction::assign_instances(
        &config,
        &mut ShardContext::default(),
        circuit_builder.cs.num_witin as usize,
        circuit_builder.cs.num_structural_witin as usize,
        vec![&StepRecord::new_b_instruction(
            12,
            Change::new(MOCK_PC_START, pc_after),
            insn_code,
            a as Word,
            b as Word,
            10,
        )],
    )
    .unwrap();
    MockProver::assert_satisfied_raw(&circuit_builder, raw_witin, &[insn_code], None, Some(lkm));
    Ok(())
}
#[test]
fn test_bgeu_circuit() -> Result<(), ZKVMError> {
    // Each case: (expected taken?, rs1, rs2) for unsigned rs1 >= rs2.
    let cases = [
        (true, 1u32, 0u32),
        (true, 0, 0),
        (true, 0xFFFF_FFFF, 0xFFFF_FFFF),
        (false, 0, 1),
        (false, 0xFFFF_FFFE, 0xFFFF_FFFF),
        (false, 0xEFFF_FFFF, 0xFFFF_FFFF),
    ];
    for &(taken, a, b) in cases.iter() {
        impl_bgeu_circuit(taken, a, b)?;
    }
    Ok(())
}
/// Builds the BGEU circuit for one step and verifies it with the mock prover.
///
/// `taken` is the expected unsigned rs1 >= rs2 outcome.  The encoded branch
/// offset is -8, so a taken branch jumps backwards while fall-through
/// advances by one instruction.
fn impl_bgeu_circuit(taken: bool, a: u32, b: u32) -> Result<(), ZKVMError> {
    let mut cs = ConstraintSystem::new(|| "riscv");
    let mut circuit_builder = CircuitBuilder::<GoldilocksExt2>::new(&mut cs);
    let config =
        BgeuInstruction::construct_circuit(&mut circuit_builder, &ProgramParams::default())?;
    let pc_after = if taken {
        ByteAddr(MOCK_PC_START.0 - 8)
    } else {
        MOCK_PC_START + PC_STEP_SIZE
    };
    // BGEU x2, x3, -8 (backward branch)
    let insn_code = encode_rv32(InsnKind::BGEU, 2, 3, 0, -8);
    let (raw_witin, lkm) = BgeuInstruction::assign_instances(
        &config,
        &mut ShardContext::default(),
        circuit_builder.cs.num_witin as usize,
        circuit_builder.cs.num_structural_witin as usize,
        vec![&StepRecord::new_b_instruction(
            12,
            Change::new(MOCK_PC_START, pc_after),
            insn_code,
            a as Word,
            b as Word,
            10,
        )],
    )
    .unwrap();
    MockProver::assert_satisfied_raw(&circuit_builder, raw_witin, &[insn_code], None, Some(lkm));
    Ok(())
}
#[test]
fn test_blt_circuit() -> Result<(), ZKVMError> {
    // Each case: (expected taken?, rs1, rs2) for signed rs1 < rs2.
    let cases = [
        (false, 0, 0),
        (true, 0, 1),
        (false, 1, -10),
        (false, -10, -10),
        (false, -9, -10),
        (true, -9, 1),
        (true, -10, -9),
    ];
    for &(expected, a, b) in cases.iter() {
        // Exercise both field configurations over the same case table.
        impl_blt_circuit::<GoldilocksExt2>(expected, a, b)?;
        #[cfg(feature = "u16limb_circuit")]
        impl_blt_circuit::<BabyBearExt4>(expected, a, b)?;
    }
    Ok(())
}
/// Builds the BLT circuit for one step over field `E` and verifies it with
/// the mock prover.
///
/// `taken` is the expected signed rs1 < rs2 outcome.  The encoded branch
/// offset is -8, so a taken branch jumps backwards while fall-through
/// advances by one instruction.
fn impl_blt_circuit<E: ExtensionField>(taken: bool, a: i32, b: i32) -> Result<(), ZKVMError> {
    let mut cs = ConstraintSystem::new(|| "riscv");
    let mut circuit_builder = CircuitBuilder::<E>::new(&mut cs);
    let config =
        BltInstruction::construct_circuit(&mut circuit_builder, &ProgramParams::default())?;
    let pc_after = if taken {
        ByteAddr(MOCK_PC_START.0 - 8)
    } else {
        MOCK_PC_START + PC_STEP_SIZE
    };
    // BLT x2, x3, -8 (backward branch)
    let insn_code = encode_rv32(InsnKind::BLT, 2, 3, 0, -8);
    let (raw_witin, lkm) = BltInstruction::assign_instances(
        &config,
        &mut ShardContext::default(),
        circuit_builder.cs.num_witin as usize,
        circuit_builder.cs.num_structural_witin as usize,
        vec![&StepRecord::new_b_instruction(
            12,
            Change::new(MOCK_PC_START, pc_after),
            insn_code,
            a as Word,
            b as Word,
            10,
        )],
    )
    .unwrap();
    MockProver::assert_satisfied_raw(&circuit_builder, raw_witin, &[insn_code], None, Some(lkm));
    Ok(())
}
#[test]
fn test_bge_circuit() -> Result<(), ZKVMError> {
    // Each case: (expected taken?, rs1, rs2) for signed rs1 >= rs2.
    let cases = [
        (true, 0, 0),
        (false, 0, 1),
        (true, 1, -10),
        (true, -10, -10),
        (true, -9, -10),
        (false, -9, 1),
        (false, -10, -9),
    ];
    for &(expected, a, b) in cases.iter() {
        // Exercise both field configurations over the same case table.
        impl_bge_circuit::<GoldilocksExt2>(expected, a, b)?;
        #[cfg(feature = "u16limb_circuit")]
        impl_bge_circuit::<BabyBearExt4>(expected, a, b)?;
    }
    Ok(())
}
/// Builds the BGE circuit for one step over field `E` and verifies it with
/// the mock prover.
///
/// `taken` is the expected signed rs1 >= rs2 outcome.  The encoded branch
/// offset is -8, so a taken branch jumps backwards while fall-through
/// advances by one instruction.
fn impl_bge_circuit<E: ExtensionField>(taken: bool, a: i32, b: i32) -> Result<(), ZKVMError> {
    let mut cs = ConstraintSystem::new(|| "riscv");
    let mut circuit_builder = CircuitBuilder::<E>::new(&mut cs);
    let config =
        BgeInstruction::construct_circuit(&mut circuit_builder, &ProgramParams::default())?;
    let pc_after = if taken {
        ByteAddr(MOCK_PC_START.0 - 8)
    } else {
        MOCK_PC_START + PC_STEP_SIZE
    };
    // BGE x2, x3, -8 (backward branch)
    let insn_code = encode_rv32(InsnKind::BGE, 2, 3, 0, -8);
    let (raw_witin, lkm) = BgeInstruction::assign_instances(
        &config,
        &mut ShardContext::default(),
        circuit_builder.cs.num_witin as usize,
        circuit_builder.cs.num_structural_witin as usize,
        vec![&StepRecord::new_b_instruction(
            12,
            Change::new(MOCK_PC_START, pc_after),
            insn_code,
            a as Word,
            b as Word,
            10,
        )],
    )
    .unwrap();
    MockProver::assert_satisfied_raw(&circuit_builder, raw_witin, &[insn_code], None, Some(lkm));
    Ok(())
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/branch/branch_circuit_v2.rs | ceno_zkvm/src/instructions/riscv/branch/branch_circuit_v2.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::{UIntLimbsLT, UIntLimbsLTConfig},
instructions::{
Instruction,
riscv::{
RIVInstruction,
b_insn::BInstructionConfig,
constants::{UINT_LIMBS, UInt},
},
},
structs::ProgramParams,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, StepRecord};
use ff_ext::{ExtensionField, FieldInto};
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::{Field, FieldAlgebra};
use std::{array, marker::PhantomData};
use witness::set_val;
/// Generic circuit for all RV32 conditional branches; `I::INST_KIND` selects
/// which comparison (BEQ/BNE/BLT/BGE/BLTU/BGEU) is constrained.
pub struct BranchCircuit<E, I>(PhantomData<(E, I)>);

/// Witness/config layout for [`BranchCircuit`].  Exactly one of the two
/// comparison sub-configs is populated, depending on the instruction kind.
pub struct BranchConfig<E: ExtensionField> {
    pub b_insn: BInstructionConfig<E>,
    // rs1/rs2 operands, read as limb vectors.
    pub read_rs1: UInt<E>,
    pub read_rs2: UInt<E>,
    // for non eq opcode config (BLT/BGE/BLTU/BGEU): limb-wise less-than gadget
    pub uint_lt_config: Option<UIntLimbsLTConfig<E>>,
    // for beq/bne: per-limb inverse markers witnessing (in)equality,
    // plus the resulting branch-taken bit
    pub eq_diff_inv_marker: Option<[WitIn; UINT_LIMBS]>,
    pub eq_branch_taken_bit: Option<WitIn>,
    phantom: PhantomData<E>,
}
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for BranchCircuit<E, I> {
    type InstructionConfig = BranchConfig<E>;

    fn name() -> String {
        format!("{:?}", I::INST_KIND)
    }

    /// Lays out the branch circuit: reads rs1/rs2 as unchecked limb vectors,
    /// derives a branch-taken bit (inverse-marker equality check for BEQ/BNE,
    /// limb-wise less-than gadget otherwise), and hands that bit to the shared
    /// B-type instruction config, which constrains the PC update.
    fn construct_circuit(
        circuit_builder: &mut CircuitBuilder<E>,
        _param: &ProgramParams,
    ) -> Result<Self::InstructionConfig, ZKVMError> {
        let read_rs1 = UInt::new_unchecked(|| "rs1_limbs", circuit_builder)?;
        let read_rs2 = UInt::new_unchecked(|| "rs2_limbs", circuit_builder)?;
        let (branch_taken_bit_expr, eq_branch_taken_bit, eq_diff_inv_marker, uint_lt_config) =
            if matches!(I::INST_KIND, InsnKind::BEQ | InsnKind::BNE) {
                let branch_taken_bit = circuit_builder.create_bit(|| "branch_taken_bit")?;
                let eq_diff_inv_marker = array::from_fn(|i| {
                    circuit_builder.create_witin(|| format!("eq_diff_inv_marker_{i}"))
                });
                // 1 if cmp_result indicates a and b are EQUAL, 0 otherwise
                let cmp_eq = match I::INST_KIND {
                    InsnKind::BEQ => branch_taken_bit.expr(),
                    InsnKind::BNE => Expression::ONE - branch_taken_bit.expr(),
                    _ => unreachable!(),
                };
                let mut sum = cmp_eq.expr();
                // For BEQ, inv_marker is used to check equality of a and b:
                // - If a == b, all inv_marker values must be 0 (sum = 0)
                // - If a != b, inv_marker contains 0s for all positions except ONE position i where a[i] !=
                //   b[i]
                // - At this position, inv_marker[i] contains the multiplicative inverse of (a[i] - b[i])
                // - This ensures inv_marker[i] * (a[i] - b[i]) = 1, making the sum = 1
                // Note: There might be multiple valid inv_marker if a != b.
                // But as long as the trace can provide at least one, that’s sufficient to prove a != b.
                //
                // Note:
                // - If cmp_eq == 0, then it is impossible to have sum != 0 if a == b.
                // - If cmp_eq == 1, then it is impossible for a[i] - b[i] == 0 to pass for all i if a != b.
                #[allow(clippy::needless_range_loop)]
                for i in 0..UINT_LIMBS {
                    sum += (read_rs1.limbs[i].expr() - read_rs2.limbs[i].expr())
                        * eq_diff_inv_marker[i].expr();
                    // When cmp_eq == 1 every limb difference must vanish.
                    circuit_builder.require_zero(
                        || "require_zero",
                        cmp_eq.expr() * (read_rs1.limbs[i].expr() - read_rs2.limbs[i].expr()),
                    )?
                }
                circuit_builder.require_one(|| "sum", sum)?;
                (
                    branch_taken_bit.expr(),
                    Some(branch_taken_bit),
                    Some(eq_diff_inv_marker),
                    None,
                )
            } else {
                // Comparison branches: BLT/BGE compare signed, BLTU/BGEU unsigned.
                let is_signed = matches!(I::INST_KIND, InsnKind::BLT | InsnKind::BGE);
                let is_ge = matches!(I::INST_KIND, InsnKind::BGEU | InsnKind::BGE);
                let uint_lt_config = UIntLimbsLT::<E>::construct_circuit(
                    circuit_builder,
                    &read_rs1,
                    &read_rs2,
                    is_signed,
                )?;
                // GE is the complement of LT.
                let branch_taken_bit = if is_ge {
                    Expression::ONE - uint_lt_config.is_lt()
                } else {
                    uint_lt_config.is_lt()
                };
                (branch_taken_bit, None, None, Some(uint_lt_config))
            };
        let b_insn = BInstructionConfig::construct_circuit(
            circuit_builder,
            I::INST_KIND,
            read_rs1.register_expr(),
            read_rs2.register_expr(),
            branch_taken_bit_expr,
        )?;
        Ok(BranchConfig {
            b_insn,
            read_rs1,
            read_rs2,
            uint_lt_config,
            eq_branch_taken_bit,
            eq_diff_inv_marker,
            phantom: Default::default(),
        })
    }

    /// Fills one trace row from an executed `StepRecord`: assigns the B-type
    /// plumbing, the rs1/rs2 limbs, and whichever comparison witness
    /// (equality markers or less-than gadget) this instruction kind uses.
    fn assign_instance(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        instance: &mut [E::BaseField],
        lk_multiplicity: &mut LkMultiplicity,
        step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        config
            .b_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        let rs1 = Value::new_unchecked(step.rs1().unwrap().value);
        let rs1_limbs = rs1.as_u16_limbs();
        let rs2 = Value::new_unchecked(step.rs2().unwrap().value);
        let rs2_limbs = rs2.as_u16_limbs();
        config.read_rs1.assign_limbs(instance, rs1_limbs);
        config.read_rs2.assign_limbs(instance, rs2_limbs);
        if matches!(I::INST_KIND, InsnKind::BEQ | InsnKind::BNE) {
            // Returns (branch_taken, diff_idx, inverse of x[diff_idx] - y[diff_idx])
            #[inline(always)]
            fn run_eq<F, const NUM_LIMBS: usize>(
                is_beq: bool,
                x: &[u16],
                y: &[u16],
            ) -> (bool, usize, F)
            where
                F: FieldAlgebra + Field,
            {
                // Scan for the first differing limb; its field inverse
                // witnesses x != y.
                for i in 0..NUM_LIMBS {
                    if x[i] != y[i] {
                        return (
                            !is_beq,
                            i,
                            (F::from_canonical_u16(x[i]) - F::from_canonical_u16(y[i])).inverse(),
                        );
                    }
                }
                // All limbs equal: every marker stays zero.
                (is_beq, 0, F::ZERO)
            }
            let (branch_taken, diff_idx, diff_inv_val) = run_eq::<E::BaseField, UINT_LIMBS>(
                matches!(I::INST_KIND, InsnKind::BEQ),
                rs1_limbs,
                rs2_limbs,
            );
            set_val!(
                instance,
                config.eq_branch_taken_bit.as_ref().unwrap(),
                E::BaseField::from_bool(branch_taken)
            );
            set_val!(
                instance,
                config.eq_diff_inv_marker.as_ref().unwrap()[diff_idx],
                diff_inv_val
            );
        } else {
            let is_signed = matches!(step.insn().kind, InsnKind::BLT | InsnKind::BGE);
            UIntLimbsLT::<E>::assign(
                config.uint_lt_config.as_ref().unwrap(),
                instance,
                lk_multiplicity,
                rs1_limbs,
                rs2_limbs,
                is_signed,
            )?;
        }
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/shift/shift_circuit.rs | ceno_zkvm/src/instructions/riscv/shift/shift_circuit.rs | use crate::{
Value,
e2e::ShardContext,
error::ZKVMError,
gadgets::SignedExtendConfig,
instructions::{
Instruction,
riscv::{
RIVInstruction,
constants::{LIMB_BITS, UINT_LIMBS, UInt},
r_insn::RInstructionConfig,
},
},
structs::ProgramParams,
};
use ceno_emul::InsnKind;
use ff_ext::{ExtensionField, FieldInto};
use gkr_iop::gadgets::AssertLtConfig;
use multilinear_extensions::{Expression, ToExpr, WitIn};
use std::marker::PhantomData;
use witness::set_val;
/// Witness/config layout for the v1 shift circuit (SLL/SRL/SRA).
pub struct ShiftConfig<E: ExtensionField> {
    r_insn: RInstructionConfig<E>,
    rs1_read: UInt<E>,
    rs2_read: UInt<E>,
    pub rd_written: UInt<E>,
    // upper bits of rs2: rs2 = rs2_high * 2^5 + rs2_low5
    rs2_high: UInt<E>,
    // low 5 bits of rs2, i.e. the effective shift amount
    rs2_low5: WitIn,
    // 2^rs2_low5, tied to rs2_low5 by the pow2 lookup
    pow2_rs2_low5: WitIn,
    // bits that "flow out" of the 32-bit word during the shift
    outflow: WitIn,
    // enforces outflow < pow2_rs2_low5, making the witness unique
    assert_lt_config: AssertLtConfig,
    // SRA
    signed_extend_config: Option<SignedExtendConfig<E>>,
}
/// v1 shift circuit (SLL/SRL/SRA), parameterized by the instruction kind `I`.
pub struct ShiftLogicalInstruction<E, I>(PhantomData<(E, I)>);

impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for ShiftLogicalInstruction<E, I> {
    type InstructionConfig = ShiftConfig<E>;

    fn name() -> String {
        format!("{:?}", I::INST_KIND)
    }

    /// Builds the shift constraints using the bit "inflow/outflow" model
    /// described below, plus the shared R-type instruction plumbing.
    fn construct_circuit(
        circuit_builder: &mut crate::circuit_builder::CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<Self::InstructionConfig, crate::error::ZKVMError> {
        // treat bit shifting as a bit "inflow" and "outflow" process, flowing from left to right or vice versa
        // this approach simplifies constraint and witness allocation compared to using multiplication/division gadget,
        // as the divisor/multiplier is a power of 2.
        //
        // example: right shift (bit flow from left to right)
        // inflow || rs1_read == rd_written || outflow
        // in this case, inflow consists of either all 0s or all 1s for sign extension (if the value is signed).
        //
        // for left shifts, the inflow is always 0:
        // rs1_read || inflow == outflow || rd_written
        //
        // additional constraint: outflow < (1 << shift), which lead to unique solution
        // soundness: take Goldilocks as example, both sides of the equation are 63 bits numbers (<2**63)
        // rd_written * pow2_rs2_low5 + outflow == inflow * 2**32 + rs1_read
        // 32 + 31. 31. 31 + 32. 32. (Bit widths)
        let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?;
        let rd_written = UInt::new(|| "rd_written", circuit_builder)?;
        let rs2_read = UInt::new_unchecked(|| "rs2_read", circuit_builder)?;
        let rs2_low5 = circuit_builder.create_witin(|| "rs2_low5");
        // pow2_rs2_low5 is unchecked because it's assignment will be constrained due it's use in lookup_pow2 below
        let pow2_rs2_low5 = circuit_builder.create_witin(|| "pow2_rs2_low5");
        // rs2 = rs2_high | rs2_low5
        let rs2_high = UInt::new(|| "rs2_high", circuit_builder)?;
        let outflow = circuit_builder.create_witin(|| "outflow");
        // outflow < 2^shift guarantees the decomposition is unique.
        let assert_lt_config = AssertLtConfig::construct_circuit(
            circuit_builder,
            || "outflow < pow2_rs2_low5",
            outflow.expr(),
            pow2_rs2_low5.expr(),
            UINT_LIMBS * LIMB_BITS,
        )?;
        let two_pow_total_bits: Expression<_> = (1u64 << UInt::<E>::TOTAL_BITS).into();
        let signed_extend_config = match I::INST_KIND {
            InsnKind::SLL => {
                // rs1 * 2^shift == outflow * 2^32 + rd
                circuit_builder.require_equal(
                    || "shift check",
                    rs1_read.value() * pow2_rs2_low5.expr(),
                    outflow.expr() * two_pow_total_bits + rd_written.value(),
                )?;
                None
            }
            InsnKind::SRL | InsnKind::SRA => {
                // SRA fills the inflow with the sign bit; SRL with zeros.
                let (inflow, signed_extend_config) = match I::INST_KIND {
                    InsnKind::SRA => {
                        let signed_extend_config = rs1_read.is_negative(circuit_builder)?;
                        let msb_expr = signed_extend_config.expr();
                        let ones = pow2_rs2_low5.expr() - Expression::ONE;
                        (msb_expr * ones, Some(signed_extend_config))
                    }
                    InsnKind::SRL => (Expression::ZERO, None),
                    _ => unreachable!(),
                };
                // rd * 2^shift + outflow == inflow * 2^32 + rs1
                circuit_builder.require_equal(
                    || "shift check",
                    rd_written.value() * pow2_rs2_low5.expr() + outflow.expr(),
                    inflow * two_pow_total_bits + rs1_read.value(),
                )?;
                signed_extend_config
            }
            _ => unreachable!(),
        };
        let r_insn = RInstructionConfig::<E>::construct_circuit(
            circuit_builder,
            I::INST_KIND,
            rs1_read.register_expr(),
            rs2_read.register_expr(),
            rd_written.register_expr(),
        )?;
        // Tie pow2_rs2_low5 to rs2_low5 and range-check the shift amount.
        circuit_builder.lookup_pow2(rs2_low5.expr(), pow2_rs2_low5.expr())?;
        circuit_builder.assert_ux::<_, _, 5>(|| "rs2_low5 in u5", rs2_low5.expr())?;
        circuit_builder.require_equal(
            || "rs2 == rs2_high * 2^5 + rs2_low5",
            rs2_read.value(),
            (rs2_high.value() << 5) + rs2_low5.expr(),
        )?;
        Ok(ShiftConfig {
            r_insn,
            rs1_read,
            rs2_read,
            rd_written,
            rs2_high,
            rs2_low5,
            pow2_rs2_low5,
            outflow,
            assert_lt_config,
            signed_extend_config,
        })
    }

    /// Fills one trace row from an executed `StepRecord`, deriving the shift
    /// amount, power-of-two multiplier, and outflow bits from rs1/rs2.
    fn assign_instance(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        instance: &mut [<E as ExtensionField>::BaseField],
        lk_multiplicity: &mut crate::witness::LkMultiplicity,
        step: &ceno_emul::StepRecord,
    ) -> Result<(), crate::error::ZKVMError> {
        // rs2 & its derived values
        let rs2_read = Value::new_unchecked(step.rs2().unwrap().value);
        let rs2_low5 = rs2_read.as_u64() & 0b11111;
        lk_multiplicity.assert_ux::<5>(rs2_low5);
        lk_multiplicity.lookup_pow2(rs2_low5);
        let pow2_rs2_low5 = 1u64 << rs2_low5;
        let rs2_high = Value::new(
            ((rs2_read.as_u64() - rs2_low5) >> 5) as u32,
            lk_multiplicity,
        );
        config.rs2_high.assign_value(instance, rs2_high);
        config.rs2_read.assign_value(instance, rs2_read);
        set_val!(instance, config.pow2_rs2_low5, pow2_rs2_low5);
        set_val!(instance, config.rs2_low5, rs2_low5);
        // rs1
        let rs1_read = Value::new_unchecked(step.rs1().unwrap().value);
        // rd
        let rd_written = Value::new(step.rd().unwrap().value.after, lk_multiplicity);
        // outflow: high bits pushed out (SLL) or low bits dropped (SRL/SRA)
        let outflow = match I::INST_KIND {
            InsnKind::SLL => (rs1_read.as_u64() * pow2_rs2_low5) >> UInt::<E>::TOTAL_BITS,
            InsnKind::SRL => rs1_read.as_u64() & (pow2_rs2_low5 - 1),
            InsnKind::SRA => {
                let Some(signed_ext_config) = config.signed_extend_config.as_ref() else {
                    Err(ZKVMError::CircuitError)?
                };
                // Sign-extension witness is driven by the top limb of rs1.
                signed_ext_config.assign_instance(
                    instance,
                    lk_multiplicity,
                    *rs1_read.as_u16_limbs().last().unwrap() as u64,
                )?;
                rs1_read.as_u64() & (pow2_rs2_low5 - 1)
            }
            _ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
        };
        set_val!(instance, config.outflow, outflow);
        config.rs1_read.assign_value(instance, rs1_read);
        config.rd_written.assign_value(instance, rd_written);
        config.assert_lt_config.assign_instance(
            instance,
            lk_multiplicity,
            outflow,
            pow2_rs2_low5,
        )?;
        config
            .r_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/shift/shift_circuit_v2.rs | ceno_zkvm/src/instructions/riscv/shift/shift_circuit_v2.rs | use crate::e2e::ShardContext;
/// constrain implementation follow from https://github.com/openvm-org/openvm/blob/main/extensions/rv32im/circuit/src/shift/core.rs
use crate::{
instructions::{
Instruction,
riscv::{
RIVInstruction,
constants::{UINT_BYTE_LIMBS, UInt8},
i_insn::IInstructionConfig,
r_insn::RInstructionConfig,
},
},
structs::ProgramParams,
utils::{split_to_limb, split_to_u8},
};
use ceno_emul::InsnKind;
use ff_ext::{ExtensionField, FieldInto};
use itertools::Itertools;
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::{Field, FieldAlgebra};
use std::{array, marker::PhantomData};
use witness::set_val;
/// Shared shift witness layout used by both the R-type and immediate shift
/// circuits (constraint scheme follows openvm's shift core).
pub struct ShiftBaseConfig<E: ExtensionField, const NUM_LIMBS: usize, const LIMB_BITS: usize> {
    // bit_multiplier = 2^bit_shift
    pub bit_multiplier_left: WitIn,
    pub bit_multiplier_right: WitIn,
    // Sign of x for SRA
    pub b_sign: WitIn,
    // Boolean columns that are 1 exactly at the index of the bit/limb shift amount
    pub bit_shift_marker: [WitIn; LIMB_BITS],
    pub limb_shift_marker: [WitIn; NUM_LIMBS],
    // Part of each x[i] that gets bit shifted to the next limb
    pub bit_shift_carry: [WitIn; NUM_LIMBS],
    pub phantom: PhantomData<E>,
}
impl<E: ExtensionField, const NUM_LIMBS: usize, const LIMB_BITS: usize>
    ShiftBaseConfig<E, NUM_LIMBS, LIMB_BITS>
{
    /// Constrains `a = b << c` or `a = b >> c` (per `kind`) over limb vectors,
    /// using one-hot markers for the limb- and bit-level shift amounts.
    /// `a` is the result (rd), `b` the shifted operand (rs1), `c` the shift
    /// amount (rs2 or immediate).
    pub fn construct_circuit(
        circuit_builder: &mut crate::circuit_builder::CircuitBuilder<E>,
        kind: InsnKind,
        a: [Expression<E>; NUM_LIMBS],
        b: [Expression<E>; NUM_LIMBS],
        c: [Expression<E>; NUM_LIMBS],
    ) -> Result<Self, crate::error::ZKVMError> {
        let bit_shift_marker =
            array::from_fn(|i| circuit_builder.create_witin(|| format!("bit_shift_marker_{}", i)));
        let limb_shift_marker =
            array::from_fn(|i| circuit_builder.create_witin(|| format!("limb_shift_marker_{}", i)));
        let bit_multiplier_left = circuit_builder.create_witin(|| "bit_multiplier_left");
        let bit_multiplier_right = circuit_builder.create_witin(|| "bit_multiplier_right");
        let b_sign = circuit_builder.create_bit(|| "b_sign")?;
        let bit_shift_carry =
            array::from_fn(|i| circuit_builder.create_witin(|| format!("bit_shift_carry_{}", i)));
        // Constrain that bit_shift, bit_multiplier are correct, i.e. that bit_multiplier =
        // 1 << bit_shift. Because the sum of all bit_shift_marker[i] is constrained to be
        // 1, bit_shift is guaranteed to be in range.
        let mut bit_marker_sum = Expression::ZERO;
        let mut bit_shift = Expression::ZERO;
        for (i, bit_shift_marker_i) in bit_shift_marker.iter().enumerate().take(LIMB_BITS) {
            circuit_builder.assert_bit(
                || format!("bit_shift_marker_{i}_assert_bit"),
                bit_shift_marker_i.expr(),
            )?;
            bit_marker_sum += bit_shift_marker_i.expr();
            bit_shift += E::BaseField::from_canonical_usize(i).expr() * bit_shift_marker_i.expr();
            // Only the direction used by this opcode gets its multiplier pinned.
            match kind {
                InsnKind::SLL | InsnKind::SLLI => {
                    circuit_builder.condition_require_zero(
                        || "bit_multiplier_left_condition",
                        bit_shift_marker_i.expr(),
                        bit_multiplier_left.expr()
                            - E::BaseField::from_canonical_usize(1 << i).expr(),
                    )?;
                }
                InsnKind::SRL | InsnKind::SRLI | InsnKind::SRA | InsnKind::SRAI => {
                    circuit_builder.condition_require_zero(
                        || "bit_multiplier_right_condition",
                        bit_shift_marker_i.expr(),
                        bit_multiplier_right.expr()
                            - E::BaseField::from_canonical_usize(1 << i).expr(),
                    )?;
                }
                _ => unreachable!(),
            }
        }
        circuit_builder.require_one(|| "bit_marker_sum_one_hot", bit_marker_sum.expr())?;
        // Check that a[i] = b[i] <</>> c[i] both on the bit and limb shift level if c <
        // NUM_LIMBS * LIMB_BITS.
        let mut limb_marker_sum = Expression::ZERO;
        let mut limb_shift = Expression::ZERO;
        for i in 0..NUM_LIMBS {
            circuit_builder.assert_bit(
                || format!("limb_shift_marker_{i}_assert_bit"),
                limb_shift_marker[i].expr(),
            )?;
            limb_marker_sum += limb_shift_marker[i].expr();
            limb_shift +=
                E::BaseField::from_canonical_usize(i).expr() * limb_shift_marker[i].expr();
            for j in 0..NUM_LIMBS {
                match kind {
                    InsnKind::SLL | InsnKind::SLLI => {
                        if j < i {
                            // Limbs below the limb shift are zero-filled.
                            circuit_builder.condition_require_zero(
                                || format!("limb_shift_marker_a_{i}_{j}"),
                                limb_shift_marker[i].expr(),
                                a[j].expr(),
                            )?;
                        } else {
                            // a[j] = (b[j-i] << bit_shift) + carry-in - carry-out * 2^LIMB_BITS
                            let expected_a_left = if j - i == 0 {
                                Expression::ZERO
                            } else {
                                bit_shift_carry[j - i - 1].expr()
                            } + b[j - i].expr() * bit_multiplier_left.expr()
                                - E::BaseField::from_canonical_usize(1 << LIMB_BITS).expr()
                                    * bit_shift_carry[j - i].expr();
                            circuit_builder.condition_require_zero(
                                || format!("limb_shift_marker_a_expected_a_left_{i}_{j}",),
                                limb_shift_marker[i].expr(),
                                a[j].expr() - expected_a_left,
                            )?;
                        }
                    }
                    InsnKind::SRL | InsnKind::SRLI | InsnKind::SRA | InsnKind::SRAI => {
                        // SRL and SRA constraints. Combining with above would require an additional column.
                        if j + i > NUM_LIMBS - 1 {
                            // Limbs above the source word are sign-filled (or zero for SRL).
                            circuit_builder.condition_require_zero(
                                || format!("limb_shift_marker_a_{i}_{j}"),
                                limb_shift_marker[i].expr(),
                                a[j].expr()
                                    - b_sign.expr()
                                        * E::BaseField::from_canonical_usize((1 << LIMB_BITS) - 1)
                                            .expr(),
                            )?;
                        } else {
                            // a[j] * 2^bit_shift = carry-in * 2^LIMB_BITS + (b[j+i] - carry-out)
                            let expected_a_right =
                                if j + i == NUM_LIMBS - 1 {
                                    b_sign.expr() * (bit_multiplier_right.expr() - Expression::ONE)
                                } else {
                                    bit_shift_carry[j + i + 1].expr()
                                } * E::BaseField::from_canonical_usize(1 << LIMB_BITS).expr()
                                    + (b[j + i].expr() - bit_shift_carry[j + i].expr());
                            circuit_builder.condition_require_zero(
                                || format!("limb_shift_marker_a_expected_a_right_{i}_{j}",),
                                limb_shift_marker[i].expr(),
                                a[j].expr() * bit_multiplier_right.expr() - expected_a_right,
                            )?;
                        }
                    }
                    _ => unimplemented!(),
                }
            }
        }
        circuit_builder.require_one(|| "limb_marker_sum_one_hot", limb_marker_sum.expr())?;
        // Check that bit_shift and limb_shift are correct.
        let num_bits = E::BaseField::from_canonical_usize(NUM_LIMBS * LIMB_BITS);
        // (c[0] - limb_shift*LIMB_BITS - bit_shift) must be a multiple of
        // NUM_LIMBS*LIMB_BITS, with a small quotient; range-check the quotient.
        circuit_builder.assert_const_range(
            || "bit_shift_vs_limb_shift",
            (c[0].expr()
                - limb_shift * E::BaseField::from_canonical_usize(LIMB_BITS).expr()
                - bit_shift.expr())
                * num_bits.inverse().expr(),
            LIMB_BITS - ((NUM_LIMBS * LIMB_BITS) as u32).ilog2() as usize,
        )?;
        if !matches!(kind, InsnKind::SRA | InsnKind::SRAI) {
            circuit_builder.require_zero(|| "b_sign_zero", b_sign.expr())?;
        } else {
            // Extract b's sign bit via an XOR-with-mask byte lookup.
            let mask = E::BaseField::from_canonical_u32(1 << (LIMB_BITS - 1)).expr();
            let b_sign_shifted = b_sign.expr() * mask.expr();
            circuit_builder.lookup_xor_byte(
                b[NUM_LIMBS - 1].expr(),
                mask.expr(),
                b[NUM_LIMBS - 1].expr() + mask.expr()
                    - (E::BaseField::from_canonical_u32(2).expr()) * b_sign_shifted.expr(),
            )?;
        }
        // Each carry holds at most bit_shift bits.
        for (i, carry) in bit_shift_carry.iter().enumerate() {
            circuit_builder.assert_dynamic_range(
                || format!("bit_shift_carry_range_check_{i}"),
                carry.expr(),
                bit_shift.expr(),
            )?;
        }
        Ok(Self {
            bit_shift_marker,
            bit_multiplier_left,
            bit_multiplier_right,
            limb_shift_marker,
            bit_shift_carry,
            b_sign,
            phantom: PhantomData,
        })
    }

    /// Fills the shift witnesses for one row: one-hot markers, multiplier,
    /// per-limb carries, and the sign bit; `b` is the shifted operand, `c`
    /// the shift amount.
    pub fn assign_instances(
        &self,
        instance: &mut [<E as ExtensionField>::BaseField],
        lk_multiplicity: &mut crate::witness::LkMultiplicity,
        kind: InsnKind,
        b: u32,
        c: u32,
    ) {
        let b = split_to_limb::<_, LIMB_BITS>(b);
        let c = split_to_limb::<_, LIMB_BITS>(c);
        let (_, limb_shift, bit_shift) = run_shift::<NUM_LIMBS, LIMB_BITS>(
            kind,
            &b.clone().try_into().unwrap(),
            &c.clone().try_into().unwrap(),
        );
        match kind {
            InsnKind::SLL | InsnKind::SLLI => set_val!(
                instance,
                self.bit_multiplier_left,
                E::BaseField::from_canonical_usize(1 << bit_shift)
            ),
            _ => set_val!(
                instance,
                self.bit_multiplier_right,
                E::BaseField::from_canonical_usize(1 << bit_shift)
            ),
        };
        // Left shifts carry out the top bit_shift bits; right shifts the bottom.
        let bit_shift_carry: [u32; NUM_LIMBS] = array::from_fn(|i| match kind {
            InsnKind::SLL | InsnKind::SLLI => b[i] >> (LIMB_BITS - bit_shift),
            _ => b[i] % (1 << bit_shift),
        });
        for (val, witin) in bit_shift_carry.iter().zip_eq(&self.bit_shift_carry) {
            set_val!(instance, witin, E::BaseField::from_canonical_u32(*val));
            lk_multiplicity.assert_dynamic_range(*val as u64, bit_shift as u64);
        }
        for (i, witin) in self.bit_shift_marker.iter().enumerate() {
            set_val!(instance, witin, E::BaseField::from_bool(i == bit_shift));
        }
        for (i, witin) in self.limb_shift_marker.iter().enumerate() {
            set_val!(instance, witin, E::BaseField::from_bool(i == limb_shift));
        }
        // Mirror the constraint-side quotient range check.
        let num_bits_log = (NUM_LIMBS * LIMB_BITS).ilog2();
        lk_multiplicity.assert_const_range(
            (((c[0] as usize) - bit_shift - limb_shift * LIMB_BITS) >> num_bits_log) as u64,
            LIMB_BITS - num_bits_log as usize,
        );
        let mut b_sign = 0;
        if matches!(kind, InsnKind::SRA | InsnKind::SRAI) {
            b_sign = b[NUM_LIMBS - 1] >> (LIMB_BITS - 1);
            lk_multiplicity.lookup_xor_byte(b[NUM_LIMBS - 1] as u64, 1 << (LIMB_BITS - 1));
        }
        set_val!(instance, self.b_sign, E::BaseField::from_bool(b_sign != 0));
    }
}
/// Config for register-register shifts (SLL/SRL/SRA): base shift constraints
/// over byte limbs plus the R-type instruction plumbing.
pub struct ShiftRTypeConfig<E: ExtensionField> {
    shift_base_config: ShiftBaseConfig<E, UINT_BYTE_LIMBS, 8>,
    rs1_read: UInt8<E>,
    rs2_read: UInt8<E>,
    pub rd_written: UInt8<E>,
    r_insn: RInstructionConfig<E>,
}
/// v2 register-register shift circuit (SLL/SRL/SRA) over byte limbs.
pub struct ShiftLogicalInstruction<E, I>(PhantomData<(E, I)>);

impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for ShiftLogicalInstruction<E, I> {
    type InstructionConfig = ShiftRTypeConfig<E>;

    fn name() -> String {
        format!("{:?}", I::INST_KIND)
    }

    /// Wires rs1/rs2/rd byte-limb registers into the shared shift base
    /// constraints (a = rd, b = rs1, c = rs2) and the R-type plumbing.
    fn construct_circuit(
        circuit_builder: &mut crate::circuit_builder::CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<Self::InstructionConfig, crate::error::ZKVMError> {
        let (rd_written, rs1_read, rs2_read) = match I::INST_KIND {
            InsnKind::SLL | InsnKind::SRL | InsnKind::SRA => {
                let rs1_read = UInt8::new_unchecked(|| "rs1_read", circuit_builder)?;
                let rs2_read = UInt8::new_unchecked(|| "rs2_read", circuit_builder)?;
                let rd_written = UInt8::new(|| "rd_written", circuit_builder)?;
                (rd_written, rs1_read, rs2_read)
            }
            _ => unimplemented!(),
        };
        let r_insn = RInstructionConfig::<E>::construct_circuit(
            circuit_builder,
            I::INST_KIND,
            rs1_read.register_expr(),
            rs2_read.register_expr(),
            rd_written.register_expr(),
        )?;
        let shift_base_config = ShiftBaseConfig::construct_circuit(
            circuit_builder,
            I::INST_KIND,
            rd_written.expr().try_into().unwrap(),
            rs1_read.expr().try_into().unwrap(),
            rs2_read.expr().try_into().unwrap(),
        )?;
        Ok(ShiftRTypeConfig {
            r_insn,
            rs1_read,
            rs2_read,
            rd_written,
            shift_base_config,
        })
    }

    /// Fills one trace row: splits the register values into byte limbs,
    /// range-checks the rd limbs, and assigns the base shift witnesses.
    fn assign_instance(
        config: &ShiftRTypeConfig<E>,
        shard_ctx: &mut ShardContext,
        instance: &mut [<E as ExtensionField>::BaseField],
        lk_multiplicity: &mut crate::witness::LkMultiplicity,
        step: &ceno_emul::StepRecord,
    ) -> Result<(), crate::error::ZKVMError> {
        // rs2
        let rs2_read = split_to_u8::<u16>(step.rs2().unwrap().value);
        // rs1
        let rs1_read = split_to_u8::<u16>(step.rs1().unwrap().value);
        // rd
        let rd_written = split_to_u8::<u16>(step.rd().unwrap().value.after);
        // Range-check rd limbs pairwise; odd tail gets a single 8-bit check.
        for chunk in rd_written.chunks(2) {
            if chunk.len() == 2 {
                lk_multiplicity.assert_double_u8(chunk[0] as u64, chunk[1] as u64)
            } else {
                lk_multiplicity.assert_const_range(chunk[0] as u64, 8);
            }
        }
        config.rs1_read.assign_limbs(instance, &rs1_read);
        config.rs2_read.assign_limbs(instance, &rs2_read);
        config.rd_written.assign_limbs(instance, &rd_written);
        config.shift_base_config.assign_instances(
            instance,
            lk_multiplicity,
            I::INST_KIND,
            step.rs1().unwrap().value,
            step.rs2().unwrap().value,
        );
        config
            .r_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        Ok(())
    }
}
/// Config for immediate shifts (SLLI/SRLI/SRAI): base shift constraints over
/// byte limbs plus the I-type instruction plumbing; the shift amount comes
/// from the `imm` witness instead of rs2.
pub struct ShiftImmConfig<E: ExtensionField> {
    shift_base_config: ShiftBaseConfig<E, UINT_BYTE_LIMBS, 8>,
    rs1_read: UInt8<E>,
    pub rd_written: UInt8<E>,
    i_insn: IInstructionConfig<E>,
    imm: WitIn,
}
/// v2 immediate shift circuit (SLLI/SRLI/SRAI) over byte limbs.
pub struct ShiftImmInstruction<E, I>(PhantomData<(E, I)>);

impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for ShiftImmInstruction<E, I> {
    type InstructionConfig = ShiftImmConfig<E>;

    fn name() -> String {
        format!("{:?}", I::INST_KIND)
    }

    /// Wires rs1/rd byte-limb registers and the immediate into the shared
    /// shift base constraints (a = rd, b = rs1, c = imm) and the I-type
    /// plumbing.  The immediate occupies only the lowest limb of `c`.
    fn construct_circuit(
        circuit_builder: &mut crate::circuit_builder::CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<Self::InstructionConfig, crate::error::ZKVMError> {
        let (rd_written, rs1_read, imm) = match I::INST_KIND {
            InsnKind::SLLI | InsnKind::SRLI | InsnKind::SRAI => {
                let rs1_read = UInt8::new_unchecked(|| "rs1_read", circuit_builder)?;
                let imm = circuit_builder.create_witin(|| "imm");
                let rd_written = UInt8::new(|| "rd_written", circuit_builder)?;
                (rd_written, rs1_read, imm)
            }
            _ => unimplemented!(),
        };
        // Shift amount as a limb vector: imm in the low limb, zeros above.
        let uint8_imm = UInt8::from_exprs_unchecked(vec![imm.expr(), 0.into(), 0.into(), 0.into()]);
        let i_insn = IInstructionConfig::<E>::construct_circuit(
            circuit_builder,
            I::INST_KIND,
            imm.expr(),
            0.into(),
            rs1_read.register_expr(),
            rd_written.register_expr(),
            false,
        )?;
        let shift_base_config = ShiftBaseConfig::construct_circuit(
            circuit_builder,
            I::INST_KIND,
            rd_written.expr().try_into().unwrap(),
            rs1_read.expr().try_into().unwrap(),
            uint8_imm.expr().try_into().unwrap(),
        )?;
        Ok(ShiftImmConfig {
            i_insn,
            imm,
            rs1_read,
            rd_written,
            shift_base_config,
        })
    }

    /// Fills one trace row: assigns the immediate, splits rs1/rd into byte
    /// limbs, range-checks the rd limbs, and assigns the base shift witnesses.
    fn assign_instance(
        config: &ShiftImmConfig<E>,
        shard_ctx: &mut ShardContext,
        instance: &mut [<E as ExtensionField>::BaseField],
        lk_multiplicity: &mut crate::witness::LkMultiplicity,
        step: &ceno_emul::StepRecord,
    ) -> Result<(), crate::error::ZKVMError> {
        let imm = step.insn().imm as i16 as u16;
        set_val!(instance, config.imm, E::BaseField::from_canonical_u16(imm));
        // rs1
        let rs1_read = split_to_u8::<u16>(step.rs1().unwrap().value);
        // rd
        let rd_written = split_to_u8::<u16>(step.rd().unwrap().value.after);
        // Range-check rd limbs pairwise; odd tail gets a single 8-bit check.
        for chunk in rd_written.chunks(2) {
            if chunk.len() == 2 {
                lk_multiplicity.assert_double_u8(chunk[0] as u64, chunk[1] as u64)
            } else {
                lk_multiplicity.assert_const_range(chunk[0] as u64, 8);
            }
        }
        config.rs1_read.assign_limbs(instance, &rs1_read);
        config.rd_written.assign_limbs(instance, &rd_written);
        config.shift_base_config.assign_instances(
            instance,
            lk_multiplicity,
            I::INST_KIND,
            step.rs1().unwrap().value,
            imm as u32,
        );
        config
            .i_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        Ok(())
    }
}
fn run_shift<const NUM_LIMBS: usize, const LIMB_BITS: usize>(
kind: InsnKind,
x: &[u32; NUM_LIMBS],
y: &[u32; NUM_LIMBS],
) -> ([u32; NUM_LIMBS], usize, usize) {
match kind {
InsnKind::SLL | InsnKind::SLLI => run_shift_left::<NUM_LIMBS, LIMB_BITS>(x, y),
InsnKind::SRL | InsnKind::SRLI => run_shift_right::<NUM_LIMBS, LIMB_BITS>(x, y, true),
InsnKind::SRA | InsnKind::SRAI => run_shift_right::<NUM_LIMBS, LIMB_BITS>(x, y, false),
_ => unreachable!(),
}
}
fn run_shift_left<const NUM_LIMBS: usize, const LIMB_BITS: usize>(
x: &[u32; NUM_LIMBS],
y: &[u32; NUM_LIMBS],
) -> ([u32; NUM_LIMBS], usize, usize) {
let mut result = [0u32; NUM_LIMBS];
let (limb_shift, bit_shift) = get_shift::<NUM_LIMBS, LIMB_BITS>(y);
for i in limb_shift..NUM_LIMBS {
result[i] = if i > limb_shift {
((x[i - limb_shift] << bit_shift) + (x[i - limb_shift - 1] >> (LIMB_BITS - bit_shift)))
% (1 << LIMB_BITS)
} else {
(x[i - limb_shift] << bit_shift) % (1 << LIMB_BITS)
};
}
(result, limb_shift, bit_shift)
}
fn run_shift_right<const NUM_LIMBS: usize, const LIMB_BITS: usize>(
x: &[u32; NUM_LIMBS],
y: &[u32; NUM_LIMBS],
logical: bool,
) -> ([u32; NUM_LIMBS], usize, usize) {
let fill = if logical {
0
} else {
((1 << LIMB_BITS) - 1) * (x[NUM_LIMBS - 1] >> (LIMB_BITS - 1))
};
let mut result = [fill; NUM_LIMBS];
let (limb_shift, bit_shift) = get_shift::<NUM_LIMBS, LIMB_BITS>(y);
for i in 0..(NUM_LIMBS - limb_shift) {
result[i] = if i + limb_shift + 1 < NUM_LIMBS {
((x[i + limb_shift] >> bit_shift) + (x[i + limb_shift + 1] << (LIMB_BITS - bit_shift)))
% (1 << LIMB_BITS)
} else {
((x[i + limb_shift] >> bit_shift) + (fill << (LIMB_BITS - bit_shift)))
% (1 << LIMB_BITS)
}
}
(result, limb_shift, bit_shift)
}
fn get_shift<const NUM_LIMBS: usize, const LIMB_BITS: usize>(y: &[u32]) -> (usize, usize) {
// We assume `NUM_LIMBS * LIMB_BITS <= 2^LIMB_BITS` so so the shift is defined
// entirely in y[0].
let shift = (y[0] as usize) % (NUM_LIMBS * LIMB_BITS);
(shift / LIMB_BITS, shift % LIMB_BITS)
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/slt/slt_circuit.rs | ceno_zkvm/src/instructions/riscv/slt/slt_circuit.rs | use crate::{
Value,
e2e::ShardContext,
error::ZKVMError,
gadgets::SignedLtConfig,
instructions::{
Instruction,
riscv::{
RIVInstruction,
constants::{LIMB_BITS, UINT_LIMBS, UInt},
r_insn::RInstructionConfig,
},
},
structs::ProgramParams,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, SWord, StepRecord};
use ff_ext::ExtensionField;
use gkr_iop::{circuit_builder::CircuitBuilder, gadgets::IsLtConfig};
use std::marker::PhantomData;
pub struct SetLessThanInstruction<E, I>(PhantomData<(E, I)>);
/// This config handles R-Instructions that represent registers values as 2 * u16.
pub struct SetLessThanConfig<E: ExtensionField> {
r_insn: RInstructionConfig<E>,
rs1_read: UInt<E>,
rs2_read: UInt<E>,
#[allow(dead_code)]
pub(crate) rd_written: UInt<E>,
deps: SetLessThanDependencies<E>,
}
enum SetLessThanDependencies<E: ExtensionField> {
Slt { signed_lt: SignedLtConfig<E> },
Sltu { is_lt: IsLtConfig },
}
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for SetLessThanInstruction<E, I> {
type InstructionConfig = SetLessThanConfig<E>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
cb: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
// If rs1_read < rs2_read, rd_written = 1. Otherwise rd_written = 0
let rs1_read = UInt::new_unchecked(|| "rs1_read", cb)?;
let rs2_read = UInt::new_unchecked(|| "rs2_read", cb)?;
let (deps, rd_written) = match I::INST_KIND {
InsnKind::SLT => {
let signed_lt =
SignedLtConfig::construct_circuit(cb, || "rs1 < rs2", &rs1_read, &rs2_read)?;
let rd_written = UInt::from_exprs_unchecked(vec![signed_lt.expr()]);
(SetLessThanDependencies::Slt { signed_lt }, rd_written)
}
InsnKind::SLTU => {
let is_lt = IsLtConfig::construct_circuit(
cb,
|| "rs1 < rs2",
rs1_read.value(),
rs2_read.value(),
UINT_LIMBS * LIMB_BITS,
)?;
let rd_written = UInt::from_exprs_unchecked(vec![is_lt.expr()]);
(SetLessThanDependencies::Sltu { is_lt }, rd_written)
}
_ => unreachable!(),
};
let r_insn = RInstructionConfig::<E>::construct_circuit(
cb,
I::INST_KIND,
rs1_read.register_expr(),
rs2_read.register_expr(),
rd_written.register_expr(),
)?;
Ok(SetLessThanConfig {
r_insn,
rs1_read,
rs2_read,
rd_written,
deps,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [<E as ExtensionField>::BaseField],
lkm: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
config
.r_insn
.assign_instance(instance, shard_ctx, lkm, step)?;
let rs1 = step.rs1().unwrap().value;
let rs2 = step.rs2().unwrap().value;
let rs1_read = Value::new_unchecked(rs1);
let rs2_read = Value::new_unchecked(rs2);
config
.rs1_read
.assign_limbs(instance, rs1_read.as_u16_limbs());
config
.rs2_read
.assign_limbs(instance, rs2_read.as_u16_limbs());
match &config.deps {
SetLessThanDependencies::Slt { signed_lt } => {
signed_lt.assign_instance(instance, lkm, rs1 as SWord, rs2 as SWord)?
}
SetLessThanDependencies::Sltu { is_lt } => {
is_lt.assign_instance(instance, lkm, rs1.into(), rs2.into())?
}
}
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/slt/slt_circuit_v2.rs | ceno_zkvm/src/instructions/riscv/slt/slt_circuit_v2.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::{UIntLimbsLT, UIntLimbsLTConfig},
instructions::{
Instruction,
riscv::{RIVInstruction, constants::UInt, r_insn::RInstructionConfig},
},
structs::ProgramParams,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, StepRecord};
use ff_ext::ExtensionField;
use std::marker::PhantomData;
pub struct SetLessThanInstruction<E, I>(PhantomData<(E, I)>);
/// This config handles R-Instructions that represent registers values as 2 * u16.
pub struct SetLessThanConfig<E: ExtensionField> {
r_insn: RInstructionConfig<E>,
rs1_read: UInt<E>,
rs2_read: UInt<E>,
#[allow(dead_code)]
pub(crate) rd_written: UInt<E>,
uint_lt_config: UIntLimbsLTConfig<E>,
}
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for SetLessThanInstruction<E, I> {
type InstructionConfig = SetLessThanConfig<E>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
cb: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
// If rs1_read < rs2_read, rd_written = 1. Otherwise rd_written = 0
let rs1_read = UInt::new_unchecked(|| "rs1_read", cb)?;
let rs2_read = UInt::new_unchecked(|| "rs2_read", cb)?;
let (rd_written, uint_lt_config) = match I::INST_KIND {
InsnKind::SLT => {
let config = UIntLimbsLT::construct_circuit(cb, &rs1_read, &rs2_read, true)?;
let rd_written = UInt::from_exprs_unchecked(vec![config.is_lt()]);
(rd_written, config)
}
InsnKind::SLTU => {
let config = UIntLimbsLT::construct_circuit(cb, &rs1_read, &rs2_read, false)?;
let rd_written = UInt::from_exprs_unchecked(vec![config.is_lt()]);
(rd_written, config)
}
_ => unreachable!(),
};
let r_insn = RInstructionConfig::<E>::construct_circuit(
cb,
I::INST_KIND,
rs1_read.register_expr(),
rs2_read.register_expr(),
rd_written.register_expr(),
)?;
Ok(SetLessThanConfig {
r_insn,
rs1_read,
rs2_read,
rd_written,
uint_lt_config,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [<E as ExtensionField>::BaseField],
lkm: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
config
.r_insn
.assign_instance(instance, shard_ctx, lkm, step)?;
let rs1 = step.rs1().unwrap().value;
let rs2 = step.rs2().unwrap().value;
let rs1_read = Value::new_unchecked(rs1);
let rs2_read = Value::new_unchecked(rs2);
config
.rs1_read
.assign_limbs(instance, rs1_read.as_u16_limbs());
config
.rs2_read
.assign_limbs(instance, rs2_read.as_u16_limbs());
let is_signed = matches!(step.insn().kind, InsnKind::SLT);
UIntLimbsLT::<E>::assign(
&config.uint_lt_config,
instance,
lkm,
rs1_read.as_u16_limbs(),
rs2_read.as_u16_limbs(),
is_signed,
)?;
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/div/div_circuit.rs | ceno_zkvm/src/instructions/riscv/div/div_circuit.rs | //! Circuit implementations for DIVU, REMU, DIV, and REM RISC-V opcodes
//!
//! The signed and unsigned division and remainder opcodes are handled by
//! simulating the division algorithm expression:
//!
//! `dividend = divisor * quotient + remainder` (1)
//!
//! where `remainder` is constrained to be between 0 and the divisor in a way
//! that suitably respects signed values, except for the case of division by 0.
//! Of particular note for this implememntation is the fact that in the
//! Goldilocks field, the right hand side of (1) does not wrap around under
//! modular arithmetic for either unsigned or signed 32-bit range-checked
//! values of `divisor`, `quotient`, and `remainder`, taking values between `0`
//! and `2^64 - 2^32` in the unsigned case, and between `-2^62` and `2^62 +
//! 2^31 - 1` in the signed case.
//!
//! This means that in either the unsigned or the signed setting, equation
//! (1) can be checked directly using native field expressions without
//! ambiguity due to modular field arithmetic -- more specifically, `dividend`
//! and `divisor` are taken from RISC-V registers, so are constrained to 32-bit
//! unsigned or signed values, and `quotient` and `remainder` values are
//! explicitly constrained to 32 bits by the checked UInt construction.
//!
//! The remainder of the complexity of this circuit comes about because of two
//! edge cases in the opcodes: division by zero, and signed division overflow.
//! For division by zero, equation (1) still holds, but an extra constraint is
//! imposed on the value of `quotient` to be `u32::MAX` in the unsigned case,
//! or `-1` in the signed case (the 32-bit vector with all 1s for both).
//!
//! Signed division overflow occurs when `dividend` is set to `i32::MIN
//! = -2^31`, and `divisor` is set to `-1`. In this case, the natural value of
//! `quotient` is `2^31`, but this value cannot be properly represented as a
//! signed 32-bit integer, so an error output must be enforced with `quotient =
//! i32::MIN`, and `remainder = 0`. In this one case, the proper RISC-V values
//! for `dividend`, `divisor`, `quotient`, and `remainder` do not satisfy the
//! division algorithm expression (1), so the proper values of `quotient` and
//! `remainder` can be enforced by instead imposing the variant constraint
//!
//! `2^31 = divisor * quotient + remainder` (2)
//!
//! Once (1) or (2) is appropriately satisfied, an inequality condition is
//! imposed on remainder, which varies depending on signs of the inputs. In
//! the case of unsigned inputs, this is just
//!
//! `0 <= remainder < divisor` (3)
//!
//! For signed inputs the situation is slightly more complicated, as `remainder`
//! and `divisor` may be either positive or negative. To handle sign
//! variations for the remainder inequality in a uniform manner, we derive
//! expressions representing the "positively oriented" values with signs set so
//! that the inequalities are always of the form (3). The correct sign
//! normalization is to take the absolute value of `divisor`, and to multiply
//! `remainder` by the sign of `dividend` since these two values are required
//! to have matching signs.
//!
//! For the special case of signed division overflow, the inequality condition
//! (3) still holds for the remainder and divisor after normalizing signs in
//! this way (specifically: `0 <= 0 < 1`), so no special treatment is needed.
//! In the division by 0 case, since `divisor` is `0`, the inequality cannot be
//! satisfied. To address this case, we require that exactly one of `remainder
//! < divisor` and `divisor = 0` holds. Specifically, since these conditions
//! are expressed as 0/1-valued booleans, we require just that the sum of these
//! booleans is equal to 1.
use ceno_emul::{InsnKind, StepRecord};
use ff_ext::{ExtensionField, FieldInto, SmallField};
use p3::goldilocks::Goldilocks;
use super::{
super::{
constants::{UINT_LIMBS, UInt},
r_insn::RInstructionConfig,
},
RIVInstruction,
};
use crate::{
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::{AssertLtConfig, IsEqualConfig, IsLtConfig, IsZeroConfig, Signed},
instructions::{Instruction, riscv::constants::LIMB_BITS},
structs::ProgramParams,
uint::Value,
witness::{LkMultiplicity, set_val},
};
use multilinear_extensions::{Expression, ToExpr, WitIn};
use std::marker::PhantomData;
pub struct DivRemConfig<E: ExtensionField> {
dividend: UInt<E>, // rs1_read
divisor: UInt<E>, // rs2_read
pub(super) quotient: UInt<E>,
pub(super) remainder: UInt<E>,
internal_config: InternalDivRem<E>,
is_divisor_zero: IsZeroConfig,
is_remainder_lt_divisor: IsLtConfig,
r_insn: RInstructionConfig<E>,
}
enum InternalDivRem<E: ExtensionField> {
Unsigned,
Signed {
dividend_signed: Box<Signed<E>>,
divisor_signed: Box<Signed<E>>,
quotient_signed: Box<Signed<E>>,
remainder_signed: Box<Signed<E>>,
is_dividend_signed_min: IsEqualConfig,
is_divisor_neg_one: IsEqualConfig,
is_signed_overflow: WitIn,
remainder_nonnegative: Box<AssertLtConfig>,
},
}
pub struct ArithInstruction<E, I>(PhantomData<(E, I)>);
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for ArithInstruction<E, I> {
type InstructionConfig = DivRemConfig<E>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
cb: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
// The soundness analysis for these constraints is only valid for
// 32-bit registers represented over the Goldilocks field, so verify
// these parameters
assert_eq!(UInt::<E>::TOTAL_BITS, u32::BITS as usize);
assert_eq!(E::BaseField::MODULUS_U64, Goldilocks::MODULUS_U64);
// 32-bit value from rs1
let dividend = UInt::new_unchecked(|| "dividend", cb)?;
// 32-bit value from rs2
let divisor = UInt::new_unchecked(|| "divisor", cb)?;
let quotient = UInt::new(|| "quotient", cb)?;
let remainder = UInt::new(|| "remainder", cb)?;
// `rem_e` and `div_e` are expressions representing the remainder and
// divisor from the signed or unsigned division operation, with signs
// normalized to be nonnegative, so that correct values must satisfy
// either `0 <= rem_e < div_e` or `div_e == 0`. The `rem_e` value
// should be constrained to be nonnegative before being returned from
// this block, while the checks `rem_e < div_e` or `div_e == 0` are
// done later.
let (internal_config, rem_e, div_e) = match I::INST_KIND {
InsnKind::DIVU | InsnKind::REMU => {
cb.require_equal(
|| "unsigned_division_relation",
dividend.value(),
divisor.value() * quotient.value() + remainder.value(),
)?;
(InternalDivRem::Unsigned, remainder.value(), divisor.value())
}
InsnKind::DIV | InsnKind::REM => {
let dividend_signed =
Signed::construct_circuit(cb, || "dividend_signed", ÷nd)?;
let divisor_signed = Signed::construct_circuit(cb, || "divisor_signed", &divisor)?;
let quotient_signed =
Signed::construct_circuit(cb, || "quotient_signed", "ient)?;
let remainder_signed =
Signed::construct_circuit(cb, || "remainder_signed", &remainder)?;
// Check for signed division overflow: i32::MIN / -1
let is_dividend_signed_min = IsEqualConfig::construct_circuit(
cb,
|| "is_dividend_signed_min",
dividend.value(),
(i32::MIN as u32).into(),
)?;
let is_divisor_neg_one = IsEqualConfig::construct_circuit(
cb,
|| "is_divisor_neg_one",
divisor.value(),
(-1i32 as u32).into(),
)?;
let is_signed_overflow = cb.flatten_expr(
|| "signed_division_overflow",
is_dividend_signed_min.expr() * is_divisor_neg_one.expr(),
)?;
// For signed division overflow, dividend = -2^31 and divisor
// = -1, so that quotient = 2^31 would be required for proper
// arithmetic, which is too large to represent in a 32-bit
// register. This case is therefore handled specially in the
// spec, setting quotient and remainder to -2^31 and 0
// respectively. These values are assured by the constraints
//
// 2^31 = divisor * quotient + remainder
// 0 <= |remainder| < |divisor|
//
// The second condition is the same inequality as required when
// there is no overflow, so no special handling is needed. The
// first condition is only different from the proper value in
// the left side of the equality, which can be controlled by a
// conditional equality constraint using fixed dividend value
// +2^31 in the signed overflow case.
let div_rel_expr =
quotient_signed.expr() * divisor_signed.expr() + remainder_signed.expr();
cb.condition_require_equal(
|| "signed_division_relation",
is_signed_overflow.expr(),
div_rel_expr,
// overflow replacement dividend value, +2^31
(1u64 << 31).into(),
dividend_signed.expr(),
)?;
// Check the required inequalities for the signed remainder.
// Change the signs of `remainder_signed` and `divisor_signed`
// so that the inequality matches the usual unsigned one: `0 <=
// remainder < divisor`
let remainder_pos_orientation: Expression<E> =
(1 - 2 * dividend_signed.is_negative.expr()) * remainder_signed.expr();
let divisor_pos_orientation =
(1 - 2 * divisor_signed.is_negative.expr()) * divisor_signed.expr();
let remainder_nonnegative = AssertLtConfig::construct_circuit(
cb,
|| "oriented_remainder_nonnegative",
(-1i32).into(),
remainder_pos_orientation.clone(),
UINT_LIMBS * LIMB_BITS,
)?;
(
InternalDivRem::Signed {
dividend_signed: Box::new(dividend_signed),
divisor_signed: Box::new(divisor_signed),
quotient_signed: Box::new(quotient_signed),
remainder_signed: Box::new(remainder_signed),
is_dividend_signed_min,
is_divisor_neg_one,
is_signed_overflow,
remainder_nonnegative: Box::new(remainder_nonnegative),
},
remainder_pos_orientation,
divisor_pos_orientation,
)
}
_ => unreachable!("Unsupported instruction kind"),
};
let is_divisor_zero =
IsZeroConfig::construct_circuit(cb, || "is_divisor_zero", divisor.value())?;
// For zero division, quotient must be the "all ones" register for both
// unsigned and signed cases, representing 2^32-1 and -1 respectively.
cb.condition_require_equal(
|| "quotient_zero_division",
is_divisor_zero.expr(),
quotient.value(),
u32::MAX.into(),
quotient.value(),
)?;
// Check whether the remainder is less than the divisor, where both
// values have sign normalized to be nonnegative (for correct values)
// in the signed case
let is_remainder_lt_divisor = IsLtConfig::construct_circuit(
cb,
|| "is_remainder_lt_divisor",
rem_e,
div_e,
UINT_LIMBS * LIMB_BITS,
)?;
// When divisor is nonzero, (nonnegative) remainder must be less than
// divisor, but when divisor is zero, remainder can't be less than
// divisor; so require that exactly one of these is true, i.e. sum of
// bit expressions is equal to 1.
cb.require_equal(
|| "remainder < divisor iff divisor nonzero",
is_divisor_zero.expr() + is_remainder_lt_divisor.expr(),
1.into(),
)?;
// TODO determine whether any optimizations are possible for getting
// just one of quotient or remainder
let rd_written_e = match I::INST_KIND {
InsnKind::DIVU | InsnKind::DIV => quotient.register_expr(),
InsnKind::REMU | InsnKind::REM => remainder.register_expr(),
_ => unreachable!("Unsupported instruction kind"),
};
let r_insn = RInstructionConfig::<E>::construct_circuit(
cb,
I::INST_KIND,
dividend.register_expr(),
divisor.register_expr(),
rd_written_e,
)?;
Ok(DivRemConfig {
dividend,
divisor,
quotient,
remainder,
internal_config,
is_divisor_zero,
is_remainder_lt_divisor,
r_insn,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [E::BaseField],
lkm: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
// dividend = quotient * divisor + remainder
let dividend = step.rs1().unwrap().value;
let divisor = step.rs2().unwrap().value;
let dividend_v = Value::new_unchecked(dividend);
let divisor_v = Value::new_unchecked(divisor);
let (quotient, remainder) = match &config.internal_config {
InternalDivRem::Unsigned => (
dividend.checked_div(divisor).unwrap_or(u32::MAX),
dividend.checked_rem(divisor).unwrap_or(dividend),
),
InternalDivRem::Signed { .. } => {
let dividend = dividend as i32;
let divisor = divisor as i32;
let (quotient, remainder) = if divisor == 0 {
// i32::MIN / 0 => remainder == i32::MIN
(-1i32, dividend)
} else {
// these correctly handle signed division overflow
(
dividend.wrapping_div(divisor),
dividend.wrapping_rem(divisor),
)
};
(quotient as u32, remainder as u32)
}
};
let quotient_v = Value::new(quotient, lkm);
let remainder_v = Value::new(remainder, lkm);
let (rem_pos, div_pos) = match &config.internal_config {
InternalDivRem::Unsigned => (remainder, divisor),
InternalDivRem::Signed {
dividend_signed,
divisor_signed,
is_dividend_signed_min,
is_divisor_neg_one,
is_signed_overflow,
quotient_signed,
remainder_signed,
remainder_nonnegative,
} => {
let dividend = dividend as i32;
let divisor = divisor as i32;
let remainder = remainder as i32;
dividend_signed.assign_instance(instance, lkm, ÷nd_v)?;
divisor_signed.assign_instance(instance, lkm, &divisor_v)?;
is_dividend_signed_min.assign_instance(
instance,
(dividend as u32 as u64).into_f(),
(i32::MIN as u32 as u64).into_f(),
)?;
is_divisor_neg_one.assign_instance(
instance,
(divisor as u32 as u64).into_f(),
(-1i32 as u32 as u64).into_f(),
)?;
let signed_div_overflow_b = dividend == i32::MIN && divisor == -1i32;
set_val!(instance, is_signed_overflow, signed_div_overflow_b as u64);
quotient_signed.assign_instance(instance, lkm, "ient_v)?;
remainder_signed.assign_instance(instance, lkm, &remainder_v)?;
let negate_if = |b: bool, x: i32| if b { -(x as i64) } else { x as i64 };
let remainder_pos_orientation = negate_if(dividend < 0, remainder);
let divisor_pos_orientation = negate_if(divisor < 0, divisor);
remainder_nonnegative.assign_instance_signed(
instance,
lkm,
-1,
remainder_pos_orientation,
)?;
(
remainder_pos_orientation as u32,
divisor_pos_orientation as u32,
)
}
};
config.dividend.assign_value(instance, dividend_v);
config.divisor.assign_value(instance, divisor_v);
config.quotient.assign_value(instance, quotient_v);
config.remainder.assign_value(instance, remainder_v);
config
.is_divisor_zero
.assign_instance(instance, (divisor as u64).into_f())?;
config.is_remainder_lt_divisor.assign_instance(
instance,
lkm,
rem_pos as u64,
div_pos as u64,
)?;
config
.r_insn
.assign_instance(instance, shard_ctx, lkm, step)?;
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/div/div_circuit_v2.rs | ceno_zkvm/src/instructions/riscv/div/div_circuit_v2.rs | /// refer constraints implementation from https://github.com/openvm-org/openvm/blob/main/extensions/rv32im/circuit/src/divrem/core.rs
use ceno_emul::{InsnKind, StepRecord};
use ff_ext::{ExtensionField, FieldInto};
use p3::field::Field;
use super::{
super::{
constants::{UINT_LIMBS, UInt},
r_insn::RInstructionConfig,
},
RIVInstruction,
};
use crate::{
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{Instruction, riscv::constants::LIMB_BITS},
structs::ProgramParams,
uint::Value,
witness::{LkMultiplicity, set_val},
};
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::FieldAlgebra;
use std::{array, marker::PhantomData};
pub struct DivRemConfig<E: ExtensionField> {
pub(crate) dividend: UInt<E>, // rs1_read
pub(crate) divisor: UInt<E>, // rs2_read
pub(crate) quotient: UInt<E>,
pub(crate) remainder: UInt<E>,
pub(crate) r_insn: RInstructionConfig<E>,
dividend_sign: WitIn,
divisor_sign: WitIn,
quotient_sign: WitIn,
remainder_zero: WitIn,
divisor_zero: WitIn,
divisor_sum_inv: WitIn,
remainder_sum_inv: WitIn,
remainder_inv: [WitIn; UINT_LIMBS],
sign_xor: WitIn,
remainder_prime: UInt<E>, // r'
lt_marker: [WitIn; UINT_LIMBS],
lt_diff: WitIn,
}
pub struct ArithInstruction<E, I>(PhantomData<(E, I)>);
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for ArithInstruction<E, I> {
type InstructionConfig = DivRemConfig<E>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
cb: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
assert_eq!(UInt::<E>::TOTAL_BITS, u32::BITS as usize);
assert_eq!(UInt::<E>::LIMB_BITS, 16);
assert_eq!(UInt::<E>::NUM_LIMBS, 2);
// 32-bit value from rs1
let dividend = UInt::new_unchecked(|| "dividend", cb)?;
// 32-bit value from rs2
let divisor = UInt::new_unchecked(|| "divisor", cb)?;
let quotient = UInt::new(|| "quotient", cb)?;
let remainder = UInt::new(|| "remainder", cb)?;
let dividend_expr = dividend.expr();
let divisor_expr = divisor.expr();
let quotient_expr = quotient.expr();
let remainder_expr = remainder.expr();
// TODO determine whether any optimizations are possible for getting
// just one of quotient or remainder
let rd_written_e = match I::INST_KIND {
InsnKind::DIVU | InsnKind::DIV => quotient.register_expr(),
InsnKind::REMU | InsnKind::REM => remainder.register_expr(),
_ => unreachable!("Unsupported instruction kind"),
};
let r_insn = RInstructionConfig::<E>::construct_circuit(
cb,
I::INST_KIND,
dividend.register_expr(),
divisor.register_expr(),
rd_written_e,
)?;
let dividend_sign = cb.create_bit(|| "dividend_sign".to_string())?;
let divisor_sign = cb.create_bit(|| "divisor_sign".to_string())?;
let dividend_ext: Expression<E> =
dividend_sign.expr() * E::BaseField::from_canonical_u32((1 << LIMB_BITS) - 1).expr();
let divisor_ext: Expression<E> =
divisor_sign.expr() * E::BaseField::from_canonical_u32((1 << LIMB_BITS) - 1).expr();
let carry_divide = E::BaseField::from_canonical_u32(1 << UInt::<E>::LIMB_BITS).inverse();
let mut carry_expr: [Expression<E>; UINT_LIMBS] =
array::from_fn(|_| E::BaseField::ZERO.expr());
for i in 0..UINT_LIMBS {
let expected_limb = if i == 0 {
E::BaseField::ZERO.expr()
} else {
carry_expr[i - 1].clone()
} + (0..=i).fold(remainder_expr[i].expr(), |ac, k| {
ac + (divisor_expr[k].clone() * quotient_expr[i - k].clone())
});
carry_expr[i] = carry_divide.expr() * (expected_limb - dividend_expr[i].clone());
}
for (i, carry) in carry_expr.iter().enumerate() {
cb.assert_const_range(
|| format!("range_check_carry_{i}"),
carry.clone(),
// carry up to 16 + 2 = 18 bits
LIMB_BITS + 2,
)?;
}
let quotient_sign = cb.create_bit(|| "quotient_sign".to_string())?;
let quotient_ext: Expression<E> =
quotient_sign.expr() * E::BaseField::from_canonical_u32((1 << LIMB_BITS) - 1).expr();
let mut carry_ext: [Expression<E>; UINT_LIMBS] =
array::from_fn(|_| E::BaseField::ZERO.expr());
let remainder_zero = cb.create_bit(|| "remainder_zero".to_string())?;
for j in 0..UINT_LIMBS {
let expected_limb =
if j == 0 {
carry_expr[UINT_LIMBS - 1].clone()
} else {
carry_ext[j - 1].clone()
} + ((j + 1)..UINT_LIMBS).fold(E::BaseField::ZERO.expr(), |acc, k| {
acc + (divisor_expr[k].clone() * quotient_expr[UINT_LIMBS + j - k].clone())
}) + (0..(j + 1)).fold(E::BaseField::ZERO.expr(), |acc, k| {
acc + (divisor_expr[k].clone() * quotient_ext.expr())
+ (quotient_expr[k].clone() * divisor_ext.expr())
}) + (E::BaseField::ONE.expr() - remainder_zero.expr()) * dividend_ext.clone();
carry_ext[j] = carry_divide.expr() * (expected_limb - dividend_ext.clone());
}
for (i, carry_ext) in carry_ext.iter().enumerate() {
cb.assert_const_range(
|| format!("range_check_carry_ext_{i}"),
carry_ext.clone(),
// carry up to 16 + 2 = 18 bits
LIMB_BITS + 2,
)?;
}
let divisor_zero = cb.create_bit(|| "divisor_zero".to_string())?;
cb.assert_bit(
|| "divisor_remainder_not_both_zero",
divisor_zero.expr() + remainder_zero.expr(),
)?;
for (i, (divisor_expr, quotient_expr)) in
divisor_expr.iter().zip(quotient_expr.iter()).enumerate()
{
cb.condition_require_zero(
|| format!("check_divisor_zero_{}", i),
divisor_zero.expr(),
divisor_expr.clone(),
)?;
cb.condition_require_zero(
|| "check_quotient_on_divisor_zero".to_string(),
divisor_zero.expr(),
quotient_expr.clone()
- E::BaseField::from_canonical_u32((1 << LIMB_BITS) - 1).expr(),
)?;
}
// divisor_sum is guaranteed to be non-zero if divisor is non-zero since we assume
// each limb of divisor to be within [0, 2^LIMB_BITS) already.
// To constrain that if divisor = 0 then divisor_zero = 1, we check that if divisor_zero = 0 then divisor_sum is non-zero using divisor_sum_inv.
let divisor_sum_inv = cb.create_witin(|| "divisor_sum_inv".to_string());
let divisor_sum: Expression<E> = divisor_expr
.iter()
.fold(E::BaseField::ZERO.expr(), |acc, d| acc + d.clone());
let divisor_not_zero: Expression<E> = E::BaseField::ONE.expr() - divisor_zero.expr();
cb.condition_require_one(
|| "check_divisor_sum_inv",
divisor_not_zero.clone(),
divisor_sum.clone() * divisor_sum_inv.expr(),
)?;
for (i, remainder_expr) in remainder_expr.iter().enumerate() {
cb.condition_require_zero(
|| format!("check_divisor_zero_{}", i),
remainder_zero.expr(),
remainder_expr.clone(),
)?;
}
let remainder_sum_inv = cb.create_witin(|| "remainder_sum_inv".to_string());
let remainder_sum: Expression<E> = remainder_expr
.iter()
.fold(E::BaseField::ZERO.expr(), |acc, r| acc + r.clone());
let divisor_remainder_not_zero: Expression<E> =
E::BaseField::ONE.expr() - divisor_zero.expr() - remainder_zero.expr();
cb.condition_require_one(
|| "check_remainder_sum_inv",
divisor_remainder_not_zero,
remainder_sum.clone() * remainder_sum_inv.expr(),
)?;
// TODO: can directly define sign_xor as expr?
// Tried once, it will cause degree too high (although increases just one).
// So the current degree is already at the brink of maximal supported.
// The high degree mostly comes from the carry expressions.
let sign_xor = cb.create_witin(|| "sign_xor".to_string());
cb.require_equal(
|| "sign_xor_zero",
dividend_sign.expr() + divisor_sign.expr()
- E::BaseField::from_canonical_u32(2).expr()
* dividend_sign.expr()
* divisor_sign.expr(),
sign_xor.expr(),
)?;
let quotient_sum: Expression<E> = quotient_expr
.iter()
.fold(E::BaseField::ZERO.expr(), |acc, q| acc + q.clone());
cb.condition_require_zero(
|| "check_quotient_sign_eq_xor",
quotient_sum * divisor_not_zero.clone(),
quotient_sign.expr() - sign_xor.expr(),
)?;
cb.condition_require_zero(
|| "check_quotient_sign_zero_when_not_eq_xor",
(quotient_sign.expr() - sign_xor.expr()) * divisor_not_zero.clone(),
quotient_sign.expr(),
)?;
let sign_mask = E::BaseField::from_canonical_u32(1 << (LIMB_BITS - 1));
let remainder_prime = UInt::<E>::new_unchecked(|| "remainder_prime", cb)?;
let remainder_prime_expr = remainder_prime.expr();
let mut carry_lt: [Expression<E>; UINT_LIMBS] =
array::from_fn(|_| E::BaseField::ZERO.expr());
let remainder_inv: [_; UINT_LIMBS] =
array::from_fn(|i| cb.create_witin(|| format!("remainder_inv_{i}")));
for i in 0..UINT_LIMBS {
// When the signs of remainer (i.e., dividend) and divisor are the same, r_prime = r.
cb.condition_require_zero(
|| "r_rp_equal_when_xor_zero",
E::BaseField::ONE.expr() - sign_xor.expr(),
remainder_expr[i].clone() - remainder_prime_expr[i].clone(),
)?;
// When the signs of remainder and divisor are different, r_prime = -r. To constrain this, we
// first ensure each r[i] + r_prime[i] + carry[i - 1] is in {0, 2^LIMB_BITS}, and
// that when the sum is 0 then r_prime[i] = 0 as well. Passing both constraints
// implies that 0 <= r_prime[i] <= 2^LIMB_BITS, and in order to ensure r_prime[i] !=
// 2^LIMB_BITS we check that r_prime[i] - 2^LIMB_BITS has an inverse in F.
let last_carry = if i > 0 {
carry_lt[i - 1].clone()
} else {
E::BaseField::ZERO.expr()
};
carry_lt[i] =
(last_carry.clone() + remainder_expr[i].clone() + remainder_prime_expr[i].clone())
* carry_divide.expr();
cb.condition_require_zero(
|| "check_carry_lt",
sign_xor.expr(),
(carry_lt[i].clone() - last_carry.clone())
* (carry_lt[i].clone() - E::BaseField::ONE.expr()),
)?;
cb.condition_require_zero(
|| "check_remainder_prime_not_max",
sign_xor.expr(),
(remainder_prime_expr[i].clone()
- E::BaseField::from_canonical_u32(1 << LIMB_BITS).expr())
* remainder_inv[i].expr()
- E::BaseField::ONE.expr(),
)?;
cb.condition_require_zero(
|| "check_remainder_prime_zero",
sign_xor.expr() * (E::BaseField::ONE.expr() - carry_lt[i].clone()),
remainder_prime_expr[i].clone(),
)?;
}
let lt_marker: [_; UINT_LIMBS] = array::from_fn(|i| {
cb.create_bit(|| format!("lt_marker_{i}"))
.expect("create bit error")
});
let mut prefix_sum: Expression<E> = divisor_zero.expr() + remainder_zero.expr();
let lt_diff = cb.create_witin(|| "lt_diff");
for i in (0..UINT_LIMBS).rev() {
let diff = remainder_prime_expr[i].clone()
* (E::BaseField::from_canonical_u8(2).expr() * divisor_sign.expr()
- E::BaseField::ONE.expr())
+ divisor_expr[i].clone()
* (E::BaseField::ONE.expr()
- E::BaseField::from_canonical_u8(2).expr() * divisor_sign.expr());
prefix_sum += lt_marker[i].expr();
cb.require_zero(
|| "prefix_sum_not_zero_or_diff_zero",
(E::BaseField::ONE.expr() - prefix_sum.clone()) * diff.clone(),
)?;
cb.condition_require_zero(
|| "check_lt_diff_equal_diff".to_string(),
lt_marker[i].expr(),
lt_diff.expr() - diff.clone(),
)?;
}
// - If r_prime != divisor, then prefix_sum = 1 so marker[i] must be 1 iff i is the first index
// where diff != 0. Constrains that diff == lt_diff where lt_diff is non-zero.
// - If r_prime == divisor, then prefix_sum = 0. Here, prefix_sum cannot be 1 because all diff are
// zero, making diff == lt_diff fails.
cb.require_one(|| "prefix_sum_one", prefix_sum.clone())?;
// When not special case (divisor = 0 or remainder = 0), ensure lt_diff
// is not zero by a range check
cb.assert_dynamic_range(
|| "lt_diff_nonzero",
(lt_diff.expr() - E::BaseField::ONE.expr())
* (E::BaseField::ONE.expr() - divisor_zero.expr() - remainder_zero.expr()),
E::BaseField::from_canonical_u32(16).expr(),
)?;
match I::INST_KIND {
InsnKind::DIV | InsnKind::REM => {
cb.assert_dynamic_range(
|| "div_rem_range_check_dividend_last",
E::BaseField::from_canonical_u32(2).expr()
* (dividend_expr[UINT_LIMBS - 1].clone()
- dividend_sign.expr() * sign_mask.expr()),
E::BaseField::from_canonical_u32(16).expr(),
)?;
cb.assert_dynamic_range(
|| "div_rem_range_check_divisor_last",
E::BaseField::from_canonical_u32(2).expr()
* (divisor_expr[UINT_LIMBS - 1].clone()
- divisor_sign.expr() * sign_mask.expr()),
E::BaseField::from_canonical_u32(16).expr(),
)?;
}
InsnKind::DIVU | InsnKind::REMU => {
cb.require_zero(
|| "divu_remu_sign_equal_zero",
dividend_sign.expr() + divisor_sign.expr(),
)?;
}
_ => unreachable!("Unsupported instruction kind"),
}
Ok(DivRemConfig {
dividend,
divisor,
quotient,
remainder,
r_insn,
dividend_sign,
divisor_sign,
quotient_sign,
remainder_zero,
divisor_zero,
divisor_sum_inv,
remainder_sum_inv,
remainder_inv,
sign_xor,
remainder_prime,
lt_marker,
lt_diff,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [E::BaseField],
lkm: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
// dividend = quotient * divisor + remainder
let dividend = step.rs1().unwrap().value;
let dividend_value = Value::new_unchecked(dividend);
let dividend_limbs = dividend_value.as_u16_limbs();
config.dividend.assign_limbs(instance, dividend_limbs);
let divisor = step.rs2().unwrap().value;
let divisor_value = Value::new_unchecked(divisor);
let divisor_limbs = divisor_value.as_u16_limbs();
config.divisor.assign_limbs(instance, divisor_limbs);
// R-type instruction
config
.r_insn
.assign_instance(instance, shard_ctx, lkm, step)?;
let (signed, _div) = match I::INST_KIND {
InsnKind::DIV => (true, true),
InsnKind::REM => (true, false),
InsnKind::DIVU => (false, true),
InsnKind::REMU => (false, false),
_ => unreachable!("Unsupported instruction kind"),
};
let (quotient, remainder, dividend_sign, divisor_sign, quotient_sign, case) =
run_divrem(signed, &u32_to_limbs(÷nd), &u32_to_limbs(&divisor));
let quotient_val = Value::new(limbs_to_u32("ient), lkm);
let remainder_val = Value::new(limbs_to_u32(&remainder), lkm);
config
.quotient
.assign_limbs(instance, quotient_val.as_u16_limbs());
config
.remainder
.assign_limbs(instance, remainder_val.as_u16_limbs());
set_val!(instance, config.dividend_sign, dividend_sign as u64);
set_val!(instance, config.divisor_sign, divisor_sign as u64);
set_val!(instance, config.quotient_sign, quotient_sign as u64);
set_val!(
instance,
config.divisor_zero,
(case == DivRemCoreSpecialCase::ZeroDivisor) as u64
);
let carries = run_mul_carries(
signed,
&u32_to_limbs(&divisor),
"ient,
&remainder,
quotient_sign,
);
for i in 0..UINT_LIMBS {
lkm.assert_dynamic_range(carries[i] as u64, LIMB_BITS as u64 + 2);
lkm.assert_dynamic_range(carries[i + UINT_LIMBS] as u64, LIMB_BITS as u64 + 2);
}
let sign_xor = dividend_sign ^ divisor_sign;
let remainder_prime = if sign_xor {
negate(&remainder)
} else {
remainder
};
let remainder_zero =
remainder.iter().all(|&v| v == 0) && case != DivRemCoreSpecialCase::ZeroDivisor;
set_val!(instance, config.remainder_zero, remainder_zero as u64);
if signed {
let dividend_sign_mask = if dividend_sign {
1 << (LIMB_BITS - 1)
} else {
0
};
let divisor_sign_mask = if divisor_sign {
1 << (LIMB_BITS - 1)
} else {
0
};
lkm.assert_dynamic_range(
(dividend_limbs[UINT_LIMBS - 1] as u64 - dividend_sign_mask) << 1,
16,
);
lkm.assert_dynamic_range(
(divisor_limbs[UINT_LIMBS - 1] as u64 - divisor_sign_mask) << 1,
16,
);
}
let divisor_sum_f = divisor_limbs.iter().fold(E::BaseField::ZERO, |acc, c| {
acc + E::BaseField::from_canonical_u16(*c)
});
let divisor_sum_inv_f = divisor_sum_f.try_inverse().unwrap_or(E::BaseField::ZERO);
let remainder_sum_f = remainder.iter().fold(E::BaseField::ZERO, |acc, r| {
acc + E::BaseField::from_canonical_u32(*r)
});
let remainder_sum_inv_f = remainder_sum_f.try_inverse().unwrap_or(E::BaseField::ZERO);
let (lt_diff_idx, lt_diff_val) = if case == DivRemCoreSpecialCase::None && !remainder_zero {
let idx = run_sltu_diff_idx(&u32_to_limbs(&divisor), &remainder_prime, divisor_sign);
let val = if divisor_sign {
remainder_prime[idx] - divisor_limbs[idx] as u32
} else {
divisor_limbs[idx] as u32 - remainder_prime[idx]
};
lkm.assert_dynamic_range(val as u64 - 1, 16);
(idx, val)
} else {
lkm.assert_dynamic_range(0, 16);
(UINT_LIMBS, 0)
};
let remainder_prime_f = remainder_prime.map(E::BaseField::from_canonical_u32);
set_val!(instance, config.divisor_sum_inv, divisor_sum_inv_f);
set_val!(instance, config.remainder_sum_inv, remainder_sum_inv_f);
for i in 0..UINT_LIMBS {
set_val!(
instance,
config.remainder_inv[i],
(remainder_prime_f[i] - E::BaseField::from_canonical_u32(1 << LIMB_BITS)).inverse()
);
set_val!(instance, config.lt_marker[i], (i == lt_diff_idx) as u64);
}
set_val!(instance, config.sign_xor, sign_xor as u64);
config.remainder_prime.assign_limbs(
instance,
remainder_prime
.iter()
.map(|x| *x as u16)
.collect::<Vec<_>>()
.as_slice(),
);
set_val!(instance, config.lt_diff, lt_diff_val as u64);
Ok(())
}
}
// Classification of a DIV/REM execution, mirrored by dedicated witness
// columns (e.g. `divisor_zero`) in the circuit.
#[derive(Debug, Eq, PartialEq)]
#[repr(u8)]
pub(super) enum DivRemCoreSpecialCase {
// Ordinary division: non-zero divisor, no signed overflow.
None,
// Division by zero: quotient is all ones, remainder equals the dividend.
ZeroDivisor,
// Signed overflow (most negative dividend divided by -1): the quotient
// wraps to the dividend and the remainder is zero.
SignedOverflow,
}
// Emulates RISC-V division/remainder on little-endian limb arrays.
// Returns (quotient, remainder, x_sign, y_sign, q_sign, case) where `case`
// distinguishes the normal path from the zero-divisor and signed-overflow
// special cases (see `DivRemCoreSpecialCase`).
pub(super) fn run_divrem(
signed: bool,
x: &[u32; UINT_LIMBS],
y: &[u32; UINT_LIMBS],
) -> (
[u32; UINT_LIMBS],
[u32; UINT_LIMBS],
bool,
bool,
bool,
DivRemCoreSpecialCase,
) {
// Sign bits (top bit of the most significant limb) only matter when signed.
let x_sign = signed && (x[UINT_LIMBS - 1] >> (LIMB_BITS - 1) == 1);
let y_sign = signed && (y[UINT_LIMBS - 1] >> (LIMB_BITS - 1) == 1);
let max_limb = (1 << LIMB_BITS) - 1;
let zero_divisor = y.iter().all(|val| *val == 0);
// Signed overflow: most negative dividend (sign bit set, all other bits 0)
// divided by -1 (all limbs all-ones).
let overflow = x[UINT_LIMBS - 1] == 1 << (LIMB_BITS - 1)
&& x[..(UINT_LIMBS - 1)].iter().all(|val| *val == 0)
&& y.iter().all(|val| *val == max_limb)
&& x_sign
&& y_sign;
if zero_divisor {
// RISC-V: x / 0 yields an all-ones quotient and remainder = dividend.
return (
[max_limb; UINT_LIMBS],
*x,
x_sign,
y_sign,
signed,
DivRemCoreSpecialCase::ZeroDivisor,
);
} else if overflow {
// RISC-V: overflow keeps the dividend as quotient with remainder 0.
return (
*x,
[0; UINT_LIMBS],
x_sign,
y_sign,
false,
DivRemCoreSpecialCase::SignedOverflow,
);
}
// Normal path: divide the magnitudes, then restore signs.
let x_abs = if x_sign { negate(x) } else { *x };
let y_abs = if y_sign { negate(y) } else { *y };
let x_big = limbs_to_u32(&x_abs);
let y_big = limbs_to_u32(&y_abs);
let q_big = x_big / y_big;
let r_big = x_big % y_big;
let q = if x_sign ^ y_sign {
negate(&u32_to_limbs(&q_big))
} else {
u32_to_limbs(&q_big)
};
let q_sign = signed && (q[UINT_LIMBS - 1] >> (LIMB_BITS - 1) == 1);
// In C |q * y| <= |x|, which means if x is negative then r <= 0 and vice versa.
let r = if x_sign {
negate(&u32_to_limbs(&r_big))
} else {
u32_to_limbs(&r_big)
};
(q, r, x_sign, y_sign, q_sign, DivRemCoreSpecialCase::None)
}
// Finds the most significant limb index where `x` and `y` differ, asserting
// that the unsigned comparison outcome at that limb matches `cmp`
// (`x < y` limb-wise iff `cmp`). Returns UINT_LIMBS when the arrays are
// equal, in which case `cmp` must be false.
pub(super) fn run_sltu_diff_idx(x: &[u32; UINT_LIMBS], y: &[u32; UINT_LIMBS], cmp: bool) -> usize {
    match (0..UINT_LIMBS).rev().find(|&i| x[i] != y[i]) {
        Some(idx) => {
            // The first (most significant) differing limb decides the comparison.
            assert!((x[idx] < y[idx]) == cmp);
            idx
        }
        None => {
            // All limbs equal, so "x < y" must not have been claimed.
            assert!(!cmp);
            UINT_LIMBS
        }
    }
}
// returns carries of d * q + r
// The first UINT_LIMBS entries are the carries of the low product limbs; the
// remaining entries cover the high limbs, where in signed mode d, q and r are
// virtually extended by sign limbs (all ones when negative, zero otherwise).
pub(super) fn run_mul_carries(
signed: bool,
d: &[u32; UINT_LIMBS],
q: &[u32; UINT_LIMBS],
r: &[u32; UINT_LIMBS],
q_sign: bool,
) -> Vec<u32> {
let mut carry = vec![0u32; 2 * UINT_LIMBS];
// Low half: limb i of d * q plus r[i] plus the incoming carry.
for i in 0..UINT_LIMBS {
let mut val: u64 = r[i] as u64 + if i > 0 { carry[i - 1] } else { 0 } as u64;
for j in 0..=i {
val += d[j] as u64 * q[i - j] as u64;
}
carry[i] = (val >> LIMB_BITS) as u32;
}
// Sign-extension limbs: all ones when the corresponding value is negative
// and the operation is signed, otherwise zero.
let q_ext = if q_sign && signed {
(1 << LIMB_BITS) - 1
} else {
0
};
let d_ext =
(d[UINT_LIMBS - 1] >> (LIMB_BITS - 1)) * if signed { (1 << LIMB_BITS) - 1 } else { 0 };
let r_ext =
(r[UINT_LIMBS - 1] >> (LIMB_BITS - 1)) * if signed { (1 << LIMB_BITS) - 1 } else { 0 };
let mut d_prefix = 0;
let mut q_prefix = 0;
// High half: prefix sums pick up the constant extension limbs, plus the
// remaining cross terms d[j] * q[UINT_LIMBS + i - j].
for i in 0..UINT_LIMBS {
d_prefix += d[i];
q_prefix += q[i];
let mut val: u64 = carry[UINT_LIMBS + i - 1] as u64
+ (d_prefix as u64 * q_ext as u64)
+ (q_prefix as u64 * d_ext as u64)
+ r_ext as u64;
for j in (i + 1)..UINT_LIMBS {
val += d[j] as u64 * q[UINT_LIMBS + i - j] as u64;
}
carry[UINT_LIMBS + i] = (val >> LIMB_BITS) as u32;
}
carry
}
// Recombines little-endian limbs of LIMB_BITS bits each into a single u32.
fn limbs_to_u32(x: &[u32; UINT_LIMBS]) -> u32 {
    // Horner evaluation, walking from the most significant limb down.
    x.iter()
        .rev()
        .fold(0u32, |acc, &limb| acc * (1 << LIMB_BITS) + limb)
}
fn u32_to_limbs(x: &u32) -> [u32; UINT_LIMBS] {
let mut res = [0; UINT_LIMBS];
let mut x = *x;
let base = 1u32 << LIMB_BITS;
for limb in res.iter_mut() {
let (quot, rem) = (x / base, x % base);
*limb = rem;
x = quot;
}
debug_assert_eq!(x, 0u32);
res
}
// Limb-wise two's complement negation over UINT_LIMBS limbs of LIMB_BITS
// bits each, i.e. (2^(UINT_LIMBS*LIMB_BITS) - x) reduced modulo that width.
fn negate(x: &[u32; UINT_LIMBS]) -> [u32; UINT_LIMBS] {
    let base = 1u32 << LIMB_BITS;
    // -x = !x + 1, with the +1 carried limb by limb.
    let mut carry = 1;
    array::from_fn(|i| {
        let val = (base - 1 - x[i]) + carry;
        carry = val >> LIMB_BITS;
        val & (base - 1)
    })
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/memory/test.rs | ceno_zkvm/src/instructions/riscv/memory/test.rs | use crate::{
Value,
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::ShardContext,
instructions::{
Instruction,
riscv::{
LbInstruction, LbuInstruction, LhInstruction, LhuInstruction, RIVInstruction,
constants::UInt,
memory::{
LbOp, LbuOp, LhOp, LhuOp, LwInstruction, LwOp, SBOp, SHOp, SWOp, SbInstruction,
ShInstruction, SwInstruction,
},
},
},
scheme::mock_prover::{MOCK_PC_START, MockProver},
structs::ProgramParams,
};
use ceno_emul::{ByteAddr, Change, InsnKind, ReadOp, StepRecord, Word, WriteOp, encode_rv32};
#[cfg(feature = "u16limb_circuit")]
use ff_ext::BabyBearExt4;
use ff_ext::{ExtensionField, GoldilocksExt2};
use gkr_iop::circuit_builder::DebugIndex;
use std::hash::Hash;
// Reference model of SB: replaces the byte at byte offset `shift` of `prev`
// with the low byte of `rs2`.
fn sb(prev: Word, rs2: Word, shift: u32) -> Word {
    let shift = (shift * 8) as usize;
    let byte_mask = 0xff << shift;
    (prev & !byte_mask) | ((rs2 & 0xff) << shift)
}
// Reference model of SH: replaces the halfword at byte offset `shift` of
// `prev` with the low halfword of `rs2`.
fn sh(prev: Word, rs2: Word, shift: u32) -> Word {
    // Halfword stores must be 2-byte aligned.
    assert_eq!(shift & 1, 0);
    let shift = (shift * 8) as usize;
    let half_mask = 0xffff << shift;
    (prev & !half_mask) | ((rs2 & 0xffff) << shift)
}
// Reference model of SW: the whole word is replaced, so the previous memory
// value is irrelevant.
fn sw(_prev: Word, rs2: Word) -> Word {
rs2
}
// Sign-extends the low `n_bits` of `val` to a full 32-bit value.
// Only 8- and 16-bit widths are supported; anything else is a test bug.
fn signed_extend(val: u32, n_bits: u32) -> u32 {
    if n_bits == 8 {
        // Truncate to the low byte, reinterpret as signed, widen back to u32.
        (val as i8) as u32
    } else if n_bits == 16 {
        (val as i16) as u32
    } else {
        unreachable!("unsupported n_bits = {}", n_bits)
    }
}
// Reference model of the rd value produced by a load reading the aligned
// word `mem_value` at byte offset `shift` within that word.
fn load(mem_value: Word, insn: InsnKind, shift: u32) -> Word {
    // Move the addressed byte/halfword down to the low bits first.
    let shifted = mem_value >> (8 * shift);
    match insn {
        InsnKind::LB => signed_extend(shifted & 0xff_u32, 8),
        InsnKind::LBU => shifted & 0xff_u32,
        InsnKind::LH => {
            // Halfword loads must be 2-byte aligned.
            assert_eq!(shift & 0x01, 0);
            signed_extend(shifted & 0xffff_u32, 16)
        }
        InsnKind::LHU => {
            assert_eq!(shift & 0x01, 0);
            shifted & 0xffff_u32
        }
        InsnKind::LW => {
            // Word loads are 4-byte aligned and return the whole word.
            assert_eq!(shift & 0x03, 0);
            mem_value
        }
        _ => unreachable!(),
    }
}
/// Builds the circuit for store instruction `Inst`, runs one synthetic store
/// step through witness assignment, then checks both the written memory word
/// (against the `sb`/`sh`/`sw` reference helpers) and constraint
/// satisfaction via the mock prover.
fn impl_opcode_store<E: ExtensionField + Hash, I: RIVInstruction, Inst: Instruction<E>>(imm: i32) {
let mut cs = ConstraintSystem::<E>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let config = cb
.namespace(
|| Inst::name(),
|cb| {
let config = Inst::construct_circuit(cb, &ProgramParams::default());
Ok(config)
},
)
.unwrap()
.unwrap();
// rs1 = x2 (base), rs2 = x3 (data); store at rs1 + imm.
let insn_code = encode_rv32(I::INST_KIND, 2, 3, 0, imm);
let prev_mem_value = 0x40302010;
let rs2_word = Word::from(0x12345678_u32);
let rs1_word = Word::from(0x4000000_u32);
let unaligned_addr = ByteAddr::from(rs1_word.wrapping_add_signed(imm));
// Expected post-store memory word, computed by the reference helpers.
let new_mem_value = match I::INST_KIND {
InsnKind::SB => sb(prev_mem_value, rs2_word, unaligned_addr.shift()),
InsnKind::SH => sh(prev_mem_value, rs2_word, unaligned_addr.shift()),
InsnKind::SW => sw(prev_mem_value, rs2_word),
x => unreachable!("{:?} is not store instruction", x),
};
let (raw_witin, lkm) = Inst::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&StepRecord::new_s_instruction(
12,
MOCK_PC_START,
insn_code,
rs1_word,
rs2_word,
WriteOp {
addr: unaligned_addr.waddr(),
value: Change {
before: prev_mem_value,
after: new_mem_value,
},
previous_cycle: 4,
},
8,
)],
)
.unwrap();
// verify mem_write
let expected_mem_written =
UInt::from_const_unchecked(Value::new_unchecked(new_mem_value).as_u16_limbs().to_vec());
let mem_written_expr = cb.get_debug_expr(DebugIndex::MemWrite as usize)[0].clone();
cb.require_equal(
|| "assert_mem_written",
mem_written_expr,
expected_mem_written.value(),
)
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
/// Builds the circuit for load instruction `Inst`, runs one synthetic load
/// step (rd computed by the `load` reference helper), and checks constraint
/// satisfaction with the mock prover.
fn impl_opcode_load<E: ExtensionField + Hash, I: RIVInstruction, Inst: Instruction<E>>(imm: i32) {
let mut cs = ConstraintSystem::<E>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let config = cb
.namespace(
|| Inst::name(),
|cb| {
let config = Inst::construct_circuit(cb, &ProgramParams::default());
Ok(config)
},
)
.unwrap()
.unwrap();
// rs1 = x2 (base), rd = x3; load from rs1 + imm.
let insn_code = encode_rv32(I::INST_KIND, 2, 3, 0, imm);
let mem_value = 0x40302010;
let rs1_word = Word::from(0x4000000_u32);
let prev_rd_word = Word::from(0x12345678_u32);
let unaligned_addr = ByteAddr::from(rs1_word.wrapping_add_signed(imm));
// Expected rd value per the reference model.
let new_rd_word = load(mem_value, I::INST_KIND, unaligned_addr.shift());
let rd_change = Change {
before: prev_rd_word,
after: new_rd_word,
};
let (raw_witin, lkm) = Inst::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&StepRecord::new_im_instruction(
12,
MOCK_PC_START,
insn_code,
rs1_word,
rd_change,
ReadOp {
addr: unaligned_addr.waddr(),
value: mem_value,
previous_cycle: 4,
},
8,
)],
)
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
/// Runs the SB store test over the Goldilocks extension field.
fn impl_opcode_sb(imm: i32) {
impl_opcode_store::<GoldilocksExt2, SBOp, SbInstruction<GoldilocksExt2>>(imm)
}
/// Runs the SH store test over the Goldilocks extension field.
/// Halfword stores require a 2-byte-aligned immediate.
fn impl_opcode_sh(imm: i32) {
assert_eq!(imm & 0x01, 0);
impl_opcode_store::<GoldilocksExt2, SHOp, ShInstruction<GoldilocksExt2>>(imm)
}
/// Runs the SW store test over the Goldilocks extension field.
/// Word stores require a 4-byte-aligned immediate.
fn impl_opcode_sw(imm: i32) {
assert_eq!(imm & 0x03, 0);
impl_opcode_store::<GoldilocksExt2, SWOp, SwInstruction<GoldilocksExt2>>(imm)
}
#[test]
fn test_sb() {
    // Byte stores allow any alignment: cover all byte offsets, with both
    // positive and negative immediates.
    for imm in [0, 5, 10, 15, -4, -3, -2, -1] {
        impl_opcode_sb(imm);
        // Under the u16limb feature also exercise the circuit over BabyBear,
        // matching the load tests, instead of re-running the identical
        // Goldilocks variant a second time.
        #[cfg(feature = "u16limb_circuit")]
        impl_opcode_store::<BabyBearExt4, SBOp, SbInstruction<BabyBearExt4>>(imm);
    }
}
#[test]
fn test_sh() {
    // Halfword stores require 2-byte-aligned immediates.
    for imm in [0, 2, -4, -2] {
        impl_opcode_sh(imm);
        // Under the u16limb feature also exercise the circuit over BabyBear,
        // matching the load tests, instead of re-running the identical
        // Goldilocks variant a second time.
        #[cfg(feature = "u16limb_circuit")]
        impl_opcode_store::<BabyBearExt4, SHOp, ShInstruction<BabyBearExt4>>(imm);
    }
}
#[test]
fn test_sw() {
    // Word stores require 4-byte-aligned immediates.
    for imm in [0, 4, -4] {
        impl_opcode_sw(imm);
        // Under the u16limb feature also exercise the circuit over BabyBear,
        // matching the load tests, instead of re-running the identical
        // Goldilocks variant a second time.
        #[cfg(feature = "u16limb_circuit")]
        impl_opcode_store::<BabyBearExt4, SWOp, SwInstruction<BabyBearExt4>>(imm);
    }
}
#[test]
fn test_lb() {
    // Byte loads allow any alignment: positive and negative immediates
    // covering each byte offset.
    for imm in [0, 1, 2, 3, -3, -2, -1] {
        impl_opcode_load::<GoldilocksExt2, LbOp, LbInstruction<GoldilocksExt2>>(imm);
        #[cfg(feature = "u16limb_circuit")]
        impl_opcode_load::<BabyBearExt4, LbOp, LbInstruction<BabyBearExt4>>(imm);
    }
}
#[test]
fn test_lbu() {
    // Unsigned byte loads: positive and negative immediates covering each
    // byte offset.
    for imm in [0, 1, 2, 3, -3, -2, -1] {
        impl_opcode_load::<GoldilocksExt2, LbuOp, LbuInstruction<GoldilocksExt2>>(imm);
        #[cfg(feature = "u16limb_circuit")]
        impl_opcode_load::<BabyBearExt4, LbuOp, LbuInstruction<BabyBearExt4>>(imm);
    }
}
#[test]
fn test_lh() {
    // Halfword loads: 2-byte-aligned positive and negative immediates.
    for imm in [0, 2, 4, -4, -2] {
        impl_opcode_load::<GoldilocksExt2, LhOp, LhInstruction<GoldilocksExt2>>(imm);
        #[cfg(feature = "u16limb_circuit")]
        impl_opcode_load::<BabyBearExt4, LhOp, LhInstruction<BabyBearExt4>>(imm);
    }
}
#[test]
fn test_lhu() {
    // Unsigned halfword loads: 2-byte-aligned positive and negative immediates.
    for imm in [0, 2, 4, -4, -2] {
        impl_opcode_load::<GoldilocksExt2, LhuOp, LhuInstruction<GoldilocksExt2>>(imm);
        #[cfg(feature = "u16limb_circuit")]
        impl_opcode_load::<BabyBearExt4, LhuOp, LhuInstruction<BabyBearExt4>>(imm);
    }
}
#[test]
fn test_lw() {
    // Word loads: 4-byte-aligned positive and negative immediates.
    for imm in [0, 4, -4] {
        impl_opcode_load::<GoldilocksExt2, LwOp, LwInstruction<GoldilocksExt2>>(imm);
        #[cfg(feature = "u16limb_circuit")]
        impl_opcode_load::<BabyBearExt4, LwOp, LwInstruction<BabyBearExt4>>(imm);
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/memory/store.rs | ceno_zkvm/src/instructions/riscv/memory/store.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
RIVInstruction, constants::UInt, insn_base::MemAddr, memory::gadget::MemWordUtil,
s_insn::SInstructionConfig,
},
},
structs::ProgramParams,
tables::InsnRecord,
witness::{LkMultiplicity, set_val},
};
use ceno_emul::{ByteAddr, InsnKind, StepRecord};
use ff_ext::{ExtensionField, FieldInto};
use multilinear_extensions::{ToExpr, WitIn};
use std::marker::PhantomData;
// Witness layout for the store circuit (SW/SH/SB).
pub struct StoreConfig<E: ExtensionField, const N_ZEROS: usize> {
// S-type instruction plumbing (decode, registers, memory write).
s_insn: SInstructionConfig<E>,
// rs1 register value (base address).
rs1_read: UInt<E>,
// rs2 register value (data to store).
rs2_read: UInt<E>,
// signed 12-bit immediate offset.
imm: WitIn,
// memory word before the store.
prev_memory_value: UInt<E>,
memory_addr: MemAddr<E>,
// merges rs2 into the previous word for SH/SB; None for SW, which
// overwrites the whole word.
next_memory_value: Option<MemWordUtil<E, N_ZEROS>>,
}
// Zero-sized dispatcher tying a store opcode marker `I` to its circuit.
pub struct StoreInstruction<E, I, const N_ZEROS: usize>(PhantomData<(E, I)>);
// Circuit construction and witness assignment for SW/SH/SB.
impl<E: ExtensionField, I: RIVInstruction, const N_ZEROS: usize> Instruction<E>
for StoreInstruction<E, I, N_ZEROS>
{
type InstructionConfig = StoreConfig<E, N_ZEROS>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?; // unsigned 32-bit value
let rs2_read = UInt::new_unchecked(|| "rs2_read", circuit_builder)?;
let prev_memory_value = UInt::new(|| "prev_memory_value", circuit_builder)?;
let imm = circuit_builder.create_witin(|| "imm"); // signed 12-bit value
// Address alignment requirement depends on the store width.
let memory_addr = match I::INST_KIND {
InsnKind::SW => MemAddr::construct_align4(circuit_builder),
InsnKind::SH => MemAddr::construct_align2(circuit_builder),
InsnKind::SB => MemAddr::construct_unaligned(circuit_builder),
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
}?;
// Sanity-check that rs1 + imm cannot wrap past writable RAM when the
// forbid_overflow feature is enabled.
if cfg!(feature = "forbid_overflow") {
const MAX_RAM_ADDR: u32 = u32::MAX - 0x7FF; // max positive imm is 0x7FF
const MIN_RAM_ADDR: u32 = 0x800; // min negative imm is -0x800
assert!(
!params.platform.can_write(MAX_RAM_ADDR + 1)
&& !params.platform.can_write(MIN_RAM_ADDR - 1)
);
}
// Effective address constraint: memory_addr = rs1 + imm.
circuit_builder.require_equal(
|| "memory_addr = rs1_read + imm",
memory_addr.expr_unaligned(),
rs1_read.value() + imm.expr(),
)?;
// SW overwrites the whole word; SH/SB merge rs2 into the previous word
// via the MemWordUtil gadget.
let (next_memory_value, next_memory) = match I::INST_KIND {
InsnKind::SW => (rs2_read.memory_expr(), None),
InsnKind::SH | InsnKind::SB => {
let next_memory = MemWordUtil::<E, N_ZEROS>::construct_circuit(
circuit_builder,
&memory_addr,
&prev_memory_value,
&rs2_read,
)?;
(next_memory.as_lo_hi().clone(), Some(next_memory))
}
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
};
let s_insn = SInstructionConfig::<E>::construct_circuit(
circuit_builder,
I::INST_KIND,
&imm.expr(),
#[cfg(feature = "u16limb_circuit")]
0.into(),
rs1_read.register_expr(),
rs2_read.register_expr(),
memory_addr.expr_align4(),
prev_memory_value.memory_expr(),
next_memory_value,
)?;
Ok(StoreConfig {
s_insn,
rs1_read,
rs2_read,
imm,
prev_memory_value,
memory_addr,
next_memory_value: next_memory,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [E::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let rs1 = Value::new_unchecked(step.rs1().unwrap().value);
let rs2 = Value::new_unchecked(step.rs2().unwrap().value);
let memory_op = step.memory_op().unwrap();
let imm = InsnRecord::<E::BaseField>::imm_internal(&step.insn());
let prev_mem_value = Value::new(memory_op.value.before, lk_multiplicity);
// Recompute the effective address exactly as the circuit constrains it.
let addr = ByteAddr::from(step.rs1().unwrap().value.wrapping_add_signed(imm.0 as i32));
config
.s_insn
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
config.rs1_read.assign_value(instance, rs1);
config.rs2_read.assign_value(instance, rs2);
set_val!(instance, config.imm, imm.1);
config
.prev_memory_value
.assign_value(instance, prev_mem_value);
config
.memory_addr
.assign_instance(instance, lk_multiplicity, addr.into())?;
// Only SH/SB carry the word-merge gadget.
if let Some(change) = config.next_memory_value.as_ref() {
change.assign_instance(instance, lk_multiplicity, step, addr.shift())?;
}
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/memory/load_v2.rs | ceno_zkvm/src/instructions/riscv/memory/load_v2.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::SignedExtendConfig,
instructions::{
Instruction,
riscv::{
RIVInstruction,
constants::{MEM_BITS, UInt},
im_insn::IMInstructionConfig,
insn_base::MemAddr,
},
},
structs::ProgramParams,
tables::InsnRecord,
witness::{LkMultiplicity, set_val},
};
use ceno_emul::{ByteAddr, InsnKind, StepRecord};
use ff_ext::{ExtensionField, FieldInto};
use itertools::izip;
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::{Field, FieldAlgebra};
use std::marker::PhantomData;
// Witness layout for the v2 load circuit (LW/LH/LHU/LB/LBU).
pub struct LoadConfig<E: ExtensionField> {
// I/M-type instruction plumbing (decode, registers, memory read).
im_insn: IMInstructionConfig<E>,
// rs1 register value (base address).
rs1_read: UInt<E>,
// signed 16-bit immediate, plus its separate sign witness.
imm: WitIn,
imm_sign: WitIn,
memory_addr: MemAddr<E>,
// full aligned word fetched from memory.
memory_read: UInt<E>,
// selected 16-bit limb; present for sub-word loads only.
target_limb: Option<WitIn>,
// [target byte, other byte] of the limb; present for LB/LBU only.
target_limb_bytes: Option<Vec<WitIn>>,
// sign-extension helper; present for LB/LH only.
signed_extend_config: Option<SignedExtendConfig<E>>,
}
// Zero-sized dispatcher tying a load opcode marker `I` to its circuit.
pub struct LoadInstruction<E, I>(PhantomData<(E, I)>);
// Circuit construction and witness assignment for LW/LH/LHU/LB/LBU (v2).
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for LoadInstruction<E, I> {
type InstructionConfig = LoadConfig<E>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?; // unsigned 32-bit value
let imm = circuit_builder.create_witin(|| "imm"); // signed 16-bit value
let imm_sign = circuit_builder.create_witin(|| "imm_sign");
// skip read range check, assuming constraint in write.
let memory_read = UInt::new_unchecked(|| "memory_read", circuit_builder)?;
// Number of constrained-zero low address bits depends on the load width.
let memory_addr = match I::INST_KIND {
InsnKind::LW => MemAddr::construct_with_max_bits(circuit_builder, 2, MEM_BITS),
InsnKind::LH | InsnKind::LHU => {
MemAddr::construct_with_max_bits(circuit_builder, 1, MEM_BITS)
}
InsnKind::LB | InsnKind::LBU => {
MemAddr::construct_with_max_bits(circuit_builder, 0, MEM_BITS)
}
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
}?;
// rs1 + imm = mem_addr
// Limb-wise addition with boolean carries; the low carry folds into the
// high limb, and the final carry is discarded as a checked overflow bit.
let inv = E::BaseField::from_canonical_u32(1 << UInt::<E>::LIMB_BITS).inverse();
let carry = (rs1_read.expr()[0].expr() + imm.expr()
- memory_addr.uint_unaligned().expr()[0].expr())
* inv.expr();
circuit_builder.assert_bit(|| "carry_lo_bit", carry.expr())?;
// High limb of the sign-extended immediate: all ones iff imm_sign.
let imm_extend_limb = imm_sign.expr()
* E::BaseField::from_canonical_u32((1 << UInt::<E>::LIMB_BITS) - 1).expr();
let carry = (rs1_read.expr()[1].expr() + imm_extend_limb.expr() + carry
- memory_addr.uint_unaligned().expr()[1].expr())
* inv.expr();
circuit_builder.assert_bit(|| "overflow_bit", carry)?;
let addr_low_bits = memory_addr.low_bit_exprs();
let memory_value = memory_read.expr();
// get target limb from memory word for load instructions except LW
let target_limb = match I::INST_KIND {
InsnKind::LB | InsnKind::LBU | InsnKind::LH | InsnKind::LHU => {
let target_limb = circuit_builder.create_witin(|| "target_limb");
// Address bit 1 selects the upper or lower 16-bit limb.
circuit_builder.condition_require_equal(
|| "target_limb = memory_value[low_bits[1]]",
addr_low_bits[1].clone(),
target_limb.expr(),
memory_value[1].clone(),
memory_value[0].clone(),
)?;
Some(target_limb)
}
_ => None,
};
// get target byte from memory word for LB and LBU
let (target_byte_expr, target_limb_bytes) = match I::INST_KIND {
InsnKind::LB | InsnKind::LBU => {
let target_byte = circuit_builder.create_u8(|| "limb.le_bytes[low_bits[0]]")?;
let dummy_byte = circuit_builder.create_u8(|| "limb.le_bytes[1-low_bits[0]]")?;
// Address bit 0 selects which byte of the limb is the target;
// the limb must recompose from the two bytes either way.
circuit_builder.condition_require_equal(
|| "target_byte = target_limb[low_bits[0]]",
addr_low_bits[0].clone(),
target_limb.unwrap().expr(),
target_byte.expr() * (1<<8) + dummy_byte.expr(), // target_byte = limb.le_bytes[1]
dummy_byte.expr() * (1<<8) + target_byte.expr(), // target_byte = limb.le_bytes[0]
)?;
(
Some(target_byte.expr()),
Some(vec![target_byte, dummy_byte]),
)
}
_ => (None, None),
};
// rd value: full word for LW, sign-extended limb/byte for LH/LB,
// zero-extended limb/byte for LHU/LBU.
let (signed_extend_config, rd_written) = match I::INST_KIND {
InsnKind::LW => (None, memory_read.clone()),
InsnKind::LH => {
let val = target_limb.unwrap();
let signed_extend_config =
SignedExtendConfig::construct_limb(circuit_builder, val.expr())?;
let rd_written = signed_extend_config.signed_extended_value(val.expr());
(Some(signed_extend_config), rd_written)
}
InsnKind::LHU => {
(
None,
// it's safe to unwrap as `UInt::from_exprs_unchecked` never return error
UInt::from_exprs_unchecked(vec![
target_limb.as_ref().map(|limb| limb.expr()).unwrap(),
Expression::ZERO,
]),
)
}
InsnKind::LB => {
let val = target_byte_expr.unwrap();
let signed_extend_config =
SignedExtendConfig::construct_byte(circuit_builder, val.clone())?;
let rd_written = signed_extend_config.signed_extended_value(val);
(Some(signed_extend_config), rd_written)
}
InsnKind::LBU => (
None,
UInt::from_exprs_unchecked(vec![target_byte_expr.unwrap(), Expression::ZERO]),
),
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
};
let im_insn = IMInstructionConfig::<E>::construct_circuit(
circuit_builder,
I::INST_KIND,
&imm.expr(),
&imm_sign.expr(),
rs1_read.register_expr(),
memory_read.memory_expr(),
memory_addr.expr_align4(),
rd_written.register_expr(),
)?;
Ok(LoadConfig {
im_insn,
rs1_read,
imm,
imm_sign,
memory_addr,
memory_read,
target_limb,
target_limb_bytes,
signed_extend_config,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [E::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let rs1 = Value::new_unchecked(step.rs1().unwrap().value);
let memory_value = step.memory_op().unwrap().value.before;
let memory_read = Value::new_unchecked(memory_value);
// imm is signed 16-bit value
let imm = InsnRecord::<E::BaseField>::imm_internal(&step.insn());
let imm_sign_extend = crate::utils::imm_sign_extend(true, step.insn().imm as i16);
set_val!(
instance,
config.imm_sign,
E::BaseField::from_bool(imm_sign_extend[1] > 0)
);
// Effective byte address, matching the circuit's rs1 + imm constraint.
let unaligned_addr =
ByteAddr::from(step.rs1().unwrap().value.wrapping_add_signed(imm.0 as i32));
let shift = unaligned_addr.shift();
// [bit 0, bit 1] of the byte offset: byte-in-limb and limb-in-word selects.
let addr_low_bits = [shift & 0x01, (shift >> 1) & 0x01];
let target_limb = memory_read.as_u16_limbs()[addr_low_bits[1] as usize];
let mut target_limb_bytes = target_limb.to_le_bytes();
set_val!(instance, config.imm, imm.1);
config
.im_insn
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
config.rs1_read.assign_value(instance, rs1);
config.memory_read.assign_value(instance, memory_read);
config
.memory_addr
.assign_instance(instance, lk_multiplicity, unaligned_addr.into())?;
// Sub-word loads: record the selected limb.
if let Some(&limb) = config.target_limb.as_ref() {
set_val!(
instance,
limb,
E::BaseField::from_canonical_u16(target_limb)
);
}
// Byte loads: record [target byte, other byte], swapped so the target
// byte always lands in the first column.
if let Some(limb_bytes) = config.target_limb_bytes.as_ref() {
if addr_low_bits[0] == 1 {
// target_limb_bytes[0] = target_limb.to_le_bytes[1]
// target_limb_bytes[1] = target_limb.to_le_bytes[0]
target_limb_bytes.reverse();
}
for (&col, byte) in izip!(limb_bytes.iter(), target_limb_bytes.into_iter()) {
lk_multiplicity.assert_ux::<8>(byte as u64);
set_val!(instance, col, E::BaseField::from_canonical_u8(byte));
}
}
// Value fed to the sign-extension gadget (LB/LH only).
let val = match I::INST_KIND {
InsnKind::LB | InsnKind::LBU => target_limb_bytes[0] as u64,
InsnKind::LH | InsnKind::LHU => target_limb as u64,
_ => 0,
};
if let Some(signed_ext_config) = config.signed_extend_config.as_ref() {
signed_ext_config.assign_instance(instance, lk_multiplicity, val)?;
}
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/memory/store_v2.rs | ceno_zkvm/src/instructions/riscv/memory/store_v2.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
RIVInstruction,
constants::{MEM_BITS, UInt},
insn_base::MemAddr,
memory::gadget::MemWordUtil,
s_insn::SInstructionConfig,
},
},
structs::ProgramParams,
tables::InsnRecord,
witness::{LkMultiplicity, set_val},
};
use ceno_emul::{ByteAddr, InsnKind, StepRecord};
use ff_ext::{ExtensionField, FieldInto};
use multilinear_extensions::{ToExpr, WitIn};
use p3::field::{Field, FieldAlgebra};
use std::marker::PhantomData;
// Witness layout for the v2 store circuit (SW/SH/SB).
pub struct StoreConfig<E: ExtensionField, const N_ZEROS: usize> {
// S-type instruction plumbing (decode, registers, memory write).
s_insn: SInstructionConfig<E>,
// rs1 register value (base address).
rs1_read: UInt<E>,
// rs2 register value (data to store).
rs2_read: UInt<E>,
// signed 16-bit immediate, plus its separate sign witness.
imm: WitIn,
imm_sign: WitIn,
// memory word before the store.
prev_memory_value: UInt<E>,
memory_addr: MemAddr<E>,
// merges rs2 into the previous word for SH/SB; None for SW.
next_memory_value: Option<MemWordUtil<E, N_ZEROS>>,
}
// Zero-sized dispatcher tying a store opcode marker `I` to its v2 circuit.
pub struct StoreInstruction<E, I, const N_ZEROS: usize>(PhantomData<(E, I)>);
impl<E: ExtensionField, I: RIVInstruction, const N_ZEROS: usize> Instruction<E>
for StoreInstruction<E, I, N_ZEROS>
{
type InstructionConfig = StoreConfig<E, N_ZEROS>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?; // unsigned 32-bit value
let rs2_read = UInt::new_unchecked(|| "rs2_read", circuit_builder)?;
let prev_memory_value = UInt::new(|| "prev_memory_value", circuit_builder)?;
let imm = circuit_builder.create_witin(|| "imm"); // signed 16-bit value
let imm_sign = circuit_builder.create_witin(|| "imm_sign");
let memory_addr = match I::INST_KIND {
InsnKind::SW => MemAddr::construct_with_max_bits(circuit_builder, 2, MEM_BITS),
InsnKind::SH => MemAddr::construct_with_max_bits(circuit_builder, 1, MEM_BITS),
InsnKind::SB => MemAddr::construct_with_max_bits(circuit_builder, 0, MEM_BITS),
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
}?;
if cfg!(feature = "forbid_overflow") {
const MAX_RAM_ADDR: u32 = u32::MAX - 0x7FF; // max positive imm is 0x7FF
const MIN_RAM_ADDR: u32 = 0x800; // min negative imm is -0x800
assert!(
!params.platform.can_write(MAX_RAM_ADDR + 1)
&& !params.platform.can_write(MIN_RAM_ADDR - 1)
);
}
// rs1 + imm = mem_addr
let inv = E::BaseField::from_canonical_u32(1 << UInt::<E>::LIMB_BITS).inverse();
let carry = (rs1_read.expr()[0].expr() + imm.expr()
- memory_addr.uint_unaligned().expr()[0].expr())
* inv.expr();
circuit_builder.assert_bit(|| "carry_lo_bit", carry.expr())?;
let imm_extend_limb = imm_sign.expr()
* E::BaseField::from_canonical_u32((1 << UInt::<E>::LIMB_BITS) - 1).expr();
let carry = (rs1_read.expr()[1].expr() + imm_extend_limb.expr() + carry
- memory_addr.uint_unaligned().expr()[1].expr())
* inv.expr();
circuit_builder.assert_bit(|| "overflow_bit", carry)?;
let (next_memory_value, next_memory) = match I::INST_KIND {
InsnKind::SW => (rs2_read.memory_expr(), None),
InsnKind::SH | InsnKind::SB => {
let next_memory = MemWordUtil::<E, N_ZEROS>::construct_circuit(
circuit_builder,
&memory_addr,
&prev_memory_value,
&rs2_read,
)?;
(next_memory.as_lo_hi().clone(), Some(next_memory))
}
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
};
let s_insn = SInstructionConfig::<E>::construct_circuit(
circuit_builder,
I::INST_KIND,
&imm.expr(),
&imm_sign.expr(),
rs1_read.register_expr(),
rs2_read.register_expr(),
memory_addr.expr_align4(),
prev_memory_value.memory_expr(),
next_memory_value,
)?;
Ok(StoreConfig {
s_insn,
rs1_read,
rs2_read,
imm,
imm_sign,
prev_memory_value,
memory_addr,
next_memory_value: next_memory,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [E::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let rs1 = Value::new_unchecked(step.rs1().unwrap().value);
let rs2 = Value::new_unchecked(step.rs2().unwrap().value);
let memory_op = step.memory_op().unwrap();
// imm is signed 16-bit value
let imm = InsnRecord::<E::BaseField>::imm_internal(&step.insn());
let imm_sign_extend = crate::utils::imm_sign_extend(true, step.insn().imm as i16);
set_val!(
instance,
config.imm_sign,
E::BaseField::from_bool(imm_sign_extend[1] > 0)
);
let prev_mem_value = Value::new(memory_op.value.before, lk_multiplicity);
let addr = ByteAddr::from(step.rs1().unwrap().value.wrapping_add_signed(imm.0 as i32));
config
.s_insn
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
config.rs1_read.assign_value(instance, rs1);
config.rs2_read.assign_value(instance, rs2);
set_val!(instance, config.imm, imm.1);
config
.prev_memory_value
.assign_value(instance, prev_mem_value);
config
.memory_addr
.assign_instance(instance, lk_multiplicity, addr.into())?;
if let Some(change) = config.next_memory_value.as_ref() {
change.assign_instance(instance, lk_multiplicity, step, addr.shift())?;
}
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/memory/load.rs | ceno_zkvm/src/instructions/riscv/memory/load.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::SignedExtendConfig,
instructions::{
Instruction,
riscv::{
RIVInstruction, constants::UInt, im_insn::IMInstructionConfig, insn_base::MemAddr,
},
},
structs::ProgramParams,
tables::InsnRecord,
witness::{LkMultiplicity, set_val},
};
use ceno_emul::{ByteAddr, InsnKind, StepRecord};
use ff_ext::{ExtensionField, FieldInto};
use itertools::izip;
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::FieldAlgebra;
use std::marker::PhantomData;
pub struct LoadConfig<E: ExtensionField> {
im_insn: IMInstructionConfig<E>,
rs1_read: UInt<E>,
imm: WitIn,
memory_addr: MemAddr<E>,
memory_read: UInt<E>,
target_limb: Option<WitIn>,
target_limb_bytes: Option<Vec<WitIn>>,
signed_extend_config: Option<SignedExtendConfig<E>>,
}
pub struct LoadInstruction<E, I>(PhantomData<(E, I)>);
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for LoadInstruction<E, I> {
type InstructionConfig = LoadConfig<E>;
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?; // unsigned 32-bit value
let imm = circuit_builder.create_witin(|| "imm"); // signed 12-bit value
// skip read range check, assuming constraint in write.
let memory_read = UInt::new_unchecked(|| "memory_read", circuit_builder)?;
let memory_addr = match I::INST_KIND {
InsnKind::LW => MemAddr::construct_align4(circuit_builder),
InsnKind::LH | InsnKind::LHU => MemAddr::construct_align2(circuit_builder),
InsnKind::LB | InsnKind::LBU => MemAddr::construct_unaligned(circuit_builder),
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
}?;
circuit_builder.require_equal(
|| "memory_addr = rs1_read + imm",
memory_addr.expr_unaligned(),
rs1_read.value() + imm.expr(),
)?;
let addr_low_bits = memory_addr.low_bit_exprs();
let memory_value = memory_read.expr();
// get target limb from memory word for load instructions except LW
let target_limb = match I::INST_KIND {
InsnKind::LB | InsnKind::LBU | InsnKind::LH | InsnKind::LHU => {
let target_limb = circuit_builder.create_witin(|| "target_limb");
circuit_builder.condition_require_equal(
|| "target_limb = memory_value[low_bits[1]]",
addr_low_bits[1].clone(),
target_limb.expr(),
memory_value[1].clone(),
memory_value[0].clone(),
)?;
Some(target_limb)
}
_ => None,
};
// get target byte from memory word for LB and LBU
let (target_byte_expr, target_limb_bytes) = match I::INST_KIND {
InsnKind::LB | InsnKind::LBU => {
let target_byte = circuit_builder.create_u8(|| "limb.le_bytes[low_bits[0]]")?;
let dummy_byte = circuit_builder.create_u8(|| "limb.le_bytes[1-low_bits[0]]")?;
circuit_builder.condition_require_equal(
|| "target_byte = target_limb[low_bits[0]]",
addr_low_bits[0].clone(),
target_limb.unwrap().expr(),
target_byte.expr() * (1<<8) + dummy_byte.expr(), // target_byte = limb.le_bytes[1]
dummy_byte.expr() * (1<<8) + target_byte.expr(), // target_byte = limb.le_bytes[0]
)?;
(
Some(target_byte.expr()),
Some(vec![target_byte, dummy_byte]),
)
}
_ => (None, None),
};
let (signed_extend_config, rd_written) = match I::INST_KIND {
InsnKind::LW => (None, memory_read.clone()),
InsnKind::LH => {
let val = target_limb.unwrap();
let signed_extend_config =
SignedExtendConfig::construct_limb(circuit_builder, val.expr())?;
let rd_written = signed_extend_config.signed_extended_value(val.expr());
(Some(signed_extend_config), rd_written)
}
InsnKind::LHU => {
(
None,
// it's safe to unwrap as `UInt::from_exprs_unchecked` never return error
UInt::from_exprs_unchecked(vec![
target_limb.as_ref().map(|limb| limb.expr()).unwrap(),
Expression::ZERO,
]),
)
}
InsnKind::LB => {
let val = target_byte_expr.unwrap();
let signed_extend_config =
SignedExtendConfig::construct_byte(circuit_builder, val.clone())?;
let rd_written = signed_extend_config.signed_extended_value(val);
(Some(signed_extend_config), rd_written)
}
InsnKind::LBU => (
None,
UInt::from_exprs_unchecked(vec![target_byte_expr.unwrap(), Expression::ZERO]),
),
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
};
let im_insn = IMInstructionConfig::<E>::construct_circuit(
circuit_builder,
I::INST_KIND,
&imm.expr(),
#[cfg(feature = "u16limb_circuit")]
0.into(),
rs1_read.register_expr(),
memory_read.memory_expr(),
memory_addr.expr_align4(),
rd_written.register_expr(),
)?;
Ok(LoadConfig {
im_insn,
rs1_read,
imm,
memory_addr,
memory_read,
target_limb,
target_limb_bytes,
signed_extend_config,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [E::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let rs1 = Value::new_unchecked(step.rs1().unwrap().value);
let memory_value = step.memory_op().unwrap().value.before;
let memory_read = Value::new_unchecked(memory_value);
// imm is signed 12-bit value
let imm = InsnRecord::<E::BaseField>::imm_internal(&step.insn());
let unaligned_addr =
ByteAddr::from(step.rs1().unwrap().value.wrapping_add_signed(imm.0 as i32));
let shift = unaligned_addr.shift();
let addr_low_bits = [shift & 0x01, (shift >> 1) & 0x01];
let target_limb = memory_read.as_u16_limbs()[addr_low_bits[1] as usize];
let mut target_limb_bytes = target_limb.to_le_bytes();
set_val!(instance, config.imm, imm.1);
config
.im_insn
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
config.rs1_read.assign_value(instance, rs1);
config.memory_read.assign_value(instance, memory_read);
config
.memory_addr
.assign_instance(instance, lk_multiplicity, unaligned_addr.into())?;
if let Some(&limb) = config.target_limb.as_ref() {
set_val!(
instance,
limb,
E::BaseField::from_canonical_u16(target_limb)
);
}
if let Some(limb_bytes) = config.target_limb_bytes.as_ref() {
if addr_low_bits[0] == 1 {
// target_limb_bytes[0] = target_limb.to_le_bytes[1]
// target_limb_bytes[1] = target_limb.to_le_bytes[0]
target_limb_bytes.reverse();
}
for (&col, byte) in izip!(limb_bytes.iter(), target_limb_bytes.into_iter()) {
lk_multiplicity.assert_ux::<8>(byte as u64);
set_val!(instance, col, E::BaseField::from_canonical_u8(byte));
}
}
let val = match I::INST_KIND {
InsnKind::LB | InsnKind::LBU => target_limb_bytes[0] as u64,
InsnKind::LH | InsnKind::LHU => target_limb as u64,
_ => 0,
};
if let Some(signed_ext_config) = config.signed_extend_config.as_ref() {
signed_ext_config.assign_instance(instance, lk_multiplicity, val)?;
}
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/memory/gadget.rs | ceno_zkvm/src/instructions/riscv/memory/gadget.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
error::ZKVMError,
instructions::riscv::{constants::UInt, insn_base::MemAddr},
witness::LkMultiplicity,
};
use ceno_emul::StepRecord;
use either::Either;
use ff_ext::{ExtensionField, FieldInto};
use itertools::izip;
use multilinear_extensions::{Expression, ToExpr, WitIn};
use p3::field::{Field, FieldAlgebra};
use witness::set_val;
pub struct MemWordUtil<E: ExtensionField, const N_ZEROS: usize> {
prev_limb_bytes: Vec<WitIn>,
rs2_limb_bytes: Vec<WitIn>,
expected_limb: Option<WitIn>,
expect_limbs_expr: [Expression<E>; 2],
}
impl<E: ExtensionField, const N_ZEROS: usize> MemWordUtil<E, N_ZEROS> {
pub(crate) fn construct_circuit(
cb: &mut CircuitBuilder<E>,
addr: &MemAddr<E>,
prev_word: &UInt<E>,
rs2_word: &UInt<E>,
) -> Result<Self, ZKVMError> {
let alloc_bytes = |cb: &mut CircuitBuilder<E>,
anno: &str,
num_bytes: usize|
-> Result<Vec<WitIn>, ZKVMError> {
(0..num_bytes)
.map(|i| {
let byte = cb.create_witin(|| format!("{}.le_bytes[{}]", anno, i));
cb.assert_ux::<_, _, 8>(|| "byte range check", byte.expr())?;
Ok(byte)
})
.collect()
};
let decompose_limb = |cb: &mut CircuitBuilder<E>,
limb_anno: &str,
limb: &Expression<E>,
num_bytes: usize|
-> Result<Vec<WitIn>, ZKVMError> {
let bytes = alloc_bytes(cb, limb_anno, num_bytes)?;
cb.require_equal(
|| format!("decompose {} into {} bytes", limb_anno, num_bytes),
limb.clone(),
bytes
.iter()
.enumerate()
.map(|(idx, byte)| byte.expr() << (idx * 8))
.sum(),
)?;
Ok(bytes)
};
assert!(prev_word.wits_in().is_some() && rs2_word.wits_in().is_some());
let low_bits = addr.low_bit_exprs();
let prev_limbs = prev_word.expr();
let rs2_limbs = rs2_word.expr();
assert_eq!(UInt::<E>::NUM_LIMBS, 2);
// for sb (n_zeros = 0)
let (expected_limb, prev_limb_bytes, rs2_limb_bytes) = match N_ZEROS {
0 => {
let expected_limb = cb.create_witin(|| "expected_limb");
// degree 2 expression
let prev_target_limb = cb.select(&low_bits[1], &prev_limbs[1], &prev_limbs[0]);
let prev_limb_bytes = decompose_limb(cb, "prev_limb", &prev_target_limb, 2)?;
// extract the least significant byte from u16 limb
let rs2_limb_bytes = alloc_bytes(cb, "rs2_limb[0]", 1)?;
let u8_base_inv = E::BaseField::from_canonical_u64(1 << 8).inverse();
cb.assert_ux::<_, _, 8>(
|| "rs2_limb[0].le_bytes[1]",
u8_base_inv.expr() * (&rs2_limbs[0] - rs2_limb_bytes[0].expr()),
)?;
cb.condition_require_equal(
|| "expected_limb = select(low_bits[0], rs2_limb_bytes[0] ++ prev_limb_bytes[0], prev_limb_bytes[1] ++ rs2_limb_bytes[0])",
low_bits[0].clone(),
expected_limb.expr(),
(rs2_limb_bytes[0].expr() << 8) + prev_limb_bytes[0].expr(),
(prev_limb_bytes[1].expr() << 8) + rs2_limb_bytes[0].expr(),
)?;
(Either::Left(expected_limb), prev_limb_bytes, rs2_limb_bytes)
}
// for sh (n_zeros = 1)
1 => (Either::Right(rs2_limbs[0].expr()), vec![], vec![]),
_ => unreachable!("N_ZEROS cannot be larger than 1"),
};
let hi_limb = cb.select(
&low_bits[1],
&expected_limb
.as_ref()
.map_either(|witin| witin.expr(), |expr| expr.expr())
.into_inner(),
&prev_limbs[1],
);
let lo_limb = cb.select(
&low_bits[1],
&prev_limbs[0],
&expected_limb
.as_ref()
.map_either(|witin| witin.expr(), |expr| expr.expr())
.into_inner(),
);
Ok(MemWordUtil {
prev_limb_bytes,
rs2_limb_bytes,
expected_limb: expected_limb.map_either(Some, |_| None).into_inner(),
expect_limbs_expr: [lo_limb, hi_limb],
})
}
pub(crate) fn as_lo_hi(&self) -> &[Expression<E>; 2] {
&self.expect_limbs_expr
}
pub fn assign_instance(
&self,
instance: &mut [E::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
shift: u32,
) -> Result<(), ZKVMError> {
let memory_op = step.memory_op().clone().unwrap();
let prev_value = Value::new_unchecked(memory_op.value.before);
let rs2_value = Value::new_unchecked(step.rs2().unwrap().value);
let low_bits = [shift & 1, (shift >> 1) & 1];
let prev_limb = prev_value.as_u16_limbs()[low_bits[1] as usize];
let rs2_limb = rs2_value.as_u16_limbs()[0];
match N_ZEROS {
0 => {
for (&col, byte) in izip!(&self.prev_limb_bytes, prev_limb.to_le_bytes()) {
set_val!(instance, col, E::BaseField::from_canonical_u8(byte));
lk_multiplicity.assert_ux::<8>(byte as u64);
}
let Some(expected_limb_witin) = self.expected_limb.as_ref() else {
unreachable!()
};
set_val!(
instance,
self.rs2_limb_bytes[0],
E::BaseField::from_canonical_u8(rs2_limb.to_le_bytes()[0])
);
rs2_limb.to_le_bytes().into_iter().for_each(|byte| {
lk_multiplicity.assert_ux::<8>(byte as u64);
});
let change = if low_bits[0] == 0 {
E::BaseField::from_canonical_u16((prev_limb.to_le_bytes()[1] as u16) << 8)
+ E::BaseField::from_canonical_u8(rs2_limb.to_le_bytes()[0])
} else {
E::BaseField::from_canonical_u16((rs2_limb.to_le_bytes()[0] as u16) << 8)
+ E::BaseField::from_canonical_u8(prev_limb.to_le_bytes()[0])
};
set_val!(instance, expected_limb_witin, change);
}
1 => {
// do nothing
}
_ => unreachable!("N_ZEROS cannot be larger than 1"),
}
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/dummy/test.rs | ceno_zkvm/src/instructions/riscv/dummy/test.rs | use ceno_emul::{Change, InsnKind, KeccakSpec, StepRecord, encode_rv32};
use ff_ext::GoldilocksExt2;
use super::*;
use crate::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::ShardContext,
instructions::{
Instruction,
riscv::{arith::AddOp, branch::BeqOp, ecall::EcallDummy},
},
scheme::mock_prover::{MOCK_PC_START, MockProver},
structs::ProgramParams,
};
type AddDummy<E> = DummyInstruction<E, AddOp>;
type BeqDummy<E> = DummyInstruction<E, BeqOp>;
#[test]
fn test_dummy_ecall() {
let mut cs = ConstraintSystem::<GoldilocksExt2>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let config = cb
.namespace(
|| "ecall_dummy",
|cb| {
let config = EcallDummy::construct_circuit(cb, &ProgramParams::default());
Ok(config)
},
)
.unwrap()
.unwrap();
let step = StepRecord::new_ecall_any(4, MOCK_PC_START);
let insn_code = step.insn();
let (raw_witin, lkm) = EcallDummy::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&step],
)
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
#[test]
fn test_dummy_keccak() {
type KeccakDummy = LargeEcallDummy<GoldilocksExt2, KeccakSpec>;
let mut cs = ConstraintSystem::<GoldilocksExt2>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let config = cb
.namespace(
|| "keccak_dummy",
|cb| {
let config = KeccakDummy::construct_circuit(cb, &ProgramParams::default());
Ok(config)
},
)
.unwrap()
.unwrap();
let (step, program) = ceno_emul::test_utils::keccak_step();
let (raw_witin, lkm) = KeccakDummy::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&step],
)
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &program, None, Some(lkm));
}
#[test]
fn test_dummy_r() {
let mut cs = ConstraintSystem::<GoldilocksExt2>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let config = cb
.namespace(
|| "add_dummy",
|cb| {
let config = AddDummy::construct_circuit(cb, &ProgramParams::default());
Ok(config)
},
)
.unwrap()
.unwrap();
let insn_code = encode_rv32(InsnKind::ADD, 2, 3, 4, 0);
let (raw_witin, lkm) = AddDummy::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&StepRecord::new_r_instruction(
3,
MOCK_PC_START,
insn_code,
11,
0xfffffffe,
Change::new(0, 11_u32.wrapping_add(0xfffffffe)),
0,
)],
)
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
#[test]
fn test_dummy_b() {
let mut cs = ConstraintSystem::<GoldilocksExt2>::new(|| "riscv");
let mut cb = CircuitBuilder::new(&mut cs);
let config = cb
.namespace(
|| "beq_dummy",
|cb| {
let config = BeqDummy::construct_circuit(cb, &ProgramParams::default());
Ok(config)
},
)
.unwrap()
.unwrap();
let insn_code = encode_rv32(InsnKind::BEQ, 2, 3, 0, 8);
let (raw_witin, lkm) = BeqDummy::assign_instances(
&config,
&mut ShardContext::default(),
cb.cs.num_witin as usize,
cb.cs.num_structural_witin as usize,
vec![&StepRecord::new_b_instruction(
3,
Change::new(MOCK_PC_START, MOCK_PC_START + 8_usize),
insn_code,
0xbead1010,
0xbead1010,
0,
)],
)
.unwrap();
MockProver::assert_satisfied_raw(&cb, raw_witin, &[insn_code], None, Some(lkm));
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/dummy/dummy_ecall.rs | ceno_zkvm/src/instructions/riscv/dummy/dummy_ecall.rs | use std::marker::PhantomData;
use ceno_emul::{Change, InsnKind, StepRecord, SyscallSpec};
use ff_ext::ExtensionField;
use itertools::Itertools;
use super::{super::insn_base::WriteMEM, dummy_circuit::DummyConfig};
use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{constants::UInt, insn_base::WriteRD},
},
structs::ProgramParams,
witness::LkMultiplicity,
};
use ff_ext::FieldInto;
use multilinear_extensions::{ToExpr, WitIn};
use witness::set_val;
/// LargeEcallDummy can handle any instruction and produce its effects,
/// including multiple memory operations.
///
/// Unsafe: The content is not constrained.
pub struct LargeEcallDummy<E, S>(PhantomData<(E, S)>);
impl<E: ExtensionField, S: SyscallSpec> Instruction<E> for LargeEcallDummy<E, S> {
type InstructionConfig = LargeEcallConfig<E>;
fn name() -> String {
S::NAME.to_owned()
}
fn construct_circuit(
cb: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
let dummy_insn = DummyConfig::construct_circuit(
cb,
InsnKind::ECALL,
true, // Read the ecall function code.
false,
false,
false,
false,
false,
)?;
let start_addr = if S::MEM_OPS_COUNT > 0 {
Some(cb.create_witin(|| "mem_addr"))
} else {
None
};
let reg_writes = (0..S::REG_OPS_COUNT)
.map(|i| {
let val_after = UInt::new_unchecked(|| format!("reg_after_{}", i), cb)?;
WriteRD::construct_circuit(cb, val_after.register_expr(), dummy_insn.ts())
.map(|writer| (val_after, writer))
})
.collect::<Result<Vec<_>, _>>()?;
let mem_writes = (0..S::MEM_OPS_COUNT)
.map(|i| {
let val_before = UInt::new_unchecked(|| format!("mem_before_{}_WRITE_ARG", i), cb)?;
let val_after = UInt::new(|| format!("mem_after_{}_WRITE_ARG", i), cb)?;
let addr = cb.create_witin(|| format!("addr_{}", i));
WriteMEM::construct_circuit(
cb,
addr.expr(),
val_before.memory_expr(),
val_after.memory_expr(),
dummy_insn.ts(),
)
.map(|writer| (addr, Change::new(val_before, val_after), writer))
})
.collect::<Result<Vec<_>, _>>()?;
Ok(LargeEcallConfig {
dummy_insn,
start_addr,
reg_writes,
mem_writes,
})
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [E::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
let ops = &step.syscall().expect("syscall step");
// Assign instruction.
config
.dummy_insn
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
if S::MEM_OPS_COUNT > 0 {
set_val!(
instance,
config.start_addr.as_ref().unwrap(),
u64::from(ops.mem_ops[0].addr)
);
}
// Assign registers.
for ((value, writer), op) in config.reg_writes.iter().zip_eq(&ops.reg_ops) {
value.assign_value(instance, Value::new_unchecked(op.value.after));
writer.assign_op(instance, shard_ctx, lk_multiplicity, step.cycle(), op)?;
}
// Assign memory.
for ((addr, value, writer), op) in config.mem_writes.iter().zip_eq(&ops.mem_ops) {
value
.before
.assign_value(instance, Value::new_unchecked(op.value.before));
value
.after
.assign_value(instance, Value::new(op.value.after, lk_multiplicity));
set_val!(instance, addr, u64::from(op.addr));
writer.assign_op(instance, shard_ctx, lk_multiplicity, step.cycle(), op)?;
}
Ok(())
}
}
#[derive(Debug)]
pub struct LargeEcallConfig<E: ExtensionField> {
dummy_insn: DummyConfig<E>,
reg_writes: Vec<(UInt<E>, WriteRD<E>)>,
start_addr: Option<WitIn>,
mem_writes: Vec<(WitIn, Change<UInt<E>>, WriteMEM)>,
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/dummy/dummy_circuit.rs | ceno_zkvm/src/instructions/riscv/dummy/dummy_circuit.rs | use std::marker::PhantomData;
use ceno_emul::{InsnCategory, InsnFormat, InsnKind, StepRecord};
use ff_ext::ExtensionField;
use super::super::{
RIVInstruction,
constants::UInt,
insn_base::{ReadMEM, ReadRS1, ReadRS2, StateInOut, WriteMEM, WriteRD},
};
use crate::{
chip_handler::general::InstFetch, circuit_builder::CircuitBuilder, e2e::ShardContext,
error::ZKVMError, instructions::Instruction, structs::ProgramParams, tables::InsnRecord,
uint::Value, witness::LkMultiplicity,
};
use ff_ext::FieldInto;
use multilinear_extensions::{ToExpr, WitIn};
#[cfg(feature = "u16limb_circuit")]
use p3::field::FieldAlgebra;
use witness::set_val;
/// DummyInstruction can handle any instruction and produce its side-effects.
pub struct DummyInstruction<E, I>(PhantomData<(E, I)>);
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for DummyInstruction<E, I> {
type InstructionConfig = DummyConfig<E>;
fn name() -> String {
format!("{:?}_DUMMY", I::INST_KIND)
}
fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
let kind = I::INST_KIND;
let format = InsnFormat::from(kind);
let category = InsnCategory::from(kind);
// ECALL can do everything.
let is_ecall = matches!(kind, InsnKind::ECALL);
// Regular instructions do what is implied by their format.
let (with_rs1, with_rs2, with_rd) = match format {
_ if is_ecall => (true, true, true),
InsnFormat::R => (true, true, true),
InsnFormat::I => (true, false, true),
InsnFormat::S => (true, true, false),
InsnFormat::B => (true, true, false),
InsnFormat::U => (false, false, true),
InsnFormat::J => (false, false, true),
};
let with_mem_write = matches!(category, InsnCategory::Store) || is_ecall;
let with_mem_read = matches!(category, InsnCategory::Load);
let branching = matches!(category, InsnCategory::Branch)
|| matches!(kind, InsnKind::JAL | InsnKind::JALR)
|| is_ecall;
DummyConfig::construct_circuit(
circuit_builder,
I::INST_KIND,
with_rs1,
with_rs2,
with_rd,
with_mem_write,
with_mem_read,
branching,
)
}
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [<E as ExtensionField>::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
config.assign_instance(instance, shard_ctx, lk_multiplicity, step)
}
}
#[derive(Debug)]
pub struct MemAddrVal<E: ExtensionField> {
mem_addr: WitIn,
mem_before: UInt<E>,
mem_after: UInt<E>,
}
#[derive(Debug)]
pub struct DummyConfig<E: ExtensionField> {
vm_state: StateInOut<E>,
rs1: Option<(ReadRS1<E>, UInt<E>)>,
rs2: Option<(ReadRS2<E>, UInt<E>)>,
rd: Option<(WriteRD<E>, UInt<E>)>,
mem_addr_val: Option<MemAddrVal<E>>,
mem_read: Option<ReadMEM<E>>,
mem_write: Option<WriteMEM>,
imm: WitIn,
}
impl<E: ExtensionField> DummyConfig<E> {
#[allow(clippy::too_many_arguments)]
pub fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
kind: InsnKind,
with_rs1: bool,
with_rs2: bool,
with_rd: bool,
with_mem_write: bool,
with_mem_read: bool,
branching: bool,
) -> Result<Self, ZKVMError> {
// State in and out
let vm_state = StateInOut::construct_circuit(circuit_builder, branching)?;
// Registers
let rs1 = if with_rs1 {
let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?;
let rs1_op =
ReadRS1::construct_circuit(circuit_builder, rs1_read.register_expr(), vm_state.ts)?;
Some((rs1_op, rs1_read))
} else {
None
};
let rs2 = if with_rs2 {
let rs2_read = UInt::new_unchecked(|| "rs2_read", circuit_builder)?;
let rs2_op =
ReadRS2::construct_circuit(circuit_builder, rs2_read.register_expr(), vm_state.ts)?;
Some((rs2_op, rs2_read))
} else {
None
};
let rd = if with_rd {
let rd_written = UInt::new_unchecked(|| "rd_written", circuit_builder)?;
let rd_op = WriteRD::construct_circuit(
circuit_builder,
rd_written.register_expr(),
vm_state.ts,
)?;
Some((rd_op, rd_written))
} else {
None
};
// Memory
let mem_addr_val = if with_mem_read || with_mem_write {
Some(MemAddrVal {
mem_addr: circuit_builder.create_witin(|| "mem_addr"),
mem_before: UInt::new_unchecked(|| "mem_before", circuit_builder)?,
mem_after: UInt::new(|| "mem_after", circuit_builder)?,
})
} else {
None
};
let mem_read = if with_mem_read {
let Some(MemAddrVal {
mem_addr,
mem_before,
..
}) = mem_addr_val.as_ref()
else {
unreachable!()
};
Some(ReadMEM::construct_circuit(
circuit_builder,
mem_addr.expr(),
mem_before.expr().try_into().unwrap(),
vm_state.ts,
)?)
} else {
None
};
let mem_write = if with_mem_write {
let Some(MemAddrVal {
mem_addr,
mem_before,
mem_after,
}) = mem_addr_val.as_ref()
else {
unreachable!()
};
Some(WriteMEM::construct_circuit(
circuit_builder,
mem_addr.expr(),
mem_before.expr().try_into().unwrap(),
mem_after.expr().try_into().unwrap(),
vm_state.ts,
)?)
} else {
None
};
// Fetch instruction
// The register IDs of ECALL is fixed, not encoded.
let is_ecall = matches!(kind, InsnKind::ECALL);
let rs1_id = match &rs1 {
Some((r, _)) if !is_ecall => r.id.expr(),
_ => 0.into(),
};
let rs2_id = match &rs2 {
Some((r, _)) if !is_ecall => r.id.expr(),
_ => 0.into(),
};
let rd_id = match &rd {
Some((r, _)) if !is_ecall => Some(r.id.expr()),
_ => None,
};
let imm = circuit_builder.create_witin(|| "imm");
circuit_builder.lk_fetch(&InsnRecord::new(
vm_state.pc.expr(),
kind.into(),
rd_id,
rs1_id,
rs2_id,
imm.expr(),
#[cfg(feature = "u16limb_circuit")]
E::BaseField::ZERO.expr(),
))?;
Ok(DummyConfig {
vm_state,
rs1,
rs2,
rd,
mem_addr_val,
mem_read,
mem_write,
imm,
})
}
pub(super) fn assign_instance(
&self,
instance: &mut [<E as ExtensionField>::BaseField],
shard_ctx: &mut ShardContext,
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
// State in and out
self.vm_state.assign_instance(instance, shard_ctx, step)?;
// Fetch instruction
lk_multiplicity.fetch(step.pc().before.0);
// Registers
if let Some((rs1_op, rs1_read)) = &self.rs1 {
rs1_op.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
let rs1_val = Value::new_unchecked(step.rs1().expect("rs1 value").value);
rs1_read.assign_value(instance, rs1_val);
}
if let Some((rs2_op, rs2_read)) = &self.rs2 {
rs2_op.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
let rs2_val = Value::new_unchecked(step.rs2().expect("rs2 value").value);
rs2_read.assign_value(instance, rs2_val);
}
if let Some((rd_op, rd_written)) = &self.rd {
rd_op.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
let rd_val = Value::new_unchecked(step.rd().expect("rd value").value.after);
rd_written.assign_value(instance, rd_val);
}
// Memory
if let Some(MemAddrVal {
mem_addr,
mem_before,
mem_after,
}) = &self.mem_addr_val
{
let mem_op = step.memory_op().expect("memory operation");
set_val!(instance, mem_addr, u64::from(mem_op.addr));
mem_before.assign_value(instance, Value::new_unchecked(mem_op.value.before));
mem_after.assign_value(instance, Value::new(mem_op.value.after, lk_multiplicity));
}
if let Some(mem_read) = &self.mem_read {
mem_read.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
}
if let Some(mem_write) = &self.mem_write {
mem_write.assign_instance::<E>(instance, shard_ctx, lk_multiplicity, step)?;
}
let imm = InsnRecord::<E::BaseField>::imm_internal(&step.insn()).1;
set_val!(instance, self.imm, imm);
Ok(())
}
pub(super) fn ts(&self) -> WitIn {
self.vm_state.ts
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/dummy/mod.rs | ceno_zkvm/src/instructions/riscv/dummy/mod.rs | //! Dummy instruction circuits for testing.
//! Support instructions that don’t have a complete implementation yet.
//! It connects all the state together (register writes, etc), but does not verify the values.
//!
//! Usage:
//! Specify an instruction with `trait RIVInstruction` and define a `DummyInstruction` like so:
//!
//! use ceno_zkvm::instructions::riscv::{arith::AddOp, dummy::DummyInstruction};
//!
//! type AddDummy<E> = DummyInstruction<E, AddOp>;
mod dummy_circuit;
pub use dummy_circuit::{DummyConfig, DummyInstruction};
mod dummy_ecall;
pub use dummy_ecall::LargeEcallDummy;
#[cfg(test)]
mod test;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/mulh/mulh_circuit.rs | ceno_zkvm/src/instructions/riscv/mulh/mulh_circuit.rs | //! Circuit implementations for MULH, MULHU, and MULHSU RISC-V opcodes
//!
//! Approach for computing the upper limb of a product of two 32-bit values
//! which are signed/signed, unsigned/unsigned, or signed/unsigned is the
//! following:
//!
//! - Compute the signed or unsigned value associated with input and output
//! registers of the instruction
//! - Verify that the product of input values is equal to the value obtained
//! by interpreting the output register `rd` as the high limb of a signed or
//! unsigned 64-bit value with some additional 32-bit low limb
//!
//! Soundness of this approach is almost straightforward except for a
//! complication, which is that the 64-bit values represented by `rd` as a
//! high limb have a small number of values that are ambiguously represented
//! as field elements over the Goldilocks field. The numbers for which the
//! projections into Goldilocks are unique are:
//!
//! - Signed 64-bits: `-2^63 + 2^32 - 1` to `2^63 - 2^32`
//! - Unsigned 64-bits: `2^32 - 1` to `2^64 - 2^32`
//!
//! The intervals of values corresponding to products of signed and/or unsigned
//! 32-bit integers are given by
//!
//! - Signed/signed: `-2^62 + 2^31` to `2^62`, length `2^63 - 2^31 + 1`
//! - Unsigned/unsigned: `0` to `2^64 - 2^33 + 1`, length `2^64 - 2^33 + 2`
//! - Signed/unsigned: `-2^63 + 2^31` to `2^63 - 2^32 - 2^31 + 1`, length
//! `2^64 - 2^33 + 2`
//!
//! In particular, all of these intervals have length smaller than the
//! Goldilocks prime `p = 2^64 - 2^32 + 1`, and so these values are uniquely
//! represented as Goldilocks elements. To ensure that the equality of the
//! product of input register values with the full 64-bit value with high limb
//! represented by `rd` is fully unambiguous, it is sufficient to ensure that
//! the domain of product values does not overlap with the intervals of
//! ambiguous 64-bit number representations.
//!
//! This is immediately the case for the signed/signed products because of the
//! smaller length of the interval of product values. Since all signed/signed
//! products lie in the unambiguous range `-2^63 + 2^32 - 1` to `2^63 - 2^32` of
//! 64-bit 2s complement signed values, each such value associated with a
//! product value is uniquely determined.
//!
//! For unsigned/unsigned and signed/unsigned products, the situation is
//! different. For unsigned/unsigned products, the interval of product values
//! between `0` and `2^32 - 2` is represented ambiguously by two unsigned 64-bit
//! values each, as Goldilocks field elements, but only the smaller of these
//! two representations is the correct product value. Similarly for signed/
//! unsigned products, the product values between `-2^63 + 2^31` and
//! `-2^63 + 2^32 - 2` are ambiguously represented by two signed 64-bit values
//! each, as Goldilocks field elements, but only the smaller (more negative) of
//! these gives the correct product.
//!
//! Examples of these ambiguous representations:
//! - Unsigned/unsigned: for `rs1 = rs2 = 0`, the product should be represented
//! by `hi = low = 0`, but can also be represented by `hi = 2^32 - 1` and
//! `low = 1`, so that `hi * 2^32 + low = 2^64 - 2^32 + 1` which is congruent
//! to 0 mod the Goldilocks prime.
//! - Signed/unsigned: for `rs1 = -2^31` and `rs2 = 2^32 - 1`, the product
//! `-2^63 + 2^31` should be represented by `rd = -2^31` and `low = 2^31`,
//! but can also be represented by `rd = 2^31 - 1` and `low = 2^31 + 1`,
//! such that `rd*2^32 + low = 2^63 - 2^32 + 2^31 + 1`, which can be written
//! as `(-2^63 + 2^31) + (2^64 - 2^32 + 1)`.
//!
//! As it happens, this issue can be remedied in each case by the following
//! mitigation: constrain the high limb `rd` to not be equal to its maximal
//! value, which is `2^32 - 1` in the unsigned case, and `2^31 - 1` in the
//! signed case. Removing this possibility eliminates the entire high
//! interval of ambiguous values represented in 64-bits, but still allows
//! representing the entire range of product values in each case.
//! Specifically, with this added restriction, the numbers represented by
//! (restricted) 64-bit values unambiguously over Goldilocks are
//!
//! - Signed (restricted) 64-bits: `-2^63` to `2^63 - 2^32 - 1`
//! - Unsigned (restricted) 64-bits: `0` to `2^64 - 2^32 - 1`
//!
//! With this added check in place, the 64-bit values represented with `rd` as
//! the high limb uniquely represent the product values for unsigned/unsigned
//! and signed/unsigned products.
use std::marker::PhantomData;
use ceno_emul::{InsnKind, StepRecord};
use ff_ext::{ExtensionField, SmallField};
use p3::{field::FieldAlgebra, goldilocks::Goldilocks};
use crate::{
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::{IsEqualConfig, Signed},
instructions::{
Instruction,
riscv::{
RIVInstruction,
constants::{BIT_WIDTH, UInt},
r_insn::RInstructionConfig,
},
},
structs::ProgramParams,
uint::Value,
utils::i64_to_base,
witness::LkMultiplicity,
};
use multilinear_extensions::Expression;
/// Zero-sized circuit type for the multiply family; `E` is the proof field,
/// `I` the opcode marker (`RIVInstruction`) selecting MUL/MULH/MULHU/MULHSU.
pub struct MulhInstructionBase<E, I>(PhantomData<(E, I)>);
/// Per-opcode sign-handling witnesses; the variant is selected by
/// `I::INST_KIND` in `construct_circuit`:
/// `LL` = MUL (low limb in rd), `UU` = MULHU, `SU` = MULHSU, `SS` = MULH.
enum MulhSignDependencies<E: ExtensionField> {
// MUL: rd holds the low limb; the derived high limb is constrained != 2^32 - 1
LL {
constrain_rd: IsEqualConfig,
},
// MULHU: unsigned x unsigned; rd constrained != 2^32 - 1 (see module docs)
UU {
constrain_rd: IsEqualConfig,
},
// MULHSU: signed rs1, unsigned rs2; signed rd constrained != 2^31 - 1
SU {
rs1_signed: Signed<E>,
rd_signed: Signed<E>,
constrain_rd: IsEqualConfig,
},
// MULH: signed x signed; no exclusion value needed (product range is narrow enough)
SS {
rs1_signed: Signed<E>,
rs2_signed: Signed<E>,
rd_signed: Signed<E>,
},
}
/// Witness layout for one multiply-family instruction instance.
pub struct MulhConfig<E: ExtensionField> {
// Source register operands (range-unchecked limbs; validated upstream).
rs1_read: UInt<E>,
rs2_read: UInt<E>,
// Destination register value (range-checked limbs).
rd_written: UInt<E>,
// Opcode-specific sign witnesses and exclusion constraints.
sign_deps: MulhSignDependencies<E>,
// Shared R-type plumbing (PC/ts update, register reads/writes, insn lookup).
r_insn: RInstructionConfig<E>,
/// The low/high part of the result of multiplying two Uint32.
///
/// Whether it's low or high depends on the operation.
prod_lo_hi: UInt<E>,
}
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for MulhInstructionBase<E, I> {
type InstructionConfig = MulhConfig<E>;
/// Circuit name derived from the opcode kind (e.g. "MULH").
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
/// Lays out witnesses and constraints for MUL/MULH/MULHU/MULHSU.
///
/// Sign handling and the high-limb exclusion value that disambiguates
/// Goldilocks representations (see the module-level docs) are chosen by
/// matching on `I::INST_KIND`.
fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<MulhConfig<E>, ZKVMError> {
// The soundness analysis for these constraints is only valid for
// 32-bit registers represented over the Goldilocks field, so verify
// these parameters
assert_eq!(UInt::<E>::TOTAL_BITS, u32::BITS as usize);
assert_eq!(E::BaseField::MODULUS_U64, Goldilocks::MODULUS_U64);
// 0. Registers and instruction lookup
let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?;
let rs2_read = UInt::new_unchecked(|| "rs2_read", circuit_builder)?;
let rd_written = UInt::new(|| "rd_written", circuit_builder)?;
let r_insn = RInstructionConfig::<E>::construct_circuit(
circuit_builder,
I::INST_KIND,
rs1_read.register_expr(),
rs2_read.register_expr(),
rd_written.register_expr(),
)?;
// 1. Compute the signed values associated with `rs1`, `rs2`, `rd` and 2nd half of the prod
let (rs1_val, rs2_val, rd_val, sign_deps, prod_lo_hi) = match I::INST_KIND {
InsnKind::MULH => {
// signed x signed: all three registers get sign-interpretation gadgets;
// no exclusion value is needed (product range is unambiguous, per module docs)
let rs1_signed = Signed::construct_circuit(circuit_builder, || "rs1", &rs1_read)?;
let rs2_signed = Signed::construct_circuit(circuit_builder, || "rs2", &rs2_read)?;
let rd_signed = Signed::construct_circuit(circuit_builder, || "rd", &rd_written)?;
let prod_low = UInt::new(|| "prod_low", circuit_builder)?;
(
rs1_signed.expr(),
rs2_signed.expr(),
rd_signed.expr(),
MulhSignDependencies::SS {
rs1_signed,
rs2_signed,
rd_signed,
},
prod_low,
)
}
InsnKind::MULHU => {
let prod_low = UInt::new(|| "prod_low", circuit_builder)?;
// constrain that rd does not represent 2^32 - 1
let rd_avoid = Expression::<E>::from(u32::MAX);
let constrain_rd = IsEqualConfig::construct_non_equal(
circuit_builder,
|| "constrain_rd",
rd_written.value(),
rd_avoid,
)?;
(
rs1_read.value(),
rs2_read.value(),
rd_written.value(),
MulhSignDependencies::UU { constrain_rd },
prod_low,
)
}
InsnKind::MUL => {
// constrain that prod_hi does not represent 2^32 - 1
let prod_hi_avoid = Expression::<E>::from(u32::MAX);
let prod_hi = UInt::new(|| "prod_hi", circuit_builder)?;
let constrain_rd = IsEqualConfig::construct_non_equal(
circuit_builder,
|| "constrain_prod_hi",
prod_hi.value(),
prod_hi_avoid,
)?;
(
rs1_read.value(),
rs2_read.value(),
rd_written.value(),
MulhSignDependencies::LL { constrain_rd },
prod_hi,
)
}
InsnKind::MULHSU => {
// signed rs1, unsigned rs2, signed rd
let rs1_signed = Signed::construct_circuit(circuit_builder, || "rs1", &rs1_read)?;
let rd_signed = Signed::construct_circuit(circuit_builder, || "rd", &rd_written)?;
let prod_low = UInt::new(|| "prod_low", circuit_builder)?;
// constrain that (signed) rd does not represent 2^31 - 1
let rd_avoid = Expression::<E>::from(i32::MAX);
let constrain_rd = IsEqualConfig::construct_non_equal(
circuit_builder,
|| "constrain_rd",
rd_signed.expr(),
rd_avoid,
)?;
(
rs1_signed.expr(),
rs2_read.value(),
rd_signed.expr(),
MulhSignDependencies::SU {
rs1_signed,
rd_signed,
constrain_rd,
},
prod_low,
)
}
_ => unreachable!("Unsupported instruction kind"),
};
// 2. Verify that the product of signed inputs `rs1` and `rs2` is equal to
// the result of interpreting `rd` as the high limb of a 2s complement
// value
match I::INST_KIND {
InsnKind::MUL => circuit_builder.require_equal(
|| "validate_prod_low_limb",
rs1_val * rs2_val,
(prod_lo_hi.value() << 32) + rd_val,
)?,
// MULH families
InsnKind::MULHU | InsnKind::MULHSU | InsnKind::MULH => circuit_builder.require_equal(
|| "validate_prod_high_limb",
rs1_val * rs2_val,
(rd_val << 32) + prod_lo_hi.value(),
)?,
_ => unreachable!("Unsupported instruction kind"),
}
Ok(MulhConfig {
rs1_read,
rs2_read,
rd_written,
sign_deps,
prod_lo_hi,
r_insn,
})
}
/// Fills one row of witnesses from an executed `StepRecord`, mirroring the
/// per-opcode sign handling set up in `construct_circuit`.
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [<E as ExtensionField>::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
// Read registers from step
let rs1 = step.rs1().unwrap().value;
let rs1_val = Value::new_unchecked(rs1);
config
.rs1_read
.assign_limbs(instance, rs1_val.as_u16_limbs());
let rs2 = step.rs2().unwrap().value;
let rs2_val = Value::new_unchecked(rs2);
config
.rs2_read
.assign_limbs(instance, rs2_val.as_u16_limbs());
let rd = step.rd().unwrap().value.after;
let rd_val = Value::new(rd, lk_multiplicity);
config
.rd_written
.assign_limbs(instance, rd_val.as_u16_limbs());
// R-type instruction
config
.r_insn
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
// Assign signed values, if any, and compute low 32-bit limb of product
let prod_lo_hi = match &config.sign_deps {
MulhSignDependencies::SS {
rs1_signed,
rs2_signed,
rd_signed,
} => {
// Signed register values
let rs1_s = rs1_signed.assign_instance(instance, lk_multiplicity, &rs1_val)?;
let rs2_s = rs2_signed.assign_instance(instance, lk_multiplicity, &rs2_val)?;
rd_signed.assign_instance(instance, lk_multiplicity, &rd_val)?;
// only take the low part of the product
rs1_s.wrapping_mul(rs2_s) as u32
}
MulhSignDependencies::UU { constrain_rd } => {
// assign nonzero value (u32::MAX - rd)
let rd_f = E::BaseField::from_canonical_u64(rd as u64);
let avoid_f = E::BaseField::from_canonical_u32(u32::MAX);
constrain_rd.assign_instance(instance, rd_f, avoid_f)?;
// only take the low part of the product
rs1.wrapping_mul(rs2)
}
MulhSignDependencies::LL { constrain_rd } => {
// MUL: rd must equal the low 32 bits; witness the high limb instead
let prod = rs1_val.as_u64() * rs2_val.as_u64();
let prod_lo = prod as u32;
assert_eq!(prod_lo, rd);
let prod_hi = prod >> BIT_WIDTH;
let avoid_f = E::BaseField::from_canonical_u32(u32::MAX);
constrain_rd.assign_instance(
instance,
E::BaseField::from_canonical_u64(prod_hi),
avoid_f,
)?;
prod_hi as u32
}
MulhSignDependencies::SU {
rs1_signed,
rd_signed,
constrain_rd,
} => {
// Signed register values
let rs1_s = rs1_signed.assign_instance(instance, lk_multiplicity, &rs1_val)?;
let rd_s = rd_signed.assign_instance(instance, lk_multiplicity, &rd_val)?;
// assign nonzero value (i32::MAX - rd)
let rd_f = i64_to_base(rd_s as i64);
let avoid_f = i64_to_base(i32::MAX.into());
constrain_rd.assign_instance(instance, rd_f, avoid_f)?;
// only take the low part of the product
(rs2).wrapping_mul(rs1_s as u32)
}
};
let prod_lo_hi_val = Value::new(prod_lo_hi, lk_multiplicity);
config
.prod_lo_hi
.assign_limbs(instance, prod_lo_hi_val.as_u16_limbs());
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/mulh/mulh_circuit_v2.rs | ceno_zkvm/src/instructions/riscv/mulh/mulh_circuit_v2.rs | use crate::{
circuit_builder::CircuitBuilder,
error::ZKVMError,
instructions::{
Instruction,
riscv::{
RIVInstruction,
constants::{LIMB_BITS, UINT_LIMBS, UInt},
r_insn::RInstructionConfig,
},
},
structs::ProgramParams,
uint::Value,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, StepRecord};
use ff_ext::{ExtensionField, FieldInto};
use multilinear_extensions::{Expression, ToExpr as _, WitIn};
use p3::field::{Field, FieldAlgebra};
use witness::set_val;
use crate::e2e::ShardContext;
use itertools::Itertools;
use std::{array, marker::PhantomData};
/// Zero-sized circuit type for the u16-limb multiply family; `I` selects the opcode.
pub struct MulhInstructionBase<E, I>(PhantomData<(E, I)>);
/// Witness layout for the u16-limb multiply circuit.
pub struct MulhConfig<E: ExtensionField> {
// Source register operands as 16-bit limbs.
rs1_read: UInt<E>,
rs2_read: UInt<E>,
// Shared R-type plumbing (PC/ts update, register accesses, insn lookup).
r_insn: RInstructionConfig<E>,
// Low limbs of the 64-bit product; this is rd for MUL.
rd_low: [WitIn; UINT_LIMBS],
// High limbs of the product; present only for MULH/MULHU/MULHSU (rd there).
rd_high: Option<[WitIn; UINT_LIMBS]>,
// Sign-extension limbs (0 or an all-ones 16-bit mask); only for the MULH family.
rs1_ext: Option<WitIn>,
rs2_ext: Option<WitIn>,
phantom: PhantomData<E>,
}
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for MulhInstructionBase<E, I> {
type InstructionConfig = MulhConfig<E>;
/// Circuit name derived from the opcode kind (e.g. "MULHU").
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
/// Lays out a schoolbook limb multiplication over 2 x 16-bit limbs.
///
/// For each output limb the circuit forms the convolution of input limbs plus
/// the incoming carry, and witnesses the outgoing carry as
/// `(expected - limb) / 2^LIMB_BITS`; limbs are range-checked to 16 bits and
/// carries to 18 bits. For the MULH family, high limbs and sign-extension
/// limbs (`rs1_ext`/`rs2_ext`, each 0 or the all-ones mask) are added and the
/// extension limbs are constrained consistent with the operands' sign bits.
fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<MulhConfig<E>, ZKVMError> {
// This layout is only sound for 32-bit registers split into two 16-bit limbs.
assert_eq!(UInt::<E>::TOTAL_BITS, u32::BITS as usize);
assert_eq!(UInt::<E>::LIMB_BITS, 16);
assert_eq!(UInt::<E>::NUM_LIMBS, 2);
// 0. Registers and instruction lookup
let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?;
let rs2_read = UInt::new_unchecked(|| "rs2_read", circuit_builder)?;
let rs1_expr = rs1_read.expr();
let rs2_expr = rs2_read.expr();
// Field inverse of 2^LIMB_BITS, used to peel the carry out of each limb sum.
let carry_divide = E::BaseField::from_canonical_u32(1 << UInt::<E>::LIMB_BITS).inverse();
let rd_low: [_; UINT_LIMBS] =
array::from_fn(|i| circuit_builder.create_witin(|| format!("rd_low_{i}")));
let mut carry_low: [Expression<E>; UINT_LIMBS] =
array::from_fn(|_| E::BaseField::ZERO.expr());
for i in 0..UINT_LIMBS {
// expected_limb = incoming carry + sum_{k<=i} rs1[k] * rs2[i-k]
let expected_limb = if i == 0 {
E::BaseField::ZERO.expr()
} else {
carry_low[i - 1].clone()
} + (0..=i).fold(E::BaseField::ZERO.expr(), |ac, k| {
ac + (rs1_expr[k].clone() * rs2_expr[i - k].clone())
});
carry_low[i] = carry_divide.expr() * (expected_limb - rd_low[i].expr());
}
for (i, (rd_low, carry_low)) in rd_low.iter().zip(carry_low.iter()).enumerate() {
circuit_builder.assert_dynamic_range(
|| format!("range_check_rd_low_{i}"),
rd_low.expr(),
E::BaseField::from_canonical_u32(16).expr(),
)?;
circuit_builder.assert_dynamic_range(
|| format!("range_check_carry_low_{i}"),
carry_low.expr(),
E::BaseField::from_canonical_u32(18).expr(),
)?;
}
let (rd_high, rs1_ext, rs2_ext) = match I::INST_KIND {
InsnKind::MULH | InsnKind::MULHU | InsnKind::MULHSU => {
let rd_high: [_; UINT_LIMBS] =
array::from_fn(|i| circuit_builder.create_witin(|| format!("rd_high_{i}")));
let rs1_ext = circuit_builder.create_witin(|| "rs1_ext".to_string());
let rs2_ext = circuit_builder.create_witin(|| "rs2_ext".to_string());
let mut carry_high: [Expression<E>; UINT_LIMBS] =
array::from_fn(|_| E::BaseField::ZERO.expr());
for j in 0..UINT_LIMBS {
// High limbs continue the convolution, folding in the sign
// extension limbs of both operands.
let expected_limb =
if j == 0 {
carry_low[UINT_LIMBS - 1].clone()
} else {
carry_high[j - 1].clone()
} + ((j + 1)..UINT_LIMBS).fold(E::BaseField::ZERO.expr(), |acc, k| {
acc + (rs1_expr[k].clone() * rs2_expr[UINT_LIMBS + j - k].clone())
}) + (0..(j + 1)).fold(E::BaseField::ZERO.expr(), |acc, k| {
acc + (rs1_expr[k].clone() * rs2_ext.expr())
+ (rs2_expr[k].clone() * rs1_ext.expr())
});
carry_high[j] = carry_divide.expr() * (expected_limb - rd_high[j].expr());
}
for (i, (rd_high, carry_high)) in rd_high.iter().zip(carry_high.iter()).enumerate()
{
circuit_builder.assert_dynamic_range(
|| format!("range_check_high_{i}"),
rd_high.expr(),
E::BaseField::from_canonical_u32(16).expr(),
)?;
circuit_builder.assert_dynamic_range(
|| format!("range_check_carry_high_{i}"),
carry_high.expr(),
E::BaseField::from_canonical_u32(18).expr(),
)?;
}
// ext limb is sign_bit * (2^LIMB_BITS - 1); recover the boolean sign.
let sign_mask = E::BaseField::from_canonical_u32(1 << (LIMB_BITS - 1));
let ext_inv = E::BaseField::from_canonical_u32((1 << LIMB_BITS) - 1).inverse();
let rs1_sign: Expression<E> = rs1_ext.expr() * ext_inv.expr();
let rs2_sign: Expression<E> = rs2_ext.expr() * ext_inv.expr();
circuit_builder.assert_bit(|| "rs1_sign_bool", rs1_sign.clone())?;
circuit_builder.assert_bit(|| "rs2_sign_bool", rs2_sign.clone())?;
match I::INST_KIND {
InsnKind::MULH => {
// Both operands signed: tie each sign bit to the top limb's MSB
// via a shifted 16-bit range check.
circuit_builder.assert_dynamic_range(
|| "mulh_range_check_rs1_last",
E::BaseField::from_canonical_u32(2).expr()
* (rs1_expr[UINT_LIMBS - 1].clone() - rs1_sign * sign_mask.expr()),
E::BaseField::from_canonical_u32(16).expr(),
)?;
circuit_builder.assert_dynamic_range(
|| "mulh_range_check_rs2_last",
E::BaseField::from_canonical_u32(2).expr()
* (rs2_expr[UINT_LIMBS - 1].clone() - rs2_sign * sign_mask.expr()),
E::BaseField::from_canonical_u32(16).expr(),
)?;
}
InsnKind::MULHU => {
// Both operands unsigned: sign extensions must be zero.
circuit_builder.require_zero(|| "mulhu_rs1_sign_zero", rs1_sign.clone())?;
circuit_builder.require_zero(|| "mulhu_rs2_sign_zero", rs2_sign.clone())?;
}
InsnKind::MULHSU => {
// rs1 signed, rs2 unsigned.
circuit_builder
.require_zero(|| "mulhsu_rs2_sign_zero", rs2_sign.clone())?;
circuit_builder.assert_dynamic_range(
|| "mulhsu_range_check_rs1_last",
E::BaseField::from_canonical_u32(2).expr()
* (rs1_expr[UINT_LIMBS - 1].clone() - rs1_sign * sign_mask.expr()),
E::BaseField::from_canonical_u32(16).expr(),
)?;
circuit_builder.assert_dynamic_range(
|| "mulhsu_range_check_rs2_last",
rs2_expr[UINT_LIMBS - 1].clone() - rs2_sign * sign_mask.expr(),
E::BaseField::from_canonical_u32(16).expr(),
)?;
}
InsnKind::MUL => (),
_ => unreachable!("Unsupported instruction kind"),
}
Some((rd_high, rs1_ext, rs2_ext))
}
InsnKind::MUL => None,
_ => unreachable!("unsupported instruction kind"),
}
.map(|(rd_high, rs1_ext, rs2_ext)| (Some(rd_high), Some(rs1_ext), Some(rs2_ext)))
.unwrap_or_else(|| (None, None, None));
// rd is the high limbs for the MULH family, the low limbs for MUL.
let rd_written = match I::INST_KIND {
InsnKind::MULH | InsnKind::MULHU | InsnKind::MULHSU => UInt::from_exprs_unchecked(
rd_high
.as_ref()
.unwrap()
.iter()
.map(|w| w.expr())
.collect_vec(),
),
InsnKind::MUL => {
UInt::from_exprs_unchecked(rd_low.iter().map(|w| w.expr()).collect_vec())
}
_ => unreachable!("unsupported instruction kind"),
};
let r_insn = RInstructionConfig::<E>::construct_circuit(
circuit_builder,
I::INST_KIND,
rs1_read.register_expr(),
rs2_read.register_expr(),
rd_written.register_expr(),
)?;
Ok(MulhConfig {
rs1_read,
rs2_read,
r_insn,
rd_high,
rd_low,
// carry,
rs1_ext,
rs2_ext,
phantom: PhantomData,
})
}
/// Fills one row of witnesses from an executed `StepRecord`, recomputing
/// limbs/carries with `run_mulh` and recording the matching range lookups.
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [<E as ExtensionField>::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
// Read registers from step
let rs1 = step.rs1().unwrap().value;
let rs1_val = Value::new_unchecked(rs1);
let rs1_limbs = rs1_val.as_u16_limbs();
config.rs1_read.assign_limbs(instance, rs1_limbs);
let rs2 = step.rs2().unwrap().value;
let rs2_val = Value::new_unchecked(rs2);
let rs2_limbs = rs2_val.as_u16_limbs();
config.rs2_read.assign_limbs(instance, rs2_limbs);
// R-type instruction
config
.r_insn
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
let (rd_high, rd_low, carry, rs1_ext, rs2_ext) = run_mulh::<UINT_LIMBS, LIMB_BITS>(
I::INST_KIND,
rs1_val
.as_u16_limbs()
.iter()
.map(|x| *x as u32)
.collect::<Vec<_>>()
.as_slice(),
rs2_val
.as_u16_limbs()
.iter()
.map(|x| *x as u32)
.collect::<Vec<_>>()
.as_slice(),
);
// Mirror the circuit's low-limb / low-carry range checks in the lookup table.
for (rd_low, carry_low) in rd_low.iter().zip(carry[0..UINT_LIMBS].iter()) {
lk_multiplicity.assert_dynamic_range(*rd_low as u64, 16);
lk_multiplicity.assert_dynamic_range(*carry_low as u64, 18);
}
for i in 0..UINT_LIMBS {
set_val!(instance, config.rd_low[i], rd_low[i] as u64);
}
match I::INST_KIND {
InsnKind::MULH | InsnKind::MULHU | InsnKind::MULHSU => {
for i in 0..UINT_LIMBS {
set_val!(
instance,
config.rd_high.as_ref().unwrap()[i],
rd_high[i] as u64
);
}
set_val!(instance, config.rs1_ext.as_ref().unwrap(), rs1_ext as u64);
set_val!(instance, config.rs2_ext.as_ref().unwrap(), rs2_ext as u64);
for (rd_high, carry_high) in rd_high.iter().zip(carry[UINT_LIMBS..].iter()) {
lk_multiplicity.assert_dynamic_range(*rd_high as u64, 16);
lk_multiplicity.assert_dynamic_range(*carry_high as u64, 18);
}
}
_ => (),
}
// Recompute the sign bits from the extension limbs for the sign lookups.
let sign_mask = 1 << (LIMB_BITS - 1);
let ext = (1 << LIMB_BITS) - 1;
let rs1_sign = rs1_ext / ext;
let rs2_sign = rs2_ext / ext;
match I::INST_KIND {
InsnKind::MULH => {
lk_multiplicity.assert_dynamic_range(
(2 * (rs1_limbs[UINT_LIMBS - 1] as u32 - rs1_sign * sign_mask)) as u64,
16,
);
lk_multiplicity.assert_dynamic_range(
(2 * (rs2_limbs[UINT_LIMBS - 1] as u32 - rs2_sign * sign_mask)) as u64,
16,
);
}
InsnKind::MULHU => {}
InsnKind::MULHSU => {
lk_multiplicity.assert_dynamic_range(
(2 * (rs1_limbs[UINT_LIMBS - 1] as u32 - rs1_sign * sign_mask)) as u64,
16,
);
lk_multiplicity.assert_dynamic_range(
(rs2_limbs[UINT_LIMBS - 1] as u32 - rs2_sign * sign_mask) as u64,
16,
);
}
InsnKind::MUL => {}
_ => unreachable!("Unsupported instruction kind"),
}
Ok(())
}
}
/// Witness-side reference computation of the limb-wise product.
///
/// `x` and `y` are the operands as `NUM_LIMBS` little-endian limbs of
/// `LIMB_BITS` bits each. Returns
/// `(high_limbs, low_limbs, carries, x_ext, y_ext)`, where `carries` holds all
/// `2 * NUM_LIMBS` per-limb carries (low half then high half) and
/// `x_ext`/`y_ext` are the sign-extension limbs (the all-ones mask when the
/// operand is treated as signed and its top bit is set, otherwise 0).
fn run_mulh<const NUM_LIMBS: usize, const LIMB_BITS: usize>(
kind: InsnKind,
x: &[u32],
y: &[u32],
) -> ([u32; NUM_LIMBS], [u32; NUM_LIMBS], Vec<u32>, u32, u32) {
// Low half: schoolbook convolution with carry propagation.
let mut mul = [0u64; NUM_LIMBS];
let mut carry = vec![0; 2 * NUM_LIMBS];
for i in 0..NUM_LIMBS {
if i > 0 {
mul[i] = carry[i - 1];
}
for j in 0..=i {
mul[i] += (x[j] * y[i - j]) as u64;
}
carry[i] = mul[i] >> LIMB_BITS;
mul[i] %= 1 << LIMB_BITS;
}
// x is treated as signed for every kind except MULHU.
let x_ext = (x[NUM_LIMBS - 1] >> (LIMB_BITS - 1))
* if kind == InsnKind::MULHU {
0
} else {
(1 << LIMB_BITS) - 1
};
// y is treated as signed only for MULH.
let y_ext = (y[NUM_LIMBS - 1] >> (LIMB_BITS - 1))
* if kind == InsnKind::MULH {
(1 << LIMB_BITS) - 1
} else {
0
};
// High half: continue the convolution, folding in the sign-extension
// contributions via running prefix sums of the low limbs.
let mut mulh = [0; NUM_LIMBS];
let mut x_prefix = 0;
let mut y_prefix = 0;
for i in 0..NUM_LIMBS {
x_prefix += x[i];
y_prefix += y[i];
mulh[i] = carry[NUM_LIMBS + i - 1]
+ (x_prefix as u64 * y_ext as u64)
+ (y_prefix as u64 * x_ext as u64);
for j in (i + 1)..NUM_LIMBS {
mulh[i] += (x[j] * y[NUM_LIMBS + i - j]) as u64;
}
carry[NUM_LIMBS + i] = mulh[i] >> LIMB_BITS;
mulh[i] %= 1 << LIMB_BITS;
}
// Narrow everything back to u32 for the witness columns.
let mut mulh_u32 = [0u32; NUM_LIMBS];
let mut mul_u32 = [0u32; NUM_LIMBS];
let mut carry_u32 = vec![0u32; 2 * NUM_LIMBS];
for i in 0..NUM_LIMBS {
mul_u32[i] = mul[i] as u32;
mulh_u32[i] = mulh[i] as u32;
carry_u32[i] = carry[i] as u32;
carry_u32[i + NUM_LIMBS] = carry[i + NUM_LIMBS] as u32;
}
(mulh_u32, mul_u32, carry_u32, x_ext, y_ext)
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/shift_imm/shift_imm_circuit.rs | ceno_zkvm/src/instructions/riscv/shift_imm/shift_imm_circuit.rs | use crate::{
Value,
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
gadgets::SignedExtendConfig,
instructions::{
Instruction,
riscv::{
RIVInstruction,
constants::{LIMB_BITS, UINT_LIMBS, UInt},
i_insn::IInstructionConfig,
},
},
structs::ProgramParams,
tables::InsnRecord,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, StepRecord};
use ff_ext::{ExtensionField, FieldInto};
use gkr_iop::gadgets::AssertLtConfig;
use multilinear_extensions::{Expression, ToExpr, WitIn};
use std::marker::PhantomData;
use witness::set_val;
/// Zero-sized circuit type for immediate shifts; `I` selects SLLI/SRLI/SRAI.
pub struct ShiftImmInstruction<E, I>(PhantomData<(E, I)>);
/// Witness layout for one shift-immediate instruction instance.
pub struct ShiftImmConfig<E: ExtensionField> {
// Shared I-type plumbing (PC/ts update, register accesses, insn lookup).
i_insn: IInstructionConfig<E>,
// Holds 2^shift (not the raw shift amount) for cheap mul/div-by-power-of-two.
imm: WitIn,
rs1_read: UInt<E>,
pub rd_written: UInt<E>,
// Bits flowing out of the register during the shift; constrained < imm.
outflow: WitIn,
assert_lt_config: AssertLtConfig,
// SRAI
is_lt_config: Option<SignedExtendConfig<E>>,
}
impl<E: ExtensionField, I: RIVInstruction> Instruction<E> for ShiftImmInstruction<E, I> {
type InstructionConfig = ShiftImmConfig<E>;
/// Circuit name derived from the opcode kind (e.g. "SRAI").
fn name() -> String {
format!("{:?}", I::INST_KIND)
}
/// Lays out witnesses and constraints modeling the shift as a bit
/// inflow/outflow process (see the comment block below for the soundness
/// argument).
fn construct_circuit(
circuit_builder: &mut CircuitBuilder<E>,
_params: &ProgramParams,
) -> Result<Self::InstructionConfig, ZKVMError> {
// treat bit shifting as a bit "inflow" and "outflow" process, flowing from left to right or vice versa
// this approach simplifies constraint and witness allocation compared to using multiplication/division gadget,
// as the divisor/multiplier is a power of 2.
//
// example: right shift (bit flow from left to right)
// inflow || rs1_read == rd_written || outflow
// in this case, inflow consists of either all 0s or all 1s for sign extension (if the value is signed).
//
// for left shifts, the inflow is always 0:
// rs1_read || inflow == outflow || rd_written
//
// additional constraint: outflow < (1 << shift), which lead to unique solution
// soundness: take Goldilocks as example, both sides of the equation are 63 bits numbers (<2**63)
// rd * imm + outflow == inflow * 2**32 + rs1
// 32 + 31. 31. 31 + 32. 32. (Bit widths)
// Note: `imm` wtns is set to 2**imm (upto 32 bit) just for efficient verification.
let imm = circuit_builder.create_witin(|| "imm");
let rs1_read = UInt::new_unchecked(|| "rs1_read", circuit_builder)?;
let rd_written = UInt::new(|| "rd_written", circuit_builder)?;
let outflow = circuit_builder.create_witin(|| "outflow");
// outflow < 2^shift makes the decomposition unique.
let assert_lt_config = AssertLtConfig::construct_circuit(
circuit_builder,
|| "outflow < imm",
outflow.expr(),
imm.expr(),
UINT_LIMBS * LIMB_BITS,
)?;
let two_pow_total_bits: Expression<_> = (1u64 << UInt::<E>::TOTAL_BITS).into();
let is_lt_config = match I::INST_KIND {
InsnKind::SLLI => {
circuit_builder.require_equal(
|| "shift check",
rs1_read.value() * imm.expr(), // inflow is zero for this case
outflow.expr() * two_pow_total_bits + rd_written.value(),
)?;
None
}
InsnKind::SRAI | InsnKind::SRLI => {
// SRAI additionally needs rs1's sign bit to build the all-ones inflow.
let (inflow, is_lt_config) = match I::INST_KIND {
InsnKind::SRAI => {
let is_rs1_neg = rs1_read.is_negative(circuit_builder)?;
let ones = imm.expr() - 1;
(is_rs1_neg.expr() * ones, Some(is_rs1_neg))
}
InsnKind::SRLI => (Expression::ZERO, None),
_ => unreachable!(),
};
circuit_builder.require_equal(
|| "shift check",
rd_written.value() * imm.expr() + outflow.expr(),
inflow * two_pow_total_bits + rs1_read.value(),
)?;
is_lt_config
}
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
};
let i_insn = IInstructionConfig::<E>::construct_circuit(
circuit_builder,
I::INST_KIND,
imm.expr(),
rs1_read.register_expr(),
rd_written.register_expr(),
false,
)?;
Ok(ShiftImmConfig {
i_insn,
imm,
rs1_read,
rd_written,
outflow,
assert_lt_config,
is_lt_config,
})
}
/// Fills one row of witnesses from an executed `StepRecord`, recomputing
/// the outflow bits for the specific shift direction.
fn assign_instance(
config: &Self::InstructionConfig,
shard_ctx: &mut ShardContext,
instance: &mut [<E as ExtensionField>::BaseField],
lk_multiplicity: &mut LkMultiplicity,
step: &StepRecord,
) -> Result<(), ZKVMError> {
// imm_internal is a precomputed 2**shift.
let imm = InsnRecord::<E::BaseField>::imm_internal(&step.insn()).0 as u64;
let rs1_read = Value::new_unchecked(step.rs1().unwrap().value);
let rd_written = Value::new(step.rd().unwrap().value.after, lk_multiplicity);
set_val!(instance, config.imm, imm);
config.rs1_read.assign_value(instance, rs1_read.clone());
config.rd_written.assign_value(instance, rd_written);
let outflow = match I::INST_KIND {
// Left shift: outflow is the bits pushed past the top of the register.
InsnKind::SLLI => (rs1_read.as_u64() * imm) >> UInt::<E>::TOTAL_BITS,
InsnKind::SRAI | InsnKind::SRLI => {
if I::INST_KIND == InsnKind::SRAI {
config.is_lt_config.as_ref().unwrap().assign_instance(
instance,
lk_multiplicity,
*rs1_read.as_u16_limbs().last().unwrap() as u64,
)?;
}
// Right shift: outflow is the low `shift` bits of rs1.
rs1_read.as_u64() & (imm - 1)
}
_ => unreachable!("Unsupported instruction kind {:?}", I::INST_KIND),
};
set_val!(instance, config.outflow, outflow);
config
.assert_lt_config
.assign_instance(instance, lk_multiplicity, outflow, imm)?;
config
.i_insn
.assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
Ok(())
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/logic/test.rs | ceno_zkvm/src/instructions/riscv/logic/test.rs | use ceno_emul::{Change, StepRecord, Word, encode_rv32};
use ff_ext::GoldilocksExt2;
use super::*;
use crate::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::ShardContext,
instructions::{Instruction, riscv::constants::UInt8},
scheme::mock_prover::{MOCK_PC_START, MockProver},
structs::ProgramParams,
utils::split_to_u8,
};
// Arbitrary 32-bit operand values shared by all three logic-opcode tests.
const A: Word = 0xbead1010;
const B: Word = 0xef552020;
#[test]
fn test_opcode_and() {
    let mut constraint_system = ConstraintSystem::<GoldilocksExt2>::new(|| "riscv");
    let mut builder = CircuitBuilder::new(&mut constraint_system);

    // Build the AND circuit inside its own namespace.
    let circuit = builder
        .namespace(
            || "and",
            |cb| Ok(AndInstruction::construct_circuit(cb, &ProgramParams::default())),
        )
        .unwrap()
        .unwrap();

    // Assign a single R-type step computing A & B.
    let encoded = encode_rv32(InsnKind::AND, 2, 3, 4, 0);
    let step = StepRecord::new_r_instruction(
        3,
        MOCK_PC_START,
        encoded,
        A,
        B,
        Change::new(0, A & B),
        0,
    );
    let (witness, lookups) = AndInstruction::assign_instances(
        &circuit,
        &mut ShardContext::default(),
        builder.cs.num_witin as usize,
        builder.cs.num_structural_witin as usize,
        vec![&step],
    )
    .unwrap();

    // rd must equal the byte-wise AND of the two operands.
    let expected = UInt8::from_const_unchecked(split_to_u8::<u64>(A & B));
    circuit
        .rd_written
        .require_equal(|| "assert_rd_written", &mut builder, &expected)
        .unwrap();

    MockProver::assert_satisfied_raw(&builder, witness, &[encoded], None, Some(lookups));
}
#[test]
fn test_opcode_or() {
    let mut constraint_system = ConstraintSystem::<GoldilocksExt2>::new(|| "riscv");
    let mut builder = CircuitBuilder::new(&mut constraint_system);

    // Build the OR circuit inside its own namespace.
    let circuit = builder
        .namespace(
            || "or",
            |cb| Ok(OrInstruction::construct_circuit(cb, &ProgramParams::default())),
        )
        .unwrap()
        .unwrap();

    // Assign a single R-type step computing A | B.
    let encoded = encode_rv32(InsnKind::OR, 2, 3, 4, 0);
    let step = StepRecord::new_r_instruction(
        3,
        MOCK_PC_START,
        encoded,
        A,
        B,
        Change::new(0, A | B),
        0,
    );
    let (witness, lookups) = OrInstruction::assign_instances(
        &circuit,
        &mut ShardContext::default(),
        builder.cs.num_witin as usize,
        builder.cs.num_structural_witin as usize,
        vec![&step],
    )
    .unwrap();

    // rd must equal the byte-wise OR of the two operands.
    let expected = UInt8::from_const_unchecked(split_to_u8::<u64>(A | B));
    circuit
        .rd_written
        .require_equal(|| "assert_rd_written", &mut builder, &expected)
        .unwrap();

    MockProver::assert_satisfied_raw(&builder, witness, &[encoded], None, Some(lookups));
}
#[test]
fn test_opcode_xor() {
    let mut constraint_system = ConstraintSystem::<GoldilocksExt2>::new(|| "riscv");
    let mut builder = CircuitBuilder::new(&mut constraint_system);

    // Build the XOR circuit inside its own namespace.
    let circuit = builder
        .namespace(
            || "xor",
            |cb| Ok(XorInstruction::construct_circuit(cb, &ProgramParams::default())),
        )
        .unwrap()
        .unwrap();

    // Assign a single R-type step computing A ^ B.
    let encoded = encode_rv32(InsnKind::XOR, 2, 3, 4, 0);
    let step = StepRecord::new_r_instruction(
        3,
        MOCK_PC_START,
        encoded,
        A,
        B,
        Change::new(0, A ^ B),
        0,
    );
    let (witness, lookups) = XorInstruction::assign_instances(
        &circuit,
        &mut ShardContext::default(),
        builder.cs.num_witin as usize,
        builder.cs.num_structural_witin as usize,
        vec![&step],
    )
    .unwrap();

    // rd must equal the byte-wise XOR of the two operands.
    let expected = UInt8::from_const_unchecked(split_to_u8::<u64>(A ^ B));
    circuit
        .rd_written
        .require_equal(|| "assert_rd_written", &mut builder, &expected)
        .unwrap();

    MockProver::assert_satisfied_raw(&builder, witness, &[encoded], None, Some(lookups));
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/instructions/riscv/logic/logic_circuit.rs | ceno_zkvm/src/instructions/riscv/logic/logic_circuit.rs | //! The circuit implementation of logic instructions.
use ff_ext::ExtensionField;
use gkr_iop::tables::OpsTable;
use std::marker::PhantomData;
use crate::{
circuit_builder::CircuitBuilder,
e2e::ShardContext,
error::ZKVMError,
instructions::{
Instruction,
riscv::{constants::UInt8, r_insn::RInstructionConfig},
},
structs::ProgramParams,
utils::split_to_u8,
witness::LkMultiplicity,
};
use ceno_emul::{InsnKind, StepRecord};
/// This trait defines a logic instruction, connecting an instruction type to a lookup table.
pub trait LogicOp {
    // The RISC-V instruction kind (e.g. AND/OR/XOR) this op implements.
    const INST_KIND: InsnKind;
    // The byte-level lookup table that encodes the operation's truth table.
    type OpsTable: OpsTable;
}
/// The Instruction circuit for a given LogicOp.
// Zero-sized marker type; `PhantomData` ties the field `E` and op `I` to the impl.
pub struct LogicInstruction<E, I>(PhantomData<(E, I)>);
impl<E: ExtensionField, I: LogicOp> Instruction<E> for LogicInstruction<E, I> {
    type InstructionConfig = LogicConfig<E>;

    /// Circuit name, taken from the instruction kind's Debug representation.
    fn name() -> String {
        format!("{:?}", I::INST_KIND)
    }

    /// Builds the shared R-type register layout, then constrains
    /// `rd = rs1 OP rs2` byte-wise via the op's lookup table.
    fn construct_circuit(
        cb: &mut CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<Self::InstructionConfig, ZKVMError> {
        let config = LogicConfig::construct_circuit(cb, I::INST_KIND)?;
        // Constrain the registers based on the given lookup table.
        UInt8::logic(
            cb,
            I::OpsTable::ROM_TYPE,
            &config.rs1_read,
            &config.rs2_read,
            &config.rd_written,
        )?;
        Ok(config)
    }

    /// Assigns one executed step: bumps the op-table lookup multiplicities for
    /// the operand values, then fills the register/limb columns via the config.
    fn assign_instance(
        config: &Self::InstructionConfig,
        shard_ctx: &mut ShardContext,
        instance: &mut [<E as ExtensionField>::BaseField],
        lk_multiplicity: &mut LkMultiplicity,
        step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        UInt8::<E>::logic_assign::<I::OpsTable>(
            lk_multiplicity,
            step.rs1().unwrap().value as u64,
            step.rs2().unwrap().value as u64,
        );
        config.assign_instance(instance, shard_ctx, lk_multiplicity, step)
    }
}
/// This config implements R-Instructions that represent registers values as 4 * u8.
/// Non-generic code shared by several circuits.
#[derive(Debug)]
pub struct LogicConfig<E: ExtensionField> {
    // Shared R-type plumbing (pc/ts updates, register read/write records).
    r_insn: RInstructionConfig<E>,
    // First source operand, as u8 limbs.
    rs1_read: UInt8<E>,
    // Second source operand, as u8 limbs.
    rs2_read: UInt8<E>,
    // Destination value written back; exposed so tests can pin its value.
    pub(crate) rd_written: UInt8<E>,
}
impl<E: ExtensionField> LogicConfig<E> {
    /// Allocates the three operand values as u8-limb witnesses and wires them
    /// into the shared R-type instruction config.
    ///
    /// NOTE(review): the limbs are allocated `new_unchecked`; they appear to be
    /// byte-constrained by the logic lookup added in
    /// `LogicInstruction::construct_circuit` — confirm before reusing elsewhere.
    fn construct_circuit(
        cb: &mut CircuitBuilder<E>,
        insn_kind: InsnKind,
    ) -> Result<Self, ZKVMError> {
        let rs1_read = UInt8::new_unchecked(|| "rs1_read", cb)?;
        let rs2_read = UInt8::new_unchecked(|| "rs2_read", cb)?;
        let rd_written = UInt8::new_unchecked(|| "rd_written", cb)?;
        let r_insn = RInstructionConfig::<E>::construct_circuit(
            cb,
            insn_kind,
            rs1_read.register_expr(),
            rs2_read.register_expr(),
            rd_written.register_expr(),
        )?;
        Ok(Self {
            r_insn,
            rs1_read,
            rs2_read,
            rd_written,
        })
    }

    /// Fills one instance row from a step record: delegates the R-type columns,
    /// then assigns the three operands as u8 limbs split from the recorded values.
    fn assign_instance(
        &self,
        instance: &mut [<E as ExtensionField>::BaseField],
        shard_ctx: &mut ShardContext,
        lk_multiplicity: &mut LkMultiplicity,
        step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        self.r_insn
            .assign_instance(instance, shard_ctx, lk_multiplicity, step)?;
        let rs1_read = split_to_u8(step.rs1().unwrap().value);
        self.rs1_read.assign_limbs(instance, &rs1_read);
        let rs2_read = split_to_u8(step.rs2().unwrap().value);
        self.rs2_read.assign_limbs(instance, &rs2_read);
        // rd uses the post-write value (`after`), matching the circuit's rd_written.
        let rd_written = split_to_u8(step.rd().unwrap().value.after);
        self.rd_written.assign_limbs(instance, &rd_written);
        Ok(())
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/lookup_keccakf.rs | ceno_zkvm/src/precompiles/lookup_keccakf.rs | use ceno_emul::{ByteAddr, Cycle, MemOp, StepRecord};
use ff_ext::ExtensionField;
use gkr_iop::{
OutEvalGroups, ProtocolBuilder, ProtocolWitnessGenerator,
chip::Chip,
circuit_builder::{
CircuitBuilder, ConstraintSystem, RotationParams, expansion_expr, rotation_split,
},
cpu::{CpuBackend, CpuProver},
error::{BackendError, CircuitBuilderError},
gkr::{
GKRCircuit, GKRProof, GKRProverOutput,
booleanhypercube::{BooleanHypercube, CYCLIC_POW2_5},
layer::Layer,
mock::MockProver,
},
selector::{SelectorContext, SelectorType},
utils::lk_multiplicity::LkMultiplicity,
};
use itertools::{Itertools, iproduct, izip, zip_eq};
use mpcs::PolynomialCommitmentScheme;
use multilinear_extensions::{
Expression, StructuralWitIn, ToExpr, WitIn,
mle::PointAndEval,
util::{ceil_log2, max_usable_threads},
};
use ndarray::{ArrayView, Ix2, Ix3, s};
use p3::field::FieldAlgebra;
use rayon::{
iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator},
slice::{ParallelSlice, ParallelSliceMut},
};
use std::{array, mem::transmute, sync::Arc};
use sumcheck::{
macros::{entered_span, exit_span},
util::optimal_sumcheck_threads,
};
use transcript::{BasicTranscript, Transcript};
use witness::{InstancePaddingStrategy, RowMajorMatrix, next_pow2_instance_padding};
use crate::{
chip_handler::MemoryExpr,
e2e::ShardContext,
error::ZKVMError,
instructions::riscv::insn_base::{StateInOut, WriteMEM},
precompiles::{
SelectorTypeLayout,
utils::{Mask, MaskRepresentation, not8_expr, set_slice_felts_from_u64 as push_instance},
},
scheme::utils::gkr_witness,
};
pub const ROUNDS: usize = 24;
pub const ROUNDS_CEIL_LOG2: usize = 5; // log_2(24.next_pow2())
const RC: [u64; ROUNDS] = [
1u64,
0x8082u64,
0x800000000000808au64,
0x8000000080008000u64,
0x808bu64,
0x80000001u64,
0x8000000080008081u64,
0x8000000000008009u64,
0x8au64,
0x88u64,
0x80008009u64,
0x8000000au64,
0x8000808bu64,
0x800000000000008bu64,
0x8000000000008089u64,
0x8000000000008003u64,
0x8000000000008002u64,
0x8000000000000080u64,
0x800au64,
0x800000008000000au64,
0x8000000080008081u64,
0x8000000000008080u64,
0x80000001u64,
0x8000000080008008u64,
];
const ROTATION_CONSTANTS: [[usize; 5]; 5] = [
[0, 1, 62, 28, 27],
[36, 44, 6, 55, 20],
[3, 10, 43, 25, 39],
[41, 45, 15, 21, 8],
[18, 2, 61, 56, 14],
];
pub const KECCAK_INPUT32_SIZE: usize = 50;
pub const KECCAK_OUTPUT32_SIZE: usize = 50;
// number of non zero out within keccak circuit
pub const KECCAK_OUT_EVAL_SIZE: usize = size_of::<KeccakOutEvals<u8>>();
const AND_LOOKUPS_PER_ROUND: usize = 200;
const XOR_LOOKUPS_PER_ROUND: usize = 608;
const RANGE_LOOKUPS_PER_ROUND: usize = 290;
const LOOKUP_FELTS_PER_ROUND: usize =
AND_LOOKUPS_PER_ROUND + XOR_LOOKUPS_PER_ROUND + RANGE_LOOKUPS_PER_ROUND;
pub const AND_LOOKUPS: usize = AND_LOOKUPS_PER_ROUND;
pub const XOR_LOOKUPS: usize = XOR_LOOKUPS_PER_ROUND;
pub const RANGE_LOOKUPS: usize = RANGE_LOOKUPS_PER_ROUND;
pub const STRUCTURAL_WITIN: usize = 6;
#[derive(Clone, Debug)]
pub struct KeccakParams;
#[derive(Clone, Debug)]
#[repr(C)]
pub struct KeccakOutEvals<T> {
pub lookup_entries: [T; LOOKUP_FELTS_PER_ROUND],
}
#[allow(dead_code)]
#[derive(Clone, Debug)]
#[repr(C)]
pub struct KeccakFixedCols<T> {
pub rc: [T; 8],
}
#[derive(Clone, Debug)]
#[repr(C)]
// NOTE: `KeccakLayout::new` transmutes a flat `[WitIn; N]` into this struct,
// so the field order and exact array lengths are load-bearing — do not
// reorder or resize fields without updating that transmute.
pub struct KeccakWitCols<T> {
    // state entering the round: 25 lanes x 8 bytes
    pub input8: [T; 200],
    // running column XORs: c_aux[i][j] = XOR of state[k][i] for k in 0..=j
    pub c_aux: [T; 200],
    // split witness for the 1-bit rotation of c (15/1-bit chunk pattern)
    pub c_temp: [T; 40],
    // c[i] rotated left by 1, as bytes
    pub c_rot: [T; 40],
    // theta d values, as bytes
    pub d: [T; 40],
    // state after the Theta step
    pub theta_output: [T; 200],
    // split witnesses for the per-lane Rho rotations
    pub rotation_witness: [T; 196],
    // state after Rho + Pi
    pub rhopi_output: [T; 200],
    // Chi intermediate: NOT(a) AND b, per byte
    pub nonlinear: [T; 200],
    // only lane (0,0) of Chi needs dedicated columns; the remaining lanes
    // reuse `iota_output[8..]` (see `build_layer_logic`)
    pub chi_output: [T; 8],
    // state leaving the round
    pub iota_output: [T; 200],
    // TODO temporarily define rc as witness
    pub rc: [T; 8],
}
#[derive(Clone, Debug)]
#[repr(C)]
pub struct KeccakLayer<WitT, EqT> {
pub wits: KeccakWitCols<WitT>,
// pub fixed: KeccakFixedCols<FixedT>,
pub(crate) eq_rotation_left: EqT,
pub(crate) eq_rotation_right: EqT,
pub(crate) eq_rotation: EqT,
}
#[derive(Clone, Debug)]
pub struct KeccakLayout<E: ExtensionField> {
pub params: KeccakParams,
pub layer_exprs: KeccakLayer<WitIn, StructuralWitIn>,
pub selector_type_layout: SelectorTypeLayout<E>,
pub input32_exprs: [MemoryExpr<E>; KECCAK_INPUT32_SIZE],
pub output32_exprs: [MemoryExpr<E>; KECCAK_OUTPUT32_SIZE],
pub n_fixed: usize,
pub n_committed: usize,
pub n_structural_witin: usize,
pub n_challenges: usize,
}
const ROTATION_WITNESS_LEN: usize = 196;
const C_TEMP_SPLIT_SIZES: [usize; 8] = [15, 1, 15, 1, 15, 1, 15, 1];
const BYTE_SPLIT_SIZES: [usize; 8] = [8; 8];
#[inline(always)]
/// Little-endian byte decomposition of a u64: slot `i` holds bits [8*i, 8*i + 8).
fn split_mask_to_bytes(value: u64) -> [u64; 8] {
    let mut bytes = [0u64; 8];
    for (i, slot) in bytes.iter_mut().enumerate() {
        *slot = (value >> (8 * i)) & 0xFF;
    }
    bytes
}
#[inline(always)]
/// Splits the 64-bit `value` into `N` chunks whose bit widths are given by
/// `sizes` (least-significant chunk first).
fn split_mask_to_array<const N: usize>(value: u64, sizes: &[usize; N]) -> [u64; N] {
    // Fast path: an all-bytes split needs none of the mask machinery.
    let is_byte_split = N == 8 && sizes.iter().all(|&s| s == 8);
    let mut chunks = [0u64; N];
    if is_byte_split {
        chunks.copy_from_slice(&split_mask_to_bytes(value));
    } else {
        let converted = MaskRepresentation::from_mask(Mask::new(64, value))
            .convert(sizes)
            .values();
        chunks.copy_from_slice(converted.as_slice());
    }
    chunks
}
impl<E: ExtensionField> KeccakLayout<E> {
    /// Allocates all witness columns and placeholder structural witnesses for
    /// one keccak-f layer, and configures the sparse selectors over the
    /// 32-element cyclic round group.
    fn new(cb: &mut CircuitBuilder<E>, params: KeccakParams) -> Self {
        // allocate witnesses, fixed, and eqs
        let (
            wits,
            // fixed,
            [
                sel_mem_read,
                sel_mem_write,
                eq_zero,
                eq_rotation_left,
                eq_rotation_right,
                eq_rotation,
            ],
        ): (KeccakWitCols<WitIn>, [StructuralWitIn; STRUCTURAL_WITIN]) = unsafe {
            (
                // SAFETY(review): relies on `KeccakWitCols` being #[repr(C)]
                // with every field an array of the element type, so a flat
                // array of `size_of::<KeccakWitCols<u8>>()` WitIns (= total
                // element count, since u8 is one byte) is layout-compatible
                // with `KeccakWitCols<WitIn>` — confirm if fields change.
                transmute::<[WitIn; size_of::<KeccakWitCols<u8>>()], KeccakWitCols<WitIn>>(
                    array::from_fn(|id| cb.create_witin(|| format!("keccak_witin_{}", id))),
                ),
                // transmute::<[Fixed; 8], KeccakFixedCols<Fixed>>(array::from_fn(|id| {
                //     cb.create_fixed(|| format!("keccak_fixed_{}", id))
                // })),
                array::from_fn(|id| {
                    cb.create_placeholder_structural_witin(|| format!("keccak_eq_{}", id))
                }),
            )
        };
        // indices to activate zero/lookup constraints: the cyclic-group
        // positions of the 24 real rounds, in sorted order
        let checked_indices = CYCLIC_POW2_5
            .iter()
            .take(ROUNDS)
            .sorted()
            .copied()
            .map(|v| v as usize)
            .collect_vec();
        Self {
            params,
            layer_exprs: KeccakLayer {
                wits,
                // fixed,
                eq_rotation_left,
                eq_rotation_right,
                eq_rotation,
            },
            selector_type_layout: SelectorTypeLayout {
                // memory reads are active only at the first round's index
                sel_mem_read: SelectorType::OrderedSparse32 {
                    indices: vec![CYCLIC_POW2_5[0] as usize],
                    expression: sel_mem_read.expr(),
                },
                // memory writes are active only at the last round's index
                sel_mem_write: SelectorType::OrderedSparse32 {
                    indices: vec![CYCLIC_POW2_5[ROUNDS - 1] as usize],
                    expression: sel_mem_write.expr(),
                },
                // lookup and zero selectors share the same eq_zero expression
                sel_lookup: SelectorType::OrderedSparse32 {
                    indices: checked_indices.clone(),
                    expression: eq_zero.expr(),
                },
                sel_zero: SelectorType::OrderedSparse32 {
                    indices: checked_indices,
                    expression: eq_zero.expr(),
                },
            },
            // placeholders; the real expressions are set by `build_layer_logic`
            input32_exprs: array::from_fn(|_| array::from_fn(|_| Expression::WitIn(0))),
            output32_exprs: array::from_fn(|_| array::from_fn(|_| Expression::WitIn(0))),
            n_fixed: 0,
            n_committed: 0,
            n_structural_witin: STRUCTURAL_WITIN,
            n_challenges: 0,
        }
    }
}
impl<E: ExtensionField> ProtocolBuilder<E> for KeccakLayout<E> {
type Params = KeccakParams;
fn build_layer_logic(
cb: &mut CircuitBuilder<E>,
params: Self::Params,
) -> Result<Self, CircuitBuilderError> {
let mut layout = Self::new(cb, params);
let system = cb;
let KeccakWitCols {
input8,
c_aux,
c_temp,
c_rot,
d,
theta_output,
rotation_witness,
rhopi_output,
nonlinear,
chi_output,
iota_output,
rc,
} = &layout.layer_exprs.wits;
// let KeccakFixedCols { rc } = &layout.layer_exprs.fixed;
// TODO: ndarrays can be replaced with normal arrays
// Input state of the round in 8-bit chunks
let state8: ArrayView<WitIn, Ix3> = ArrayView::from_shape((5, 5, 8), input8).unwrap();
// The purpose is to compute the auxiliary array
// c[i] = XOR (state[j][i]) for j in 0..5
// We unroll it into
// c_aux[i][j] = XOR (state[k][i]) for k in 0..j
// We use c_aux[i][4] instead of c[i]
// c_aux is also stored in 8-bit chunks
let c_aux: ArrayView<WitIn, Ix3> = ArrayView::from_shape((5, 5, 8), c_aux).unwrap();
for i in 0..5 {
for k in 0..8 {
// Initialize first element
system.require_equal(
|| "init c_aux".to_string(),
state8[[0, i, k]].into(),
c_aux[[i, 0, k]].into(),
)?;
}
for j in 1..5 {
// Check xor using lookups over all chunks
for k in 0..8 {
system.lookup_xor_byte(
c_aux[[i, j - 1, k]].into(),
state8[[j, i, k]].into(),
c_aux[[i, j, k]].into(),
)?;
}
}
}
// Compute c_rot[i] = c[i].rotate_left(1)
// To understand how rotations are performed in general, consult the
// documentation of `constrain_left_rotation64`. Here c_temp is the split
// witness for a 1-rotation.
let c_temp: ArrayView<WitIn, Ix2> = ArrayView::from_shape((5, 8), c_temp).unwrap();
let c_rot: ArrayView<WitIn, Ix2> = ArrayView::from_shape((5, 8), c_rot).unwrap();
let (sizes, _) = rotation_split(1);
for i in 0..5 {
assert_eq!(c_temp.slice(s![i, ..]).iter().len(), sizes.iter().len());
system.require_left_rotation64(
|| format!("theta rotation_{i}"),
&c_aux
.slice(s![i, 4, ..])
.iter()
.map(|e| e.expr())
.collect_vec(),
&zip_eq(c_temp.slice(s![i, ..]).iter(), sizes.iter())
.map(|(e, sz)| (*sz, e.expr()))
.collect_vec(),
&c_rot
.slice(s![i, ..])
.iter()
.map(|e| e.expr())
.collect_vec(),
1,
)?;
}
// d is computed simply as XOR of required elements of c (and rotations)
// again stored as 8-bit chunks
let d: ArrayView<WitIn, Ix2> = ArrayView::from_shape((5, 8), d).unwrap();
for i in 0..5 {
for k in 0..8 {
system.lookup_xor_byte(
c_aux[[(i + 5 - 1) % 5, 4, k]].into(),
c_rot[[(i + 1) % 5, k]].into(),
d[[i, k]].into(),
)?;
}
}
// output state of the Theta sub-round, simple XOR, in 8-bit chunks
let theta_output: ArrayView<WitIn, Ix3> =
ArrayView::from_shape((5, 5, 8), theta_output).unwrap();
for i in 0..5 {
for j in 0..5 {
for k in 0..8 {
system.lookup_xor_byte(
state8[[j, i, k]].into(),
d[[i, k]].into(),
theta_output[[j, i, k]].into(),
)?
}
}
}
// output state after applying both Rho and Pi sub-rounds
// sub-round Pi is a simple permutation of 64-bit lanes
// sub-round Rho requires rotations
let rhopi_output: ArrayView<WitIn, Ix3> =
ArrayView::from_shape((5, 5, 8), rhopi_output).unwrap();
// iterator over split witnesses
let mut rotation_witness = rotation_witness.iter();
for i in 0..5 {
#[allow(clippy::needless_range_loop)]
for j in 0..5 {
let arg = theta_output
.slice(s!(j, i, ..))
.iter()
.map(|e| e.expr())
.collect_vec();
let (sizes, _) = rotation_split(ROTATION_CONSTANTS[j][i]);
let many = sizes.len();
let rep_split = zip_eq(sizes, rotation_witness.by_ref().take(many))
.map(|(sz, wit)| (sz, wit.expr()))
.collect_vec();
let arg_rotated = rhopi_output
.slice(s!((2 * i + 3 * j) % 5, j, ..))
.iter()
.map(|e| e.expr())
.collect_vec();
system.require_left_rotation64(
|| format!("RHOPI {i}, {j}"),
&arg,
&rep_split,
&arg_rotated,
ROTATION_CONSTANTS[j][i],
)?;
}
}
assert!(rotation_witness.next().is_none());
let mut chi_output = chi_output.to_vec();
chi_output.extend(iota_output[8..].to_vec());
let chi_output: ArrayView<WitIn, Ix3> =
ArrayView::from_shape((5, 5, 8), &chi_output).unwrap();
// for the Chi sub-round, we use an intermediate witness storing the result of
// the required AND
let nonlinear: ArrayView<WitIn, Ix3> = ArrayView::from_shape((5, 5, 8), nonlinear).unwrap();
for i in 0..5 {
for j in 0..5 {
for k in 0..8 {
system.lookup_and_byte(
not8_expr(rhopi_output[[j, (i + 1) % 5, k]].into()),
rhopi_output[[j, (i + 2) % 5, k]].into(),
nonlinear[[j, i, k]].into(),
)?;
system.lookup_xor_byte(
rhopi_output[[j, i, k]].into(),
nonlinear[[j, i, k]].into(),
chi_output[[j, i, k]].into(),
)?;
}
}
}
let iota_output_arr: ArrayView<WitIn, Ix3> =
ArrayView::from_shape((5, 5, 8), iota_output).unwrap();
for k in 0..8 {
system.lookup_xor_byte(
chi_output[[0, 0, k]].into(),
rc[k].into(),
iota_output_arr[[0, 0, k]].into(),
)?;
}
let keccak_input8: ArrayView<WitIn, Ix3> =
ArrayView::from_shape((5, 5, 8), input8).unwrap();
let keccak_output8: ArrayView<WitIn, Ix3> =
ArrayView::from_shape((5, 5, 8), iota_output).unwrap();
// process keccak output
let mut keccak_output32 = Vec::with_capacity(KECCAK_OUTPUT32_SIZE);
for x in 0..5 {
for y in 0..5 {
for k in 0..2 {
// create an expression combining 4 elements of state8 into a 2x16-bit felt
let output8_slice = keccak_output8
.slice(s![x, y, 4 * k..4 * (k + 1)])
.iter()
.map(|e| (8, e.expr()))
.collect_vec();
keccak_output32.push([
expansion_expr::<E, 16>(&output8_slice[0..2]),
expansion_expr::<E, 16>(&output8_slice[2..4]),
])
}
}
}
let mut keccak_input32 = Vec::with_capacity(KECCAK_INPUT32_SIZE);
// process keccak input
for x in 0..5 {
for y in 0..5 {
for k in 0..2 {
// create an expression combining 4 elements of state8 into a single 2x16-bit felt
let input8_slice = keccak_input8
.slice(s![x, y, 4 * k..4 * (k + 1)])
.iter()
.map(|e| (8, e.expr()))
.collect_vec();
keccak_input32.push([
expansion_expr::<E, 16>(&input8_slice[0..2]),
expansion_expr::<E, 16>(&input8_slice[2..4]),
])
}
}
}
// set input/output32 expr
layout.input32_exprs = keccak_input32.try_into().unwrap();
layout.output32_exprs = keccak_output32.try_into().unwrap();
// rotation constrain: rotation(keccak_input8).next() == keccak_output8
izip!(keccak_input8, keccak_output8)
.for_each(|(input, output)| system.rotate_and_assert_eq(input.expr(), output.expr()));
system.set_rotation_params(RotationParams {
rotation_eqs: Some([
layout.layer_exprs.eq_rotation_left.expr(),
layout.layer_exprs.eq_rotation_right.expr(),
layout.layer_exprs.eq_rotation.expr(),
]),
rotation_cyclic_group_log2: ROUNDS_CEIL_LOG2,
rotation_cyclic_subgroup_size: ROUNDS - 1,
});
Ok(layout)
}
fn finalize(&mut self, cb: &mut CircuitBuilder<E>) -> (OutEvalGroups, Chip<E>) {
self.n_fixed = cb.cs.num_fixed;
self.n_committed = cb.cs.num_witin as usize;
self.n_challenges = 0;
// register selector to legacy constrain system
cb.cs.r_selector = Some(self.selector_type_layout.sel_mem_read.clone());
cb.cs.w_selector = Some(self.selector_type_layout.sel_mem_write.clone());
cb.cs.lk_selector = Some(self.selector_type_layout.sel_lookup.clone());
cb.cs.zero_selector = Some(self.selector_type_layout.sel_zero.clone());
let w_len = cb.cs.w_expressions.len();
let r_len = cb.cs.r_expressions.len();
let lk_len = cb.cs.lk_expressions.len();
let zero_len =
cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
(
[
// r_record
(0..r_len).collect_vec(),
// w_record
(r_len..r_len + w_len).collect_vec(),
// lk_record
(r_len + w_len..r_len + w_len + lk_len).collect_vec(),
// zero_record
(0..zero_len).collect_vec(),
],
Chip::new_from_cb(cb, self.n_challenges),
)
}
fn n_committed(&self) -> usize {
unimplemented!("retrieve from constrain system")
}
fn n_fixed(&self) -> usize {
unimplemented!("retrieve from constrain system")
}
fn n_challenges(&self) -> usize {
0
}
fn n_evaluations(&self) -> usize {
unimplemented!()
}
fn n_layers(&self) -> usize {
1
}
}
#[derive(Clone)]
pub struct KeccakStateInstance {
    // address of the keccak state buffer in guest memory (per field name —
    // TODO confirm against the caller that fills this in)
    pub state_ptr_address: ByteAddr,
    // cycle at which this precompile invocation executes
    pub cur_ts: Cycle,
    // per-word previous-access timestamps for the 50 input words
    pub read_ts: [Cycle; KECCAK_INPUT32_SIZE],
}
impl Default for KeccakStateInstance {
fn default() -> Self {
Self {
state_ptr_address: Default::default(),
cur_ts: Default::default(),
read_ts: [Cycle::default(); KECCAK_INPUT32_SIZE],
}
}
}
#[derive(Clone)]
pub struct KeccakWitInstance {
    // input keccak state as 50 u32 words — NOTE(review): lane packing as
    // lo | (hi << 32) is inferred from `phase1_witness_group`; confirm.
    pub instance: [u32; KECCAK_INPUT32_SIZE],
}
impl Default for KeccakWitInstance {
fn default() -> Self {
Self {
instance: [0u32; KECCAK_INPUT32_SIZE],
}
}
}
#[derive(Clone, Default)]
pub struct KeccakInstance {
    // memory/timestamp context for this invocation
    pub state: KeccakStateInstance,
    // the 50 x u32 input state words
    pub witin: KeccakWitInstance,
}

#[derive(Clone, Default)]
pub struct KeccakTrace {
    // one entry per keccak-f invocation to prove
    pub instances: Vec<KeccakInstance>,
}
impl<E> ProtocolWitnessGenerator<E> for KeccakLayout<E>
where
E: ExtensionField,
{
type Trace = KeccakTrace;
fn fixed_witness_group(&self) -> RowMajorMatrix<E::BaseField> {
// TODO remove this after recover RC
RowMajorMatrix::new(0, 0, InstancePaddingStrategy::Default)
// RowMajorMatrix::new_by_values(
// RC.iter()
// .flat_map(|x| {
// (0..8)
// .map(|i| E::BaseField::from_canonical_u64((x >> (i << 3)) & 0xFF))
// .collect_vec()
// })
// .collect_vec(),
// 8,
// InstancePaddingStrategy::Default,
// )
}
fn phase1_witness_group(
&self,
phase1: Self::Trace,
wits: [&mut RowMajorMatrix<E::BaseField>; 2],
lk_multiplicity: &mut LkMultiplicity,
) {
// TODO assign eq (selectors) to _structural_wits
let [wits, structural_wits] = wits;
let KeccakLayer {
wits:
KeccakWitCols {
input8: input8_witin,
c_aux: c_aux_witin,
c_temp: c_temp_witin,
c_rot: c_rot_witin,
d: d_witin,
theta_output: theta_output_witin,
rotation_witness: rotation_witness_witin,
rhopi_output: rhopi_output_witin,
nonlinear: nonlinear_witin,
chi_output: chi_output_witin,
iota_output: iota_output_witin,
rc: rc_witin,
},
..
} = self.layer_exprs;
let num_instances = phase1.instances.len();
// keccak instance full rounds (24 rounds + 8 round padding) as chunk size
// we need to do assignment on respective 31 cyclic group index
wits.values
.par_chunks_mut(self.n_committed * ROUNDS.next_power_of_two())
.take(num_instances)
.zip_eq(
structural_wits
.values
.par_chunks_mut(self.n_structural_witin * ROUNDS.next_power_of_two())
.take(num_instances),
)
.zip(&phase1.instances)
.for_each(|((wits, structural_wits), KeccakInstance { witin, .. })| {
let mut lk_multiplicity = lk_multiplicity.clone();
let state_32_iter = witin.instance.iter().map(|e| *e as u64);
let mut state64 = [[0u64; 5]; 5];
zip_eq(iproduct!(0..5, 0..5), state_32_iter.tuples())
.map(|((x, y), (lo, hi))| {
state64[x][y] = lo | (hi << 32);
})
.count();
let bh = BooleanHypercube::new(ROUNDS_CEIL_LOG2);
let mut cyclic_group = bh.into_iter();
let (mut sel_mem_read_iter, sel_mem_read_structural_witin) = (
self.selector_type_layout
.sel_mem_read
.sparse32_indices()
.iter(),
self.selector_type_layout.sel_mem_read.selector_expr().id(),
);
let (mut sel_mem_write_iter, sel_mem_write_structural_witin) = (
self.selector_type_layout
.sel_mem_write
.sparse32_indices()
.iter(),
self.selector_type_layout.sel_mem_write.selector_expr().id(),
);
let (mut sel_lookup_iter, sel_lookup_structural_witin) = (
self.selector_type_layout
.sel_lookup
.sparse32_indices()
.iter(),
self.selector_type_layout.sel_lookup.selector_expr().id(),
);
let (mut sel_zero_iter, sel_zero_structural_witin) = (
self.selector_type_layout.sel_zero.sparse32_indices().iter(),
self.selector_type_layout.sel_zero.selector_expr().id(),
);
#[allow(clippy::needless_range_loop)]
for round in 0..ROUNDS {
let round_index = cyclic_group.next().unwrap();
let wits =
&mut wits[round_index as usize * self.n_committed..][..self.n_committed];
// set selector
if let Some(index) = sel_mem_read_iter.next() {
structural_wits
[index * self.n_structural_witin + sel_mem_read_structural_witin] =
E::BaseField::ONE;
}
if let Some(index) = sel_mem_write_iter.next() {
structural_wits
[index * self.n_structural_witin + sel_mem_write_structural_witin] =
E::BaseField::ONE;
}
if let Some(index) = sel_lookup_iter.next() {
structural_wits
[index * self.n_structural_witin + sel_lookup_structural_witin] =
E::BaseField::ONE;
}
if let Some(index) = sel_zero_iter.next() {
structural_wits
[index * self.n_structural_witin + sel_zero_structural_witin] =
E::BaseField::ONE;
}
let mut state8 = [[[0u64; 8]; 5]; 5];
for x in 0..5 {
for y in 0..5 {
state8[x][y] = split_mask_to_array(state64[x][y], &BYTE_SPLIT_SIZES);
}
}
push_instance::<E, _>(
wits,
input8_witin[0].id.into(),
state8.into_iter().flatten().flatten(),
);
let mut c_aux64 = [[0u64; 5]; 5];
let mut c_aux8 = [[[0u64; 8]; 5]; 5];
for i in 0..5 {
c_aux64[i][0] = state64[0][i];
c_aux8[i][0] = split_mask_to_array(c_aux64[i][0], &BYTE_SPLIT_SIZES);
for j in 1..5 {
c_aux64[i][j] = state64[j][i] ^ c_aux64[i][j - 1];
for k in 0..8 {
lk_multiplicity
.lookup_xor_byte(c_aux8[i][j - 1][k], state8[j][i][k]);
}
c_aux8[i][j] = split_mask_to_array(c_aux64[i][j], &BYTE_SPLIT_SIZES);
}
}
let mut c64 = [0u64; 5];
let mut c8 = [[0u64; 8]; 5];
for x in 0..5 {
c64[x] = c_aux64[x][4];
c8[x] = split_mask_to_array(c64[x], &BYTE_SPLIT_SIZES);
}
let mut c_temp = [[0u64; 8]; 5];
for i in 0..5 {
let chunks = split_mask_to_array(c64[i], &C_TEMP_SPLIT_SIZES);
for (chunk, size) in chunks.iter().zip(C_TEMP_SPLIT_SIZES.iter()) {
lk_multiplicity.assert_const_range(*chunk, *size);
}
c_temp[i] = chunks;
}
let mut crot64 = [0u64; 5];
let mut crot8 = [[0u64; 8]; 5];
for i in 0..5 {
crot64[i] = c64[i].rotate_left(1);
crot8[i] = split_mask_to_array(crot64[i], &BYTE_SPLIT_SIZES);
}
let mut d64 = [0u64; 5];
let mut d8 = [[0u64; 8]; 5];
for x in 0..5 {
d64[x] = c64[(x + 4) % 5] ^ c64[(x + 1) % 5].rotate_left(1);
for k in 0..8 {
lk_multiplicity.lookup_xor_byte(
c_aux8[(x + 5 - 1) % 5][4][k],
crot8[(x + 1) % 5][k],
);
}
d8[x] = split_mask_to_array(d64[x], &BYTE_SPLIT_SIZES);
}
let mut theta_state64 = state64;
let mut theta_state8 = [[[0u64; 8]; 5]; 5];
let mut rotation_witness = Vec::with_capacity(ROTATION_WITNESS_LEN);
for x in 0..5 {
for y in 0..5 {
theta_state64[y][x] ^= d64[x];
for k in 0..8 {
lk_multiplicity.lookup_xor_byte(state8[y][x][k], d8[x][k])
}
theta_state8[y][x] =
split_mask_to_array(theta_state64[y][x], &BYTE_SPLIT_SIZES);
let (sizes, _) = rotation_split(ROTATION_CONSTANTS[y][x]);
let rotation_chunks =
MaskRepresentation::from_mask(Mask::new(64, theta_state64[y][x]))
.convert(&sizes)
.values();
for (chunk, size) in rotation_chunks.iter().zip(sizes.iter()) {
lk_multiplicity.assert_const_range(*chunk, *size);
}
rotation_witness.extend(rotation_chunks);
}
}
assert_eq!(rotation_witness.len(), rotation_witness_witin.len());
// Rho and Pi steps
let mut rhopi_output64 = [[0u64; 5]; 5];
let mut rhopi_output8 = [[[0u64; 8]; 5]; 5];
for x in 0..5 {
for y in 0..5 {
rhopi_output64[(2 * x + 3 * y) % 5][y % 5] =
theta_state64[y][x].rotate_left(ROTATION_CONSTANTS[y][x] as u32);
}
}
for x in 0..5 {
for y in 0..5 {
rhopi_output8[x][y] =
split_mask_to_array(rhopi_output64[x][y], &BYTE_SPLIT_SIZES);
}
}
// Chi step
let mut nonlinear64 = [[0u64; 5]; 5];
let mut nonlinear8 = [[[0u64; 8]; 5]; 5];
for x in 0..5 {
for y in 0..5 {
nonlinear64[y][x] =
!rhopi_output64[y][(x + 1) % 5] & rhopi_output64[y][(x + 2) % 5];
for k in 0..8 {
lk_multiplicity.lookup_and_byte(
0xFF - rhopi_output8[y][(x + 1) % 5][k],
rhopi_output8[y][(x + 2) % 5][k],
);
}
nonlinear8[y][x] =
split_mask_to_array(nonlinear64[y][x], &BYTE_SPLIT_SIZES);
}
}
let mut chi_output64 = [[0u64; 5]; 5];
let mut chi_output8 = [[[0u64; 8]; 5]; 5];
for x in 0..5 {
for y in 0..5 {
chi_output64[y][x] = nonlinear64[y][x] ^ rhopi_output64[y][x];
for k in 0..8 {
lk_multiplicity
.lookup_xor_byte(rhopi_output8[y][x][k], nonlinear8[y][x][k]);
}
chi_output8[y][x] =
split_mask_to_array(chi_output64[y][x], &BYTE_SPLIT_SIZES);
}
}
// Iota step
let mut iota_output64 = chi_output64;
let mut iota_output8 = [[[0u64; 8]; 5]; 5];
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/utils.rs | ceno_zkvm/src/precompiles/utils.rs | use ff_ext::ExtensionField;
use gkr_iop::circuit_builder::expansion_expr;
use itertools::Itertools;
use multilinear_extensions::{Expression, ToExpr};
use p3::field::FieldAlgebra;
use smallvec::SmallVec;
/// Bitwise NOT of a byte-valued expression: `!x == 0xFF - x` for x in 0..=255.
pub fn not8_expr<E: ExtensionField>(expr: Expression<E>) -> Expression<E> {
    let all_ones = E::BaseField::from_canonical_u8(0xFF).expr();
    all_ones - expr
}
/// Writes each yielded u64 (canonically reduced) into consecutive slots of
/// `dst`, starting at `start_index`. Panics via slice indexing if the iterator
/// yields more items than `dst` can hold past `start_index`.
pub fn set_slice_felts_from_u64<E, I>(dst: &mut [E::BaseField], start_index: usize, iter: I)
where
    E: ExtensionField,
    I: IntoIterator<Item = u64>,
{
    let mut cursor = start_index;
    for value in iter {
        dst[cursor] = E::BaseField::from_canonical_u64(value);
        cursor += 1;
    }
}
/// Merge a slice of u8 limbs into a slice of u32 represented by u16 limb pair.
///
/// Every group of four byte limbs becomes one `[lo16, hi16]` expression pair
/// pushed onto `dst`. NOTE(review): a trailing partial group
/// (`u8_slice.len() % 4 != 0`) is silently dropped by the integer division.
pub fn merge_u8_slice_to_u16_limbs_pairs_and_extend<E: ExtensionField>(
    u8_slice: &[impl ToExpr<E, Output = Expression<E>> + Clone],
    dst: &mut Vec<[Expression<E>; 2]>,
) {
    let len = u8_slice.len() / 4;
    for i in 0..len {
        // create an expression combining 4 elements of bytes into a 2x16-bit felt
        let output8_slice = u8_slice[4 * i..4 * (i + 1)]
            .iter()
            .map(|e| (8, e.expr()))
            .collect_vec();
        dst.push([
            expansion_expr::<E, 16>(&output8_slice[0..2]),
            expansion_expr::<E, 16>(&output8_slice[2..4]),
        ])
    }
}
/// A contiguous group of `size` bits holding `value` (low bits significant).
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct Mask {
    pub size: usize,
    pub value: u64,
}

impl Mask {
    /// Creates a mask, asserting that `value` fits in `size` bits.
    ///
    /// # Panics
    /// Panics if `size > 64`, or if `size < 64` and `value >= 2^size`.
    pub fn new(size: usize, value: u64) -> Self {
        // A u64 holds at most 64 bits; sizes beyond that were previously
        // accepted silently, which would mask caller bugs.
        assert!(size <= 64, "mask size {size} exceeds 64 bits");
        if size < 64 {
            assert!(value < (1 << size));
        }
        Self { size, value }
    }
}
// Segment lists up to this length are stored inline (no heap allocation).
const MASK_INLINE_CAPACITY: usize = 32;

#[derive(Debug, Clone, PartialEq, Eq)]
pub struct MaskRepresentation {
    // Ordered bit segments, least-significant segment first
    // (see `from_bits`/`to_bits` for the bit ordering contract).
    pub rep: SmallVec<[Mask; MASK_INLINE_CAPACITY]>,
}
// Mask <-> (size, value) tuple conversions, in both directions.
impl From<Mask> for (usize, u64) {
    fn from(mask: Mask) -> Self {
        (mask.size, mask.value)
    }
}

impl From<(usize, u64)> for Mask {
    // Goes through `Mask::new`, so the value is validated against the size.
    fn from(tuple: (usize, u64)) -> Self {
        Mask::new(tuple.0, tuple.1)
    }
}

// MaskRepresentation <-> Vec<(size, value)> conversions.
impl From<MaskRepresentation> for Vec<(usize, u64)> {
    fn from(mask_rep: MaskRepresentation) -> Self {
        mask_rep.rep.into_iter().map(|mask| mask.into()).collect()
    }
}

impl From<Vec<(usize, u64)>> for MaskRepresentation {
    fn from(tuples: Vec<(usize, u64)>) -> Self {
        MaskRepresentation {
            rep: tuples.into_iter().map(Into::into).collect(),
        }
    }
}

// Collecting from iterators of tuples or masks.
impl FromIterator<(usize, u64)> for MaskRepresentation {
    fn from_iter<I: IntoIterator<Item = (usize, u64)>>(iter: I) -> Self {
        MaskRepresentation {
            rep: iter.into_iter().map(Into::into).collect(),
        }
    }
}

impl FromIterator<Mask> for MaskRepresentation {
    fn from_iter<I: IntoIterator<Item = Mask>>(iter: I) -> Self {
        MaskRepresentation {
            rep: iter.into_iter().collect(),
        }
    }
}
impl MaskRepresentation {
    /// Builds a representation from an owned list of masks.
    pub fn new(masks: Vec<Mask>) -> Self {
        Self { rep: masks.into() }
    }

    /// Wraps a single mask as a one-segment representation.
    pub fn from_mask(mask: Mask) -> Self {
        let mut rep = SmallVec::new();
        rep.push(mask);
        Self { rep }
    }

    /// Collects masks from any iterator.
    pub fn from_masks<I>(masks: I) -> Self
    where
        I: IntoIterator<Item = Mask>,
    {
        Self {
            rep: masks.into_iter().collect(),
        }
    }

    /// Packs individual bits (LSB first) into segments of the given sizes.
    ///
    /// # Panics
    /// Panics if `bits.len()` does not equal the sum of `sizes`.
    pub fn from_bits(bits: Vec<u64>, sizes: Vec<usize>) -> Self {
        assert_eq!(bits.len(), sizes.iter().sum::<usize>());
        let mut masks = Vec::new();
        let mut bit_iter = bits.into_iter();
        for size in sizes {
            let mut mask = 0;
            for i in 0..size {
                // bit i of the segment is the next bit in stream order
                mask += (1 << i) * bit_iter.next().unwrap();
            }
            masks.push(Mask::new(size, mask));
        }
        Self { rep: masks.into() }
    }

    /// Flattens all segments back into individual bits, LSB of each segment first.
    pub fn to_bits(&self) -> Vec<u64> {
        self.rep
            .iter()
            .flat_map(|mask| (0..mask.size).map(move |i| (mask.value >> i) & 1))
            .collect()
    }

    /// Re-chunks the same bit stream into segments of `new_sizes`.
    ///
    /// Walks the source segments bit by bit, keeping a cursor
    /// (`src_index`, `src_bit`); bits requested beyond the available total
    /// come out as zero. Zero-size source segments are skipped.
    pub fn convert(&self, new_sizes: &[usize]) -> Self {
        let mut rep = SmallVec::<[Mask; MASK_INLINE_CAPACITY]>::with_capacity(new_sizes.len());
        let mut src_index = 0;
        let mut src_bit = 0;
        for &size in new_sizes {
            let mut value = 0u64;
            for bit_pos in 0..size {
                // defaults to 0 when the source stream is exhausted
                let mut bit_value = 0u64;
                while src_index < self.rep.len() {
                    let mask = &self.rep[src_index];
                    if src_bit < mask.size {
                        bit_value = (mask.value >> src_bit) & 1;
                        src_bit += 1;
                        // advance to the next segment once this one is drained
                        if src_bit == mask.size {
                            src_index += 1;
                            src_bit = 0;
                        }
                        break;
                    } else {
                        // skip empty (size 0) segments
                        src_index += 1;
                        src_bit = 0;
                    }
                }
                value |= bit_value << bit_pos;
            }
            rep.push(Mask::new(size, value));
        }
        Self { rep }
    }

    /// The segment values, in order, without their sizes.
    pub fn values(&self) -> SmallVec<[u64; MASK_INLINE_CAPACITY]> {
        self.rep.iter().map(|m| m.value).collect()
    }

    /// The segments as an owned `Vec`.
    pub fn masks(&self) -> Vec<Mask> {
        self.rep.to_vec()
    }
}
#[cfg(test)]
mod tests {
use crate::precompiles::utils::{Mask, MaskRepresentation};
#[test]
fn test_mask_representation_from_bits() {
let bits = vec![1, 0, 1, 1, 0, 1, 0, 0];
let sizes = vec![3, 5];
let mask_rep = MaskRepresentation::from_bits(bits.clone(), sizes.clone());
assert_eq!(mask_rep.rep.len(), 2);
assert_eq!(mask_rep.rep[0], Mask::new(3, 0b101));
assert_eq!(mask_rep.rep[1], Mask::new(5, 0b00101));
}
#[test]
fn test_mask_representation_to_bits() {
let masks = vec![Mask::new(3, 0b101), Mask::new(5, 0b00101)];
let mask_rep = MaskRepresentation::new(masks);
let bits = mask_rep.to_bits();
assert_eq!(bits, vec![1, 0, 1, 1, 0, 1, 0, 0]);
}
#[test]
fn test_mask_representation_convert() {
let bits = vec![1, 0, 1, 1, 0, 1, 0, 0];
let sizes = vec![3, 5];
let mask_rep = MaskRepresentation::from_bits(bits.clone(), sizes.clone());
let new_sizes = [4, 4];
let new_mask_rep = mask_rep.convert(&new_sizes);
assert_eq!(new_mask_rep.rep.len(), 2);
assert_eq!(new_mask_rep.rep[0], Mask::new(4, 0b1101));
assert_eq!(new_mask_rep.rep[1], Mask::new(4, 0b0010));
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/bitwise_keccakf.rs | ceno_zkvm/src/precompiles/bitwise_keccakf.rs | use crate::scheme::utils::gkr_witness;
use ff_ext::ExtensionField;
use itertools::{Itertools, iproduct, izip};
use mpcs::PolynomialCommitmentScheme;
use multilinear_extensions::{
ChallengeId, Expression, ToExpr, WitIn,
mle::{MultilinearExtension, Point, PointAndEval},
util::ceil_log2,
};
use p3::{field::FieldAlgebra, util::indices_arr};
use std::{array::from_fn, mem::transmute, sync::Arc};
use sumcheck::{
macros::{entered_span, exit_span},
util::optimal_sumcheck_threads,
};
use tiny_keccak::keccakf;
use transcript::{BasicTranscript, Transcript};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use gkr_iop::{
OutEvalGroups, ProtocolBuilder, ProtocolWitnessGenerator,
chip::Chip,
circuit_builder::{CircuitBuilder, ConstraintSystem},
cpu::{CpuBackend, CpuProver},
error::CircuitBuilderError,
evaluation::EvalExpression,
gkr::{
GKRCircuit, GKRProverOutput,
booleanhypercube::CYCLIC_POW2_5,
layer::Layer,
layer_constraint_system::{LayerConstraintSystem, expansion_expr},
},
selector::{SelectorContext, SelectorType},
utils::{indices_arr_with_offset, lk_multiplicity::LkMultiplicity, wits_fixed_and_eqs},
};
fn to_xyz(i: usize) -> (usize, usize, usize) {
assert!(i < STATE_SIZE);
(i / 64 % 5, (i / 64) / 5, i % 64)
}
fn from_xyz(x: usize, y: usize, z: usize) -> usize {
64 * (5 * y + x) + z
}
fn and_expr<E: ExtensionField>(a: Expression<E>, b: Expression<E>) -> Expression<E> {
a.clone() * b.clone()
}
fn not_expr<E: ExtensionField>(a: Expression<E>) -> Expression<E> {
one_expr() - a
}
fn xor_expr<E: ExtensionField>(a: Expression<E>, b: Expression<E>) -> Expression<E> {
a.clone() + b.clone() - E::BaseField::from_canonical_u32(2).expr() * a * b
}
fn zero_expr<E: ExtensionField>() -> Expression<E> {
E::BaseField::ZERO.expr()
}
fn one_expr<E: ExtensionField>() -> Expression<E> {
E::BaseField::ONE.expr()
}
fn c_expr<E: ExtensionField>(x: usize, z: usize, state_wits: &[Expression<E>]) -> Expression<E> {
(0..5)
.map(|y| state_wits[from_xyz(x, y, z)].clone())
.fold(zero_expr(), xor_expr)
}
fn from_xz(x: usize, z: usize) -> usize {
x * 64 + z
}
fn d_expr<E: ExtensionField>(x: usize, z: usize, c_wits: &[Expression<E>]) -> Expression<E> {
let lhs = from_xz((x + 5 - 1) % 5, z);
let rhs = from_xz((x + 1) % 5, (z + 64 - 1) % 64);
xor_expr(c_wits[lhs].clone(), c_wits[rhs].clone())
}
fn keccak_phase1_witness<E: ExtensionField>(states: &[[u64; 25]]) -> RowMajorMatrix<E::BaseField> {
let num_states = states.len();
assert!(num_states.is_power_of_two());
let mut values = vec![E::BaseField::ONE; STATE_SIZE * num_states];
for (state_idx, state) in states.iter().enumerate() {
for (word_idx, &word) in state.iter().enumerate() {
for bit_idx in 0..64 {
let bit = ((word >> bit_idx) & 1) == 1;
values[state_idx * STATE_SIZE + word_idx * 64 + bit_idx] =
E::BaseField::from_bool(bit);
}
}
}
let mut rmm =
RowMajorMatrix::new_by_values(values, STATE_SIZE, InstancePaddingStrategy::Default);
rmm.padding_by_strategy();
rmm
}
const ROUNDS: usize = 24;
const RC: [u64; ROUNDS] = [
1u64,
0x8082u64,
0x800000000000808au64,
0x8000000080008000u64,
0x808bu64,
0x80000001u64,
0x8000000080008081u64,
0x8000000000008009u64,
0x8au64,
0x88u64,
0x80008009u64,
0x8000000au64,
0x8000808bu64,
0x800000000000008bu64,
0x8000000000008089u64,
0x8000000000008003u64,
0x8000000000008002u64,
0x8000000000000080u64,
0x800au64,
0x800000008000000au64,
0x8000000080008081u64,
0x8000000000008080u64,
0x80000001u64,
0x8000000080008008u64,
];
const X: usize = 5;
const Y: usize = 5;
const Z: usize = 64;
const STATE_SIZE: usize = X * Y * Z;
const C_SIZE: usize = X * Z;
const D_SIZE: usize = X * Z;
const KECCAK_OUTPUT32_SIZE: usize = 50;
const KECCAK_INPUT32_SIZE: usize = 50;
const KECCAK_OUT_EVAL_SIZE: usize = size_of::<KeccakOutEvals<u8>>();
const KECCAK_ALL_IN_EVAL_SIZE: usize = size_of::<KeccakInEvals<u8>>();
#[derive(Clone, Debug)]
pub struct KeccakParams;
#[derive(Clone, Debug)]
#[repr(C)]
pub struct KeccakOutEvals<T> {
pub output32: [T; KECCAK_OUTPUT32_SIZE],
pub input32: [T; KECCAK_INPUT32_SIZE],
}
#[derive(Clone, Debug)]
#[repr(C)]
pub struct Output32Layer<WitT, EqT> {
output: [WitT; STATE_SIZE],
sel: EqT,
}
#[derive(Clone, Debug)]
#[repr(C)]
pub struct IotaLayer<WitT, EqT> {
chi_output: [WitT; Z],
eq: EqT,
}
#[derive(Clone, Debug)]
#[repr(C)]
pub struct RhoPiAndChiLayer<WitT, EqT> {
theta_output: [WitT; STATE_SIZE],
eq_round_out: EqT,
eq_iota: EqT,
}
#[derive(Clone, Debug)]
#[repr(C)]
pub struct ThetaThirdLayer<WitT, EqT> {
d: [WitT; D_SIZE],
state_copy: [WitT; STATE_SIZE],
eq: EqT,
}
#[derive(Clone, Debug)]
#[repr(C)]
pub struct ThetaSecondLayer<WitT, EqT> {
c: [WitT; C_SIZE],
eq: EqT,
}
#[derive(Clone, Debug)]
#[repr(C)]
pub struct ThetaFirstLayer<WitT, EqT, OptionEqT> {
round_input: [WitT; STATE_SIZE],
eq_c: EqT,
eq_copy: EqT,
sel_keccak_out: OptionEqT,
}
#[derive(Clone, Debug)]
#[repr(C)]
pub struct KeccakRound<WitT, EqT, OptionEqT> {
iota: IotaLayer<WitT, EqT>,
rho_pi_and_chi: RhoPiAndChiLayer<WitT, EqT>,
theta_third: ThetaThirdLayer<WitT, EqT>,
theta_second: ThetaSecondLayer<WitT, EqT>,
theta_first: ThetaFirstLayer<WitT, EqT, OptionEqT>,
}
const OUTPUT32_WIT_SIZE: usize = size_of::<Output32Layer<u8, ()>>();
const IOTA_WIT_SIZE: usize = size_of::<IotaLayer<u8, ()>>();
const RHO_PI_AND_CHI_WIT_SIZE: usize = size_of::<RhoPiAndChiLayer<u8, ()>>();
const THETA_THIRD_WIT_SIZE: usize = size_of::<ThetaThirdLayer<u8, ()>>();
const THETA_SECOND_WIT_SIZE: usize = size_of::<ThetaSecondLayer<u8, ()>>();
const THETA_FIRST_WIT_SIZE: usize = size_of::<ThetaFirstLayer<u8, (), ()>>();
#[derive(Clone, Debug)]
#[repr(C)]
pub struct KeccakRoundEval<T> {
iota: [T; IOTA_WIT_SIZE],
rho_pi_and_chi: [T; RHO_PI_AND_CHI_WIT_SIZE],
theta_third: [T; THETA_THIRD_WIT_SIZE],
theta_second: [T; THETA_SECOND_WIT_SIZE],
theta_first: [T; THETA_FIRST_WIT_SIZE],
}
#[derive(Clone, Debug)]
pub struct KeccakLayers<WitT, EqT> {
pub output32: Output32Layer<WitT, EqT>,
pub inner_rounds: [KeccakRound<WitT, EqT, ()>; 23],
pub first_round: KeccakRound<WitT, EqT, EqT>,
}
#[derive(Clone, Debug)]
pub struct KeccakInEvals<T> {
pub output32: [T; STATE_SIZE],
pub inner_rounds: [KeccakRoundEval<T>; 23],
pub first_round: KeccakRoundEval<T>,
}
#[derive(Clone, Debug)]
pub struct KeccakLayout<E: ExtensionField> {
pub layers: KeccakLayers<WitIn, WitIn>,
pub layer_in_evals: KeccakInEvals<usize>,
pub final_out_evals: KeccakOutEvals<usize>,
pub alpha: Expression<E>,
pub beta: Expression<E>,
}
#[allow(clippy::missing_transmute_annotations)]
fn allocate_round<OptionEqT>(
theta_first: fn() -> ThetaFirstLayer<WitIn, WitIn, OptionEqT>,
) -> KeccakRound<WitIn, WitIn, OptionEqT> {
const IOTA_EQ_SIZE: usize = std::mem::size_of::<IotaLayer<(), u8>>();
const RHO_PI_AND_CHI_EQ_SIZE: usize = std::mem::size_of::<RhoPiAndChiLayer<(), u8>>();
const THETA_THIRD_EQ_SIZE: usize = std::mem::size_of::<ThetaThirdLayer<(), u8>>();
const THETA_SECOND_EQ_SIZE: usize = std::mem::size_of::<ThetaSecondLayer<(), u8>>();
let iota = wits_fixed_and_eqs::<IOTA_WIT_SIZE, 0, IOTA_EQ_SIZE>();
let iota = IotaLayer {
chi_output: iota.0,
eq: iota.2[0],
};
let (theta_output, _, [eq_round_out, eq_iota]) =
wits_fixed_and_eqs::<RHO_PI_AND_CHI_WIT_SIZE, 0, RHO_PI_AND_CHI_EQ_SIZE>();
let rho_pi_and_chi = RhoPiAndChiLayer {
theta_output,
eq_round_out,
eq_iota,
};
let (theta_third_input, _, [eq]) =
wits_fixed_and_eqs::<THETA_THIRD_WIT_SIZE, 0, THETA_THIRD_EQ_SIZE>();
let (d, state_copy) = unsafe { transmute(theta_third_input) };
let theta_third = ThetaThirdLayer { d, state_copy, eq };
let (c, _, [eq]) = wits_fixed_and_eqs::<THETA_SECOND_WIT_SIZE, 0, THETA_SECOND_EQ_SIZE>();
let theta_second = ThetaSecondLayer { c, eq };
let theta_first = theta_first();
KeccakRound {
iota,
rho_pi_and_chi,
theta_third,
theta_second,
theta_first,
}
}
impl<E: ExtensionField> Default for KeccakLayout<E> {
#[allow(clippy::missing_transmute_annotations)]
fn default() -> Self {
// allocate evaluation expressions
let final_out_evals = {
let final_out_evals = indices_arr::<KECCAK_OUT_EVAL_SIZE>();
unsafe {
transmute::<[usize; KECCAK_OUT_EVAL_SIZE], KeccakOutEvals<usize>>(final_out_evals)
}
};
let layer_in_evals = {
let layer_in_evals =
indices_arr_with_offset::<KECCAK_ALL_IN_EVAL_SIZE, KECCAK_OUT_EVAL_SIZE>();
unsafe {
transmute::<[usize; KECCAK_ALL_IN_EVAL_SIZE], KeccakInEvals<usize>>(layer_in_evals)
}
};
// allocate witnesses, fixed, and eqs
let layers = {
let (output, _, [sel]) = wits_fixed_and_eqs::<OUTPUT32_WIT_SIZE, 0, 1>();
let output32 = Output32Layer { output, sel };
let inner_rounds = from_fn(|_| {
allocate_round(|| {
const THETA_FIRST_EQ_SIZE: usize = size_of::<ThetaFirstLayer<(), u8, ()>>();
let (round_input, _, eqs) =
wits_fixed_and_eqs::<THETA_FIRST_WIT_SIZE, 0, THETA_FIRST_EQ_SIZE>();
let (eq_c, eq_copy) = unsafe { transmute(eqs) };
ThetaFirstLayer {
round_input,
eq_c,
eq_copy,
sel_keccak_out: (),
}
})
});
let first_round = allocate_round(|| {
const KECCAK_FIRST_EQ_SIZE: usize = size_of::<ThetaFirstLayer<(), u8, u8>>();
let (round_input, _, eqs) =
wits_fixed_and_eqs::<THETA_FIRST_WIT_SIZE, 0, KECCAK_FIRST_EQ_SIZE>();
let (eq_c, eq_copy, sel_keccak_out) = unsafe { transmute(eqs) };
ThetaFirstLayer {
round_input,
eq_c,
eq_copy,
sel_keccak_out,
}
});
KeccakLayers {
output32,
inner_rounds,
first_round,
}
};
Self {
layers,
layer_in_evals,
final_out_evals,
alpha: Expression::Challenge(0 as ChallengeId, 1, E::ONE, E::ZERO),
beta: Expression::Challenge(1 as ChallengeId, 1, E::ONE, E::ZERO),
}
}
}
fn output32_layer<E: ExtensionField>(
layer: &Output32Layer<WitIn, WitIn>,
out_evals: &[usize],
in_evals: &[usize],
alpha: Expression<E>,
beta: Expression<E>,
) -> Layer<E> {
let mut system = LayerConstraintSystem::new(STATE_SIZE, 0, 0, None, alpha, beta);
let keccak_output = &layer.output;
let mut keccak_output32_iter = out_evals.iter().map(|x| EvalExpression::Single(*x));
// process keccak output
let sel_type = SelectorType::OrderedSparse32 {
indices: vec![CYCLIC_POW2_5[ROUNDS - 1] as usize],
expression: layer.sel.expr(),
};
for x in 0..X {
for y in 0..Y {
for k in 0..2 {
// create an expression combining 4 elements of state8 into a single 32-bit felt
let expr = expansion_expr::<E, 32>(
&keccak_output[from_xyz(x, y, 32 * k)..from_xyz(x, y, 32 * (k + 1))]
.iter()
.map(|e| (1, e.expr()))
.collect_vec(),
);
system.add_non_zero_constraint(
expr,
(sel_type.clone(), keccak_output32_iter.next().unwrap()),
format!("build 32-bit output: {x}, {y}, {k}"),
);
}
}
}
system.into_layer("Round 23: final".to_string(), in_evals.to_vec(), 0)
}
fn iota_layer<E: ExtensionField>(
layer: &IotaLayer<WitIn, WitIn>,
iota_out_evals: &[usize],
iota_in_evals: &[usize],
round_id: usize,
alpha: Expression<E>,
beta: Expression<E>,
) -> Layer<E> {
let mut system =
LayerConstraintSystem::new(STATE_SIZE, 0, 0, Some(layer.eq.expr()), alpha, beta);
let bits = layer.chi_output.iter().map(|e| e.expr()).collect_vec();
let round_value = RC[round_id];
let sel_type = SelectorType::Whole(layer.eq.expr());
iota_out_evals.iter().enumerate().for_each(|(i, out_eval)| {
let expr = {
let round_bit = E::BaseField::from_canonical_u64((round_value >> i) & 1).expr();
xor_expr(bits[i].clone(), round_bit)
};
system.add_non_zero_constraint(
expr,
(sel_type.clone(), EvalExpression::Single(*out_eval)),
format!("Round {round_id}: Iota:: compute output {i}"),
);
});
system.into_layer(
format!("Round {round_id}: Iota:: compute output"),
iota_in_evals.to_vec(),
0,
)
}
fn chi_expr<E: ExtensionField>(i: usize, bits: &[Expression<E>]) -> Expression<E> {
assert_eq!(bits.len(), STATE_SIZE);
let (x, y, z) = to_xyz(i);
let rhs = and_expr(
not_expr(bits[from_xyz((x + 1) % X, y, z)].clone()),
bits[from_xyz((x + 2) % X, y, z)].clone(),
);
xor_expr((bits[i]).clone(), rhs)
}
fn rho_pi_and_chi_layer<E: ExtensionField>(
layer: &RhoPiAndChiLayer<WitIn, WitIn>,
out_evals: &[usize],
in_evals: &[usize],
round_id: usize,
alpha: Expression<E>,
beta: Expression<E>,
) -> Layer<E> {
let mut system = LayerConstraintSystem::new(STATE_SIZE, 0, 0, None, alpha, beta);
// Apply the effects of the rho + pi permutation directly o the argument of chi
// No need for a separate layer
let perm = rho_and_pi_permutation();
let theta_output = &layer.theta_output;
let permuted = (0..STATE_SIZE)
.map(|i| theta_output[perm[i]].expr())
.collect_vec();
let mut out_eval_iter = out_evals.iter().map(|o| EvalExpression::Single(*o));
(0..STATE_SIZE).for_each(|i| {
let (x, y, _z) = to_xyz(i);
let eq = if x == 0 && y == 0 {
layer.eq_iota.expr()
} else {
layer.eq_round_out.expr()
};
let sel_type = SelectorType::Whole(eq);
system.add_non_zero_constraint(
chi_expr(i, &permuted),
(sel_type, out_eval_iter.next().unwrap()),
format!("Round {round_id}: Chi:: apply rho, pi and chi [{i}]"),
)
});
system.into_layer(
format!("Round {round_id}: Chi:: apply rho, pi and chi"),
in_evals.to_vec(),
0,
)
}
fn theta_third_layer<E: ExtensionField>(
layer: &ThetaThirdLayer<WitIn, WitIn>,
out_evals: &[usize],
in_evals: &[usize],
round_id: usize,
alpha: Expression<E>,
beta: Expression<E>,
) -> Layer<E> {
let mut system = LayerConstraintSystem::new(D_SIZE + STATE_SIZE, 0, 0, None, alpha, beta);
// Compute post-theta state using original state and D[][] values
let mut out_eval_iter = out_evals.iter().map(|o| EvalExpression::Single(*o));
let sel_type = SelectorType::Whole(layer.eq.expr());
(0..STATE_SIZE).for_each(|i| {
let (x, _, z) = to_xyz(i);
let expr = xor_expr(layer.state_copy[i].expr(), layer.d[from_xz(x, z)].expr());
system.add_non_zero_constraint(
expr,
(sel_type.clone(), out_eval_iter.next().unwrap()),
format!("Theta::compute output [{i}]"),
);
});
system.into_layer(
format!("Round {round_id}: Theta::compute output"),
in_evals.to_vec(),
0,
)
}
fn theta_second_layer<E: ExtensionField>(
layer: &ThetaSecondLayer<WitIn, WitIn>,
out_evals: &[usize],
in_evals: &[usize],
round_id: usize,
alpha: Expression<E>,
beta: Expression<E>,
) -> Layer<E> {
let mut system = LayerConstraintSystem::new(D_SIZE + STATE_SIZE, 0, 0, None, alpha, beta);
// Compute D[][] from C[][] values
let c = layer.c.iter().map(|c| c.expr()).collect_vec();
let mut out_eval_iter = out_evals.iter().map(|o| EvalExpression::Single(*o));
let sel_type = SelectorType::Whole(layer.eq.expr());
iproduct!(0..5usize, 0..64usize).for_each(|(x, z)| {
let expr = d_expr(x, z, &c);
system.add_non_zero_constraint(
expr,
(sel_type.clone(), out_eval_iter.next().unwrap()),
format!("Theta::compute D[{x}][{z}]"),
);
});
system.into_layer(
format!("Round {round_id}: Theta::compute D[x][z]"),
in_evals.to_vec(),
0,
)
}
fn theta_first_layer<E: ExtensionField>(
layer: &ThetaFirstLayer<WitIn, WitIn, ()>,
d_out_evals: &[usize],
state_copy_out_evals: &[usize],
in_evals: &[usize],
round_id: usize,
alpha: Expression<E>,
beta: Expression<E>,
) -> Layer<E> {
let mut system = LayerConstraintSystem::new(STATE_SIZE, 0, 0, None, alpha, beta);
let state_wits = layer.round_input.iter().map(|s| s.expr()).collect_vec();
// Compute C[][] from state
let mut out_eval_iter = d_out_evals.iter().map(|o| EvalExpression::Single(*o));
let sel_type = SelectorType::Whole(layer.eq_c.expr());
iproduct!(0..5usize, 0..64usize).for_each(|(x, z)| {
let expr = c_expr(x, z, &state_wits);
system.add_non_zero_constraint(
expr,
(sel_type.clone(), out_eval_iter.next().unwrap()),
format!("Theta::compute C[{x}][{z}]"),
);
});
// Copy state
let mut out_eval_iter = state_copy_out_evals
.iter()
.map(|o| EvalExpression::Single(*o));
let sel_type = SelectorType::Whole(layer.eq_copy.expr());
state_wits.into_iter().enumerate().for_each(|(i, expr)| {
let (x, y, z) = to_xyz(i);
system.add_non_zero_constraint(
expr,
(sel_type.clone(), out_eval_iter.next().unwrap()),
format!("Theta::copy state[{x}][{y}][{z}]"),
)
});
system.into_layer(
format!("Round {round_id}: Theta::compute C[x][z]"),
in_evals.to_vec(),
0,
)
}
#[allow(clippy::too_many_arguments)]
fn keccak_first_layer<E: ExtensionField>(
layer: &ThetaFirstLayer<WitIn, WitIn, WitIn>,
d_out_evals: &[usize],
state_copy_out_evals: &[usize],
input32_out_evals: &[usize],
in_evals: &[usize],
alpha: Expression<E>,
beta: Expression<E>,
) -> Layer<E> {
let mut system = LayerConstraintSystem::new(STATE_SIZE, 0, 0, None, alpha, beta);
let state_wits = layer.round_input.iter().map(|s| s.expr()).collect_vec();
// Compute C[][] from state
let mut out_eval_iter = d_out_evals.iter().map(|o| EvalExpression::Single(*o));
let sel_type = SelectorType::Whole(layer.eq_c.expr());
iproduct!(0..5usize, 0..64usize).for_each(|(x, z)| {
let expr = c_expr(x, z, &state_wits);
system.add_non_zero_constraint(
expr,
(sel_type.clone(), out_eval_iter.next().unwrap()),
format!("Theta::compute C[{x}][{z}]"),
);
});
// Copy state
let mut out_eval_iter = state_copy_out_evals
.iter()
.map(|o| EvalExpression::Single(*o));
let sel_type = SelectorType::Whole(layer.eq_copy.expr());
state_wits.into_iter().enumerate().for_each(|(i, expr)| {
let (x, y, z) = to_xyz(i);
system.add_non_zero_constraint(
expr,
(sel_type.clone(), out_eval_iter.next().unwrap()),
format!("Theta::copy state[{x}][{y}][{z}]"),
)
});
// process keccak output
let mut out_eval_iter = input32_out_evals.iter().map(|x| EvalExpression::Single(*x));
let sel_type = SelectorType::OrderedSparse32 {
indices: vec![CYCLIC_POW2_5[0] as usize],
expression: layer.sel_keccak_out.expr(),
};
for x in 0..X {
for y in 0..Y {
for k in 0..2 {
// create an expression combining 4 elements of state8 into a single 32-bit felt
let expr = expansion_expr::<E, 32>(
&layer.round_input[from_xyz(x, y, 32 * k)..from_xyz(x, y, 32 * (k + 1))]
.iter()
.map(|e| (1, e.expr()))
.collect_vec(),
);
system.add_non_zero_constraint(
expr,
(sel_type.clone(), out_eval_iter.next().unwrap()),
format!("build 32-bit input: {x}, {y}, {k}"),
);
}
}
}
system.into_layer(
"Round 0: Theta::compute C[x][z], build 32-bit input".to_string(),
in_evals.to_vec(),
0,
)
}
impl<E: ExtensionField> KeccakLayout<E> {
pub fn build_gkr_chip_old(
_cb: &mut CircuitBuilder<E>,
_params: KeccakParams,
) -> Result<(Self, Chip<E>), CircuitBuilderError> {
let layout = Self::default();
let mut chip = Chip {
n_fixed: 0,
n_committed: STATE_SIZE,
n_challenges: 0,
n_evaluations: KECCAK_ALL_IN_EVAL_SIZE + KECCAK_OUT_EVAL_SIZE,
layers: vec![],
final_out_evals: unsafe {
transmute::<KeccakOutEvals<usize>, [usize; KECCAK_OUT_EVAL_SIZE]>(
layout.final_out_evals.clone(),
)
}
.to_vec(),
};
chip.add_layer(output32_layer(
&layout.layers.output32,
&layout.final_out_evals.output32,
&layout.layer_in_evals.output32,
layout.alpha.clone(),
layout.beta.clone(),
));
macro_rules! add_common_layers {
($round_layers:expr, $round_output:expr, $round_in_evals:expr, $round_id:expr, $alpha:expr, $beta:expr) => {
chip.add_layer(iota_layer(
&$round_layers.iota,
&$round_output[..Z],
&$round_in_evals.iota,
$round_id,
$alpha,
$beta,
));
let rho_pi_and_chi_out_evals =
[$round_in_evals.iota.to_vec(), $round_output[Z..].to_vec()].concat();
chip.add_layer(rho_pi_and_chi_layer(
&$round_layers.rho_pi_and_chi,
&rho_pi_and_chi_out_evals,
&$round_in_evals.rho_pi_and_chi,
$round_id,
$alpha,
$beta,
));
chip.add_layer(theta_third_layer(
&$round_layers.theta_third,
&$round_in_evals.rho_pi_and_chi,
&$round_in_evals.theta_third,
$round_id,
$alpha,
$beta,
));
chip.add_layer(theta_second_layer(
&$round_layers.theta_second,
&$round_in_evals.theta_third,
&$round_in_evals.theta_second,
$round_id,
$alpha,
$beta,
));
};
}
// add Round 1..24
let round_output = izip!(
(1..ROUNDS),
&layout.layers.inner_rounds,
&layout.layer_in_evals.inner_rounds
)
.rev()
.fold(
&layout.layer_in_evals.output32,
|round_output, (round_id, round_layers, round_in_evals)| {
add_common_layers!(
round_layers,
round_output,
round_in_evals,
round_id,
layout.alpha.clone(),
layout.beta.clone()
);
chip.add_layer(theta_first_layer(
&round_layers.theta_first,
&round_in_evals.theta_second,
&round_in_evals.theta_third[D_SIZE..],
&round_in_evals.theta_first,
round_id,
layout.alpha.clone(),
layout.beta.clone(),
));
&round_in_evals.theta_first
},
);
// add Round 0
let (round_layers, round_in_evals) = (
&layout.layers.first_round,
&layout.layer_in_evals.first_round,
);
add_common_layers!(
round_layers,
round_output,
round_in_evals,
0,
layout.alpha.clone(),
layout.beta.clone()
);
chip.add_layer(keccak_first_layer(
&round_layers.theta_first,
&round_in_evals.theta_second,
&round_in_evals.theta_third[D_SIZE..],
&layout.final_out_evals.input32,
&round_in_evals.theta_first,
layout.alpha.clone(),
layout.beta.clone(),
));
Ok((layout, chip))
}
}
impl<E: ExtensionField> ProtocolBuilder<E> for KeccakLayout<E> {
type Params = KeccakParams;
fn finalize(&mut self, _cb: &mut CircuitBuilder<E>) -> (OutEvalGroups, Chip<E>) {
unimplemented!()
}
fn build_layer_logic(
_cb: &mut CircuitBuilder<E>,
_params: Self::Params,
) -> Result<Self, CircuitBuilderError> {
unimplemented!()
}
fn n_committed(&self) -> usize {
STATE_SIZE
}
fn n_fixed(&self) -> usize {
0
}
fn n_challenges(&self) -> usize {
0
}
fn n_layers(&self) -> usize {
5 * ROUNDS + 1
}
fn n_evaluations(&self) -> usize {
KECCAK_ALL_IN_EVAL_SIZE + KECCAK_OUT_EVAL_SIZE
}
}
pub struct KeccakTrace<E: ExtensionField> {
pub bits: RowMajorMatrix<E::BaseField>,
}
impl<E> ProtocolWitnessGenerator<E> for KeccakLayout<E>
where
E: ExtensionField,
{
type Trace = KeccakTrace<E>;
fn phase1_witness_group(
&self,
_phase1: Self::Trace,
_wits: [&mut RowMajorMatrix<E::BaseField>; 2],
_lk_multiplicity: &mut LkMultiplicity,
) {
// phase1.bits
}
fn fixed_witness_group(&self) -> RowMajorMatrix<E::BaseField> {
RowMajorMatrix::new_by_values(vec![], 1, InstancePaddingStrategy::Default)
}
}
// based on
// https://github.com/0xPolygonHermez/zkevm-prover/blob/main/tools/sm/keccak_f/keccak_rho.cpp
fn rho<T: Copy + Default>(state: &[T]) -> Vec<T> {
assert_eq!(state.len(), STATE_SIZE);
let (mut x, mut y) = (1, 0);
let mut ret = [T::default(); STATE_SIZE];
for z in 0..Z {
ret[from_xyz(0, 0, z)] = state[from_xyz(0, 0, z)];
}
for t in 0..24 {
for z in 0..Z {
let new_z = (1000 * Z + z - (t + 1) * (t + 2) / 2) % Z;
ret[from_xyz(x, y, z)] = state[from_xyz(x, y, new_z)];
}
(x, y) = (y, (2 * x + 3 * y) % Y);
}
ret.to_vec()
}
// https://github.com/0xPolygonHermez/zkevm-prover/blob/main/tools/sm/keccak_f/keccak_pi.cpp
fn pi<T: Copy + Default>(state: &[T]) -> Vec<T> {
assert_eq!(state.len(), STATE_SIZE);
let mut ret = [T::default(); STATE_SIZE];
iproduct!(0..X, 0..Y, 0..Z)
.map(|(x, y, z)| ret[from_xyz(x, y, z)] = state[from_xyz((x + 3 * y) % X, x, z)])
.count();
ret.to_vec()
}
// Combines rho and pi steps into a single permutation
fn rho_and_pi_permutation() -> Vec<usize> {
let perm: [usize; STATE_SIZE] = from_fn(|i| i);
pi(&rho(&perm))
}
pub fn setup_gkr_circuit<E: ExtensionField>()
-> Result<(KeccakLayout<E>, GKRCircuit<E>), CircuitBuilderError> {
let params = KeccakParams {};
let mut cs = ConstraintSystem::new(|| "bitwise_keccak");
let mut circuit_builder = CircuitBuilder::<E>::new(&mut cs);
let (layout, chip) = KeccakLayout::build_gkr_chip_old(&mut circuit_builder, params)?;
Ok((layout, chip.gkr_circuit()))
}
pub fn run_keccakf<E: ExtensionField, PCS: PolynomialCommitmentScheme<E> + 'static>(
gkr_circuit: GKRCircuit<E>,
states: Vec<[u64; 25]>,
verify: bool,
test: bool,
) {
let num_instances = states.len();
let log2_num_instances = ceil_log2(num_instances);
let num_threads = optimal_sumcheck_threads(log2_num_instances);
let span = entered_span!("keccak_witness", profiling_1 = true);
let bits = keccak_phase1_witness::<E>(&states);
exit_span!(span);
let span = entered_span!("phase1_witness_group", profiling_1 = true);
let phase1_witness = bits;
exit_span!(span);
let mut prover_transcript = BasicTranscript::<E>::new(b"protocol");
// Omit the commit phase1 and phase2.
let span = entered_span!("gkr_witness", profiling_1 = true);
let phase1_witness_group = phase1_witness
.to_mles()
.into_iter()
.map(Arc::new)
.collect_vec();
#[allow(clippy::type_complexity)]
let (gkr_witness, gkr_output) = gkr_witness::<E, PCS, CpuBackend<E, PCS>, CpuProver<_>>(
&gkr_circuit,
&phase1_witness_group,
&[],
&[],
&[],
&[],
&[],
);
exit_span!(span);
let out_evals = {
let mut point = Point::new();
point.extend(prover_transcript.sample_vec(log2_num_instances).to_vec());
if test {
// sanity check on first instance only
// TODO test all instances
let result_from_witness = gkr_witness.layers[0]
.iter()
.map(|bit| {
if <E as ExtensionField>::BaseField::ZERO == bit.get_base_field_vec()[0] {
<E as ExtensionField>::BaseField::ZERO
} else {
<E as ExtensionField>::BaseField::ONE
}
})
.collect_vec();
let mut state = states.clone();
keccakf(&mut state[0]);
// TODO test this
assert_eq!(
keccak_phase1_witness::<E>(&state) // result from tiny keccak
.to_mles()
.into_iter()
.map(|b: MultilinearExtension<'_, E>| b.get_base_field_vec()[0])
.collect_vec(),
result_from_witness
);
}
gkr_output
.0
.iter()
.map(|bit| PointAndEval {
point: point.clone(),
eval: bit.evaluate(&point),
})
.collect_vec()
};
let span = entered_span!("prove", profiling_1 = true);
let selector_ctxs = vec![
SelectorContext::new(0, num_instances, log2_num_instances);
gkr_circuit
.layers
.first()
.map(|layer| layer.out_sel_and_eval_exprs.len())
.unwrap()
];
let GKRProverOutput { gkr_proof, .. } = gkr_circuit
.prove::<CpuBackend<E, PCS>, CpuProver<_>>(
num_threads,
log2_num_instances,
gkr_witness,
&out_evals,
&[],
&[],
&mut prover_transcript,
&selector_ctxs,
)
.expect("Failed to prove phase");
exit_span!(span);
if verify {
{
let mut verifier_transcript = BasicTranscript::<E>::new(b"protocol");
// TODO verify output
let mut point = Point::new();
point.extend(verifier_transcript.sample_vec(log2_num_instances).to_vec());
gkr_circuit
.verify(
log2_num_instances,
gkr_proof,
&out_evals,
&[],
&[],
&[],
&mut verifier_transcript,
&selector_ctxs,
)
.expect("GKR verify failed");
// Omit the PCS opening phase.
}
}
}
#[cfg(test)]
mod tests {
use super::*;
use ff_ext::GoldilocksExt2;
use mpcs::BasefoldDefault;
use rand::{RngCore, SeedableRng};
#[test]
#[ignore = "stack overflow. force enable it will cause occationally cause unittest ci hang"]
fn test_keccakf() {
type E = GoldilocksExt2;
type Pcs = BasefoldDefault<E>;
let _ = tracing_subscriber::fmt()
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/weierstrass.rs | ceno_zkvm/src/precompiles/weierstrass.rs | use generic_array::GenericArray;
use num::BigUint;
use sp1_curves::params::NumWords;
pub mod test_utils;
pub mod weierstrass_add;
pub mod weierstrass_decompress;
pub mod weierstrass_double;
#[derive(Clone, Default, Debug)]
pub struct EllipticCurveAddInstance<P: NumWords> {
/// The first point as a list of words.
pub p: GenericArray<u32, P::WordsCurvePoint>,
/// The second point as a list of words.
pub q: GenericArray<u32, P::WordsCurvePoint>,
}
#[derive(Clone, Default, Debug)]
pub struct EllipticCurveDoubleInstance<P: NumWords> {
/// The point as a list of words.
pub p: GenericArray<u32, P::WordsCurvePoint>,
}
/// Elliptic Curve Point Decompress Event.
///
/// This event is emitted when an elliptic curve point decompression operation is performed.
#[derive(Debug, Clone)]
pub struct EllipticCurveDecompressInstance<P: NumWords> {
/// The sign bit of the point.
pub sign_bit: bool,
/// The x coordinate as a list of bytes.
pub x: BigUint,
/// The old value of y.
pub old_y_words: GenericArray<u32, P::WordsFieldElement>,
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/mod.rs | ceno_zkvm/src/precompiles/mod.rs | mod bitwise_keccakf;
mod fptower;
mod lookup_keccakf;
mod uint256;
mod utils;
mod weierstrass;
pub use lookup_keccakf::{
AND_LOOKUPS, KECCAK_INPUT32_SIZE, KECCAK_OUT_EVAL_SIZE, KeccakInstance, KeccakLayout,
KeccakParams, KeccakStateInstance, KeccakTrace, KeccakWitInstance, RANGE_LOOKUPS,
ROUNDS as KECCAK_ROUNDS, ROUNDS_CEIL_LOG2 as KECCAK_ROUNDS_CEIL_LOG2, XOR_LOOKUPS,
run_lookup_keccakf, setup_gkr_circuit as setup_lookup_keccak_gkr_circuit,
};
pub use bitwise_keccakf::{
KeccakLayout as BitwiseKeccakLayout, run_keccakf as run_bitwise_keccakf,
setup_gkr_circuit as setup_bitwise_keccak_gkr_circuit,
};
use ff_ext::ExtensionField;
pub use fptower::{
fp::{FpOpInstance, FpOpLayout, FpOpTrace},
fp2_addsub::{Fp2AddSubAssignLayout, Fp2AddSubInstance, Fp2AddSubTrace},
fp2_mul::{Fp2MulAssignLayout, Fp2MulInstance, Fp2MulTrace},
};
use gkr_iop::selector::SelectorType;
pub use uint256::{
Uint256InvLayout, Uint256InvSpec, Uint256InvTrace, Uint256MulInstance, Uint256MulLayout,
Uint256MulTrace, run_uint256_mul, setup_uint256mul_gkr_circuit as setup_uint256_mul_circuit,
};
pub use weierstrass::{
EllipticCurveAddInstance, EllipticCurveDecompressInstance, EllipticCurveDoubleInstance,
test_utils::{random_point_pairs, random_points},
weierstrass_add::{
WeierstrassAddAssignLayout, WeierstrassAddAssignTrace, run_weierstrass_add,
setup_gkr_circuit as setup_weierstrass_add_circuit,
},
weierstrass_decompress::{
WeierstrassDecompressLayout, WeierstrassDecompressTrace, run_weierstrass_decompress,
setup_gkr_circuit as setup_weierstrass_decompress_circuit,
},
weierstrass_double::{
WeierstrassDoubleAssignLayout, WeierstrassDoubleAssignTrace, run_weierstrass_double,
setup_gkr_circuit as setup_weierstrass_double_circuit,
},
};
#[derive(Clone, Debug)]
pub struct SelectorTypeLayout<E: ExtensionField> {
pub sel_mem_read: SelectorType<E>,
pub sel_mem_write: SelectorType<E>,
pub sel_lookup: SelectorType<E>,
pub sel_zero: SelectorType<E>,
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/uint256.rs | ceno_zkvm/src/precompiles/uint256.rs | // The crate uint256 circuit is modified from succinctlabs/sp1 under MIT license
// The MIT License (MIT)
// Copyright (c) 2023 Succinct Labs
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use crate::{
chip_handler::MemoryExpr,
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::ShardContext,
error::ZKVMError,
gadgets::{FieldOperation, IsZeroOperation, field_op::FieldOpCols, range::FieldLtCols},
instructions::riscv::insn_base::{StateInOut, WriteMEM},
precompiles::{SelectorTypeLayout, utils::merge_u8_slice_to_u16_limbs_pairs_and_extend},
scheme::utils::gkr_witness,
structs::PointAndEval,
witness::LkMultiplicity,
};
use ceno_emul::{ByteAddr, MemOp, StepRecord};
use derive::AlignedBorrow;
use ff_ext::{ExtensionField, SmallField};
use generic_array::{GenericArray, sequence::GenericSequence};
use gkr_iop::{
OutEvalGroups, ProtocolBuilder, ProtocolWitnessGenerator,
chip::Chip,
cpu::{CpuBackend, CpuProver},
error::{BackendError, CircuitBuilderError},
gkr::{GKRCircuit, GKRProof, GKRProverOutput, layer::Layer, mock::MockProver},
selector::{SelectorContext, SelectorType},
};
use itertools::{Itertools, izip};
use mpcs::PolynomialCommitmentScheme;
use multilinear_extensions::{
Expression, ToExpr, WitIn,
util::{ceil_log2, max_usable_threads},
};
use num::{BigUint, One, Zero};
use p3::field::FieldAlgebra;
use rayon::{
iter::{IndexedParallelIterator, IntoParallelRefIterator, ParallelIterator},
slice::ParallelSlice,
};
use sp1_curves::{
params::{FieldParameters, Limbs, NumLimbs, NumWords},
polynomial::Polynomial,
uint256::U256Field,
utils::biguint_to_limbs,
};
use std::{array, borrow::BorrowMut, marker::PhantomData, sync::Arc};
use sumcheck::{
macros::{entered_span, exit_span},
util::optimal_sumcheck_threads,
};
use transcript::{BasicTranscript, Transcript};
use typenum::Unsigned;
use witness::{InstancePaddingStrategy, RowMajorMatrix};
/// A set of columns for the Uint256Mul operation.
///
/// Computes `(x * y) % modulus` over 8-bit little-endian limbs (sp1's
/// `U256Field` parameters). `#[repr(C)]` + `AlignedBorrow` allow a witness
/// row slice to be reinterpreted as this struct, so field order here is part
/// of the witness layout — do not reorder fields.
#[derive(Debug, Clone, AlignedBorrow)]
#[repr(C)]
pub struct Uint256MulWitCols<T> {
    // First operand limbs.
    pub x_limbs: Limbs<T, <U256Field as NumLimbs>::Limbs>,
    // Second operand limbs.
    pub y_limbs: Limbs<T, <U256Field as NumLimbs>::Limbs>,
    // Caller-supplied modulus limbs.
    pub modulus_limbs: Limbs<T, <U256Field as NumLimbs>::Limbs>,
    /// Columns for checking if modulus is zero. If it's zero, then use 2^256 as the effective
    /// modulus.
    pub modulus_is_zero: IsZeroOperation<T>,
    /// Column that is equal to is_real * (1 - modulus_is_zero.result).
    pub modulus_is_not_zero: T,
    // Output values. We compute (x * y) % modulus.
    pub output: FieldOpCols<T, U256Field>,
    // Range check `output < modulus`; only enforced when the modulus is non-zero.
    pub output_range_check: FieldLtCols<T, U256Field>,
}
/// The single GKR layer of the uint256-mul chip: just its witness columns.
#[derive(Clone, Debug)]
#[repr(C)]
pub struct Uint256MulLayer<WitT> {
    pub wits: Uint256MulWitCols<WitT>,
}
/// Circuit layout (expressions plus sizing metadata) for the uint256-mul
/// precompile chip.
#[derive(Clone, Debug)]
pub struct Uint256MulLayout<E: ExtensionField> {
    // Witness-column expressions of the single layer.
    pub layer_exprs: Uint256MulLayer<WitIn>,
    // One shared prefix selector reused for read/write/lookup/zero records.
    pub selector_type_layout: SelectorTypeLayout<E>,
    /// Read x, y, and modulus from memory.
    pub input32_exprs: [GenericArray<MemoryExpr<E>, <U256Field as NumWords>::WordsFieldElement>; 3],
    // Result limbs re-packed as 16-bit pairs for the memory interface.
    pub output32_exprs: GenericArray<MemoryExpr<E>, <U256Field as NumWords>::WordsFieldElement>,
    // The counts below are zero until `finalize` snapshots them from the
    // constraint system.
    pub n_fixed: usize,
    pub n_committed: usize,
    pub n_structural_witin: usize,
    pub n_challenges: usize,
}
impl<E: ExtensionField> Uint256MulLayout<E> {
fn new(cb: &mut CircuitBuilder<E>) -> Self {
let wits = Uint256MulWitCols {
x_limbs: Limbs(GenericArray::generate(|_| cb.create_witin(|| "uint256 x"))),
y_limbs: Limbs(GenericArray::generate(|_| cb.create_witin(|| "uint256 y"))),
modulus_limbs: Limbs(GenericArray::generate(|_| {
cb.create_witin(|| "uint256 modulus")
})),
modulus_is_zero: IsZeroOperation::create(cb),
modulus_is_not_zero: cb.create_witin(|| "uint256_mul_modulus_is_not_zero"),
output: FieldOpCols::create(cb, || "uint256_mul_output"),
output_range_check: FieldLtCols::create(cb, || "uint256_mul_output_range_check"),
};
let eq = cb.create_placeholder_structural_witin(|| "uint256_mul_structural_witin");
let sel = SelectorType::Prefix(eq.expr());
let selector_type_layout = SelectorTypeLayout {
sel_mem_read: sel.clone(),
sel_mem_write: sel.clone(),
sel_lookup: sel.clone(),
sel_zero: sel.clone(),
};
// Default expression, will be updated in build_layer_logic
let input32_exprs: [GenericArray<MemoryExpr<E>, <U256Field as NumWords>::WordsFieldElement>;
3] = array::from_fn(|_| {
GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)))
});
// Default expression, will be updated in build_layer_logic
let output32_exprs: GenericArray<
MemoryExpr<E>,
<U256Field as NumWords>::WordsFieldElement,
> = GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)));
Self {
layer_exprs: Uint256MulLayer { wits },
selector_type_layout,
input32_exprs,
output32_exprs,
n_fixed: 0,
n_committed: 0,
n_challenges: 0,
n_structural_witin: 0,
}
}
#[allow(clippy::too_many_arguments)]
fn populate_row(
blu_events: &mut LkMultiplicity,
cols: &mut Uint256MulWitCols<E::BaseField>,
instance: &Uint256MulInstance,
) {
let x = &instance.x;
cols.x_limbs = U256Field::to_limbs_field(x);
let y = &instance.y;
cols.y_limbs = U256Field::to_limbs_field(y);
let modulus = &instance.modulus;
cols.modulus_limbs = U256Field::to_limbs_field(modulus);
let modulus_bytes = modulus.to_bytes_le();
let modulus_byte_sum = modulus_bytes.iter().map(|b| *b as u32).sum::<u32>();
cols.modulus_is_zero.populate(modulus_byte_sum);
// Populate the output column.
let effective_modulus = if modulus.is_zero() {
BigUint::one() << 256
} else {
modulus.clone()
};
let result = cols.output.populate_with_modulus(
blu_events,
x,
y,
&effective_modulus,
// &modulus,
FieldOperation::Mul,
);
cols.modulus_is_not_zero = E::BaseField::ONE - cols.modulus_is_zero.result;
if cols.modulus_is_not_zero == E::BaseField::ONE {
cols.output_range_check
.populate(blu_events, &result, &effective_modulus);
}
}
}
impl<E: ExtensionField> ProtocolBuilder<E> for Uint256MulLayout<E> {
    type Params = ();

    /// Builds all constraints for `(x * y) % modulus` and derives the packed
    /// 32-bit memory expressions from the 8-bit limb witnesses.
    fn build_layer_logic(
        cb: &mut CircuitBuilder<E>,
        _params: Self::Params,
    ) -> Result<Self, CircuitBuilderError> {
        let mut layout = Self::new(cb);
        let wits = &layout.layer_exprs.wits;
        // We are computing (x * y) % modulus. The value of x is stored in the "prev_value" of
        // the x_memory, since we write to it later.
        let x_limbs = &wits.x_limbs;
        let y_limbs = &wits.y_limbs;
        let modulus_limbs = &wits.modulus_limbs;
        // If the modulus is zero, then we don't perform the modulus operation.
        // Evaluate the modulus_is_zero operation by summing each byte of the modulus. The sum will
        // not overflow because we are summing 32 bytes.
        let modulus_byte_sum = modulus_limbs
            .0
            .iter()
            .fold(Expression::ZERO, |acc, &limb| acc + limb.expr());
        wits.modulus_is_zero.eval(cb, modulus_byte_sum)?;
        // If the modulus is zero, we'll actually use 2^256 as the modulus, so nothing happens.
        // Otherwise, we use the modulus passed in.
        let modulus_is_zero = wits.modulus_is_zero.result;
        // 2^256 as a limb polynomial: zero for the first 32 coefficients, one at degree 32.
        let mut coeff_2_256: Vec<Expression<E>> = Vec::new();
        coeff_2_256.resize(32, Expression::ZERO);
        coeff_2_256.push(Expression::ONE);
        // Select the effective modulus polynomial via the is-zero flag.
        let modulus_polynomial: Polynomial<Expression<E>> = (*modulus_limbs).into();
        let p_modulus: Polynomial<Expression<E>> = modulus_polynomial
            * (1 - modulus_is_zero.expr())
            + Polynomial::from_coefficients(&coeff_2_256) * modulus_is_zero.expr();
        // Evaluate the uint256 multiplication
        wits.output
            .eval_with_modulus(cb, x_limbs, y_limbs, &p_modulus, FieldOperation::Mul)?;
        // Verify the range of the output if the modulus is not zero. Also, check the value of
        // modulus_is_not_zero.
        wits.output_range_check.condition_eval(
            cb,
            &wits.output.result,
            modulus_limbs,
            wits.modulus_is_not_zero.expr(),
        )?;
        cb.require_equal(
            || "uint256_mul: modulus_is_not_zero",
            wits.modulus_is_not_zero.expr(),
            Expression::ONE - modulus_is_zero.expr(),
        )?;
        // Constraint output32 from wits.output by converting 8-bit limbs to 2x16-bit felts
        let mut output32 = Vec::with_capacity(<U256Field as NumWords>::WordsFieldElement::USIZE);
        merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.output.result.0, &mut output32);
        let output32 = output32.try_into().unwrap();
        // Constraint input32 from wits.x_limbs, wits.y_limbs, wits.modulus_limbs
        let mut x_input32 = Vec::with_capacity(<U256Field as NumWords>::WordsFieldElement::USIZE);
        merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.x_limbs.0, &mut x_input32);
        let x_input32 = x_input32.try_into().unwrap();
        let mut y_input32 = Vec::with_capacity(<U256Field as NumWords>::WordsFieldElement::USIZE);
        merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.y_limbs.0, &mut y_input32);
        let y_input32 = y_input32.try_into().unwrap();
        let mut modulus_input32 =
            Vec::with_capacity(<U256Field as NumWords>::WordsFieldElement::USIZE);
        merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(
            &wits.modulus_limbs.0,
            &mut modulus_input32,
        );
        let modulus_input32 = modulus_input32.try_into().unwrap();
        // set input32/output32 expr
        layout.input32_exprs = [x_input32, y_input32, modulus_input32];
        layout.output32_exprs = output32;
        Ok(layout)
    }

    /// Snapshots column counts from the constraint system, registers the
    /// shared selectors, and groups the output evaluation indices as
    /// [reads, writes, lookups, zero-checks].
    fn finalize(&mut self, cb: &mut CircuitBuilder<E>) -> (OutEvalGroups, Chip<E>) {
        self.n_fixed = cb.cs.num_fixed;
        self.n_committed = cb.cs.num_witin as usize;
        self.n_structural_witin = cb.cs.num_structural_witin as usize;
        self.n_challenges = 0;
        // register selector to legacy constrain system
        cb.cs.r_selector = Some(self.selector_type_layout.sel_mem_read.clone());
        cb.cs.w_selector = Some(self.selector_type_layout.sel_mem_write.clone());
        cb.cs.lk_selector = Some(self.selector_type_layout.sel_lookup.clone());
        cb.cs.zero_selector = Some(self.selector_type_layout.sel_zero.clone());
        let w_len = cb.cs.w_expressions.len();
        let r_len = cb.cs.r_expressions.len();
        let lk_len = cb.cs.lk_expressions.len();
        let zero_len =
            cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
        (
            [
                // r_record
                (0..r_len).collect_vec(),
                // w_record
                (r_len..r_len + w_len).collect_vec(),
                // lk_record
                (r_len + w_len..r_len + w_len + lk_len).collect_vec(),
                // zero_record
                (0..zero_len).collect_vec(),
            ],
            Chip::new_from_cb(cb, self.n_challenges),
        )
    }

    // The sizing accessors below are unused for this chip — callers read the
    // counts directly off the struct fields populated by `finalize`.
    fn n_committed(&self) -> usize {
        todo!()
    }
    fn n_fixed(&self) -> usize {
        todo!()
    }
    fn n_challenges(&self) -> usize {
        todo!()
    }
    fn n_evaluations(&self) -> usize {
        todo!()
    }
    fn n_layers(&self) -> usize {
        todo!()
    }
}
/// Witness-generation input for the uint256-mul chip: one instance per row.
pub struct Uint256MulTrace {
    pub instances: Vec<Uint256MulInstance>,
}
impl<E: ExtensionField> ProtocolWitnessGenerator<E> for Uint256MulLayout<E> {
    type Trace = Uint256MulTrace;

    /// This chip has no fixed columns.
    fn fixed_witness_group(&self) -> RowMajorMatrix<E::BaseField> {
        RowMajorMatrix::new(0, 0, InstancePaddingStrategy::Default)
    }

    /// Fills the committed witness matrix and the structural (selector)
    /// matrix from the trace, one row per instance, in parallel batches.
    fn phase1_witness_group(
        &self,
        phase1: Self::Trace,
        wits: [&mut RowMajorMatrix<E::BaseField>; 2],
        lk_multiplicity: &mut LkMultiplicity,
    ) {
        let num_instances = wits[0].num_instances();
        // Split work so each rayon thread gets at least one instance.
        let nthreads = max_usable_threads();
        let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
        let num_wit_cols = size_of::<Uint256MulWitCols<u8>>();
        let [wits, structural_wits] = wits;
        let raw_witin_iter = wits.par_batch_iter_mut(num_instance_per_batch);
        let raw_structural_wits_iter = structural_wits.par_batch_iter_mut(num_instance_per_batch);
        raw_witin_iter
            .zip_eq(raw_structural_wits_iter)
            .zip_eq(phase1.instances.par_chunks(num_instance_per_batch))
            .for_each(|((rows, eqs), phase1_instances)| {
                // Per-batch clone of the multiplicity counter — presumably the
                // clones share/merge their counts internally (TODO confirm
                // against LkMultiplicity's implementation).
                let mut lk_multiplicity = lk_multiplicity.clone();
                rows.chunks_mut(self.n_committed)
                    .zip_eq(eqs.chunks_mut(self.n_structural_witin))
                    .zip_eq(phase1_instances)
                    .for_each(|((row, eqs), phase1_instance)| {
                        // Reinterpret the row slice, starting at the first
                        // uint256-mul column, as the typed column struct
                        // (sound via #[repr(C)] + AlignedBorrow).
                        let cols: &mut Uint256MulWitCols<E::BaseField> = row
                            [self.layer_exprs.wits.x_limbs.0[0].id as usize..][..num_wit_cols] // TODO: Find a better way to write it.
                            .borrow_mut();
                        Self::populate_row(&mut lk_multiplicity, cols, phase1_instance);
                        // Mark the row as active in every structural column.
                        for x in eqs.iter_mut() {
                            *x = E::BaseField::ONE;
                        }
                    });
            });
    }
}
/// Static description of a 256-bit modular-inverse precompile variant.
pub trait Uint256InvSpec {
    /// Field parameters (limb/word counts) for the operand representation.
    type P: FieldParameters + NumWords;
    /// Syscall number this variant is dispatched on.
    fn syscall() -> u32;
    /// Human-readable name for diagnostics/identifiers.
    fn name() -> String;
    /// The fixed modulus the inverse is computed under.
    fn modulus() -> BigUint;
}
/// A set of columns for the Uint256Inv operation.
///
/// Computes `output = 1 / y mod modulus`, where the modulus is a constant of
/// the spec (not witnessed). `#[repr(C)]` + `AlignedBorrow`: field order is
/// part of the witness layout.
#[derive(Debug, Clone, AlignedBorrow)]
#[repr(C)]
pub struct Uint256InvWitCols<T> {
    // x = UInt256Field::ONE
    // y := input
    // y in little endian format
    pub y_limbs: Limbs<T, <U256Field as NumLimbs>::Limbs>,
    // output values. x / y = output
    pub output: FieldOpCols<T, U256Field>,
    // Range check asserting `output < modulus`.
    pub output_range_check: FieldLtCols<T, U256Field>,
}
/// The single GKR layer of the uint256-inv chip: just its witness columns.
#[derive(Clone, Debug)]
#[repr(C)]
pub struct Uint256InvLayer<WitT> {
    pub wits: Uint256InvWitCols<WitT>,
}
/// Circuit layout (expressions plus sizing metadata) for the uint256-inv
/// precompile chip, parameterized by a `Uint256InvSpec`.
#[derive(Clone, Debug)]
pub struct Uint256InvLayout<E: ExtensionField, Spec: Uint256InvSpec> {
    pub layer_exprs: Uint256InvLayer<WitIn>,
    // One shared prefix selector reused for read/write/lookup/zero records.
    pub selector_type_layout: SelectorTypeLayout<E>,
    // y from memory
    pub input32_exprs: GenericArray<MemoryExpr<E>, <Spec::P as NumWords>::WordsFieldElement>,
    // Constant modulus limbs baked in from `Spec::modulus()`.
    pub modulus_limbs: Limbs<Expression<E>, <Spec::P as NumLimbs>::Limbs>,
    pub output32_exprs: GenericArray<MemoryExpr<E>, <Spec::P as NumWords>::WordsFieldElement>,
    // The counts below are zero until `finalize` snapshots them.
    pub n_fixed: usize,
    pub n_committed: usize,
    pub n_structural_witin: usize,
    pub n_challenges: usize,
    // Ties the layout to `Spec::P` without storing a value of it.
    phantom: PhantomData<Spec::P>,
}
impl<E: ExtensionField, Spec: Uint256InvSpec> Uint256InvLayout<E, Spec> {
    /// Allocates witness columns, the structural selector, and placeholder
    /// memory expressions (overwritten by `build_layer_logic`).
    fn new(cb: &mut CircuitBuilder<E>) -> Self {
        let wits = Uint256InvWitCols {
            y_limbs: Limbs(GenericArray::generate(|_| cb.create_witin(|| "uint256 y"))),
            output: FieldOpCols::create(cb, || "uint256_inv_output"),
            output_range_check: FieldLtCols::create(cb, || "uint256_inv_output_range_check"),
        };
        // The modulus is a constant of the spec, so it becomes fixed
        // expressions rather than witness columns.
        let modulus_limbs = Spec::P::to_limbs_expr(&Spec::modulus());
        // Fix: label the structural witness "inv", not "mul" — the original
        // string was copy-pasted from Uint256MulLayout::new.
        let eq = cb.create_placeholder_structural_witin(|| "uint256_inv_structural_witin");
        let sel = SelectorType::Prefix(eq.expr());
        // A single prefix selector is shared by every record group.
        let selector_type_layout = SelectorTypeLayout {
            sel_mem_read: sel.clone(),
            sel_mem_write: sel.clone(),
            sel_lookup: sel.clone(),
            sel_zero: sel.clone(),
        };
        // Default expression, will be updated in build_layer_logic
        let input32_exprs = GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)));
        // Default expression, will be updated in build_layer_logic
        let output32_exprs = GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)));
        Self {
            layer_exprs: Uint256InvLayer { wits },
            selector_type_layout,
            input32_exprs,
            modulus_limbs,
            output32_exprs,
            n_fixed: 0,
            n_committed: 0,
            n_challenges: 0,
            n_structural_witin: 0,
            phantom: Default::default(),
        }
    }

    /// Fills one witness row: `y`, `output = 1 / y mod Spec::modulus()`, and
    /// the `output < modulus` range check. Lookup counts go to `blu_events`.
    // NOTE: the former `#[allow(clippy::too_many_arguments)]` was removed —
    // the function only takes three arguments, so the lint never fires.
    fn populate_row(
        blu_events: &mut LkMultiplicity,
        cols: &mut Uint256InvWitCols<E::BaseField>,
        y: &BigUint,
    ) {
        cols.y_limbs = U256Field::to_limbs_field(y);
        let y_inv = cols.output.populate_with_modulus(
            blu_events,
            &BigUint::one(),
            y,
            &Spec::modulus(),
            FieldOperation::Div,
        );
        cols.output_range_check
            .populate(blu_events, &y_inv, &Spec::modulus());
    }
}
impl<E: ExtensionField, Spec: Uint256InvSpec> ProtocolBuilder<E> for Uint256InvLayout<E, Spec> {
    type Params = ();

    /// Builds the constraints for `output = 1 / y mod Spec::modulus()` and
    /// derives the packed 32-bit memory expressions.
    fn build_layer_logic(
        cb: &mut CircuitBuilder<E>,
        _params: Self::Params,
    ) -> Result<Self, CircuitBuilderError> {
        let mut layout = Self::new(cb);
        let wits = &layout.layer_exprs.wits;
        // compute y_inv = (1 / y) % modulus
        // NOTE: y_limbs and modulus_limbs in little endian format
        let y_limbs = &wits.y_limbs;
        let modulus_limbs = &layout.modulus_limbs;
        // Unlike uint256-mul there is no zero-modulus special case: the
        // modulus is a fixed constant of the spec and is used directly.
        let modulus_polynomial: Polynomial<Expression<E>> = modulus_limbs.clone().into();
        let p_modulus: Polynomial<Expression<E>> = modulus_polynomial;
        // constant one
        let one_limbs: Limbs<Expression<E>, _> = Spec::P::to_limbs_expr(&BigUint::one());
        // Evaluate the modular division 1 / y.
        wits.output
            .eval_with_modulus(cb, &one_limbs, y_limbs, &p_modulus, FieldOperation::Div)?;
        // Verify output < modulus (unconditional: the spec modulus is a
        // nonzero constant).
        wits.output_range_check
            .eval(cb, &wits.output.result, modulus_limbs)?;
        // Constraint output32 from wits.output by converting 8-bit limbs to 2x16-bit felts
        let mut output32 = Vec::with_capacity(<Spec::P as NumWords>::WordsFieldElement::USIZE);
        merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(
            // rev to convert to big-endian
            &wits.output.result.0.into_iter().rev().collect_vec(),
            &mut output32,
        );
        let output32 = output32.try_into().unwrap();
        // Constraint input32 from wits.y_limbs
        let mut y_input32 = Vec::with_capacity(<Spec::P as NumWords>::WordsFieldElement::USIZE);
        merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(
            // rev to convert to big-endian
            &wits.y_limbs.0.into_iter().rev().collect_vec(),
            &mut y_input32,
        );
        let y_input32 = y_input32.try_into().unwrap();
        // set input32/output32 expr
        layout.input32_exprs = y_input32;
        layout.output32_exprs = output32;
        Ok(layout)
    }

    /// Snapshots column counts, registers the shared selectors, and groups the
    /// output evaluation indices as [reads, writes, lookups, zero-checks].
    fn finalize(&mut self, cb: &mut CircuitBuilder<E>) -> (OutEvalGroups, Chip<E>) {
        self.n_fixed = cb.cs.num_fixed;
        self.n_committed = cb.cs.num_witin as usize;
        self.n_structural_witin = cb.cs.num_structural_witin as usize;
        self.n_challenges = 0;
        // register selector to legacy constrain system
        cb.cs.r_selector = Some(self.selector_type_layout.sel_mem_read.clone());
        cb.cs.w_selector = Some(self.selector_type_layout.sel_mem_write.clone());
        cb.cs.lk_selector = Some(self.selector_type_layout.sel_lookup.clone());
        cb.cs.zero_selector = Some(self.selector_type_layout.sel_zero.clone());
        let w_len = cb.cs.w_expressions.len();
        let r_len = cb.cs.r_expressions.len();
        let lk_len = cb.cs.lk_expressions.len();
        let zero_len =
            cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
        (
            [
                // r_record
                (0..r_len).collect_vec(),
                // w_record
                (r_len..r_len + w_len).collect_vec(),
                // lk_record
                (r_len + w_len..r_len + w_len + lk_len).collect_vec(),
                // zero_record
                (0..zero_len).collect_vec(),
            ],
            Chip::new_from_cb(cb, self.n_challenges),
        )
    }

    // Sizing accessors are unused for this chip — counts are read directly
    // off the struct fields populated by `finalize`.
    fn n_committed(&self) -> usize {
        todo!()
    }
    fn n_fixed(&self) -> usize {
        todo!()
    }
    fn n_challenges(&self) -> usize {
        todo!()
    }
    fn n_evaluations(&self) -> usize {
        todo!()
    }
    fn n_layers(&self) -> usize {
        todo!()
    }
}
/// Witness-generation input for the uint256-inv chip: one `y` value per row.
pub struct Uint256InvTrace {
    pub instances: Vec<BigUint>,
}
impl<E: ExtensionField, Spec: Uint256InvSpec> ProtocolWitnessGenerator<E>
    for Uint256InvLayout<E, Spec>
{
    type Trace = Uint256InvTrace;

    /// This chip has no fixed columns.
    fn fixed_witness_group(&self) -> RowMajorMatrix<E::BaseField> {
        RowMajorMatrix::new(0, 0, InstancePaddingStrategy::Default)
    }

    /// Fills the committed witness matrix and the structural (selector)
    /// matrix from the trace, one row per `y` instance, in parallel batches.
    fn phase1_witness_group(
        &self,
        phase1: Self::Trace,
        wits: [&mut RowMajorMatrix<E::BaseField>; 2],
        lk_multiplicity: &mut LkMultiplicity,
    ) {
        let num_instances = wits[0].num_instances();
        // Split work so each rayon thread gets at least one instance.
        let nthreads = max_usable_threads();
        let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
        let num_wit_cols = size_of::<Uint256InvWitCols<u8>>();
        let [wits, structural_wits] = wits;
        let raw_witin_iter = wits.par_batch_iter_mut(num_instance_per_batch);
        let raw_structural_wits_iter = structural_wits.par_batch_iter_mut(num_instance_per_batch);
        raw_witin_iter
            .zip_eq(raw_structural_wits_iter)
            .zip_eq(phase1.instances.par_chunks(num_instance_per_batch))
            .for_each(|((rows, eqs), phase1_instances)| {
                // Per-batch clone of the multiplicity counter — presumably the
                // clones share/merge their counts internally (TODO confirm
                // against LkMultiplicity's implementation).
                let mut lk_multiplicity = lk_multiplicity.clone();
                rows.chunks_mut(self.n_committed)
                    .zip_eq(eqs.chunks_mut(self.n_structural_witin))
                    .zip_eq(phase1_instances)
                    .for_each(|((row, eqs), phase1_instance)| {
                        // Reinterpret the row slice, starting at the first
                        // uint256-inv column, as the typed column struct
                        // (sound via #[repr(C)] + AlignedBorrow).
                        let cols: &mut Uint256InvWitCols<E::BaseField> = row
                            [self.layer_exprs.wits.y_limbs.0[0].id as usize..][..num_wit_cols] // TODO: Find a better way to write it.
                            .borrow_mut();
                        Self::populate_row(&mut lk_multiplicity, cols, phase1_instance);
                        // Mark the row as active in every structural column.
                        for x in eqs.iter_mut() {
                            *x = E::BaseField::ONE;
                        }
                    });
            });
    }
}
/// Uint256 Mul Event.
///
/// This event is emitted when a uint256 mul operation is performed.
/// Holds the big-integer operands of one `(x * y) % modulus` instance; a zero
/// modulus means "reduce modulo 2^256" (see `Uint256MulWitCols`).
#[derive(Default, Debug, Clone)]
pub struct Uint256MulInstance {
    /// x
    pub x: BigUint,
    /// y
    pub y: BigUint,
    /// modulus
    pub modulus: BigUint,
}
/// Test-only wrapper bundling the uint256-mul layout with the VM-state and
/// memory-write gadgets needed to drive it end-to-end.
pub struct TestUint256MulLayout<E: ExtensionField> {
    layout: Uint256MulLayout<E>,
    // One WriteMEM gadget per 32-bit memory word touched by the precompile.
    mem_rw: Vec<WriteMEM>,
    vm_state: StateInOut<E>,
    // Base pointer witness for the operand region; kept alive but unread.
    _number_ptr: WitIn,
}
/// Test-only harness: builds the uint256-mul chip together with VM-state and
/// memory read/write gadgets and packages it as a one-layer GKR circuit.
///
/// Returns the layout wrapper, the GKR circuit, and the witness /
/// structural-witness column counts.
#[allow(clippy::type_complexity)]
pub fn setup_uint256mul_gkr_circuit<E: ExtensionField>()
-> Result<(TestUint256MulLayout<E>, GKRCircuit<E>, u16, u16), ZKVMError> {
    let mut cs = ConstraintSystem::new(|| "uint256_mul");
    let mut cb = CircuitBuilder::<E>::new(&mut cs);
    // constrain vmstate
    let vm_state = StateInOut::construct_circuit(&mut cb, false)?;
    let number_ptr = cb.create_witin(|| "state_ptr_0");
    let mut layout = Uint256MulLayout::build_layer_logic(&mut cb, ())?;
    // Write the result to the same address of the first input point.
    let limb_len = layout.output32_exprs.len();
    let mut mem_rw = izip!(&layout.input32_exprs[0], &layout.output32_exprs)
        .enumerate()
        .map(|(i, (val_before, val_after))| {
            WriteMEM::construct_circuit(
                &mut cb,
                // mem address := state_ptr_0 + i
                number_ptr.expr() + E::BaseField::from_canonical_u32(i as u32).expr(),
                val_before.clone(),
                val_after.clone(),
                vm_state.ts,
            )
        })
        .collect::<Result<Vec<WriteMEM>, _>>()?;
    // Keep the remaining operands (y and the modulus) unchanged in memory:
    // read and write back the same value.
    // FIXME(review): for j == 0 the address `state_ptr_0 + limb_len * j + i`
    // collapses to `state_ptr_0 + i`, which collides with the x/output region
    // written above; the y region was presumably meant to start at a distinct
    // offset (e.g. `limb_len * (j + 1)`). Left as-is to preserve behavior —
    // confirm intent before changing.
    layout.input32_exprs[1..]
        .iter()
        .enumerate()
        .try_for_each(|(j, input32_exprs)| {
            input32_exprs
                .iter()
                .enumerate()
                .try_for_each(|(i, val_before)| {
                    WriteMEM::construct_circuit(
                        &mut cb,
                        // mem address := state_ptr_0 + limb_len * j + i
                        number_ptr.expr()
                            + E::BaseField::from_canonical_u32((limb_len * j + i) as u32).expr(),
                        val_before.clone(),
                        val_before.clone(),
                        vm_state.ts,
                    )
                    .map(|mem| mem_rw.push(mem))
                })
        })?;
    let (out_evals, mut chip) = layout.finalize(&mut cb);
    // Fix: the layer is named after this chip; the original passed
    // "weierstrass_add", a copy-paste from the Weierstrass precompile.
    let layer = Layer::from_circuit_builder(
        &cb,
        "uint256_mul".to_string(),
        layout.n_challenges,
        out_evals,
    );
    chip.add_layer(layer);
    Ok((
        TestUint256MulLayout {
            layout,
            vm_state,
            _number_ptr: number_ptr,
            mem_rw,
        },
        chip.gkr_circuit(),
        cs.num_witin,
        cs.num_structural_witin,
    ))
}
#[tracing::instrument(
skip_all,
name = "run_uint256_mul",
level = "trace",
fields(profiling_1)
)]
pub fn run_uint256_mul<E: ExtensionField, PCS: PolynomialCommitmentScheme<E> + 'static>(
(layout, gkr_circuit, num_witin, num_structural_witin): (
TestUint256MulLayout<E>,
GKRCircuit<E>,
u16,
u16,
),
instances: Vec<Uint256MulInstance>,
verify: bool,
test_outputs: bool,
) -> Result<GKRProof<E>, BackendError> {
let mut shard_ctx = ShardContext::default();
let num_instances = instances.len();
let log2_num_instance = ceil_log2(num_instances);
let num_threads = optimal_sumcheck_threads(log2_num_instance);
let span = entered_span!("phase1_witness", profiling_2 = true);
let nthreads = max_usable_threads();
let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
let mut lk_multiplicity = LkMultiplicity::default();
let mut phase1_witness = RowMajorMatrix::<E::BaseField>::new(
instances.len(),
num_witin as usize,
InstancePaddingStrategy::Default,
);
let mut structural_witness = RowMajorMatrix::<E::BaseField>::new(
instances.len(),
num_structural_witin as usize,
InstancePaddingStrategy::Default,
);
let raw_witin_iter = phase1_witness.par_batch_iter_mut(num_instance_per_batch);
let shard_ctx_vec = shard_ctx.get_forked();
raw_witin_iter
.zip_eq(instances.par_chunks(num_instance_per_batch))
.zip(shard_ctx_vec)
.for_each(|((instances, steps), mut shard_ctx)| {
let mut lk_multiplicity = lk_multiplicity.clone();
instances
.chunks_mut(num_witin as usize)
.zip_eq(steps)
.for_each(|(instance, _step)| {
layout
.vm_state
.assign_instance(
instance,
&shard_ctx,
&StepRecord::new_ecall_any(10, ByteAddr::from(0)),
)
.expect("assign vm_state error");
layout.mem_rw.iter().for_each(|mem_config| {
mem_config
.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
10,
&MemOp {
previous_cycle: 0,
addr: ByteAddr::from(0).waddr(),
value: Default::default(),
},
)
.expect("assign error");
});
})
});
layout.layout.phase1_witness_group(
Uint256MulTrace {
instances: instances.clone(),
},
[&mut phase1_witness, &mut structural_witness],
&mut lk_multiplicity,
);
exit_span!(span);
if test_outputs {
// Test got output == expected output.
let expected_outputs = instances
.iter()
.map(|Uint256MulInstance { x, y, modulus }| {
let c = if modulus.is_zero() {
(x * y) % (BigUint::one() << 256)
} else {
(x * y) % modulus
};
biguint_to_limbs::<{ <U256Field as NumLimbs>::Limbs::USIZE }>(&c).to_vec()
})
.collect_vec();
let output_index_start = layout.layout.layer_exprs.wits.output.result.0[0].id as usize;
let got_outputs = phase1_witness
.iter_rows()
.take(num_instances)
.map(|cols| {
cols[output_index_start..][..<U256Field as NumLimbs>::Limbs::USIZE]
.iter()
.map(|c| c.to_canonical_u64() as u8)
.collect_vec()
})
.collect_vec();
assert_eq!(expected_outputs, got_outputs);
}
let mut prover_transcript = BasicTranscript::<E>::new(b"protocol");
let challenges = [
prover_transcript.read_challenge().elements,
prover_transcript.read_challenge().elements,
];
let span = entered_span!("gkr_witness", profiling_2 = true);
let phase1_witness_group = phase1_witness
.to_mles()
.into_iter()
.map(Arc::new)
.collect_vec();
let structural_witness = structural_witness
.to_mles()
.into_iter()
.map(Arc::new)
.collect_vec();
let fixed = layout
.layout
.fixed_witness_group()
.to_mles()
.into_iter()
.map(Arc::new)
.collect_vec();
#[allow(clippy::type_complexity)]
let (gkr_witness, gkr_output) = gkr_witness::<E, PCS, CpuBackend<E, PCS>, CpuProver<_>>(
&gkr_circuit,
&phase1_witness_group,
&structural_witness,
&fixed,
&[],
&[],
&challenges,
);
exit_span!(span);
let span = entered_span!("out_eval", profiling_2 = true);
let out_evals = {
let mut point = Vec::with_capacity(log2_num_instance);
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/fptower.rs | ceno_zkvm/src/precompiles/fptower.rs | pub mod fp;
pub mod fp2_addsub;
pub mod fp2_mul;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/fptower/fp.rs | ceno_zkvm/src/precompiles/fptower/fp.rs | // The crate fp circuit is modified from succinctlabs/sp1 under MIT license
// The MIT License (MIT)
// Copyright (c) 2023 Succinct Labs
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use std::{array, borrow::BorrowMut, marker::PhantomData, mem::size_of};
use derive::AlignedBorrow;
use ff_ext::ExtensionField;
use generic_array::{GenericArray, sequence::GenericSequence};
use gkr_iop::{
OutEvalGroups, ProtocolBuilder, ProtocolWitnessGenerator, chip::Chip,
circuit_builder::CircuitBuilder, error::CircuitBuilderError, selector::SelectorType,
};
use itertools::Itertools;
use multilinear_extensions::{Expression, ToExpr, WitIn, util::max_usable_threads};
use num::BigUint;
use p3::field::FieldAlgebra;
use rayon::{
iter::{IndexedParallelIterator, ParallelIterator},
prelude::ParallelSlice,
};
use sp1_curves::{
params::{Limbs, NumWords},
polynomial::Polynomial,
weierstrass::FpOpField,
};
use typenum::Unsigned;
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::MemoryExpr,
gadgets::{FieldOperation, field_op::FieldOpCols, range::FieldLtCols},
precompiles::{SelectorTypeLayout, utils::merge_u8_slice_to_u16_limbs_pairs_and_extend},
witness::LkMultiplicity,
};
/// Number of base-field cells occupied by one `FpOpWitCols` row.
/// Valid because the struct is `#[repr(C)]` and measured over one-byte cells.
pub const fn num_fp_cols<P: FpOpField>() -> usize {
    size_of::<FpOpWitCols<u8, P>>()
}
/// One `x (op) y mod P::modulus()` instance for the Fp chip.
#[derive(Debug, Clone)]
pub struct FpOpInstance<P: FpOpField> {
    pub x: BigUint,
    pub y: BigUint,
    // Which of add / sub / mul this instance performs.
    pub op: FieldOperation,
    // Ties the instance to the field `P` without storing a value of it.
    _marker: PhantomData<P>,
}
impl<P: FpOpField> FpOpInstance<P> {
    /// Creates an instance record for `x (op) y` over the field `P`.
    pub fn new(x: BigUint, y: BigUint, op: FieldOperation) -> Self {
        Self {
            x,
            y,
            op,
            _marker: PhantomData,
        }
    }
}
/// Witness-generation input for the Fp chip: one instance per row.
pub struct FpOpTrace<P: FpOpField> {
    pub instances: Vec<FpOpInstance<P>>,
}
/// Witness columns for one Fp operation row.
///
/// The three op flags are boolean and one-hot (enforced in
/// `build_layer_logic`). `#[repr(C)]` + `AlignedBorrow`: field order is part
/// of the witness layout — do not reorder.
#[derive(Debug, Clone, AlignedBorrow)]
#[repr(C)]
pub struct FpOpWitCols<T, P: FpOpField> {
    // One-hot operation selectors.
    pub is_add: T,
    pub is_sub: T,
    pub is_mul: T,
    // Operand limbs.
    pub x_limbs: Limbs<T, P::Limbs>,
    pub y_limbs: Limbs<T, P::Limbs>,
    // Result of `x (op) y mod P::modulus()`.
    pub output: FieldOpCols<T, P>,
    // Range check asserting `output < P::modulus()`.
    pub output_range_check: FieldLtCols<T, P>,
}
/// The single GKR layer of the Fp chip: just its witness columns.
#[derive(Clone, Debug)]
#[repr(C)]
pub struct FpOpLayer<WitT, P: FpOpField> {
    pub wits: FpOpWitCols<WitT, P>,
}
/// Circuit layout (expressions plus sizing metadata) for the Fp precompile
/// chip over the field `P`.
#[derive(Clone, Debug)]
pub struct FpOpLayout<E: ExtensionField, P: FpOpField> {
    pub layer_exprs: FpOpLayer<WitIn, P>,
    // One shared prefix selector reused for read/write/lookup/zero records.
    pub selector_type_layout: SelectorTypeLayout<E>,
    // x and y operands as packed 32-bit memory expressions.
    pub input32_exprs: [GenericArray<MemoryExpr<E>, <P as NumWords>::WordsFieldElement>; 2],
    // Result as packed 32-bit memory expressions.
    pub output32_exprs: GenericArray<MemoryExpr<E>, <P as NumWords>::WordsFieldElement>,
    // The counts below are zero until `finalize` snapshots them.
    pub n_fixed: usize,
    pub n_committed: usize,
    pub n_structural_witin: usize,
    pub n_challenges: usize,
}
impl<E: ExtensionField, P: FpOpField> FpOpLayout<E, P> {
    /// Allocates witness columns, the structural selector, and placeholder
    /// memory expressions (overwritten by `build_layer_logic`).
    fn new(cb: &mut CircuitBuilder<E>) -> Self {
        let wits = FpOpWitCols {
            is_add: cb.create_witin(|| "fp_op_is_add"),
            is_sub: cb.create_witin(|| "fp_op_is_sub"),
            is_mul: cb.create_witin(|| "fp_op_is_mul"),
            x_limbs: Limbs(GenericArray::generate(|_| cb.create_witin(|| "fp_op_x"))),
            y_limbs: Limbs(GenericArray::generate(|_| cb.create_witin(|| "fp_op_y"))),
            output: FieldOpCols::create(cb, || "fp_op_output"),
            output_range_check: FieldLtCols::create(cb, || "fp_op_output_range"),
        };
        // A single prefix selector is shared by every record group.
        let eq = cb.create_placeholder_structural_witin(|| "fp_op_structural_witin");
        let sel = SelectorType::Prefix(eq.expr());
        let selector_type_layout = SelectorTypeLayout {
            sel_mem_read: sel.clone(),
            sel_mem_write: sel.clone(),
            sel_lookup: sel.clone(),
            sel_zero: sel.clone(),
        };
        // Dummy expressions; replaced with real packed limbs in build_layer_logic.
        let input32_exprs: [GenericArray<MemoryExpr<E>, <P as NumWords>::WordsFieldElement>; 2] =
            array::from_fn(|_| {
                GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)))
            });
        let output32_exprs: GenericArray<MemoryExpr<E>, <P as NumWords>::WordsFieldElement> =
            GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)));
        Self {
            layer_exprs: FpOpLayer { wits },
            selector_type_layout,
            input32_exprs,
            output32_exprs,
            n_fixed: 0,
            n_committed: 0,
            n_structural_witin: 0,
            n_challenges: 0,
        }
    }

    /// Fills one witness row from `instance`: one-hot op flags, operand
    /// limbs, the reduced result, and its `< modulus` range check.
    fn populate_row(
        instance: &FpOpInstance<P>,
        cols: &mut FpOpWitCols<E::BaseField, P>,
        lk_multiplicity: &mut LkMultiplicity,
    ) {
        // One-hot encode the operation (exactly one flag is 1).
        cols.is_add = E::BaseField::from_canonical_u8((instance.op == FieldOperation::Add) as u8);
        cols.is_sub = E::BaseField::from_canonical_u8((instance.op == FieldOperation::Sub) as u8);
        cols.is_mul = E::BaseField::from_canonical_u8((instance.op == FieldOperation::Mul) as u8);
        cols.x_limbs = P::to_limbs_field(&instance.x);
        cols.y_limbs = P::to_limbs_field(&instance.y);
        let modulus = P::modulus();
        let output = cols.output.populate_with_modulus(
            lk_multiplicity,
            &instance.x,
            &instance.y,
            &modulus,
            instance.op,
        );
        cols.output_range_check
            .populate(lk_multiplicity, &output, &modulus);
    }
}
impl<E: ExtensionField, P: FpOpField> ProtocolBuilder<E> for FpOpLayout<E, P> {
    type Params = ();

    /// Builds the constraints for `x (op) y mod P::modulus()` selected by the
    /// one-hot flags, and derives the packed 32-bit memory expressions.
    fn build_layer_logic(
        cb: &mut CircuitBuilder<E>,
        _params: Self::Params,
    ) -> Result<Self, CircuitBuilderError> {
        let mut layout = FpOpLayout::new(cb);
        let wits = &layout.layer_exprs.wits;
        // Op flags are boolean and sum to one (exactly one op per row).
        cb.assert_bit(|| "fp_op_is_add_bool", wits.is_add.expr())?;
        cb.assert_bit(|| "fp_op_is_sub_bool", wits.is_sub.expr())?;
        cb.assert_bit(|| "fp_op_is_mul_bool", wits.is_mul.expr())?;
        cb.require_one(
            || "fp_op_one_hot",
            wits.is_add.expr() + wits.is_sub.expr() + wits.is_mul.expr(),
        )?;
        let modulus: Polynomial<Expression<E>> = P::to_limbs_expr::<E>(&P::modulus()).into();
        let zero = E::BaseField::ZERO.expr();
        // Flag-selected field op; the trailing `zero` disables the remaining
        // operation variant of eval_variable (presumably division — confirm
        // against FieldOpCols::eval_variable's flag order).
        wits.output.eval_variable(
            cb,
            &wits.x_limbs,
            &wits.y_limbs,
            &modulus,
            wits.is_add.expr(),
            wits.is_sub.expr(),
            wits.is_mul.expr(),
            zero,
        )?;
        // Verify output < modulus.
        wits.output_range_check
            .eval(cb, &wits.output.result, &modulus)?;
        // Re-pack 8-bit limbs into 2x16-bit felt pairs for the memory interface.
        let mut x_input32 = Vec::with_capacity(<P as NumWords>::WordsFieldElement::USIZE);
        merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.x_limbs.0, &mut x_input32);
        let x_input32 = x_input32.try_into().unwrap();
        let mut y_input32 = Vec::with_capacity(<P as NumWords>::WordsFieldElement::USIZE);
        merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.y_limbs.0, &mut y_input32);
        let y_input32 = y_input32.try_into().unwrap();
        let mut output32 = Vec::with_capacity(<P as NumWords>::WordsFieldElement::USIZE);
        merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.output.result.0, &mut output32);
        let output32 = output32.try_into().unwrap();
        layout.input32_exprs = [x_input32, y_input32];
        layout.output32_exprs = output32;
        Ok(layout)
    }

    /// Snapshots column counts, registers the shared selectors, and groups the
    /// output evaluation indices as [reads, writes, lookups, zero-checks].
    fn finalize(&mut self, cb: &mut CircuitBuilder<E>) -> (OutEvalGroups, Chip<E>) {
        self.n_fixed = cb.cs.num_fixed;
        self.n_committed = cb.cs.num_witin as usize;
        self.n_structural_witin = cb.cs.num_structural_witin as usize;
        self.n_challenges = 0;
        // Register the selectors with the legacy constraint system.
        cb.cs.r_selector = Some(self.selector_type_layout.sel_mem_read.clone());
        cb.cs.w_selector = Some(self.selector_type_layout.sel_mem_write.clone());
        cb.cs.lk_selector = Some(self.selector_type_layout.sel_lookup.clone());
        cb.cs.zero_selector = Some(self.selector_type_layout.sel_zero.clone());
        let w_len = cb.cs.w_expressions.len();
        let r_len = cb.cs.r_expressions.len();
        let lk_len = cb.cs.lk_expressions.len();
        let zero_len =
            cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
        (
            [
                // read records
                (0..r_len).collect_vec(),
                // write records
                (r_len..r_len + w_len).collect_vec(),
                // lookup records
                (r_len + w_len..r_len + w_len + lk_len).collect_vec(),
                // zero-check records
                (0..zero_len).collect_vec(),
            ],
            Chip::new_from_cb(cb, self.n_challenges),
        )
    }
}
impl<E: ExtensionField, P: FpOpField> ProtocolWitnessGenerator<E> for FpOpLayout<E, P> {
    type Trace = FpOpTrace<P>;

    /// This chip has no fixed columns.
    fn fixed_witness_group(&self) -> RowMajorMatrix<E::BaseField> {
        RowMajorMatrix::new(0, 0, InstancePaddingStrategy::Default)
    }

    /// Fills the committed witness matrix and the structural (selector)
    /// matrix from the trace, one row per instance, in parallel batches.
    fn phase1_witness_group(
        &self,
        phase1: Self::Trace,
        wits: [&mut RowMajorMatrix<E::BaseField>; 2],
        lk_multiplicity: &mut LkMultiplicity,
    ) {
        // `is_add` is the first field of the repr(C) struct, so its column id
        // marks where this chip's columns begin within a row.
        let (wits_start, num_wit_cols) =
            (self.layer_exprs.wits.is_add.id as usize, num_fp_cols::<P>());
        let [wits, structural_wits] = wits;
        let num_instances = wits.num_instances();
        // Split work so each rayon thread gets at least one instance.
        let nthreads = max_usable_threads();
        let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
        let raw_witin_iter = wits.par_batch_iter_mut(num_instance_per_batch);
        let raw_structural_wits_iter = structural_wits.par_batch_iter_mut(num_instance_per_batch);
        raw_witin_iter
            .zip_eq(raw_structural_wits_iter)
            .zip_eq(phase1.instances.par_chunks(num_instance_per_batch))
            .for_each(|((rows, eqs), phase1_instances)| {
                // Per-batch clone of the multiplicity counter — presumably the
                // clones share/merge their counts internally (TODO confirm
                // against LkMultiplicity's implementation).
                let mut lk_multiplicity = lk_multiplicity.clone();
                rows.chunks_mut(self.n_committed)
                    .zip_eq(eqs.chunks_mut(self.n_structural_witin))
                    .zip_eq(phase1_instances)
                    .for_each(|((row, eqs), phase1_instance)| {
                        // Reinterpret the row slice as the typed column struct
                        // (sound via #[repr(C)] + AlignedBorrow).
                        let cols: &mut FpOpWitCols<E::BaseField, P> =
                            row[wits_start..][..num_wit_cols].borrow_mut();
                        Self::populate_row(phase1_instance, cols, &mut lk_multiplicity);
                        // Mark the row as active in every structural column.
                        for x in eqs.iter_mut() {
                            *x = E::BaseField::ONE;
                        }
                    });
            });
    }
}
#[cfg(test)]
mod tests {
use super::*;
use ff_ext::{BabyBearExt4, SmallField};
use gkr_iop::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
cpu::{CpuBackend, CpuProver},
gkr::{GKRProverOutput, layer::Layer},
selector::SelectorContext,
};
use itertools::Itertools;
use mpcs::BasefoldDefault;
use multilinear_extensions::{mle::PointAndEval, util::ceil_log2};
use num::BigUint;
use rand::RngCore;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use sp1_curves::weierstrass::{bls12_381::Bls12381BaseField, bn254::Bn254BaseField};
use std::sync::Arc;
use sumcheck::util::optimal_sumcheck_threads;
use transcript::{BasicTranscript, Transcript};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::witness::LkMultiplicity;
fn random_mod<P: FpOpField>() -> BigUint {
let mut bytes = vec![0u8; P::NB_LIMBS + 8];
rand::thread_rng().fill_bytes(&mut bytes);
BigUint::from_bytes_le(&bytes) % P::modulus()
}
fn test_fp_ops_helper<P: FpOpField>(count: usize) {
type E = BabyBearExt4;
type Pcs = BasefoldDefault<E>;
let mut cs = ConstraintSystem::<E>::new(|| "fp_op_test");
let mut cb = CircuitBuilder::<E>::new(&mut cs);
let mut layout =
FpOpLayout::<E, P>::build_layer_logic(&mut cb, ()).expect("build_layer_logic failed");
let (out_evals, mut chip) = layout.finalize(&mut cb);
let layer =
Layer::from_circuit_builder(&cb, "fp_op".to_string(), layout.n_challenges, out_evals);
chip.add_layer(layer);
let gkr_circuit = chip.gkr_circuit();
let instances = (0..count)
.map(|i| {
let x = random_mod::<P>();
let y = random_mod::<P>();
let op = if i % 2 == 0 {
FieldOperation::Add
} else {
FieldOperation::Mul
};
FpOpInstance::<P>::new(x, y, op)
})
.collect_vec();
let mut phase1 = RowMajorMatrix::new(
instances.len(),
layout.n_committed,
InstancePaddingStrategy::Default,
);
let mut structural = RowMajorMatrix::new(
instances.len(),
layout.n_structural_witin,
InstancePaddingStrategy::Default,
);
let mut lk_multiplicity = LkMultiplicity::default();
layout.phase1_witness_group(
FpOpTrace::<P> {
instances: instances.clone(),
},
[&mut phase1, &mut structural],
&mut lk_multiplicity,
);
let output_index = layout.layer_exprs.wits.output.result.0[0].id as usize;
for (row, inst) in phase1
.iter_rows()
.take(instances.len())
.zip(instances.iter())
{
let out_bytes = row[output_index..][..P::NB_LIMBS]
.iter()
.map(|c| c.to_canonical_u64() as u8)
.collect_vec();
let got = BigUint::from_bytes_le(&out_bytes);
let modulus = P::modulus();
let expected = match inst.op {
FieldOperation::Add => (&inst.x + &inst.y) % &modulus,
FieldOperation::Mul => (&inst.x * &inst.y) % &modulus,
FieldOperation::Sub | FieldOperation::Div => unreachable!(),
};
assert_eq!(got, expected);
}
phase1.padding_by_strategy();
structural.padding_by_strategy();
let num_instances = instances.len();
let log2_num_instance = ceil_log2(num_instances);
let num_threads = optimal_sumcheck_threads(log2_num_instance);
let mut prover_transcript = BasicTranscript::<E>::new(b"protocol");
let challenges = [
prover_transcript.read_challenge().elements,
prover_transcript.read_challenge().elements,
];
let phase1_witness_group = phase1.to_mles().into_iter().map(Arc::new).collect_vec();
let structural_witness = structural.to_mles().into_iter().map(Arc::new).collect_vec();
let fixed = layout
.fixed_witness_group()
.to_mles()
.into_iter()
.map(Arc::new)
.collect_vec();
let (gkr_witness, gkr_output) =
crate::scheme::utils::gkr_witness::<E, Pcs, CpuBackend<E, Pcs>, CpuProver<_>>(
&gkr_circuit,
&phase1_witness_group,
&structural_witness,
&fixed,
&[],
&[],
&challenges,
);
let out_evals = {
let mut point = Vec::with_capacity(log2_num_instance);
point.extend(prover_transcript.sample_vec(log2_num_instance).to_vec());
let out_evals = gkr_output
.0
.par_iter()
.map(|wit| {
let point = point[point.len() - wit.num_vars()..point.len()].to_vec();
PointAndEval {
point: point.clone(),
eval: wit.evaluate(&point),
}
})
.collect::<Vec<_>>();
if out_evals.is_empty() {
vec![PointAndEval {
point: point[point.len() - log2_num_instance..point.len()].to_vec(),
eval: E::ZERO,
}]
} else {
out_evals
}
};
let selector_ctxs = vec![SelectorContext::new(0, num_instances, log2_num_instance); 1];
let GKRProverOutput { gkr_proof, .. } = gkr_circuit
.prove::<CpuBackend<E, Pcs>, CpuProver<_>>(
num_threads,
log2_num_instance,
gkr_witness,
&out_evals,
&[],
&challenges,
&mut prover_transcript,
&selector_ctxs,
)
.expect("fp_op prove failed");
let mut verifier_transcript = BasicTranscript::<E>::new(b"protocol");
let challenges = [
verifier_transcript.read_challenge().elements,
verifier_transcript.read_challenge().elements,
];
let mut point = Vec::with_capacity(log2_num_instance);
point.extend(verifier_transcript.sample_vec(log2_num_instance).to_vec());
gkr_circuit
.verify(
log2_num_instance,
gkr_proof,
&out_evals,
&[],
&[],
&challenges,
&mut verifier_transcript,
&selector_ctxs,
)
.expect("fp_op verify failed");
}
#[test]
fn test_bls12381_fp_ops() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp_ops_helper::<Bls12381BaseField>(8))
.expect("spawn fp_ops test thread failed")
.join()
.expect("fp_ops test thread panicked");
}
#[test]
fn test_bls12381_fp_ops_nonpow2() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp_ops_helper::<Bls12381BaseField>(7))
.expect("spawn fp_ops test thread failed")
.join()
.expect("fp_ops test thread panicked");
}
#[test]
fn test_bn254_fp_ops() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp_ops_helper::<Bn254BaseField>(8))
.expect("spawn fp_ops test thread failed")
.join()
.expect("fp_ops test thread panicked");
}
#[test]
fn test_bn254_fp_ops_nonpow2() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp_ops_helper::<Bn254BaseField>(7))
.expect("spawn fp_ops test thread failed")
.join()
.expect("fp_ops test thread panicked");
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/fptower/fp2_mul.rs | ceno_zkvm/src/precompiles/fptower/fp2_mul.rs | // The crate fp2_mul circuit is modified from succinctlabs/sp1 under MIT license
// The MIT License (MIT)
// Copyright (c) 2023 Succinct Labs
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use std::{array, borrow::BorrowMut, marker::PhantomData, mem::size_of};
use derive::AlignedBorrow;
use ff_ext::ExtensionField;
use generic_array::{GenericArray, sequence::GenericSequence};
use gkr_iop::{
OutEvalGroups, ProtocolBuilder, ProtocolWitnessGenerator, chip::Chip,
circuit_builder::CircuitBuilder, error::CircuitBuilderError, selector::SelectorType,
};
use itertools::Itertools;
use multilinear_extensions::{Expression, ToExpr, WitIn, util::max_usable_threads};
use num::BigUint;
use p3::field::FieldAlgebra;
use rayon::{
iter::{IndexedParallelIterator, ParallelIterator},
prelude::ParallelSlice,
};
use sp1_curves::{
params::{Limbs, NumWords},
polynomial::Polynomial,
weierstrass::FpOpField,
};
use typenum::Unsigned;
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::MemoryExpr,
gadgets::{FieldOperation, field_op::FieldOpCols, range::FieldLtCols},
precompiles::{SelectorTypeLayout, utils::merge_u8_slice_to_u16_limbs_pairs_and_extend},
witness::LkMultiplicity,
};
pub const fn num_fp2_mul_cols<P: FpOpField>() -> usize {
size_of::<Fp2MulAssignWitCols<u8, P>>()
}
#[derive(Debug, Clone)]
pub struct Fp2MulInstance<P: FpOpField> {
pub a0: BigUint,
pub a1: BigUint,
pub b0: BigUint,
pub b1: BigUint,
_marker: PhantomData<P>,
}
impl<P: FpOpField> Fp2MulInstance<P> {
pub fn new(a0: BigUint, a1: BigUint, b0: BigUint, b1: BigUint) -> Self {
Self {
a0,
a1,
b0,
b1,
_marker: PhantomData,
}
}
}
pub struct Fp2MulTrace<P: FpOpField> {
pub instances: Vec<Fp2MulInstance<P>>,
}
/// A set of columns for the Fp2Mul operation.
#[derive(Debug, Clone, AlignedBorrow)]
#[repr(C)]
pub struct Fp2MulAssignWitCols<T, P: FpOpField> {
pub a0: Limbs<T, P::Limbs>,
pub a1: Limbs<T, P::Limbs>,
pub b0: Limbs<T, P::Limbs>,
pub b1: Limbs<T, P::Limbs>,
pub(crate) a0_mul_b0: FieldOpCols<T, P>,
pub(crate) a1_mul_b1: FieldOpCols<T, P>,
pub(crate) a0_mul_b1: FieldOpCols<T, P>,
pub(crate) a1_mul_b0: FieldOpCols<T, P>,
pub(crate) c0: FieldOpCols<T, P>,
pub(crate) c1: FieldOpCols<T, P>,
pub(crate) c0_range_check: FieldLtCols<T, P>,
pub(crate) c1_range_check: FieldLtCols<T, P>,
}
#[derive(Clone, Debug)]
#[repr(C)]
pub struct Fp2MulAssignLayer<WitT, P: FpOpField> {
pub wits: Fp2MulAssignWitCols<WitT, P>,
}
#[derive(Clone, Debug)]
pub struct Fp2MulAssignLayout<E: ExtensionField, P: FpOpField> {
pub layer_exprs: Fp2MulAssignLayer<WitIn, P>,
pub selector_type_layout: SelectorTypeLayout<E>,
pub input32_exprs: [GenericArray<MemoryExpr<E>, <P as NumWords>::WordsCurvePoint>; 2],
pub output32_exprs: GenericArray<MemoryExpr<E>, <P as NumWords>::WordsCurvePoint>,
pub n_fixed: usize,
pub n_committed: usize,
pub n_structural_witin: usize,
pub n_challenges: usize,
}
impl<E: ExtensionField, P: FpOpField> Fp2MulAssignLayout<E, P> {
fn new(cb: &mut CircuitBuilder<E>) -> Self {
let wits = Fp2MulAssignWitCols {
a0: Limbs(GenericArray::generate(|_| cb.create_witin(|| "fp2_mul_a0"))),
a1: Limbs(GenericArray::generate(|_| cb.create_witin(|| "fp2_mul_a1"))),
b0: Limbs(GenericArray::generate(|_| cb.create_witin(|| "fp2_mul_b0"))),
b1: Limbs(GenericArray::generate(|_| cb.create_witin(|| "fp2_mul_b1"))),
a0_mul_b0: FieldOpCols::create(cb, || "fp2_mul_a0_mul_b0"),
a1_mul_b1: FieldOpCols::create(cb, || "fp2_mul_a1_mul_b1"),
a0_mul_b1: FieldOpCols::create(cb, || "fp2_mul_a0_mul_b1"),
a1_mul_b0: FieldOpCols::create(cb, || "fp2_mul_a1_mul_b0"),
c0: FieldOpCols::create(cb, || "fp2_mul_c0"),
c1: FieldOpCols::create(cb, || "fp2_mul_c1"),
c0_range_check: FieldLtCols::create(cb, || "fp2_mul_c0_range"),
c1_range_check: FieldLtCols::create(cb, || "fp2_mul_c1_range"),
};
let eq = cb.create_placeholder_structural_witin(|| "fp2_mul_structural_witin");
let sel = SelectorType::Prefix(eq.expr());
let selector_type_layout = SelectorTypeLayout {
sel_mem_read: sel.clone(),
sel_mem_write: sel.clone(),
sel_lookup: sel.clone(),
sel_zero: sel.clone(),
};
let input32_exprs: [GenericArray<MemoryExpr<E>, <P as NumWords>::WordsCurvePoint>; 2] =
array::from_fn(|_| {
GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)))
});
let output32_exprs: GenericArray<MemoryExpr<E>, <P as NumWords>::WordsCurvePoint> =
GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)));
Self {
layer_exprs: Fp2MulAssignLayer { wits },
selector_type_layout,
input32_exprs,
output32_exprs,
n_fixed: 0,
n_committed: 0,
n_structural_witin: 0,
n_challenges: 0,
}
}
fn populate_row(
instance: &Fp2MulInstance<P>,
cols: &mut Fp2MulAssignWitCols<E::BaseField, P>,
lk_multiplicity: &mut LkMultiplicity,
) {
cols.a0 = P::to_limbs_field(&instance.a0);
cols.a1 = P::to_limbs_field(&instance.a1);
cols.b0 = P::to_limbs_field(&instance.b0);
cols.b1 = P::to_limbs_field(&instance.b1);
let modulus = P::modulus();
let a0_mul_b0 = cols.a0_mul_b0.populate_with_modulus(
lk_multiplicity,
&instance.a0,
&instance.b0,
&modulus,
FieldOperation::Mul,
);
let a1_mul_b1 = cols.a1_mul_b1.populate_with_modulus(
lk_multiplicity,
&instance.a1,
&instance.b1,
&modulus,
FieldOperation::Mul,
);
let a0_mul_b1 = cols.a0_mul_b1.populate_with_modulus(
lk_multiplicity,
&instance.a0,
&instance.b1,
&modulus,
FieldOperation::Mul,
);
let a1_mul_b0 = cols.a1_mul_b0.populate_with_modulus(
lk_multiplicity,
&instance.a1,
&instance.b0,
&modulus,
FieldOperation::Mul,
);
let c0 = cols.c0.populate_with_modulus(
lk_multiplicity,
&a0_mul_b0,
&a1_mul_b1,
&modulus,
FieldOperation::Sub,
);
let c1 = cols.c1.populate_with_modulus(
lk_multiplicity,
&a0_mul_b1,
&a1_mul_b0,
&modulus,
FieldOperation::Add,
);
cols.c0_range_check.populate(lk_multiplicity, &c0, &modulus);
cols.c1_range_check.populate(lk_multiplicity, &c1, &modulus);
}
}
impl<E: ExtensionField, P: FpOpField> ProtocolBuilder<E> for Fp2MulAssignLayout<E, P> {
type Params = ();
fn build_layer_logic(
cb: &mut CircuitBuilder<E>,
_params: Self::Params,
) -> Result<Self, CircuitBuilderError> {
let mut layout = Fp2MulAssignLayout::new(cb);
let wits = &layout.layer_exprs.wits;
let modulus: Polynomial<Expression<E>> = P::to_limbs_expr::<E>(&P::modulus()).into();
wits.a0_mul_b0
.eval_with_modulus(cb, &wits.a0, &wits.b0, &modulus, FieldOperation::Mul)?;
wits.a1_mul_b1
.eval_with_modulus(cb, &wits.a1, &wits.b1, &modulus, FieldOperation::Mul)?;
wits.a0_mul_b1
.eval_with_modulus(cb, &wits.a0, &wits.b1, &modulus, FieldOperation::Mul)?;
wits.a1_mul_b0
.eval_with_modulus(cb, &wits.a1, &wits.b0, &modulus, FieldOperation::Mul)?;
wits.c0.eval_with_modulus(
cb,
&wits.a0_mul_b0.result,
&wits.a1_mul_b1.result,
&modulus,
FieldOperation::Sub,
)?;
wits.c1.eval_with_modulus(
cb,
&wits.a0_mul_b1.result,
&wits.a1_mul_b0.result,
&modulus,
FieldOperation::Add,
)?;
wits.c0_range_check.eval(cb, &wits.c0.result, &modulus)?;
wits.c1_range_check.eval(cb, &wits.c1.result, &modulus)?;
let mut x_input32 = Vec::with_capacity(<P as NumWords>::WordsCurvePoint::USIZE);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.a0.0, &mut x_input32);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.a1.0, &mut x_input32);
let x_input32 = x_input32.try_into().unwrap();
let mut y_input32 = Vec::with_capacity(<P as NumWords>::WordsCurvePoint::USIZE);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.b0.0, &mut y_input32);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.b1.0, &mut y_input32);
let y_input32 = y_input32.try_into().unwrap();
let mut output32 = Vec::with_capacity(<P as NumWords>::WordsCurvePoint::USIZE);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.c0.result.0, &mut output32);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.c1.result.0, &mut output32);
let output32 = output32.try_into().unwrap();
layout.input32_exprs = [x_input32, y_input32];
layout.output32_exprs = output32;
Ok(layout)
}
fn finalize(&mut self, cb: &mut CircuitBuilder<E>) -> (OutEvalGroups, Chip<E>) {
self.n_fixed = cb.cs.num_fixed;
self.n_committed = cb.cs.num_witin as usize;
self.n_structural_witin = cb.cs.num_structural_witin as usize;
self.n_challenges = 0;
cb.cs.r_selector = Some(self.selector_type_layout.sel_mem_read.clone());
cb.cs.w_selector = Some(self.selector_type_layout.sel_mem_write.clone());
cb.cs.lk_selector = Some(self.selector_type_layout.sel_lookup.clone());
cb.cs.zero_selector = Some(self.selector_type_layout.sel_zero.clone());
let w_len = cb.cs.w_expressions.len();
let r_len = cb.cs.r_expressions.len();
let lk_len = cb.cs.lk_expressions.len();
let zero_len =
cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
(
[
(0..r_len).collect_vec(),
(r_len..r_len + w_len).collect_vec(),
(r_len + w_len..r_len + w_len + lk_len).collect_vec(),
(0..zero_len).collect_vec(),
],
Chip::new_from_cb(cb, self.n_challenges),
)
}
}
impl<E: ExtensionField, P: FpOpField> ProtocolWitnessGenerator<E> for Fp2MulAssignLayout<E, P> {
type Trace = Fp2MulTrace<P>;
fn fixed_witness_group(&self) -> RowMajorMatrix<E::BaseField> {
RowMajorMatrix::new(0, 0, InstancePaddingStrategy::Default)
}
fn phase1_witness_group(
&self,
phase1: Self::Trace,
wits: [&mut RowMajorMatrix<E::BaseField>; 2],
lk_multiplicity: &mut LkMultiplicity,
) {
let (wits_start, num_wit_cols) = (
self.layer_exprs.wits.a0.0[0].id as usize,
num_fp2_mul_cols::<P>(),
);
let [wits, structural_wits] = wits;
let num_instances = wits.num_instances();
let nthreads = max_usable_threads();
let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
let raw_witin_iter = wits.par_batch_iter_mut(num_instance_per_batch);
let raw_structural_wits_iter = structural_wits.par_batch_iter_mut(num_instance_per_batch);
raw_witin_iter
.zip_eq(raw_structural_wits_iter)
.zip_eq(phase1.instances.par_chunks(num_instance_per_batch))
.for_each(|((rows, eqs), phase1_instances)| {
let mut lk_multiplicity = lk_multiplicity.clone();
rows.chunks_mut(self.n_committed)
.zip_eq(eqs.chunks_mut(self.n_structural_witin))
.zip_eq(phase1_instances)
.for_each(|((row, eqs), phase1_instance)| {
let cols: &mut Fp2MulAssignWitCols<E::BaseField, P> =
row[wits_start..][..num_wit_cols].borrow_mut();
Self::populate_row(phase1_instance, cols, &mut lk_multiplicity);
for x in eqs.iter_mut() {
*x = E::BaseField::ONE;
}
});
});
}
}
#[cfg(test)]
mod tests {
use super::*;
use ff_ext::{BabyBearExt4, SmallField};
use gkr_iop::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
cpu::{CpuBackend, CpuProver},
gkr::{GKRProverOutput, layer::Layer},
selector::SelectorContext,
};
use itertools::Itertools;
use mpcs::BasefoldDefault;
use multilinear_extensions::{mle::PointAndEval, util::ceil_log2};
use num::BigUint;
use rand::RngCore;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use sp1_curves::weierstrass::{bls12_381::Bls12381BaseField, bn254::Bn254BaseField};
use std::sync::Arc;
use sumcheck::util::optimal_sumcheck_threads;
use transcript::{BasicTranscript, Transcript};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::witness::LkMultiplicity;
fn random_mod<P: FpOpField>() -> BigUint {
let mut bytes = vec![0u8; P::NB_LIMBS + 8];
rand::thread_rng().fill_bytes(&mut bytes);
BigUint::from_bytes_le(&bytes) % P::modulus()
}
fn test_fp2_mul_helper<P: FpOpField>(count: usize) {
type E = BabyBearExt4;
type Pcs = BasefoldDefault<E>;
let mut cs = ConstraintSystem::<E>::new(|| "fp2_mul_test");
let mut cb = CircuitBuilder::<E>::new(&mut cs);
let mut layout = Fp2MulAssignLayout::<E, P>::build_layer_logic(&mut cb, ())
.expect("build_layer_logic failed");
let (out_evals, mut chip) = layout.finalize(&mut cb);
let layer =
Layer::from_circuit_builder(&cb, "fp2_mul".to_string(), layout.n_challenges, out_evals);
chip.add_layer(layer);
let gkr_circuit = chip.gkr_circuit();
let instances = (0..count)
.map(|_| {
let x_c0 = random_mod::<P>();
let x_c1 = random_mod::<P>();
let y_c0 = random_mod::<P>();
let y_c1 = random_mod::<P>();
Fp2MulInstance::<P>::new(x_c0, x_c1, y_c0, y_c1)
})
.collect_vec();
let mut phase1 = RowMajorMatrix::new(
instances.len(),
layout.n_committed,
InstancePaddingStrategy::Default,
);
let mut structural = RowMajorMatrix::new(
instances.len(),
layout.n_structural_witin,
InstancePaddingStrategy::Default,
);
let mut lk_multiplicity = LkMultiplicity::default();
layout.phase1_witness_group(
Fp2MulTrace::<P> {
instances: instances.clone(),
},
[&mut phase1, &mut structural],
&mut lk_multiplicity,
);
let c0_index = layout.layer_exprs.wits.c0.result.0[0].id as usize;
let c1_index = layout.layer_exprs.wits.c1.result.0[0].id as usize;
for (row, inst) in phase1
.iter_rows()
.take(instances.len())
.zip(instances.iter())
{
let c0_bytes = row[c0_index..][..P::NB_LIMBS]
.iter()
.map(|c| c.to_canonical_u64() as u8)
.collect_vec();
let c1_bytes = row[c1_index..][..P::NB_LIMBS]
.iter()
.map(|c| c.to_canonical_u64() as u8)
.collect_vec();
let got_c0 = BigUint::from_bytes_le(&c0_bytes);
let got_c1 = BigUint::from_bytes_le(&c1_bytes);
let modulus = P::modulus();
let expected_c0 =
(&inst.a0 * &inst.b0 + &modulus - (&inst.a1 * &inst.b1) % &modulus) % &modulus;
let expected_c1 = (&inst.a0 * &inst.b1 + &inst.a1 * &inst.b0) % &modulus;
assert_eq!(got_c0, expected_c0);
assert_eq!(got_c1, expected_c1);
}
phase1.padding_by_strategy();
structural.padding_by_strategy();
let num_instances = instances.len();
let log2_num_instance = ceil_log2(num_instances);
let num_threads = optimal_sumcheck_threads(log2_num_instance);
let mut prover_transcript = BasicTranscript::<E>::new(b"protocol");
let challenges = [
prover_transcript.read_challenge().elements,
prover_transcript.read_challenge().elements,
];
let phase1_witness_group = phase1.to_mles().into_iter().map(Arc::new).collect_vec();
let structural_witness = structural.to_mles().into_iter().map(Arc::new).collect_vec();
let fixed = layout
.fixed_witness_group()
.to_mles()
.into_iter()
.map(Arc::new)
.collect_vec();
let (gkr_witness, gkr_output) =
crate::scheme::utils::gkr_witness::<E, Pcs, CpuBackend<E, Pcs>, CpuProver<_>>(
&gkr_circuit,
&phase1_witness_group,
&structural_witness,
&fixed,
&[],
&[],
&challenges,
);
let out_evals = {
let mut point = Vec::with_capacity(log2_num_instance);
point.extend(prover_transcript.sample_vec(log2_num_instance).to_vec());
let out_evals = gkr_output
.0
.par_iter()
.map(|wit| {
let point = point[point.len() - wit.num_vars()..point.len()].to_vec();
PointAndEval {
point: point.clone(),
eval: wit.evaluate(&point),
}
})
.collect::<Vec<_>>();
if out_evals.is_empty() {
vec![PointAndEval {
point: point[point.len() - log2_num_instance..point.len()].to_vec(),
eval: E::ZERO,
}]
} else {
out_evals
}
};
let selector_ctxs = vec![SelectorContext::new(0, num_instances, log2_num_instance); 1];
let GKRProverOutput { gkr_proof, .. } = gkr_circuit
.prove::<CpuBackend<E, Pcs>, CpuProver<_>>(
num_threads,
log2_num_instance,
gkr_witness,
&out_evals,
&[],
&challenges,
&mut prover_transcript,
&selector_ctxs,
)
.expect("fp2_mul prove failed");
let mut verifier_transcript = BasicTranscript::<E>::new(b"protocol");
let challenges = [
verifier_transcript.read_challenge().elements,
verifier_transcript.read_challenge().elements,
];
let mut point = Vec::with_capacity(log2_num_instance);
point.extend(verifier_transcript.sample_vec(log2_num_instance).to_vec());
gkr_circuit
.verify(
log2_num_instance,
gkr_proof,
&out_evals,
&[],
&[],
&challenges,
&mut verifier_transcript,
&selector_ctxs,
)
.expect("fp2_mul verify failed");
}
#[test]
fn test_bls12381_fp2_mul() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp2_mul_helper::<Bls12381BaseField>(8))
.expect("spawn fp2_mul test thread failed")
.join()
.expect("fp2_mul test thread panicked");
}
#[test]
fn test_bls12381_fp2_mul_nonpow2() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp2_mul_helper::<Bls12381BaseField>(7))
.expect("spawn fp2_mul test thread failed")
.join()
.expect("fp2_mul test thread panicked");
}
#[test]
fn test_bn254_fp2_mul() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp2_mul_helper::<Bn254BaseField>(8))
.expect("spawn fp2_mul test thread failed")
.join()
.expect("fp2_mul test thread panicked");
}
#[test]
fn test_bn254_fp2_mul_nonpow2() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp2_mul_helper::<Bn254BaseField>(7))
.expect("spawn fp2_mul test thread failed")
.join()
.expect("fp2_mul test thread panicked");
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/fptower/fp2_addsub.rs | ceno_zkvm/src/precompiles/fptower/fp2_addsub.rs | // The crate fp2_addsub circuit is modified from succinctlabs/sp1 under MIT license
// The MIT License (MIT)
// Copyright (c) 2023 Succinct Labs
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use std::{array, borrow::BorrowMut, marker::PhantomData, mem::size_of};
use derive::AlignedBorrow;
use ff_ext::ExtensionField;
use generic_array::{GenericArray, sequence::GenericSequence};
use gkr_iop::{
OutEvalGroups, ProtocolBuilder, ProtocolWitnessGenerator, chip::Chip,
circuit_builder::CircuitBuilder, error::CircuitBuilderError, selector::SelectorType,
};
use itertools::Itertools;
use multilinear_extensions::{Expression, ToExpr, WitIn, util::max_usable_threads};
use num::BigUint;
use p3::field::FieldAlgebra;
use rayon::{
iter::{IndexedParallelIterator, ParallelIterator},
prelude::ParallelSlice,
};
use sp1_curves::{
params::{Limbs, NumWords},
polynomial::Polynomial,
weierstrass::FpOpField,
};
use typenum::Unsigned;
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::MemoryExpr,
gadgets::{FieldOperation, field_op::FieldOpCols, range::FieldLtCols},
precompiles::{SelectorTypeLayout, utils::merge_u8_slice_to_u16_limbs_pairs_and_extend},
witness::LkMultiplicity,
};
pub const fn num_fp2_addsub_cols<P: FpOpField>() -> usize {
size_of::<Fp2AddSubAssignWitCols<u8, P>>()
}
#[derive(Debug, Clone)]
pub struct Fp2AddSubInstance<P: FpOpField> {
pub a0: BigUint,
pub a1: BigUint,
pub b0: BigUint,
pub b1: BigUint,
pub op: FieldOperation,
_marker: PhantomData<P>,
}
impl<P: FpOpField> Fp2AddSubInstance<P> {
pub fn new(a0: BigUint, a1: BigUint, b0: BigUint, b1: BigUint, op: FieldOperation) -> Self {
Self {
a0,
a1,
b0,
b1,
op,
_marker: PhantomData,
}
}
}
pub struct Fp2AddSubTrace<P: FpOpField> {
pub instances: Vec<Fp2AddSubInstance<P>>,
}
/// A set of columns for the Fp2AddSub operation.
#[derive(Debug, Clone, AlignedBorrow)]
#[repr(C)]
pub struct Fp2AddSubAssignWitCols<T, P: FpOpField> {
pub is_add: T,
pub a0: Limbs<T, P::Limbs>,
pub a1: Limbs<T, P::Limbs>,
pub b0: Limbs<T, P::Limbs>,
pub b1: Limbs<T, P::Limbs>,
pub(crate) c0: FieldOpCols<T, P>,
pub(crate) c1: FieldOpCols<T, P>,
pub(crate) c0_range_check: FieldLtCols<T, P>,
pub(crate) c1_range_check: FieldLtCols<T, P>,
}
#[derive(Clone, Debug)]
#[repr(C)]
pub struct Fp2AddSubAssignLayer<WitT, P: FpOpField> {
pub wits: Fp2AddSubAssignWitCols<WitT, P>,
}
#[derive(Clone, Debug)]
pub struct Fp2AddSubAssignLayout<E: ExtensionField, P: FpOpField> {
pub layer_exprs: Fp2AddSubAssignLayer<WitIn, P>,
pub selector_type_layout: SelectorTypeLayout<E>,
pub input32_exprs: [GenericArray<MemoryExpr<E>, <P as NumWords>::WordsCurvePoint>; 2],
pub output32_exprs: GenericArray<MemoryExpr<E>, <P as NumWords>::WordsCurvePoint>,
pub n_fixed: usize,
pub n_committed: usize,
pub n_structural_witin: usize,
pub n_challenges: usize,
}
impl<E: ExtensionField, P: FpOpField> Fp2AddSubAssignLayout<E, P> {
fn new(cb: &mut CircuitBuilder<E>) -> Self {
let wits = Fp2AddSubAssignWitCols {
is_add: cb.create_witin(|| "fp2_is_add"),
a0: Limbs(GenericArray::generate(|_| cb.create_witin(|| "fp2_a0"))),
a1: Limbs(GenericArray::generate(|_| cb.create_witin(|| "fp2_a1"))),
b0: Limbs(GenericArray::generate(|_| cb.create_witin(|| "fp2_b0"))),
b1: Limbs(GenericArray::generate(|_| cb.create_witin(|| "fp2_b1"))),
c0: FieldOpCols::create(cb, || "fp2_c0"),
c1: FieldOpCols::create(cb, || "fp2_c1"),
c0_range_check: FieldLtCols::create(cb, || "fp2_c0_range"),
c1_range_check: FieldLtCols::create(cb, || "fp2_c1_range"),
};
let eq = cb.create_placeholder_structural_witin(|| "fp2_addsub_structural_witin");
let sel = SelectorType::Prefix(eq.expr());
let selector_type_layout = SelectorTypeLayout {
sel_mem_read: sel.clone(),
sel_mem_write: sel.clone(),
sel_lookup: sel.clone(),
sel_zero: sel.clone(),
};
let input32_exprs: [GenericArray<MemoryExpr<E>, <P as NumWords>::WordsCurvePoint>; 2] =
array::from_fn(|_| {
GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)))
});
let output32_exprs: GenericArray<MemoryExpr<E>, <P as NumWords>::WordsCurvePoint> =
GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)));
Self {
layer_exprs: Fp2AddSubAssignLayer { wits },
selector_type_layout,
input32_exprs,
output32_exprs,
n_fixed: 0,
n_committed: 0,
n_structural_witin: 0,
n_challenges: 0,
}
}
fn populate_row(
instance: &Fp2AddSubInstance<P>,
cols: &mut Fp2AddSubAssignWitCols<E::BaseField, P>,
lk_multiplicity: &mut LkMultiplicity,
) {
cols.is_add = E::BaseField::from_canonical_u8((instance.op == FieldOperation::Add) as u8);
cols.a0 = P::to_limbs_field(&instance.a0);
cols.a1 = P::to_limbs_field(&instance.a1);
cols.b0 = P::to_limbs_field(&instance.b0);
cols.b1 = P::to_limbs_field(&instance.b1);
let modulus = P::modulus();
let c0 = cols.c0.populate_with_modulus(
lk_multiplicity,
&instance.a0,
&instance.b0,
&modulus,
instance.op,
);
let c1 = cols.c1.populate_with_modulus(
lk_multiplicity,
&instance.a1,
&instance.b1,
&modulus,
instance.op,
);
cols.c0_range_check.populate(lk_multiplicity, &c0, &modulus);
cols.c1_range_check.populate(lk_multiplicity, &c1, &modulus);
}
}
impl<E: ExtensionField, P: FpOpField> ProtocolBuilder<E> for Fp2AddSubAssignLayout<E, P> {
type Params = ();
fn build_layer_logic(
cb: &mut CircuitBuilder<E>,
_params: Self::Params,
) -> Result<Self, CircuitBuilderError> {
let mut layout = Fp2AddSubAssignLayout::new(cb);
let wits = &layout.layer_exprs.wits;
cb.assert_bit(|| "fp2_is_add_bool", wits.is_add.expr())?;
let modulus: Polynomial<Expression<E>> = P::to_limbs_expr::<E>(&P::modulus()).into();
let is_sub = E::BaseField::ONE.expr() - wits.is_add.expr();
let zero = E::BaseField::ZERO.expr();
wits.c0.eval_variable(
cb,
&wits.a0,
&wits.b0,
&modulus,
wits.is_add.expr(),
is_sub.clone(),
zero.clone(),
zero.clone(),
)?;
wits.c1.eval_variable(
cb,
&wits.a1,
&wits.b1,
&modulus,
wits.is_add.expr(),
is_sub,
zero.clone(),
zero,
)?;
wits.c0_range_check.eval(cb, &wits.c0.result, &modulus)?;
wits.c1_range_check.eval(cb, &wits.c1.result, &modulus)?;
let mut x_input32 = Vec::with_capacity(<P as NumWords>::WordsCurvePoint::USIZE);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.a0.0, &mut x_input32);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.a1.0, &mut x_input32);
let x_input32 = x_input32.try_into().unwrap();
let mut y_input32 = Vec::with_capacity(<P as NumWords>::WordsCurvePoint::USIZE);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.b0.0, &mut y_input32);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.b1.0, &mut y_input32);
let y_input32 = y_input32.try_into().unwrap();
let mut output32 = Vec::with_capacity(<P as NumWords>::WordsCurvePoint::USIZE);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.c0.result.0, &mut output32);
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&wits.c1.result.0, &mut output32);
let output32 = output32.try_into().unwrap();
layout.input32_exprs = [x_input32, y_input32];
layout.output32_exprs = output32;
Ok(layout)
}
fn finalize(&mut self, cb: &mut CircuitBuilder<E>) -> (OutEvalGroups, Chip<E>) {
self.n_fixed = cb.cs.num_fixed;
self.n_committed = cb.cs.num_witin as usize;
self.n_structural_witin = cb.cs.num_structural_witin as usize;
self.n_challenges = 0;
cb.cs.r_selector = Some(self.selector_type_layout.sel_mem_read.clone());
cb.cs.w_selector = Some(self.selector_type_layout.sel_mem_write.clone());
cb.cs.lk_selector = Some(self.selector_type_layout.sel_lookup.clone());
cb.cs.zero_selector = Some(self.selector_type_layout.sel_zero.clone());
let w_len = cb.cs.w_expressions.len();
let r_len = cb.cs.r_expressions.len();
let lk_len = cb.cs.lk_expressions.len();
let zero_len =
cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
(
[
(0..r_len).collect_vec(),
(r_len..r_len + w_len).collect_vec(),
(r_len + w_len..r_len + w_len + lk_len).collect_vec(),
(0..zero_len).collect_vec(),
],
Chip::new_from_cb(cb, self.n_challenges),
)
}
}
impl<E: ExtensionField, P: FpOpField> ProtocolWitnessGenerator<E> for Fp2AddSubAssignLayout<E, P> {
type Trace = Fp2AddSubTrace<P>;
fn fixed_witness_group(&self) -> RowMajorMatrix<E::BaseField> {
RowMajorMatrix::new(0, 0, InstancePaddingStrategy::Default)
}
fn phase1_witness_group(
&self,
phase1: Self::Trace,
wits: [&mut RowMajorMatrix<E::BaseField>; 2],
lk_multiplicity: &mut LkMultiplicity,
) {
let (wits_start, num_wit_cols) = (
self.layer_exprs.wits.is_add.id as usize,
num_fp2_addsub_cols::<P>(),
);
let [wits, structural_wits] = wits;
let num_instances = wits.num_instances();
let nthreads = max_usable_threads();
let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
let raw_witin_iter = wits.par_batch_iter_mut(num_instance_per_batch);
let raw_structural_wits_iter = structural_wits.par_batch_iter_mut(num_instance_per_batch);
raw_witin_iter
.zip_eq(raw_structural_wits_iter)
.zip_eq(phase1.instances.par_chunks(num_instance_per_batch))
.for_each(|((rows, eqs), phase1_instances)| {
let mut lk_multiplicity = lk_multiplicity.clone();
rows.chunks_mut(self.n_committed)
.zip_eq(eqs.chunks_mut(self.n_structural_witin))
.zip_eq(phase1_instances)
.for_each(|((row, eqs), phase1_instance)| {
let cols: &mut Fp2AddSubAssignWitCols<E::BaseField, P> =
row[wits_start..][..num_wit_cols].borrow_mut();
Self::populate_row(phase1_instance, cols, &mut lk_multiplicity);
for x in eqs.iter_mut() {
*x = E::BaseField::ONE;
}
});
});
}
}
#[cfg(test)]
mod tests {
use super::*;
use ff_ext::{BabyBearExt4, SmallField};
use gkr_iop::{
circuit_builder::{CircuitBuilder, ConstraintSystem},
cpu::{CpuBackend, CpuProver},
gkr::{GKRProverOutput, layer::Layer},
selector::SelectorContext,
};
use itertools::Itertools;
use mpcs::BasefoldDefault;
use multilinear_extensions::{mle::PointAndEval, util::ceil_log2};
use num::BigUint;
use rand::RngCore;
use rayon::iter::{IntoParallelRefIterator, ParallelIterator};
use sp1_curves::weierstrass::{bls12_381::Bls12381BaseField, bn254::Bn254BaseField};
use std::sync::Arc;
use sumcheck::util::optimal_sumcheck_threads;
use transcript::{BasicTranscript, Transcript};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{gadgets::FieldOperation, witness::LkMultiplicity};
fn random_mod<P: FpOpField>() -> BigUint {
let mut bytes = vec![0u8; P::NB_LIMBS + 8];
rand::thread_rng().fill_bytes(&mut bytes);
BigUint::from_bytes_le(&bytes) % P::modulus()
}
fn test_fp2_addsub_helper<P: FpOpField>(count: usize) {
type E = BabyBearExt4;
type Pcs = BasefoldDefault<E>;
let mut cs = ConstraintSystem::<E>::new(|| "fp2_addsub_test");
let mut cb = CircuitBuilder::<E>::new(&mut cs);
let mut layout = Fp2AddSubAssignLayout::<E, P>::build_layer_logic(&mut cb, ())
.expect("build_layer_logic failed");
let (out_evals, mut chip) = layout.finalize(&mut cb);
let layer = Layer::from_circuit_builder(
&cb,
"fp2_addsub".to_string(),
layout.n_challenges,
out_evals,
);
chip.add_layer(layer);
let gkr_circuit = chip.gkr_circuit();
let instances = (0..count)
.map(|i| {
let a0 = random_mod::<P>();
let a1 = random_mod::<P>();
let b0 = random_mod::<P>();
let b1 = random_mod::<P>();
let op = if i % 2 == 0 {
FieldOperation::Add
} else {
FieldOperation::Sub
};
Fp2AddSubInstance::<P>::new(a0, a1, b0, b1, op)
})
.collect_vec();
let mut phase1 = RowMajorMatrix::new(
instances.len(),
layout.n_committed,
InstancePaddingStrategy::Default,
);
let mut structural = RowMajorMatrix::new(
instances.len(),
layout.n_structural_witin,
InstancePaddingStrategy::Default,
);
let mut lk_multiplicity = LkMultiplicity::default();
layout.phase1_witness_group(
Fp2AddSubTrace::<P> {
instances: instances.clone(),
},
[&mut phase1, &mut structural],
&mut lk_multiplicity,
);
let c0_index = layout.layer_exprs.wits.c0.result.0[0].id as usize;
let c1_index = layout.layer_exprs.wits.c1.result.0[0].id as usize;
for (row, inst) in phase1
.iter_rows()
.take(instances.len())
.zip(instances.iter())
{
let c0_bytes = row[c0_index..][..P::NB_LIMBS]
.iter()
.map(|c| c.to_canonical_u64() as u8)
.collect_vec();
let c1_bytes = row[c1_index..][..P::NB_LIMBS]
.iter()
.map(|c| c.to_canonical_u64() as u8)
.collect_vec();
let got_c0 = BigUint::from_bytes_le(&c0_bytes);
let got_c1 = BigUint::from_bytes_le(&c1_bytes);
let modulus = P::modulus();
let expected_c0 = match inst.op {
FieldOperation::Add => (&inst.a0 + &inst.b0) % &modulus,
FieldOperation::Sub => (&modulus + &inst.a0 - &inst.b0) % &modulus,
FieldOperation::Mul | FieldOperation::Div => unreachable!(),
};
let expected_c1 = match inst.op {
FieldOperation::Add => (&inst.a1 + &inst.b1) % &modulus,
FieldOperation::Sub => (&modulus + &inst.a1 - &inst.b1) % &modulus,
FieldOperation::Mul | FieldOperation::Div => unreachable!(),
};
assert_eq!(got_c0, expected_c0);
assert_eq!(got_c1, expected_c1);
}
phase1.padding_by_strategy();
structural.padding_by_strategy();
let num_instances = instances.len();
let log2_num_instance = ceil_log2(num_instances);
let num_threads = optimal_sumcheck_threads(log2_num_instance);
let mut prover_transcript = BasicTranscript::<E>::new(b"protocol");
let challenges = [
prover_transcript.read_challenge().elements,
prover_transcript.read_challenge().elements,
];
let phase1_witness_group = phase1.to_mles().into_iter().map(Arc::new).collect_vec();
let structural_witness = structural.to_mles().into_iter().map(Arc::new).collect_vec();
let fixed = layout
.fixed_witness_group()
.to_mles()
.into_iter()
.map(Arc::new)
.collect_vec();
let (gkr_witness, gkr_output) =
crate::scheme::utils::gkr_witness::<E, Pcs, CpuBackend<E, Pcs>, CpuProver<_>>(
&gkr_circuit,
&phase1_witness_group,
&structural_witness,
&fixed,
&[],
&[],
&challenges,
);
let out_evals = {
let mut point = Vec::with_capacity(log2_num_instance);
point.extend(prover_transcript.sample_vec(log2_num_instance).to_vec());
let out_evals = gkr_output
.0
.par_iter()
.map(|wit| {
let point = point[point.len() - wit.num_vars()..point.len()].to_vec();
PointAndEval {
point: point.clone(),
eval: wit.evaluate(&point),
}
})
.collect::<Vec<_>>();
if out_evals.is_empty() {
vec![PointAndEval {
point: point[point.len() - log2_num_instance..point.len()].to_vec(),
eval: E::ZERO,
}]
} else {
out_evals
}
};
let selector_ctxs = vec![SelectorContext::new(0, num_instances, log2_num_instance); 1];
let GKRProverOutput { gkr_proof, .. } = gkr_circuit
.prove::<CpuBackend<E, Pcs>, CpuProver<_>>(
num_threads,
log2_num_instance,
gkr_witness,
&out_evals,
&[],
&challenges,
&mut prover_transcript,
&selector_ctxs,
)
.expect("fp2_addsub prove failed");
let mut verifier_transcript = BasicTranscript::<E>::new(b"protocol");
let challenges = [
verifier_transcript.read_challenge().elements,
verifier_transcript.read_challenge().elements,
];
let mut point = Vec::with_capacity(log2_num_instance);
point.extend(verifier_transcript.sample_vec(log2_num_instance).to_vec());
gkr_circuit
.verify(
log2_num_instance,
gkr_proof,
&out_evals,
&[],
&[],
&challenges,
&mut verifier_transcript,
&selector_ctxs,
)
.expect("fp2_addsub verify failed");
}
#[test]
fn test_bls12381_fp2_addsub() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp2_addsub_helper::<Bls12381BaseField>(8))
.expect("spawn fp2_addsub test thread failed")
.join()
.expect("fp2_addsub test thread panicked");
}
#[test]
fn test_bls12381_fp2_addsub_nonpow2() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp2_addsub_helper::<Bls12381BaseField>(7))
.expect("spawn fp2_addsub test thread failed")
.join()
.expect("fp2_addsub test thread panicked");
}
#[test]
fn test_bn254_fp2_addsub() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp2_addsub_helper::<Bn254BaseField>(8))
.expect("spawn fp2_addsub test thread failed")
.join()
.expect("fp2_addsub test thread panicked");
}
#[test]
fn test_bn254_fp2_addsub_nonpow2() {
std::thread::Builder::new()
.stack_size(32 * 1024 * 1024)
.spawn(|| test_fp2_addsub_helper::<Bn254BaseField>(7))
.expect("spawn fp2_addsub test thread failed")
.join()
.expect("fp2_addsub test thread panicked");
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/weierstrass/weierstrass_add.rs | ceno_zkvm/src/precompiles/weierstrass/weierstrass_add.rs | // The crate weierstrass add circuit is modified from succinctlabs/sp1 under MIT license
// The MIT License (MIT)
// Copyright (c) 2023 Succinct Labs
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use std::{array, fmt::Debug, sync::Arc};
use ceno_emul::{ByteAddr, MemOp, StepRecord};
use core::{borrow::BorrowMut, mem::size_of};
use derive::AlignedBorrow;
use ff_ext::{ExtensionField, SmallField};
use generic_array::{GenericArray, sequence::GenericSequence, typenum::Unsigned};
use gkr_iop::{
OutEvalGroups, ProtocolBuilder, ProtocolWitnessGenerator,
chip::Chip,
circuit_builder::{CircuitBuilder, ConstraintSystem},
cpu::{CpuBackend, CpuProver},
error::{BackendError, CircuitBuilderError},
gkr::{GKRCircuit, GKRProof, GKRProverOutput, layer::Layer, mock::MockProver},
selector::{SelectorContext, SelectorType},
};
use itertools::{Itertools, izip};
use mpcs::PolynomialCommitmentScheme;
use multilinear_extensions::{
Expression, ToExpr, WitIn,
util::{ceil_log2, max_usable_threads},
};
use num::BigUint;
use p3::field::FieldAlgebra;
use rayon::{
iter::{IndexedParallelIterator, ParallelIterator},
prelude::{IntoParallelRefIterator, ParallelSlice},
};
use sp1_curves::{
AffinePoint, EllipticCurve,
params::{FieldParameters, Limbs, NumLimbs, NumWords},
};
use sumcheck::{
macros::{entered_span, exit_span},
util::optimal_sumcheck_threads,
};
use transcript::{BasicTranscript, Transcript};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::MemoryExpr,
e2e::ShardContext,
error::ZKVMError,
gadgets::{FieldOperation, field_op::FieldOpCols},
instructions::riscv::insn_base::{StateInOut, WriteMEM},
precompiles::{
SelectorTypeLayout, utils::merge_u8_slice_to_u16_limbs_pairs_and_extend,
weierstrass::EllipticCurveAddInstance,
},
scheme::utils::gkr_witness,
structs::PointAndEval,
witness::LkMultiplicity,
};
#[derive(Clone, Debug, AlignedBorrow)]
#[repr(C)]
pub struct WeierstrassAddAssignWitCols<WitT, P: FieldParameters + NumLimbs> {
pub p_x: Limbs<WitT, P::Limbs>,
pub p_y: Limbs<WitT, P::Limbs>,
pub q_x: Limbs<WitT, P::Limbs>,
pub q_y: Limbs<WitT, P::Limbs>,
pub(crate) slope_denominator: FieldOpCols<WitT, P>,
pub(crate) slope_numerator: FieldOpCols<WitT, P>,
pub(crate) slope: FieldOpCols<WitT, P>,
pub(crate) slope_squared: FieldOpCols<WitT, P>,
pub(crate) p_x_plus_q_x: FieldOpCols<WitT, P>,
pub(crate) x3_ins: FieldOpCols<WitT, P>,
pub(crate) p_x_minus_x: FieldOpCols<WitT, P>,
pub(crate) y3_ins: FieldOpCols<WitT, P>,
pub(crate) slope_times_p_x_minus_x: FieldOpCols<WitT, P>,
}
/// Weierstrass addition is implemented by a single layer.
#[derive(Clone, Debug)]
#[repr(C)]
pub struct WeierstrassAddAssignLayer<WitT, P: FieldParameters + NumWords> {
pub wits: WeierstrassAddAssignWitCols<WitT, P>,
}
#[derive(Clone, Debug)]
pub struct WeierstrassAddAssignLayout<E: ExtensionField, EC: EllipticCurve> {
pub layer_exprs: WeierstrassAddAssignLayer<WitIn, EC::BaseField>,
pub selector_type_layout: SelectorTypeLayout<E>,
pub input32_exprs:
[GenericArray<MemoryExpr<E>, <EC::BaseField as NumWords>::WordsCurvePoint>; 2],
pub output32_exprs: GenericArray<MemoryExpr<E>, <EC::BaseField as NumWords>::WordsCurvePoint>,
pub n_fixed: usize,
pub n_committed: usize,
pub n_structural_witin: usize,
pub n_challenges: usize,
}
impl<E: ExtensionField, EC: EllipticCurve> WeierstrassAddAssignLayout<E, EC> {
fn new(cb: &mut CircuitBuilder<E>) -> Self {
let wits = WeierstrassAddAssignWitCols {
p_x: Limbs(GenericArray::generate(|_| cb.create_witin(|| "p_x"))),
p_y: Limbs(GenericArray::generate(|_| cb.create_witin(|| "p_y"))),
q_x: Limbs(GenericArray::generate(|_| cb.create_witin(|| "q_x"))),
q_y: Limbs(GenericArray::generate(|_| cb.create_witin(|| "q_y"))),
slope_denominator: FieldOpCols::create(cb, || "slope_denominator"),
slope_numerator: FieldOpCols::create(cb, || "slope_numerator"),
slope: FieldOpCols::create(cb, || "slope"),
slope_squared: FieldOpCols::create(cb, || "slope_squared"),
p_x_plus_q_x: FieldOpCols::create(cb, || "p_x_plus_q_x"),
x3_ins: FieldOpCols::create(cb, || "x3_ins"),
p_x_minus_x: FieldOpCols::create(cb, || "p_x_minus_x"),
y3_ins: FieldOpCols::create(cb, || "y3_ins"),
slope_times_p_x_minus_x: FieldOpCols::create(cb, || "slope_times_p_x_minus_x"),
};
let eq = cb.create_placeholder_structural_witin(|| "weierstrass_add_eq");
let sel = SelectorType::Prefix(eq.expr());
let selector_type_layout = SelectorTypeLayout {
sel_mem_read: sel.clone(),
sel_mem_write: sel.clone(),
sel_lookup: sel.clone(),
sel_zero: sel.clone(),
};
// Default expression, will be updated in build_layer_logic
let input32_exprs: [GenericArray<
MemoryExpr<E>,
<EC::BaseField as NumWords>::WordsCurvePoint,
>; 2] = array::from_fn(|_| {
GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)))
});
// Default expression, will be updated in build_layer_logic
let output32_exprs: GenericArray<
MemoryExpr<E>,
<EC::BaseField as NumWords>::WordsCurvePoint,
> = GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)));
Self {
layer_exprs: WeierstrassAddAssignLayer { wits },
selector_type_layout,
input32_exprs,
output32_exprs,
n_fixed: 0,
n_committed: 0,
n_challenges: 0,
n_structural_witin: 0,
}
}
#[allow(clippy::too_many_arguments)]
fn populate_field_ops(
blu_events: &mut LkMultiplicity,
cols: &mut WeierstrassAddAssignWitCols<E::BaseField, EC::BaseField>,
p_x: BigUint,
p_y: BigUint,
q_x: BigUint,
q_y: BigUint,
) {
// This populates necessary field operations to calculate the addition of two points on a
// Weierstrass curve.
// slope = (q.y - p.y) / (q.x - p.x).
let slope = {
let slope_numerator =
cols.slope_numerator
.populate(blu_events, &q_y, &p_y, FieldOperation::Sub);
let slope_denominator =
cols.slope_denominator
.populate(blu_events, &q_x, &p_x, FieldOperation::Sub);
cols.slope.populate(
blu_events,
&slope_numerator,
&slope_denominator,
FieldOperation::Div,
)
};
// x = slope * slope - (p.x + q.x).
let x = {
let slope_squared =
cols.slope_squared
.populate(blu_events, &slope, &slope, FieldOperation::Mul);
let p_x_plus_q_x =
cols.p_x_plus_q_x
.populate(blu_events, &p_x, &q_x, FieldOperation::Add);
cols.x3_ins.populate(
blu_events,
&slope_squared,
&p_x_plus_q_x,
FieldOperation::Sub,
)
};
// y = slope * (p.x - x_3n) - p.y.
{
let p_x_minus_x = cols
.p_x_minus_x
.populate(blu_events, &p_x, &x, FieldOperation::Sub);
let slope_times_p_x_minus_x = cols.slope_times_p_x_minus_x.populate(
blu_events,
&slope,
&p_x_minus_x,
FieldOperation::Mul,
);
cols.y3_ins.populate(
blu_events,
&slope_times_p_x_minus_x,
&p_y,
FieldOperation::Sub,
);
}
}
}
impl<E: ExtensionField, EC: EllipticCurve> ProtocolBuilder<E>
for WeierstrassAddAssignLayout<E, EC>
{
type Params = ();
fn build_layer_logic(
cb: &mut CircuitBuilder<E>,
_params: Self::Params,
) -> Result<Self, CircuitBuilderError> {
let mut layout = WeierstrassAddAssignLayout::new(cb);
let wits = &layout.layer_exprs.wits;
// slope = (q.y - p.y) / (q.x - p.x).
let slope = {
wits.slope_numerator
.eval(cb, &wits.q_y, &wits.p_y, FieldOperation::Sub)?;
wits.slope_denominator
.eval(cb, &wits.q_x, &wits.p_x, FieldOperation::Sub)?;
wits.slope.eval(
cb,
&wits.slope_numerator.result,
&wits.slope_denominator.result,
FieldOperation::Div,
)?;
&wits.slope.result
};
// x = slope * slope - self.x - other.x.
let x = {
wits.slope_squared
.eval(cb, slope, slope, FieldOperation::Mul)?;
wits.p_x_plus_q_x
.eval(cb, &wits.p_x, &wits.q_x, FieldOperation::Add)?;
wits.x3_ins.eval(
cb,
&wits.slope_squared.result,
&wits.p_x_plus_q_x.result,
FieldOperation::Sub,
)?;
&wits.x3_ins.result
};
// y = slope * (p.x - x_3n) - q.y.
{
wits.p_x_minus_x
.eval(cb, &wits.p_x, x, FieldOperation::Sub)?;
wits.slope_times_p_x_minus_x.eval(
cb,
slope,
&wits.p_x_minus_x.result,
FieldOperation::Mul,
)?;
wits.y3_ins.eval(
cb,
&wits.slope_times_p_x_minus_x.result,
&wits.p_y,
FieldOperation::Sub,
)?;
}
// Constraint output32 from wits.x3_ins || wits.y3_ins by converting 8-bit limbs to 2x16-bit felts
let mut output32 = Vec::with_capacity(<EC::BaseField as NumWords>::WordsCurvePoint::USIZE);
for limbs in [&wits.x3_ins.result, &wits.y3_ins.result] {
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&limbs.0, &mut output32);
}
let output32 = output32.try_into().unwrap();
let mut p_input32 = Vec::with_capacity(<EC::BaseField as NumWords>::WordsCurvePoint::USIZE);
for limbs in [&wits.p_x, &wits.p_y] {
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&limbs.0, &mut p_input32);
}
let p_input32 = p_input32.try_into().unwrap();
let mut q_input32 = Vec::with_capacity(<EC::BaseField as NumWords>::WordsCurvePoint::USIZE);
for limbs in [&wits.q_x, &wits.q_y] {
merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&limbs.0, &mut q_input32);
}
let q_input32 = q_input32.try_into().unwrap();
// set input32/output32 expr
layout.input32_exprs = [p_input32, q_input32];
layout.output32_exprs = output32;
Ok(layout)
}
fn finalize(&mut self, cb: &mut CircuitBuilder<E>) -> (OutEvalGroups, Chip<E>) {
self.n_fixed = cb.cs.num_fixed;
self.n_committed = cb.cs.num_witin as usize;
self.n_structural_witin = cb.cs.num_structural_witin as usize;
self.n_challenges = 0;
// register selector to legacy constrain system
cb.cs.r_selector = Some(self.selector_type_layout.sel_mem_read.clone());
cb.cs.w_selector = Some(self.selector_type_layout.sel_mem_write.clone());
cb.cs.lk_selector = Some(self.selector_type_layout.sel_lookup.clone());
cb.cs.zero_selector = Some(self.selector_type_layout.sel_zero.clone());
let w_len = cb.cs.w_expressions.len();
let r_len = cb.cs.r_expressions.len();
let lk_len = cb.cs.lk_expressions.len();
let zero_len =
cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
(
[
// r_record
(0..r_len).collect_vec(),
// w_record
(r_len..r_len + w_len).collect_vec(),
// lk_record
(r_len + w_len..r_len + w_len + lk_len).collect_vec(),
// zero_record
(0..zero_len).collect_vec(),
],
Chip::new_from_cb(cb, self.n_challenges),
)
}
fn n_committed(&self) -> usize {
todo!()
}
fn n_fixed(&self) -> usize {
todo!()
}
fn n_challenges(&self) -> usize {
todo!()
}
fn n_evaluations(&self) -> usize {
todo!()
}
fn n_layers(&self) -> usize {
todo!()
}
}
#[derive(Clone, Default)]
pub struct WeierstrassAddAssignTrace<P: NumWords> {
pub instances: Vec<EllipticCurveAddInstance<P>>,
}
impl<E: ExtensionField, EC: EllipticCurve> ProtocolWitnessGenerator<E>
for WeierstrassAddAssignLayout<E, EC>
{
type Trace = WeierstrassAddAssignTrace<EC::BaseField>;
fn fixed_witness_group(&self) -> RowMajorMatrix<E::BaseField> {
RowMajorMatrix::new(0, 0, InstancePaddingStrategy::Default)
}
fn phase1_witness_group(
&self,
phase1: Self::Trace,
wits: [&mut RowMajorMatrix<E::BaseField>; 2],
lk_multiplicity: &mut LkMultiplicity,
) {
let num_instances = wits[0].num_instances();
let nthreads = max_usable_threads();
let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
let num_wit_cols = size_of::<WeierstrassAddAssignWitCols<u8, EC::BaseField>>();
let [wits, structural_wits] = wits;
let raw_witin_iter = wits.par_batch_iter_mut(num_instance_per_batch);
let raw_structural_wits_iter = structural_wits.par_batch_iter_mut(num_instance_per_batch);
raw_witin_iter
.zip_eq(raw_structural_wits_iter)
.zip_eq(phase1.instances.par_chunks(num_instance_per_batch))
.for_each(|((rows, eqs), phase1_instances)| {
let mut lk_multiplicity = lk_multiplicity.clone();
rows.chunks_mut(self.n_committed)
.zip_eq(eqs.chunks_mut(self.n_structural_witin))
.zip_eq(phase1_instances)
.for_each(|((row, eqs), phase1_instance)| {
let cols: &mut WeierstrassAddAssignWitCols<E::BaseField, EC::BaseField> =
row[self.layer_exprs.wits.p_x.0[0].id as usize..][..num_wit_cols] // TODO: Find a better way to write it.
.borrow_mut();
Self::populate_row(phase1_instance, cols, &mut lk_multiplicity);
for x in eqs.iter_mut() {
*x = E::BaseField::ONE;
}
});
});
}
}
impl<E: ExtensionField, EC: EllipticCurve> WeierstrassAddAssignLayout<E, EC> {
pub fn populate_row(
event: &EllipticCurveAddInstance<EC::BaseField>,
cols: &mut WeierstrassAddAssignWitCols<E::BaseField, EC::BaseField>,
new_byte_lookup_events: &mut LkMultiplicity,
) {
// Decode affine points.
let p = &event.p;
let q = &event.q;
let p = AffinePoint::<EC>::from_words_le(p);
let (p_x, p_y) = (p.x, p.y);
let q = AffinePoint::<EC>::from_words_le(q);
let (q_x, q_y) = (q.x, q.y);
// Populate basic columns.
cols.p_x = EC::BaseField::to_limbs_field(&p_x);
cols.p_y = EC::BaseField::to_limbs_field(&p_y);
cols.q_x = EC::BaseField::to_limbs_field(&q_x);
cols.q_y = EC::BaseField::to_limbs_field(&q_y);
Self::populate_field_ops(new_byte_lookup_events, cols, p_x, p_y, q_x, q_y);
}
}
/// this is for testing purpose
pub struct TestWeierstrassAddLayout<E: ExtensionField, EC: EllipticCurve> {
layout: WeierstrassAddAssignLayout<E, EC>,
mem_rw: Vec<WriteMEM>,
vm_state: StateInOut<E>,
_point_ptr_0: WitIn,
}
#[allow(clippy::type_complexity)]
pub fn setup_gkr_circuit<E: ExtensionField, EC: EllipticCurve>()
-> Result<(TestWeierstrassAddLayout<E, EC>, GKRCircuit<E>, u16, u16), ZKVMError> {
let mut cs = ConstraintSystem::new(|| "weierstrass_add");
let mut cb = CircuitBuilder::<E>::new(&mut cs);
// constrain vmstate
let vm_state = StateInOut::construct_circuit(&mut cb, false)?;
let point_ptr_0 = cb.create_witin(|| "state_ptr_0");
let mut layout = WeierstrassAddAssignLayout::build_layer_logic(&mut cb, ())?;
// Write the result to the same address of the first input point.
let mut mem_rw = izip!(&layout.input32_exprs[0], &layout.output32_exprs)
.enumerate()
.map(|(i, (val_before, val_after))| {
WriteMEM::construct_circuit(
&mut cb,
// mem address := state_ptr_0 + i
point_ptr_0.expr() + E::BaseField::from_canonical_u32(i as u32).expr(),
val_before.clone(),
val_after.clone(),
vm_state.ts,
)
})
.collect::<Result<Vec<WriteMEM>, _>>()?;
// Keep the second input point unchanged in memory.
mem_rw.extend(
layout.input32_exprs[1]
.iter()
.enumerate()
.map(|(i, val_before)| {
WriteMEM::construct_circuit(
&mut cb,
// mem address := state_ptr_1 + i
point_ptr_0.expr()
+ E::BaseField::from_canonical_u32(
(layout.output32_exprs.len() + i) as u32,
)
.expr(),
val_before.clone(),
val_before.clone(),
vm_state.ts,
)
})
.collect::<Result<Vec<WriteMEM>, _>>()?,
);
let (out_evals, mut chip) = layout.finalize(&mut cb);
let layer = Layer::from_circuit_builder(
&cb,
"weierstrass_add".to_string(),
layout.n_challenges,
out_evals,
);
chip.add_layer(layer);
Ok((
TestWeierstrassAddLayout {
layout,
vm_state,
_point_ptr_0: point_ptr_0,
mem_rw,
},
chip.gkr_circuit(),
cs.num_witin,
cs.num_structural_witin,
))
}
#[tracing::instrument(
skip_all,
name = "run_weierstrass_add",
level = "trace",
fields(profiling_1)
)]
pub fn run_weierstrass_add<
E: ExtensionField,
PCS: PolynomialCommitmentScheme<E> + 'static,
EC: EllipticCurve,
>(
(layout, gkr_circuit, num_witin, num_structural_witin): (
TestWeierstrassAddLayout<E, EC>,
GKRCircuit<E>,
u16,
u16,
),
points: Vec<[GenericArray<u32, <EC::BaseField as NumWords>::WordsCurvePoint>; 2]>,
verify: bool,
test_outputs: bool,
) -> Result<GKRProof<E>, BackendError> {
let mut shard_ctx = ShardContext::default();
let num_instances = points.len();
let log2_num_instance = ceil_log2(num_instances);
let num_threads = optimal_sumcheck_threads(log2_num_instance);
let mut instances: Vec<EllipticCurveAddInstance<EC::BaseField>> =
Vec::with_capacity(num_instances);
let span = entered_span!("instances", profiling_2 = true);
for [p, q] in &points {
let instance = EllipticCurveAddInstance {
p: p.clone(),
q: q.clone(),
};
instances.push(instance);
}
exit_span!(span);
let span = entered_span!("phase1_witness", profiling_2 = true);
let nthreads = max_usable_threads();
let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
let mut lk_multiplicity = LkMultiplicity::default();
let mut phase1_witness = RowMajorMatrix::<E::BaseField>::new(
instances.len(),
num_witin as usize,
InstancePaddingStrategy::Default,
);
let mut structural_witness = RowMajorMatrix::<E::BaseField>::new(
instances.len(),
num_structural_witin as usize,
InstancePaddingStrategy::Default,
);
let raw_witin_iter = phase1_witness.par_batch_iter_mut(num_instance_per_batch);
let shard_ctx_vec = shard_ctx.get_forked();
raw_witin_iter
.zip_eq(instances.par_chunks(num_instance_per_batch))
.zip(shard_ctx_vec)
.for_each(|((instances, steps), mut shard_ctx)| {
let mut lk_multiplicity = lk_multiplicity.clone();
instances
.chunks_mut(num_witin as usize)
.zip_eq(steps)
.for_each(|(instance, _step)| {
layout
.vm_state
.assign_instance(
instance,
&shard_ctx,
&StepRecord::new_ecall_any(10, ByteAddr::from(0)),
)
.expect("assign vm_state error");
layout.mem_rw.iter().for_each(|mem_config| {
mem_config
.assign_op(
instance,
&mut shard_ctx,
&mut lk_multiplicity,
10,
&MemOp {
previous_cycle: 0,
addr: ByteAddr::from(0).waddr(),
value: Default::default(),
},
)
.expect("assign error");
});
})
});
layout.layout.phase1_witness_group(
WeierstrassAddAssignTrace { instances },
[&mut phase1_witness, &mut structural_witness],
&mut lk_multiplicity,
);
exit_span!(span);
if test_outputs {
// Test got output == expected output.
// n_points x (result_x_words || result_y_words) in little endian
let expected_outputs = points
.iter()
.map(|[a, b]| {
let a = AffinePoint::<EC>::from_words_le(a);
let b = AffinePoint::<EC>::from_words_le(b);
let c = a + b;
c.to_words_le()
.into_iter()
.flat_map(|word| {
[
word & 0xFF,
(word >> 8) & 0xFF,
(word >> 16) & 0xFF,
(word >> 24) & 0xFF,
]
})
.collect_vec()
})
.collect_vec();
let x_output_index_start = layout.layout.layer_exprs.wits.x3_ins.result[0].id as usize;
let y_output_index_start = layout.layout.layer_exprs.wits.y3_ins.result[0].id as usize;
let got_outputs = phase1_witness
.iter_rows()
.take(num_instances)
.map(|cols| {
[
cols[x_output_index_start..][..<EC::BaseField as NumLimbs>::Limbs::USIZE]
.iter()
.map(|x| x.to_canonical_u64() as u32)
.collect_vec(),
cols[y_output_index_start..][..<EC::BaseField as NumLimbs>::Limbs::USIZE]
.iter()
.map(|y| y.to_canonical_u64() as u32)
.collect_vec(),
]
.concat()
})
.collect_vec();
assert_eq!(expected_outputs, got_outputs);
}
let mut prover_transcript = BasicTranscript::<E>::new(b"protocol");
let challenges = [
prover_transcript.read_challenge().elements,
prover_transcript.read_challenge().elements,
];
let span = entered_span!("gkr_witness", profiling_2 = true);
let phase1_witness_group = phase1_witness
.to_mles()
.into_iter()
.map(Arc::new)
.collect_vec();
let structural_witness = structural_witness
.to_mles()
.into_iter()
.map(Arc::new)
.collect_vec();
let fixed = layout
.layout
.fixed_witness_group()
.to_mles()
.into_iter()
.map(Arc::new)
.collect_vec();
#[allow(clippy::type_complexity)]
let (gkr_witness, gkr_output) = gkr_witness::<E, PCS, CpuBackend<E, PCS>, CpuProver<_>>(
&gkr_circuit,
&phase1_witness_group,
&structural_witness,
&fixed,
&[],
&[],
&challenges,
);
exit_span!(span);
let span = entered_span!("out_eval", profiling_2 = true);
let out_evals = {
let mut point = Vec::with_capacity(log2_num_instance);
point.extend(prover_transcript.sample_vec(log2_num_instance).to_vec());
let out_evals = gkr_output
.0
.par_iter()
.map(|wit| {
let point = point[point.len() - wit.num_vars()..point.len()].to_vec();
PointAndEval {
point: point.clone(),
eval: wit.evaluate(&point),
}
})
.collect::<Vec<_>>();
if out_evals.is_empty() {
vec![PointAndEval {
point: point[point.len() - log2_num_instance..point.len()].to_vec(),
eval: E::ZERO,
}]
} else {
out_evals
}
};
exit_span!(span);
if cfg!(debug_assertions) {
// mock prover
let out_wits = gkr_output.0.0.clone();
MockProver::check(&gkr_circuit, &gkr_witness, out_wits, challenges.to_vec())
.expect("mock prover failed");
}
let span = entered_span!("create_proof", profiling_2 = true);
let selector_ctxs = vec![SelectorContext::new(0, num_instances, log2_num_instance); 1];
let GKRProverOutput { gkr_proof, .. } = gkr_circuit
.prove::<CpuBackend<E, PCS>, CpuProver<_>>(
num_threads,
log2_num_instance,
gkr_witness,
&out_evals,
&[],
&challenges,
&mut prover_transcript,
&selector_ctxs,
)
.expect("Failed to prove phase");
exit_span!(span);
if verify {
{
let mut verifier_transcript = BasicTranscript::<E>::new(b"protocol");
let challenges = [
verifier_transcript.read_challenge().elements,
verifier_transcript.read_challenge().elements,
];
// This is to make prover/verifier match
let mut point = Vec::with_capacity(log2_num_instance);
point.extend(verifier_transcript.sample_vec(log2_num_instance).to_vec());
gkr_circuit
.verify(
log2_num_instance,
gkr_proof.clone(),
&out_evals,
&[],
&[],
&challenges,
&mut verifier_transcript,
&selector_ctxs,
)
.expect("GKR verify failed");
// Omit the PCS opening phase.
}
}
Ok(gkr_proof)
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::precompiles::weierstrass::test_utils::random_point_pairs;
    use ff_ext::BabyBearExt4;
    use mpcs::BasefoldDefault;
    use sp1_curves::weierstrass::{
        SwCurve, WeierstrassParameters, bls12_381::Bls12381, bn254::Bn254, secp256k1::Secp256k1,
        secp256r1::Secp256r1,
    };

    /// Shared driver: run the full weierstrass-add flow (circuit setup, witness
    /// generation, output check against native EC addition, prove, verify) over
    /// `num_pairs` random point pairs. A non-power-of-two `num_pairs` exercises
    /// the instance-padding / selector logic.
    fn run_helper<WP: WeierstrassParameters>(num_pairs: usize) {
        type E = BabyBearExt4;
        type Pcs = BasefoldDefault<E>;
        let points = random_point_pairs::<WP>(num_pairs);
        let _ = run_weierstrass_add::<E, Pcs, SwCurve<WP>>(
            setup_gkr_circuit::<E, SwCurve<WP>>().expect("setup gkr circuit failed"),
            points,
            true, // verify the produced proof
            true, // check circuit outputs against the native computation
        )
        .inspect_err(|err| {
            eprintln!("{:?}", err);
        })
        .expect("weierstrass_add failed");
    }

    // Power-of-two instance count.
    fn test_weierstrass_add_helper<WP: WeierstrassParameters>() {
        run_helper::<WP>(8);
    }

    // Non-power-of-two instance count.
    fn test_weierstrass_add_nonpow2_helper<WP: WeierstrassParameters>() {
        run_helper::<WP>(5);
    }

    #[test]
    fn test_weierstrass_add_bn254() {
        test_weierstrass_add_helper::<Bn254>();
    }

    #[test]
    fn test_weierstrass_add_bls12381() {
        test_weierstrass_add_helper::<Bls12381>();
    }

    #[test]
    fn test_weierstrass_add_secp256k1() {
        test_weierstrass_add_helper::<Secp256k1>();
    }

    #[test]
    fn test_weierstrass_add_secp256r1() {
        test_weierstrass_add_helper::<Secp256r1>();
    }

    #[test]
    fn test_weierstrass_add_nonpow2_bn254() {
        test_weierstrass_add_nonpow2_helper::<Bn254>();
    }

    #[test]
    fn test_weierstrass_add_nonpow2_bls12381() {
        test_weierstrass_add_nonpow2_helper::<Bls12381>();
    }

    #[test]
    fn test_weierstrass_add_nonpow2_secp256k1() {
        test_weierstrass_add_nonpow2_helper::<Secp256k1>();
    }

    #[test]
    fn test_weierstrass_add_nonpow2_secp256r1() {
        test_weierstrass_add_nonpow2_helper::<Secp256r1>();
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/weierstrass/weierstrass_double.rs | ceno_zkvm/src/precompiles/weierstrass/weierstrass_double.rs | // The crate weierstrass double circuit is modified from succinctlabs/sp1 under MIT license
// The MIT License (MIT)
// Copyright (c) 2023 Succinct Labs
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use std::{array, fmt::Debug, sync::Arc};
use ceno_emul::{ByteAddr, MemOp, StepRecord};
use core::{borrow::BorrowMut, mem::size_of};
use derive::AlignedBorrow;
use ff_ext::{ExtensionField, SmallField};
use generic_array::{GenericArray, sequence::GenericSequence, typenum::Unsigned};
use gkr_iop::{
OutEvalGroups, ProtocolBuilder, ProtocolWitnessGenerator,
chip::Chip,
circuit_builder::{CircuitBuilder, ConstraintSystem},
cpu::{CpuBackend, CpuProver},
error::{BackendError, CircuitBuilderError},
gkr::{GKRCircuit, GKRProof, GKRProverOutput, layer::Layer, mock::MockProver},
selector::{SelectorContext, SelectorType},
};
use itertools::{Itertools, izip};
use mpcs::PolynomialCommitmentScheme;
use multilinear_extensions::{
Expression, ToExpr, WitIn,
util::{ceil_log2, max_usable_threads},
};
use num::BigUint;
use p3::field::FieldAlgebra;
use rayon::{
iter::{IndexedParallelIterator, ParallelIterator},
prelude::{IntoParallelRefIterator, ParallelSlice},
};
use sp1_curves::{
AffinePoint, EllipticCurve,
params::{FieldParameters, Limbs, NumLimbs, NumWords},
weierstrass::WeierstrassParameters,
};
use sumcheck::{
macros::{entered_span, exit_span},
util::optimal_sumcheck_threads,
};
use transcript::{BasicTranscript, Transcript};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::MemoryExpr,
e2e::ShardContext,
error::ZKVMError,
gadgets::{FieldOperation, field_op::FieldOpCols},
instructions::riscv::insn_base::{StateInOut, WriteMEM},
precompiles::{
SelectorTypeLayout, utils::merge_u8_slice_to_u16_limbs_pairs_and_extend,
weierstrass::EllipticCurveDoubleInstance,
},
scheme::utils::gkr_witness,
structs::PointAndEval,
witness::LkMultiplicity,
};
/// Witness columns for one weierstrass point-doubling instance.
///
/// The sub-columns mirror the affine doubling formula (see `populate_field_ops`):
///   slope = (a + 3 * p.x^2) / (2 * p.y)
///   x3    = slope^2 - (p.x + p.x)
///   y3    = slope * (p.x - x3) - p.y
#[derive(Clone, Debug, AlignedBorrow)]
#[repr(C)]
pub struct WeierstrassDoubleAssignWitCols<WitT, P: FieldParameters + NumLimbs> {
    /// Input point x coordinate, as `P::Limbs` base-field limbs.
    pub p_x: Limbs<WitT, P::Limbs>,
    /// Input point y coordinate, as `P::Limbs` base-field limbs.
    pub p_y: Limbs<WitT, P::Limbs>,
    /// 2 * p.y
    pub(crate) slope_denominator: FieldOpCols<WitT, P>,
    /// a + 3 * p.x^2
    pub(crate) slope_numerator: FieldOpCols<WitT, P>,
    /// slope_numerator / slope_denominator
    pub(crate) slope: FieldOpCols<WitT, P>,
    /// p.x * p.x
    pub(crate) p_x_squared: FieldOpCols<WitT, P>,
    /// p.x^2 * 3
    pub(crate) p_x_squared_times_3: FieldOpCols<WitT, P>,
    /// slope * slope
    pub(crate) slope_squared: FieldOpCols<WitT, P>,
    /// p.x + p.x
    pub(crate) p_x_plus_p_x: FieldOpCols<WitT, P>,
    /// Output x coordinate: slope^2 - (p.x + p.x).
    pub(crate) x3_ins: FieldOpCols<WitT, P>,
    /// p.x - x3
    pub(crate) p_x_minus_x: FieldOpCols<WitT, P>,
    /// Output y coordinate: slope * (p.x - x3) - p.y.
    pub(crate) y3_ins: FieldOpCols<WitT, P>,
    /// slope * (p.x - x3)
    pub(crate) slope_times_p_x_minus_x: FieldOpCols<WitT, P>,
}
/// Weierstrass double is implemented by a single layer.
#[derive(Clone, Debug)]
#[repr(C)]
pub struct WeierstrassDoubleAssignLayer<WitT, P: FieldParameters + NumWords> {
    /// All witness columns of the single GKR layer.
    pub wits: WeierstrassDoubleAssignWitCols<WitT, P>,
}
/// Full circuit layout for the weierstrass-double precompile: the layer's
/// witness expressions, the shared selector, and the 16-bit-limb memory
/// expressions produced by `build_layer_logic`.
#[derive(Clone, Debug)]
pub struct WeierstrassDoubleAssignLayout<E: ExtensionField, EC: EllipticCurve> {
    /// Witness expressions of the single layer.
    pub layer_exprs: WeierstrassDoubleAssignLayer<WitIn, EC::BaseField>,
    /// Selectors shared by reads, writes, lookups and zero constraints.
    pub selector_type_layout: SelectorTypeLayout<E>,
    /// Input point (p.x || p.y) as per-word memory expressions.
    pub input32_exprs: GenericArray<MemoryExpr<E>, <EC::BaseField as NumWords>::WordsCurvePoint>,
    /// Doubled point (x3 || y3) as per-word memory expressions.
    pub output32_exprs: GenericArray<MemoryExpr<E>, <EC::BaseField as NumWords>::WordsCurvePoint>,
    // Column counts; filled in by `finalize`.
    pub n_fixed: usize,
    pub n_committed: usize,
    pub n_structural_witin: usize,
    pub n_challenges: usize,
}
impl<E: ExtensionField, EC: EllipticCurve + WeierstrassParameters>
    WeierstrassDoubleAssignLayout<E, EC>
{
    /// Allocate all witness columns and the selector structural witness.
    ///
    /// NOTE: witness ids follow creation order, so the creation sequence here
    /// must stay in sync with the raw-row borrow in `phase1_witness_group`
    /// (which borrows the column struct starting at `p_x`'s id).
    fn new(cb: &mut CircuitBuilder<E>) -> Self {
        let wits = WeierstrassDoubleAssignWitCols {
            p_x: Limbs(GenericArray::generate(|_| cb.create_witin(|| "p_x"))),
            p_y: Limbs(GenericArray::generate(|_| cb.create_witin(|| "p_y"))),
            slope_denominator: FieldOpCols::create(cb, || "slope_denominator"),
            slope_numerator: FieldOpCols::create(cb, || "slope_numerator"),
            slope: FieldOpCols::create(cb, || "slope"),
            p_x_squared: FieldOpCols::create(cb, || "p_x_squared"),
            p_x_squared_times_3: FieldOpCols::create(cb, || "p_x_squared_times_3"),
            slope_squared: FieldOpCols::create(cb, || "slope_squared"),
            p_x_plus_p_x: FieldOpCols::create(cb, || "p_x_plus_p_x"),
            x3_ins: FieldOpCols::create(cb, || "x3_ins"),
            p_x_minus_x: FieldOpCols::create(cb, || "p_x_minus_x"),
            y3_ins: FieldOpCols::create(cb, || "y3_ins"),
            slope_times_p_x_minus_x: FieldOpCols::create(cb, || "slope_times_p_x_minus_x"),
        };
        // A single prefix selector drives all four constraint groups.
        let eq = cb.create_placeholder_structural_witin(|| "weierstrass_double_eq");
        let sel = SelectorType::Prefix(eq.expr());
        let selector_type_layout = SelectorTypeLayout {
            sel_mem_read: sel.clone(),
            sel_mem_write: sel.clone(),
            sel_lookup: sel.clone(),
            sel_zero: sel.clone(),
        };
        // Placeholder expressions; overwritten in `build_layer_logic`.
        let input32_exprs: GenericArray<
            MemoryExpr<E>,
            <EC::BaseField as NumWords>::WordsCurvePoint,
        > = GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)));
        let output32_exprs: GenericArray<
            MemoryExpr<E>,
            <EC::BaseField as NumWords>::WordsCurvePoint,
        > = GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)));
        Self {
            layer_exprs: WeierstrassDoubleAssignLayer { wits },
            selector_type_layout,
            input32_exprs,
            output32_exprs,
            n_fixed: 0,
            n_committed: 0,
            n_challenges: 0,
            n_structural_witin: 0,
        }
    }

    /// Fill the field-operation witness columns for one doubling of (p_x, p_y),
    /// recording byte-range lookups into `blu_events`. Mirrors the constraint
    /// order in `build_layer_logic`.
    #[allow(clippy::too_many_arguments)]
    fn populate_field_ops(
        blu_events: &mut LkMultiplicity,
        cols: &mut WeierstrassDoubleAssignWitCols<E::BaseField, EC::BaseField>,
        p_x: BigUint,
        p_y: BigUint,
    ) {
        // This populates necessary field operations to double a point on a Weierstrass curve.
        let a = EC::a_int();
        let slope = {
            // slope_numerator = a + (p.x * p.x) * 3.
            let slope_numerator = {
                let p_x_squared =
                    cols.p_x_squared
                        .populate(blu_events, &p_x, &p_x, FieldOperation::Mul);
                let p_x_squared_times_3 = cols.p_x_squared_times_3.populate(
                    blu_events,
                    &p_x_squared,
                    &BigUint::from(3u32),
                    FieldOperation::Mul,
                );
                cols.slope_numerator.populate(
                    blu_events,
                    &a,
                    &p_x_squared_times_3,
                    FieldOperation::Add,
                )
            };
            // slope_denominator = 2 * y.
            let slope_denominator = cols.slope_denominator.populate(
                blu_events,
                &BigUint::from(2u32),
                &p_y,
                FieldOperation::Mul,
            );
            cols.slope.populate(
                blu_events,
                &slope_numerator,
                &slope_denominator,
                FieldOperation::Div,
            )
        };
        // x = slope * slope - (p.x + p.x).
        let x = {
            let slope_squared =
                cols.slope_squared
                    .populate(blu_events, &slope, &slope, FieldOperation::Mul);
            let p_x_plus_p_x =
                cols.p_x_plus_p_x
                    .populate(blu_events, &p_x, &p_x, FieldOperation::Add);
            cols.x3_ins.populate(
                blu_events,
                &slope_squared,
                &p_x_plus_p_x,
                FieldOperation::Sub,
            )
        };
        // y = slope * (p.x - x) - p.y.
        {
            let p_x_minus_x = cols
                .p_x_minus_x
                .populate(blu_events, &p_x, &x, FieldOperation::Sub);
            let slope_times_p_x_minus_x = cols.slope_times_p_x_minus_x.populate(
                blu_events,
                &slope,
                &p_x_minus_x,
                FieldOperation::Mul,
            );
            cols.y3_ins.populate(
                blu_events,
                &slope_times_p_x_minus_x,
                &p_y,
                FieldOperation::Sub,
            );
        }
    }
}
impl<E: ExtensionField, EC: EllipticCurve + WeierstrassParameters> ProtocolBuilder<E>
    for WeierstrassDoubleAssignLayout<E, EC>
{
    type Params = ();

    /// Lay down the doubling constraints (same structure and order as
    /// `populate_field_ops`) and derive the 16-bit-limb input/output memory
    /// expressions from the 8-bit limb columns.
    fn build_layer_logic(
        cb: &mut CircuitBuilder<E>,
        _params: Self::Params,
    ) -> Result<Self, CircuitBuilderError> {
        let mut layout = WeierstrassDoubleAssignLayout::new(cb);
        let wits = &layout.layer_exprs.wits;
        // `a` in the Weierstrass form: y^2 = x^3 + a * x + b.
        let a = EC::BaseField::to_limbs_expr::<E>(&EC::a_int());
        // slope = slope_numerator / slope_denominator.
        let slope = {
            // slope_numerator = a + (p.x * p.x) * 3.
            {
                wits.p_x_squared
                    .eval(cb, &wits.p_x, &wits.p_x, FieldOperation::Mul)?;
                wits.p_x_squared_times_3.eval(
                    cb,
                    &wits.p_x_squared.result,
                    &EC::BaseField::to_limbs_expr::<E>(&BigUint::from(3u32)),
                    FieldOperation::Mul,
                )?;
                wits.slope_numerator.eval(
                    cb,
                    &a,
                    &wits.p_x_squared_times_3.result,
                    FieldOperation::Add,
                )?;
            };
            // slope_denominator = 2 * y.
            wits.slope_denominator.eval(
                cb,
                &EC::BaseField::to_limbs_expr::<E>(&BigUint::from(2u32)),
                &wits.p_y,
                FieldOperation::Mul,
            )?;
            wits.slope.eval(
                cb,
                &wits.slope_numerator.result,
                &wits.slope_denominator.result,
                FieldOperation::Div,
            )?;
            &wits.slope.result
        };
        // x = slope * slope - (p.x + p.x).
        let x = {
            wits.slope_squared
                .eval(cb, slope, slope, FieldOperation::Mul)?;
            wits.p_x_plus_p_x
                .eval(cb, &wits.p_x, &wits.p_x, FieldOperation::Add)?;
            wits.x3_ins.eval(
                cb,
                &wits.slope_squared.result,
                &wits.p_x_plus_p_x.result,
                FieldOperation::Sub,
            )?;
            &wits.x3_ins.result
        };
        // y = slope * (p.x - x) - p.y.
        {
            wits.p_x_minus_x
                .eval(cb, &wits.p_x, x, FieldOperation::Sub)?;
            wits.slope_times_p_x_minus_x.eval(
                cb,
                slope,
                &wits.p_x_minus_x.result,
                FieldOperation::Mul,
            )?;
            wits.y3_ins.eval(
                cb,
                &wits.slope_times_p_x_minus_x.result,
                &wits.p_y,
                FieldOperation::Sub,
            )?;
        }
        // Constraint output32 from wits.x3_ins || wits.y3_ins by converting 8-bit limbs to 2x16-bit felts
        let mut output32 = Vec::with_capacity(<EC::BaseField as NumWords>::WordsCurvePoint::USIZE);
        for limbs in [&wits.x3_ins.result, &wits.y3_ins.result] {
            merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&limbs.0, &mut output32);
        }
        let output32 = output32.try_into().unwrap();
        // Same packing for the input point p_x || p_y.
        let mut p_input32 = Vec::with_capacity(<EC::BaseField as NumWords>::WordsCurvePoint::USIZE);
        for limbs in [&wits.p_x, &wits.p_y] {
            merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(&limbs.0, &mut p_input32);
        }
        let p_input32 = p_input32.try_into().unwrap();
        // set input32/output32 expr
        layout.input32_exprs = p_input32;
        layout.output32_exprs = output32;
        Ok(layout)
    }

    /// Record final column counts, install the selectors into the legacy
    /// constraint system, and split the out-eval indices into the
    /// (read, write, lookup, zero) groups expected by the GKR layer.
    fn finalize(&mut self, cb: &mut CircuitBuilder<E>) -> (OutEvalGroups, Chip<E>) {
        self.n_fixed = cb.cs.num_fixed;
        self.n_committed = cb.cs.num_witin as usize;
        self.n_structural_witin = cb.cs.num_structural_witin as usize;
        self.n_challenges = 0;
        // register selector to legacy constrain system
        cb.cs.r_selector = Some(self.selector_type_layout.sel_mem_read.clone());
        cb.cs.w_selector = Some(self.selector_type_layout.sel_mem_write.clone());
        cb.cs.lk_selector = Some(self.selector_type_layout.sel_lookup.clone());
        cb.cs.zero_selector = Some(self.selector_type_layout.sel_zero.clone());
        let w_len = cb.cs.w_expressions.len();
        let r_len = cb.cs.r_expressions.len();
        let lk_len = cb.cs.lk_expressions.len();
        let zero_len =
            cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
        (
            [
                // r_record
                (0..r_len).collect_vec(),
                // w_record
                (r_len..r_len + w_len).collect_vec(),
                // lk_record
                (r_len + w_len..r_len + w_len + lk_len).collect_vec(),
                // zero_record
                (0..zero_len).collect_vec(),
            ],
            Chip::new_from_cb(cb, self.n_challenges),
        )
    }

    // The remaining trait accessors are not used by this precompile's flow.
    fn n_committed(&self) -> usize {
        todo!()
    }

    fn n_fixed(&self) -> usize {
        todo!()
    }

    fn n_challenges(&self) -> usize {
        todo!()
    }

    fn n_evaluations(&self) -> usize {
        todo!()
    }

    fn n_layers(&self) -> usize {
        todo!()
    }
}
/// Phase-1 trace: one `EllipticCurveDoubleInstance` per circuit instance.
#[derive(Clone, Default)]
pub struct WeierstrassDoubleAssignTrace<P: NumWords> {
    pub instances: Vec<EllipticCurveDoubleInstance<P>>,
}
impl<E: ExtensionField, EC: EllipticCurve + WeierstrassParameters> ProtocolWitnessGenerator<E>
    for WeierstrassDoubleAssignLayout<E, EC>
{
    type Trace = WeierstrassDoubleAssignTrace<EC::BaseField>;

    /// No fixed columns for this precompile.
    fn fixed_witness_group(&self) -> RowMajorMatrix<E::BaseField> {
        RowMajorMatrix::new(0, 0, InstancePaddingStrategy::Default)
    }

    /// Fill the committed witness rows and the structural (selector) rows in
    /// parallel, one batch of instances per thread. Each active instance's
    /// selector entries are set to ONE.
    fn phase1_witness_group(
        &self,
        phase1: Self::Trace,
        wits: [&mut RowMajorMatrix<E::BaseField>; 2],
        lk_multiplicity: &mut LkMultiplicity,
    ) {
        let num_instances = wits[0].num_instances();
        let nthreads = max_usable_threads();
        let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
        let num_wit_cols = size_of::<WeierstrassDoubleAssignWitCols<u8, EC::BaseField>>();
        let [wits, structural_wits] = wits;
        let raw_witin_iter = wits.par_batch_iter_mut(num_instance_per_batch);
        let raw_structural_wits_iter = structural_wits.par_batch_iter_mut(num_instance_per_batch);
        raw_witin_iter
            .zip_eq(raw_structural_wits_iter)
            .zip_eq(phase1.instances.par_chunks(num_instance_per_batch))
            .for_each(|((rows, eqs), phase1_instances)| {
                // Per-thread multiplicity clone; merged back by the caller's type.
                let mut lk_multiplicity = lk_multiplicity.clone();
                rows.chunks_mut(self.n_committed)
                    .zip_eq(eqs.chunks_mut(self.n_structural_witin))
                    .zip_eq(phase1_instances)
                    .for_each(|((row, eqs), phase1_instance)| {
                        // Reinterpret the raw row slice (starting at p_x's witness id)
                        // as the column struct via AlignedBorrow.
                        let cols: &mut WeierstrassDoubleAssignWitCols<E::BaseField, EC::BaseField> =
                            row[self.layer_exprs.wits.p_x.0[0].id as usize..][..num_wit_cols] // TODO: Find a better way to write it.
                                .borrow_mut(); // We should construct the circuit to guarantee this part occurs first.
                        Self::populate_row(phase1_instance, cols, &mut lk_multiplicity);
                        // Activate the prefix selector for this instance.
                        for x in eqs.iter_mut() {
                            *x = E::BaseField::ONE;
                        }
                    });
            });
    }
}
impl<E: ExtensionField, EC: EllipticCurve + WeierstrassParameters>
    WeierstrassDoubleAssignLayout<E, EC>
{
    /// Fill one witness row from a doubling event: decode the input point,
    /// write its limb columns, then populate all field-operation columns.
    pub fn populate_row(
        event: &EllipticCurveDoubleInstance<EC::BaseField>,
        cols: &mut WeierstrassDoubleAssignWitCols<E::BaseField, EC::BaseField>,
        new_byte_lookup_events: &mut LkMultiplicity,
    ) {
        // Decode affine points.
        let p = &event.p;
        let p = AffinePoint::<EC>::from_words_le(p);
        let (p_x, p_y) = (p.x, p.y);
        // Populate basic columns.
        cols.p_x = EC::BaseField::to_limbs_field(&p_x);
        cols.p_y = EC::BaseField::to_limbs_field(&p_y);
        Self::populate_field_ops(new_byte_lookup_events, cols, p_x, p_y);
    }
}
/// this is for testing purpose
///
/// Bundles the circuit layout with the VM-state and memory-write gadgets so
/// `run_weierstrass_double` can assign a complete instance.
pub struct TestWeierstrassDoubleLayout<E: ExtensionField, EC: EllipticCurve> {
    layout: WeierstrassDoubleAssignLayout<E, EC>,
    /// One memory write per output word (result overwrites the input point).
    mem_rw: Vec<WriteMEM>,
    vm_state: StateInOut<E>,
    _point_ptr_0: WitIn,
}
/// Build the weierstrass-double test circuit: VM state, the doubling layer,
/// and one memory write per output word (written back to the input point's
/// address). Returns the layout, the GKR circuit, and the committed /
/// structural witness column counts.
#[allow(clippy::type_complexity)]
pub fn setup_gkr_circuit<E: ExtensionField, EC: EllipticCurve + WeierstrassParameters>()
-> Result<(TestWeierstrassDoubleLayout<E, EC>, GKRCircuit<E>, u16, u16), ZKVMError> {
    let mut cs = ConstraintSystem::new(|| "weierstrass_double");
    let mut cb = CircuitBuilder::<E>::new(&mut cs);
    // constrain vmstate
    let vm_state = StateInOut::construct_circuit(&mut cb, false)?;
    let point_ptr_0 = cb.create_witin(|| "state_ptr_0");
    let mut layout = WeierstrassDoubleAssignLayout::build_layer_logic(&mut cb, ())?;
    // Write the result to the same address of the first input point.
    let mem_rw = izip!(&layout.input32_exprs, &layout.output32_exprs)
        .enumerate()
        .map(|(i, (val_before, val_after))| {
            WriteMEM::construct_circuit(
                &mut cb,
                // mem address := state_ptr_0 + i
                point_ptr_0.expr() + E::BaseField::from_canonical_u32(i as u32).expr(),
                val_before.clone(),
                val_after.clone(),
                vm_state.ts,
            )
        })
        .collect::<Result<Vec<WriteMEM>, _>>()?;
    let (out_evals, mut chip) = layout.finalize(&mut cb);
    let layer = Layer::from_circuit_builder(
        &cb,
        "weierstrass_double".to_string(),
        layout.n_challenges,
        out_evals,
    );
    chip.add_layer(layer);
    Ok((
        TestWeierstrassDoubleLayout {
            layout,
            vm_state,
            _point_ptr_0: point_ptr_0,
            mem_rw,
        },
        chip.gkr_circuit(),
        cs.num_witin,
        cs.num_structural_witin,
    ))
}
/// End-to-end test driver: assign witnesses for `points`, optionally check the
/// circuit's outputs against native EC doubling (`test_outputs`), produce a GKR
/// proof, and optionally verify it (`verify`). PCS opening is omitted.
#[tracing::instrument(
    skip_all,
    name = "run_weierstrass_double",
    level = "trace",
    fields(profiling_1)
)]
pub fn run_weierstrass_double<
    E: ExtensionField,
    PCS: PolynomialCommitmentScheme<E> + 'static,
    EC: EllipticCurve + WeierstrassParameters,
>(
    (layout, gkr_circuit, num_witin, num_structural_witin): (
        TestWeierstrassDoubleLayout<E, EC>,
        GKRCircuit<E>,
        u16,
        u16,
    ),
    points: Vec<GenericArray<u32, <EC::BaseField as NumWords>::WordsCurvePoint>>,
    verify: bool,
    test_outputs: bool,
) -> Result<GKRProof<E>, BackendError> {
    let mut shard_ctx = ShardContext::default();
    let num_instances = points.len();
    let log2_num_instance = ceil_log2(num_instances);
    let num_threads = optimal_sumcheck_threads(log2_num_instance);
    let mut instances: Vec<EllipticCurveDoubleInstance<EC::BaseField>> =
        Vec::with_capacity(num_instances);
    let span = entered_span!("instances", profiling_2 = true);
    for p in &points {
        let instance = EllipticCurveDoubleInstance { p: p.clone() };
        instances.push(instance);
    }
    exit_span!(span);
    let span = entered_span!("phase1_witness", profiling_2 = true);
    let nthreads = max_usable_threads();
    let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
    let mut lk_multiplicity = LkMultiplicity::default();
    let mut phase1_witness = RowMajorMatrix::<E::BaseField>::new(
        instances.len(),
        num_witin as usize,
        InstancePaddingStrategy::Default,
    );
    let mut structural_witness = RowMajorMatrix::<E::BaseField>::new(
        instances.len(),
        num_structural_witin as usize,
        InstancePaddingStrategy::Default,
    );
    // Assign vm-state and memory-write columns per instance, in parallel batches.
    let raw_witin_iter = phase1_witness.par_batch_iter_mut(num_instance_per_batch);
    let shard_ctx_vec = shard_ctx.get_forked();
    raw_witin_iter
        .zip_eq(instances.par_chunks(num_instance_per_batch))
        // NOTE(review): plain `zip` here vs `zip_eq` above — presumably
        // `shard_ctx_vec` always has at least as many entries as batches; confirm.
        .zip(shard_ctx_vec)
        .for_each(|((instances, steps), mut shard_ctx)| {
            let mut lk_multiplicity = lk_multiplicity.clone();
            instances
                .chunks_mut(num_witin as usize)
                .zip_eq(steps)
                .for_each(|(instance, _step)| {
                    layout
                        .vm_state
                        .assign_instance(
                            instance,
                            &shard_ctx,
                            &StepRecord::new_ecall_any(10, ByteAddr::from(0)),
                        )
                        .expect("assign vm_state error");
                    layout.mem_rw.iter().for_each(|mem_config| {
                        mem_config
                            .assign_op(
                                instance,
                                &mut shard_ctx,
                                &mut lk_multiplicity,
                                10,
                                &MemOp {
                                    previous_cycle: 0,
                                    addr: ByteAddr::from(0).waddr(),
                                    value: Default::default(),
                                },
                            )
                            .expect("assign error");
                    });
                })
        });
    // Fill the doubling-layer columns and selector rows.
    layout.layout.phase1_witness_group(
        WeierstrassDoubleAssignTrace { instances },
        [&mut phase1_witness, &mut structural_witness],
        &mut lk_multiplicity,
    );
    exit_span!(span);
    if test_outputs {
        // test got output == expected output
        // n_points x (result_x_words || result_y_words) in little endian
        let expected_outputs = points
            .iter()
            .map(|a| {
                let a = AffinePoint::<EC>::from_words_le(a);
                let c = EC::ec_double(&a);
                c.to_words_le()
                    .into_iter()
                    .flat_map(|word| {
                        // Split each 32-bit word into 4 little-endian bytes to
                        // match the circuit's 8-bit limb layout.
                        [
                            word & 0xFF,
                            (word >> 8) & 0xFF,
                            (word >> 16) & 0xFF,
                            (word >> 24) & 0xFF,
                        ]
                    })
                    .collect_vec()
            })
            .collect_vec();
        let x_output_index_start = layout.layout.layer_exprs.wits.x3_ins.result[0].id as usize;
        let y_output_index_start = layout.layout.layer_exprs.wits.y3_ins.result[0].id as usize;
        let got_outputs = phase1_witness
            .iter_rows()
            .take(num_instances)
            .map(|cols| {
                [
                    cols[x_output_index_start..][..<EC::BaseField as NumLimbs>::Limbs::USIZE]
                        .iter()
                        .map(|x| x.to_canonical_u64() as u32)
                        .collect_vec(),
                    cols[y_output_index_start..][..<EC::BaseField as NumLimbs>::Limbs::USIZE]
                        .iter()
                        .map(|y| y.to_canonical_u64() as u32)
                        .collect_vec(),
                ]
                .concat()
            })
            .collect_vec();
        assert_eq!(expected_outputs, got_outputs);
    }
    let mut prover_transcript = BasicTranscript::<E>::new(b"protocol");
    let challenges = [
        prover_transcript.read_challenge().elements,
        prover_transcript.read_challenge().elements,
    ];
    let span = entered_span!("gkr_witness", profiling_2 = true);
    let phase1_witness_group = phase1_witness
        .to_mles()
        .into_iter()
        .map(Arc::new)
        .collect_vec();
    let structural_witness = structural_witness
        .to_mles()
        .into_iter()
        .map(Arc::new)
        .collect_vec();
    let fixed = layout
        .layout
        .fixed_witness_group()
        .to_mles()
        .into_iter()
        .map(Arc::new)
        .collect_vec();
    #[allow(clippy::type_complexity)]
    let (gkr_witness, gkr_output) = gkr_witness::<E, PCS, CpuBackend<E, PCS>, CpuProver<_>>(
        &gkr_circuit,
        &phase1_witness_group,
        &structural_witness,
        &fixed,
        &[],
        &[],
        &challenges,
    );
    exit_span!(span);
    let span = entered_span!("out_eval", profiling_2 = true);
    // Evaluate each output MLE at a transcript-sampled point (suffix-aligned
    // to the MLE's number of variables).
    let out_evals = {
        let mut point = Vec::with_capacity(log2_num_instance);
        point.extend(prover_transcript.sample_vec(log2_num_instance).to_vec());
        let out_evals = gkr_output
            .0
            .par_iter()
            .map(|wit| {
                let point = point[point.len() - wit.num_vars()..point.len()].to_vec();
                PointAndEval {
                    point: point.clone(),
                    eval: wit.evaluate(&point),
                }
            })
            .collect::<Vec<_>>();
        if out_evals.is_empty() {
            // Degenerate case: no outputs — supply a single zero eval.
            vec![PointAndEval {
                point: point[point.len() - log2_num_instance..point.len()].to_vec(),
                eval: E::ZERO,
            }]
        } else {
            out_evals
        }
    };
    exit_span!(span);
    if cfg!(debug_assertions) {
        // mock prover
        let out_wits = gkr_output.0.0.clone();
        MockProver::check(&gkr_circuit, &gkr_witness, out_wits, challenges.to_vec())
            .expect("mock prover failed");
    }
    let span = entered_span!("create_proof", profiling_2 = true);
    let selector_ctxs = vec![SelectorContext::new(0, num_instances, log2_num_instance); 1];
    let GKRProverOutput { gkr_proof, .. } = gkr_circuit
        .prove::<CpuBackend<E, PCS>, CpuProver<_>>(
            num_threads,
            log2_num_instance,
            gkr_witness,
            &out_evals,
            &[],
            &challenges,
            &mut prover_transcript,
            &selector_ctxs,
        )
        .expect("Failed to prove phase");
    exit_span!(span);
    if verify {
        {
            // Re-derive challenges/point on a fresh transcript so the verifier
            // sees exactly the same sequence as the prover.
            let mut verifier_transcript = BasicTranscript::<E>::new(b"protocol");
            let challenges = [
                verifier_transcript.read_challenge().elements,
                verifier_transcript.read_challenge().elements,
            ];
            // This is to make prover/verifier match
            let mut point = Vec::with_capacity(log2_num_instance);
            point.extend(verifier_transcript.sample_vec(log2_num_instance).to_vec());
            gkr_circuit
                .verify(
                    log2_num_instance,
                    gkr_proof.clone(),
                    &out_evals,
                    &[],
                    &[],
                    &challenges,
                    &mut verifier_transcript,
                    &selector_ctxs,
                )
                .expect("GKR verify failed");
            // Omit the PCS opening phase.
        }
    }
    Ok(gkr_proof)
}
#[cfg(test)]
mod tests {
    use super::*;
    use ff_ext::BabyBearExt4;
    use mpcs::BasefoldDefault;
    use sp1_curves::weierstrass::{
        SwCurve, WeierstrassParameters, bls12_381::Bls12381, bn254::Bn254, secp256k1::Secp256k1,
        secp256r1::Secp256r1,
    };

    use crate::precompiles::weierstrass::test_utils::random_points;

    /// Shared driver: run the full weierstrass-double flow (circuit setup,
    /// witness generation, output check against native EC doubling, prove,
    /// verify) over `num_points` random points. A non-power-of-two
    /// `num_points` exercises the instance-padding / selector logic.
    fn run_helper<WP: WeierstrassParameters>(num_points: usize) {
        type E = BabyBearExt4;
        type Pcs = BasefoldDefault<E>;
        let points = random_points::<WP>(num_points);
        let _ = run_weierstrass_double::<E, Pcs, SwCurve<WP>>(
            setup_gkr_circuit::<E, SwCurve<WP>>().expect("setup gkr circuit failed"),
            points,
            true, // verify the produced proof
            true, // check circuit outputs against the native computation
        )
        .inspect_err(|err| {
            eprintln!("{:?}", err);
        })
        .expect("run_weierstrass_double failed");
    }

    // Power-of-two instance count.
    fn test_weierstrass_double_helper<WP: WeierstrassParameters>() {
        run_helper::<WP>(8);
    }

    // Non-power-of-two instance count.
    fn test_weierstrass_double_nonpow2_helper<WP: WeierstrassParameters>() {
        run_helper::<WP>(5);
    }

    #[test]
    fn test_weierstrass_double_bn254() {
        test_weierstrass_double_helper::<Bn254>();
    }

    #[test]
    fn test_weierstrass_double_bls12381() {
        test_weierstrass_double_helper::<Bls12381>();
    }

    #[test]
    fn test_weierstrass_double_secp256k1() {
        test_weierstrass_double_helper::<Secp256k1>();
    }

    #[test]
    fn test_weierstrass_double_secp256r1() {
        test_weierstrass_double_helper::<Secp256r1>();
    }

    #[test]
    fn test_weierstrass_double_nonpow2_bn254() {
        test_weierstrass_double_nonpow2_helper::<Bn254>();
    }

    #[test]
    fn test_weierstrass_double_nonpow2_bls12381() {
        test_weierstrass_double_nonpow2_helper::<Bls12381>();
    }

    #[test]
    fn test_weierstrass_double_nonpow2_secp256k1() {
        test_weierstrass_double_nonpow2_helper::<Secp256k1>();
    }

    #[test]
    fn test_weierstrass_double_nonpow2_secp256r1() {
        test_weierstrass_double_nonpow2_helper::<Secp256r1>();
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/weierstrass/weierstrass_decompress.rs | ceno_zkvm/src/precompiles/weierstrass/weierstrass_decompress.rs | // The crate weierstrass add circuit is modified from succinctlabs/sp1 under MIT license
// The MIT License (MIT)
// Copyright (c) 2023 Succinct Labs
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
use std::{array, fmt::Debug, marker::PhantomData, sync::Arc};
use ceno_emul::{ByteAddr, MemOp, StepRecord};
use core::{borrow::BorrowMut, mem::size_of};
use derive::AlignedBorrow;
use ff_ext::{ExtensionField, SmallField};
use generic_array::{GenericArray, sequence::GenericSequence, typenum::Unsigned};
use gkr_iop::{
OutEvalGroups, ProtocolBuilder, ProtocolWitnessGenerator,
chip::Chip,
circuit_builder::{CircuitBuilder, ConstraintSystem},
cpu::{CpuBackend, CpuProver},
error::{BackendError, CircuitBuilderError},
gkr::{GKRCircuit, GKRProof, GKRProverOutput, layer::Layer, mock::MockProver},
selector::{SelectorContext, SelectorType},
};
use itertools::{Itertools, izip};
use mpcs::PolynomialCommitmentScheme;
use multilinear_extensions::{
Expression, ToExpr, WitIn,
macros::{entered_span, exit_span},
util::{ceil_log2, max_usable_threads},
};
use num::{BigUint, One, Zero};
use p3::field::FieldAlgebra;
use rayon::{
iter::{IndexedParallelIterator, ParallelIterator},
prelude::{IntoParallelRefIterator, ParallelSlice},
};
use sp1_curves::{
CurveType, EllipticCurve,
params::{FieldParameters, Limbs, NumLimbs, NumWords},
polynomial::Polynomial,
weierstrass::{
WeierstrassParameters,
secp256k1::{secp256k1_decompress, secp256k1_sqrt},
secp256r1::{secp256r1_decompress, secp256r1_sqrt},
},
};
use sumcheck::util::optimal_sumcheck_threads;
use transcript::{BasicTranscript, Transcript};
use witness::{InstancePaddingStrategy, RowMajorMatrix};
use crate::{
chip_handler::MemoryExpr,
e2e::ShardContext,
error::ZKVMError,
gadgets::{
FieldOperation, field_inner_product::FieldInnerProductCols, field_op::FieldOpCols,
field_sqrt::FieldSqrtCols, range::FieldLtCols,
},
instructions::riscv::{
constants::UINT_LIMBS,
insn_base::{StateInOut, WriteMEM},
},
precompiles::{
SelectorTypeLayout, utils::merge_u8_slice_to_u16_limbs_pairs_and_extend,
weierstrass::EllipticCurveDecompressInstance,
},
scheme::utils::gkr_witness,
structs::PointAndEval,
witness::LkMultiplicity,
};
/// Witness columns for one weierstrass point-decompression instance:
/// given x and a sign bit, recover y with y^2 = x^3 + a*x + b.
#[derive(Clone, Debug, AlignedBorrow)]
#[repr(C)]
pub struct WeierstrassDecompressWitCols<WitT, P: FieldParameters + NumLimbs + NumWords> {
    /// Sign (parity) bit selecting between +y and -y.
    pub sign_bit: WitT,
    /// Input x coordinate limbs.
    pub(crate) x_limbs: Limbs<WitT, P::Limbs>,
    /// Selected output y coordinate limbs.
    pub(crate) y_limbs: Limbs<WitT, P::Limbs>,
    /// Previous memory contents at the output location, as 16-bit limb pairs.
    pub(crate) old_output32: GenericArray<[WitT; UINT_LIMBS], P::WordsFieldElement>,
    /// Range check: x < field modulus.
    pub(crate) range_x: FieldLtCols<WitT, P>,
    /// Range check: -y < field modulus.
    pub(crate) neg_y_range_check: FieldLtCols<WitT, P>,
    /// x^2
    pub(crate) x_2: FieldOpCols<WitT, P>,
    /// x^3
    pub(crate) x_3: FieldOpCols<WitT, P>,
    /// a*x + b (as an inner product with [a, b] . [x, 1]).
    pub(crate) ax_plus_b: FieldInnerProductCols<WitT, P>,
    /// x^3 + a*x + b
    pub(crate) x_3_plus_b_plus_ax: FieldOpCols<WitT, P>,
    /// Square root of x^3 + a*x + b (the "positive" root).
    pub(crate) pos_y: FieldSqrtCols<WitT, P>,
    /// 0 - pos_y (the other root).
    pub(crate) neg_y: FieldOpCols<WitT, P>,
}
/// Weierstrass decompress is implemented by a single layer.
#[derive(Clone, Debug)]
#[repr(C)]
pub struct WeierstrassDecompressLayer<WitT, P: FieldParameters + NumWords> {
    /// All witness columns of the single GKR layer.
    pub wits: WeierstrassDecompressWitCols<WitT, P>,
}
/// Full circuit layout for the weierstrass-decompress precompile.
#[derive(Clone, Debug)]
pub struct WeierstrassDecompressLayout<E: ExtensionField, EC: EllipticCurve> {
    /// Witness expressions of the single layer.
    pub layer_exprs: WeierstrassDecompressLayer<WitIn, EC::BaseField>,
    /// Selectors shared by reads, writes, lookups and zero constraints.
    pub selector_type_layout: SelectorTypeLayout<E>,
    /// Input x coordinate as per-word memory expressions.
    pub input32_exprs: GenericArray<MemoryExpr<E>, <EC::BaseField as NumWords>::WordsFieldElement>,
    /// Previous memory contents at the output location.
    pub old_output32_exprs:
        GenericArray<MemoryExpr<E>, <EC::BaseField as NumWords>::WordsFieldElement>,
    /// Recovered y coordinate as per-word memory expressions.
    pub output32_exprs: GenericArray<MemoryExpr<E>, <EC::BaseField as NumWords>::WordsFieldElement>,
    // Column counts; filled in by `finalize`.
    pub n_fixed: usize,
    pub n_committed: usize,
    pub n_structural_witin: usize,
    pub n_challenges: usize,
}
impl<E: ExtensionField, EC: EllipticCurve + WeierstrassParameters>
WeierstrassDecompressLayout<E, EC>
{
    /// Allocate all witness columns and the selector structural witness.
    ///
    /// Panics for curves other than Secp256k1/Secp256r1 (the only curves with
    /// a sqrt implementation wired in below).
    fn new(cb: &mut CircuitBuilder<E>) -> Self {
        match EC::CURVE_TYPE {
            CurveType::Secp256k1 | CurveType::Secp256r1 => {}
            _ => panic!("Unsupported curve"),
        }
        let wits = WeierstrassDecompressWitCols {
            sign_bit: cb.create_bit(|| "sign_bit").unwrap(),
            x_limbs: Limbs(GenericArray::generate(|_| cb.create_witin(|| "x"))),
            y_limbs: Limbs(GenericArray::generate(|_| cb.create_witin(|| "y"))),
            old_output32: GenericArray::generate(|i| {
                array::from_fn(|j| cb.create_witin(|| format!("old_output32_{}_{}", i, j)))
            }),
            range_x: FieldLtCols::create(cb, || "range_x"),
            neg_y_range_check: FieldLtCols::create(cb, || "neg_y_range_check"),
            x_2: FieldOpCols::create(cb, || "x_2"),
            x_3: FieldOpCols::create(cb, || "x_3"),
            ax_plus_b: FieldInnerProductCols::create(cb, || "ax_plus_b"),
            x_3_plus_b_plus_ax: FieldOpCols::create(cb, || "x_3_plus_b_plus_ax"),
            pos_y: FieldSqrtCols::create(cb, || "y"),
            neg_y: FieldOpCols::create(cb, || "neg_y"),
        };
        // A single prefix selector drives all four constraint groups.
        let eq = cb.create_placeholder_structural_witin(|| "weierstrass_decompress_eq");
        let sel = SelectorType::Prefix(eq.expr());
        let selector_type_layout = SelectorTypeLayout {
            sel_mem_read: sel.clone(),
            sel_mem_write: sel.clone(),
            sel_lookup: sel.clone(),
            sel_zero: sel.clone(),
        };
        // input32/output32 are placeholders here; old_output32 gets real
        // witnesses because the prior memory value is a free input.
        let input32_exprs: GenericArray<
            MemoryExpr<E>,
            <EC::BaseField as NumWords>::WordsFieldElement,
        > = GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)));
        let old_output32_exprs: GenericArray<
            MemoryExpr<E>,
            <EC::BaseField as NumWords>::WordsFieldElement,
        > = GenericArray::generate(|_| {
            array::from_fn(|i| cb.create_witin(|| format!("old_output32_{}", i)).expr())
        });
        let output32_exprs: GenericArray<
            MemoryExpr<E>,
            <EC::BaseField as NumWords>::WordsFieldElement,
        > = GenericArray::generate(|_| array::from_fn(|_| Expression::WitIn(0)));
        Self {
            layer_exprs: WeierstrassDecompressLayer { wits },
            selector_type_layout,
            input32_exprs,
            old_output32_exprs,
            output32_exprs,
            n_fixed: 0,
            n_committed: 0,
            n_structural_witin: 0,
            n_challenges: 0,
        }
    }
#[allow(clippy::too_many_arguments)]
fn populate(
record: &mut LkMultiplicity,
cols: &mut WeierstrassDecompressWitCols<E::BaseField, EC::BaseField>,
instance: &EllipticCurveDecompressInstance<EC::BaseField>,
) {
cols.sign_bit = E::BaseField::from_bool(instance.sign_bit);
cols.old_output32 = GenericArray::generate(|i| {
[
E::BaseField::from_canonical_u32(instance.old_y_words[i] & ((1 << 16) - 1)),
E::BaseField::from_canonical_u32((instance.old_y_words[i] >> 16) & ((1 << 16) - 1)),
]
});
let x = &instance.x;
// Y = sqrt(x^3 + ax + b)
cols.x_limbs = EC::BaseField::to_limbs_field(x);
cols.range_x.populate(record, x, &EC::BaseField::modulus());
let x_2 = cols.x_2.populate(record, x, x, FieldOperation::Mul);
let x_3 = cols.x_3.populate(record, &x_2, x, FieldOperation::Mul);
let b = EC::b_int();
let a = EC::a_int();
let param_vec = vec![a, b];
let x_vec = vec![x.clone(), BigUint::one()];
let ax_plus_b = cols.ax_plus_b.populate(record, ¶m_vec, &x_vec);
let x_3_plus_b_plus_ax =
cols.x_3_plus_b_plus_ax
.populate(record, &x_3, &ax_plus_b, FieldOperation::Add);
let sqrt_fn = match EC::CURVE_TYPE {
CurveType::Secp256k1 => secp256k1_sqrt,
CurveType::Secp256r1 => secp256r1_sqrt,
_ => panic!("Unsupported curve"),
};
let y = cols.pos_y.populate(record, &x_3_plus_b_plus_ax, sqrt_fn);
let zero = BigUint::zero();
let neg_y = cols.neg_y.populate(record, &zero, &y, FieldOperation::Sub);
cols.neg_y_range_check
.populate(record, &neg_y, &EC::BaseField::modulus());
if cols.pos_y.lsb.to_canonical_u64() == instance.sign_bit as u64 {
cols.y_limbs = EC::BaseField::to_limbs_field(&y);
} else {
cols.y_limbs = EC::BaseField::to_limbs_field(&neg_y);
}
}
}
impl<E: ExtensionField, EC: EllipticCurve + WeierstrassParameters> ProtocolBuilder<E>
    for WeierstrassDecompressLayout<E, EC>
{
    type Params = ();
    /// Lay down all constraints proving `y_limbs` is the decompressed
    /// y-coordinate for (`x_limbs`, `sign_bit`):
    /// 1. x < p (canonical range check);
    /// 2. x^3 + a*x + b is computed via field-op columns;
    /// 3. `pos_y` is a square root of that value and `neg_y = 0 - pos_y`,
    ///    range-checked canonical;
    /// 4. `y_limbs` equals `pos_y` when its parity matches `sign_bit`,
    ///    otherwise `neg_y`.
    /// Finally the byte limbs are repacked into 32-bit word expressions that
    /// form the memory-I/O interface of the layout.
    fn build_layer_logic(
        cb: &mut CircuitBuilder<E>,
        _params: Self::Params,
    ) -> Result<Self, CircuitBuilderError> {
        let mut layout = match EC::CURVE_TYPE {
            CurveType::Secp256k1 | CurveType::Secp256r1 => WeierstrassDecompressLayout::new(cb),
            _ => panic!("Unsupported curve"),
        };
        let wits = &layout.layer_exprs.wits;
        let x_limbs = &wits.x_limbs;
        let max_num_limbs = EC::BaseField::to_limbs_expr(&EC::BaseField::modulus());
        wits.range_x.eval(cb, x_limbs, &max_num_limbs)?;
        wits.x_2.eval(cb, x_limbs, x_limbs, FieldOperation::Mul)?;
        wits.x_3
            .eval(cb, &wits.x_2.result, x_limbs, FieldOperation::Mul)?;
        let b_const = EC::BaseField::to_limbs_expr::<E>(&EC::b_int());
        let a_const = EC::BaseField::to_limbs_expr::<E>(&EC::a_int());
        let params = [a_const, b_const];
        let p_x: Polynomial<Expression<E>> = x_limbs.clone().into();
        let p_one: Polynomial<Expression<E>> =
            EC::BaseField::to_limbs_expr::<E>(&BigUint::one()).into();
        // a*x + b*1 as the inner product of (a, b) with (x, 1).
        // Fixed mojibake: `¶ms` was a corrupted `&params`.
        wits.ax_plus_b.eval(cb, &params, &[p_x, p_one])?;
        wits.x_3_plus_b_plus_ax.eval(
            cb,
            &wits.x_3.result,
            &wits.ax_plus_b.result,
            FieldOperation::Add,
        )?;
        wits.neg_y.eval(
            cb,
            &[Expression::<E>::ZERO].iter(),
            &wits.pos_y.multiplication.result,
            FieldOperation::Sub,
        )?;
        // Range check the `neg_y.result` to be canonical.
        let modulus_limbs = EC::BaseField::to_limbs_expr(&EC::BaseField::modulus());
        wits.neg_y_range_check
            .eval(cb, &wits.neg_y.result, &modulus_limbs)?;
        // Constrain that `y` is a square root. Note that `y.multiplication.result` is constrained
        // to be canonical here. Since `y_limbs` is constrained to be either
        // `y.multiplication.result` or `neg_y.result`, `y_limbs` will be canonical.
        wits.pos_y
            .eval(cb, &wits.x_3_plus_b_plus_ax.result, wits.pos_y.lsb)?;
        // When the sign rule is LeastSignificantBit, the sign_bit should match the parity
        // of the result. The parity of the square root result is given by the wits.y.lsb
        // value. Thus, if the sign_bit matches the wits.y.lsb value, then the result
        // should be the square root of the y value. Otherwise, the result should be the
        // negative square root of the y value.
        // `cond` is the XNOR of (lsb, sign_bit): 1 when they agree, 0 otherwise.
        let cond: Expression<E> = 1
            - (wits.pos_y.lsb.expr() + wits.sign_bit.expr()
                - 2 * wits.pos_y.lsb.expr() * wits.sign_bit.expr());
        for (y, sqrt_y, neg_sqrt_y) in izip!(
            wits.y_limbs.0.iter(),
            wits.pos_y.multiplication.result.0.iter(),
            wits.neg_y.result.0.iter()
        ) {
            cb.condition_require_equal(
                || "when lsb == sign_bit, y_limbs = sqrt(y), otherwise y_limbs = -sqrt(y)",
                cond.expr(),
                y.expr(),
                sqrt_y.expr(),
                neg_sqrt_y.expr(),
            )?;
        }
        // Repack the byte limbs (big-endian after `.rev()`) into pairs of
        // 16-bit limbs forming the 32-bit I/O word expressions.
        let mut output32 =
            Vec::with_capacity(<EC::BaseField as NumWords>::WordsFieldElement::USIZE);
        merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(
            &wits.y_limbs.0.iter().rev().cloned().collect::<Vec<_>>(),
            &mut output32,
        );
        let output32 = output32.try_into().unwrap();
        let mut input32 = Vec::with_capacity(<EC::BaseField as NumWords>::WordsFieldElement::USIZE);
        merge_u8_slice_to_u16_limbs_pairs_and_extend::<E>(
            &wits.x_limbs.0.iter().rev().cloned().collect::<Vec<_>>(),
            &mut input32,
        );
        let input32 = input32.try_into().unwrap();
        // set input32/output32 expr
        layout.input32_exprs = input32;
        layout.output32_exprs = output32;
        layout.old_output32_exprs =
            GenericArray::generate(|i| array::from_fn(|j| wits.old_output32[i][j].expr()));
        Ok(layout)
    }
    /// Snapshot the final column counts, register the prefix selectors with
    /// the legacy constraint system, and return the output-evaluation index
    /// groups (read / write / lookup / zero) plus the assembled chip.
    fn finalize(&mut self, cb: &mut CircuitBuilder<E>) -> (OutEvalGroups, Chip<E>) {
        self.n_fixed = cb.cs.num_fixed;
        self.n_committed = cb.cs.num_witin as usize;
        self.n_structural_witin = cb.cs.num_structural_witin as usize;
        self.n_challenges = 0;
        // register selector to legacy constrain system
        cb.cs.r_selector = Some(self.selector_type_layout.sel_mem_read.clone());
        cb.cs.w_selector = Some(self.selector_type_layout.sel_mem_write.clone());
        cb.cs.lk_selector = Some(self.selector_type_layout.sel_lookup.clone());
        cb.cs.zero_selector = Some(self.selector_type_layout.sel_zero.clone());
        let w_len = cb.cs.w_expressions.len();
        let r_len = cb.cs.r_expressions.len();
        let lk_len = cb.cs.lk_expressions.len();
        let zero_len =
            cb.cs.assert_zero_expressions.len() + cb.cs.assert_zero_sumcheck_expressions.len();
        (
            [
                // r_record
                (0..r_len).collect_vec(),
                // w_record
                (r_len..r_len + w_len).collect_vec(),
                // lk_record
                (r_len + w_len..r_len + w_len + lk_len).collect_vec(),
                // zero_record
                (0..zero_len).collect_vec(),
            ],
            Chip::new_from_cb(cb, self.n_challenges),
        )
    }
    // The accessors below are unused by this layout; the counts are tracked
    // directly in `finalize`.
    fn n_committed(&self) -> usize {
        todo!()
    }
    fn n_fixed(&self) -> usize {
        todo!()
    }
    fn n_challenges(&self) -> usize {
        todo!()
    }
    fn n_evaluations(&self) -> usize {
        todo!()
    }
    fn n_layers(&self) -> usize {
        todo!()
    }
}
/// Witness-generation input for the Weierstrass decompression layout:
/// one `EllipticCurveDecompressInstance` per circuit row.
#[derive(Clone, Default)]
pub struct WeierstrassDecompressTrace<P: NumLimbs + NumWords> {
    /// Instances to prove, one per row.
    pub instances: Vec<EllipticCurveDecompressInstance<P>>,
    // NOTE(review): `P` already appears in `instances`, so this marker looks
    // redundant — confirm before removing.
    pub _phantom: PhantomData<P>,
}
impl<E: ExtensionField, EC: EllipticCurve + WeierstrassParameters> ProtocolWitnessGenerator<E>
    for WeierstrassDecompressLayout<E, EC>
{
    type Trace = WeierstrassDecompressTrace<EC::BaseField>;
    /// This layout has no fixed columns; return an empty matrix.
    fn fixed_witness_group(&self) -> RowMajorMatrix<E::BaseField> {
        RowMajorMatrix::new(0, 0, InstancePaddingStrategy::Default)
    }
    /// Fill the main and structural witness matrices from the trace, one row
    /// per instance, in parallel batches. The structural witness (prefix
    /// selector) is set to one on every active row.
    fn phase1_witness_group(
        &self,
        phase1: Self::Trace,
        wits: [&mut RowMajorMatrix<E::BaseField>; 2],
        lk_multiplicity: &mut LkMultiplicity,
    ) {
        let num_instances = wits[0].num_instances();
        let nthreads = max_usable_threads();
        let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
        // The number of columns used for weierstrass decompress subcircuit.
        // NOTE(review): relies on `size_of` of the `u8`-instantiated column
        // struct equalling the column count — confirm the struct layout
        // guarantees this.
        let num_main_wit_cols = size_of::<WeierstrassDecompressWitCols<u8, EC::BaseField>>();
        let [wits, structural_wits] = wits;
        let raw_witin_iter = wits.par_batch_iter_mut(num_instance_per_batch);
        let raw_structural_wits_iter = structural_wits.par_batch_iter_mut(num_instance_per_batch);
        raw_witin_iter
            .zip_eq(raw_structural_wits_iter)
            .zip_eq(phase1.instances.par_chunks(num_instance_per_batch))
            .for_each(|((rows, eqs), phase1_instances)| {
                // Per-batch clone; multiplicities are merged by the shared
                // counter inside LkMultiplicity.
                let mut lk_multiplicity = lk_multiplicity.clone();
                rows.chunks_mut(self.n_committed)
                    .zip_eq(eqs.chunks_mut(self.n_structural_witin))
                    .zip_eq(phase1_instances)
                    .for_each(|((row, eqs), phase1_instance)| {
                        // Reinterpret the row slice starting at the first
                        // column of this sub-circuit as the typed column
                        // struct.
                        let cols: &mut WeierstrassDecompressWitCols<E::BaseField, EC::BaseField> =
                            row[self.layer_exprs.wits.sign_bit.id as usize..][..num_main_wit_cols] // TODO: Find a better way to write it.
                                .borrow_mut();
                        Self::populate(&mut lk_multiplicity, cols, phase1_instance);
                        // Activate the prefix selector for this row.
                        for x in eqs.iter_mut() {
                            *x = E::BaseField::ONE;
                        }
                    });
            });
    }
}
/// Test-only container bundling the decompression layout with the VM state,
/// the memory read/write gadgets, and the pointer witness.
pub struct TestWeierstrassDecompressLayout<E: ExtensionField, EC: EllipticCurve> {
    /// Constraint/expression layout for the decompression sub-circuit.
    layout: WeierstrassDecompressLayout<E, EC>,
    /// Memory write gadgets for the x input words and the y output words.
    mem_rw: Vec<WriteMEM>,
    /// VM state-in/state-out circuit (pc, timestamps, ...).
    vm_state: StateInOut<E>,
    /// Base address of the field element in memory (unused after setup).
    _field_ptr: WitIn,
}
/// Build the GKR circuit for Weierstrass decompression together with the
/// surrounding test plumbing (VM state and memory writes for the x input and
/// y output words).
///
/// Returns the test layout, the GKR circuit, and the number of witness /
/// structural-witness columns.
#[allow(clippy::type_complexity)]
pub fn setup_gkr_circuit<E: ExtensionField, EC: EllipticCurve + WeierstrassParameters>() -> Result<
    (
        TestWeierstrassDecompressLayout<E, EC>,
        GKRCircuit<E>,
        u16,
        u16,
    ),
    ZKVMError,
> {
    let mut cs = ConstraintSystem::new(|| "weierstrass_decompress");
    let mut cb = CircuitBuilder::<E>::new(&mut cs);
    // constrain vmstate
    let vm_state = StateInOut::construct_circuit(&mut cb, false)?;
    let field_ptr = cb.create_witin(|| "field_ptr");
    let mut layout = WeierstrassDecompressLayout::build_layer_logic(&mut cb, ())?;
    let num_limbs = <EC::BaseField as NumLimbs>::Limbs::U32;
    // Reads of the x input: modelled as writes that leave the value
    // unchanged (value_before == value_after).
    let mut mem_rw = layout
        .input32_exprs
        .iter()
        .enumerate()
        .map(|(i, val)| {
            WriteMEM::construct_circuit(
                &mut cb,
                // mem address := field_ptr + i * 4
                field_ptr.expr() + (i as u32) * 4,
                val.clone(),
                val.clone(),
                vm_state.ts,
            )
        })
        .collect::<Result<Vec<WriteMEM>, _>>()?;
    // Writes of the decompressed y output, placed after the x region.
    // NOTE(review): offset is `num_limbs` bytes (x occupies num_limbs u8
    // limbs) — confirm this matches the guest memory layout.
    mem_rw.extend(
        izip!(
            layout.old_output32_exprs.iter(),
            layout.output32_exprs.iter()
        )
        .enumerate()
        .map(|(i, (val_before, val_after))| {
            WriteMEM::construct_circuit(
                &mut cb,
                // mem address := field_ptr + i * 4 + num_limbs
                field_ptr.expr() + (i as u32) * 4 + num_limbs,
                val_before.clone(),
                val_after.clone(),
                vm_state.ts,
            )
        })
        .collect::<Result<Vec<WriteMEM>, _>>()?,
    );
    let (out_evals, mut chip) = layout.finalize(&mut cb);
    let layer = Layer::from_circuit_builder(
        &cb,
        "weierstrass_decompress".to_string(),
        layout.n_challenges,
        out_evals,
    );
    chip.add_layer(layer);
    Ok((
        TestWeierstrassDecompressLayout {
            layout,
            vm_state,
            _field_ptr: field_ptr,
            mem_rw,
        },
        chip.gkr_circuit(),
        cs.num_witin,
        cs.num_structural_witin,
    ))
}
/// End-to-end test driver: assign witnesses for the given decompression
/// `instances`, build the GKR witness, prove, and (optionally) check the
/// expected y limbs and verify the proof.
///
/// `test_outputs` re-derives each y via the reference decompression function
/// and compares against the witness columns; `verify` runs the GKR verifier
/// on the produced proof (PCS opening is omitted).
#[tracing::instrument(
    skip_all,
    name = "run_weierstrass_decompress",
    level = "trace",
    fields(profiling_1)
)]
pub fn run_weierstrass_decompress<
    E: ExtensionField,
    PCS: PolynomialCommitmentScheme<E> + 'static,
    EC: EllipticCurve + WeierstrassParameters,
>(
    (layout, gkr_circuit, num_witin, num_structural_witin): (
        TestWeierstrassDecompressLayout<E, EC>,
        GKRCircuit<E>,
        u16,
        u16,
    ),
    instances: Vec<EllipticCurveDecompressInstance<EC::BaseField>>,
    test_outputs: bool,
    verify: bool,
) -> Result<GKRProof<E>, BackendError> {
    let mut shard_ctx = ShardContext::default();
    let num_instances = instances.len();
    let log2_num_instance = ceil_log2(num_instances);
    let num_threads = optimal_sumcheck_threads(log2_num_instance);
    let span = entered_span!("phase1_witness", profiling_2 = true);
    let nthreads = max_usable_threads();
    let num_instance_per_batch = num_instances.div_ceil(nthreads).max(1);
    let mut lk_multiplicity = LkMultiplicity::default();
    let mut phase1_witness = RowMajorMatrix::<E::BaseField>::new(
        instances.len(),
        num_witin as usize,
        InstancePaddingStrategy::Default,
    );
    let mut structural_witness = RowMajorMatrix::<E::BaseField>::new(
        instances.len(),
        num_structural_witin as usize,
        InstancePaddingStrategy::Default,
    );
    // First pass: assign the VM-state and memory-op columns with dummy
    // step/mem records (addresses and cycles are placeholders for the test).
    let raw_witin_iter = phase1_witness.par_batch_iter_mut(num_instance_per_batch);
    let shard_ctx_vec = shard_ctx.get_forked();
    raw_witin_iter
        .zip_eq(instances.par_chunks(num_instance_per_batch))
        .zip(shard_ctx_vec)
        .for_each(|((instances, steps), mut shard_ctx)| {
            let mut lk_multiplicity = lk_multiplicity.clone();
            instances
                .chunks_mut(num_witin as usize)
                .zip_eq(steps)
                .for_each(|(instance, _step)| {
                    layout
                        .vm_state
                        .assign_instance(
                            instance,
                            &shard_ctx,
                            &StepRecord::new_ecall_any(10, ByteAddr::from(0)),
                        )
                        .expect("assign vm_state error");
                    layout.mem_rw.iter().for_each(|mem_config| {
                        mem_config
                            .assign_op(
                                instance,
                                &mut shard_ctx,
                                &mut lk_multiplicity,
                                10,
                                &MemOp {
                                    previous_cycle: 0,
                                    addr: ByteAddr::from(0).waddr(),
                                    value: Default::default(),
                                },
                            )
                            .expect("assign error");
                    });
                })
        });
    // Second pass: fill the decompression sub-circuit columns and the
    // structural (selector) witness.
    layout.layout.phase1_witness_group(
        WeierstrassDecompressTrace {
            instances: instances.clone(),
            _phantom: PhantomData,
        },
        [&mut phase1_witness, &mut structural_witness],
        &mut lk_multiplicity,
    );
    exit_span!(span);
    if test_outputs {
        // Cross-check the witnessed y limbs against the reference
        // decompression implementation for the curve.
        let decompress_fn = match EC::CURVE_TYPE {
            CurveType::Secp256k1 => secp256k1_decompress::<EC>,
            CurveType::Secp256r1 => secp256r1_decompress::<EC>,
            _ => panic!("Unsupported curve"),
        };
        let expected_outputs = instances
            .iter()
            .map(
                |EllipticCurveDecompressInstance {
                     x,
                     sign_bit,
                     old_y_words: _,
                 }| {
                    let computed_point = decompress_fn(&x.to_bytes_be(), *sign_bit as u32);
                    EC::BaseField::to_limbs(&computed_point.y)
                },
            )
            .collect_vec();
        let y_output_index_start = layout.layout.layer_exprs.wits.y_limbs.0[0].id as usize;
        let got_outputs = phase1_witness
            .iter_rows()
            .take(num_instances)
            .map(|cols| {
                cols[y_output_index_start..][..<EC::BaseField as NumLimbs>::Limbs::USIZE]
                    .iter()
                    .map(|y| y.to_canonical_u64() as u8)
                    .collect_vec()
            })
            .collect_vec();
        assert_eq!(expected_outputs, got_outputs);
    }
    let mut prover_transcript = BasicTranscript::<E>::new(b"protocol");
    let challenges = [
        prover_transcript.read_challenge().elements,
        prover_transcript.read_challenge().elements,
    ];
    let span = entered_span!("gkr_witness", profiling_2 = true);
    let phase1_witness_group = phase1_witness
        .to_mles()
        .into_iter()
        .map(Arc::new)
        .collect_vec();
    let structural_witness = structural_witness
        .to_mles()
        .into_iter()
        .map(Arc::new)
        .collect_vec();
    let fixed = layout
        .layout
        .fixed_witness_group()
        .to_mles()
        .into_iter()
        .map(Arc::new)
        .collect_vec();
    #[allow(clippy::type_complexity)]
    let (gkr_witness, gkr_output) = gkr_witness::<E, PCS, CpuBackend<E, PCS>, CpuProver<_>>(
        &gkr_circuit,
        &phase1_witness_group,
        &structural_witness,
        &fixed,
        &[],
        &[],
        &challenges,
    );
    exit_span!(span);
    let span = entered_span!("out_eval", profiling_2 = true);
    // Evaluate each output MLE at (a suffix of) a shared random point; fall
    // back to a single zero evaluation when the circuit has no outputs.
    let out_evals = {
        let mut point = Vec::with_capacity(log2_num_instance);
        point.extend(prover_transcript.sample_vec(log2_num_instance).to_vec());
        let out_evals = gkr_output
            .0
            .par_iter()
            .map(|wit| {
                let point = point[point.len() - wit.num_vars()..point.len()].to_vec();
                PointAndEval {
                    point: point.clone(),
                    eval: wit.evaluate(&point),
                }
            })
            .collect::<Vec<_>>();
        if out_evals.is_empty() {
            vec![PointAndEval {
                point: point[point.len() - log2_num_instance..point.len()].to_vec(),
                eval: E::ZERO,
            }]
        } else {
            out_evals
        }
    };
    exit_span!(span);
    if cfg!(debug_assertions) {
        // mock prover
        let out_wits = gkr_output.0.0.clone();
        MockProver::check(&gkr_circuit, &gkr_witness, out_wits, challenges.to_vec())
            .expect("mock prover failed");
    }
    let span = entered_span!("create_proof", profiling_2 = true);
    let selector_ctxs = vec![SelectorContext::new(0, num_instances, log2_num_instance); 1];
    let GKRProverOutput { gkr_proof, .. } = gkr_circuit
        .prove::<CpuBackend<E, PCS>, CpuProver<_>>(
            num_threads,
            log2_num_instance,
            gkr_witness,
            &out_evals,
            &[],
            &challenges,
            &mut prover_transcript,
            &selector_ctxs,
        )
        .expect("Failed to prove phase");
    exit_span!(span);
    if verify {
        {
            let mut verifier_transcript = BasicTranscript::<E>::new(b"protocol");
            let challenges = [
                verifier_transcript.read_challenge().elements,
                verifier_transcript.read_challenge().elements,
            ];
            // This is to make prover/verifier match
            let mut point = Vec::with_capacity(log2_num_instance);
            point.extend(verifier_transcript.sample_vec(log2_num_instance).to_vec());
            gkr_circuit
                .verify(
                    log2_num_instance,
                    gkr_proof.clone(),
                    &out_evals,
                    &[],
                    &[],
                    &challenges,
                    &mut verifier_transcript,
                    &selector_ctxs,
                )
                .expect("GKR verify failed");
            // Omit the PCS opening phase.
        }
    }
    Ok(gkr_proof)
}
#[cfg(test)]
mod tests {
    use crate::precompiles::weierstrass::test_utils::random_decompress_instances;
    use super::*;
    use ff_ext::BabyBearExt4;
    use mpcs::BasefoldDefault;
    use sp1_curves::weierstrass::{
        SwCurve, WeierstrassParameters, secp256k1::Secp256k1, secp256r1::Secp256r1,
    };
    /// Prove + verify decompression for a power-of-two instance count (8).
    fn test_weierstrass_decompress_helper<WP: WeierstrassParameters>() {
        type E = BabyBearExt4;
        type Pcs = BasefoldDefault<E>;
        let instances = random_decompress_instances::<SwCurve<WP>>(8);
        let _ = run_weierstrass_decompress::<E, Pcs, SwCurve<WP>>(
            setup_gkr_circuit::<E, SwCurve<WP>>().expect("setup gkr circuit failed"),
            instances,
            true,
            true,
        );
    }
    #[test]
    fn test_weierstrass_decompress_secp256k1() {
        test_weierstrass_decompress_helper::<Secp256k1>();
    }
    #[test]
    fn test_weierstrass_decompress_secp256r1() {
        test_weierstrass_decompress_helper::<Secp256r1>();
    }
    /// Same as above but with a non-power-of-two instance count (5) to
    /// exercise padding / selector handling.
    fn test_weierstrass_decompress_nonpow2_helper<WP: WeierstrassParameters>() {
        type E = BabyBearExt4;
        type Pcs = BasefoldDefault<E>;
        let instances = random_decompress_instances::<SwCurve<WP>>(5);
        let _ = run_weierstrass_decompress::<E, Pcs, SwCurve<WP>>(
            setup_gkr_circuit::<E, SwCurve<WP>>().expect("setup gkr circuit failed"),
            instances,
            true,
            true,
        );
    }
    #[test]
    fn test_weierstrass_decompress_nonpow2_secp256k1() {
        test_weierstrass_decompress_nonpow2_helper::<Secp256k1>();
    }
    #[test]
    fn test_weierstrass_decompress_nonpow2_secp256r1() {
        test_weierstrass_decompress_nonpow2_helper::<Secp256r1>();
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/precompiles/weierstrass/test_utils.rs | ceno_zkvm/src/precompiles/weierstrass/test_utils.rs | use generic_array::GenericArray;
use num::bigint::RandBigInt;
use rand::{Rng, SeedableRng};
use sp1_curves::{
EllipticCurve,
params::NumWords,
weierstrass::{SwCurve, WeierstrassParameters},
};
use crate::precompiles::weierstrass::EllipticCurveDecompressInstance;
/// Deterministically generate `num_instances` pairs of distinct curve points
/// (computed as scalar multiples of the generator), each encoded as
/// little-endian words. A fixed RNG seed keeps tests reproducible.
pub fn random_point_pairs<WP: WeierstrassParameters>(
    num_instances: usize,
) -> Vec<[GenericArray<u32, <WP::BaseField as NumWords>::WordsCurvePoint>; 2]> {
    let mut rng = rand::rngs::StdRng::seed_from_u64(42);
    let base = SwCurve::<WP>::generator();
    let mut pairs = Vec::with_capacity(num_instances);
    for _ in 0..num_instances {
        // Draw two distinct 24-bit scalars (retry on collision).
        let a = rng.gen_biguint(24);
        let b = loop {
            let candidate = rng.gen_biguint(24);
            if candidate != a {
                break candidate;
            }
        };
        let p = base.clone().sw_scalar_mul(&a);
        let q = base.clone().sw_scalar_mul(&b);
        pairs.push([
            p.to_words_le().try_into().unwrap(),
            q.to_words_le().try_into().unwrap(),
        ]);
    }
    pairs
}
/// Deterministically generate `num_instances` curve points as scalar
/// multiples of the generator, encoded as little-endian words. Uses a fixed
/// RNG seed so tests are reproducible.
pub fn random_points<WP: WeierstrassParameters>(
    num_instances: usize,
) -> Vec<GenericArray<u32, <WP::BaseField as NumWords>::WordsCurvePoint>> {
    let mut rng = rand::rngs::StdRng::seed_from_u64(42);
    let base = SwCurve::<WP>::generator();
    let mut points = Vec::with_capacity(num_instances);
    for _ in 0..num_instances {
        let scalar = rng.gen_biguint(24);
        let point = base.clone().sw_scalar_mul(&scalar);
        points.push(point.to_words_le().try_into().unwrap());
    }
    points
}
/// Deterministically generate `num_instances` decompression instances: a
/// random point's x-coordinate plus a random sign bit, with zeroed old
/// output words. Fixed RNG seed keeps tests reproducible.
#[allow(dead_code)]
pub fn random_decompress_instances<WP: EllipticCurve + WeierstrassParameters>(
    num_instances: usize,
) -> Vec<EllipticCurveDecompressInstance<WP::BaseField>> {
    let mut rng = rand::rngs::StdRng::seed_from_u64(42);
    let base = SwCurve::<WP>::generator();
    let mut out = Vec::with_capacity(num_instances);
    for _ in 0..num_instances {
        // RNG call order (scalar, then sign) matters for reproducibility.
        let scalar = rng.gen_biguint(24);
        let sign_bit = rng.gen_bool(0.5);
        let point = base.clone().sw_scalar_mul(&scalar);
        out.push(EllipticCurveDecompressInstance {
            sign_bit,
            x: point.x,
            old_y_words: GenericArray::default(),
        });
    }
    out
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/bin/bitwise_keccak.rs | ceno_zkvm/src/bin/bitwise_keccak.rs | use ceno_zkvm::precompiles::{run_bitwise_keccakf, setup_bitwise_keccak_gkr_circuit};
use clap::{Parser, command};
use ff_ext::GoldilocksExt2;
use itertools::Itertools;
use mpcs::BasefoldDefault;
use rand::{RngCore, SeedableRng};
use tracing::level_filters::LevelFilter;
use tracing_forest::ForestLayer;
use tracing_subscriber::{
EnvFilter, Registry, filter::filter_fn, fmt, layer::SubscriberExt, util::SubscriberInitExt,
};
// Use jemalloc as global allocator for performance
#[cfg(all(feature = "jemalloc", unix, not(test)))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
// CLI arguments for the bitwise-keccak benchmark binary.
// (Plain `//` comments are used on purpose: `///` doc comments would become
// clap help text and change the CLI output.)
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    // Profiling granularity.
    // Setting any value restricts logs to profiling information
    #[arg(long)]
    profiling: Option<usize>,
}
// Entry point: set up tracing/profiling subscribers, generate random keccak
// states, and run the bitwise keccak-f precompile prover end to end
// (without output checks or verification).
fn main() {
    let args = Args::parse();
    type E = GoldilocksExt2;
    type Pcs = BasefoldDefault<E>;
    // default filter
    let default_filter = EnvFilter::builder()
        .with_default_directive(LevelFilter::DEBUG.into())
        .from_env_lossy();
    // filter by profiling level;
    // spans with level i contain the field "profiling_{i}"
    // this restricts statistics to first (args.profiling) levels
    let profiling_level = args.profiling.unwrap_or(1);
    let filter_by_profiling_level = filter_fn(move |metadata| {
        (1..=profiling_level)
            .map(|i| format!("profiling_{i}"))
            .any(|field| metadata.fields().field(&field).is_some())
    });
    let fmt_layer = fmt::layer()
        .compact()
        .with_thread_ids(false)
        .with_thread_names(false)
        .without_time();
    Registry::default()
        .with(args.profiling.is_some().then_some(ForestLayer::default()))
        .with(fmt_layer)
        // if some profiling granularity is specified, use the profiling filter,
        // otherwise use the default
        .with(
            args.profiling
                .is_some()
                .then_some(filter_by_profiling_level),
        )
        .with(args.profiling.is_none().then_some(default_filter))
        .init();
    let random_u64: u64 = rand::random();
    // Use seeded rng for debugging convenience
    // NOTE(review): the seed itself is random, so runs are not reproducible
    // unless the seed is logged — consider printing `random_u64`.
    let mut rng = rand::rngs::StdRng::seed_from_u64(random_u64);
    let num_instances = 1024;
    let states: Vec<[u64; 25]> = (0..num_instances)
        .map(|_| std::array::from_fn(|_| rng.next_u64()))
        .collect_vec();
    run_bitwise_keccakf::<E, Pcs>(
        setup_bitwise_keccak_gkr_circuit()
            .expect("setup circuit error")
            .1,
        states,
        false,
        false,
    );
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/bin/riscv_stats.rs | ceno_zkvm/src/bin/riscv_stats.rs | use std::collections::BTreeMap;
use ceno_zkvm::{
instructions::riscv::Rv32imConfig,
stats::{StaticReport, TraceReport},
structs::ZKVMConstraintSystem,
};
use ff_ext::GoldilocksExt2;
type E = GoldilocksExt2;
/// Build the full RV32IM constraint system (with no program trace) and dump
/// per-circuit column/constraint statistics to `riscv_stats.txt`.
fn main() {
    let mut zkvm_cs = ZKVMConstraintSystem::default();
    let _ = Rv32imConfig::<E>::construct_circuits(&mut zkvm_cs);
    let static_report = StaticReport::new(&zkvm_cs);
    // Empty trace map: the report covers static circuit shape only.
    let report = TraceReport::new(&static_report, BTreeMap::new(), "no program");
    report.save_table("riscv_stats.txt");
    println!("INFO: generated riscv_stats.txt");
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/bin/e2e.rs | ceno_zkvm/src/bin/e2e.rs | use ceno_emul::{IterAddresses, Platform, Program, WORD_SIZE, Word};
use ceno_host::{CenoStdin, memory_from_file};
#[cfg(all(feature = "jemalloc", unix, not(test)))]
use ceno_zkvm::print_allocated_bytes;
use ceno_zkvm::{
e2e::{
Checkpoint, FieldType, MultiProver, PcsKind, Preset, run_e2e_with_checkpoint,
setup_platform, setup_platform_debug, verify,
},
scheme::{
ZKVMProof, constants::MAX_NUM_VARIABLES, create_backend, create_prover, hal::ProverDevice,
mock_prover::LkMultiplicityKey, verifier::ZKVMVerifier,
},
with_panic_hook,
};
use clap::Parser;
use ff_ext::{BabyBearExt4, ExtensionField, GoldilocksExt2};
use gkr_iop::hal::ProverBackend;
use mpcs::{
Basefold, BasefoldRSParams, PolynomialCommitmentScheme, SecurityLevel, Whir, WhirDefaultSpec,
};
use p3::field::FieldAlgebra;
use serde::{Serialize, de::DeserializeOwned};
use std::{fs, panic, panic::AssertUnwindSafe, path::PathBuf};
use tracing::{error, level_filters::LevelFilter};
use tracing_forest::ForestLayer;
use tracing_subscriber::{
EnvFilter, Registry, filter::filter_fn, fmt, layer::SubscriberExt, util::SubscriberInitExt,
};
use transcript::BasicTranscript as Transcript;
// Use jemalloc as global allocator for performance
#[cfg(all(feature = "jemalloc", unix, not(test)))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
/// Parse a human-readable size string (e.g. "2M", "1k") into a byte count,
/// interpreting suffixes as binary multiples (1k = 1024).
///
/// NOTE(review): the `as u32` cast silently truncates parsed sizes above
/// `u32::MAX` bytes — confirm CLI inputs are bounded, or reject/saturate
/// explicitly.
fn parse_size(s: &str) -> Result<u32, parse_size::Error> {
    parse_size::Config::new()
        .with_binary()
        .parse_size(s)
        .map(|size| size as u32)
}
/// Prove the execution of a fixed RISC-V program.
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    /// The path to the ELF file to execute.
    elf: PathBuf,
    /// The path to the proof file to write.
    #[arg(default_value = "proof.bin")]
    proof_file: PathBuf,
    /// The path to the verification key file to write.
    #[arg(default_value = "vk.bin")]
    vk_file: PathBuf,
    /// The maximum number of steps to execute the program.
    #[arg(short, long)]
    max_steps: Option<usize>,
    // Profiling granularity.
    // Setting any value restricts logs to profiling information
    // (kept as `//` so it does not appear in --help).
    #[arg(long)]
    profiling: Option<usize>,
    /// The preset configuration to use.
    #[arg(long, value_enum, default_value_t = Preset::Ceno)]
    platform: Preset,
    /// The polynomial commitment scheme to use.
    #[arg(long, value_enum, default_value_t = PcsKind::default())]
    pcs: PcsKind,
    /// The field to use, eg. goldilocks
    #[arg(long, value_enum, default_value_t = FieldType::default())]
    field: FieldType,
    /// Hints: prover-private unconstrained input.
    /// This is a raw file mapped as a memory segment.
    /// Zero-padded to the right to the next power-of-two size.
    #[arg(long, conflicts_with = "hints")]
    hints_file: Option<String>,
    #[arg(long, conflicts_with = "hints_file", value_parser, num_args = 1.., value_delimiter = ',')]
    hints: Option<Vec<Word>>,
    #[arg(long, default_value = "100")]
    n: u32,
    /// Stack size in bytes.
    #[arg(long, default_value = "2M", value_parser = parse_size)]
    stack_size: u32,
    /// Heap size in bytes.
    #[arg(long, default_value = "2M", value_parser = parse_size)]
    heap_size: u32,
    /// Max number of variables
    #[clap(long, default_value_t = MAX_NUM_VARIABLES)]
    max_num_variables: usize,
    #[arg(long, value_parser, num_args = 1.., value_delimiter = ',')]
    public_io: Option<Vec<Word>>,
    /// pub io size in byte
    #[arg(long, default_value = "1k", value_parser = parse_size)]
    public_io_size: u32,
    /// The security level to use.
    #[arg(short, long, value_enum, default_value_t = SecurityLevel::default())]
    security_level: SecurityLevel,
    // prover id
    #[arg(long, default_value = "0")]
    prover_id: u32,
    // number of available prover.
    #[arg(long, default_value = "1")]
    num_provers: u32,
    // max cycle per shard
    #[arg(long, default_value = "536870912")] // 536870912 = 2^29
    max_cycle_per_shard: u64,
    // max cell per shard
    // default value: 16GB VRAM, each cell 4 byte, log explosion 2
    // => 2^30 * 16 / 4 / 2
    #[arg(long, default_value = "2147483648")]
    max_cell_per_shard: u64,
    // for debug purpose
    // only generate respective shard id and skip others
    #[arg(long)]
    shard_id: Option<u64>,
}
// Entry point: parse CLI args, set up tracing, load the ELF and platform,
// prepare public IO and hints, then dispatch to `run_inner` with the
// selected (PCS, field) combination.
fn main() {
    let args = {
        let mut args = Args::parse();
        // Memory region sizes must be word-aligned.
        args.stack_size = args.stack_size.next_multiple_of(WORD_SIZE as u32);
        args.heap_size = args.heap_size.next_multiple_of(WORD_SIZE as u32);
        args
    };
    // default filter
    let default_filter = EnvFilter::builder()
        .with_default_directive(LevelFilter::DEBUG.into())
        .from_env_lossy();
    // filter by profiling level;
    // spans with level i contain the field "profiling_{i}"
    // this restricts statistics to first (args.profiling) levels
    let profiling_level = args.profiling.unwrap_or(1);
    let filter_by_profiling_level = filter_fn(move |metadata| {
        (1..=profiling_level)
            .map(|i| format!("profiling_{i}"))
            .any(|field| metadata.fields().field(&field).is_some())
    });
    let fmt_layer = fmt::layer()
        .compact()
        .with_thread_ids(false)
        .with_thread_names(false)
        .without_time();
    Registry::default()
        .with(args.profiling.is_some().then_some(ForestLayer::default()))
        .with(fmt_layer)
        // if some profiling granularity is specified, use the profiling filter,
        // otherwise use the default
        .with(
            args.profiling
                .is_some()
                .then_some(filter_by_profiling_level),
        )
        .with(args.profiling.is_none().then_some(default_filter))
        .init();
    // process public input first
    let public_io = args
        .public_io
        .and_then(|public_io| {
            // if the vector contains only one element, write it as a raw `u32`
            // otherwise, write the entire vector
            // in both cases, convert the resulting `CenoStdin` into a `Vec<u32>`
            if public_io.len() == 1 {
                CenoStdin::default()
                    .write(&public_io[0])
                    .ok()
                    .map(|stdin| Into::<Vec<u32>>::into(&*stdin))
            } else {
                CenoStdin::default()
                    .write(&public_io)
                    .ok()
                    .map(|stdin| Into::<Vec<u32>>::into(&*stdin))
            }
        })
        .unwrap_or_default();
    assert!(
        public_io.len() <= args.public_io_size as usize / WORD_SIZE,
        "require pub io length {} < max public_io_size {}",
        public_io.len(),
        args.public_io_size as usize / WORD_SIZE
    );
    tracing::info!("Loading ELF file: {}", args.elf.display());
    let elf_bytes = fs::read(&args.elf).expect("read elf file");
    let program = Program::load_elf(&elf_bytes, u32::MAX).unwrap();
    // Debug builds use the stricter debug platform setup.
    let platform = if cfg!(debug_assertions) {
        setup_platform_debug(
            args.platform,
            &program,
            args.stack_size,
            args.heap_size,
            args.public_io_size,
        )
    } else {
        setup_platform(
            args.platform,
            &program,
            args.stack_size,
            args.heap_size,
            args.public_io_size,
        )
    };
    tracing::info!("Running on platform {:?} {}", args.platform, platform);
    tracing::info!(
        "Stack: {} bytes. Heap: {} bytes.",
        args.stack_size,
        args.heap_size
    );
    // Hints come either from a raw file or from the --hints CLI list;
    // `--hints-file` and `--hints` are mutually exclusive (clap enforces it).
    let hints = args
        .hints_file
        .as_ref()
        .map(|file_path| {
            tracing::info!("Loading hints file: {:?}", file_path);
            let hints = memory_from_file(file_path).expect("failed to read hints file");
            assert!(
                hints.len() <= platform.hints.iter_addresses().len(),
                "hints must fit in {} bytes",
                platform.hints.len()
            );
            hints
        })
        .or_else(|| {
            args.hints.and_then(|hint| {
                // if the vector contains only one element, write it as a raw `u32`
                // otherwise, write the entire vector
                // in both cases, convert the resulting `CenoStdin` into a `Vec<u32>`
                if hint.len() == 1 {
                    CenoStdin::default()
                        .write(&hint[0])
                        .ok()
                        .map(|stdin| Into::<Vec<u32>>::into(&*stdin))
                } else {
                    CenoStdin::default()
                        .write(&hint)
                        .ok()
                        .map(|stdin| Into::<Vec<u32>>::into(&*stdin))
                }
            })
        })
        .unwrap_or_default();
    let max_steps = args.max_steps.unwrap_or(usize::MAX);
    let multi_prover = MultiProver::new(
        args.prover_id as usize,
        args.num_provers as usize,
        args.max_cell_per_shard,
        args.max_cycle_per_shard,
    );
    let target_shard_id = args.shard_id.map(|v| v as usize);
    // Monomorphize over (PCS, field); Whir paths stop at PrepVerify until
    // verification support lands (see TODOs below).
    match (args.pcs, args.field) {
        (PcsKind::Basefold, FieldType::Goldilocks) => {
            let backend = create_backend(args.max_num_variables, args.security_level);
            let prover = create_prover(backend);
            run_inner::<GoldilocksExt2, Basefold<GoldilocksExt2, BasefoldRSParams>, _, _>(
                prover,
                program,
                platform,
                multi_prover,
                &hints,
                &public_io,
                max_steps,
                args.proof_file,
                args.vk_file,
                Checkpoint::Complete,
                target_shard_id,
            )
        }
        (PcsKind::Basefold, FieldType::BabyBear) => {
            let backend = create_backend(args.max_num_variables, args.security_level);
            let prover = create_prover(backend);
            run_inner::<BabyBearExt4, Basefold<BabyBearExt4, BasefoldRSParams>, _, _>(
                prover,
                program,
                platform,
                multi_prover,
                &hints,
                &public_io,
                max_steps,
                args.proof_file,
                args.vk_file,
                Checkpoint::Complete,
                target_shard_id,
            )
        }
        (PcsKind::Whir, FieldType::Goldilocks) => {
            let backend = create_backend(args.max_num_variables, args.security_level);
            let prover = create_prover(backend);
            run_inner::<GoldilocksExt2, Whir<GoldilocksExt2, WhirDefaultSpec>, _, _>(
                prover,
                program,
                platform,
                multi_prover,
                &hints,
                &public_io,
                max_steps,
                args.proof_file,
                args.vk_file,
                Checkpoint::PrepVerify, // TODO: when whir and babybear is ready
                target_shard_id,
            )
        }
        (PcsKind::Whir, FieldType::BabyBear) => {
            let backend = create_backend(args.max_num_variables, args.security_level);
            let prover = create_prover(backend);
            run_inner::<BabyBearExt4, Whir<BabyBearExt4, WhirDefaultSpec>, _, _>(
                prover,
                program,
                platform,
                multi_prover,
                &hints,
                &public_io,
                max_steps,
                args.proof_file,
                args.vk_file,
                Checkpoint::PrepVerify, // TODO: when whir and babybear is ready
                target_shard_id,
            )
        }
    };
    #[cfg(all(feature = "jemalloc", unix, not(test)))]
    {
        print_allocated_bytes();
    }
}
#[allow(clippy::too_many_arguments)]
// Runs the full e2e pipeline for a concrete (field, PCS, backend) instantiation,
// persists the resulting proofs and verifying key to disk, and — when a full
// run was requested and no single shard was targeted — re-verifies the proofs
// and runs a malicious-input soundness check.
fn run_inner<
    E: ExtensionField + LkMultiplicityKey + DeserializeOwned,
    PCS: PolynomialCommitmentScheme<E> + Serialize + 'static,
    PB: ProverBackend<E = E, Pcs = PCS> + 'static,
    PD: ProverDevice<PB> + 'static,
>(
    pd: PD,
    program: Program,
    platform: Platform,
    multi_prover: MultiProver,
    hints: &[u32],
    public_io: &[u32],
    max_steps: usize,
    proof_file: PathBuf,
    vk_file: PathBuf,
    checkpoint: Checkpoint,
    target_shard_id: Option<usize>,
) {
    let result = run_e2e_with_checkpoint::<E, PCS, _, _>(
        pd,
        program,
        platform,
        multi_prover,
        hints,
        public_io,
        max_steps,
        checkpoint,
        target_shard_id,
    );
    // Both proofs and vk must be produced by any checkpoint we run here.
    let zkvm_proofs = result
        .proofs
        .expect("PrepSanityCheck should yield zkvm_proof.");
    let vk = result.vk.expect("PrepSanityCheck should yield vk.");
    // Persist artifacts before (optionally) verifying, so a failed
    // verification still leaves the proof/vk on disk for inspection.
    let proof_bytes = bincode::serialize(&zkvm_proofs).unwrap();
    fs::write(&proof_file, proof_bytes).unwrap();
    let vk_bytes = bincode::serialize(&vk).unwrap();
    fs::write(&vk_file, vk_bytes).unwrap();
    // Only verify complete (non-sharded) runs past the PrepVerify checkpoint.
    if checkpoint > Checkpoint::PrepVerify && target_shard_id.is_none() {
        let verifier = ZKVMVerifier::new(vk);
        verify(zkvm_proofs.clone(), &verifier).expect("Verification failed");
        soundness_test(zkvm_proofs.first().cloned().unwrap(), &verifier);
    }
}
// Negative test: tampers with the proof's public inputs and asserts that the
// verifier rejects it, either by returning an error or by panicking with the
// expected sumcheck-inconsistency message. Any other panic is re-raised.
fn soundness_test<E: ExtensionField, Pcs: PolynomialCommitmentScheme<E>>(
    mut zkvm_proof: ZKVMProof<E, Pcs>,
    verifier: &ZKVMVerifier<E, Pcs>,
) {
    // do sanity check
    let transcript = Transcript::new(b"riscv");
    // changing the public input maliciously should cause the verifier to reject the proof
    zkvm_proof.raw_pi[0] = vec![E::BaseField::ONE];
    zkvm_proof.raw_pi[1] = vec![E::BaseField::ONE];
    // capture the panic message, if any (silence the default hook while doing so)
    let result = with_panic_hook(Box::new(|_info| ()), || {
        panic::catch_unwind(AssertUnwindSafe(|| {
            verifier.verify_proof(zkvm_proof, transcript)
        }))
    });
    match result {
        Ok(res) => {
            // no panic: verification must have returned an error
            res.expect_err("verify proof should return with error");
        }
        Err(err) => {
            // panic payloads can be &str, String, or &String depending on how
            // the panic was raised; extract the message from whichever it is
            let msg: String = if let Some(message) = err.downcast_ref::<&str>() {
                message.to_string()
            } else if let Some(message) = err.downcast_ref::<String>() {
                message.to_string()
            } else if let Some(message) = err.downcast_ref::<&String>() {
                message.to_string()
            } else {
                unreachable!()
            };
            // only the expected sumcheck-inconsistency panic is tolerated
            if !msg.starts_with("0th round's prover message is not consistent with the claim") {
                error!("unknown panic {msg:?}");
                panic::resume_unwind(err);
            };
        }
    };
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/bin/lookup_keccak.rs | ceno_zkvm/src/bin/lookup_keccak.rs | use ceno_zkvm::precompiles::{run_lookup_keccakf, setup_lookup_keccak_gkr_circuit};
use clap::{Parser, command};
use ff_ext::GoldilocksExt2;
use itertools::Itertools;
use mpcs::BasefoldDefault;
use rand::{RngCore, SeedableRng};
use tracing::level_filters::LevelFilter;
use tracing_forest::ForestLayer;
use tracing_subscriber::{
EnvFilter, Registry, filter::filter_fn, fmt, layer::SubscriberExt, util::SubscriberInitExt,
};
// Use jemalloc as global allocator for performance
// (only on unix targets, outside of tests, and when the `jemalloc` feature is enabled).
#[cfg(all(feature = "jemalloc", unix, not(test)))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;
#[derive(Parser, Debug)]
#[command(version, about, long_about = None)]
struct Args {
    // Profiling granularity.
    // Setting any value restricts logs to profiling information
    // (only spans carrying a `profiling_1` ..= `profiling_<n>` field are kept; see `main`).
    // NOTE: kept as a `//` comment on purpose — a `///` doc comment would become
    // clap help text and change the CLI output.
    #[arg(long)]
    profiling: Option<usize>,
}
// Entry point: sets up tracing/profiling, generates random keccak states, and
// produces (and logs stats for) a lookup-keccak GKR proof.
fn main() {
    let args = Args::parse();
    type E = GoldilocksExt2;
    type Pcs = BasefoldDefault<E>;
    // default filter (DEBUG unless overridden via RUST_LOG)
    let default_filter = EnvFilter::builder()
        .with_default_directive(LevelFilter::DEBUG.into())
        .from_env_lossy();
    // filter by profiling level;
    // spans with level i contain the field "profiling_{i}"
    // this restricts statistics to first (args.profiling) levels
    let profiling_level = args.profiling.unwrap_or(1);
    let filter_by_profiling_level = filter_fn(move |metadata| {
        (1..=profiling_level)
            .map(|i| format!("profiling_{i}"))
            .any(|field| metadata.fields().field(&field).is_some())
    });
    let fmt_layer = fmt::layer()
        .compact()
        .with_thread_ids(false)
        .with_thread_names(false)
        .without_time();
    // NOTE: layer order matters — filters added with `.with` apply to the
    // layers registered before them on this subscriber stack.
    Registry::default()
        .with(args.profiling.is_some().then_some(ForestLayer::default()))
        .with(fmt_layer)
        // if some profiling granularity is specified, use the profiling filter,
        // otherwise use the default
        .with(
            args.profiling
                .is_some()
                .then_some(filter_by_profiling_level),
        )
        .with(args.profiling.is_none().then_some(default_filter))
        .init();
    let random_u64: u64 = rand::random();
    // Use seeded rng for debugging convenience (the seed is random per run,
    // but a failing seed can be hard-coded here to reproduce).
    let mut rng = rand::rngs::StdRng::seed_from_u64(random_u64);
    let num_instances = 8192;
    // each instance is one 25-lane (5x5 x u64) keccak state
    let states: Vec<[u64; 25]> = (0..num_instances)
        .map(|_| std::array::from_fn(|_| rng.next_u64()))
        .collect_vec();
    let circuit_setup = setup_lookup_keccak_gkr_circuit();
    let proof = run_lookup_keccakf::<E, Pcs>(
        circuit_setup.expect("setup circuit error"),
        states,
        true,
        true,
    )
    .expect("generate proof");
    tracing::info!("lookup keccak proof stat: {}", proof);
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/scheme/septic_curve.rs | ceno_zkvm/src/scheme/septic_curve.rs | use either::Either;
use ff_ext::{ExtensionField, FromUniformBytes};
use multilinear_extensions::Expression;
// The extension field and curve definition are adapted from
// https://github.com/succinctlabs/sp1/blob/v5.2.1/crates/stark/src/septic_curve.rs
use p3::field::{Field, FieldAlgebra};
use rand::RngCore;
use serde::{Deserialize, Serialize};
use std::{
iter::Sum,
ops::{Add, Deref, Mul, MulAssign, Neg, Sub},
};
/// F[z] / (z^6 - z - 4)
///
/// ```sage
/// # finite field F = GF(2^31 - 2^27 + 1)
/// p = 2^31 - 2^27 + 1
/// F = GF(p)
///
/// # polynomial ring over F
/// R.<x> = PolynomialRing(F)
/// f = x^6 - x - 4
///
/// # check if f(x) is irreducible
/// print(f.is_irreducible())
/// ```
// NOTE(review): no impls for this type appear in this file section — confirm it
// is still used before extending it.
pub struct SexticExtension<F>([F; 6]);
/// F[z] / (z^7 - 2z - 5)
///
/// ```sage
/// # finite field F = GF(2^31 - 2^27 + 1)
/// p = 2^31 - 2^27 + 1
/// F = GF(p)
///
/// # polynomial ring over F
/// R.<x> = PolynomialRing(F)
/// f = x^7 - 2x - 5
///
/// # check if f(x) is irreducible
/// print(f.is_irreducible())
/// ```
///
/// Coefficients are stored little-endian: index `i` holds the coefficient of `z^i`.
#[derive(Clone, Debug, Default, PartialEq, Eq, Serialize, Deserialize, Hash)]
pub struct SepticExtension<F>(pub [F; 7]);
impl<F: Copy + Clone + Default> From<&[F]> for SepticExtension<F> {
    /// Builds a septic element from a slice of exactly 7 coefficients.
    ///
    /// Panics if the slice length differs from 7.
    fn from(slice: &[F]) -> Self {
        assert!(slice.len() == 7);
        let mut coeffs = [F::default(); 7];
        coeffs.copy_from_slice(slice);
        Self(coeffs)
    }
}
impl<F: Copy + Clone + Default> From<Vec<F>> for SepticExtension<F> {
    /// Builds a septic element from a vector of exactly 7 coefficients.
    ///
    /// Panics if the vector length differs from 7.
    fn from(v: Vec<F>) -> Self {
        assert!(v.len() == 7);
        let mut coeffs = [F::default(); 7];
        coeffs.copy_from_slice(&v);
        Self(coeffs)
    }
}
impl<F> Deref for SepticExtension<F> {
    // Lets a septic element be used wherever a coefficient slice is expected.
    type Target = [F];
    fn deref(&self) -> &[F] {
        &self.0
    }
}
impl<F: Field> SepticExtension<F> {
    /// Returns true iff every coefficient is zero.
    pub fn is_zero(&self) -> bool {
        self.0.iter().all(|c| *c == F::ZERO)
    }
    /// The additive identity.
    pub fn zero() -> Self {
        Self([F::ZERO; 7])
    }
    /// The multiplicative identity (constant polynomial 1).
    pub fn one() -> Self {
        let mut arr = [F::ZERO; 7];
        arr[0] = F::ONE;
        Self(arr)
    }
    // returns z^{i*p} for i = 0..6
    //
    // The sage script to compute z^{i*p} is as follows:
    // ```sage
    // p = 2^31 - 2^27 + 1
    // Fp = GF(p)
    // R.<z> = PolynomialRing(Fp)
    // mod_poly = z^7 - 2*z - 5
    // Q = R.quotient(mod_poly)
    //
    // # compute z^(i*p) for i = 1..6
    // for k in range(1, 7):
    //     power = k * p
    //     z_power = Q(z)^power
    //     print(f"z^({k}*p) = {z_power}")
    // ```
    fn z_pow_p(i: usize) -> Self {
        match i {
            0 => [1, 0, 0, 0, 0, 0, 0].into(),
            1 => [
                954599710, 1359279693, 566669999, 1982781815, 1735718361, 1174868538, 1120871770,
            ]
            .into(),
            2 => [
                862825265, 597046311, 978840770, 1790138282, 1044777201, 835869808, 1342179023,
            ]
            .into(),
            3 => [
                596273169, 658837454, 1515468261, 367059247, 781278880, 1544222616, 155490465,
            ]
            .into(),
            4 => [
                557608863, 1173670028, 1749546888, 1086464137, 803900099, 1288818584, 1184677604,
            ]
            .into(),
            5 => [
                763416381, 1252567168, 628856225, 1771903394, 650712211, 19417363, 57990258,
            ]
            .into(),
            6 => [
                1734711039, 1749813853, 1227235221, 1707730636, 424560395, 1007029514, 498034669,
            ]
            .into(),
            // valid indices are 0..=6; message fixed from the misleading "[0, 7]"
            _ => unimplemented!("i should be in [0, 6]"),
        }
    }
    // returns z^{i*p^2} for i = 0..6
    // we can change the above sage script to compute z^{i*p^2} by replacing
    // `power = k * p` with `power = k * p * p`
    fn z_pow_p_square(i: usize) -> Self {
        match i {
            0 => [1, 0, 0, 0, 0, 0, 0].into(),
            1 => [
                1013489358, 1619071628, 304593143, 1949397349, 1564307636, 327761151, 415430835,
            ]
            .into(),
            2 => [
                209824426, 1313900768, 38410482, 256593180, 1708830551, 1244995038, 1555324019,
            ]
            .into(),
            3 => [
                1475628651, 777565847, 704492386, 1218528120, 1245363405, 475884575, 649166061,
            ]
            .into(),
            4 => [
                550038364, 948935655, 68722023, 1251345762, 1692456177, 1177958698, 350232928,
            ]
            .into(),
            5 => [
                882720258, 821925756, 199955840, 812002876, 1484951277, 1063138035, 491712810,
            ]
            .into(),
            6 => [
                738287111, 1955364991, 552724293, 1175775744, 341623997, 1454022463, 408193320,
            ]
            .into(),
            // valid indices are 0..=6; message fixed from the misleading "[0, 7]"
            _ => unimplemented!("i should be in [0, 6]"),
        }
    }
    // returns self^p = (a0 + a1*z^p + ... + a6*z^(6p))
    pub fn frobenius(&self) -> Self {
        Self::z_pow_p(0) * self.0[0]
            + Self::z_pow_p(1) * self.0[1]
            + Self::z_pow_p(2) * self.0[2]
            + Self::z_pow_p(3) * self.0[3]
            + Self::z_pow_p(4) * self.0[4]
            + Self::z_pow_p(5) * self.0[5]
            + Self::z_pow_p(6) * self.0[6]
    }
    // returns self^(p^2) = (a0 + a1*z^(p^2) + ... + a6*z^(6*p^2))
    pub fn double_frobenius(&self) -> Self {
        Self::z_pow_p_square(0) * self.0[0]
            + Self::z_pow_p_square(1) * self.0[1]
            + Self::z_pow_p_square(2) * self.0[2]
            + Self::z_pow_p_square(3) * self.0[3]
            + Self::z_pow_p_square(4) * self.0[4]
            + Self::z_pow_p_square(5) * self.0[5]
            + Self::z_pow_p_square(6) * self.0[6]
    }
    // returns self^(p + p^2 + ... + p^6)
    fn norm_sub(&self) -> Self {
        let a = self.frobenius() * self.double_frobenius();
        let b = a.double_frobenius();
        let c = b.double_frobenius();
        a * b * c
    }
    // norm = self^(1 + p + ... + p^6)
    //      = self^((p^7-1)/(p-1))
    // it's a field element in F since norm^p = norm
    fn norm(&self) -> F {
        (self.norm_sub() * self).0[0]
    }
    /// Euler's-criterion test over F_{p^7}, reduced to a base-field test via the norm.
    pub fn is_square(&self) -> bool {
        // since a^((p^7 - 1)/2) = norm(a)^((p-1)/2)
        // to test if self^((p^7 - 1) / 2) == 1?
        // we can just test if norm(a)^((p-1)/2) == 1?
        let exp_digits = ((F::order() - 1u32) / 2u32).to_u64_digits();
        debug_assert!(exp_digits.len() == 1);
        let exp = exp_digits[0];
        self.norm().exp_u64(exp) == F::ONE
    }
    /// Multiplicative inverse; `None` for zero.
    pub fn inverse(&self) -> Option<Self> {
        match self.is_zero() {
            true => None,
            false => {
                // since norm(a)^(-1) * a^(p + p^2 + ... + p^6) * a = 1
                // it's easy to see a^(-1) = norm(a)^(-1) * a^(p + p^2 + ... + p^6)
                let x = self.norm_sub();
                let norm = (self * &x).0[0];
                // since self is not zero, norm is not zero
                let norm_inv = norm.try_inverse().unwrap();
                Some(x * norm_inv)
            }
        }
    }
    /// Squaring, specialized from the generic product (roughly half the coefficient
    /// multiplications) with reduction by z^7 = 2z + 5.
    pub fn square(&self) -> Self {
        let mut result = [F::ZERO; 7];
        let two = F::from_canonical_u32(2);
        let five = F::from_canonical_u32(5);
        // cross terms, i < j (each appears twice in the expansion)
        for i in 0..7 {
            for j in (i + 1)..7 {
                let term = two * self.0[i] * self.0[j];
                let mut index = i + j;
                if index < 7 {
                    result[index] += term;
                } else {
                    index -= 7;
                    // x^7 = 2x + 5
                    result[index] += five * term;
                    result[index + 1] += two * term;
                }
            }
        }
        // squared terms i == j: i in [0, 3] need no reduction
        result[0] += self.0[0] * self.0[0];
        result[2] += self.0[1] * self.0[1];
        result[4] += self.0[2] * self.0[2];
        result[6] += self.0[3] * self.0[3];
        // a4^2 * x^8 = a4^2 * (2x + 5)x = 5a4^2 * x + 2a4^2 * x^2
        let term = self.0[4] * self.0[4];
        result[1] += five * term;
        result[2] += two * term;
        // a5^2 * x^10 = a5^2 * (2x + 5)x^3 = 5a5^2 * x^3 + 2a5^2 * x^4
        let term = self.0[5] * self.0[5];
        result[3] += five * term;
        result[4] += two * term;
        // a6^2 * x^12 = a6^2 * (2x + 5)x^5 = 5a6^2 * x^5 + 2a6^2 * x^6
        let term = self.0[6] * self.0[6];
        result[5] += five * term;
        result[6] += two * term;
        Self(result)
    }
    /// Exponentiation by squaring, most-significant bit first.
    pub fn pow(&self, exp: u64) -> Self {
        let mut result = Self::one();
        let num_bits = 64 - exp.leading_zeros();
        for j in (0..num_bits).rev() {
            result = result.square();
            if (exp >> j) & 1u64 == 1u64 {
                result = result * self;
            }
        }
        result
    }
    /// Square root in F_{p^7}, or `None` if `self` is a non-residue.
    pub fn sqrt(&self) -> Option<Self> {
        // the algorithm is adapted from [Cipolla's algorithm](https://en.wikipedia.org/wiki/Cipolla%27s_algorithm)
        // the code is taken from https://github.com/succinctlabs/sp1/blob/dev/crates/stark/src/septic_extension.rs#L623
        let n = self.clone();
        if n == Self::zero() || n == Self::one() {
            return Some(n);
        }
        // norm = n^(1 + p + ... + p^6) = n^(p^7-1)/(p-1)
        let norm = n.norm();
        let exp = ((F::order() - 1u32) / 2u32).to_u64_digits()[0];
        // euler's criterion n^((p^7-1)/2) == 1 iff n is quadratic residue
        if norm.exp_u64(exp) != F::ONE {
            // it's not a square
            return None;
        };
        // n_power = n^((p+1)/2)
        let exp = ((F::order() + 1u32) / 2u32).to_u64_digits()[0];
        let n_power = self.pow(exp);
        // n^((p^2 + p)/2)
        let mut n_frobenius = n_power.frobenius();
        let mut denominator = n_frobenius.clone();
        // n^((p^4 + p^3)/2)
        n_frobenius = n_frobenius.double_frobenius();
        denominator *= n_frobenius.clone();
        // n^((p^6 + p^5)/2)
        n_frobenius = n_frobenius.double_frobenius();
        // d = n^((p^6 + p^5 + p^4 + p^3 + p^2 + p) / 2)
        // d^2 * n = norm
        denominator *= n_frobenius;
        // d' = d*n
        denominator *= n;
        let base = norm.inverse(); // norm^(-1)
        let g = F::GENERATOR;
        let mut a = F::ONE;
        let mut non_residue = F::ONE - base;
        let legendre_exp = (F::order() - 1u32) / 2u32; // (p-1)/2
        // non_residue = a^2 - 1/norm
        // find `a` such that non_residue is not a square in F
        while non_residue.exp_u64(legendre_exp.to_u64_digits()[0]) == F::ONE {
            a *= g;
            non_residue = a.square() - base;
        }
        // (p+1)/2
        let cipolla_exp = ((F::order() + 1u32) / 2u32).to_u64_digits()[0];
        // x = (a+i)^((p+1)/2) where a in Fp
        // x^2 = (a+i) * (a+i)^p = (a+i)*(a-i) = a^2 - i^2
        //     = a^2 - non_residue = 1/norm
        // therefore, x is the square root of 1/norm
        let mut x = QuadraticExtension::new(a, F::ONE, non_residue);
        x = x.pow(cipolla_exp);
        // (x*d')^2 = x^2 * d^2 * n^2 = 1/norm * norm * n
        Some(denominator * x.real)
    }
}
// a + bi where i^2 = non_residue
#[derive(Clone, Debug)]
pub struct QuadraticExtension<F> {
    // real part a
    pub real: F,
    // imaginary part b
    pub imag: F,
    // the quadratic non-residue that i squares to
    pub non_residue: F,
}
impl<F: Field> QuadraticExtension<F> {
    /// Wraps the components of `real + imag*i`, where `i^2 = non_residue`.
    pub fn new(real: F, imag: F, non_residue: F) -> Self {
        Self {
            real,
            imag,
            non_residue,
        }
    }
    /// (a + bi)^2 = (a^2 + b^2 * i^2) + 2ab * i
    pub fn square(&self) -> Self {
        let re = self.real * self.real + self.non_residue * self.imag * self.imag;
        let cross = self.real * self.imag;
        Self::new(re, cross + cross, self.non_residue)
    }
    /// (a + bi)(c + di) = (ac + bd * i^2) + (ad + bc) * i
    pub fn mul(&self, other: &Self) -> Self {
        let re = self.real * other.real + self.non_residue * self.imag * other.imag;
        let im = self.real * other.imag + self.imag * other.real;
        Self::new(re, im, self.non_residue)
    }
    /// Exponentiation by squaring, most-significant bit first.
    pub fn pow(&self, exp: u64) -> Self {
        let mut acc = Self::new(F::ONE, F::ZERO, self.non_residue);
        let bits = 64 - exp.leading_zeros();
        for bit in (0..bits).rev() {
            acc = acc.square();
            if exp & (1u64 << bit) != 0 {
                acc = acc.mul(self);
            }
        }
        acc
    }
}
impl<F: Field + FromUniformBytes> SepticExtension<F> {
    /// Samples a uniformly random septic element, drawing the 7 coefficients
    /// from the rng in ascending index order.
    pub fn random(mut rng: impl RngCore) -> Self {
        Self(std::array::from_fn(|_| F::random(&mut rng)))
    }
}
impl<F: Field> From<[u32; 7]> for SepticExtension<F> {
    /// Lifts canonical `u32` coefficients into the base field.
    fn from(arr: [u32; 7]) -> Self {
        Self(arr.map(F::from_canonical_u32))
    }
}
impl<F: FieldAlgebra + Copy> Add<&Self> for SepticExtension<F> {
    type Output = SepticExtension<F>;
    /// Coefficient-wise addition.
    fn add(self, other: &Self) -> Self {
        Self(std::array::from_fn(|i| self.0[i] + other.0[i]))
    }
}
impl<F: FieldAlgebra + Copy> Add<Self> for &SepticExtension<F> {
    type Output = SepticExtension<F>;
    /// Coefficient-wise addition on borrowed operands.
    fn add(self, other: Self) -> SepticExtension<F> {
        SepticExtension(std::array::from_fn(|i| self.0[i] + other.0[i]))
    }
}
impl<F: FieldAlgebra + Copy> Add for SepticExtension<F> {
    type Output = Self;
    /// Owned + owned forwards to the by-reference implementation.
    fn add(self, other: Self) -> Self {
        &self + &other
    }
}
impl<F: FieldAlgebra + Copy> Neg for SepticExtension<F> {
    type Output = Self;
    /// Coefficient-wise negation.
    fn neg(self) -> Self {
        Self(std::array::from_fn(|i| -self.0[i]))
    }
}
impl<F: FieldAlgebra + Copy> Sub<&Self> for SepticExtension<F> {
    type Output = SepticExtension<F>;
    /// Coefficient-wise subtraction.
    fn sub(self, other: &Self) -> Self {
        Self(std::array::from_fn(|i| self.0[i] - other.0[i]))
    }
}
impl<F: FieldAlgebra + Copy> Sub<Self> for &SepticExtension<F> {
    type Output = SepticExtension<F>;
    /// Coefficient-wise subtraction on borrowed operands.
    fn sub(self, other: Self) -> SepticExtension<F> {
        SepticExtension(std::array::from_fn(|i| self.0[i] - other.0[i]))
    }
}
impl<F: FieldAlgebra + Copy> Sub for SepticExtension<F> {
    type Output = Self;
    /// Owned - owned forwards to the by-reference implementation.
    fn sub(self, other: Self) -> Self {
        &self - &other
    }
}
impl<F: Field> Add<F> for &SepticExtension<F> {
    type Output = SepticExtension<F>;
    /// Adds a base-field scalar to the constant (z^0) coefficient.
    fn add(self, other: F) -> Self::Output {
        let mut out = self.clone();
        out.0[0] += other;
        out
    }
}
impl<F: Field> Add<F> for SepticExtension<F> {
    type Output = SepticExtension<F>;
    /// Owned variant forwards to the borrowing implementation.
    fn add(self, other: F) -> Self::Output {
        &self + other
    }
}
impl<F: Field> Mul<F> for &SepticExtension<F> {
    type Output = SepticExtension<F>;
    /// Scales every coefficient by a base-field scalar.
    fn mul(self, other: F) -> Self::Output {
        SepticExtension(std::array::from_fn(|i| self.0[i] * other))
    }
}
impl<F: Field> Mul<F> for SepticExtension<F> {
    type Output = SepticExtension<F>;
    /// Owned variant forwards to the borrowing implementation.
    fn mul(self, other: F) -> Self::Output {
        &self * other
    }
}
impl<F: Field> Mul<Self> for &SepticExtension<F> {
    type Output = SepticExtension<F>;
    // Schoolbook polynomial multiplication with on-the-fly reduction modulo
    // z^7 - 2z - 5: any degree-(7+k) term 't' is folded back as
    // t*z^(7+k) = 5t*z^k + 2t*z^(k+1).
    fn mul(self, other: Self) -> Self::Output {
        let mut result = [F::ZERO; 7];
        let five = F::from_canonical_u32(5);
        let two = F::from_canonical_u32(2);
        for i in 0..7 {
            for j in 0..7 {
                let term = self.0[i] * other.0[j];
                let mut index = i + j;
                if index < 7 {
                    result[index] += term;
                } else {
                    // index is at most 12, so after -7 it is at most 5 and
                    // index + 1 stays in bounds
                    index -= 7;
                    // x^7 = 2x + 5
                    result[index] += five * term;
                    result[index + 1] += two * term;
                }
            }
        }
        SepticExtension(result)
    }
}
impl<F: Field> Mul for SepticExtension<F> {
    type Output = Self;
    /// Owned * owned forwards to the by-reference implementation.
    fn mul(self, other: Self) -> Self {
        &self * &other
    }
}
impl<F: Field> Mul<&Self> for SepticExtension<F> {
    type Output = Self;
    /// Owned * borrowed forwards to the by-reference implementation.
    fn mul(self, other: &Self) -> Self {
        &self * other
    }
}
impl<F: Field> MulAssign<Self> for SepticExtension<F> {
    /// In-place multiplication: replaces `self` with `self * other`.
    fn mul_assign(&mut self, other: Self) {
        let product = &*self * &other;
        *self = product;
    }
}
// Septic extension element whose 7 coefficients are circuit expressions
// rather than concrete field values (see `new`, which enforces length 7).
#[derive(Clone, Debug)]
pub struct SymbolicSepticExtension<E: ExtensionField>(pub Vec<Expression<E>>);
impl<E: ExtensionField> SymbolicSepticExtension<E> {
    /// Multiplies every coefficient expression by a constant scalar.
    pub fn mul_scalar(&self, scalar: Either<E::BaseField, E>) -> Self {
        Self(
            self.0
                .iter()
                .cloned()
                .map(|coeff| coeff * Expression::Constant(scalar))
                .collect(),
        )
    }
    /// Adds a constant scalar to every coefficient expression.
    pub fn add_scalar(&self, scalar: Either<E::BaseField, E>) -> Self {
        Self(
            self.0
                .iter()
                .cloned()
                .map(|coeff| coeff + Expression::Constant(scalar))
                .collect(),
        )
    }
}
impl<E: ExtensionField> Add<Self> for &SymbolicSepticExtension<E> {
    type Output = SymbolicSepticExtension<E>;
    /// Coefficient-wise addition of two symbolic septic elements.
    fn add(self, other: Self) -> Self::Output {
        let coeffs = self
            .0
            .iter()
            .zip(other.0.iter())
            .map(|(lhs, rhs)| lhs.clone() + rhs.clone())
            .collect();
        SymbolicSepticExtension(coeffs)
    }
}
impl<E: ExtensionField> Add<&Self> for SymbolicSepticExtension<E> {
    type Output = Self;
    /// Owned + borrowed forwards to the fully-borrowed implementation.
    fn add(self, other: &Self) -> Self {
        &self + other
    }
}
impl<E: ExtensionField> Add for SymbolicSepticExtension<E> {
    type Output = Self;
    /// Owned + owned forwards to the fully-borrowed implementation.
    fn add(self, other: Self) -> Self {
        &self + &other
    }
}
impl<E: ExtensionField> Sub<Self> for &SymbolicSepticExtension<E> {
    type Output = SymbolicSepticExtension<E>;
    /// Coefficient-wise subtraction of two symbolic septic elements.
    fn sub(self, other: Self) -> Self::Output {
        let coeffs = self
            .0
            .iter()
            .zip(other.0.iter())
            .map(|(lhs, rhs)| lhs.clone() - rhs.clone())
            .collect();
        SymbolicSepticExtension(coeffs)
    }
}
impl<E: ExtensionField> Sub<&Self> for SymbolicSepticExtension<E> {
    type Output = Self;
    /// Owned - borrowed forwards to the fully-borrowed implementation.
    fn sub(self, other: &Self) -> Self {
        &self - other
    }
}
impl<E: ExtensionField> Sub for SymbolicSepticExtension<E> {
    type Output = Self;
    /// Owned - owned forwards to the fully-borrowed implementation.
    fn sub(self, other: Self) -> Self {
        &self - &other
    }
}
impl<E: ExtensionField> Mul<Self> for &SymbolicSepticExtension<E> {
    type Output = SymbolicSepticExtension<E>;
    // Symbolic mirror of the concrete SepticExtension product: schoolbook
    // multiplication of coefficient expressions with reduction by z^7 = 2z + 5.
    fn mul(self, other: Self) -> Self::Output {
        let mut result = vec![Expression::Constant(Either::Left(E::BaseField::ZERO)); 7];
        let five = Expression::Constant(Either::Left(E::BaseField::from_canonical_u32(5)));
        let two = Expression::Constant(Either::Left(E::BaseField::from_canonical_u32(2)));
        for i in 0..7 {
            for j in 0..7 {
                let term = self.0[i].clone() * other.0[j].clone();
                let mut index = i + j;
                if index < 7 {
                    result[index] += term;
                } else {
                    // index <= 12, so after -7 both index and index + 1 are in bounds
                    index -= 7;
                    // x^7 = 2x + 5
                    result[index] += five.clone() * term.clone();
                    result[index + 1] += two.clone() * term.clone();
                }
            }
        }
        SymbolicSepticExtension(result)
    }
}
impl<E: ExtensionField> Mul<&Self> for SymbolicSepticExtension<E> {
    type Output = Self;
    /// Owned * borrowed forwards to the fully-borrowed implementation.
    fn mul(self, other: &Self) -> Self {
        &self * other
    }
}
impl<E: ExtensionField> Mul for SymbolicSepticExtension<E> {
    type Output = Self;
    /// Owned * owned forwards to the fully-borrowed implementation.
    fn mul(self, other: Self) -> Self {
        &self * &other
    }
}
impl<E: ExtensionField> Mul<&Expression<E>> for SymbolicSepticExtension<E> {
    type Output = SymbolicSepticExtension<E>;
    /// Scales every coefficient by the given expression.
    fn mul(self, other: &Expression<E>) -> Self::Output {
        let scaled = self
            .0
            .iter()
            .map(|coeff| coeff.clone() * other.clone())
            .collect();
        SymbolicSepticExtension(scaled)
    }
}
impl<E: ExtensionField> Mul<Expression<E>> for SymbolicSepticExtension<E> {
    type Output = SymbolicSepticExtension<E>;
    /// Owned-expression variant forwards to the borrowing implementation.
    fn mul(self, other: Expression<E>) -> Self::Output {
        self * &other
    }
}
impl<E: ExtensionField> SymbolicSepticExtension<E> {
    // Wraps exactly 7 coefficient expressions; panics on any other length.
    pub fn new(exprs: Vec<Expression<E>>) -> Self {
        assert!(
            exprs.len() == 7,
            "exprs length must be 7, but got {}",
            exprs.len()
        );
        Self(exprs)
    }
    // Returns a copy of the underlying coefficient expressions.
    pub fn to_exprs(&self) -> Vec<Expression<E>> {
        self.0.clone()
    }
}
/// A point on the short Weierstrass curve defined by
/// y^2 = x^3 + 2x + 26z^5
/// over the extension field F[z] / (z^7 - 2z - 5).
///
/// Note that
/// 1. The curve's cofactor is 1
/// 2. The curve's order is a large prime number of 31x7 bits
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize, Hash)]
pub struct SepticPoint<F> {
    // affine x-coordinate (meaningless when `is_infinity` is set)
    pub x: SepticExtension<F>,
    // affine y-coordinate (meaningless when `is_infinity` is set)
    pub y: SepticExtension<F>,
    // the group identity; encoded throughout this file with x = y = 0
    pub is_infinity: bool,
}
impl<F: Field> SepticPoint<F> {
// if there exists y such that (x, y) is on the curve, return one of them
pub fn from_x(x: SepticExtension<F>) -> Option<Self> {
let b: SepticExtension<F> = [0, 0, 0, 0, 0, 26, 0].into();
let a: F = F::from_canonical_u32(2);
let y2 = x.square() * &x + (&x * a) + &b;
if y2.is_square() {
let y = y2.sqrt().unwrap();
Some(Self {
x,
y,
is_infinity: false,
})
} else {
None
}
}
pub fn from_affine(x: SepticExtension<F>, y: SepticExtension<F>) -> Self {
let is_infinity = x.is_zero() && y.is_zero();
Self { x, y, is_infinity }
}
pub fn double(&self) -> Self {
let a = F::from_canonical_u32(2);
let three = F::from_canonical_u32(3);
let two = F::from_canonical_u32(2);
let x1 = &self.x;
let y1 = &self.y;
let x1_sqr = x1.square();
// x3 = (3*x1^2 + a)^2 / (2*y1)^2 - x1 - x1
let slope = (x1_sqr * three + a) * (y1 * two).inverse().unwrap();
let x3 = slope.square() - x1 - x1;
// y3 = slope * (x1 - x3) - y1
let y3 = slope * (x1 - &x3) - y1;
Self {
x: x3,
y: y3,
is_infinity: false,
}
}
}
impl<F: Field> Default for SepticPoint<F> {
    // The default point is the point at infinity, encoded as (0, 0).
    fn default() -> Self {
        Self {
            x: SepticExtension::zero(),
            y: SepticExtension::zero(),
            is_infinity: true,
        }
    }
}
impl<F: Field> Neg for SepticPoint<F> {
    type Output = SepticPoint<F>;
    /// Negation flips the sign of `y`; the point at infinity is its own negative.
    fn neg(self) -> Self::Output {
        match self.is_infinity {
            true => self,
            false => Self {
                x: self.x,
                y: -self.y,
                is_infinity: false,
            },
        }
    }
}
impl<F: Field> Add<Self> for SepticPoint<F> {
    type Output = Self;
    // Affine group law: identity short-circuits, equal points double, inverse
    // pairs return the identity, and the generic case uses the chord slope.
    fn add(self, other: Self) -> Self {
        if self.is_infinity {
            return other;
        }
        if other.is_infinity {
            return self;
        }
        if self.x == other.x {
            if self.y == other.y {
                return self.double();
            } else {
                // same x with different y must be an inverse pair (y2 = -y1)
                assert!((self.y + other.y).is_zero());
                return Self {
                    x: SepticExtension::zero(),
                    y: SepticExtension::zero(),
                    is_infinity: true,
                };
            }
        }
        // slope = (y2 - y1) / (x2 - x1); x1 != x2, so the inverse exists
        let slope = (other.y - &self.y) * (other.x.clone() - &self.x).inverse().unwrap();
        let x = slope.square() - (&self.x + &other.x);
        let y = slope * (self.x - &x) - self.y;
        Self {
            x,
            y,
            is_infinity: false,
        }
    }
}
impl<F: Field> Sum<Self> for SepticPoint<F> {
    /// Folds the points with group addition, starting from the identity.
    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
        let mut acc = Self::default();
        for p in iter {
            acc = acc + p;
        }
        acc
    }
}
impl<F: Field> SepticPoint<F> {
    // Checks the affine curve equation y^2 = x^3 + 2x + 26z^5.
    pub fn is_on_curve(&self) -> bool {
        // the point at infinity (encoded as (0, 0)) is on the curve by convention
        if self.is_infinity && self.x.is_zero() && self.y.is_zero() {
            return true;
        }
        let b: SepticExtension<F> = [0, 0, 0, 0, 0, 26, 0].into();
        let a: F = F::from_canonical_u32(2);
        self.y.square() == self.x.square() * &self.x + (&self.x * a) + b
    }
    // The group identity (same as `Default`).
    pub fn point_at_infinity() -> Self {
        Self::default()
    }
}
impl<F: Field + FromUniformBytes> SepticPoint<F> {
    /// Rejection-samples a random curve point: draw random abscissas until one
    /// lies on the curve.
    pub fn random(mut rng: impl RngCore) -> Self {
        loop {
            let candidate = SepticExtension::random(&mut rng);
            match Self::from_x(candidate) {
                Some(point) => return point,
                None => continue,
            }
        }
    }
}
// A curve point in Jacobian projective coordinates (X, Y, Z), representing the
// affine point (X/Z^2, Y/Z^3); Z = 0 encodes the point at infinity.
#[derive(Clone, Debug, PartialEq, Serialize, Deserialize)]
pub struct SepticJacobianPoint<F> {
    pub x: SepticExtension<F>,
    pub y: SepticExtension<F>,
    pub z: SepticExtension<F>,
}
impl<F: Field> From<SepticPoint<F>> for SepticJacobianPoint<F> {
    /// Lifts an affine point to Jacobian coordinates (z = 1); the point at
    /// infinity maps to the canonical (0 : 1 : 0) representation.
    fn from(p: SepticPoint<F>) -> Self {
        match p.is_infinity {
            true => Self::default(),
            false => Self {
                x: p.x,
                y: p.y,
                z: SepticExtension::one(),
            },
        }
    }
}
impl<F: Field> Default for SepticJacobianPoint<F> {
    fn default() -> Self {
        // return the point at infinity, using the conventional Jacobian
        // encoding (0 : 1 : 0)
        Self {
            x: SepticExtension::zero(),
            y: SepticExtension::one(),
            z: SepticExtension::zero(),
        }
    }
}
impl<F: Field> SepticJacobianPoint<F> {
    // The group identity (same as `Default`).
    pub fn point_at_infinity() -> Self {
        Self::default()
    }
    // Checks the Jacobian form of the curve equation.
    pub fn is_on_curve(&self) -> bool {
        // z = 0 is the point at infinity: only (0 : y : 0) with y != 0 is valid
        if self.z.is_zero() {
            return self.x.is_zero() && !self.y.is_zero();
        }
        let b: SepticExtension<F> = [0, 0, 0, 0, 0, 26, 0].into();
        let a: F = F::from_canonical_u32(2);
        let z2 = self.z.square();
        let z4 = z2.square();
        let z6 = &z4 * &z2;
        // y^2 = x^3 + 2x*z^4 + b*z^6
        self.y.square() == self.x.square() * &self.x + (&self.x * a * z4) + (b * &z6)
    }
    // Converts back to affine coordinates: (x/z^2, y/z^3).
    pub fn into_affine(self) -> SepticPoint<F> {
        if self.z.is_zero() {
            return SepticPoint::point_at_infinity();
        }
        // z != 0, so the inverse exists
        let z_inv = self.z.inverse().unwrap();
        let z_inv2 = z_inv.square();
        let z_inv3 = &z_inv2 * &z_inv;
        let x = &self.x * &z_inv2;
        let y = &self.y * &z_inv3;
        SepticPoint {
            x,
            y,
            is_infinity: false,
        }
    }
}
impl<F: Field> Add<Self> for &SepticJacobianPoint<F> {
    type Output = SepticJacobianPoint<F>;
    fn add(self, rhs: Self) -> Self::Output {
        // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian-3.html#addition-add-2007-bl
        // identity short-circuits (z = 0 encodes infinity)
        if self.z.is_zero() {
            return rhs.clone();
        }
        if rhs.z.is_zero() {
            return self.clone();
        }
        // compare the two points' x and y in a common projective scale
        let z1z1 = self.z.square();
        let z2z2 = rhs.z.square();
        let u1 = &self.x * &z2z2;
        let u2 = &rhs.x * &z1z1;
        let s1 = &self.y * &z2z2 * &rhs.z;
        let s2 = &rhs.y * &z1z1 * &self.z;
        if u1 == u2 {
            if s1 == s2 {
                // same point: the addition formulas degenerate, so double instead
                return self.double();
            } else {
                // inverse pair: sum is the identity
                return SepticJacobianPoint::point_at_infinity();
            }
        }
        // add-2007-bl intermediate values
        let two = F::from_canonical_u32(2);
        let h = u2 - &u1;
        let i = (&h * two).square();
        let j = &h * &i;
        let r = (s2 - &s1) * two;
        let v = u1 * &i;
        let x3 = r.square() - &j - &v * two;
        let y3 = r * (v - &x3) - s1 * &j * two;
        let z3 = (&self.z + &rhs.z).square() - &z1z1 - &z2z2;
        let z3 = z3 * h;
        Self::Output {
            x: x3,
            y: y3,
            z: z3,
        }
    }
}
impl<F: Field> Add<Self> for SepticJacobianPoint<F> {
    type Output = SepticJacobianPoint<F>;
    /// Owned addition forwards to the borrowing implementation.
    fn add(self, rhs: Self) -> Self::Output {
        let (lhs, rhs) = (&self, &rhs);
        lhs + rhs
    }
}
impl<F: Field> SepticJacobianPoint<F> {
    // Point doubling in Jacobian coordinates (never divides, so no inversion).
    pub fn double(&self) -> Self {
        // https://hyperelliptic.org/EFD/g1p/auto-shortw-jacobian.html#doubling-dbl-2007-bl
        // y = 0 means self.order = 2, so its double is the identity
        if self.y.is_zero() {
            return SepticJacobianPoint::point_at_infinity();
        }
        let two = F::from_canonical_u32(2);
        let three = F::from_canonical_u32(3);
        let eight = F::from_canonical_u32(8);
        let a = F::from_canonical_u32(2); // The curve coefficient a
        // xx = x1^2
        let xx = self.x.square();
        // yy = y1^2
        let yy = self.y.square();
        // yyyy = yy^2
        let yyyy = yy.square();
        // zz = z1^2
        let zz = self.z.square();
        // S = 2*((x1 + y1^2)^2 - x1^2 - y1^4)
        let s = (&self.x + &yy).square() - &xx - &yyyy;
        let s = s * two;
        // M = 3*x1^2 + a*z1^4
        let m = &xx * three + zz.square() * a;
        // T = M^2 - 2*S
        let t = m.square() - &s * two;
        // Y3 = M*(S-T)-8*y^4
        let y3 = m * (&s - &t) - &yyyy * eight;
        // X3 = T
        let x3 = t;
        // Z3 = (y1+z1)^2 - y1^2 - z1^2
        let z3 = (&self.y + &self.z).square() - &yy - &zz;
        Self {
            x: x3,
            y: y3,
            z: z3,
        }
    }
}
impl<F: Field> Sum<Self> for SepticJacobianPoint<F> {
    /// Folds the points with group addition, starting from the identity.
    fn sum<I: Iterator<Item = Self>>(iter: I) -> Self {
        let mut acc = Self::default();
        for p in iter {
            acc = acc + p;
        }
        acc
    }
}
impl<F: Field + FromUniformBytes> SepticJacobianPoint<F> {
    /// Samples a random affine point and lifts it to Jacobian coordinates.
    pub fn random(rng: impl RngCore) -> Self {
        Self::from(SepticPoint::random(rng))
    }
}
#[cfg(test)]
mod tests {
    use super::SepticExtension;
    use crate::scheme::septic_curve::{SepticJacobianPoint, SepticPoint};
    use p3::{babybear::BabyBear, field::Field};
    use rand::thread_rng;
    type F = BabyBear;
    // Covers extension-field multiplication, Frobenius maps, norm, and sqrt.
    #[test]
    fn test_septic_extension_arithmetic() {
        let mut rng = thread_rng();
        // a = z, b = z^6 + z^5 + z^4
        let a: SepticExtension<F> = SepticExtension::from([0, 1, 0, 0, 0, 0, 0]);
        let b: SepticExtension<F> = SepticExtension::from([0, 0, 0, 0, 1, 1, 1]);
        // a*b = z^5 + z^6 + z^7 = 5 + 2z + z^5 + z^6 after reducing z^7 = 2z + 5
        let c = SepticExtension::from([5, 2, 0, 0, 0, 1, 1]);
        assert_eq!(a * b, c);
        // a^(p^2) = (a^p)^p
        assert_eq!(c.double_frobenius(), c.frobenius().frobenius());
        // norm_sub(a) * a must be in F (all non-constant coefficients vanish)
        let norm = c.norm_sub() * &c;
        assert!(norm.0[1..7].iter().all(|x| x.is_zero()));
        // sqrt of a square must recover the root up to sign
        let d: SepticExtension<F> = SepticExtension::random(&mut rng);
        let e = d.square();
        assert!(e.is_square());
        let f = e.sqrt().unwrap();
        let zero = SepticExtension::zero();
        assert!(f == d || f == zero - d);
    }
    // Cross-checks affine and Jacobian addition/doubling against each other.
    #[test]
    fn test_septic_curve_arithmetic() {
        let mut rng = thread_rng();
        let p1 = SepticPoint::<F>::random(&mut rng);
        let p2 = SepticPoint::<F>::random(&mut rng);
        let j1 = SepticJacobianPoint::from(p1.clone());
        let j2 = SepticJacobianPoint::from(p2.clone());
        let p3 = p1 + p2;
        let j3 = &j1 + &j2;
        assert!(j1.is_on_curve());
        assert!(j2.is_on_curve());
        assert!(j3.is_on_curve());
        assert!(p3.is_on_curve());
        assert_eq!(p3, j3.clone().into_affine());
        // 2*p3 - p3 = p3
        let p4 = p3.double();
        assert_eq!((-p3.clone() + p4.clone()), p3);
        // 2*j3 = 2*p3
        let j4 = j3.double();
        assert!(j4.is_on_curve());
        assert_eq!(j4.into_affine(), p4);
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/scheme/tests.rs | ceno_zkvm/src/scheme/tests.rs | use crate::{
circuit_builder::CircuitBuilder,
error::ZKVMError,
instructions::{
Instruction,
riscv::{arith::AddInstruction, ecall::HaltInstruction},
},
scheme::{
constants::DYNAMIC_RANGE_MAX_BITS,
cpu::CpuTowerProver,
create_backend, create_prover,
hal::{ProofInput, TowerProverSpec},
},
structs::{ProgramParams, RAMType, ZKVMConstraintSystem, ZKVMFixedTraces, ZKVMWitnesses},
tables::ProgramTableCircuit,
witness::{LkMultiplicity, set_val},
};
use ceno_emul::{
CENO_PLATFORM,
InsnKind::{ADD, ECALL},
Platform, Program, StepRecord, VMState, encode_rv32,
};
use ff_ext::{ExtensionField, FieldInto, FromUniformBytes, GoldilocksExt2};
use gkr_iop::cpu::default_backend_config;
#[cfg(feature = "gpu")]
use gkr_iop::gpu::{MultilinearExtensionGpu, gpu_prover::*};
use multilinear_extensions::{ToExpr, WitIn, mle::MultilinearExtension};
use std::marker::PhantomData;
#[cfg(feature = "gpu")]
use std::sync::Arc;
#[cfg(debug_assertions)]
use ff_ext::{Instrumented, PoseidonField};
use super::{
PublicValues,
constants::MAX_NUM_VARIABLES,
prover::ZKVMProver,
utils::infer_tower_product_witness,
verifier::{TowerVerify, ZKVMVerifier},
};
use crate::{
e2e::ShardContext, scheme::constants::NUM_FANIN, structs::PointAndEval,
tables::DynamicRangeTableCircuit,
};
use itertools::Itertools;
use mpcs::{
PolynomialCommitmentScheme, SecurityLevel, SecurityLevel::Conjecture100bits, WhirDefault,
};
use multilinear_extensions::{mle::IntoMLE, util::ceil_log2};
use p3::field::FieldAlgebra;
use rand::thread_rng;
use transcript::{BasicTranscript, Transcript};
/// Witness layout for the synthetic test circuit below: a single
/// register-id column shared by all read/write/lookup records.
struct TestConfig {
    pub(crate) reg_id: WitIn,
}
/// Minimal opcode circuit used only by tests to exercise combinations of
/// `RW` read/write record pairs and `L` lookup records in one constraint
/// system.
struct TestCircuit<E: ExtensionField, const RW: usize, const L: usize> {
    phantom: PhantomData<E>,
}
/// Synthetic instruction: `RW` register read/write pairs plus `L` 16-bit
/// range lookups over a single witness column.
impl<E: ExtensionField, const L: usize, const RW: usize> Instruction<E> for TestCircuit<E, RW, L> {
    type InstructionConfig = TestConfig;

    fn name() -> String {
        String::from("TEST")
    }

    fn construct_circuit(
        cb: &mut CircuitBuilder<E>,
        _params: &ProgramParams,
    ) -> Result<Self::InstructionConfig, ZKVMError> {
        let reg_id = cb.create_witin(|| "reg_id");
        // Emit RW identical read/write record pairs on the same register cell.
        for _ in 0..RW {
            let record = vec![1.into(), reg_id.expr()];
            cb.read_record(|| "read", RAMType::Register, record.clone())?;
            cb.write_record(|| "write", RAMType::Register, record)?;
        }
        // Emit L identical 16-bit range checks (lookup expressions).
        for _ in 0..L {
            cb.assert_ux::<_, _, 16>(|| "regid_in_range", reg_id.expr())?;
        }
        // Sanity-check the constraint system recorded exactly what we emitted.
        assert_eq!(cb.cs.lk_expressions.len(), L);
        assert_eq!(cb.cs.r_expressions.len(), RW);
        assert_eq!(cb.cs.w_expressions.len(), RW);
        Ok(TestConfig { reg_id })
    }

    fn assign_instance(
        config: &Self::InstructionConfig,
        _shard_ctx: &mut ShardContext,
        instance: &mut [E::BaseField],
        _lk_multiplicity: &mut LkMultiplicity,
        _step: &StepRecord,
    ) -> Result<(), ZKVMError> {
        // Every row assigns the constant 1 to the single witness column.
        set_val!(instance, config.reg_id, E::BaseField::ONE);
        Ok(())
    }
}
#[test]
fn test_rw_lk_expression_combination() {
    type E = GoldilocksExt2;
    type Pcs = WhirDefault<E>;

    /// Build a `TestCircuit` with `RW` read/write record pairs and `L`
    /// lookups, prove one chip proof over mock witnesses, then verify it,
    /// asserting prover and verifier transcripts derive identical challenges.
    fn test_rw_lk_expression_combination_inner<
        const L: usize,
        const RW: usize,
        E: ExtensionField,
        Pcs: PolynomialCommitmentScheme<E> + 'static,
    >() {
        // configure
        let (max_num_variables, security_level) = (8, Conjecture100bits);
        let backend = create_backend::<E, Pcs>(max_num_variables, security_level);
        let device = create_prover(backend.clone());
        let name = TestCircuit::<E, RW, L>::name();
        let mut zkvm_cs = ZKVMConstraintSystem::default();
        let config = zkvm_cs.register_opcode_circuit::<TestCircuit<E, RW, L>>();
        let mut shard_ctx = ShardContext::default();
        // generate fixed traces
        let mut zkvm_fixed_traces = ZKVMFixedTraces::default();
        zkvm_fixed_traces.register_opcode_circuit::<TestCircuit<E, RW, L>>(&zkvm_cs, &config);
        // keygen
        let pk = zkvm_cs
            .clone()
            .key_gen::<Pcs>(
                device.backend.pp.clone(),
                device.backend.vp.clone(),
                0,
                zkvm_fixed_traces,
            )
            .unwrap();
        let vk = pk.get_vk_slow();
        // generate mock witness
        let num_instances = 1 << 8;
        let mut zkvm_witness = ZKVMWitnesses::default();
        zkvm_witness
            .assign_opcode_circuit::<TestCircuit<E, RW, L>>(
                &zkvm_cs,
                &mut shard_ctx,
                &config,
                vec![&StepRecord::default(); num_instances],
            )
            .unwrap();
        // get proof
        let prover = ZKVMProver::new_with_single_shard(pk, device);
        let mut transcript = BasicTranscript::new(b"test");
        let mut rmm: Vec<_> = zkvm_witness
            .into_iter_sorted()
            .next()
            .unwrap()
            .witness_rmms
            .into();
        // after the first remove(0), the structural rmm sits at index 0
        let (rmm, structural_rmm) = (rmm.remove(0), rmm.remove(0));
        let wits_in = rmm.to_mles();
        let structural_wits_in = structural_rmm.to_mles();
        // commit to main traces
        let commit_with_witness =
            Pcs::batch_commit_and_write(&prover.pk.pp, vec![rmm], &mut transcript).unwrap();
        let witin_commit = Pcs::get_pure_commitment(&commit_with_witness);
        // TODO: better way to handle this
        #[cfg(not(feature = "gpu"))]
        let (wits_in, structural_in) = {
            (
                wits_in.into_iter().map(|v| v.into()).collect_vec(),
                structural_wits_in
                    .into_iter()
                    .map(|v| v.into())
                    .collect_vec(),
            )
        };
        #[cfg(feature = "gpu")]
        let (wits_in, structural_in) = {
            let cuda_hal = get_cuda_hal().unwrap();
            (
                wits_in
                    .iter()
                    .map(|v| Arc::new(MultilinearExtensionGpu::from_ceno(&cuda_hal, v)))
                    .collect_vec(),
                // BUGFIX: this tuple element previously read
                // `structural_in = structural_wits_in ...` — an assignment to a
                // not-yet-declared binding inside a tuple expression, which
                // does not compile under the "gpu" feature. The element is the
                // mapped collection itself.
                structural_wits_in
                    .iter()
                    .map(|v| Arc::new(MultilinearExtensionGpu::from_ceno(&cuda_hal, v)))
                    .collect_vec(),
            )
        };
        let prover_challenges = [
            transcript.read_challenge().elements,
            transcript.read_challenge().elements,
        ];
        let input = ProofInput {
            fixed: vec![],
            witness: wits_in,
            structural_witness: structural_in,
            public_input: vec![],
            pub_io_evals: vec![],
            num_instances: vec![num_instances],
            has_ecc_ops: false,
        };
        let (proof, _, _) = prover
            .create_chip_proof(
                name.as_str(),
                prover.pk.circuit_pks.get(&name).unwrap(),
                input,
                &mut transcript,
                &prover_challenges,
            )
            .expect("create_proof failed");
        // verify proof
        let verifier = ZKVMVerifier::new(vk.clone());
        let mut v_transcript = BasicTranscript::new(b"test");
        // write commitment into transcript and derive challenges from it
        Pcs::write_commitment(&witin_commit, &mut v_transcript).unwrap();
        let verifier_challenges = [
            v_transcript.read_challenge().elements,
            v_transcript.read_challenge().elements,
        ];
        // the two transcripts saw the same data, so challenges must agree
        assert_eq!(prover_challenges, verifier_challenges);
        #[cfg(debug_assertions)]
        {
            Instrumented::<<<E as ExtensionField>::BaseField as PoseidonField>::P>::clear_metrics();
        }
        verifier
            .verify_chip_proof(
                name.as_str(),
                verifier.vk.circuit_vks.get(&name).unwrap(),
                &proof,
                &[],
                &[],
                &mut v_transcript,
                NUM_FANIN,
                &PointAndEval::default(),
                &verifier_challenges,
            )
            .expect("verifier failed");
        #[cfg(debug_assertions)]
        {
            println!(
                "instrumented metrics {}",
                Instrumented::<<<E as ExtensionField>::BaseField as PoseidonField>::P>::format_metrics(
                )
            );
        }
    }

    // <lookup count, rw count>
    test_rw_lk_expression_combination_inner::<19, 17, E, Pcs>();
    test_rw_lk_expression_combination_inner::<61, 17, E, Pcs>();
    test_rw_lk_expression_combination_inner::<17, 61, E, Pcs>();
}
/// Four-instruction mock program: one ADD followed by three ECALLs.
/// (The e2e test below treats the ECALL whose rs1 equals the halt code as
/// the halting instruction.)
const PROGRAM_CODE: [ceno_emul::Instruction; 4] = [
    encode_rv32(ADD, 4, 1, 4, 0),
    encode_rv32(ECALL, 0, 0, 0, 0),
    encode_rv32(ECALL, 0, 0, 0, 0),
    encode_rv32(ECALL, 0, 0, 0, 0),
];
/// End-to-end proof for a program whose ADD and HALT circuits each have
/// exactly one instance: setup → keygen → execute → assign → prove → verify.
#[ignore = "this case is already tested in riscv_example as ecall_halt has only one instance"]
#[test]
fn test_single_add_instance_e2e() {
    type E = GoldilocksExt2;
    type Pcs = WhirDefault<E>;
    // set up program
    let program = Program::new(
        CENO_PLATFORM.pc_base(),
        CENO_PLATFORM.pc_base(),
        CENO_PLATFORM.heap.start,
        PROGRAM_CODE.to_vec(),
        Default::default(),
    );
    // PCS setup/trim sized by the global maximum number of variables
    Pcs::setup(1 << MAX_NUM_VARIABLES, SecurityLevel::default()).expect("Basefold PCS setup");
    let (pp, vp) = Pcs::trim((), 1 << MAX_NUM_VARIABLES).expect("Basefold trim");
    let mut zkvm_cs = ZKVMConstraintSystem::default();
    let mut shard_ctx = ShardContext::default();
    // opcode circuits
    let add_config = zkvm_cs.register_opcode_circuit::<AddInstruction<E>>();
    let halt_config = zkvm_cs.register_opcode_circuit::<HaltInstruction<E>>();
    let dynamic_range_config =
        zkvm_cs.register_table_circuit::<DynamicRangeTableCircuit<E, DYNAMIC_RANGE_MAX_BITS>>();
    let prog_config = zkvm_cs.register_table_circuit::<ProgramTableCircuit<E>>();
    // register the fixed traces for every circuit registered above
    let mut zkvm_fixed_traces = ZKVMFixedTraces::default();
    zkvm_fixed_traces.register_opcode_circuit::<AddInstruction<E>>(&zkvm_cs, &add_config);
    zkvm_fixed_traces.register_opcode_circuit::<HaltInstruction<E>>(&zkvm_cs, &halt_config);
    zkvm_fixed_traces
        .register_table_circuit::<DynamicRangeTableCircuit<E, DYNAMIC_RANGE_MAX_BITS>>(
            &zkvm_cs,
            &dynamic_range_config,
            &(),
        );
    zkvm_fixed_traces.register_table_circuit::<ProgramTableCircuit<E>>(
        &zkvm_cs,
        &prog_config,
        &program,
    );
    let pk = zkvm_cs
        .clone()
        .key_gen::<Pcs>(pp, vp, program.entry, zkvm_fixed_traces)
        .expect("keygen failed");
    let vk = pk.get_vk_slow();
    // single instance
    let mut vm = VMState::new(CENO_PLATFORM.clone(), program.clone().into());
    let all_records = vm
        .iter_until_halt()
        .collect::<Result<Vec<StepRecord>, _>>()
        .expect("vm exec failed")
        .into_iter()
        .collect::<Vec<_>>();
    // split execution records per circuit; only the halting ECALL is kept
    let mut add_records = vec![];
    let mut halt_records = vec![];
    all_records.iter().for_each(|record| {
        let kind = record.insn().kind;
        match kind {
            ADD => add_records.push(record),
            ECALL => {
                if record.rs1().unwrap().value == Platform::ecall_halt() {
                    halt_records.push(record);
                }
            }
            _ => {}
        }
    });
    // the scenario under test: exactly one instance per opcode circuit
    assert_eq!(add_records.len(), 1);
    assert_eq!(halt_records.len(), 1);
    // proving
    let (max_num_variables, security_level) = default_backend_config();
    let backend = create_backend::<E, Pcs>(max_num_variables, security_level);
    let device = create_prover(backend);
    let prover = ZKVMProver::new_with_single_shard(pk, device);
    let verifier = ZKVMVerifier::new(vk);
    let mut zkvm_witness = ZKVMWitnesses::default();
    // assign opcode circuits
    zkvm_witness
        .assign_opcode_circuit::<AddInstruction<E>>(
            &zkvm_cs,
            &mut shard_ctx,
            &add_config,
            add_records,
        )
        .unwrap();
    zkvm_witness
        .assign_opcode_circuit::<HaltInstruction<E>>(
            &zkvm_cs,
            &mut shard_ctx,
            &halt_config,
            halt_records,
        )
        .unwrap();
    // lookup multiplicities must be final before table circuits are assigned
    zkvm_witness.finalize_lk_multiplicities();
    zkvm_witness
        .assign_table_circuit::<DynamicRangeTableCircuit<E, DYNAMIC_RANGE_MAX_BITS>>(
            &zkvm_cs,
            &dynamic_range_config,
            &(),
        )
        .unwrap();
    zkvm_witness
        .assign_table_circuit::<ProgramTableCircuit<E>>(&zkvm_cs, &prog_config, &program)
        .unwrap();
    let pi = PublicValues::new(0, 0, 0, 0, 0, 0, 0, 0, 0, 0, vec![0], vec![0; 14]);
    let transcript = BasicTranscript::new(b"riscv");
    let zkvm_proof = prover
        .create_proof(&shard_ctx, zkvm_witness, pi, transcript)
        .expect("create_proof failed");
    println!("encoded zkvm proof {}", &zkvm_proof,);
    #[cfg(debug_assertions)]
    {
        Instrumented::<<<E as ExtensionField>::BaseField as PoseidonField>::P>::clear_metrics();
    }
    // verifier runs on a fresh transcript with the same label
    let transcript = BasicTranscript::new(b"riscv");
    assert!(
        verifier
            .verify_proof(zkvm_proof, transcript)
            .expect("verify proof return with error"),
    );
    #[cfg(debug_assertions)]
    {
        println!(
            "instrumented metrics {}",
            Instrumented::<<<E as ExtensionField>::BaseField as PoseidonField>::P>::format_metrics(
            )
        );
    }
}
/// test various product argument size, starting from minimal leaf size 2
#[test]
fn test_tower_proof_various_prod_size() {
    /// Prove and verify one product-tower instance whose leaf layer has
    /// `leaf_layer_size` (a power of two) random entries, then check the
    /// verifier's final point evaluates the leaf polynomial correctly.
    fn _test_tower_proof_prod_size_2(leaf_layer_size: usize) {
        let num_vars = ceil_log2(leaf_layer_size);
        let mut rng = thread_rng();
        type E = GoldilocksExt2;
        let mut transcript = BasicTranscript::new(b"test_tower_proof");
        let leaf_layer: MultilinearExtension<E> = (0..leaf_layer_size)
            .map(|_| E::random(&mut rng))
            .collect_vec()
            .into_mle();
        // split the leaf layer into the two fan-in halves the tower expects
        let (first, second): (&[E], &[E]) = leaf_layer
            .get_ext_field_vec()
            .split_at(leaf_layer.evaluations().len() / 2);
        let last_layer_splitted_fanin: Vec<MultilinearExtension<E>> =
            vec![first.to_vec().into_mle(), second.to_vec().into_mle()];
        let layers = infer_tower_product_witness(num_vars, last_layer_splitted_fanin, 2);
        let (rt_tower_p, tower_proof) = CpuTowerProver::create_proof::<E, WhirDefault<E>>(
            vec![TowerProverSpec {
                witness: layers.clone(),
            }],
            vec![],
            2,
            &mut transcript,
        );
        // fresh transcript with the same label so challenges line up
        let mut transcript = BasicTranscript::new(b"test_tower_proof");
        let (rt_tower_v, prod_point_and_eval, _, _) = TowerVerify::verify(
            vec![
                layers[0]
                    .iter()
                    .flat_map(|mle| mle.get_ext_field_vec().to_vec())
                    .collect_vec(),
            ],
            vec![],
            &tower_proof,
            vec![num_vars],
            2,
            &mut transcript,
        )
        .unwrap();
        // prover and verifier must land on the same point of the right size
        assert_eq!(rt_tower_p, rt_tower_v);
        assert_eq!(rt_tower_v.len(), num_vars);
        assert_eq!(prod_point_and_eval.len(), 1);
        assert_eq!(
            leaf_layer.evaluate(&rt_tower_v),
            prod_point_and_eval[0].eval
        );
    }
    // leaf sizes 2, 4, ..., 512
    for leaf_layer_size in 1..10 {
        _test_tower_proof_prod_size_2(1 << leaf_layer_size);
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/scheme/prover.rs | ceno_zkvm/src/scheme/prover.rs | use ff_ext::ExtensionField;
use gkr_iop::{
cpu::{CpuBackend, CpuProver},
hal::ProverBackend,
};
use std::{
collections::{BTreeMap, HashMap},
marker::PhantomData,
sync::Arc,
};
use crate::scheme::{constants::SEPTIC_EXTENSION_DEGREE, hal::MainSumcheckEvals};
use either::Either;
use gkr_iop::hal::MultilinearPolynomial;
use itertools::Itertools;
use mpcs::{Point, PolynomialCommitmentScheme};
use multilinear_extensions::{
Expression, Instance,
mle::{IntoMLE, MultilinearExtension},
};
use p3::field::FieldAlgebra;
use std::iter::Iterator;
use sumcheck::{
macros::{entered_span, exit_span},
structs::IOPProverMessage,
};
use tracing::info_span;
use transcript::Transcript;
use super::{PublicValues, ZKVMChipProof, ZKVMProof, hal::ProverDevice};
use crate::{
e2e::ShardContext,
error::ZKVMError,
scheme::{
hal::{DeviceProvingKey, ProofInput},
utils::build_main_witness,
},
structs::{ProvingKey, TowerProofs, ZKVMProvingKey, ZKVMWitnesses},
};
/// Result of `create_chip_proof`: the chip proof, evaluations of the public
/// input polynomials touched by the chip (keyed by pi index), and the
/// opening point of the input polynomials.
type CreateTableProof<E> = (ZKVMChipProof<E>, HashMap<usize, E>, Point<E>);
/// CPU-backed instantiation of [`ZKVMProver`].
pub type ZkVMCpuProver<E, PCS> =
    ZKVMProver<E, PCS, CpuBackend<E, PCS>, CpuProver<CpuBackend<E, PCS>>>;
/// ZKVM prover, generic over the PCS, the prover backend `PB`, and the
/// prover device implementation `PD`.
pub struct ZKVMProver<E: ExtensionField, PCS: PolynomialCommitmentScheme<E>, PB: ProverBackend, PD>
{
    pub pk: Arc<ZKVMProvingKey<E, PCS>>,
    // device-side prover implementation driving commitment/sumcheck work
    device: PD,
    // device_pk might be none if there is no fixed commitment
    device_first_shard_pk: Option<DeviceProvingKey<'static, PB>>,
    // variant used for non-first shards (backed by the no-omc-init fixed
    // commitment — see `create_proof`)
    device_non_first_shard_pk: Option<DeviceProvingKey<'static, PB>>,
    _marker: PhantomData<PB>,
}
impl<
    E: ExtensionField,
    PCS: PolynomialCommitmentScheme<E> + 'static,
    PB: ProverBackend<E = E, Pcs = PCS> + 'static,
    PD: ProverDevice<PB> + 'static,
> ZKVMProver<E, PCS, PB, PD>
{
    /// Build a prover for a single-shard run: only the first-shard device
    /// proving key is materialized (and only when a fixed commitment exists).
    pub fn new_with_single_shard(pk: ZKVMProvingKey<E, PCS>, device: PD) -> Self {
        let pk = Arc::new(pk);
        let device_first_shard_pk = pk
            .as_ref()
            .has_fixed_commitment()
            .then(|| device.transport_proving_key(true, pk.clone()));
        ZKVMProver {
            pk,
            device,
            device_first_shard_pk,
            device_non_first_shard_pk: None,
            _marker: PhantomData,
        }
    }

    /// Build a prover for multi-shard runs: both the first-shard and
    /// non-first-shard device proving keys are materialized when a fixed
    /// commitment exists.
    pub fn new(pk: Arc<ZKVMProvingKey<E, PCS>>, device: PD) -> Self {
        let has_fixed = pk.as_ref().has_fixed_commitment();
        let device_first_shard_pk =
            has_fixed.then(|| device.transport_proving_key(true, pk.clone()));
        let device_non_first_shard_pk =
            has_fixed.then(|| device.transport_proving_key(false, pk.clone()));
        ZKVMProver {
            pk,
            device,
            device_first_shard_pk,
            device_non_first_shard_pk,
            _marker: PhantomData,
        }
    }

    /// Select the device proving key matching the shard position.
    pub fn get_device_proving_key(
        &self,
        shard_ctx: &ShardContext,
    ) -> Option<&DeviceProvingKey<'static, PB>> {
        match shard_ctx.is_first_shard() {
            true => self.device_first_shard_pk.as_ref(),
            false => self.device_non_first_shard_pk.as_ref(),
        }
    }
}
impl<
E: ExtensionField,
PCS: PolynomialCommitmentScheme<E> + 'static,
PB: ProverBackend<E = E, Pcs = PCS> + 'static,
PD: ProverDevice<PB> + 'static,
> ZKVMProver<E, PCS, PB, PD>
{
/// create proof for zkvm execution
#[tracing::instrument(
skip_all,
name = "ZKVM_create_proof",
fields(profiling_1),
level = "trace"
)]
pub fn create_proof(
&self,
shard_ctx: &ShardContext,
witnesses: ZKVMWitnesses<E>,
pi: PublicValues,
mut transcript: impl Transcript<E> + 'static,
) -> Result<ZKVMProof<E, PCS>, ZKVMError> {
info_span!(
"[ceno] create_proof_of_shard",
shard_id = shard_ctx.shard_id
)
.in_scope(|| {
let raw_pi = pi.to_vec::<E>();
let mut pi_evals = ZKVMProof::<E, PCS>::pi_evals(&raw_pi);
let mut chip_proofs = BTreeMap::new();
let span = entered_span!("commit_to_pi", profiling_1 = true);
// including raw public input to transcript
for v in raw_pi.iter().flatten() {
transcript.append_field_element(v);
}
exit_span!(span);
let pi: Vec<MultilinearExtension<E>> =
raw_pi.iter().map(|p| p.to_vec().into_mle()).collect();
// commit to fixed commitment
let span = entered_span!("commit_to_fixed_commit", profiling_1 = true);
if let Some(fixed_commit) = &self.pk.fixed_commit
&& shard_ctx.is_first_shard()
{
PCS::write_commitment(fixed_commit, &mut transcript)
.map_err(ZKVMError::PCSError)?;
} else if let Some(fixed_commit) = &self.pk.fixed_no_omc_init_commit
&& !shard_ctx.is_first_shard()
{
PCS::write_commitment(fixed_commit, &mut transcript)
.map_err(ZKVMError::PCSError)?;
}
exit_span!(span);
// only keep track of circuits that have non-zero instances
for (name, chip_inputs) in &witnesses.witnesses {
let pk = self.pk.circuit_pks.get(name).ok_or(ZKVMError::VKNotFound(
format!("proving key for circuit {} not found", name).into(),
))?;
// include omc init tables iff it's in first shard
if !shard_ctx.is_first_shard() && pk.get_cs().with_omc_init_only() {
continue;
}
// num_instance from witness might include rotation
let num_instances = chip_inputs
.iter()
.flat_map(|chip_input| &chip_input.num_instances)
.collect_vec();
if num_instances.is_empty() {
continue;
}
let circuit_idx = self.pk.circuit_name_to_index.get(name).unwrap();
// write (circuit_idx, num_var) to transcript
transcript.append_field_element(&E::BaseField::from_canonical_usize(*circuit_idx));
for num_instance in num_instances {
transcript
.append_field_element(&E::BaseField::from_canonical_usize(*num_instance));
}
}
// extract chip meta info before consuming witnesses
// (circuit_name, num_instances)
let name_and_instances = witnesses.get_witnesses_name_instance();
let commit_to_traces_span = entered_span!("batch commit to traces", profiling_1 = true);
let mut wits_rmms = BTreeMap::new();
let mut structural_rmms = Vec::with_capacity(name_and_instances.len());
// commit to opcode circuits first and then commit to table circuits, sorted by name
for (i, chip_input) in witnesses.into_iter_sorted().enumerate() {
let [witness_rmm, structural_witness_rmm] = chip_input.witness_rmms;
if witness_rmm.num_instances() > 0 {
wits_rmms.insert(i, witness_rmm);
}
structural_rmms.push(structural_witness_rmm);
}
tracing::debug!(
"witness rmm in {} MB",
wits_rmms
.values()
.map(|v| v.values.len() * std::mem::size_of::<E::BaseField>())
.sum::<usize>() as f64
/ (1024.0 * 1024.0)
);
// commit to witness traces in batch
let (mut witness_mles, witness_data, witin_commit) = info_span!("[ceno] commit_traces")
.in_scope(|| self.device.commit_traces(wits_rmms));
PCS::write_commitment(&witin_commit, &mut transcript).map_err(ZKVMError::PCSError)?;
exit_span!(commit_to_traces_span);
// transfer pk to device
let transfer_pk_span = entered_span!("transfer pk to device", profiling_1 = true);
let mut fixed_mles = self
.get_device_proving_key(shard_ctx)
.map(|dpk| dpk.fixed_mles.clone())
.unwrap_or_default();
exit_span!(transfer_pk_span);
// squeeze two challenges from transcript
let challenges = [
transcript.read_challenge().elements,
transcript.read_challenge().elements,
];
tracing::debug!("global challenges in prover: {:?}", challenges);
let public_input_span = entered_span!("public_input", profiling_1 = true);
let public_input = self.device.transport_mles(&pi);
exit_span!(public_input_span);
let main_proofs_span = entered_span!("main_proofs", profiling_1 = true);
let mut points = Vec::new();
let mut evaluations = Vec::new();
let mut witness_iter = self
.device
.extract_witness_mles(&mut witness_mles, &witness_data);
for ((circuit_name, num_instances), structural_rmm) in name_and_instances
.into_iter()
.zip_eq(structural_rmms.into_iter())
{
let circuit_idx = self
.pk
.circuit_name_to_index
.get(&circuit_name)
.cloned()
.expect("invalid circuit {} not exist in ceno zkvm");
let pk = self.pk.circuit_pks.get(&circuit_name).unwrap();
let cs = pk.get_cs();
if !shard_ctx.is_first_shard() && cs.with_omc_init_only() {
assert!(num_instances.is_empty());
// skip drain respective fixed because we use different set of fixed commitment
continue;
}
if num_instances.is_empty() {
// we need to drain respective fixed when num_instances is 0
if cs.num_fixed() > 0 {
let _ = fixed_mles.drain(..cs.num_fixed()).collect_vec();
}
continue;
}
transcript
.append_field_element(&E::BaseField::from_canonical_u64(circuit_idx as u64));
// TODO: add an enum for circuit type either in constraint_system or vk
let witness_mle = info_span!("[ceno] extract_witness_mles").in_scope(|| {
if cs.num_witin() > 0 {
let mles = witness_iter.by_ref().take(cs.num_witin()).collect_vec();
assert_eq!(
mles.len(),
cs.num_witin(),
"insufficient witness mles for circuit {}",
circuit_name
);
mles
} else {
vec![]
}
});
let structural_witness = info_span!("[ceno] transport_structural_witness")
.in_scope(|| {
let structural_mles = structural_rmm.to_mles();
self.device.transport_mles(&structural_mles)
});
let fixed = fixed_mles.drain(..cs.num_fixed()).collect_vec();
let input = ProofInput {
witness: witness_mle,
fixed,
structural_witness,
public_input: public_input.clone(),
pub_io_evals: pi_evals.iter().map(|p| Either::Right(*p)).collect(),
num_instances: num_instances.clone(),
has_ecc_ops: cs.has_ecc_ops(),
};
let (opcode_proof, pi_in_evals, input_opening_point) =
info_span!("[ceno] create_chip_proof", name = circuit_name.as_str()).in_scope(
|| {
self.create_chip_proof(
circuit_name.as_str(),
pk,
input,
&mut transcript,
&challenges,
)
},
)?;
tracing::trace!(
"generated proof for opcode {} with num_instances={:?}",
circuit_name,
num_instances
);
if cs.num_witin() > 0 || cs.num_fixed() > 0 {
points.push(input_opening_point);
evaluations.push(vec![
opcode_proof.wits_in_evals.clone(),
opcode_proof.fixed_in_evals.clone(),
]);
} else {
assert!(opcode_proof.wits_in_evals.is_empty());
assert!(opcode_proof.fixed_in_evals.is_empty());
}
chip_proofs
.entry(circuit_idx)
.or_insert(vec![])
.push(opcode_proof);
for (idx, eval) in pi_in_evals {
pi_evals[idx] = eval;
}
}
drop(witness_iter);
exit_span!(main_proofs_span);
// batch opening pcs
// generate static info from prover key for expected num variable
let pcs_opening = entered_span!("pcs_opening", profiling_1 = true);
let mpcs_opening_proof = info_span!("[ceno] pcs_opening").in_scope(|| {
self.device.open(
witness_data,
self.get_device_proving_key(shard_ctx)
.map(|dpk| dpk.pcs_data.clone()),
points,
evaluations,
&mut transcript,
)
});
exit_span!(pcs_opening);
let vm_proof = ZKVMProof::new(
raw_pi,
pi_evals,
chip_proofs,
witin_commit,
mpcs_opening_proof,
);
Ok(vm_proof)
})
}
/// create proof for opcode and table circuits
///
/// for each read/write/logup expression, we pack all records of that type
/// into a single tower tree, and then feed these trees into tower prover.
#[tracing::instrument(skip_all, name = "create_chip_proof", fields(table_name=name, profiling_2
), level = "trace")]
pub fn create_chip_proof<'a>(
&self,
name: &str,
circuit_pk: &ProvingKey<E>,
input: ProofInput<'a, PB>,
transcript: &mut impl Transcript<E>,
challenges: &[E; 2],
) -> Result<CreateTableProof<E>, ZKVMError> {
let cs = circuit_pk.get_cs();
let log2_num_instances = input.log2_num_instances();
let num_var_with_rotation = log2_num_instances + cs.rotation_vars().unwrap_or(0);
// run ecc quark prover
let ecc_proof = if !cs.zkvm_v1_css.ec_final_sum.is_empty() {
let span = entered_span!("run_ecc_final_sum", profiling_2 = true);
let ec_point_exprs = &cs.zkvm_v1_css.ec_point_exprs;
assert_eq!(ec_point_exprs.len(), SEPTIC_EXTENSION_DEGREE * 2);
let mut xs_ys = ec_point_exprs
.iter()
.map(|expr| match expr {
Expression::WitIn(id) => input.witness[*id as usize].clone(),
_ => unreachable!("ec point's expression must be WitIn"),
})
.collect_vec();
let ys = xs_ys.split_off(SEPTIC_EXTENSION_DEGREE);
let xs = xs_ys;
let slopes = cs
.zkvm_v1_css
.ec_slope_exprs
.iter()
.map(|expr| match expr {
Expression::WitIn(id) => input.witness[*id as usize].clone(),
_ => unreachable!("slope's expression must be WitIn"),
})
.collect_vec();
let ecc_proof = Some(info_span!("[ceno] prove_ec_sum_quark").in_scope(|| {
self.device
.prove_ec_sum_quark(input.num_instances(), xs, ys, slopes, transcript)
})?);
exit_span!(span);
ecc_proof
} else {
None
};
// build main witness
let records = info_span!("[ceno] build_main_witness")
.in_scope(|| build_main_witness::<E, PCS, PB, PD>(cs, &input, challenges));
let span = entered_span!("prove_tower_relation", profiling_2 = true);
// prove the product and logup sum relation between layers in tower
// (internally calls build_tower_witness)
let (rt_tower, tower_proof, lk_out_evals, w_out_evals, r_out_evals) =
info_span!("[ceno] prove_tower_relation").in_scope(|| {
self.device
.prove_tower_relation(cs, &input, &records, challenges, transcript)
});
exit_span!(span);
assert_eq!(
rt_tower.len(), // num var length should equal to max_num_instance
num_var_with_rotation,
);
// TODO: batch reduction into main sumcheck
// x[rt,0] = \sum_b eq([rt,0], b) * x[b]
// x[rt,1] = \sum_b eq([rt,1], b) * x[b]
// x[1,rt] = \sum_b eq([1,rt], b) * x[b]
// y[rt,0] = \sum_b eq([rt,0], b) * y[b]
// y[rt,1] = \sum_b eq([rt,1], b) * y[b]
// y[1,rt] = \sum_b eq([1,rt], b) * y[b]
// s[0,rt] = \sum_b eq([0,rt], b) * s[b]
// 1. prove the main constraints among witness polynomials
// 2. prove the relation between last layer in the tower and read/write/logup records
let span = entered_span!("prove_main_constraints", profiling_2 = true);
let (input_opening_point, evals, main_sumcheck_proofs, gkr_iop_proof) =
info_span!("[ceno] prove_main_constraints").in_scope(|| {
self.device
.prove_main_constraints(rt_tower, &input, cs, challenges, transcript)
})?;
let MainSumcheckEvals {
wits_in_evals,
fixed_in_evals,
} = evals;
exit_span!(span);
// evaluate pi if there is instance query
let mut pi_in_evals: HashMap<usize, E> = HashMap::new();
if !cs.instance_openings().is_empty() {
let span = entered_span!("pi::evals", profiling_2 = true);
for &Instance(idx) in cs.instance_openings() {
let poly = &input.public_input[idx];
pi_in_evals.insert(
idx,
poly.eval(input_opening_point[..poly.num_vars()].to_vec()),
);
}
exit_span!(span);
}
Ok((
ZKVMChipProof {
r_out_evals,
w_out_evals,
lk_out_evals,
main_sumcheck_proofs,
gkr_iop_proof,
tower_proof,
ecc_proof,
fixed_in_evals,
wits_in_evals,
num_instances: input.num_instances,
},
pi_in_evals,
input_opening_point,
))
}
pub fn setup_init_mem(&self, hints: &[u32], public_io: &[u32]) -> crate::e2e::InitMemState {
let Some(ctx) = self.pk.program_ctx.as_ref() else {
panic!("empty program ctx")
};
ctx.setup_init_mem(hints, public_io)
}
}
/// TowerProofs
impl<E: ExtensionField> TowerProofs<E> {
    /// Allocate empty per-spec buffers for `prod_spec_size` product specs and
    /// `logup_spec_size` logup specs.
    pub fn new(prod_spec_size: usize, logup_spec_size: usize) -> Self {
        TowerProofs {
            proofs: Vec::new(),
            prod_specs_eval: vec![Vec::new(); prod_spec_size],
            logup_specs_eval: vec![Vec::new(); logup_spec_size],
            prod_specs_points: vec![Vec::new(); prod_spec_size],
            logup_specs_points: vec![Vec::new(); logup_spec_size],
        }
    }

    /// Record the sumcheck messages of one tower layer.
    pub fn push_sumcheck_proofs(&mut self, proofs: Vec<IOPProverMessage<E>>) {
        self.proofs.push(proofs);
    }

    /// Record one layer's evaluations and point for a product spec.
    pub fn push_prod_evals_and_point(&mut self, spec_index: usize, evals: Vec<E>, point: Vec<E>) {
        let slot = spec_index;
        self.prod_specs_eval[slot].push(evals);
        self.prod_specs_points[slot].push(point);
    }

    /// Record one layer's evaluations and point for a logup spec.
    pub fn push_logup_evals_and_point(&mut self, spec_index: usize, evals: Vec<E>, point: Vec<E>) {
        let slot = spec_index;
        self.logup_specs_eval[slot].push(evals);
        self.logup_specs_points[slot].push(point);
    }

    /// Number of product specs tracked by this proof.
    pub fn prod_spec_size(&self) -> usize {
        self.prod_specs_eval.len()
    }

    /// Number of logup specs tracked by this proof.
    pub fn logup_spec_size(&self) -> usize {
        self.logup_specs_eval.len()
    }
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/scheme/mock_prover.rs | ceno_zkvm/src/scheme/mock_prover.rs | use super::{PublicValues, utils::wit_infer_by_expr};
use crate::{
ROMType,
circuit_builder::{CircuitBuilder, ConstraintSystem},
e2e::ShardContext,
state::{GlobalState, StateCircuit},
structs::{
ComposedConstrainSystem, ProgramParams, RAMType, ZKVMConstraintSystem, ZKVMFixedTraces,
ZKVMWitnesses,
},
tables::{ProgramTableCircuit, RMMCollections, TableCircuit},
witness::LkMultiplicity,
};
use base64::{Engine, engine::general_purpose::URL_SAFE_NO_PAD};
use ceno_emul::{ByteAddr, CENO_PLATFORM, Program};
use either::Either;
use ff_ext::{BabyBearExt4, ExtensionField, GoldilocksExt2, SmallField};
use generic_static::StaticTypeMap;
use gkr_iop::{
tables::{
LookupTable, OpsTable,
ops::{AndTable, LtuTable, OrTable, PowTable, XorTable},
},
utils::lk_multiplicity::{LkMultiplicityRaw, Multiplicity},
};
use itertools::{Itertools, chain, enumerate, izip};
use multilinear_extensions::{
Expression, WitnessId, fmt,
mle::{ArcMultilinearExtension, IntoMLEs, MultilinearExtension},
util::ceil_log2,
utils::{eval_by_expr, eval_by_expr_with_fixed, eval_by_expr_with_instance},
};
use p3::field::{Field, FieldAlgebra};
use rand::thread_rng;
use std::{
cmp::max,
collections::{BTreeSet, HashMap, HashSet},
fmt::Debug,
fs::File,
hash::Hash,
io::{BufReader, ErrorKind},
marker::PhantomData,
ops::Index,
sync::OnceLock,
};
use strum::IntoEnumIterator;
use tiny_keccak::{Hasher, Keccak};
use witness::next_pow2_instance_padding;
// Maximum polynomial degree tolerated for a single constraint expression
// (exceeding it yields MockProverError::DegreeTooHigh).
const MAX_CONSTRAINT_DEGREE: usize = 3;
// Number of instruction slots reserved for the mock program.
const MOCK_PROGRAM_SIZE: usize = 32;
// Program counter the mock prover starts execution from.
pub const MOCK_PC_START: ByteAddr = ByteAddr(0x0800_0000);
/// Allow LK Multiplicity's key to be used with `u64` and field-element
/// types such as `GoldilocksExt2` / `BabyBearExt4`.
pub trait LkMultiplicityKey: Copy + Clone + Debug + Eq + Hash + Send {
    /// If key is u64, return Some(u64), otherwise None.
    fn to_u64(&self) -> Option<u64>;
}
impl LkMultiplicityKey for u64 {
    // Raw u64 keys expose their value directly.
    fn to_u64(&self) -> Option<u64> {
        Some(*self)
    }
}
impl LkMultiplicityKey for GoldilocksExt2 {
    // Field-element keys carry no canonical u64 representation here.
    fn to_u64(&self) -> Option<u64> {
        None
    }
}
impl LkMultiplicityKey for BabyBearExt4 {
    // Field-element keys carry no canonical u64 representation here.
    fn to_u64(&self) -> Option<u64> {
        None
    }
}
/// Errors reported by the mock prover when a constraint system and its
/// witness assignments disagree.
#[allow(clippy::enum_variant_names)]
#[derive(Debug, Clone)]
pub enum MockProverError<E: ExtensionField, K: LkMultiplicityKey> {
    /// An assert-zero constraint evaluated to a non-zero value at `inst_id`.
    AssertZeroError {
        expression: Expression<E>,
        evaluated: Either<E::BaseField, E>,
        name: String,
        inst_id: usize,
    },
    /// An equality constraint whose two sides evaluated differently.
    AssertEqualError {
        left_expression: Expression<E>,
        right_expression: Expression<E>,
        left: Either<E::BaseField, E>,
        right: Either<E::BaseField, E>,
        name: String,
        inst_id: usize,
    },
    /// A constraint expression exceeds the maximum allowed degree.
    DegreeTooHigh {
        expression: Expression<E>,
        degree: usize,
        name: String,
    },
    /// A looked-up value was not found in the corresponding ROM table.
    LookupError {
        rom_type: ROMType,
        expression: Expression<E>,
        evaluated: E,
        name: String,
        inst_id: usize,
    },
    /// Lookup multiplicity mismatch between constraint system and assignments.
    LkMultiplicityError {
        rom_type: ROMType,
        key: K,
        count: isize, // +ve => missing in cs, -ve => missing in assignments
    },
}
impl<E: ExtensionField, K: LkMultiplicityKey> PartialEq for MockProverError<E, K> {
// Compare errors based on the content, ignoring the inst_id
fn eq(&self, other: &Self) -> bool {
match (self, other) {
(
MockProverError::AssertZeroError {
expression: left_expression,
evaluated: left_evaluated,
name: left_name,
..
},
MockProverError::AssertZeroError {
expression: right_expression,
evaluated: right_evaluated,
name: right_name,
..
},
) => {
left_expression == right_expression
&& left_evaluated == right_evaluated
&& left_name == right_name
}
(
MockProverError::AssertEqualError {
left_expression: left_left_expression,
right_expression: left_right_expression,
left: left_left,
right: left_right,
name: left_name,
..
},
MockProverError::AssertEqualError {
left_expression: right_left_expression,
right_expression: right_right_expression,
left: right_left,
right: right_right,
name: right_name,
..
},
) => {
left_left_expression == right_left_expression
&& left_right_expression == right_right_expression
&& left_left == right_left
&& left_right == right_right
&& left_name == right_name
}
(
MockProverError::LookupError {
expression: left_expression,
evaluated: left_evaluated,
name: left_name,
..
},
MockProverError::LookupError {
expression: right_expression,
evaluated: right_evaluated,
name: right_name,
..
},
) => {
left_expression == right_expression
&& left_evaluated == right_evaluated
&& left_name == right_name
}
(
MockProverError::LkMultiplicityError {
rom_type: left_rom_type,
key: left_key,
count: left_count,
},
MockProverError::LkMultiplicityError {
rom_type: right_rom_type,
key: right_key,
count: right_count,
},
) => (left_rom_type, left_key, left_count) == (right_rom_type, right_key, right_count),
_ => false,
}
}
}
// Reporting helpers shared by all `MockProverError` variants.
impl<E: ExtensionField, K: LkMultiplicityKey> MockProverError<E, K> {
    /// Print a human-readable report of this error to stdout, resolving the
    /// witness values referenced by the failing expression for its `inst_id`.
    fn print(&self, wits_in: &[ArcMultilinearExtension<E>], wits_in_name: &[String]) {
        // `fmt::expr` records every witness id it encounters into `wtns`;
        // `fmt::wtns` then renders those witnesses for the failing instance.
        let mut wtns = vec![];
        match self {
            Self::AssertZeroError {
                expression,
                evaluated,
                name,
                inst_id,
            } => {
                let expression_fmt = fmt::expr(expression, &mut wtns, false);
                let wtns_fmt = fmt::wtns(&wtns, wits_in, *inst_id, wits_in_name);
                let eval_fmt = fmt::either_field(*evaluated, false);
                println!(
                    "\nAssertZeroError {name:?}: Evaluated expression is not zero\n\
                    Expression: {expression_fmt}\n\
                    Evaluation: {eval_fmt} != 0\n\
                    Inst[{inst_id}]:\n{wtns_fmt}\n",
                );
            }
            Self::AssertEqualError {
                left_expression,
                right_expression,
                left,
                right,
                name,
                inst_id,
            } => {
                let left_expression_fmt = fmt::expr(left_expression, &mut wtns, false);
                let right_expression_fmt = fmt::expr(right_expression, &mut wtns, false);
                let wtns_fmt = fmt::wtns(&wtns, wits_in, *inst_id, wits_in_name);
                let left_eval_fmt = fmt::either_field(*left, false);
                let right_eval_fmt = fmt::either_field(*right, false);
                println!(
                    "\nAssertEqualError {name:?}\n\
                    Left: {left_eval_fmt} != Right: {right_eval_fmt}\n\
                    Left Expression: {left_expression_fmt}\n\
                    Right Expression: {right_expression_fmt}\n\
                    Inst[{inst_id}]:\n{wtns_fmt}\n",
                );
            }
            Self::DegreeTooHigh {
                expression,
                degree,
                name,
            } => {
                let expression_fmt = fmt::expr(expression, &mut wtns, false);
                println!(
                    "\nDegreeTooHigh {name:?}: Expression degree is too high\n\
                    Expression: {expression_fmt}\n\
                    Degree: {degree} > {MAX_CONSTRAINT_DEGREE}\n",
                );
            }
            Self::LookupError {
                rom_type,
                expression,
                evaluated,
                name,
                inst_id,
            } => {
                let expression_fmt = fmt::expr(expression, &mut wtns, false);
                let wtns_fmt = fmt::wtns(&wtns, wits_in, *inst_id, wits_in_name);
                let eval_fmt = fmt::field(*evaluated);
                println!(
                    "\nLookupError {name:#?}: Evaluated expression does not exist in T vector\n\
                    ROM Type: {rom_type:?}\n\
                    Expression: {expression_fmt}\n\
                    Evaluation: {eval_fmt}\n\
                    Inst[{inst_id}]:\n{wtns_fmt}\n",
                );
            }
            Self::LkMultiplicityError {
                rom_type,
                key,
                count,
                ..
            } => {
                let lookups = if count.abs() > 1 {
                    format!("{} Lookups", count.abs())
                } else {
                    "Lookup".to_string()
                };
                // `count` is signed: its sign selects which side is reported
                // as missing (see `location` below)
                let (location, element) = if let Some(key) = key.to_u64() {
                    let location = if *count > 0 {
                        "constraint system"
                    } else {
                        "assignments"
                    };
                    // decode the packed lookup key per ROM type for readability
                    let element = match rom_type {
                        ROMType::Dynamic => {
                            let left = u64::BITS - 1 - key.leading_zeros();
                            let element = key & ((1 << left) - 1);
                            format!("Dynamic Range Table U{left} with Element: {element:?}")
                        }
                        ROMType::DoubleU8 => {
                            let a = (key >> 8) & u8::MAX as u64;
                            let b = key & (u8::MAX as u64);
                            format!("Double U8 Range Table with Elements: ({a:?}, {b:?})")
                        }
                        ROMType::And => {
                            let (a, b) = AndTable::unpack(key);
                            format!("Element: {a} && {b}")
                        }
                        ROMType::Or => {
                            let (a, b) = OrTable::unpack(key);
                            format!("Element: {a} || {b}")
                        }
                        ROMType::Xor => {
                            let (a, b) = XorTable::unpack(key);
                            format!("Element: {a} ^ {b}")
                        }
                        ROMType::Ltu => {
                            let (a, b) = LtuTable::unpack(key);
                            format!("Element: {a} < {b}")
                        }
                        ROMType::Pow => {
                            let (a, b) = PowTable::unpack(key);
                            format!("Element: {a} ** {b}")
                        }
                        ROMType::Instruction => format!("PC: {key}"),
                    };
                    (location, element)
                } else {
                    // key does not fit u64: report the raw key against the combined tables
                    (
                        if *count > 0 {
                            "combined_lkm_tables"
                        } else {
                            "combined_lkm_opcodes"
                        },
                        format!("Element: {key:?}"),
                    )
                };
                println!(
                    "\nLkMultiplicityError:\n\
                    {lookups} of {rom_type:?} missing in {location}\n\
                    {element}\n"
                );
            }
        }
    }
    /// Instance index of the failing row; only defined for per-instance
    /// errors, hence `unreachable!` for the aggregate variants.
    #[cfg(test)]
    fn inst_id(&self) -> usize {
        match self {
            Self::AssertZeroError { inst_id, .. }
            | Self::AssertEqualError { inst_id, .. }
            | Self::LookupError { inst_id, .. } => *inst_id,
            Self::DegreeTooHigh { .. } | Self::LkMultiplicityError { .. } => unreachable!(),
        }
    }
    /// True when this error's `Debug` representation mentions `constraint_name`.
    fn contains(&self, constraint_name: &str) -> bool {
        format!("{:?}", self).contains(constraint_name)
    }
}
/// Entry point for mock proving / constraint debugging; carries no state,
/// all behavior lives in the `impl` blocks below.
pub struct MockProver<E: ExtensionField> {
    // marker only: pins the field type `E` without storing any data
    _phantom: PhantomData<E>,
}
/// Build the set of all valid lookup-table records, each stored as the
/// canonical-u64 representation of its RLC fingerprint under `challenge`.
fn load_tables<E: ExtensionField>(
    cs: &ConstraintSystem<E>,
    challenge: [E; 2],
) -> HashSet<Vec<u64>> {
    fn load_dynamic_range_table<E: ExtensionField, const MAX_BITS: usize>(
        t_vec: &mut Vec<Vec<u64>>,
        cs: &ConstraintSystem<E>,
        challenge: [E; 2],
    ) {
        // fingerprint one (value, bit-width) row and append it
        let mut push = |value: usize, bits: usize| {
            let record = cs.rlc_chip_record(vec![
                (LookupTable::Dynamic as usize).into(),
                value.into(),
                bits.into(),
            ]);
            let record = eval_by_expr(&[], &[], &challenge, &record);
            t_vec.push(record.to_canonical_u64_vec());
        };
        // a single (0, 0) sentinel row, then every value representable in
        // `bits` bits for each width 0..=MAX_BITS (same order as the
        // original iterator chain)
        push(0, 0);
        for bits in 0..=MAX_BITS {
            for value in 0..(1usize << bits) {
                push(value, bits);
            }
        }
    }
    fn load_double_u8_range_table<E: ExtensionField>(
        t_vec: &mut Vec<Vec<u64>>,
        cs: &ConstraintSystem<E>,
        challenge: [E; 2],
    ) {
        // full cartesian product of byte pairs (a, b)
        for a in 0..(1usize << 8) {
            for b in 0..(1usize << 8) {
                let record = cs.rlc_chip_record(vec![
                    (LookupTable::DoubleU8 as usize).into(),
                    a.into(),
                    b.into(),
                ]);
                let record = eval_by_expr(&[], &[], &challenge, &record);
                t_vec.push(record.to_canonical_u64_vec());
            }
        }
    }
    fn load_op_table<OP: OpsTable, E: ExtensionField>(
        t_vec: &mut Vec<Vec<u64>>,
        cs: &ConstraintSystem<E>,
        challenge: [E; 2],
    ) {
        // one row per (a, b, c) triple defined by the operation table `OP`
        for [a, b, c] in OP::content() {
            let record = cs.rlc_chip_record(vec![
                (OP::ROM_TYPE as usize).into(),
                (a as usize).into(),
                (b as usize).into(),
                (c as usize).into(),
            ]);
            let record = eval_by_expr(&[], &[], &challenge, &record);
            t_vec.push(record.to_canonical_u64_vec());
        }
    }
    let mut records = vec![];
    load_dynamic_range_table::<_, 18>(&mut records, cs, challenge);
    load_double_u8_range_table(&mut records, cs, challenge);
    load_op_table::<AndTable, _>(&mut records, cs, challenge);
    load_op_table::<OrTable, _>(&mut records, cs, challenge);
    load_op_table::<XorTable, _>(&mut records, cs, challenge);
    load_op_table::<LtuTable, _>(&mut records, cs, challenge);
    if E::BaseField::bits() > 32 {
        // this pow table only work on large prime field
        load_op_table::<PowTable, _>(&mut records, cs, challenge);
    }
    records.into_iter().collect()
}
// Load the tables once per concrete instantiation of `E`, returning the
// (random) challenge used to fingerprint them plus the table set.
// Results are memoized in-process via `OnceLock` + `StaticTypeMap`, and the
// table itself is additionally cached on disk keyed by a digest of the challenge.
#[allow(clippy::type_complexity)]
fn load_once_tables<E: ExtensionField + 'static + Sync + Send>(
    cs: &ConstraintSystem<E>,
) -> ([E; 2], HashSet<Vec<u64>>) {
    static CACHE: OnceLock<StaticTypeMap<([Vec<u64>; 2], HashSet<Vec<u64>>)>> = OnceLock::new();
    let cache = CACHE.get_or_init(StaticTypeMap::new);
    let (challenges_repr, table) = cache.call_once::<E, _>(|| {
        let mut rng = thread_rng();
        let challenge = [E::random(&mut rng), E::random(&mut rng)];
        // derive the cache filename from a keccak digest of the challenge,
        // so a different challenge never reads a stale table file
        let mut keccak = Keccak::v256();
        let mut filename_digest = [0u8; 32];
        keccak.update(serde_json::to_string(&challenge).unwrap().as_bytes());
        keccak.finalize(&mut filename_digest);
        let file_path = format!(
            "table_cache_dev_{:?}.json",
            URL_SAFE_NO_PAD.encode(filename_digest)
        );
        let table = match File::open(&file_path) {
            Ok(file) => {
                let reader = BufReader::new(file);
                serde_json::from_reader(reader).unwrap()
            }
            Err(e) if e.kind() == ErrorKind::NotFound => {
                // Cached file doesn't exist, let's make a new one.
                // And carefully avoid exposing a half-written file to other threads,
                // or other runs of this program (in case of a crash).
                let mut file = tempfile::NamedTempFile::new_in(".").unwrap();
                // load new table and serialize to file for later use
                let table = load_tables(cs, challenge);
                serde_json::to_writer(&mut file, &table).unwrap();
                // Persist the file to the target location
                // This is an atomic operation on Posix-like systems, so we don't have to worry
                // about half-written files.
                // Note, that if another process wrote to our target file in the meantime,
                // we silently overwrite it here. But that's fine.
                file.persist(file_path).unwrap();
                table
            }
            Err(e) => panic!("{:?}", e),
        };
        // store the challenge as canonical u64 limbs so the cached value is
        // independent of the concrete field representation
        (
            challenge.map(|c| {
                c.as_base_slice()
                    .iter()
                    .map(|b| b.to_canonical_u64())
                    .collect_vec()
            }),
            table,
        )
    });
    // reinitialize per generic type E
    (
        challenges_repr.clone().map(|repr| {
            E::from_base_iter(repr.iter().copied().map(E::BaseField::from_canonical_u64))
        }),
        table.clone(),
    )
}
impl<'a, E: ExtensionField + Hash> MockProver<E> {
    /// Mock-prove `cb` with an explicit verifier challenge and no program /
    /// public IO. Useful in tests that need deterministic challenges; `lkm`,
    /// when given, is cross-checked against the multiplicities inferred from
    /// the constraint system.
    pub fn run_with_challenge(
        cb: &CircuitBuilder<E>,
        fixed: &[ArcMultilinearExtension<'a, E>],
        wits_in: &[ArcMultilinearExtension<'a, E>],
        structural_witin: &[ArcMultilinearExtension<'a, E>],
        challenge: [E; 2],
        lkm: Option<Multiplicity<u64>>,
    ) -> Result<(), Vec<MockProverError<E, u64>>> {
        // empty program / pi_mles / pub_io_evals; Some(challenge) skips the cached tables
        Self::run_maybe_challenge(
            cb,
            fixed,
            wits_in,
            structural_witin,
            &[],
            &[],
            &[],
            Some(challenge),
            lkm,
        )
    }
    /// Mock-prove `cb` using the cached randomly-generated challenges, with
    /// `program` loaded into the instruction lookup table. No fixed columns,
    /// structural witnesses, or public IO are supplied.
    pub fn run(
        cb: &CircuitBuilder<E>,
        wits_in: &[ArcMultilinearExtension<'a, E>],
        program: &[ceno_emul::Instruction],
        lkm: Option<Multiplicity<u64>>,
    ) -> Result<(), Vec<MockProverError<E, u64>>> {
        Self::run_maybe_challenge(cb, &[], wits_in, &[], program, &[], &[], None, lkm)
    }
    #[allow(clippy::too_many_arguments)]
    /// Shared driver behind [`Self::run`] / [`Self::run_with_challenge`]:
    /// loads the lookup tables (including the program ROM) and delegates to
    /// the table-based checker, discarding the returned multiplicities.
    fn run_maybe_challenge(
        cb: &CircuitBuilder<E>,
        fixed: &[ArcMultilinearExtension<'a, E>],
        wits_in: &[ArcMultilinearExtension<'a, E>],
        structural_witin: &[ArcMultilinearExtension<'a, E>],
        program: &[ceno_emul::Instruction],
        pi_mles: &[ArcMultilinearExtension<'a, E>],
        pub_io_evals: &[Either<E::BaseField, E>],
        challenge: Option<[E; 2]>,
        lkm: Option<Multiplicity<u64>>,
    ) -> Result<(), Vec<MockProverError<E, u64>>> {
        let program = Program::from(program);
        let (table, challenge) = Self::load_tables_with_program(cb.cs, &program, challenge);
        Self::run_maybe_challenge_with_table(
            cb.cs,
            &table,
            fixed,
            wits_in,
            structural_witin,
            pi_mles,
            pub_io_evals,
            // these entry points always check a single instance
            1,
            challenge,
            lkm,
        )
        .map(|_| ())
    }
    #[allow(clippy::too_many_arguments)]
    /// Check every constraint of `cs` against the supplied witness columns.
    ///
    /// Evaluates assert-zero / assert-equal expressions and lookup expressions
    /// over all instances, optionally cross-checks the lookup multiplicities
    /// collected during assignment (`expected_lkm`) against those inferred
    /// from the constraint system, and returns either the shared lookup
    /// multiplicities or every error found.
    fn run_maybe_challenge_with_table(
        cs: &ConstraintSystem<E>,
        table: &HashSet<Vec<u64>>,
        fixed: &[ArcMultilinearExtension<'a, E>],
        wits_in: &[ArcMultilinearExtension<'a, E>],
        structural_witin: &[ArcMultilinearExtension<'a, E>],
        pi_mles: &[ArcMultilinearExtension<'a, E>],
        pub_io_evals: &[Either<E::BaseField, E>],
        num_instances: usize,
        challenge: [E; 2],
        expected_lkm: Option<Multiplicity<u64>>,
    ) -> Result<LkMultiplicityRaw<E>, Vec<MockProverError<E, u64>>> {
        let mut shared_lkm = LkMultiplicityRaw::<E>::default();
        let mut errors = vec![];
        // padded instance count taken from the first available column; falls
        // back to the next power of two of `num_instances` when none exist
        let num_instance_padded = wits_in
            .first()
            .or_else(|| fixed.first())
            .or_else(|| pi_mles.first())
            .or_else(|| structural_witin.first())
            .map(|mle| mle.evaluations().len())
            .unwrap_or_else(|| next_pow2_instance_padding(num_instances))
        // Assert zero expressions
        for (expr, name) in cs
            .assert_zero_expressions
            .iter()
            .chain(&cs.assert_zero_sumcheck_expressions)
            .zip_eq(
                cs.assert_zero_expressions_namespace_map
                    .iter()
                    .chain(&cs.assert_zero_sumcheck_expressions_namespace_map),
            )
        {
            if expr.degree() > MAX_CONSTRAINT_DEGREE {
                errors.push(MockProverError::DegreeTooHigh {
                    expression: expr.clone(),
                    degree: expr.degree(),
                    name: name.clone(),
                });
            }
            // selector that masks out padding rows: either the circuit's own
            // zero-selector column, or an all-ones vector over real instances
            let zero_selector: ArcMultilinearExtension<_> =
                if let Some(zero_selector) = &cs.zero_selector {
                    structural_witin[zero_selector.selector_expr().id()].clone()
                } else {
                    let mut selector = vec![E::BaseField::ONE; num_instances];
                    selector.resize(num_instance_padded, E::BaseField::ZERO);
                    MultilinearExtension::from_evaluation_vec_smart(
                        ceil_log2(num_instance_padded),
                        selector,
                    )
                    .into()
                };
            // require_equal does not always have the form of Expr::Sum as
            // the sum of witness and constant is expressed as scaled sum
            if let Expression::Sum(left, right) = expr
                && name.contains("require_equal")
            {
                // the constraint is left + right == 0, i.e. left == -right
                let right = -right.as_ref();
                let left_evaluated = wit_infer_by_expr(
                    left,
                    cs.num_witin,
                    cs.num_fixed as WitnessId,
                    cs.instance_openings.len(),
                    fixed,
                    wits_in,
                    structural_witin,
                    pi_mles,
                    pub_io_evals,
                    &challenge,
                );
                let left_evaluated =
                    filter_mle_by_selector_mle(left_evaluated, zero_selector.clone());
                let right_evaluated = wit_infer_by_expr(
                    &right,
                    cs.num_witin,
                    cs.num_fixed as WitnessId,
                    cs.instance_openings.len(),
                    fixed,
                    wits_in,
                    structural_witin,
                    pi_mles,
                    pub_io_evals,
                    &challenge,
                );
                let right_evaluated =
                    filter_mle_by_selector_mle(right_evaluated, zero_selector.clone());
                // left_evaluated.len() ?= right_evaluated.len() due to padding instance
                for (inst_id, (left_element, right_element)) in
                    izip!(left_evaluated, right_evaluated).enumerate()
                {
                    if left_element != right_element {
                        errors.push(MockProverError::AssertEqualError {
                            left_expression: *left.clone(),
                            right_expression: right.clone(),
                            left: Either::Right(left_element),
                            right: Either::Right(right_element),
                            name: name.clone(),
                            inst_id,
                        });
                    }
                }
            } else {
                // contains require_zero
                let expr_evaluated = wit_infer_by_expr(
                    expr,
                    cs.num_witin,
                    cs.num_fixed as WitnessId,
                    cs.instance_openings.len(),
                    fixed,
                    wits_in,
                    structural_witin,
                    pi_mles,
                    pub_io_evals,
                    &challenge,
                );
                let expr_evaluated =
                    filter_mle_by_selector_mle(expr_evaluated, zero_selector.clone());
                for (inst_id, element) in enumerate(expr_evaluated) {
                    if element != E::ZERO {
                        errors.push(MockProverError::AssertZeroError {
                            expression: expr.clone(),
                            evaluated: Either::Right(element),
                            name: name.clone(),
                            inst_id,
                        });
                    }
                }
            }
        }
        // selector masking which rows participate in lookup checks
        let lk_selector: ArcMultilinearExtension<_> = if let Some(lk_selector) = &cs.lk_selector {
            structural_witin[lk_selector.selector_expr().id()].clone()
        } else {
            let mut selector = vec![E::BaseField::ONE; num_instances];
            selector.resize(num_instance_padded, E::BaseField::ZERO);
            MultilinearExtension::from_evaluation_vec_smart(
                ceil_log2(num_instance_padded),
                selector,
            )
            .into()
        };
        // Lookup expressions
        for (expr, (name, (rom_type, _))) in cs.lk_expressions.iter().zip(
            cs.lk_expressions_namespace_map
                .iter()
                .zip_eq(cs.lk_expressions_items_map.iter()),
        ) {
            let expr_evaluated = wit_infer_by_expr(
                expr,
                cs.num_witin,
                cs.num_fixed as WitnessId,
                cs.instance_openings.len(),
                fixed,
                wits_in,
                structural_witin,
                pi_mles,
                pub_io_evals,
                &challenge,
            );
            let expr_evaluated = filter_mle_by_selector_mle(expr_evaluated, lk_selector.clone());
            // Check each lookup expr exists in t vec
            for (inst_id, element) in enumerate(&expr_evaluated) {
                if !table.contains(&element.to_canonical_u64_vec()) {
                    errors.push(MockProverError::LookupError {
                        rom_type: *rom_type,
                        expression: expr.clone(),
                        evaluated: *element,
                        name: name.clone(),
                        inst_id,
                    });
                }
            }
            // Increment shared LK Multiplicity
            for element in expr_evaluated {
                shared_lkm.increment(*rom_type, element);
            }
        }
        // LK Multiplicity check
        if let Some(lkm_from_assignment) = expected_lkm {
            // number of rows actually selected for lookup counting
            let selected_count = lk_selector
                .get_base_field_vec()
                .iter()
                .filter(|sel| **sel == E::BaseField::ONE)
                .count();
            // Infer LK Multiplicity from constraint system.
            let mut lkm_from_cs = LkMultiplicity::default();
            for (rom_type, args) in &cs.lk_expressions_items_map {
                let args_eval: Vec<_> = args
                    .iter()
                    .map(|arg_expr| {
                        let arg_eval = wit_infer_by_expr(
                            arg_expr,
                            cs.num_witin,
                            cs.num_fixed as WitnessId,
                            cs.instance_openings.len(),
                            fixed,
                            wits_in,
                            structural_witin,
                            pi_mles,
                            pub_io_evals,
                            &challenge,
                        );
                        // broadcast a constant argument across all selected rows
                        if arg_expr.is_constant() && arg_eval.evaluations.len() == 1 {
                            vec![arg_eval.get_ext_field_vec()[0].to_canonical_u64(); selected_count]
                        } else {
                            filter_mle_by_selector_mle(arg_eval, lk_selector.clone())
                                .iter()
                                .map(E::to_canonical_u64)
                                .collect_vec()
                        }
                    })
                    .collect();
                // Count lookups infered from ConstraintSystem from all instances into lkm_from_cs.
                for i in 0..selected_count {
                    match rom_type {
                        ROMType::Dynamic => {
                            lkm_from_cs.assert_dynamic_range(args_eval[0][i], args_eval[1][i]);
                        }
                        ROMType::DoubleU8 => {
                            lkm_from_cs.assert_double_u8(args_eval[0][i], args_eval[1][i]);
                        }
                        ROMType::And => {
                            lkm_from_cs.lookup_and_byte(args_eval[0][i], args_eval[1][i])
                        }
                        ROMType::Or => lkm_from_cs.lookup_or_byte(args_eval[0][i], args_eval[1][i]),
                        ROMType::Xor => {
                            lkm_from_cs.lookup_xor_byte(args_eval[0][i], args_eval[1][i])
                        }
                        ROMType::Ltu => {
                            lkm_from_cs.lookup_ltu_byte(args_eval[0][i], args_eval[1][i])
                        }
                        ROMType::Pow => {
                            // only base-2 powers are supported by the pow table
                            assert_eq!(args_eval[0][i], 2);
                            lkm_from_cs.lookup_pow2(args_eval[1][i])
                        }
                        ROMType::Instruction => lkm_from_cs.fetch(args_eval[0][i] as u32),
                    };
                }
            }
            errors.extend(compare_lkm(
                lkm_from_assignment,
                lkm_from_cs.into_finalize_result(),
            ));
        }
        if errors.is_empty() {
            Ok(shared_lkm)
        } else {
            Err(errors)
        }
    }
fn load_tables_with_program(
cs: &ConstraintSystem<E>,
program: &Program,
challenge: Option<[E; 2]>,
) -> (HashSet<Vec<u64>>, [E; 2]) {
// load tables
let (challenge, mut table) = if let Some(challenge) = challenge {
(challenge, load_tables(cs, challenge))
} else {
load_once_tables(cs)
};
table.extend(Self::load_program_table(program, challenge));
(table, challenge)
}
    /// Build the instruction-ROM records for `program` by constructing a
    /// throwaway program-table circuit, generating its fixed trace, and
    /// fingerprinting every row with `challenge`.
    fn load_program_table(program: &Program, challenge: [E; 2]) -> Vec<Vec<u64>> {
        let mut t_vec = vec![];
        let mut cs = ConstraintSystem::<E>::new(|| "mock_program");
        let params = ProgramParams {
            platform: CENO_PLATFORM.clone(),
            // table size padded to at least MOCK_PROGRAM_SIZE
            program_size: max(
                next_pow2_instance_padding(program.instructions.len()),
                MOCK_PROGRAM_SIZE,
            ),
            ..ProgramParams::default()
        };
        let mut cb = CircuitBuilder::new(&mut cs);
        let config = ProgramTableCircuit::<_>::construct_circuit(&mut cb, &params).unwrap();
        let fixed = ProgramTableCircuit::<E>::generate_fixed_traces(&config, cs.num_fixed, program);
        for table_expr in &cs.lk_table_expressions {
            for row in fixed.iter_rows() {
                // TODO: Find a better way to obtain the row content.
                let row: Vec<E> = row.iter().map(|v| (*v).into()).collect();
                let rlc_record =
                    eval_by_expr_with_fixed(&row, &[], &[], &challenge, &table_expr.values);
                t_vec.push(rlc_record.to_canonical_u64_vec());
            }
        }
        t_vec
    }
#[allow(clippy::too_many_arguments)]
/// Run and check errors
///
/// Panic, unless we see exactly the expected errors.
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | true |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/scheme/utils.rs | ceno_zkvm/src/scheme/utils.rs | use crate::{
scheme::{
constants::MIN_PAR_SIZE,
hal::{ProofInput, ProverDevice},
},
structs::ComposedConstrainSystem,
};
use either::Either;
use ff_ext::ExtensionField;
use gkr_iop::{
evaluation::EvalExpression,
gkr::{GKRCircuit, GKRCircuitOutput, GKRCircuitWitness, layer::LayerWitness},
hal::{MultilinearPolynomial, ProtocolWitnessGeneratorProver, ProverBackend},
};
use itertools::Itertools;
use mpcs::PolynomialCommitmentScheme;
pub use multilinear_extensions::wit_infer_by_expr;
use multilinear_extensions::{
mle::{ArcMultilinearExtension, FieldType, IntoMLE, MultilinearExtension},
util::ceil_log2,
};
use rayon::{
iter::{
IndexedParallelIterator, IntoParallelIterator, IntoParallelRefIterator,
IntoParallelRefMutIterator, ParallelIterator,
},
prelude::ParallelSliceMut,
};
use std::{iter, sync::Arc};
use witness::next_pow2_instance_padding;
/// Interleave multiple mles into `num_limbs` output mles.
///
/// Instance `start + k` of source mle `i` lands at position `k * mles.len() + i`
/// of limb `start / per_fanin_len`; trailing positions keep `default`.
/// e.g. input [[1,2],[3,4],[5,6],[7,8]], num_limbs=2
/// output [[1,3,5,7,0,0,0,0],[2,4,6,8,0,0,0,0]]
#[allow(unused)]
pub(crate) fn interleaving_mles_to_mles<'a, E: ExtensionField>(
    mles: &[ArcMultilinearExtension<E>],
    num_instances: usize,
    num_limbs: usize,
    default: E,
) -> Vec<MultilinearExtension<'a, E>> {
    assert!(num_limbs.is_power_of_two());
    assert!(!mles.is_empty());
    let next_power_of_2 = next_pow2_instance_padding(num_instances);
    assert!(
        mles.iter()
            .all(|mle| mle.evaluations().len() <= next_power_of_2)
    );
    let log2_num_instances = ceil_log2(next_power_of_2);
    let per_fanin_len = (mles[0].evaluations().len() / num_limbs).max(1); // minimal size 1
    let log2_mle_size = ceil_log2(mles.len());
    let log2_num_limbs = ceil_log2(num_limbs);
    (0..num_limbs)
        .into_par_iter()
        .map(|fanin_index| {
            let mut evaluations = vec![
                default;
                1 << (log2_mle_size
                    + log2_num_instances.saturating_sub(log2_num_limbs))
            ];
            let per_instance_size = 1 << log2_mle_size;
            assert!(evaluations.len() >= per_instance_size);
            // this limb covers source instances [start, start + per_fanin_len)
            let start = per_fanin_len * fanin_index;
            if start < num_instances {
                // clamp to real instances so padding rows never leak into the output
                let valid_instances_len = per_fanin_len.min(num_instances - start);
                mles.iter()
                    .enumerate()
                    .for_each(|(i, mle)| match mle.evaluations() {
                        FieldType::Ext(mle) => mle
                            .get(start..(start + valid_instances_len))
                            .unwrap_or(&[])
                            .par_iter()
                            .zip(evaluations.par_chunks_mut(per_instance_size))
                            .with_min_len(MIN_PAR_SIZE)
                            .for_each(|(value, instance)| {
                                assert_eq!(instance.len(), per_instance_size);
                                instance[i] = *value;
                            }),
                        // fix: previously sliced `start..start + per_fanin_len` here,
                        // inconsistent with the Ext branch above — the unclamped range
                        // could copy padded rows beyond `num_instances`, or (via the
                        // `unwrap_or(&[])`) silently drop the whole slice when the
                        // range exceeded the mle length
                        FieldType::Base(mle) => mle
                            .get(start..(start + valid_instances_len))
                            .unwrap_or(&[])
                            .par_iter()
                            .zip(evaluations.par_chunks_mut(per_instance_size))
                            .with_min_len(MIN_PAR_SIZE)
                            .for_each(|(value, instance)| {
                                assert_eq!(instance.len(), per_instance_size);
                                instance[i] = E::from(*value);
                            }),
                        _ => unreachable!(),
                    });
            }
            evaluations.into_mle()
        })
        .collect::<Vec<MultilinearExtension<E>>>()
}
/// One pairwise logup-fold step over the slice `start_index..start_index + cur_len`:
/// combines fraction pairs element-wise via p1/q1 + p2/q2 = (q1*p2 + q2*p1)/(q1*q2),
/// returning the numerator and denominator columns unzipped as two vectors.
macro_rules! tower_mle_4 {
    ($p1:ident, $p2:ident, $q1:ident, $q2:ident, $start_index:ident, $cur_len:ident) => {{
        let range = $start_index..($start_index + $cur_len);
        $q1[range.clone()]
            .par_iter()
            .zip(&$q2[range.clone()])
            .zip(&$p1[range.clone()])
            .zip(&$p2[range])
            .map(|(((q1, q2), p1), p2)| {
                // numerator and denominator of the combined fraction
                let p = *q1 * *p2 + *q2 * *p1;
                let q = *q1 * *q2;
                (p, q)
            })
            .unzip()
    }};
}
/// Exact base-2 logarithm of `n`; panics unless `n` is a power of two.
pub fn log2_strict_usize(n: usize) -> usize {
    assert!(n.is_power_of_two());
    // for a power of two, the log equals the bit index of the single set bit
    (usize::BITS - 1 - n.leading_zeros()) as usize
}
/// infer logup witness from last layer
/// return is the ([p1,p2], [q1,q2]) for each layer, output layer first
pub(crate) fn infer_tower_logup_witness<'a, E: ExtensionField>(
    p_mles: Option<Vec<MultilinearExtension<'a, E>>>,
    q_mles: Vec<MultilinearExtension<'a, E>>,
) -> Vec<Vec<MultilinearExtension<'a, E>>> {
    if cfg!(test) {
        assert_eq!(q_mles.len(), 2);
        assert!(q_mles.iter().map(|q| q.evaluations().len()).all_equal());
    }
    let num_vars = ceil_log2(q_mles[0].evaluations().len());
    // fold the input layer `num_vars` times; each step halves the evaluation
    // length by combining fractions pairwise: p1/q1 + p2/q2 = (q1*p2 + q2*p1)/(q1*q2)
    let mut wit_layers = (0..num_vars).fold(vec![(p_mles, q_mles)], |mut acc, _| {
        let (p, q): &(
            Option<Vec<MultilinearExtension<E>>>,
            Vec<MultilinearExtension<E>>,
        ) = acc.last().unwrap();
        let (q1, q2) = (&q[0], &q[1]);
        let cur_len = q1.evaluations().len() / 2;
        // `index` selects the lower/upper half of the previous layer
        let (next_p, next_q): (Vec<MultilinearExtension<E>>, Vec<MultilinearExtension<E>>) = (0..2)
            .map(|index| {
                let start_index = cur_len * index;
                // when numerators are absent they are implicitly all ones,
                // so the combined numerator degenerates to q1 + q2
                let (p_evals, q_evals): (Vec<E>, Vec<E>) = if let Some(p) = p {
                    let (p1, p2) = (&p[0], &p[1]);
                    match (
                        p1.evaluations(),
                        p2.evaluations(),
                        q1.evaluations(),
                        q2.evaluations(),
                    ) {
                        (
                            FieldType::Ext(p1),
                            FieldType::Ext(p2),
                            FieldType::Ext(q1),
                            FieldType::Ext(q2),
                        ) => tower_mle_4!(p1, p2, q1, q2, start_index, cur_len),
                        (
                            FieldType::Base(p1),
                            FieldType::Base(p2),
                            FieldType::Ext(q1),
                            FieldType::Ext(q2),
                        ) => tower_mle_4!(p1, p2, q1, q2, start_index, cur_len),
                        _ => unreachable!(),
                    }
                } else {
                    match (q1.evaluations(), q2.evaluations()) {
                        (FieldType::Ext(q1), FieldType::Ext(q2)) => {
                            let range = start_index..(start_index + cur_len);
                            q1[range.clone()]
                                .par_iter()
                                .zip(&q2[range])
                                .map(|(q1, q2)| {
                                    // 1 / q1 + 1 / q2 = (q1+q2) / q1*q2
                                    // p is numerator and q is denominator
                                    let p = *q1 + *q2;
                                    let q = *q1 * *q2;
                                    (p, q)
                                })
                                .unzip()
                        }
                        _ => unreachable!(),
                    }
                };
                (p_evals.into_mle(), q_evals.into_mle())
            })
            .unzip(); // vec[vec[p1, p2], vec[q1, q2]]
        acc.push((Some(next_p), next_q));
        acc
    });
    // layers were built input -> output; callers expect the output layer first
    wit_layers.reverse();
    wit_layers
        .into_iter()
        .map(|(p, q)| {
            // input layer p are all 1
            if let Some(mut p) = p {
                p.extend(q);
                p
            } else {
                let len = q[0].evaluations().len();
                vec![
                    (0..len)
                        .into_par_iter()
                        .map(|_| E::ONE)
                        .collect::<Vec<_>>()
                        .into_mle(),
                    (0..len)
                        .into_par_iter()
                        .map(|_| E::ONE)
                        .collect::<Vec<_>>()
                        .into_mle(),
                ]
                .into_iter()
                .chain(q)
                .collect()
            }
        })
        .collect_vec()
}
/// Infer tower witness from input layer (layer 0 is the output layer and layer n is the input layer).
/// The relation between layer i and layer i+1 is as follows:
/// prod[i][b] = ∏_s prod[i+1][s,b]
/// where 2^s is the fanin of the product gate `num_product_fanin`.
pub fn infer_tower_product_witness<E: ExtensionField>(
    num_vars: usize,
    last_layer: Vec<MultilinearExtension<'_, E>>,
    num_product_fanin: usize,
) -> Vec<Vec<MultilinearExtension<'_, E>>> {
    // sanity check
    assert!(last_layer.len() == num_product_fanin);
    assert!(num_product_fanin.is_power_of_two());
    let log2_num_product_fanin = log2_strict_usize(num_product_fanin);
    assert!(num_vars.is_multiple_of(log2_num_product_fanin));
    assert!(
        last_layer
            .iter()
            .all(|p| p.num_vars() == num_vars - log2_num_product_fanin)
    );
    let num_layers = num_vars / log2_num_product_fanin;
    let mut wit_layers = Vec::with_capacity(num_layers);
    wit_layers.push(last_layer);
    // build each smaller layer from the previous one, input -> output
    for _ in (0..num_layers - 1).rev() {
        let input_layer = wit_layers.last().unwrap();
        let output_len = input_layer[0].evaluations().len() / num_product_fanin;
        let output_layer: Vec<MultilinearExtension<E>> = (0..num_product_fanin)
            .map(|index| {
                // avoid the overhead of vector initialization
                let mut evaluations: Vec<E> = Vec::with_capacity(output_len);
                let remaining = evaluations.spare_capacity_mut();
                // NOTE(review): each pair of input mles writes the same
                // `remaining` slots, so for num_product_fanin > 2 later pairs
                // would overwrite earlier ones — confirm this is only
                // exercised with fanin 2 (the chunks_exact(2) pairing
                // suggests so).
                input_layer.chunks_exact(2).for_each(|f| {
                    match (f[0].evaluations(), f[1].evaluations()) {
                        (FieldType::Ext(f1), FieldType::Ext(f2)) => {
                            let start: usize = index * output_len;
                            // the original's `if i == 0 { .. } else { .. }`
                            // branches here were byte-identical; collapsed
                            // into the single shared path
                            (start..(start + output_len))
                                .into_par_iter()
                                .zip(remaining.par_iter_mut())
                                .with_min_len(MIN_PAR_SIZE)
                                .for_each(|(index, evaluations)| {
                                    evaluations.write(f1[index] * f2[index]);
                                });
                        }
                        _ => unreachable!("must be extension field"),
                    }
                });
                // SAFETY: the zip above pairs a range of length `output_len`
                // with the spare capacity, so exactly the first `output_len`
                // entries have been initialized via `MaybeUninit::write`.
                unsafe {
                    evaluations.set_len(output_len);
                }
                evaluations.into_mle()
            })
            .collect_vec();
        wit_layers.push(output_layer);
    }
    // return with the output (smallest) layer first
    wit_layers.reverse();
    wit_layers
}
#[tracing::instrument(
    skip_all,
    name = "build_main_witness",
    fields(profiling_2),
    level = "trace"
)]
/// Build the main output witness polynomials for one chip by running its
/// GKR circuit over the committed witness / fixed / public-input columns.
///
/// Panics when the composed system has no GKR circuit, declares no
/// read/write/lookup expressions, or when the column counts and lengths
/// disagree with the constraint system.
pub fn build_main_witness<
    'a,
    E: ExtensionField,
    PCS: PolynomialCommitmentScheme<E>,
    PB: ProverBackend<E = E, Pcs = PCS> + 'static,
    PD: ProverDevice<PB>,
>(
    composed_cs: &ComposedConstrainSystem<E>,
    input: &ProofInput<'a, PB>,
    challenges: &[E; 2],
) -> Vec<Arc<PB::MultilinearPoly<'a>>> {
    let ComposedConstrainSystem {
        zkvm_v1_css: cs,
        gkr_circuit,
    } = composed_cs;
    let log2_num_instances = input.log2_num_instances();
    // rotation adds extra variables on top of the instance dimension
    let num_var_with_rotation = log2_num_instances + composed_cs.rotation_vars().unwrap_or(0);
    // sanity check
    assert_eq!(input.witness.len(), cs.num_witin as usize);
    // structural witness can be empty. In this case they are `eq`, and will be filled later
    assert!(
        input.structural_witness.len() == cs.num_structural_witin as usize
            || input.structural_witness.is_empty(),
    );
    assert_eq!(input.fixed.len(), cs.num_fixed);
    let Some(gkr_circuit) = gkr_circuit else {
        panic!("empty gkr-iop")
    };
    // circuit must have at least one read/write/lookup
    assert!(
        cs.r_expressions.len()
            + cs.w_expressions.len()
            + cs.lk_expressions.len()
            + cs.r_table_expressions.len()
            + cs.w_table_expressions.len()
            + cs.lk_table_expressions.len()
            > 0,
        "assert circuit"
    );
    // gather only the public-input mles this circuit actually opens
    let pub_io_mles = cs
        .instance_openings
        .iter()
        .map(|instance| input.public_input[instance.0].clone())
        .collect_vec();
    // check all witness size are power of 2
    assert!(
        input
            .witness
            .iter()
            .chain(&input.structural_witness)
            .chain(&input.fixed)
            .chain(&pub_io_mles)
            .all(|v| { v.evaluations_len() == 1 << num_var_with_rotation })
    );
    let (_, gkr_circuit_out) = gkr_witness::<E, PCS, PB, PD>(
        gkr_circuit,
        &input.witness,
        &input.structural_witness,
        &input.fixed,
        &pub_io_mles,
        &input.pub_io_evals,
        challenges,
    );
    // unwrap GKRCircuitOutput(LayerWitness(Vec<_>)) down to the output polys
    gkr_circuit_out.0.0
}
/// Evaluate a GKR circuit layer by layer from the phase-1 witness, producing
/// every layer's input witness plus the circuit's final output mles.
///
/// `witness_mle_flatten` is a flat table of optional mles indexed by the
/// circuit's evaluation-expression ids; each layer reads its inputs from it
/// and writes its outputs back into it.
pub fn gkr_witness<
    'b,
    E: ExtensionField,
    PCS: PolynomialCommitmentScheme<E>,
    PB: ProverBackend<E = E, Pcs = PCS> + 'static,
    PD: ProverDevice<PB>,
>(
    circuit: &GKRCircuit<E>,
    phase1_witness_group: &[Arc<PB::MultilinearPoly<'b>>],
    structural_witness: &[Arc<PB::MultilinearPoly<'b>>],
    fixed: &[Arc<PB::MultilinearPoly<'b>>],
    pub_io_mles: &[Arc<PB::MultilinearPoly<'b>>],
    pub_io_evals: &[Either<E::BaseField, E>],
    challenges: &[E],
) -> (GKRCircuitWitness<'b, PB>, GKRCircuitOutput<'b, PB>) {
    // layer order from output to input
    let mut layer_wits = Vec::<LayerWitness<PB>>::with_capacity(circuit.layers.len() + 1);
    let mut witness_mle_flatten = vec![None; circuit.n_evaluations];
    // set input to witness_mle_flatten via first layer in_eval_expr
    // (circuit.layers is stored output-first, so .last() is the input layer)
    if let Some(first_layer) = circuit.layers.last() {
        // process witin: the first n_witin ids map to the phase-1 witnesses
        first_layer
            .in_eval_expr
            .iter()
            .take(first_layer.n_witin)
            .zip_eq(phase1_witness_group.iter())
            .for_each(|(index, witin_mle)| {
                witness_mle_flatten[*index] = Some(witin_mle.clone());
            });
        // next n_fixed ids map to the fixed columns
        first_layer
            .in_eval_expr
            .iter()
            .skip(first_layer.n_witin)
            .take(first_layer.n_fixed)
            .zip_eq(fixed.iter())
            .for_each(|(index, fixed_mle)| {
                witness_mle_flatten[*index] = Some(fixed_mle.clone());
            });
        // then n_instance ids map to the public-input mles
        first_layer
            .in_eval_expr
            .iter()
            .skip(first_layer.n_witin + first_layer.n_fixed)
            .take(first_layer.n_instance)
            .zip_eq(pub_io_mles.iter())
            .for_each(|(index, pubio_mle)| {
                witness_mle_flatten[*index] = Some(pubio_mle.clone());
            });
        // XXX currently fixed poly not support in layers > 1
        // TODO process fixed (and probably short) mle
        //
        // first_layer
        //     .in_eval_expr
        //     .par_iter()
        //     .enumerate()
        //     .skip(phase1_witness_group.len())
        //     .map(|(index, witin)| {
        //         (
        //             *witin,
        //             Some(
        //                 fixed[index - phase1_witness_group.len()]
        //                     .iter()
        //                     .cycle()
        //                     .cloned()
        //                     .take(num_instances_with_rotation)
        //                     .collect_vec()
        //                     .into_mle()
        //                     .into(),
        //             ),
        //         )
        //     })
        //     .collect::<HashMap<_, _>>()
        //     .into_iter()
        //     .for_each(|(witin, optional_mle)| witness_mle_flatten[witin] = optional_mle);
    }
    // generate all layer witness from input to output
    for (i, layer) in circuit.layers.iter().rev().enumerate() {
        tracing::debug!("generating input {i} layer with layer name {}", layer.name);
        // process in_evals to prepare layer witness
        // This should assume the input of the first layer is the phase1 witness of the circuit.
        let current_layer_wits = layer
            .in_eval_expr
            .iter()
            .map(|witin| {
                witness_mle_flatten[*witin]
                    .clone()
                    .expect("witness must exist")
            })
            .chain(if i == 0 {
                // only supply structural witness for first layer
                // TODO figure out how to support > 1 GKR layers
                Either::Left(structural_witness.iter().cloned())
            } else {
                Either::Right(iter::empty())
            })
            .collect_vec();
        assert_eq!(
            current_layer_wits.len(),
            layer.n_witin
                + layer.n_fixed
                + layer.n_instance
                + if i == 0 { layer.n_structural_witin } else { 0 }
        );
        // infer current layer output
        let current_layer_output: Vec<Arc<PB::MultilinearPoly<'b>>> =
            <PD as ProtocolWitnessGeneratorProver<PB>>::layer_witness(
                layer,
                &current_layer_wits,
                pub_io_evals,
                challenges,
            );
        layer_wits.push(LayerWitness::new(current_layer_wits, vec![]));
        // process out to prepare output witness: store each produced mle
        // under the eval id the next layer will read it from
        layer
            .out_sel_and_eval_exprs
            .iter()
            .flat_map(|(_, out_eval)| out_eval)
            .zip_eq(&current_layer_output)
            .for_each(|(out_eval, out_mle)| match out_eval {
                EvalExpression::Single(out) | EvalExpression::Linear(out, _, _) => {
                    witness_mle_flatten[*out] = Some(out_mle.clone());
                }
                EvalExpression::Zero => { // zero expression
                    // do nothing on zero expression
                }
                other => unimplemented!("{:?}", other),
            });
    }
    // layers were pushed input-first; flip back to output-first order
    layer_wits.reverse();
    // initialize a vector to store the final outputs of the GKR circuit.
    let mut gkr_out_well_order = vec![Arc::default(); circuit.final_out_evals.len()];
    circuit
        .final_out_evals
        .iter()
        .for_each(|out| gkr_out_well_order[*out] = witness_mle_flatten[*out].clone().unwrap());
    (
        GKRCircuitWitness { layers: layer_wits },
        GKRCircuitOutput(LayerWitness(gkr_out_well_order)),
    )
}
#[cfg(test)]
mod tests {
use ff_ext::{FieldInto, GoldilocksExt2};
use itertools::Itertools;
use multilinear_extensions::{
commutative_op_mle_pair,
mle::{ArcMultilinearExtension, FieldType, IntoMLE, MultilinearExtension},
smart_slice::SmartSlice,
util::ceil_log2,
};
use p3::field::FieldAlgebra;
use crate::scheme::utils::{
infer_tower_logup_witness, infer_tower_product_witness, interleaving_mles_to_mles,
};
#[test]
fn test_infer_tower_witness() {
type E = GoldilocksExt2;
let num_product_fanin = 2;
let last_layer: Vec<MultilinearExtension<E>> = vec![
vec![E::ONE, E::from_canonical_u64(2u64)].into_mle(),
vec![E::from_canonical_u64(3u64), E::from_canonical_u64(4u64)].into_mle(),
];
let num_vars = ceil_log2(last_layer[0].evaluations().len()) + 1;
let res = infer_tower_product_witness(num_vars, last_layer.clone(), 2);
let (left, right) = (&res[0][0], &res[0][1]);
let final_product = commutative_op_mle_pair!(
|left, right| {
assert!(left.len() == 1 && right.len() == 1);
left[0] * right[0]
},
|out| out.into()
);
let expected_final_product: E = last_layer
.iter()
.map(|f| match f.evaluations() {
FieldType::Ext(e) => e.iter().copied().reduce(|a, b| a * b).unwrap(),
_ => unreachable!(""),
})
.product();
assert_eq!(res.len(), num_vars);
assert!(
res.iter()
.all(|layer_wit| layer_wit.len() == num_product_fanin)
);
assert_eq!(final_product, expected_final_product);
}
#[test]
fn test_interleaving_mles_to_mles() {
    type E = GoldilocksExt2;
    let num_product_fanin = 2;
    // input: 4 mles of 2 limbs each -> [[1, 2], [3, 4], [5, 6], [7, 8]]
    let input_mles: Vec<ArcMultilinearExtension<E>> = (0..4u64)
        .map(|i| {
            vec![
                E::from_canonical_u64(2 * i + 1),
                E::from_canonical_u64(2 * i + 2),
            ]
            .into_mle()
            .into()
        })
        .collect();
    let res = interleaving_mles_to_mles(&input_mles, 2, num_product_fanin, E::ONE);
    // interleaving groups the i-th limb of every mle: [[1, 3, 5, 7], [2, 4, 6, 8]]
    assert_eq!(
        res[0].get_ext_field_vec(),
        [1u64, 3, 5, 7].map(E::from_canonical_u64).to_vec(),
    );
    assert_eq!(
        res[1].get_ext_field_vec(),
        [2u64, 4, 6, 8].map(E::from_canonical_u64).to_vec(),
    );
}
#[test]
fn test_interleaving_mles_to_mles_padding() {
    type E = GoldilocksExt2;
    let num_product_fanin = 2;
    // Turns raw u64 values into the expected extension-field vector.
    let expect =
        |vals: [u64; 4]| vals.into_iter().map(E::from_canonical_u64).collect::<Vec<_>>();

    // case 1: limb-level padding.
    // input mles: [[1, 2], [3, 4], [5, 6]]
    let input_mles: Vec<ArcMultilinearExtension<E>> = [[1u64, 2], [3, 4], [5, 6]]
        .into_iter()
        .map(|limbs| {
            limbs
                .into_iter()
                .map(E::from_canonical_u64)
                .collect::<Vec<_>>()
                .into_mle()
                .into()
        })
        .collect();
    let res = interleaving_mles_to_mles(&input_mles, 2, num_product_fanin, E::ZERO);
    // the missing fourth mle is padded with zero: [[1, 3, 5, 0], [2, 4, 6, 0]]
    assert_eq!(res[0].get_ext_field_vec(), expect([1, 3, 5, 0]));
    assert_eq!(res[1].get_ext_field_vec(), expect([2, 4, 6, 0]));

    // case 2: instance-level padding.
    // input mles: [[1, 0], [3, 0], [5, 0]] with only one real instance
    let input_mles: Vec<ArcMultilinearExtension<E>> = [1u64, 3, 5]
        .into_iter()
        .map(|v| vec![E::from_canonical_u64(v), E::ZERO].into_mle().into())
        .collect();
    let res = interleaving_mles_to_mles(&input_mles, 1, num_product_fanin, E::ONE);
    // everything past the single instance is padded with one:
    // [[1, 3, 5, 1], [1, 1, 1, 1]]
    assert_eq!(res[0].get_ext_field_vec(), expect([1, 3, 5, 1]));
    assert_eq!(res[1].get_ext_field_vec(), vec![E::ONE; 4]);
}
#[test]
fn test_interleaving_mles_to_mles_edgecases() {
    type E = GoldilocksExt2;
    let num_product_fanin = 2;
    // single instance spread over 2 single-evaluation mles: [[2], [3]]
    let input_mles: Vec<ArcMultilinearExtension<E>> = [2u64, 3]
        .into_iter()
        .map(|v| vec![E::from_canonical_u64(v)].into_mle().into())
        .collect();
    let res = interleaving_mles_to_mles(&input_mles, 1, num_product_fanin, E::ONE);
    // expected: [[2, 3], [1, 1]] — the second output mle is all padding
    assert_eq!(
        res[0].get_ext_field_vec(),
        [2u64, 3].map(E::from_canonical_u64).to_vec(),
    );
    assert_eq!(res[1].get_ext_field_vec(), vec![E::ONE; 2]);
}
#[test]
fn test_infer_tower_logup_witness() {
    type E = GoldilocksExt2;
    let num_vars = 2;
    // denominator columns: q1 = [1, 2, 3, 4], q2 = [5, 6, 7, 8]
    let q: Vec<MultilinearExtension<E>> = [[1u64, 2, 3, 4], [5, 6, 7, 8]]
        .into_iter()
        .map(|col| {
            col.into_iter()
                .map(E::from_canonical_u64)
                .collect_vec()
                .into_mle()
        })
        .collect();
    let mut res = infer_tower_logup_witness(None, q);
    assert_eq!(num_vars + 1, res.len());

    // Helper: wrap raw u64 values as an owned ext-field evaluation vector.
    let ext = |vals: &[u64]| {
        FieldType::<E>::Ext(SmartSlice::Owned(
            vals.iter().copied().map(E::from_canonical_u64).collect(),
        ))
    };

    // input layer: numerators default to all-ones (p was `None`),
    // denominators are q exactly as supplied.
    let layer = res.pop().unwrap();
    assert_eq!(layer[0].evaluations().clone(), ext(&[1, 1, 1, 1]));
    assert_eq!(layer[1].evaluations().clone(), ext(&[1, 1, 1, 1]));
    assert_eq!(layer[2].evaluations().clone(), ext(&[1, 2, 3, 4]));
    assert_eq!(layer[3].evaluations().clone(), ext(&[5, 6, 7, 8]));

    // middle layer: fractions p/q added pairwise across the two columns.
    let layer = res.pop().unwrap();
    // p1 = [1 + 5, 2 + 6]
    assert_eq!(layer[0].evaluations().clone(), ext(&[1 + 5, 2 + 6]));
    // p2 = [3 + 7, 4 + 8]
    assert_eq!(layer[1].evaluations().clone(), ext(&[3 + 7, 4 + 8]));
    // q1 = [1 * 5, 2 * 6]
    assert_eq!(layer[2].evaluations().clone(), ext(&[1 * 5, 2 * 6]));
    // q2 = [3 * 7, 4 * 8]
    assert_eq!(layer[3].evaluations().clone(), ext(&[3 * 7, 4 * 8]));

    // output layer: one more pairwise addition of fractions.
    let layer = res.pop().unwrap();
    // p1 = p11 * q12 + p12 * q11
    assert_eq!(
        layer[0].evaluations().clone(),
        ext(&[(1 + 5) * (3 * 7) + (3 + 7) * (1 * 5)])
    );
    // p2 = p21 * q22 + p22 * q21
    assert_eq!(
        layer[1].evaluations().clone(),
        ext(&[(2 + 6) * (4 * 8) + (4 + 8) * (2 * 6)])
    );
    // q1 = q11 * q12
    assert_eq!(layer[2].evaluations().clone(), ext(&[(1 * 5) * (3 * 7)]));
    // q2 = q21 * q22
    assert_eq!(layer[3].evaluations().clone(), ext(&[(4 * 8) * (2 * 6)]));
}
}
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
scroll-tech/ceno | https://github.com/scroll-tech/ceno/blob/ce97cf805a131db43a6d3d56a2fd0506a6dc8431/ceno_zkvm/src/scheme/constants.rs | ceno_zkvm/src/scheme/constants.rs | pub(crate) const MIN_PAR_SIZE: usize = 64;
/// Fan-in of each product-tower layer (value 2: each node combines two
/// children). NOTE(review): must agree with the tower-witness builders — confirm.
pub const NUM_FANIN: usize = 2;
/// Fan-in of each logup (lookup-argument) tower layer; a separate constant
/// even though it currently equals `NUM_FANIN`.
pub const NUM_FANIN_LOGUP: usize = 2;
/// Cap on the number of variables (log2 instance count) per circuit.
/// NOTE(review): semantics inferred from the name — confirm at use sites.
pub const MAX_NUM_VARIABLES: usize = 24;
/// Maximum bit width handled by the dynamic range check.
/// NOTE(review): inferred from the name — confirm at use sites.
pub const DYNAMIC_RANGE_MAX_BITS: usize = 18;
/// Degree of the septic (degree-7) field extension.
pub const SEPTIC_EXTENSION_DEGREE: usize = 7;
/// Mles for one Jacobian point over the septic extension: 3 coordinates
/// (presumably x, y, z) times `SEPTIC_EXTENSION_DEGREE` limbs each.
pub const SEPTIC_JACOBIAN_NUM_MLES: usize = 3 * SEPTIC_EXTENSION_DEGREE;
| rust | Apache-2.0 | ce97cf805a131db43a6d3d56a2fd0506a6dc8431 | 2026-01-04T20:23:30.257242Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.