repo stringlengths 6 65 | file_url stringlengths 81 311 | file_path stringlengths 6 227 | content stringlengths 0 32.8k | language stringclasses 1
value | license stringclasses 7
values | commit_sha stringlengths 40 40 | retrieved_at stringdate 2026-01-04 15:31:58 2026-01-04 20:25:31 | truncated bool 2
classes |
|---|---|---|---|---|---|---|---|---|
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/runtime/entrypoint/src/lib.rs | runtime/entrypoint/src/lib.rs | //! Ported from Entrypoint for SP1 zkVM.
#![feature(asm_experimental_arch)]
pub mod heap;
pub mod syscalls;
pub mod io {
pub use zkm_precompiles::io::*;
}
pub mod precompiles {
pub use zkm_precompiles::*;
}
extern crate alloc;
/// Declares the guest program's entry point.
///
/// `$path` must name a `fn()`. The macro stores it in `ZKVM_ENTRY`, installs
/// the bump allocator from `heap::SimpleAlloc` as the global allocator, and
/// generates a `#[no_mangle] start` symbol for the runtime's `main` shim to
/// call.
#[macro_export]
macro_rules! entrypoint {
($path:path) => {
// The user-supplied main function, resolved at macro expansion time.
const ZKVM_ENTRY: fn() = $path;
use $crate::heap::SimpleAlloc;
// Install the zkVM bump allocator for all heap allocations.
#[global_allocator]
static HEAP: SimpleAlloc = SimpleAlloc;
mod zkvm_generated_main {
// `start` is the symbol the zkVM runtime calls into (see the `extern "C"`
// declaration in the runtime's `main`).
#[no_mangle]
fn start() {
super::ZKVM_ENTRY()
}
}
};
}
mod libm;
/// The number of 32 bit words that the public values digest is composed of.
pub const PV_DIGEST_NUM_WORDS: usize = 8;
/// The number of 32 bit words in a Poseidon digest (presumably — TODO confirm
/// against the prover's Poseidon configuration).
pub const POSEIDON_NUM_WORDS: usize = 8;
// Guest-side runtime support, compiled only for the zkVM target.
#[cfg(target_os = "zkvm")]
mod zkvm {
use crate::syscalls::{sys_rand, syscall_halt};
use bytemuck;
use getrandom::{register_custom_getrandom, Error};
use sha2::{Digest, Sha256};
// Incremental hasher over the program's committed public values.
// NOTE(review): `static mut` with unsynchronized access — presumably sound
// because the zkVM executes a single thread; confirm.
pub static mut PUBLIC_VALUES_HASHER: Option<Sha256> = None;
#[cfg(not(feature = "interface"))]
#[no_mangle]
fn main() {
// Initialize the public-values hasher, run the user program via the
// `start` symbol generated by `entrypoint!`, then halt with exit code 0.
unsafe {
PUBLIC_VALUES_HASHER = Some(Sha256::new());
extern "C" {
fn start();
}
start()
}
syscall_halt(0);
}
// Assembly implementations of memset/memcpy for the guest target.
core::arch::global_asm!(include_str!("memset.s"));
core::arch::global_asm!(include_str!("memcpy.s"));
// Custom `getrandom` backend that draws bytes from the (deterministic,
// insecure) `sys_rand` syscall.
fn zkvm_getrandom(s: &mut [u8]) -> Result<(), Error> {
if s.is_empty() {
return Ok(());
}
// Split into an unaligned head, a u32-aligned middle, and an unaligned tail.
let (head, aligned, tail) = bytemuck::pod_align_to_mut::<_, u32>(s);
// Fill the aligned portion of the dest buffer with random words.
// sys_rand uses copy-in to fill the buffer at 4-words per cycle.
if aligned.len() > 0 {
unsafe {
sys_rand(aligned.as_mut_ptr(), aligned.len());
}
}
// Up to 4 bytes may be split between the head and tail.
// Sample an additional word and do an unaligned write to fill the last parts.
if head.len() > 0 || tail.len() > 0 {
assert!(head.len() < 4);
assert!(tail.len() < 4);
let mut words = [0u32; 2];
unsafe {
sys_rand(words.as_mut_ptr(), 2);
}
head.copy_from_slice(&words[0].to_ne_bytes()[..head.len()]);
tail.copy_from_slice(&words[1].to_ne_bytes()[..tail.len()]);
}
Ok(())
}
register_custom_getrandom!(zkvm_getrandom);
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/runtime/entrypoint/src/libm.rs | runtime/entrypoint/src/libm.rs | //! Ported from Entrypoint for SP1 zkVM.
//!
#[no_mangle]
pub extern "C" fn acos(x: f64) -> f64 {
libm::acos(x)
}
#[no_mangle]
pub extern "C" fn acosf(x: f32) -> f32 {
libm::acosf(x)
}
#[no_mangle]
pub extern "C" fn acosh(x: f64) -> f64 {
libm::acosh(x)
}
#[no_mangle]
pub extern "C" fn acoshf(x: f32) -> f32 {
libm::acoshf(x)
}
#[no_mangle]
pub extern "C" fn asin(x: f64) -> f64 {
libm::asin(x)
}
#[no_mangle]
pub extern "C" fn asinf(x: f32) -> f32 {
libm::asinf(x)
}
#[no_mangle]
pub extern "C" fn asinh(x: f64) -> f64 {
libm::asinh(x)
}
#[no_mangle]
pub extern "C" fn asinhf(x: f32) -> f32 {
libm::asinhf(x)
}
#[no_mangle]
pub extern "C" fn atan(x: f64) -> f64 {
libm::atan(x)
}
#[no_mangle]
pub extern "C" fn atan2(y: f64, x: f64) -> f64 {
libm::atan2(y, x)
}
#[no_mangle]
pub extern "C" fn atan2f(y: f32, x: f32) -> f32 {
libm::atan2f(y, x)
}
#[no_mangle]
pub extern "C" fn atanf(x: f32) -> f32 {
libm::atanf(x)
}
#[no_mangle]
pub extern "C" fn atanh(x: f64) -> f64 {
libm::atanh(x)
}
#[no_mangle]
pub extern "C" fn atanhf(x: f32) -> f32 {
libm::atanhf(x)
}
#[no_mangle]
pub extern "C" fn cbrt(x: f64) -> f64 {
libm::cbrt(x)
}
#[no_mangle]
pub extern "C" fn cbrtf(x: f32) -> f32 {
libm::cbrtf(x)
}
#[no_mangle]
pub extern "C" fn ceil(x: f64) -> f64 {
libm::ceil(x)
}
#[no_mangle]
pub extern "C" fn ceilf(x: f32) -> f32 {
libm::ceilf(x)
}
#[no_mangle]
pub extern "C" fn copysign(x: f64, y: f64) -> f64 {
libm::copysign(x, y)
}
#[no_mangle]
pub extern "C" fn copysignf(x: f32, y: f32) -> f32 {
libm::copysignf(x, y)
}
#[no_mangle]
pub extern "C" fn cos(x: f64) -> f64 {
libm::cos(x)
}
#[no_mangle]
pub extern "C" fn cosf(x: f32) -> f32 {
libm::cosf(x)
}
#[no_mangle]
pub extern "C" fn cosh(x: f64) -> f64 {
libm::cosh(x)
}
#[no_mangle]
pub extern "C" fn coshf(x: f32) -> f32 {
libm::coshf(x)
}
#[no_mangle]
pub extern "C" fn erf(x: f64) -> f64 {
libm::erf(x)
}
#[no_mangle]
pub extern "C" fn erfc(x: f64) -> f64 {
libm::erfc(x)
}
#[no_mangle]
pub extern "C" fn erfcf(x: f32) -> f32 {
libm::erfcf(x)
}
#[no_mangle]
pub extern "C" fn erff(x: f32) -> f32 {
libm::erff(x)
}
#[no_mangle]
pub extern "C" fn exp(x: f64) -> f64 {
libm::exp(x)
}
#[no_mangle]
pub extern "C" fn exp2(x: f64) -> f64 {
libm::exp2(x)
}
#[no_mangle]
pub extern "C" fn exp2f(x: f32) -> f32 {
libm::exp2f(x)
}
#[no_mangle]
pub extern "C" fn exp10(x: f64) -> f64 {
libm::exp10(x)
}
#[no_mangle]
pub extern "C" fn exp10f(x: f32) -> f32 {
libm::exp10f(x)
}
#[no_mangle]
pub extern "C" fn expf(x: f32) -> f32 {
libm::expf(x)
}
#[no_mangle]
pub extern "C" fn expm1(x: f64) -> f64 {
libm::expm1(x)
}
#[no_mangle]
pub extern "C" fn expm1f(x: f32) -> f32 {
libm::expm1f(x)
}
#[no_mangle]
pub extern "C" fn fabs(x: f64) -> f64 {
libm::fabs(x)
}
#[no_mangle]
pub extern "C" fn fabsf(x: f32) -> f32 {
libm::fabsf(x)
}
#[no_mangle]
pub extern "C" fn fdim(x: f64, y: f64) -> f64 {
libm::fdim(x, y)
}
#[no_mangle]
pub extern "C" fn fdimf(x: f32, y: f32) -> f32 {
libm::fdimf(x, y)
}
#[no_mangle]
pub extern "C" fn floor(x: f64) -> f64 {
libm::floor(x)
}
#[no_mangle]
pub extern "C" fn floorf(x: f32) -> f32 {
libm::floorf(x)
}
#[no_mangle]
pub extern "C" fn fma(x: f64, y: f64, z: f64) -> f64 {
libm::fma(x, y, z)
}
#[no_mangle]
pub extern "C" fn fmaf(x: f32, y: f32, z: f32) -> f32 {
libm::fmaf(x, y, z)
}
#[no_mangle]
pub extern "C" fn fmax(x: f64, y: f64) -> f64 {
libm::fmax(x, y)
}
#[no_mangle]
pub extern "C" fn fmaxf(x: f32, y: f32) -> f32 {
libm::fmaxf(x, y)
}
#[no_mangle]
pub extern "C" fn fmin(x: f64, y: f64) -> f64 {
libm::fmin(x, y)
}
#[no_mangle]
pub extern "C" fn fminf(x: f32, y: f32) -> f32 {
libm::fminf(x, y)
}
#[no_mangle]
pub extern "C" fn fmod(x: f64, y: f64) -> f64 {
libm::fmod(x, y)
}
#[no_mangle]
pub extern "C" fn fmodf(x: f32, y: f32) -> f32 {
libm::fmodf(x, y)
}
#[no_mangle]
// NOTE(review): unlike the scalar wrappers this is not `extern "C"` and
// returns a tuple, which is not FFI-safe; despite the `#[no_mangle]` export
// it can only be called correctly from Rust code.
pub fn frexp(x: f64) -> (f64, i32) {
libm::frexp(x)
}
#[no_mangle]
// NOTE(review): Rust-ABI tuple return — not callable from C (see `frexp`).
pub fn frexpf(x: f32) -> (f32, i32) {
libm::frexpf(x)
}
#[no_mangle]
pub extern "C" fn hypot(x: f64, y: f64) -> f64 {
libm::hypot(x, y)
}
#[no_mangle]
pub extern "C" fn hypotf(x: f32, y: f32) -> f32 {
libm::hypotf(x, y)
}
#[no_mangle]
pub extern "C" fn ilogb(x: f64) -> i32 {
libm::ilogb(x)
}
#[no_mangle]
pub extern "C" fn ilogbf(x: f32) -> i32 {
libm::ilogbf(x)
}
#[no_mangle]
pub extern "C" fn j0(x: f64) -> f64 {
libm::j0(x)
}
#[no_mangle]
pub extern "C" fn j0f(x: f32) -> f32 {
libm::j0f(x)
}
#[no_mangle]
pub extern "C" fn j1(x: f64) -> f64 {
libm::j1(x)
}
#[no_mangle]
pub extern "C" fn j1f(x: f32) -> f32 {
libm::j1f(x)
}
#[no_mangle]
pub extern "C" fn jn(n: i32, x: f64) -> f64 {
libm::jn(n, x)
}
#[no_mangle]
pub extern "C" fn jnf(n: i32, x: f32) -> f32 {
libm::jnf(n, x)
}
#[no_mangle]
pub extern "C" fn ldexp(x: f64, n: i32) -> f64 {
libm::ldexp(x, n)
}
#[no_mangle]
pub extern "C" fn ldexpf(x: f32, n: i32) -> f32 {
libm::ldexpf(x, n)
}
#[no_mangle]
pub extern "C" fn lgamma(x: f64) -> f64 {
libm::lgamma(x)
}
#[no_mangle]
// NOTE(review): not `extern "C"` and returns a tuple (not FFI-safe); despite
// the `#[no_mangle]` export this can only be called correctly from Rust.
pub fn lgamma_r(x: f64) -> (f64, i32) {
libm::lgamma_r(x)
}
/// `lgammaf(3)` replacement; forwards to the pure-Rust `libm` crate.
///
/// Declared `extern "C"` so the exported symbol uses the C calling
/// convention, consistent with every other scalar wrapper in this file —
/// it was previously a plain Rust-ABI `fn`, which is unsound to call from C
/// despite the `#[no_mangle]` export. Rust callers are unaffected.
#[no_mangle]
pub extern "C" fn lgammaf(x: f32) -> f32 {
libm::lgammaf(x)
}
#[no_mangle]
// NOTE(review): not `extern "C"` and returns a tuple (not FFI-safe); despite
// the `#[no_mangle]` export this can only be called correctly from Rust.
pub fn lgammaf_r(x: f32) -> (f32, i32) {
libm::lgammaf_r(x)
}
#[no_mangle]
pub extern "C" fn log(x: f64) -> f64 {
libm::log(x)
}
#[no_mangle]
pub extern "C" fn log1p(x: f64) -> f64 {
libm::log1p(x)
}
#[no_mangle]
pub extern "C" fn log1pf(x: f32) -> f32 {
libm::log1pf(x)
}
#[no_mangle]
pub extern "C" fn log2(x: f64) -> f64 {
libm::log2(x)
}
#[no_mangle]
pub extern "C" fn log2f(x: f32) -> f32 {
libm::log2f(x)
}
#[no_mangle]
pub extern "C" fn log10(x: f64) -> f64 {
libm::log10(x)
}
#[no_mangle]
pub extern "C" fn log10f(x: f32) -> f32 {
libm::log10f(x)
}
#[no_mangle]
pub extern "C" fn logf(x: f32) -> f32 {
libm::logf(x)
}
#[no_mangle]
// NOTE(review): Rust-ABI tuple return — not FFI-safe, Rust callers only.
pub fn modf(x: f64) -> (f64, f64) {
libm::modf(x)
}
#[no_mangle]
// NOTE(review): Rust-ABI tuple return — not FFI-safe, Rust callers only.
pub fn modff(x: f32) -> (f32, f32) {
libm::modff(x)
}
#[no_mangle]
pub extern "C" fn nextafter(x: f64, y: f64) -> f64 {
libm::nextafter(x, y)
}
#[no_mangle]
pub extern "C" fn nextafterf(x: f32, y: f32) -> f32 {
libm::nextafterf(x, y)
}
#[no_mangle]
pub extern "C" fn pow(x: f64, y: f64) -> f64 {
libm::pow(x, y)
}
#[no_mangle]
pub extern "C" fn powf(x: f32, y: f32) -> f32 {
libm::powf(x, y)
}
#[no_mangle]
pub extern "C" fn remainder(x: f64, y: f64) -> f64 {
libm::remainder(x, y)
}
#[no_mangle]
pub extern "C" fn remainderf(x: f32, y: f32) -> f32 {
libm::remainderf(x, y)
}
#[no_mangle]
// NOTE(review): Rust-ABI tuple return — not FFI-safe, Rust callers only.
pub fn remquo(x: f64, y: f64) -> (f64, i32) {
libm::remquo(x, y)
}
#[no_mangle]
// NOTE(review): Rust-ABI tuple return — not FFI-safe, Rust callers only.
pub fn remquof(x: f32, y: f32) -> (f32, i32) {
libm::remquof(x, y)
}
#[no_mangle]
pub extern "C" fn round(x: f64) -> f64 {
libm::round(x)
}
#[no_mangle]
pub extern "C" fn roundf(x: f32) -> f32 {
libm::roundf(x)
}
#[no_mangle]
pub extern "C" fn scalbn(x: f64, n: i32) -> f64 {
libm::scalbn(x, n)
}
#[no_mangle]
pub extern "C" fn scalbnf(x: f32, n: i32) -> f32 {
libm::scalbnf(x, n)
}
#[no_mangle]
pub extern "C" fn sin(x: f64) -> f64 {
libm::sin(x)
}
#[no_mangle]
// NOTE(review): Rust-ABI tuple return — not FFI-safe, Rust callers only.
pub fn sincos(x: f64) -> (f64, f64) {
libm::sincos(x)
}
#[no_mangle]
// NOTE(review): Rust-ABI tuple return — not FFI-safe, Rust callers only.
pub fn sincosf(x: f32) -> (f32, f32) {
libm::sincosf(x)
}
#[no_mangle]
pub extern "C" fn sinf(x: f32) -> f32 {
libm::sinf(x)
}
#[no_mangle]
pub extern "C" fn sinh(x: f64) -> f64 {
libm::sinh(x)
}
#[no_mangle]
pub extern "C" fn sinhf(x: f32) -> f32 {
libm::sinhf(x)
}
#[no_mangle]
pub extern "C" fn sqrt(x: f64) -> f64 {
libm::sqrt(x)
}
#[no_mangle]
pub extern "C" fn sqrtf(x: f32) -> f32 {
libm::sqrtf(x)
}
#[no_mangle]
pub extern "C" fn tan(x: f64) -> f64 {
libm::tan(x)
}
#[no_mangle]
pub extern "C" fn tanf(x: f32) -> f32 {
libm::tanf(x)
}
#[no_mangle]
pub extern "C" fn tanh(x: f64) -> f64 {
libm::tanh(x)
}
#[no_mangle]
pub extern "C" fn tanhf(x: f32) -> f32 {
libm::tanhf(x)
}
#[no_mangle]
pub extern "C" fn tgamma(x: f64) -> f64 {
libm::tgamma(x)
}
#[no_mangle]
pub extern "C" fn tgammaf(x: f32) -> f32 {
libm::tgammaf(x)
}
#[no_mangle]
pub extern "C" fn trunc(x: f64) -> f64 {
libm::trunc(x)
}
#[no_mangle]
pub extern "C" fn truncf(x: f32) -> f32 {
libm::truncf(x)
}
#[no_mangle]
pub extern "C" fn y0(x: f64) -> f64 {
libm::y0(x)
}
#[no_mangle]
pub extern "C" fn y0f(x: f32) -> f32 {
libm::y0f(x)
}
#[no_mangle]
pub extern "C" fn y1(x: f64) -> f64 {
libm::y1(x)
}
#[no_mangle]
pub extern "C" fn y1f(x: f32) -> f32 {
libm::y1f(x)
}
#[no_mangle]
pub extern "C" fn yn(n: i32, x: f64) -> f64 {
libm::yn(n, x)
}
#[no_mangle]
pub extern "C" fn ynf(n: i32, x: f32) -> f32 {
libm::ynf(n, x)
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/runtime/entrypoint/src/heap.rs | runtime/entrypoint/src/heap.rs | //! Ported from Entrypoint for SP1 zkVM.
use core::alloc::{GlobalAlloc, Layout};
use crate::syscalls::sys_alloc_aligned;
/// A simple heap allocator.
///
/// Allocates memory from left to right, without any deallocation.
pub struct SimpleAlloc;
unsafe impl GlobalAlloc for SimpleAlloc {
// Delegate to the bump allocator; alignment is honored, memory is never
// reclaimed.
unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
sys_alloc_aligned(layout.size(), layout.align())
}
// Deliberate no-op: freed blocks are leaked, which is acceptable for a
// short-lived, single-run zkVM execution.
unsafe fn dealloc(&self, _: *mut u8, _: Layout) {}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/runtime/entrypoint/src/syscalls/halt.rs | runtime/entrypoint/src/syscalls/halt.rs | //! Ported from Entrypoint for SP1 zkVM.
use cfg_if::cfg_if;
cfg_if! {
if #[cfg(target_os = "zkvm")] {
use core::arch::asm;
}
}
/// Halts the program.
///
/// On the zkVM target this issues the HALT syscall with `exit_code` in `$4`
/// and never returns. On any other target it is unreachable — host code must
/// not call it.
#[allow(unused_variables)]
pub extern "C" fn syscall_halt(exit_code: u8) -> ! {
#[cfg(target_os = "zkvm")]
unsafe {
asm!(
"syscall",
in("$2") crate::syscalls::HALT,
in("$4") exit_code
);
// The syscall does not return; this satisfies the `!` return type.
unreachable!()
}
#[cfg(not(target_os = "zkvm"))]
unreachable!()
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/runtime/entrypoint/src/syscalls/io.rs | runtime/entrypoint/src/syscalls/io.rs | //! Ported from Entrypoint for SP1 zkVM.
cfg_if::cfg_if! {
if #[cfg(target_os = "zkvm")] {
use core::arch::asm;
}
}
/// Write data to the prover.
///
/// Issues the WRITE syscall with the file descriptor, buffer pointer, and
/// byte count in `$4`/`$5`/`$6`. Unreachable off the zkVM target.
#[allow(unused_variables)]
#[no_mangle]
pub extern "C" fn syscall_write(fd: u32, write_buf: *const u8, nbytes: usize) {
cfg_if::cfg_if! {
if #[cfg(target_os = "zkvm")] {
unsafe {
asm!(
"syscall",
in("$2") crate::syscalls::WRITE,
in("$4") fd,
in("$5") write_buf,
in("$6") nbytes,
);
}
} else {
unreachable!()
}
}
}
/// Returns the length in bytes of the next hint queued by the host.
///
/// The HINT_LEN syscall returns the length in `$2`. Unreachable off the
/// zkVM target.
#[allow(unused_variables)]
#[no_mangle]
pub extern "C" fn syscall_hint_len() -> usize {
#[cfg(target_os = "zkvm")]
unsafe {
let len;
asm!(
"syscall",
in("$2") crate::syscalls::HINT_LEN,
lateout("$2") len,
);
len
}
#[cfg(not(target_os = "zkvm"))]
unreachable!()
}
/// Reads `len` bytes of the current host hint into `ptr`.
///
/// `ptr` must point to at least `len` writable bytes; the syscall writes the
/// hint data there directly. Unreachable off the zkVM target.
#[allow(unused_variables)]
#[no_mangle]
pub extern "C" fn syscall_hint_read(ptr: *mut u8, len: usize) {
#[cfg(target_os = "zkvm")]
unsafe {
asm!(
"syscall",
in("$2") crate::syscalls::HINT_READ,
in("$4") ptr,
in("$5") len,
);
}
#[cfg(not(target_os = "zkvm"))]
unreachable!()
}
/// Submits a 32-byte claim digest to the VERIFY syscall.
#[allow(unused_variables)]
#[no_mangle]
pub extern "C" fn syscall_verify(claim_digest: &[u8; 32]) {
// Stage the digest in a local buffer before handing its address to the
// syscall. (`[..32]` is a full-range copy of the fixed-size array.)
let mut to_host = [0u8; 32];
to_host[..32].copy_from_slice(claim_digest);
cfg_if::cfg_if! {
if #[cfg(target_os = "zkvm")] {
unsafe {
asm!(
"syscall",
in("$2") crate::syscalls::VERIFY,
in("$5") to_host.as_ptr() as u32,
in("$6") 32u32,
)
}
} else {
unreachable!()
}
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/runtime/entrypoint/src/syscalls/memory.rs | runtime/entrypoint/src/syscalls/memory.rs | // Copyright 2023 RISC Zero, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// First address of reserved SYSTEM memory; the heap must stay strictly below it.
const SYSTEM_START: usize = 0x8000_0000;
/// Bump-allocates `bytes` bytes aligned to `align`, starting at the linker
/// symbol `_end`. Memory is never freed. Panics if the heap would grow into
/// SYSTEM memory.
#[allow(clippy::missing_safety_doc)]
#[no_mangle]
pub unsafe extern "C" fn sys_alloc_aligned(bytes: usize, align: usize) -> *mut u8 {
extern "C" {
// https://lld.llvm.org/ELF/linker_script.html#sections-command
static _end: u8;
}
// Pointer to next heap address to use, or 0 if the heap has not yet been
// initialized.
static mut HEAP_POS: usize = 0;
// SAFETY: Single threaded, so nothing else can touch this while we're working.
let mut heap_pos = unsafe { HEAP_POS };
if heap_pos == 0 {
heap_pos = unsafe { (&_end) as *const u8 as usize };
}
// Round up to `align`. NOTE(review): assumes `align` is a nonzero power of
// two (guaranteed by `core::alloc::Layout` for the in-crate caller);
// `align == 0` would underflow here.
let offset = heap_pos & (align - 1);
if offset != 0 {
heap_pos += align - offset;
}
let ptr = heap_pos as *mut u8;
heap_pos += bytes;
// Check to make sure heap doesn't collide with SYSTEM memory.
if SYSTEM_START < heap_pos {
panic!();
}
unsafe { HEAP_POS = heap_pos };
ptr
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/runtime/entrypoint/src/syscalls/sha256.rs | runtime/entrypoint/src/syscalls/sha256.rs | #[cfg(target_os = "zkvm")]
use core::arch::asm;
/// Executes the SHA-256 compression function on the given state.
/// (The previous comment said "Keccak256 permutation" — a copy-paste error;
/// this issues the SHA_COMPRESS syscall.)
///
/// `w` points to the message schedule and `state` to the hash state words.
///
/// ### Safety
///
/// The caller must ensure that `w` and `state` are valid pointers to data
/// that is aligned along a four byte boundary.
#[allow(unused_variables)]
#[no_mangle]
pub extern "C" fn syscall_sha256_compress(w: *mut u32, state: *mut u32) {
#[cfg(target_os = "zkvm")]
unsafe {
asm!(
"syscall",
in("$2") crate::syscalls::SHA_COMPRESS,
in("$4") w,
in("$5") state,
);
}
}
/// Executes the SHA-256 message-schedule extension on `w` via the SHA_EXTEND
/// syscall. Unreachable off the zkVM target.
#[allow(unused_variables)]
#[no_mangle]
pub extern "C" fn syscall_sha256_extend(w: *mut u32) {
#[cfg(target_os = "zkvm")]
unsafe {
asm!(
"syscall",
in("$2") crate::syscalls::SHA_EXTEND,
in("$4") w,
in("$5") 0
);
}
#[cfg(not(target_os = "zkvm"))]
unreachable!()
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/runtime/entrypoint/src/syscalls/keccak.rs | runtime/entrypoint/src/syscalls/keccak.rs | #[cfg(target_os = "zkvm")]
use core::arch::asm;
/// Executes the Keccak256 permutation on the given state.
///
/// `state` points to `len` input words and the output is written through
/// `result` (presumably a 32-byte digest — confirm against the prover's
/// KECCAK_PERMUTE handler).
///
/// ### Safety
///
/// The caller must ensure that `state` is valid pointer to data that is aligned along a four
/// byte boundary.
#[allow(unused_variables)]
#[no_mangle]
pub extern "C" fn syscall_keccak(state: *const u32, len: usize, result: *mut u8) {
#[cfg(target_os = "zkvm")]
unsafe {
asm!(
"syscall",
in("$2") crate::syscalls::KECCAK_PERMUTE,
in("$4") state,
in("$5") len,
in("$6") result,
);
}
#[cfg(not(target_os = "zkvm"))]
unreachable!()
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/runtime/entrypoint/src/syscalls/mod.rs | runtime/entrypoint/src/syscalls/mod.rs | //! Ported from Entrypoint for SP1 zkVM.
mod halt;
mod io;
mod keccak;
mod memory;
mod sha256;
mod sys;
pub use halt::*;
pub use io::*;
pub use keccak::*;
pub use memory::*;
pub use sha256::*;
pub use sys::*;
/// These codes MUST match the codes in `core/src/runtime/syscall.rs`. There is a derived test
/// that checks that the enum is consistent with the syscalls.
///
/// Halts the program.
pub const HALT: u32 = 4246u32;
/// Writes to a file descriptor. Currently only used for `STDOUT/STDERR`.
pub const WRITE: u32 = 4004u32;
/// Executes `HINT_LEN`.
pub const HINT_LEN: u32 = 0x00_00_00_F0;
/// Executes `HINT_READ`.
pub const HINT_READ: u32 = 0x00_00_00_F1;
/// Executes `VERIFY`. (Previous comment said `HINT_READ` — copy-paste error.)
pub const VERIFY: u32 = 0x00_00_00_F2;
/// Executes `KECCAK_PERMUTE`.
pub const KECCAK_PERMUTE: u32 = 0x00_01_01_09;
/// Executes `SHA_EXTEND`.
pub const SHA_EXTEND: u32 = 0x00_30_01_05;
/// Executes `SHA_COMPRESS`.
pub const SHA_COMPRESS: u32 = 0x00_01_01_06;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/runtime/entrypoint/src/syscalls/sys.rs | runtime/entrypoint/src/syscalls/sys.rs | //! Ported from Entrypoint for SP1 zkVM.
use std::sync::Mutex;
use lazy_static::lazy_static;
use rand::{rngs::StdRng, Rng, SeedableRng};
use crate::syscalls::{syscall_halt, syscall_write};
/// The random number generator seed for the zkVM.
///
/// The seed is a fixed constant, so the stream is fully deterministic (and
/// therefore insecure — see the warning printed by `sys_rand`).
///
/// In the future, we can pass in this seed from the host or have the verifier generate it.
const PRNG_SEED: u64 = 0x123456789abcdef0;
lazy_static! {
/// A lazy static to generate a global random number generator.
static ref RNG: Mutex<StdRng> = Mutex::new(StdRng::seed_from_u64(PRNG_SEED));
}
/// A lazy static to print a warning once for using the `sys_rand` system call.
static SYS_RAND_WARNING: std::sync::Once = std::sync::Once::new();
/// Generates random bytes.
///
/// Fills `recv_buf` word-by-word from the globally seeded `StdRng`; because
/// the seed is the fixed `PRNG_SEED`, output is deterministic and NOT
/// cryptographically secure (a warning is printed on first use).
///
/// # Safety
///
/// Make sure that `buf` has at least `nwords` words.
#[no_mangle]
pub unsafe extern "C" fn sys_rand(recv_buf: *mut u32, words: usize) {
SYS_RAND_WARNING.call_once(|| {
println!("WARNING: Using insecure random number generator.");
});
let mut rng = RNG.lock().unwrap();
for i in 0..words {
let element = recv_buf.add(i);
*element = rng.gen();
}
}
/// Writes `len` bytes of the panic message at `msg_ptr` to stderr (fd 2),
/// then halts the program with exit code 1.
#[allow(clippy::missing_safety_doc)]
#[no_mangle]
pub unsafe extern "C" fn sys_panic(msg_ptr: *const u8, len: usize) -> ! {
sys_write(2, msg_ptr, len);
syscall_halt(1);
}
/// Stub for the `sys_getenv` syscall: environment variables are not available
/// inside the zkVM, so this always reports zero words written and leaves
/// `recv_buf` untouched.
#[allow(unused_variables)]
#[no_mangle]
pub const fn sys_getenv(
recv_buf: *mut u32,
words: usize,
varname: *const u8,
varname_len: usize,
) -> usize {
0
}
/// Stub for the `sys_alloc_words` syscall: word-granular allocation is not
/// supported, so a null pointer is always returned.
#[allow(unused_variables)]
#[no_mangle]
pub const fn sys_alloc_words(nwords: usize) -> *mut u32 {
std::ptr::null_mut()
}
/// Thin wrapper over `syscall_write`: writes `nbytes` bytes from `write_buf`
/// to file descriptor `fd`.
#[allow(unused_unsafe)]
#[no_mangle]
pub fn sys_write(fd: u32, write_buf: *const u8, nbytes: usize) {
unsafe {
syscall_write(fd, write_buf, nbytes);
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/build/src/lib.rs | build/src/lib.rs | mod build;
mod command;
mod utils;
use build::build_program_internal;
pub use build::execute_build_program;
use clap::Parser;
pub const BUILD_TARGET: &str = "mips-zkm-zkvm-elf";
// const DEFAULT_TAG: &str = "v1.0.0";
pub const DEFAULT_OUTPUT_DIR: &str = "elf";
pub const HELPER_TARGET_SUBDIR: &str = "elf-compilation";
/// Compile a ZKM program.
///
/// Additional arguments are useful for configuring the build process, including options for using
/// Docker, specifying binary and ELF names, ignoring Rust version checks, and enabling specific
/// features.
#[derive(Clone, Parser, Debug)]
pub struct BuildArgs {
#[clap(
long,
action,
value_delimiter = ',',
help = "Space or comma separated list of features to activate"
)]
pub features: Vec<String>,
#[clap(long, action, help = "Do not activate the `default` feature")]
pub no_default_features: bool,
#[clap(long, action, help = "Ignore `rust-version` specification in packages")]
pub ignore_rust_version: bool,
#[clap(long, action, help = "Assert that `Cargo.lock` will remain unchanged")]
pub locked: bool,
#[clap(
alias = "bin",
long,
action,
help = "Build only the specified binary",
default_value = ""
)]
pub binary: String,
#[clap(long, action, help = "ELF binary name", default_value = "")]
pub elf_name: String,
#[clap(
alias = "out-dir",
long,
action,
help = "Copy the compiled ELF to this directory",
default_value = DEFAULT_OUTPUT_DIR
)]
pub output_directory: String,
}
/// Mirrors the clap defaults so programmatic construction and CLI parsing
/// produce identical arguments. Field order follows the struct declaration.
impl Default for BuildArgs {
fn default() -> Self {
Self {
features: Vec::new(),
no_default_features: false,
ignore_rust_version: false,
locked: false,
binary: String::new(),
elf_name: String::new(),
output_directory: DEFAULT_OUTPUT_DIR.to_string(),
}
}
}
/// Builds the program if the program at the specified path, or one of its dependencies, changes.
///
/// This function monitors the program and its dependencies for changes. If any changes are
/// detected, it triggers a rebuild of the program.
///
/// # Arguments
///
/// * `path` - A string slice that holds the path to the program directory.
///
/// This function is useful for automatically rebuilding the program during development
/// when changes are made to the source code or its dependencies.
///
/// Set the `ZKM_SKIP_PROGRAM_BUILD` environment variable to `true` to skip building the program.
pub fn build_program(path: &str) {
// `None` makes the internal helper fall back to `BuildArgs::default()`.
build_program_internal(path, None)
}
/// Builds the program with the given arguments if the program at path, or one of its dependencies,
/// changes.
///
/// # Arguments
///
/// * `path` - A string slice that holds the path to the program directory.
/// * `args` - A [`BuildArgs`] struct that contains various build configuration options.
///
/// Set the `ZKM_SKIP_PROGRAM_BUILD` environment variable to `true` to skip building the program.
pub fn build_program_with_args(path: &str, args: BuildArgs) {
build_program_internal(path, Some(args))
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/build/src/build.rs | build/src/build.rs | use std::path::PathBuf;
use anyhow::Result;
use cargo_metadata::camino::Utf8PathBuf;
use crate::{
command::{local::create_local_command, utils::execute_command},
utils::{cargo_rerun_if_changed, copy_elf_to_output_dir, current_datetime},
BuildArgs,
};
/// Build a program with the specified [`BuildArgs`]. The `program_dir` is specified as an argument
/// when the program is built via `build_program`.
///
/// # Arguments
///
/// * `args` - A reference to a `BuildArgs` struct that holds various arguments used for building
/// the program.
/// * `program_dir` - An optional `PathBuf` specifying the directory of the program to be built.
///
/// # Returns
///
/// * `Result<Utf8PathBuf>` - The path to the built program as a `Utf8PathBuf` on success, or an
/// error on failure.
pub fn execute_build_program(
args: &BuildArgs,
program_dir: Option<PathBuf>,
) -> Result<Utf8PathBuf> {
// If the program directory is not specified, use the current directory.
let program_dir = program_dir
.unwrap_or_else(|| std::env::current_dir().expect("Failed to get current directory."));
let program_dir: Utf8PathBuf = program_dir
.try_into()
.expect("Failed to convert PathBuf to Utf8PathBuf");
// Get the program metadata.
let program_metadata_file = program_dir.join("Cargo.toml");
let mut program_metadata_cmd = cargo_metadata::MetadataCommand::new();
let program_metadata = program_metadata_cmd
.manifest_path(program_metadata_file)
.exec()?;
// Build locally. NOTE(review): only a local build command exists here; the
// Docker path mentioned in other comments is not implemented.
let cmd = create_local_command(args, &program_dir, &program_metadata);
execute_command(cmd)?;
copy_elf_to_output_dir(args, &program_metadata)
}
/// Internal helper function to build the program with or without arguments.
///
/// Skips the build when `ZKM_SKIP_PROGRAM_BUILD=true` or when invoked under
/// clippy; panics if the underlying cargo build fails.
pub(crate) fn build_program_internal(path: &str, args: Option<BuildArgs>) {
// Get the root package name and metadata.
let program_dir = std::path::Path::new(path);
let metadata_file = program_dir.join("Cargo.toml");
let mut metadata_cmd = cargo_metadata::MetadataCommand::new();
let metadata = metadata_cmd.manifest_path(metadata_file).exec().unwrap();
let root_package = metadata.root_package();
let root_package_name = root_package
.as_ref()
.map(|p| p.name.as_str())
.unwrap_or("Program");
// Skip the program build if the ZKM_SKIP_PROGRAM_BUILD environment variable is set to true.
let skip_program_build = std::env::var("ZKM_SKIP_PROGRAM_BUILD")
.map(|v| v.eq_ignore_ascii_case("true"))
.unwrap_or(false);
if skip_program_build {
println!(
"cargo:warning=Build skipped for {} at {} due to ZKM_SKIP_PROGRAM_BUILD flag",
root_package_name,
current_datetime()
);
return;
}
// Activate the build command if the dependencies change.
cargo_rerun_if_changed(&metadata, program_dir);
// Check if RUSTC_WORKSPACE_WRAPPER is set to clippy-driver (i.e. if `cargo clippy` is the
// current compiler). If so, don't execute `cargo prove build` because it breaks
// rust-analyzer's `cargo clippy` feature.
let is_clippy_driver = std::env::var("RUSTC_WORKSPACE_WRAPPER")
.map(|val| val.contains("clippy-driver"))
.unwrap_or(false);
if is_clippy_driver {
println!("cargo:warning=Skipping build due to clippy invocation.");
return;
}
// Build the program with the given arguments.
let path_output = if let Some(args) = args {
execute_build_program(&args, Some(program_dir.to_path_buf()))
} else {
execute_build_program(&BuildArgs::default(), Some(program_dir.to_path_buf()))
};
if let Err(err) = path_output {
panic!("Failed to build Zkm program: {}.", err);
}
println!(
"cargo:warning={} built at {}",
root_package_name,
current_datetime()
);
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/build/src/utils.rs | build/src/utils.rs | use std::{fs, path::Path};
use anyhow::Result;
use cargo_metadata::{camino::Utf8PathBuf, Metadata};
use chrono::Local;
use crate::{BuildArgs, BUILD_TARGET, HELPER_TARGET_SUBDIR};
/// Copy the ELF to the specified output directory.
///
/// Locates the release ELF under the helper target subdirectory and copies it
/// to `args.output_directory` next to the workspace's `target` directory.
pub(crate) fn copy_elf_to_output_dir(
args: &BuildArgs,
program_metadata: &cargo_metadata::Metadata,
) -> Result<Utf8PathBuf> {
let root_package = program_metadata.root_package();
// NOTE(review): `.unwrap()` below panics if the program has no root package.
let root_package_name = root_package.as_ref().map(|p| &p.name);
// The ELF is written to a target folder specified by the program's package.
let target_dir_suffix = HELPER_TARGET_SUBDIR.to_string();
let original_elf_path = program_metadata
.target_directory
.join(target_dir_suffix)
.join(BUILD_TARGET)
.join("release")
.join(root_package_name.unwrap());
// The order of precedence for the ELF name is:
// 1. --elf_name flag
// 2. --binary flag
// 3. the build target triple (BUILD_TARGET, "mips-zkm-zkvm-elf")
let elf_name = if !args.elf_name.is_empty() {
args.elf_name.clone()
} else if !args.binary.is_empty() {
// TODO: In the future, change this to default to the package name. Will require updating
// docs and examples.
args.binary.clone()
} else {
BUILD_TARGET.to_string()
};
let elf_dir = program_metadata
.target_directory
.parent()
.unwrap()
.join(&args.output_directory);
fs::create_dir_all(&elf_dir)?;
let result_elf_path = elf_dir.join(elf_name);
// Copy the ELF to the specified output directory.
fs::copy(original_elf_path, &result_elf_path)?;
Ok(result_elf_path)
}
/// Returns the local wall-clock time formatted as `YYYY-MM-DD HH:MM:SS`,
/// used to timestamp `cargo:warning` build messages.
pub(crate) fn current_datetime() -> String {
Local::now().format("%Y-%m-%d %H:%M:%S").to_string()
}
/// Re-run the cargo command if the Cargo.toml or Cargo.lock file changes.
pub(crate) fn cargo_rerun_if_changed(metadata: &Metadata, program_dir: &Path) {
// Tell cargo to rerun the script only if program/{src, bin, build.rs, Cargo.toml} changes
// Ref: https://doc.rust-lang.org/nightly/cargo/reference/build-scripts.html#rerun-if-changed
let dirs = vec![
program_dir.join("src"),
program_dir.join("bin"),
program_dir.join("build.rs"),
program_dir.join("Cargo.toml"),
];
for dir in dirs {
if dir.exists() {
// NOTE(review): this uses the newer `cargo::` directive prefix while the
// prints below use `cargo:` — both are accepted by recent Cargo, but the
// mixed styles look unintentional; confirm minimum supported Cargo.
println!(
"cargo::rerun-if-changed={}",
dir.canonicalize().unwrap().display()
);
}
}
// Re-run the build script if the workspace root's Cargo.lock changes. If the program is its own
// workspace, this will be the program's Cargo.lock.
println!(
"cargo:rerun-if-changed={}",
metadata.workspace_root.join("Cargo.lock").as_str()
);
// Re-run if any local dependency changes.
for package in &metadata.packages {
for dependency in &package.dependencies {
if let Some(path) = &dependency.path {
println!("cargo:rerun-if-changed={}", path.as_str());
}
}
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/build/src/command/local.rs | build/src/command/local.rs | use std::process::Command;
use crate::{BuildArgs, HELPER_TARGET_SUBDIR};
use cargo_metadata::camino::Utf8PathBuf;
use super::utils::{get_program_build_args, get_rust_compiler_flags};
/// Get the command to build the program locally.
pub(crate) fn create_local_command(
args: &BuildArgs,
program_dir: &Utf8PathBuf,
program_metadata: &cargo_metadata::Metadata,
) -> Command {
let mut command = Command::new("cargo");
let canonicalized_program_dir = program_dir
.canonicalize()
.expect("Failed to canonicalize program directory");
// When executing the local command:
// 1. Set the target directory to a subdirectory of the program's target directory to avoid
// build
// conflicts with the parent process. Source: https://github.com/rust-lang/cargo/issues/6412
// 2. Set the encoded rust flags for the MIPS zkVM target.
// NOTE(review): the original upstream comments also described pinning a
// rustup toolchain and removing the parent's rustc configuration; neither
// step is performed by this code — confirm whether that is intentional.
command
.current_dir(canonicalized_program_dir)
.env("CARGO_ENCODED_RUSTFLAGS", get_rust_compiler_flags())
.env(
"CARGO_TARGET_DIR",
program_metadata.target_directory.join(HELPER_TARGET_SUBDIR),
)
.args(get_program_build_args(args));
command
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/build/src/command/utils.rs | build/src/command/utils.rs | use anyhow::{Context, Result};
use std::{
io::{BufRead, BufReader},
process::{exit, Command, Stdio},
thread,
};
use crate::{BuildArgs, BUILD_TARGET};
/// Get the arguments to build the program with the arguments from the [`BuildArgs`] struct.
/// Translate a [`BuildArgs`] value into the argument list for `cargo build`.
///
/// Always emits `build --release --target <BUILD_TARGET>`, then appends the
/// optional flags selected in `args`.
pub(crate) fn get_program_build_args(args: &BuildArgs) -> Vec<String> {
    let mut out: Vec<String> = ["build", "--release", "--target", BUILD_TARGET]
        .iter()
        .map(|s| s.to_string())
        .collect();
    if args.ignore_rust_version {
        out.push("--ignore-rust-version".to_string());
    }
    if !args.binary.is_empty() {
        out.extend(["--bin".to_string(), args.binary.clone()]);
    }
    if !args.features.is_empty() {
        out.extend(["--features".to_string(), args.features.join(",")]);
    }
    if args.no_default_features {
        out.push("--no-default-features".to_string());
    }
    if args.locked {
        out.push("--locked".to_string());
    }
    out
}
/// Rust flags for compilation of C libraries.
/// Rust flags for compilation of C libraries.
///
/// Returns the flags joined with the ASCII unit separator (0x1f), which is
/// the encoding `CARGO_ENCODED_RUSTFLAGS` expects.
/// (A `link-arg=-nostartfiles` flag was previously considered and is
/// intentionally not emitted.)
pub(crate) fn get_rust_compiler_flags() -> String {
    const FLAGS: [&str; 10] = [
        "-C",
        "target-cpu=mips32r2",
        "-C",
        "target-feature=+crt-static",
        "-C",
        "link-arg=-nostdlib",
        "-C",
        "link-arg=-g",
        "-C",
        "link-arg=--entry=main",
    ];
    FLAGS.join("\x1f")
}
/// Execute the command and handle the output depending on the context.
/// Execute the command, forwarding its output with a `[zkm]` prefix.
///
/// Spawns `command` with piped stdout/stderr, prefixes every forwarded line,
/// and waits for completion. On a non-zero exit status the current process is
/// terminated with the child's exit code (cargo has already printed the error),
/// so this returns `Ok(())` only on success, or `Err` if the child could not
/// be spawned or waited on.
pub(crate) fn execute_command(mut command: Command) -> Result<()> {
    // Add necessary tags for stdout and stderr from the command.
    let mut child = command
        .stdout(Stdio::piped())
        .stderr(Stdio::piped())
        .spawn()
        .context("failed to spawn command")?;
    let stdout = BufReader::new(child.stdout.take().unwrap());
    let stderr = BufReader::new(child.stderr.take().unwrap());
    // Prefix added to every forwarded line so build output is attributable.
    // (No trailing space here: the format string below supplies the separator;
    // the previous "[zkm] " + "{} {}" combination printed a double space.)
    let msg = "[zkm]";
    // Drain stdout on a separate thread so neither pipe can fill up and stall
    // the child while we read the other one.
    let stdout_handle = thread::spawn(move || {
        // `map_while(Result::ok)` stops on a read/UTF-8 error instead of
        // panicking like the previous `line.unwrap()` did.
        stdout.lines().map_while(Result::ok).for_each(|line| {
            println!("{} {}", msg, line);
        });
    });
    stderr.lines().map_while(Result::ok).for_each(|line| {
        eprintln!("{} {}", msg, line);
    });
    stdout_handle.join().unwrap();
    // Wait for the child process to finish and check the result.
    let result = child.wait()?;
    if !result.success() {
        // Error message is already printed by cargo.
        exit(result.code().unwrap_or(1))
    }
    Ok(())
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/build/src/command/mod.rs | build/src/command/mod.rs | pub(crate) mod local;
pub(crate) mod utils;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/config.rs | prover/src/config.rs | use plonky2::fri::reduction_strategies::FriReductionStrategy;
use plonky2::fri::{FriConfig, FriParams};
/// Parameters controlling STARK proving: security target, number of
/// constraint-combination challenges, and the FRI low-degree-test config.
pub struct StarkConfig {
    /// Targeted conjectured security level, in bits.
    pub security_bits: usize,
    /// The number of challenge points to generate, for IOPs that have soundness errors of (roughly)
    /// `degree / |F|`.
    pub num_challenges: usize,
    /// Configuration of the FRI low-degree test (blowup, cap height, PoW, queries).
    pub fri_config: FriConfig,
}
impl StarkConfig {
    /// A typical configuration with a rate of 2, resulting in fast but large proofs.
    /// Targets ~90 bit conjectured security.
    pub fn standard_fast_config() -> Self {
        Self {
            security_bits: 90,
            num_challenges: 2,
            fri_config: FriConfig {
                rate_bits: 2,    // blowup factor 2^2 = 4
                cap_height: 4,   // Merkle caps of 2^4 = 16 hashes
                proof_of_work_bits: 16,
                reduction_strategy: FriReductionStrategy::ConstantArityBits(4, 5),
                num_query_rounds: 37,
            },
        }
    }
    /// Derives the full FRI parameters for a trace of size `2^degree_bits`
    /// (without hiding).
    pub(crate) fn fri_params(&self, degree_bits: usize) -> FriParams {
        self.fri_config.fri_params(degree_bits, false)
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/proof.rs | prover/src/proof.rs | use crate::all_stark::NUM_PUBLIC_INPUT_USERDATA;
use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::fri::oracle::PolynomialBatch;
use plonky2::fri::proof::{FriChallenges, FriChallengesTarget, FriProof, FriProofTarget};
use plonky2::fri::structure::{
FriOpeningBatch, FriOpeningBatchTarget, FriOpenings, FriOpeningsTarget,
};
use plonky2::hash::hash_types::{MerkleCapTarget, RichField};
use plonky2::hash::merkle_tree::MerkleCap;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::{BoolTarget, Target};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::config::{GenericConfig, Hasher};
use plonky2::util::serialization::{Buffer, IoResult, Read, Write};
use plonky2_maybe_rayon::*;
use serde::{Deserialize, Serialize};
use crate::all_stark::NUM_TABLES;
use crate::config::StarkConfig;
use crate::cross_table_lookup::GrandProductChallengeSet;
/// A STARK proof for each table, plus some metadata used to create recursive wrapper proofs.
#[derive(Debug, Clone)]
pub struct AllProof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> {
    /// One STARK proof (with its initial challenger state) per table.
    pub stark_proofs: [StarkProofWithMetadata<F, C, D>; NUM_TABLES],
    /// Challenges used by the cross-table-lookup grand-product arguments.
    pub(crate) ctl_challenges: GrandProductChallengeSet<F>,
    /// Public memory roots (before/after) and user data.
    pub public_values: PublicValues,
}
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> AllProof<F, C, D> {
    /// Recovers the trace degree bits of each table's proof from `config`.
    pub fn degree_bits(&self, config: &StarkConfig) -> [usize; NUM_TABLES] {
        core::array::from_fn(|i| self.stark_proofs[i].proof.recover_degree_bits(config))
    }
}
/// Fiat-Shamir challenges for a full [`AllProof`]: per-table STARK challenges
/// plus the shared cross-table-lookup challenges.
pub(crate) struct AllProofChallenges<F: RichField + Extendable<D>, const D: usize> {
    /// One challenge set per table's STARK proof.
    pub stark_challenges: [StarkProofChallenges<F, D>; NUM_TABLES],
    /// Challenges shared by all cross-table-lookup arguments.
    pub ctl_challenges: GrandProductChallengeSet<F>,
}
#[allow(unused)] // TODO: should be used soon
pub(crate) struct AllChallengerState<F: RichField + Extendable<D>, H: Hasher<F>, const D: usize> {
    /// Sponge state of the challenger before starting each proof,
    /// along with the final state after all proofs are done. This final state isn't strictly needed.
    pub states: [H::Permutation; NUM_TABLES + 1],
    /// Challenges shared by all cross-table-lookup arguments.
    pub ctl_challenges: GrandProductChallengeSet<F>,
}
/// Memory values which are public.
#[derive(Debug, Clone, Default, Deserialize, Serialize)]
pub struct PublicValues {
    /// Memory root before program execution.
    pub roots_before: MemRoots,
    /// Memory root after program execution.
    pub roots_after: MemRoots,
    /// Public user data bytes.
    pub userdata: Vec<u8>,
}
/// A 256-bit memory root, stored as eight 32-bit limbs.
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct MemRoots {
    pub root: [u32; 8],
}
/// In-circuit counterpart of [`PublicValues`]: memory roots and user data as targets.
/// Note: All the larger integers are encoded with 32-bit limbs in little-endian order.
#[derive(Eq, PartialEq, Debug)]
pub struct PublicValuesTarget {
    pub roots_before: MemRootsTarget,
    pub roots_after: MemRootsTarget,
    pub userdata: [Target; NUM_PUBLIC_INPUT_USERDATA],
}
impl PublicValuesTarget {
    /// Serializes the targets into `buffer`: roots before, roots after, then user data.
    pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
        let MemRootsTarget {
            root: state_root_before,
        } = self.roots_before;
        buffer.write_target_array(&state_root_before)?;
        let MemRootsTarget {
            root: state_root_after,
        } = self.roots_after;
        buffer.write_target_array(&state_root_after)?;
        buffer.write_target_array(&self.userdata)?;
        Ok(())
    }
    /// Deserializes targets in the same order [`Self::to_buffer`] wrote them.
    pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
        let roots_before = MemRootsTarget {
            root: buffer.read_target_array()?,
        };
        let roots_after = MemRootsTarget {
            root: buffer.read_target_array()?,
        };
        let userdata = buffer.read_target_array()?;
        Ok(Self {
            roots_before,
            roots_after,
            userdata,
        })
    }
    /// Reconstructs the struct from the flat public-input list:
    /// indices 0..8 are `roots_before`, 8..16 `roots_after`, then the user data.
    pub fn from_public_inputs(pis: &[Target]) -> Self {
        Self {
            roots_before: MemRootsTarget::from_public_inputs(&pis[0..8]),
            roots_after: MemRootsTarget::from_public_inputs(&pis[8..16]),
            userdata: pis[16..16 + NUM_PUBLIC_INPUT_USERDATA].try_into().unwrap(),
        }
    }
    /// Builds targets that select `pv0` where `condition` holds, else `pv1`.
    pub fn select<F: RichField + Extendable<D>, const D: usize>(
        builder: &mut CircuitBuilder<F, D>,
        condition: BoolTarget,
        pv0: Self,
        pv1: Self,
    ) -> Self {
        Self {
            roots_before: MemRootsTarget::select(
                builder,
                condition,
                pv0.roots_before,
                pv1.roots_before,
            ),
            roots_after: MemRootsTarget::select(
                builder,
                condition,
                pv0.roots_after,
                pv1.roots_after,
            ),
            userdata: core::array::from_fn(|i| {
                builder.select(condition, pv0.userdata[i], pv1.userdata[i])
            }),
        }
    }
}
/// In-circuit counterpart of [`MemRoots`]: a memory root as eight limb targets.
#[derive(Eq, PartialEq, Debug, Copy, Clone)]
pub struct MemRootsTarget {
    pub root: [Target; 8],
}
impl MemRootsTarget {
    // NOTE(review): `root` holds 8 targets, yet SIZE is 24 — this looks like a
    // leftover from a three-root layout (3 * 8 limbs); confirm against callers
    // before relying on it for offset arithmetic.
    pub const SIZE: usize = 24;
    /// Reads the 8 root limbs from the head of a flat public-input slice.
    pub fn from_public_inputs(pis: &[Target]) -> Self {
        let root = pis[0..8].try_into().unwrap();
        Self { root }
    }
    /// Builds targets that select `tr0` where `condition` holds, else `tr1`.
    pub fn select<F: RichField + Extendable<D>, const D: usize>(
        builder: &mut CircuitBuilder<F, D>,
        condition: BoolTarget,
        tr0: Self,
        tr1: Self,
    ) -> Self {
        Self {
            root: core::array::from_fn(|i| builder.select(condition, tr0.root[i], tr1.root[i])),
        }
    }
    /// Constrains the two roots to be equal, limb by limb.
    pub fn connect<F: RichField + Extendable<D>, const D: usize>(
        builder: &mut CircuitBuilder<F, D>,
        tr0: Self,
        tr1: Self,
    ) {
        for i in 0..8 {
            builder.connect(tr0.root[i], tr1.root[i]);
        }
    }
}
/// A single-table STARK proof: Merkle caps of the committed polynomial LDEs,
/// the claimed openings, and the batch FRI argument backing them.
#[derive(Debug, Clone, Serialize)]
pub struct StarkProof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> {
    /// Merkle cap of LDEs of trace values.
    pub trace_cap: MerkleCap<F, C::Hasher>,
    /// Merkle cap of LDEs of lookup helper and CTL columns.
    pub auxiliary_polys_cap: MerkleCap<F, C::Hasher>,
    /// Merkle cap of LDEs of quotient polynomial evaluations.
    pub quotient_polys_cap: MerkleCap<F, C::Hasher>,
    /// Purported values of each polynomial at the challenge point.
    pub openings: StarkOpeningSet<F, D>,
    /// A batch FRI argument for all openings.
    pub opening_proof: FriProof<F, C::Hasher, D>,
}
/// A `StarkProof` along with some metadata about the initial Fiat-Shamir state, which is used when
/// creating a recursive wrapper proof around a STARK proof.
#[derive(Debug, Clone)]
pub struct StarkProofWithMetadata<F, C, const D: usize>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    /// Challenger sponge state captured before this proof was generated.
    pub(crate) init_challenger_state: <C::Hasher as Hasher<F>>::Permutation,
    pub proof: StarkProof<F, C, D>,
}
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> StarkProof<F, C, D> {
    /// Recover the length of the trace from a STARK proof and a STARK config.
    pub fn recover_degree_bits(&self, config: &StarkConfig) -> usize {
        let initial_merkle_proof = &self.opening_proof.query_round_proofs[0]
            .initial_trees_proof
            .evals_proofs[0]
            .1;
        // Merkle path length plus cap height gives log2 of the LDE domain;
        // subtracting the blowup (`rate_bits`) yields the trace degree bits.
        let lde_bits = config.fri_config.cap_height + initial_merkle_proof.siblings.len();
        lde_bits - config.fri_config.rate_bits
    }
    /// Number of cross-table-lookup `Z` polynomials opened at 1.
    pub fn num_ctl_zs(&self) -> usize {
        self.openings.ctl_zs_first.len()
    }
}
/// In-circuit counterpart of [`StarkProof`].
#[derive(Eq, PartialEq, Debug)]
pub struct StarkProofTarget<const D: usize> {
    pub trace_cap: MerkleCapTarget,
    pub auxiliary_polys_cap: MerkleCapTarget,
    pub quotient_polys_cap: MerkleCapTarget,
    pub openings: StarkOpeningSetTarget<D>,
    pub opening_proof: FriProofTarget<D>,
}
impl<const D: usize> StarkProofTarget<D> {
    /// Serializes the proof targets. Note the order: the three caps, then the
    /// FRI opening proof, and the openings last.
    pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
        buffer.write_target_merkle_cap(&self.trace_cap)?;
        buffer.write_target_merkle_cap(&self.auxiliary_polys_cap)?;
        buffer.write_target_merkle_cap(&self.quotient_polys_cap)?;
        buffer.write_target_fri_proof(&self.opening_proof)?;
        self.openings.to_buffer(buffer)?;
        Ok(())
    }
    /// Deserializes proof targets in the same order [`Self::to_buffer`] wrote them.
    pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
        let trace_cap = buffer.read_target_merkle_cap()?;
        let auxiliary_polys_cap = buffer.read_target_merkle_cap()?;
        let quotient_polys_cap = buffer.read_target_merkle_cap()?;
        let opening_proof = buffer.read_target_fri_proof()?;
        let openings = StarkOpeningSetTarget::from_buffer(buffer)?;
        Ok(Self {
            trace_cap,
            auxiliary_polys_cap,
            quotient_polys_cap,
            openings,
            opening_proof,
        })
    }
    /// Recover the length of the trace from a STARK proof and a STARK config.
    pub fn recover_degree_bits(&self, config: &StarkConfig) -> usize {
        let initial_merkle_proof = &self.opening_proof.query_round_proofs[0]
            .initial_trees_proof
            .evals_proofs[0]
            .1;
        // Same derivation as `StarkProof::recover_degree_bits`: LDE log-size
        // minus the FRI blowup.
        let lde_bits = config.fri_config.cap_height + initial_merkle_proof.siblings.len();
        lde_bits - config.fri_config.rate_bits
    }
}
/// Fiat-Shamir challenges derived while verifying a single STARK proof.
pub(crate) struct StarkProofChallenges<F: RichField + Extendable<D>, const D: usize> {
    /// Random values used to combine STARK constraints.
    pub stark_alphas: Vec<F>,
    /// Point at which the STARK polynomials are opened.
    pub stark_zeta: F::Extension,
    /// Challenges for the FRI low-degree test.
    pub fri_challenges: FriChallenges<F, D>,
}
/// In-circuit counterpart of [`StarkProofChallenges`].
pub(crate) struct StarkProofChallengesTarget<const D: usize> {
    /// Random values used to combine STARK constraints.
    pub stark_alphas: Vec<Target>,
    /// Point at which the STARK polynomials are opened.
    pub stark_zeta: ExtensionTarget<D>,
    /// Challenges for the FRI low-degree test.
    pub fri_challenges: FriChallengesTarget<D>,
}
/// Purported values of each polynomial at the challenge point.
#[derive(Debug, Clone, Serialize)]
pub struct StarkOpeningSet<F: RichField + Extendable<D>, const D: usize> {
    /// Openings of trace polynomials at `zeta`.
    pub local_values: Vec<F::Extension>,
    /// Openings of trace polynomials at `g * zeta`.
    pub next_values: Vec<F::Extension>,
    /// Openings of lookups and cross-table lookups `Z` polynomials at `zeta`.
    pub auxiliary_polys: Vec<F::Extension>,
    /// Openings of lookups and cross-table lookups `Z` polynomials at `g * zeta`.
    pub auxiliary_polys_next: Vec<F::Extension>,
    /// Openings of cross-table lookups `Z` polynomials at `1`.
    pub ctl_zs_first: Vec<F>,
    /// Openings of quotient polynomials at `zeta`.
    pub quotient_polys: Vec<F::Extension>,
}
impl<F: RichField + Extendable<D>, const D: usize> StarkOpeningSet<F, D> {
    /// Evaluates the committed polynomial batches at `zeta`, `g * zeta`,
    /// and (for the CTL `Z` columns) at `1`.
    ///
    /// `num_lookup_columns` and `num_ctl_polys` locate the CTL `Z` columns
    /// inside the auxiliary commitment, after the lookup and helper columns.
    pub fn new<C: GenericConfig<D, F = F>>(
        zeta: F::Extension,
        g: F,
        trace_commitment: &PolynomialBatch<F, C, D>,
        auxiliary_polys_commitment: &PolynomialBatch<F, C, D>,
        quotient_commitment: &PolynomialBatch<F, C, D>,
        num_lookup_columns: usize,
        num_ctl_polys: &[usize],
    ) -> Self {
        let total_num_helper_cols: usize = num_ctl_polys.iter().sum();
        // Evaluate every polynomial of a batch at an extension-field point.
        let eval_commitment = |z: F::Extension, c: &PolynomialBatch<F, C, D>| {
            c.polynomials
                .par_iter()
                .map(|p| p.to_extension().eval(z))
                .collect::<Vec<_>>()
        };
        // Evaluate every polynomial of a batch at a base-field point.
        let eval_commitment_base = |z: F, c: &PolynomialBatch<F, C, D>| {
            c.polynomials
                .par_iter()
                .map(|p| p.eval(z))
                .collect::<Vec<_>>()
        };
        // CTL `Z` openings at 1: skip the lookup and CTL-helper columns.
        let auxiliary_first = eval_commitment_base(F::ONE, auxiliary_polys_commitment);
        let ctl_zs_first = auxiliary_first[num_lookup_columns + total_num_helper_cols..].to_vec();
        let zeta_next = zeta.scalar_mul(g);
        Self {
            local_values: eval_commitment(zeta, trace_commitment),
            next_values: eval_commitment(zeta_next, trace_commitment),
            auxiliary_polys: eval_commitment(zeta, auxiliary_polys_commitment),
            auxiliary_polys_next: eval_commitment(zeta_next, auxiliary_polys_commitment),
            ctl_zs_first,
            quotient_polys: eval_commitment(zeta, quotient_commitment),
        }
    }
    /// Groups the openings into the three FRI batches, in order:
    /// everything opened at `zeta`, at `g * zeta`, then the CTL `Z`s at `1`.
    pub(crate) fn to_fri_openings(&self) -> FriOpenings<F, D> {
        let zeta_batch = FriOpeningBatch {
            values: self
                .local_values
                .iter()
                .chain(&self.auxiliary_polys)
                .chain(&self.quotient_polys)
                .copied()
                .collect_vec(),
        };
        let zeta_next_batch = FriOpeningBatch {
            values: self
                .next_values
                .iter()
                .chain(&self.auxiliary_polys_next)
                .copied()
                .collect_vec(),
        };
        debug_assert!(!self.ctl_zs_first.is_empty());
        let ctl_first_batch = FriOpeningBatch {
            values: self
                .ctl_zs_first
                .iter()
                .copied()
                .map(F::Extension::from_basefield)
                .collect(),
        };
        FriOpenings {
            batches: vec![zeta_batch, zeta_next_batch, ctl_first_batch],
        }
    }
}
/// In-circuit counterpart of [`StarkOpeningSet`].
#[derive(Eq, PartialEq, Debug)]
pub struct StarkOpeningSetTarget<const D: usize> {
    /// Openings of trace polynomials at `zeta`.
    pub local_values: Vec<ExtensionTarget<D>>,
    /// Openings of trace polynomials at `g * zeta`.
    pub next_values: Vec<ExtensionTarget<D>>,
    /// Openings of lookup and CTL `Z` polynomials at `zeta`.
    pub auxiliary_polys: Vec<ExtensionTarget<D>>,
    /// Openings of lookup and CTL `Z` polynomials at `g * zeta`.
    pub auxiliary_polys_next: Vec<ExtensionTarget<D>>,
    /// Openings of CTL `Z` polynomials at `1` (base field, hence plain targets).
    pub ctl_zs_first: Vec<Target>,
    /// Openings of quotient polynomials at `zeta`.
    pub quotient_polys: Vec<ExtensionTarget<D>>,
}
impl<const D: usize> StarkOpeningSetTarget<D> {
    /// Serializes all opening targets, field by field.
    pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
        buffer.write_target_ext_vec(&self.local_values)?;
        buffer.write_target_ext_vec(&self.next_values)?;
        buffer.write_target_ext_vec(&self.auxiliary_polys)?;
        buffer.write_target_ext_vec(&self.auxiliary_polys_next)?;
        buffer.write_target_vec(&self.ctl_zs_first)?;
        buffer.write_target_ext_vec(&self.quotient_polys)?;
        Ok(())
    }
    /// Deserializes opening targets in the same order [`Self::to_buffer`] wrote them.
    pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
        let local_values = buffer.read_target_ext_vec::<D>()?;
        let next_values = buffer.read_target_ext_vec::<D>()?;
        let auxiliary_polys = buffer.read_target_ext_vec::<D>()?;
        let auxiliary_polys_next = buffer.read_target_ext_vec::<D>()?;
        let ctl_zs_first = buffer.read_target_vec()?;
        let quotient_polys = buffer.read_target_ext_vec::<D>()?;
        Ok(Self {
            local_values,
            next_values,
            auxiliary_polys,
            auxiliary_polys_next,
            ctl_zs_first,
            quotient_polys,
        })
    }
    /// Groups the opening targets into the three FRI batches (`zeta`,
    /// `g * zeta`, `1`), mirroring [`StarkOpeningSet::to_fri_openings`].
    /// `zero` pads the base-field `ctl_zs_first` values into extension targets.
    pub(crate) fn to_fri_openings(&self, zero: Target) -> FriOpeningsTarget<D> {
        let zeta_batch = FriOpeningBatchTarget {
            values: self
                .local_values
                .iter()
                .chain(&self.auxiliary_polys)
                .chain(&self.quotient_polys)
                .copied()
                .collect_vec(),
        };
        let zeta_next_batch = FriOpeningBatchTarget {
            values: self
                .next_values
                .iter()
                .chain(&self.auxiliary_polys_next)
                .copied()
                .collect_vec(),
        };
        debug_assert!(!self.ctl_zs_first.is_empty());
        let ctl_first_batch = FriOpeningBatchTarget {
            values: self
                .ctl_zs_first
                .iter()
                .copied()
                .map(|t| t.to_ext_target(zero))
                .collect(),
        };
        FriOpeningsTarget {
            batches: vec![zeta_batch, zeta_next_batch, ctl_first_batch],
        }
    }
}
/// A STARK proof target together with its public-input targets.
pub struct StarkProofWithPublicInputsTarget<const D: usize> {
    pub proof: StarkProofTarget<D>,
    pub public_inputs: Vec<Target>,
}
/// A STARK proof together with its public-input values.
#[derive(Debug, Clone)]
pub struct StarkProofWithPublicInputs<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
> {
    pub proof: StarkProof<F, C, D>,
    // TODO: Maybe make it generic over a `S: Stark` and replace with `[F; S::PUBLIC_INPUTS]`.
    pub public_inputs: Vec<F>,
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/recursive_verifier.rs | prover/src/recursive_verifier.rs | use std::fmt::Debug;
use anyhow::Result;
use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
use plonky2::fri::witness_util::set_fri_proof_target;
use plonky2::gates::exponentiation::ExponentiationGate;
use plonky2::gates::gate::GateRef;
use plonky2::gates::noop::NoopGate;
use plonky2::hash::hash_types::RichField;
use plonky2::hash::hashing::PlonkyPermutation;
use plonky2::iop::challenger::RecursiveChallenger;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
use plonky2::iop::witness::{PartialWitness, Witness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::{CircuitConfig, CircuitData};
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig};
use plonky2::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
use plonky2::util::reducing::ReducingFactorTarget;
use plonky2::util::serialization::{
Buffer, GateSerializer, IoResult, Read, WitnessGeneratorSerializer, Write,
};
use plonky2::with_context;
use plonky2_util::log2_ceil;
use crate::all_stark::Table;
use crate::config::StarkConfig;
use crate::constraint_consumer::RecursiveConstraintConsumer;
use crate::cross_table_lookup::{
CrossTableLookup, CtlCheckVarsTarget, GrandProductChallenge, GrandProductChallengeSet,
};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::lookup::LookupCheckVarsTarget;
use crate::proof::{
MemRoots, MemRootsTarget, PublicValues, PublicValuesTarget, StarkOpeningSetTarget, StarkProof,
StarkProofChallengesTarget, StarkProofTarget, StarkProofWithMetadata,
};
use crate::stark::Stark;
use crate::vanishing_poly::eval_vanishing_poly_circuit;
use crate::witness::errors::ProgramError;
/// Typed view of the flat public-input list registered by
/// `recursive_stark_circuit`, in registration order: trace cap, CTL
/// challenges, challenger state before/after, and the `ctl_zs_first` openings.
pub(crate) struct PublicInputs<T: Copy + Default + Eq + PartialEq + Debug, P: PlonkyPermutation<T>>
{
    pub(crate) trace_cap: Vec<Vec<T>>,
    pub(crate) ctl_zs_first: Vec<T>,
    pub(crate) ctl_challenges: GrandProductChallengeSet<T>,
    pub(crate) challenger_state_before: P,
    pub(crate) challenger_state_after: P,
}
impl<T: Copy + Debug + Default + Eq + PartialEq, P: PlonkyPermutation<T>> PublicInputs<T, P> {
    /// Parses a flat public-input slice back into its components.
    pub(crate) fn from_vec(v: &[T], config: &StarkConfig) -> Self {
        // TODO: Document magic number 4; probably comes from
        // Ethereum 256 bits = 4 * Goldilocks 64 bits
        let nelts = config.fri_config.num_cap_elements();
        let mut trace_cap = Vec::with_capacity(nelts);
        for i in 0..nelts {
            trace_cap.push(v[4 * i..4 * (i + 1)].to_vec());
        }
        // Consume the remainder in the order the circuit registered it.
        let mut iter = v.iter().copied().skip(4 * nelts);
        let ctl_challenges = GrandProductChallengeSet {
            challenges: (0..config.num_challenges)
                .map(|_| GrandProductChallenge {
                    beta: iter.next().unwrap(),
                    gamma: iter.next().unwrap(),
                })
                .collect(),
        };
        let challenger_state_before = P::new(&mut iter);
        let challenger_state_after = P::new(&mut iter);
        // Everything left over is the CTL `Z` first-row openings.
        let ctl_zs_first: Vec<_> = iter.collect();
        Self {
            trace_cap,
            ctl_zs_first,
            ctl_challenges,
            challenger_state_before,
            challenger_state_after,
        }
    }
}
/// Represents a circuit which recursively verifies a STARK proof.
#[derive(Eq, PartialEq, Debug)]
pub(crate) struct StarkWrapperCircuit<F, C, const D: usize>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    C::Hasher: AlgebraicHasher<F>,
{
    /// The built recursive verifier circuit.
    pub(crate) circuit: CircuitData<F, C, D>,
    /// Targets to be populated with the inner STARK proof.
    pub(crate) stark_proof_target: StarkProofTarget<D>,
    /// Targets for the shared cross-table-lookup challenges.
    pub(crate) ctl_challenges_target: GrandProductChallengeSet<Target>,
    /// Targets for the challenger sponge state at the start of the inner proof.
    pub(crate) init_challenger_state_target:
        <C::Hasher as AlgebraicHasher<F>>::AlgebraicPermutation,
    /// Cached zero target, used when padding base-field openings to extensions.
    pub(crate) zero_target: Target,
}
impl<F, C, const D: usize> StarkWrapperCircuit<F, C, D>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    C::Hasher: AlgebraicHasher<F>,
{
    /// Serializes the circuit and all its witness targets.
    pub fn to_buffer(
        &self,
        buffer: &mut Vec<u8>,
        gate_serializer: &dyn GateSerializer<F, D>,
        generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
    ) -> IoResult<()> {
        buffer.write_circuit_data(&self.circuit, gate_serializer, generator_serializer)?;
        buffer.write_target_vec(self.init_challenger_state_target.as_ref())?;
        buffer.write_target(self.zero_target)?;
        self.stark_proof_target.to_buffer(buffer)?;
        self.ctl_challenges_target.to_buffer(buffer)?;
        Ok(())
    }
    /// Deserializes a wrapper circuit in the same order [`Self::to_buffer`] wrote it.
    pub fn from_buffer(
        buffer: &mut Buffer,
        gate_serializer: &dyn GateSerializer<F, D>,
        generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
    ) -> IoResult<Self> {
        let circuit = buffer.read_circuit_data(gate_serializer, generator_serializer)?;
        let target_vec = buffer.read_target_vec()?;
        let init_challenger_state_target =
            <C::Hasher as AlgebraicHasher<F>>::AlgebraicPermutation::new(target_vec);
        let zero_target = buffer.read_target()?;
        let stark_proof_target = StarkProofTarget::from_buffer(buffer)?;
        let ctl_challenges_target = GrandProductChallengeSet::from_buffer(buffer)?;
        Ok(Self {
            circuit,
            stark_proof_target,
            ctl_challenges_target,
            init_challenger_state_target,
            zero_target,
        })
    }
    /// Produces a recursive wrapper proof for the given inner STARK proof by
    /// filling in the proof, CTL-challenge, and challenger-state targets.
    pub(crate) fn prove(
        &self,
        proof_with_metadata: &StarkProofWithMetadata<F, C, D>,
        ctl_challenges: &GrandProductChallengeSet<F>,
    ) -> Result<ProofWithPublicInputs<F, C, D>> {
        let mut inputs = PartialWitness::new();
        set_stark_proof_target(
            &mut inputs,
            &self.stark_proof_target,
            &proof_with_metadata.proof,
            self.zero_target,
        );
        for (challenge_target, challenge) in self
            .ctl_challenges_target
            .challenges
            .iter()
            .zip(&ctl_challenges.challenges)
        {
            inputs.set_target(challenge_target.beta, challenge.beta);
            inputs.set_target(challenge_target.gamma, challenge.gamma);
        }
        inputs.set_target_arr(
            self.init_challenger_state_target.as_ref(),
            proof_with_metadata.init_challenger_state.as_ref(),
        );
        self.circuit.prove(inputs)
    }
}
/// Represents a circuit which recursively verifies a PLONK proof.
#[derive(Eq, PartialEq, Debug)]
pub(crate) struct PlonkWrapperCircuit<F, C, const D: usize>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    /// The built recursive verifier circuit.
    pub(crate) circuit: CircuitData<F, C, D>,
    /// Target to be populated with the inner proof and its public inputs.
    pub(crate) proof_with_pis_target: ProofWithPublicInputsTarget<D>,
}
impl<F, C, const D: usize> PlonkWrapperCircuit<F, C, D>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    C::Hasher: AlgebraicHasher<F>,
{
    /// Produces a recursive wrapper proof for the given inner PLONK proof.
    pub(crate) fn prove(
        &self,
        proof: &ProofWithPublicInputs<F, C, D>,
    ) -> Result<ProofWithPublicInputs<F, C, D>> {
        let mut inputs = PartialWitness::new();
        inputs.set_proof_with_pis_target(&self.proof_with_pis_target, proof);
        self.circuit.prove(inputs)
    }
}
/// Builds the circuit that recursively verifies one table's STARK proof.
///
/// Public inputs are registered in this order (which `PublicInputs::from_vec`
/// must mirror): trace cap elements, CTL challenges (beta/gamma pairs),
/// challenger state before and after, then the `ctl_zs_first` openings.
/// The circuit is padded with noop gates up to `min_degree_bits`.
pub(crate) fn recursive_stark_circuit<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
    const D: usize,
>(
    table: Table,
    stark: &S,
    degree_bits: usize,
    cross_table_lookups: &[CrossTableLookup<F>],
    inner_config: &StarkConfig,
    circuit_config: &CircuitConfig,
    min_degree_bits: usize,
) -> StarkWrapperCircuit<F, C, D>
where
    C::Hasher: AlgebraicHasher<F>,
{
    let mut builder = CircuitBuilder::<F, D>::new(circuit_config.clone());
    let zero_target = builder.zero();
    // Count the auxiliary columns (lookups + CTL helpers + CTL Zs) so virtual
    // proof targets can be sized correctly.
    let num_lookup_columns = stark.num_lookup_helper_columns(inner_config);
    let (total_num_helpers, num_ctl_zs, num_helpers_by_ctl) =
        CrossTableLookup::num_ctl_helpers_zs_all(
            cross_table_lookups,
            table,
            inner_config.num_challenges,
            stark.constraint_degree(),
        );
    let num_ctl_helper_zs = num_ctl_zs + total_num_helpers;
    // Allocate virtual targets for every part of the inner proof.
    let proof_target = add_virtual_stark_proof(
        &mut builder,
        stark,
        inner_config,
        degree_bits,
        num_ctl_helper_zs,
        num_ctl_zs,
    );
    // Public inputs, part 1: the trace Merkle cap.
    builder.register_public_inputs(
        &proof_target
            .trace_cap
            .0
            .iter()
            .flat_map(|h| h.elements)
            .collect::<Vec<_>>(),
    );
    // Public inputs, part 2: the shared CTL challenges.
    let ctl_challenges_target = GrandProductChallengeSet {
        challenges: (0..inner_config.num_challenges)
            .map(|_| GrandProductChallenge {
                beta: builder.add_virtual_public_input(),
                gamma: builder.add_virtual_public_input(),
            })
            .collect(),
    };
    let ctl_vars = CtlCheckVarsTarget::from_proof(
        table,
        &proof_target,
        cross_table_lookups,
        &ctl_challenges_target,
        num_lookup_columns,
        total_num_helpers,
        &num_helpers_by_ctl,
    );
    // Public inputs, part 3: challenger sponge state before the inner proof,
    // from which the Fiat-Shamir challenges are re-derived in-circuit.
    let init_challenger_state_target =
        <C::Hasher as AlgebraicHasher<F>>::AlgebraicPermutation::new(std::iter::from_fn(|| {
            Some(builder.add_virtual_public_input())
        }));
    let mut challenger =
        RecursiveChallenger::<F, C::Hasher, D>::from_state(init_challenger_state_target);
    let challenges =
        proof_target.get_challenges::<F, C>(&mut builder, &mut challenger, inner_config);
    // Public inputs, part 4: challenger state after, then the CTL Z openings at 1.
    let challenger_state = challenger.compact(&mut builder);
    builder.register_public_inputs(challenger_state.as_ref());
    builder.register_public_inputs(&proof_target.openings.ctl_zs_first);
    verify_stark_proof_with_challenges_circuit::<F, C, _, D>(
        &mut builder,
        stark,
        &proof_target,
        &challenges,
        &ctl_vars,
        &ctl_challenges_target,
        inner_config,
    );
    // Keep the gate set uniform across all recursion circuits.
    add_common_recursion_gates(&mut builder);
    // Pad to the minimum degree.
    while log2_ceil(builder.num_gates()) < min_degree_bits {
        builder.add_gate(NoopGate, vec![]);
    }
    let circuit = builder.build::<C>();
    StarkWrapperCircuit {
        circuit,
        stark_proof_target: proof_target,
        ctl_challenges_target,
        init_challenger_state_target,
        zero_target,
    }
}
/// Register gates that recursion circuits sometimes need, even when this
/// particular circuit does not use them. Keeping the gate set identical
/// across all recursion circuits enables efficient 1-of-n conditional
/// recursion.
pub(crate) fn add_common_recursion_gates<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
) {
    let exp_gate = ExponentiationGate::new_from_config(&builder.config);
    builder.add_gate_to_gate_set(GateRef::new(exp_gate));
}
/// Recursively verifies an inner proof.
///
/// In-circuit analogue of the native STARK verifier: re-evaluates the
/// vanishing polynomial at `zeta`, checks it against the quotient openings,
/// and verifies the batch FRI argument over all openings.
fn verify_stark_proof_with_challenges_circuit<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    stark: &S,
    proof: &StarkProofTarget<D>,
    challenges: &StarkProofChallengesTarget<D>,
    ctl_vars: &[CtlCheckVarsTarget<F, D>],
    ctl_challenges: &GrandProductChallengeSet<Target>,
    inner_config: &StarkConfig,
) where
    C::Hasher: AlgebraicHasher<F>,
{
    let zero = builder.zero();
    let one = builder.one_extension();
    let num_ctl_polys = ctl_vars
        .iter()
        .map(|ctl| ctl.helper_columns.len())
        .sum::<usize>();
    let StarkOpeningSetTarget {
        local_values,
        next_values,
        auxiliary_polys,
        auxiliary_polys_next,
        ctl_zs_first,
        quotient_polys,
    } = &proof.openings;
    let vars = S::EvaluationFrameTarget::from_values(local_values, next_values);
    let degree_bits = proof.recover_degree_bits(inner_config);
    // Z_H(zeta) = zeta^n - 1, the vanishing polynomial of the trace domain.
    let zeta_pow_deg = builder.exp_power_of_2_extension(challenges.stark_zeta, degree_bits);
    let z_h_zeta = builder.sub_extension(zeta_pow_deg, one);
    let (l_0, l_last) =
        eval_l_0_and_l_last_circuit(builder, degree_bits, challenges.stark_zeta, z_h_zeta);
    let last =
        builder.constant_extension(F::Extension::primitive_root_of_unity(degree_bits).inverse());
    let z_last = builder.sub_extension(challenges.stark_zeta, last);
    let mut consumer = RecursiveConstraintConsumer::<F, D>::new(
        builder.zero_extension(),
        challenges.stark_alphas.clone(),
        z_last,
        l_0,
        l_last,
    );
    let num_lookup_columns = stark.num_lookup_helper_columns(inner_config);
    let lookup_challenges = (num_lookup_columns > 0).then(|| {
        ctl_challenges
            .challenges
            .iter()
            .map(|ch| ch.beta)
            .collect::<Vec<_>>()
    });
    // NOTE(review): this unwrap assumes `uses_lookups()` implies
    // `num_lookup_columns > 0`; otherwise it would panic — confirm invariant.
    let lookup_vars = stark.uses_lookups().then(|| LookupCheckVarsTarget {
        local_values: auxiliary_polys[..num_lookup_columns].to_vec(),
        next_values: auxiliary_polys_next[..num_lookup_columns].to_vec(),
        challenges: lookup_challenges.unwrap(),
    });
    with_context!(
        builder,
        "evaluate vanishing polynomial",
        eval_vanishing_poly_circuit::<F, S, D>(
            builder,
            stark,
            &vars,
            lookup_vars,
            ctl_vars,
            &mut consumer,
        )
    );
    let vanishing_polys_zeta = consumer.accumulators();
    // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta.
    let mut scale = ReducingFactorTarget::new(zeta_pow_deg);
    for (i, chunk) in quotient_polys
        .chunks(stark.quotient_degree_factor())
        .enumerate()
    {
        let recombined_quotient = scale.reduce(chunk, builder);
        let computed_vanishing_poly = builder.mul_extension(z_h_zeta, recombined_quotient);
        builder.connect_extension(vanishing_polys_zeta[i], computed_vanishing_poly);
    }
    // Verify the batch FRI argument against the three commitment oracles.
    let merkle_caps = vec![
        proof.trace_cap.clone(),
        proof.auxiliary_polys_cap.clone(),
        proof.quotient_polys_cap.clone(),
    ];
    let fri_instance = stark.fri_instance_target(
        builder,
        challenges.stark_zeta,
        F::primitive_root_of_unity(degree_bits),
        num_ctl_polys,
        ctl_zs_first.len(),
        inner_config,
    );
    builder.verify_fri_proof::<C>(
        &fri_instance,
        &proof.openings.to_fri_openings(zero),
        &challenges.fri_challenges,
        &merkle_caps,
        &proof.opening_proof,
        &inner_config.fri_params(degree_bits),
    );
}
/// Evaluates the first and last Lagrange basis polynomials at `x`, given
/// `z_x = Z_H(x) = x^n - 1`:
///   L_0(x)    = (x^n - 1) / (n * (x - 1))
///   L_last(x) = (x^n - 1) / (n * (g * x - 1))
/// where `n = 2^log_n` and `g` is a primitive `n`-th root of unity.
fn eval_l_0_and_l_last_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    log_n: usize,
    x: ExtensionTarget<D>,
    z_x: ExtensionTarget<D>,
) -> (ExtensionTarget<D>, ExtensionTarget<D>) {
    let n = builder.constant_extension(F::Extension::from_canonical_usize(1 << log_n));
    let g = builder.constant_extension(F::Extension::primitive_root_of_unity(log_n));
    let one = builder.one_extension();
    // n * x - n = n * (x - 1)
    let l_0_deno = builder.mul_sub_extension(n, x, n);
    // n * (g * x - 1)
    let l_last_deno = builder.mul_sub_extension(g, x, one);
    let l_last_deno = builder.mul_extension(n, l_last_deno);
    (
        builder.div_extension(z_x, l_0_deno),
        builder.div_extension(z_x, l_last_deno),
    )
}
/// Allocates public-input targets for all public values:
/// roots before/after, then the user data array.
pub(crate) fn add_virtual_public_values<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
) -> PublicValuesTarget {
    let roots_before = add_virtual_trie_roots(builder);
    let roots_after = add_virtual_trie_roots(builder);
    let userdata = builder.add_virtual_public_input_arr();
    PublicValuesTarget {
        roots_before,
        roots_after,
        userdata,
    }
}
/// Allocates public-input targets for one 8-limb memory root.
pub(crate) fn add_virtual_trie_roots<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
) -> MemRootsTarget {
    let root = builder.add_virtual_public_input_arr();
    MemRootsTarget { root }
}
/// Allocates virtual targets for every component of a STARK proof of size
/// `2^degree_bits`: the three Merkle caps, the opening set, and the FRI proof.
pub(crate) fn add_virtual_stark_proof<
    F: RichField + Extendable<D>,
    S: Stark<F, D>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    stark: &S,
    config: &StarkConfig,
    degree_bits: usize,
    num_ctl_helper_zs: usize,
    num_ctl_zs: usize,
) -> StarkProofTarget<D> {
    let fri_params = config.fri_params(degree_bits);
    let cap_height = fri_params.config.cap_height;
    // Column counts per commitment oracle: trace, auxiliary (lookups + CTL),
    // and quotient — must match the prover's commitment layout.
    let num_leaves_per_oracle = vec![
        S::COLUMNS,
        stark.num_lookup_helper_columns(config) + num_ctl_helper_zs,
        stark.quotient_degree_factor() * config.num_challenges,
    ];
    let auxiliary_polys_cap = builder.add_virtual_cap(cap_height);
    StarkProofTarget {
        trace_cap: builder.add_virtual_cap(cap_height),
        auxiliary_polys_cap,
        quotient_polys_cap: builder.add_virtual_cap(cap_height),
        openings: add_virtual_stark_opening_set::<F, S, D>(
            builder,
            stark,
            num_ctl_helper_zs,
            num_ctl_zs,
            config,
        ),
        opening_proof: builder.add_virtual_fri_proof(&num_leaves_per_oracle, &fri_params),
    }
}
fn add_virtual_stark_opening_set<F: RichField + Extendable<D>, S: Stark<F, D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
stark: &S,
num_ctl_helper_zs: usize,
num_ctl_zs: usize,
config: &StarkConfig,
) -> StarkOpeningSetTarget<D> {
let num_challenges = config.num_challenges;
StarkOpeningSetTarget {
local_values: builder.add_virtual_extension_targets(S::COLUMNS),
next_values: builder.add_virtual_extension_targets(S::COLUMNS),
auxiliary_polys: builder.add_virtual_extension_targets(
stark.num_lookup_helper_columns(config) + num_ctl_helper_zs,
),
auxiliary_polys_next: builder.add_virtual_extension_targets(
stark.num_lookup_helper_columns(config) + num_ctl_helper_zs,
),
ctl_zs_first: builder.add_virtual_targets(num_ctl_zs),
quotient_polys: builder
.add_virtual_extension_targets(stark.quotient_degree_factor() * num_challenges),
}
}
pub(crate) fn set_stark_proof_target<F, C: GenericConfig<D, F = F>, W, const D: usize>(
witness: &mut W,
proof_target: &StarkProofTarget<D>,
proof: &StarkProof<F, C, D>,
zero: Target,
) where
F: RichField + Extendable<D>,
C::Hasher: AlgebraicHasher<F>,
W: Witness<F>,
{
witness.set_cap_target(&proof_target.trace_cap, &proof.trace_cap);
witness.set_cap_target(&proof_target.quotient_polys_cap, &proof.quotient_polys_cap);
witness.set_fri_openings(
&proof_target.openings.to_fri_openings(zero),
&proof.openings.to_fri_openings(),
);
witness.set_cap_target(
&proof_target.auxiliary_polys_cap,
&proof.auxiliary_polys_cap,
);
set_fri_proof_target(witness, &proof_target.opening_proof, &proof.opening_proof);
}
pub(crate) fn set_public_value_targets<F, W, const D: usize>(
witness: &mut W,
public_values_target: &PublicValuesTarget,
public_values: &PublicValues,
) -> Result<(), ProgramError>
where
F: RichField + Extendable<D>,
W: Witness<F>,
{
set_trie_roots_target(
witness,
&public_values_target.roots_before,
&public_values.roots_before,
);
set_trie_roots_target(
witness,
&public_values_target.roots_after,
&public_values.roots_after,
);
// setup userdata
for (i, limb) in public_values.userdata.iter().enumerate() {
log::trace!(
"set userdata target: {:?} => {:?}",
public_values_target.userdata[i],
F::from_canonical_u8(*limb),
);
witness.set_target(
public_values_target.userdata[i],
F::from_canonical_u8(*limb),
);
}
Ok(())
}
pub(crate) fn set_trie_roots_target<F, W, const D: usize>(
witness: &mut W,
trie_roots_target: &MemRootsTarget,
trie_roots: &MemRoots,
) where
F: RichField + Extendable<D>,
W: Witness<F>,
{
for (i, limb) in trie_roots.root.into_iter().enumerate() {
log::trace!(
"set target: {:?} => {:?}",
trie_roots_target.root[i],
F::from_canonical_u32(limb),
);
witness.set_target(trie_roots_target.root[i], F::from_canonical_u32(limb));
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/lib.rs | prover/src/lib.rs | #![allow(clippy::mixed_case_hex_literals)]
#![allow(clippy::upper_case_acronyms)]
#![allow(clippy::needless_range_loop)]
#![allow(clippy::type_complexity)]
#![allow(clippy::too_many_arguments)]
#![allow(clippy::doc_lazy_continuation)]
#![allow(clippy::field_reassign_with_default)]
#![feature(decl_macro)]
#![feature(generic_arg_infer)]
#![allow(dead_code)]
pub mod all_stark;
pub mod arithmetic;
pub mod config;
pub mod constraint_consumer;
pub mod cpu;
pub mod cross_table_lookup;
pub mod evaluation_frame;
pub mod fixed_recursive_verifier;
pub mod generation;
pub mod get_challenges;
pub mod keccak;
pub mod keccak_sponge;
pub mod logic;
pub mod lookup;
pub mod memory;
pub mod poseidon;
pub mod poseidon_sponge;
pub mod proof;
pub mod prover;
pub mod recursive_verifier;
pub mod sha_compress;
pub mod sha_compress_sponge;
pub mod sha_extend;
pub mod sha_extend_sponge;
pub mod stark;
pub mod stark_testing;
pub mod util;
pub mod vanishing_poly;
pub mod verifier;
pub mod witness;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/all_stark.rs | prover/src/all_stark.rs | use crate::arithmetic::arithmetic_stark;
use crate::arithmetic::arithmetic_stark::ArithmeticStark;
use crate::config::StarkConfig;
use crate::cpu::cpu_stark;
use crate::cpu::cpu_stark::CpuStark;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::cross_table_lookup::{CrossTableLookup, TableWithColumns};
use crate::keccak::keccak_stark;
use crate::keccak::keccak_stark::KeccakStark;
use crate::keccak_sponge::columns::KECCAK_RATE_BYTES;
use crate::keccak_sponge::keccak_sponge_stark;
use crate::keccak_sponge::keccak_sponge_stark::KeccakSpongeStark;
use crate::logic;
use crate::logic::LogicStark;
use crate::memory::memory_stark;
use crate::memory::memory_stark::MemoryStark;
use crate::poseidon::poseidon_stark;
use crate::poseidon::poseidon_stark::PoseidonStark;
use crate::poseidon_sponge::columns::POSEIDON_RATE_BYTES;
use crate::poseidon_sponge::poseidon_sponge_stark;
use crate::poseidon_sponge::poseidon_sponge_stark::PoseidonSpongeStark;
use crate::sha_compress::sha_compress_stark;
use crate::sha_compress::sha_compress_stark::ShaCompressStark;
use crate::sha_compress_sponge::sha_compress_sponge_stark;
use crate::sha_compress_sponge::sha_compress_sponge_stark::{
ShaCompressSpongeStark, SHA_COMPRESS_SPONGE_READ_BYTES,
};
use crate::sha_extend::sha_extend_stark;
use crate::sha_extend::sha_extend_stark::ShaExtendStark;
use crate::sha_extend_sponge::columns::SHA_EXTEND_SPONGE_READ_BYTES;
use crate::sha_extend_sponge::sha_extend_sponge_stark;
use crate::sha_extend_sponge::sha_extend_sponge_stark::ShaExtendSpongeStark;
use crate::stark::Stark;
use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
#[derive(Clone)]
pub struct AllStark<F: RichField + Extendable<D>, const D: usize> {
pub arithmetic_stark: ArithmeticStark<F, D>,
pub cpu_stark: CpuStark<F, D>,
pub poseidon_stark: PoseidonStark<F, D>,
pub poseidon_sponge_stark: PoseidonSpongeStark<F, D>,
pub keccak_stark: KeccakStark<F, D>,
pub keccak_sponge_stark: KeccakSpongeStark<F, D>,
pub sha_extend_stark: ShaExtendStark<F, D>,
pub sha_extend_sponge_stark: ShaExtendSpongeStark<F, D>,
pub sha_compress_stark: ShaCompressStark<F, D>,
pub sha_compress_sponge_stark: ShaCompressSpongeStark<F, D>,
pub logic_stark: LogicStark<F, D>,
pub memory_stark: MemoryStark<F, D>,
pub cross_table_lookups: Vec<CrossTableLookup<F>>,
}
impl<F: RichField + Extendable<D>, const D: usize> Default for AllStark<F, D> {
fn default() -> Self {
Self {
arithmetic_stark: ArithmeticStark::default(),
cpu_stark: CpuStark::default(),
poseidon_stark: PoseidonStark::default(),
poseidon_sponge_stark: PoseidonSpongeStark::default(),
keccak_stark: KeccakStark::default(),
keccak_sponge_stark: KeccakSpongeStark::default(),
sha_extend_stark: ShaExtendStark::default(),
sha_extend_sponge_stark: ShaExtendSpongeStark::default(),
sha_compress_stark: ShaCompressStark::default(),
sha_compress_sponge_stark: ShaCompressSpongeStark::default(),
logic_stark: LogicStark::default(),
memory_stark: MemoryStark::default(),
cross_table_lookups: all_cross_table_lookups(),
}
}
}
impl<F: RichField + Extendable<D>, const D: usize> AllStark<F, D> {
pub(crate) fn num_lookups_helper_columns(&self, config: &StarkConfig) -> [usize; NUM_TABLES] {
[
self.arithmetic_stark.num_lookup_helper_columns(config),
self.cpu_stark.num_lookup_helper_columns(config),
self.poseidon_stark.num_lookup_helper_columns(config),
self.poseidon_sponge_stark.num_lookup_helper_columns(config),
self.keccak_stark.num_lookup_helper_columns(config),
self.keccak_sponge_stark.num_lookup_helper_columns(config),
self.sha_extend_stark.num_lookup_helper_columns(config),
self.sha_extend_sponge_stark
.num_lookup_helper_columns(config),
self.sha_compress_stark.num_lookup_helper_columns(config),
self.sha_compress_sponge_stark
.num_lookup_helper_columns(config),
self.logic_stark.num_lookup_helper_columns(config),
self.memory_stark.num_lookup_helper_columns(config),
]
}
}
#[derive(Debug, Copy, Clone, Eq, PartialEq)]
pub enum Table {
Arithmetic = 0,
Cpu = 1,
Poseidon = 2,
PoseidonSponge = 3,
Keccak = 4,
KeccakSponge = 5,
ShaExtend = 6,
ShaExtendSponge = 7,
ShaCompress = 8,
ShaCompressSponge = 9,
Logic = 10,
Memory = 11,
}
pub(crate) const NUM_TABLES: usize = Table::Memory as usize + 1;
pub(crate) const NUM_PUBLIC_INPUT_USERDATA: usize = 32;
pub(crate) const MIN_TRACE_LEN: usize = 1 << 6;
impl Table {
pub(crate) fn all() -> [Self; NUM_TABLES] {
[
Self::Arithmetic,
Self::Cpu,
Self::Poseidon,
Self::PoseidonSponge,
Self::Keccak,
Self::KeccakSponge,
Self::ShaExtend,
Self::ShaExtendSponge,
Self::ShaCompress,
Self::ShaCompressSponge,
Self::Logic,
Self::Memory,
]
}
}
pub(crate) fn all_cross_table_lookups<F: Field>() -> Vec<CrossTableLookup<F>> {
vec![
ctl_arithmetic(),
ctl_poseidon_sponge(),
ctl_poseidon_inputs(),
ctl_poseidon_outputs(),
ctl_keccak_sponge(),
ctl_keccak_inputs(),
ctl_keccak_outputs(),
ctl_sha_extend_sponge(),
ctl_sha_extend_inputs(),
ctl_sha_extend_outputs(),
ctl_sha_compress_sponge(),
ctl_sha_compress_inputs(),
ctl_sha_compress_outputs(),
ctl_logic(),
ctl_memory(),
]
}
fn ctl_arithmetic<F: Field>() -> CrossTableLookup<F> {
CrossTableLookup::new(
vec![
cpu_stark::ctl_arithmetic_base_rows(),
cpu_stark::ctl_arithmetic_imm_base_rows(),
],
arithmetic_stark::ctl_arithmetic_rows(),
)
}
// We now need two different looked tables for `PoseidonStark`:
// one for the inputs and one for the outputs.
// They are linked with the timestamp.
fn ctl_poseidon_inputs<F: Field>() -> CrossTableLookup<F> {
let poseidon_sponge_looking = TableWithColumns::new(
Table::PoseidonSponge,
poseidon_sponge_stark::ctl_looking_poseidon_inputs(),
Some(poseidon_sponge_stark::ctl_looking_poseidon_filter()),
);
let poseidon_looked = TableWithColumns::new(
Table::Poseidon,
poseidon_stark::ctl_data_inputs(),
Some(poseidon_stark::ctl_filter_inputs()),
);
CrossTableLookup::new(vec![poseidon_sponge_looking], poseidon_looked)
}
fn ctl_poseidon_outputs<F: Field>() -> CrossTableLookup<F> {
let poseidon_sponge_looking = TableWithColumns::new(
Table::PoseidonSponge,
poseidon_sponge_stark::ctl_looking_poseidon_outputs(),
Some(poseidon_sponge_stark::ctl_looking_poseidon_filter()),
);
let poseidon_looked = TableWithColumns::new(
Table::Poseidon,
poseidon_stark::ctl_data_outputs(),
Some(poseidon_stark::ctl_filter_outputs()),
);
CrossTableLookup::new(vec![poseidon_sponge_looking], poseidon_looked)
}
fn ctl_poseidon_sponge<F: Field>() -> CrossTableLookup<F> {
let cpu_looking = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_poseidon_sponge(),
Some(cpu_stark::ctl_filter_poseidon_sponge()),
);
let poseidon_sponge_looked = TableWithColumns::new(
Table::PoseidonSponge,
poseidon_sponge_stark::ctl_looked_data(),
Some(poseidon_sponge_stark::ctl_looked_filter()),
);
CrossTableLookup::new(vec![cpu_looking], poseidon_sponge_looked)
}
// We now need two different looked tables for `KeccakStark`:
// one for the inputs and one for the outputs.
// They are linked with the timestamp.
fn ctl_keccak_inputs<F: Field>() -> CrossTableLookup<F> {
let keccak_sponge_looking = TableWithColumns::new(
Table::KeccakSponge,
keccak_sponge_stark::ctl_looking_keccak_inputs(),
Some(keccak_sponge_stark::ctl_looking_keccak_filter()),
);
let keccak_looked = TableWithColumns::new(
Table::Keccak,
keccak_stark::ctl_data_inputs(),
Some(keccak_stark::ctl_filter_inputs()),
);
CrossTableLookup::new(vec![keccak_sponge_looking], keccak_looked)
}
fn ctl_keccak_outputs<F: Field>() -> CrossTableLookup<F> {
let keccak_sponge_looking = TableWithColumns::new(
Table::KeccakSponge,
keccak_sponge_stark::ctl_looking_keccak_outputs(),
Some(keccak_sponge_stark::ctl_looking_keccak_filter()),
);
let keccak_looked = TableWithColumns::new(
Table::Keccak,
keccak_stark::ctl_data_outputs(),
Some(keccak_stark::ctl_filter_outputs()),
);
CrossTableLookup::new(vec![keccak_sponge_looking], keccak_looked)
}
fn ctl_keccak_sponge<F: Field>() -> CrossTableLookup<F> {
let cpu_looking = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_keccak_sponge(),
Some(cpu_stark::ctl_filter_keccak_sponge()),
);
let keccak_sponge_looked = TableWithColumns::new(
Table::KeccakSponge,
keccak_sponge_stark::ctl_looked_data(),
Some(keccak_sponge_stark::ctl_looked_filter()),
);
CrossTableLookup::new(vec![cpu_looking], keccak_sponge_looked)
}
fn ctl_sha_extend_inputs<F: Field>() -> CrossTableLookup<F> {
let sha_extend_sponge_looking = TableWithColumns::new(
Table::ShaExtendSponge,
sha_extend_sponge_stark::ctl_looking_sha_extend_inputs(),
Some(sha_extend_sponge_stark::ctl_looking_sha_extend_filter()),
);
let sha_extend_looked = TableWithColumns::new(
Table::ShaExtend,
sha_extend_stark::ctl_data_inputs(),
Some(sha_extend_stark::ctl_filter()),
);
CrossTableLookup::new(vec![sha_extend_sponge_looking], sha_extend_looked)
}
fn ctl_sha_extend_outputs<F: Field>() -> CrossTableLookup<F> {
let sha_extend_sponge_looking = TableWithColumns::new(
Table::ShaExtendSponge,
sha_extend_sponge_stark::ctl_looking_sha_extend_outputs(),
Some(sha_extend_sponge_stark::ctl_looking_sha_extend_filter()),
);
let sha_extend_looked = TableWithColumns::new(
Table::ShaExtend,
sha_extend_stark::ctl_data_outputs(),
Some(sha_extend_stark::ctl_filter()),
);
CrossTableLookup::new(vec![sha_extend_sponge_looking], sha_extend_looked)
}
fn ctl_sha_extend_sponge<F: Field>() -> CrossTableLookup<F> {
let cpu_looking = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_sha_extend_sponge(),
Some(cpu_stark::ctl_filter_sha_extend_sponge()),
);
let sha_extend_sponge_looked = TableWithColumns::new(
Table::ShaExtendSponge,
sha_extend_sponge_stark::ctl_looked_data(),
Some(sha_extend_sponge_stark::ctl_looking_sha_extend_filter()),
);
CrossTableLookup::new(vec![cpu_looking], sha_extend_sponge_looked)
}
fn ctl_sha_compress_inputs<F: Field>() -> CrossTableLookup<F> {
let sha_compress_sponge_looking = TableWithColumns::new(
Table::ShaCompressSponge,
sha_compress_sponge_stark::ctl_looking_sha_compress_inputs(),
Some(sha_compress_sponge_stark::ctl_looking_sha_compress_filter()),
);
let sha_compress_looked = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_data_inputs(),
Some(sha_compress_stark::ctl_filter_inputs()),
);
CrossTableLookup::new(vec![sha_compress_sponge_looking], sha_compress_looked)
}
fn ctl_sha_compress_outputs<F: Field>() -> CrossTableLookup<F> {
let sha_compress_sponge_looking = TableWithColumns::new(
Table::ShaCompressSponge,
sha_compress_sponge_stark::ctl_looking_sha_compress_outputs(),
Some(sha_compress_sponge_stark::ctl_looking_sha_compress_filter()),
);
let sha_compress_looked = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_data_outputs(),
Some(sha_compress_stark::ctl_filter_outputs()),
);
CrossTableLookup::new(vec![sha_compress_sponge_looking], sha_compress_looked)
}
fn ctl_sha_compress_sponge<F: Field>() -> CrossTableLookup<F> {
let cpu_looking = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_sha_compress_sponge(),
Some(cpu_stark::ctl_filter_sha_compress_sponge()),
);
let sha_compress_sponge_looked = TableWithColumns::new(
Table::ShaCompressSponge,
sha_compress_sponge_stark::ctl_looked_data(),
Some(sha_compress_sponge_stark::ctl_looked_filter()),
);
CrossTableLookup::new(vec![cpu_looking], sha_compress_sponge_looked)
}
pub(crate) fn ctl_logic<F: Field>() -> CrossTableLookup<F> {
let cpu_looking = TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_logic(),
Some(cpu_stark::ctl_filter_logic()),
);
let mut all_lookers = vec![cpu_looking];
for i in 0..keccak_sponge_stark::num_logic_ctls() {
let keccak_sponge_looking = TableWithColumns::new(
Table::KeccakSponge,
keccak_sponge_stark::ctl_looking_logic(i),
Some(keccak_sponge_stark::ctl_looking_logic_filter()),
);
all_lookers.push(keccak_sponge_looking);
}
// sha extend logic
{
let sha_extend_s_0_inter_looking = TableWithColumns::new(
Table::ShaExtend,
sha_extend_stark::ctl_s_0_inter_looking_logic(),
Some(sha_extend_stark::ctl_filter()),
);
all_lookers.push(sha_extend_s_0_inter_looking);
let sha_extend_s_0_looking = TableWithColumns::new(
Table::ShaExtend,
sha_extend_stark::ctl_s_0_looking_logic(),
Some(sha_extend_stark::ctl_filter()),
);
all_lookers.push(sha_extend_s_0_looking);
let sha_extend_s_1_inter_looking = TableWithColumns::new(
Table::ShaExtend,
sha_extend_stark::ctl_s_1_inter_looking_logic(),
Some(sha_extend_stark::ctl_filter()),
);
all_lookers.push(sha_extend_s_1_inter_looking);
let sha_extend_s_1_looking = TableWithColumns::new(
Table::ShaExtend,
sha_extend_stark::ctl_s_1_looking_logic(),
Some(sha_extend_stark::ctl_filter()),
);
all_lookers.push(sha_extend_s_1_looking);
}
// sha compress logic
{
let s_1_inter_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_s_1_inter_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
let s_1_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_s_1_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
let e_and_f_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_e_and_f_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
let not_e_and_g_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_not_e_and_g_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
let ch_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_ch_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
let s_0_inter_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_s_0_inter_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
let s_0_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_s_0_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
let a_and_b_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_a_and_b_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
let a_and_c_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_a_and_c_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
let b_and_c_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_b_and_c_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
let maj_inter_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_maj_inter_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
let maj_looking = TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_maj_looking_logic(),
Some(sha_compress_stark::ctl_logic_filter()),
);
all_lookers.extend([
s_1_inter_looking,
s_1_looking,
e_and_f_looking,
not_e_and_g_looking,
ch_looking,
s_0_inter_looking,
s_0_looking,
a_and_b_looking,
a_and_c_looking,
b_and_c_looking,
maj_inter_looking,
maj_looking,
]);
}
let logic_looked =
TableWithColumns::new(Table::Logic, logic::ctl_data(), Some(logic::ctl_filter()));
CrossTableLookup::new(all_lookers, logic_looked)
}
fn ctl_memory<F: Field>() -> CrossTableLookup<F> {
let cpu_memory_gp_ops = (0..NUM_GP_CHANNELS).map(|channel| {
TableWithColumns::new(
Table::Cpu,
cpu_stark::ctl_data_gp_memory(channel),
Some(cpu_stark::ctl_filter_gp_memory(channel)),
)
});
let poseidon_sponge_reads = (0..POSEIDON_RATE_BYTES).map(|i| {
TableWithColumns::new(
Table::PoseidonSponge,
poseidon_sponge_stark::ctl_looking_memory(i),
Some(poseidon_sponge_stark::ctl_looking_memory_filter(i)),
)
});
let keccak_sponge_reads = (0..KECCAK_RATE_BYTES).map(|i| {
TableWithColumns::new(
Table::KeccakSponge,
keccak_sponge_stark::ctl_looking_memory(i),
Some(keccak_sponge_stark::ctl_looking_memory_filter(i)),
)
});
let sha_extend_sponge_reads = (0..SHA_EXTEND_SPONGE_READ_BYTES).map(|i| {
TableWithColumns::new(
Table::ShaExtendSponge,
sha_extend_sponge_stark::ctl_looking_memory(i),
Some(sha_extend_sponge_stark::ctl_looking_sha_extend_filter()),
)
});
let sha_compress_sponge_reads = (0..SHA_COMPRESS_SPONGE_READ_BYTES).map(|i| {
TableWithColumns::new(
Table::ShaCompressSponge,
sha_compress_sponge_stark::ctl_looking_memory(i),
Some(sha_compress_sponge_stark::ctl_looking_sha_compress_filter()),
)
});
let sha_compress_reads = (0..4).map(|i| {
TableWithColumns::new(
Table::ShaCompress,
sha_compress_stark::ctl_looking_memory(i),
Some(sha_compress_stark::ctl_logic_filter()),
)
});
let all_lookers = []
.into_iter()
.chain(cpu_memory_gp_ops)
.chain(keccak_sponge_reads)
.chain(poseidon_sponge_reads)
.chain(sha_extend_sponge_reads)
.chain(sha_compress_sponge_reads)
.chain(sha_compress_reads)
.collect();
let memory_looked = TableWithColumns::new(
Table::Memory,
memory_stark::ctl_data(),
Some(memory_stark::ctl_filter()),
);
CrossTableLookup::new(all_lookers, memory_looked)
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/get_challenges.rs | prover/src/get_challenges.rs | use plonky2::field::extension::Extendable;
use plonky2::fri::proof::{FriProof, FriProofTarget};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::challenger::{Challenger, RecursiveChallenger};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig};
use crate::all_stark::{AllStark, NUM_TABLES};
use crate::config::StarkConfig;
use crate::cross_table_lookup::get_grand_product_challenge_set;
use crate::proof::*;
use crate::witness::errors::ProgramError;
fn observe_root<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
challenger: &mut Challenger<F, C::Hasher>,
root: &[u32; 8],
) {
for limb in root.iter() {
challenger.observe_element(F::from_canonical_u32(*limb));
}
}
fn observe_trie_roots<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
challenger: &mut Challenger<F, C::Hasher>,
trie_roots: &MemRoots,
) {
observe_root::<F, C, D>(challenger, &trie_roots.root);
}
fn observe_trie_roots_target<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
challenger: &mut RecursiveChallenger<F, C::Hasher, D>,
trie_roots: &MemRootsTarget,
) where
C::Hasher: AlgebraicHasher<F>,
{
challenger.observe_elements(&trie_roots.root);
}
/*
fn observe_extra_block_data<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
challenger: &mut Challenger<F, C::Hasher>,
extra_data: &ExtraBlockData,
) -> Result<(), ProgramError> {
challenger.observe_elements(&h256_limbs(extra_data.genesis_state_trie_root));
challenger.observe_element(u256_to_u32(extra_data.txn_number_before)?);
challenger.observe_element(u256_to_u32(extra_data.txn_number_after)?);
let gas_used_before = u256_to_u64(extra_data.gas_used_before)?;
challenger.observe_element(gas_used_before.0);
challenger.observe_element(gas_used_before.1);
let gas_used_after = u256_to_u64(extra_data.gas_used_after)?;
challenger.observe_element(gas_used_after.0);
challenger.observe_element(gas_used_after.1);
for i in 0..8 {
challenger.observe_elements(&u256_limbs(extra_data.block_bloom_before[i]));
}
for i in 0..8 {
challenger.observe_elements(&u256_limbs(extra_data.block_bloom_after[i]));
}
Ok(())
}
fn observe_extra_block_data_target<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
challenger: &mut RecursiveChallenger<F, C::Hasher, D>,
extra_data: &ExtraBlockDataTarget,
) where
C::Hasher: AlgebraicHasher<F>,
{
challenger.observe_elements(&extra_data.genesis_state_trie_root);
challenger.observe_element(extra_data.txn_number_before);
challenger.observe_element(extra_data.txn_number_after);
challenger.observe_elements(&extra_data.gas_used_before);
challenger.observe_elements(&extra_data.gas_used_after);
challenger.observe_elements(&extra_data.block_bloom_before);
challenger.observe_elements(&extra_data.block_bloom_after);
}
*/
pub(crate) fn observe_public_values<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
challenger: &mut Challenger<F, C::Hasher>,
public_values: &PublicValues,
) -> Result<(), ProgramError> {
observe_trie_roots::<F, C, D>(challenger, &public_values.roots_before);
observe_trie_roots::<F, C, D>(challenger, &public_values.roots_after);
for elem in &public_values.userdata {
challenger.observe_element(F::from_canonical_u8(*elem));
}
Ok(())
}
pub(crate) fn observe_public_values_target<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
challenger: &mut RecursiveChallenger<F, C::Hasher, D>,
public_values: &PublicValuesTarget,
) where
C::Hasher: AlgebraicHasher<F>,
{
observe_trie_roots_target::<F, C, D>(challenger, &public_values.roots_before);
observe_trie_roots_target::<F, C, D>(challenger, &public_values.roots_after);
challenger.observe_elements(&public_values.userdata);
}
impl<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize> AllProof<F, C, D> {
/// Computes all Fiat-Shamir challenges used in the STARK proof.
pub(crate) fn get_challenges(
&self,
config: &StarkConfig,
) -> Result<AllProofChallenges<F, D>, ProgramError> {
let mut challenger = Challenger::<F, C::Hasher>::new();
for proof in &self.stark_proofs {
challenger.observe_cap(&proof.proof.trace_cap);
}
observe_public_values::<F, C, D>(&mut challenger, &self.public_values)?;
let ctl_challenges =
get_grand_product_challenge_set(&mut challenger, config.num_challenges);
Ok(AllProofChallenges {
stark_challenges: core::array::from_fn(|i| {
challenger.compact();
self.stark_proofs[i]
.proof
.get_challenges(&mut challenger, config)
}),
ctl_challenges,
})
}
#[allow(unused)] // TODO: should be used soon
pub(crate) fn get_challenger_states(
&self,
all_stark: &AllStark<F, D>,
config: &StarkConfig,
) -> AllChallengerState<F, C::Hasher, D> {
let mut challenger = Challenger::<F, C::Hasher>::new();
for proof in &self.stark_proofs {
challenger.observe_cap(&proof.proof.trace_cap);
}
observe_public_values::<F, C, D>(&mut challenger, &self.public_values);
let ctl_challenges =
get_grand_product_challenge_set(&mut challenger, config.num_challenges);
let lookups = all_stark.num_lookups_helper_columns(config);
let mut challenger_states = vec![challenger.compact()];
for i in 0..NUM_TABLES {
self.stark_proofs[i]
.proof
.get_challenges(&mut challenger, config);
challenger_states.push(challenger.compact());
}
AllChallengerState {
states: challenger_states.try_into().unwrap(),
ctl_challenges,
}
}
}
impl<F, C, const D: usize> StarkProof<F, C, D>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
{
/// Computes all Fiat-Shamir challenges used in the STARK proof.
pub(crate) fn get_challenges(
&self,
challenger: &mut Challenger<F, C::Hasher>,
config: &StarkConfig,
) -> StarkProofChallenges<F, D> {
let degree_bits = self.recover_degree_bits(config);
let StarkProof {
auxiliary_polys_cap,
quotient_polys_cap,
openings,
opening_proof:
FriProof {
commit_phase_merkle_caps,
final_poly,
pow_witness,
..
},
..
} = &self;
let num_challenges = config.num_challenges;
challenger.observe_cap(auxiliary_polys_cap);
let stark_alphas = challenger.get_n_challenges(num_challenges);
challenger.observe_cap(quotient_polys_cap);
let stark_zeta = challenger.get_extension_challenge::<D>();
challenger.observe_openings(&openings.to_fri_openings());
StarkProofChallenges {
stark_alphas,
stark_zeta,
fri_challenges: challenger.fri_challenges::<C, D>(
commit_phase_merkle_caps,
final_poly,
*pow_witness,
degree_bits,
&config.fri_config,
),
}
}
}
impl<const D: usize> StarkProofTarget<D> {
pub(crate) fn get_challenges<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>>(
&self,
builder: &mut CircuitBuilder<F, D>,
challenger: &mut RecursiveChallenger<F, C::Hasher, D>,
config: &StarkConfig,
) -> StarkProofChallengesTarget<D>
where
C::Hasher: AlgebraicHasher<F>,
{
let StarkProofTarget {
auxiliary_polys_cap: auxiliary_polys,
quotient_polys_cap,
openings,
opening_proof:
FriProofTarget {
commit_phase_merkle_caps,
final_poly,
pow_witness,
..
},
..
} = &self;
let num_challenges = config.num_challenges;
challenger.observe_cap(auxiliary_polys);
let stark_alphas = challenger.get_n_challenges(builder, num_challenges);
challenger.observe_cap(quotient_polys_cap);
let stark_zeta = challenger.get_extension_challenge(builder);
challenger.observe_openings(&openings.to_fri_openings(builder.zero()));
StarkProofChallengesTarget {
stark_alphas,
stark_zeta,
fri_challenges: challenger.fri_challenges(
builder,
commit_phase_merkle_caps,
final_poly,
*pow_witness,
&config.fri_config,
),
}
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/prover.rs | prover/src/prover.rs | use std::any::type_name;
use anyhow::{ensure, Result};
use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::packable::Packable;
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues};
use plonky2::field::types::Field;
use plonky2::field::zero_poly_coset::ZeroPolyOnCoset;
use plonky2::fri::oracle::PolynomialBatch;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::challenger::Challenger;
use plonky2::plonk::config::GenericConfig;
use plonky2::timed;
use plonky2::util::timing::TimingTree;
use plonky2::util::transpose;
use plonky2_maybe_rayon::*;
use plonky2_util::{log2_ceil, log2_strict};
use crate::all_stark::{AllStark, Table, NUM_TABLES};
use crate::config::StarkConfig;
use crate::constraint_consumer::ConstraintConsumer;
use crate::cpu::kernel::assembler::Kernel;
use crate::cross_table_lookup::{
cross_table_lookup_data, get_grand_product_challenge_set, CtlCheckVars, CtlData,
GrandProductChallengeSet,
};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::generation::outputs::GenerationOutputs;
use crate::generation::state::{AssumptionReceipts, AssumptionUsage};
use crate::generation::{generate_traces, generate_traces_with_assumptions};
use crate::get_challenges::observe_public_values;
use crate::lookup::{lookup_helper_columns, Lookup, LookupCheckVars};
use crate::proof::{AllProof, PublicValues, StarkOpeningSet, StarkProof, StarkProofWithMetadata};
use crate::stark::Stark;
use crate::vanishing_poly::eval_vanishing_poly;
use std::{cell::RefCell, rc::Rc};
#[cfg(any(feature = "test", test))]
use crate::cross_table_lookup::testutils::check_ctls;
/// Generate traces, then create all STARK proofs.
pub fn prove<F, C, const D: usize>(
all_stark: &AllStark<F, D>,
kernel: &Kernel,
config: &StarkConfig,
timing: &mut TimingTree,
) -> Result<AllProof<F, C, D>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
{
let (proof, _outputs) = prove_with_outputs(all_stark, kernel, config, timing)?;
Ok(proof)
}
pub fn prove_with_assumptions<F, C, const D: usize>(
all_stark: &AllStark<F, D>,
kernel: &Kernel,
config: &StarkConfig,
timing: &mut TimingTree,
assumptions: AssumptionReceipts<F, C, D>,
) -> Result<(AllProof<F, C, D>, Rc<RefCell<AssumptionUsage<F, C, D>>>)>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
{
let (proof, _outputs, receipts) =
prove_with_output_and_assumptions(all_stark, kernel, config, timing, assumptions)?;
Ok((proof, receipts))
}
/// Generate traces, then create all STARK proofs. Returns information about
/// the post-state, intended for debugging, in addition to the proof.
pub fn prove_with_outputs<F, C, const D: usize>(
    all_stark: &AllStark<F, D>,
    kernel: &Kernel,
    config: &StarkConfig,
    timing: &mut TimingTree,
) -> Result<(AllProof<F, C, D>, GenerationOutputs)>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    // Trace generation is timed separately from the proving step below.
    let (trace_data, pv, gen_outputs) = timed!(
        timing,
        "generate all traces",
        generate_traces::<F, C, D>(all_stark, kernel, config, timing)?
    );
    let all_proof = prove_with_traces(all_stark, config, trace_data, pv, timing)?;
    Ok((all_proof, gen_outputs))
}
/// Generate traces (consuming assumption receipts), then create all STARK
/// proofs. Returns the proof, debugging information about the post-state, and
/// the record of which assumptions were used.
pub fn prove_with_output_and_assumptions<F, C, const D: usize>(
    all_stark: &AllStark<F, D>,
    kernel: &Kernel,
    config: &StarkConfig,
    timing: &mut TimingTree,
    assumptions: AssumptionReceipts<F, C, D>,
) -> Result<(
    AllProof<F, C, D>,
    GenerationOutputs,
    Rc<RefCell<AssumptionUsage<F, C, D>>>,
)>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    // Trace generation is timed separately from the proving step below.
    let (trace_data, pv, gen_outputs, usage) = timed!(
        timing,
        "generate all traces",
        generate_traces_with_assumptions::<F, C, D>(
            all_stark,
            kernel,
            config,
            timing,
            assumptions
        )?
    );
    let all_proof = prove_with_traces(all_stark, config, trace_data, pv, timing)?;
    Ok((all_proof, gen_outputs, usage))
}
/// Compute all STARK proofs from already-generated trace polynomial values.
///
/// The Fiat-Shamir transcript is built here: the challenger observes every
/// table's trace cap, then the public values, and only then are the
/// cross-table-lookup challenges drawn. That ordering is part of the proof
/// format and must not change.
///
/// Improvement over the original: removed a dead, commented-out `check_ctls`
/// block (the live, cfg-gated check above it supersedes it).
pub(crate) fn prove_with_traces<F, C, const D: usize>(
    all_stark: &AllStark<F, D>,
    config: &StarkConfig,
    trace_poly_values: [Vec<PolynomialValues<F>>; NUM_TABLES],
    public_values: PublicValues,
    timing: &mut TimingTree,
) -> Result<AllProof<F, C, D>>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    let rate_bits = config.fri_config.rate_bits;
    let cap_height = config.fri_config.cap_height;

    // Commit to each table's trace polynomials (one Merkle cap per table).
    let trace_commitments = timed!(
        timing,
        "compute all trace commitments",
        trace_poly_values
            .iter()
            .zip_eq(Table::all())
            .map(|(trace, table)| {
                timed!(
                    timing,
                    &format!("compute trace commitment for {:?}", table),
                    PolynomialBatch::<F, C, D>::from_values(
                        // TODO: Cloning this isn't great; consider having `from_values` accept a reference,
                        // or having `compute_permutation_z_polys` read trace values from the `PolynomialBatch`.
                        trace.clone(),
                        rate_bits,
                        false,
                        cap_height,
                        timing,
                        None,
                    )
                )
            })
            .collect::<Vec<_>>()
    );
    log::debug!("trace_commitments: {}", trace_commitments.len());

    // In test builds, sanity-check the cross-table lookups directly on the traces.
    #[cfg(any(feature = "test", test))]
    {
        log::debug!("check_ctls...");
        check_ctls(&trace_poly_values, &all_stark.cross_table_lookups);
        log::debug!("check_ctls done.");
    }

    let trace_caps = trace_commitments
        .iter()
        .map(|c| c.merkle_tree.cap.clone())
        .collect::<Vec<_>>();
    let mut challenger = Challenger::<F, C::Hasher>::new();
    for cap in &trace_caps {
        challenger.observe_cap(cap);
    }

    observe_public_values::<F, C, D>(&mut challenger, &public_values)
        .map_err(|_| anyhow::Error::msg("Invalid conversion of public values."))?;

    // Draw the grand-product (logUp) challenges for cross-table lookups.
    let ctl_challenges = get_grand_product_challenge_set(&mut challenger, config.num_challenges);
    let ctl_data_per_table = timed!(
        timing,
        "compute CTL data",
        cross_table_lookup_data::<F, D>(
            &trace_poly_values,
            &all_stark.cross_table_lookups,
            &ctl_challenges,
            all_stark.arithmetic_stark.constraint_degree()
        )
    );

    let stark_proofs = timed!(
        timing,
        "compute all proofs given commitments",
        prove_with_commitments(
            all_stark,
            config,
            trace_poly_values,
            trace_commitments,
            ctl_data_per_table,
            &mut challenger,
            &ctl_challenges,
            timing
        )?
    );

    Ok(AllProof {
        stark_proofs,
        ctl_challenges,
        public_values,
    })
}
/// Prove every STARK table, given the per-table trace commitments and CTL data.
///
/// All twelve table proofs share the single `challenger` (passed mutably), so
/// each proof's Fiat-Shamir transcript depends on the proofs generated before
/// it; the calls below are in `Table` discriminant order and must not be
/// reordered.
fn prove_with_commitments<F, C, const D: usize>(
    all_stark: &AllStark<F, D>,
    config: &StarkConfig,
    trace_poly_values: [Vec<PolynomialValues<F>>; NUM_TABLES],
    trace_commitments: Vec<PolynomialBatch<F, C, D>>,
    ctl_data_per_table: [CtlData<F>; NUM_TABLES],
    challenger: &mut Challenger<F, C::Hasher>,
    ctl_challenges: &GrandProductChallengeSet<F>,
    timing: &mut TimingTree,
) -> Result<[StarkProofWithMetadata<F, C, D>; NUM_TABLES]>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    let arithmetic_proof = timed!(
        timing,
        "prove Arithmetic STARK",
        prove_single_table(
            &all_stark.arithmetic_stark,
            config,
            &trace_poly_values[Table::Arithmetic as usize],
            &trace_commitments[Table::Arithmetic as usize],
            &ctl_data_per_table[Table::Arithmetic as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    let cpu_proof = timed!(
        timing,
        "prove CPU STARK",
        prove_single_table(
            &all_stark.cpu_stark,
            config,
            &trace_poly_values[Table::Cpu as usize],
            &trace_commitments[Table::Cpu as usize],
            &ctl_data_per_table[Table::Cpu as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    let poseidon_proof = timed!(
        timing,
        "prove Poseidon STARK",
        prove_single_table(
            &all_stark.poseidon_stark,
            config,
            &trace_poly_values[Table::Poseidon as usize],
            &trace_commitments[Table::Poseidon as usize],
            &ctl_data_per_table[Table::Poseidon as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    let poseidon_sponge_proof = timed!(
        timing,
        "prove Poseidon sponge STARK",
        prove_single_table(
            &all_stark.poseidon_sponge_stark,
            config,
            &trace_poly_values[Table::PoseidonSponge as usize],
            &trace_commitments[Table::PoseidonSponge as usize],
            &ctl_data_per_table[Table::PoseidonSponge as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    let keccak_proof = timed!(
        timing,
        "prove Keccak STARK",
        prove_single_table(
            &all_stark.keccak_stark,
            config,
            &trace_poly_values[Table::Keccak as usize],
            &trace_commitments[Table::Keccak as usize],
            &ctl_data_per_table[Table::Keccak as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    let keccak_sponge_proof = timed!(
        timing,
        "prove Keccak sponge STARK",
        prove_single_table(
            &all_stark.keccak_sponge_stark,
            config,
            &trace_poly_values[Table::KeccakSponge as usize],
            &trace_commitments[Table::KeccakSponge as usize],
            &ctl_data_per_table[Table::KeccakSponge as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    let sha_extend_proof = timed!(
        timing,
        "prove SHA Extend STARK",
        prove_single_table(
            &all_stark.sha_extend_stark,
            config,
            &trace_poly_values[Table::ShaExtend as usize],
            &trace_commitments[Table::ShaExtend as usize],
            &ctl_data_per_table[Table::ShaExtend as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    let sha_extend_sponge_proof = timed!(
        timing,
        "prove SHA Extend sponge STARK",
        prove_single_table(
            &all_stark.sha_extend_sponge_stark,
            config,
            &trace_poly_values[Table::ShaExtendSponge as usize],
            &trace_commitments[Table::ShaExtendSponge as usize],
            &ctl_data_per_table[Table::ShaExtendSponge as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    let sha_compress_proof = timed!(
        timing,
        "prove SHA Compress STARK",
        prove_single_table(
            &all_stark.sha_compress_stark,
            config,
            &trace_poly_values[Table::ShaCompress as usize],
            &trace_commitments[Table::ShaCompress as usize],
            &ctl_data_per_table[Table::ShaCompress as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    let sha_compress_sponge_proof = timed!(
        timing,
        "prove SHA Compress sponge STARK",
        prove_single_table(
            &all_stark.sha_compress_sponge_stark,
            config,
            &trace_poly_values[Table::ShaCompressSponge as usize],
            &trace_commitments[Table::ShaCompressSponge as usize],
            &ctl_data_per_table[Table::ShaCompressSponge as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    let logic_proof = timed!(
        timing,
        "prove Logic STARK",
        prove_single_table(
            &all_stark.logic_stark,
            config,
            &trace_poly_values[Table::Logic as usize],
            &trace_commitments[Table::Logic as usize],
            &ctl_data_per_table[Table::Logic as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    let memory_proof = timed!(
        timing,
        "prove Memory STARK",
        prove_single_table(
            &all_stark.memory_stark,
            config,
            &trace_poly_values[Table::Memory as usize],
            &trace_commitments[Table::Memory as usize],
            &ctl_data_per_table[Table::Memory as usize],
            ctl_challenges,
            challenger,
            timing,
        )?
    );
    // The returned array is indexed by `Table`, matching the order proved above.
    Ok([
        arithmetic_proof,
        cpu_proof,
        poseidon_proof,
        poseidon_sponge_proof,
        keccak_proof,
        keccak_sponge_proof,
        sha_extend_proof,
        sha_extend_sponge_proof,
        sha_compress_proof,
        sha_compress_sponge_proof,
        logic_proof,
        memory_proof,
    ])
}
/// Compute proof for a single STARK table.
pub(crate) fn prove_single_table<F, C, S, const D: usize>(
stark: &S,
config: &StarkConfig,
trace_poly_values: &[PolynomialValues<F>],
trace_commitment: &PolynomialBatch<F, C, D>,
ctl_data: &CtlData<F>,
ctl_challenges: &GrandProductChallengeSet<F>,
challenger: &mut Challenger<F, C::Hasher>,
timing: &mut TimingTree,
) -> Result<StarkProofWithMetadata<F, C, D>>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
{
let degree = trace_poly_values[0].len();
let degree_bits = log2_strict(degree);
let fri_params = config.fri_params(degree_bits);
let rate_bits = config.fri_config.rate_bits;
let cap_height = config.fri_config.cap_height;
assert!(
fri_params.total_arities() <= degree_bits + rate_bits - cap_height,
"FRI total reduction arity is too large.",
);
let init_challenger_state = challenger.compact();
let constraint_degree = stark.constraint_degree();
let lookup_challenges = stark.uses_lookups().then(|| {
ctl_challenges
.challenges
.iter()
.map(|ch| ch.beta)
.collect::<Vec<_>>()
});
let lookups = stark.lookups();
let lookup_helper_columns = timed!(
timing,
"compute lookup helper columns",
lookup_challenges.as_ref().map(|challenges| {
let mut columns = Vec::new();
for lookup in &lookups {
for &challenge in challenges {
columns.extend(lookup_helper_columns(
lookup,
trace_poly_values,
challenge,
constraint_degree,
));
}
}
columns
})
);
let num_lookup_columns = lookup_helper_columns.as_ref().map(|v| v.len()).unwrap_or(0);
let auxiliary_polys = match lookup_helper_columns {
None => {
let mut ctl_polys = ctl_data.ctl_helper_polys();
ctl_polys.extend(ctl_data.ctl_z_polys());
ctl_polys
}
Some(mut lookup_columns) => {
lookup_columns.extend(ctl_data.ctl_helper_polys());
lookup_columns.extend(ctl_data.ctl_z_polys());
lookup_columns
}
};
assert!(!auxiliary_polys.is_empty(), "No CTL?");
let auxiliary_polys_commitment = timed!(
timing,
"compute auxiliary polynomials commitment",
PolynomialBatch::from_values(
auxiliary_polys,
rate_bits,
false,
config.fri_config.cap_height,
timing,
None,
)
);
let auxiliary_polys_cap = auxiliary_polys_commitment.merkle_tree.cap.clone();
challenger.observe_cap(&auxiliary_polys_cap);
let alphas = challenger.get_n_challenges(config.num_challenges);
let num_ctl_polys = ctl_data.num_ctl_helper_polys();
if cfg!(test) {
check_constraints(
stark,
trace_commitment,
&auxiliary_polys_commitment,
lookup_challenges.as_ref(),
&lookups,
ctl_data,
alphas.clone(),
degree_bits,
num_lookup_columns,
&num_ctl_polys,
);
}
let quotient_polys = timed!(
timing,
"compute quotient polys",
compute_quotient_polys::<F, <F as Packable>::Packing, C, S, D>(
stark,
trace_commitment,
&auxiliary_polys_commitment,
lookup_challenges.as_ref(),
&lookups,
ctl_data,
alphas,
degree_bits,
num_lookup_columns,
&num_ctl_polys,
config,
)
);
let all_quotient_chunks = timed!(
timing,
"split quotient polys",
quotient_polys
.into_par_iter()
.flat_map(|mut quotient_poly| {
quotient_poly
.trim_to_len(degree * stark.quotient_degree_factor())
.expect(
"Quotient has failed, the vanishing polynomial is not divisible by Z_H",
);
// Split quotient into degree-n chunks.
quotient_poly.chunks(degree)
})
.collect()
);
let quotient_commitment = timed!(
timing,
"compute quotient commitment",
PolynomialBatch::from_coeffs(
all_quotient_chunks,
rate_bits,
false,
config.fri_config.cap_height,
timing,
None,
)
);
let quotient_polys_cap = quotient_commitment.merkle_tree.cap.clone();
challenger.observe_cap("ient_polys_cap);
let zeta = challenger.get_extension_challenge::<D>();
// To avoid leaking witness data, we want to ensure that our opening locations, `zeta` and
// `g * zeta`, are not in our subgroup `H`. It suffices to check `zeta` only, since
// `(g * zeta)^n = zeta^n`, where `n` is the order of `g`.
let g = F::primitive_root_of_unity(degree_bits);
ensure!(
zeta.exp_power_of_2(degree_bits) != F::Extension::ONE,
"Opening point is in the subgroup."
);
let openings = StarkOpeningSet::new(
zeta,
g,
trace_commitment,
&auxiliary_polys_commitment,
"ient_commitment,
stark.num_lookup_helper_columns(config),
&num_ctl_polys,
);
challenger.observe_openings(&openings.to_fri_openings());
let initial_merkle_trees = vec![
trace_commitment,
&auxiliary_polys_commitment,
"ient_commitment,
];
let opening_proof = timed!(
timing,
"compute openings proof",
PolynomialBatch::prove_openings(
&stark.fri_instance(zeta, g, num_ctl_polys.iter().sum(), num_ctl_polys, config),
&initial_merkle_trees,
challenger,
&fri_params,
timing,
)
);
let proof = StarkProof {
trace_cap: trace_commitment.merkle_tree.cap.clone(),
auxiliary_polys_cap,
quotient_polys_cap,
openings,
opening_proof,
};
Ok(StarkProofWithMetadata {
init_challenger_state,
proof,
})
}
/// Computes the quotient polynomials `(sum alpha^i C_i(x)) / Z_H(x)` for `alpha` in `alphas`,
/// where the `C_i`s are the Stark constraints.
fn compute_quotient_polys<'a, F, P, C, S, const D: usize>(
stark: &S,
trace_commitment: &'a PolynomialBatch<F, C, D>,
auxiliary_polys_commitment: &'a PolynomialBatch<F, C, D>,
lookup_challenges: Option<&'a Vec<F>>,
lookups: &[Lookup<F>],
ctl_data: &CtlData<F>,
alphas: Vec<F>,
degree_bits: usize,
num_lookup_columns: usize,
num_ctl_columns: &[usize],
config: &StarkConfig,
) -> Vec<PolynomialCoeffs<F>>
where
F: RichField + Extendable<D>,
P: PackedField<Scalar = F>,
C: GenericConfig<D, F = F>,
S: Stark<F, D>,
{
let degree = 1 << degree_bits;
let rate_bits = config.fri_config.rate_bits;
let total_num_helper_cols: usize = num_ctl_columns.iter().sum();
let quotient_degree_bits = log2_ceil(stark.quotient_degree_factor());
assert!(
quotient_degree_bits <= rate_bits,
"Having constraints of degree higher than the rate is not supported yet."
);
let step = 1 << (rate_bits - quotient_degree_bits);
// When opening the `Z`s polys at the "next" point, need to look at the point `next_step` steps away.
let next_step = 1 << quotient_degree_bits;
// Evaluation of the first Lagrange polynomial on the LDE domain.
let lagrange_first = PolynomialValues::selector(degree, 0).lde_onto_coset(quotient_degree_bits);
// Evaluation of the last Lagrange polynomial on the LDE domain.
let lagrange_last =
PolynomialValues::selector(degree, degree - 1).lde_onto_coset(quotient_degree_bits);
let z_h_on_coset = ZeroPolyOnCoset::<F>::new(degree_bits, quotient_degree_bits);
// Retrieve the LDE values at index `i`.
let get_trace_values_packed =
|i_start| -> Vec<P> { trace_commitment.get_lde_values_packed(i_start, step) };
// Last element of the subgroup.
let last = F::primitive_root_of_unity(degree_bits).inverse();
let size = degree << quotient_degree_bits;
let coset = F::cyclic_subgroup_coset_known_order(
F::primitive_root_of_unity(degree_bits + quotient_degree_bits),
F::coset_shift(),
size,
);
// We will step by `P::WIDTH`, and in each iteration, evaluate the quotient polynomial at
// a batch of `P::WIDTH` points.
let quotient_values = (0..size)
.into_par_iter()
.step_by(P::WIDTH)
.flat_map_iter(|i_start| {
let i_next_start = (i_start + next_step) % size;
let i_range = i_start..i_start + P::WIDTH;
let x = *P::from_slice(&coset[i_range.clone()]);
let z_last = x - last;
let lagrange_basis_first = *P::from_slice(&lagrange_first.values[i_range.clone()]);
let lagrange_basis_last = *P::from_slice(&lagrange_last.values[i_range]);
let mut consumer = ConstraintConsumer::new(
alphas.clone(),
z_last,
lagrange_basis_first,
lagrange_basis_last,
);
let vars = S::EvaluationFrame::from_values(
&get_trace_values_packed(i_start),
&get_trace_values_packed(i_next_start),
);
let lookup_vars = lookup_challenges.map(|challenges| LookupCheckVars {
local_values: auxiliary_polys_commitment.get_lde_values_packed(i_start, step)
[..num_lookup_columns]
.to_vec(),
next_values: auxiliary_polys_commitment.get_lde_values_packed(i_next_start, step),
challenges: challenges.to_vec(),
});
let mut start_index = 0;
let ctl_vars = ctl_data
.zs_columns
.iter()
.enumerate()
.map(|(i, zs_columns)| {
let num_ctl_helper_cols = num_ctl_columns[i];
let helper_columns = auxiliary_polys_commitment
.get_lde_values_packed(i_start, step)[num_lookup_columns
+ start_index
..num_lookup_columns + start_index + num_ctl_helper_cols]
.to_vec();
let ctl_vars = CtlCheckVars::<F, F, P, 1> {
helper_columns,
local_z: auxiliary_polys_commitment.get_lde_values_packed(i_start, step)
[num_lookup_columns + total_num_helper_cols + i],
next_z: auxiliary_polys_commitment
.get_lde_values_packed(i_next_start, step)
[num_lookup_columns + total_num_helper_cols + i],
challenges: zs_columns.challenge,
columns: zs_columns.columns.clone(),
filter: zs_columns.filter.clone(),
};
start_index += num_ctl_helper_cols;
ctl_vars
})
.collect::<Vec<_>>();
eval_vanishing_poly::<F, F, P, S, D, 1>(
stark,
&vars,
lookups,
lookup_vars,
&ctl_vars,
&mut consumer,
);
let mut constraints_evals = consumer.accumulators();
// We divide the constraints evaluations by `Z_H(x)`.
let denominator_inv: P = z_h_on_coset.eval_inverse_packed(i_start);
for eval in &mut constraints_evals {
*eval *= denominator_inv;
}
let num_challenges = alphas.len();
(0..P::WIDTH).map(move |i| {
(0..num_challenges)
.map(|j| constraints_evals[j].as_slice()[i])
.collect()
})
})
.collect::<Vec<_>>();
transpose("ient_values)
.into_par_iter()
.map(PolynomialValues::new)
.map(|values| values.coset_ifft(F::coset_shift()))
.collect()
}
/// Check that all constraints evaluate to zero on `H`.
/// Can also be used to check the degree of the constraints by evaluating on a larger subgroup.
///
/// Debug-only helper: panics (via `assert!`) with the STARK's type name if any
/// constraint accumulator is nonzero at any row.
fn check_constraints<'a, F, C, S, const D: usize>(
    stark: &S,
    trace_commitment: &'a PolynomialBatch<F, C, D>,
    auxiliary_commitment: &'a PolynomialBatch<F, C, D>,
    lookup_challenges: Option<&'a Vec<F>>,
    lookups: &[Lookup<F>],
    ctl_data: &CtlData<F>,
    alphas: Vec<F>,
    degree_bits: usize,
    num_lookup_columns: usize,
    num_ctl_helper_cols: &[usize],
) where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
{
    let degree = 1 << degree_bits;
    let rate_bits = 0; // Set this to higher value to check constraint degree.
    let total_num_helper_cols: usize = num_ctl_helper_cols.iter().sum();

    let size = degree << rate_bits;
    let step = 1 << rate_bits;

    // Evaluation of the first Lagrange polynomial.
    let lagrange_first = PolynomialValues::selector(degree, 0).lde(rate_bits);
    // Evaluation of the last Lagrange polynomial.
    let lagrange_last = PolynomialValues::selector(degree, degree - 1).lde(rate_bits);

    let subgroup = F::two_adic_subgroup(degree_bits + rate_bits);

    // Get the evaluations of a batch of polynomials over our subgroup.
    let get_subgroup_evals = |comm: &PolynomialBatch<F, C, D>| -> Vec<Vec<F>> {
        let values = comm
            .polynomials
            .par_iter()
            .map(|coeffs| coeffs.clone().fft().values)
            .collect::<Vec<_>>();
        transpose(&values)
    };

    let trace_subgroup_evals = get_subgroup_evals(trace_commitment);
    let auxiliary_subgroup_evals = get_subgroup_evals(auxiliary_commitment);

    // Last element of the subgroup.
    let last = F::primitive_root_of_unity(degree_bits).inverse();

    // Evaluate every constraint at every row; mirrors the slicing layout used
    // in `compute_quotient_polys` (lookup columns, then CTL helpers, then Zs).
    let constraint_values = (0..size)
        .map(|i| {
            let i_next = (i + step) % size;

            let x = subgroup[i];
            let z_last = x - last;
            let lagrange_basis_first = lagrange_first.values[i];
            let lagrange_basis_last = lagrange_last.values[i];

            let mut consumer = ConstraintConsumer::new(
                alphas.clone(),
                z_last,
                lagrange_basis_first,
                lagrange_basis_last,
            );
            let vars = S::EvaluationFrame::from_values(
                &trace_subgroup_evals[i],
                &trace_subgroup_evals[i_next],
            );
            let lookup_vars = lookup_challenges.map(|challenges| LookupCheckVars {
                local_values: auxiliary_subgroup_evals[i][..num_lookup_columns].to_vec(),
                next_values: auxiliary_subgroup_evals[i_next][..num_lookup_columns].to_vec(),
                challenges: challenges.to_vec(),
            });
            let mut start_index = 0;
            let ctl_vars = ctl_data
                .zs_columns
                .iter()
                .enumerate()
                .map(|(iii, zs_columns)| {
                    let num_helper_cols = num_ctl_helper_cols[iii];
                    let helper_columns = auxiliary_subgroup_evals[i][num_lookup_columns
                        + start_index
                        ..num_lookup_columns + start_index + num_helper_cols]
                        .to_vec();
                    let ctl_vars = CtlCheckVars::<F, F, F, 1> {
                        helper_columns,
                        local_z: auxiliary_subgroup_evals[i]
                            [num_lookup_columns + total_num_helper_cols + iii],
                        next_z: auxiliary_subgroup_evals[i_next]
                            [num_lookup_columns + total_num_helper_cols + iii],
                        challenges: zs_columns.challenge,
                        columns: zs_columns.columns.clone(),
                        filter: zs_columns.filter.clone(),
                    };
                    start_index += num_helper_cols;
                    ctl_vars
                })
                .collect::<Vec<_>>();
            eval_vanishing_poly::<F, F, F, S, D, 1>(
                stark,
                &vars,
                lookups,
                lookup_vars,
                &ctl_vars,
                &mut consumer,
            );
            consumer.accumulators()
        })
        .collect::<Vec<_>>();

    // Every accumulator must vanish on the subgroup.
    for v in constraint_values {
        assert!(
            v.iter().all(|x| x.is_zero()),
            "Constraint failed in {}",
            type_name::<S>()
        );
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/util.rs | prover/src/util.rs | use itertools::Itertools;
use std::mem::{size_of, transmute_copy, ManuallyDrop};
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::util::transpose;
#[allow(unused_imports)]
use plonky2_maybe_rayon::rayon;
/// Construct an integer from its constituent bits (in little-endian order).
pub fn limb_from_bits_le<P: PackedField>(iter: impl IntoIterator<Item = P>) -> P {
    // TODO: This is technically wrong, as 1 << i won't be canonical for all fields...
    let mut acc = P::ZEROS;
    for (i, bit) in iter.into_iter().enumerate() {
        acc += bit * P::Scalar::from_canonical_u64(1 << i);
    }
    acc
}
/// Construct an integer from its constituent bits (in little-endian order): recursive edition.
pub fn limb_from_bits_le_recursive<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    iter: impl IntoIterator<Item = ExtensionTarget<D>>,
) -> ExtensionTarget<D> {
    let mut acc = builder.zero_extension();
    for (i, bit) in iter.into_iter().enumerate() {
        // TODO: This is technically wrong, as 1 << i won't be canonical for all fields...
        acc = builder.mul_const_add_extension(F::from_canonical_u64(1 << i), bit, acc);
    }
    acc
}
/// A helper function to transpose a row-wise trace and put it in the format that `prove` expects.
///
/// Improvement: replaced the redundant closure `|column| PolynomialValues::new(column)`
/// with the function path (clippy `redundant_closure`).
pub fn trace_rows_to_poly_values<F: Field, const COLUMNS: usize>(
    trace_rows: Vec<[F; COLUMNS]>,
) -> Vec<PolynomialValues<F>> {
    // Rows become `Vec`s so `transpose` can operate on them.
    let trace_row_vecs = trace_rows.into_iter().map(|row| row.to_vec()).collect_vec();
    let trace_col_vecs: Vec<Vec<F>> = transpose(&trace_row_vecs);
    trace_col_vecs
        .into_iter()
        .map(PolynomialValues::new)
        .collect()
}
/// Build the identity index array `[0, 1, ..., N-1]` at compile time.
/// (A `while` loop is used because iterators are unavailable in `const fn`.)
pub(crate) const fn indices_arr<const N: usize>() -> [usize; N] {
    let mut out = [0usize; N];
    let mut idx = 0;
    while idx < N {
        out[idx] = idx;
        idx += 1;
    }
    out
}
/// Reinterpret `value: T` as a `U` via a bitwise copy.
///
/// Unlike `mem::transmute`, this compiles for generic `T`/`U` whose sizes the
/// compiler cannot compare at type-check time; size equality is only enforced
/// by a `debug_assert` at runtime.
///
/// # Safety
/// The caller must guarantee that `size_of::<T>() == size_of::<U>()` and that
/// the bit pattern of `value` is a valid `U`.
pub(crate) unsafe fn transmute_no_compile_time_size_checks<T, U>(value: T) -> U {
    debug_assert_eq!(size_of::<T>(), size_of::<U>());
    // Need ManuallyDrop so that `value` is not dropped by this function.
    let value = ManuallyDrop::new(value);
    // Copy the bit pattern. The original value is no longer safe to use.
    transmute_copy(&value)
}
/// Serialize eight `u32` words into their concatenated little-endian byte
/// representation (always 32 bytes).
pub fn u32_array_to_u8_vec(u32_array: &[u32; 8]) -> Vec<u8> {
    u32_array
        .iter()
        .flat_map(|word| word.to_le_bytes())
        .collect()
}
/// Run several closures in parallel on the rayon thread pool and return their
/// results as a tuple, e.g. `join!(|| a(), || b())`. Expansion is delegated to
/// `__join!`, which counts the arguments and builds the dispatch.
macro_rules! join {
    ($($($a:expr),+$(,)?)?) => {
        crate::util::__join!{0;;$($($a,)+)?}
    };
}
/// Implementation detail of `join!`.
///
/// The first rule recurses over the remaining expressions, incrementing the
/// length counter and pairing each expression with fresh `$f`/`$r` binding
/// names. Once the expression list is exhausted, the second rule emits the
/// actual code: each closure is wrapped in `Some` (so it can be `take`n and
/// called exactly once as `FnOnce`), one `Option` result slot is created per
/// closure, and an array of `&mut dyn FnMut` trampolines is dispatched via
/// rayon's parallel iterator. The results are then unwrapped into a tuple.
macro_rules! __join {
    ($len:expr; $($f:ident $r:ident $a:expr),*; $b:expr, $($c:expr,)*) => {
        crate::util::__join!{$len + 1; $($f $r $a,)* f r $b; $($c,)* }
    };
    ($len:expr; $($f:ident $r:ident $a:expr),* ;) => {
        match ($(Some(crate::util::__sendable_closure($a)),)*) {
            ($(mut $f,)*) => {
                // One result slot per closure, filled from the worker threads.
                $(let mut $r = None;)*
                let array: [&mut (dyn FnMut() + Send); $len] = [
                    $(&mut || $r = Some((&mut $f).take().unwrap()())),*
                ];
                rayon::iter::ParallelIterator::for_each(
                    rayon::iter::IntoParallelIterator::into_par_iter(array),
                    |f| f(),
                );
                ($($r.unwrap(),)*)
            }
        }
    };
}
/// Identity helper used by `__join!`: constrains its argument to be
/// `FnOnce() -> R + Send` via type inference, so every joined closure is
/// checked to be safe to run on another thread.
#[doc(hidden)]
pub(crate) fn __sendable_closure<R, F: FnOnce() -> R + Send>(x: F) -> F {
    x
}
pub(crate) use __join;
pub(crate) use join;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/vanishing_poly.rs | prover/src/vanishing_poly.rs | use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{
eval_cross_table_lookup_checks, eval_cross_table_lookup_checks_circuit, CtlCheckVars,
CtlCheckVarsTarget,
};
use crate::lookup::{
eval_ext_lookups_circuit, eval_packed_lookups_generic, Lookup, LookupCheckVars,
LookupCheckVarsTarget,
};
use crate::stark::Stark;
/// Accumulate all constraints making up the vanishing polynomial for one
/// (packed) evaluation point: the STARK's own constraints, the optional logUp
/// lookup constraints, and the cross-table lookup (CTL) constraints.
///
/// All three families are folded into the same `consumer` in this exact
/// order; `eval_vanishing_poly_circuit` mirrors the same sequence.
pub(crate) fn eval_vanishing_poly<F, FE, P, S, const D: usize, const D2: usize>(
    stark: &S,
    vars: &S::EvaluationFrame<FE, P, D2>,
    lookups: &[Lookup<F>],
    lookup_vars: Option<LookupCheckVars<F, FE, P, D2>>,
    ctl_vars: &[CtlCheckVars<F, FE, P, D2>],
    consumer: &mut ConstraintConsumer<P>,
) where
    F: RichField + Extendable<D>,
    FE: FieldExtension<D2, BaseField = F>,
    P: PackedField<Scalar = FE>,
    S: Stark<F, D>,
{
    // The STARK's native transition/boundary constraints.
    stark.eval_packed_generic(vars, consumer);
    // logUp lookup constraints, only when this table declares lookups.
    if let Some(lookup_vars) = lookup_vars {
        eval_packed_lookups_generic::<F, FE, P, S, D, D2>(
            stark,
            lookups,
            vars,
            lookup_vars,
            consumer,
        );
    }
    // Cross-table lookup consistency constraints.
    eval_cross_table_lookup_checks::<F, FE, P, S, D, D2>(
        vars,
        ctl_vars,
        consumer,
        stark.constraint_degree(),
    );
}
/// Recursive-circuit counterpart of `eval_vanishing_poly`: adds the same
/// constraint families (STARK constraints, optional lookups, CTL checks) to
/// the circuit, in the same order as the native evaluation.
pub(crate) fn eval_vanishing_poly_circuit<F, S, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    stark: &S,
    vars: &S::EvaluationFrameTarget,
    lookup_vars: Option<LookupCheckVarsTarget<D>>,
    ctl_vars: &[CtlCheckVarsTarget<F, D>],
    consumer: &mut RecursiveConstraintConsumer<F, D>,
) where
    F: RichField + Extendable<D>,
    S: Stark<F, D>,
{
    // The STARK's native constraints.
    stark.eval_ext_circuit(builder, vars, consumer);
    // logUp lookup constraints, only when this table declares lookups.
    if let Some(lookup_vars) = lookup_vars {
        eval_ext_lookups_circuit::<F, S, D>(builder, stark, vars, lookup_vars, consumer);
    }
    // Cross-table lookup consistency constraints.
    eval_cross_table_lookup_checks_circuit::<S, F, D>(
        builder,
        vars,
        ctl_vars,
        consumer,
        stark.constraint_degree(),
    );
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/lookup.rs | prover/src/lookup.rs | use num_bigint::BigUint;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2_util::ceil_div_usize;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{
eval_helper_columns, eval_helper_columns_circuit, get_helper_cols, Column, Filter,
GrandProductChallenge,
};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::stark::Stark;
/// A logUp lookup argument: asserts that the values appearing in `columns`
/// are all contained in `table_column`, with multiplicities recorded in
/// `frequencies_column`.
pub struct Lookup<F: Field> {
    /// Columns whose values should be contained in the lookup table.
    /// These are the f_i(x) polynomials in the logUp paper.
    pub(crate) columns: Vec<Column<F>>,
    /// Column containing the lookup table.
    /// This is the t(x) polynomial in the paper.
    pub(crate) table_column: Column<F>,
    /// Column containing the frequencies of `columns` in `table_column`.
    /// This is the m(x) polynomial in the paper.
    pub(crate) frequencies_column: Column<F>,
    /// Per-looking-column row filters, zipped 1:1 with `columns`
    /// (presumably `None` means the column is unfiltered — confirm against
    /// `get_helper_cols`).
    pub filter_columns: Vec<Option<Filter<F>>>,
}
impl<F: Field> Lookup<F> {
    /// Number of auxiliary columns this lookup contributes: one helper column
    /// per batch of `constraint_degree - 1` looking columns, plus one `Z`
    /// (running-sum) column. The `1/(table + challenge)` terms are folded into
    /// the `Z` recurrence rather than committed as a separate column (see
    /// `lookup_helper_columns`, which pushes only the batched helpers and `z`).
    pub(crate) fn num_helper_columns(&self, constraint_degree: usize) -> usize {
        // One helper column for each column batch of size `constraint_degree-1`,
        // then one column for the inverse of `table + challenge` and one for the `Z` polynomial.
        ceil_div_usize(self.columns.len(), constraint_degree - 1) + 1
    }
}
/// logUp protocol from https://ia.cr/2022/1530
/// Compute the helper columns for the lookup argument.
/// Given columns `f0,...,fk` and a column `t`, such that `∪fi ⊆ t`, and challenges `x`,
/// this computes the helper columns `h_i = 1/(x+f_2i) + 1/(x+f_2i+1)`, `g = 1/(x+t)`,
/// and `Z(gx) = Z(x) + sum h_i(x) - m(x)g(x)` where `m` is the frequencies column.
///
/// Returns the batched helper columns followed by the `Z` column; only the
/// degree-3 case (`constraint_degree == 3`, batch size 2) is supported.
pub(crate) fn lookup_helper_columns<F: Field>(
    lookup: &Lookup<F>,
    trace_poly_values: &[PolynomialValues<F>],
    challenge: F,
    constraint_degree: usize,
) -> Vec<PolynomialValues<F>> {
    assert_eq!(
        constraint_degree, 3,
        "TODO: Allow other constraint degrees."
    );
    assert_eq!(lookup.columns.len(), lookup.filter_columns.len());

    // Soundness: the total number of looked-up entries must not wrap around
    // the field characteristic.
    let num_total_logup_entries = trace_poly_values[0].values.len() * lookup.columns.len();
    assert!(BigUint::from(num_total_logup_entries) < F::characteristic());

    let num_helper_columns = lookup.num_helper_columns(constraint_degree);

    // Each looking column forms its own singleton group for batching.
    let looking_cols = lookup
        .columns
        .iter()
        .map(|col| vec![col.clone()])
        .collect::<Vec<Vec<Column<F>>>>();

    // Reuse the CTL grand-product machinery with `beta = 1` so only the
    // additive challenge `gamma` is applied.
    let grand_challenge = GrandProductChallenge {
        beta: F::ONE,
        gamma: challenge,
    };

    let columns_filters = looking_cols
        .iter()
        .zip(lookup.filter_columns.iter())
        .map(|(col, filter)| (&col[..], filter))
        .collect::<Vec<_>>();
    // For each batch of `constraint_degree-1` columns `fi`, compute `sum 1/(f_i+challenge)` and
    // add it to the helper columns.
    // Note: these are the h_k(x) polynomials in the paper, with a few differences:
    // * Here, the first ratio m_0(x)/phi_0(x) is not included with the columns batched up to create the
    //   h_k polynomials; instead there's a separate helper column for it (see below).
    // * Here, we use 1 instead of -1 as the numerator (and subtract later).
    // * Here, for now, the batch size (l) is always constraint_degree - 1 = 2.
    // * Here, there are filters for the columns, to only select some rows
    //   in a given column.
    let mut helper_columns = get_helper_cols(
        trace_poly_values,
        trace_poly_values[0].len(),
        &columns_filters,
        grand_challenge,
        constraint_degree,
    );

    // Add `1/(table+challenge)` to the helper columns.
    // This is 1/phi_0(x) = 1/(x + t(x)) from the paper.
    // Here, we don't include m(x) in the numerator, instead multiplying it with this column later.
    let mut table = lookup.table_column.eval_all_rows(trace_poly_values);
    for x in table.iter_mut() {
        *x = challenge + *x;
    }
    let table_inverse: Vec<F> = F::batch_multiplicative_inverse(&table);

    // Compute the `Z` polynomial with `Z(1)=0` and `Z(gx) = Z(x) + sum h_i(x) - frequencies(x)g(x)`.
    // This enforces the check from the paper, that the sum of the h_k(x) polynomials is 0 over H.
    // In the paper, that sum includes m(x)/(x + t(x)) = frequencies(x)/g(x), because that was bundled
    // into the h_k(x) polynomials.
    let frequencies = &lookup.frequencies_column.eval_all_rows(trace_poly_values);
    let mut z = Vec::with_capacity(frequencies.len());
    z.push(F::ZERO);
    for i in 0..frequencies.len() - 1 {
        let x = helper_columns[..num_helper_columns - 1]
            .iter()
            .map(|col| col.values[i])
            .sum::<F>()
            - frequencies[i] * table_inverse[i];
        z.push(z[i] + x);
    }
    helper_columns.push(z.into());

    helper_columns
}
/// Values needed to check a logUp argument at one evaluation point: the
/// auxiliary (helper + `Z`) columns at the current and next row, plus the
/// lookup challenges.
pub struct LookupCheckVars<F, FE, P, const D2: usize>
where
    F: Field,
    FE: FieldExtension<D2, BaseField = F>,
    P: PackedField<Scalar = FE>,
{
    /// Auxiliary lookup column values at the current row.
    pub(crate) local_values: Vec<P>,
    /// Auxiliary lookup column values at the next row.
    pub(crate) next_values: Vec<P>,
    /// The lookup challenges (base-field elements).
    pub(crate) challenges: Vec<F>,
}
/// Constraints for the logUp lookup argument.
///
/// For every lookup and every challenge this checks (a) the helper columns
/// against the looking columns, and (b) the running-sum `Z` column:
/// `Z` starts at 0 on the first row, and each transition adds the helper sums
/// minus `frequencies/(table+challenge)` (cleared of the denominator).
/// `start` walks through `lookup_vars.local_values`, which concatenates the
/// auxiliary columns of all (lookup, challenge) pairs in order.
pub(crate) fn eval_packed_lookups_generic<F, FE, P, S, const D: usize, const D2: usize>(
    stark: &S,
    lookups: &[Lookup<F>],
    vars: &S::EvaluationFrame<FE, P, D2>,
    lookup_vars: LookupCheckVars<F, FE, P, D2>,
    yield_constr: &mut ConstraintConsumer<P>,
) where
    F: RichField + Extendable<D>,
    FE: FieldExtension<D2, BaseField = F>,
    P: PackedField<Scalar = FE>,
    S: Stark<F, D>,
{
    let local_values = vars.get_local_values();
    let next_values = vars.get_next_values();
    let degree = stark.constraint_degree();
    assert_eq!(degree, 3, "TODO: Allow other constraint degrees.");
    let mut start = 0;
    for lookup in lookups {
        let num_helper_columns = lookup.num_helper_columns(degree);
        for &challenge in &lookup_vars.challenges {
            // Only the additive part of the challenge is used (beta = 1).
            let grand_challenge = GrandProductChallenge {
                beta: F::ONE,
                gamma: challenge,
            };
            let lookup_columns = lookup
                .columns
                .iter()
                .map(|col| vec![col.eval_with_next(local_values, next_values)])
                .collect::<Vec<Vec<P>>>();

            // For each chunk, check that `h_i (x+f_2i) (x+f_{2i+1}) = (x+f_2i) * filter_{2i+1} + (x+f_{2i+1}) * filter_2i` if the chunk has length 2
            // or if it has length 1, check that `h_i * (x+f_2i) = filter_2i`, where x is the challenge
            eval_helper_columns(
                &lookup.filter_columns,
                &lookup_columns,
                local_values,
                next_values,
                &lookup_vars.local_values[start..start + num_helper_columns - 1],
                degree,
                &grand_challenge,
                yield_constr,
            );

            let challenge = FE::from_basefield(challenge);

            // Check the `Z` polynomial.
            let z = lookup_vars.local_values[start + num_helper_columns - 1];
            let next_z = lookup_vars.next_values[start + num_helper_columns - 1];
            let table_with_challenge = lookup.table_column.eval(local_values) + challenge;
            let y = lookup_vars.local_values[start..start + num_helper_columns - 1]
                .iter()
                .fold(P::ZEROS, |acc, x| acc + *x)
                * table_with_challenge
                - lookup.frequencies_column.eval(local_values);
            // Check that in the first row, z = 0;
            yield_constr.constraint_first_row(z);
            yield_constr.constraint((next_z - z) * table_with_challenge - y);
            start += num_helper_columns;
        }
    }
}
/// Circuit counterpart of `LookupCheckVars`: openings of the lookup auxiliary
/// polynomials and the lookup challenges, as recursive-circuit targets.
pub struct LookupCheckVarsTarget<const D: usize> {
    /// Openings of the lookup auxiliary polynomials at the current row.
    pub(crate) local_values: Vec<ExtensionTarget<D>>,
    /// Openings of the lookup auxiliary polynomials at the next row.
    pub(crate) next_values: Vec<ExtensionTarget<D>>,
    /// Challenges used to combine the looked-up columns.
    pub(crate) challenges: Vec<Target>,
}
/// Circuit version of `eval_packed_lookups_generic`: constraints for the logUp
/// lookup argument, evaluated inside a recursive circuit.
///
/// Note: constraints must be emitted in exactly the same order as in the
/// native evaluation, since the verifier recombines them with identical
/// alpha powers.
pub(crate) fn eval_ext_lookups_circuit<
    F: RichField + Extendable<D>,
    S: Stark<F, D>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    stark: &S,
    vars: &S::EvaluationFrameTarget,
    lookup_vars: LookupCheckVarsTarget<D>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let degree = stark.constraint_degree();
    let lookups = stark.lookups();
    let local_values = vars.get_local_values();
    let next_values = vars.get_next_values();
    // The helper-column chunking below assumes constraint degree 3.
    assert_eq!(degree, 3, "TODO: Allow other constraint degrees.");
    // `start` is the offset of the current lookup's auxiliary columns within
    // the flat `lookup_vars` vectors.
    let mut start = 0;
    for lookup in lookups {
        let num_helper_columns = lookup.num_helper_columns(degree);
        let col_values = lookup
            .columns
            .iter()
            .map(|col| vec![col.eval_with_next_circuit(builder, local_values, next_values)])
            .collect::<Vec<_>>();
        for &challenge in &lookup_vars.challenges {
            // Combine with `beta` fixed to one; only the additive `gamma`
            // part of the challenge is used by the lookup argument.
            let grand_challenge = GrandProductChallenge {
                beta: builder.one(),
                gamma: challenge,
            };
            // Check the helper columns h_i against the looked-up columns.
            eval_helper_columns_circuit(
                builder,
                &lookup.filter_columns,
                &col_values,
                local_values,
                next_values,
                &lookup_vars.local_values[start..start + num_helper_columns - 1],
                degree,
                &grand_challenge,
                yield_constr,
            );
            let challenge = builder.convert_to_ext(challenge);
            // The `Z` polynomial is stored in the last auxiliary slot of this
            // lookup's range.
            let z = lookup_vars.local_values[start + num_helper_columns - 1];
            let next_z = lookup_vars.next_values[start + num_helper_columns - 1];
            let table_column = lookup
                .table_column
                .eval_circuit(builder, vars.get_local_values());
            let table_with_challenge = builder.add_extension(table_column, challenge);
            // y = (sum_i h_i) * (table + challenge) - frequencies.
            let mut y = builder.add_many_extension(
                &lookup_vars.local_values[start..start + num_helper_columns - 1],
            );
            let frequencies_column = lookup
                .frequencies_column
                .eval_circuit(builder, vars.get_local_values());
            y = builder.mul_extension(y, table_with_challenge);
            y = builder.sub_extension(y, frequencies_column);
            // Check that in the first row, z = 0;
            yield_constr.constraint_first_row(builder, z);
            // Transition check: (Z(gx) - Z(x)) * (table + challenge) = y.
            let mut constraint = builder.sub_extension(next_z, z);
            constraint = builder.mul_extension(constraint, table_with_challenge);
            constraint = builder.sub_extension(constraint, y);
            yield_constr.constraint(builder, constraint);
            start += num_helper_columns;
        }
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/stark_testing.rs | prover/src/stark_testing.rs | use anyhow::{ensure, Result};
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::polynomial::{PolynomialCoeffs, PolynomialValues};
use plonky2::field::types::{Field, Sample};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::CircuitConfig;
use plonky2::plonk::config::GenericConfig;
use plonky2::util::transpose;
use plonky2_util::{log2_ceil, log2_strict};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::stark::Stark;
/// Number of rows (32) in the random witness polynomials used by these tests.
const WITNESS_SIZE: usize = 1 << 5;
/// Tests that the constraints imposed by the given STARK are low-degree by applying them to random
/// low-degree witness polynomials.
///
/// Returns an error if the combined constraint polynomial, evaluated over a
/// low-degree extension of a random trace, exceeds the degree bound implied by
/// `stark.constraint_degree()`.
pub fn test_stark_low_degree<F: RichField + Extendable<D>, S: Stark<F, D>, const D: usize>(
    stark: S,
) -> Result<()> {
    // Blow-up factor large enough for the constraint evaluations to determine
    // the constraint polynomial's degree.
    let rate_bits = log2_ceil(stark.constraint_degree() + 1);
    let trace_ldes = random_low_degree_matrix::<F>(S::COLUMNS, rate_bits);
    let size = trace_ldes.len();
    // First/last-row Lagrange selectors, extended to the LDE domain.
    let lagrange_first = PolynomialValues::selector(WITNESS_SIZE, 0).lde(rate_bits);
    let lagrange_last = PolynomialValues::selector(WITNESS_SIZE, WITNESS_SIZE - 1).lde(rate_bits);
    // g^(n-1), used to build the `X - g^(n-1)` transition filter below.
    let last = F::primitive_root_of_unity(log2_strict(WITNESS_SIZE)).inverse();
    let subgroup =
        F::cyclic_subgroup_known_order(F::primitive_root_of_unity(log2_strict(size)), size);
    let alpha = F::rand();
    // Evaluate the alpha-combined constraints over the entire LDE domain.
    let constraint_evals = (0..size)
        .map(|i| {
            // `(i + (1 << rate_bits)) % size` is the LDE index corresponding
            // to the next trace row (one step of the original subgroup).
            let vars = S::EvaluationFrame::from_values(
                &trace_ldes[i],
                &trace_ldes[(i + (1 << rate_bits)) % size],
            );
            let mut consumer = ConstraintConsumer::<F>::new(
                vec![alpha],
                subgroup[i] - last,
                lagrange_first.values[i],
                lagrange_last.values[i],
            );
            stark.eval_packed_base(&vars, &mut consumer);
            consumer.accumulators()[0]
        })
        .collect::<Vec<_>>();
    let constraint_poly_values = PolynomialValues::new(constraint_evals);
    if !constraint_poly_values.is_zero() {
        let constraint_eval_degree = constraint_poly_values.degree();
        // Witness polynomials have degree < WITNESS_SIZE, so degree-d
        // constraints should evaluate to polynomials below this bound.
        let maximum_degree = WITNESS_SIZE * stark.constraint_degree() - 1;
        ensure!(
            constraint_eval_degree <= maximum_degree,
            "Expected degrees at most {} * {} - 1 = {}, actual {:?}",
            WITNESS_SIZE,
            stark.constraint_degree(),
            maximum_degree,
            constraint_eval_degree
        );
    }
    Ok(())
}
/// Tests that the circuit constraints imposed by the given STARK are coherent with the native constraints.
///
/// Evaluates the constraints natively on random rows, re-evaluates them inside
/// a recursive circuit on the same values, constrains the two results to be
/// equal, and then proves and verifies the resulting circuit.
pub fn test_stark_circuit_constraints<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
    const D: usize,
>(
    stark: S,
) -> Result<()> {
    // Compute native constraint evaluation on random values.
    let vars = S::EvaluationFrame::from_values(
        &F::Extension::rand_vec(S::COLUMNS),
        &F::Extension::rand_vec(S::COLUMNS),
    );
    let alphas = F::rand_vec(1);
    let z_last = F::Extension::rand();
    let lagrange_first = F::Extension::rand();
    let lagrange_last = F::Extension::rand();
    let mut consumer = ConstraintConsumer::<F::Extension>::new(
        alphas
            .iter()
            .copied()
            .map(F::Extension::from_basefield)
            .collect(),
        z_last,
        lagrange_first,
        lagrange_last,
    );
    stark.eval_ext(&vars, &mut consumer);
    let native_eval = consumer.accumulators()[0];
    // Compute circuit constraint evaluation on same random values.
    let circuit_config = CircuitConfig::standard_recursion_config();
    let mut builder = CircuitBuilder::<F, D>::new(circuit_config);
    let mut pw = PartialWitness::<F>::new();
    // Wire the same random rows and challenges into the circuit as witness values.
    let locals_t = builder.add_virtual_extension_targets(S::COLUMNS);
    pw.set_extension_targets(&locals_t, vars.get_local_values());
    let nexts_t = builder.add_virtual_extension_targets(S::COLUMNS);
    pw.set_extension_targets(&nexts_t, vars.get_next_values());
    let alphas_t = builder.add_virtual_targets(1);
    pw.set_target(alphas_t[0], alphas[0]);
    let z_last_t = builder.add_virtual_extension_target();
    pw.set_extension_target(z_last_t, z_last);
    let lagrange_first_t = builder.add_virtual_extension_target();
    pw.set_extension_target(lagrange_first_t, lagrange_first);
    let lagrange_last_t = builder.add_virtual_extension_target();
    pw.set_extension_target(lagrange_last_t, lagrange_last);
    let vars = S::EvaluationFrameTarget::from_values(&locals_t, &nexts_t);
    let mut consumer = RecursiveConstraintConsumer::<F, D>::new(
        builder.zero_extension(),
        alphas_t,
        z_last_t,
        lagrange_first_t,
        lagrange_last_t,
    );
    stark.eval_ext_circuit(&mut builder, &vars, &mut consumer);
    let circuit_eval = consumer.accumulators()[0];
    // Constrain the circuit evaluation to equal the native one, then prove.
    let native_eval_t = builder.constant_extension(native_eval);
    builder.connect_extension(circuit_eval, native_eval_t);
    let data = builder.build::<C>();
    let proof = data.prove(pw)?;
    data.verify(proof)
}
/// Builds a row-major matrix whose `num_polys` columns are independent random
/// low-degree polynomial extensions (see `random_low_degree_values`).
fn random_low_degree_matrix<F: Field>(num_polys: usize, rate_bits: usize) -> Vec<Vec<F>> {
    let mut columns = Vec::with_capacity(num_polys);
    for _ in 0..num_polys {
        columns.push(random_low_degree_values::<F>(rate_bits));
    }
    transpose(&columns)
}
/// Samples a random polynomial with `WITNESS_SIZE` coefficients and returns its
/// evaluations on a domain `2^rate_bits` times larger.
fn random_low_degree_values<F: Field>(rate_bits: usize) -> Vec<F> {
    let coeffs = PolynomialCoeffs::new(F::rand_vec(WITNESS_SIZE));
    let extended = coeffs.lde(rate_bits);
    extended.fft().values
}
/// Checks that the native constraint evaluation of `stark` vanishes on the
/// caller-supplied pair of adjacent rows (`lv`, `nv`), lifted into the
/// extension field. Panics on the first non-vanishing accumulator.
pub fn test_stark_check_constraints<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
    const D: usize,
>(
    stark: S,
    lv: &[C::F],
    nv: &[C::F],
) {
    // Lift both base-field rows into the extension field.
    let lift = |row: &[C::F]| -> Vec<F::Extension> {
        row.iter()
            .copied()
            .map(F::Extension::from_basefield)
            .collect()
    };
    let local = lift(lv);
    let next = lift(nv);
    let vars = S::EvaluationFrame::from_values(&local, &next);
    // Random challenge and row filters; the constraints must vanish regardless.
    let alphas = F::rand_vec(1);
    let alphas_ext: Vec<F::Extension> = alphas
        .iter()
        .copied()
        .map(F::Extension::from_basefield)
        .collect();
    let mut consumer = ConstraintConsumer::<F::Extension>::new(
        alphas_ext,
        F::Extension::rand(),
        F::Extension::rand(),
        F::Extension::rand(),
    );
    stark.eval_ext(&vars, &mut consumer);
    // Every accumulated constraint combination must be zero on this row pair.
    for &acc in &consumer.constraint_accs {
        assert_eq!(acc, F::Extension::ZERO);
    }
}
/// Checks that the packed constraint evaluation of `stark` vanishes on the
/// caller-supplied pair of adjacent rows (`lv`, `nv`). The first/last-row
/// Lagrange selectors are zeroed, so boundary constraints are not exercised.
pub fn test_stark_cpu_check_constraints<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
    const D: usize,
>(
    stark: S,
    lv: &[C::F],
    nv: &[C::F],
) {
    // Lift both base-field rows into the extension field.
    let lift = |row: &[C::F]| -> Vec<F::Extension> {
        row.iter()
            .copied()
            .map(F::Extension::from_basefield)
            .collect()
    };
    let local = lift(lv);
    let next = lift(nv);
    let vars = S::EvaluationFrame::from_values(&local, &next);
    let alphas_ext: Vec<F::Extension> = F::rand_vec(1)
        .iter()
        .copied()
        .map(F::Extension::from_basefield)
        .collect();
    // Zeroed Lagrange selectors disable first/last-row constraints.
    let mut consumer = ConstraintConsumer::<F::Extension>::new(
        alphas_ext,
        F::Extension::rand(),
        F::Extension::ZERO,
        F::Extension::ZERO,
    );
    stark.eval_packed_generic(&vars, &mut consumer);
    // Every accumulated constraint combination must be zero on this row pair.
    for &acc in &consumer.constraint_accs {
        assert_eq!(acc, F::Extension::ZERO);
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/stark.rs | prover/src/stark.rs | use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::fri::structure::{
FriBatchInfo, FriBatchInfoTarget, FriInstanceInfo, FriInstanceInfoTarget, FriOracleInfo,
FriPolynomialInfo,
};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::config::StarkConfig;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::lookup::Lookup;
/// Index of the trace-polynomial commitment among the FRI oracles.
const TRACE_ORACLE_INDEX: usize = 0;
/// Index of the auxiliary (lookup/CTL) polynomial commitment among the FRI oracles.
const AUXILIARY_ORACLE_INDEX: usize = 1;
/// Index of the quotient-polynomial commitment among the FRI oracles.
const QUOTIENT_ORACLE_INDEX: usize = 2;
/// Represents a STARK system.
pub trait Stark<F: RichField + Extendable<D>, const D: usize>: Sync {
    /// The total number of columns in the trace.
    const COLUMNS: usize = Self::EvaluationFrameTarget::COLUMNS;
    /// This is used to evaluate constraints natively.
    type EvaluationFrame<FE, P, const D2: usize>: StarkEvaluationFrame<P>
    where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>;
    /// The `Target` version of `Self::EvaluationFrame`, used to evaluate constraints recursively.
    type EvaluationFrameTarget: StarkEvaluationFrame<ExtensionTarget<D>>;
    /// Evaluate constraints at a vector of points.
    ///
    /// The points are elements of a field `FE`, a degree `D2` extension of `F`. This lets us
    /// evaluate constraints over a larger domain if desired. This can also be called with `FE = F`
    /// and `D2 = 1`, in which case we are using the trivial extension, i.e. just evaluating
    /// constraints over `F`.
    fn eval_packed_generic<FE, P, const D2: usize>(
        &self,
        vars: &Self::EvaluationFrame<FE, P, D2>,
        yield_constr: &mut ConstraintConsumer<P>,
    ) where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>;
    /// Evaluate constraints at a vector of points from the base field `F`.
    fn eval_packed_base<P: PackedField<Scalar = F>>(
        &self,
        vars: &Self::EvaluationFrame<F, P, 1>,
        yield_constr: &mut ConstraintConsumer<P>,
    ) {
        self.eval_packed_generic(vars, yield_constr)
    }
    /// Evaluate constraints at a single point from the degree `D` extension field.
    fn eval_ext(
        &self,
        vars: &Self::EvaluationFrame<F::Extension, F::Extension, D>,
        yield_constr: &mut ConstraintConsumer<F::Extension>,
    ) {
        self.eval_packed_generic(vars, yield_constr)
    }
    /// Evaluate constraints at a vector of points from the degree `D` extension field. This is like
    /// `eval_ext`, except in the context of a recursive circuit.
    /// Note: constraints must be added through `yield_constr.constraint(builder, constraint)` in the
    /// same order as they are given in `eval_packed_generic`.
    fn eval_ext_circuit(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        vars: &Self::EvaluationFrameTarget,
        yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    );
    /// The maximum constraint degree.
    fn constraint_degree(&self) -> usize;
    /// The quotient polynomial's degree relative to the trace degree,
    /// i.e. `max(1, constraint_degree - 1)`.
    fn quotient_degree_factor(&self) -> usize {
        1.max(self.constraint_degree() - 1)
    }
    /// Number of quotient-polynomial chunks committed to: one
    /// `quotient_degree_factor`-sized group per challenge.
    fn num_quotient_polys(&self, config: &StarkConfig) -> usize {
        self.quotient_degree_factor() * config.num_challenges
    }
    /// Computes the FRI instance used to prove this Stark.
    fn fri_instance(
        &self,
        zeta: F::Extension,
        g: F,
        num_ctl_helpers: usize,
        num_ctl_zs: Vec<usize>,
        config: &StarkConfig,
    ) -> FriInstanceInfo<F, D> {
        // Oracle 0: the trace polynomials.
        let trace_oracle = FriOracleInfo {
            num_polys: Self::COLUMNS,
            blinding: false,
        };
        let trace_info = FriPolynomialInfo::from_range(TRACE_ORACLE_INDEX, 0..Self::COLUMNS);
        // Oracle 1: lookup helper columns, then CTL helpers, then CTL Z columns.
        let num_lookup_columns = self.num_lookup_helper_columns(config);
        let num_auxiliary_polys = num_lookup_columns + num_ctl_helpers + num_ctl_zs.len();
        let auxiliary_oracle = FriOracleInfo {
            num_polys: num_auxiliary_polys,
            blinding: false,
        };
        let auxiliary_polys_info =
            FriPolynomialInfo::from_range(AUXILIARY_ORACLE_INDEX, 0..num_auxiliary_polys);
        // The CTL Z columns are the tail of the auxiliary oracle.
        let ctl_zs_info = FriPolynomialInfo::from_range(
            AUXILIARY_ORACLE_INDEX,
            num_lookup_columns + num_ctl_helpers..num_auxiliary_polys,
        );
        // Oracle 2: the quotient polynomial chunks.
        let num_quotient_polys = self.num_quotient_polys(config);
        let quotient_oracle = FriOracleInfo {
            num_polys: num_quotient_polys,
            blinding: false,
        };
        let quotient_info =
            FriPolynomialInfo::from_range(QUOTIENT_ORACLE_INDEX, 0..num_quotient_polys);
        // Openings at zeta: everything.
        let zeta_batch = FriBatchInfo {
            point: zeta,
            polynomials: [
                trace_info.clone(),
                auxiliary_polys_info.clone(),
                quotient_info,
            ]
            .concat(),
        };
        // Openings at g * zeta (the "next row"): trace and auxiliary only.
        let zeta_next_batch = FriBatchInfo {
            point: zeta.scalar_mul(g),
            polynomials: [trace_info, auxiliary_polys_info].concat(),
        };
        // Openings at 1 (the first row): only the CTL Z columns.
        let ctl_first_batch = FriBatchInfo {
            point: F::Extension::ONE,
            polynomials: ctl_zs_info,
        };
        FriInstanceInfo {
            oracles: vec![trace_oracle, auxiliary_oracle, quotient_oracle],
            batches: vec![zeta_batch, zeta_next_batch, ctl_first_batch],
        }
    }
    /// Computes the FRI instance used to prove this Stark, in the context of a
    /// recursive circuit. Must mirror `fri_instance`.
    fn fri_instance_target(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        zeta: ExtensionTarget<D>,
        g: F,
        num_ctl_helper_polys: usize,
        num_ctl_zs: usize,
        inner_config: &StarkConfig,
    ) -> FriInstanceInfoTarget<D> {
        // Oracle 0: the trace polynomials.
        let trace_oracle = FriOracleInfo {
            num_polys: Self::COLUMNS,
            blinding: false,
        };
        let trace_info = FriPolynomialInfo::from_range(TRACE_ORACLE_INDEX, 0..Self::COLUMNS);
        // Oracle 1: lookup helper columns, then CTL helpers, then CTL Z columns.
        let num_lookup_columns = self.num_lookup_helper_columns(inner_config);
        let num_auxiliary_polys = num_lookup_columns + num_ctl_helper_polys + num_ctl_zs;
        let auxiliary_oracle = FriOracleInfo {
            num_polys: num_auxiliary_polys,
            blinding: false,
        };
        let auxiliary_polys_info =
            FriPolynomialInfo::from_range(AUXILIARY_ORACLE_INDEX, 0..num_auxiliary_polys);
        // The CTL Z columns are the tail of the auxiliary oracle.
        let ctl_zs_info = FriPolynomialInfo::from_range(
            AUXILIARY_ORACLE_INDEX,
            num_lookup_columns + num_ctl_helper_polys
                ..num_lookup_columns + num_ctl_helper_polys + num_ctl_zs,
        );
        // Oracle 2: the quotient polynomial chunks.
        let num_quotient_polys = self.num_quotient_polys(inner_config);
        let quotient_oracle = FriOracleInfo {
            num_polys: num_quotient_polys,
            blinding: false,
        };
        let quotient_info =
            FriPolynomialInfo::from_range(QUOTIENT_ORACLE_INDEX, 0..num_quotient_polys);
        // Openings at zeta: everything.
        let zeta_batch = FriBatchInfoTarget {
            point: zeta,
            polynomials: [
                trace_info.clone(),
                auxiliary_polys_info.clone(),
                quotient_info,
            ]
            .concat(),
        };
        // Openings at g * zeta (the "next row"): trace and auxiliary only.
        let zeta_next = builder.mul_const_extension(g, zeta);
        let zeta_next_batch = FriBatchInfoTarget {
            point: zeta_next,
            polynomials: [trace_info, auxiliary_polys_info].concat(),
        };
        // Openings at 1 (the first row): only the CTL Z columns.
        let ctl_first_batch = FriBatchInfoTarget {
            point: builder.one_extension(),
            polynomials: ctl_zs_info,
        };
        FriInstanceInfoTarget {
            oracles: vec![trace_oracle, auxiliary_oracle, quotient_oracle],
            batches: vec![zeta_batch, zeta_next_batch, ctl_first_batch],
        }
    }
    /// The lookup arguments used by this STARK table, if any. Defaults to none.
    fn lookups(&self) -> Vec<Lookup<F>> {
        vec![]
    }
    /// Total number of lookup helper columns across all lookups and challenges.
    fn num_lookup_helper_columns(&self, config: &StarkConfig) -> usize {
        self.lookups()
            .iter()
            .map(|lookup| lookup.num_helper_columns(self.constraint_degree()))
            .sum::<usize>()
            * config.num_challenges
    }
    /// Whether this STARK uses any lookup arguments.
    fn uses_lookups(&self) -> bool {
        !self.lookups().is_empty()
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/constraint_consumer.rs | prover/src/constraint_consumer.rs | use std::marker::PhantomData;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
use plonky2::plonk::circuit_builder::CircuitBuilder;
/// Accumulates STARK constraint evaluations at a single point, combining all
/// emitted constraints into one running sum per `alpha` challenge.
pub struct ConstraintConsumer<P: PackedField> {
    /// Random values used to combine multiple constraints into one.
    pub alphas: Vec<P::Scalar>,
    /// Running sums of constraints that have been emitted so far, scaled by powers of alpha.
    // TODO(JN): This is pub so it can be used in a test. Once we have an API for accessing this
    // result, it should be made private.
    pub constraint_accs: Vec<P>,
    /// The evaluation of `X - g^(n-1)`, used to filter out the last row for
    /// transition constraints.
    z_last: P,
    /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated
    /// with the first trace row, and zero at other points in the subgroup.
    lagrange_basis_first: P,
    /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated
    /// with the last trace row, and zero at other points in the subgroup.
    lagrange_basis_last: P,
}
impl<P: PackedField> ConstraintConsumer<P> {
    /// Creates a consumer with one zeroed accumulator per alpha challenge.
    pub fn new(
        alphas: Vec<P::Scalar>,
        z_last: P,
        lagrange_basis_first: P,
        lagrange_basis_last: P,
    ) -> Self {
        let constraint_accs = vec![P::ZEROS; alphas.len()];
        Self {
            alphas,
            constraint_accs,
            z_last,
            lagrange_basis_first,
            lagrange_basis_last,
        }
    }
    /// Consumes `self` and returns the running constraint accumulators.
    pub fn accumulators(self) -> Vec<P> {
        self.constraint_accs
    }
    /// Add one constraint valid on all rows except the last.
    pub fn constraint_transition(&mut self, constraint: P) {
        let filtered = constraint * self.z_last;
        self.constraint(filtered);
    }
    /// Add one constraint on all rows.
    pub fn constraint(&mut self, constraint: P) {
        for (acc, &alpha) in self.constraint_accs.iter_mut().zip(self.alphas.iter()) {
            // Horner step: acc <- acc * alpha + constraint.
            *acc = *acc * alpha + constraint;
        }
    }
    /// Add one constraint, but first multiply it by a filter such that it will
    /// only apply to the first row of the trace.
    pub fn constraint_first_row(&mut self, constraint: P) {
        let filtered = constraint * self.lagrange_basis_first;
        self.constraint(filtered);
    }
    /// Add one constraint, but first multiply it by a filter such that it will
    /// only apply to the last row of the trace.
    pub fn constraint_last_row(&mut self, constraint: P) {
        let filtered = constraint * self.lagrange_basis_last;
        self.constraint(filtered);
    }
}
/// Circuit counterpart of `ConstraintConsumer`: accumulates constraint
/// evaluation targets inside a recursive circuit.
pub struct RecursiveConstraintConsumer<F: RichField + Extendable<D>, const D: usize> {
    /// A random value used to combine multiple constraints into one.
    alphas: Vec<Target>,
    /// A running sum of constraints that have been emitted so far, scaled by powers of alpha.
    constraint_accs: Vec<ExtensionTarget<D>>,
    /// The evaluation of `X - g^(n-1)`, used to filter out the last row for
    /// transition constraints.
    z_last: ExtensionTarget<D>,
    /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated
    /// with the first trace row, and zero at other points in the subgroup.
    lagrange_basis_first: ExtensionTarget<D>,
    /// The evaluation of the Lagrange basis polynomial which is nonzero at the point associated
    /// with the last trace row, and zero at other points in the subgroup.
    lagrange_basis_last: ExtensionTarget<D>,
    // Ties the unused `F` type parameter to the struct.
    _phantom: PhantomData<F>,
}
impl<F: RichField + Extendable<D>, const D: usize> RecursiveConstraintConsumer<F, D> {
pub fn new(
zero: ExtensionTarget<D>,
alphas: Vec<Target>,
z_last: ExtensionTarget<D>,
lagrange_basis_first: ExtensionTarget<D>,
lagrange_basis_last: ExtensionTarget<D>,
) -> Self {
Self {
constraint_accs: vec![zero; alphas.len()],
alphas,
z_last,
lagrange_basis_first,
lagrange_basis_last,
_phantom: Default::default(),
}
}
pub fn accumulators(self) -> Vec<ExtensionTarget<D>> {
self.constraint_accs
}
/// Add one constraint valid on all rows except the last.
pub fn constraint_transition(
&mut self,
builder: &mut CircuitBuilder<F, D>,
constraint: ExtensionTarget<D>,
) {
let filtered_constraint = builder.mul_extension(constraint, self.z_last);
self.constraint(builder, filtered_constraint);
}
/// Add one constraint valid on all rows.
pub fn constraint(
&mut self,
builder: &mut CircuitBuilder<F, D>,
constraint: ExtensionTarget<D>,
) {
for (&alpha, acc) in self.alphas.iter().zip(&mut self.constraint_accs) {
*acc = builder.scalar_mul_add_extension(alpha, *acc, constraint);
}
}
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// first row of the trace.
pub fn constraint_first_row(
&mut self,
builder: &mut CircuitBuilder<F, D>,
constraint: ExtensionTarget<D>,
) {
let filtered_constraint = builder.mul_extension(constraint, self.lagrange_basis_first);
self.constraint(builder, filtered_constraint);
}
/// Add one constraint, but first multiply it by a filter such that it will only apply to the
/// last row of the trace.
pub fn constraint_last_row(
&mut self,
builder: &mut CircuitBuilder<F, D>,
constraint: ExtensionTarget<D>,
) {
let filtered_constraint = builder.mul_extension(constraint, self.lagrange_basis_last);
self.constraint(builder, filtered_constraint);
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/evaluation_frame.rs | prover/src/evaluation_frame.rs | /// A trait for viewing an evaluation frame of a STARK table.
///
/// It allows to access the current and next rows at a given step
/// and can be used to implement constraint evaluation both natively
/// and recursively.
pub trait StarkEvaluationFrame<T: Copy + Clone + Default>: Sized {
/// The number of columns for the STARK table this evaluation frame views.
const COLUMNS: usize;
/// Returns the local values (i.e. current row) for this evaluation frame.
fn get_local_values(&self) -> &[T];
/// Returns the next values (i.e. next row) for this evaluation frame.
fn get_next_values(&self) -> &[T];
/// Outputs a new evaluation frame from the provided local and next values.
///
/// **NOTE**: Concrete implementations of this method SHOULD ensure that
/// the provided slices lengths match the `Self::COLUMNS` value.
fn from_values(lv: &[T], nv: &[T]) -> Self;
}
/// A concrete evaluation frame holding `N` column values for the current
/// (`local_values`) and next (`next_values`) trace rows.
pub struct StarkFrame<T: Copy + Clone + Default, const N: usize> {
    local_values: [T; N],
    next_values: [T; N],
}
impl<T: Copy + Clone + Default, const N: usize> StarkEvaluationFrame<T> for StarkFrame<T, N> {
    const COLUMNS: usize = N;
    fn get_local_values(&self) -> &[T] {
        self.local_values.as_slice()
    }
    fn get_next_values(&self) -> &[T] {
        self.next_values.as_slice()
    }
    /// Builds a frame from two slices; panics if either length differs from `N`.
    fn from_values(lv: &[T], nv: &[T]) -> Self {
        assert_eq!(lv.len(), Self::COLUMNS);
        assert_eq!(nv.len(), Self::COLUMNS);
        let local_values: [T; N] = lv.try_into().unwrap();
        let next_values: [T; N] = nv.try_into().unwrap();
        Self {
            local_values,
            next_values,
        }
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/verifier.rs | prover/src/verifier.rs | use crate::all_stark::{AllStark, Table};
use crate::config::StarkConfig;
use crate::constraint_consumer::ConstraintConsumer;
use crate::cross_table_lookup::{
num_ctl_helper_columns_by_table, verify_cross_table_lookups, CtlCheckVars,
GrandProductChallengeSet,
};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::lookup::LookupCheckVars;
use anyhow::{ensure, Result};
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::types::Field;
use plonky2::fri::verifier::verify_fri_proof;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::config::GenericConfig;
use plonky2::plonk::plonk_common::reduce_with_powers;
use std::any::type_name;
use crate::proof::{
AllProof, AllProofChallenges, StarkOpeningSet, StarkProof, StarkProofChallenges,
};
use crate::stark::Stark;
use crate::vanishing_poly::eval_vanishing_poly;
pub fn verify_proof<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
all_stark: &AllStark<F, D>,
all_proof: AllProof<F, C, D>,
config: &StarkConfig,
) -> Result<()>
where
{
let AllProofChallenges {
stark_challenges,
ctl_challenges,
} = all_proof
.get_challenges(config)
.map_err(|_| anyhow::Error::msg("Invalid sampling of proof challenges."))?;
let num_lookup_columns = all_stark.num_lookups_helper_columns(config);
let AllStark {
arithmetic_stark,
cpu_stark,
poseidon_stark,
poseidon_sponge_stark,
keccak_stark,
keccak_sponge_stark,
sha_extend_stark,
sha_extend_sponge_stark,
sha_compress_stark,
sha_compress_sponge_stark,
logic_stark,
memory_stark,
cross_table_lookups,
} = all_stark;
let num_ctl_helper_cols = num_ctl_helper_columns_by_table(
cross_table_lookups,
all_stark.arithmetic_stark.constraint_degree(),
);
let ctl_vars_per_table = CtlCheckVars::from_proofs(
&all_proof.stark_proofs,
cross_table_lookups,
&ctl_challenges,
&num_lookup_columns,
&num_ctl_helper_cols,
);
verify_stark_proof_with_challenges(
arithmetic_stark,
&all_proof.stark_proofs[Table::Arithmetic as usize].proof,
&stark_challenges[Table::Arithmetic as usize],
&ctl_vars_per_table[Table::Arithmetic as usize],
&ctl_challenges,
config,
)?;
verify_stark_proof_with_challenges(
cpu_stark,
&all_proof.stark_proofs[Table::Cpu as usize].proof,
&stark_challenges[Table::Cpu as usize],
&ctl_vars_per_table[Table::Cpu as usize],
&ctl_challenges,
config,
)?;
verify_stark_proof_with_challenges(
poseidon_stark,
&all_proof.stark_proofs[Table::Poseidon as usize].proof,
&stark_challenges[Table::Poseidon as usize],
&ctl_vars_per_table[Table::Poseidon as usize],
&ctl_challenges,
config,
)?;
verify_stark_proof_with_challenges(
poseidon_sponge_stark,
&all_proof.stark_proofs[Table::PoseidonSponge as usize].proof,
&stark_challenges[Table::PoseidonSponge as usize],
&ctl_vars_per_table[Table::PoseidonSponge as usize],
&ctl_challenges,
config,
)?;
verify_stark_proof_with_challenges(
keccak_stark,
&all_proof.stark_proofs[Table::Keccak as usize].proof,
&stark_challenges[Table::Keccak as usize],
&ctl_vars_per_table[Table::Keccak as usize],
&ctl_challenges,
config,
)?;
verify_stark_proof_with_challenges(
keccak_sponge_stark,
&all_proof.stark_proofs[Table::KeccakSponge as usize].proof,
&stark_challenges[Table::KeccakSponge as usize],
&ctl_vars_per_table[Table::KeccakSponge as usize],
&ctl_challenges,
config,
)?;
verify_stark_proof_with_challenges(
sha_extend_stark,
&all_proof.stark_proofs[Table::ShaExtend as usize].proof,
&stark_challenges[Table::ShaExtend as usize],
&ctl_vars_per_table[Table::ShaExtend as usize],
&ctl_challenges,
config,
)?;
verify_stark_proof_with_challenges(
sha_extend_sponge_stark,
&all_proof.stark_proofs[Table::ShaExtendSponge as usize].proof,
&stark_challenges[Table::ShaExtendSponge as usize],
&ctl_vars_per_table[Table::ShaExtendSponge as usize],
&ctl_challenges,
config,
)?;
verify_stark_proof_with_challenges(
sha_compress_stark,
&all_proof.stark_proofs[Table::ShaCompress as usize].proof,
&stark_challenges[Table::ShaCompress as usize],
&ctl_vars_per_table[Table::ShaCompress as usize],
&ctl_challenges,
config,
)?;
verify_stark_proof_with_challenges(
sha_compress_sponge_stark,
&all_proof.stark_proofs[Table::ShaCompressSponge as usize].proof,
&stark_challenges[Table::ShaCompressSponge as usize],
&ctl_vars_per_table[Table::ShaCompressSponge as usize],
&ctl_challenges,
config,
)?;
verify_stark_proof_with_challenges(
logic_stark,
&all_proof.stark_proofs[Table::Logic as usize].proof,
&stark_challenges[Table::Logic as usize],
&ctl_vars_per_table[Table::Logic as usize],
&ctl_challenges,
config,
)?;
verify_stark_proof_with_challenges(
memory_stark,
&all_proof.stark_proofs[Table::Memory as usize].proof,
&stark_challenges[Table::Memory as usize],
&ctl_vars_per_table[Table::Memory as usize],
&ctl_challenges,
config,
)?;
verify_cross_table_lookups::<F, D>(
cross_table_lookups,
all_proof
.stark_proofs
.map(|p| p.proof.openings.ctl_zs_first),
config,
)
}
/// Verifies one table's STARK proof, given already-sampled challenges and the
/// cross-table lookup openings for that table.
pub(crate) fn verify_stark_proof_with_challenges<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
    const D: usize,
>(
    stark: &S,
    proof: &StarkProof<F, C, D>,
    challenges: &StarkProofChallenges<F, D>,
    ctl_vars: &[CtlCheckVars<F, F::Extension, F::Extension, D>],
    ctl_challenges: &GrandProductChallengeSet<F>,
    config: &StarkConfig,
) -> Result<()> {
    log::debug!("Checking proof: {}", type_name::<S>());
    // Total number of CTL helper polynomials for this table.
    let num_ctl_polys = ctl_vars
        .iter()
        .map(|ctl| ctl.helper_columns.len())
        .sum::<usize>();
    let num_ctl_z_polys = ctl_vars.len();
    validate_proof_shape(stark, proof, config, num_ctl_polys, num_ctl_z_polys)?;
    let StarkOpeningSet {
        local_values,
        next_values,
        auxiliary_polys,
        auxiliary_polys_next,
        ctl_zs_first: _,
        quotient_polys,
    } = &proof.openings;
    let vars = S::EvaluationFrame::from_values(local_values, next_values);
    let degree_bits = proof.recover_degree_bits(config);
    // Evaluations of the first/last-row Lagrange selectors at zeta.
    let (l_0, l_last) = eval_l_0_and_l_last(degree_bits, challenges.stark_zeta);
    let last = F::primitive_root_of_unity(degree_bits).inverse();
    // zeta - g^(n-1): the transition-constraint filter evaluated at zeta.
    let z_last = challenges.stark_zeta - last.into();
    let mut consumer = ConstraintConsumer::<F::Extension>::new(
        challenges
            .stark_alphas
            .iter()
            .map(|&alpha| F::Extension::from_basefield(alpha))
            .collect::<Vec<_>>(),
        z_last,
        l_0,
        l_last,
    );
    let num_lookup_columns = stark.num_lookup_helper_columns(config);
    // NOTE(review): the lookup challenges reuse the `beta` components of the
    // CTL challenge set — confirm this matches the prover's sampling.
    let lookup_challenges = (num_lookup_columns > 0).then(|| {
        ctl_challenges
            .challenges
            .iter()
            .map(|ch| ch.beta)
            .collect::<Vec<_>>()
    });
    // The lookup helper polynomials occupy the first `num_lookup_columns`
    // slots of the auxiliary polynomial openings.
    let lookup_vars = stark.uses_lookups().then(|| LookupCheckVars {
        local_values: auxiliary_polys[..num_lookup_columns].to_vec(),
        next_values: auxiliary_polys_next[..num_lookup_columns].to_vec(),
        challenges: lookup_challenges.unwrap(),
    });
    let lookups = stark.lookups();
    // Evaluate the full vanishing polynomial (constraints + lookups + CTLs) at zeta.
    eval_vanishing_poly::<F, F::Extension, F::Extension, S, D, D>(
        stark,
        &vars,
        &lookups,
        lookup_vars,
        ctl_vars,
        &mut consumer,
    );
    let vanishing_polys_zeta = consumer.accumulators();
    // Check each polynomial identity, of the form `vanishing(x) = Z_H(x) quotient(x)`, at zeta.
    let zeta_pow_deg = challenges.stark_zeta.exp_power_of_2(degree_bits);
    let z_h_zeta = zeta_pow_deg - F::Extension::ONE;
    // `quotient_polys_zeta` holds `num_challenges * quotient_degree_factor` evaluations.
    // Each chunk of `quotient_degree_factor` holds the evaluations of `t_0(zeta),...,t_{quotient_degree_factor-1}(zeta)`
    // where the "real" quotient polynomial is `t(X) = t_0(X) + t_1(X)*X^n + t_2(X)*X^{2n} + ...`.
    // So to reconstruct `t(zeta)` we can compute `reduce_with_powers(chunk, zeta^n)` for each
    // `quotient_degree_factor`-sized chunk of the original evaluations.
    for (i, chunk) in quotient_polys
        .chunks(stark.quotient_degree_factor())
        .enumerate()
    {
        ensure!(
            vanishing_polys_zeta[i] == z_h_zeta * reduce_with_powers(chunk, zeta_pow_deg),
            "Mismatch between evaluation and opening of quotient polynomial"
        );
    }
    // Commitments, in the oracle order expected by `Stark::fri_instance`.
    let merkle_caps = vec![
        proof.trace_cap.clone(),
        proof.auxiliary_polys_cap.clone(),
        proof.quotient_polys_cap.clone(),
    ];
    // Per-CTL helper column counts, needed to lay out the FRI instance.
    let num_ctl_zs = ctl_vars
        .iter()
        .map(|ctl| ctl.helper_columns.len())
        .collect::<Vec<_>>();
    // Finally, verify all polynomial openings with FRI.
    verify_fri_proof::<F, C, D>(
        &stark.fri_instance(
            challenges.stark_zeta,
            F::primitive_root_of_unity(degree_bits),
            num_ctl_polys,
            num_ctl_zs,
            config,
        ),
        &proof.openings.to_fri_openings(),
        &challenges.fri_challenges,
        &merkle_caps,
        &proof.opening_proof,
        &config.fri_params(degree_bits),
    )?;
    Ok(())
}
/// Validates the shape of every component of `proof` against the layout
/// implied by `stark` and `config`: Merkle cap heights, trace/auxiliary/
/// quotient column counts, and the lengths of the opening sets.
///
/// Returns an error on the first mismatch. The opening (FRI) proof is
/// deliberately not checked here; see the comment on `opening_proof` below.
fn validate_proof_shape<F, C, S, const D: usize>(
    stark: &S,
    proof: &StarkProof<F, C, D>,
    config: &StarkConfig,
    num_ctl_helpers: usize,
    num_ctl_zs: usize,
) -> anyhow::Result<()>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    S: Stark<F, D>,
{
    let StarkProof {
        trace_cap,
        auxiliary_polys_cap,
        quotient_polys_cap,
        openings,
        // The shape of the opening proof will be checked in the FRI verifier (see
        // validate_fri_proof_shape), so we ignore it here.
        opening_proof: _,
    } = proof;
    let StarkOpeningSet {
        local_values,
        next_values,
        auxiliary_polys,
        auxiliary_polys_next,
        ctl_zs_first,
        quotient_polys,
    } = openings;
    let degree_bits = proof.recover_degree_bits(config);
    let fri_params = config.fri_params(degree_bits);
    let cap_height = fri_params.config.cap_height;
    // Auxiliary columns = CTL helper columns + lookup helper columns + CTL Z columns.
    let num_auxiliary = num_ctl_helpers + stark.num_lookup_helper_columns(config) + num_ctl_zs;
    // All three commitments must use the Merkle cap height from the FRI config.
    ensure!(trace_cap.height() == cap_height);
    ensure!(auxiliary_polys_cap.height() == cap_height);
    ensure!(quotient_polys_cap.height() == cap_height);
    // Openings at zeta and g*zeta must cover exactly the STARK's columns.
    ensure!(local_values.len() == S::COLUMNS);
    ensure!(next_values.len() == S::COLUMNS);
    ensure!(auxiliary_polys.len() == num_auxiliary);
    ensure!(auxiliary_polys_next.len() == num_auxiliary);
    ensure!(ctl_zs_first.len() == num_ctl_zs);
    ensure!(quotient_polys.len() == stark.num_quotient_polys(config));
    Ok(())
}
/// Evaluate the Lagrange polynomials `L_0` and `L_(n-1)` at a point `x`.
/// `L_0(x) = (x^n - 1)/(n * (x - 1))`
/// `L_(n-1)(x) = (x^n - 1)/(n * (g * x - 1))`, with `g` the subgroup generator.
fn eval_l_0_and_l_last<F: Field>(log_n: usize, x: F) -> (F, F) {
    let n = F::from_canonical_usize(1 << log_n);
    let g = F::primitive_root_of_unity(log_n);
    // Z_H(x) = x^n - 1, the shared numerator of both Lagrange values.
    let z_x = x.exp_power_of_2(log_n) - F::ONE;
    // Invert both denominators with a single batch inversion.
    let denom_first = n * (x - F::ONE);
    let denom_last = n * (g * x - F::ONE);
    let invs = F::batch_multiplicative_inverse(&[denom_first, denom_last]);
    (z_x * invs[0], z_x * invs[1])
}
#[cfg(test)]
mod tests {
    use plonky2::field::goldilocks_field::GoldilocksField;
    use plonky2::field::polynomial::PolynomialValues;
    use plonky2::field::types::Sample;
    use crate::verifier::eval_l_0_and_l_last;
    // Compares the closed-form Lagrange evaluations against direct
    // interpolation of selector polynomials at a random point.
    #[test]
    fn test_eval_l_0_and_l_last() {
        type F = GoldilocksField;
        let log_n = 5;
        let n = 1 << log_n;
        let x = F::rand(); // challenge point
        // L_0 selects index 0; L_(n-1) selects the last index.
        let expected_l_first_x = PolynomialValues::selector(n, 0).ifft().eval(x);
        let expected_l_last_x = PolynomialValues::selector(n, n - 1).ifft().eval(x);
        let (l_first_x, l_last_x) = eval_l_0_and_l_last(log_n, x);
        assert_eq!(l_first_x, expected_l_first_x);
        assert_eq!(l_last_x, expected_l_last_x);
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/logic.rs | prover/src/logic.rs | use std::marker::PhantomData;
use itertools::izip;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2_util::ceil_div_usize;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::logic::columns::NUM_COLUMNS;
use crate::stark::Stark;
use crate::util::{limb_from_bits_le, limb_from_bits_le_recursive, trace_rows_to_poly_values};
// Number of bits in each input/output value handled by this table.
const VAL_BITS: usize = 32;
// Number of bits stored per field element. Ensure that this fits; it is not checked.
pub(crate) const PACKED_LIMB_BITS: usize = 32;
// Number of field elements needed to store each input/output at the specified packing.
const PACKED_LEN: usize = ceil_div_usize(VAL_BITS, PACKED_LIMB_BITS);
/// Column layout of the logic table: four operation flags, the bit
/// decomposition of both inputs, and the packed result limbs.
pub(crate) mod columns {
    use std::cmp::min;
    use std::ops::Range;

    use super::{PACKED_LEN, PACKED_LIMB_BITS, VAL_BITS};

    // One selector flag per supported operation.
    pub const IS_AND: usize = 0;
    pub const IS_OR: usize = IS_AND + 1;
    pub const IS_XOR: usize = IS_OR + 1;
    pub const IS_NOR: usize = IS_XOR + 1;
    // The inputs are decomposed into bits.
    pub const INPUT0: Range<usize> = (IS_NOR + 1)..(IS_NOR + 1) + VAL_BITS;
    pub const INPUT1: Range<usize> = INPUT0.end..INPUT0.end + VAL_BITS;
    // The result is packed in limbs of `PACKED_LIMB_BITS` bits.
    pub const RESULT: Range<usize> = INPUT1.end..INPUT1.end + PACKED_LEN;

    /// Yields, for each packed result limb, the range of bit columns inside
    /// `input_bits` that make up that limb (the last limb may be shorter).
    pub fn limb_bit_cols_for_input(input_bits: Range<usize>) -> impl Iterator<Item = Range<usize>> {
        (0..PACKED_LEN).map(move |limb| {
            let lo = input_bits.start + limb * PACKED_LIMB_BITS;
            let hi = min(lo + PACKED_LIMB_BITS, input_bits.end);
            lo..hi
        })
    }

    /// Total number of columns in the logic table.
    pub const NUM_COLUMNS: usize = RESULT.end;
}
/// Columns exposed to the CTL: a reconstructed opcode value followed by the
/// packed input limbs and the result limbs.
pub fn ctl_data<F: Field>() -> Vec<Column<F>> {
    // We scale each filter flag with the associated opcode value.
    // If a logic operation is happening on the CPU side, the CTL
    // will enforce that the reconstructed opcode value from the
    // opcode bits matches.
    let opcode = Column::linear_combination([
        (columns::IS_AND, F::from_canonical_u32(0b100100 * (1 << 6))),
        (columns::IS_OR, F::from_canonical_u32(0b100101 * (1 << 6))),
        (columns::IS_XOR, F::from_canonical_u32(0b100110 * (1 << 6))),
        (columns::IS_NOR, F::from_canonical_u32(0b100111 * (1 << 6))),
    ]);
    let input0 = columns::limb_bit_cols_for_input(columns::INPUT0).map(Column::le_bits);
    let input1 = columns::limb_bit_cols_for_input(columns::INPUT1).map(Column::le_bits);
    let result = columns::RESULT.map(Column::single);
    std::iter::once(opcode)
        .chain(input0)
        .chain(input1)
        .chain(result)
        .collect()
}
/// The CTL filter: active whenever any of the four operation flags is set.
pub fn ctl_filter<F: Field>() -> Filter<F> {
    let flags = [
        columns::IS_AND,
        columns::IS_OR,
        columns::IS_XOR,
        columns::IS_NOR,
    ];
    Filter::new_simple(Column::sum(flags))
}
/// STARK for bitwise logic operations (AND, OR, XOR, NOR).
#[derive(Copy, Clone, Default)]
pub struct LogicStark<F, const D: usize> {
    // Marker tying the STARK to its field type; no runtime data is stored.
    pub f: PhantomData<F>,
}
/// The bitwise operations supported by the logic table.
#[derive(Copy, Clone, Debug, Eq, PartialEq)]
pub(crate) enum Op {
    And,
    Or,
    Xor,
    Nor,
}

impl Op {
    /// Applies this operation to two 32-bit operands.
    pub(crate) fn result(&self, a: u32, b: u32) -> u32 {
        match *self {
            Op::And => a & b,
            Op::Or => a | b,
            Op::Xor => a ^ b,
            Op::Nor => !(a | b),
        }
    }
}
/// A single logic operation together with its precomputed result.
#[derive(Debug, Clone)]
pub(crate) struct Operation {
    // Which bitwise operation was performed.
    operator: Op,
    // First 32-bit operand.
    input0: u32,
    // Second 32-bit operand.
    input1: u32,
    // `operator` applied to the two inputs; filled in by `Operation::new`.
    pub(crate) result: u32,
}
impl Operation {
    /// Builds an operation, eagerly computing `operator(input0, input1)`.
    pub(crate) fn new(operator: Op, input0: u32, input1: u32) -> Self {
        Self {
            operator,
            input0,
            input1,
            result: operator.result(input0, input1),
        }
    }

    /// Converts this operation into one trace row: the matching operation
    /// flag set to one, the little-endian bit decomposition of both inputs,
    /// and the packed result.
    fn into_row<F: Field>(self) -> [F; NUM_COLUMNS] {
        let mut row = [F::ZERO; NUM_COLUMNS];
        let flag_col = match self.operator {
            Op::And => columns::IS_AND,
            Op::Or => columns::IS_OR,
            Op::Xor => columns::IS_XOR,
            Op::Nor => columns::IS_NOR,
        };
        row[flag_col] = F::ONE;
        for bit in 0..32 {
            row[columns::INPUT0.start + bit] = F::from_canonical_u32((self.input0 >> bit) & 1);
            row[columns::INPUT1.start + bit] = F::from_canonical_u32((self.input1 >> bit) & 1);
        }
        // With 32-bit limbs (PACKED_LIMB_BITS == VAL_BITS) the result
        // occupies the single column RESULT.start.
        row[columns::RESULT.start] = F::from_canonical_u32(self.result);
        row
    }
}
impl<F: RichField, const D: usize> LogicStark<F, D> {
    /// Generates the trace as column-major polynomial values.
    pub(crate) fn generate_trace(
        &self,
        operations: Vec<Operation>,
        min_rows: usize,
    ) -> Vec<PolynomialValues<F>> {
        trace_rows_to_poly_values(self.generate_trace_rows(operations, min_rows))
    }

    /// Generates one row per operation, then pads with all-zero rows up to
    /// the next power of two that is at least `min_rows`.
    fn generate_trace_rows(
        &self,
        operations: Vec<Operation>,
        min_rows: usize,
    ) -> Vec<[F; NUM_COLUMNS]> {
        let padded_len = operations.len().max(min_rows).next_power_of_two();
        let mut rows: Vec<[F; NUM_COLUMNS]> =
            operations.into_iter().map(|op| op.into_row()).collect();
        // Pad to a power of two.
        rows.resize(padded_len, [F::ZERO; NUM_COLUMNS]);
        rows
    }
}
impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for LogicStark<F, D> {
    type EvaluationFrame<FE, P, const D2: usize>
        = StarkFrame<P, NUM_COLUMNS>
    where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>;
    type EvaluationFrameTarget = StarkFrame<ExtensionTarget<D>, NUM_COLUMNS>;
    /// Evaluates all logic-table constraints on packed field values:
    /// booleanity of every input bit, plus the per-limb result identity.
    fn eval_packed_generic<FE, P, const D2: usize>(
        &self,
        vars: &Self::EvaluationFrame<FE, P, D2>,
        yield_constr: &mut ConstraintConsumer<P>,
    ) where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        let lv = vars.get_local_values();
        // IS_AND, IS_OR, and IS_XOR come from the CPU table, so we assume they're valid.
        let is_and = lv[columns::IS_AND];
        let is_or = lv[columns::IS_OR];
        let is_xor = lv[columns::IS_XOR];
        let is_nor = lv[columns::IS_NOR];
        // The result will be `in0 OP in1 = sum_coeff * (in0 + in1) + and_coeff * (in0 AND in1) + not_coeff * u32::MAX`.
        // `AND => sum_coeff = 0, and_coeff = 1, not_coeff=0`
        // `OR => sum_coeff = 1, and_coeff = -1, not_coeff=0`
        // `XOR => sum_coeff = 1, and_coeff = -2, not_coeff=0`
        // `NOR => sum_coeff = -1, and_coeff = 1, not_coeff=1`
        let sum_coeff = is_or + is_xor - is_nor;
        let and_coeff = is_and - is_or - is_xor * FE::TWO + is_nor;
        let not_coeff = is_nor;
        // Ensure that all bits are indeed bits.
        for input_bits_cols in [columns::INPUT0, columns::INPUT1] {
            for i in input_bits_cols {
                let bit = lv[i];
                // bit * (bit - 1) == 0 forces bit in {0, 1}.
                yield_constr.constraint(bit * (bit - P::ONES));
            }
        }
        // Form the result
        for (result_col, x_bits_cols, y_bits_cols) in izip!(
            columns::RESULT,
            columns::limb_bit_cols_for_input(columns::INPUT0),
            columns::limb_bit_cols_for_input(columns::INPUT1),
        ) {
            // Packed limb values of the two inputs, reconstructed from their bits.
            let x: P = limb_from_bits_le(x_bits_cols.clone().map(|col| lv[col]));
            let y: P = limb_from_bits_le(y_bits_cols.clone().map(|col| lv[col]));
            let x_bits = x_bits_cols.map(|i| lv[i]);
            let y_bits = y_bits_cols.map(|i| lv[i]);
            // Bitwise AND of the two limbs: sum over bit positions of x_i * y_i * 2^i.
            let x_land_y: P = izip!(0.., x_bits, y_bits)
                .map(|(i, x_bit, y_bit)| x_bit * y_bit * FE::from_canonical_u64(1 << i))
                .sum();
            let x_op_y = sum_coeff * (x + y)
                + and_coeff * x_land_y
                + not_coeff * FE::from_canonical_u32(u32::MAX);
            yield_constr.constraint(lv[result_col] - x_op_y);
        }
    }
    /// Circuit (recursive) version of `eval_packed_generic`; mirrors the
    /// packed evaluation term for term.
    fn eval_ext_circuit(
        &self,
        builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
        vars: &Self::EvaluationFrameTarget,
        yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    ) {
        let lv = vars.get_local_values();
        // IS_AND, IS_OR, and IS_XOR come from the CPU table, so we assume they're valid.
        let is_and = lv[columns::IS_AND];
        let is_or = lv[columns::IS_OR];
        let is_xor = lv[columns::IS_XOR];
        let is_nor = lv[columns::IS_NOR];
        // The result will be `in0 OP in1 = sum_coeff * (in0 + in1) + and_coeff * (in0 AND in1) + not_coeff * u32::MAX`.
        // `AND => sum_coeff = 0, and_coeff = 1, not_coeff=0`
        // `OR => sum_coeff = 1, and_coeff = -1, not_coeff=0`
        // `XOR => sum_coeff = 1, and_coeff = -2, not_coeff=0`
        // `NOR => sum_coeff = -1, and_coeff = 1, not_coeff=1`
        let sum_coeff = {
            let sum_coeff = builder.add_extension(is_or, is_xor);
            builder.sub_extension(sum_coeff, is_nor)
        };
        let and_coeff = {
            let and_coeff = builder.sub_extension(is_and, is_or);
            let and_coeff = builder.mul_const_add_extension(-F::TWO, is_xor, and_coeff);
            builder.add_extension(and_coeff, is_nor)
        };
        let not_coeff = is_nor;
        // Ensure that all bits are indeed bits.
        for input_bits_cols in [columns::INPUT0, columns::INPUT1] {
            for i in input_bits_cols {
                let bit = lv[i];
                // bit^2 - bit == 0 forces bit in {0, 1}.
                let constr = builder.mul_sub_extension(bit, bit, bit);
                yield_constr.constraint(builder, constr);
            }
        }
        // Form the result
        for (result_col, x_bits_cols, y_bits_cols) in izip!(
            columns::RESULT,
            columns::limb_bit_cols_for_input(columns::INPUT0),
            columns::limb_bit_cols_for_input(columns::INPUT1),
        ) {
            let x = limb_from_bits_le_recursive(builder, x_bits_cols.clone().map(|i| lv[i]));
            let y = limb_from_bits_le_recursive(builder, y_bits_cols.clone().map(|i| lv[i]));
            let x_bits = x_bits_cols.map(|i| lv[i]);
            let y_bits = y_bits_cols.map(|i| lv[i]);
            // Bitwise AND accumulated as acc + 2^i * x_bit * y_bit per bit position.
            let x_land_y = izip!(0usize.., x_bits, y_bits).fold(
                builder.zero_extension(),
                |acc, (i, x_bit, y_bit)| {
                    builder.arithmetic_extension(
                        F::from_canonical_u64(1 << i),
                        F::ONE,
                        x_bit,
                        y_bit,
                        acc,
                    )
                },
            );
            let x_op_y = {
                let x_op_y = builder.mul_extension(sum_coeff, x);
                let x_op_y = builder.mul_add_extension(sum_coeff, y, x_op_y);
                let x_op_y = builder.mul_add_extension(and_coeff, x_land_y, x_op_y);
                builder.mul_const_add_extension(F::from_canonical_u32(u32::MAX), not_coeff, x_op_y)
            };
            let constr = builder.sub_extension(lv[result_col], x_op_y);
            yield_constr.constraint(builder, constr);
        }
    }
    // The result constraint multiplies a degree-1 coefficient by the
    // degree-2 bitwise-AND term, giving degree 3.
    fn constraint_degree(&self) -> usize {
        3
    }
}
#[cfg(test)]
mod tests {
    use crate::logic::{LogicStark, Op, Operation};
    use crate::stark_testing::{
        test_stark_check_constraints, test_stark_circuit_constraints, test_stark_low_degree,
    };
    use anyhow::Result;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
    // Exercises `test_stark_low_degree` on the logic STARK.
    #[test]
    fn test_stark_degree() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = LogicStark<F, D>;
        let stark = S {
            f: Default::default(),
        };
        test_stark_low_degree(stark)
    }
    // Exercises the recursive (circuit) constraint check.
    #[test]
    fn test_stark_circuit() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = LogicStark<F, D>;
        let stark = S {
            f: Default::default(),
        };
        test_stark_circuit_constraints::<F, C, S, D>(stark)
    }
    // Generates a trace from a mix of operations (including edge values like
    // 0/0 and 1/1) and checks every consecutive row pair against the
    // constraints.
    #[test]
    fn test_stark_check_constraint() {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = LogicStark<F, D>;
        let stark = S {
            f: Default::default(),
        };
        let ops = vec![
            Operation::new(Op::Nor, 0, 1),
            Operation::new(Op::Nor, 1, 1),
            Operation::new(Op::Nor, 0, 0),
            Operation::new(Op::Nor, 1283818, 219218),
            Operation::new(Op::And, 0, 1),
            Operation::new(Op::And, 1, 1),
            Operation::new(Op::And, 0, 0),
            Operation::new(Op::Or, 0, 1),
            Operation::new(Op::Or, 1, 1),
            Operation::new(Op::Or, 0, 0),
            Operation::new(Op::Xor, 0, 1),
            Operation::new(Op::And, 0, 1),
            Operation::new(Op::And, 1, 1),
            Operation::new(Op::And, 0, 0),
            Operation::new(Op::And, 12112, 313131),
            Operation::new(Op::Or, 0, 1),
            Operation::new(Op::Or, 1, 1),
            Operation::new(Op::Or, 0, 0),
            Operation::new(Op::Or, 12121, 21211),
            Operation::new(Op::Xor, 0, 1),
            Operation::new(Op::Xor, 1, 1),
            Operation::new(Op::Xor, 0, 0),
            Operation::new(Op::Xor, 218219, 9828121),
            Operation::new(Op::Xor, 1, 1),
            Operation::new(Op::Xor, 0, 0),
        ];
        let num_rows = 1 << 10;
        let vals = stark.generate_trace_rows(ops, num_rows);
        // Check each (local, next) row pair, including the zero padding rows.
        for i in 0..(vals.len() - 1) {
            test_stark_check_constraints::<F, C, S, D>(stark, &vals[i], &vals[i + 1]);
        }
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cross_table_lookup.rs | prover/src/cross_table_lookup.rs | use std::borrow::Borrow;
use std::cmp::min;
use std::fmt::Debug;
use std::iter::repeat;
use anyhow::{ensure, Result};
use itertools::Itertools;
use plonky2::field::batch_util::batch_add_inplace;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::challenger::{Challenger, RecursiveChallenger};
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::iop::target::Target;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig, Hasher};
use plonky2::plonk::plonk_common::{
reduce_with_powers, reduce_with_powers_circuit, reduce_with_powers_ext_circuit,
};
use plonky2::util::serialization::{Buffer, IoResult, Read, Write};
use plonky2_util::ceil_div_usize;
use crate::all_stark::{Table, NUM_TABLES};
use crate::config::StarkConfig;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::evaluation_frame::StarkEvaluationFrame;
use crate::proof::{StarkProofTarget, StarkProofWithMetadata};
use crate::stark::Stark;
/// A row filter expressed as a sum of pairwise products of column
/// combinations plus a sum of standalone column combinations.
#[derive(Clone, Debug)]
pub struct Filter<F: Field> {
    // Pairs of column combinations whose products are summed.
    products: Vec<(Column<F>, Column<F>)>,
    // Column combinations added directly (no pairing).
    constants: Vec<Column<F>>,
}
impl<F: Field> Filter<F> {
    /// Returns a filter from the provided `products` and `constants` vectors.
    pub fn new(products: Vec<(Column<F>, Column<F>)>, constants: Vec<Column<F>>) -> Self {
        Self {
            products,
            constants,
        }
    }

    /// Returns a filter made of a single column.
    pub fn new_simple(col: Column<F>) -> Self {
        Self {
            products: vec![],
            constants: vec![col],
        }
    }

    /// Given the column values for the current and next rows, evaluates the filter.
    pub(crate) fn eval_filter<FE, P, const D: usize>(&self, v: &[P], next_v: &[P]) -> P
    where
        FE: FieldExtension<D, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        // Sum of pairwise products plus the sum of the standalone columns.
        let product_part = self
            .products
            .iter()
            .map(|(lhs, rhs)| lhs.eval_with_next(v, next_v) * rhs.eval_with_next(v, next_v))
            .sum::<P>();
        let constant_part = self
            .constants
            .iter()
            .map(|col| col.eval_with_next(v, next_v))
            .sum::<P>();
        product_part + constant_part
    }

    /// Circuit version of `eval_filter`:
    /// Given the column values for the current and next rows, evaluates the filter.
    pub(crate) fn eval_filter_circuit<const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        v: &[ExtensionTarget<D>],
        next_v: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D>
    where
        F: RichField + Extendable<D>,
    {
        let product_terms = self
            .products
            .iter()
            .map(|(lhs, rhs)| {
                let lhs_eval = lhs.eval_with_next_circuit(builder, v, next_v);
                let rhs_eval = rhs.eval_with_next_circuit(builder, v, next_v);
                builder.mul_extension(lhs_eval, rhs_eval)
            })
            .collect::<Vec<_>>();
        let constant_terms = self
            .constants
            .iter()
            .map(|col| col.eval_with_next_circuit(builder, v, next_v))
            .collect::<Vec<_>>();
        let product_sum = builder.add_many_extension(product_terms);
        let constant_sum = builder.add_many_extension(constant_terms);
        builder.add_extension(product_sum, constant_sum)
    }

    /// Evaluate on a row of a table given in column-major form.
    pub(crate) fn eval_table(&self, table: &[PolynomialValues<F>], row: usize) -> F {
        let product_part = self
            .products
            .iter()
            .map(|(lhs, rhs)| lhs.eval_table(table, row) * rhs.eval_table(table, row))
            .sum::<F>();
        let constant_part = self
            .constants
            .iter()
            .map(|col| col.eval_table(table, row))
            .sum::<F>();
        product_part + constant_part
    }
}
/// Represent a linear combination of columns.
#[derive(Clone, Debug)]
pub struct Column<F: Field> {
    // (column index, coefficient) pairs read from the current row.
    linear_combination: Vec<(usize, F)>,
    // (column index, coefficient) pairs read from the next row.
    next_row_linear_combination: Vec<(usize, F)>,
    // Constant term added to the combination.
    constant: F,
}
impl<F: Field> Column<F> {
    /// A column reading a single current-row column with coefficient 1.
    pub fn single(c: usize) -> Self {
        Self {
            linear_combination: vec![(c, F::ONE)],
            next_row_linear_combination: vec![],
            constant: F::ZERO,
        }
    }
    /// `single` applied to each index in `cs`.
    pub fn singles<I: IntoIterator<Item = impl Borrow<usize>>>(
        cs: I,
    ) -> impl Iterator<Item = Self> {
        cs.into_iter().map(|c| Self::single(*c.borrow()))
    }
    /// A column reading a single next-row column with coefficient 1.
    pub fn single_next_row(c: usize) -> Self {
        Self {
            linear_combination: vec![],
            next_row_linear_combination: vec![(c, F::ONE)],
            constant: F::ZERO,
        }
    }
    /// `single_next_row` applied to each index in `cs`.
    pub fn singles_next_row<I: IntoIterator<Item = impl Borrow<usize>>>(
        cs: I,
    ) -> impl Iterator<Item = Self> {
        cs.into_iter().map(|c| Self::single_next_row(*c.borrow()))
    }
    /// A column that ignores the trace and always evaluates to `constant`.
    pub fn constant(constant: F) -> Self {
        Self {
            linear_combination: vec![],
            next_row_linear_combination: vec![],
            constant,
        }
    }
    /// The constant column 0.
    pub fn zero() -> Self {
        Self::constant(F::ZERO)
    }
    /// The constant column 1.
    pub fn one() -> Self {
        Self::constant(F::ONE)
    }
    /// A linear combination of current-row columns plus a constant term.
    /// Panics if `iter` is empty; debug-asserts that the indices are unique.
    pub fn linear_combination_with_constant<I: IntoIterator<Item = (usize, F)>>(
        iter: I,
        constant: F,
    ) -> Self {
        let v = iter.into_iter().collect::<Vec<_>>();
        assert!(!v.is_empty());
        debug_assert_eq!(
            v.iter().map(|(c, _)| c).unique().count(),
            v.len(),
            "Duplicate columns."
        );
        Self {
            linear_combination: v,
            next_row_linear_combination: vec![],
            constant,
        }
    }
    /// A linear combination over current-row and next-row columns plus a
    /// constant term. At least one of the two combinations must be non-empty.
    pub fn linear_combination_and_next_row_with_constant<I: IntoIterator<Item = (usize, F)>>(
        iter: I,
        next_row_iter: I,
        constant: F,
    ) -> Self {
        let v = iter.into_iter().collect::<Vec<_>>();
        let next_row_v = next_row_iter.into_iter().collect::<Vec<_>>();
        assert!(!v.is_empty() || !next_row_v.is_empty());
        debug_assert_eq!(
            v.iter().map(|(c, _)| c).unique().count(),
            v.len(),
            "Duplicate columns."
        );
        debug_assert_eq!(
            next_row_v.iter().map(|(c, _)| c).unique().count(),
            next_row_v.len(),
            "Duplicate columns."
        );
        Self {
            linear_combination: v,
            next_row_linear_combination: next_row_v,
            constant,
        }
    }
    /// A linear combination of current-row columns with no constant term.
    pub fn linear_combination<I: IntoIterator<Item = (usize, F)>>(iter: I) -> Self {
        Self::linear_combination_with_constant(iter, F::ZERO)
    }
    /// Interprets `cs` as little-endian bit columns (coefficients 1, 2, 4, ...).
    pub fn le_bits<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(F::TWO.powers()))
    }
    /// Interprets `cs` as little-endian byte columns (coefficients are powers of 256).
    pub fn le_bytes<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(
            cs.into_iter()
                .map(|c| *c.borrow())
                .zip(F::from_canonical_u16(256).powers()),
        )
    }
    /// The sum of the columns in `cs` (all coefficients 1).
    pub fn sum<I: IntoIterator<Item = impl Borrow<usize>>>(cs: I) -> Self {
        Self::linear_combination(cs.into_iter().map(|c| *c.borrow()).zip(repeat(F::ONE)))
    }
    /// Evaluates on a row `v` of packed values. The next-row combination is
    /// ignored here; use `eval_with_next` when it matters.
    pub fn eval<FE, P, const D: usize>(&self, v: &[P]) -> P
    where
        FE: FieldExtension<D, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        self.linear_combination
            .iter()
            .map(|&(c, f)| v[c] * FE::from_basefield(f))
            .sum::<P>()
            + FE::from_basefield(self.constant)
    }
    /// Evaluates given both the current row `v` and the next row `next_v`.
    pub fn eval_with_next<FE, P, const D: usize>(&self, v: &[P], next_v: &[P]) -> P
    where
        FE: FieldExtension<D, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        self.linear_combination
            .iter()
            .map(|&(c, f)| v[c] * FE::from_basefield(f))
            .sum::<P>()
            + self
                .next_row_linear_combination
                .iter()
                .map(|&(c, f)| next_v[c] * FE::from_basefield(f))
                .sum::<P>()
            + FE::from_basefield(self.constant)
    }
    /// Evaluate on a row of a table given in column-major form.
    pub fn eval_table(&self, table: &[PolynomialValues<F>], row: usize) -> F {
        let mut res = self
            .linear_combination
            .iter()
            .map(|&(c, f)| table[c].values[row] * f)
            .sum::<F>()
            + self.constant;
        // If we access the next row at the last row, for sanity, we consider the next row's values to be 0.
        // If CTLs are correctly written, the filter should be 0 in that case anyway.
        if !self.next_row_linear_combination.is_empty() && row < table[0].values.len() - 1 {
            res += self
                .next_row_linear_combination
                .iter()
                .map(|&(c, f)| table[c].values[row + 1] * f)
                .sum::<F>();
        }
        res
    }
    /// Evaluates this column on every row of `table`.
    pub(crate) fn eval_all_rows(&self, table: &[PolynomialValues<F>]) -> Vec<F> {
        let length = table[0].len();
        (0..length)
            .map(|row| self.eval_table(table, row))
            .collect::<Vec<F>>()
    }
    /// Circuit version of `eval`.
    pub fn eval_circuit<const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        v: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D>
    where
        F: RichField + Extendable<D>,
    {
        let pairs = self
            .linear_combination
            .iter()
            .map(|&(c, f)| {
                (
                    v[c],
                    builder.constant_extension(F::Extension::from_basefield(f)),
                )
            })
            .collect::<Vec<_>>();
        let constant = builder.constant_extension(F::Extension::from_basefield(self.constant));
        builder.inner_product_extension(F::ONE, constant, pairs)
    }
    /// Circuit version of `eval_with_next`.
    pub fn eval_with_next_circuit<const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        v: &[ExtensionTarget<D>],
        next_v: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D>
    where
        F: RichField + Extendable<D>,
    {
        let mut pairs = self
            .linear_combination
            .iter()
            .map(|&(c, f)| {
                (
                    v[c],
                    builder.constant_extension(F::Extension::from_basefield(f)),
                )
            })
            .collect::<Vec<_>>();
        let next_row_pairs = self.next_row_linear_combination.iter().map(|&(c, f)| {
            (
                next_v[c],
                builder.constant_extension(F::Extension::from_basefield(f)),
            )
        });
        pairs.extend(next_row_pairs);
        let constant = builder.constant_extension(F::Extension::from_basefield(self.constant));
        builder.inner_product_extension(F::ONE, constant, pairs)
    }
}
/// A table (STARK) together with the columns and filter it contributes to a CTL.
#[derive(Clone, Debug)]
pub struct TableWithColumns<F: Field> {
    // Which STARK table this refers to.
    table: Table,
    // Column combinations of that table that enter the lookup.
    columns: Vec<Column<F>>,
    // Optional row filter; presumably `None` means every row participates —
    // confirm at the filtering call sites.
    filter: Option<Filter<F>>,
}
impl<F: Field> TableWithColumns<F> {
    /// Bundles a table with the columns and filter used in a CTL.
    pub fn new(table: Table, columns: Vec<Column<F>>, filter: Option<Filter<F>>) -> Self {
        Self {
            table,
            columns,
            filter,
        }
    }
}
/// A cross-table lookup: several looking tables whose filtered rows must
/// collectively match the filtered rows of one looked table.
#[derive(Clone)]
pub struct CrossTableLookup<F: Field> {
    pub(crate) looking_tables: Vec<TableWithColumns<F>>,
    pub(crate) looked_table: TableWithColumns<F>,
}
impl<F: Field> CrossTableLookup<F> {
    /// Creates a CTL, checking that every looking table exposes the same
    /// number of columns as the looked table.
    pub fn new(
        looking_tables: Vec<TableWithColumns<F>>,
        looked_table: TableWithColumns<F>,
    ) -> Self {
        assert!(looking_tables
            .iter()
            .all(|twc| twc.columns.len() == looked_table.columns.len()));
        Self {
            looking_tables,
            looked_table,
        }
    }
    /// Given a table, returns:
    /// - the total number of helper columns for this table, over all Cross-table lookups,
    /// - the total number of z polynomials for this table, over all Cross-table lookups,
    /// - the number of helper columns for this table, for each Cross-table lookup.
    pub(crate) fn num_ctl_helpers_zs_all(
        ctls: &[Self],
        table: Table,
        num_challenges: usize,
        constraint_degree: usize,
    ) -> (usize, usize, Vec<usize>) {
        let mut num_helpers_by_ctl = vec![0; ctls.len()];
        let mut total_helpers = 0;
        let mut total_zs = 0;
        for (i, ctl) in ctls.iter().enumerate() {
            let appearances = std::iter::once(&ctl.looked_table)
                .chain(&ctl.looking_tables)
                .filter(|twc| twc.table == table)
                .count();
            // Helper columns are only needed when the table appears more than
            // once in this CTL.
            if appearances > 1 {
                num_helpers_by_ctl[i] = ceil_div_usize(appearances, constraint_degree - 1);
                total_helpers += num_helpers_by_ctl[i];
            }
            // One Z polynomial per CTL the table participates in.
            if appearances > 0 {
                total_zs += 1;
            }
        }
        (
            total_helpers * num_challenges,
            total_zs * num_challenges,
            num_helpers_by_ctl,
        )
    }
}
/// Cross-table lookup data for one table.
#[derive(Clone, Default)]
pub struct CtlData<'a, F: Field> {
    // One entry per (CTL, challenge) pair this table participates in.
    pub(crate) zs_columns: Vec<CtlZData<'a, F>>,
}
/// Cross-table lookup data associated with one Z(x) polynomial.
/// One Z(x) polynomial can be associated to multiple tables,
/// built from the same STARK.
#[derive(Clone)]
pub(crate) struct CtlZData<'a, F: Field> {
    /// Helper columns to verify the Z polynomial values.
    pub(crate) helper_columns: Vec<PolynomialValues<F>>,
    // The running-sum Z polynomial itself.
    pub(crate) z: PolynomialValues<F>,
    // The (beta, gamma) challenge used to combine and offset column tuples.
    pub(crate) challenge: GrandProductChallenge<F>,
    /// Vector of column linear combinations for the current tables.
    pub(crate) columns: Vec<&'a [Column<F>]>,
    /// Vector of filter columns for the current table.
    /// Each filter evaluates to either 1 or 0.
    pub(crate) filter: Vec<Option<Filter<F>>>,
}
impl<F: Field> CtlData<'_, F> {
    /// Number of Z polynomials held for this table.
    pub fn len(&self) -> usize {
        self.zs_columns.len()
    }
    pub fn is_empty(&self) -> bool {
        self.zs_columns.is_empty()
    }
    /// Returns all the cross-table lookup helper polynomials.
    pub(crate) fn ctl_helper_polys(&self) -> Vec<PolynomialValues<F>> {
        self.zs_columns
            .iter()
            .flat_map(|z| z.helper_columns.iter().cloned())
            .collect()
    }
    /// Returns all the Z cross-table-lookup polynomials.
    pub(crate) fn ctl_z_polys(&self) -> Vec<PolynomialValues<F>> {
        self.zs_columns.iter().map(|z| z.z.clone()).collect()
    }
    /// Returns the number of helper columns for each STARK in each
    /// `CtlZData`.
    pub(crate) fn num_ctl_helper_polys(&self) -> Vec<usize> {
        self.zs_columns
            .iter()
            .map(|z| z.helper_columns.len())
            .collect()
    }
}
/// Randomness for a single instance of a permutation check protocol.
#[derive(Copy, Clone, Eq, PartialEq, Debug)]
pub(crate) struct GrandProductChallenge<T: Copy + Eq + PartialEq + Debug> {
    /// Randomness used to combine multiple columns into one.
    pub(crate) beta: T,
    /// Random offset that's added to the beta-reduced column values.
    pub(crate) gamma: T,
}
impl<F: Field> GrandProductChallenge<F> {
    /// Combines a tuple of values into one element: reduce `terms` with
    /// powers of `beta`, then add the `gamma` offset.
    pub(crate) fn combine<'a, FE, P, T: IntoIterator<Item = &'a P>, const D2: usize>(
        &self,
        terms: T,
    ) -> P
    where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>,
        T::IntoIter: DoubleEndedIterator,
    {
        let reduced = reduce_with_powers(terms, FE::from_basefield(self.beta));
        reduced + FE::from_basefield(self.gamma)
    }
}
// The two circuit variants of `combine` live in a single impl block.
impl GrandProductChallenge<Target> {
    /// Circuit version of `combine` over extension targets.
    pub(crate) fn combine_circuit<F: RichField + Extendable<D>, const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        terms: &[ExtensionTarget<D>],
    ) -> ExtensionTarget<D> {
        let reduced = reduce_with_powers_ext_circuit(builder, terms, self.beta);
        let gamma = builder.convert_to_ext(self.gamma);
        builder.add_extension(reduced, gamma)
    }

    /// Circuit version of `combine` over base-field targets.
    pub(crate) fn combine_base_circuit<F: RichField + Extendable<D>, const D: usize>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        terms: &[Target],
    ) -> Target {
        let reduced = reduce_with_powers_circuit(builder, terms, self.beta);
        builder.add(reduced, self.gamma)
    }
}
/// Like `PermutationChallenge`, but with `num_challenges` copies to boost soundness.
#[derive(Clone, Eq, PartialEq, Debug)]
pub(crate) struct GrandProductChallengeSet<T: Copy + Eq + PartialEq + Debug> {
    pub(crate) challenges: Vec<GrandProductChallenge<T>>,
}
impl GrandProductChallengeSet<Target> {
    /// Serializes the challenge count followed by each `(beta, gamma)` pair.
    pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
        buffer.write_usize(self.challenges.len())?;
        for GrandProductChallenge { beta, gamma } in &self.challenges {
            buffer.write_target(*beta)?;
            buffer.write_target(*gamma)?;
        }
        Ok(())
    }
    /// Deserializes a challenge set previously written by `to_buffer`.
    pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
        let length = buffer.read_usize()?;
        let challenges = (0..length)
            .map(|_| {
                Ok(GrandProductChallenge {
                    beta: buffer.read_target()?,
                    gamma: buffer.read_target()?,
                })
            })
            .collect::<IoResult<Vec<_>>>()?;
        Ok(GrandProductChallengeSet { challenges })
    }
}
/// Samples one `(beta, gamma)` challenge pair from the Fiat-Shamir challenger.
fn get_grand_product_challenge<F: RichField, H: Hasher<F>>(
    challenger: &mut Challenger<F, H>,
) -> GrandProductChallenge<F> {
    // Field order of the struct literal keeps beta sampled before gamma.
    GrandProductChallenge {
        beta: challenger.get_challenge(),
        gamma: challenger.get_challenge(),
    }
}
/// Samples `num_challenges` independent `(beta, gamma)` pairs.
pub(crate) fn get_grand_product_challenge_set<F: RichField, H: Hasher<F>>(
    challenger: &mut Challenger<F, H>,
    num_challenges: usize,
) -> GrandProductChallengeSet<F> {
    let mut challenges = Vec::with_capacity(num_challenges);
    for _ in 0..num_challenges {
        challenges.push(get_grand_product_challenge(challenger));
    }
    GrandProductChallengeSet { challenges }
}
/// Circuit analogue of `get_grand_product_challenge`: samples one
/// `(beta, gamma)` pair of targets from the recursive challenger.
fn get_grand_product_challenge_target<
    F: RichField + Extendable<D>,
    H: AlgebraicHasher<F>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    challenger: &mut RecursiveChallenger<F, H, D>,
) -> GrandProductChallenge<Target> {
    let beta = challenger.get_challenge(builder);
    let gamma = challenger.get_challenge(builder);
    GrandProductChallenge { beta, gamma }
}
/// Circuit analogue of `get_grand_product_challenge_set`: samples
/// `num_challenges` target challenge pairs.
pub(crate) fn get_grand_product_challenge_set_target<
    F: RichField + Extendable<D>,
    H: AlgebraicHasher<F>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    challenger: &mut RecursiveChallenger<F, H, D>,
    num_challenges: usize,
) -> GrandProductChallengeSet<Target> {
    let challenges = (0..num_challenges)
        .map(|_| get_grand_product_challenge_target(builder, challenger))
        .collect();
    GrandProductChallengeSet { challenges }
}
/// Returns the number of helper columns for each `Table`.
pub(crate) fn num_ctl_helper_columns_by_table<F: Field>(
    ctls: &[CrossTableLookup<F>],
    constraint_degree: usize,
) -> Vec<[usize; NUM_TABLES]> {
    let mut res = vec![[0; NUM_TABLES]; ctls.len()];
    for (i, ctl) in ctls.iter().enumerate() {
        let CrossTableLookup {
            looking_tables,
            looked_table: _,
        } = ctl;
        let mut num_by_table = [0; NUM_TABLES];
        // NOTE(review): `group_by` only groups *consecutive* entries with the
        // same table, so this assumes looking tables for the same table are
        // adjacent in `looking_tables` — confirm callers uphold this.
        let grouped_lookups = looking_tables.iter().group_by(|&a| a.table);
        for (table, group) in grouped_lookups.into_iter() {
            let sum = group.count();
            if sum > 1 {
                // Helper columns are only needed when a table appears more
                // than once in this CTL.
                num_by_table[table as usize] = ceil_div_usize(sum, constraint_degree - 1);
            }
        }
        res[i] = num_by_table;
    }
    res
}
/// Builds, for every table, the CTL helper polynomials and running-sum Z
/// polynomials for all cross-table lookups under every challenge.
pub(crate) fn cross_table_lookup_data<'a, F: RichField, const D: usize>(
    trace_poly_values: &[Vec<PolynomialValues<F>>; NUM_TABLES],
    cross_table_lookups: &'a [CrossTableLookup<F>],
    ctl_challenges: &GrandProductChallengeSet<F>,
    constraint_degree: usize,
) -> [CtlData<'a, F>; NUM_TABLES] {
    let mut ctl_data_per_table = [0; NUM_TABLES].map(|_| CtlData::default());
    for CrossTableLookup {
        looking_tables,
        looked_table,
    } in cross_table_lookups
    {
        log::debug!("Processing CTL for {:?}", looked_table.table);
        for &challenge in &ctl_challenges.challenges {
            // Helper and Z polynomials of the looking tables, grouped per table.
            let helper_zs_looking = ctl_helper_zs_cols(
                trace_poly_values,
                looking_tables.clone(),
                challenge,
                constraint_degree,
            );
            // Z polynomial for the looked table.
            let z_looked = partial_sums(
                &trace_poly_values[looked_table.table as usize],
                &[(&looked_table.columns, &looked_table.filter)],
                challenge,
                constraint_degree,
            );
            for (table, helpers_zs) in helper_zs_looking {
                // The last polynomial is the Z column; the ones before it are helpers.
                let num_helpers = helpers_zs.len() - 1;
                let count = looking_tables
                    .iter()
                    .filter(|looking_table| looking_table.table as usize == table)
                    .count();
                // Columns and filters of every looking occurrence of this table.
                let cols_filts = looking_tables.iter().filter_map(|looking_table| {
                    if looking_table.table as usize == table {
                        Some((&looking_table.columns, &looking_table.filter))
                    } else {
                        None
                    }
                });
                let mut columns = Vec::with_capacity(count);
                let mut filter = Vec::with_capacity(count);
                for (col, filt) in cols_filts {
                    columns.push(&col[..]);
                    filter.push(filt.clone());
                }
                ctl_data_per_table[table].zs_columns.push(CtlZData {
                    helper_columns: helpers_zs[..num_helpers].to_vec(),
                    z: helpers_zs[num_helpers].clone(),
                    challenge,
                    columns,
                    filter,
                });
            }
            // There is no helper column for the looked table.
            let looked_poly = z_looked[0].clone();
            ctl_data_per_table[looked_table.table as usize]
                .zs_columns
                .push(CtlZData {
                    helper_columns: vec![],
                    z: looked_poly,
                    challenge,
                    columns: vec![&looked_table.columns[..]],
                    filter: vec![looked_table.filter.clone()],
                });
        }
    }
    ctl_data_per_table
}
/// One lookup side: the column linear combinations to combine, plus the optional row filter.
type ColumnFilter<'a, F> = (&'a [Column<F>], &'a Option<Filter<F>>);
/// Given a STARK's trace, and the data associated to one lookup (either CTL or range check),
/// returns the associated helper polynomials.
pub(crate) fn get_helper_cols<F: Field>(
trace: &[PolynomialValues<F>],
degree: usize,
columns_filters: &[ColumnFilter<F>],
challenge: GrandProductChallenge<F>,
constraint_degree: usize,
) -> Vec<PolynomialValues<F>> {
let num_helper_columns = ceil_div_usize(columns_filters.len(), constraint_degree - 1);
let mut helper_columns = Vec::with_capacity(num_helper_columns);
for mut cols_filts in &columns_filters.iter().chunks(constraint_degree - 1) {
let (first_col, first_filter) = cols_filts.next().unwrap();
let mut filter_col = Vec::with_capacity(degree);
let first_combined = (0..degree)
.map(|d| {
let f = if let Some(filter) = first_filter {
let f = filter.eval_table(trace, d);
filter_col.push(f);
f
} else {
filter_col.push(F::ONE);
F::ONE
};
if f.is_one() {
let evals = first_col
.iter()
.map(|c| c.eval_table(trace, d))
.collect::<Vec<F>>();
challenge.combine(evals.iter())
} else {
assert_eq!(f, F::ZERO, "Non-binary filter?");
// Dummy value. Cannot be zero since it will be batch-inverted.
F::ONE
}
})
.collect::<Vec<F>>();
let mut acc = F::batch_multiplicative_inverse(&first_combined);
for d in 0..degree {
if filter_col[d].is_zero() {
acc[d] = F::ZERO;
}
}
for (col, filt) in cols_filts {
let mut filter_col = Vec::with_capacity(degree);
let mut combined = (0..degree)
.map(|d| {
let f = if let Some(filter) = filt {
let f = filter.eval_table(trace, d);
filter_col.push(f);
f
} else {
filter_col.push(F::ONE);
F::ONE
};
if f.is_one() {
let evals = col
.iter()
.map(|c| c.eval_table(trace, d))
.collect::<Vec<F>>();
challenge.combine(evals.iter())
} else {
assert_eq!(f, F::ZERO, "Non-binary filter?");
// Dummy value. Cannot be zero since it will be batch-inverted.
F::ONE
}
})
.collect::<Vec<F>>();
combined = F::batch_multiplicative_inverse(&combined);
for d in 0..degree {
if filter_col[d].is_zero() {
combined[d] = F::ZERO;
}
}
batch_add_inplace(&mut acc, &combined);
}
helper_columns.push(acc.into());
}
assert_eq!(helper_columns.len(), num_helper_columns);
helper_columns
}
/// Computes helper columns and Z polynomials for all looking tables
/// of one cross-table lookup (i.e. for one looked table).
fn ctl_helper_zs_cols<F: Field>(
    all_stark_traces: &[Vec<PolynomialValues<F>>; NUM_TABLES],
    looking_tables: Vec<TableWithColumns<F>>,
    challenge: GrandProductChallenge<F>,
    constraint_degree: usize,
) -> Vec<(usize, Vec<PolynomialValues<F>>)> {
    let grouped = looking_tables.iter().group_by(|t| t.table);
    let mut result = Vec::new();
    // One entry per distinct looking `Table`, bundling all of its (columns, filter) pairs.
    for (table, group) in grouped.into_iter() {
        let columns_filters = group
            .map(|t| (&t.columns[..], &t.filter))
            .collect::<Vec<(&[Column<F>], &Option<Filter<F>>)>>();
        let zs = partial_sums(
            &all_stark_traces[table as usize],
            &columns_filters,
            challenge,
            constraint_degree,
        );
        result.push((table as usize, zs));
    }
    result
}
/// Computes the cross-table lookup partial sums for one table and given column linear combinations.
/// `trace` represents the trace values for the given table.
/// `columns_filters` pairs each set of column linear combinations with the filter deciding
/// whether a row is selected.
/// `challenge` is a cross-table lookup challenge.
///
/// For each selected row, the column combinations are combined into a value `v`. The values
/// `v` are grouped in groups of size `constraint_degree - 1` (2 in our case), and each group
/// yields a helper column `h = \sum_i 1/(v_i)`.
/// The running sum starts at 0 below the last row and accumulates `\sum h_i` going upwards,
/// so `z[i]` holds the suffix sum of the helper rows from row `i` to the last row.
/// Returns the helper columns followed by `z`.
fn partial_sums<F: Field>(
    trace: &[PolynomialValues<F>],
    columns_filters: &[ColumnFilter<F>],
    challenge: GrandProductChallenge<F>,
    constraint_degree: usize,
) -> Vec<PolynomialValues<F>> {
    let degree = trace[0].len();
    let mut helper_columns =
        get_helper_cols(trace, degree, columns_filters, challenge, constraint_degree);
    // Sum of all helper columns at row `i`.
    let row_sum = |i: usize| helper_columns.iter().map(|col| col.values[i]).sum::<F>();
    // Build the suffix sums in place: z[i] = \sum_{j >= i} row_sum(j).
    let mut z = vec![F::ZERO; degree];
    z[degree - 1] = row_sum(degree - 1);
    for i in (0..degree - 1).rev() {
        z[i] = z[i + 1] + row_sum(i);
    }
    if columns_filters.len() > 1 {
        helper_columns.push(z.into());
    } else {
        // A single (columns, filter) pair needs no separate helper column.
        helper_columns = vec![z.into()];
    }
    helper_columns
}
#[derive(Clone)]
/// Openings and metadata needed to check one CTL `Z` polynomial on a single STARK.
pub struct CtlCheckVars<'a, F, FE, P, const D2: usize>
where
    F: Field,
    FE: FieldExtension<D2, BaseField = F>,
    P: PackedField<Scalar = FE>,
{
    // Openings of this CTL's helper columns (taken from the auxiliary polynomials).
    pub(crate) helper_columns: Vec<P>,
    // Opening of the running-sum polynomial `Z` at the local row.
    pub(crate) local_z: P,
    // Opening of `Z` at the next row.
    pub(crate) next_z: P,
    // The `beta`/`gamma` challenge pair this `Z` was built with.
    pub(crate) challenges: GrandProductChallenge<F>,
    // Column linear combinations, one slice per bundled looking/looked table.
    pub(crate) columns: Vec<&'a [Column<F>]>,
    // Row filters, parallel to `columns`.
    pub(crate) filter: Vec<Option<Filter<F>>>,
}
impl<'a, F: RichField + Extendable<D>, const D: usize>
CtlCheckVars<'a, F, F::Extension, F::Extension, D>
{
pub(crate) fn from_proofs<C: GenericConfig<D, F = F>>(
proofs: &[StarkProofWithMetadata<F, C, D>; NUM_TABLES],
cross_table_lookups: &'a [CrossTableLookup<F>],
ctl_challenges: &'a GrandProductChallengeSet<F>,
num_lookup_columns: &[usize; NUM_TABLES],
num_helper_ctl_columns: &Vec<[usize; NUM_TABLES]>,
) -> [Vec<Self>; NUM_TABLES] {
let mut total_num_helper_cols_by_table = [0; NUM_TABLES];
for p_ctls in num_helper_ctl_columns {
for j in 0..NUM_TABLES {
total_num_helper_cols_by_table[j] += p_ctls[j] * ctl_challenges.challenges.len();
}
}
// Get all cross-table lookup polynomial openings for each STARK proof.
let ctl_zs = proofs
.iter()
.zip(num_lookup_columns)
.map(|(p, &num_lookup)| {
let openings = &p.proof.openings;
let ctl_zs = &openings.auxiliary_polys[num_lookup..];
let ctl_zs_next = &openings.auxiliary_polys_next[num_lookup..];
ctl_zs.iter().zip(ctl_zs_next).collect::<Vec<_>>()
})
.collect::<Vec<_>>();
// Put each cross-table lookup polynomial into the correct table data: if a CTL polynomial is extracted from looking/looked table t, then we add it to the `CtlCheckVars` of table t.
let mut start_indices = [0; NUM_TABLES];
let mut z_indices = [0; NUM_TABLES];
let mut ctl_vars_per_table = [0; NUM_TABLES].map(|_| vec![]);
for (
CrossTableLookup {
looking_tables,
looked_table,
},
num_ctls,
) in cross_table_lookups.iter().zip(num_helper_ctl_columns)
{
for &challenges in &ctl_challenges.challenges {
// Group looking tables by `Table`, since we bundle the looking tables taken from the same `Table` together thanks to helper columns.
// We want to only iterate on each `Table` once.
let mut filtered_looking_tables =
Vec::with_capacity(min(looking_tables.len(), NUM_TABLES));
for table in looking_tables {
if !filtered_looking_tables.contains(&(table.table as usize)) {
filtered_looking_tables.push(table.table as usize);
}
}
for &table in filtered_looking_tables.iter() {
// We have first all the helper polynomials, then all the z polynomials.
let (looking_z, looking_z_next) =
ctl_zs[table][total_num_helper_cols_by_table[table] + z_indices[table]];
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | true |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/fixed_recursive_verifier.rs | prover/src/fixed_recursive_verifier.rs | use core::mem::{self, MaybeUninit};
use std::collections::BTreeMap;
use std::ops::Range;
use hashbrown::HashMap;
use itertools::{zip_eq, Itertools};
use plonky2::field::extension::Extendable;
use plonky2::fri::FriParams;
use plonky2::gates::constant::ConstantGate;
use plonky2::gates::noop::NoopGate;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::challenger::RecursiveChallenger;
use plonky2::iop::target::{BoolTarget, Target};
use plonky2::iop::witness::{PartialWitness, WitnessWrite};
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::circuit_data::{
CircuitConfig, CircuitData, CommonCircuitData, VerifierCircuitTarget,
};
use plonky2::plonk::config::{AlgebraicHasher, GenericConfig};
use plonky2::plonk::proof::{ProofWithPublicInputs, ProofWithPublicInputsTarget};
use plonky2::recursion::cyclic_recursion::check_cyclic_proof_verifier_data;
use plonky2::recursion::dummy_circuit::cyclic_base_proof;
use plonky2::util::serialization::{
Buffer, GateSerializer, IoResult, Read, WitnessGeneratorSerializer, Write,
};
use plonky2::util::timing::TimingTree;
use plonky2_util::log2_ceil;
use crate::all_stark::{all_cross_table_lookups, AllStark, Table, NUM_TABLES};
use crate::config::StarkConfig;
use crate::cpu::kernel::assembler::Kernel;
use crate::cross_table_lookup::{
get_grand_product_challenge_set_target, verify_cross_table_lookups_circuit, CrossTableLookup,
GrandProductChallengeSet,
};
use crate::generation::state::{
AssumptionReceipt, AssumptionReceipts, CompositeReceipt, InnerReceipt, Receipt, ReceiptClaim,
};
use crate::get_challenges::observe_public_values_target;
use crate::proof::{MemRootsTarget, PublicValues, PublicValuesTarget, StarkProofWithMetadata};
use crate::prover::{prove_with_output_and_assumptions, prove_with_outputs};
use crate::recursive_verifier::{
add_common_recursion_gates, add_virtual_public_values, recursive_stark_circuit,
set_public_value_targets, PlonkWrapperCircuit, PublicInputs, StarkWrapperCircuit,
};
use crate::stark::Stark;
use crate::util::u32_array_to_u8_vec;
use crate::verifier::verify_proof;
//use crate::util::h256_limbs;
/// The recursion threshold. We end a chain of recursive proofs once we reach this size
/// (expressed in degree bits, i.e. log2 of the circuit degree).
const THRESHOLD_DEGREE_BITS: usize = 13;
/// Environment-variable names used to configure the supported degree-bits range of each
/// table, indexed in `Table` order (see the error message built in `prove_root`).
pub const RANGE_TABLES: [&str; 12] = [
    "ARITHMETIC",
    "CPU",
    "POSEIDON",
    "POSEIDON_SPONGE",
    "KECCAK",
    "KECCAK_SPONGE",
    "SHA_EXTEND",
    "SHA_EXTEND_SPONGE",
    "SHA_COMPRESS",
    "SHA_COMPRESS_SPONGE",
    "LOGIC",
    "MEMORY",
];
/// Contains all recursive circuits used in the system.
///
/// For each STARK and each initial `degree_bits`, this contains a chain of
/// recursive circuits for shrinking that STARK from `degree_bits` to a constant
/// `THRESHOLD_DEGREE_BITS`. It also contains a special root circuit
/// for combining each STARK's shrunk wrapper proof into a single proof.
#[derive(Eq, PartialEq, Debug)]
pub struct AllRecursiveCircuits<F, C, const D: usize>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    C::Hasher: AlgebraicHasher<F>,
{
    /// The ZKVM root circuit, which aggregates the (shrunk) per-table recursive proofs.
    pub root: RootCircuitData<F, C, D>,
    /// The aggregation circuit, which compresses two proofs (each either a root proof
    /// or another aggregation proof) into one.
    pub aggregation: AggregationCircuitData<F, C, D>,
    /// The block circuit, which verifies an aggregation root proof and a previous block proof.
    pub block: BlockCircuitData<F, C, D>,
    /// Holds chains of circuits for each table and for each initial `degree_bits`.
    by_table: [RecursiveCircuitsForTable<F, C, D>; NUM_TABLES],
}
/// Data for the ZKVM root circuit, which is used to combine each STARK's shrunk wrapper proof
/// into a single proof.
#[derive(Eq, PartialEq, Debug)]
pub struct RootCircuitData<F, C, const D: usize>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    /// The root circuit itself.
    pub circuit: CircuitData<F, C, D>,
    // One shrunk wrapper-proof target per STARK table.
    proof_with_pis: [ProofWithPublicInputsTarget<D>; NUM_TABLES],
    /// For each table, various inner circuits may be used depending on the initial table size.
    /// This target holds the index of the circuit (within `final_circuits()`) that was used.
    index_verifier_data: [Target; NUM_TABLES],
    /// Public inputs containing public values.
    public_values: PublicValuesTarget,
    /// Public inputs used for cyclic verification. These aren't actually used for ZKVM root
    /// proofs; the circuit has them just to match the structure of aggregation proofs.
    cyclic_vk: VerifierCircuitTarget,
}
impl<F, C, const D: usize> RootCircuitData<F, C, D>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    /// Serializes this circuit data into `buffer`. The field order must stay in
    /// sync with `from_buffer`.
    pub fn to_buffer(
        &self,
        buffer: &mut Vec<u8>,
        gate_serializer: &dyn GateSerializer<F, D>,
        generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
    ) -> IoResult<()> {
        buffer.write_circuit_data(&self.circuit, gate_serializer, generator_serializer)?;
        self.proof_with_pis
            .iter()
            .try_for_each(|proof| buffer.write_target_proof_with_public_inputs(proof))?;
        self.index_verifier_data
            .iter()
            .try_for_each(|&index| buffer.write_target(index))?;
        self.public_values.to_buffer(buffer)?;
        buffer.write_target_verifier_circuit(&self.cyclic_vk)
    }
    /// Deserializes circuit data previously written by `to_buffer`.
    pub fn from_buffer(
        buffer: &mut Buffer,
        gate_serializer: &dyn GateSerializer<F, D>,
        generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
    ) -> IoResult<Self> {
        let circuit = buffer.read_circuit_data(gate_serializer, generator_serializer)?;
        // Collect into `IoResult<Vec<_>>` so the first read error short-circuits.
        let proof_with_pis = (0..NUM_TABLES)
            .map(|_| buffer.read_target_proof_with_public_inputs())
            .collect::<IoResult<Vec<_>>>()?;
        let index_verifier_data = (0..NUM_TABLES)
            .map(|_| buffer.read_target())
            .collect::<IoResult<Vec<_>>>()?;
        let public_values = PublicValuesTarget::from_buffer(buffer)?;
        let cyclic_vk = buffer.read_target_verifier_circuit()?;
        Ok(Self {
            circuit,
            proof_with_pis: proof_with_pis.try_into().unwrap(),
            index_verifier_data: index_verifier_data.try_into().unwrap(),
            public_values,
            cyclic_vk,
        })
    }
}
/// Data for the aggregation circuit, which is used to compress two proofs into one. Each inner
/// proof can be either an ZKVM root proof or another aggregation proof.
#[derive(Eq, PartialEq, Debug)]
pub struct AggregationCircuitData<F, C, const D: usize>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    /// The aggregation circuit itself.
    pub circuit: CircuitData<F, C, D>,
    // Left child (root or aggregation proof).
    lhs: AggregationChildTarget<D>,
    // Right child (root or aggregation proof).
    rhs: AggregationChildTarget<D>,
    // Public inputs containing public values.
    public_values: PublicValuesTarget,
    // Verifier data exposed as public inputs for cyclic verification.
    cyclic_vk: VerifierCircuitTarget,
}
impl<F, C, const D: usize> AggregationCircuitData<F, C, D>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    /// Serializes this circuit data into `buffer`. The field order must stay in
    /// sync with `from_buffer`.
    pub fn to_buffer(
        &self,
        buffer: &mut Vec<u8>,
        gate_serializer: &dyn GateSerializer<F, D>,
        generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
    ) -> IoResult<()> {
        buffer.write_circuit_data(&self.circuit, gate_serializer, generator_serializer)?;
        buffer.write_target_verifier_circuit(&self.cyclic_vk)?;
        self.public_values.to_buffer(buffer)?;
        self.lhs.to_buffer(buffer)?;
        self.rhs.to_buffer(buffer)
    }
    /// Deserializes circuit data previously written by `to_buffer`.
    pub fn from_buffer(
        buffer: &mut Buffer,
        gate_serializer: &dyn GateSerializer<F, D>,
        generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
    ) -> IoResult<Self> {
        // Struct-literal fields are evaluated in source order, which matches the
        // order used by `to_buffer`.
        Ok(Self {
            circuit: buffer.read_circuit_data(gate_serializer, generator_serializer)?,
            cyclic_vk: buffer.read_target_verifier_circuit()?,
            public_values: PublicValuesTarget::from_buffer(buffer)?,
            lhs: AggregationChildTarget::from_buffer(buffer)?,
            rhs: AggregationChildTarget::from_buffer(buffer)?,
        })
    }
}
#[derive(Eq, PartialEq, Debug)]
/// Targets for one child of an aggregation node: a selector plus both candidate proofs.
pub struct AggregationChildTarget<const D: usize> {
    // True when the child is itself an aggregation proof.
    is_agg: BoolTarget,
    // The child proof used when `is_agg` is set.
    agg_proof: ProofWithPublicInputsTarget<D>,
    // The child proof used when `is_agg` is not set.
    evm_proof: ProofWithPublicInputsTarget<D>,
}
impl<const D: usize> AggregationChildTarget<D> {
    /// Serializes this child target into `buffer` (order mirrored by `from_buffer`).
    pub fn to_buffer(&self, buffer: &mut Vec<u8>) -> IoResult<()> {
        buffer.write_target_bool(self.is_agg)?;
        buffer.write_target_proof_with_public_inputs(&self.agg_proof)?;
        buffer.write_target_proof_with_public_inputs(&self.evm_proof)
    }
    /// Deserializes a child target previously written by `to_buffer`.
    pub fn from_buffer(buffer: &mut Buffer) -> IoResult<Self> {
        // Struct-literal fields are evaluated in source order, matching `to_buffer`.
        Ok(Self {
            is_agg: buffer.read_target_bool()?,
            agg_proof: buffer.read_target_proof_with_public_inputs()?,
            evm_proof: buffer.read_target_proof_with_public_inputs()?,
        })
    }
    /// Returns this child's public values: those of `agg_proof` when `is_agg`
    /// is set, otherwise those of `evm_proof`.
    pub fn public_values<F: RichField + Extendable<D>>(
        &self,
        builder: &mut CircuitBuilder<F, D>,
    ) -> PublicValuesTarget {
        let from_agg = PublicValuesTarget::from_public_inputs(&self.agg_proof.public_inputs);
        let from_evm = PublicValuesTarget::from_public_inputs(&self.evm_proof.public_inputs);
        PublicValuesTarget::select(builder, self.is_agg, from_agg, from_evm)
    }
}
#[derive(Eq, PartialEq, Debug)]
/// Data for the block circuit, which verifies an aggregation root proof together
/// with an optional parent block proof.
pub struct BlockCircuitData<F, C, const D: usize>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    /// The block circuit itself.
    pub circuit: CircuitData<F, C, D>,
    // Whether a parent block proof is present (first block has none).
    has_parent_block: BoolTarget,
    // Proof of the previous block, verified cyclically when `has_parent_block` is set.
    parent_block_proof: ProofWithPublicInputsTarget<D>,
    // The aggregation root proof for this block.
    agg_root_proof: ProofWithPublicInputsTarget<D>,
    // Public inputs containing public values.
    public_values: PublicValuesTarget,
    // Verifier data exposed as public inputs for cyclic verification.
    cyclic_vk: VerifierCircuitTarget,
}
impl<F, C, const D: usize> BlockCircuitData<F, C, D>
where
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
{
    /// Serializes this circuit data into `buffer`. The field order must stay in
    /// sync with `from_buffer`.
    pub fn to_buffer(
        &self,
        buffer: &mut Vec<u8>,
        gate_serializer: &dyn GateSerializer<F, D>,
        generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
    ) -> IoResult<()> {
        buffer.write_circuit_data(&self.circuit, gate_serializer, generator_serializer)?;
        buffer.write_target_bool(self.has_parent_block)?;
        buffer.write_target_proof_with_public_inputs(&self.parent_block_proof)?;
        buffer.write_target_proof_with_public_inputs(&self.agg_root_proof)?;
        self.public_values.to_buffer(buffer)?;
        buffer.write_target_verifier_circuit(&self.cyclic_vk)
    }
    /// Deserializes circuit data previously written by `to_buffer`.
    pub fn from_buffer(
        buffer: &mut Buffer,
        gate_serializer: &dyn GateSerializer<F, D>,
        generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
    ) -> IoResult<Self> {
        // Struct-literal fields are evaluated in source order, matching `to_buffer`.
        Ok(Self {
            circuit: buffer.read_circuit_data(gate_serializer, generator_serializer)?,
            has_parent_block: buffer.read_target_bool()?,
            parent_block_proof: buffer.read_target_proof_with_public_inputs()?,
            agg_root_proof: buffer.read_target_proof_with_public_inputs()?,
            public_values: PublicValuesTarget::from_buffer(buffer)?,
            cyclic_vk: buffer.read_target_verifier_circuit()?,
        })
    }
}
impl<F, C, const D: usize> AllRecursiveCircuits<F, C, D>
where
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F> + 'static,
C::Hasher: AlgebraicHasher<F>,
{
    /// Serializes all circuits (root, aggregation, block, and every per-table
    /// chain) into a single byte vector, in the order expected by `from_bytes`.
    pub fn to_bytes(
        &self,
        gate_serializer: &dyn GateSerializer<F, D>,
        generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
    ) -> IoResult<Vec<u8>> {
        // TODO: would be better to initialize it dynamically based on the supported max degree.
        // NOTE(review): `1 << 34` reserves 16 GiB of capacity up front — confirm this is
        // intentional for the targeted proof sizes and host memory.
        let mut buffer = Vec::with_capacity(1 << 34);
        self.root
            .to_buffer(&mut buffer, gate_serializer, generator_serializer)?;
        self.aggregation
            .to_buffer(&mut buffer, gate_serializer, generator_serializer)?;
        self.block
            .to_buffer(&mut buffer, gate_serializer, generator_serializer)?;
        for table in &self.by_table {
            table.to_buffer(&mut buffer, gate_serializer, generator_serializer)?;
        }
        Ok(buffer)
    }
#[allow(clippy::missing_transmute_annotations)]
pub fn from_bytes(
bytes: &[u8],
gate_serializer: &dyn GateSerializer<F, D>,
generator_serializer: &dyn WitnessGeneratorSerializer<F, D>,
) -> IoResult<Self> {
let mut buffer = Buffer::new(bytes);
let root =
RootCircuitData::from_buffer(&mut buffer, gate_serializer, generator_serializer)?;
let aggregation = AggregationCircuitData::from_buffer(
&mut buffer,
gate_serializer,
generator_serializer,
)?;
let block =
BlockCircuitData::from_buffer(&mut buffer, gate_serializer, generator_serializer)?;
// Tricky use of MaybeUninit to remove the need for implementing Debug
// for all underlying types, necessary to convert a by_table Vec to an array.
let by_table = {
let mut by_table: [MaybeUninit<RecursiveCircuitsForTable<F, C, D>>; NUM_TABLES] =
unsafe { MaybeUninit::uninit().assume_init() };
for table in &mut by_table[..] {
let value = RecursiveCircuitsForTable::from_buffer(
&mut buffer,
gate_serializer,
generator_serializer,
)?;
*table = MaybeUninit::new(value);
}
unsafe {
#[allow(clippy::missing_transmute_annotations)]
mem::transmute::<_, [RecursiveCircuitsForTable<F, C, D>; NUM_TABLES]>(by_table)
}
};
Ok(Self {
root,
aggregation,
block,
by_table,
})
}
/// Preprocess all recursive circuits used by the system.
pub fn new(
all_stark: &AllStark<F, D>,
degree_bits_ranges: &[Range<usize>; NUM_TABLES],
stark_config: &StarkConfig,
) -> Self {
let arithmetic = RecursiveCircuitsForTable::new(
Table::Arithmetic,
&all_stark.arithmetic_stark,
degree_bits_ranges[Table::Arithmetic as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let cpu = RecursiveCircuitsForTable::new(
Table::Cpu,
&all_stark.cpu_stark,
degree_bits_ranges[Table::Cpu as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let poseidon = RecursiveCircuitsForTable::new(
Table::Poseidon,
&all_stark.poseidon_stark,
degree_bits_ranges[Table::Poseidon as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let poseidon_sponge = RecursiveCircuitsForTable::new(
Table::PoseidonSponge,
&all_stark.poseidon_sponge_stark,
degree_bits_ranges[Table::PoseidonSponge as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let keccak = RecursiveCircuitsForTable::new(
Table::Keccak,
&all_stark.keccak_stark,
degree_bits_ranges[Table::Keccak as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let keccak_sponge = RecursiveCircuitsForTable::new(
Table::KeccakSponge,
&all_stark.keccak_sponge_stark,
degree_bits_ranges[Table::KeccakSponge as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let sha_extend = RecursiveCircuitsForTable::new(
Table::ShaExtend,
&all_stark.sha_extend_stark,
degree_bits_ranges[Table::ShaExtend as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let sha_extend_sponge = RecursiveCircuitsForTable::new(
Table::ShaExtendSponge,
&all_stark.sha_extend_sponge_stark,
degree_bits_ranges[Table::ShaExtendSponge as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let sha_compress = RecursiveCircuitsForTable::new(
Table::ShaCompress,
&all_stark.sha_compress_stark,
degree_bits_ranges[Table::ShaCompress as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let sha_compress_sponge = RecursiveCircuitsForTable::new(
Table::ShaCompressSponge,
&all_stark.sha_compress_sponge_stark,
degree_bits_ranges[Table::ShaCompressSponge as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let logic = RecursiveCircuitsForTable::new(
Table::Logic,
&all_stark.logic_stark,
degree_bits_ranges[Table::Logic as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let memory = RecursiveCircuitsForTable::new(
Table::Memory,
&all_stark.memory_stark,
degree_bits_ranges[Table::Memory as usize].clone(),
&all_stark.cross_table_lookups,
stark_config,
);
let by_table = [
arithmetic,
cpu,
poseidon,
poseidon_sponge,
keccak,
keccak_sponge,
sha_extend,
sha_extend_sponge,
sha_compress,
sha_compress_sponge,
logic,
memory,
];
let root = Self::create_root_circuit(&by_table, stark_config);
let aggregation = Self::create_aggregation_circuit(&root);
let block = Self::create_block_circuit(&aggregation);
Self {
root,
aggregation,
block,
by_table,
}
}
    /// Builds the root circuit: verifies one shrunk recursive proof per table
    /// (choosing the verifier key via `index_verifier_data`), links the proofs'
    /// challenger states into a single transcript, and checks the CTLs.
    fn create_root_circuit(
        by_table: &[RecursiveCircuitsForTable<F, C, D>; NUM_TABLES],
        stark_config: &StarkConfig,
    ) -> RootCircuitData<F, C, D> {
        // All final circuits of a table share `common` data (asserted below), so
        // index 0 serves as the reference.
        let inner_common_data: [_; NUM_TABLES] =
            core::array::from_fn(|i| &by_table[i].final_circuits()[0].common);
        let mut builder = CircuitBuilder::new(CircuitConfig::standard_recursion_config());
        let public_values = add_virtual_public_values(&mut builder);
        let recursive_proofs =
            core::array::from_fn(|i| builder.add_virtual_proof_with_pis(inner_common_data[i]));
        let pis: [_; NUM_TABLES] = core::array::from_fn(|i| {
            PublicInputs::<Target, <C::Hasher as AlgebraicHasher<F>>::AlgebraicPermutation>::from_vec(
                &recursive_proofs[i].public_inputs,
                stark_config,
            )
        });
        let index_verifier_data = core::array::from_fn(|_i| builder.add_virtual_target());
        // Re-derive the CTL challenges in-circuit by observing the trace caps and
        // the public values, mirroring the prover's transcript.
        let mut challenger = RecursiveChallenger::<F, C::Hasher, D>::new(&mut builder);
        for pi in &pis {
            for h in &pi.trace_cap {
                challenger.observe_elements(h);
            }
        }
        observe_public_values_target::<F, C, D>(&mut challenger, &public_values);
        let ctl_challenges = get_grand_product_challenge_set_target(
            &mut builder,
            &mut challenger,
            stark_config.num_challenges,
        );
        // Check that the correct CTL challenges are used in every proof.
        for pi in &pis {
            for i in 0..stark_config.num_challenges {
                builder.connect(
                    ctl_challenges.challenges[i].beta,
                    pi.ctl_challenges.challenges[i].beta,
                );
                builder.connect(
                    ctl_challenges.challenges[i].gamma,
                    pi.ctl_challenges.challenges[i].gamma,
                );
            }
        }
        // The first proof must start from the challenger state derived above.
        let state = challenger.compact(&mut builder);
        for (&before, &s) in zip_eq(state.as_ref(), pis[0].challenger_state_before.as_ref()) {
            builder.connect(before, s);
        }
        // Check that the challenger state is consistent between proofs.
        for i in 1..NUM_TABLES {
            for (&before, &after) in zip_eq(
                pis[i].challenger_state_before.as_ref(),
                pis[i - 1].challenger_state_after.as_ref(),
            ) {
                builder.connect(before, after);
            }
        }
        // Verify the CTL checks.
        verify_cross_table_lookups_circuit::<F, D>(
            &mut builder,
            all_cross_table_lookups(),
            pis.map(|p| p.ctl_zs_first),
            stark_config,
        );
        for (i, table_circuits) in by_table.iter().enumerate() {
            let final_circuits = table_circuits.final_circuits();
            for final_circuit in &final_circuits {
                assert_eq!(
                    &final_circuit.common, inner_common_data[i],
                    "common_data mismatch"
                );
            }
            let mut possible_vks = final_circuits
                .into_iter()
                .map(|c| builder.constant_verifier_data(&c.verifier_only))
                .collect_vec();
            // random_access_verifier_data expects a vector whose length is a power of two.
            // To satisfy this, we will just add some duplicates of the first VK.
            while !possible_vks.len().is_power_of_two() {
                possible_vks.push(possible_vks[0].clone());
            }
            // Select the verifier key matching the proof's original table size.
            let inner_verifier_data =
                builder.random_access_verifier_data(index_verifier_data[i], possible_vks);
            builder.verify_proof::<C>(
                &recursive_proofs[i],
                &inner_verifier_data,
                inner_common_data[i],
            );
        }
        // We want ZKVM root proofs to have the exact same structure as aggregation proofs, so we add
        // public inputs for cyclic verification, even though they'll be ignored.
        let cyclic_vk = builder.add_verifier_data_public_inputs();
        builder.add_gate(
            ConstantGate::new(inner_common_data[0].config.num_constants),
            vec![],
        );
        RootCircuitData {
            circuit: builder.build::<C>(),
            proof_with_pis: recursive_proofs,
            index_verifier_data,
            public_values,
            cyclic_vk,
        }
    }
    /// Builds the aggregation circuit, which verifies two child proofs (each
    /// either a root proof or another aggregation proof) and chains their
    /// public values: lhs covers the first segment, rhs the second.
    fn create_aggregation_circuit(
        root: &RootCircuitData<F, C, D>,
    ) -> AggregationCircuitData<F, C, D> {
        let mut builder = CircuitBuilder::<F, D>::new(root.circuit.common.config.clone());
        let public_values = add_virtual_public_values(&mut builder);
        // Expose verifier data as public inputs so the circuit can verify itself cyclically.
        let cyclic_vk = builder.add_verifier_data_public_inputs();
        let lhs = Self::add_agg_child(&mut builder, root);
        let rhs = Self::add_agg_child(&mut builder, root);
        let lhs_public_values = lhs.public_values(&mut builder);
        let rhs_public_values = rhs.public_values(&mut builder);
        // Connect aggregation `trie_roots_before` with lhs `trie_roots_before`.
        MemRootsTarget::connect(
            &mut builder,
            public_values.roots_before,
            lhs_public_values.roots_before,
        );
        // Connect aggregation `trie_roots_after` with rhs `trie_roots_after`.
        MemRootsTarget::connect(
            &mut builder,
            public_values.roots_after,
            rhs_public_values.roots_after,
        );
        // Connect lhs `trie_roots_after` with rhs `trie_roots_before`.
        MemRootsTarget::connect(
            &mut builder,
            lhs_public_values.roots_after,
            rhs_public_values.roots_before,
        );
        // Connect agg `userdata` with lhs `userdata`.
        for (limb0, limb1) in public_values
            .userdata
            .iter()
            .zip_eq(&lhs_public_values.userdata)
        {
            builder.connect(*limb0, *limb1);
        }
        // Connect agg `userdata` with rhs `userdata`.
        for (limb0, limb1) in public_values
            .userdata
            .iter()
            .zip_eq(&rhs_public_values.userdata)
        {
            builder.connect(*limb0, *limb1);
        }
        // Pad to match the root circuit's degree.
        while log2_ceil(builder.num_gates()) < root.circuit.common.degree_bits() {
            builder.add_gate(NoopGate, vec![]);
        }
        let circuit = builder.build::<C>();
        AggregationCircuitData {
            circuit,
            lhs,
            rhs,
            public_values,
            cyclic_vk,
        }
    }
    /// Adds the targets for one aggregation child: an `is_agg` selector plus
    /// both candidate proofs. Depending on `is_agg`, either the aggregation
    /// proof is verified cyclically or the evm proof is verified against the
    /// root circuit's verifier key.
    fn add_agg_child(
        builder: &mut CircuitBuilder<F, D>,
        root: &RootCircuitData<F, C, D>,
    ) -> AggregationChildTarget<D> {
        let common = &root.circuit.common;
        let root_vk = builder.constant_verifier_data(&root.circuit.verifier_only);
        let is_agg = builder.add_virtual_bool_target_safe();
        let agg_proof = builder.add_virtual_proof_with_pis(common);
        let evm_proof = builder.add_virtual_proof_with_pis(common);
        builder
            .conditionally_verify_cyclic_proof::<C>(
                is_agg, &agg_proof, &evm_proof, &root_vk, common,
            )
            .expect("Failed to build cyclic recursion circuit");
        AggregationChildTarget {
            is_agg,
            agg_proof,
            evm_proof,
        }
    }
    /// Builds the block circuit, which verifies an aggregation root proof and,
    /// when present, a previous block proof (cyclically).
    fn create_block_circuit(agg: &AggregationCircuitData<F, C, D>) -> BlockCircuitData<F, C, D> {
        // The block circuit is similar to the agg circuit; both verify two inner proofs.
        // We need to adjust a few things, but it's easier than making a new CommonCircuitData.
        // NOTE(review): `degree_bits: 14` is hard-coded here as the expected block-circuit
        // degree — confirm it stays in sync with the built circuit's actual size.
        let expected_common_data = CommonCircuitData {
            fri_params: FriParams {
                degree_bits: 14,
                ..agg.circuit.common.fri_params.clone()
            },
            ..agg.circuit.common.clone()
        };
        let mut builder = CircuitBuilder::<F, D>::new(CircuitConfig::standard_recursion_config())
;
        let public_values = add_virtual_public_values(&mut builder);
        let has_parent_block = builder.add_virtual_bool_target_safe();
        let parent_block_proof = builder.add_virtual_proof_with_pis(&expected_common_data);
        let agg_root_proof = builder.add_virtual_proof_with_pis(&agg.circuit.common);
        let parent_pv = PublicValuesTarget::from_public_inputs(&parent_block_proof.public_inputs);
        let agg_pv = PublicValuesTarget::from_public_inputs(&agg_root_proof.public_inputs);
        // Connect block `trie_roots_before` with parent_pv `trie_roots_before`.
        MemRootsTarget::connect(
            &mut builder,
            public_values.roots_before,
            parent_pv.roots_before,
        );
        // Connect the rest of block `public_values` with agg_pv.
        MemRootsTarget::connect(&mut builder, public_values.roots_after, agg_pv.roots_after);
        // Make connections between block proofs, and check initial and final block values.
        Self::connect_block_proof(&mut builder, has_parent_block, &parent_pv, &agg_pv);
        // `userdata` must agree between the parent block and the aggregated segment.
        for (&limb0, &limb1) in parent_pv.userdata.iter().zip_eq(&agg_pv.userdata) {
            builder.connect(limb0, limb1);
        }
        let cyclic_vk = builder.add_verifier_data_public_inputs();
        // The parent block proof is verified cyclically; a dummy proof stands in
        // when `has_parent_block` is false.
        builder
            .conditionally_verify_cyclic_proof_or_dummy::<C>(
                has_parent_block,
                &parent_block_proof,
                &expected_common_data,
            )
            .expect("Failed to build cyclic recursion circuit");
        let agg_verifier_data = builder.constant_verifier_data(&agg.circuit.verifier_only);
        builder.verify_proof::<C>(&agg_root_proof, &agg_verifier_data, &agg.circuit.common);
        let circuit = builder.build::<C>();
        BlockCircuitData {
            circuit,
            has_parent_block,
            parent_block_proof,
            agg_root_proof,
            public_values,
            cyclic_vk,
        }
    }
fn connect_block_proof(
builder: &mut CircuitBuilder<F, D>,
_has_parent_block: BoolTarget,
lhs: &PublicValuesTarget,
rhs: &PublicValuesTarget,
) {
// Between blocks, we only connect state tries and userdata.
for (&limb0, limb1) in lhs.roots_after.root.iter().zip(rhs.roots_before.root) {
builder.connect(limb0, limb1);
}
}
/// Create a proof for each STARK, then combine them, eventually culminating in a root proof.
pub fn prove_root(
&self,
all_stark: &AllStark<F, D>,
kernel: &Kernel,
config: &StarkConfig,
timing: &mut TimingTree,
) -> anyhow::Result<Receipt<F, C, D>> {
let (all_proof, output) = prove_with_outputs::<F, C, D>(all_stark, kernel, config, timing)?;
verify_proof(all_stark, all_proof.clone(), config).unwrap();
let mut root_inputs = PartialWitness::new();
for table in 0..NUM_TABLES {
let stark_proof = &all_proof.stark_proofs[table];
let original_degree_bits = stark_proof.proof.recover_degree_bits(config);
let table_circuits = &self.by_table[table];
let shrunk_proof = table_circuits
.by_stark_size
.get(&original_degree_bits)
.ok_or_else(|| {
anyhow::Error::msg(format!(
"Missing preprocessed circuits for {:?} table with size {}. To set it, run: export {}=\"{}..{}\" ",
Table::all()[table],
original_degree_bits,
RANGE_TABLES[table],
original_degree_bits,
original_degree_bits + 1,
))
})?
.shrink(stark_proof, &all_proof.ctl_challenges)?;
let index_verifier_data = table_circuits
.by_stark_size
.keys()
.position(|&size| size == original_degree_bits)
.unwrap();
root_inputs.set_target(
self.root.index_verifier_data[table],
F::from_canonical_usize(index_verifier_data),
);
root_inputs.set_proof_with_pis_target(&self.root.proof_with_pis[table], &shrunk_proof);
}
root_inputs.set_verifier_data_target(
&self.root.cyclic_vk,
&self.aggregation.circuit.verifier_only,
);
set_public_value_targets(
&mut root_inputs,
&self.root.public_values,
&all_proof.public_values,
)
.map_err(|_| {
anyhow::Error::msg("Invalid conversion when setting public values targets.")
})?;
let root_proof = self.root.circuit.prove(root_inputs)?;
Ok(Receipt::Segments(InnerReceipt {
proof: root_proof,
values: all_proof.public_values.clone(),
claim: ReceiptClaim {
elf_id: u32_array_to_u8_vec(&all_proof.public_values.roots_before.root),
commit: output.output.clone(),
},
}))
}
pub fn prove_root_with_assumption(
&self,
all_stark: &AllStark<F, D>,
kernel: &Kernel,
config: &StarkConfig,
timing: &mut TimingTree,
assumptions: AssumptionReceipts<F, C, D>,
) -> anyhow::Result<Receipt<F, C, D>> {
if assumptions.is_empty() {
return self.prove_root(all_stark, kernel, config, timing);
}
let (all_proof, output, assumption_used) = prove_with_output_and_assumptions::<F, C, D>(
all_stark,
kernel,
config,
timing,
assumptions,
)?;
verify_proof(all_stark, all_proof.clone(), config).unwrap();
let mut root_inputs = PartialWitness::new();
for table in 0..NUM_TABLES {
let stark_proof = &all_proof.stark_proofs[table];
let original_degree_bits = stark_proof.proof.recover_degree_bits(config);
let table_circuits = &self.by_table[table];
let shrunk_proof = table_circuits
.by_stark_size
.get(&original_degree_bits)
.ok_or_else(|| {
anyhow::Error::msg(format!(
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | true |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_extend/sha_extend_stark.rs | prover/src/sha_extend/sha_extend_stark.rs | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::sha_extend::columns::{
ShaExtendColumnsView, NUM_SHA_EXTEND_COLUMNS, SHA_EXTEND_COL_MAP,
};
use crate::sha_extend::logic::get_input_range_4;
use crate::sha_extend::rotate_right::{
rotate_right_ext_circuit_constraint, rotate_right_packed_constraints,
};
use crate::sha_extend::shift_right::{
shift_right_ext_circuit_constraints, shift_right_packed_constraints,
};
use crate::sha_extend::wrapping_add_4::{
wrapping_add_4_ext_circuit_constraints, wrapping_add_4_packed_constraints,
};
use crate::stark::Stark;
use crate::util::trace_rows_to_poly_values;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use std::borrow::Borrow;
use std::marker::PhantomData;
pub const NUM_INPUTS: usize = 4 * 4; // w_i_minus_15, w_i_minus_2, w_i_minus_16, w_i_minus_7
pub fn ctl_data_inputs<F: Field>() -> Vec<Column<F>> {
let cols = SHA_EXTEND_COL_MAP;
let mut res: Vec<_> = Column::singles(
[
cols.w_i_minus_15.as_slice(),
cols.w_i_minus_2.as_slice(),
cols.w_i_minus_16.as_slice(),
cols.w_i_minus_7.as_slice(),
]
.concat(),
)
.collect();
res.push(Column::single(cols.timestamp));
res
}
pub fn ctl_data_outputs<F: Field>() -> Vec<Column<F>> {
let cols = SHA_EXTEND_COL_MAP;
let mut res: Vec<_> = Column::singles(&cols.w_i.value).collect();
res.push(Column::single(cols.timestamp));
res
}
pub(crate) fn ctl_s_0_inter_looking_logic<F: Field>() -> Vec<Column<F>> {
let cols = SHA_EXTEND_COL_MAP;
let mut res = vec![
Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
];
// Input 0
res.push(Column::le_bytes(cols.w_i_minus_15_rr_7.value));
// Input 1
res.push(Column::le_bytes(cols.w_i_minus_15_rr_18.value));
// The output
res.push(Column::le_bytes(cols.s_0_inter));
res
}
pub(crate) fn ctl_s_0_looking_logic<F: Field>() -> Vec<Column<F>> {
let cols = SHA_EXTEND_COL_MAP;
let mut res = vec![
Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
];
// Input 0
res.push(Column::le_bytes(cols.s_0_inter));
// Input 1
res.push(Column::le_bytes(cols.w_i_minus_15_rs_3.value));
// The output
res.push(Column::le_bytes(cols.s_0));
res
}
pub(crate) fn ctl_s_1_inter_looking_logic<F: Field>() -> Vec<Column<F>> {
let cols = SHA_EXTEND_COL_MAP;
let mut res = vec![
Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
];
// Input 0
res.push(Column::le_bytes(cols.w_i_minus_2_rr_17.value));
// Input 1
res.push(Column::le_bytes(cols.w_i_minus_2_rr_19.value));
// The output
res.push(Column::le_bytes(cols.s_1_inter));
res
}
pub(crate) fn ctl_s_1_looking_logic<F: Field>() -> Vec<Column<F>> {
let cols = SHA_EXTEND_COL_MAP;
let mut res = vec![
Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
];
// Input 0
res.push(Column::le_bytes(cols.s_1_inter));
// Input 1
res.push(Column::le_bytes(cols.w_i_minus_2_rs_10.value));
// The output
res.push(Column::le_bytes(cols.s_1));
res
}
pub fn ctl_filter<F: Field>() -> Filter<F> {
let cols = SHA_EXTEND_COL_MAP;
// not the padding rows.
Filter::new_simple(Column::single(cols.is_real_round))
}
#[derive(Copy, Clone, Default)]
pub struct ShaExtendStark<F, const D: usize> {
pub(crate) f: PhantomData<F>,
}
impl<F: RichField + Extendable<D>, const D: usize> ShaExtendStark<F, D> {
pub(crate) fn generate_trace(
&self,
inputs_and_timestamps: Vec<([u8; NUM_INPUTS], usize)>,
min_rows: usize,
) -> Vec<PolynomialValues<F>> {
// Generate the witness row-wise
let trace_rows = self.generate_trace_rows(inputs_and_timestamps, min_rows);
trace_rows_to_poly_values(trace_rows)
}
fn generate_trace_rows(
&self,
inputs_and_timestamps: Vec<([u8; NUM_INPUTS], usize)>,
min_rows: usize,
) -> Vec<[F; NUM_SHA_EXTEND_COLUMNS]> {
let num_rows = inputs_and_timestamps
.len()
.max(min_rows)
.next_power_of_two();
let mut rows = Vec::with_capacity(num_rows);
for input_and_timestamp in inputs_and_timestamps.iter() {
let rows_for_extend = self.generate_trace_row_for_extend(*input_and_timestamp);
rows.push(rows_for_extend.into());
}
// padding
while rows.len() < num_rows {
rows.push([F::ZERO; NUM_SHA_EXTEND_COLUMNS]);
}
rows
}
fn generate_trace_row_for_extend(
&self,
input_and_timestamp: ([u8; NUM_INPUTS], usize),
) -> ShaExtendColumnsView<F> {
let mut row = ShaExtendColumnsView::default();
row.timestamp = F::from_canonical_usize(input_and_timestamp.1);
let w_i_minus_15: [u8; 4] = input_and_timestamp.0[get_input_range_4(0)]
.try_into()
.unwrap();
let w_i_minus_2: [u8; 4] = input_and_timestamp.0[get_input_range_4(1)]
.try_into()
.unwrap();
let w_i_minus_16: [u8; 4] = input_and_timestamp.0[get_input_range_4(2)]
.try_into()
.unwrap();
let w_i_minus_7: [u8; 4] = input_and_timestamp.0[get_input_range_4(3)]
.try_into()
.unwrap();
row.w_i_minus_15 = w_i_minus_15
.iter()
.map(|&x| F::from_canonical_u8(x))
.collect::<Vec<_>>()
.try_into()
.unwrap();
row.w_i_minus_2 = w_i_minus_2
.iter()
.map(|&x| F::from_canonical_u8(x))
.collect::<Vec<_>>()
.try_into()
.unwrap();
row.w_i_minus_16 = w_i_minus_16
.iter()
.map(|&x| F::from_canonical_u8(x))
.collect::<Vec<_>>()
.try_into()
.unwrap();
row.w_i_minus_7 = w_i_minus_7
.iter()
.map(|&x| F::from_canonical_u8(x))
.collect::<Vec<_>>()
.try_into()
.unwrap();
row.is_real_round = F::ONE;
let w_i_minus_15_rr_7 = row.w_i_minus_15_rr_7.generate_trace(w_i_minus_15, 7);
let w_i_minus_15_rr_18 = row.w_i_minus_15_rr_18.generate_trace(w_i_minus_15, 18);
let w_i_minus_15_rs_3 = row.w_i_minus_15_rs_3.generate_trace(w_i_minus_15, 3);
// s0_inter = (w[i-15] rightrotate 7) xor (w[i-15] rightrotate 18)
let s_0_inter = w_i_minus_15_rr_7 ^ w_i_minus_15_rr_18;
row.s_0_inter = s_0_inter.to_le_bytes().map(F::from_canonical_u8);
// s0 := (w[i-15] rightrotate 7) xor (w[i-15] rightrotate 18) xor (w[i-15] rightshift 3)
let s_0 = s_0_inter ^ w_i_minus_15_rs_3;
row.s_0 = s_0.to_le_bytes().map(F::from_canonical_u8);
let w_i_minus_2_rr_17 = row.w_i_minus_2_rr_17.generate_trace(w_i_minus_2, 17);
let w_i_minus_2_rr_19 = row.w_i_minus_2_rr_19.generate_trace(w_i_minus_2, 19);
let w_i_minus_2_rs_10 = row.w_i_minus_2_rs_10.generate_trace(w_i_minus_2, 10);
// s1_inter = (w[i-2] rightrotate 17) xor (w[i-2] rightrotate 19)
let s_1_inter = w_i_minus_2_rr_17 ^ w_i_minus_2_rr_19;
row.s_1_inter = s_1_inter.to_le_bytes().map(F::from_canonical_u8);
// s1 := (w[i-2] rightrotate 17) xor (w[i-2] rightrotate 19) xor (w[i-2] rightshift 10)
let s_1 = s_1_inter ^ w_i_minus_2_rs_10;
row.s_1 = s_1.to_le_bytes().map(F::from_canonical_u8);
let _ = row.w_i.generate_trace(
s_1,
u32::from_le_bytes(w_i_minus_7),
s_0,
u32::from_le_bytes(w_i_minus_16),
);
row
}
}
impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ShaExtendStark<F, D> {
type EvaluationFrame<FE, P, const D2: usize>
= StarkFrame<P, NUM_SHA_EXTEND_COLUMNS>
where
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>;
type EvaluationFrameTarget = StarkFrame<ExtensionTarget<D>, NUM_SHA_EXTEND_COLUMNS>;
fn eval_packed_generic<FE, P, const D2: usize>(
&self,
vars: &Self::EvaluationFrame<FE, P, D2>,
yield_constr: &mut ConstraintConsumer<P>,
) where
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>,
{
let local_values: &[P; NUM_SHA_EXTEND_COLUMNS] =
vars.get_local_values().try_into().unwrap();
let local_values: &ShaExtendColumnsView<P> = local_values.borrow();
// check the rotation
rotate_right_packed_constraints(
local_values.w_i_minus_15,
&local_values.w_i_minus_15_rr_7,
7,
)
.into_iter()
.for_each(|c| yield_constr.constraint(c));
rotate_right_packed_constraints(
local_values.w_i_minus_15,
&local_values.w_i_minus_15_rr_18,
18,
)
.into_iter()
.for_each(|c| yield_constr.constraint(c));
rotate_right_packed_constraints(
local_values.w_i_minus_2,
&local_values.w_i_minus_2_rr_17,
17,
)
.into_iter()
.for_each(|c| yield_constr.constraint(c));
rotate_right_packed_constraints(
local_values.w_i_minus_2,
&local_values.w_i_minus_2_rr_19,
19,
)
.into_iter()
.for_each(|c| yield_constr.constraint(c));
// check the shift
shift_right_packed_constraints(
local_values.w_i_minus_15,
&local_values.w_i_minus_15_rs_3,
3,
)
.into_iter()
.for_each(|c| yield_constr.constraint(c));
shift_right_packed_constraints(
local_values.w_i_minus_2,
&local_values.w_i_minus_2_rs_10,
10,
)
.into_iter()
.for_each(|c| yield_constr.constraint(c));
// the XOR ops in s_0, s_1 computations are constrained in logic table.
// check the wrapping add: w_i = s_1 + w_i_minus_7 + s_0 + w_i_minus_16
wrapping_add_4_packed_constraints(
local_values.s_1,
local_values.w_i_minus_7,
local_values.s_0,
local_values.w_i_minus_16,
&local_values.w_i,
)
.into_iter()
.for_each(|c| {
let constraint = c * local_values.is_real_round;
yield_constr.constraint(constraint)
});
}
fn eval_ext_circuit(
&self,
builder: &mut CircuitBuilder<F, D>,
vars: &Self::EvaluationFrameTarget,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let local_values: &[ExtensionTarget<D>; NUM_SHA_EXTEND_COLUMNS] =
vars.get_local_values().try_into().unwrap();
let local_values: &ShaExtendColumnsView<ExtensionTarget<D>> = local_values.borrow();
// check the rotation
rotate_right_ext_circuit_constraint(
builder,
local_values.w_i_minus_15,
&local_values.w_i_minus_15_rr_7,
7,
)
.into_iter()
.for_each(|c| yield_constr.constraint(builder, c));
rotate_right_ext_circuit_constraint(
builder,
local_values.w_i_minus_15,
&local_values.w_i_minus_15_rr_18,
18,
)
.into_iter()
.for_each(|c| yield_constr.constraint(builder, c));
rotate_right_ext_circuit_constraint(
builder,
local_values.w_i_minus_2,
&local_values.w_i_minus_2_rr_17,
17,
)
.into_iter()
.for_each(|c| yield_constr.constraint(builder, c));
rotate_right_ext_circuit_constraint(
builder,
local_values.w_i_minus_2,
&local_values.w_i_minus_2_rr_19,
19,
)
.into_iter()
.for_each(|c| yield_constr.constraint(builder, c));
// check the shift
shift_right_ext_circuit_constraints(
builder,
local_values.w_i_minus_15,
&local_values.w_i_minus_15_rs_3,
3,
)
.into_iter()
.for_each(|c| yield_constr.constraint(builder, c));
shift_right_ext_circuit_constraints(
builder,
local_values.w_i_minus_2,
&local_values.w_i_minus_2_rs_10,
10,
)
.into_iter()
.for_each(|c| yield_constr.constraint(builder, c));
// the XOR ops in s_0, s_1 computations are constrainted in logic table.
// check the wrapping add: w_i = s_1 + w_i_minus_7 + s_0 + w_i_minus_16
wrapping_add_4_ext_circuit_constraints(
builder,
local_values.s_1,
local_values.w_i_minus_7,
local_values.s_0,
local_values.w_i_minus_16,
&local_values.w_i,
)
.into_iter()
.for_each(|c| {
let constraint = builder.mul_extension(c, local_values.is_real_round);
yield_constr.constraint(builder, constraint)
});
}
fn constraint_degree(&self) -> usize {
3
}
}
#[cfg(test)]
mod test {
use crate::config::StarkConfig;
use crate::cross_table_lookup::{
Column, CtlData, CtlZData, Filter, GrandProductChallenge, GrandProductChallengeSet,
};
use crate::prover::prove_single_table;
use crate::sha_extend::sha_extend_stark::ShaExtendStark;
use crate::sha_extend_sponge::columns::NUM_EXTEND_INPUT;
use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};
use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV};
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::fri::oracle::PolynomialBatch;
use plonky2::iop::challenger::Challenger;
use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
use plonky2::timed;
use plonky2::util::timing::TimingTree;
fn get_random_input() -> [u8; NUM_EXTEND_INPUT * 4] {
let mut input_values = vec![];
let rand = rand::random::<u32>();
input_values.push(rand.to_le_bytes());
input_values.push((rand + 1).to_le_bytes());
input_values.push((rand + 2).to_le_bytes());
input_values.push((rand + 3).to_le_bytes());
let input_values = input_values.into_iter().flatten().collect::<Vec<_>>();
input_values.try_into().unwrap()
}
#[test]
fn test_correction() -> Result<(), String> {
const D: usize = 2;
type F = GoldilocksField;
type S = ShaExtendStark<F, D>;
let mut input_values = vec![];
input_values.extend((0..4).map(|i| (i as u32).to_le_bytes()));
let input_values = input_values.into_iter().flatten().collect::<Vec<_>>();
let input_values: [u8; 16] = input_values.try_into().unwrap();
let input_and_timestamp = (input_values, 0);
let stark = S::default();
let row = stark.generate_trace_row_for_extend(input_and_timestamp);
// extend phase
let w_i_minus_15 = 0_u32;
let s0 = w_i_minus_15.rotate_right(7) ^ w_i_minus_15.rotate_right(18) ^ (w_i_minus_15 >> 3);
let w_i_minus_2 = 1_u32;
// Compute `s1`.
let s1 = w_i_minus_2.rotate_right(17) ^ w_i_minus_2.rotate_right(19) ^ (w_i_minus_2 >> 10);
let w_i_minus_16 = 2_u32;
let w_i_minus_7 = 3_u32;
// Compute `w_i`.
let w_i = s1
.wrapping_add(w_i_minus_16)
.wrapping_add(s0)
.wrapping_add(w_i_minus_7);
let w_i_bin = w_i.to_le_bytes();
assert_eq!(row.w_i.value, w_i_bin.map(F::from_canonical_u8));
Ok(())
}
#[test]
fn test_stark_degree() -> anyhow::Result<()> {
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
type S = ShaExtendStark<F, D>;
let stark = S {
f: Default::default(),
};
test_stark_low_degree(stark)
}
#[test]
fn test_stark_circuit() -> anyhow::Result<()> {
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
type S = ShaExtendStark<F, D>;
let stark = S {
f: Default::default(),
};
test_stark_circuit_constraints::<F, C, S, D>(stark)
}
#[test]
fn sha_extend_benchmark() -> anyhow::Result<()> {
const NUM_EXTEND: usize = 48;
const D: usize = 2;
type C = PoseidonGoldilocksConfig;
type F = <C as GenericConfig<D>>::F;
type S = ShaExtendStark<F, D>;
let stark = S::default();
let config = StarkConfig::standard_fast_config();
init_logger();
let input: Vec<([u8; NUM_EXTEND_INPUT * 4], usize)> =
(0..NUM_EXTEND).map(|_| (get_random_input(), 0)).collect();
let mut timing = TimingTree::new("prove", log::Level::Debug);
let trace_poly_values = stark.generate_trace(input, 8);
// TODO: Cloning this isn't great; consider having `from_values` accept a reference,
// or having `compute_permutation_z_polys` read trace values from the `PolynomialBatch`.
let cloned_trace_poly_values = timed!(timing, "clone", trace_poly_values.clone());
let trace_commitments = timed!(
timing,
"compute trace commitment",
PolynomialBatch::<F, C, D>::from_values(
cloned_trace_poly_values,
config.fri_config.rate_bits,
false,
config.fri_config.cap_height,
&mut timing,
None,
)
);
let degree = 1 << trace_commitments.degree_log;
// Fake CTL data.
let ctl_z_data = CtlZData {
helper_columns: vec![PolynomialValues::zero(degree)],
z: PolynomialValues::zero(degree),
challenge: GrandProductChallenge {
beta: F::ZERO,
gamma: F::ZERO,
},
columns: vec![],
filter: vec![Some(Filter::new_simple(Column::constant(F::ZERO)))],
};
let ctl_data = CtlData {
zs_columns: vec![ctl_z_data.clone(); config.num_challenges],
};
prove_single_table(
&stark,
&config,
&trace_poly_values,
&trace_commitments,
&ctl_data,
&GrandProductChallengeSet {
challenges: vec![ctl_z_data.challenge; config.num_challenges],
},
&mut Challenger::new(),
&mut timing,
)?;
timing.print();
Ok(())
}
fn init_logger() {
let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "debug"));
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_extend/rotate_right.rs | prover/src/sha_extend/rotate_right.rs | use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
pub struct RotateRightOp<T: Copy> {
pub value: [T; 4],
pub shift: T,
pub carry: T,
}
impl<F: Field> RotateRightOp<F> {
pub fn generate_trace(&mut self, le_input_bytes: [u8; 4], rotation: usize) -> u32 {
let input_u32 = u32::from_le_bytes(le_input_bytes);
let rotation_u32 = (rotation % 32) as u32;
let expected = input_u32.rotate_right(rotation_u32);
let (shift, carry) = shr_carry(input_u32, rotation_u32);
self.shift = F::from_canonical_u32(shift);
self.carry = F::from_canonical_u32(carry);
self.value = expected.to_le_bytes().map(F::from_canonical_u8);
expected
}
}
pub(crate) fn rotate_right_packed_constraints<P: PackedField>(
input_bytes: [P; 4],
rotated_value: &RotateRightOp<P>,
rotation: usize,
) -> Vec<P> {
let mut result = Vec::new();
let rotation_u32 = (rotation % 32) as u32;
let two_pow_8 = P::from(P::Scalar::from_canonical_u32(2u32.pow(8)));
let two_pow_16 = P::from(P::Scalar::from_canonical_u32(2u32.pow(16)));
let two_pow_24 = P::from(P::Scalar::from_canonical_u32(2u32.pow(24)));
let rotated_value_from_bytes = rotated_value.value[0]
+ two_pow_8 * rotated_value.value[1]
+ two_pow_16 * rotated_value.value[2]
+ two_pow_24 * rotated_value.value[3];
let input_value_from_bytes = input_bytes[0]
+ two_pow_8 * input_bytes[1]
+ two_pow_16 * input_bytes[2]
+ two_pow_24 * input_bytes[3];
let carry_multiplier = P::from(P::Scalar::from_canonical_u32(2u32.pow(32 - rotation_u32)));
let shift_multiplier = P::from(P::Scalar::from_canonical_u32(2u32.pow(rotation_u32)));
let constraint =
rotated_value_from_bytes - rotated_value.carry * carry_multiplier - rotated_value.shift;
result.push(constraint);
let constraint =
input_value_from_bytes - rotated_value.shift * shift_multiplier - rotated_value.carry;
result.push(constraint);
result
}
pub(crate) fn rotate_right_ext_circuit_constraint<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
input_bytes: [ExtensionTarget<D>; 4],
rotated_value: &RotateRightOp<ExtensionTarget<D>>,
rotation: usize,
) -> Vec<ExtensionTarget<D>> {
let mut result = Vec::new();
let rotation_u32 = (rotation % 32) as u32;
let two_pow_8 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(8)));
let two_pow_16 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(16)));
let two_pow_24 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(24)));
let tmp = builder.mul_extension(rotated_value.value[1], two_pow_8);
let tmp2 = builder.mul_extension(rotated_value.value[2], two_pow_16);
let tmp3 = builder.mul_extension(rotated_value.value[3], two_pow_24);
let rotated_value_from_bytes =
builder.add_many_extension([rotated_value.value[0], tmp, tmp2, tmp3]);
let tmp = builder.mul_extension(input_bytes[1], two_pow_8);
let tmp2 = builder.mul_extension(input_bytes[2], two_pow_16);
let tmp3 = builder.mul_extension(input_bytes[3], two_pow_24);
let input_value_from_bytes = builder.add_many_extension([input_bytes[0], tmp, tmp2, tmp3]);
let carry_multiplier = builder.constant_extension(F::Extension::from_canonical_u32(
2u32.pow(32 - rotation_u32),
));
let shift_multiplier =
builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(rotation_u32)));
let tmp = builder.mul_extension(rotated_value.carry, carry_multiplier);
let tmp2 = builder.add_extension(tmp, rotated_value.shift);
let constraint = builder.sub_extension(rotated_value_from_bytes, tmp2);
result.push(constraint);
let tmp = builder.mul_extension(rotated_value.shift, shift_multiplier);
let tmp2 = builder.add_extension(tmp, rotated_value.carry);
let constraint = builder.sub_extension(input_value_from_bytes, tmp2);
result.push(constraint);
result
}
/// Shifts a byte to the right and returns both the shifted byte and the bits that carried.
pub const fn shr_carry(input: u32, rotation: u32) -> (u32, u32) {
let c_mod = rotation % 32;
if c_mod != 0 {
let res = input >> c_mod;
let carry = (input << (32 - c_mod)) >> (32 - c_mod);
(res, carry)
} else {
(input, 0u32)
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_extend/shift_right.rs | prover/src/sha_extend/shift_right.rs | use crate::sha_extend::rotate_right::shr_carry;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
pub struct ShiftRightOp<T: Copy> {
pub value: [T; 4],
pub shift: T,
pub carry: T,
}
impl<F: Field> ShiftRightOp<F> {
pub fn generate_trace(&mut self, le_input_bytes: [u8; 4], rotation: usize) -> u32 {
let input_u32 = u32::from_le_bytes(le_input_bytes);
let rotation_u32 = (rotation % 32) as u32;
let expected = input_u32 >> rotation_u32;
let (shift, carry) = shr_carry(input_u32, rotation_u32);
self.shift = F::from_canonical_u32(shift);
self.carry = F::from_canonical_u32(carry);
self.value = expected.to_le_bytes().map(F::from_canonical_u8);
expected
}
}
pub(crate) fn shift_right_packed_constraints<P: PackedField>(
input_bytes: [P; 4],
shifted_value: &ShiftRightOp<P>,
rotation: usize,
) -> Vec<P> {
let mut result = Vec::new();
let rotation_u32 = (rotation % 32) as u32;
let two_pow_8 = P::from(P::Scalar::from_canonical_u32(2u32.pow(8)));
let two_pow_16 = P::from(P::Scalar::from_canonical_u32(2u32.pow(16)));
let two_pow_24 = P::from(P::Scalar::from_canonical_u32(2u32.pow(24)));
let shifted_value_from_bytes = shifted_value.value[0]
+ two_pow_8 * shifted_value.value[1]
+ two_pow_16 * shifted_value.value[2]
+ two_pow_24 * shifted_value.value[3];
let input_value_from_bytes = input_bytes[0]
+ two_pow_8 * input_bytes[1]
+ two_pow_16 * input_bytes[2]
+ two_pow_24 * input_bytes[3];
let shift_multiplier = P::from(P::Scalar::from_canonical_u32(2u32.pow(rotation_u32)));
let constraint = shifted_value_from_bytes - shifted_value.shift;
result.push(constraint);
let constraint =
input_value_from_bytes - shifted_value.shift * shift_multiplier - shifted_value.carry;
result.push(constraint);
result
}
pub(crate) fn shift_right_ext_circuit_constraints<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
input_bytes: [ExtensionTarget<D>; 4],
shifted_value: &ShiftRightOp<ExtensionTarget<D>>,
rotation: usize,
) -> Vec<ExtensionTarget<D>> {
let mut result = Vec::new();
let rotation_u32 = (rotation % 32) as u32;
let two_pow_8 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(8)));
let two_pow_16 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(16)));
let two_pow_24 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(24)));
let tmp = builder.mul_extension(shifted_value.value[1], two_pow_8);
let tmp2 = builder.mul_extension(shifted_value.value[2], two_pow_16);
let tmp3 = builder.mul_extension(shifted_value.value[3], two_pow_24);
let rotated_value_from_bytes =
builder.add_many_extension([shifted_value.value[0], tmp, tmp2, tmp3]);
let tmp = builder.mul_extension(input_bytes[1], two_pow_8);
let tmp2 = builder.mul_extension(input_bytes[2], two_pow_16);
let tmp3 = builder.mul_extension(input_bytes[3], two_pow_24);
let input_value_from_bytes = builder.add_many_extension([input_bytes[0], tmp, tmp2, tmp3]);
let shift_multiplier =
builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(rotation_u32)));
let constraint = builder.sub_extension(rotated_value_from_bytes, shifted_value.shift);
result.push(constraint);
let tmp = builder.mul_extension(shifted_value.shift, shift_multiplier);
let tmp2 = builder.add_extension(tmp, shifted_value.carry);
let constraint = builder.sub_extension(input_value_from_bytes, tmp2);
result.push(constraint);
result
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_extend/columns.rs | prover/src/sha_extend/columns.rs | use crate::sha_extend::rotate_right::RotateRightOp;
use crate::sha_extend::shift_right::ShiftRightOp;
use crate::sha_extend::wrapping_add_4::WrappingAdd4Op;
use crate::util::{indices_arr, transmute_no_compile_time_size_checks};
use std::borrow::{Borrow, BorrowMut};
use std::mem::transmute;
pub(crate) struct ShaExtendColumnsView<T: Copy> {
/// Output
pub w_i: WrappingAdd4Op<T>, // w_i_inter_1 + w_i_minus_16
/// Input in le bytes order
pub w_i_minus_15: [T; 4],
pub w_i_minus_2: [T; 4],
pub w_i_minus_16: [T; 4],
pub w_i_minus_7: [T; 4],
/// Intermediate values
pub s_0_inter: [T; 4],
pub s_0: [T; 4],
pub s_1_inter: [T; 4],
pub s_1: [T; 4],
pub w_i_minus_15_rr_7: RotateRightOp<T>,
pub w_i_minus_15_rr_18: RotateRightOp<T>,
pub w_i_minus_2_rr_17: RotateRightOp<T>,
pub w_i_minus_2_rr_19: RotateRightOp<T>,
pub w_i_minus_2_rs_10: ShiftRightOp<T>,
pub w_i_minus_15_rs_3: ShiftRightOp<T>,
/// The timestamp at which inputs should be read from memory.
pub timestamp: T,
pub is_real_round: T,
}
pub const NUM_SHA_EXTEND_COLUMNS: usize = size_of::<ShaExtendColumnsView<u8>>();
impl<T: Copy> From<[T; NUM_SHA_EXTEND_COLUMNS]> for ShaExtendColumnsView<T> {
fn from(value: [T; NUM_SHA_EXTEND_COLUMNS]) -> Self {
unsafe { transmute_no_compile_time_size_checks(value) }
}
}
impl<T: Copy> From<ShaExtendColumnsView<T>> for [T; NUM_SHA_EXTEND_COLUMNS] {
fn from(value: ShaExtendColumnsView<T>) -> Self {
unsafe { transmute_no_compile_time_size_checks(value) }
}
}
impl<T: Copy> Borrow<ShaExtendColumnsView<T>> for [T; NUM_SHA_EXTEND_COLUMNS] {
fn borrow(&self) -> &ShaExtendColumnsView<T> {
unsafe { transmute(self) }
}
}
impl<T: Copy> BorrowMut<ShaExtendColumnsView<T>> for [T; NUM_SHA_EXTEND_COLUMNS] {
fn borrow_mut(&mut self) -> &mut ShaExtendColumnsView<T> {
unsafe { transmute(self) }
}
}
impl<T: Copy> Borrow<[T; NUM_SHA_EXTEND_COLUMNS]> for ShaExtendColumnsView<T> {
fn borrow(&self) -> &[T; NUM_SHA_EXTEND_COLUMNS] {
unsafe { transmute(self) }
}
}
impl<T: Copy> BorrowMut<[T; NUM_SHA_EXTEND_COLUMNS]> for ShaExtendColumnsView<T> {
fn borrow_mut(&mut self) -> &mut [T; NUM_SHA_EXTEND_COLUMNS] {
unsafe { transmute(self) }
}
}
impl<T: Copy + Default> Default for ShaExtendColumnsView<T> {
fn default() -> Self {
[T::default(); NUM_SHA_EXTEND_COLUMNS].into()
}
}
const fn make_col_map() -> ShaExtendColumnsView<usize> {
let indices_arr = indices_arr::<NUM_SHA_EXTEND_COLUMNS>();
unsafe {
transmute::<[usize; NUM_SHA_EXTEND_COLUMNS], ShaExtendColumnsView<usize>>(indices_arr)
}
}
pub(crate) const SHA_EXTEND_COL_MAP: ShaExtendColumnsView<usize> = make_col_map();
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_extend/mod.rs | prover/src/sha_extend/mod.rs | pub mod columns;
pub mod logic;
pub mod rotate_right;
pub mod sha_extend_stark;
mod shift_right;
mod wrapping_add_4;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_extend/wrapping_add_4.rs | prover/src/sha_extend/wrapping_add_4.rs | use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
pub struct WrappingAdd4Op<T> {
/// The result of `a + b + c + d`.
pub value: [T; 4],
/// The carry. Each digit is carry == 0, 1, 2, or 3.
pub carry: [T; 4],
}
impl<F: Field> WrappingAdd4Op<F> {
pub fn generate_trace(&mut self, a: u32, b: u32, c: u32, d: u32) -> u32 {
let expected = a.wrapping_add(b).wrapping_add(c).wrapping_add(d);
let overflowed_result = a as u64 + b as u64 + c as u64 + d as u64;
let carry = overflowed_result >> 32;
assert_eq!(carry * 2_u64.pow(32) + expected as u64, overflowed_result);
assert!(carry < 4);
self.carry = [F::ZERO; 4];
self.carry[carry as usize] = F::ONE;
self.value = expected.to_le_bytes().map(F::from_canonical_u8);
expected
}
}
/// Packed-field constraints enforcing `cols.value = a + b + c + d (mod 2^32)`,
/// with the carry witnessed one-hot in `cols.carry`. Returns the constraint
/// evaluations in the same order as the recursive-circuit version.
pub(crate) fn wrapping_add_4_packed_constraints<P: PackedField>(
    a: [P; 4],
    b: [P; 4],
    c: [P; 4],
    d: [P; 4],
    cols: &WrappingAdd4Op<P>,
) -> Vec<P> {
    // Constant 2^e as a packed field element.
    let pow = |e: u32| P::from(P::Scalar::from_canonical_u64(1u64 << e));
    // Recombine four byte columns into one field element (little-endian).
    let reduce =
        |bytes: [P; 4]| bytes[0] + bytes[1] * pow(8) + bytes[2] * pow(16) + bytes[3] * pow(24);
    let mut constraints = Vec::new();
    // Each one-hot carry indicator is boolean...
    for &flag in cols.carry.iter() {
        constraints.push(flag * (P::ONES - flag));
    }
    // ...and exactly one indicator is set.
    constraints.push(cols.carry.iter().copied().sum::<P>() - P::ONES);
    // Decode the carry value from its one-hot encoding.
    let two = P::from(P::Scalar::from_canonical_u32(2));
    let three = P::from(P::Scalar::from_canonical_u32(3));
    let carry = cols.carry[1] + cols.carry[2] * two + cols.carry[3] * three;
    // Column-wise operand sums, recombined into the unreduced sum, must match
    // the reduced result plus carry * 2^32.
    let lane = |i: usize| a[i] + b[i] + c[i] + d[i];
    let unreduced = reduce([lane(0), lane(1), lane(2), lane(3)]);
    let reduced = reduce(cols.value);
    constraints.push(unreduced - carry * pow(32) - reduced);
    constraints
}
/// Recursive-circuit counterpart of `wrapping_add_4_packed_constraints`:
/// enforces `cols.value = a + b + c + d (mod 2^32)` with a one-hot carry.
///
/// Fix: the original computed `mul_extension(cols.carry[1], one)`, a no-op
/// multiplication by one that spent a circuit gate for nothing; `cols.carry[1]`
/// is used directly instead (same value, one fewer gate).
pub(crate) fn wrapping_add_4_ext_circuit_constraints<
    F: RichField + Extendable<D>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    a: [ExtensionTarget<D>; 4],
    b: [ExtensionTarget<D>; 4],
    c: [ExtensionTarget<D>; 4],
    d: [ExtensionTarget<D>; 4],
    cols: &WrappingAdd4Op<ExtensionTarget<D>>,
) -> Vec<ExtensionTarget<D>> {
    let mut result = vec![];
    let one = builder.one_extension();
    let two = builder.constant_extension(F::Extension::from_canonical_u32(2));
    let three = builder.constant_extension(F::Extension::from_canonical_u32(3));
    let two_pow_8 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(8)));
    let two_pow_16 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(16)));
    let two_pow_24 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(24)));
    let two_pow_32 = builder.constant_extension(F::Extension::from_canonical_u64(2u64.pow(32)));
    // Little-endian recombination of the result bytes.
    let tmp = builder.mul_extension(cols.value[1], two_pow_8);
    let tmp2 = builder.mul_extension(cols.value[2], two_pow_16);
    let tmp3 = builder.mul_extension(cols.value[3], two_pow_24);
    let wrapping_added_result = builder.add_many_extension([cols.value[0], tmp, tmp2, tmp3]);
    // Each carry indicator is boolean, and exactly one of them is 1.
    for i in 0..4 {
        let tmp = builder.sub_extension(one, cols.carry[i]);
        result.push(builder.mul_extension(cols.carry[i], tmp));
    }
    let tmp = builder.add_many_extension(cols.carry);
    result.push(builder.sub_extension(tmp, one));
    // Decode the carry from its one-hot encoding: carry[1]*1 + carry[2]*2 +
    // carry[3]*3. The *1 term is `cols.carry[1]` itself — no gate needed.
    let tmp2 = builder.mul_extension(cols.carry[2], two);
    let tmp3 = builder.mul_extension(cols.carry[3], three);
    let carry = builder.add_many_extension([cols.carry[1], tmp2, tmp3]);
    // Wrapping-add constraint: the unreduced sum of the operands equals the
    // reduced result plus carry * 2^32.
    let byte_0 = builder.add_many_extension([a[0], b[0], c[0], d[0]]);
    let byte_1 = builder.add_many_extension([a[1], b[1], c[1], d[1]]);
    let byte_2 = builder.add_many_extension([a[2], b[2], c[2], d[2]]);
    let byte_3 = builder.add_many_extension([a[3], b[3], c[3], d[3]]);
    let tmp1 = builder.mul_extension(byte_1, two_pow_8);
    let tmp2 = builder.mul_extension(byte_2, two_pow_16);
    let tmp3 = builder.mul_extension(byte_3, two_pow_24);
    let overflowed_result = builder.add_many_extension([byte_0, tmp1, tmp2, tmp3]);
    let carry_mul = builder.mul_extension(carry, two_pow_32);
    let computed_overflowed_result = builder.add_extension(carry_mul, wrapping_added_result);
    let constraint = builder.sub_extension(overflowed_result, computed_overflowed_result);
    result.push(constraint);
    result
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_extend/logic.rs | prover/src/sha_extend/logic.rs | pub(crate) fn get_input_range_4(i: usize) -> std::ops::Range<usize> {
(i * 4)..(4 + i * 4)
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/keccak_sponge/columns.rs | prover/src/keccak_sponge/columns.rs | use std::borrow::{Borrow, BorrowMut};
use std::mem::{size_of, transmute};
use crate::util::{indices_arr, transmute_no_compile_time_size_checks};
// Keccak-f[1600] state size in bytes, and the same expressed in u32 limbs.
pub(crate) const KECCAK_WIDTH_BYTES: usize = 200;
pub(crate) const KECCAK_WIDTH_U32S: usize = KECCAK_WIDTH_BYTES / 4;
// State limbs excluding the digest limbs (the digest part of the updated
// state is stored as bytes instead of u32 limbs; see the columns view below).
pub(crate) const KECCAK_WIDTH_MINUS_DIGEST_U32S: usize =
    (KECCAK_WIDTH_BYTES - KECCAK_DIGEST_BYTES) / 4;
// Sponge rate: bytes absorbed per permutation, and its u32-limb count.
pub(crate) const KECCAK_RATE_BYTES: usize = 136;
pub(crate) const KECCAK_RATE_U32S: usize = KECCAK_RATE_BYTES / 4;
// Sponge capacity (state width minus rate), and its u32-limb count.
pub(crate) const KECCAK_CAPACITY_BYTES: usize = 64;
pub(crate) const KECCAK_CAPACITY_U32S: usize = KECCAK_CAPACITY_BYTES / 4;
// Output digest size: 32 bytes / 8 u32 limbs.
pub(crate) const KECCAK_DIGEST_BYTES: usize = 32;
pub(crate) const KECCAK_DIGEST_U32S: usize = KECCAK_DIGEST_BYTES / 4;
/// Flat, `#[repr(C)]` view of one row of the Keccak sponge STARK trace.
/// Instantiated with field elements for trace values, and with `usize` for
/// the column-index map (`KECCAK_SPONGE_COL_MAP`).
#[repr(C)]
#[derive(Eq, PartialEq, Debug)]
pub(crate) struct KeccakSpongeColumnsView<T: Copy> {
    /// 1 if this row represents a full input block, i.e. one in which each byte is an input byte,
    /// not a padding byte; 0 otherwise.
    pub is_full_input_block: T,
    // The base address at which we will read the input block.
    pub context: T,
    pub segment: T,
    // Virtual addresses, one per u32 word of the rate being read.
    pub virt: [T; KECCAK_RATE_U32S],
    /// The timestamp at which inputs should be read from memory.
    pub timestamp: T,
    /// The length of the original input, in bytes.
    pub len: T,
    /// The number of input bytes that have already been absorbed prior to this block.
    pub already_absorbed_bytes: T,
    /// If this row represents a final block row, the `i`th entry should be 1 if the final chunk of
    /// input has length `i` (in other words if `len - already_absorbed == i`), otherwise 0.
    ///
    /// If this row represents a full input block, this should contain all 0s.
    pub is_final_input_len: [T; KECCAK_RATE_BYTES],
    /// The initial rate part of the sponge, at the start of this step.
    pub original_rate_u32s: [T; KECCAK_RATE_U32S],
    /// The capacity part of the sponge, encoded as 32-bit chunks, at the start of this step.
    pub original_capacity_u32s: [T; KECCAK_CAPACITY_U32S],
    /// The block being absorbed, which may contain input bytes and/or padding bytes.
    pub block_bytes: [T; KECCAK_RATE_BYTES],
    /// The rate part of the sponge, encoded as 32-bit chunks, after the current block is xor'd in,
    /// but before the permutation is applied.
    pub xored_rate_u32s: [T; KECCAK_RATE_U32S],
    /// The entire state (rate + capacity) of the sponge, encoded as 32-bit chunks, after the
    /// permutation is applied, minus the first limbs where the digest is extracted from.
    /// Those missing limbs can be recomputed from their corresponding bytes stored in
    /// `updated_digest_state_bytes`.
    pub partial_updated_state_u32s: [T; KECCAK_WIDTH_MINUS_DIGEST_U32S],
    /// The first part of the state of the sponge, seen as bytes, after the permutation is applied.
    /// This also represents the output digest of the Keccak sponge during the squeezing phase.
    pub updated_digest_state_bytes: [T; KECCAK_DIGEST_BYTES],
}
// `u8` is guaranteed to have a `size_of` of 1, so the byte size of the `u8`
// instantiation of the view equals its number of columns.
pub const NUM_KECCAK_SPONGE_COLUMNS: usize = size_of::<KeccakSpongeColumnsView<u8>>();
// Conversions between the structured view and a flat column array. All of
// these rely on the view being `#[repr(C)]` with exactly
// `NUM_KECCAK_SPONGE_COLUMNS` elements of type `T`.
impl<T: Copy> From<[T; NUM_KECCAK_SPONGE_COLUMNS]> for KeccakSpongeColumnsView<T> {
    fn from(value: [T; NUM_KECCAK_SPONGE_COLUMNS]) -> Self {
        // SAFETY: `#[repr(C)]` view over the same number of `T` elements.
        unsafe { transmute_no_compile_time_size_checks(value) }
    }
}
impl<T: Copy> From<KeccakSpongeColumnsView<T>> for [T; NUM_KECCAK_SPONGE_COLUMNS] {
    fn from(value: KeccakSpongeColumnsView<T>) -> Self {
        // SAFETY: inverse of the conversion above; identical layout.
        unsafe { transmute_no_compile_time_size_checks(value) }
    }
}
impl<T: Copy> Borrow<KeccakSpongeColumnsView<T>> for [T; NUM_KECCAK_SPONGE_COLUMNS] {
    fn borrow(&self) -> &KeccakSpongeColumnsView<T> {
        // SAFETY: identical layout, so reinterpreting the reference is sound.
        unsafe { transmute(self) }
    }
}
impl<T: Copy> BorrowMut<KeccakSpongeColumnsView<T>> for [T; NUM_KECCAK_SPONGE_COLUMNS] {
    fn borrow_mut(&mut self) -> &mut KeccakSpongeColumnsView<T> {
        // SAFETY: identical layout, so reinterpreting the reference is sound.
        unsafe { transmute(self) }
    }
}
impl<T: Copy> Borrow<[T; NUM_KECCAK_SPONGE_COLUMNS]> for KeccakSpongeColumnsView<T> {
    fn borrow(&self) -> &[T; NUM_KECCAK_SPONGE_COLUMNS] {
        // SAFETY: identical layout, so reinterpreting the reference is sound.
        unsafe { transmute(self) }
    }
}
impl<T: Copy> BorrowMut<[T; NUM_KECCAK_SPONGE_COLUMNS]> for KeccakSpongeColumnsView<T> {
    fn borrow_mut(&mut self) -> &mut [T; NUM_KECCAK_SPONGE_COLUMNS] {
        // SAFETY: identical layout, so reinterpreting the reference is sound.
        unsafe { transmute(self) }
    }
}
impl<T: Copy + Default> Default for KeccakSpongeColumnsView<T> {
    /// A row with every column set to `T::default()` — used e.g. for
    /// dummy/padding rows (presumably all zeros for field elements).
    fn default() -> Self {
        [T::default(); NUM_KECCAK_SPONGE_COLUMNS].into()
    }
}
/// Builds the column-index map: each field of `KeccakSpongeColumnsView` is
/// assigned its flat column index in `0..NUM_KECCAK_SPONGE_COLUMNS`.
const fn make_col_map() -> KeccakSpongeColumnsView<usize> {
    let indices_arr = indices_arr::<NUM_KECCAK_SPONGE_COLUMNS>();
    // SAFETY: the view is `#[repr(C)]` over exactly
    // `NUM_KECCAK_SPONGE_COLUMNS` `usize` elements, matching the array.
    unsafe {
        transmute::<[usize; NUM_KECCAK_SPONGE_COLUMNS], KeccakSpongeColumnsView<usize>>(indices_arr)
    }
}
/// Maps each trace column of the Keccak sponge STARK to its flat index.
pub(crate) const KECCAK_SPONGE_COL_MAP: KeccakSpongeColumnsView<usize> = make_col_map();
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/keccak_sponge/mod.rs | prover/src/keccak_sponge/mod.rs | //! The Keccak sponge STARK is used to hash a variable amount of data which is read from memory.
//!
//! It connects to the memory STARK to read input data, and to the Keccak-f STARK to evaluate the
//! permutation at each absorption step.
pub mod columns;
pub mod keccak_sponge_stark;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/keccak_sponge/keccak_sponge_stark.rs | prover/src/keccak_sponge/keccak_sponge_stark.rs | use std::borrow::Borrow;
use std::cmp::min;
use std::iter::{once, repeat};
use std::marker::PhantomData;
use std::mem::size_of;
use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2_util::ceil_div_usize;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::kernel::keccak_util::keccakf_u32s;
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::keccak_sponge::columns::*;
use crate::memory::segments::Segment;
use crate::stark::Stark;
use crate::util::trace_rows_to_poly_values;
use crate::witness::memory::MemoryAddress;
// Each logic CTL carries 4 bytes — i.e. one u32 limb — per lookup.
pub const U8S_PER_CTL: usize = 4;
pub const U32S_PER_CTL: usize = 1;
/// Columns the CPU-side looker reads from this table: the operation's
/// context/segment/address/len/timestamp followed by the eight digest limbs.
pub(crate) fn ctl_looked_data<F: Field>() -> Vec<Column<F>> {
    let cols = KECCAK_SPONGE_COL_MAP;
    // Digest limbs, highest-index chunk first; each 4-byte chunk is combined
    // with weights 2^24, 2^16, 2^8, 2^0 in byte order.
    let digest_cols = (0..8).rev().map(|limb| {
        let chunk = &cols.updated_digest_state_bytes[limb * 4..limb * 4 + 4];
        Column::linear_combination(
            chunk
                .iter()
                .enumerate()
                .map(|(byte, &col)| (col, F::from_canonical_u64(1 << (24 - 8 * byte)))),
        )
    });
    Column::singles([
        cols.context,
        cols.segment,
        cols.virt[0],
        cols.len,
        cols.timestamp,
    ])
    .chain(digest_cols)
    .collect()
}
pub(crate) fn ctl_looking_keccak_inputs<F: Field>() -> Vec<Column<F>> {
let cols = KECCAK_SPONGE_COL_MAP;
let mut res: Vec<_> = Column::singles(
[
cols.xored_rate_u32s.as_slice(),
&cols.original_capacity_u32s,
]
.concat(),
)
.collect();
res.push(Column::single(cols.timestamp));
res
}
/// Columns sent to the Keccak-f table as permutation output.
/// The digest's 32-bit limbs are reassembled from their byte columns
/// (little-endian), then followed by the remaining state limbs and the
/// timestamp.
pub(crate) fn ctl_looking_keccak_outputs<F: Field>() -> Vec<Column<F>> {
    let cols = KECCAK_SPONGE_COL_MAP;
    let mut res = Vec::new();
    for limb_bytes in cols.updated_digest_state_bytes.chunks_exact(4) {
        res.push(Column::linear_combination(
            limb_bytes
                .iter()
                .enumerate()
                .map(|(i, &b)| (b, F::from_canonical_usize(1 << (8 * i)))),
        ));
    }
    res.extend(Column::singles(&cols.partial_updated_state_u32s));
    res.push(Column::single(cols.timestamp));
    res
}
/// CTL data for the memory read covering the `i`th input byte.
/// Memory is addressed per u32 word here: byte `i` lives in word `i / 4`.
pub(crate) fn ctl_looking_memory<F: Field>(i: usize) -> Vec<Column<F>> {
    let cols = KECCAK_SPONGE_COL_MAP;
    let mut res = vec![Column::constant(F::ONE)]; // is_read
    res.extend(Column::singles([cols.context, cols.segment]));
    // Word-oriented addressing; an earlier per-byte addressing scheme is kept
    // below for reference.
    /*
    res.push(Column::linear_combination_with_constant(
        [(cols.virt, F::ONE), (cols.already_absorbed_bytes, F::ONE)],
        F::from_canonical_usize(i),
    ));
    */
    res.push(Column::single(cols.virt[i / 4]));
    // The u32 word containing the `i`th input byte, recombined from its byte
    // columns. NOTE(review): the bytes are fed to `le_bytes` in reversed
    // order (start+3 .. start), i.e. the word is effectively big-endian over
    // the block bytes — confirm this matches the memory table's encoding.
    let start = (i / 4) * 4;
    let lc: Column<F> = Column::le_bytes([
        cols.block_bytes[start + 3],
        cols.block_bytes[start + 2],
        cols.block_bytes[start + 1],
        cols.block_bytes[start],
    ]);
    res.push(lc);
    // Leftover from the byte-oriented scheme, where higher limbs were zeroed.
    // res.extend((1..8).map(|_| Column::zero()));
    res.push(Column::single(cols.timestamp));
    // Must line up column-for-column with the memory table's CTL schema.
    assert_eq!(
        res.len(),
        crate::memory::memory_stark::ctl_data::<F>().len()
    );
    res
}
/// Number of logic-table CTLs needed to XOR an entire rate block:
/// `KECCAK_RATE_BYTES / U8S_PER_CTL` rounded up (136 / 4 = 34).
pub(crate) fn num_logic_ctls() -> usize {
    ceil_div_usize(KECCAK_RATE_BYTES, U8S_PER_CTL)
}
/// CTL for performing the `i`th logic CTL. We need to XOR all
/// `KECCAK_RATE_BYTES` (= 136) block bytes into the rate, and each logic CTL
/// covers `U8S_PER_CTL` (= 4) bytes — one u32 limb — so there are
/// `num_logic_ctls()` (= 34) such CTLs.
pub(crate) fn ctl_looking_logic<F: Field>(i: usize) -> Vec<Column<F>> {
    debug_assert!(i < num_logic_ctls());
    let cols = KECCAK_SPONGE_COL_MAP;
    let mut res = vec![
        // NOTE(review): presumably this constant encodes the logic table's
        // XOR opcode — confirm against the logic STARK's operation encoding.
        Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
    ];
    // Input 0 contains some of the sponge's original rate chunks. If this is the last CTL, we won't
    // need to use all of the CTL's inputs, so we will pass some zeros.
    res.extend(
        Column::singles(&cols.original_rate_u32s[i * U32S_PER_CTL..])
            .chain(repeat(Column::zero()))
            .take(U32S_PER_CTL),
    );
    // Input 1 contains some of block's chunks. Again, for the last CTL it will include some zeros.
    res.extend(
        cols.block_bytes[i * U8S_PER_CTL..]
            .chunks(size_of::<u32>())
            .map(|chunk| Column::le_bytes(chunk))
            .chain(repeat(Column::zero()))
            .take(U32S_PER_CTL),
    );
    // The output contains the XOR'd rate part.
    res.extend(
        Column::singles(&cols.xored_rate_u32s[i * U32S_PER_CTL..])
            .chain(repeat(Column::zero()))
            .take(U32S_PER_CTL),
    );
    res
}
/// Filter for the looked (CPU-facing) CTL: active only on final-block rows.
pub(crate) fn ctl_looked_filter<F: Field>() -> Filter<F> {
    // The CPU table is only interested in our final-block rows, since those contain the final
    // sponge output.
    Filter::new_simple(Column::sum(KECCAK_SPONGE_COL_MAP.is_final_input_len))
}
/// CTL filter for reading the `i`th byte of input from memory.
pub(crate) fn ctl_looking_memory_filter<F: Field>(i: usize) -> Filter<F> {
    // We perform the `i`th read if either
    // - this is a full input block, or
    // - this is a final block of length `i` or greater
    let cols = KECCAK_SPONGE_COL_MAP;
    if i == KECCAK_RATE_BYTES - 1 {
        // The last rate byte is input only on full blocks: on a final block
        // `is_final_input_len[i + 1..]` would be empty (that slot holds
        // padding, never input).
        Filter::new_simple(Column::single(cols.is_full_input_block))
    } else {
        Filter::new_simple(Column::sum(
            once(&cols.is_full_input_block).chain(&cols.is_final_input_len[i + 1..]),
        ))
    }
}
/// CTL filter for looking at XORs in the logic table.
/// Active on every non-dummy row (full-input or final block).
pub(crate) fn ctl_looking_logic_filter<F: Field>() -> Filter<F> {
    let cols = KECCAK_SPONGE_COL_MAP;
    Filter::new_simple(Column::sum(
        once(&cols.is_full_input_block).chain(&cols.is_final_input_len),
    ))
}
/// CTL filter for the Keccak-f permutation lookup: every non-dummy row
/// performs exactly one permutation (same filter as the logic CTL).
pub(crate) fn ctl_looking_keccak_filter<F: Field>() -> Filter<F> {
    let cols = KECCAK_SPONGE_COL_MAP;
    Filter::new_simple(Column::sum(
        once(&cols.is_full_input_block).chain(&cols.is_final_input_len),
    ))
}
/// Information about a Keccak sponge operation needed for witness generation.
#[derive(Clone, Debug)]
pub(crate) struct KeccakSpongeOp {
    /// The base address at which inputs are read — one `MemoryAddress` per
    /// u32 word of input (witness generation indexes this per 4 input bytes).
    pub(crate) base_address: Vec<MemoryAddress>,
    /// The timestamp at which inputs are read.
    pub(crate) timestamp: usize,
    /// The input that was read.
    pub(crate) input: Vec<u8>,
}
/// STARK proving correct absorption of variable-length input into the Keccak
/// sponge; it connects to the memory, logic and Keccak-f tables via CTLs.
#[derive(Copy, Clone, Default)]
pub struct KeccakSpongeStark<F, const D: usize> {
    // Zero-sized marker tying the stark to its field type.
    f: PhantomData<F>,
}
impl<F: RichField + Extendable<D>, const D: usize> KeccakSpongeStark<F, D> {
    /// Converts the given sponge operations into trace polynomials, one
    /// `PolynomialValues` per column, padded to at least `min_rows` rows.
    pub(crate) fn generate_trace(
        &self,
        operations: Vec<KeccakSpongeOp>,
        min_rows: usize,
    ) -> Vec<PolynomialValues<F>> {
        // Generate the witness row-wise.
        let trace_rows = self.generate_trace_rows(operations, min_rows);
        trace_rows_to_poly_values(trace_rows)
    }
    /// Generates all trace rows: one row per absorbed rate-sized block per
    /// operation, padded with dummy rows to a power of two (>= `min_rows`).
    fn generate_trace_rows(
        &self,
        operations: Vec<KeccakSpongeOp>,
        min_rows: usize,
    ) -> Vec<[F; NUM_KECCAK_SPONGE_COLUMNS]> {
        // Each op contributes `len / RATE` full-block rows plus one final row.
        let base_len: usize = operations
            .iter()
            .map(|op| op.input.len() / KECCAK_RATE_BYTES + 1)
            .sum();
        let mut rows = Vec::with_capacity(base_len.max(min_rows).next_power_of_two());
        for op in operations {
            rows.extend(self.generate_rows_for_op(op));
        }
        let padded_rows = rows.len().max(min_rows).next_power_of_two();
        for _ in rows.len()..padded_rows {
            rows.push(self.generate_padding_row());
        }
        rows
    }
    /// Generates the rows for a single operation: one row per full rate-sized
    /// block, then a final (padded) row, threading the sponge state through.
    fn generate_rows_for_op(&self, op: KeccakSpongeOp) -> Vec<[F; NUM_KECCAK_SPONGE_COLUMNS]> {
        let mut rows = Vec::with_capacity(op.input.len() / KECCAK_RATE_BYTES + 1);
        let mut sponge_state = [0u32; KECCAK_WIDTH_U32S];
        let mut input_blocks = op.input.chunks_exact(KECCAK_RATE_BYTES);
        let mut already_absorbed_bytes = 0;
        for block in input_blocks.by_ref() {
            let row = self.generate_full_input_row(
                &op,
                already_absorbed_bytes,
                sponge_state,
                block.try_into().unwrap(),
            );
            // The digest limbs are stored in the row as bytes; recombine them
            // (little-endian) to carry the sponge state to the next row.
            sponge_state[..KECCAK_DIGEST_U32S]
                .iter_mut()
                .zip(row.updated_digest_state_bytes.chunks_exact(4))
                .for_each(|(s, bs)| {
                    *s = bs
                        .iter()
                        .enumerate()
                        .map(|(i, b)| (b.to_canonical_u64() as u32) << (8 * i))
                        .sum();
                });
            // The remaining limbs are stored directly as u32 values.
            sponge_state[KECCAK_DIGEST_U32S..]
                .iter_mut()
                .zip(row.partial_updated_state_u32s)
                .for_each(|(s, x)| *s = x.to_canonical_u64() as u32);
            rows.push(row.into());
            already_absorbed_bytes += KECCAK_RATE_BYTES;
        }
        rows.push(
            self.generate_final_row(
                &op,
                already_absorbed_bytes,
                sponge_state,
                input_blocks.remainder(),
            )
            .into(),
        );
        rows
    }
    /// Builds one row absorbing a full rate-sized block of input bytes.
    fn generate_full_input_row(
        &self,
        op: &KeccakSpongeOp,
        already_absorbed_bytes: usize,
        sponge_state: [u32; KECCAK_WIDTH_U32S],
        block: [u8; KECCAK_RATE_BYTES],
    ) -> KeccakSpongeColumnsView<F> {
        let mut row = KeccakSpongeColumnsView {
            is_full_input_block: F::ONE,
            ..Default::default()
        };
        row.block_bytes = block.map(F::from_canonical_u8);
        Self::generate_common_fields(&mut row, op, already_absorbed_bytes, sponge_state);
        row
    }
    /// Builds the final row for an operation, absorbing the remaining
    /// (shorter-than-rate) input with Keccak's pad10*1 padding applied.
    fn generate_final_row(
        &self,
        op: &KeccakSpongeOp,
        already_absorbed_bytes: usize,
        sponge_state: [u32; KECCAK_WIDTH_U32S],
        final_inputs: &[u8],
    ) -> KeccakSpongeColumnsView<F> {
        assert_eq!(already_absorbed_bytes + final_inputs.len(), op.input.len());
        let mut row = KeccakSpongeColumnsView::default();
        for (block_byte, input_byte) in row.block_bytes.iter_mut().zip(final_inputs) {
            *block_byte = F::from_canonical_u8(*input_byte);
        }
        // pad10*1 rule
        if final_inputs.len() == KECCAK_RATE_BYTES - 1 {
            // Both 1s are placed in the same byte.
            row.block_bytes[final_inputs.len()] = F::from_canonical_u8(0b10000001);
        } else {
            row.block_bytes[final_inputs.len()] = F::ONE;
            row.block_bytes[KECCAK_RATE_BYTES - 1] = F::from_canonical_u8(0b10000000);
        }
        // One-hot mark: this is a final row whose input chunk has this length.
        row.is_final_input_len[final_inputs.len()] = F::ONE;
        Self::generate_common_fields(&mut row, op, already_absorbed_bytes, sponge_state);
        row
    }
    /// Generate fields that are common to both full-input-block rows and final-block rows.
    /// Also updates the sponge state with a single absorption.
    fn generate_common_fields(
        row: &mut KeccakSpongeColumnsView<F>,
        op: &KeccakSpongeOp,
        already_absorbed_bytes: usize,
        mut sponge_state: [u32; KECCAK_WIDTH_U32S],
    ) {
        // `base_address` holds one address per u32 word of input; select the
        // word addresses covered by this block and zero-pad to the full rate.
        let idx = already_absorbed_bytes / 4;
        let end_index = min(
            (already_absorbed_bytes + KECCAK_RATE_BYTES) / 4,
            op.base_address.len(),
        );
        let mut virt = (idx..end_index)
            .map(|i| op.base_address[i].virt)
            .collect_vec();
        virt.resize(KECCAK_RATE_U32S, 0);
        let virt: [usize; KECCAK_RATE_U32S] = virt.try_into().unwrap();
        row.context = F::from_canonical_usize(op.base_address[0].context);
        // NOTE(review): indexing by `Segment::Code as usize` looks odd —
        // presumably this is 0, matching the `[0]` used for `context`; confirm.
        row.segment = F::from_canonical_usize(op.base_address[Segment::Code as usize].segment);
        row.virt = virt.map(F::from_canonical_usize);
        row.timestamp = F::from_canonical_usize(op.timestamp);
        row.len = F::from_canonical_usize(op.input.len());
        row.already_absorbed_bytes = F::from_canonical_usize(already_absorbed_bytes);
        row.original_rate_u32s = sponge_state[..KECCAK_RATE_U32S]
            .iter()
            .map(|x| F::from_canonical_u32(*x))
            .collect_vec()
            .try_into()
            .unwrap();
        row.original_capacity_u32s = sponge_state[KECCAK_RATE_U32S..]
            .iter()
            .map(|x| F::from_canonical_u32(*x))
            .collect_vec()
            .try_into()
            .unwrap();
        // Recombine the block's byte columns into u32 limbs (little-endian).
        let block_u32s = (0..KECCAK_RATE_U32S).map(|i| {
            u32::from_le_bytes(
                row.block_bytes[i * 4..(i + 1) * 4]
                    .iter()
                    .map(|x| x.to_canonical_u64() as u8)
                    .collect_vec()
                    .try_into()
                    .unwrap(),
            )
        });
        // xor in the block
        for (state_i, block_i) in sponge_state.iter_mut().zip(block_u32s) {
            *state_i ^= block_i;
        }
        let xored_rate_u32s: [u32; KECCAK_RATE_U32S] = sponge_state[..KECCAK_RATE_U32S]
            .to_vec()
            .try_into()
            .unwrap();
        row.xored_rate_u32s = xored_rate_u32s.map(F::from_canonical_u32);
        // Apply the Keccak-f permutation to the xored state.
        keccakf_u32s(&mut sponge_state);
        // Store all but the first `KECCAK_DIGEST_U32S` limbs in the updated state.
        // Those missing limbs will be broken down into bytes and stored separately.
        row.partial_updated_state_u32s.copy_from_slice(
            &sponge_state[KECCAK_DIGEST_U32S..]
                .iter()
                .copied()
                .map(|i| F::from_canonical_u32(i))
                .collect::<Vec<_>>(),
        );
        sponge_state[..KECCAK_DIGEST_U32S]
            .iter()
            .enumerate()
            .for_each(|(l, &elt)| {
                // Decompose each digest limb into little-endian bytes.
                let mut cur_elt = elt;
                (0..4).for_each(|i| {
                    row.updated_digest_state_bytes[l * 4 + i] =
                        F::from_canonical_u32(cur_elt & 0xFF);
                    cur_elt >>= 8;
                });
                // 32-bit limb reconstruction consistency check.
                let mut s = row.updated_digest_state_bytes[l * 4].to_canonical_u64();
                for i in 1..4 {
                    s += row.updated_digest_state_bytes[l * 4 + i].to_canonical_u64() << (8 * i);
                }
                assert_eq!(elt as u64, s, "not equal");
            })
    }
    /// Builds an all-default (dummy) padding row.
    fn generate_padding_row(&self) -> [F; NUM_KECCAK_SPONGE_COLUMNS] {
        // The default instance has is_full_input_block = is_final_block = 0,
        // indicating that it's a dummy/padding row.
        KeccakSpongeColumnsView::default().into()
    }
}
impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for KeccakSpongeStark<F, D> {
type EvaluationFrame<FE, P, const D2: usize>
= StarkFrame<P, NUM_KECCAK_SPONGE_COLUMNS>
where
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>;
type EvaluationFrameTarget = StarkFrame<ExtensionTarget<D>, NUM_KECCAK_SPONGE_COLUMNS>;
fn eval_packed_generic<FE, P, const D2: usize>(
&self,
vars: &Self::EvaluationFrame<FE, P, D2>,
yield_constr: &mut ConstraintConsumer<P>,
) where
FE: FieldExtension<D2, BaseField = F>,
P: PackedField<Scalar = FE>,
{
let local_values: &[P; NUM_KECCAK_SPONGE_COLUMNS] =
vars.get_local_values().try_into().unwrap();
let local_values: &KeccakSpongeColumnsView<P> = local_values.borrow();
let next_values: &[P; NUM_KECCAK_SPONGE_COLUMNS] =
vars.get_next_values().try_into().unwrap();
let next_values: &KeccakSpongeColumnsView<P> = next_values.borrow();
// Each flag (full-input block, final block or implied dummy flag) must be boolean.
let is_full_input_block = local_values.is_full_input_block;
yield_constr.constraint(is_full_input_block * (is_full_input_block - P::ONES));
let is_final_block: P = local_values.is_final_input_len.iter().copied().sum();
yield_constr.constraint(is_final_block * (is_final_block - P::ONES));
for &is_final_len in local_values.is_final_input_len.iter() {
yield_constr.constraint(is_final_len * (is_final_len - P::ONES));
}
// Ensure that full-input block and final block flags are not set to 1 at the same time.
yield_constr.constraint(is_final_block * is_full_input_block);
// If this is the first row, the original sponge state should be 0 and already_absorbed_bytes = 0.
let already_absorbed_bytes = local_values.already_absorbed_bytes;
yield_constr.constraint_first_row(already_absorbed_bytes);
for &original_rate_elem in local_values.original_rate_u32s.iter() {
yield_constr.constraint_first_row(original_rate_elem);
}
for &original_capacity_elem in local_values.original_capacity_u32s.iter() {
yield_constr.constraint_first_row(original_capacity_elem);
}
// If this is a final block, the next row's original sponge state should be 0 and already_absorbed_bytes = 0.
yield_constr.constraint_transition(is_final_block * next_values.already_absorbed_bytes);
for &original_rate_elem in next_values.original_rate_u32s.iter() {
yield_constr.constraint_transition(is_final_block * original_rate_elem);
}
for &original_capacity_elem in next_values.original_capacity_u32s.iter() {
yield_constr.constraint_transition(is_final_block * original_capacity_elem);
}
// If this is a full-input block, the next row's address, time and len must match as well as its timestamp.
yield_constr.constraint_transition(
is_full_input_block * (local_values.context - next_values.context),
);
yield_constr.constraint_transition(
is_full_input_block * (local_values.segment - next_values.segment),
);
// yield_constr
// .constraint_transition(is_full_input_block * (local_values.virt - next_values.virt));
yield_constr.constraint_transition(
is_full_input_block * (local_values.timestamp - next_values.timestamp),
);
// If this is a full-input block, the next row's "before" should match our "after" state.
for (current_bytes_after, next_before) in local_values
.updated_digest_state_bytes
.chunks_exact(4)
.zip(&next_values.original_rate_u32s[..KECCAK_DIGEST_U32S])
{
let mut current_after = current_bytes_after[0];
for i in 1..4 {
current_after +=
current_bytes_after[i] * P::from(FE::from_canonical_usize(1 << (8 * i)));
}
yield_constr
.constraint_transition(is_full_input_block * (*next_before - current_after));
}
for (¤t_after, &next_before) in local_values
.partial_updated_state_u32s
.iter()
.zip(next_values.original_rate_u32s[KECCAK_DIGEST_U32S..].iter())
{
yield_constr.constraint_transition(is_full_input_block * (next_before - current_after));
}
for (¤t_after, &next_before) in local_values
.partial_updated_state_u32s
.iter()
.skip(KECCAK_RATE_U32S - KECCAK_DIGEST_U32S)
.zip(next_values.original_capacity_u32s.iter())
{
yield_constr.constraint_transition(is_full_input_block * (next_before - current_after));
}
// If this is a full-input block, the next row's already_absorbed_bytes should be ours plus `KECCAK_RATE_BYTES`.
yield_constr.constraint_transition(
is_full_input_block
* (already_absorbed_bytes + P::from(FE::from_canonical_usize(KECCAK_RATE_BYTES))
- next_values.already_absorbed_bytes),
);
// A dummy row is always followed by another dummy row, so the prover can't put dummy rows "in between" to avoid the above checks.
let is_dummy = P::ONES - is_full_input_block - is_final_block;
let next_is_final_block: P = next_values.is_final_input_len.iter().copied().sum();
yield_constr.constraint_transition(
is_dummy * (next_values.is_full_input_block + next_is_final_block),
);
// If this is a final block, is_final_input_len implies `len - already_absorbed == i`.
let offset = local_values.len - already_absorbed_bytes;
for (i, &is_final_len) in local_values.is_final_input_len.iter().enumerate() {
let entry_match = offset - P::from(FE::from_canonical_usize(i));
yield_constr.constraint(is_final_len * entry_match);
}
}
fn eval_ext_circuit(
&self,
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
vars: &Self::EvaluationFrameTarget,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let local_values: &[ExtensionTarget<D>; NUM_KECCAK_SPONGE_COLUMNS] =
vars.get_local_values().try_into().unwrap();
let local_values: &KeccakSpongeColumnsView<ExtensionTarget<D>> = local_values.borrow();
let next_values: &[ExtensionTarget<D>; NUM_KECCAK_SPONGE_COLUMNS] =
vars.get_next_values().try_into().unwrap();
let next_values: &KeccakSpongeColumnsView<ExtensionTarget<D>> = next_values.borrow();
let one = builder.one_extension();
// Each flag (full-input block, final block or implied dummy flag) must be boolean.
let is_full_input_block = local_values.is_full_input_block;
let constraint = builder.mul_sub_extension(
is_full_input_block,
is_full_input_block,
is_full_input_block,
);
yield_constr.constraint(builder, constraint);
let is_final_block = builder.add_many_extension(local_values.is_final_input_len);
let constraint = builder.mul_sub_extension(is_final_block, is_final_block, is_final_block);
yield_constr.constraint(builder, constraint);
for &is_final_len in local_values.is_final_input_len.iter() {
let constraint = builder.mul_sub_extension(is_final_len, is_final_len, is_final_len);
yield_constr.constraint(builder, constraint);
}
// Ensure that full-input block and final block flags are not set to 1 at the same time.
let constraint = builder.mul_extension(is_final_block, is_full_input_block);
yield_constr.constraint(builder, constraint);
// If this is the first row, the original sponge state should be 0 and already_absorbed_bytes = 0.
let already_absorbed_bytes = local_values.already_absorbed_bytes;
yield_constr.constraint_first_row(builder, already_absorbed_bytes);
for &original_rate_elem in local_values.original_rate_u32s.iter() {
yield_constr.constraint_first_row(builder, original_rate_elem);
}
for &original_capacity_elem in local_values.original_capacity_u32s.iter() {
yield_constr.constraint_first_row(builder, original_capacity_elem);
}
// If this is a final block, the next row's original sponge state should be 0 and already_absorbed_bytes = 0.
let constraint = builder.mul_extension(is_final_block, next_values.already_absorbed_bytes);
yield_constr.constraint_transition(builder, constraint);
for &original_rate_elem in next_values.original_rate_u32s.iter() {
let constraint = builder.mul_extension(is_final_block, original_rate_elem);
yield_constr.constraint_transition(builder, constraint);
}
for &original_capacity_elem in next_values.original_capacity_u32s.iter() {
let constraint = builder.mul_extension(is_final_block, original_capacity_elem);
yield_constr.constraint_transition(builder, constraint);
}
// If this is a full-input block, the next row's address, time and len must match as well as its timestamp.
let context_diff = builder.sub_extension(local_values.context, next_values.context);
let constraint = builder.mul_extension(is_full_input_block, context_diff);
yield_constr.constraint_transition(builder, constraint);
let segment_diff = builder.sub_extension(local_values.segment, next_values.segment);
let constraint = builder.mul_extension(is_full_input_block, segment_diff);
yield_constr.constraint_transition(builder, constraint);
// let virt_diff = builder.sub_extension(local_values.virt, next_values.virt);
// let constraint = builder.mul_extension(is_full_input_block, virt_diff);
// yield_constr.constraint_transition(builder, constraint);
let timestamp_diff = builder.sub_extension(local_values.timestamp, next_values.timestamp);
let constraint = builder.mul_extension(is_full_input_block, timestamp_diff);
yield_constr.constraint_transition(builder, constraint);
// If this is a full-input block, the next row's "before" should match our "after" state.
for (current_bytes_after, next_before) in local_values
.updated_digest_state_bytes
.chunks_exact(4)
.zip(&next_values.original_rate_u32s[..KECCAK_DIGEST_U32S])
{
let mut current_after = current_bytes_after[0];
for i in 1..4 {
current_after = builder.mul_const_add_extension(
F::from_canonical_usize(1 << (8 * i)),
current_bytes_after[i],
current_after,
);
}
let diff = builder.sub_extension(*next_before, current_after);
let constraint = builder.mul_extension(is_full_input_block, diff);
yield_constr.constraint_transition(builder, constraint);
}
for (¤t_after, &next_before) in local_values
.partial_updated_state_u32s
.iter()
.zip(next_values.original_rate_u32s[KECCAK_DIGEST_U32S..].iter())
{
let diff = builder.sub_extension(next_before, current_after);
let constraint = builder.mul_extension(is_full_input_block, diff);
yield_constr.constraint_transition(builder, constraint);
}
for (¤t_after, &next_before) in local_values
.partial_updated_state_u32s
.iter()
.skip(KECCAK_RATE_U32S - KECCAK_DIGEST_U32S)
.zip(next_values.original_capacity_u32s.iter())
{
let diff = builder.sub_extension(next_before, current_after);
let constraint = builder.mul_extension(is_full_input_block, diff);
yield_constr.constraint_transition(builder, constraint);
}
// If this is a full-input block, the next row's already_absorbed_bytes should be ours plus `KECCAK_RATE_BYTES`.
let absorbed_bytes = builder.add_const_extension(
already_absorbed_bytes,
F::from_canonical_usize(KECCAK_RATE_BYTES),
);
let absorbed_diff =
builder.sub_extension(absorbed_bytes, next_values.already_absorbed_bytes);
let constraint = builder.mul_extension(is_full_input_block, absorbed_diff);
yield_constr.constraint_transition(builder, constraint);
// A dummy row is always followed by another dummy row, so the prover can't put dummy rows "in between" to avoid the above checks.
let is_dummy = {
let tmp = builder.sub_extension(one, is_final_block);
builder.sub_extension(tmp, is_full_input_block)
};
let next_is_final_block = builder.add_many_extension(next_values.is_final_input_len);
let constraint = {
let tmp = builder.add_extension(next_is_final_block, next_values.is_full_input_block);
builder.mul_extension(is_dummy, tmp)
};
yield_constr.constraint_transition(builder, constraint);
// If this is a final block, is_final_input_len implies `len - already_absorbed == i`.
let offset = builder.sub_extension(local_values.len, already_absorbed_bytes);
for (i, &is_final_len) in local_values.is_final_input_len.iter().enumerate() {
let index = builder.constant_extension(F::from_canonical_usize(i).into());
let entry_match = builder.sub_extension(offset, index);
let constraint = builder.mul_extension(is_final_len, entry_match);
yield_constr.constraint(builder, constraint);
}
}
    /// Maximum multiplicative degree of the constraints emitted above
    /// (products of up to three column values).
    fn constraint_degree(&self) -> usize {
        3
    }
}
#[cfg(test)]
mod tests {
    use std::borrow::Borrow;

    use anyhow::Result;
    use itertools::Itertools;
    use keccak_hash::keccak;
    use plonky2::field::goldilocks_field::GoldilocksField;
    use plonky2::field::types::PrimeField64;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};

    use crate::keccak_sponge::columns::KeccakSpongeColumnsView;
    use crate::keccak_sponge::keccak_sponge_stark::{KeccakSpongeOp, KeccakSpongeStark};
    use crate::memory::segments::Segment;
    use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};
    use crate::witness::memory::MemoryAddress;

    /// The sponge STARK's constraints stay within the supported degree bound.
    #[test]
    fn test_stark_degree() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = KeccakSpongeStark<F, D>;

        test_stark_low_degree(S::default())
    }

    /// The recursive (circuit) constraints agree with the native ones.
    #[test]
    fn test_stark_circuit() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = KeccakSpongeStark<F, D>;

        test_stark_circuit_constraints::<F, C, S, D>(S::default())
    }

    /// A single-block message yields the expected Keccak digest in the
    /// `updated_digest_state_bytes` columns of the final generated row.
    #[test]
    fn test_generation() -> Result<()> {
        const D: usize = 2;
        type F = GoldilocksField;
        type S = KeccakSpongeStark<F, D>;

        let message = vec![1, 2, 3];
        let expected_digest = keccak(&message);

        let sponge_op = KeccakSpongeOp {
            base_address: vec![MemoryAddress {
                context: 0,
                segment: Segment::Code as usize,
                virt: 0,
            }],
            timestamp: 0,
            input: message,
        };
        let trace_rows = S::default().generate_rows_for_op(sponge_op);
        assert_eq!(trace_rows.len(), 1);

        let final_row: &KeccakSpongeColumnsView<F> = trace_rows.last().unwrap().borrow();
        let digest_bytes = final_row
            .updated_digest_state_bytes
            .iter()
            .map(|x| x.to_canonical_u64() as u8)
            .collect_vec();
        assert_eq!(digest_bytes, expected_digest.0);
        Ok(())
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_extend_sponge/sha_extend_sponge_stark.rs | prover/src/sha_extend_sponge/sha_extend_sponge_stark.rs | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::membus::NUM_CHANNELS;
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::memory::segments::Segment;
use crate::sha_extend::logic::get_input_range_4;
use crate::sha_extend_sponge::columns::{
ShaExtendSpongeColumnsView, NUM_EXTEND_INPUT, NUM_SHA_EXTEND_SPONGE_COLUMNS,
SHA_EXTEND_SPONGE_COL_MAP,
};
use crate::sha_extend_sponge::logic::{
diff_address_ext_circuit_constraint, round_increment_ext_circuit_constraint,
};
use crate::stark::Stark;
use crate::util::trace_rows_to_poly_values;
use crate::witness::memory::MemoryAddress;
use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use std::borrow::Borrow;
use std::marker::PhantomData;
pub const NUM_ROUNDS: usize = 48;
/// CTL data looked up by the SHA-extend table: the four input words
/// (one column per byte) followed by the timestamp.
pub(crate) fn ctl_looking_sha_extend_inputs<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_EXTEND_SPONGE_COL_MAP;
    // All input-word byte columns, in the fixed order
    // w[i-15], w[i-2], w[i-16], w[i-7].
    let input_limbs = [
        cols.w_i_minus_15.as_slice(),
        cols.w_i_minus_2.as_slice(),
        cols.w_i_minus_16.as_slice(),
        cols.w_i_minus_7.as_slice(),
    ]
    .concat();
    Column::singles(input_limbs)
        .chain(std::iter::once(Column::single(cols.timestamp)))
        .collect()
}
/// CTL data looked up by the SHA-extend table: the output word's byte
/// columns followed by the timestamp.
pub(crate) fn ctl_looking_sha_extend_outputs<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_EXTEND_SPONGE_COL_MAP;
    Column::singles(&cols.w_i)
        .chain(std::iter::once(Column::single(cols.timestamp)))
        .collect()
}
/// CTL data exposed by this table: (context, segment, output address,
/// timestamp) followed by the recombined u32 output word.
pub(crate) fn ctl_looked_data<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_EXTEND_SPONGE_COL_MAP;
    let mut res: Vec<_> =
        Column::singles([cols.context, cols.segment, cols.output_virt, cols.timestamp]).collect();
    // w_i is stored one byte per column; recombine little-endian.
    res.push(Column::le_bytes(cols.w_i));
    res
}
/// CTL columns for the memory read of the `i`'th input byte:
/// (is_read, context, segment, virt, u32 value, timestamp).
///
/// `i` indexes the 16 input bytes; `i / 4` selects which of the four input
/// words the byte belongs to.
pub(crate) fn ctl_looking_memory<F: Field>(i: usize) -> Vec<Column<F>> {
    let cols = SHA_EXTEND_SPONGE_COL_MAP;

    let mut res = vec![Column::constant(F::ONE)]; // is_read
    res.extend(Column::singles([cols.context, cols.segment]));
    res.push(Column::single(cols.input_virt[i / 4]));

    // Select the input word this byte belongs to; each word is stored as four
    // little-endian byte columns. (Was a deferred-init if/else chain.)
    let le_bytes = match i / 4 {
        0 => cols.w_i_minus_15,
        1 => cols.w_i_minus_2,
        2 => cols.w_i_minus_16,
        _ => cols.w_i_minus_7,
    };
    res.push(Column::le_bytes(le_bytes));
    res.push(Column::single(cols.timestamp));

    // Must line up column-for-column with the memory table's CTL schema.
    assert_eq!(
        res.len(),
        crate::memory::memory_stark::ctl_data::<F>().len()
    );
    res
}
/// CTL filter: a row participates iff some `round` flag is set, i.e. it is
/// not a padding row (padding rows have all-zero round flags).
pub(crate) fn ctl_looking_sha_extend_filter<F: Field>() -> Filter<F> {
    let cols = SHA_EXTEND_SPONGE_COL_MAP;
    // not the padding rows.
    Filter::new_simple(Column::sum(cols.round))
}
/// One message-schedule extension step as recorded during witness generation.
#[derive(Clone, Debug)]
pub(crate) struct ShaExtendSpongeOp {
    /// The base address at which inputs are read
    /// (order: w[i-15], w[i-2], w[i-16], w[i-7]).
    pub(crate) base_address: Vec<MemoryAddress>,
    /// The timestamp at which inputs are read
    pub(crate) timestamp: usize,
    /// The input that was read.
    /// Values: w_i_minus_15, w_i_minus_2, w_i_minus_16, w_i_minus_7 in big-endian order.
    /// NOTE(review): `compute_w_i` decodes these bytes with `from_le_bytes`;
    /// confirm whether "big-endian" above refers to word order only.
    pub(crate) input: Vec<u8>,
    /// The index of round (0..NUM_ROUNDS)
    pub(crate) i: usize,
    /// The base address at which the output is written.
    pub(crate) output_address: MemoryAddress,
}
/// STARK for the SHA-256 message-schedule ("extend") sponge rows.
#[derive(Copy, Clone, Default)]
pub struct ShaExtendSpongeStark<F, const D: usize> {
    // Ties the otherwise-unused field parameter `F` to the type.
    f: PhantomData<F>,
}
impl<F: RichField + Extendable<D>, const D: usize> ShaExtendSpongeStark<F, D> {
    /// Generates the trace column polynomials for the given operations,
    /// padded with all-zero rows up to `max(len, min_rows)` rounded to the
    /// next power of two.
    pub(crate) fn generate_trace(
        &self,
        operations: Vec<ShaExtendSpongeOp>,
        min_rows: usize,
    ) -> Vec<PolynomialValues<F>> {
        // Generate the witness row-wise, then transpose into columns.
        let trace_rows = self.generate_trace_rows(operations, min_rows);
        trace_rows_to_poly_values(trace_rows)
    }

    /// Generates one trace row per operation, then pads with default rows
    /// (recognizable by all-zero `round` flags).
    fn generate_trace_rows(
        &self,
        operations: Vec<ShaExtendSpongeOp>,
        min_rows: usize,
    ) -> Vec<[F; NUM_SHA_EXTEND_SPONGE_COLUMNS]> {
        let base_len = operations.len();
        let mut rows = Vec::with_capacity(base_len.max(min_rows).next_power_of_two());
        for op in operations {
            rows.push(self.generate_rows_for_op(op).into());
        }
        let padded_rows = rows.len().max(min_rows).next_power_of_two();
        for _ in rows.len()..padded_rows {
            rows.push(ShaExtendSpongeColumnsView::default().into());
        }
        rows
    }

    /// Fills in the single trace row for one extension step.
    fn generate_rows_for_op(&self, op: ShaExtendSpongeOp) -> ShaExtendSpongeColumnsView<F> {
        let mut row = ShaExtendSpongeColumnsView::default();
        row.timestamp = F::from_canonical_usize(op.timestamp);
        // One-hot round selector.
        row.round = [F::ZEROS; 48];
        row.round[op.i] = F::ONE;

        // All four input addresses share one (context, segment); read both
        // from the first entry. (Previously the segment was read from index
        // `Segment::Code as usize`, conflating an enum discriminant with a
        // position in `base_address`.)
        row.context = F::from_canonical_usize(op.base_address[0].context);
        row.segment = F::from_canonical_usize(op.base_address[0].segment);

        let virt = (0..op.input.len() / 4)
            .map(|i| op.base_address[i].virt)
            .collect_vec();
        let virt: [usize; 4] = virt.try_into().unwrap();
        row.input_virt = virt.map(F::from_canonical_usize);
        row.output_virt = F::from_canonical_usize(op.output_address.virt);

        row.w_i = self.compute_w_i(&op.input);
        // The four input words, one field element per byte.
        row.w_i_minus_15 = Self::field_bytes(&op.input[get_input_range_4(0)]);
        row.w_i_minus_2 = Self::field_bytes(&op.input[get_input_range_4(1)]);
        row.w_i_minus_16 = Self::field_bytes(&op.input[get_input_range_4(2)]);
        row.w_i_minus_7 = Self::field_bytes(&op.input[get_input_range_4(3)]);
        row
    }

    /// Converts a 4-byte slice into one field element per byte.
    fn field_bytes(bytes: &[u8]) -> [F; 4] {
        std::array::from_fn(|i| F::from_canonical_u8(bytes[i]))
    }

    /// Computes w[i] = s1 + w[i-16] + s0 + w[i-7] (mod 2^32) from the
    /// concatenated input bytes (w[i-15], w[i-2], w[i-16], w[i-7]), returned
    /// as little-endian bytes lifted to field elements. Takes a slice to
    /// avoid cloning the input vector.
    fn compute_w_i(&self, input: &[u8]) -> [F; 4] {
        let w_i_minus_15 = u32::from_le_bytes(input[get_input_range_4(0)].try_into().unwrap());
        let w_i_minus_2 = u32::from_le_bytes(input[get_input_range_4(1)].try_into().unwrap());
        let w_i_minus_16 = u32::from_le_bytes(input[get_input_range_4(2)].try_into().unwrap());
        let w_i_minus_7 = u32::from_le_bytes(input[get_input_range_4(3)].try_into().unwrap());

        // SHA-256 small sigma functions.
        let s0 = w_i_minus_15.rotate_right(7) ^ w_i_minus_15.rotate_right(18) ^ (w_i_minus_15 >> 3);
        let s1 = w_i_minus_2.rotate_right(17) ^ w_i_minus_2.rotate_right(19) ^ (w_i_minus_2 >> 10);
        let w_i_u32 = s1
            .wrapping_add(w_i_minus_16)
            .wrapping_add(s0)
            .wrapping_add(w_i_minus_7);
        w_i_u32.to_le_bytes().map(F::from_canonical_u8)
    }
}
impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ShaExtendSpongeStark<F, D> {
    type EvaluationFrame<FE, P, const D2: usize>
        = StarkFrame<P, NUM_SHA_EXTEND_SPONGE_COLUMNS>
    where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>;

    type EvaluationFrameTarget = StarkFrame<ExtensionTarget<D>, NUM_SHA_EXTEND_SPONGE_COLUMNS>;

    /// Native constraint evaluation over packed base-field values.
    /// Enforces: boolean round flags, timestamp/round/address progression
    /// between consecutive non-final rows, and the fixed relative layout of
    /// the four input addresses and the output address.
    fn eval_packed_generic<FE, P, const D2: usize>(
        &self,
        vars: &Self::EvaluationFrame<FE, P, D2>,
        yield_constr: &mut ConstraintConsumer<P>,
    ) where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        let local_values: &[P; NUM_SHA_EXTEND_SPONGE_COLUMNS] =
            vars.get_local_values().try_into().unwrap();
        let local_values: &ShaExtendSpongeColumnsView<P> = local_values.borrow();
        let next_values: &[P; NUM_SHA_EXTEND_SPONGE_COLUMNS] =
            vars.get_next_values().try_into().unwrap();
        let next_values: &ShaExtendSpongeColumnsView<P> = next_values.borrow();

        // check the round: every flag is boolean.
        for i in 0..NUM_ROUNDS {
            yield_constr.constraint(local_values.round[i] * (local_values.round[i] - P::ONES));
        }

        // check the filter
        let is_final = local_values.round[NUM_ROUNDS - 1];
        yield_constr.constraint(is_final * (is_final - P::ONES));
        let not_final = P::ONES - is_final;

        // Non-zero exactly on non-padding rows (the flags are one-hot).
        let sum_round_flags = (0..NUM_ROUNDS).map(|i| local_values.round[i]).sum::<P>();

        // If this is not the final step or a padding row,
        // the timestamp must be increased by 2 * NUM_CHANNELS.
        yield_constr.constraint(
            sum_round_flags
                * not_final
                * (next_values.timestamp
                    - local_values.timestamp
                    - FE::from_canonical_usize(2 * NUM_CHANNELS)),
        );

        // If this is not the final step or a padding row,
        // round index should be increased by one
        let local_round_index = (0..NUM_ROUNDS)
            .map(|i| local_values.round[i] * FE::from_canonical_u32(i as u32))
            .sum::<P>();
        let next_round_index = (0..NUM_ROUNDS)
            .map(|i| next_values.round[i] * FE::from_canonical_u32(i as u32))
            .sum::<P>();
        yield_constr.constraint(
            sum_round_flags * not_final * (next_round_index - local_round_index - P::ONES),
        );

        // If this is not the final step or a padding row,
        // input and output addresses should be increased by 4 each
        (0..NUM_EXTEND_INPUT).for_each(|i| {
            yield_constr.constraint(
                sum_round_flags
                    * not_final
                    * (next_values.input_virt[i]
                        - local_values.input_virt[i]
                        - FE::from_canonical_u32(4)),
            );
        });
        yield_constr.constraint(
            sum_round_flags
                * not_final
                * (next_values.output_virt - local_values.output_virt - FE::from_canonical_u32(4)),
        );

        // If it's not the padding row, check the virtual addresses
        // The list of input addresses are: w[i-15], w[i-2], w[i-16], w[i-7]
        // (offsets below are in bytes: each word is 4 bytes).
        // add_w[i-15] = add_w[i-16] + 4
        yield_constr.constraint(
            sum_round_flags
                * (local_values.input_virt[0]
                    - local_values.input_virt[2]
                    - FE::from_canonical_u32(4)),
        );
        // add_w[i-2] = add_w[i-16] + 56
        yield_constr.constraint(
            sum_round_flags
                * (local_values.input_virt[1]
                    - local_values.input_virt[2]
                    - FE::from_canonical_u32(56)),
        );
        // add_w[i-7] = add_w[i-16] + 36
        yield_constr.constraint(
            sum_round_flags
                * (local_values.input_virt[3]
                    - local_values.input_virt[2]
                    - FE::from_canonical_u32(36)),
        );
        // add_w[i] = add_w[i-16] + 64
        yield_constr.constraint(
            sum_round_flags
                * (local_values.output_virt
                    - local_values.input_virt[2]
                    - FE::from_canonical_u32(64)),
        );
    }

    /// Recursive-circuit mirror of `eval_packed_generic`; must emit exactly
    /// the same constraints over `ExtensionTarget`s.
    fn eval_ext_circuit(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        vars: &Self::EvaluationFrameTarget,
        yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    ) {
        let local_values: &[ExtensionTarget<D>; NUM_SHA_EXTEND_SPONGE_COLUMNS] =
            vars.get_local_values().try_into().unwrap();
        let local_values: &ShaExtendSpongeColumnsView<ExtensionTarget<D>> = local_values.borrow();
        let next_values: &[ExtensionTarget<D>; NUM_SHA_EXTEND_SPONGE_COLUMNS] =
            vars.get_next_values().try_into().unwrap();
        let next_values: &ShaExtendSpongeColumnsView<ExtensionTarget<D>> = next_values.borrow();

        // Constants reused across constraints.
        let one_ext = builder.one_extension();
        let four_ext = builder.constant_extension(F::Extension::from_canonical_u32(4));
        let num_channel =
            builder.constant_extension(F::Extension::from_canonical_usize(2 * NUM_CHANNELS));

        // check the round: every flag is boolean.
        for i in 0..NUM_ROUNDS {
            let constraint = builder.mul_sub_extension(
                local_values.round[i],
                local_values.round[i],
                local_values.round[i],
            );
            yield_constr.constraint(builder, constraint);
        }

        // check the filter
        let is_final = local_values.round[NUM_ROUNDS - 1];
        let constraint = builder.mul_sub_extension(is_final, is_final, is_final);
        yield_constr.constraint(builder, constraint);
        let not_final = builder.sub_extension(one_ext, is_final);

        let sum_round_flags =
            builder.add_many_extension((0..NUM_ROUNDS).map(|i| local_values.round[i]));

        // If this is not the final step or a padding row,
        // the timestamp must be increased by 2 * NUM_CHANNELS.
        let diff = builder.sub_extension(next_values.timestamp, local_values.timestamp);
        let diff = builder.sub_extension(diff, num_channel);
        let constraint = builder.mul_many_extension([sum_round_flags, not_final, diff]);
        yield_constr.constraint(builder, constraint);

        // If this is not the final step or a padding row,
        // round index should be increased by one
        let round_increment =
            round_increment_ext_circuit_constraint(builder, local_values.round, next_values.round);
        let constraint = builder.mul_many_extension([sum_round_flags, not_final, round_increment]);
        yield_constr.constraint(builder, constraint);

        // If this is not the final step or a padding row,
        // input and output addresses should be increased by 4 each
        (0..NUM_EXTEND_INPUT).for_each(|i| {
            let increment =
                builder.sub_extension(next_values.input_virt[i], local_values.input_virt[i]);
            let address_increment = builder.sub_extension(increment, four_ext);
            let constraint =
                builder.mul_many_extension([sum_round_flags, not_final, address_increment]);
            yield_constr.constraint(builder, constraint);
        });
        let increment = builder.sub_extension(next_values.output_virt, local_values.output_virt);
        let address_increment = builder.sub_extension(increment, four_ext);
        let constraint =
            builder.mul_many_extension([sum_round_flags, not_final, address_increment]);
        yield_constr.constraint(builder, constraint);

        // If it's not the padding row, check the virtual addresses
        // The list of input addresses are: w[i-15], w[i-2], w[i-16], w[i-7]
        // add_w[i-15] = add_w[i-16] + 4
        let constraint = diff_address_ext_circuit_constraint(
            builder,
            sum_round_flags,
            local_values.input_virt[0],
            local_values.input_virt[2],
            4,
        );
        yield_constr.constraint(builder, constraint);

        // add_w[i-2] = add_w[i-16] + 56
        let constraint = diff_address_ext_circuit_constraint(
            builder,
            sum_round_flags,
            local_values.input_virt[1],
            local_values.input_virt[2],
            56,
        );
        yield_constr.constraint(builder, constraint);

        // add_w[i-7] = add_w[i-16] + 36
        let constraint = diff_address_ext_circuit_constraint(
            builder,
            sum_round_flags,
            local_values.input_virt[3],
            local_values.input_virt[2],
            36,
        );
        yield_constr.constraint(builder, constraint);

        // add_w[i] = add_w[i-16] + 64
        let constraint = diff_address_ext_circuit_constraint(
            builder,
            sum_round_flags,
            local_values.output_virt,
            local_values.input_virt[2],
            64,
        );
        yield_constr.constraint(builder, constraint);
    }

    /// Maximum multiplicative degree of the constraints above.
    fn constraint_degree(&self) -> usize {
        3
    }
}
#[cfg(test)]
mod test {
    use crate::config::StarkConfig;
    use crate::cross_table_lookup::{
        Column, CtlData, CtlZData, Filter, GrandProductChallenge, GrandProductChallengeSet,
    };
    use crate::memory::segments::Segment;
    use crate::memory::NUM_CHANNELS;
    use crate::prover::prove_single_table;
    use crate::sha_extend_sponge::sha_extend_sponge_stark::{
        ShaExtendSpongeOp, ShaExtendSpongeStark,
    };
    use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};
    use crate::witness::memory::MemoryAddress;
    use env_logger::{try_init_from_env, Env, DEFAULT_FILTER_ENV};
    use plonky2::field::goldilocks_field::GoldilocksField;
    use plonky2::field::polynomial::PolynomialValues;
    use plonky2::field::types::Field;
    use plonky2::fri::oracle::PolynomialBatch;
    use plonky2::iop::challenger::Challenger;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
    use plonky2::timed;
    use plonky2::util::timing::TimingTree;

    /// Witness generation computes the expected w[16] for a known input
    /// (words 0..4 placed at their standard schedule offsets).
    #[test]
    fn test_correction() -> Result<(), String> {
        const D: usize = 2;
        type F = GoldilocksField;
        type S = ShaExtendSpongeStark<F, D>;

        // Input words 0, 1, 2, 3 serialized little-endian.
        let mut input_values = vec![];
        input_values.extend((0..4_u32).map(|i| i.to_le_bytes()));
        let input_values = input_values.into_iter().flatten().collect::<Vec<_>>();

        // Addresses follow the fixed layout: w[i-15]=+4, w[i-2]=+56,
        // w[i-16]=+0, w[i-7]=+36, output=+64 (bytes).
        let op = ShaExtendSpongeOp {
            base_address: vec![
                MemoryAddress {
                    context: 0,
                    segment: Segment::Code as usize,
                    virt: 4,
                },
                MemoryAddress {
                    context: 0,
                    segment: Segment::Code as usize,
                    virt: 56,
                },
                MemoryAddress {
                    context: 0,
                    segment: Segment::Code as usize,
                    virt: 0,
                },
                MemoryAddress {
                    context: 0,
                    segment: Segment::Code as usize,
                    virt: 36,
                },
            ],
            timestamp: 0,
            input: input_values,
            i: 0,
            output_address: MemoryAddress {
                context: 0,
                segment: Segment::Code as usize,
                virt: 64,
            },
        };

        let stark = S::default();
        let row = stark.generate_rows_for_op(op);

        // Expected w[16] for this input, as little-endian bytes.
        let w_i_bin = 40965_u32.to_le_bytes();
        assert_eq!(row.w_i, w_i_bin.map(F::from_canonical_u8));

        Ok(())
    }

    /// Recursive (circuit) constraints agree with the native ones.
    #[test]
    fn test_stark_circuit() -> anyhow::Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = ShaExtendSpongeStark<F, D>;

        let stark = S::default();
        test_stark_circuit_constraints::<F, C, S, D>(stark)
    }

    /// Constraint degree stays within the supported bound.
    #[test]
    fn test_stark_degree() -> anyhow::Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = ShaExtendSpongeStark<F, D>;

        let stark = S {
            f: Default::default(),
        };
        test_stark_low_degree(stark)
    }

    /// Builds the 48 extension operations of a random SHA-256 message
    /// schedule, with consistent addresses and timestamps.
    fn get_random_input() -> Vec<ShaExtendSpongeOp> {
        // Compute the full message schedule w[0..64].
        let mut w = [0u32; 64];
        for i in 0..16 {
            w[i] = rand::random::<u32>();
        }

        for i in 16..64 {
            let w_i_minus_15 = w[i - 15];
            let s0 =
                w_i_minus_15.rotate_right(7) ^ w_i_minus_15.rotate_right(18) ^ (w_i_minus_15 >> 3);

            // Read w[i-2].
            let w_i_minus_2 = w[i - 2];
            // Compute `s1`.
            let s1 =
                w_i_minus_2.rotate_right(17) ^ w_i_minus_2.rotate_right(19) ^ (w_i_minus_2 >> 10);

            // Read w[i-16].
            let w_i_minus_16 = w[i - 16];
            let w_i_minus_7 = w[i - 7];

            // Compute `w_i`.
            w[i] = s1
                .wrapping_add(w_i_minus_16)
                .wrapping_add(s0)
                .wrapping_add(w_i_minus_7);
        }

        // One word per 4-byte slot.
        let mut addresses = vec![];
        for i in 0..64 {
            addresses.push(MemoryAddress {
                context: 0,
                segment: Segment::Code as usize,
                virt: i * 4,
            });
        }

        let mut res = vec![];
        let mut time = 0;
        for i in 16..64 {
            let mut input_values = vec![];
            input_values.extend(w[i - 15].to_le_bytes());
            input_values.extend(w[i - 2].to_le_bytes());
            input_values.extend(w[i - 16].to_le_bytes());
            input_values.extend(w[i - 7].to_le_bytes());

            let op = ShaExtendSpongeOp {
                base_address: vec![
                    addresses[i - 15],
                    addresses[i - 2],
                    addresses[i - 16],
                    addresses[i - 7],
                ],
                timestamp: time,
                input: input_values,
                i: i - 16,
                output_address: addresses[i],
            };
            res.push(op);
            // Matches the per-row timestamp-increment constraint.
            time += 2 * NUM_CHANNELS;
        }
        res
    }

    /// End-to-end single-table proof over a random schedule.
    #[test]
    fn sha_extend_sponge_benchmark() -> anyhow::Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = ShaExtendSpongeStark<F, D>;

        let stark = S::default();
        let config = StarkConfig::standard_fast_config();

        init_logger();

        let input = get_random_input();

        let mut timing = TimingTree::new("prove", log::Level::Debug);
        let trace_poly_values = stark.generate_trace(input, 8);

        // TODO: Cloning this isn't great; consider having `from_values` accept a reference,
        // or having `compute_permutation_z_polys` read trace values from the `PolynomialBatch`.
        let cloned_trace_poly_values = timed!(timing, "clone", trace_poly_values.clone());

        let trace_commitments = timed!(
            timing,
            "compute trace commitment",
            PolynomialBatch::<F, C, D>::from_values(
                cloned_trace_poly_values,
                config.fri_config.rate_bits,
                false,
                config.fri_config.cap_height,
                &mut timing,
                None,
            )
        );
        let degree = 1 << trace_commitments.degree_log;

        // Fake CTL data.
        let ctl_z_data = CtlZData {
            helper_columns: vec![PolynomialValues::zero(degree)],
            z: PolynomialValues::zero(degree),
            challenge: GrandProductChallenge {
                beta: F::ZERO,
                gamma: F::ZERO,
            },
            columns: vec![],
            filter: vec![Some(Filter::new_simple(Column::constant(F::ZERO)))],
        };
        let ctl_data = CtlData {
            zs_columns: vec![ctl_z_data.clone(); config.num_challenges],
        };

        prove_single_table(
            &stark,
            &config,
            &trace_poly_values,
            &trace_commitments,
            &ctl_data,
            &GrandProductChallengeSet {
                challenges: vec![ctl_z_data.challenge; config.num_challenges],
            },
            &mut Challenger::new(),
            &mut timing,
        )?;

        timing.print();
        Ok(())
    }

    /// Best-effort logger setup; ignores "already initialized" errors.
    fn init_logger() {
        let _ = try_init_from_env(Env::default().filter_or(DEFAULT_FILTER_ENV, "debug"));
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_extend_sponge/columns.rs | prover/src/sha_extend_sponge/columns.rs | use crate::util::{indices_arr, transmute_no_compile_time_size_checks};
use std::borrow::{Borrow, BorrowMut};
use std::mem::transmute;
/// Number of input words read by one extension step.
pub(crate) const NUM_EXTEND_INPUT: usize = 4;
/// Total input bytes read per step (4 words * 4 bytes).
pub(crate) const SHA_EXTEND_SPONGE_READ_BYTES: usize = NUM_EXTEND_INPUT * 4;

/// Column layout for one row of the SHA-extend sponge STARK.
///
/// `#[repr(C)]` pins the field order so the `From`/`Borrow` impls below,
/// which `transmute` between this view and
/// `[T; NUM_SHA_EXTEND_SPONGE_COLUMNS]`, are layout-sound. (The default
/// Rust representation gives no such guarantee.)
#[repr(C)]
pub(crate) struct ShaExtendSpongeColumnsView<T: Copy> {
    /// One-hot round flags; all zero on padding rows.
    pub round: [T; 48],
    /// Input words, one column per little-endian byte.
    pub w_i_minus_15: [T; 4],
    pub w_i_minus_2: [T; 4],
    pub w_i_minus_16: [T; 4],
    pub w_i_minus_7: [T; 4],
    /// Output word w[i], one column per little-endian byte.
    pub w_i: [T; 4],
    /// Virtual addresses of the four input words.
    pub input_virt: [T; NUM_EXTEND_INPUT],
    /// Virtual address of the output word.
    pub output_virt: T,
    pub context: T,
    pub segment: T,
    /// The timestamp at which inputs should be read from memory.
    pub timestamp: T,
}

/// Total column count: 48 + 5 * 4 + 4 + 4 = 76.
pub const NUM_SHA_EXTEND_SPONGE_COLUMNS: usize = size_of::<ShaExtendSpongeColumnsView<u8>>();
impl<T: Copy> From<[T; NUM_SHA_EXTEND_SPONGE_COLUMNS]> for ShaExtendSpongeColumnsView<T> {
    /// Reinterprets a flat column array as the structured view.
    fn from(value: [T; NUM_SHA_EXTEND_SPONGE_COLUMNS]) -> Self {
        // SAFETY: both types consist of exactly NUM_SHA_EXTEND_SPONGE_COLUMNS
        // values of type `T`, so the bitwise reinterpretation is valid.
        unsafe { transmute_no_compile_time_size_checks(value) }
    }
}
impl<T: Copy> From<ShaExtendSpongeColumnsView<T>> for [T; NUM_SHA_EXTEND_SPONGE_COLUMNS] {
    /// Reinterprets the structured view as a flat column array.
    fn from(value: ShaExtendSpongeColumnsView<T>) -> Self {
        // SAFETY: same-size reinterpretation; every field of the view is `T`.
        unsafe { transmute_no_compile_time_size_checks(value) }
    }
}
impl<T: Copy> Borrow<ShaExtendSpongeColumnsView<T>> for [T; NUM_SHA_EXTEND_SPONGE_COLUMNS] {
    /// Borrows the flat array as the structured view (zero-copy).
    fn borrow(&self) -> &ShaExtendSpongeColumnsView<T> {
        // SAFETY: same size and layout; the reference lifetime is preserved.
        unsafe { transmute(self) }
    }
}
impl<T: Copy> BorrowMut<ShaExtendSpongeColumnsView<T>> for [T; NUM_SHA_EXTEND_SPONGE_COLUMNS] {
    /// Mutably borrows the flat array as the structured view (zero-copy).
    fn borrow_mut(&mut self) -> &mut ShaExtendSpongeColumnsView<T> {
        // SAFETY: same size and layout; the reference lifetime is preserved.
        unsafe { transmute(self) }
    }
}
impl<T: Copy> Borrow<[T; NUM_SHA_EXTEND_SPONGE_COLUMNS]> for ShaExtendSpongeColumnsView<T> {
    /// Borrows the structured view as a flat column array (zero-copy).
    fn borrow(&self) -> &[T; NUM_SHA_EXTEND_SPONGE_COLUMNS] {
        // SAFETY: same size and layout; the reference lifetime is preserved.
        unsafe { transmute(self) }
    }
}
impl<T: Copy> BorrowMut<[T; NUM_SHA_EXTEND_SPONGE_COLUMNS]> for ShaExtendSpongeColumnsView<T> {
    /// Mutably borrows the structured view as a flat column array (zero-copy).
    fn borrow_mut(&mut self) -> &mut [T; NUM_SHA_EXTEND_SPONGE_COLUMNS] {
        // SAFETY: same size and layout; the reference lifetime is preserved.
        unsafe { transmute(self) }
    }
}
impl<T: Copy + Default> Default for ShaExtendSpongeColumnsView<T> {
    /// A view with every column set to `T::default()`.
    fn default() -> Self {
        Self::from([T::default(); NUM_SHA_EXTEND_SPONGE_COLUMNS])
    }
}
/// Builds a view whose every column holds its own flat index; used to refer
/// to columns by name when constructing cross-table lookups.
const fn make_col_map() -> ShaExtendSpongeColumnsView<usize> {
    let indices_arr = indices_arr::<NUM_SHA_EXTEND_SPONGE_COLUMNS>();
    // SAFETY: same-size reinterpretation; every field of the view is `usize`.
    unsafe {
        transmute::<[usize; NUM_SHA_EXTEND_SPONGE_COLUMNS], ShaExtendSpongeColumnsView<usize>>(
            indices_arr,
        )
    }
}
/// Name-to-flat-index map for the sponge columns.
pub(crate) const SHA_EXTEND_SPONGE_COL_MAP: ShaExtendSpongeColumnsView<usize> = make_col_map();
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_extend_sponge/mod.rs | prover/src/sha_extend_sponge/mod.rs | pub mod columns;
pub mod logic;
pub mod sha_extend_sponge_stark;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_extend_sponge/logic.rs | prover/src/sha_extend_sponge/logic.rs | use crate::sha_extend_sponge::sha_extend_sponge_stark::NUM_ROUNDS;
use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
// Compute (x - y - diff) * sum_round_flags
/// Builds the circuit target for `(x - y - diff) * sum_round_flags`,
/// used to pin the relative layout of the sponge's memory addresses.
pub(crate) fn diff_address_ext_circuit_constraint<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    sum_round_flags: ExtensionTarget<D>,
    x: ExtensionTarget<D>,
    y: ExtensionTarget<D>,
    diff: usize,
) -> ExtensionTarget<D> {
    let inter_1 = builder.sub_extension(x, y);
    // Convert the usize directly rather than via a silently-truncating
    // `as u32` cast.
    let diff_ext = builder.constant_extension(F::Extension::from_canonical_usize(diff));
    let address_diff = builder.sub_extension(inter_1, diff_ext);
    builder.mul_extension(sum_round_flags, address_diff)
}
/// Builds the circuit target for `next_round_index - local_round_index - 1`,
/// where each index is the weighted sum `sum_i i * round[i]` over the one-hot
/// round flags.
pub(crate) fn round_increment_ext_circuit_constraint<
    F: RichField + Extendable<D>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    local_round: [ExtensionTarget<D>; NUM_ROUNDS],
    next_round: [ExtensionTarget<D>; NUM_ROUNDS],
) -> ExtensionTarget<D> {
    let one_ext = builder.one_extension();

    // Weighted index sum, shared by both rows (was copy-pasted twice).
    // Builder-op order is identical to the previous inline version.
    let round_index = |builder: &mut CircuitBuilder<F, D>,
                       round: [ExtensionTarget<D>; NUM_ROUNDS]| {
        let terms: Vec<_> = (0..NUM_ROUNDS)
            .map(|i| {
                let index = builder.constant_extension(F::Extension::from_canonical_u32(i as u32));
                builder.mul_extension(round[i], index)
            })
            .collect();
        builder.add_many_extension(terms)
    };

    let local_round_index = round_index(builder, local_round);
    let next_round_index = round_index(builder, next_round);

    let increment = builder.sub_extension(next_round_index, local_round_index);
    builder.sub_extension(increment, one_ext)
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/witness/errors.rs | prover/src/witness/errors.rs | #[allow(dead_code)]
#[derive(Debug)]
pub enum ProgramError {
OutOfGas,
InvalidRegister,
InvalidSyscall,
InvalidOpcode,
Trap,
StackUnderflow,
InvalidRlp,
InvalidJumpDestination,
InvalidJumpiDestination,
StackOverflow,
KernelPanic,
MemoryError(MemoryError),
GasLimitError,
InterpreterError,
IntegerTooLarge,
ProverInputError(ProverInputError),
UnknownContractCode,
}
/// A memory address component exceeded its allowed range.
#[allow(clippy::enum_variant_names)]
#[derive(Debug)]
pub enum MemoryError {
    ContextTooLarge { context: u32 },
    SegmentTooLarge { segment: u32 },
    VirtTooLarge { virt: u32 },
}
/// Errors in the auxiliary inputs supplied to the prover.
#[derive(Debug)]
pub enum ProverInputError {
    OutOfMptData,
    OutOfRlpData,
    CodeHashNotFound,
    InvalidMptInput,
    InvalidInput,
    InvalidFunction,
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/witness/memory.rs | prover/src/witness/memory.rs | use crate::cpu::membus::{NUM_CHANNELS, NUM_GP_CHANNELS};
use itertools::Itertools;
/// A memory-bus channel: either the dedicated code channel or one of the
/// general-purpose channels.
#[derive(Clone, Copy, Debug)]
pub enum MemoryChannel {
    Code,
    GeneralPurpose(usize),
}
use MemoryChannel::{Code, GeneralPurpose};
use crate::memory::segments::Segment;
impl MemoryChannel {
    /// Returns this channel's bus index: general-purpose channel `n` maps to
    /// `n + 1`, and the code channel takes the last slot.
    pub fn index(&self) -> usize {
        match *self {
            Code => NUM_CHANNELS - 1,
            GeneralPurpose(n) => {
                // Panics on an out-of-range general-purpose channel number.
                assert!(n < NUM_GP_CHANNELS);
                n + 1
            }
        }
    }
}
/// Fully-qualified memory address: (execution context, segment, virtual
/// address within the segment).
#[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)]
pub struct MemoryAddress {
    pub(crate) context: usize,
    pub(crate) segment: usize,
    pub(crate) virt: usize,
}
impl MemoryAddress {
    /// Builds an address from a typed segment.
    pub(crate) fn new(context: usize, segment: Segment, virt: usize) -> Self {
        Self {
            context,
            virt,
            segment: segment as usize,
        }
    }

    /// Advances to the next 32-bit word, saturating at the top of the
    /// address space instead of overflowing.
    pub(crate) fn increment(&mut self) {
        self.virt = self.virt.saturating_add(4);
    }
}
///
///Memory Access, for simplicity, we extend the byte and halfword(2 bytes) to a word(4 bytes).
/// Direction of a memory access. Byte and halfword accesses are widened to
/// full 4-byte words (see the note above).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub enum MemoryOpKind {
    Read,
    Write,
}
/// A single logged memory access, later sorted into the memory STARK trace.
#[derive(Clone, Copy, Debug)]
pub struct MemoryOp {
    /// true if this is an actual memory operation, or false if it's a padding row.
    /// NOTE: Skip the 0 register since we always read 0 from it, but may write anything in
    pub filter: bool,
    // Derived from the CPU clock; see `MemoryOp::new`.
    pub timestamp: usize,
    pub address: MemoryAddress,
    pub kind: MemoryOpKind,
    pub value: u32,
}
/// Shared padding operation; `filter: false` excludes it from the memory
/// argument entirely.
pub static DUMMY_MEMOP: MemoryOp = MemoryOp {
    filter: false,
    timestamp: 0,
    address: MemoryAddress {
        context: 0,
        segment: 0,
        virt: 0,
    },
    kind: MemoryOpKind::Read,
    value: 0,
};
impl MemoryOp {
pub fn new(
_channel: MemoryChannel,
clock: usize,
address: MemoryAddress,
kind: MemoryOpKind,
value: u32,
) -> Self {
let timestamp = clock * NUM_CHANNELS;
MemoryOp {
filter: true,
timestamp,
address,
kind,
value,
}
}
pub(crate) fn new_dummy_read(address: MemoryAddress, timestamp: usize, value: u32) -> Self {
Self {
filter: false,
timestamp,
address,
kind: MemoryOpKind::Read,
value,
}
}
pub(crate) fn sorting_key(&self) -> (usize, usize, usize, usize) {
(
self.address.context,
self.address.segment,
self.address.virt,
self.timestamp,
)
}
}
/// FIXME: all GPRs, HI, LO, EPC and page are also located in memory
/// Full memory image across all execution contexts.
/// FIXME: all GPRs, HI, LO, EPC and page are also located in memory
#[derive(Clone, Debug)]
pub struct MemoryState {
    pub(crate) contexts: Vec<MemoryContextState>,
}
impl MemoryState {
    /// Creates a memory image with the kernel code in context 0 plus the
    /// precomputed shift table (entry i holds `1 << i`, byte-swapped).
    pub fn new(kernel_code: &[u8]) -> Self {
        let code_u32s = kernel_code.iter().map(|&x| x.into()).collect();
        let mut result = Self::default();
        result.contexts[0].segments[Segment::Code as usize].content = code_u32s;
        // `to_be` matches the byte-swap applied on writes in `apply_ops`.
        let shift_u32s = (0..32).map(|i| (1u32 << i).to_be()).collect_vec();
        result.contexts[0].segments[Segment::ShiftTable as usize].content = shift_u32s;
        result
    }

    /// Replays the write operations of `ops` onto this image; reads are no-ops.
    pub fn apply_ops(&mut self, ops: &[MemoryOp]) {
        for &op in ops {
            let MemoryOp {
                address,
                kind,
                value,
                ..
            } = op;
            if kind == MemoryOpKind::Write {
                // NOTE(review): values are byte-swapped on write; presumably
                // the backing store is big-endian — confirm against readers.
                self.set(address, value.to_be());
            }
        }
    }

    /// Reads the word at `address`; unknown contexts read as zero.
    pub fn get(&self, address: MemoryAddress) -> u32 {
        if address.context >= self.contexts.len() {
            return 0;
        }

        // Indexing `Segment::all()` bounds-checks the segment number.
        let _segment = Segment::all()[address.segment];
        let val = self.contexts[address.context].segments[address.segment].get(address.virt);
        log::trace!("read mem {:X} : {:X} ({})", address.virt, val, val);
        /*
        assert!(
            u32::BITS as usize <= segment.bit_range(),
            "Value {} exceeds {:?} range of {} bits",
            val,
            segment,
            segment.bit_range()
        );
        */
        val
    }

    /// Writes the word at `address`, growing the context list as needed.
    pub fn set(&mut self, address: MemoryAddress, val: u32) {
        while address.context >= self.contexts.len() {
            self.contexts.push(MemoryContextState::default());
        }

        // Indexing `Segment::all()` bounds-checks the segment number.
        let _segment = Segment::all()[address.segment];
        self.contexts[address.context].segments[address.segment].set(address.virt, val);
    }
}
impl Default for MemoryState {
fn default() -> Self {
Self {
// We start with an initial context for the kernel.
contexts: vec![MemoryContextState::default()],
}
}
}
#[derive(Clone, Debug)]
pub(crate) struct MemoryContextState {
/// The content of each memory segment.
pub(crate) segments: [MemorySegmentState; Segment::COUNT],
}
impl Default for MemoryContextState {
fn default() -> Self {
Self {
segments: std::array::from_fn(|_| MemorySegmentState::default()),
}
}
}
#[derive(Clone, Default, Debug)]
pub(crate) struct MemorySegmentState {
pub(crate) content: Vec<u32>,
}
impl MemorySegmentState {
pub(crate) fn get(&self, virtual_addr: usize) -> u32 {
self.content.get(virtual_addr).copied().unwrap_or(0)
}
pub(crate) fn set(&mut self, virtual_addr: usize, value: u32) {
if virtual_addr >= self.content.len() {
self.content.resize(virtual_addr + 1, 0);
}
self.content[virtual_addr] = value;
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/witness/state.rs | prover/src/witness/state.rs | use crate::cpu::kernel::assembler::Kernel;
const KERNEL_CONTEXT: usize = 0;
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct RegistersState {
pub gprs: [usize; 32],
pub lo: usize,
pub hi: usize,
pub heap: usize,
pub program_counter: usize,
pub next_pc: usize,
pub brk: usize,
pub local_user: usize,
pub is_kernel: bool,
pub context: usize,
pub exited: bool,
pub exit_code: u8,
}
impl RegistersState {
pub(crate) fn code_context(&self) -> usize {
if self.is_kernel {
KERNEL_CONTEXT
} else {
self.context
}
}
}
impl RegistersState {
pub fn new(kernel: &Kernel) -> Self {
Self {
gprs: kernel.program.gprs,
lo: kernel.program.lo,
hi: kernel.program.hi,
heap: kernel.program.heap,
program_counter: kernel.program.entry as usize,
next_pc: kernel.program.next_pc,
brk: kernel.program.brk,
local_user: kernel.program.local_user,
is_kernel: true,
context: 0,
exited: false,
exit_code: 0u8,
}
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/witness/transition.rs | prover/src/witness/transition.rs | use anyhow::bail;
use log::log_enabled;
use plonky2::field::extension::Extendable;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::config::GenericConfig;
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::assembler::Kernel;
use crate::generation::state::GenerationState;
use crate::memory::segments::Segment;
use crate::witness::errors::ProgramError;
use crate::witness::memory::MemoryAddress;
use crate::witness::operation::*;
use crate::witness::state::RegistersState;
use crate::witness::util::mem_read_code_with_log_and_fill;
use crate::{arithmetic, logic};
fn read_code_memory<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
state: &mut GenerationState<F, C, D>,
row: &mut CpuColumnsView<F>,
) -> u32 {
let code_context = state.registers.code_context();
row.code_context = F::from_canonical_usize(code_context);
let address = MemoryAddress::new(code_context, Segment::Code, state.registers.program_counter);
let (opcode, mem_log) = mem_read_code_with_log_and_fill(address, state, row);
log::trace!(
"read_code_memory: PC {:X} ({}) op: {:?}, {:?}",
state.registers.program_counter,
state.registers.program_counter,
opcode,
mem_log
);
state.traces.push_memory(mem_log);
opcode
}
fn decode(registers: RegistersState, insn: u32) -> Result<Operation, ProgramError> {
let opcode = ((insn >> 26) & 0x3F).to_le_bytes()[0];
let func = (insn & 0x3F).to_le_bytes()[0];
let rt = ((insn >> 16) & 0x1F).to_le_bytes()[0];
let rs = ((insn >> 21) & 0x1F).to_le_bytes()[0];
let rd = ((insn >> 11) & 0x1F).to_le_bytes()[0];
let sa = ((insn >> 6) & 0x1F).to_le_bytes()[0];
let offset = insn & 0xffff; // as known as imm
let target = insn & 0x3ffffff;
log::trace!(
"op {}, func {}, rt {}, rs {}, rd {}",
opcode,
func,
rt,
rs,
rd
);
log::trace!(
"decode: insn {:X}, opcode {:X}, func {:X}",
insn,
opcode,
func
);
match (opcode, func, registers.is_kernel) {
(0b000000, 0b001010, _) => Ok(Operation::CondMov(MovCond::EQ, rs, rt, rd)), // MOVZ: rd = rs if rt == 0
(0b000000, 0b001011, _) => Ok(Operation::CondMov(MovCond::NE, rs, rt, rd)), // MOVN: rd = rs if rt != 0
(0b000000, 0b100000, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::ADD,
rs,
rt,
rd,
)), // ADD: rd = rs+rt
(0b000000, 0b100001, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::ADDU,
rs,
rt,
rd,
)), // ADDU: rd = rs+rt
(0b000000, 0b100010, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::SUB,
rs,
rt,
rd,
)), // SUB: rd = rs-rt
(0b000000, 0b100011, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::SUBU,
rs,
rt,
rd,
)), // SUBU: rd = rs-rt
(0b000000, 0b000000, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::SLL,
sa,
rt,
rd,
)), // SLL: rd = rt << sa
(0b000000, 0b000010, _) => {
if rs == 1 {
Ok(Operation::Ror(rd, rt, sa))
} else {
Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::SRL,
sa,
rt,
rd,
))
}
} // SRL: rd = rt >> sa
(0b000000, 0b000011, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::SRA,
sa,
rt,
rd,
)), // SRA: rd = rt >> sa
(0b000000, 0b000100, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::SLLV,
rs,
rt,
rd,
)), // SLLV: rd = rt << rs[4:0]
(0b000000, 0b000110, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::SRLV,
rs,
rt,
rd,
)), // SRLV: rd = rt >> rs[4:0]
(0b000000, 0b000111, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::SRAV,
rs,
rt,
rd,
)), // SRAV: rd = rt >> rs[4:0]
(0b011100, 0b000010, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::MUL,
rs,
rt,
rd,
)), // MUL: rd = rt * rs
(0b000000, 0b011000, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::MULT,
rs,
rt,
rd,
)), // MULT: (hi, lo) = rt * rs
(0b000000, 0b011001, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::MULTU,
rs,
rt,
rd,
)), // MULTU: (hi, lo) = rt * rs
(0b000000, 0b011010, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::DIV,
rs,
rt,
rd,
)), // DIV: (hi, lo) = rt / rs
(0b000000, 0b011011, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::DIVU,
rs,
rt,
rd,
)), // DIVU: (hi, lo) = rt / rs
(0b000000, 0b010000, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::MFHI,
33,
0,
rd,
)), // MFHI: rd = hi
(0b000000, 0b010001, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::MTHI,
rs,
0,
33,
)), // MTHI: hi = rs
(0b000000, 0b010010, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::MFLO,
32,
0,
rd,
)), // MFLO: rd = lo
(0b000000, 0b010011, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::MTLO,
rs,
0,
32,
)), // MTLO: lo = rs
(0b000000, 0b001111, _) => Ok(Operation::Nop), // SYNC
(0b011100, 0b100000, _) => Ok(Operation::Count(false, rs, rd)), // CLZ: rd = count_leading_zeros(rs)
(0b011100, 0b100001, _) => Ok(Operation::Count(true, rs, rd)), // CLO: rd = count_leading_ones(rs)
(0x00, 0x08, _) => Ok(Operation::Jump(0u8, rs)), // JR
(0x00, 0x09, _) => Ok(Operation::Jump(rd, rs)), // JALR
(0x01, _, _) => {
if rt == 1 {
Ok(Operation::Branch(BranchCond::GE, rs, 0u8, offset)) // BGEZ
} else if rt == 0 {
Ok(Operation::Branch(BranchCond::LT, rs, 0u8, offset)) // BLTZ
} else if rt == 0x11 && rs == 0 {
Ok(Operation::JumpDirect(31, offset)) // BAL
} else {
Err(ProgramError::InvalidOpcode)
}
}
(0x02, _, _) => Ok(Operation::Jumpi(0u8, target)), // J
(0x03, _, _) => Ok(Operation::Jumpi(31u8, target)), // JAL
(0x04, _, _) => Ok(Operation::Branch(BranchCond::EQ, rs, rt, offset)), // BEQ
(0x05, _, _) => Ok(Operation::Branch(BranchCond::NE, rs, rt, offset)), // BNE
(0x06, _, _) => Ok(Operation::Branch(BranchCond::LE, rs, 0u8, offset)), // BLEZ
(0x07, _, _) => Ok(Operation::Branch(BranchCond::GT, rs, 0u8, offset)), // BGTZ
(0b100000, _, _) => Ok(Operation::MloadGeneral(MemOp::LB, rs, rt, offset)),
(0b100001, _, _) => Ok(Operation::MloadGeneral(MemOp::LH, rs, rt, offset)),
(0b100010, _, _) => Ok(Operation::MloadGeneral(MemOp::LWL, rs, rt, offset)),
(0b100011, _, _) => Ok(Operation::MloadGeneral(MemOp::LW, rs, rt, offset)),
(0b100100, _, _) => Ok(Operation::MloadGeneral(MemOp::LBU, rs, rt, offset)),
(0b100101, _, _) => Ok(Operation::MloadGeneral(MemOp::LHU, rs, rt, offset)),
(0b100110, _, _) => Ok(Operation::MloadGeneral(MemOp::LWR, rs, rt, offset)),
(0b110000, _, _) => Ok(Operation::MloadGeneral(MemOp::LL, rs, rt, offset)),
(0b101000, _, _) => Ok(Operation::MstoreGeneral(MemOp::SB, rs, rt, offset)),
(0b101001, _, _) => Ok(Operation::MstoreGeneral(MemOp::SH, rs, rt, offset)),
(0b101010, _, _) => Ok(Operation::MstoreGeneral(MemOp::SWL, rs, rt, offset)),
(0b101011, _, _) => Ok(Operation::MstoreGeneral(MemOp::SW, rs, rt, offset)),
(0b101110, _, _) => Ok(Operation::MstoreGeneral(MemOp::SWR, rs, rt, offset)),
(0b111000, _, _) => Ok(Operation::MstoreGeneral(MemOp::SC, rs, rt, offset)),
(0b111101, _, _) => Ok(Operation::MstoreGeneral(MemOp::SDC1, rs, rt, offset)),
(0b001000, _, _) => Ok(Operation::BinaryArithmeticImm(
arithmetic::BinaryOperator::ADDI,
rs,
rt,
offset,
)), // ADDI: rt = rs + sext(imm)
(0b001001, _, _) => Ok(Operation::BinaryArithmeticImm(
arithmetic::BinaryOperator::ADDIU,
rs,
rt,
offset,
)), // ADDIU: rt = rs + sext(imm)
(0b001010, _, _) => Ok(Operation::BinaryArithmeticImm(
arithmetic::BinaryOperator::SLTI,
rs,
rt,
offset,
)), // SLTI: rt = rs < sext(imm)
(0b001011, _, _) => Ok(Operation::BinaryArithmeticImm(
arithmetic::BinaryOperator::SLTIU,
rs,
rt,
offset,
)), // SLTIU: rt = rs < sext(imm)
(0b000000, 0b101010, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::SLT,
rs,
rt,
rd,
)), // SLT: rd = rs < rt
(0b000000, 0b101011, _) => Ok(Operation::BinaryArithmetic(
arithmetic::BinaryOperator::SLTU,
rs,
rt,
rd,
)), // SLTU: rd = rs < rt
(0b001111, _, _) => Ok(Operation::BinaryArithmeticImm(
arithmetic::BinaryOperator::LUI,
rs,
rt,
offset,
)), // LUI: rt = imm << 16
(0b000000, 0b100100, _) => Ok(Operation::BinaryLogic(logic::Op::And, rs, rt, rd)), // AND: rd = rs & rt
(0b000000, 0b100101, _) => Ok(Operation::BinaryLogic(logic::Op::Or, rs, rt, rd)), // OR: rd = rs | rt
(0b000000, 0b100110, _) => Ok(Operation::BinaryLogic(logic::Op::Xor, rs, rt, rd)), // XOR: rd = rs ^ rt
(0b000000, 0b100111, _) => Ok(Operation::BinaryLogic(logic::Op::Nor, rs, rt, rd)), // NOR: rd = ! rs | rt
(0b001100, _, _) => Ok(Operation::BinaryLogicImm(logic::Op::And, rs, rt, offset)), // ANDI: rt = rs + zext(imm)
(0b001101, _, _) => Ok(Operation::BinaryLogicImm(logic::Op::Or, rs, rt, offset)), // ORI: rt = rs + zext(imm)
(0b001110, _, _) => Ok(Operation::BinaryLogicImm(logic::Op::Xor, rs, rt, offset)), // XORI: rt = rs + zext(imm)
(0b000000, 0b001100, _) => Ok(Operation::Syscall), // Syscall
(0b110011, _, _) => Ok(Operation::Nop), // Pref
(0b011100, 0b000001, _) => Ok(Operation::Maddu(rt, rs)), // rdhwr
(0b011111, 0b000000, _) => Ok(Operation::Ext(rt, rs, rd, sa)), // ext
(0b011111, 0b000100, _) => Ok(Operation::Ins(rt, rs, rd, sa)), // ins
(0b011111, 0b111011, _) => Ok(Operation::Rdhwr(rt, rd)), // rdhwr
(0b011111, 0b100000, _) => {
if sa == 0b011000 {
Ok(Operation::Signext(rd, rt, 16)) // seh
} else if sa == 0b010000 {
Ok(Operation::Signext(rd, rt, 8)) // seb
} else if sa == 0b000010 {
Ok(Operation::SwapHalf(rd, rt)) // wsbh
} else {
log::warn!(
"decode: invalid opcode {:#08b} {:#08b} {:#08b}",
opcode,
func,
sa
);
Err(ProgramError::InvalidOpcode)
}
}
(0b000000, 0b110100, _) => Ok(Operation::Teq(rs, rt)), // teq
_ => {
log::warn!("decode: invalid opcode {:#08b} {:#08b}", opcode, func);
Err(ProgramError::InvalidOpcode)
}
}
}
fn fill_op_flag<F: Field>(op: Operation, row: &mut CpuColumnsView<F>) {
let flags = &mut row.op;
*match op {
Operation::Syscall => &mut flags.syscall,
Operation::CondMov(MovCond::EQ, _, _, _) => &mut flags.movz_op,
Operation::CondMov(MovCond::NE, _, _, _) => &mut flags.movn_op,
Operation::Count(false, _, _) => &mut flags.clz_op,
Operation::Count(true, _, _) => &mut flags.clo_op,
Operation::BinaryLogic(_, _, _, _) => &mut flags.logic_op,
Operation::BinaryLogicImm(_, _, _, _) => &mut flags.logic_imm_op,
Operation::BinaryArithmetic(arithmetic::BinaryOperator::SLL, ..)
| Operation::BinaryArithmetic(arithmetic::BinaryOperator::SRL, ..)
| Operation::BinaryArithmetic(arithmetic::BinaryOperator::SRA, ..) => &mut flags.shift_imm,
Operation::BinaryArithmetic(arithmetic::BinaryOperator::SLLV, ..)
| Operation::BinaryArithmetic(arithmetic::BinaryOperator::SRLV, ..)
| Operation::BinaryArithmetic(arithmetic::BinaryOperator::SRAV, ..) => &mut flags.shift,
Operation::BinaryArithmetic(..) => &mut flags.binary_op,
Operation::BinaryArithmeticImm(..) => &mut flags.binary_imm_op,
Operation::KeccakGeneral => &mut flags.keccak_general,
Operation::Jump(_, _) => &mut flags.jumps,
Operation::Jumpi(_, _) => &mut flags.jumpi,
Operation::JumpDirect(_, _) => &mut flags.jumpdirect,
Operation::Branch(_, _, _, _) => &mut flags.branch,
Operation::Pc => &mut flags.pc,
Operation::GetContext => &mut flags.get_context,
Operation::SetContext => &mut flags.set_context,
Operation::MloadGeneral(..) => &mut flags.m_op_load,
Operation::MstoreGeneral(..) => &mut flags.m_op_store,
Operation::Nop => &mut flags.nop,
Operation::Ext(_, _, _, _) => &mut flags.ext,
Operation::Ins(_, _, _, _) => &mut flags.ins,
Operation::Maddu(_, _) => &mut flags.maddu,
Operation::Ror(_, _, _) => &mut flags.ror,
Operation::Rdhwr(_, _) => &mut flags.rdhwr,
Operation::Signext(_, _, 8u8) => &mut flags.signext8,
Operation::Signext(_, _, _) => &mut flags.signext16,
Operation::SwapHalf(_, _) => &mut flags.swaphalf,
Operation::Teq(_, _) => &mut flags.teq,
} = F::ONE;
}
fn perform_op<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
state: &mut GenerationState<F, C, D>,
op: Operation,
row: CpuColumnsView<F>,
kernel: &Kernel,
) -> Result<(), ProgramError> {
log::trace!("perform_op {:?}", op);
match op {
Operation::Syscall => generate_syscall(state, row, kernel)?,
Operation::CondMov(cond, rs, rt, rd) => generate_cond_mov_op(cond, rs, rt, rd, state, row)?,
Operation::Count(is_clo, rs, rd) => generate_count_op(is_clo, rs, rd, state, row)?,
Operation::BinaryLogic(binary_logic_op, rs, rt, rd) => {
generate_binary_logic_op(binary_logic_op, rs, rt, rd, state, row)?
}
Operation::BinaryArithmetic(arithmetic::BinaryOperator::MULT, rs, rt, rd) => {
generate_binary_arithmetic_hilo_op(
arithmetic::BinaryOperator::MULT,
rs,
rt,
rd,
state,
row,
)?
}
Operation::BinaryArithmetic(arithmetic::BinaryOperator::MULTU, rs, rt, rd) => {
generate_binary_arithmetic_hilo_op(
arithmetic::BinaryOperator::MULTU,
rs,
rt,
rd,
state,
row,
)?
}
Operation::BinaryArithmetic(arithmetic::BinaryOperator::DIV, rs, rt, rd) => {
generate_binary_arithmetic_hilo_op(
arithmetic::BinaryOperator::DIV,
rs,
rt,
rd,
state,
row,
)?
}
Operation::BinaryArithmetic(arithmetic::BinaryOperator::DIVU, rs, rt, rd) => {
generate_binary_arithmetic_hilo_op(
arithmetic::BinaryOperator::DIVU,
rs,
rt,
rd,
state,
row,
)?
}
Operation::BinaryLogicImm(binary_logic_op, rs, rd, imm) => {
generate_binary_logic_imm_op(binary_logic_op, rs, rd, imm, state, row)?
}
Operation::BinaryArithmetic(arithmetic::BinaryOperator::SLL, sa, rt, rd) => {
generate_shift_imm(arithmetic::BinaryOperator::SLL, sa, rt, rd, state, row)?
}
Operation::BinaryArithmetic(arithmetic::BinaryOperator::SRL, sa, rt, rd) => {
generate_shift_imm(arithmetic::BinaryOperator::SRL, sa, rt, rd, state, row)?
}
Operation::BinaryArithmetic(arithmetic::BinaryOperator::SRA, sa, rt, rd) => {
generate_shift_imm(arithmetic::BinaryOperator::SRA, sa, rt, rd, state, row)?
}
Operation::BinaryArithmetic(arithmetic::BinaryOperator::SLLV, rs, rt, rd) => {
generate_sllv(rs, rt, rd, state, row)?
}
Operation::BinaryArithmetic(arithmetic::BinaryOperator::SRLV, rs, rt, rd) => {
generate_srlv(rs, rt, rd, state, row)?
}
Operation::BinaryArithmetic(arithmetic::BinaryOperator::SRAV, rs, rt, rd) => {
generate_srav(rs, rt, rd, state, row)?
}
Operation::BinaryArithmetic(op, rs, rt, rd) => {
generate_binary_arithmetic_op(op, rs, rt, rd, state, row)?
}
Operation::BinaryArithmeticImm(arithmetic::BinaryOperator::LUI, rs, rt, imm) => {
generate_lui(rs, rt, imm, state, row)?
}
Operation::BinaryArithmeticImm(op, rs, rt, imm) => {
generate_binary_arithmetic_imm_op(rs, rt, imm, op, state, row)?
}
Operation::KeccakGeneral => generate_keccak_general(state, row)?,
Operation::Jump(link, target) => generate_jump(link, target, state, row)?,
Operation::Jumpi(link, target) => generate_jumpi(link, target, state, row)?,
Operation::JumpDirect(link, target) => generate_jumpdirect(link, target, state, row)?,
Operation::Branch(cond, input1, input2, target) => {
generate_branch(cond, input1, input2, target, state, row)?
}
Operation::Pc => generate_pc(state, row)?,
Operation::MloadGeneral(op, base, rt, offset) => {
generate_mload_general(op, base, rt, offset, state, row)?
}
Operation::MstoreGeneral(op, base, rt, offset) => {
generate_mstore_general(op, base, rt, offset, state, row)?
}
Operation::GetContext => generate_get_context(state, row)?,
Operation::SetContext => generate_set_context(state, row)?,
Operation::Nop => generate_nop(state, row)?,
Operation::Ext(rt, rs, msbd, lsb) => generate_extract(rt, rs, msbd, lsb, state, row)?,
Operation::Ins(rt, rs, msb, lsb) => generate_insert(rt, rs, msb, lsb, state, row)?,
Operation::Maddu(rt, rs) => generate_maddu(rt, rs, state, row)?,
Operation::Ror(rd, rt, sa) => generate_ror(rd, rt, sa, state, row)?,
Operation::Rdhwr(rt, rd) => generate_rdhwr(rt, rd, state, row)?,
Operation::Signext(rd, rt, bits) => generate_signext(rd, rt, bits, state, row)?,
Operation::SwapHalf(rd, rt) => generate_swaphalf(rd, rt, state, row)?,
Operation::Teq(rs, rt) => generate_teq(rs, rt, state, row)?,
};
match op {
Operation::Jump(_, _)
| Operation::Jumpi(_, _)
| Operation::JumpDirect(_, _)
| Operation::Branch(_, _, _, _) => {
// Do nothing
}
_ => {
state.registers.program_counter = state.registers.next_pc;
state.registers.next_pc += 4;
}
};
match op {
Operation::Jump(_, _)
| Operation::Jumpi(_, _)
| Operation::JumpDirect(_, _)
| Operation::Branch(_, _, _, _) => {
log::trace!(
"states: pc {} registers: {:?}",
state.registers.program_counter,
state.registers.gprs
);
}
Operation::Syscall => {
log::trace!(
"states: pc {} registers: {:?}",
state.registers.program_counter + 4,
state.registers.gprs
);
}
_ => (),
};
Ok(())
}
/// Row that has the correct values for system registers and the code channel, but is otherwise
/// blank. It fulfills the constraints that are common to successful operations and the exception
/// operation. It also returns the opcode.
fn base_row<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
state: &mut GenerationState<F, C, D>,
) -> (CpuColumnsView<F>, u32) {
let mut row: CpuColumnsView<F> = CpuColumnsView::default();
row.clock = F::from_canonical_usize(state.traces.clock());
row.context = F::from_canonical_usize(state.registers.context);
row.program_counter = F::from_canonical_usize(state.registers.program_counter);
row.next_program_counter = F::from_canonical_usize(state.registers.next_pc);
row.is_kernel_mode = F::from_bool(state.registers.is_kernel);
let opcode = read_code_memory(state, &mut row);
(row, opcode)
}
fn try_perform_instruction<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
state: &mut GenerationState<F, C, D>,
kernel: &Kernel,
) -> Result<(), ProgramError> {
let (mut row, opcode) = base_row(state);
let op = decode(state.registers, opcode)?;
if state.registers.is_kernel {
log_kernel_instruction(state, op, kernel);
} else {
log::trace!("user instruction: {:?}", op);
}
fill_op_flag(op, &mut row);
/*
if state.registers.is_kernel {
row.stack_len_bounds_aux = F::ZERO;
} else {
let disallowed_len = F::from_canonical_usize(MAX_USER_STACK_SIZE + 1);
let diff = row.stack_len - disallowed_len;
if let Some(inv) = diff.try_inverse() {
row.stack_len_bounds_aux = inv;
} else {
// This is a stack overflow that should have been caught earlier.
return Err(ProgramError::InterpreterError);
}
}
*/
perform_op(state, op, row, kernel)
}
fn log_kernel_instruction<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
state: &GenerationState<F, C, D>,
op: Operation,
kernel: &Kernel,
) {
// The logic below is a bit costly, so skip it if debug logs aren't enabled.
if !log_enabled!(log::Level::Debug) {
return;
}
let pc = state.registers.program_counter;
let is_interesting_offset = kernel
.offset_label(pc)
.filter(|label| !label.starts_with("halt"))
.is_some();
let level = if is_interesting_offset {
log::Level::Debug
} else {
log::Level::Trace
};
log::log!(
level,
"Cycle {}, ctx={}, pc={}, instruction={:?}, stack={:?}",
state.traces.clock(),
state.registers.context,
kernel.offset_name(pc),
op,
//state.stack(),
0,
);
//assert!(pc < KERNEL.program.image.len(), "Kernel PC is out of range: {}", pc);
}
fn handle_error<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
state: &mut GenerationState<F, C, D>,
err: ProgramError,
) -> anyhow::Result<()> {
let exc_code: u8 = match err {
ProgramError::OutOfGas => 0,
ProgramError::InvalidOpcode => 1,
ProgramError::StackUnderflow => 2,
ProgramError::InvalidJumpDestination => 3,
ProgramError::InvalidJumpiDestination => 4,
ProgramError::StackOverflow => 5,
_ => bail!("TODO: figure out what to do with this..."),
};
log::debug!("handle_error: {:?}", exc_code);
let checkpoint = state.checkpoint();
state
.memory
.apply_ops(state.traces.mem_ops_since(checkpoint.traces));
Ok(())
}
pub(crate) fn transition<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
state: &mut GenerationState<F, C, D>,
kernel: &Kernel,
) -> anyhow::Result<()> {
let checkpoint = state.checkpoint();
let result = try_perform_instruction(state, kernel);
match result {
Ok(()) => {
state
.memory
.apply_ops(state.traces.mem_ops_since(checkpoint.traces));
Ok(())
}
Err(e) => {
if state.registers.is_kernel {
let offset_name = kernel.offset_name(state.registers.program_counter);
bail!(
"{:?} in kernel at pc={}, stack={:?}, memory={:?}",
e,
offset_name,
//state.stack(),
0,
state.memory.contexts[0].segments[Segment::KernelGeneral as usize].content,
);
}
state.rollback(checkpoint);
handle_error(state, e)
}
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/witness/util.rs | prover/src/witness/util.rs | use byteorder::ByteOrder;
use byteorder::LittleEndian;
use itertools::Itertools;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::keccak_util::keccakf_u8s;
use crate::cpu::membus::NUM_CHANNELS;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::generation::state::GenerationState;
use crate::keccak_sponge::columns::KECCAK_RATE_BYTES;
use crate::keccak_sponge::columns::KECCAK_WIDTH_BYTES;
use crate::keccak_sponge::keccak_sponge_stark::KeccakSpongeOp;
use crate::logic;
use crate::memory::segments::Segment;
use crate::poseidon::constants::{SPONGE_RATE, SPONGE_WIDTH};
use crate::poseidon::poseidon_stark::poseidon_with_witness;
use crate::poseidon_sponge::columns::POSEIDON_RATE_BYTES;
use crate::poseidon_sponge::poseidon_sponge_stark::PoseidonSpongeOp;
use crate::sha_compress_sponge::constants::SHA_COMPRESS_K_LE_BYTES;
use crate::sha_compress_sponge::sha_compress_sponge_stark::ShaCompressSpongeOp;
use crate::sha_extend_sponge::sha_extend_sponge_stark::ShaExtendSpongeOp;
use crate::witness::errors::ProgramError;
use crate::witness::memory::{MemoryAddress, MemoryChannel, MemoryOp, MemoryOpKind};
use plonky2::field::extension::Extendable;
use plonky2::plonk::config::GenericConfig;
fn to_byte_checked(n: u32) -> u8 {
let res: u8 = n.to_le_bytes()[0];
assert_eq!(n as u8, res);
res
}
fn to_bits_le<F: Field, const N: usize>(n: u8) -> [F; N] {
let mut res = [F::ZERO; N];
for (i, bit) in res.iter_mut().enumerate() {
*bit = F::from_bool(n & (1 << i) != 0);
}
res
}
fn to_bits32_le<F: Field>(n: u32) -> [F; 32] {
let mut res = [F::ZERO; 32];
for (i, bit) in res.iter_mut().enumerate() {
*bit = F::from_bool(n & (1 << i) != 0);
}
res
}
pub(crate) fn fill_channel_with_value<F: Field>(row: &mut CpuColumnsView<F>, n: usize, val: u32) {
let channel = &mut row.mem_channels[n];
channel.value = F::from_canonical_u32(val);
}
pub(crate) fn mem_read_code_with_log_and_fill<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
address: MemoryAddress,
state: &GenerationState<F, C, D>,
row: &mut CpuColumnsView<F>,
) -> (u32, MemoryOp) {
let (val, op) = mem_read_with_log(MemoryChannel::Code, address, state);
let val_func = to_byte_checked(val & 0x3F);
let val_shamt = to_byte_checked((val >> 6) & 0x1F);
let val_rd = to_byte_checked((val >> 11) & 0x1F);
let val_rt = to_byte_checked((val >> 16) & 0x1F);
let val_rs = to_byte_checked((val >> 21) & 0x1F);
let val_op = to_byte_checked(val >> 26);
row.opcode_bits = to_bits_le::<F, 6>(val_op);
row.func_bits = to_bits_le::<F, 6>(val_func);
row.rs_bits = to_bits_le::<F, 5>(val_rs);
row.rt_bits = to_bits_le::<F, 5>(val_rt);
row.rd_bits = to_bits_le::<F, 5>(val_rd);
row.shamt_bits = to_bits_le::<F, 5>(val_shamt);
/*
// FIXME: hold last channel for code read
*/
let channel = &mut row.mem_channels[NUM_GP_CHANNELS - 1];
assert_eq!(channel.used, F::ZERO);
channel.used = F::ONE;
channel.is_read = F::ONE;
channel.addr_context = F::from_canonical_usize(address.context);
channel.addr_segment = F::from_canonical_usize(address.segment);
channel.addr_virtual = F::from_canonical_usize(address.virt);
channel.value = F::from_canonical_u32(val);
(val, op)
}
pub(crate) fn sign_extend<const N: usize>(value: u32) -> u32 {
let is_signed = (value >> (N - 1)) != 0;
let signed = ((1 << (32 - N)) - 1) << N;
let mask = (1 << N) - 1;
if is_signed {
value & mask | signed
} else {
value & mask
}
}
pub(crate) fn reg_read_with_log<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
index: u8,
channel: usize,
state: &GenerationState<F, C, D>,
row: &mut CpuColumnsView<F>,
) -> Result<(usize, MemoryOp), ProgramError> {
let result = {
if index < 32 {
state.registers.gprs[index as usize]
} else if index == 32 {
state.registers.lo
} else if index == 33 {
state.registers.hi
} else if index == 34 {
state.registers.heap
} else if index == 35 {
state.registers.program_counter
} else if index == 36 {
state.registers.next_pc
} else if index == 37 {
state.registers.brk
} else if index == 38 {
state.registers.local_user
} else {
return Err(ProgramError::InvalidRegister);
}
};
log::trace!("read reg {} : {:X}({})", index, result, result);
let address = MemoryAddress::new(0, Segment::RegisterFile, index as usize);
let op = MemoryOp::new(
MemoryChannel::GeneralPurpose(channel),
state.traces.clock(),
address,
MemoryOpKind::Read,
result as u32,
);
let channel = &mut row.mem_channels[channel];
assert_eq!(channel.used, F::ZERO);
channel.used = F::ONE;
channel.is_read = F::ONE;
channel.addr_context = F::from_canonical_usize(address.context);
channel.addr_segment = F::from_canonical_usize(address.segment);
channel.addr_virtual = F::from_canonical_usize(address.virt);
channel.value = F::from_canonical_u32(result as u32);
Ok((result, op))
}
pub(crate) fn reg_write_with_log<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
index: u8,
channel: usize,
value: usize,
state: &mut GenerationState<F, C, D>,
row: &mut CpuColumnsView<F>,
) -> Result<MemoryOp, ProgramError> {
if index == 0 {
// Ignore write to r0
} else if index < 32 {
state.registers.gprs[index as usize] = value;
} else if index == 32 {
state.registers.lo = value;
} else if index == 33 {
state.registers.hi = value;
} else if index == 34 {
state.registers.heap = value;
} else if index == 35 {
state.registers.program_counter = value;
} else if index == 36 {
state.registers.next_pc = value;
} else if index == 37 {
state.registers.brk = value;
} else if index == 38 {
state.registers.local_user = value;
} else {
return Err(ProgramError::InvalidRegister);
}
log::trace!("write reg {} : {:X} ({})", index, value, value);
let address = MemoryAddress::new(0, Segment::RegisterFile, index as usize);
// trick: skip 0 register check since we can write anything in, but read 0 out only.
let mut used = F::ONE;
let mut filter = true;
if index == 0 {
used = F::ZERO;
filter = false;
}
let mut op = MemoryOp::new(
MemoryChannel::GeneralPurpose(channel),
state.traces.clock(),
address,
MemoryOpKind::Write,
value as u32,
);
op.filter = filter;
let channel = &mut row.mem_channels[channel];
assert_eq!(channel.used, F::ZERO);
channel.used = used;
channel.is_read = F::ZERO;
channel.addr_context = F::from_canonical_usize(address.context);
channel.addr_segment = F::from_canonical_usize(address.segment);
channel.addr_virtual = F::from_canonical_usize(address.virt);
channel.value = F::from_canonical_u32(value as u32);
Ok(op)
}
pub(crate) fn reg_zero_write_with_log<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
channel: usize,
value: usize,
state: &mut GenerationState<F, C, D>,
row: &mut CpuColumnsView<F>,
) -> MemoryOp {
let address = MemoryAddress::new(0, Segment::RegisterFile, 0);
let mut op = MemoryOp::new(
MemoryChannel::GeneralPurpose(channel),
state.traces.clock(),
address,
MemoryOpKind::Write,
value as u32,
);
op.filter = false;
let channel = &mut row.mem_channels[channel];
assert_eq!(channel.used, F::ZERO);
channel.used = F::ZERO;
channel.is_read = F::ZERO;
channel.addr_context = F::from_canonical_usize(address.context);
channel.addr_segment = F::from_canonical_usize(address.segment);
channel.addr_virtual = F::from_canonical_usize(address.virt);
channel.value = F::from_canonical_u32(value as u32);
op
}
pub(crate) fn mem_read_with_log<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
channel: MemoryChannel,
address: MemoryAddress,
state: &GenerationState<F, C, D>,
) -> (u32, MemoryOp) {
let val = state.memory.get(address).to_be();
let op = MemoryOp::new(
channel,
state.traces.clock(),
address,
MemoryOpKind::Read,
val,
);
(val, op)
}
/// Pushes without writing in memory. This happens in opcodes where a push immediately follows a pop.
/// The pushed value may be loaded in a memory channel, without creating a memory operation.
pub(crate) fn push_no_write<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
_state: &mut GenerationState<F, C, D>,
row: &mut CpuColumnsView<F>,
val: u32,
channel_opt: Option<usize>,
) {
if let Some(channel) = channel_opt {
// let val_limbs: [u64; 4] = val.0;
let channel = &mut row.mem_channels[channel];
assert_eq!(channel.used, F::ZERO);
channel.used = F::ZERO;
channel.is_read = F::ZERO;
channel.addr_context = F::from_canonical_usize(0);
channel.addr_segment = F::from_canonical_usize(0);
channel.addr_virtual = F::from_canonical_usize(0);
channel.value = F::from_canonical_u32(val);
}
}
/// Reads memory through general-purpose channel `n`, fills that channel's
/// columns in `row` with the address and value, and returns the value together
/// with the (not yet recorded) read `MemoryOp`.
pub(crate) fn mem_read_gp_with_log_and_fill<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    n: usize,
    address: MemoryAddress,
    state: &GenerationState<F, C, D>,
    row: &mut CpuColumnsView<F>,
) -> (u32, MemoryOp) {
    let (value, read_op) = mem_read_with_log(MemoryChannel::GeneralPurpose(n), address, state);
    let ch = &mut row.mem_channels[n];
    // A channel may only be claimed once per row.
    assert_eq!(ch.used, F::ZERO);
    ch.used = F::ONE;
    ch.is_read = F::ONE;
    ch.addr_context = F::from_canonical_usize(address.context);
    ch.addr_segment = F::from_canonical_usize(address.segment);
    ch.addr_virtual = F::from_canonical_usize(address.virt);
    ch.value = F::from_canonical_u32(value);
    (value, read_op)
}
/// Logs a write of `val` at `address` through general-purpose channel `n`,
/// fills that channel's columns in `row`, and returns the (not yet recorded)
/// write `MemoryOp`.
pub(crate) fn mem_write_gp_log_and_fill<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    n: usize,
    address: MemoryAddress,
    state: &GenerationState<F, C, D>,
    row: &mut CpuColumnsView<F>,
    val: u32, // LE
) -> MemoryOp {
    let write_op = mem_write_log(MemoryChannel::GeneralPurpose(n), address, state, val);
    let ch = &mut row.mem_channels[n];
    // A channel may only be claimed once per row.
    assert_eq!(ch.used, F::ZERO);
    ch.used = F::ONE;
    ch.is_read = F::ZERO;
    ch.addr_context = F::from_canonical_usize(address.context);
    ch.addr_segment = F::from_canonical_usize(address.segment);
    ch.addr_virtual = F::from_canonical_usize(address.virt);
    ch.value = F::from_canonical_u32(val);
    write_op
}
/// Builds (but does not record) a write `MemoryOp` for `val` at `address` on
/// the given channel, stamped with the current clock.
pub(crate) fn mem_write_log<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    channel: MemoryChannel,
    address: MemoryAddress,
    state: &GenerationState<F, C, D>,
    val: u32, // LE
) -> MemoryOp {
    let clock = state.traces.clock();
    MemoryOp::new(channel, clock, address, MemoryOpKind::Write, val)
}
/// Logs the memory reads and Poseidon permutations performed while absorbing
/// `input` into a Poseidon sponge, then records the sponge operation itself.
///
/// `base_address` holds one address per 4-byte word of `input`. Every absorbed
/// byte pushes one read of its containing (4-byte-aligned) word, rotating over
/// general-purpose channels `0..NUM_GP_CHANNELS - 1`.
pub(crate) fn poseidon_sponge_log<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    state: &mut GenerationState<F, C, D>,
    base_address: Vec<MemoryAddress>,
    input: Vec<u8>, // BE
) {
    let clock = state.traces.clock();
    let mut absorbed_bytes = 0;
    let mut input_blocks = input.chunks_exact(POSEIDON_RATE_BYTES);
    let mut poseidon_state = [F::ZEROS; SPONGE_WIDTH];
    // Since the poseidon reads byte by byte, and the memory unit is 4 bytes, we just read
    // the same memory word once per absorbed byte (4 reads per word).
    let mut n_gp = 0;
    // Full rate-sized blocks first.
    for block in input_blocks.by_ref() {
        for i in 0..block.len() {
            //for &byte in block {
            let align = (i / 4) * 4;
            // todo: LittleEndian::read_u32
            let val = u32::from_le_bytes(block[align..(align + 4)].try_into().unwrap());
            let addr_idx = absorbed_bytes / 4;
            state.traces.push_memory(MemoryOp::new(
                MemoryChannel::GeneralPurpose(n_gp),
                clock,
                base_address[addr_idx],
                MemoryOpKind::Read,
                val.to_be(),
            ));
            // Rotate over all GP channels except the last.
            n_gp += 1;
            n_gp %= NUM_GP_CHANNELS - 1;
            absorbed_bytes += 1;
        }
        // Overwrite the rate part of the state with this block, then log the
        // permutation input and advance the sponge.
        let rate_f = (0..POSEIDON_RATE_BYTES)
            .step_by(4)
            .map(|i| F::from_canonical_u32(LittleEndian::read_u32(&block[i..i + 4])))
            .collect_vec();
        poseidon_state[..SPONGE_RATE].copy_from_slice(&rate_f);
        state
            .traces
            .push_poseidon(poseidon_state, clock * NUM_CHANNELS);
        (poseidon_state, _) = poseidon_with_witness(&poseidon_state);
    }
    let rem = input_blocks.remainder();
    // patch data to match sponge logic (pad10*1): a 1 byte after the data and
    // the high bit set in the final byte of the rate.
    let mut rem_data = [0u8; POSEIDON_RATE_BYTES];
    rem_data[0..rem.len()].copy_from_slice(&rem[0..rem.len()]);
    rem_data[rem.len()] = 1;
    rem_data[POSEIDON_RATE_BYTES - 1] |= 0b10000000;
    // Only the first `rem.len()` bytes correspond to real input words, so only
    // they generate memory reads; padding bytes are not read from memory.
    for i in 0..rem.len() {
        let align = (i / 4) * 4;
        let val = u32::from_le_bytes(rem_data[align..align + 4].try_into().unwrap());
        let addr_idx = absorbed_bytes / 4;
        state.traces.push_memory(MemoryOp::new(
            MemoryChannel::GeneralPurpose(n_gp),
            clock,
            base_address[addr_idx],
            MemoryOpKind::Read,
            val.to_be(),
        ));
        n_gp += 1;
        n_gp %= NUM_GP_CHANNELS - 1;
        absorbed_bytes += 1;
    }
    // NOTE(review): `final_block` re-applies the same pad10*1 padding as
    // `rem_data` above (in the `rem.len() == rate - 1` case both place
    // 0b10000001 in the last byte); the two appear to agree, but the
    // duplication is worth consolidating.
    let mut final_block = [0u8; POSEIDON_RATE_BYTES];
    final_block[..input_blocks.remainder().len()].copy_from_slice(input_blocks.remainder());
    // pad10*1 rule
    if input_blocks.remainder().len() == POSEIDON_RATE_BYTES - 1 {
        // Both 1s are placed in the same byte.
        final_block[input_blocks.remainder().len()] = 0b10000001;
    } else {
        final_block[input_blocks.remainder().len()] = 1;
        final_block[POSEIDON_RATE_BYTES - 1] = 0b10000000;
    }
    let rate_f = (0..POSEIDON_RATE_BYTES)
        .step_by(4)
        .map(|i| F::from_canonical_u32(LittleEndian::read_u32(&final_block[i..i + 4])))
        .collect_vec();
    poseidon_state[..SPONGE_RATE].copy_from_slice(&rate_f);
    state
        .traces
        .push_poseidon(poseidon_state, clock * NUM_CHANNELS);
    //FIXME: how to setup the base address
    state.traces.push_poseidon_sponge(PoseidonSpongeOp {
        base_address,
        timestamp: clock * NUM_CHANNELS,
        input,
    });
}
/// Logs the memory reads and Keccak-f permutations performed while absorbing
/// `input` into a Keccak sponge, then records the sponge operation itself.
///
/// Mirrors `poseidon_sponge_log`: one read per absorbed byte of the containing
/// 4-byte word, rotating over GP channels `0..NUM_GP_CHANNELS - 1`.
pub(crate) fn keccak_sponge_log<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    state: &mut GenerationState<F, C, D>,
    base_address: Vec<MemoryAddress>,
    input: Vec<u8>, // BE
) {
    let clock = state.traces.clock();
    let mut absorbed_bytes = 0;
    let mut input_blocks = input.chunks_exact(KECCAK_RATE_BYTES);
    let mut sponge_state = [0u8; KECCAK_WIDTH_BYTES];
    // Since the keccak reads byte by byte, and the memory unit is 4 bytes, we just read
    // the same memory word once per absorbed byte (4 reads per word).
    let mut n_gp = 0;
    // Full rate-sized blocks first.
    for block in input_blocks.by_ref() {
        for i in 0..block.len() {
            //for &byte in block {
            let align = (i / 4) * 4;
            let val = u32::from_le_bytes(block[align..(align + 4)].try_into().unwrap());
            let addr_idx = absorbed_bytes / 4;
            state.traces.push_memory(MemoryOp::new(
                MemoryChannel::GeneralPurpose(n_gp),
                clock,
                base_address[addr_idx],
                MemoryOpKind::Read,
                val.to_be(),
            ));
            // Rotate over all GP channels except the last.
            n_gp += 1;
            n_gp %= NUM_GP_CHANNELS - 1;
            absorbed_bytes += 1;
        }
        // Absorb the block (logging the XORs), record the permutation input,
        // and run Keccak-f.
        xor_into_sponge(state, &mut sponge_state, block.try_into().unwrap());
        state
            .traces
            .push_keccak_bytes(sponge_state, clock * NUM_CHANNELS);
        keccakf_u8s(&mut sponge_state);
    }
    let rem = input_blocks.remainder();
    // patch data to match sponge logic (pad10*1): a 1 byte after the data and
    // the high bit set in the final byte of the rate.
    let mut rem_data = [0u8; KECCAK_RATE_BYTES];
    rem_data[0..rem.len()].copy_from_slice(&rem[0..rem.len()]);
    rem_data[rem.len()] = 1;
    rem_data[KECCAK_RATE_BYTES - 1] |= 0b10000000;
    // Only the first `rem.len()` bytes correspond to real input words, so only
    // they generate memory reads; padding bytes are not read from memory.
    for i in 0..rem.len() {
        let align = (i / 4) * 4;
        let val = u32::from_le_bytes(rem_data[align..align + 4].try_into().unwrap());
        let addr_idx = absorbed_bytes / 4;
        state.traces.push_memory(MemoryOp::new(
            MemoryChannel::GeneralPurpose(n_gp),
            clock,
            base_address[addr_idx],
            MemoryOpKind::Read,
            val.to_be(),
        ));
        n_gp += 1;
        n_gp %= NUM_GP_CHANNELS - 1;
        absorbed_bytes += 1;
    }
    // NOTE(review): `final_block` re-applies the same pad10*1 padding as
    // `rem_data` above; the two appear to agree, but the duplication is worth
    // consolidating.
    let mut final_block = [0u8; KECCAK_RATE_BYTES];
    final_block[..input_blocks.remainder().len()].copy_from_slice(input_blocks.remainder());
    // pad10*1 rule
    if input_blocks.remainder().len() == KECCAK_RATE_BYTES - 1 {
        // Both 1s are placed in the same byte.
        final_block[input_blocks.remainder().len()] = 0b10000001;
    } else {
        final_block[input_blocks.remainder().len()] = 1;
        final_block[KECCAK_RATE_BYTES - 1] = 0b10000000;
    }
    xor_into_sponge(state, &mut sponge_state, &final_block);
    state
        .traces
        .push_keccak_bytes(sponge_state, clock * NUM_CHANNELS);
    //FIXME: how to setup the base address
    state.traces.push_keccak_sponge(KeccakSpongeOp {
        base_address,
        timestamp: clock * NUM_CHANNELS,
        input,
    });
}
/// Logs the memory reads for one SHA-extend step and records both the extend
/// operation and its sponge wrapper.
///
/// `inputs` are the little-endian 4-byte words read at `base_address` (one
/// address per word); `output_address` is where the extended word is written,
/// and `round` is the extend round index.
pub(crate) fn sha_extend_sponge_log<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    state: &mut GenerationState<F, C, D>,
    base_address: Vec<MemoryAddress>,
    inputs: Vec<[u8; 4]>, // le bytes
    output_address: MemoryAddress,
    round: usize,
) {
    // Since the Sha extend reads byte by byte, and the memory unit is of 4-byte, we just need to read
    // the same memory for 4 sha-extend ops
    let clock = state.traces.clock();
    let mut n_gp = 0;
    // Flattened byte stream of all input words, reused for both trace pushes below.
    let extend_input: Vec<u8> = inputs.iter().flatten().cloned().collect();
    for (addr_idx, input) in inputs.into_iter().enumerate() {
        let val = u32::from_le_bytes(input);
        // Each word is logged 4 times (one read per byte), rotating over all
        // GP channels except the last.
        for _ in 0..4 {
            state.traces.push_memory(MemoryOp::new(
                MemoryChannel::GeneralPurpose(n_gp),
                clock,
                base_address[addr_idx],
                MemoryOpKind::Read,
                val,
            ));
            n_gp += 1;
            n_gp %= NUM_GP_CHANNELS - 1;
        }
    }
    state.traces.push_sha_extend(
        extend_input.clone().try_into().unwrap(),
        clock * NUM_CHANNELS,
    );
    state.traces.push_sha_extend_sponge(ShaExtendSpongeOp {
        base_address,
        timestamp: clock * NUM_CHANNELS,
        input: extend_input,
        i: round,
        output_address,
    });
}
/// Logs the memory reads and per-round compress operations for one SHA-256
/// compression, plus a final (65th) bookkeeping round and the sponge wrapper.
///
/// `hx_values`/`hx_addresses` are the 8 hash words, `w_i_values`/`w_i_addresses`
/// the 64 message-schedule words, and `input_state_list` the intermediate
/// state words before each round (LE bytes throughout).
pub(crate) fn sha_compress_sponge_log<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    state: &mut GenerationState<F, C, D>,
    hx_values: Vec<[u8; 4]>, // LE bytes
    hx_addresses: Vec<MemoryAddress>,
    w_i_values: Vec<[u8; 4]>, // LE bytes
    w_i_addresses: Vec<MemoryAddress>,
    input_state_list: Vec<Vec<[u8; 4]>>, // LE bytes
) {
    // Since the Sha compress reads byte by byte, and the memory unit is of 4-byte, we just need to read
    // the same memory for 4 sha-compress ops
    let clock = state.traces.clock();
    let mut n_gp = 0;
    // read hx as input: each word logged 4 times (one read per byte), rotating
    // over all GP channels except the last.
    for (j, hx) in hx_values.iter().enumerate() {
        let val = u32::from_le_bytes(*hx);
        // let val = 0;
        for _ in 0..4 {
            state.traces.push_memory(MemoryOp::new(
                MemoryChannel::GeneralPurpose(n_gp),
                clock,
                hx_addresses[j],
                MemoryOpKind::Read,
                val,
            ));
            n_gp += 1;
            n_gp %= NUM_GP_CHANNELS - 1;
        }
    }
    for i in 0..64 {
        // read w_i as input (again 4 reads per word)
        let w_i_u32 = u32::from_le_bytes(w_i_values[i]);
        for _ in 0..4 {
            state.traces.push_memory(MemoryOp::new(
                MemoryChannel::GeneralPurpose(n_gp),
                clock,
                w_i_addresses[i],
                MemoryOpKind::Read,
                w_i_u32,
            ));
            n_gp += 1;
            n_gp %= NUM_GP_CHANNELS - 1;
        }
        let w_i = w_i_values[i];
        let k_i = SHA_COMPRESS_K_LE_BYTES[i];
        // Round input layout: state words + w_i (4 bytes) + k_i (4 bytes)
        // + the round index byte = 44 bytes (see debug_assert below).
        let mut compress_input: Vec<u8> = input_state_list[i]
            .iter()
            .chain(&[w_i, k_i])
            .flatten()
            .cloned()
            .collect();
        compress_input.push(i as u8);
        debug_assert_eq!(compress_input.len(), 44);
        state.traces.push_sha_compress(
            compress_input.try_into().unwrap(),
            w_i_addresses[i],
            clock * NUM_CHANNELS,
        );
    }
    // the 65'th round: zeroed k_i/w_i, addressed just past the last w_i word.
    let mut dummy_address = w_i_addresses[63];
    dummy_address.virt += 4;
    let mut compress_input: Vec<u8> = input_state_list[64].iter().flatten().cloned().collect();
    compress_input.extend([0; 8]); // k_i and w_i
    compress_input.push(64);
    state.traces.push_sha_compress(
        compress_input.try_into().unwrap(),
        dummy_address,
        clock * NUM_CHANNELS,
    );
    // Sponge-level record: hx bytes as input, addressed by hx plus the first w_i.
    let compress_sponge_input: Vec<u8> = hx_values.iter().flatten().cloned().collect();
    let mut base_address = hx_addresses.clone();
    base_address.push(w_i_addresses[0]);
    state.traces.push_sha_compress_sponge(ShaCompressSpongeOp {
        base_address,
        timestamp: clock * NUM_CHANNELS,
        input: compress_sponge_input,
        w_i_s: w_i_values,
    });
}
/// Records an XOR of `lhs` and `rhs` in the logic trace.
pub(crate) fn xor_logic_log<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    state: &mut GenerationState<F, C, D>,
    lhs: u32,
    rhs: u32,
) {
    let op = logic::Operation::new(logic::Op::Xor, lhs, rhs);
    state.traces.push_logic(op);
}
/// Records an AND of `lhs` and `rhs` in the logic trace.
pub(crate) fn and_logic_log<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    state: &mut GenerationState<F, C, D>,
    lhs: u32,
    rhs: u32,
) {
    let op = logic::Operation::new(logic::Op::And, lhs, rhs);
    state.traces.push_logic(op);
}
/// XORs `block` into the rate portion of `sponge_state`, logging one 32-bit
/// XOR per word of the rate so the logic table can verify the absorb.
fn xor_into_sponge<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
    state: &mut GenerationState<F, C, D>,
    sponge_state: &mut [u8; KECCAK_WIDTH_BYTES],
    block: &[u8; KECCAK_RATE_BYTES],
) {
    // Log the per-word XORs; the upper bound is clamped to the rate.
    let mut offset = 0;
    while offset < KECCAK_RATE_BYTES {
        let end = KECCAK_RATE_BYTES.min(offset + 4);
        let lhs = LittleEndian::read_u32(&sponge_state[offset..end]);
        let rhs = LittleEndian::read_u32(&block[offset..end]);
        state
            .traces
            .push_logic(logic::Operation::new(logic::Op::Xor, lhs, rhs));
        offset += 4;
    }
    // Apply the absorb to the sponge state itself.
    for (s, b) in sponge_state[..KECCAK_RATE_BYTES].iter_mut().zip(block.iter()) {
        *s ^= b;
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/witness/traces.rs | prover/src/witness/traces.rs | use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::hash::hash_types::RichField;
use plonky2::timed;
use plonky2::util::timing::TimingTree;
use plonky2_maybe_rayon::rayon;
use std::cmp::max;
use crate::all_stark::{AllStark, MIN_TRACE_LEN, NUM_TABLES};
use crate::arithmetic::{BinaryOperator, Operation};
use crate::config::StarkConfig;
use crate::cpu::columns::CpuColumnsView;
use crate::keccak::keccak_stark;
use crate::keccak_sponge;
use crate::keccak_sponge::columns::KECCAK_WIDTH_BYTES;
use crate::keccak_sponge::keccak_sponge_stark::KeccakSpongeOp;
use crate::poseidon::constants::SPONGE_WIDTH;
use crate::poseidon_sponge::columns::POSEIDON_RATE_BYTES;
use crate::poseidon_sponge::poseidon_sponge_stark::PoseidonSpongeOp;
use crate::sha_compress::sha_compress_stark;
use crate::sha_compress_sponge::sha_compress_sponge_stark::ShaCompressSpongeOp;
use crate::sha_extend::sha_extend_stark;
use crate::sha_extend_sponge::sha_extend_sponge_stark::ShaExtendSpongeOp;
use crate::util::join;
use crate::util::trace_rows_to_poly_values;
use crate::witness::memory::{MemoryAddress, MemoryOp};
use crate::{arithmetic, logic};
/// Per-table operation counts (or row estimates) captured at a point in time.
///
/// Used both to report trace sizes (`Traces::get_lengths`) and to roll a
/// `Traces` back to an earlier state (`Traces::checkpoint` / `Traces::rollback`).
#[derive(Clone, Copy, Debug)]
pub struct TraceCheckpoint {
    pub(self) arithmetic_len: usize,
    pub(self) cpu_len: usize,
    pub(self) poseidon_len: usize,
    pub(self) poseidon_sponge_len: usize,
    pub(self) keccak_len: usize,
    pub(self) keccak_sponge_len: usize,
    pub(self) sha_extend_len: usize,
    pub(self) sha_extend_sponge_len: usize,
    pub(self) sha_compress_len: usize,
    pub(self) sha_compress_sponge_len: usize,
    pub(self) logic_len: usize,
    pub(self) memory_len: usize,
}
/// Accumulated witness data for every STARK table, gathered while simulating
/// the program. Converted into polynomial tables by `into_tables`.
#[derive(Debug, Clone)]
pub(crate) struct Traces<T: Copy> {
    pub(crate) arithmetic_ops: Vec<Operation>,
    pub(crate) cpu: Vec<CpuColumnsView<T>>,
    pub(crate) logic_ops: Vec<logic::Operation>,
    pub(crate) memory_ops: Vec<MemoryOp>,
    // Permutation inputs paired with the clock at which they were pushed.
    pub(crate) poseidon_inputs: Vec<([T; SPONGE_WIDTH], usize)>,
    pub(crate) poseidon_sponge_ops: Vec<PoseidonSpongeOp>,
    pub(crate) keccak_inputs: Vec<([u64; keccak_stark::NUM_INPUTS], usize)>,
    pub(crate) keccak_sponge_ops: Vec<KeccakSpongeOp>,
    pub(crate) sha_extend_inputs: Vec<([u8; sha_extend_stark::NUM_INPUTS], usize)>,
    pub(crate) sha_extend_sponge_ops: Vec<ShaExtendSpongeOp>,
    pub(crate) sha_compress_inputs:
        Vec<([u8; sha_compress_stark::NUM_INPUTS], MemoryAddress, usize)>,
    pub(crate) sha_compress_sponge_ops: Vec<ShaCompressSpongeOp>,
}
impl<T: Copy> Traces<T> {
    /// Creates an empty trace set with no recorded operations.
    pub fn new() -> Self {
        Traces {
            arithmetic_ops: vec![],
            cpu: vec![],
            logic_ops: vec![],
            memory_ops: vec![],
            poseidon_inputs: vec![],
            poseidon_sponge_ops: vec![],
            keccak_inputs: vec![],
            keccak_sponge_ops: vec![],
            sha_extend_inputs: vec![],
            sha_extend_sponge_ops: vec![],
            sha_compress_inputs: vec![],
            sha_compress_sponge_ops: vec![],
        }
    }
    /// Returns the actual trace lengths for each STARK module.
    // Uses a `TraceCheckpoint` as return object for convenience.
    pub fn get_lengths(&self) -> TraceCheckpoint {
        TraceCheckpoint {
            // DIV occupies two rows in the arithmetic table; every other
            // binary operation occupies one.
            arithmetic_len: self
                .arithmetic_ops
                .iter()
                .map(|op| match op {
                    Operation::BinaryOperation { operator, .. } => match operator {
                        BinaryOperator::DIV => 2,
                        _ => 1,
                    },
                })
                .sum(),
            cpu_len: self.cpu.len(),
            poseidon_len: self.poseidon_inputs.len(),
            // One row per absorbed rate-sized block, plus one for the padded
            // final block.
            poseidon_sponge_len: self
                .poseidon_sponge_ops
                .iter()
                .map(|op| op.input.len() / POSEIDON_RATE_BYTES + 1)
                .sum(),
            keccak_len: self.keccak_inputs.len() * keccak_stark::NUM_ROUNDS,
            keccak_sponge_len: self
                .keccak_sponge_ops
                .iter()
                .map(|op| op.input.len() / keccak_sponge::columns::KECCAK_RATE_BYTES + 1)
                .sum(),
            sha_extend_len: self.sha_extend_inputs.len(),
            sha_extend_sponge_len: self.sha_extend_sponge_ops.len(),
            sha_compress_len: self.sha_compress_inputs.len(),
            sha_compress_sponge_len: self.sha_compress_sponge_ops.len(),
            logic_len: self.logic_ops.len(),
            // This is technically a lower-bound, as we may fill gaps,
            // but this gives a relatively good estimate.
            memory_len: self.memory_ops.len(),
        }
    }
    /// Returns the number of operations for each STARK module.
    pub fn checkpoint(&self) -> TraceCheckpoint {
        TraceCheckpoint {
            arithmetic_len: self.arithmetic_ops.len(),
            cpu_len: self.cpu.len(),
            poseidon_len: self.poseidon_inputs.len(),
            poseidon_sponge_len: self.poseidon_sponge_ops.len(),
            keccak_len: self.keccak_inputs.len(),
            keccak_sponge_len: self.keccak_sponge_ops.len(),
            sha_extend_len: self.sha_extend_inputs.len(),
            sha_extend_sponge_len: self.sha_extend_sponge_ops.len(),
            sha_compress_len: self.sha_compress_inputs.len(),
            sha_compress_sponge_len: self.sha_compress_sponge_ops.len(),
            logic_len: self.logic_ops.len(),
            memory_len: self.memory_ops.len(),
        }
    }
    /// Truncates every operation list back to the counts captured in
    /// `checkpoint`, discarding everything recorded since.
    pub fn rollback(&mut self, checkpoint: TraceCheckpoint) {
        self.arithmetic_ops.truncate(checkpoint.arithmetic_len);
        self.cpu.truncate(checkpoint.cpu_len);
        self.poseidon_inputs.truncate(checkpoint.poseidon_len);
        self.poseidon_sponge_ops
            .truncate(checkpoint.poseidon_sponge_len);
        self.keccak_inputs.truncate(checkpoint.keccak_len);
        self.keccak_sponge_ops
            .truncate(checkpoint.keccak_sponge_len);
        self.sha_extend_inputs.truncate(checkpoint.sha_extend_len);
        self.sha_extend_sponge_ops
            .truncate(checkpoint.sha_extend_sponge_len);
        self.sha_compress_inputs
            .truncate(checkpoint.sha_compress_len);
        self.sha_compress_sponge_ops
            .truncate(checkpoint.sha_compress_sponge_len);
        self.logic_ops.truncate(checkpoint.logic_len);
        self.memory_ops.truncate(checkpoint.memory_len);
    }
    /// Returns the memory operations recorded since `checkpoint` was taken.
    pub fn mem_ops_since(&self, checkpoint: TraceCheckpoint) -> &[MemoryOp] {
        &self.memory_ops[checkpoint.memory_len..]
    }
    /// Appends one CPU row (this advances `clock` by one).
    pub fn push_cpu(&mut self, val: CpuColumnsView<T>) {
        self.cpu.push(val);
    }
    /// Records a logic-table operation.
    pub fn push_logic(&mut self, op: logic::Operation) {
        self.logic_ops.push(op);
    }
    /// Records an arithmetic-table operation.
    pub fn push_arithmetic(&mut self, op: arithmetic::Operation) {
        self.arithmetic_ops.push(op);
    }
    /// Records a memory operation.
    pub fn push_memory(&mut self, op: MemoryOp) {
        self.memory_ops.push(op);
    }
    /// Records a Poseidon permutation input at the given clock.
    pub fn push_poseidon(&mut self, input: [T; SPONGE_WIDTH], clock: usize) {
        self.poseidon_inputs.push((input, clock));
    }
    /// Records a Poseidon sponge operation.
    pub fn push_poseidon_sponge(&mut self, op: PoseidonSpongeOp) {
        self.poseidon_sponge_ops.push(op);
    }
    /// Records a Keccak permutation input (as 25 u64 lanes) at the given clock.
    pub fn push_keccak(&mut self, input: [u64; keccak_stark::NUM_INPUTS], clock: usize) {
        self.keccak_inputs.push((input, clock));
    }
    /// Records a Keccak permutation input given as bytes, repacking each 8-byte
    /// chunk into a little-endian u64 lane.
    pub fn push_keccak_bytes(&mut self, input: [u8; KECCAK_WIDTH_BYTES], clock: usize) {
        let chunks = input
            .chunks(size_of::<u64>())
            .map(|chunk| u64::from_le_bytes(chunk.try_into().unwrap()))
            .collect_vec()
            .try_into()
            .unwrap();
        self.push_keccak(chunks, clock);
    }
    /// Records a Keccak sponge operation.
    pub fn push_keccak_sponge(&mut self, op: KeccakSpongeOp) {
        self.keccak_sponge_ops.push(op);
    }
    /// Records a SHA-extend operation input at the given clock.
    pub fn push_sha_extend(&mut self, input: [u8; sha_extend_stark::NUM_INPUTS], clock: usize) {
        self.sha_extend_inputs.push((input, clock));
    }
    /// Records a SHA-extend sponge operation.
    pub fn push_sha_extend_sponge(&mut self, op: ShaExtendSpongeOp) {
        self.sha_extend_sponge_ops.push(op);
    }
    /// Records a SHA-compress round input, its associated memory address, and
    /// the clock at which it occurred.
    pub fn push_sha_compress(
        &mut self,
        input: [u8; sha_compress_stark::NUM_INPUTS],
        memory_address: MemoryAddress,
        clock: usize,
    ) {
        self.sha_compress_inputs
            .push((input, memory_address, clock));
    }
    /// Records a SHA-compress sponge operation.
    pub fn push_sha_compress_sponge(&mut self, op: ShaCompressSpongeOp) {
        self.sha_compress_sponge_ops.push(op);
    }
    /// The current clock: one tick per CPU row pushed so far.
    pub fn clock(&self) -> usize {
        self.cpu.len()
    }
    /// Consumes the traces and converts each one into its table of polynomial
    /// values, generating all tables in parallel. The order of the returned
    /// array (arithmetic, cpu, poseidon, poseidon-sponge, keccak,
    /// keccak-sponge, sha-extend, sha-extend-sponge, sha-compress,
    /// sha-compress-sponge, logic, memory) must stay in sync with the
    /// `NUM_TABLES` table indexing used elsewhere.
    pub fn into_tables<const D: usize>(
        self,
        all_stark: &AllStark<T, D>,
        config: &StarkConfig,
        timing: &mut TimingTree,
    ) -> [Vec<PolynomialValues<T>>; NUM_TABLES]
    where
        T: RichField + Extendable<D>,
    {
        // Every table is padded to at least this many rows.
        let cap_elements = config.fri_config.num_cap_elements();
        let min_rows = max(cap_elements, MIN_TRACE_LEN);
        let Traces {
            arithmetic_ops,
            cpu,
            logic_ops,
            mut memory_ops,
            poseidon_inputs,
            poseidon_sponge_ops,
            keccak_inputs,
            keccak_sponge_ops,
            sha_extend_inputs,
            sha_extend_sponge_ops,
            sha_compress_inputs,
            sha_compress_sponge_ops,
        } = self;
        let mut memory_trace = vec![];
        let mut arithmetic_trace = vec![];
        let mut cpu_trace = vec![];
        let mut poseidon_trace = vec![];
        let mut poseidon_sponge_trace = vec![];
        let mut keccak_trace = vec![];
        let mut keccak_sponge_trace = vec![];
        let mut logic_trace = vec![];
        let mut sha_extend_trace = vec![];
        let mut sha_extend_sponge_trace = vec![];
        let mut sha_compress_trace = vec![];
        let mut sha_compress_sponge_trace = vec![];
        timed!(
            timing,
            "convert trace to table parallelly",
            join!(
                || memory_trace = all_stark.memory_stark.generate_trace(&mut memory_ops),
                || arithmetic_trace = all_stark.arithmetic_stark.generate_trace(&arithmetic_ops),
                || cpu_trace =
                    trace_rows_to_poly_values(cpu.into_iter().map(|x| x.into()).collect()),
                || poseidon_trace = all_stark
                    .poseidon_stark
                    .generate_trace(&poseidon_inputs, min_rows),
                || poseidon_sponge_trace = all_stark
                    .poseidon_sponge_stark
                    .generate_trace(&poseidon_sponge_ops, min_rows),
                || keccak_trace = all_stark
                    .keccak_stark
                    .generate_trace(keccak_inputs, min_rows),
                || keccak_sponge_trace = all_stark
                    .keccak_sponge_stark
                    .generate_trace(keccak_sponge_ops, min_rows),
                || sha_extend_trace = all_stark
                    .sha_extend_stark
                    .generate_trace(sha_extend_inputs, min_rows),
                || sha_extend_sponge_trace = all_stark
                    .sha_extend_sponge_stark
                    .generate_trace(sha_extend_sponge_ops, min_rows),
                || sha_compress_trace = all_stark
                    .sha_compress_stark
                    .generate_trace(sha_compress_inputs, min_rows),
                || sha_compress_sponge_trace = all_stark
                    .sha_compress_sponge_stark
                    .generate_trace(sha_compress_sponge_ops, min_rows),
                || logic_trace = all_stark.logic_stark.generate_trace(logic_ops, min_rows),
            )
        );
        [
            arithmetic_trace,
            cpu_trace,
            poseidon_trace,
            poseidon_sponge_trace,
            keccak_trace,
            keccak_sponge_trace,
            sha_extend_trace,
            sha_extend_sponge_trace,
            sha_compress_trace,
            sha_compress_sponge_trace,
            logic_trace,
            memory_trace,
        ]
    }
}
impl<T: Copy> Default for Traces<T> {
fn default() -> Self {
Self::new()
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/witness/mod.rs | prover/src/witness/mod.rs | pub(crate) mod errors;
pub(crate) mod memory;
pub(crate) mod operation;
pub(crate) mod state;
pub(crate) mod traces;
pub(crate) mod transition;
pub(crate) mod util;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/witness/operation.rs | prover/src/witness/operation.rs | use super::util::*;
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::assembler::Kernel;
use crate::generation::state::GenerationState;
use crate::memory::segments::Segment;
use crate::witness::errors::ProgramError;
use crate::witness::memory::MemoryAddress;
use crate::{arithmetic, logic};
use anyhow::{Context, Result};
use plonky2::field::types::Field;
use super::util::keccak_sponge_log;
use crate::keccak_sponge::columns::{KECCAK_RATE_BYTES, KECCAK_RATE_U32S};
use crate::poseidon_sponge::columns::POSEIDON_RATE_BYTES;
use itertools::Itertools;
use keccak_hash::keccak;
use plonky2::field::extension::Extendable;
use plonky2::hash::hash_types::RichField;
use plonky2::plonk::config::GenericConfig;
use std::fs;
pub const WORD_SIZE: usize = core::mem::size_of::<u32>();
/// Comparison used by conditional-branch instructions, evaluated over signed
/// 32-bit operands.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum BranchCond {
    EQ,
    NE,
    GE,
    LE,
    GT,
    LT,
}
impl BranchCond {
    /// Returns whether the branch is taken for `input0` compared to `input1`.
    pub(crate) fn result(&self, input0: i32, input1: i32) -> bool {
        let ord = input0.cmp(&input1);
        match self {
            BranchCond::EQ => ord.is_eq(),
            BranchCond::NE => ord.is_ne(),
            BranchCond::GE => ord.is_ge(),
            BranchCond::LE => ord.is_le(),
            BranchCond::GT => ord.is_gt(),
            BranchCond::LT => ord.is_lt(),
        }
    }
}
/// Condition for conditional-move operations: move when the tested register
/// value is zero (`EQ`) or non-zero (`NE`); see `generate_cond_mov_op`.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum MovCond {
    EQ,
    NE,
}
/// Populates `lv.general.logic().diff_pinv` with the pseudo-inverse witness
/// for the equality check of `val0` and `val1`.
///
/// With `diff = val0 - val1` (as field elements), `diff_pinv` is chosen so
/// that `diff * diff_pinv = 1` when the values differ and `0` when they are
/// equal, letting the logic constraints verify (in)equality.
pub fn generate_pinv_diff<F: Field>(val0: u32, val1: u32, lv: &mut CpuColumnsView<F>) {
    // (Removed an unused `_equal` local that duplicated this information.)
    let num_unequal_limbs = if val0 != val1 { 1 } else { 0 };
    // Form `diff_pinv`.
    // Let `diff = val0 - val1`. Consider `x[i] = diff[i]^-1` if `diff[i] != 0` and 0 otherwise.
    // Then `diff @ x = num_unequal_limbs`, where `@` denotes the dot product. We set
    // `diff_pinv = num_unequal_limbs^-1 * x` if `num_unequal_limbs != 0` and 0 otherwise. We have
    // `diff @ diff_pinv = 1 - equal` as desired.
    let logic = lv.general.logic_mut();
    let num_unequal_limbs_inv = F::from_canonical_usize(num_unequal_limbs)
        .try_inverse()
        .unwrap_or(F::ZERO);
    let val0_f = F::from_canonical_u32(val0);
    let val1_f = F::from_canonical_u32(val1);
    logic.diff_pinv = (val0_f - val1_f).try_inverse().unwrap_or(F::ZERO) * num_unequal_limbs_inv;
}
// Precompile syscall numbers recognized by the zkVM's syscall handler.
pub(crate) const SYSSHAEXTEND: usize = 0x00300105;
pub(crate) const SYSSHACOMPRESS: usize = 0x00010106;
pub(crate) const SYSKECCAK: usize = 0x010109;
// Linux-style syscall numbers (the 4xxx values follow the MIPS O32 convention
// of 4000 + n — NOTE(review): confirm against the target syscall table).
pub(crate) const SYSGETPID: usize = 4020;
pub(crate) const SYSGETGID: usize = 4047;
pub(crate) const SYSMMAP2: usize = 4210;
pub(crate) const SYSMMAP: usize = 4090;
pub(crate) const SYSBRK: usize = 4045;
pub(crate) const SYSCLONE: usize = 4120;
pub(crate) const SYSEXITGROUP: usize = 4246;
pub(crate) const SYSREAD: usize = 4003;
pub(crate) const SYSWRITE: usize = 4004;
pub(crate) const SYSFCNTL: usize = 4055;
pub(crate) const SYSSETTHREADAREA: usize = 4283;
// zkVM-specific hint/verification syscalls.
pub(crate) const SYSHINTLEN: usize = 240;
pub(crate) const SYSHINTREAD: usize = 241;
pub(crate) const SYSVERIFY: usize = 242;
// Well-known file descriptors used by the read/write syscalls.
pub(crate) const FD_STDIN: usize = 0;
pub(crate) const FD_STDOUT: usize = 1;
pub(crate) const FD_STDERR: usize = 2;
pub(crate) const FD_PUBLIC_VALUES: usize = 3;
pub(crate) const FD_HINT: usize = 4;
// `EBADF` (bad file descriptor) errno value.
pub(crate) const MIPSEBADF: usize = 0x9;
/// Memory-access instruction variants, named after the MIPS load/store
/// mnemonics (e.g. `LWL`/`LWR` for unaligned word parts, `LL`/`SC` for
/// load-linked / store-conditional).
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum MemOp {
    LH,
    LWL,
    LW,
    LBU,
    LHU,
    LWR,
    SB,
    SH,
    SWL,
    SW,
    SWR,
    LL,
    SC,
    LB,
    SDC1,
}
/// Decoded instruction forms consumed by the witness generator.
///
/// Tuple fields hold register indices (`u8`) and immediate values (`u32`),
/// matching the argument order of the corresponding `generate_*` functions.
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub(crate) enum Operation {
    Syscall,
    BinaryLogic(logic::Op, u8, u8, u8),
    BinaryLogicImm(logic::Op, u8, u8, u32),
    BinaryArithmetic(arithmetic::BinaryOperator, u8, u8, u8),
    BinaryArithmeticImm(arithmetic::BinaryOperator, u8, u8, u32),
    // Leading-zero/one count; the bool selects the "count leading ones" form.
    Count(bool, u8, u8),
    CondMov(MovCond, u8, u8, u8),
    KeccakGeneral,
    Jump(u8, u8),
    Jumpi(u8, u32),
    Branch(BranchCond, u8, u8, u32),
    JumpDirect(u8, u32),
    Pc,
    GetContext,
    SetContext,
    MloadGeneral(MemOp, u8, u8, u32),
    MstoreGeneral(MemOp, u8, u8, u32),
    Nop,
    Ext(u8, u8, u8, u8),
    Ins(u8, u8, u8, u8),
    Maddu(u8, u8),
    Ror(u8, u8, u8),
    Rdhwr(u8, u8),
    Signext(u8, u8, u8),
    SwapHalf(u8, u8),
    Teq(u8, u8),
}
/// Generates the trace for a conditional move: `rd <- rs` when the condition
/// on `rt`'s value holds (zero for `EQ`, non-zero for `NE`), otherwise `rd`
/// keeps its current value.
///
/// Channels: 0-2 read `rs`/`rt`/`rd`, channel 3 writes the result to `rd`,
/// and channel 4 logs the move flag into register 0 — NOTE(review):
/// presumably auxiliary witness data; confirm against the CPU constraints.
pub(crate) fn generate_cond_mov_op<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    cond: MovCond,
    rs: u8,
    rt: u8,
    rd: u8,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let (in0, log_in0) = reg_read_with_log(rs, 0, state, &mut row)?;
    let (in1, log_in1) = reg_read_with_log(rt, 1, state, &mut row)?;
    let (in2, log_in2) = reg_read_with_log(rd, 2, state, &mut row)?;
    let mov = match cond {
        MovCond::EQ => in1 == 0,
        MovCond::NE => in1 != 0,
    };
    let out = if mov { in0 } else { in2 };
    // Witness for the `in1 == 0` comparison used by the condition.
    generate_pinv_diff(in1 as u32, 0, &mut row);
    let log_out0 = reg_write_with_log(rd, 3, out, state, &mut row)?;
    let log_out1 = reg_write_with_log(0, 4, mov as usize, state, &mut row)?;
    state.traces.push_memory(log_in0);
    state.traces.push_memory(log_in1);
    state.traces.push_memory(log_in2);
    state.traces.push_memory(log_out0);
    state.traces.push_memory(log_out1);
    state.traces.push_cpu(row);
    Ok(())
}
/// Generates the trace for a leading-bit count: `rd` receives the number of
/// leading zeros of `rs` (or of `!rs` when `is_clo` is set, i.e. the number
/// of leading ones).
pub(crate) fn generate_count_op<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    is_clo: bool,
    rs: u8,
    rd: u8,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let (in0, log_in0) = reg_read_with_log(rs, 0, state, &mut row)?;
    // CLO counts leading ones, so invert and count leading zeros instead.
    let in0 = if is_clo { !(in0 as u32) } else { in0 as u32 };
    let out = in0.leading_zeros() as usize;
    let log_out0 = reg_write_with_log(rd, 1, out, state, &mut row)?;
    state.traces.push_memory(log_in0);
    state.traces.push_memory(log_out0);
    // Little-endian bit decomposition of the (possibly inverted) input.
    let bits_le = (0..32)
        .map(|i| {
            let bit = (in0 >> i) & 0x01;
            F::from_canonical_u32(bit)
        })
        .collect_vec();
    row.general.io_mut().rs_le = bits_le.try_into().unwrap();
    // For each i in 31..=1: `in0 >> i == 1` exactly when bit i is the highest
    // set bit (leading-zero count 31 - i); `inv` holds the field inverse of
    // `(in0 >> i) - 1` as a witness that the shifted value differs from 1.
    let mut conds = vec![];
    let mut inv = vec![];
    for i in (0..31).rev() {
        let x = in0 >> i;
        conds.push(F::from_bool(x == 1));
        let b = F::from_canonical_u32(x) - F::ONE;
        inv.push(b.try_inverse().unwrap_or(F::ZERO));
    }
    // Final entry covers the all-zero input (count == 32).
    conds.push(F::from_bool(in0 == 0));
    inv.push(F::from_canonical_u32(in0).try_inverse().unwrap_or(F::ZERO));
    // Used for aux data, nothing to do with `le`
    row.general.io_mut().rt_le = conds.try_into().unwrap();
    row.general.io_mut().mem_le = inv.try_into().unwrap();
    state.traces.push_cpu(row);
    Ok(())
}
/// Generates the trace for a register–register logic operation:
/// `rd <- rs op rt`, recording the operation in the logic table.
pub(crate) fn generate_binary_logic_op<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    op: logic::Op,
    rs: u8,
    rt: u8,
    rd: u8,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    // Read both source registers through memory channels 0 and 1.
    let (lhs, log_lhs) = reg_read_with_log(rs, 0, state, &mut row)?;
    let (rhs, log_rhs) = reg_read_with_log(rt, 1, state, &mut row)?;
    // Evaluate the logic op and write the result to `rd` on channel 2.
    let operation = logic::Operation::new(op, lhs as u32, rhs as u32);
    let log_result = reg_write_with_log(rd, 2, operation.result as usize, state, &mut row)?;
    state.traces.push_logic(operation);
    state.traces.push_memory(log_lhs);
    state.traces.push_memory(log_rhs);
    state.traces.push_memory(log_result);
    state.traces.push_cpu(row);
    Ok(())
}
/// Generates the trace for a logic operation with an immediate operand:
/// `rd <- rs op imm`.
///
/// NOTE(review): unlike `generate_binary_logic_op`, the `logic::Operation`
/// is built but not pushed to the logic trace (the push is commented out
/// below) — confirm this is intentional.
pub(crate) fn generate_binary_logic_imm_op<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    op: logic::Op,
    rs: u8,
    rd: u8,
    imm: u32,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let (in0, log_in0) = reg_read_with_log(rs, 0, state, &mut row)?;
    let in1 = imm;
    let operation = logic::Operation::new(op, in0 as u32, in1);
    let out = operation.result;
    let log_out0 = reg_write_with_log(rd, 2, out as usize, state, &mut row)?;
    //state.traces.push_logic(operation);
    state.traces.push_memory(log_in0);
    state.traces.push_memory(log_out0);
    state.traces.push_cpu(row);
    Ok(())
}
/// Generates the trace for a register–register arithmetic operation:
/// `rd <- rs operator rt`.
///
/// Panics if given DIV/DIVU/MULT/MULTU, which produce HI/LO results and must
/// go through `generate_binary_arithmetic_hilo_op` instead.
pub(crate) fn generate_binary_arithmetic_op<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    operator: arithmetic::BinaryOperator,
    rs: u8,
    rt: u8,
    rd: u8,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    assert!(!matches!(
        operator,
        arithmetic::BinaryOperator::DIV
            | arithmetic::BinaryOperator::DIVU
            | arithmetic::BinaryOperator::MULT
            | arithmetic::BinaryOperator::MULTU
    ));
    // Read both source registers through memory channels 0 and 1.
    let (lhs, log_lhs) = reg_read_with_log(rs, 0, state, &mut row)?;
    let (rhs, log_rhs) = reg_read_with_log(rt, 1, state, &mut row)?;
    // Evaluate and write the (low) result to `rd` on channel 2.
    let operation = arithmetic::Operation::binary(operator, lhs as u32, rhs as u32);
    let (result, _) = operation.result();
    let log_result = reg_write_with_log(rd, 2, result as usize, state, &mut row)?;
    state.traces.push_arithmetic(operation);
    state.traces.push_memory(log_lhs);
    state.traces.push_memory(log_rhs);
    state.traces.push_memory(log_result);
    state.traces.push_cpu(row);
    Ok(())
}
/// Generates the trace for a HI/LO-producing arithmetic operation
/// (DIV/DIVU/MULT/MULTU): register index 32 receives `lo` and 33 receives
/// `hi`. The destination register argument is unused for these ops.
pub(crate) fn generate_binary_arithmetic_hilo_op<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    operator: arithmetic::BinaryOperator,
    rs: u8,
    rt: u8,
    _rd: u8,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    // Only the four HI/LO-producing operators are valid here.
    assert!([
        arithmetic::BinaryOperator::DIV,
        arithmetic::BinaryOperator::DIVU,
        arithmetic::BinaryOperator::MULT,
        arithmetic::BinaryOperator::MULTU,
    ]
    .contains(&operator));
    let (in0, log_in0) = reg_read_with_log(rs, 0, state, &mut row)?;
    let (in1, log_in1) = reg_read_with_log(rt, 1, state, &mut row)?;
    let in0 = in0 as u32;
    let in1 = in1 as u32;
    // The (lo, hi) pair is computed by the arithmetic table's own logic.
    let operation = arithmetic::Operation::binary(operator, in0, in1);
    let (lo, hi) = operation.result();
    let log_out0 = reg_write_with_log(32, 2, lo as usize, state, &mut row)?;
    let log_out1 = reg_write_with_log(33, 3, hi as usize, state, &mut row)?;
    state.traces.push_arithmetic(operation);
    state.traces.push_memory(log_in0);
    state.traces.push_memory(log_in1);
    state.traces.push_memory(log_out0);
    state.traces.push_memory(log_out1);
    state.traces.push_cpu(row);
    Ok(())
}
/// Generates the trace for an arithmetic operation with a 16-bit immediate:
/// `rt <- rs operator sign_extend(imm)`.
///
/// NOTE(review): channel 1 logs a *write* of the sign-extended immediate to
/// `rt` (not a read) before the result write on channel 2 — presumably this
/// exposes the immediate operand to the constraint system; confirm.
pub(crate) fn generate_binary_arithmetic_imm_op<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    rs: u8,
    rt: u8,
    imm: u32,
    operator: arithmetic::BinaryOperator,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let (in0, log_in0) = reg_read_with_log(rs, 0, state, &mut row)?;
    let in1 = sign_extend::<16>(imm);
    let log_in1 = reg_write_with_log(rt, 1, in1 as usize, state, &mut row)?;
    let operation = arithmetic::Operation::binary(operator, in0 as u32, in1);
    let out = operation.result().0;
    let log_out0 = reg_write_with_log(rt, 2, out as usize, state, &mut row)?;
    state.traces.push_arithmetic(operation);
    state.traces.push_memory(log_in0);
    state.traces.push_memory(log_in1);
    state.traces.push_memory(log_out0);
    state.traces.push_cpu(row);
    Ok(())
}
/// Generates the trace for LUI (load upper immediate).
///
/// Modeled as the arithmetic-table LUI operation applied to the sign-extended
/// immediate and the constant 1 << 16; the result is written into `rt`.
pub(crate) fn generate_lui<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    _rs: u8,
    rt: u8,
    imm: u32,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let in0 = sign_extend::<16>(imm);
    // NOTE(review): this records the immediate through a *write* to `_rs`
    // (channel 0). Architecturally LUI does not modify rs, so presumably this
    // exists only to surface the operand in the trace — confirm against the
    // LUI constraints.
    let log_in0 = reg_write_with_log(_rs, 0, in0 as usize, state, &mut row)?;
    // Second operand is the fixed scale factor 2^16.
    let in1 = 1u32 << 16;
    push_no_write(state, &mut row, in1, Some(1));
    let log_in1 = reg_write_with_log(rt, 1, in1 as usize, state, &mut row)?;
    let operation = arithmetic::Operation::binary(arithmetic::BinaryOperator::LUI, in0, in1);
    let out = operation.result().0;
    let log_out0 = reg_write_with_log(rt, 2, out as usize, state, &mut row)?;
    state.traces.push_arithmetic(operation);
    state.traces.push_memory(log_in0);
    state.traces.push_memory(log_in1);
    state.traces.push_memory(log_out0);
    state.traces.push_cpu(row);
    Ok(())
}
/// Placeholder for the KECCAK_GENERAL operation.
///
/// Not implemented for this VM: no constraints are activated and no trace
/// rows are emitted. The previously commented-out body (ported upstream
/// logic that popped a context/segment/address/length tuple, hashed the
/// addressed bytes, and logged a keccak-sponge operation) was removed as
/// dead code; consult the upstream port if this operation is ever enabled.
pub(crate) fn generate_keccak_general<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    _state: &mut GenerationState<F, C, D>,
    _row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    Ok(())
}
/// Generates the trace for register-indirect jumps (JR / JALR).
///
/// Reads the destination PC from register `target`, writes the return
/// address into register `link`, and redirects control flow to the target.
pub(crate) fn generate_jump<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    link: u8,
    target: u8,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let (target_pc, target_op) = reg_read_with_log(target, 0, state, &mut row)?;
    // +8 skips the jump instruction and its delay slot.
    let next_pc = state.registers.program_counter.wrapping_add(8);
    let link_op = reg_write_with_log(link, 1, next_pc, state, &mut row)?;
    state.traces.push_cpu(row);
    state.traces.push_memory(target_op);
    state.traces.push_memory(link_op);
    state.jump_to(target_pc);
    Ok(())
}
/// Generates the trace for conditional branches (BEQ/BNE/BGE/BLE/BGT/BLT).
///
/// Reads `src1`/`src2`, evaluates the branch condition on their signed
/// values, records the comparison flags plus four auxiliary values the
/// branch constraints consume, and jumps either to the PC-relative target
/// (pc + 4 + (sign_extend(target) << 2)) or to pc + 8 (past the delay slot).
pub(crate) fn generate_branch<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    cond: BranchCond,
    src1: u8,
    src2: u8,
    target: u32,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let (src1, src1_op) = reg_read_with_log(src1, 0, state, &mut row)?;
    let (src2, src2_op) = reg_read_with_log(src2, 1, state, &mut row)?;
    // The branch decision uses signed 32-bit comparison semantics.
    let should_jump = cond.result(src1 as i32, src2 as i32);
    // One-hot flag selecting which condition this row checks.
    match cond {
        BranchCond::EQ => row.branch.is_eq = F::ONE,
        BranchCond::NE => row.branch.is_ne = F::ONE,
        BranchCond::GE => row.branch.is_ge = F::ONE,
        BranchCond::LE => row.branch.is_le = F::ONE,
        BranchCond::GT => row.branch.is_gt = F::ONE,
        BranchCond::LT => row.branch.is_lt = F::ONE,
    };
    // Unsigned comparison flags on the raw register values, used by the
    // branch constraints together with the aux differences below.
    if src1 == src2 {
        row.branch.eq = F::ONE;
    }
    if src1 > src2 {
        row.branch.gt = F::ONE;
    }
    if src1 < src2 {
        row.branch.lt = F::ONE;
    }
    //log::info!("jump: {} c0: {}, c1: {}, aux1: {}, aux2: {}", should_jump, src1, src2, src1.wrapping_sub(src2), src2.wrapping_sub(src1));
    // aux1/aux2: the two wrapping differences; aux3: whether the operands
    // have opposite sign bits; aux4: the scaled branch offset.
    let aux1 = src1.wrapping_sub(src2);
    let aux2 = src2.wrapping_sub(src1);
    let aux3 = (src1 ^ src2) & 0x80000000 > 0;
    let target = sign_extend::<16>(target);
    let (mut target_pc, _) = target.overflowing_shl(2);
    let aux4 = target_pc;
    // The aux values are exposed to the constraints as writes to register 0.
    let log_out0 = reg_write_with_log(0, 2, aux1, state, &mut row)?;
    let log_out1 = reg_write_with_log(0, 3, aux2, state, &mut row)?;
    let log_out2 = reg_write_with_log(0, 4, aux3 as usize, state, &mut row)?;
    let log_out3 = reg_write_with_log(0, 5, aux4 as usize, state, &mut row)?;
    let pc = state.registers.program_counter as u32;
    if should_jump {
        // Branch target is relative to the delay-slot address (pc + 4).
        target_pc = target_pc.wrapping_add(pc + 4);
        row.branch.should_jump = F::ONE;
        state.traces.push_cpu(row);
        state.jump_to(target_pc as usize);
    } else {
        // Fall through past the delay slot.
        let next_pc = pc.wrapping_add(8);
        row.branch.should_jump = F::ZERO;
        state.traces.push_cpu(row);
        state.jump_to(next_pc as usize);
    }
    state.traces.push_memory(src1_op);
    state.traces.push_memory(src2_op);
    state.traces.push_memory(log_out0);
    state.traces.push_memory(log_out1);
    state.traces.push_memory(log_out2);
    state.traces.push_memory(log_out3);
    Ok(())
}
/// Generates the trace for J / JAL (jump to a 26-bit pseudo-absolute target).
///
/// The effective PC is (pc & 0xF000_0000) | (target << 2); the return
/// address pc + 8 (past the delay slot) is written into `link`.
pub(crate) fn generate_jumpi<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    link: u8,
    target: u32,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let (mut target_pc, _) = (target as usize).overflowing_shl(2);
    let pc = state.registers.program_counter;
    // Keep the top 4 bits of the current PC, per MIPS J-type semantics.
    let operation: logic::Operation =
        logic::Operation::new(logic::Op::And, pc as u32, 0xf0000000u32);
    let pc_result = operation.result as usize;
    // Expose the masked PC to the constraints via a write to register 0.
    let result_op = reg_write_with_log(0, 2, pc_result, state, &mut row)?;
    target_pc = target_pc.wrapping_add(pc_result);
    let next_pc = pc.wrapping_add(8);
    let link_op = reg_write_with_log(link, 1, next_pc, state, &mut row)?;
    // FIXME: skip for lookup check
    //state.traces.push_logic(operation);
    state.traces.push_cpu(row);
    state.jump_to(target_pc);
    state.traces.push_memory(link_op);
    state.traces.push_memory(result_op);
    Ok(())
}
/// Generates the trace for PC-relative direct jumps with a 16-bit offset
/// (branch-and-link style).
///
/// The offset is sign-extended, shifted left by 2, and added to pc + 4;
/// the return address pc + 8 is written into `link`.
pub(crate) fn generate_jumpdirect<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    link: u8,
    target: u32,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let target = sign_extend::<16>(target);
    let (target_pc, _) = target.overflowing_shl(2);
    // Expose the scaled offset to the constraints via a write to register 0.
    let offset_op = reg_write_with_log(0, 2, target_pc as usize, state, &mut row)?;
    let pc = state.registers.program_counter as u32;
    // The offset is relative to the delay-slot address (pc + 4).
    let target_pc = target_pc.wrapping_add(pc + 4);
    let next_pc = pc.wrapping_add(8);
    let link_op = reg_write_with_log(link, 1, next_pc as usize, state, &mut row)?;
    // FIXME: skip for lookup check
    //state.traces.push_logic(operation);
    state.traces.push_cpu(row);
    state.jump_to(target_pc as usize);
    state.traces.push_memory(link_op);
    state.traces.push_memory(offset_op);
    Ok(())
}
/// Trace generation for operations that need no register or memory side
/// effects: simply records the prefilled CPU row.
pub(crate) fn generate_pc<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    state: &mut GenerationState<F, C, D>,
    row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    state.traces.push_cpu(row);
    Ok(())
}
/// Placeholder for the GET_CONTEXT kernel operation.
///
/// Not implemented for this VM: no trace rows are emitted. The previously
/// commented-out body (ported upstream logic that pushed the current context
/// onto the stack) was removed as dead code.
pub(crate) fn generate_get_context<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    _state: &mut GenerationState<F, C, D>,
    _row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    Ok(())
}
/// Placeholder for the SET_CONTEXT kernel operation.
///
/// Not implemented for this VM: no trace rows are emitted. The previously
/// commented-out body (ported upstream context-switch logic that saved the
/// old stack pointer, loaded the new context's stack pointer and stack top,
/// and updated the register state) was removed as dead code; consult the
/// upstream port if context switching is ever enabled here.
pub(crate) fn generate_set_context<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    _state: &mut GenerationState<F, C, D>,
    _row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    Ok(())
}
/// Generates the trace for shift-by-immediate instructions (SLL/SRL/SRA).
///
/// Reads `rt`, pushes the immediate shift amount `sa` into channel 0, backs
/// the power-of-two factor with a shift-table lookup, and writes the
/// arithmetic-table result into `rd`.
pub(crate) fn generate_shift_imm<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    op: arithmetic::BinaryOperator,
    sa: u8,
    rt: u8,
    rd: u8,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    // This helper only handles the three immediate-shift operators.
    assert!([
        arithmetic::BinaryOperator::SLL,
        arithmetic::BinaryOperator::SRL,
        arithmetic::BinaryOperator::SRA
    ]
    .contains(&op));
    let (input0, log_in0) = reg_read_with_log(rt, 1, state, &mut row)?;
    state.traces.push_memory(log_in0);
    let shift = sa as u32;
    push_no_write(state, &mut row, shift, Some(0));
    // The shift table maps the shift amount to 2^shift for the constraints.
    let lookup_addr = MemoryAddress::new(0, Segment::ShiftTable, shift as usize);
    let (_, read) = mem_read_gp_with_log_and_fill(3, lookup_addr, state, &mut row);
    state.traces.push_memory(read);
    let operation = arithmetic::Operation::binary(op, input0 as u32, shift);
    let result = operation.result().0;
    state.traces.push_arithmetic(operation);
    let outlog = reg_write_with_log(rd, 2, result as usize, state, &mut row)?;
    state.traces.push_memory(outlog);
    state.traces.push_cpu(row);
    Ok(())
}
/// Generates the trace for SLLV (shift left logical by the amount in `rs`).
///
/// Reads the shift amount from `rs` and the value from `rt`, backs the
/// power-of-two factor with a shift-table lookup, and writes the result
/// into `rd`.
pub(crate) fn generate_sllv<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    rs: u8,
    rt: u8,
    rd: u8,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let (input0, log_in0) = reg_read_with_log(rs, 0, state, &mut row)?;
    let (input1, log_in1) = reg_read_with_log(rt, 1, state, &mut row)?;
    // NOTE(review): `input0` is used unmasked as the shift-table index,
    // while MIPS masks variable shift amounts to 5 bits — confirm `rs` is
    // guaranteed < 32 here (or that the table covers larger indices).
    let lookup_addr = MemoryAddress::new(0, Segment::ShiftTable, input0);
    let (_, read) = mem_read_gp_with_log_and_fill(3, lookup_addr, state, &mut row);
    state.traces.push_memory(read);
    let operation = arithmetic::Operation::binary(
        arithmetic::BinaryOperator::SLLV,
        input1 as u32,
        input0 as u32,
    );
    let result = operation.result().0;
    state.traces.push_arithmetic(operation);
    let outlog = reg_write_with_log(rd, 2, result as usize, state, &mut row)?;
    state.traces.push_memory(log_in0);
    state.traces.push_memory(log_in1);
    state.traces.push_memory(outlog);
    state.traces.push_cpu(row);
    Ok(())
}
/// Generates the trace for SRLV (shift right logical by the amount in `rs`).
///
/// Mirrors `generate_sllv` with the SRLV arithmetic-table operator.
pub(crate) fn generate_srlv<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    rs: u8,
    rt: u8,
    rd: u8,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let (input0, log_in0) = reg_read_with_log(rs, 0, state, &mut row)?;
    let (input1, log_in1) = reg_read_with_log(rt, 1, state, &mut row)?;
    // NOTE(review): `input0` is used unmasked as the shift-table index —
    // confirm the shift amount is guaranteed < 32 at this point.
    let lookup_addr = MemoryAddress::new(0, Segment::ShiftTable, input0);
    let (_, read) = mem_read_gp_with_log_and_fill(3, lookup_addr, state, &mut row);
    state.traces.push_memory(read);
    let operation = arithmetic::Operation::binary(
        arithmetic::BinaryOperator::SRLV,
        input1 as u32,
        input0 as u32,
    );
    let result = operation.result().0;
    state.traces.push_arithmetic(operation);
    let outlog = reg_write_with_log(rd, 2, result as usize, state, &mut row)?;
    state.traces.push_memory(log_in0);
    state.traces.push_memory(log_in1);
    state.traces.push_memory(outlog);
    state.traces.push_cpu(row);
    Ok(())
}
/// Generates the trace for SRAV (arithmetic shift right by the amount in
/// `rs`).
///
/// Mirrors `generate_sllv` with the SRAV arithmetic-table operator.
pub(crate) fn generate_srav<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    rs: u8,
    rt: u8,
    rd: u8,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let (input0, log_in0) = reg_read_with_log(rs, 0, state, &mut row)?;
    let (input1, log_in1) = reg_read_with_log(rt, 1, state, &mut row)?;
    // let input0 = in0 & 0x1F;
    // NOTE(review): the 5-bit masking above is commented out, so `input0`
    // indexes the shift table unmasked — confirm it is guaranteed < 32.
    let lookup_addr = MemoryAddress::new(0, Segment::ShiftTable, input0);
    let (_, read) = mem_read_gp_with_log_and_fill(3, lookup_addr, state, &mut row);
    state.traces.push_memory(read);
    let operation = arithmetic::Operation::binary(
        arithmetic::BinaryOperator::SRAV,
        input1 as u32,
        input0 as u32,
    );
    let result = operation.result().0;
    state.traces.push_arithmetic(operation);
    let outlog = reg_write_with_log(rd, 2, result as usize, state, &mut row)?;
    state.traces.push_memory(log_in0);
    state.traces.push_memory(log_in1);
    state.traces.push_memory(outlog);
    state.traces.push_cpu(row);
    Ok(())
}
/// Generates the trace for ROTR (rotate `rt` right by the immediate `sa`,
/// result into `rd`).
///
/// Duplicates the 32-bit value into a 64-bit lane (v | v << 32) so a plain
/// right shift by `sa` yields the rotation. Also records the operand's
/// little-endian bit decomposition and a one-hot selector of `sa` for the
/// rotation constraints.
pub(crate) fn generate_ror<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    rd: u8,
    rt: u8,
    sa: u8,
    state: &mut GenerationState<F, C, D>,
    mut row: CpuColumnsView<F>,
) -> Result<(), ProgramError> {
    let (input0, log_in0) = reg_read_with_log(rt, 0, state, &mut row)?;
    // (v | v << 32) >> sa == rotr(v, sa) for 0 <= sa < 32.
    let sin = (input0 as u64) + ((input0 as u64) << 32);
    let result = (sin >> sa) as u32;
    let bits_le = (0..32)
        .map(|i| {
            let bit = (input0 >> i) & 0x01;
            F::from_canonical_u32(bit as u32)
        })
        .collect_vec();
    row.general.misc_mut().rs_bits = bits_le.try_into().unwrap();
    // One-hot flag marking the rotation amount for the constraints.
    row.general.misc_mut().is_lsb = [F::ZERO; 32];
    row.general.misc_mut().is_lsb[sa as usize] = F::ONE;
    let outlog = reg_write_with_log(rd, 1, result as usize, state, &mut row)?;
    state.traces.push_memory(log_in0);
    state.traces.push_memory(outlog);
    state.traces.push_cpu(row);
    Ok(())
}
/// Loads the pre-image blob addressed by the 32-byte hash stored at
/// 0x30001000 and maps it into memory starting at 0x31000000.
///
/// The hash is read from eight consecutive words, hex-encoded, and appended
/// (as "0x<hex>") to `kernel.blockpath` to form the pre-image file path. The
/// file length is written at 0x31000000 and the contents at 0x31000004
/// onward as big-endian words, batching eight memory writes per CPU row.
/// Poseidon padding bits are spliced into the final partial word.
///
/// Returns an error if the pre-image file cannot be read.
pub(crate) fn load_preimage<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    state: &mut GenerationState<F, C, D>,
    kernel: &Kernel,
) -> Result<()> {
    // Read the 32-byte hash out of eight memory words (one CPU row).
    let mut hash_bytes = [0u8; 32];
    {
        let mut cpu_row = CpuColumnsView::default();
        cpu_row.clock = F::from_canonical_usize(state.traces.clock());
        for i in 0..8 {
            let address = MemoryAddress::new(0, Segment::Code, 0x30001000 + i * 4);
            let (mem, op) = mem_read_gp_with_log_and_fill(i, address, state, &mut cpu_row);
            hash_bytes[i * 4..i * 4 + 4].copy_from_slice(mem.to_be_bytes().as_ref());
            state.traces.push_memory(op);
        }
        state.traces.push_cpu(cpu_row);
    }
    // The pre-image file is named "0x<hex-of-hash>" under the block path.
    let hex_string = hex::encode(hash_bytes);
    let mut preimage_path = kernel.blockpath.clone();
    preimage_path.push_str("0x");
    preimage_path.push_str(hex_string.as_str());
    log::trace!("load file {}", preimage_path);
    // Propagate I/O failures instead of panicking; the function already
    // returns a Result, so callers can handle a missing/unreadable file.
    let content = fs::read(&preimage_path).context("Read file failed")?;
    let mut cpu_row = CpuColumnsView::default();
    cpu_row.clock = F::from_canonical_usize(state.traces.clock());
    // Word 0 of the mapping holds the pre-image length in bytes.
    let mem_op = mem_write_gp_log_and_fill(
        0,
        MemoryAddress::new(0, Segment::Code, 0x31000000),
        state,
        &mut cpu_row,
        content.len() as u32,
    );
    log::trace!("{:X}: {:X}", 0x31000000, content.len() as u32);
    state.traces.push_memory(mem_op);
    let mut map_addr = 0x31000004;
    let mut j = 1;
    for i in (0..content.len()).step_by(WORD_SIZE) {
        // Each CPU row has eight general-purpose channels; start a fresh row
        // once they are exhausted.
        if j == 8 {
            state.traces.push_cpu(cpu_row);
            cpu_row = CpuColumnsView::default();
            cpu_row.clock = F::from_canonical_usize(state.traces.clock());
            j = 0;
        }
        let mut word = 0;
        // Don't read past the end of the file.
        let len = core::cmp::min(content.len() - i, WORD_SIZE);
        for k in 0..len {
            let offset = i + k;
            let byte = content.get(offset).context("Invalid block offset")?;
            word |= (*byte as u32) << (k * 8);
        }
        let addr = MemoryAddress::new(0, Segment::Code, map_addr);
        // Splice Poseidon sponge padding into the last (partial) word: a
        // 1-bit right after the data, plus the high end-marker bit when this
        // word closes the current rate block.
        // todo: check rate bytes
        if len < WORD_SIZE {
            let end = content.len() % POSEIDON_RATE_BYTES;
            word |= 0b1 << (len * 8);
            if end + 4 > POSEIDON_RATE_BYTES {
                word |= 0b10000000 << 24;
            }
        }
        log::trace!("{:X}: {:X}", map_addr, word);
        let mem_op = mem_write_gp_log_and_fill(j, addr, state, &mut cpu_row, word.to_be());
        state.traces.push_memory(mem_op);
        map_addr += 4;
        j += 1;
    }
    state.traces.push_cpu(cpu_row);
    Ok(())
}
/// Handles the SYS_VERIFY syscall: reads a 32-byte claim digest from guest
/// memory at `addr` and marks the matching recorded assumption as used.
///
/// Panics if `size != 32` or if no recorded assumption matches the digest.
pub(crate) fn verify<F: RichField + Extendable<D>, C: GenericConfig<D, F = F>, const D: usize>(
    state: &mut GenerationState<F, C, D>,
    addr: usize,
    size: usize,
) -> Result<()> {
    // The claim digest is always exactly 32 bytes.
    assert!(size == 32);
    // Read the digest out of eight memory words (one CPU row).
    let mut claim_digest = [0u8; 32];
    {
        let mut cpu_row = CpuColumnsView::default();
        cpu_row.clock = F::from_canonical_usize(state.traces.clock());
        for i in 0..8 {
            let address = MemoryAddress::new(0, Segment::Code, addr + i * 4);
            let (mem, op) = mem_read_gp_with_log_and_fill(i, address, state, &mut cpu_row);
            claim_digest[i * 4..i * 4 + 4].copy_from_slice(mem.to_be_bytes().as_ref());
            state.traces.push_memory(op);
        }
        state.traces.push_cpu(cpu_row);
    }
    log::debug!("SYS_VERIFY: ({:?})", claim_digest);
    let assumption = state.find_assumption(&claim_digest);
    // Mark the assumption as accessed, pushing it to the head of the list, and return the success code.
    match assumption {
        Some(assumpt) => {
            state.assumptions_used.borrow_mut().insert(0, assumpt);
        }
        None => panic!("Assumption Not Found"),
    }
    Ok(())
}
pub(crate) fn load_input<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
state: &mut GenerationState<F, C, D>,
addr: usize,
size: usize,
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | true |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/misc.rs | prover/src/cpu/misc.rs | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::util::{limb_from_bits_le, limb_from_bits_le_recursive};
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
/// Packed-field constraints for RDHWR (read hardware register).
///
/// Checks that the rt/rd register indices match the decoded instruction
/// bits, and constrains the value written to rt: 1 when rd == 0, the
/// channel-1 value (local-user register) when rd == 29, and 0 for any other
/// rd. The `rd_index_eq_0` / `rd_index_eq_29` flags are tied to the actual
/// `rd_index` value by the first and third constraints below.
pub fn eval_packed_rdhwr<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let filter = lv.op.rdhwr;
    // Check rt Reg
    {
        let rt_reg = lv.mem_channels[0].addr_virtual;
        let rt_src = limb_from_bits_le(lv.rt_bits);
        yield_constr.constraint(filter * (rt_reg - rt_src));
    }
    // Check rd index
    {
        let rd_index = lv.general.misc().rd_index;
        let rd_dst = limb_from_bits_le(lv.rd_bits);
        yield_constr.constraint(filter * (rd_index - rd_dst));
    }
    // Check rt value
    {
        let rt_val = lv.mem_channels[0].value;
        let local_user = lv.mem_channels[1].value;
        let rd_index = lv.general.misc().rd_index;
        let rd_eq_0 = lv.general.misc().rd_index_eq_0;
        let rd_eq_29 = lv.general.misc().rd_index_eq_29;
        // rd_eq_0 may only be set when rd_index is actually 0.
        yield_constr.constraint(filter * rd_eq_0 * rd_index);
        // rd == 0 forces rt_val == 1.
        yield_constr.constraint(filter * rd_eq_0 * (rt_val - P::ONES));
        // rd_eq_29 may only be set when rd_index is actually 29.
        yield_constr
            .constraint(filter * rd_eq_29 * (rd_index - P::Scalar::from_canonical_usize(29)));
        // rd == 29 forces rt_val to equal the channel-1 value.
        yield_constr.constraint(filter * rd_eq_29 * (rt_val - local_user));
        // Any other rd forces rt_val == 0.
        yield_constr.constraint(filter * (P::ONES - rd_eq_29 - rd_eq_0) * rt_val);
    }
}
/// Recursive-circuit counterpart of [`eval_packed_rdhwr`]: builds the same
/// RDHWR constraints as plonky2 circuit gates.
pub fn eval_ext_circuit_rdhwr<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let filter = lv.op.rdhwr;
    // Check rt Reg
    {
        let rt_reg = lv.mem_channels[0].addr_virtual;
        let rt_src = limb_from_bits_le_recursive(builder, lv.rt_bits);
        let constr = builder.sub_extension(rt_reg, rt_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check rd index
    {
        let rd_index = lv.general.misc().rd_index;
        let rd_src = limb_from_bits_le_recursive(builder, lv.rd_bits);
        let constr = builder.sub_extension(rd_index, rd_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check rt value
    {
        let rt_val = lv.mem_channels[0].value;
        let local_user = lv.mem_channels[1].value;
        let rd_index = lv.general.misc().rd_index;
        let rd_eq_0 = lv.general.misc().rd_index_eq_0;
        let rd_eq_29 = lv.general.misc().rd_index_eq_29;
        let one_extension = builder.one_extension();
        // filter * rd_eq_0 * rd_index == 0
        let constr = builder.mul_extension(rd_eq_0, rd_index);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        // filter * rd_eq_0 * (rt_val - 1) == 0
        let constr = builder.sub_extension(rt_val, one_extension);
        let constr = builder.mul_extension(constr, rd_eq_0);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        // filter * rd_eq_29 * (rd_index - 29) == 0
        let constant29 = builder.constant_extension(F::Extension::from_canonical_usize(29));
        let constr = builder.sub_extension(rd_index, constant29);
        let constr = builder.mul_extension(rd_eq_29, constr);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        // filter * rd_eq_29 * (rt_val - local_user) == 0
        let constr = builder.sub_extension(rt_val, local_user);
        let constr = builder.mul_extension(constr, rd_eq_29);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        // filter * (1 - rd_eq_0 - rd_eq_29) * rt_val == 0
        let constr = builder.sub_extension(one_extension, rd_eq_0);
        let constr = builder.sub_extension(constr, rd_eq_29);
        let constr = builder.mul_extension(constr, rt_val);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
}
/// Packed-field constraints for the conditional moves MOVN / MOVZ.
///
/// Channel layout: 0 = rs, 1 = rt (the condition), 2 = rd (old value),
/// 3 = the value actually written, 4 = the move flag. `diff_pinv` is
/// expected to hold rt's inverse when rt != 0 (so `p_inv0 * rt` is a 0/1
/// non-zero indicator); the constraints below tie the move flag to that
/// indicator and force out = mov ? rs : rd.
pub fn eval_packed_condmov<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let rs = lv.mem_channels[0].value; // rs
    let rt = lv.mem_channels[1].value; // rt
    let rd = lv.mem_channels[2].value; // rd
    let out = lv.mem_channels[3].value; // out
    let mov = lv.mem_channels[4].value; // mov rs to rd
    let is_movn = lv.op.movn_op;
    let is_movz = lv.op.movz_op;
    let filter = is_movn + is_movz;
    //let is_movn = lv.func_bits[0];
    //let is_movz = P::ONES - lv.func_bits[0];
    // constraints:
    // * is_ne = p_inv0 * rt
    // * is_eq = 1 - is_ne
    // * is_movn * (mov - is_ne) == 0
    // * is_movz * (mov - is_eq) == 0
    // * filter * mov * (1 - mov) == 0
    // * res = mov * rs + (1 - mov) * rd
    // * filter * (out - res) == 0
    {
        let p_inv0 = lv.general.logic().diff_pinv; // rt^-1
        let is_ne = p_inv0 * rt;
        let is_eq = P::ONES - is_ne;
        let no_mov = P::ONES - mov;
        yield_constr.constraint(is_movn * (mov - is_ne));
        yield_constr.constraint(is_movz * (mov - is_eq));
        // mov must be boolean whenever a conditional move is active.
        yield_constr.constraint(filter * mov * no_mov);
        // out selects rs when moving and the old rd value otherwise.
        yield_constr.constraint(filter * (out - (mov * rs + no_mov * rd)));
    }
}
/// Recursive-circuit counterpart of [`eval_packed_condmov`]: builds the same
/// MOVN/MOVZ constraints as plonky2 circuit gates.
pub fn eval_ext_circuit_condmov<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let rs = lv.mem_channels[0].value; // rs
    let rt = lv.mem_channels[1].value; // rt
    let rd = lv.mem_channels[2].value; // rd
    let out = lv.mem_channels[3].value; // out
    let mov = lv.mem_channels[4].value; // mov rs to rd
    let is_movn = lv.op.movn_op;
    let is_movz = lv.op.movz_op;
    let filter = builder.add_extension(is_movn, is_movz);
    let one_extension = builder.one_extension();
    // constraints:
    // * is_ne = p_inv0 * rt
    // * is_eq = 1 - is_ne
    // * is_movn * (mov - is_ne) == 0
    // * is_movz * (mov - is_eq) == 0
    // * filter * mov * (1 - mov) == 0
    // * res = mov * rs + (1 - mov) * rd
    // * filter * (out - res) == 0
    {
        let p_inv0 = lv.general.logic().diff_pinv; // rt^-1
        let is_ne = builder.mul_extension(p_inv0, rt);
        let is_eq = builder.sub_extension(one_extension, is_ne);
        // is_movn * (mov - is_ne) == 0
        let constr = builder.sub_extension(mov, is_ne);
        let constr = builder.mul_extension(is_movn, constr);
        yield_constr.constraint(builder, constr);
        // is_movz * (mov - is_eq) == 0
        let constr = builder.sub_extension(mov, is_eq);
        let constr = builder.mul_extension(is_movz, constr);
        yield_constr.constraint(builder, constr);
        // filter * mov * (1 - mov) == 0 — mov must be boolean.
        let no_mov = builder.sub_extension(one_extension, mov);
        let constr = builder.mul_extension(mov, no_mov);
        let constr = builder.mul_extension(filter, constr);
        yield_constr.constraint(builder, constr);
        // filter * (out - (mov * rs + (1 - mov) * rd)) == 0
        let constr_a = builder.mul_extension(mov, rs);
        let constr_b = builder.mul_extension(no_mov, rd);
        let constr = builder.add_extension(constr_a, constr_b);
        let constr = builder.sub_extension(out, constr);
        let constr = builder.mul_extension(filter, constr);
        yield_constr.constraint(builder, constr);
    }
}
/// Packed-field constraints for TEQ (trap if equal).
///
/// Checks that the rs/rt register indices match the decoded instruction
/// bits, then requires rs_val != rt_val by forcing
/// (rs_val - rt_val) * diff_pinv == 1 — i.e. the trap condition must NOT
/// hold for the execution to be valid.
pub fn eval_packed_teq<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let filter = lv.op.teq;
    // Check rt Reg
    {
        let rt_reg = lv.mem_channels[1].addr_virtual;
        let rt_src = limb_from_bits_le(lv.rt_bits);
        yield_constr.constraint(filter * (rt_reg - rt_src));
    }
    // Check rs Reg
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        let rs_dst = limb_from_bits_le(lv.rs_bits);
        yield_constr.constraint(filter * (rs_reg - rs_dst));
    }
    // Check rs_val != rt_val, Otherwise trap will be triggered
    {
        let rs_val = lv.mem_channels[0].value;
        let rt_val = lv.mem_channels[1].value;
        // diff_pinv must hold the inverse of (rs_val - rt_val), which only
        // exists when the values differ.
        let p_inv0 = lv.general.logic().diff_pinv;
        let is_ne = (rs_val - rt_val) * p_inv0;
        yield_constr.constraint(filter * (P::ONES - is_ne));
    }
}
/// Recursive-circuit counterpart of [`eval_packed_teq`]: builds the same
/// TEQ constraints as plonky2 circuit gates.
pub fn eval_ext_circuit_teq<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let filter = lv.op.teq;
    // Check rt Reg
    {
        let rt_reg = lv.mem_channels[1].addr_virtual;
        let rt_src = limb_from_bits_le_recursive(builder, lv.rt_bits);
        let constr = builder.sub_extension(rt_reg, rt_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check rs Reg
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        let rs_src = limb_from_bits_le_recursive(builder, lv.rs_bits);
        let constr = builder.sub_extension(rs_reg, rs_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check rs_val != rt_val, Otherwise trap will be triggered
    {
        let rs_val = lv.mem_channels[0].value;
        let rt_val = lv.mem_channels[1].value;
        let p_inv0 = lv.general.logic().diff_pinv;
        let one_extension = builder.one_extension();
        // filter * (1 - (rs_val - rt_val) * diff_pinv) == 0
        let diff = builder.sub_extension(rs_val, rt_val);
        let is_ne = builder.mul_extension(diff, p_inv0);
        let constr = builder.sub_extension(one_extension, is_ne);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
}
/// Packed-field constraints for EXT (extract bit field).
///
/// Checks rs/rt register indices, then verifies the extracted field via the
/// auxiliary columns: with lsb from the shamt bits, msbd from the rd bits,
/// and msb = lsb + msbd, the constraints require
///   rd_result * auxs + auxl == auxm
/// where auxm = value of rs bits [0..msb], auxl = value of rs bits [0..lsb),
/// and auxs = 2^lsb. The one-hot `is_msb` / `is_lsb` selectors pin which
/// bit positions msb and lsb actually are.
pub fn eval_packed_extract<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let filter = lv.op.ext;
    // Check rt Reg
    {
        let rt_reg = lv.mem_channels[1].addr_virtual;
        let rt_src = limb_from_bits_le(lv.rt_bits);
        yield_constr.constraint(filter * (rt_reg - rt_src));
    }
    // Check rs Reg
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        let rs_dst = limb_from_bits_le(lv.rs_bits);
        yield_constr.constraint(filter * (rs_reg - rs_dst));
    }
    // Check ext result
    {
        let msbd = limb_from_bits_le(lv.rd_bits);
        let rs_bits = lv.general.misc().rs_bits;
        let lsb = limb_from_bits_le(lv.shamt_bits);
        let msb = lsb + msbd;
        let auxm = lv.general.misc().auxm;
        let auxl = lv.general.misc().auxl;
        let auxs = lv.general.misc().auxs;
        let rd_result = lv.mem_channels[1].value;
        // Core identity: extracted * 2^lsb + low-bits == bits-below-msb.
        yield_constr.constraint(filter * (rd_result * auxs + auxl - auxm));
        for i in 0..32 {
            // Partial little-endian sums of rs bits up to (and excluding) i.
            let mpartial = limb_from_bits_le(rs_bits[0..i + 1].to_vec());
            let mut lpartial = P::ZEROS;
            if i != 0 {
                lpartial = limb_from_bits_le(rs_bits[0..i].to_vec());
            }
            let is_msb = lv.general.misc().is_msb[i];
            let is_lsb = lv.general.misc().is_lsb[i];
            let cur_index = P::Scalar::from_canonical_usize(i);
            let cur_mul = P::Scalar::from_canonical_usize(1 << i);
            // is_msb[i] ties msb to position i and auxm to rs[0..=i].
            yield_constr.constraint(filter * is_msb * (msb - cur_index));
            yield_constr.constraint(filter * is_msb * (auxm - mpartial));
            // is_lsb[i] ties lsb to i, auxl to rs[0..i), and auxs to 2^i.
            yield_constr.constraint(filter * is_lsb * (lsb - cur_index));
            yield_constr.constraint(filter * is_lsb * (auxl - lpartial));
            yield_constr.constraint(filter * is_lsb * (auxs - cur_mul));
        }
    }
}
/// Recursive-circuit counterpart of [`eval_packed_extract`]: builds the same
/// EXT constraints as plonky2 circuit gates.
pub fn eval_ext_circuit_extract<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let filter = lv.op.ext;
    // Check rt Reg
    {
        let rt_reg = lv.mem_channels[1].addr_virtual;
        let rt_src = limb_from_bits_le_recursive(builder, lv.rt_bits);
        let constr = builder.sub_extension(rt_reg, rt_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check rs Reg
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        let rs_src = limb_from_bits_le_recursive(builder, lv.rs_bits);
        let constr = builder.sub_extension(rs_reg, rs_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check ext result
    {
        let msbd = limb_from_bits_le_recursive(builder, lv.rd_bits);
        let rs_bits = lv.general.misc().rs_bits;
        let lsb = limb_from_bits_le_recursive(builder, lv.shamt_bits);
        let msb = builder.add_extension(lsb, msbd);
        let auxm = lv.general.misc().auxm;
        let auxl = lv.general.misc().auxl;
        let auxs = lv.general.misc().auxs;
        let rd_result = lv.mem_channels[1].value;
        // filter * (rd_result * auxs + auxl - auxm) == 0
        let constr = builder.mul_extension(rd_result, auxs);
        let constr = builder.add_extension(constr, auxl);
        let constr = builder.sub_extension(constr, auxm);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        for i in 0..32 {
            // Partial little-endian sums of rs bits up to (and excluding) i.
            let mpartial = limb_from_bits_le_recursive(builder, rs_bits[0..i + 1].to_vec());
            let mut lpartial = builder.zero_extension();
            if i != 0 {
                lpartial = limb_from_bits_le_recursive(builder, rs_bits[0..i].to_vec());
            }
            let is_msb = lv.general.misc().is_msb[i];
            let is_lsb = lv.general.misc().is_lsb[i];
            let cur_index = builder.constant_extension(F::Extension::from_canonical_usize(i));
            let cur_mul = builder.constant_extension(F::Extension::from_canonical_usize(1 << i));
            let constr_msb = builder.mul_extension(filter, is_msb);
            let constr_lsb = builder.mul_extension(filter, is_lsb);
            // is_msb[i] ties msb to position i and auxm to rs[0..=i].
            let constr = builder.sub_extension(msb, cur_index);
            let constr = builder.mul_extension(constr_msb, constr);
            yield_constr.constraint(builder, constr);
            let constr = builder.sub_extension(auxm, mpartial);
            let constr = builder.mul_extension(constr, constr_msb);
            yield_constr.constraint(builder, constr);
            // is_lsb[i] ties lsb to i, auxl to rs[0..i), and auxs to 2^i.
            let constr = builder.sub_extension(lsb, cur_index);
            let constr = builder.mul_extension(constr, constr_lsb);
            yield_constr.constraint(builder, constr);
            let constr = builder.sub_extension(auxl, lpartial);
            let constr = builder.mul_extension(constr, constr_lsb);
            yield_constr.constraint(builder, constr);
            let constr = builder.sub_extension(auxs, cur_mul);
            let constr = builder.mul_extension(constr, constr_lsb);
            yield_constr.constraint(builder, constr);
        }
    }
}
/// Packed-field constraints for INS (insert bit field).
///
/// Checks that channel 1 (rt read) and channel 2 (rt write) both address rt
/// and channel 0 addresses rs, then verifies the inserted result through the
/// auxiliary columns, as spelled out in the constraint comment below:
/// result == auxm + auxl * auxs, with auxm the rt value cleared of the
/// target field, auxl the rs bits being inserted, and auxs = 2^lsb.
pub fn eval_packed_insert<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let filter = lv.op.ins;
    // Check rt Reg
    // addr(channels[1]) == rt
    // addr(channels[2]) == rt
    {
        let rt_reg_read = lv.mem_channels[1].addr_virtual;
        let rt_reg_write = lv.mem_channels[2].addr_virtual;
        let rt_src = limb_from_bits_le(lv.rt_bits);
        yield_constr.constraint(filter * (rt_reg_read - rt_src));
        yield_constr.constraint(filter * (rt_reg_write - rt_src));
    }
    // Check rs Reg
    // addr(channels[0]) == rs
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        let rs_dst = limb_from_bits_le(lv.rs_bits);
        yield_constr.constraint(filter * (rs_reg - rs_dst));
    }
    // Check ins result
    // is_lsb[i] = 1 if i = lsb
    // is_lsb[i] = 0 if i != lsb
    // is_lsb[i] * (lsb - i) == 0
    // auxs = 1 << lsd
    // is_lsb[i] * (auxs - (i << 1)) == 0
    // size = msb -lsb
    // is_msb[i] = 1 if i = size
    // is_msb[i] = 0 if i != size
    // is_msb[i] * (size - i) == 0
    // auxm = rt & !(mask << lsb)
    // auxl = rs[0 : size+1]
    // is_msb[i] * (auxl - rs[0:i+1]) == 0
    // result == auxm + auxl * auxs
    {
        let msb = limb_from_bits_le(lv.rd_bits);
        let rs_bits = lv.general.misc().rs_bits;
        let lsb = limb_from_bits_le(lv.shamt_bits);
        let auxm = lv.general.misc().auxm;
        let auxl = lv.general.misc().auxl;
        let auxs = lv.general.misc().auxs;
        let rd_result = lv.mem_channels[2].value;
        // Core identity: result == cleared-rt + inserted-bits * 2^lsb.
        yield_constr.constraint(filter * (rd_result - auxm - auxl * auxs));
        for i in 0..32 {
            let is_msb = lv.general.misc().is_msb[i];
            let is_lsb = lv.general.misc().is_lsb[i];
            let cur_index = P::Scalar::from_canonical_usize(i);
            let cur_mul = P::Scalar::from_canonical_usize(1 << i);
            // is_lsb[i] ties lsb to position i and auxs to 2^i.
            yield_constr.constraint(filter * is_lsb * (lsb - cur_index));
            yield_constr.constraint(filter * is_lsb * (auxs - cur_mul));
            // is_msb[i] ties the field size (msb - lsb) to i.
            yield_constr.constraint(filter * is_msb * (msb - lsb - cur_index));
            // auxl must equal the low i+1 bits of rs when the size is i.
            let mut insert_bits = [P::ZEROS; 32];
            insert_bits[0..i + 1].copy_from_slice(&rs_bits[0..i + 1]);
            let insert_val = limb_from_bits_le(insert_bits.to_vec());
            yield_constr.constraint(filter * is_msb * (auxl - insert_val));
        }
    }
}
/// Recursive-circuit counterpart of [`eval_packed_insert`]: the same `INS`
/// constraints, expressed as plonky2 extension-target arithmetic.
///
/// NOTE(review): constraint emission order mirrors the packed version — keep
/// the two functions in sync.
pub fn eval_ext_circuit_insert<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let filter = lv.op.ins;
    // Check rt Reg
    // addr(channels[1]) == rt
    // addr(channels[2]) == rt
    {
        let rt_reg_read = lv.mem_channels[1].addr_virtual;
        let rt_reg_write = lv.mem_channels[2].addr_virtual;
        let rt_src = limb_from_bits_le_recursive(builder, lv.rt_bits);
        let constr = builder.sub_extension(rt_reg_read, rt_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        let constr = builder.sub_extension(rt_reg_write, rt_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check rs Reg
    // addr(channels[0]) == rs
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        let rs_src = limb_from_bits_le_recursive(builder, lv.rs_bits);
        let constr = builder.sub_extension(rs_reg, rs_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check ins result — see eval_packed_insert for the full derivation:
    // is_lsb[i] = 1 if i = lsb
    // is_lsb[i] = 0 if i != lsb
    // is_lsb[i] * (lsb - i) == 0
    // auxs = 1 << lsb
    // is_lsb[i] * (auxs - (1 << i)) == 0
    // size = msb - lsb
    // is_msb[i] = 1 if i = size
    // is_msb[i] = 0 if i != size
    // is_msb[i] * (size - i) == 0
    // auxm = rt & !(mask << lsb)
    // auxl = rs[0 : size+1]
    // is_msb[i] * (auxl - rs[0:i+1]) == 0
    // result == auxm + auxl * auxs
    {
        let msb = limb_from_bits_le_recursive(builder, lv.rd_bits);
        let rs_bits = lv.general.misc().rs_bits;
        let lsb = limb_from_bits_le_recursive(builder, lv.shamt_bits);
        let auxm = lv.general.misc().auxm;
        let auxl = lv.general.misc().auxl;
        let auxs = lv.general.misc().auxs;
        let rd_result = lv.mem_channels[2].value;
        // result - auxl * auxs - auxm == 0, gated by filter.
        let constr = builder.mul_extension(auxl, auxs);
        let constr = builder.sub_extension(rd_result, constr);
        let constr = builder.sub_extension(constr, auxm);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        for i in 0..32 {
            let is_msb = lv.general.misc().is_msb[i];
            let is_lsb = lv.general.misc().is_lsb[i];
            let cur_index = builder.constant_extension(F::Extension::from_canonical_usize(i));
            let cur_mul = builder.constant_extension(F::Extension::from_canonical_usize(1 << i));
            // Pre-multiplied gates: filter * is_msb and filter * is_lsb.
            let constr_msb = builder.mul_extension(filter, is_msb);
            let constr_lsb = builder.mul_extension(filter, is_lsb);
            // is_lsb[i] * (lsb - i) == 0
            let constr = builder.sub_extension(lsb, cur_index);
            let constr = builder.mul_extension(constr, constr_lsb);
            yield_constr.constraint(builder, constr);
            // is_lsb[i] * (auxs - 2^i) == 0
            let constr = builder.sub_extension(auxs, cur_mul);
            let constr = builder.mul_extension(constr, constr_lsb);
            yield_constr.constraint(builder, constr);
            // is_msb[i] * (msb - lsb - i) == 0
            let constr = builder.sub_extension(msb, lsb);
            let constr = builder.sub_extension(constr, cur_index);
            let constr = builder.mul_extension(constr, constr_msb);
            yield_constr.constraint(builder, constr);
            // is_msb[i] * (auxl - rs[0:i+1]) == 0
            let mut insert_bits = [builder.zero_extension(); 32];
            insert_bits[0..i + 1].copy_from_slice(&rs_bits[0..i + 1]);
            let insert_val = limb_from_bits_le_recursive(builder, insert_bits);
            let constr = builder.sub_extension(auxl, insert_val);
            let constr = builder.mul_extension(constr, constr_msb);
            yield_constr.constraint(builder, constr);
        }
    }
}
/// Constraints for the rotate-right (`ROR`) instruction, on packed field values.
///
/// Channel layout enforced below: channel 0 reads `rt` (the rotated operand),
/// channel 1 writes the result into `rd`.
pub fn eval_packed_ror<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let filter = lv.op.ror;
    // Check rd Reg
    {
        let rd_reg = lv.mem_channels[1].addr_virtual;
        let rd_src = limb_from_bits_le(lv.rd_bits);
        yield_constr.constraint(filter * (rd_reg - rd_src));
    }
    // Check rt Reg
    {
        let rt_reg = lv.mem_channels[0].addr_virtual;
        let rt_dst = limb_from_bits_le(lv.rt_bits);
        yield_constr.constraint(filter * (rt_reg - rt_dst));
    }
    // Check ror result
    {
        // NOTE(review): the rotated operand's bit decomposition is stored in the
        // misc `rs_bits` columns even though ROR rotates rt — confirm against
        // the witness generator.
        let rt_bits = lv.general.misc().rs_bits;
        // Rotation amount comes from the shamt field.
        let sa = limb_from_bits_le(lv.shamt_bits);
        let rd_result = lv.mem_channels[1].value;
        let mut rd_bits = [P::ZEROS; 32];
        for i in 0..32 {
            // Candidate result for rotation amount i: bits rotated right by i
            // (rd_bits is fully overwritten each iteration).
            rd_bits[0..32 - i].copy_from_slice(&rt_bits[i..32]);
            rd_bits[32 - i..32].copy_from_slice(&rt_bits[0..i]);
            let rd_val = limb_from_bits_le(rd_bits.to_vec());
            // is_lsb is reused here as the one-hot selector for sa == i.
            let is_sa = lv.general.misc().is_lsb[i];
            let cur_index = P::Scalar::from_canonical_usize(i);
            yield_constr.constraint(filter * is_sa * (sa - cur_index));
            // Where the selector fires, the result must be the i-rotated value.
            yield_constr.constraint(filter * is_sa * (rd_result - rd_val));
        }
    }
}
/// Recursive-circuit counterpart of [`eval_packed_ror`]; same constraints in
/// the same order, built with plonky2 extension-target arithmetic.
pub fn eval_ext_circuit_ror<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let filter = lv.op.ror;
    // Check rd Reg
    {
        let rd_reg = lv.mem_channels[1].addr_virtual;
        let rd_src = limb_from_bits_le_recursive(builder, lv.rd_bits);
        let constr = builder.sub_extension(rd_reg, rd_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check rt Reg
    {
        let rt_reg = lv.mem_channels[0].addr_virtual;
        let rt_src = limb_from_bits_le_recursive(builder, lv.rt_bits);
        let constr = builder.sub_extension(rt_reg, rt_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check ror result
    {
        // Bit decomposition of the rotated operand (stored in misc rs_bits).
        let rt_bits = lv.general.misc().rs_bits;
        let sa = limb_from_bits_le_recursive(builder, lv.shamt_bits);
        let rd_result = lv.mem_channels[1].value;
        let mut rd_bits = [builder.zero_extension(); 32];
        for i in 0..32 {
            // Candidate: operand rotated right by i bits.
            rd_bits[0..32 - i].copy_from_slice(&rt_bits[i..32]);
            rd_bits[32 - i..32].copy_from_slice(&rt_bits[0..i]);
            let rd_val = limb_from_bits_le_recursive(builder, rd_bits);
            // is_lsb doubles as the one-hot selector for sa == i.
            let is_sa = lv.general.misc().is_lsb[i];
            let cur_index = builder.constant_extension(F::Extension::from_canonical_usize(i));
            let constr_sa = builder.mul_extension(filter, is_sa);
            let constr = builder.sub_extension(sa, cur_index);
            let constr = builder.mul_extension(constr, constr_sa);
            yield_constr.constraint(builder, constr);
            let constr = builder.sub_extension(rd_result, rd_val);
            let constr = builder.mul_extension(constr, constr_sa);
            yield_constr.constraint(builder, constr);
        }
    }
}
/// Constraints for `MADDU` (multiply-add unsigned into HI/LO), on packed values.
///
/// Channel layout enforced below: channels 0/1 read rs/rt, channels 2/3 read
/// the old HI/LO (register indices 33/32), channels 4/5 write the new HI/LO.
pub fn eval_packed_maddu<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let filter = lv.op.maddu;
    // Check rs Reg
    // addr(channels[0]) == rs
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        let rs_src = limb_from_bits_le(lv.rs_bits);
        yield_constr.constraint(filter * (rs_reg - rs_src));
    }
    // Check rt Reg
    // addr(channels[1]) == rt
    {
        let rt_reg = lv.mem_channels[1].addr_virtual;
        let rt_dst = limb_from_bits_le(lv.rt_bits);
        yield_constr.constraint(filter * (rt_reg - rt_dst));
    }
    // Check hi Reg
    // addr(channels[2]) == 33
    // addr(channels[4]) == 33
    {
        let hi_reg_read = lv.mem_channels[2].addr_virtual;
        let hi_reg_write = lv.mem_channels[4].addr_virtual;
        let hi_src = P::Scalar::from_canonical_usize(33);
        yield_constr.constraint(filter * (hi_reg_read - hi_src));
        yield_constr.constraint(filter * (hi_reg_write - hi_src));
    }
    // Check lo Reg
    // addr(channels[3]) == 32
    // addr(channels[5]) == 32
    {
        let lo_reg_read = lv.mem_channels[3].addr_virtual;
        let lo_reg_write = lv.mem_channels[5].addr_virtual;
        let lo_src = P::Scalar::from_canonical_usize(32);
        yield_constr.constraint(filter * (lo_reg_read - lo_src));
        yield_constr.constraint(filter * (lo_reg_write - lo_src));
    }
    // Check maddu result
    // carry = overflow << 32
    // scale = 1 << 32
    // carry * (carry - scale) == 0
    // result + (overflow << 32) == (hi,lo) + rs * rt
    {
        let rs = lv.mem_channels[0].value;
        let rt = lv.mem_channels[1].value;
        let hi = lv.mem_channels[2].value;
        let lo = lv.mem_channels[3].value;
        let hi_result: P = lv.mem_channels[4].value;
        let lo_result = lv.mem_channels[5].value;
        // auxm holds the carry witness out of the 64-bit accumulator.
        let carry = lv.general.misc().auxm;
        let scale = P::Scalar::from_canonical_usize(1 << 32);
        // Combine 32-bit halves into 64-bit values inside the field
        // (Goldilocks is large enough that these sums don't wrap).
        let result = hi_result * scale + lo_result;
        let mul = rs * rt;
        let addend = hi * scale + lo;
        let overflow = carry * scale;
        // NOTE(review): this forces carry ∈ {0, 2^32}, so `carry` is the
        // already-scaled carry term — confirm against the witness generator.
        yield_constr.constraint(filter * carry * (carry - scale));
        // (hi,lo) + rs*rt == result + carry*2^32, i.e. the 64-bit wrap is
        // accounted for by the overflow witness.
        yield_constr.constraint(filter * (mul + addend - overflow - result));
    }
}
/// Recursive-circuit counterpart of [`eval_packed_maddu`]; same constraints in
/// the same order, built with plonky2 extension-target arithmetic.
pub fn eval_ext_circuit_maddu<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let filter = lv.op.maddu;
    // Check rs Reg
    // addr(channels[0]) == rs
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        let rs_src = limb_from_bits_le_recursive(builder, lv.rs_bits);
        let constr = builder.sub_extension(rs_reg, rs_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check rt Reg
    // addr(channels[1]) == rt
    {
        let rt_reg = lv.mem_channels[1].addr_virtual;
        let rt_src = limb_from_bits_le_recursive(builder, lv.rt_bits);
        let constr = builder.sub_extension(rt_reg, rt_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check hi Reg
    // addr(channels[2]) == 33
    // addr(channels[4]) == 33
    {
        let hi_reg_read = lv.mem_channels[2].addr_virtual;
        let hi_reg_write = lv.mem_channels[4].addr_virtual;
        let hi_src = builder.constant_extension(F::Extension::from_canonical_usize(33));
        let constr = builder.sub_extension(hi_reg_read, hi_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        let constr = builder.sub_extension(hi_reg_write, hi_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check lo Reg
    // addr(channels[3]) == 32
    // addr(channels[5]) == 32
    {
        let lo_reg_read = lv.mem_channels[3].addr_virtual;
        let lo_reg_write = lv.mem_channels[5].addr_virtual;
        let lo_src = builder.constant_extension(F::Extension::from_canonical_usize(32));
        let constr = builder.sub_extension(lo_reg_read, lo_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        let constr = builder.sub_extension(lo_reg_write, lo_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check maddu result
    // carry = overflow << 32
    // scale = 1 << 32
    // carry * (carry - scale) == 0
    // result + (overflow << 32) == (hi,lo) + rs * rt
    {
        let rs = lv.mem_channels[0].value;
        let rt = lv.mem_channels[1].value;
        let hi = lv.mem_channels[2].value;
        let lo = lv.mem_channels[3].value;
        let hi_result = lv.mem_channels[4].value;
        let lo_result = lv.mem_channels[5].value;
        // Carry witness (misc auxm), constrained to {0, 2^32} below.
        let carry = lv.general.misc().auxm;
        let scale = builder.constant_extension(F::Extension::from_canonical_usize(1 << 32));
        // result = hi_result * 2^32 + lo_result
        let result = builder.mul_extension(hi_result, scale);
        let result = builder.add_extension(result, lo_result);
        let mul = builder.mul_extension(rs, rt);
        // addend = hi * 2^32 + lo (the previous 64-bit accumulator)
        let addend = builder.mul_extension(hi, scale);
        let addend = builder.add_extension(addend, lo);
        let overflow = builder.mul_extension(carry, scale);
        // carry * (carry - 2^32) == 0
        let constr = builder.sub_extension(carry, scale);
        let constr = builder.mul_extension(constr, carry);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        // rs*rt + (hi,lo) - overflow - result == 0
        let constr = builder.add_extension(mul, addend);
        let constr = builder.sub_extension(constr, overflow);
        let constr = builder.sub_extension(constr, result);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
}
/// Evaluate the packed constraints for all miscellaneous CPU ops handled by
/// this module.
///
/// NOTE(review): the order of these calls must stay in lockstep with
/// `eval_ext_circuit` below, since the constraint consumer combines
/// constraints by position.
pub fn eval_packed<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    eval_packed_rdhwr(lv, yield_constr);
    eval_packed_condmov(lv, yield_constr);
    eval_packed_teq(lv, yield_constr);
    eval_packed_extract(lv, yield_constr);
    eval_packed_ror(lv, yield_constr);
    eval_packed_insert(lv, yield_constr);
    eval_packed_maddu(lv, yield_constr);
}
/// Build the recursive-circuit constraints for all miscellaneous CPU ops
/// handled by this module.
///
/// NOTE(review): call order must stay in lockstep with `eval_packed` above.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    eval_ext_circuit_rdhwr(builder, lv, yield_constr);
    eval_ext_circuit_condmov(builder, lv, yield_constr);
    eval_ext_circuit_teq(builder, lv, yield_constr);
    eval_ext_circuit_extract(builder, lv, yield_constr);
    eval_ext_circuit_ror(builder, lv, yield_constr);
    eval_ext_circuit_insert(builder, lv, yield_constr);
    eval_ext_circuit_maddu(builder, lv, yield_constr);
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/count.rs | prover/src/cpu/count.rs | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::util::{limb_from_bits_le, limb_from_bits_le_recursive};
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
/// Constraints for the MIPS `CLZ` (count leading zeros) and `CLO` (count
/// leading ones) instructions, on packed field values.
///
/// The witness provides rs's bit decomposition (`rs_le`); for CLO the bits are
/// those of the complement, so in both cases the result counts leading zeros
/// of `bits_le`. One-hot equality selectors (reusing the `rt_le` columns) and
/// their inverse witnesses (reusing `mem_le`) pin the count written to rd.
pub fn eval_packed<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let filter_clz = lv.op.clz_op;
    let filter_clo = lv.op.clo_op;
    // Either of the two ops activates the shared constraints.
    let filter = filter_clo + filter_clz;
    // check op code (both CLZ and CLO live in the SPECIAL2 opcode space)
    let opcode = limb_from_bits_le(lv.opcode_bits);
    yield_constr.constraint(filter * (opcode - P::Scalar::from_canonical_u8(0b011100)));
    // check func bits (CLZ = 0b100000, CLO = 0b100001)
    let func = limb_from_bits_le(lv.func_bits);
    yield_constr.constraint(filter_clz * (func - P::Scalar::from_canonical_u8(0b100000)));
    yield_constr.constraint(filter_clo * (func - P::Scalar::from_canonical_u8(0b100001)));
    // Check rs Reg
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        let rs_src = limb_from_bits_le(lv.rs_bits);
        yield_constr.constraint(filter * (rs_reg - rs_src));
    }
    // Check rd Reg
    {
        let rd_reg = lv.mem_channels[1].addr_virtual;
        let rd_dst = limb_from_bits_le(lv.rd_bits);
        yield_constr.constraint(filter * (rd_reg - rd_dst));
    }
    let rs = lv.mem_channels[0].value;
    let bits_le = lv.general.io().rs_le;
    // Each witness bit must be boolean.
    for bit in bits_le {
        yield_constr.constraint(filter * bit * (P::ONES - bit));
    }
    // The bits must recompose to rs (CLZ) or to !rs = 0xffffffff - rs (CLO),
    // so counting leading zeros of bits_le handles both instructions.
    let sum = limb_from_bits_le(bits_le);
    yield_constr.constraint(filter_clz * (rs - sum));
    yield_constr.constraint(filter_clo * (P::Scalar::from_canonical_u32(0xffffffff) - rs - sum));
    let rd = lv.mem_channels[1].value;
    // Equality selectors and their multiplicative-inverse witnesses.
    let mut is_eqs = lv.general.io().rt_le.iter();
    let mut invs = lv.general.io().mem_le.iter();
    // If the top bit is set there are zero leading zeros: rd == 0.
    yield_constr.constraint(filter * bits_le[31] * rd);
    for i in (0..31).rev() {
        // partial == value of bits i..32; it equals 1 exactly when bits
        // 31..=i+1 are all zero and bit i is one, i.e. 31 - i leading zeros.
        let partial = limb_from_bits_le(bits_le[i..].to_vec());
        let is_eq = is_eqs.next().unwrap();
        let inv = invs.next().unwrap();
        let diff = partial - P::ONES;
        // is_eq == 1 iff partial == 1 (standard is-zero gadget on diff):
        yield_constr.constraint(filter * diff * *is_eq);
        yield_constr.constraint(filter * (diff * *inv + *is_eq - P::ONES));
        // When the selector fires, rd must be the leading-zero count 31 - i.
        yield_constr.constraint(filter * *is_eq * (rd - P::Scalar::from_canonical_usize(31 - i)));
        if i == 0 {
            // Extra case: all 32 bits zero (partial == 0) forces rd == 32.
            let is_eq = is_eqs.next().unwrap();
            let inv = invs.next().unwrap();
            yield_constr.constraint(filter * partial * *is_eq);
            yield_constr.constraint(filter * (partial * *inv + *is_eq - P::ONES));
            yield_constr.constraint(filter * *is_eq * (rd - P::Scalar::from_canonical_usize(32)));
        }
    }
}
/// Recursive-circuit counterpart of this module's `eval_packed` (CLZ/CLO);
/// identical constraints in the same order, built on extension targets.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let filter_clz = lv.op.clz_op;
    let filter_clo = lv.op.clo_op;
    let filter = builder.add_extension(filter_clo, filter_clz);
    // check op code (SPECIAL2 = 0b011100)
    let opcode = limb_from_bits_le_recursive(builder, lv.opcode_bits);
    let opcode_ = builder.constant_extension(F::Extension::from_canonical_u8(0b011100));
    let t0 = builder.sub_extension(opcode, opcode_);
    let t = builder.mul_extension(filter, t0);
    yield_constr.constraint(builder, t);
    // check func bits (CLZ = 0b100000, CLO = 0b100001)
    let func = limb_from_bits_le_recursive(builder, lv.func_bits);
    let func_clz = builder.constant_extension(F::Extension::from_canonical_u8(0b100000));
    let t0 = builder.sub_extension(func, func_clz);
    let t = builder.mul_extension(filter_clz, t0);
    yield_constr.constraint(builder, t);
    let func_clo = builder.constant_extension(F::Extension::from_canonical_u8(0b100001));
    let t0 = builder.sub_extension(func, func_clo);
    let t = builder.mul_extension(filter_clo, t0);
    yield_constr.constraint(builder, t);
    // Check rs Reg
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        let rs_src = limb_from_bits_le_recursive(builder, lv.rs_bits);
        let constr = builder.sub_extension(rs_reg, rs_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check rd Reg
    {
        let rd_reg = lv.mem_channels[1].addr_virtual;
        let rd_src = limb_from_bits_le_recursive(builder, lv.rd_bits);
        let constr = builder.sub_extension(rd_reg, rd_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    let one = builder.one_extension();
    let rs = lv.mem_channels[0].value;
    let bits_le = lv.general.io().rs_le;
    // Booleanity of every witness bit.
    for bit in bits_le {
        let bit_neg = builder.sub_extension(one, bit);
        let t = builder.mul_many_extension([filter, bit, bit_neg]);
        yield_constr.constraint(builder, t);
    }
    // Bits recompose to rs (CLZ) or 0xffffffff - rs (CLO).
    let sum = limb_from_bits_le_recursive(builder, bits_le);
    let t1 = builder.sub_extension(rs, sum);
    let t = builder.mul_extension(filter_clz, t1);
    yield_constr.constraint(builder, t);
    let cst = builder.constant_extension(F::Extension::from_canonical_u32(0xffffffff));
    let t2 = builder.sub_extension(cst, rs);
    let t3 = builder.sub_extension(t2, sum);
    let t = builder.mul_extension(filter_clo, t3);
    yield_constr.constraint(builder, t);
    let rd = lv.mem_channels[1].value;
    // Equality selectors (rt_le) and inverse witnesses (mem_le), as in the
    // packed version.
    let mut is_eqs = lv.general.io().rt_le.iter();
    let mut invs = lv.general.io().mem_le.iter();
    // Top bit set => zero leading zeros => rd == 0.
    let t = builder.mul_many_extension([filter, bits_le[31], rd]);
    yield_constr.constraint(builder, t);
    for i in (0..31).rev() {
        // partial = value of bits i..32; equals 1 iff there are exactly
        // 31 - i leading zeros.
        let partial = limb_from_bits_le_recursive(builder, bits_le[i..].to_vec());
        let is_eq = is_eqs.next().unwrap();
        let inv = invs.next().unwrap();
        let diff = builder.sub_extension(partial, one);
        // is-zero gadget: is_eq == 1 iff diff == 0.
        let t = builder.mul_many_extension([filter, diff, *is_eq]);
        yield_constr.constraint(builder, t);
        let t1 = builder.mul_extension(diff, *inv);
        let t2 = builder.add_extension(t1, *is_eq);
        let t3 = builder.sub_extension(t2, one);
        let t = builder.mul_extension(filter, t3);
        yield_constr.constraint(builder, t);
        // Selector fires => rd == 31 - i.
        let cst = builder.constant_extension(F::Extension::from_canonical_usize(31 - i));
        let t1 = builder.sub_extension(rd, cst);
        let t = builder.mul_many_extension([filter, *is_eq, t1]);
        yield_constr.constraint(builder, t);
        if i == 0 {
            // All-zero input: partial == 0 forces rd == 32.
            let is_eq = is_eqs.next().unwrap();
            let inv = invs.next().unwrap();
            let t = builder.mul_many_extension([filter, partial, *is_eq]);
            yield_constr.constraint(builder, t);
            let t1 = builder.mul_extension(partial, *inv);
            let t2 = builder.add_extension(t1, *is_eq);
            let t3 = builder.sub_extension(t2, one);
            let t = builder.mul_extension(filter, t3);
            yield_constr.constraint(builder, t);
            let cst = builder.constant_extension(F::Extension::from_canonical_usize(32));
            let t1 = builder.sub_extension(rd, cst);
            let t = builder.mul_many_extension([filter, *is_eq, t1]);
            yield_constr.constraint(builder, t);
        }
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/memio.rs | prover/src/cpu/memio.rs | use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::memory::segments::Segment;
use crate::util::{limb_from_bits_le, limb_from_bits_le_recursive};
/// Reassemble the 16-bit immediate offset of a memory instruction from the
/// decoded func (low 6), shamt (next 5) and rd (top 5) bit columns, then
/// sign-extend it to 32 bits and recompose it into a single field value.
#[inline]
fn load_offset<P: PackedField>(lv: &CpuColumnsView<P>) -> P {
    let mut mem_offset = [P::ZEROS; 32];
    mem_offset[0..6].copy_from_slice(&lv.func_bits); // 6 bits
    mem_offset[6..11].copy_from_slice(&lv.shamt_bits); // 5 bits
    mem_offset[11..16].copy_from_slice(&lv.rd_bits); // 5 bits
    let mem_offset = sign_extend::<_, 16>(&mem_offset);
    limb_from_bits_le(mem_offset)
}
/// Recursive-circuit counterpart of [`load_offset`]: same 16-bit immediate
/// reassembly and sign extension, on extension targets.
#[inline]
fn load_offset_ext<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
) -> ExtensionTarget<D> {
    let mut mem_offset = [builder.zero_extension(); 32];
    mem_offset[0..6].copy_from_slice(&lv.func_bits); // 6 bits
    mem_offset[6..11].copy_from_slice(&lv.shamt_bits); // 5 bits
    mem_offset[11..16].copy_from_slice(&lv.rd_bits); // 5 bits
    let mem_offset = sign_extend_ext::<_, D, 16>(builder, &mem_offset);
    limb_from_bits_le_recursive(builder, mem_offset)
}
/// Sign-extend an `N`-bit little-endian bit vector to all 32 slots by
/// replicating the sign bit `limbs[N - 1]` into positions `N..32`.
#[inline]
fn sign_extend<P: PackedField, const N: usize>(limbs: &[P; 32]) -> [P; 32] {
    // Prefill every slot with the sign bit, then restore the low N bits.
    let sign_bit = limbs[N - 1];
    let mut out = [sign_bit; 32];
    out[..N].copy_from_slice(&limbs[..N]);
    out
}
/// Recursive-circuit counterpart of [`sign_extend`]: copy the low `N` bit
/// targets and replicate the sign-bit target `limbs[N - 1]` into `N..32`.
#[inline]
fn sign_extend_ext<F: RichField + Extendable<D>, const D: usize, const N: usize>(
    builder: &mut CircuitBuilder<F, D>,
    limbs: &[ExtensionTarget<D>; 32],
) -> [ExtensionTarget<D>; 32] {
    let mut out = [builder.zero_extension(); 32];
    out[..N].copy_from_slice(&limbs[..N]);
    for i in N..32 {
        out[i] = limbs[N - 1];
    }
    out
}
//let sum = rs_limbs[1] * (mem - mem_val_1) + (rs_limbs[1] - P::ONES) * (mem - mem_val_0);
//yield_constr.constraint(filter * lv.general.io().micro_op[0] * sum);
/// Constrain a halfword load: depending on address bit 1, `mem` must equal
/// either the high-half candidate `mem_val_0` (bit == 0) or the low-half
/// candidate `mem_val_1` (bit == 1). With a boolean bit, exactly one of the
/// two terms survives, so the gated sum vanishes iff the selected candidate
/// matches.
#[inline]
fn enforce_half_word<P: PackedField>(
    yield_constr: &mut ConstraintConsumer<P>,
    op: P,
    rs_limbs: &[P],
    mem: P,
    mem_val_1: P,
    mem_val_0: P,
) {
    let addr_bit = rs_limbs[1];
    // Term active when the address bit is 1.
    let picked_1 = addr_bit * (mem - mem_val_1);
    // Term active (with coefficient -1) when the address bit is 0.
    let picked_0 = (addr_bit - P::ONES) * (mem - mem_val_0);
    yield_constr.constraint(op * (picked_1 + picked_0));
}
/// Recursive-circuit counterpart of [`enforce_half_word`]: builds
/// op * (rs_limbs[1] * (mem - mem_val_1) + (rs_limbs[1] - 1) * (mem - mem_val_0))
/// and yields it as a constraint.
#[inline]
fn enforce_half_word_ext<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    op: ExtensionTarget<D>,
    rs_limbs: &[ExtensionTarget<D>],
    mem: ExtensionTarget<D>,
    mem_val_1: ExtensionTarget<D>,
    mem_val_0: ExtensionTarget<D>,
) {
    // (rs_limbs[1] - 1) * (mem - mem_val_0)
    let fc = builder.add_const_extension(rs_limbs[1], -F::ONES);
    let fc2 = builder.sub_extension(mem, mem_val_0);
    let lh_sum_a = builder.mul_extension(fc, fc2);
    // rs_limbs[1] * (mem - mem_val_1)
    let fc = builder.sub_extension(mem, mem_val_1);
    let lh_sum_b = builder.mul_extension(rs_limbs[1], fc);
    // Gate the sum by the op flag.
    let fc = builder.add_extension(lh_sum_a, lh_sum_b);
    let fc = builder.mul_extension(op, fc);
    yield_constr.constraint(builder, fc);
}
//let sum = (mem - mem_val_0_0) * (rs_limbs[1] - P::ONES) * (rs_limbs[0] - P::ONES)
// + (mem - mem_val_1_0) * (rs_limbs[1] - P::ONES) * rs_limbs[0]
// + (mem - mem_val_0_1) * rs_limbs[1] * (rs_limbs[0] - P::ONES)
// + (mem - mem_val_1_1) * rs_limbs[1] * rs_limbs[0];
//yield_constr.constraint(filter * lv.general.io().micro_op[1] * sum);
/// Constrain a byte-granular load: the low two address bits (rs_limbs[0..2])
/// select one of four candidate values `mem_val_{bit0}_{bit1}`, and `mem` must
/// equal the selected one.
///
/// The product rs_limbs[0] * rs_limbs[1] is degree 2, so it is materialized in
/// the auxiliary witness column `aux_rs0_mul_rs1` (checked below) to keep the
/// overall selector constraint within degree bounds. With boolean address
/// bits, exactly one selector coefficient is nonzero (±1) for each bit
/// combination, so the gated sum vanishes iff the matching candidate equals
/// `mem`.
#[inline]
fn enforce_byte<P: PackedField>(
    yield_constr: &mut ConstraintConsumer<P>,
    lv: &CpuColumnsView<P>,
    op: P,
    rs_limbs: &[P],
    mem: P,
    mem_val_0_0: P,
    mem_val_1_0: P,
    mem_val_0_1: P,
    mem_val_1_1: P,
) {
    // aux == rs_limbs[0] * rs_limbs[1]
    let rs_limbs_1_rs_limbs_0 = rs_limbs[0] * rs_limbs[1];
    let rs_limbs_1_rs_limbs_0_aux = lv.general.io().aux_rs0_mul_rs1;
    yield_constr.constraint(op * (rs_limbs_1_rs_limbs_0 - rs_limbs_1_rs_limbs_0_aux));
    // Coefficients are the (±) indicator polynomials of the four bit patterns:
    // (1-b0)(1-b1) = aux - b1 - b0 + 1, b0(1-b1) = -(aux - b0), etc.
    let sum = (mem - mem_val_0_0)
        * (rs_limbs_1_rs_limbs_0_aux - rs_limbs[1] - rs_limbs[0] + P::ONES)
        + (mem - mem_val_1_0) * (rs_limbs_1_rs_limbs_0_aux - rs_limbs[0])
        + (mem - mem_val_0_1) * (rs_limbs_1_rs_limbs_0_aux - rs_limbs[1])
        + (mem - mem_val_1_1) * (rs_limbs_1_rs_limbs_0_aux);
    yield_constr.constraint(sum * op);
}
/// Recursive-circuit counterpart of [`enforce_byte`]: same two constraints
/// (aux-product check and four-way selector sum), built on extension targets.
#[inline]
fn enforce_byte_ext<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    op: ExtensionTarget<D>,
    rs_limbs: &[ExtensionTarget<D>],
    mem: ExtensionTarget<D>,
    mem_val_0_0: ExtensionTarget<D>,
    mem_val_1_0: ExtensionTarget<D>,
    mem_val_0_1: ExtensionTarget<D>,
    mem_val_1_1: ExtensionTarget<D>,
) {
    // aux == rs_limbs[0] * rs_limbs[1]
    let rs_limbs_1_rs_limbs_0 = builder.mul_extension(rs_limbs[0], rs_limbs[1]);
    let rs_limbs_1_rs_limbs_0_aux = lv.general.io().aux_rs0_mul_rs1;
    let fc = builder.sub_extension(rs_limbs_1_rs_limbs_0, rs_limbs_1_rs_limbs_0_aux);
    let fc = builder.mul_extension(op, fc);
    yield_constr.constraint(builder, fc);
    // (mem - mem_val_0_0) * (aux + 1 - b1 - b0)
    let mem00 = builder.sub_extension(mem, mem_val_0_0);
    let fc0 = builder.add_const_extension(rs_limbs_1_rs_limbs_0_aux, F::ONES);
    let fc1 = builder.add_extension(rs_limbs[1], rs_limbs[0]);
    let fc00 = builder.sub_extension(fc0, fc1);
    let fc00 = builder.mul_extension(mem00, fc00);
    // (mem - mem_val_1_0) * (aux - b0)
    let mem10 = builder.sub_extension(mem, mem_val_1_0);
    let fc2 = builder.sub_extension(rs_limbs_1_rs_limbs_0_aux, rs_limbs[0]);
    let fc10 = builder.mul_extension(mem10, fc2);
    // (mem - mem_val_0_1) * (aux - b1)
    let mem01 = builder.sub_extension(mem, mem_val_0_1);
    let fc3 = builder.sub_extension(rs_limbs_1_rs_limbs_0_aux, rs_limbs[1]);
    let fc01 = builder.mul_extension(mem01, fc3);
    // (mem - mem_val_1_1) * aux
    let mem11 = builder.sub_extension(mem, mem_val_1_1);
    let fc11 = builder.mul_extension(mem11, rs_limbs_1_rs_limbs_0_aux);
    // Gate the four-way sum by the op flag.
    let sum = builder.add_many_extension([fc00, fc01, fc10, fc11]);
    let fc = builder.mul_extension(op, sum);
    yield_constr.constraint(builder, fc);
}
/// Constant -4 in the Goldilocks field: 18446744069414584317 = p - 4, where
/// p = 2^64 - 2^32 + 1.
/// NOTE(review): despite the name, this is the additive representation of -4,
/// NOT the multiplicative inverse of -4 — confirm intended use at the call
/// sites (outside this chunk) before renaming.
const GOLDILOCKS_INVERSE_NEG4: u64 = 18446744069414584317;
/// Constraints for the general memory-load micro-ops (LB/LBU/LH/LHU/LW/LL/
/// LWL/LWR), on packed field values.
///
/// Channel layout enforced below: channel 0 reads `rs` (base register),
/// channel 1 reads `rt`, channel 2/3 carry the word-aligned memory access,
/// and per-op flags in `lv.memio` select which extraction rule applies to the
/// loaded word.
fn eval_packed_load<P: PackedField>(
    lv: &CpuColumnsView<P>,
    _nv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // If the operation is MLOAD_GENERAL, lv.opcode_bits[5] = 1
    let filter = lv.op.m_op_load * lv.opcode_bits[5];
    // aux_filter gates the (possibly wrapping) address computation; the load
    // filter being active forces it on.
    let aux_filter = lv.memio.aux_filter;
    yield_constr.constraint(filter * (P::ONES - aux_filter));
    // Check mem channel segment is register
    let diff = lv.mem_channels[0].addr_segment
        - P::Scalar::from_canonical_u64(Segment::RegisterFile as u64);
    yield_constr.constraint(filter * diff);
    let diff = lv.mem_channels[1].addr_segment
        - P::Scalar::from_canonical_u64(Segment::RegisterFile as u64);
    yield_constr.constraint(filter * diff);
    // Check memory is used
    // Check is_read is 0/1
    let rs = lv.mem_channels[0].value;
    let rt = lv.mem_channels[1].value;
    let mem = lv.mem_channels[3].value;
    // Bit decompositions committed by the prover: the effective address
    // (rs_le), the rt value (rt_le) and the loaded word (mem_le).
    let rs_limbs = lv.general.io().rs_le;
    let rt_limbs = lv.general.io().rt_le;
    let mem_limbs = lv.general.io().mem_le;
    // Calculate rs:
    // let virt_raw = (rs as u32).wrapping_add(sign_extend::<16>(offset));
    let offset = load_offset(lv);
    let virt_raw = rs + offset;
    // here it may raise overflow here since wrapping_add used in simulator
    let rs_from_bits = limb_from_bits_le(rs_limbs);
    let power32 = P::Scalar::from_canonical_u64(1u64 << 32);
    // Either the bits equal the raw sum, or they equal it after one 2^32 wrap
    // (matching the simulator's wrapping_add).
    yield_constr
        .constraint(aux_filter * (rs_from_bits - virt_raw) * (rs_from_bits + power32 - virt_raw));
    let rt_from_bits = limb_from_bits_le(rt_limbs);
    yield_constr.constraint(filter * (rt_from_bits - rt));
    // Constrain mem address
    // let virt = virt_raw & 0xFFFF_FFFC;
    // Clearing the two low address bits yields the word-aligned address the
    // memory channel must use.
    let mut tmp = rs_limbs;
    tmp[0] = P::ZEROS;
    tmp[1] = P::ZEROS;
    let virt = limb_from_bits_le(tmp);
    let mem_virt = lv.mem_channels[2].addr_virtual;
    yield_constr.constraint(filter * (virt - mem_virt));
    // Constrain mem value
    // LH: micro_op[0] * sign_extend::<16>((mem >> (16 - (rs & 2) * 8)) & 0xffff)
    {
        // Range value(rs[1]): rs[1] == 1
        let mut mem_val_1 = [P::ZEROS; 32];
        mem_val_1[0..16].copy_from_slice(&mem_limbs[0..16]);
        let mem_val_1 = sign_extend::<_, 16>(&mem_val_1);
        // Range value(rs[1]): rs[1] == 0
        let mut mem_val_0 = [P::ZEROS; 32];
        mem_val_0[0..16].copy_from_slice(&mem_limbs[16..32]);
        let mem_val_0 = sign_extend::<_, 16>(&mem_val_0);
        let mem_val_1 = limb_from_bits_le(mem_val_1);
        let mem_val_0 = limb_from_bits_le(mem_val_0);
        // Range check
        enforce_half_word(
            yield_constr,
            lv.memio.is_lh,
            &rs_limbs,
            mem,
            mem_val_1,
            mem_val_0,
        );
    }
    // LWL:
    // let val = mem << ((rs & 3) * 8);
    // let mask = 0xffFFffFFu32 << ((rs & 3) * 8);
    // (rt & (!mask)) | val
    // Use mem_val_{rs[0]}_{rs[1]} to indicate the mem value for different value on rs' first and
    // second bit
    {
        let mut mem_val_0_0 = [P::ZEROS; 32];
        let mut mem_val_0_1 = [P::ZEROS; 32];
        let mut mem_val_1_0 = [P::ZEROS; 32];
        let mut mem_val_1_1 = [P::ZEROS; 32];
        // Each candidate merges the kept low bits of rt with the shifted mem
        // bits, per the byte offset encoded by (rs & 3).
        mem_val_0_0[0..32].copy_from_slice(&mem_limbs[0..32]);
        mem_val_1_0[0..8].copy_from_slice(&rt_limbs[0..8]);
        mem_val_1_0[8..].copy_from_slice(&mem_limbs[0..24]);
        mem_val_0_1[0..16].copy_from_slice(&rt_limbs[0..16]);
        mem_val_0_1[16..].copy_from_slice(&mem_limbs[0..16]);
        mem_val_1_1[0..24].copy_from_slice(&rt_limbs[0..24]);
        mem_val_1_1[24..].copy_from_slice(&mem_limbs[0..8]);
        let mem_val_0_0 = limb_from_bits_le(mem_val_0_0);
        let mem_val_1_0 = limb_from_bits_le(mem_val_1_0);
        let mem_val_0_1 = limb_from_bits_le(mem_val_0_1);
        let mem_val_1_1 = limb_from_bits_le(mem_val_1_1);
        enforce_byte(
            yield_constr,
            lv,
            lv.memio.is_lwl,
            &rs_limbs,
            mem,
            mem_val_0_0,
            mem_val_1_0,
            mem_val_0_1,
            mem_val_1_1,
        );
    }
    // LW: the whole word is loaded unchanged.
    {
        let mem_value = limb_from_bits_le(mem_limbs);
        yield_constr.constraint(lv.memio.is_lw * (mem - mem_value));
    }
    // LBU: (mem >> (24 - (rs & 3) * 8)) & 0xff
    {
        let mut mem_val_0_0 = [P::ZEROS; 32];
        let mut mem_val_0_1 = [P::ZEROS; 32];
        let mut mem_val_1_0 = [P::ZEROS; 32];
        let mut mem_val_1_1 = [P::ZEROS; 32];
        // Zero-extended byte selected by (rs & 3).
        mem_val_0_0[0..8].copy_from_slice(&mem_limbs[24..32]);
        mem_val_1_0[0..8].copy_from_slice(&mem_limbs[16..24]);
        mem_val_0_1[0..8].copy_from_slice(&mem_limbs[8..16]);
        mem_val_1_1[0..8].copy_from_slice(&mem_limbs[0..8]);
        let mem_val_0_0 = limb_from_bits_le(mem_val_0_0);
        let mem_val_1_0 = limb_from_bits_le(mem_val_1_0);
        let mem_val_0_1 = limb_from_bits_le(mem_val_0_1);
        let mem_val_1_1 = limb_from_bits_le(mem_val_1_1);
        enforce_byte(
            yield_constr,
            lv,
            lv.memio.is_lbu,
            &rs_limbs,
            mem,
            mem_val_0_0,
            mem_val_1_0,
            mem_val_0_1,
            mem_val_1_1,
        );
    }
    // LHU: (mem >> (16 - (rs & 2) * 8)) & 0xffff
    {
        // Zero-extended halfword selected by address bit 1.
        let mut mem_val_0 = [P::ZEROS; 32];
        let mut mem_val_1 = [P::ZEROS; 32];
        mem_val_0[0..16].copy_from_slice(&mem_limbs[16..32]);
        mem_val_1[0..16].copy_from_slice(&mem_limbs[0..16]);
        let mem_val_1 = limb_from_bits_le(mem_val_1);
        let mem_val_0 = limb_from_bits_le(mem_val_0);
        enforce_half_word(
            yield_constr,
            lv.memio.is_lhu,
            &rs_limbs,
            mem,
            mem_val_1,
            mem_val_0,
        );
    }
    // LWR:
    // let val = mem >> (24 - (rs & 3) * 8);
    // let mask = 0xffFFffFFu32 >> (24 - (rs & 3) * 8);
    // (rt & (!mask)) | val
    {
        let mut mem_val_0_0 = [P::ZEROS; 32];
        let mut mem_val_0_1 = [P::ZEROS; 32];
        let mut mem_val_1_0 = [P::ZEROS; 32];
        let mut mem_val_1_1 = [P::ZEROS; 32];
        // Mirror of LWL: keep rt's high bits, take mem's low-shifted bits.
        mem_val_0_0[8..].copy_from_slice(&rt_limbs[8..32]);
        mem_val_0_0[0..8].copy_from_slice(&mem_limbs[24..32]);
        mem_val_1_0[16..].copy_from_slice(&rt_limbs[16..32]);
        mem_val_1_0[0..16].copy_from_slice(&mem_limbs[16..32]);
        mem_val_0_1[24..].copy_from_slice(&rt_limbs[24..32]);
        mem_val_0_1[0..24].copy_from_slice(&mem_limbs[8..32]);
        mem_val_1_1[0..32].copy_from_slice(&mem_limbs[..]);
        let mem_val_0_0 = limb_from_bits_le(mem_val_0_0);
        let mem_val_1_0 = limb_from_bits_le(mem_val_1_0);
        let mem_val_0_1 = limb_from_bits_le(mem_val_0_1);
        let mem_val_1_1 = limb_from_bits_le(mem_val_1_1);
        enforce_byte(
            yield_constr,
            lv,
            lv.memio.is_lwr,
            &rs_limbs,
            mem,
            mem_val_0_0,
            mem_val_1_0,
            mem_val_0_1,
            mem_val_1_1,
        );
    }
    // LL: load-linked reads the whole word, like LW.
    {
        let mem_value = limb_from_bits_le(mem_limbs);
        yield_constr.constraint(lv.memio.is_ll * (mem - mem_value));
    }
    // LB: sign_extend::<8>((mem >> (24 - (rs & 3) * 8)) & 0xff)
    {
        let mut mem_val_0_0 = [P::ZEROS; 32];
        let mut mem_val_0_1 = [P::ZEROS; 32];
        let mut mem_val_1_0 = [P::ZEROS; 32];
        let mut mem_val_1_1 = [P::ZEROS; 32];
        // Sign-extended byte selected by (rs & 3).
        mem_val_0_0[0..8].copy_from_slice(&mem_limbs[24..]);
        mem_val_1_0[0..8].copy_from_slice(&mem_limbs[16..24]);
        mem_val_0_1[0..8].copy_from_slice(&mem_limbs[8..16]);
        mem_val_1_1[0..8].copy_from_slice(&mem_limbs[0..8]);
        let mem_val_0_0 = sign_extend::<_, 8>(&mem_val_0_0);
        let mem_val_1_0 = sign_extend::<_, 8>(&mem_val_1_0);
        let mem_val_0_1 = sign_extend::<_, 8>(&mem_val_0_1);
        let mem_val_1_1 = sign_extend::<_, 8>(&mem_val_1_1);
        let mem_val_0_0 = limb_from_bits_le(mem_val_0_0);
        let mem_val_1_0 = limb_from_bits_le(mem_val_1_0);
        let mem_val_0_1 = limb_from_bits_le(mem_val_0_1);
        let mem_val_1_1 = limb_from_bits_le(mem_val_1_1);
        enforce_byte(
            yield_constr,
            lv,
            lv.memio.is_lb,
            &rs_limbs,
            mem,
            mem_val_0_0,
            mem_val_1_0,
            mem_val_0_1,
            mem_val_1_1,
        );
    }
    // Disable remaining memory channels, if any.
    // Note: SC needs 5 channel
    for &channel in &lv.mem_channels[6..(NUM_GP_CHANNELS - 1)] {
        yield_constr.constraint(filter * channel.used);
    }
}
/// Recursive-circuit counterpart of the packed load evaluation: re-encodes the
/// same load-instruction constraints (LH, LWL, LW, LBU, LHU, LWR, LL, LB) as
/// plonky2 gates over `ExtensionTarget`s.
///
/// Channel layout (mirrors the packed version):
/// * channel 0 — rs register read, channel 1 — rt register read,
/// * channel 2 — memory address (word-aligned), channel 3 — memory word value.
/// `rs_le` / `rt_le` / `mem_le` are the little-endian bit decompositions of
/// those values, provided as witness columns.
fn eval_ext_circuit_load<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    _nv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let zeros = builder.zero_extension();
    let ones = builder.one_extension();
    // Row is an active load when the m_op_load flag and opcode bit 5 are both set.
    let filter = builder.mul_extension(lv.op.m_op_load, lv.opcode_bits[5]);
    let aux_filter = lv.memio.aux_filter;
    // filter * (1 - aux_filter) == 0: aux_filter must be 1 whenever filter is.
    let constr = builder.sub_extension(ones, aux_filter);
    let constr = builder.mul_extension(filter, constr);
    yield_constr.constraint(builder, constr);
    // Check mem channel segment is register (channels 0 and 1 read the register file).
    let diff = builder.add_const_extension(
        lv.mem_channels[0].addr_segment,
        -F::from_canonical_u64(Segment::RegisterFile as u64),
    );
    let constr = builder.mul_extension(filter, diff);
    yield_constr.constraint(builder, constr);
    let diff = builder.add_const_extension(
        lv.mem_channels[1].addr_segment,
        -F::from_canonical_u64(Segment::RegisterFile as u64),
    );
    let constr = builder.mul_extension(filter, diff);
    yield_constr.constraint(builder, constr);
    let rs = lv.mem_channels[0].value;
    let rt = lv.mem_channels[1].value;
    let mem = lv.mem_channels[3].value;
    let rs_limbs = lv.general.io().rs_le;
    let rt_limbs = lv.general.io().rt_le;
    let mem_limbs = lv.general.io().mem_le;
    // Calculate rs:
    // let virt_raw = (rs as u32).wrapping_add(sign_extend::<16>(offset));
    let offset = load_offset_ext(builder, lv);
    let virt_raw = builder.add_extension(rs, offset);
    let power32 = F::from_canonical_u64(1u64 << 32);
    // Wrapping u32 add: rs_from_bits equals virt_raw or virt_raw - 2^32, i.e.
    // aux_filter * (rs_from_bits - virt_raw) * (rs_from_bits + 2^32 - virt_raw) == 0.
    let rs_from_bits = limb_from_bits_le_recursive(builder, rs_limbs);
    let diff1 = builder.sub_extension(rs_from_bits, virt_raw);
    let diff2 = builder.add_const_extension(rs_from_bits, power32);
    let diff2 = builder.sub_extension(diff2, virt_raw);
    let constr = builder.mul_many_extension([aux_filter, diff1, diff2]);
    yield_constr.constraint(builder, constr);
    // The rt bit decomposition must recompose to the rt channel value.
    let rt_from_bits = limb_from_bits_le_recursive(builder, rt_limbs);
    let diff = builder.sub_extension(rt_from_bits, rt);
    let constr = builder.mul_extension(filter, diff);
    yield_constr.constraint(builder, constr);
    // Constrain mem address
    // let virt = virt_raw & 0xFFFF_FFFC;  (clear the two low bits to word-align)
    let mut tmp = rs_limbs;
    tmp[0] = zeros;
    tmp[1] = zeros;
    let virt = limb_from_bits_le_recursive(builder, tmp);
    let mem_virt = lv.mem_channels[2].addr_virtual;
    let diff = builder.sub_extension(virt, mem_virt);
    let constr = builder.mul_extension(filter, diff);
    yield_constr.constraint(builder, constr);
    // Constrain mem value
    // LH: micro_op[0] * sign_extend::<16>((mem >> (16 - (rs & 2) * 8)) & 0xffff)
    {
        // Range value(rs[1]): rs[1] == 1 — low half-word of mem.
        let mut mem_val_1 = [zeros; 32];
        mem_val_1[0..16].copy_from_slice(&mem_limbs[0..16]);
        let mem_val_1 = sign_extend_ext::<_, D, 16>(builder, &mem_val_1);
        // Range value(rs[1]): rs[1] == 0 — high half-word of mem.
        let mut mem_val_0 = [zeros; 32];
        mem_val_0[0..16].copy_from_slice(&mem_limbs[16..32]);
        let mem_val_0 = sign_extend_ext::<_, D, 16>(builder, &mem_val_0);
        let mem_val_1 = limb_from_bits_le_recursive(builder, mem_val_1);
        let mem_val_0 = limb_from_bits_le_recursive(builder, mem_val_0);
        // Range check
        // let sum = rs_limbs[1] * (mem - mem_val_1) + (rs_limbs[1] - P::ONES) * (mem - mem_val_0);
        // yield_constr.constraint(filter * lv.general.io().micro_op[0] * sum);
        enforce_half_word_ext(
            builder,
            yield_constr,
            lv.memio.is_lh,
            &rs_limbs,
            mem,
            mem_val_1,
            mem_val_0,
        );
    }
    // LWL:
    // let val = mem << ((rs & 3) * 8);
    // let mask = 0xffFFffFFu32 << ((rs & 3) * 8);
    // (rt & (!mask)) | val
    // Use mem_val_{rs[0]}_{rs[1]} to indicate the mem value for different value on rs' first and
    // second bit
    {
        let mut mem_val_0_0 = [zeros; 32];
        let mut mem_val_0_1 = [zeros; 32];
        let mut mem_val_1_0 = [zeros; 32];
        let mut mem_val_1_1 = [zeros; 32];
        mem_val_0_0[0..32].copy_from_slice(&mem_limbs[0..32]);
        mem_val_1_0[0..8].copy_from_slice(&rt_limbs[0..8]);
        mem_val_1_0[8..].copy_from_slice(&mem_limbs[0..24]);
        mem_val_0_1[0..16].copy_from_slice(&rt_limbs[0..16]);
        mem_val_0_1[16..].copy_from_slice(&mem_limbs[0..16]);
        mem_val_1_1[0..24].copy_from_slice(&rt_limbs[0..24]);
        mem_val_1_1[24..].copy_from_slice(&mem_limbs[0..8]);
        let mem_val_0_0 = limb_from_bits_le_recursive(builder, mem_val_0_0);
        let mem_val_1_0 = limb_from_bits_le_recursive(builder, mem_val_1_0);
        let mem_val_0_1 = limb_from_bits_le_recursive(builder, mem_val_0_1);
        let mem_val_1_1 = limb_from_bits_le_recursive(builder, mem_val_1_1);
        // let sum =
        //     (mem - mem_val_0_0) * (rs_limbs[1] - P::ONES) * (rs_limbs[0] - P::ONES) +
        //     (mem - mem_val_1_0) * (rs_limbs[1] - P::ONES) * rs_limbs[0] +
        //     (mem - mem_val_0_1) * rs_limbs[1] * (rs_limbs[0] - P::ONES) +
        //     (mem - mem_val_1_1) * rs_limbs[1] * rs_limbs[0];
        // yield_constr.constraint(filter * lv.general.io().micro_op[1] * sum);
        enforce_byte_ext(
            builder,
            yield_constr,
            lv,
            lv.memio.is_lwl,
            &rs_limbs,
            mem,
            mem_val_0_0,
            mem_val_1_0,
            mem_val_0_1,
            mem_val_1_1,
        );
    }
    // LW: the full word must match the recomposed mem bits.
    {
        let mem_value = limb_from_bits_le_recursive(builder, mem_limbs);
        // yield_constr.constraint(filter * lv.general.io().micro_op[2] * (mem - mem_value));
        let fc = builder.sub_extension(mem, mem_value);
        let fc = builder.mul_extension(lv.memio.is_lw, fc);
        yield_constr.constraint(builder, fc);
    }
    // LBU: (mem >> (24 - (rs & 3) * 8)) & 0xff — zero-extended byte select.
    {
        let mut mem_val_0_0 = [zeros; 32];
        let mut mem_val_0_1 = [zeros; 32];
        let mut mem_val_1_0 = [zeros; 32];
        let mut mem_val_1_1 = [zeros; 32];
        mem_val_0_0[0..8].copy_from_slice(&mem_limbs[24..32]);
        mem_val_1_0[0..8].copy_from_slice(&mem_limbs[16..24]);
        mem_val_0_1[0..8].copy_from_slice(&mem_limbs[8..16]);
        mem_val_1_1[0..8].copy_from_slice(&mem_limbs[0..8]);
        let mem_val_0_0 = limb_from_bits_le_recursive(builder, mem_val_0_0);
        let mem_val_1_0 = limb_from_bits_le_recursive(builder, mem_val_1_0);
        let mem_val_0_1 = limb_from_bits_le_recursive(builder, mem_val_0_1);
        let mem_val_1_1 = limb_from_bits_le_recursive(builder, mem_val_1_1);
        enforce_byte_ext(
            builder,
            yield_constr,
            lv,
            lv.memio.is_lbu,
            &rs_limbs,
            mem,
            mem_val_0_0,
            mem_val_1_0,
            mem_val_0_1,
            mem_val_1_1,
        );
    }
    // LHU: (mem >> (16 - (rs & 2) * 8)) & 0xffff — zero-extended half-word select.
    {
        let mut mem_val_0 = [zeros; 32];
        let mut mem_val_1 = [zeros; 32];
        mem_val_0[0..16].copy_from_slice(&mem_limbs[16..32]);
        mem_val_1[0..16].copy_from_slice(&mem_limbs[0..16]);
        let mem_val_1 = limb_from_bits_le_recursive(builder, mem_val_1);
        let mem_val_0 = limb_from_bits_le_recursive(builder, mem_val_0);
        enforce_half_word_ext(
            builder,
            yield_constr,
            lv.memio.is_lhu,
            &rs_limbs,
            mem,
            mem_val_1,
            mem_val_0,
        );
    }
    // LWR:
    // let val = mem >> (24 - (rs & 3) * 8);
    // let mask = 0xffFFffFFu32 >> (24 - (rs & 3) * 8);
    // (rt & (!mask)) | val
    {
        let mut mem_val_0_0 = [zeros; 32];
        let mut mem_val_0_1 = [zeros; 32];
        let mut mem_val_1_0 = [zeros; 32];
        let mut mem_val_1_1 = [zeros; 32];
        mem_val_0_0[8..].copy_from_slice(&rt_limbs[8..32]);
        mem_val_0_0[0..8].copy_from_slice(&mem_limbs[24..32]);
        mem_val_1_0[16..].copy_from_slice(&rt_limbs[16..32]);
        mem_val_1_0[0..16].copy_from_slice(&mem_limbs[16..32]);
        mem_val_0_1[24..].copy_from_slice(&rt_limbs[24..32]);
        mem_val_0_1[0..24].copy_from_slice(&mem_limbs[8..32]);
        mem_val_1_1[0..32].copy_from_slice(&mem_limbs[..]);
        let mem_val_0_0 = limb_from_bits_le_recursive(builder, mem_val_0_0);
        let mem_val_1_0 = limb_from_bits_le_recursive(builder, mem_val_1_0);
        let mem_val_0_1 = limb_from_bits_le_recursive(builder, mem_val_0_1);
        let mem_val_1_1 = limb_from_bits_le_recursive(builder, mem_val_1_1);
        enforce_byte_ext(
            builder,
            yield_constr,
            lv,
            lv.memio.is_lwr,
            &rs_limbs,
            mem,
            mem_val_0_0,
            mem_val_1_0,
            mem_val_0_1,
            mem_val_1_1,
        );
    }
    // LL: load-linked behaves like LW for the value constraint.
    {
        let mem_value = limb_from_bits_le_recursive(builder, mem_limbs);
        let fc = builder.sub_extension(mem, mem_value);
        let fc = builder.mul_extension(lv.memio.is_ll, fc);
        yield_constr.constraint(builder, fc);
    }
    // LB: sign_extend::<8>((mem >> (24 - (rs & 3) * 8)) & 0xff)
    {
        let mut mem_val_0_0 = [zeros; 32];
        let mut mem_val_0_1 = [zeros; 32];
        let mut mem_val_1_0 = [zeros; 32];
        let mut mem_val_1_1 = [zeros; 32];
        mem_val_0_0[0..8].copy_from_slice(&mem_limbs[24..]);
        mem_val_1_0[0..8].copy_from_slice(&mem_limbs[16..24]);
        mem_val_0_1[0..8].copy_from_slice(&mem_limbs[8..16]);
        mem_val_1_1[0..8].copy_from_slice(&mem_limbs[0..8]);
        let mem_val_0_0 = sign_extend_ext::<_, D, 8>(builder, &mem_val_0_0);
        let mem_val_1_0 = sign_extend_ext::<_, D, 8>(builder, &mem_val_1_0);
        let mem_val_0_1 = sign_extend_ext::<_, D, 8>(builder, &mem_val_0_1);
        let mem_val_1_1 = sign_extend_ext::<_, D, 8>(builder, &mem_val_1_1);
        let mem_val_0_0 = limb_from_bits_le_recursive(builder, mem_val_0_0);
        let mem_val_1_0 = limb_from_bits_le_recursive(builder, mem_val_1_0);
        let mem_val_0_1 = limb_from_bits_le_recursive(builder, mem_val_0_1);
        let mem_val_1_1 = limb_from_bits_le_recursive(builder, mem_val_1_1);
        enforce_byte_ext(
            builder,
            yield_constr,
            lv,
            lv.memio.is_lb,
            &rs_limbs,
            mem,
            mem_val_0_0,
            mem_val_1_0,
            mem_val_0_1,
            mem_val_1_1,
        );
    }
    // Disable remaining memory channels, if any.
    for &channel in &lv.mem_channels[6..(NUM_GP_CHANNELS - 1)] {
        let constr = builder.mul_extension(filter, channel.used);
        yield_constr.constraint(builder, constr);
    }
}
fn eval_packed_store<P: PackedField>(
lv: &CpuColumnsView<P>,
_nv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
let filter = lv.op.m_op_store * lv.opcode_bits[5];
let aux_filter = lv.memio.aux_filter;
yield_constr.constraint(filter * (P::ONES - aux_filter));
// Check mem channel segment is register
let diff = lv.mem_channels[0].addr_segment
- P::Scalar::from_canonical_u64(Segment::RegisterFile as u64);
yield_constr.constraint(filter * diff);
let diff = lv.mem_channels[1].addr_segment
- P::Scalar::from_canonical_u64(Segment::RegisterFile as u64);
yield_constr.constraint(filter * diff);
// Check memory is used
// Check is_read is 0/1
let rs = lv.mem_channels[0].value;
let rt = lv.mem_channels[1].value;
let mem = lv.mem_channels[3].value;
let rs_limbs = lv.general.io().rs_le;
let rt_limbs = lv.general.io().rt_le;
let mem_limbs = lv.general.io().mem_le;
// Calculate rs:
// let virt_raw = (rs as u32).wrapping_add(sign_extend::<16>(offset));
let offset = load_offset(lv);
let virt_raw = rs + offset;
let rs_from_bits = limb_from_bits_le(rs_limbs);
let power32 = P::Scalar::from_canonical_u64(1u64 << 32);
yield_constr
.constraint(aux_filter * (rs_from_bits - virt_raw) * (rs_from_bits + power32 - virt_raw));
let rt_from_bits = limb_from_bits_le(rt_limbs);
yield_constr.constraint(filter * (rt_from_bits - rt));
// Constrain mem address
// let virt = virt_raw & 0xFFFF_FFFC;
let mut tmp = rs_limbs;
tmp[0] = P::ZEROS;
tmp[1] = P::ZEROS;
let virt = limb_from_bits_le(tmp);
let mem_virt = lv.mem_channels[2].addr_virtual;
yield_constr.constraint(filter * (virt - mem_virt));
// Constrain mem value
// SB:
// let val = (rt & 0xff) << (24 - (rs & 3) * 8);
// let mask = 0xffFFffFFu32 ^ (0xff << (24 - (rs & 3) * 8));
// (mem & mask) | val
{
let mut mem_val_0_0 = [P::ZEROS; 32];
let mut mem_val_1_0 = [P::ZEROS; 32];
let mut mem_val_0_1 = [P::ZEROS; 32];
let mut mem_val_1_1 = [P::ZEROS; 32];
// rs[0] = 0, rs[1] = 0
mem_val_0_0[24..].copy_from_slice(&rt_limbs[0..8]);
mem_val_0_0[0..24].copy_from_slice(&mem_limbs[0..24]);
// rs[0] = 1, rs[1] = 0
mem_val_1_0[24..].copy_from_slice(&mem_limbs[24..]);
mem_val_1_0[16..24].copy_from_slice(&rt_limbs[0..8]);
mem_val_1_0[0..16].copy_from_slice(&mem_limbs[0..16]);
// rs[0] = 0, rs[1] = 1
mem_val_0_1[16..].copy_from_slice(&mem_limbs[16..]);
mem_val_0_1[8..16].copy_from_slice(&rt_limbs[0..8]);
mem_val_0_1[0..8].copy_from_slice(&mem_limbs[0..8]);
// rs[0] = 1, rs[1] = 1
mem_val_1_1[0..8].copy_from_slice(&rt_limbs[0..8]);
mem_val_1_1[8..].copy_from_slice(&mem_limbs[8..]);
let mem_val_0_0 = limb_from_bits_le(mem_val_0_0);
let mem_val_1_0 = limb_from_bits_le(mem_val_1_0);
let mem_val_0_1 = limb_from_bits_le(mem_val_0_1);
let mem_val_1_1 = limb_from_bits_le(mem_val_1_1);
enforce_byte(
yield_constr,
lv,
lv.memio.is_sb,
&rs_limbs,
mem,
mem_val_0_0,
mem_val_1_0,
mem_val_0_1,
mem_val_1_1,
);
}
// SH
// let val = (rt & 0xffff) << (16 - (rs & 2) * 8);
// let mask = 0xffFFffFFu32 ^ (0xffff << (16 - (rs & 2) * 8));
// (mem & mask) | val
{
let mut mem_val_0 = [P::ZEROS; 32];
let mut mem_val_1 = [P::ZEROS; 32];
mem_val_0[16..].copy_from_slice(&rt_limbs[0..16]);
mem_val_0[0..16].copy_from_slice(&mem_limbs[0..16]);
mem_val_1[0..16].copy_from_slice(&rt_limbs[0..16]);
mem_val_1[16..].copy_from_slice(&mem_limbs[16..]);
let mem_val_1 = limb_from_bits_le(mem_val_1);
let mem_val_0 = limb_from_bits_le(mem_val_0);
enforce_half_word(
yield_constr,
lv.memio.is_sh,
&rs_limbs,
mem,
mem_val_1,
mem_val_0,
);
}
// SWL
// let val = rt >> ((rs & 3) * 8);
// let mask = 0xffFFffFFu32 >> ((rs & 3) * 8);
// (mem & (!mask)) | val
{
let mut mem_val_0_0 = [P::ZEROS; 32];
let mut mem_val_1_0 = [P::ZEROS; 32];
let mut mem_val_0_1 = [P::ZEROS; 32];
let mut mem_val_1_1 = [P::ZEROS; 32];
// rs[0] = 0, rs[1] = 0
mem_val_0_0[..].copy_from_slice(&rt_limbs[..]);
// rs[0] = 1, rs[1] = 0
mem_val_1_0[0..24].copy_from_slice(&rt_limbs[8..]);
mem_val_1_0[24..].copy_from_slice(&mem_limbs[24..]);
// rs[0] = 0, rs[1] = 1
mem_val_0_1[0..16].copy_from_slice(&rt_limbs[16..]);
mem_val_0_1[16..].copy_from_slice(&mem_limbs[16..]);
// rs[0] = 1, rs[1] = 1
mem_val_1_1[0..8].copy_from_slice(&rt_limbs[24..]);
mem_val_1_1[8..].copy_from_slice(&mem_limbs[8..]);
let mem_val_0_0 = limb_from_bits_le(mem_val_0_0);
let mem_val_1_0 = limb_from_bits_le(mem_val_1_0);
let mem_val_0_1 = limb_from_bits_le(mem_val_0_1);
let mem_val_1_1 = limb_from_bits_le(mem_val_1_1);
enforce_byte(
yield_constr,
lv,
lv.memio.is_swl,
&rs_limbs,
mem,
mem_val_0_0,
mem_val_1_0,
mem_val_0_1,
mem_val_1_1,
);
}
// SW
{
let rt_value = limb_from_bits_le(rt_limbs);
yield_constr.constraint(lv.memio.is_sw * (mem - rt_value));
}
// SWR
// let val = rt << (24 - (rs & 3) * 8);
// let mask = 0xffFFffFFu32 << (24 - (rs & 3) * 8);
// (mem & (!mask)) | val
{
let mut mem_val_0_0 = [P::ZEROS; 32];
let mut mem_val_1_0 = [P::ZEROS; 32];
let mut mem_val_0_1 = [P::ZEROS; 32];
let mut mem_val_1_1 = [P::ZEROS; 32];
// rs[0] = 0, rs[1] = 0
mem_val_0_0[24..].copy_from_slice(&rt_limbs[0..8]);
mem_val_0_0[0..24].copy_from_slice(&mem_limbs[0..24]);
// rs[0] = 1, rs[1] = 0
mem_val_1_0[16..].copy_from_slice(&rt_limbs[0..16]);
mem_val_1_0[0..16].copy_from_slice(&mem_limbs[0..16]);
// rs[0] = 0, rs[1] = 1
mem_val_0_1[8..].copy_from_slice(&rt_limbs[0..24]);
mem_val_0_1[0..8].copy_from_slice(&mem_limbs[0..8]);
// rs[0] = 1, rs[1] = 1
mem_val_1_1[..].copy_from_slice(&rt_limbs[..]);
let mem_val_0_0 = limb_from_bits_le(mem_val_0_0);
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | true |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/jumps.rs | prover/src/cpu/jumps.rs | use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
//use plonky2_evm::util::limb_from_bits_le;
use crate::util::{limb_from_bits_le, limb_from_bits_le_recursive};
/// 2^-32 mod (2^64 - 2^32 + 1)
const GOLDILOCKS_INVERSE_2EXP32: u64 = 18446744065119617026;
/// Constrains the jump family of instructions (JR/JALR via `jumps`, J/JAL via
/// `jumpi`, and the direct-jump form via `jumpdirect`) over packed field rows.
///
/// Checks the jump target value and register, the immediate reconstruction for
/// J-type jumps, the link (return-address) value `pc + 8`, and the link
/// destination register (rd for JALR, $31 for JAL/jumpdirect).
pub fn eval_packed_jump_jumpi<P: PackedField>(
    lv: &CpuColumnsView<P>,
    nv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let is_jump = lv.op.jumps;
    let is_jumpi = lv.op.jumpi;
    let is_jumpdirect = lv.op.jumpdirect;
    // Link variants: register jump links iff func bit 0 is set (JALR);
    // immediate jump links iff opcode bit 0 is set (JAL).
    let is_link = is_jump * lv.func_bits[0];
    let is_linki = is_jumpi * lv.opcode_bits[0];
    // Check `jump target value`:
    // constraint: is_jump * (next_program_counter - reg[rs]) == 0
    {
        let reg_dst = lv.mem_channels[0].value;
        yield_constr.constraint(is_jump * (nv.next_program_counter - reg_dst));
    }
    // Check `jump target register`:
    // constraint: is_jump * (jump_reg - rs) == 0
    {
        let jump_reg = lv.mem_channels[0].addr_virtual;
        let jump_dst = limb_from_bits_le(lv.rs_bits);
        yield_constr.constraint(is_jump * (jump_dst - jump_reg));
    }
    // Check `jumpi target value`:
    // constraint:
    //  * jump_dest = offset << 2 + pc_remain
    //  * is_jumpi * (next_program_counter - jump_dest) == 0
    //  * where pc_remain is pc[28..32] << 28
    {
        // Reassemble the 26-bit J-type immediate (shifted left by 2) from the
        // instruction bit fields: func(6) | shamt(5) | rd(5) | rt(5) | rs(5).
        let mut jump_imm = [P::ZEROS; 28];
        jump_imm[2..8].copy_from_slice(&lv.func_bits);
        jump_imm[8..13].copy_from_slice(&lv.shamt_bits);
        jump_imm[13..18].copy_from_slice(&lv.rd_bits);
        jump_imm[18..23].copy_from_slice(&lv.rt_bits);
        jump_imm[23..28].copy_from_slice(&lv.rs_bits);
        let imm_dst = limb_from_bits_le(jump_imm);
        let pc_remain = lv.mem_channels[2].value;
        let jump_dest = pc_remain + imm_dst;
        yield_constr.constraint(is_jumpi * (nv.next_program_counter - jump_dest));
    }
    // Check `jumpdirect target value`:
    // constraint:
    //  * jump_dest = offset << 2 + pc
    //  * is_jumpdirect * (next_program_counter - jump_dest) == 0
    {
        let aux = lv.mem_channels[2].value;
        let overflow = P::Scalar::from_canonical_u64(1 << 32);
        // Sign-extended 16-bit offset, pre-shifted left by 2.
        let mut jump_offset = [P::ZEROS; 32];
        jump_offset[2..8].copy_from_slice(&lv.func_bits); // 6 bits
        jump_offset[8..13].copy_from_slice(&lv.shamt_bits); // 5 bits
        jump_offset[13..18].copy_from_slice(&lv.rd_bits); // 5 bits
        jump_offset[18..32].copy_from_slice(&[lv.rd_bits[4]; 14]); // lv.insn_bits[15]
        let offset_dst = limb_from_bits_le(jump_offset);
        yield_constr.constraint(is_jumpdirect * (aux - offset_dst));
        // Wrapping u32 add: next_pc equals jump_dst or jump_dst - 2^32.
        let jump_dst = lv.program_counter + P::Scalar::from_canonical_u8(4) + aux;
        yield_constr.constraint(
            is_jumpdirect
                * (nv.next_program_counter - jump_dst)
                * (nv.next_program_counter + overflow - jump_dst),
        );
    }
    // Check `link/linki target value`:
    // constraint:
    //  * next_addr = program_counter + 8
    //  * link = is_link + is_linki + is_jumpdirect
    //  * link * (ret_addr - next_addr) == 0
    {
        let link_dest = lv.mem_channels[1].value;
        yield_constr.constraint(
            (is_link + is_linki + is_jumpdirect)
                * (lv.program_counter + P::Scalar::from_canonical_u64(8) - link_dest),
        );
    }
    // Check `link target register`:
    // constraint: is_link * (ret_reg - rd) == 0
    let link_reg = lv.mem_channels[1].addr_virtual;
    {
        let link_dst = limb_from_bits_le(lv.rd_bits);
        yield_constr.constraint(is_link * (link_reg - link_dst));
    }
    // Check `linki/jumpdirect target register`:
    // constraint: (is_linki + is_jumpdirect) * (ret_reg - 31) == 0
    {
        yield_constr.constraint(
            (is_linki + is_jumpdirect) * (link_reg - P::Scalar::from_canonical_u64(31)),
        );
    }
}
/// Recursive-circuit counterpart of `eval_packed_jump_jumpi`: the same jump /
/// jump-immediate / jump-direct constraints expressed as plonky2 gates.
pub fn eval_ext_circuit_jump_jumpi<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    nv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let is_jump = lv.op.jumps;
    let is_jumpi = lv.op.jumpi;
    let is_jumpdirect = lv.op.jumpdirect;
    let zero_extension = builder.zero_extension();
    // Link variants: JALR (func bit 0) and JAL (opcode bit 0).
    let is_link = builder.mul_extension(is_jump, lv.func_bits[0]);
    let is_linki = builder.mul_extension(is_jumpi, lv.opcode_bits[0]);
    // Check `jump target value`:
    // constraint: is_jump * (next_program_counter - reg[rs]) == 0
    {
        let reg_dst = lv.mem_channels[0].value;
        let constr = builder.sub_extension(nv.next_program_counter, reg_dst);
        let constr = builder.mul_extension(is_jump, constr);
        yield_constr.constraint(builder, constr);
    }
    // Check `jump target register`:
    // constraint: is_jump * (jump_reg - rs) == 0
    {
        let jump_reg = lv.mem_channels[0].addr_virtual;
        let jump_dst = limb_from_bits_le_recursive(builder, lv.rs_bits);
        let constr = builder.sub_extension(jump_dst, jump_reg);
        let constr = builder.mul_extension(constr, is_jump);
        yield_constr.constraint(builder, constr);
    }
    // Check `jumpi target value`:
    // constraint:
    //  * jump_dest = offset << 2 + pc_remain
    //  * is_jumpi * (next_program_counter - jump_dest) == 0
    //  * where pc_remain is pc[28..32] << 28
    {
        // J-type 26-bit immediate (<< 2) reassembled from instruction bit fields.
        let mut jump_imm = [zero_extension; 28];
        jump_imm[2..8].copy_from_slice(&lv.func_bits);
        jump_imm[8..13].copy_from_slice(&lv.shamt_bits);
        jump_imm[13..18].copy_from_slice(&lv.rd_bits);
        jump_imm[18..23].copy_from_slice(&lv.rt_bits);
        jump_imm[23..28].copy_from_slice(&lv.rs_bits);
        let jump_dest = limb_from_bits_le_recursive(builder, jump_imm);
        let constr = builder.add_extension(lv.mem_channels[2].value, jump_dest);
        let constr = builder.sub_extension(nv.next_program_counter, constr);
        let constr = builder.mul_extension(is_jumpi, constr);
        yield_constr.constraint(builder, constr);
    }
    // Check `jumpdirect target value`:
    // constraints:
    //  * aux = sign_extended(offset << 2)
    //  * jump_dest = sign_extended(offset << 2) + pc + 4
    //  * is_jumpdirect * (next_program_counter - jump_dest) * (next_program_counter + 1 << 32 - jump_dest) == 0
    {
        let aux = lv.mem_channels[2].value;
        let overflow = builder.constant_extension(F::Extension::from_canonical_u64(1 << 32));
        // Sign-extended 16-bit offset, pre-shifted left by 2.
        let mut jump_offset = [zero_extension; 32];
        jump_offset[2..8].copy_from_slice(&lv.func_bits); // 6 bits
        jump_offset[8..13].copy_from_slice(&lv.shamt_bits); // 5 bits
        jump_offset[13..18].copy_from_slice(&lv.rd_bits); // 5 bits
        jump_offset[18..32].copy_from_slice(&[lv.rd_bits[4]; 14]); // lv.insn_bits[15]
        let offset_dst = limb_from_bits_le_recursive(builder, jump_offset);
        let constr = builder.sub_extension(aux, offset_dst);
        let constr = builder.mul_extension(is_jumpdirect, constr);
        yield_constr.constraint(builder, constr);
        // Wrapping u32 add: next_pc equals jump_dst or jump_dst - 2^32.
        let base_pc = builder.add_const_extension(lv.program_counter, F::from_canonical_u64(4));
        let jump_dst = builder.add_extension(base_pc, aux);
        let overflow_target = builder.add_extension(nv.next_program_counter, overflow);
        let constr_a = builder.sub_extension(overflow_target, jump_dst);
        let constr_b = builder.sub_extension(nv.next_program_counter, jump_dst);
        let constr = builder.mul_extension(is_jumpdirect, constr_a);
        let constr = builder.mul_extension(constr, constr_b);
        yield_constr.constraint(builder, constr);
    }
    // Check `link/linki target value`:
    // constraint:
    //  * next_addr = program_counter + 8
    //  * link = is_link + is_linki + is_jumpdirect
    //  * link * (ret_addr - next_addr) == 0
    {
        let link_dst = lv.mem_channels[1].value;
        let link_dest = builder.add_const_extension(lv.program_counter, F::from_canonical_u64(8));
        let constr = builder.sub_extension(link_dest, link_dst);
        let is_link = builder.add_extension(is_link, is_linki);
        let is_link = builder.add_extension(is_link, is_jumpdirect);
        let constr = builder.mul_extension(is_link, constr);
        yield_constr.constraint(builder, constr);
    }
    // Check `link target register`:
    // constraint: is_link * (ret_reg - rd) == 0
    let link_reg = lv.mem_channels[1].addr_virtual;
    {
        let link_dst = limb_from_bits_le_recursive(builder, lv.rd_bits);
        let constr = builder.sub_extension(link_reg, link_dst);
        let constr = builder.mul_extension(constr, is_link);
        yield_constr.constraint(builder, constr);
    }
    // Check `linki/jumpdirect target register`:
    // constraint: (is_linki + is_jumpdirect) * (ret_reg - 31) == 0
    {
        let reg_31 = builder.constant_extension(F::Extension::from_canonical_u64(31));
        let constr = builder.sub_extension(link_reg, reg_31);
        let link31 = builder.add_extension(is_jumpdirect, is_linki);
        let constr = builder.mul_extension(constr, link31);
        yield_constr.constraint(builder, constr);
    }
}
/// Constrains conditional branches (BEQ/BNE/BLEZ/BGTZ/BGEZ/BLTZ) over packed
/// field rows.
///
/// Auxiliary channel layout:
/// * aux1 = src1 - src2 (wrapped to u32), aux2 = src2 - src1 (wrapped to u32),
/// * aux3 = 1 iff src1 and src2 have different sign bits,
/// * aux4 = sign-extended branch offset << 2.
/// Signed comparisons are derived from the unsigned ones by flipping lt/gt
/// when the sign bits differ (aux3).
pub fn eval_packed_branch<P: PackedField>(
    lv: &CpuColumnsView<P>,
    nv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let branch_lv = lv.branch;
    let filter = lv.op.branch; // `BRANCH`
    let is_eq = branch_lv.is_eq;
    let is_ne = branch_lv.is_ne;
    let is_le = branch_lv.is_le;
    let is_gt = branch_lv.is_gt;
    let is_ge = branch_lv.is_ge;
    let is_lt = branch_lv.is_lt;
    // Branches that read a real rt register vs. those comparing against zero.
    let norm_filter = is_eq + is_ne + is_le + is_gt;
    let special_filter = is_ge + is_lt;
    let src1 = lv.mem_channels[0].value;
    let src2 = lv.mem_channels[1].value;
    let aux1 = lv.mem_channels[2].value; // src1 - src2
    let aux2 = lv.mem_channels[3].value; // src2 - src1
    let aux3 = lv.mem_channels[4].value; // (src1 ^ src2) & 0x80000000 > 0
    let aux4 = lv.mem_channels[5].value; // branch offset
    let overflow = P::Scalar::from_canonical_u64(1 << 32);
    // 2^-32 in Goldilocks; used to turn a {0, 2^32}-valued expression into {0, 1}.
    let overflow_inv = P::Scalar::from_canonical_u64(GOLDILOCKS_INVERSE_2EXP32);
    // Check `should_jump`:
    // constraint:
    //  (1 - should_jump) * should_jump == 0   (boolean)
    //  should_jump * (1 - filter) == 0        (only set on branch rows)
    yield_constr.constraint(branch_lv.should_jump * (P::ONES - branch_lv.should_jump));
    yield_constr.constraint(branch_lv.should_jump * (P::ONES - filter));
    // Check branch flags:
    //  filter * (1 - (is_eq + is_ne + is_le + is_gt + is_ge + is_lt)) == 0
    //  filter * (1 - (lt + gt + eq)) == 0
    yield_constr.constraint(filter * (P::ONES - (norm_filter + special_filter)));
    yield_constr.constraint(filter * (P::ONES - (branch_lv.lt + branch_lv.gt + branch_lv.eq)));
    // Check `branch target value`:
    // constraints:
    //  * aux4 = sign_extended(offset << 2)
    //  * jump_dest = sign_extended(offset << 2) + pc + 4
    //  * should_jump * (next_program_counter - jump_dest) * (next_program_counter + 1 << 32 - jump_dest) == 0
    //  * next_addr = pc + 8
    //  * filter * (1 - should_jump) * (next_program_counter - next_pc) == 0
    {
        // Sign-extended 16-bit offset, pre-shifted left by 2.
        let mut branch_offset = [P::ZEROS; 32];
        branch_offset[2..8].copy_from_slice(&lv.func_bits); // 6 bits
        branch_offset[8..13].copy_from_slice(&lv.shamt_bits); // 5 bits
        branch_offset[13..18].copy_from_slice(&lv.rd_bits); // 5 bits
        branch_offset[18..32].copy_from_slice(&[lv.rd_bits[4]; 14]); // lv.insn_bits[15]
        let offset_dst = limb_from_bits_le(branch_offset);
        yield_constr.constraint(filter * (aux4 - offset_dst));
        // Wrapping u32 add: next_pc equals branch_dst or branch_dst - 2^32.
        let branch_dst = lv.program_counter + P::Scalar::from_canonical_u8(4) + aux4;
        yield_constr.constraint(
            branch_lv.should_jump
                * (nv.next_program_counter - branch_dst)
                * (nv.next_program_counter + overflow - branch_dst),
        );
        // Not taken: fall through past the delay slot (pc + 8).
        let next_inst = lv.program_counter + P::Scalar::from_canonical_u64(8);
        yield_constr.constraint(
            filter * (P::ONES - branch_lv.should_jump) * (nv.next_program_counter - next_inst),
        );
    }
    // Check Aux Reg
    // constraint:
    //  * sum = aux1 + aux2
    //  * filter * aux1 * (sum - overflow) == 0
    //  * filter * aux3 * (1 - aux3) == 0
    {
        // aux1 / aux2 are the two wrapping differences; each is either the raw
        // difference or the raw difference + 2^32.
        yield_constr.constraint(filter * (aux1 + src2 - src1) * (aux1 + src2 - src1 - overflow));
        yield_constr.constraint(filter * (aux2 + src1 - src2) * (aux2 + src1 - src2 - overflow));
        // If aux1 != 0 then aux1 + aux2 must equal 2^32 (they are complements).
        yield_constr.constraint(filter * aux1 * ((aux1 + aux2) - overflow));
        // aux3 (sign-bits-differ flag) is boolean.
        yield_constr.constraint(filter * aux3 * (P::ONES - aux3));
    }
    // Check rs Reg
    // constraint: filter * (src1_reg - rs) == 0
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        let rs_src = limb_from_bits_le(lv.rs_bits);
        yield_constr.constraint(filter * (rs_reg - rs_src));
    }
    // Check rt Reg
    // constraint: filter * (src2_reg - rt) == 0
    {
        let rt_reg = lv.mem_channels[1].addr_virtual;
        let rt_src = limb_from_bits_le(lv.rt_bits);
        // Normal branches read rt; zero-compare branches (bgez/bltz) must read
        // register 0 or 1.
        yield_constr.constraint(norm_filter * (rt_reg - rt_src));
        yield_constr.constraint(special_filter * rt_reg * (P::ONES - rt_reg));
    }
    // Check Condition
    {
        // constraints:
        //  * z = src2 + aux - src1
        //  * filter * z * (overflow - z) == 0
        //  * is_lt = z * overflow_inv
        //  * branch_lv.lt * (1 - is_lt) == 0
        // where aux = src1 - src2 in u32, overflow = 2^32, overflow_inv = 2^-32
        let constr_a = src2 + aux1 - src1;
        yield_constr.constraint(filter * constr_a * (overflow - constr_a));
        let lt = constr_a * overflow_inv;
        yield_constr.constraint(branch_lv.lt * (P::ONES - lt));
        // constraints:
        //  * z = src1 + aux - src2
        //  * filter * z * (overflow - z) == 0
        //  * is_gt = z * overflow_inv
        //  * branch_lv.gt * (1 - is_gt) == 0
        // where aux = src2 - src1 in u32, overflow = 2^32, overflow_inv = 2^-32
        let constr_b = src1 + aux2 - src2;
        yield_constr.constraint(filter * constr_b * (overflow - constr_b));
        let gt = constr_b * overflow_inv;
        yield_constr.constraint(branch_lv.gt * (P::ONES - gt));
        // constraints:
        //  * is_ne = is_lt + is_gt
        //  * branch_lv.eq * ne == 0
        let ne = lt + gt;
        yield_constr.constraint(branch_lv.eq * ne);
        // invert lt/gt if aux3 = 1 (src1 and src2 have different sign bits),
        // converting the unsigned comparison into the signed one.
        let lt = branch_lv.lt * (P::ONES - aux3) + (P::ONES - branch_lv.lt) * aux3;
        let gt = branch_lv.gt * (P::ONES - aux3) + (P::ONES - branch_lv.gt) * aux3;
        // For each branch kind: the flag may only be set on branch rows, and
        // should_jump must equal the kind's predicate.
        // constraints:
        //  * is_eq * (1 - filter) = 0
        //  * is_eq * (should_jump - (1 - ne)) == 0
        yield_constr.constraint(is_eq * (P::ONES - filter));
        yield_constr.constraint(is_eq * (branch_lv.should_jump - (P::ONES - ne)));
        yield_constr.constraint(is_ne * (P::ONES - filter));
        yield_constr.constraint(is_ne * (branch_lv.should_jump - ne));
        yield_constr.constraint(is_le * (P::ONES - filter));
        yield_constr.constraint(is_le * (branch_lv.should_jump - (P::ONES - gt)));
        yield_constr.constraint(is_ge * (P::ONES - filter));
        yield_constr.constraint(is_ge * (branch_lv.should_jump - (P::ONES - lt)));
        yield_constr.constraint(is_gt * (P::ONES - filter));
        yield_constr.constraint(is_gt * (branch_lv.should_jump - gt));
        yield_constr.constraint(is_lt * (P::ONES - filter));
        yield_constr.constraint(is_lt * (branch_lv.should_jump - lt));
    }
}
/// Recursive-circuit counterpart of `eval_packed_branch`: encodes the MIPS
/// branch-family constraints (BEQ/BNE/BLEZ/BGTZ/BGEZ/BLTZ) into the plonky2
/// `CircuitBuilder`.
///
/// NOTE(review): the constraints emitted here must mirror, in both content and
/// order, the packed evaluation — keep the two functions in sync when editing.
pub fn eval_ext_circuit_branch<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    nv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let branch_lv = lv.branch;
    let filter = lv.op.branch; // `BRANCH`
    let one_extension = builder.one_extension();
    let zero_extension = builder.zero_extension();
    // One boolean flag per branch variant; exactly one is expected to be set
    // when `filter` is active (enforced below).
    let is_eq = branch_lv.is_eq;
    let is_ne = branch_lv.is_ne;
    let is_le = branch_lv.is_le;
    let is_gt = branch_lv.is_gt;
    let is_ge = branch_lv.is_ge;
    let is_lt = branch_lv.is_lt;
    let src1 = lv.mem_channels[0].value;
    let src2 = lv.mem_channels[1].value;
    let aux1 = lv.mem_channels[2].value; // src1 - src2
    let aux2 = lv.mem_channels[3].value; // src2 - src1
    let aux3 = lv.mem_channels[4].value; // (src1 ^ src2) & 0x80000000 > 0
    let aux4 = lv.mem_channels[5].value; // branch offset
    // "Normal" branches compare rs against rt; "special" branches (BGEZ/BLTZ)
    // compare rs against zero and encode the variant in the rt field.
    let norm_filter = builder.add_extension(is_eq, is_ne);
    let norm_filter = builder.add_extension(norm_filter, is_le);
    let norm_filter = builder.add_extension(norm_filter, is_gt);
    let special_filter = builder.add_extension(is_ge, is_lt);
    // overflow = 2^32, overflow_inv = 2^-32 in the Goldilocks field.
    let overflow = builder.constant_extension(F::Extension::from_canonical_u64(1 << 32));
    let overflow_inv =
        builder.constant_extension(F::Extension::from_canonical_u64(GOLDILOCKS_INVERSE_2EXP32));
    // Check `should_jump`: boolean, and only set while `filter` is active.
    {
        let constr = builder.sub_extension(one_extension, branch_lv.should_jump);
        let constr = builder.mul_extension(branch_lv.should_jump, constr);
        yield_constr.constraint(builder, constr);
        let constr = builder.sub_extension(one_extension, filter);
        let constr = builder.mul_extension(branch_lv.should_jump, constr);
        yield_constr.constraint(builder, constr);
    }
    // Check branch flags: exactly one variant flag and exactly one of
    // lt/gt/eq is set when this is a branch row.
    {
        let constr = builder.add_extension(norm_filter, special_filter);
        let constr = builder.sub_extension(one_extension, constr);
        let constr = builder.mul_extension(filter, constr);
        yield_constr.constraint(builder, constr);
        let constr = builder.add_extension(branch_lv.lt, branch_lv.gt);
        let constr = builder.add_extension(constr, branch_lv.eq);
        let constr = builder.sub_extension(one_extension, constr);
        let constr = builder.mul_extension(filter, constr);
        yield_constr.constraint(builder, constr);
    }
    // Check `branch target value`:
    // constraints:
    // * aux4 = sign_extended(offset << 2)
    // * jump_dest = sign_extended(offset << 2) + pc + 4
    // * filter * should_jump * (next_program_counter - jump_dest) * (next_program_counter + 1 << 32 - jump_dest) == 0
    // * next_addr = pc + 8
    // * filter * (1 - should_jump) * (next_program_counter - next_pc) == 0
    {
        // Reassemble the 16-bit immediate (spread over func/shamt/rd bit
        // columns), shifted left by 2 and sign-extended to 32 bits.
        let mut branch_offset = [zero_extension; 32];
        branch_offset[2..8].copy_from_slice(&lv.func_bits); // 6 bits
        branch_offset[8..13].copy_from_slice(&lv.shamt_bits); // 5 bits
        branch_offset[13..18].copy_from_slice(&lv.rd_bits); // 5 bits
        branch_offset[18..32].copy_from_slice(&[lv.rd_bits[4]; 14]); // lv.insn_bits[15]
        let offset_dst = limb_from_bits_le_recursive(builder, branch_offset);
        let constr = builder.sub_extension(aux4, offset_dst);
        let constr = builder.mul_extension(filter, constr);
        yield_constr.constraint(builder, constr);
        let base_pc = builder.add_const_extension(lv.program_counter, F::from_canonical_u64(4));
        let branch_dst = builder.add_extension(base_pc, aux4);
        // Two admissible targets: branch_dst, or branch_dst - 2^32 (wrap-around
        // of the u32 address arithmetic inside the field).
        let overflow_target = builder.add_extension(nv.next_program_counter, overflow);
        let constr_a = builder.sub_extension(overflow_target, branch_dst);
        let constr_b = builder.sub_extension(nv.next_program_counter, branch_dst);
        let constr = builder.mul_extension(branch_lv.should_jump, constr_a);
        let constr = builder.mul_extension(constr, constr_b);
        yield_constr.constraint(builder, constr);
        // Not taken: fall through past the delay slot, pc + 8.
        let next_insn = builder.add_const_extension(lv.program_counter, F::from_canonical_u64(8));
        let constr_a = builder.sub_extension(one_extension, branch_lv.should_jump);
        let constr_b = builder.sub_extension(nv.next_program_counter, next_insn);
        let constr = builder.mul_extension(constr_a, constr_b);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check Aux Reg
    // constraint:
    // * sum = aux1 + aux2
    // * filter * aux1 * (sum - overflow) == 0
    // * filter * aux3 * (1 - aux3) == 0
    {
        // aux1 must equal src1 - src2 modulo 2^32 (i.e. the field expression
        // aux1 + src2 - src1 is 0 or 2^32).
        let constr_a = builder.add_extension(aux1, src2);
        let constr_a = builder.sub_extension(constr_a, src1);
        let constr_b = builder.sub_extension(constr_a, overflow);
        let constr = builder.mul_extension(constr_a, constr_b);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        // Symmetrically, aux2 must equal src2 - src1 modulo 2^32.
        let constr_a = builder.add_extension(aux2, src1);
        let constr_a = builder.sub_extension(constr_a, src2);
        let constr_b = builder.sub_extension(constr_a, overflow);
        let constr = builder.mul_extension(constr_a, constr_b);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        // aux1 and aux2 are consistent with each other: aux1 != 0 implies
        // aux1 + aux2 == 2^32.
        let constr = builder.add_extension(aux1, aux2);
        let constr = builder.sub_extension(constr, overflow);
        let constr = builder.mul_extension(aux1, constr);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
        // aux3 (sign-bits-differ indicator) is boolean.
        let constr = builder.sub_extension(one_extension, aux3);
        let constr = builder.mul_extension(filter, constr);
        let constr = builder.mul_extension(aux3, constr);
        yield_constr.constraint(builder, constr);
    }
    // Check rs Reg
    {
        let rs_reg = lv.mem_channels[0].addr_virtual;
        // Initial fill value is irrelevant: copy_from_slice overwrites all 5.
        let mut rs_reg_index = [one_extension; 5];
        rs_reg_index.copy_from_slice(&lv.rs_bits);
        let rs_src = limb_from_bits_le_recursive(builder, rs_reg_index);
        let constr = builder.sub_extension(rs_reg, rs_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check rt Reg
    {
        let rt_reg = lv.mem_channels[1].addr_virtual;
        let rt_src = limb_from_bits_le_recursive(builder, lv.rt_bits);
        let constr = builder.sub_extension(rt_reg, rt_src);
        let constr = builder.mul_extension(constr, norm_filter);
        yield_constr.constraint(builder, constr);
        // BGEZ/BLTZ encode the variant in rt, which must then be 0 or 1.
        let constr = builder.sub_extension(one_extension, rt_reg);
        let constr = builder.mul_extension(constr, rt_reg);
        let constr = builder.mul_extension(constr, special_filter);
        yield_constr.constraint(builder, constr);
    }
    // Check Condition
    {
        let src1 = lv.mem_channels[0].value;
        let src2 = lv.mem_channels[1].value;
        let aux1 = lv.mem_channels[2].value;
        let aux2 = lv.mem_channels[3].value;
        let aux3 = lv.mem_channels[4].value;
        // constraints:
        // * z = src2 + aux - src1
        // * filter * z * (overflow - z) == 0
        // * is_lt = z * overflow_inv
        // * branch_lv.lt * (1 - is_lt) == 0
        // where aux = src1 - src2 in u32, overflow = 2^32, overflow_inv = 2^-32
        let diff_a = builder.add_extension(src2, aux1);
        let diff_a = builder.sub_extension(diff_a, src1);
        let constr_a = builder.sub_extension(overflow, diff_a);
        let constr_a = builder.mul_extension(constr_a, diff_a);
        let constr_a = builder.mul_extension(constr_a, filter);
        yield_constr.constraint(builder, constr_a);
        let lt = builder.mul_extension(diff_a, overflow_inv);
        let constr = builder.sub_extension(one_extension, lt);
        let constr = builder.mul_extension(constr, branch_lv.lt);
        yield_constr.constraint(builder, constr);
        // constraints:
        // * z = src1 + aux - src2
        // * filter * z * (overflow - z) == 0
        // * is_gt = z * overflow_inv
        // * branch_lv.gt * (1 - is_gt) == 0
        // where aux = src2 - src1 in u32, overflow = 2^32, overflow_inv = 2^-32
        let diff_b = builder.add_extension(src1, aux2);
        let diff_b = builder.sub_extension(diff_b, src2);
        let constr_b = builder.sub_extension(overflow, diff_b);
        let constr_b = builder.mul_extension(constr_b, diff_b);
        let constr_b = builder.mul_extension(constr_b, filter);
        yield_constr.constraint(builder, constr_b);
        let gt = builder.mul_extension(diff_b, overflow_inv);
        let constr = builder.sub_extension(one_extension, gt);
        let constr = builder.mul_extension(constr, branch_lv.gt);
        yield_constr.constraint(builder, constr);
        // constraints:
        // * is_ne = is_lt + is_gt
        // * branch_lv.eq * ne == 0
        let ne = builder.add_extension(lt, gt);
        let constr = builder.mul_extension(branch_lv.eq, ne);
        yield_constr.constraint(builder, constr);
        // invert lt/gt if aux3 = 1 (src1 and src2 have different sign bits);
        // unsigned comparison flips when exactly one operand is negative.
        let inv_aux3 = builder.sub_extension(one_extension, aux3);
        let inv_lt = builder.sub_extension(one_extension, branch_lv.lt);
        let inv_gt = builder.sub_extension(one_extension, branch_lv.gt);
        let lt_norm = builder.mul_extension(branch_lv.lt, inv_aux3);
        let lt_inv = builder.mul_extension(inv_lt, aux3);
        let lt = builder.add_extension(lt_norm, lt_inv);
        let gt_norm = builder.mul_extension(branch_lv.gt, inv_aux3);
        let gt_inv = builder.mul_extension(inv_gt, aux3);
        let gt = builder.add_extension(gt_norm, gt_inv);
        // Per-variant checks: each flag may only be set on a branch row, and
        // should_jump must equal the variant's condition.
        // constraints:
        // * is_eq * (1 - filter) = 0
        // * is_eq * (should_jump - (1 - ne)) == 0
        let constr_eq = builder.sub_extension(one_extension, filter);
        let constr_eq = builder.mul_extension(constr_eq, is_eq);
        yield_constr.constraint(builder, constr_eq);
        let eq = builder.sub_extension(one_extension, ne);
        let constr_eq = builder.sub_extension(branch_lv.should_jump, eq);
        let constr_eq = builder.mul_extension(constr_eq, is_eq);
        yield_constr.constraint(builder, constr_eq);
        let constr_ne = builder.sub_extension(one_extension, filter);
        let constr_ne = builder.mul_extension(constr_ne, is_ne);
        yield_constr.constraint(builder, constr_ne);
        let constr_ne = builder.sub_extension(branch_lv.should_jump, ne);
        let constr_ne = builder.mul_extension(constr_ne, is_ne);
        yield_constr.constraint(builder, constr_ne);
        let constr_le = builder.sub_extension(one_extension, filter);
        let constr_le = builder.mul_extension(constr_le, is_le);
        yield_constr.constraint(builder, constr_le);
        // le = !gt (using the sign-corrected gt).
        let le = builder.sub_extension(one_extension, gt);
        let constr_le = builder.sub_extension(branch_lv.should_jump, le);
        let constr_le = builder.mul_extension(constr_le, is_le);
        yield_constr.constraint(builder, constr_le);
        let constr_ge = builder.sub_extension(one_extension, filter);
        let constr_ge = builder.mul_extension(constr_ge, is_ge);
        yield_constr.constraint(builder, constr_ge);
        // ge = !lt (using the sign-corrected lt).
        let ge = builder.sub_extension(one_extension, lt);
        let constr_ge = builder.sub_extension(branch_lv.should_jump, ge);
        let constr_ge = builder.mul_extension(constr_ge, is_ge);
        yield_constr.constraint(builder, constr_ge);
        let constr_gt = builder.sub_extension(one_extension, filter);
        let constr_gt = builder.mul_extension(constr_gt, is_gt);
        yield_constr.constraint(builder, constr_gt);
        let constr_gt = builder.sub_extension(branch_lv.should_jump, gt);
        let constr_gt = builder.mul_extension(constr_gt, is_gt);
        yield_constr.constraint(builder, constr_gt);
        let constr_lt = builder.sub_extension(one_extension, filter);
        let constr_lt = builder.mul_extension(constr_lt, is_lt);
        yield_constr.constraint(builder, constr_lt);
        let constr_lt = builder.sub_extension(branch_lv.should_jump, lt);
        let constr_lt = builder.mul_extension(constr_lt, is_lt);
        yield_constr.constraint(builder, constr_lt);
    }
}
/// Packed (native-field) entry point for the jump/branch CPU constraints.
///
/// The helpers must stay in the same order as in `eval_ext_circuit` so that
/// the packed and recursive constraint streams line up one-to-one.
pub fn eval_packed<P: PackedField>(
    lv: &CpuColumnsView<P>,
    nv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    //eval_packed_exit_kernel(lv, nv, yield_constr);
    eval_packed_jump_jumpi(lv, nv, yield_constr);
    eval_packed_branch(lv, nv, yield_constr);
}
/// Recursive-circuit entry point for the jump/branch CPU constraints.
///
/// Must call the same helpers in the same order as `eval_packed`, since the
/// verifier matches constraints between the two evaluations positionally.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    nv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    //eval_ext_circuit_exit_kernel(builder, lv, nv, yield_constr);
    eval_ext_circuit_jump_jumpi(builder, lv, nv, yield_constr);
    eval_ext_circuit_branch(builder, lv, nv, yield_constr);
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/bootstrap_kernel.rs | prover/src/cpu/bootstrap_kernel.rs | use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use plonky2::plonk::config::GenericConfig;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::assembler::Kernel;
use crate::generation::state::GenerationState;
use crate::memory::segments::Segment;
use crate::poseidon::constants::SPONGE_RATE;
use crate::poseidon_sponge::columns::POSEIDON_RATE_BYTES;
use crate::poseidon_sponge::poseidon_sponge_stark::poseidon;
use crate::witness::memory::MemoryAddress;
use crate::witness::util::mem_write_gp_log_and_fill;
use crate::witness::util::poseidon_sponge_log;
use zkm_emulator::memory::{
END_PC_ADDRESS, HASH_ADDRESS_BASE, HASH_ADDRESS_END, ROOT_HASH_ADDRESS_BASE,
};
use zkm_emulator::page::{PAGE_ADDR_MASK, PAGE_SIZE};
/// Writes the program image into zkVM memory, one CPU row (8 memory channels,
/// hence 8 words) at a time, then verifies the per-page hashes and the
/// pre-state image id.
///
/// Fix: the original accumulated every written address/value into
/// `image_addr` / `image_addr_value` vectors that were never read afterwards —
/// two dead heap allocations growing with the image size. They are removed;
/// only the page-start addresses (needed for the hash checks) are retained.
pub(crate) fn generate_bootstrap_kernel<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    state: &mut GenerationState<F, C, D>,
    kernel: &Kernel,
) {
    // First address of every page touched by the image; their hashes are
    // checked after all memory ops have been applied.
    let mut page_addr = vec![];
    // Iterate through chunks of the code, such that we can write one chunk to
    // memory per row: each CPU row exposes 8 general-purpose memory channels.
    for chunk in &kernel.program.image.iter().chunks(8) {
        let mut cpu_row = CpuColumnsView::default();
        cpu_row.clock = F::from_canonical_usize(state.traces.clock());
        cpu_row.is_bootstrap_kernel = F::ONE;
        // Write this chunk to memory, one word per channel.
        for (channel, (addr, val)) in chunk.enumerate() {
            // Both instruction and memory data are located in code section for MIPS
            let address = MemoryAddress::new(0, Segment::Code, *addr as usize);
            // Page-aligned address marks the start of a new page to hash-check.
            if (addr & PAGE_ADDR_MASK as u32) == 0 {
                page_addr.push(*addr);
            }
            let write =
                mem_write_gp_log_and_fill(channel, address, state, &mut cpu_row, (*val).to_be());
            state.traces.push_memory(write);
        }
        state.traces.push_cpu(cpu_row);
    }
    // Apply all queued writes before hashing pages out of `state.memory`.
    state.memory.apply_ops(&state.traces.memory_ops);
    for addr in page_addr {
        check_memory_page_hash(state, kernel, addr, false);
    }
    check_image_id(state, kernel, false);
    log::info!("Bootstrapping took {} cycles", state.traces.clock());
}
/// Writes the 8-word page-hash root plus the end-pc word into memory, then
/// Poseidon-hashes those 9 words and asserts the digest equals the expected
/// image id (`pre_image_id` before execution, `image_id` after, selected by
/// `post`). Panics (assert) on mismatch, making the trace unprovable.
pub(crate) fn check_image_id<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    state: &mut GenerationState<F, C, D>,
    kernel: &Kernel,
    post: bool,
) {
    // push mem root and pc
    // Slot 8 holds the end pc; pre-state it keeps the entry value from the
    // array initializer (only overwritten when `post`).
    let mut root_u32s: [u32; 9] = [kernel.program.entry; 9];
    if post {
        root_u32s[8] = kernel.program.end_pc as u32;
    }
    // Slots 0..8: the 32-byte hash root, read 4 big-endian bytes at a time.
    for i in 0..8 {
        let start = i * 4;
        if post {
            root_u32s[i] = u32::from_be_bytes(
                kernel.program.page_hash_root[start..(start + 4)]
                    .try_into()
                    .unwrap(),
            );
        } else {
            root_u32s[i] = u32::from_be_bytes(
                kernel.program.pre_hash_root[start..(start + 4)]
                    .try_into()
                    .unwrap(),
            );
        }
    }
    // Addresses ROOT_HASH_ADDRESS_BASE..=END_PC_ADDRESS, word stride, paired
    // with the 9 values above.
    let root_hash_addr_value: Vec<_> = (ROOT_HASH_ADDRESS_BASE..=END_PC_ADDRESS)
        .step_by(4)
        .collect::<Vec<u32>>();
    let root_hash_addr_value: Vec<_> = root_hash_addr_value.iter().zip(root_u32s).collect();
    let mut root_hash_addr = Vec::new();
    // Write the words to memory, 8 per CPU row (one per memory channel).
    for chunk in &root_hash_addr_value.iter().chunks(8) {
        let mut cpu_row = CpuColumnsView::default();
        cpu_row.clock = F::from_canonical_usize(state.traces.clock());
        if post {
            cpu_row.is_exit_kernel = F::ONE;
            cpu_row.program_counter = F::from_canonical_usize(state.registers.program_counter);
        } else {
            cpu_row.is_bootstrap_kernel = F::ONE;
        }
        // Write this chunk to memory, while simultaneously packing its bytes into a u32 word.
        for (channel, (addr, val)) in chunk.enumerate() {
            // Both instruction and memory data are located in code section for MIPS
            let address = MemoryAddress::new(0, Segment::Code, **addr as usize);
            root_hash_addr.push(address);
            let write =
                mem_write_gp_log_and_fill(channel, address, state, &mut cpu_row, (*val).to_be());
            state.traces.push_memory(write);
        }
        state.traces.push_cpu(cpu_row);
    }
    // One extra CPU row carrying the Poseidon-sponge CTL data for this hash.
    let mut cpu_row = CpuColumnsView::default();
    cpu_row.clock = F::from_canonical_usize(state.traces.clock());
    if post {
        cpu_row.is_exit_kernel = F::ONE;
        cpu_row.program_counter = F::from_canonical_usize(state.registers.program_counter);
    } else {
        cpu_row.is_bootstrap_kernel = F::ONE;
    }
    cpu_row.is_poseidon_sponge = F::ONE;
    // NOTE(review): the buffer is named `_byte_be` but is filled with
    // `to_le_bytes` — presumably the Poseidon input convention is
    // little-endian and the name is stale; confirm against poseidon_sponge.
    let mut image_addr_value_byte_be = vec![0u8; root_hash_addr_value.len() * 4];
    for (i, (_, v)) in root_hash_addr_value.iter().enumerate() {
        image_addr_value_byte_be[i * 4..(i * 4 + 4)].copy_from_slice(&v.to_le_bytes());
    }
    // The Poseidon sponge CTL uses memory value columns for its inputs and outputs.
    // NOTE(review): unlike check_memory_page_hash, `final_index` is not
    // guarded against == len; with 9 addresses and SPONGE_RATE 8 this is
    // in-bounds, but it would panic if the address count became a multiple
    // of the rate — verify the invariant.
    let final_index = root_hash_addr.len() / SPONGE_RATE * SPONGE_RATE;
    cpu_row.mem_channels[0].value = F::ZERO; // context
    cpu_row.mem_channels[1].value = F::from_canonical_usize(Segment::Code as usize);
    cpu_row.mem_channels[2].value = F::from_canonical_usize(root_hash_addr[final_index].virt);
    cpu_row.mem_channels[3].value = F::from_canonical_usize(image_addr_value_byte_be.len()); // len
    let code_hash_u64s = poseidon::<F>(&image_addr_value_byte_be);
    let code_hash_bytes = code_hash_u64s
        .iter()
        .flat_map(|&num| num.to_le_bytes())
        .collect_vec();
    if post {
        log::trace!("actual post image id: {:?}", code_hash_bytes);
        log::trace!("expected post image id: {:?}", kernel.program.image_id);
        assert_eq!(code_hash_bytes, kernel.program.image_id);
    } else {
        log::trace!("actual pre image id: {:?}", code_hash_bytes);
        log::trace!("expected pre image id: {:?}", kernel.program.pre_image_id);
        assert_eq!(code_hash_bytes, kernel.program.pre_image_id);
    }
    cpu_row.general.hash_mut().value = code_hash_u64s.map(F::from_canonical_u64);
    poseidon_sponge_log(state, root_hash_addr, image_addr_value_byte_be);
    state.traces.push_cpu(cpu_row);
}
/// Poseidon-hashes one memory page starting at `addr` (which must be
/// page-aligned) and either checks it against the expected hash (bootstrap,
/// `update == false`) or writes the freshly computed hash back into the hash
/// table (exit kernel, `update == true`). The root page at HASH_ADDRESS_END
/// is checked against the program's root hash instead.
pub(crate) fn check_memory_page_hash<
    F: RichField + Extendable<D>,
    C: GenericConfig<D, F = F>,
    const D: usize,
>(
    state: &mut GenerationState<F, C, D>,
    kernel: &Kernel,
    addr: u32,
    update: bool,
) {
    log::trace!("check page hash, addr: {:X}", addr);
    assert_eq!(addr & PAGE_ADDR_MASK as u32, 0u32);
    // Word addresses covering the whole page.
    let page_data_addr_value: Vec<_> = (addr..addr + PAGE_SIZE as u32)
        .step_by(4)
        .collect::<Vec<u32>>();
    let mut page_data_addr = Vec::new();
    // NOTE(review): named `_byte_be` but filled via `to_le_bytes` — same
    // (presumably stale) naming as in check_image_id; confirm convention.
    let mut page_addr_value_byte_be = vec![0u8; PAGE_SIZE];
    for (i, addr) in page_data_addr_value.iter().enumerate() {
        let address = MemoryAddress::new(0, Segment::Code, *addr as usize);
        page_data_addr.push(address);
        let v = state.memory.get(address);
        page_addr_value_byte_be[i * 4..(i * 4 + 4)].copy_from_slice(&v.to_le_bytes());
    }
    let code_hash_u64s = poseidon::<F>(&page_addr_value_byte_be);
    let code_hash_bytes = code_hash_u64s
        .iter()
        .flat_map(|&num| num.to_le_bytes())
        .collect_vec();
    // Digest repacked as 8 u32 words (for writing into the hash table).
    let code_hash_be: [u32; 8] = core::array::from_fn(|i| {
        u32::from_le_bytes(core::array::from_fn(|j| code_hash_bytes[i * 4 + j]))
    });
    if addr == HASH_ADDRESS_END {
        // Root page: compare against the program's root hash directly.
        log::debug!("actual root page hash: {:?}", code_hash_bytes);
        if update {
            log::trace!(
                "expected post root page hash: {:?}",
                kernel.program.page_hash_root
            );
            assert_eq!(code_hash_bytes, kernel.program.page_hash_root);
        } else {
            log::trace!(
                "expected pre root page hash: {:?}",
                kernel.program.pre_hash_root
            );
            assert_eq!(code_hash_bytes, kernel.program.pre_hash_root);
        }
    } else if update {
        // Exit path: write the new page hash into its slot in the hash table
        // (32 bytes per page at HASH_ADDRESS_BASE + (page_index << 5)).
        let start_hash_addr = HASH_ADDRESS_BASE + ((addr >> 12) << 5);
        let hash_addr_value: Vec<_> = (start_hash_addr..=start_hash_addr + 31)
            .step_by(4)
            .collect::<Vec<u32>>();
        let hash_addr_value: Vec<_> = hash_addr_value.iter().zip(code_hash_be).collect();
        for chunk in &hash_addr_value.iter().chunks(8) {
            let mut cpu_row = CpuColumnsView::default();
            cpu_row.clock = F::from_canonical_usize(state.traces.clock());
            cpu_row.is_exit_kernel = F::ONE;
            cpu_row.program_counter = F::from_canonical_usize(state.registers.program_counter);
            // Write this chunk to memory, while simultaneously packing its bytes into a u32 word.
            for (channel, (addr, val)) in chunk.enumerate() {
                // Both instruction and memory data are located in code section for MIPS
                let address = MemoryAddress::new(0, Segment::Code, **addr as usize);
                let write = mem_write_gp_log_and_fill(
                    channel,
                    address,
                    state,
                    &mut cpu_row,
                    (*val).to_be(),
                );
                state.traces.push_memory(write);
            }
            state.traces.push_cpu(cpu_row);
        }
        log::trace!("update page hash: {:?}", code_hash_bytes);
    } else {
        // Bootstrap path: the expected hash already sits in the image's hash
        // table; read its 8 words and compare.
        let mut expected_hash_byte = [0u8; 32];
        let hash_addr = HASH_ADDRESS_BASE + ((addr >> 12) << 5);
        for i in 0..8 {
            let addr = hash_addr + (i << 2) as u32;
            let v = kernel.program.image.get(&addr).unwrap();
            expected_hash_byte[i * 4..(i * 4 + 4)].copy_from_slice(&v.to_le_bytes());
        }
        log::trace!("actual page hash: {:?}", code_hash_bytes);
        log::trace!("expected page hash: {:?}", expected_hash_byte);
        assert_eq!(code_hash_bytes, expected_hash_byte);
    }
    // Emit the Poseidon-sponge CTL row for this page hash.
    let mut cpu_row = CpuColumnsView::default();
    cpu_row.clock = F::from_canonical_usize(state.traces.clock());
    if update {
        cpu_row.is_exit_kernel = F::ONE;
        cpu_row.program_counter = F::from_canonical_usize(state.registers.program_counter);
    } else {
        cpu_row.is_bootstrap_kernel = F::ONE;
    }
    cpu_row.is_poseidon_sponge = F::ONE;
    // The Poseidon sponge CTL uses memory value columns for its inputs and outputs.
    cpu_row.mem_channels[0].value = F::ZERO; // context
    cpu_row.mem_channels[1].value = F::from_canonical_usize(Segment::Code as usize);
    // NOTE(review): this mixes POSEIDON_RATE_BYTES (byte units) with
    // SPONGE_RATE (word units) — presumably bytes/4 == words so the result is
    // the index of the final absorbed block; confirm the two constants agree.
    let final_idx = page_addr_value_byte_be.len() / POSEIDON_RATE_BYTES * SPONGE_RATE;
    let virt = if final_idx >= page_data_addr.len() {
        0
    } else {
        page_data_addr[final_idx].virt
    };
    cpu_row.mem_channels[2].value = F::from_canonical_usize(virt);
    cpu_row.mem_channels[3].value = F::from_canonical_usize(page_addr_value_byte_be.len()); // len
    cpu_row.general.hash_mut().value = code_hash_u64s.map(F::from_canonical_u64);
    poseidon_sponge_log(state, page_data_addr, page_addr_value_byte_be);
    state.traces.push_cpu(cpu_row);
    if update {
        state.memory.apply_ops(&state.traces.memory_ops);
    }
}
/// Packed constraints for the bootstrap phase: the bootstrap flag must start
/// at 1, end at 0, and only ever step down; bootstrap memory writes must
/// target context 0 in the Code segment.
///
/// Must emit constraints in the same order as
/// `eval_bootstrap_kernel_ext_circuit`.
pub(crate) fn eval_bootstrap_kernel_packed<F: Field, P: PackedField<Scalar = F>>(
    local_values: &CpuColumnsView<P>,
    next_values: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
    let local_is_bootstrap = local_values.is_bootstrap_kernel;
    let next_is_bootstrap = next_values.is_bootstrap_kernel;
    yield_constr.constraint_first_row(local_is_bootstrap - P::ONES);
    yield_constr.constraint_last_row(local_is_bootstrap);
    let delta_is_bootstrap = next_is_bootstrap - local_is_bootstrap;
    // delta in {0, -1}: delta * (delta + 1) == 0.
    yield_constr.constraint_transition(delta_is_bootstrap * (delta_is_bootstrap + P::ONES));
    // If this is a bootloading row and the i'th memory channel is used, it must have the right
    // address, name context = 0, segment = Code, virt + 4 = next_virt
    let code_segment = F::from_canonical_usize(Segment::Code as usize);
    for channel in local_values.mem_channels.iter() {
        let filter = local_is_bootstrap * channel.used;
        yield_constr.constraint(filter * channel.addr_context);
        yield_constr.constraint(filter * (channel.addr_segment - code_segment));
        /* FIXME
        let delta_virt = channel.addr_virtual + P::from(F::from_canonical_u32(32)) - next_values.mem_channels[i].addr_virtual;
        log::trace!("virt {:?} {:?} {:?} {:?} {}", channel.addr_virtual, delta_virt, local_values.clock, NUM_GP_CHANNELS, i);
        yield_constr.constraint_transition(filter * delta_virt);
        */
    }
    // If this is the final bootstrap row (i.e. delta_is_bootstrap = 1), check that
    // - all memory channels are disabled
    // - the current kernel hash matches a precomputed one
    // NOTE(review): the comment says delta = 1 but the transition constraint
    // above only allows delta in {0, -1}; the final row has delta = -1, and
    // this product is nonzero only when delta != 0 — confirm intent.
    for channel in local_values.mem_channels.iter() {
        yield_constr.constraint_transition(delta_is_bootstrap * channel.used);
    }
    /*
    for (&expected, actual) in KERNEL
        .code_hash
        .iter()
        .rev()
        .zip(local_values.mem_channels.last().unwrap().value)
    {
        let expected = P::from(F::from_canonical_u32(expected));
        let diff = expected - actual;
        yield_constr.constraint_transition(delta_is_bootstrap * diff);
    }
    */
}
/// Recursive-circuit counterpart of `eval_bootstrap_kernel_packed`; must emit
/// the same constraints in the same order.
pub(crate) fn eval_bootstrap_kernel_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    local_values: &CpuColumnsView<ExtensionTarget<D>>,
    next_values: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let one = builder.one_extension();
    // IS_BOOTSTRAP_KERNEL must have an init value of 1, a final value of 0, and a delta in {0, -1}.
    let local_is_bootstrap = local_values.is_bootstrap_kernel;
    let next_is_bootstrap = next_values.is_bootstrap_kernel;
    let constraint = builder.sub_extension(local_is_bootstrap, one);
    yield_constr.constraint_first_row(builder, constraint);
    yield_constr.constraint_last_row(builder, local_is_bootstrap);
    let delta_is_bootstrap = builder.sub_extension(next_is_bootstrap, local_is_bootstrap);
    // delta * delta + delta == delta * (delta + 1): zero iff delta in {0, -1}.
    let constraint =
        builder.mul_add_extension(delta_is_bootstrap, delta_is_bootstrap, delta_is_bootstrap);
    yield_constr.constraint_transition(builder, constraint);
    // If this is a bootloading row and the i'th memory channel is used, it must have the right
    // address, name context = 0, segment = Code, virt + 4 = next_virt
    let code_segment =
        builder.constant_extension(F::Extension::from_canonical_usize(Segment::Code as usize));
    for channel in local_values.mem_channels {
        let filter = builder.mul_extension(local_is_bootstrap, channel.used);
        let constraint = builder.mul_extension(filter, channel.addr_context);
        yield_constr.constraint(builder, constraint);
        let segment_diff = builder.sub_extension(channel.addr_segment, code_segment);
        let constraint = builder.mul_extension(filter, segment_diff);
        yield_constr.constraint(builder, constraint);
        /*
        let i_ext = builder.constant_extension(F::Extension::from_canonical_u32(32));
        let prev_virt = builder.add_extension(channel.addr_virtual, i_ext);
        let virt_diff = builder.sub_extension(prev_virt, next_values.mem_channels[i].addr_virtual);
        let constraint = builder.mul_extension(filter, virt_diff);
        yield_constr.constraint_transition(builder, constraint);
        */
    }
    // If this is the final bootstrap row (i.e. delta_is_bootstrap = 1), check that
    // - all memory channels are disabled
    // - the current kernel hash matches a precomputed one
    for channel in local_values.mem_channels.iter() {
        let constraint = builder.mul_extension(delta_is_bootstrap, channel.used);
        yield_constr.constraint_transition(builder, constraint);
    }
    /*
    for (&expected, actual) in KERNEL
        .code_hash
        .iter()
        .rev()
        .zip(local_values.mem_channels.last().unwrap().value)
    {
        let expected = builder.constant_extension(F::Extension::from_canonical_u32(expected));
        let diff = builder.sub_extension(expected, actual);
        let constraint = builder.mul_extension(delta_is_bootstrap, diff);
        yield_constr.constraint_transition(builder, constraint);
    }
    */
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/decode.rs | prover/src/cpu/decode.rs | use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::{CpuColumnsView, COL_MAP};
/// List of opcode blocks.
/// Each block corresponds to exactly one flag, and each flag corresponds to exactly one block.
/// Each block of opcodes:
/// - is contiguous,
/// - has a length that is a power of 2, and
/// - its start index is a multiple of its length (it is aligned).
///
/// These properties permit us to check if an opcode belongs to a block of length 2^n by checking
/// its top 8-n bits.
///
/// Additionally, each block can be made available only to the user, only to the kernel, or to
/// both. This is mainly useful for making some instructions kernel-only, while still decoding to
/// invalid for the user. We do this by making one kernel-only block and another user-only block.
/// The exception is the PANIC instruction which is user-only without a corresponding kernel block.
/// This makes the proof unverifiable when PANIC is executed in kernel mode, which is the intended
/// behavior.
///
/// Note: invalid opcodes are not represented here. _Any_ opcode is permitted to decode to
/// `is_invalid`. The kernel then verifies that the opcode was _actually_ invalid.
///
/// NOTE(review): much of this comment block (PANIC, ADDMOD, BYTE, ...) was
/// inherited from the plonky2 EVM decoder and may not all apply to this MIPS
/// port — confirm against the ops column definitions.
/// FIXME: stephen
const OPCODES: [(u32, usize, bool, usize); 8] = [
    // (start index of block, number of top bits to check (log2), kernel-only, flag column)
    // ADD, MUL, SUB, DIV, MOD, LT, GT and BYTE flags are handled partly manually here, and partly through the Arithmetic table CTL.
    // ADDMOD, MULMOD and SUBMOD flags are handled partly manually here, and partly through the Arithmetic table CTL.
    (0x7, 1, false, COL_MAP.op.eq_iszero),
    // AND, OR and XOR flags are handled partly manually here, and partly through the Logic table CTL.
    // SHL and SHR flags are handled partly manually here, and partly through the Logic table CTL.
    (0x0B, 0, true, COL_MAP.op.keccak_general),
    (0x0D, 1, false, COL_MAP.op.jumps), // 0x56-0x57
    (0x0E, 0, false, COL_MAP.op.branch),
    (0x0F, 0, false, COL_MAP.op.pc),
    (0x12, 0, true, COL_MAP.op.get_context),
    (0x13, 0, true, COL_MAP.op.set_context),
    (0x16, 0, true, COL_MAP.op.exit_kernel),
    // MLOAD_GENERAL and MSTORE_GENERAL flags are handled manually here.
];
/// List of combined opcodes requiring a special handling.
/// Each index in the list corresponds to an arbitrary combination
/// of opcodes defined in the ops column view (see cpu/columns/ops.rs;
/// the original "evm/src/..." path is inherited from the plonky2 EVM code).
const COMBINED_OPCODES: [usize; 7] = [
    COL_MAP.op.logic_op,
    COL_MAP.op.binary_op,
    COL_MAP.op.binary_imm_op,
    COL_MAP.op.shift,
    COL_MAP.op.shift_imm,
    COL_MAP.op.m_op_load,
    COL_MAP.op.m_op_store,
];
/// Decompose a 32-bit opcode into its individual bits, least significant
/// first: element `i` of the returned array is bit `i` of `opcode`.
fn bits_from_opcode(opcode: u32) -> [bool; 32] {
    std::array::from_fn(|i| (opcode >> i) & 1 == 1)
}
/// Packed decode constraints: booleanity of the kernel flag, the opcode bits,
/// every instruction flag, and the requirement that at most one instruction
/// flag is set per row.
///
/// Must emit constraints in the same order as `eval_ext_circuit`.
pub fn eval_packed_generic<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // Ensure that the kernel flag is valid (either 0 or 1).
    let kernel_mode = lv.is_kernel_mode;
    yield_constr.constraint(kernel_mode * (kernel_mode - P::ONES));
    // Ensure that the opcode bits are valid: each has to be either 0 or 1.
    for bit in lv.opcode_bits {
        yield_constr.constraint(bit * (bit - P::ONES));
    }
    // Check that the instruction flags are valid.
    // First, check that they are all either 0 or 1.
    for (_, _, _, flag_col) in OPCODES {
        let flag = lv[flag_col];
        yield_constr.constraint(flag * (flag - P::ONES));
    }
    // Also check that the combined instruction flags are valid.
    for flag_idx in COMBINED_OPCODES {
        yield_constr.constraint(lv[flag_idx] * (lv[flag_idx] - P::ONES));
    }
    // Now check that they sum to 0 or 1, including the combined flags.
    // (Sum 0 is allowed: a row need not decode to any of these ops.)
    let flag_sum: P = OPCODES
        .into_iter()
        .map(|(_, _, _, flag_col)| lv[flag_col])
        .chain(COMBINED_OPCODES.map(|op| lv[op]))
        .sum::<P>();
    yield_constr.constraint(flag_sum * (flag_sum - P::ONES));
    // Finally, classify all opcodes, together with the kernel flag, into blocks
    // TODO
}
/// Recursive-circuit counterpart of `eval_packed_generic`; must emit the same
/// decode constraints in the same order.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    // Note: The constraints below do not need to be restricted to CPU cycles.
    // Ensure that the kernel flag is valid (either 0 or 1).
    let kernel_mode = lv.is_kernel_mode;
    {
        // x * x - x == 0 iff x is boolean.
        let constr = builder.mul_sub_extension(kernel_mode, kernel_mode, kernel_mode);
        yield_constr.constraint(builder, constr);
    }
    // Ensure that the opcode bits are valid: each has to be either 0 or 1.
    for bit in lv.opcode_bits {
        let constr = builder.mul_sub_extension(bit, bit, bit);
        yield_constr.constraint(builder, constr);
    }
    // Check that the instruction flags are valid.
    // First, check that they are all either 0 or 1.
    for (_, _, _, flag_col) in OPCODES {
        let flag = lv[flag_col];
        let constr = builder.mul_sub_extension(flag, flag, flag);
        yield_constr.constraint(builder, constr);
    }
    // Also check that the combined instruction flags are valid.
    for flag_idx in COMBINED_OPCODES {
        let constr = builder.mul_sub_extension(lv[flag_idx], lv[flag_idx], lv[flag_idx]);
        yield_constr.constraint(builder, constr);
    }
    // Now check that they sum to 0 or 1, including the combined flags.
    {
        let mut flag_sum =
            builder.add_many_extension(COMBINED_OPCODES.into_iter().map(|idx| lv[idx]));
        for (_, _, _, flag_col) in OPCODES {
            let flag = lv[flag_col];
            flag_sum = builder.add_extension(flag_sum, flag);
        }
        let constr = builder.mul_sub_extension(flag_sum, flag_sum, flag_sum);
        yield_constr.constraint(builder, constr);
    }
    // Finally, classify all opcodes, together with the kernel flag, into blocks
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/bits.rs | prover/src/cpu/bits.rs | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::util::{limb_from_bits_le, limb_from_bits_le_recursive};
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
/// Packed constraints for the bit-manipulation instructions SEB (sign-extend
/// byte), SEH (sign-extend halfword) and WSBH (word swap bytes within
/// halfwords): checks the rt/rd register addresses, the bit decomposition of
/// rt, and the per-instruction result in rd.
pub fn eval_packed<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let filter_seh = lv.op.signext16;
    let filter_seb = lv.op.signext8;
    let filter_wsbh = lv.op.swaphalf;
    // Any of the three ops activates the shared register/decomposition checks.
    let filter = filter_seh + filter_seb + filter_wsbh;
    // Check rt Reg
    {
        let rt_reg = lv.mem_channels[0].addr_virtual;
        let rt_src = limb_from_bits_le(lv.rt_bits);
        yield_constr.constraint(filter * (rt_reg - rt_src));
    }
    // Check rd Reg
    {
        let rd_reg = lv.mem_channels[1].addr_virtual;
        let rd_dst = limb_from_bits_le(lv.rd_bits);
        yield_constr.constraint(filter * (rd_reg - rd_dst));
    }
    let rt = lv.mem_channels[0].value;
    let bits_le = lv.general.io().rt_le;
    // Each claimed bit of rt must be boolean...
    for bit in bits_le {
        yield_constr.constraint(filter * bit * (P::ONES - bit));
    }
    // ...and the bits must recompose to the rt value.
    let sum = limb_from_bits_le(bits_le);
    yield_constr.constraint(filter * (rt - sum));
    // check seb result: bits 0..7 kept, sign bit 7 replicated into bits 7..31.
    let rd = lv.mem_channels[1].value;
    let mut seb_result = [bits_le[7]; 32];
    seb_result[..7].copy_from_slice(&bits_le[..7]);
    let sum = limb_from_bits_le(seb_result);
    yield_constr.constraint(filter_seb * (rd - sum));
    // check seh result: bits 0..15 kept, sign bit 15 replicated into bits 15..31.
    let mut seh_result = [bits_le[15]; 32];
    seh_result[..15].copy_from_slice(&bits_le[..15]);
    let sum = limb_from_bits_le(seh_result);
    yield_constr.constraint(filter_seh * (rd - sum));
    // check wsbh result: swap the two bytes inside each 16-bit halfword.
    // (Initial fill value is irrelevant: every slot is overwritten below.)
    let mut wsbh_result = [bits_le[0]; 32];
    wsbh_result[..8].copy_from_slice(&bits_le[8..16]);
    wsbh_result[8..16].copy_from_slice(&bits_le[..8]);
    wsbh_result[16..24].copy_from_slice(&bits_le[24..32]);
    wsbh_result[24..32].copy_from_slice(&bits_le[16..24]);
    let sum = limb_from_bits_le(wsbh_result);
    yield_constr.constraint(filter_wsbh * (rd - sum));
}
/// Recursive-circuit version of the SEB/SEH/WSBH constraints above.
/// Mirrors `eval_packed`; constraints are emitted in the same order so the
/// packed and recursive evaluations stay consistent.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let filter_seh = lv.op.signext16;
    let filter_seb = lv.op.signext8;
    let filter_wsbh = lv.op.swaphalf;
    // filter = seh + seb + wsbh: active when any of the three ops is selected.
    let filter = builder.add_extension(filter_seh, filter_seb);
    let filter = builder.add_extension(filter_wsbh, filter);
    // Check rt Reg: channel 0 must address the rt register from the insn bits.
    {
        let rt_reg = lv.mem_channels[0].addr_virtual;
        let rt_src = limb_from_bits_le_recursive(builder, lv.rt_bits);
        let constr = builder.sub_extension(rt_reg, rt_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    // Check rd Reg: channel 1 must address the rd register from the insn bits.
    {
        let rd_reg = lv.mem_channels[1].addr_virtual;
        let rd_src = limb_from_bits_le_recursive(builder, lv.rd_bits);
        let constr = builder.sub_extension(rd_reg, rd_src);
        let constr = builder.mul_extension(constr, filter);
        yield_constr.constraint(builder, constr);
    }
    let one = builder.one_extension();
    let rt = lv.mem_channels[0].value;
    let bits_le = lv.general.io().rt_le;
    // Each claimed bit of rt must be boolean: filter * bit * (1 - bit) == 0.
    for bit in bits_le {
        let bit_neg = builder.sub_extension(one, bit);
        let t = builder.mul_many_extension([filter, bit, bit_neg]);
        yield_constr.constraint(builder, t);
    }
    // The bits must recompose to the rt value.
    let sum = limb_from_bits_le_recursive(builder, bits_le);
    let t1 = builder.sub_extension(rt, sum);
    let t = builder.mul_extension(filter, t1);
    yield_constr.constraint(builder, t);
    // check seb result: keep low 7 bits, replicate bit 7 upward.
    let rd = lv.mem_channels[1].value;
    let mut seb_result = [bits_le[7]; 32];
    seb_result[..7].copy_from_slice(&bits_le[..7]);
    let sum = limb_from_bits_le_recursive(builder, seb_result);
    let t1 = builder.sub_extension(rd, sum);
    let t = builder.mul_extension(filter_seb, t1);
    yield_constr.constraint(builder, t);
    // check seh result: keep low 15 bits, replicate bit 15 upward.
    let mut seh_result = [bits_le[15]; 32];
    seh_result[..15].copy_from_slice(&bits_le[..15]);
    let sum = limb_from_bits_le_recursive(builder, seh_result);
    let t1 = builder.sub_extension(rd, sum);
    let t = builder.mul_extension(filter_seh, t1);
    yield_constr.constraint(builder, t);
    // check wsbh result: swap the two bytes inside each 16-bit half.
    let mut wsbh_result = [bits_le[0]; 32];
    wsbh_result[..8].copy_from_slice(&bits_le[8..16]);
    wsbh_result[8..16].copy_from_slice(&bits_le[..8]);
    wsbh_result[16..24].copy_from_slice(&bits_le[24..32]);
    wsbh_result[24..32].copy_from_slice(&bits_le[16..24]);
    let sum = limb_from_bits_le_recursive(builder, wsbh_result);
    let t1 = builder.sub_extension(rd, sum);
    let t = builder.mul_extension(filter_wsbh, t1);
    yield_constr.constraint(builder, t);
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/shift.rs | prover/src/cpu/shift.rs | use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::memory::segments::Segment;
/// Packed-field constraints for shift instructions: dispatches to the
/// register-specified (variable) and immediate-specified shift-amount cases.
pub(crate) fn eval_packed<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    eval_packed_variable(lv, yield_constr);
    eval_packed_immediate(lv, yield_constr);
}
/// Constraints for shifts whose displacement comes from a register
/// (SLLV/SRLV/SRAV-style): memory channel 3 must look up 2^displacement
/// from the kernel's shift table.
pub(crate) fn eval_packed_variable<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let is_shift = lv.op.shift;
    let displacement = lv.mem_channels[0]; // holds the shift displacement d
    let two_exp = lv.mem_channels[3]; // holds 2^d
    // Not needed here; val is the input and we're verifying that output is
    // val * 2^d. NOTE(review): the original said "(mod 2^256)" — inherited
    // from an EVM codebase; for this 32-bit VM the reduction is presumably
    // mod 2^32 — confirm.
    // let val = lv.mem_channels[0];
    // let output = lv.mem_channels[NUM_GP_CHANNELS - 1];
    let shift_table_segment = P::Scalar::from_canonical_u64(Segment::ShiftTable as u64);
    // Only lookup the shifting factor when displacement is < 2^32.
    // two_exp.used is true (1) if the high limbs of the displacement are
    // zero and false (0) otherwise; when set, the channel must be a read.
    let high_limbs_are_zero = two_exp.used;
    yield_constr.constraint(is_shift * high_limbs_are_zero * (two_exp.is_read - P::ONES));
    /*
    let high_limbs_sum: P = displacement.value[1..].iter().copied().sum();
    let high_limbs_sum_inv = lv.general.shift().high_limb_sum_inv;
    // Verify that high_limbs_are_zero = 0 implies high_limbs_sum != 0 and
    // high_limbs_are_zero = 1 implies high_limbs_sum = 0.
    let t = high_limbs_sum * high_limbs_sum_inv - (P::ONES - high_limbs_are_zero);
    yield_constr.constraint(is_shift * t);
    yield_constr.constraint(is_shift * high_limbs_sum * high_limbs_are_zero);
    */
    // When the shift displacement is < 2^32, constrain the two_exp
    // mem_channel to be the entry corresponding to `displacement` in
    // the shift table lookup (zero for displacements past the table's end;
    // NOTE(review): original said ">= 256", inherited from EVM — the table
    // presumably has 32 entries here; confirm).
    yield_constr.constraint(is_shift * two_exp.addr_context); // read from kernel memory
    yield_constr.constraint(is_shift * (two_exp.addr_segment - shift_table_segment));
    yield_constr.constraint(is_shift * (two_exp.addr_virtual - displacement.value));
    // // Other channels must be unused
    // for chan in &lv.mem_channels[4..NUM_GP_CHANNELS - 1] {
    //     yield_constr.constraint(is_shift * chan.used); // channel is not used
    // }
    // Cross-table lookup must connect the memory channels here to MUL
    // (in the case of left shift) or DIV (in the case of right shift)
    // in the arithmetic table. Specifically, the mapping is
    //
    //    1 -> 0 (value to be shifted is the same)
    //    2 -> 1 (two_exp becomes the multiplicand (resp. divisor))
    // last -> last (output is the same)
}
/// Constraints for shifts with an immediate shift amount (SLL/SRL/SRA-style):
/// memory channel 3 must look up 2^shamt from the kernel's shift table, where
/// shamt is recomposed from the instruction's `shamt_bits`.
pub(crate) fn eval_packed_immediate<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let filter = lv.op.shift_imm;

    // Recompose the shift amount: walking the little-endian bits from the
    // most significant end, amount = sum_i shamt_bits[i] * 2^i.
    let mut amount = P::ZEROS;
    for bit in lv.shamt_bits.iter().rev() {
        amount = amount.doubles() + *bit;
    }

    // Channel 3 holds the looked-up power of two, 2^amount.
    let pow2_channel = lv.mem_channels[3];
    let table_segment = P::Scalar::from_canonical_u64(Segment::ShiftTable as u64);

    // pow2_channel.used doubles as the "high limbs of the displacement are
    // zero" flag; when it is set, the channel must be a read.
    let lookup_enabled = pow2_channel.used;
    yield_constr.constraint(filter * lookup_enabled * (pow2_channel.is_read - P::ONES));

    // The lookup must hit the shift table in kernel memory at index `amount`.
    yield_constr.constraint(filter * pow2_channel.addr_context); // kernel memory
    yield_constr.constraint(filter * (pow2_channel.addr_segment - table_segment));
    yield_constr.constraint(filter * (pow2_channel.addr_virtual - amount));

    // A cross-table lookup connects these memory channels to MUL (left shift)
    // or DIV (right shift) in the arithmetic table:
    //    1 -> 0 (value to be shifted is the same)
    //    2 -> 1 (the power of two becomes the multiplicand resp. divisor)
    // last -> last (output is the same)
}
/// Recursive-circuit constraints for shift instructions: dispatches to the
/// register-specified (variable) and immediate-specified shift-amount cases,
/// mirroring `eval_packed`.
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    eval_variable_ext_circuit(builder, lv, yield_constr);
    eval_immediate_ext_circuit(builder, lv, yield_constr);
}
/// Recursive-circuit version of `eval_packed_variable`; constraints are
/// emitted in the same order as the packed evaluation.
pub(crate) fn eval_variable_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let is_shift = lv.op.shift;
    let displacement = lv.mem_channels[0]; // holds the shift displacement d
    let two_exp = lv.mem_channels[3]; // holds 2^d
    let shift_table_segment = F::from_canonical_u64(Segment::ShiftTable as u64);
    // Only lookup the shifting factor when displacement is < 2^32.
    // two_exp.used is true (1) if the high limbs of the displacement are
    // zero and false (0) otherwise; when set, the channel must be a read:
    // is_shift * high_limbs_are_zero * (is_read - 1) == 0.
    let high_limbs_are_zero = two_exp.used;
    let one = builder.one_extension();
    let t = builder.sub_extension(two_exp.is_read, one);
    let t = builder.mul_extension(high_limbs_are_zero, t);
    let t = builder.mul_extension(is_shift, t);
    yield_constr.constraint(builder, t);
    /*
    let high_limbs_sum = builder.add_many_extension(&displacement.value[1..]);
    let high_limbs_sum_inv = lv.general.shift().high_limb_sum_inv;
    // Verify that high_limbs_are_zero = 0 implies high_limbs_sum != 0 and
    // high_limbs_are_zero = 1 implies high_limbs_sum = 0.
    let t = builder.one_extension();
    let t = builder.sub_extension(t, high_limbs_are_zero);
    let t = builder.mul_sub_extension(high_limbs_sum, high_limbs_sum_inv, t);
    let t = builder.mul_extension(is_shift, t);
    yield_constr.constraint(builder, t);
    let t = builder.mul_many_extension([is_shift, high_limbs_sum, high_limbs_are_zero]);
    yield_constr.constraint(builder, t);
    */
    // When the shift displacement is < 2^32, constrain the two_exp
    // mem_channel to be the entry corresponding to `displacement` in
    // the shift table lookup.
    // Kernel memory: is_shift * addr_context == 0.
    let t = builder.mul_extension(is_shift, two_exp.addr_context);
    yield_constr.constraint(builder, t);
    // arithmetic_extension(c0, c1, a, b, d) = c0*a*b + c1*d, so this is
    // is_shift * (addr_segment - shift_table_segment).
    let t = builder.arithmetic_extension(
        F::ONE,
        -shift_table_segment,
        is_shift,
        two_exp.addr_segment,
        is_shift,
    );
    yield_constr.constraint(builder, t);
    // is_shift * (addr_virtual - displacement) == 0.
    let t = builder.sub_extension(two_exp.addr_virtual, displacement.value);
    let t = builder.mul_extension(is_shift, t);
    yield_constr.constraint(builder, t);
    // // Other channels must be unused
    // for chan in &lv.mem_channels[4..NUM_GP_CHANNELS - 1] {
    //     let t = builder.mul_extension(is_shift, chan.used);
    //     yield_constr.constraint(builder, t);
    // }
}
/// Recursive-circuit version of `eval_packed_immediate`; constraints are
/// emitted in the same order as the packed evaluation.
pub(crate) fn eval_immediate_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let is_shift = lv.op.shift_imm;
    // Recompose the immediate shift amount from shamt_bits:
    // amount = sum_i shamt_bits[i] * 2^i (Horner over the reversed bits).
    let displacement = lv
        .shamt_bits
        .iter()
        .rev()
        .fold(builder.zero_extension(), |acc, bit| {
            builder.mul_const_add_extension(F::from_canonical_u64(2), acc, *bit)
        });
    let two_exp = lv.mem_channels[3]; // holds 2^displacement
    let shift_table_segment = F::from_canonical_u64(Segment::ShiftTable as u64);
    // two_exp.used is the "high limbs of the displacement are zero" flag;
    // when set, the channel must be a read:
    // is_shift * high_limbs_are_zero * (is_read - 1) == 0.
    let high_limbs_are_zero = two_exp.used;
    let one = builder.one_extension();
    let t = builder.sub_extension(two_exp.is_read, one);
    let t = builder.mul_extension(high_limbs_are_zero, t);
    let t = builder.mul_extension(is_shift, t);
    yield_constr.constraint(builder, t);
    /*
    let high_limbs_sum = builder.add_many_extension(&displacement.value[1..]);
    let high_limbs_sum_inv = lv.general.shift().high_limb_sum_inv;
    // Verify that high_limbs_are_zero = 0 implies high_limbs_sum != 0 and
    // high_limbs_are_zero = 1 implies high_limbs_sum = 0.
    let t = builder.one_extension();
    let t = builder.sub_extension(t, high_limbs_are_zero);
    let t = builder.mul_sub_extension(high_limbs_sum, high_limbs_sum_inv, t);
    let t = builder.mul_extension(is_shift, t);
    yield_constr.constraint(builder, t);
    let t = builder.mul_many_extension([is_shift, high_limbs_sum, high_limbs_are_zero]);
    yield_constr.constraint(builder, t);
    */
    // Constrain the two_exp mem_channel to be the shift-table entry at index
    // `displacement`, in kernel memory.
    // is_shift * addr_context == 0.
    let t = builder.mul_extension(is_shift, two_exp.addr_context);
    yield_constr.constraint(builder, t);
    // arithmetic_extension(c0, c1, a, b, d) = c0*a*b + c1*d, so this is
    // is_shift * (addr_segment - shift_table_segment).
    let t = builder.arithmetic_extension(
        F::ONE,
        -shift_table_segment,
        is_shift,
        two_exp.addr_segment,
        is_shift,
    );
    yield_constr.constraint(builder, t);
    // is_shift * (addr_virtual - displacement) == 0.
    let t = builder.sub_extension(two_exp.addr_virtual, displacement);
    let t = builder.mul_extension(is_shift, t);
    yield_constr.constraint(builder, t);
    // // Other channels must be unused
    // for chan in &lv.mem_channels[4..NUM_GP_CHANNELS - 1] {
    //     let t = builder.mul_extension(is_shift, chan.used);
    //     yield_constr.constraint(builder, t);
    // }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/mod.rs | prover/src/cpu/mod.rs | pub(crate) mod bits;
pub(crate) mod bootstrap_kernel;
pub mod columns;
pub(crate) mod count;
pub mod cpu_stark;
pub(crate) mod decode;
pub(crate) mod exit_kernel;
pub(crate) mod jumps;
pub mod kernel;
pub(crate) mod membus;
pub(crate) mod memio;
pub(crate) mod misc;
pub(crate) mod shift;
pub(crate) mod syscall;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/membus.rs | prover/src/cpu/membus.rs | use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
/// General-purpose memory channels; they can read and write to all contexts/segments/addresses.
pub const NUM_GP_CHANNELS: usize = 9;
/// Index layout of the CPU table's memory channels.
pub mod channel_indices {
    use std::ops::Range;
    // Channel 0 is the special-purpose code-read channel.
    pub const CODE: usize = 0;
    // The general-purpose channels occupy the indices right after CODE.
    pub const GP: Range<usize> = CODE + 1..(CODE + 1) + super::NUM_GP_CHANNELS;
}
/// Total memory channels used by the CPU table. This includes all the `GP_MEM_CHANNELS` as well as
/// all special-purpose memory channels.
///
/// Currently, there is one special-purpose memory channel, which reads the opcode from memory. Its
/// limitations are:
/// - it is enabled by `is_cpu_cycle`,
/// - it always reads and cannot write,
/// - the context is derived from the current context and the `is_kernel_mode` flag,
/// - the segment is hard-wired to the code segment,
/// - the address is `program_counter`,
/// - the value must fit in one byte (in the least-significant position) and its eight bits are
/// found in `opcode_bits`.
/// These limitations save us numerous columns in the CPU table.
pub const NUM_CHANNELS: usize = channel_indices::GP.end;
/// Packed-field membus constraints: `code_context` must equal `context` in
/// user mode and 0 in kernel mode, and every memory channel's `used` flag
/// must be boolean.
pub fn eval_packed<P: PackedField>(
    lv: &CpuColumnsView<P>,
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // code_context == (1 - is_kernel_mode) * context. Deliberately not
    // filtered to CPU cycles: it must also hold during kernel bootstrapping.
    let user_mode = P::ONES - lv.is_kernel_mode;
    yield_constr.constraint(lv.code_context - user_mode * lv.context);

    // Each channel's `used` flag must be 0 or 1.
    lv.mem_channels.iter().for_each(|channel| {
        let used = channel.used;
        yield_constr.constraint(used * (used - P::ONES));
    });
}
/// Recursive-circuit version of the membus constraints above; mirrors
/// `eval_packed` and emits constraints in the same order.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
    lv: &CpuColumnsView<ExtensionTarget<D>>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    // Validate `lv.code_context`.
    // It should be 0 if in kernel mode and `lv.context` if in user mode.
    // mul_sub_extension(k, ctx, ctx - code_ctx) = k*ctx - ctx + code_ctx,
    // i.e. code_context - (1 - is_kernel_mode) * context, matching the
    // packed constraint. Not filtered to CPU cycles, as it must also hold
    // during kernel bootstrapping.
    let diff = builder.sub_extension(lv.context, lv.code_context);
    let constr = builder.mul_sub_extension(lv.is_kernel_mode, lv.context, diff);
    yield_constr.constraint(builder, constr);
    // Validate `channel.used`. It should be binary: used * used - used == 0.
    for channel in lv.mem_channels {
        let constr = builder.mul_sub_extension(channel.used, channel.used, channel.used);
        yield_constr.constraint(builder, constr);
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/cpu_stark.rs | prover/src/cpu/cpu_stark.rs | use std::borrow::Borrow;
use std::iter::repeat_n;
use std::marker::PhantomData;
use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use super::columns::CpuColumnsView;
use crate::all_stark::Table;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::{COL_MAP, NUM_CPU_COLUMNS};
use crate::cpu::{
bits, bootstrap_kernel, count, decode, jumps, membus, memio, misc, shift, syscall,
};
use crate::cross_table_lookup::{Column, Filter, TableWithColumns};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::memory::segments::Segment;
use crate::memory::{NUM_CHANNELS, VALUE_LIMBS};
use crate::stark::Stark;
/// CTL data columns for the Keccak sponge: (context, segment, virt, len,
/// timestamp) followed by the hash-output limbs.
pub fn ctl_data_keccak_sponge<F: Field>() -> Vec<Column<F>> {
    // GP channel layout when executing KECCAK_GENERAL:
    //   ch 0: context, ch 1: segment, ch 2: virt, ch 3: len, ch 4: outputs.
    // Timestamp is clock * NUM_CHANNELS.
    let timestamp = Column::linear_combination([(
        COL_MAP.clock,
        F::from_canonical_usize(NUM_CHANNELS),
    )]);
    let mut cols = vec![
        Column::single(COL_MAP.mem_channels[0].value),
        Column::single(COL_MAP.mem_channels[1].value),
        Column::single(COL_MAP.mem_channels[2].value),
        Column::single(COL_MAP.mem_channels[3].value),
        timestamp,
    ];
    cols.extend(COL_MAP.general.khash().value.map(Column::single));
    cols
}
/// CTL data columns for the SHA-extend sponge: (context, segment, virt,
/// timestamp) followed by the single output element.
pub fn ctl_data_sha_extend_sponge<F: Field>() -> Vec<Column<F>> {
    // GP channel layout when executing SHA_EXTEND_GENERAL:
    //   ch 0: context, ch 1: segment, ch 2: virt, ch 3: outputs.
    // Timestamp is clock * NUM_CHANNELS.
    let timestamp = Column::linear_combination([(
        COL_MAP.clock,
        F::from_canonical_usize(NUM_CHANNELS),
    )]);
    let mut cols = vec![
        Column::single(COL_MAP.mem_channels[0].value),
        Column::single(COL_MAP.mem_channels[1].value),
        Column::single(COL_MAP.mem_channels[2].value),
        timestamp,
    ];
    cols.push(Column::single(COL_MAP.general.element().value));
    cols
}
/// CTL data columns for the SHA-compress sponge: (context, segment, virt,
/// timestamp) followed by the hash-output limbs.
pub fn ctl_data_sha_compress_sponge<F: Field>() -> Vec<Column<F>> {
    // GP channel layout when executing SHA_COMPRESS_GENERAL:
    //   ch 0: context, ch 1: segment, ch 2: start virt, ch 3: outputs.
    // Timestamp is clock * NUM_CHANNELS.
    let timestamp = Column::linear_combination([(
        COL_MAP.clock,
        F::from_canonical_usize(NUM_CHANNELS),
    )]);
    let mut cols = vec![
        Column::single(COL_MAP.mem_channels[0].value),
        Column::single(COL_MAP.mem_channels[1].value),
        Column::single(COL_MAP.mem_channels[2].value),
        timestamp,
    ];
    cols.extend(COL_MAP.general.shash().value.map(Column::single));
    cols
}
/// CTL filter selecting rows executing a Keccak sponge operation.
pub fn ctl_filter_keccak_sponge<F: Field>() -> Filter<F> {
    Filter::new_simple(Column::single(COL_MAP.is_keccak_sponge))
}
/// CTL filter selecting rows executing a SHA-extend sponge operation.
pub fn ctl_filter_sha_extend_sponge<F: Field>() -> Filter<F> {
    Filter::new_simple(Column::single(COL_MAP.is_sha_extend_sponge))
}
/// CTL filter selecting rows executing a SHA-compress sponge operation.
pub fn ctl_filter_sha_compress_sponge<F: Field>() -> Filter<F> {
    Filter::new_simple(Column::single(COL_MAP.is_sha_compress_sponge))
}
/// CTL data columns for the Poseidon sponge: (context, segment, virt, len,
/// timestamp) followed by the hash-output limbs.
pub fn ctl_data_poseidon_sponge<F: Field>() -> Vec<Column<F>> {
    // GP channel layout when executing POSEIDON_GENERAL:
    //   ch 0: context, ch 1: segment, ch 2: virt, ch 3: len, ch 4: outputs.
    // Timestamp is clock * NUM_CHANNELS.
    let timestamp = Column::linear_combination([(
        COL_MAP.clock,
        F::from_canonical_usize(NUM_CHANNELS),
    )]);
    let mut cols = vec![
        Column::single(COL_MAP.mem_channels[0].value),
        Column::single(COL_MAP.mem_channels[1].value),
        Column::single(COL_MAP.mem_channels[2].value),
        Column::single(COL_MAP.mem_channels[3].value),
        timestamp,
    ];
    cols.extend(COL_MAP.general.hash().value.map(Column::single));
    cols
}
/// CTL filter selecting rows executing a Poseidon sponge operation.
pub fn ctl_filter_poseidon_sponge<F: Field>() -> Filter<F> {
    Filter::new_simple(Column::single(COL_MAP.is_poseidon_sponge))
}
/// Create the vector of Columns corresponding to the two inputs and
/// one output of a binary operation.
/// FIXME: the column is unchecked. The in0 should starts from column 4 in looked table, and in1
/// 36, out 68. But the looking table offers in0 77, in1 83, out 89.
/// Columns for the two inputs and one output of a binary operation, taken
/// from GP memory channels 0, 1 and 2 in that order.
/// FIXME: the column is unchecked. The in0 should start from column 4 in the
/// looked table, in1 from 36, out from 68; but the looking table offers
/// in0 77, in1 83, out 89.
fn ctl_data_binops<F: Field>() -> Vec<Column<F>> {
    // FIXME: select all values
    Column::singles([
        COL_MAP.mem_channels[0].value,
        COL_MAP.mem_channels[1].value,
        COL_MAP.mem_channels[2].value,
    ])
    .collect_vec()
}
/// CTL data columns for logic ops: the full opcode value (opcode bits ++
/// func bits, as one little-endian bit combination) followed by the binop
/// input/output columns. The rt field is only non-zero for BGEZ/BLTZ.
pub fn ctl_data_logic<F: Field>() -> Vec<Column<F>> {
    const OP_LEN: usize = COL_MAP.opcode_bits.len();
    const FUNC_LEN: usize = COL_MAP.func_bits.len();
    // Concatenate opcode-bit and func-bit column indices.
    let mut bit_cols = [0usize; OP_LEN + FUNC_LEN];
    bit_cols[..OP_LEN].copy_from_slice(&COL_MAP.opcode_bits[..]);
    bit_cols[OP_LEN..].copy_from_slice(&COL_MAP.func_bits[..]);

    let mut res = vec![Column::le_bits(bit_cols)];
    res.extend(ctl_data_binops());
    res
}
/// CTL filter selecting rows executing a logic operation.
pub fn ctl_filter_logic<F: Field>() -> Filter<F> {
    Filter::new_simple(Column::single(COL_MAP.op.logic_op))
}
// If an arithmetic operation is happening on the CPU side, the CTL
// will enforce that the reconstructed opcode value from the
// opcode bits matches.
// If an arithmetic operation is happening on the CPU side, the CTL
// will enforce that the reconstructed opcode value from the
// opcode bits matches.
/// Builds the CPU-side table for the arithmetic CTL: the full opcode value
/// (opcode bits ++ func bits) plus the binop in/out columns, filtered to
/// binary / shift / shift-immediate operations.
pub fn ctl_arithmetic_base_rows<F: Field>() -> TableWithColumns<F> {
    // Instead of taking single columns, we reconstruct the entire opcode value directly.
    let mut base = [0usize; COL_MAP.opcode_bits.len() + COL_MAP.func_bits.len()];
    base[0..COL_MAP.opcode_bits.len()].copy_from_slice(&COL_MAP.opcode_bits[..]);
    base[COL_MAP.opcode_bits.len()..].copy_from_slice(&COL_MAP.func_bits[..]);
    let mut columns = vec![Column::le_bits(base)];
    columns.extend(ctl_data_binops());
    // Create the CPU Table whose columns are those with the two
    // inputs and one output of the ternary operations listed in `ops`
    // (also `ops` is used as the operation filter).
    TableWithColumns::new(
        Table::Cpu,
        columns,
        Some(Filter::new_simple(Column::sum([
            COL_MAP.op.binary_op,
            COL_MAP.op.shift,
            COL_MAP.op.shift_imm,
        ]))),
    )
}
/// Builds the CPU-side table for the immediate-arithmetic CTL: the opcode
/// value reconstructed from the opcode bits alone (immediate ops carry no
/// func field) plus the binop in/out columns, filtered to binary-immediate
/// operations.
pub fn ctl_arithmetic_imm_base_rows<F: Field>() -> TableWithColumns<F> {
    let mut columns = vec![Column::le_bits(COL_MAP.opcode_bits)];
    columns.extend(ctl_data_binops());

    // The binary-immediate op flag doubles as the CTL filter.
    let filter = Filter::new_simple(Column::single(COL_MAP.op.binary_imm_op));
    TableWithColumns::new(Table::Cpu, columns, Some(filter))
}
/// CTL data columns for byte packing; identical layout to the Poseidon
/// sponge data columns.
pub fn ctl_data_byte_packing<F: Field>() -> Vec<Column<F>> {
    ctl_data_poseidon_sponge()
}
pub const MEM_CODE_CHANNEL_IDX: usize = 0;
pub const MEM_GP_CHANNELS_IDX_START: usize = MEM_CODE_CHANNEL_IDX + 1;
/// Make the time/channel column for memory lookups.
/// Make the time/channel column for memory lookups:
/// `clock * NUM_CHANNELS + channel`.
fn mem_time_and_channel<F: Field>(channel: usize) -> Column<F> {
    Column::linear_combination_with_constant(
        [(COL_MAP.clock, F::from_canonical_usize(NUM_CHANNELS))],
        F::from_canonical_usize(channel),
    )
}
/// CTL data columns for the code-read memory channel: a read from the Code
/// segment at `program_counter`, whose low value limb is the instruction
/// reconstructed from its opcode/func bits and whose high limbs are zero.
pub fn ctl_data_code_memory<F: Field>() -> Vec<Column<F>> {
    const OP_LEN: usize = COL_MAP.opcode_bits.len();
    const FUNC_LEN: usize = COL_MAP.func_bits.len();

    let mut cols = vec![
        Column::constant(F::ONE),                                      // is_read
        Column::single(COL_MAP.code_context),                          // addr_context
        Column::constant(F::from_canonical_u64(Segment::Code as u64)), // addr_segment
        Column::single(COL_MAP.program_counter),                       // addr_virtual
    ];

    // Low limb of the value: the opcode bits followed by the func bits, as a
    // single little-endian bit combination.
    let mut bit_cols = [0usize; OP_LEN + FUNC_LEN];
    bit_cols[..OP_LEN].copy_from_slice(&COL_MAP.opcode_bits[..]);
    bit_cols[OP_LEN..].copy_from_slice(&COL_MAP.func_bits[..]);
    cols.push(Column::le_bits(bit_cols));

    // All higher value limbs are zero.
    cols.extend(repeat_n(Column::constant(F::ZERO), VALUE_LIMBS - 1));

    cols.push(mem_time_and_channel(MEM_CODE_CHANNEL_IDX));
    cols
}
/// CTL data columns for general-purpose memory channel `channel`:
/// (is_read, context, segment, virt, value, timestamp).
pub fn ctl_data_gp_memory<F: Field>(channel: usize) -> Vec<Column<F>> {
    let channel_map = COL_MAP.mem_channels[channel];
    let mut cols: Vec<_> = Column::singles([
        channel_map.is_read,
        channel_map.addr_context,
        channel_map.addr_segment,
        channel_map.addr_virtual,
        channel_map.value,
    ])
    .collect();
    //cols.extend(Column::singles(channel_map.value));
    // cols.push(mem_time_and_channel(MEM_GP_CHANNELS_IDX_START + channel));
    // NOTE(review): every GP channel uses timestamp offset 0 here, rather
    // than the per-channel offset of the commented-out line above — this
    // must match the memory table's timestamping; confirm it is intentional.
    cols.push(mem_time_and_channel(0));
    cols
}
/// CTL filter column for the code-memory channel: the sum of all op flags
/// (i.e. active on every row executing some operation). Note this returns a
/// bare `Column`, unlike the other `Filter`-returning filters.
pub fn ctl_filter_code_memory<F: Field>() -> Column<F> {
    Column::sum(COL_MAP.op.iter())
}
/// CTL filter selecting rows where GP memory channel `channel` is used.
pub fn ctl_filter_gp_memory<F: Field>(channel: usize) -> Filter<F> {
    Filter::new_simple(Column::single(COL_MAP.mem_channels[channel].used))
}
/// STARK enforcing the CPU table's constraints.
#[derive(Copy, Clone, Default)]
pub struct CpuStark<F, const D: usize> {
    // Marker for the field type; the stark itself holds no data.
    pub f: PhantomData<F>,
}
impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for CpuStark<F, D> {
    type EvaluationFrame<FE, P, const D2: usize>
        = StarkFrame<P, NUM_CPU_COLUMNS>
    where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>;

    type EvaluationFrameTarget = StarkFrame<ExtensionTarget<D>, NUM_CPU_COLUMNS>;

    /// Evaluates all CPU constraints over packed field values by dispatching
    /// to the per-module evaluators. The dispatch order must match
    /// `eval_ext_circuit` exactly, since constraints are combined in sequence.
    fn eval_packed_generic<FE, P, const D2: usize>(
        &self,
        vars: &Self::EvaluationFrame<FE, P, D2>,
        yield_constr: &mut ConstraintConsumer<P>,
    ) where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        // Reinterpret the flat column arrays as structured column views.
        let local_values: &[P; NUM_CPU_COLUMNS] = vars.get_local_values().try_into().unwrap();
        let local_values: &CpuColumnsView<P> = local_values.borrow();
        let next_values: &[P; NUM_CPU_COLUMNS] = vars.get_next_values().try_into().unwrap();
        let next_values: &CpuColumnsView<P> = next_values.borrow();
        bootstrap_kernel::eval_bootstrap_kernel_packed(local_values, next_values, yield_constr);
        //contextops::eval_packed(local_values, next_values, yield_constr);
        decode::eval_packed_generic(local_values, yield_constr);
        jumps::eval_packed(local_values, next_values, yield_constr);
        membus::eval_packed(local_values, yield_constr);
        memio::eval_packed(local_values, next_values, yield_constr);
        shift::eval_packed(local_values, yield_constr);
        count::eval_packed(local_values, yield_constr);
        syscall::eval_packed(local_values, yield_constr);
        bits::eval_packed(local_values, yield_constr);
        misc::eval_packed(local_values, yield_constr);
        //exit_kernel::eval_exit_kernel_packed(local_values, next_values, yield_constr);
    }

    /// Recursive-circuit counterpart of `eval_packed_generic`; must dispatch
    /// to the per-module evaluators in the same order.
    fn eval_ext_circuit(
        &self,
        builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
        vars: &Self::EvaluationFrameTarget,
        yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    ) {
        // Reinterpret the flat column arrays as structured column views.
        let local_values: &[ExtensionTarget<D>; NUM_CPU_COLUMNS] =
            vars.get_local_values().try_into().unwrap();
        let local_values: &CpuColumnsView<ExtensionTarget<D>> = local_values.borrow();
        let next_values: &[ExtensionTarget<D>; NUM_CPU_COLUMNS] =
            vars.get_next_values().try_into().unwrap();
        let next_values: &CpuColumnsView<ExtensionTarget<D>> = next_values.borrow();
        bootstrap_kernel::eval_bootstrap_kernel_ext_circuit(
            builder,
            local_values,
            next_values,
            yield_constr,
        );
        //contextops::eval_ext_circuit(builder, local_values, next_values, yield_constr);
        decode::eval_ext_circuit(builder, local_values, yield_constr);
        jumps::eval_ext_circuit(builder, local_values, next_values, yield_constr);
        membus::eval_ext_circuit(builder, local_values, yield_constr);
        memio::eval_ext_circuit(builder, local_values, next_values, yield_constr);
        shift::eval_ext_circuit(builder, local_values, yield_constr);
        count::eval_ext_circuit(builder, local_values, yield_constr);
        syscall::eval_ext_circuit(builder, local_values, yield_constr);
        bits::eval_ext_circuit(builder, local_values, yield_constr);
        misc::eval_ext_circuit(builder, local_values, yield_constr);
        //exit_kernel::eval_exit_kernel_ext_circuit(builder, local_values, next_values, yield_constr);
    }

    /// Maximum degree of any constraint polynomial emitted above.
    fn constraint_degree(&self) -> usize {
        3
    }
}
#[cfg(test)]
mod tests {
    use anyhow::Result;

    use crate::cpu::cpu_stark::CpuStark;
    use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};

    /// Checks that the stark's constraints stay within the declared degree.
    #[test]
    fn test_stark_degree() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = CpuStark<F, D>;

        let stark = S {
            f: Default::default(),
        };
        test_stark_low_degree(stark)
    }

    /// Checks that the recursive-circuit constraints agree with the packed
    /// constraints.
    #[test]
    fn test_stark_circuit() -> Result<()> {
        env_logger::try_init().unwrap_or_default();
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = CpuStark<F, D>;

        let stark = S {
            f: Default::default(),
        };
        test_stark_circuit_constraints::<F, C, S, D>(stark)
    }

    /*
    #[test]
    #[ignore]
    fn test_stark_check_memio() {
        env_logger::try_init().unwrap_or_default();
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = CpuStark<F, D>;
        let stark = S {
            f: Default::default(),
        };
        let mut state = GenerationState::<F>::new(40000000, &TEST_KERNEL).unwrap();
        generate_bootstrap_kernel::<F>(&mut state, &TEST_KERNEL);
        simulate_cpu::<F, D>(&mut state, &TEST_KERNEL).unwrap();
        let vals: Vec<[F; NUM_CPU_COLUMNS]> = state
            .clone()
            .traces
            .cpu
            .into_iter()
            .map(|x| x.into())
            .collect::<Vec<_>>();
        for i in 0..(vals.len() - 1) {
            log::debug!(
                "[{i}] vals: {:?},\ncpu column: {:?}",
                vals[i],
                state.traces.cpu[i]
            );
            assert!(
                state.traces.cpu[i].op.binary_op == F::ZERO
                    || state.traces.cpu[i].op.binary_op == F::ONE
            );
            assert!(
                state.traces.cpu[i].mem_channels[0].is_read == F::ZERO
                    || state.traces.cpu[i].mem_channels[0].is_read == F::ONE
            );
            test_stark_cpu_check_constraints::<F, C, S, D>(stark, &vals[i], &vals[i + 1]);
        }
    }
    */
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/syscall.rs | prover/src/cpu/syscall.rs | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::columns::CpuColumnsView;
use crate::witness::operation::*;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
pub fn eval_packed<P: PackedField>(
lv: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
let filter = lv.op.syscall; // syscall
// let _sys_num = lv.mem_channels[0].value;
let a0 = lv.mem_channels[1].value;
let a1 = lv.mem_channels[2].value;
let a2 = lv.mem_channels[3].value;
let v0 = P::ZEROS;
let v1 = P::ZEROS;
let syscall = lv.general.syscall();
let result_v0 = lv.mem_channels[4].value;
let result_v1 = lv.mem_channels[5].value;
//sysmap
// is_sysmap|is_sz_mid_not_zero|is_a0_zero is calculated outside and written in the mem_channels.
let is_sysmap = syscall.sysnum[1];
let is_sz_mid_not_zero = syscall.a1; //sz & 0xFFF != 0
let is_sz_mid_zero = syscall.sysnum[10]; //sz & 0xFFF == 0
let sz = a1;
let sz_in_sz_mid_not_zero = syscall.sysnum[9]; //the value of sz_mid
let is_a0_zero = syscall.a0[0];
let is_a0_not_zero = syscall.a0[2];
let heap_in_a0_zero = lv.mem_channels[6].value;
let result_heap = lv.mem_channels[7].value;
let is_sysmap_a0_zero = syscall.cond[0];
let is_sysmap_a0_zero_sz_nz = syscall.cond[1];
let is_sysmap_a0_zero_sz_zero = syscall.cond[2];
let is_sysmap_a0_nz = syscall.cond[3];
let is_sysread_a0_not_stdin = syscall.cond[4];
let is_sysread_a0_stdin = syscall.cond[5];
let is_syswrite_a0_not_stdout_err = syscall.cond[6];
let is_syswrite_a0_stdout_or_err = syscall.cond[7];
let is_sysfcntl_a0_stdin = syscall.cond[8];
let is_sysfcntl_a0_stdout_or_err = syscall.cond[9];
let v0_in_a0_zero = heap_in_a0_zero;
let heap_in_a0_zero_and_in_sz_mid_not_zero = heap_in_a0_zero + sz_in_sz_mid_not_zero; // branch1:sz&fff!=0 & a0==0
let heap_in_a0_zero_and_not_in_sz_mid_not_zero = heap_in_a0_zero + sz; // branch2: sz&fff==0 &a0 ==0
//check:
//1 is_syscall
//2 sysnum==sysmap
//3 a0 is zero
//4 heap value is right
//5 sz & 0xFFF != 0
yield_constr.constraint(filter * (is_sysmap_a0_zero - is_sysmap * is_a0_zero));
yield_constr
.constraint(filter * (is_sysmap_a0_zero_sz_nz - is_sysmap_a0_zero * is_sz_mid_not_zero));
yield_constr.constraint(
filter * is_sysmap_a0_zero_sz_nz * (heap_in_a0_zero_and_in_sz_mid_not_zero - result_heap),
);
//check:
//1 is_syscall
//2 sysnum==sysmap
//3 a0 is zero
//4 heap value is right
//5 sz & 0xFFF == 0
yield_constr
.constraint(filter * (is_sysmap_a0_zero_sz_zero - is_sysmap_a0_zero * is_sz_mid_zero));
yield_constr.constraint(
filter
* is_sysmap_a0_zero_sz_zero
* (heap_in_a0_zero_and_not_in_sz_mid_not_zero - result_heap),
);
//check:
//1 is_syscall
//2 sysnum==sysmap
//3 a0 is zero
//4 v0 value is right
yield_constr.constraint(filter * is_sysmap_a0_zero * (v0_in_a0_zero - result_v0));
//check:
//1 is_syscall
//2 sysnum==sysmap
//3 a0 is not zero
//4 v0 value is right
yield_constr.constraint(filter * (is_sysmap_a0_nz - is_sysmap * is_a0_not_zero));
yield_constr.constraint(filter * is_sysmap_a0_nz * (a0 - result_v0));
//sysbrk
let is_sysbrk = syscall.sysnum[2];
let is_sysbrk_gt = syscall.cond[10];
let is_sysbrk_le = syscall.cond[11];
let initial_brk = lv.mem_channels[6].value;
//check:
//1 is_syscall
//2 sysnum==sysbrk
//3 v0&v1 are right
yield_constr.constraint(filter * is_sysbrk * (P::ONES - (is_sysbrk_gt + is_sysbrk_le)));
yield_constr.constraint(filter * is_sysbrk_gt * (a0 - result_v0));
yield_constr.constraint(filter * is_sysbrk_le * (initial_brk - result_v0));
yield_constr.constraint(filter * is_sysbrk * (v1 - result_v1));
//sysclone
let is_sysclone = syscall.sysnum[3];
let v0_in_sysclone = P::ONES;
//check:
//1 is_syscall
//2 sysnum==sysclone
//3 v0&v1 are right
yield_constr.constraint(filter * is_sysclone * (v0_in_sysclone - result_v0));
yield_constr.constraint(filter * is_sysclone * (v1 - result_v1));
// let is_SYSEXITGROUP =sys_num.is_equal_private(P::Scalar::from_canonical_usize(SYSEXITGROUP),Equal);
// //todo
//sysread
let is_sysread = syscall.sysnum[5];
let a0_is_fd_stdin = syscall.a0[0];
let a0_is_not_fd_stdin = syscall.a0[2];
let v0_in_a0_is_not_fd_stdin = P::Scalar::from_canonical_usize(0xFFFFFFFF);
let v1_in_a0_is_not_fd_stdin = P::Scalar::from_canonical_usize(MIPSEBADF);
//check:
//1 is_syscall
//2 sysnum==sysread
//3 v0&v1 are right
//4 a0 != fd_stdin
yield_constr.constraint(filter * (is_sysread_a0_not_stdin - is_sysread * a0_is_not_fd_stdin));
yield_constr
.constraint(filter * is_sysread_a0_not_stdin * (v0_in_a0_is_not_fd_stdin - result_v0));
yield_constr
.constraint(filter * is_sysread_a0_not_stdin * (v1_in_a0_is_not_fd_stdin - result_v1));
//check:
//1 is_syscall
//2 sysnum==sysread
//3 v0&v1 are right
//4 a0 == fd_stdin
yield_constr.constraint(filter * (is_sysread_a0_stdin - is_sysread * a0_is_fd_stdin));
yield_constr.constraint(filter * is_sysread_a0_stdin * (v0 - result_v0));
yield_constr.constraint(filter * is_sysread_a0_stdin * (v1 - result_v1));
//syswrite
let is_syswrite = syscall.sysnum[6];
let a0_is_fd_stdout_or_fd_stderr = syscall.a0[1];
let a0_is_not_fd_stdout_and_fd_stderr = syscall.a0[2];
let v0_in_a0_is_not_fd_stdout_and_fd_stderr = P::Scalar::from_canonical_usize(0xFFFFFFFF);
let v1_in_a0_is_not_fd_stdin_and_fd_stderr = P::Scalar::from_canonical_usize(MIPSEBADF);
//check:
//1 is_syscall
//2 sysnum==syswrite
//3 v0&v1 are right
//4 a0 =! fd_stderr and a0 != fd_stderr
yield_constr.constraint(
filter * (is_syswrite_a0_not_stdout_err - is_syswrite * a0_is_not_fd_stdout_and_fd_stderr),
);
yield_constr.constraint(
filter
* is_syswrite_a0_not_stdout_err
* (v0_in_a0_is_not_fd_stdout_and_fd_stderr - result_v0),
);
yield_constr.constraint(
filter
* is_syswrite_a0_not_stdout_err
* (v1_in_a0_is_not_fd_stdin_and_fd_stderr - result_v1),
);
//check:
//1 is_syscall
//2 sysnum==syswrite
//3 v0&v1 are right
//4 a0 ==fd_stderr or a0 == fd_stderr
yield_constr.constraint(
filter * (is_syswrite_a0_stdout_or_err - is_syswrite * a0_is_fd_stdout_or_fd_stderr),
);
yield_constr.constraint(filter * is_syswrite_a0_stdout_or_err * (a2 - result_v0));
yield_constr.constraint(filter * is_syswrite_a0_stdout_or_err * (v1 - result_v1));
//sysfcntl
let is_sysfcntl = syscall.sysnum[7];
let a0_is_fd_stdin = syscall.a0[0];
let v0_in_a0_is_fd_stdin = P::ZEROS;
let a0_is_fd_stdout_or_fd_stderr = syscall.a0[1];
let _v0_in_a0_is_fd_stdout_or_fd_stderr = P::ONES;
let a0_is_else = syscall.a0[2];
let v0_in_a0_is_not_fd_stdout_and_fd_stderr_and_fd_stdin =
P::Scalar::from_canonical_usize(0xFFFFFFFF);
let v1_in_a0_is_not_fd_stdin_and_fd_stderr_and_fd_stdin =
P::Scalar::from_canonical_usize(MIPSEBADF);
yield_constr.constraint(filter * (is_sysfcntl_a0_stdin - is_sysfcntl * a0_is_fd_stdin));
yield_constr.constraint(filter * is_sysfcntl_a0_stdin * (v0_in_a0_is_fd_stdin - result_v0));
yield_constr.constraint(filter * is_sysfcntl_a0_stdin * (v1 - result_v1));
yield_constr.constraint(
filter * (is_sysfcntl_a0_stdout_or_err - is_sysfcntl * a0_is_fd_stdout_or_fd_stderr),
);
yield_constr.constraint(filter * is_sysfcntl_a0_stdout_or_err * (P::ONES - result_v0));
yield_constr.constraint(filter * is_sysfcntl_a0_stdout_or_err * (v1 - result_v1));
yield_constr.constraint(
filter
* (is_sysfcntl
- is_sysfcntl_a0_stdin
- is_sysfcntl_a0_stdout_or_err
- is_sysfcntl * a0_is_else),
);
yield_constr.constraint(
filter
* (is_sysfcntl - is_sysfcntl_a0_stdin - is_sysfcntl_a0_stdout_or_err)
* (v0_in_a0_is_not_fd_stdout_and_fd_stderr_and_fd_stdin - result_v0),
);
yield_constr.constraint(
filter
* (is_sysfcntl - is_sysfcntl_a0_stdin - is_sysfcntl_a0_stdout_or_err)
* (v1_in_a0_is_not_fd_stdin_and_fd_stderr_and_fd_stdin - result_v1),
);
//syssetthreadarea
let is_syssetthreadarea = syscall.sysnum[8];
let threadarea = lv.mem_channels[6].value;
yield_constr.constraint(filter * is_syssetthreadarea * (a0 - threadarea));
}
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
lv: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let filter = lv.op.syscall;
// let _sys_num = lv.mem_channels[0].value;
let a0 = lv.mem_channels[1].value;
let a1 = lv.mem_channels[2].value;
let a2 = lv.mem_channels[3].value;
let v0 = builder.zero_extension();
let v1 = builder.zero_extension();
let syscall = lv.general.syscall();
let result_v0 = lv.mem_channels[4].value;
let result_v1 = lv.mem_channels[5].value;
//sysmap
let is_sysmap = syscall.sysnum[1];
let is_sz_mid_not_zero = syscall.a1; //sz & 0xFFF != 0
let is_sz_mid_zero = syscall.sysnum[10]; //sz & 0xFFF == 0
let sz = a1;
let sz_in_sz_mid_not_zero = syscall.sysnum[9]; //the value of sz_mid
let is_a0_zero = syscall.a0[0];
let is_a0_not_zero = syscall.a0[2];
let heap_in_a0_zero = lv.mem_channels[6].value;
let result_heap = lv.mem_channels[7].value;
let is_sysmap_a0_zero = syscall.cond[0];
let is_sysmap_a0_zero_sz_nz = syscall.cond[1];
let is_sysmap_a0_zero_sz_zero = syscall.cond[2];
let is_sysmap_a0_nz = syscall.cond[3];
let is_sysread_a0_not_stdin = syscall.cond[4];
let is_sysread_a0_stdin = syscall.cond[5];
let is_syswrite_a0_not_stdout_err = syscall.cond[6];
let is_syswrite_a0_stdout_or_err = syscall.cond[7];
let is_sysfcntl_a0_stdin = syscall.cond[8];
let is_sysfcntl_a0_stdout_or_err = syscall.cond[9];
let one_extension = builder.one_extension();
let v0_in_a0_zero = heap_in_a0_zero;
let heap_in_a0_zero_and_in_sz_mid_not_zero =
builder.add_extension(heap_in_a0_zero, sz_in_sz_mid_not_zero); // branch1:sz&fff!=0 & a0==0
let heap_in_a0_zero_and_not_in_sz_mid_not_zero = builder.add_extension(heap_in_a0_zero, sz); // branch2: sz&fff==0 &a0 ==0
let filter_1 = builder.mul_extension(is_sysmap, is_a0_zero);
let constr = builder.sub_extension(is_sysmap_a0_zero, filter_1);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let filter_1 = builder.mul_extension(is_sysmap_a0_zero, is_sz_mid_not_zero);
let constr = builder.sub_extension(is_sysmap_a0_zero_sz_nz, filter_1);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, is_sysmap_a0_zero_sz_nz);
let constr_2 = builder.sub_extension(heap_in_a0_zero_and_in_sz_mid_not_zero, result_heap);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let filter_1 = builder.mul_extension(is_sysmap_a0_zero, is_sz_mid_zero);
let constr = builder.sub_extension(is_sysmap_a0_zero_sz_zero, filter_1);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, is_sysmap_a0_zero_sz_zero);
let constr_2 = builder.sub_extension(heap_in_a0_zero_and_not_in_sz_mid_not_zero, result_heap);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr = builder.sub_extension(v0_in_a0_zero, result_v0);
let constr = builder.mul_extension(is_sysmap_a0_zero, constr);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let filter_1 = builder.mul_extension(is_sysmap, is_a0_not_zero);
let constr = builder.sub_extension(is_sysmap_a0_nz, filter_1);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let constr = builder.sub_extension(a0, result_v0);
let constr = builder.mul_extension(constr, is_sysmap_a0_nz);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
//sysbrk
let is_sysbrk = syscall.sysnum[2];
let is_sysbrk_gt = syscall.cond[10];
let is_sysbrk_le = syscall.cond[11];
let initial_brk = lv.mem_channels[6].value;
let constr_1 = builder.mul_extension(filter, is_sysbrk);
let constr_2 = builder.add_extension(is_sysbrk_gt, is_sysbrk_le);
let constr_2 = builder.sub_extension(one_extension, constr_2);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, is_sysbrk_gt);
let constr_2 = builder.sub_extension(a0, result_v0);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, is_sysbrk_le);
let constr_2 = builder.sub_extension(initial_brk, result_v0);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, is_sysbrk);
let constr_2 = builder.sub_extension(v1, result_v1);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
//sysclone
let is_sysclone = syscall.sysnum[3];
let v0_in_sysclone = builder.one_extension();
let constr_1 = builder.mul_extension(filter, is_sysclone);
let constr_2 = builder.sub_extension(v0_in_sysclone, result_v0);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr_2 = builder.sub_extension(v1, result_v1);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
// let is_SYSEXITGROUP =sys_num.is_equal_private(P::Scalar::from_canonical_usize(SYSEXITGROUP),Equal);
// //todo
//sysread
let is_sysread = syscall.sysnum[5];
let a0_is_fd_stdin = syscall.a0[0];
let a0_is_not_fd_stdin = syscall.a0[2];
let v0_in_a0_is_not_fd_stdin =
builder.constant_extension(F::Extension::from_canonical_usize(0xFFFFFFFF));
let v1_in_a0_is_not_fd_stdin =
builder.constant_extension(F::Extension::from_canonical_usize(MIPSEBADF));
let filter_1 = builder.mul_extension(is_sysread, a0_is_not_fd_stdin);
let constr = builder.sub_extension(is_sysread_a0_not_stdin, filter_1);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, is_sysread_a0_not_stdin);
let constr_2 = builder.sub_extension(v0_in_a0_is_not_fd_stdin, result_v0);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr_2 = builder.sub_extension(v1_in_a0_is_not_fd_stdin, result_v1);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let filter_1 = builder.mul_extension(is_sysread, a0_is_fd_stdin);
let constr = builder.sub_extension(is_sysread_a0_stdin, filter_1);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, is_sysread_a0_stdin);
let constr_2 = builder.sub_extension(v0, result_v0);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr_2 = builder.sub_extension(v1, result_v1);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
//syswrite
let is_syswrite = syscall.sysnum[6];
let a0_is_fd_stdout_or_fd_stderr = syscall.a0[1];
let a0_is_not_fd_stdout_and_fd_stderr = syscall.a0[2];
let v0_in_a0_is_not_fd_stdout_and_fd_stderr =
builder.constant_extension(F::Extension::from_canonical_usize(0xFFFFFFFF));
let v1_in_a0_is_not_fd_stdin_and_fd_stderr =
builder.constant_extension(F::Extension::from_canonical_usize(MIPSEBADF));
let filter_1 = builder.mul_extension(is_syswrite, a0_is_not_fd_stdout_and_fd_stderr);
let constr = builder.sub_extension(is_syswrite_a0_not_stdout_err, filter_1);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, is_syswrite_a0_not_stdout_err);
let constr_2 = builder.sub_extension(v0_in_a0_is_not_fd_stdout_and_fd_stderr, result_v0);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr_2 = builder.sub_extension(v1_in_a0_is_not_fd_stdin_and_fd_stderr, result_v1);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let filter_1 = builder.mul_extension(is_syswrite, a0_is_fd_stdout_or_fd_stderr);
let constr = builder.sub_extension(is_syswrite_a0_stdout_or_err, filter_1);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, is_syswrite_a0_stdout_or_err);
let constr_2 = builder.sub_extension(a2, result_v0);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr_2 = builder.sub_extension(v1, result_v1);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
//sysfcntl
let is_sysfcntl = syscall.sysnum[7];
let a0_is_fd_stdin = syscall.a0[0];
let v0_in_a0_is_fd_stdin = builder.zero_extension();
let a0_is_fd_stdout_or_fd_stderr = syscall.a0[1];
let v0_in_a0_is_fd_stdout_or_fd_stderr = builder.one_extension();
let a0_is_else = syscall.a0[2];
let v0_in_a0_is_not_fd_stdout_and_fd_stderr_and_fd_stdin =
builder.constant_extension(F::Extension::from_canonical_usize(0xFFFFFFFF));
let v1_in_a0_is_not_fd_stdin_and_fd_stderr_and_fd_stdin =
builder.constant_extension(F::Extension::from_canonical_usize(MIPSEBADF));
let filter_1 = builder.mul_extension(is_sysfcntl, a0_is_fd_stdin);
let constr = builder.sub_extension(is_sysfcntl_a0_stdin, filter_1);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, is_sysfcntl_a0_stdin);
let constr_2 = builder.sub_extension(v0_in_a0_is_fd_stdin, result_v0);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr_2 = builder.sub_extension(v1, result_v1);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let filter_1 = builder.mul_extension(is_sysfcntl, a0_is_fd_stdout_or_fd_stderr);
let constr = builder.sub_extension(is_sysfcntl_a0_stdout_or_err, filter_1);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, is_sysfcntl_a0_stdout_or_err);
let constr_2 = builder.sub_extension(v0_in_a0_is_fd_stdout_or_fd_stderr, result_v0);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr_2 = builder.sub_extension(v1, result_v1);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let filter_1 = builder.mul_extension(is_sysfcntl, a0_is_else);
let filter_2 = builder.sub_extension(is_sysfcntl, is_sysfcntl_a0_stdin);
let filter_2 = builder.sub_extension(filter_2, is_sysfcntl_a0_stdout_or_err);
let constr = builder.sub_extension(filter_2, filter_1);
let constr = builder.mul_extension(filter, constr);
yield_constr.constraint(builder, constr);
let constr_1 = builder.mul_extension(filter, filter_2);
let constr_2 = builder.sub_extension(
v0_in_a0_is_not_fd_stdout_and_fd_stderr_and_fd_stdin,
result_v0,
);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
let constr_2 = builder.sub_extension(
v1_in_a0_is_not_fd_stdin_and_fd_stderr_and_fd_stdin,
result_v1,
);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
//syssetthreadarea
let is_syssetthreadarea = syscall.sysnum[8];
let threadarea = lv.mem_channels[6].value;
let constr_1 = builder.mul_extension(filter, is_syssetthreadarea);
let constr_2 = builder.sub_extension(a0, threadarea);
let constr = builder.mul_extension(constr_1, constr_2);
yield_constr.constraint(builder, constr);
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/exit_kernel.rs | prover/src/cpu/exit_kernel.rs | use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cpu::bootstrap_kernel::{check_image_id, check_memory_page_hash};
use crate::cpu::columns::CpuColumnsView;
use crate::cpu::kernel::assembler::Kernel;
use crate::generation::state::GenerationState;
use crate::memory::segments::Segment;
use crate::witness::memory::MemoryAddress;
use crate::witness::util::mem_write_gp_log_and_fill;
use crate::witness::util::reg_zero_write_with_log;
use plonky2::plonk::config::GenericConfig;
use zkm_emulator::page::PAGE_ADDR_MASK;
use zkm_emulator::state::REGISTERS_START;
pub(crate) fn generate_exit_kernel<
F: RichField + Extendable<D>,
C: GenericConfig<D, F = F>,
const D: usize,
>(
state: &mut GenerationState<F, C, D>,
kernel: &Kernel,
) {
// check exit pc = end pc
assert_eq!(kernel.program.end_pc, state.registers.program_counter);
let mut cpu_row = CpuColumnsView::default();
cpu_row.clock = F::from_canonical_usize(state.traces.clock());
cpu_row.is_kernel_mode = F::ONE;
cpu_row.program_counter = F::from_canonical_usize(state.registers.program_counter);
cpu_row.next_program_counter = F::from_canonical_usize(state.registers.next_pc);
let log_end_pc = reg_zero_write_with_log(0, kernel.program.end_pc, state, &mut cpu_row);
state.traces.push_memory(log_end_pc);
state.traces.push_cpu(cpu_row);
// sync registers to memory
let registers_addr: Vec<_> = (REGISTERS_START..=REGISTERS_START + (39 << 2) - 1)
.step_by(4)
.collect::<Vec<u32>>();
let mut registers_value: [u32; 39] = [0; 39];
for i in 0..32 {
registers_value[i] = state.registers.gprs[i] as u32;
}
registers_value[32] = state.registers.lo as u32;
registers_value[33] = state.registers.hi as u32;
registers_value[34] = state.registers.heap as u32;
registers_value[35] = state.registers.program_counter as u32;
registers_value[36] = state.registers.next_pc as u32;
registers_value[37] = state.registers.brk as u32;
registers_value[38] = state.registers.local_user as u32;
let register_addr_value: Vec<_> = registers_addr.iter().zip(registers_value).collect();
for chunk in ®ister_addr_value.iter().chunks(8) {
let mut cpu_row = CpuColumnsView::default();
cpu_row.clock = F::from_canonical_usize(state.traces.clock());
cpu_row.is_exit_kernel = F::ONE;
cpu_row.program_counter = F::from_canonical_usize(state.registers.program_counter);
// Write this chunk to memory, while simultaneously packing its bytes into a u32 word.
for (channel, (addr, val)) in chunk.enumerate() {
// Both instruction and memory data are located in code section for MIPS
let address = MemoryAddress::new(0, Segment::Code, **addr as usize);
let write = mem_write_gp_log_and_fill(channel, address, state, &mut cpu_row, *val);
state.traces.push_memory(write);
}
state.traces.push_cpu(cpu_row);
}
state.memory.apply_ops(&state.traces.memory_ops);
// update memory hash root
for (addr, _) in kernel.program.image.iter() {
if (*addr & PAGE_ADDR_MASK as u32) == 0 {
check_memory_page_hash(state, kernel, *addr, true);
}
}
// check post image
check_image_id(state, kernel, true);
}
pub(crate) fn eval_exit_kernel_packed<F: Field, P: PackedField<Scalar = F>>(
local_values: &CpuColumnsView<P>,
next_values: &CpuColumnsView<P>,
yield_constr: &mut ConstraintConsumer<P>,
) {
// IS_EXIT_KERNEL must have an init value of 0, a final value of 1, and a delta in {0, 1}.
let local_is_exit = local_values.is_exit_kernel;
let next_is_exit = next_values.is_exit_kernel;
yield_constr.constraint_last_row(local_is_exit - P::ONES);
yield_constr.constraint_first_row(local_is_exit);
let delta_is_exit = next_is_exit - local_is_exit;
yield_constr.constraint_transition(delta_is_exit * (delta_is_exit - P::ONES));
// If this is a exit row and the i'th memory channel is used, it must have the right
// address, name context = 0, segment = Code, virt + 4 = next_virt
let code_segment = F::from_canonical_usize(Segment::Code as usize);
for channel in local_values.mem_channels.iter() {
let filter = local_is_exit * channel.used;
yield_constr.constraint(filter * channel.addr_context);
yield_constr.constraint(filter * (channel.addr_segment - code_segment));
}
// for the next is exit, the current pc should be end_pc
// for the exit row, all the pc should be end_pc
let input0 = local_values.mem_channels[0].value;
yield_constr.constraint_transition(delta_is_exit * (input0 - local_values.program_counter));
yield_constr.constraint_transition(
local_is_exit * (next_values.program_counter - local_values.program_counter),
);
}
pub(crate) fn eval_exit_kernel_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
builder: &mut CircuitBuilder<F, D>,
local_values: &CpuColumnsView<ExtensionTarget<D>>,
next_values: &CpuColumnsView<ExtensionTarget<D>>,
yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
let one = builder.one_extension();
// IS_EXIT_KERNEL must have an init value of 0, a final value of 1, and a delta in {0, 1}.
let local_is_exit = local_values.is_exit_kernel;
let next_is_exit = next_values.is_exit_kernel;
let constraint = builder.sub_extension(local_is_exit, one);
yield_constr.constraint_last_row(builder, constraint);
yield_constr.constraint_first_row(builder, local_is_exit);
let delta_is_exit = builder.sub_extension(next_is_exit, local_is_exit);
let constraint = builder.sub_extension(delta_is_exit, one);
let constraint = builder.mul_extension(delta_is_exit, constraint);
yield_constr.constraint_transition(builder, constraint);
// If this is a exit row and the i'th memory channel is used, it must have the right
// address, name context = 0, segment = Code, virt + 4 = next_virt
let code_segment =
builder.constant_extension(F::Extension::from_canonical_usize(Segment::Code as usize));
for channel in local_values.mem_channels {
let filter = builder.mul_extension(local_is_exit, channel.used);
let constraint = builder.mul_extension(filter, channel.addr_context);
yield_constr.constraint(builder, constraint);
let segment_diff = builder.sub_extension(channel.addr_segment, code_segment);
let constraint = builder.mul_extension(filter, segment_diff);
yield_constr.constraint(builder, constraint);
}
// for the next is exit, the current pc should be end_pc
// for the exit row, all the pc should be end_pc
let input0 = local_values.mem_channels[0].value;
let pc_constr = builder.sub_extension(input0, local_values.program_counter);
let pc_constr = builder.mul_extension(delta_is_exit, pc_constr);
yield_constr.constraint_transition(builder, pc_constr);
let pc_constr =
builder.sub_extension(next_values.program_counter, local_values.program_counter);
let pc_constr = builder.mul_extension(local_values.is_exit_kernel, pc_constr);
yield_constr.constraint_transition(builder, pc_constr);
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/kernel/assembler.rs | prover/src/cpu/kernel/assembler.rs | use super::elf::Program;
use crate::all_stark::NUM_PUBLIC_INPUT_USERDATA;
use serde::{Deserialize, Serialize};
use sha2::{Digest, Sha256};
use std::{collections::HashMap, io::Read};
use zkm_emulator::utils::get_block_path;
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize)]
pub struct Kernel {
// MIPS ELF
pub(crate) program: Program,
// For debugging purposes
pub(crate) ordered_labels: Vec<String>,
// should be preprocessed after loading code
pub(crate) global_labels: HashMap<String, usize>,
pub blockpath: String,
}
pub const MAX_MEM: u32 = 0x80000000;
pub fn segment_kernel<T: Read>(basedir: &str, block: &str, file: &str, seg_reader: T) -> Kernel {
let p: Program = Program::load_segment(seg_reader).unwrap();
let blockpath = get_block_path(basedir, block, file);
Kernel {
program: p,
ordered_labels: vec![],
global_labels: HashMap::new(),
blockpath,
}
}
impl Kernel {
/// Get a string representation of the current offset for debugging purposes.
pub(crate) fn offset_name(&self, offset: usize) -> String {
match self
.ordered_labels
.binary_search_by_key(&offset, |label| self.global_labels[label])
{
Ok(idx) => self.ordered_labels[idx].clone(),
Err(0) => offset.to_string(),
Err(idx) => format!("{}, below {}", offset, self.ordered_labels[idx - 1]),
}
}
pub(crate) fn offset_label(&self, offset: usize) -> Option<String> {
self.global_labels
.iter()
.find_map(|(k, v)| (*v == offset).then(|| k.clone()))
}
/// Read public input from input stream index 0
pub fn read_public_inputs(&self) -> Vec<u8> {
let mut hasher = Sha256::new();
let public_input = if let Some(first) = self.program.input_stream.first() {
// bincode::deserialize::<Vec<u8>>(first).expect("deserialization failed")
if first.is_empty() {
&vec![0u8; NUM_PUBLIC_INPUT_USERDATA]
} else {
first
}
} else {
&vec![0u8; NUM_PUBLIC_INPUT_USERDATA]
};
hasher.update(public_input);
let result = hasher.finalize();
result.to_vec()
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/kernel/keccak_util.rs | prover/src/cpu/kernel/keccak_util.rs | use tiny_keccak::keccakf;
use crate::keccak_sponge::columns::{KECCAK_WIDTH_BYTES, KECCAK_WIDTH_U32S};
/// Like tiny-keccak's `keccakf`, but deals with `u32` limbs instead of `u64` limbs.
pub(crate) fn keccakf_u32s(state_u32s: &mut [u32; KECCAK_WIDTH_U32S]) {
let mut state_u64s: [u64; 25] = core::array::from_fn(|i| {
let lo = state_u32s[i * 2] as u64;
let hi = state_u32s[i * 2 + 1] as u64;
lo | (hi << 32)
});
keccakf(&mut state_u64s);
*state_u32s = core::array::from_fn(|i| {
let u64_limb = state_u64s[i / 2];
let is_hi = i % 2;
(u64_limb >> (is_hi * 32)) as u32
});
}
/// Like tiny-keccak's `keccakf`, but deals with bytes instead of `u64` limbs.
pub(crate) fn keccakf_u8s(state_u8s: &mut [u8; KECCAK_WIDTH_BYTES]) {
let mut state_u64s: [u64; 25] =
core::array::from_fn(|i| u64::from_le_bytes(state_u8s[i * 8..][..8].try_into().unwrap()));
keccakf(&mut state_u64s);
*state_u8s = core::array::from_fn(|i| {
let u64_limb = state_u64s[i / 8];
u64_limb.to_le_bytes()[i % 8]
});
}
#[cfg(test)]
mod tests {
use tiny_keccak::keccakf;
use crate::cpu::kernel::keccak_util::{keccakf_u32s, keccakf_u8s};
#[test]
#[rustfmt::skip]
fn test_consistency() {
// We will hash the same data using keccakf, keccakf_u32s and keccakf_u8s.
// The inputs were randomly generated in Python.
let mut state_u64s: [u64; 25] = [0x5dc43ed05dc64048, 0x7bb9e18cdc853880, 0xc1fde300665b008f, 0xeeab85e089d5e431, 0xf7d61298e9ef27ea, 0xc2c5149d1a492455, 0x37a2f4eca0c2d2f2, 0xa35e50c015b3e85c, 0xd2daeced29446ebe, 0x245845f1bac1b98e, 0x3b3aa8783f30a9bf, 0x209ca9a81956d241, 0x8b8ea714da382165, 0x6063e67e202c6d29, 0xf4bac2ded136b907, 0xb17301b461eae65, 0xa91ff0e134ed747c, 0xcc080b28d0c20f1d, 0xf0f79cbec4fb551c, 0x25e04cb0aa930cad, 0x803113d1b541a202, 0xfaf1e4e7cd23b7ec, 0x36a03bbf2469d3b0, 0x25217341908cdfc0, 0xe9cd83f88fdcd500];
let mut state_u32s: [u32; 50] = [0x5dc64048, 0x5dc43ed0, 0xdc853880, 0x7bb9e18c, 0x665b008f, 0xc1fde300, 0x89d5e431, 0xeeab85e0, 0xe9ef27ea, 0xf7d61298, 0x1a492455, 0xc2c5149d, 0xa0c2d2f2, 0x37a2f4ec, 0x15b3e85c, 0xa35e50c0, 0x29446ebe, 0xd2daeced, 0xbac1b98e, 0x245845f1, 0x3f30a9bf, 0x3b3aa878, 0x1956d241, 0x209ca9a8, 0xda382165, 0x8b8ea714, 0x202c6d29, 0x6063e67e, 0xd136b907, 0xf4bac2de, 0x461eae65, 0xb17301b, 0x34ed747c, 0xa91ff0e1, 0xd0c20f1d, 0xcc080b28, 0xc4fb551c, 0xf0f79cbe, 0xaa930cad, 0x25e04cb0, 0xb541a202, 0x803113d1, 0xcd23b7ec, 0xfaf1e4e7, 0x2469d3b0, 0x36a03bbf, 0x908cdfc0, 0x25217341, 0x8fdcd500, 0xe9cd83f8];
let mut state_u8s: [u8; 200] = [0x48, 0x40, 0xc6, 0x5d, 0xd0, 0x3e, 0xc4, 0x5d, 0x80, 0x38, 0x85, 0xdc, 0x8c, 0xe1, 0xb9, 0x7b, 0x8f, 0x0, 0x5b, 0x66, 0x0, 0xe3, 0xfd, 0xc1, 0x31, 0xe4, 0xd5, 0x89, 0xe0, 0x85, 0xab, 0xee, 0xea, 0x27, 0xef, 0xe9, 0x98, 0x12, 0xd6, 0xf7, 0x55, 0x24, 0x49, 0x1a, 0x9d, 0x14, 0xc5, 0xc2, 0xf2, 0xd2, 0xc2, 0xa0, 0xec, 0xf4, 0xa2, 0x37, 0x5c, 0xe8, 0xb3, 0x15, 0xc0, 0x50, 0x5e, 0xa3, 0xbe, 0x6e, 0x44, 0x29, 0xed, 0xec, 0xda, 0xd2, 0x8e, 0xb9, 0xc1, 0xba, 0xf1, 0x45, 0x58, 0x24, 0xbf, 0xa9, 0x30, 0x3f, 0x78, 0xa8, 0x3a, 0x3b, 0x41, 0xd2, 0x56, 0x19, 0xa8, 0xa9, 0x9c, 0x20, 0x65, 0x21, 0x38, 0xda, 0x14, 0xa7, 0x8e, 0x8b, 0x29, 0x6d, 0x2c, 0x20, 0x7e, 0xe6, 0x63, 0x60, 0x7, 0xb9, 0x36, 0xd1, 0xde, 0xc2, 0xba, 0xf4, 0x65, 0xae, 0x1e, 0x46, 0x1b, 0x30, 0x17, 0xb, 0x7c, 0x74, 0xed, 0x34, 0xe1, 0xf0, 0x1f, 0xa9, 0x1d, 0xf, 0xc2, 0xd0, 0x28, 0xb, 0x8, 0xcc, 0x1c, 0x55, 0xfb, 0xc4, 0xbe, 0x9c, 0xf7, 0xf0, 0xad, 0xc, 0x93, 0xaa, 0xb0, 0x4c, 0xe0, 0x25, 0x2, 0xa2, 0x41, 0xb5, 0xd1, 0x13, 0x31, 0x80, 0xec, 0xb7, 0x23, 0xcd, 0xe7, 0xe4, 0xf1, 0xfa, 0xb0, 0xd3, 0x69, 0x24, 0xbf, 0x3b, 0xa0, 0x36, 0xc0, 0xdf, 0x8c, 0x90, 0x41, 0x73, 0x21, 0x25, 0x0, 0xd5, 0xdc, 0x8f, 0xf8, 0x83, 0xcd, 0xe9];
// The first output was generated using tiny-keccak; the others were derived from it.
let out_u64s: [u64; 25] = [0x8a541df597e79a72, 0x5c26b8c84faaebb3, 0xc0e8f4e67ca50497, 0x95d98a688de12dec, 0x1c837163975ffaed, 0x9481ec7ef948900e, 0x6a072c65d050a9a1, 0x3b2817da6d615bee, 0x7ffb3c4f8b94bf21, 0x85d6c418cced4a11, 0x18edbe0442884135, 0x2bf265ef3204b7fd, 0xc1e12ce30630d105, 0x8c554dbc61844574, 0x5504db652ce9e42c, 0x2217f3294d0dabe5, 0x7df8eebbcf5b74df, 0x3a56ebb61956f501, 0x7840219dc6f37cc, 0x23194159c967947, 0x9da289bf616ba14d, 0x5a90aaeeca9e9e5b, 0x885dcdc4a549b4e3, 0x46cb188c20947df7, 0x1ef285948ee3d8ab];
let out_u32s: [u32; 50] = [0x97e79a72, 0x8a541df5, 0x4faaebb3, 0x5c26b8c8, 0x7ca50497, 0xc0e8f4e6, 0x8de12dec, 0x95d98a68, 0x975ffaed, 0x1c837163, 0xf948900e, 0x9481ec7e, 0xd050a9a1, 0x6a072c65, 0x6d615bee, 0x3b2817da, 0x8b94bf21, 0x7ffb3c4f, 0xcced4a11, 0x85d6c418, 0x42884135, 0x18edbe04, 0x3204b7fd, 0x2bf265ef, 0x630d105, 0xc1e12ce3, 0x61844574, 0x8c554dbc, 0x2ce9e42c, 0x5504db65, 0x4d0dabe5, 0x2217f329, 0xcf5b74df, 0x7df8eebb, 0x1956f501, 0x3a56ebb6, 0xdc6f37cc, 0x7840219, 0x9c967947, 0x2319415, 0x616ba14d, 0x9da289bf, 0xca9e9e5b, 0x5a90aaee, 0xa549b4e3, 0x885dcdc4, 0x20947df7, 0x46cb188c, 0x8ee3d8ab, 0x1ef28594];
let out_u8s: [u8; 200] = [0x72, 0x9a, 0xe7, 0x97, 0xf5, 0x1d, 0x54, 0x8a, 0xb3, 0xeb, 0xaa, 0x4f, 0xc8, 0xb8, 0x26, 0x5c, 0x97, 0x4, 0xa5, 0x7c, 0xe6, 0xf4, 0xe8, 0xc0, 0xec, 0x2d, 0xe1, 0x8d, 0x68, 0x8a, 0xd9, 0x95, 0xed, 0xfa, 0x5f, 0x97, 0x63, 0x71, 0x83, 0x1c, 0xe, 0x90, 0x48, 0xf9, 0x7e, 0xec, 0x81, 0x94, 0xa1, 0xa9, 0x50, 0xd0, 0x65, 0x2c, 0x7, 0x6a, 0xee, 0x5b, 0x61, 0x6d, 0xda, 0x17, 0x28, 0x3b, 0x21, 0xbf, 0x94, 0x8b, 0x4f, 0x3c, 0xfb, 0x7f, 0x11, 0x4a, 0xed, 0xcc, 0x18, 0xc4, 0xd6, 0x85, 0x35, 0x41, 0x88, 0x42, 0x4, 0xbe, 0xed, 0x18, 0xfd, 0xb7, 0x4, 0x32, 0xef, 0x65, 0xf2, 0x2b, 0x5, 0xd1, 0x30, 0x6, 0xe3, 0x2c, 0xe1, 0xc1, 0x74, 0x45, 0x84, 0x61, 0xbc, 0x4d, 0x55, 0x8c, 0x2c, 0xe4, 0xe9, 0x2c, 0x65, 0xdb, 0x4, 0x55, 0xe5, 0xab, 0xd, 0x4d, 0x29, 0xf3, 0x17, 0x22, 0xdf, 0x74, 0x5b, 0xcf, 0xbb, 0xee, 0xf8, 0x7d, 0x1, 0xf5, 0x56, 0x19, 0xb6, 0xeb, 0x56, 0x3a, 0xcc, 0x37, 0x6f, 0xdc, 0x19, 0x2, 0x84, 0x7, 0x47, 0x79, 0x96, 0x9c, 0x15, 0x94, 0x31, 0x2, 0x4d, 0xa1, 0x6b, 0x61, 0xbf, 0x89, 0xa2, 0x9d, 0x5b, 0x9e, 0x9e, 0xca, 0xee, 0xaa, 0x90, 0x5a, 0xe3, 0xb4, 0x49, 0xa5, 0xc4, 0xcd, 0x5d, 0x88, 0xf7, 0x7d, 0x94, 0x20, 0x8c, 0x18, 0xcb, 0x46, 0xab, 0xd8, 0xe3, 0x8e, 0x94, 0x85, 0xf2, 0x1e];
keccakf(&mut state_u64s);
keccakf_u32s(&mut state_u32s);
keccakf_u8s(&mut state_u8s);
assert_eq!(state_u64s, out_u64s);
assert_eq!(state_u32s, out_u32s);
assert_eq!(state_u8s, out_u8s);
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/kernel/mod.rs | prover/src/cpu/kernel/mod.rs | pub mod assembler;
pub(crate) mod constants;
pub(crate) mod elf;
pub(crate) mod keccak_util;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/kernel/elf.rs | prover/src/cpu/kernel/elf.rs | extern crate alloc;
use alloc::collections::BTreeMap;
use anyhow::{Context, Result};
use serde::{Deserialize, Serialize};
use std::fs::{self};
use std::io::Read;
use zkm_emulator::memory::WORD_SIZE;
use zkm_emulator::state::{Segment, REGISTERS_START};
pub const PAGE_SIZE: u32 = 4096;
/// A MIPS program
#[derive(PartialEq, Eq, Debug, Serialize, Deserialize, Default)]
pub struct Program {
/// The entrypoint of the program, PC
pub entry: u32,
pub next_pc: usize,
/// The initial memory image
pub image: BTreeMap<u32, u32>,
pub gprs: [usize; 32],
pub lo: usize,
pub hi: usize,
pub heap: usize,
pub brk: usize,
pub local_user: usize,
pub end_pc: usize,
pub step: usize,
pub image_id: [u8; 32],
pub pre_image_id: [u8; 32],
pub pre_hash_root: [u8; 32],
pub page_hash_root: [u8; 32],
pub input_stream: Vec<Vec<u8>>,
pub input_stream_ptr: usize,
pub public_values_stream: Vec<u8>,
pub public_values_stream_ptr: usize,
}
impl Program {
pub fn load_block(&mut self, blockpath: &str) -> Result<bool> {
let content = fs::read(blockpath).expect("Read file failed");
let mut map_addr = 0x30000000;
for i in (0..content.len()).step_by(WORD_SIZE) {
let mut word = 0;
// Don't read past the end of the file.
let len = core::cmp::min(content.len() - i, WORD_SIZE);
for j in 0..len {
let offset = i + j;
let byte = content.get(offset).context("Invalid block offset")?;
word |= (*byte as u32) << (j * 8);
}
self.image.insert(map_addr, word);
map_addr += 4;
}
Ok(true)
}
pub fn load_segment<T: Read>(reader: T) -> Result<Program> {
let segment: Segment = serde_json::from_reader(reader).unwrap();
let entry = segment.pc;
let image = segment.mem_image;
let end_pc = segment.end_pc as usize;
let mut gprs: [usize; 32] = [0; 32];
for i in 0..32 {
let data = image.get(&(REGISTERS_START + (i << 2) as u32)).unwrap();
gprs[i] = data.to_be() as usize;
}
let lo: usize = image
.get(&(REGISTERS_START + (32 << 2) as u32))
.unwrap()
.to_be() as usize;
let hi: usize = image
.get(&(REGISTERS_START + (33 << 2) as u32))
.unwrap()
.to_be() as usize;
let heap: usize = image
.get(&(REGISTERS_START + (34 << 2) as u32))
.unwrap()
.to_be() as usize;
let pc: usize = image
.get(&(REGISTERS_START + (35 << 2) as u32))
.unwrap()
.to_be() as usize;
let next_pc: usize = image
.get(&(REGISTERS_START + (36 << 2) as u32))
.unwrap()
.to_be() as usize;
let brk: usize = image
.get(&(REGISTERS_START + (37 << 2) as u32))
.unwrap()
.to_be() as usize;
let local_user: usize = image
.get(&(REGISTERS_START + (38 << 2) as u32))
.unwrap()
.to_be() as usize;
let page_hash_root = segment.page_hash_root;
assert!(pc as u32 == segment.pc);
log::trace!(
"load segment pc: {} image: {:?} gprs: {:?} lo: {} hi: {} heap:{} range: ({} -> {})",
segment.pc,
segment.image_id,
gprs,
lo,
hi,
heap,
pc,
end_pc
);
Ok(Program {
entry,
next_pc,
image,
gprs,
lo,
hi,
heap,
brk,
local_user,
end_pc,
step: segment.step as usize,
image_id: segment.image_id,
pre_image_id: segment.pre_image_id,
pre_hash_root: segment.pre_hash_root,
page_hash_root,
input_stream: segment.input_stream,
input_stream_ptr: segment.input_stream_ptr,
public_values_stream: segment.public_values_stream,
public_values_stream_ptr: segment.public_values_stream_ptr,
})
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/kernel/constants/mod.rs | prover/src/cpu/kernel/constants/mod.rs | pub(crate) mod context_metadata;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/kernel/constants/context_metadata.rs | prover/src/cpu/kernel/constants/context_metadata.rs | /// These metadata fields contain VM state specific to a particular context.
#[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)]
pub(crate) enum ContextMetadata {
/// The ID of the context which created this one.
ParentContext = 0,
/// The program counter to return to when we return to the parent context.
ParentProgramCounter = 1,
CalldataSize = 2,
ReturndataSize = 3,
/// The address of the account associated with this context.
Address = 4,
/// The size of the code under the account associated with this context.
/// While this information could be obtained from the state trie, it is best to cache it since
/// the `CODESIZE` instruction is very cheap.
CodeSize = 5,
/// The address of the caller who spawned this context.
Caller = 6,
/// The value (in wei) deposited by the caller.
CallValue = 7,
/// Whether this context was created by `STATICCALL`, in which case state changes are
/// prohibited.
Static = 8,
/// Pointer to the initial version of the state trie, at the creation of this context. Used when
/// we need to revert a context.
StateTrieCheckpointPointer = 9,
/// Size of the active main memory, in (32 byte) words.
MemWords = 10,
StackSize = 11,
/// The gas limit for this call (not the entire transaction).
GasLimit = 12,
ContextCheckpointsLen = 13,
}
impl ContextMetadata {
pub(crate) const COUNT: usize = 14;
pub(crate) fn all() -> [Self; Self::COUNT] {
[
Self::ParentContext,
Self::ParentProgramCounter,
Self::CalldataSize,
Self::ReturndataSize,
Self::Address,
Self::CodeSize,
Self::Caller,
Self::CallValue,
Self::Static,
Self::StateTrieCheckpointPointer,
Self::MemWords,
Self::StackSize,
Self::GasLimit,
Self::ContextCheckpointsLen,
]
}
/// The variable name that gets passed into kernel assembly code.
pub(crate) fn var_name(&self) -> &'static str {
match self {
ContextMetadata::ParentContext => "CTX_METADATA_PARENT_CONTEXT",
ContextMetadata::ParentProgramCounter => "CTX_METADATA_PARENT_PC",
ContextMetadata::CalldataSize => "CTX_METADATA_CALLDATA_SIZE",
ContextMetadata::ReturndataSize => "CTX_METADATA_RETURNDATA_SIZE",
ContextMetadata::Address => "CTX_METADATA_ADDRESS",
ContextMetadata::CodeSize => "CTX_METADATA_CODE_SIZE",
ContextMetadata::Caller => "CTX_METADATA_CALLER",
ContextMetadata::CallValue => "CTX_METADATA_CALL_VALUE",
ContextMetadata::Static => "CTX_METADATA_STATIC",
ContextMetadata::StateTrieCheckpointPointer => "CTX_METADATA_STATE_TRIE_CHECKPOINT_PTR",
ContextMetadata::MemWords => "CTX_METADATA_MEM_WORDS",
ContextMetadata::StackSize => "CTX_METADATA_STACK_SIZE",
ContextMetadata::GasLimit => "CTX_METADATA_GAS_LIMIT",
ContextMetadata::ContextCheckpointsLen => "CTX_METADATA_CHECKPOINTS_LEN",
}
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/columns/general.rs | prover/src/cpu/columns/general.rs | use std::borrow::{Borrow, BorrowMut};
use std::fmt::{Debug, Formatter};
use std::mem::{size_of, transmute};
/// General purpose columns, which can have different meanings depending on what CTL or other
/// operation is occurring at this row.
#[derive(Clone, Copy)]
pub(crate) union CpuGeneralColumnsView<T: Copy> {
syscall: CpuSyscallView<T>,
logic: CpuLogicView<T>,
shift: CpuShiftView<T>,
io: CpuIOAuxView<T>,
hash: CpuHashView<T>,
khash: CpuKHashView<T>,
shash: CpuSHashView<T>,
element: CpuElementView<T>,
misc: CpuMiscView<T>,
}
impl<T: Copy> CpuGeneralColumnsView<T> {
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn hash(&self) -> &CpuHashView<T> {
unsafe { &self.hash }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn hash_mut(&mut self) -> &mut CpuHashView<T> {
unsafe { &mut self.hash }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn khash(&self) -> &CpuKHashView<T> {
unsafe { &self.khash }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn khash_mut(&mut self) -> &mut CpuKHashView<T> {
unsafe { &mut self.khash }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn element(&self) -> &CpuElementView<T> {
unsafe { &self.element }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn element_mut(&mut self) -> &mut CpuElementView<T> {
unsafe { &mut self.element }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn shash(&self) -> &CpuSHashView<T> {
unsafe { &self.shash }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn shash_mut(&mut self) -> &mut CpuSHashView<T> {
unsafe { &mut self.shash }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn syscall(&self) -> &CpuSyscallView<T> {
unsafe { &self.syscall }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn syscall_mut(&mut self) -> &mut CpuSyscallView<T> {
unsafe { &mut self.syscall }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn logic(&self) -> &CpuLogicView<T> {
unsafe { &self.logic }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn logic_mut(&mut self) -> &mut CpuLogicView<T> {
unsafe { &mut self.logic }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn shift(&self) -> &CpuShiftView<T> {
unsafe { &self.shift }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn shift_mut(&mut self) -> &mut CpuShiftView<T> {
unsafe { &mut self.shift }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn misc(&self) -> &CpuMiscView<T> {
unsafe { &self.misc }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn misc_mut(&mut self) -> &mut CpuMiscView<T> {
unsafe { &mut self.misc }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn io(&self) -> &CpuIOAuxView<T> {
unsafe { &self.io }
}
// SAFETY: Each view is a valid interpretation of the underlying array.
pub(crate) fn io_mut(&mut self) -> &mut CpuIOAuxView<T> {
unsafe { &mut self.io }
}
}
impl<T: Copy + PartialEq> PartialEq<Self> for CpuGeneralColumnsView<T> {
fn eq(&self, other: &Self) -> bool {
let self_arr: &[T; NUM_SHARED_COLUMNS] = self.borrow();
let other_arr: &[T; NUM_SHARED_COLUMNS] = other.borrow();
self_arr == other_arr
}
}
impl<T: Copy + Eq> Eq for CpuGeneralColumnsView<T> {}
impl<T: Copy + Debug> Debug for CpuGeneralColumnsView<T> {
fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
let self_arr: &[T; NUM_SHARED_COLUMNS] = self.borrow();
Debug::fmt(self_arr, f)
}
}
impl<T: Copy> Borrow<[T; NUM_SHARED_COLUMNS]> for CpuGeneralColumnsView<T> {
fn borrow(&self) -> &[T; NUM_SHARED_COLUMNS] {
unsafe { transmute(self) }
}
}
impl<T: Copy> BorrowMut<[T; NUM_SHARED_COLUMNS]> for CpuGeneralColumnsView<T> {
fn borrow_mut(&mut self) -> &mut [T; NUM_SHARED_COLUMNS] {
unsafe { transmute(self) }
}
}
#[derive(Copy, Clone)]
pub(crate) struct CpuSyscallView<T: Copy> {
pub(crate) cond: [T; 12],
pub(crate) sysnum: [T; 12],
pub(crate) a0: [T; 3],
pub(crate) a1: T,
}
#[derive(Copy, Clone)]
pub(crate) struct CpuMiscView<T: Copy> {
pub(crate) rs_bits: [T; 32],
pub(crate) is_msb: [T; 32],
pub(crate) is_lsb: [T; 32],
pub(crate) auxm: T,
pub(crate) auxl: T,
pub(crate) auxs: T,
pub(crate) rd_index: T,
pub(crate) rd_index_eq_0: T,
pub(crate) rd_index_eq_29: T,
}
#[derive(Copy, Clone)]
pub(crate) struct CpuLogicView<T: Copy> {
// Pseudoinverse of `(input0 - input1)`. Used prove that they are unequal. Assumes 32-bit limbs.
pub(crate) diff_pinv: T,
}
#[derive(Copy, Clone)]
pub(crate) struct CpuShiftView<T: Copy> {
// For a shift amount of displacement: [T], this is the inverse of
// sum(displacement[1..]) or zero if the sum is zero.
pub(crate) high_limb_sum_inv: T,
}
#[derive(Copy, Clone)]
pub(crate) struct CpuIOAuxView<T: Copy> {
pub(crate) rs_le: [T; 32],
pub(crate) rt_le: [T; 32],
pub(crate) mem_le: [T; 32],
pub(crate) aux_rs0_mul_rs1: T,
}
#[derive(Copy, Clone)]
pub(crate) struct CpuHashView<T: Copy> {
pub(crate) value: [T; 4],
}
#[derive(Copy, Clone)]
pub(crate) struct CpuKHashView<T: Copy> {
pub(crate) value: [T; 8],
}
#[derive(Copy, Clone)]
pub(crate) struct CpuSHashView<T: Copy> {
pub(crate) value: [T; 8],
}
#[derive(Copy, Clone)]
pub(crate) struct CpuElementView<T: Copy> {
pub(crate) value: T,
}
// `u8` is guaranteed to have a `size_of` of 1.
pub const NUM_SHARED_COLUMNS: usize = size_of::<CpuGeneralColumnsView<u8>>();
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/columns/mod.rs | prover/src/cpu/columns/mod.rs | use std::borrow::{Borrow, BorrowMut};
use std::fmt::Debug;
use std::mem::{size_of, transmute};
use std::ops::{Index, IndexMut};
use plonky2::field::types::Field;
use crate::cpu::columns::general::CpuGeneralColumnsView;
use crate::cpu::columns::ops::OpsColumnsView;
use crate::cpu::membus::NUM_GP_CHANNELS;
use crate::util::{indices_arr, transmute_no_compile_time_size_checks};
mod general;
pub(crate) mod ops;
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct CpuBranchView<T: Copy> {
// A flag.
pub should_jump: T,
pub gt: T,
pub lt: T,
pub eq: T,
pub is_gt: T,
pub is_lt: T,
pub is_eq: T,
pub is_ge: T,
pub is_le: T,
pub is_ne: T,
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct MemoryChannelView<T: Copy> {
/// 1 if this row includes a memory operation in the `i`th channel of the memory bus, otherwise
/// 0.
pub used: T,
pub is_read: T,
pub addr_context: T,
pub addr_segment: T,
pub addr_virtual: T,
pub value: T,
}
#[repr(C)]
#[derive(Clone, Copy, Debug, Eq, PartialEq)]
pub struct MemIOView<T: Copy> {
pub(crate) is_lh: T,
pub(crate) is_lwl: T,
pub(crate) is_lw: T,
pub(crate) is_lbu: T,
pub(crate) is_lhu: T,
pub(crate) is_lwr: T,
pub(crate) is_sb: T,
pub(crate) is_sh: T,
pub(crate) is_swl: T,
pub(crate) is_sw: T,
pub(crate) is_swr: T,
pub(crate) is_ll: T,
pub(crate) is_sc: T,
pub(crate) is_sdc1: T,
pub(crate) is_lb: T,
pub(crate) aux_filter: T,
}
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct CpuColumnsView<T: Copy> {
/// Filter. 1 if the row is part of bootstrapping the kernel code, 0 otherwise.
pub is_bootstrap_kernel: T,
pub is_exit_kernel: T,
/// If CPU cycle: Current context.
// TODO: this is currently unconstrained
pub context: T,
/// If CPU cycle: Context for code memory channel.
pub code_context: T,
/// If CPU cycle: The program counter for the current instruction.
pub program_counter: T,
pub next_program_counter: T,
/// If CPU cycle: We're in kernel (privileged) mode.
pub is_kernel_mode: T,
/// If CPU cycle: flags for ZKVM instructions (a few cannot be shared; see the comments in
/// `OpsColumnsView`).
pub op: OpsColumnsView<T>,
pub branch: CpuBranchView<T>,
/// If CPU cycle: the opcode, broken up into bits in little-endian order.
pub opcode_bits: [T; 6], // insn[31:26]
pub rs_bits: [T; 5], // insn[25:21]
pub rt_bits: [T; 5], // insn[20:16]
pub rd_bits: [T; 5], // insn[15:11]
pub shamt_bits: [T; 5], // insn[10:6] i.e. hint
pub func_bits: [T; 6], // insn[5:0]
// imm | offset: [rd_bits, shamt_bits, func_bits]
// code: [rs_bits, rt_bits, rd_bits, shamt_bits]
// inst_index: [rs_bits, rt_bits, rd_bits, shamt_bits, func_bits]
/// Filter. 1 iff a Poseidon sponge lookup is performed on this row.
pub is_poseidon_sponge: T,
pub is_keccak_sponge: T,
pub is_sha_extend_sponge: T,
pub is_sha_compress_sponge: T,
pub(crate) general: CpuGeneralColumnsView<T>,
pub(crate) memio: MemIOView<T>,
pub(crate) clock: T,
pub mem_channels: [MemoryChannelView<T>; NUM_GP_CHANNELS],
}
// `u8` is guaranteed to have a `size_of` of 1.
pub const NUM_CPU_COLUMNS: usize = size_of::<CpuColumnsView<u8>>();
impl<F: Field> Default for CpuColumnsView<F> {
fn default() -> Self {
Self::from([F::ZERO; NUM_CPU_COLUMNS])
}
}
impl<T: Copy> From<[T; NUM_CPU_COLUMNS]> for CpuColumnsView<T> {
fn from(value: [T; NUM_CPU_COLUMNS]) -> Self {
unsafe { transmute_no_compile_time_size_checks(value) }
}
}
impl<T: Copy> From<CpuColumnsView<T>> for [T; NUM_CPU_COLUMNS] {
fn from(value: CpuColumnsView<T>) -> Self {
unsafe { transmute_no_compile_time_size_checks(value) }
}
}
impl<T: Copy> Borrow<CpuColumnsView<T>> for [T; NUM_CPU_COLUMNS] {
fn borrow(&self) -> &CpuColumnsView<T> {
unsafe { transmute(self) }
}
}
impl<T: Copy> BorrowMut<CpuColumnsView<T>> for [T; NUM_CPU_COLUMNS] {
fn borrow_mut(&mut self) -> &mut CpuColumnsView<T> {
unsafe { transmute(self) }
}
}
impl<T: Copy> Borrow<[T; NUM_CPU_COLUMNS]> for CpuColumnsView<T> {
fn borrow(&self) -> &[T; NUM_CPU_COLUMNS] {
unsafe { transmute(self) }
}
}
impl<T: Copy> BorrowMut<[T; NUM_CPU_COLUMNS]> for CpuColumnsView<T> {
fn borrow_mut(&mut self) -> &mut [T; NUM_CPU_COLUMNS] {
unsafe { transmute(self) }
}
}
impl<T: Copy, I> Index<I> for CpuColumnsView<T>
where
[T]: Index<I>,
{
type Output = <[T] as Index<I>>::Output;
fn index(&self, index: I) -> &Self::Output {
let arr: &[T; NUM_CPU_COLUMNS] = self.borrow();
<[T] as Index<I>>::index(arr, index)
}
}
impl<T: Copy, I> IndexMut<I> for CpuColumnsView<T>
where
[T]: IndexMut<I>,
{
fn index_mut(&mut self, index: I) -> &mut Self::Output {
let arr: &mut [T; NUM_CPU_COLUMNS] = self.borrow_mut();
<[T] as IndexMut<I>>::index_mut(arr, index)
}
}
const fn make_col_map() -> CpuColumnsView<usize> {
let indices_arr = indices_arr::<NUM_CPU_COLUMNS>();
unsafe { transmute::<[usize; NUM_CPU_COLUMNS], CpuColumnsView<usize>>(indices_arr) }
}
pub const COL_MAP: CpuColumnsView<usize> = make_col_map();
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/cpu/columns/ops.rs | prover/src/cpu/columns/ops.rs | use std::borrow::{Borrow, BorrowMut};
use std::mem::{size_of, transmute};
use std::ops::{Deref, DerefMut};
use crate::util::transmute_no_compile_time_size_checks;
#[repr(C)]
#[derive(Clone, Copy, Eq, PartialEq, Debug)]
pub struct OpsColumnsView<T: Copy> {
pub binary_op: T, // Combines ADD, MUL, SUB, DIV, SLL, ... flags.
pub binary_imm_op: T, // Combines ADDI, ADDIU, SLTI, SLTIU, LUI
pub eq_iszero: T, // Combines EQ and ISZERO flags.
pub logic_op: T, // Combines AND, OR, XOR, Nor flags.
pub logic_imm_op: T, // Combines ANDI, ORI, XORI flags.
pub movz_op: T,
pub movn_op: T,
pub clz_op: T,
pub clo_op: T,
pub shift: T, // Combines SHL and SHR flags.
pub shift_imm: T, // Combines SHL and SHR flags.
pub keccak_general: T,
pub jumps: T,
pub jumpi: T,
pub jumpdirect: T,
pub branch: T,
pub pc: T,
pub get_context: T,
pub set_context: T,
pub exit_kernel: T,
pub m_op_load: T,
pub m_op_store: T,
pub nop: T,
pub ext: T,
pub ins: T,
pub maddu: T,
pub rdhwr: T,
pub signext8: T,
pub signext16: T,
pub swaphalf: T,
pub teq: T,
pub ror: T,
pub syscall: T,
}
// `u8` is guaranteed to have a `size_of` of 1.
pub const NUM_OPS_COLUMNS: usize = size_of::<OpsColumnsView<u8>>();
impl<T: Copy> From<[T; NUM_OPS_COLUMNS]> for OpsColumnsView<T> {
fn from(value: [T; NUM_OPS_COLUMNS]) -> Self {
unsafe { transmute_no_compile_time_size_checks(value) }
}
}
impl<T: Copy> From<OpsColumnsView<T>> for [T; NUM_OPS_COLUMNS] {
fn from(value: OpsColumnsView<T>) -> Self {
unsafe { transmute_no_compile_time_size_checks(value) }
}
}
impl<T: Copy> Borrow<OpsColumnsView<T>> for [T; NUM_OPS_COLUMNS] {
fn borrow(&self) -> &OpsColumnsView<T> {
unsafe { transmute(self) }
}
}
impl<T: Copy> BorrowMut<OpsColumnsView<T>> for [T; NUM_OPS_COLUMNS] {
fn borrow_mut(&mut self) -> &mut OpsColumnsView<T> {
unsafe { transmute(self) }
}
}
impl<T: Copy> Deref for OpsColumnsView<T> {
type Target = [T; NUM_OPS_COLUMNS];
fn deref(&self) -> &Self::Target {
unsafe { transmute(self) }
}
}
impl<T: Copy> DerefMut for OpsColumnsView<T> {
fn deref_mut(&mut self) -> &mut Self::Target {
unsafe { transmute(self) }
}
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/memory/columns.rs | prover/src/memory/columns.rs | //! Memory registers.
use crate::memory::VALUE_LIMBS;
// Columns for memory operations, ordered by (addr, timestamp).
/// 1 if this is an actual memory operation, or 0 if it's a padding row.
pub(crate) const FILTER: usize = 0;
pub(crate) const TIMESTAMP: usize = FILTER + 1;
pub(crate) const IS_READ: usize = TIMESTAMP + 1;
pub(crate) const ADDR_CONTEXT: usize = IS_READ + 1;
pub(crate) const ADDR_SEGMENT: usize = ADDR_CONTEXT + 1;
pub(crate) const ADDR_VIRTUAL: usize = ADDR_SEGMENT + 1;
// Eight 32-bit limbs hold a total of 256 bits.
// If a value represents an integer, it is little-endian encoded.
const VALUE_START: usize = ADDR_VIRTUAL + 1;
pub(crate) const fn value_limb(i: usize) -> usize {
debug_assert!(i < VALUE_LIMBS);
VALUE_START + i
}
// Flags to indicate whether this part of the address differs from the next row,
// and the previous parts do not differ.
// That is, e.g., `SEGMENT_FIRST_CHANGE` is `F::ONE` iff `ADDR_CONTEXT` is the same in this
// row and the next, but `ADDR_SEGMENT` is not.
pub(crate) const CONTEXT_FIRST_CHANGE: usize = VALUE_START + VALUE_LIMBS;
pub(crate) const SEGMENT_FIRST_CHANGE: usize = CONTEXT_FIRST_CHANGE + 1;
pub(crate) const VIRTUAL_FIRST_CHANGE: usize = SEGMENT_FIRST_CHANGE + 1;
// We use a range check to enforce the ordering.
pub(crate) const RANGE_CHECK: usize = VIRTUAL_FIRST_CHANGE + 1;
/// The counter column (used for the range check) starts from 0 and increments.
pub(crate) const COUNTER: usize = RANGE_CHECK + 1;
/// The frequencies column used in logUp.
pub(crate) const FREQUENCIES: usize = COUNTER + 1;
pub(crate) const NUM_COLUMNS: usize = FREQUENCIES + 1;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/memory/memory_stark.rs | prover/src/memory/memory_stark.rs | use std::marker::PhantomData;
use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::util::transpose;
use plonky2_maybe_rayon::*;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::lookup::Lookup;
use crate::memory::columns::{
value_limb, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL, CONTEXT_FIRST_CHANGE, COUNTER, FILTER,
FREQUENCIES, IS_READ, NUM_COLUMNS, RANGE_CHECK, SEGMENT_FIRST_CHANGE, TIMESTAMP,
VIRTUAL_FIRST_CHANGE,
};
use crate::memory::segments::Segment;
use crate::memory::VALUE_LIMBS;
use crate::stark::Stark;
use crate::witness::memory::MemoryOpKind::Read;
use crate::witness::memory::MemoryOpKind::Write;
use crate::witness::memory::{MemoryAddress, MemoryOp};
pub fn ctl_data<F: Field>() -> Vec<Column<F>> {
let mut res =
Column::singles([IS_READ, ADDR_CONTEXT, ADDR_SEGMENT, ADDR_VIRTUAL]).collect_vec();
res.extend(Column::singles((0..VALUE_LIMBS).map(value_limb)));
res.push(Column::single(TIMESTAMP));
res
}
pub fn ctl_filter<F: Field>() -> Filter<F> {
Filter::new_simple(Column::single(FILTER))
}
#[derive(Copy, Clone, Default)]
pub struct MemoryStark<F, const D: usize> {
pub(crate) f: PhantomData<F>,
}
impl MemoryOp {
/// Generate a row for a given memory operation. Note that this does not generate columns which
/// depend on the next operation, such as `CONTEXT_FIRST_CHANGE`; those are generated later.
/// It also does not generate columns such as `COUNTER`, which are generated later, after the
/// trace has been transposed into column-major form.
fn into_row<F: Field>(self) -> [F; NUM_COLUMNS] {
let mut row = [F::ZERO; NUM_COLUMNS];
row[FILTER] = F::from_bool(self.filter);
row[TIMESTAMP] = F::from_canonical_usize(self.timestamp);
row[IS_READ] = F::from_bool(self.kind == Read);
let MemoryAddress {
context,
segment,
virt,
} = self.address;
row[ADDR_CONTEXT] = F::from_canonical_usize(context);
row[ADDR_SEGMENT] = F::from_canonical_usize(segment);
row[ADDR_VIRTUAL] = F::from_canonical_usize(virt);
// The value written to R0 register should be ignored as 0
let value = if (self.kind == Write)
&& (context == 0)
&& (segment == (Segment::RegisterFile as usize))
&& (virt == 0)
{
0_u32
} else {
self.value
};
row[value_limb(0)] = F::from_canonical_u32(value);
/*
for j in 0..VALUE_LIMBS {
row[value_limb(j)] = F::from_canonical_u32(self.value >> (j * 32));
}
*/
row
}
}
/// Generates the `_FIRST_CHANGE` columns and the `RANGE_CHECK` column in the trace.
pub fn generate_first_change_flags_and_rc<F: RichField>(trace_rows: &mut [[F; NUM_COLUMNS]]) {
let num_ops = trace_rows.len();
for idx in 0..num_ops - 1 {
let row = trace_rows[idx].as_slice();
let next_row = trace_rows[idx + 1].as_slice();
let context = row[ADDR_CONTEXT];
let segment = row[ADDR_SEGMENT];
let virt = row[ADDR_VIRTUAL];
let timestamp = row[TIMESTAMP];
let next_context = next_row[ADDR_CONTEXT];
let next_segment = next_row[ADDR_SEGMENT];
let next_virt = next_row[ADDR_VIRTUAL];
let next_timestamp = next_row[TIMESTAMP];
let context_changed = context != next_context;
let segment_changed = segment != next_segment;
let virtual_changed = virt != next_virt;
let context_first_change = context_changed;
let segment_first_change = segment_changed && !context_first_change;
let virtual_first_change =
virtual_changed && !segment_first_change && !context_first_change;
let row = trace_rows[idx].as_mut_slice();
row[CONTEXT_FIRST_CHANGE] = F::from_bool(context_first_change);
row[SEGMENT_FIRST_CHANGE] = F::from_bool(segment_first_change);
row[VIRTUAL_FIRST_CHANGE] = F::from_bool(virtual_first_change);
row[RANGE_CHECK] = if context_first_change {
next_context - context - F::ONE
} else if segment_first_change {
next_segment - segment - F::ONE
} else if virtual_first_change {
next_virt - virt - F::ONE
} else {
next_timestamp - timestamp
};
assert!(
row[RANGE_CHECK].to_canonical_u64() < num_ops as u64,
"Range check of {} is too large. Bug in fill_gaps?",
row[RANGE_CHECK]
);
}
}
impl<F: RichField + Extendable<D>, const D: usize> MemoryStark<F, D> {
/// Generate most of the trace rows. Excludes a few columns like `COUNTER`, which are generated
/// later, after transposing to column-major form.
fn generate_trace_row_major(&self, memory_ops: &mut Vec<MemoryOp>) -> Vec<[F; NUM_COLUMNS]> {
// fill_gaps expects an ordered list of operations.
memory_ops.sort_by_key(MemoryOp::sorting_key);
Self::fill_gaps(memory_ops);
Self::pad_memory_ops(memory_ops);
// fill_gaps may have added operations at the end which break the order, so sort again.
memory_ops.sort_by_key(MemoryOp::sorting_key);
let mut trace_rows = memory_ops
.into_par_iter()
.map(|op| op.into_row())
.collect::<Vec<_>>();
generate_first_change_flags_and_rc(trace_rows.as_mut_slice());
trace_rows
}
/// Generates the `COUNTER`, `RANGE_CHECK` and `FREQUENCIES` columns, given a
/// trace in column-major form.
fn generate_trace_col_major(trace_col_vecs: &mut [Vec<F>]) {
let height = trace_col_vecs[0].len();
trace_col_vecs[COUNTER] = (0..height).map(|i| F::from_canonical_usize(i)).collect();
for i in 0..height {
let x = trace_col_vecs[RANGE_CHECK][i].to_canonical_u64() as usize;
trace_col_vecs[FREQUENCIES][x] += F::ONE;
}
}
/// This memory STARK orders rows by `(context, segment, virt, timestamp)`. To enforce the
/// ordering, it range checks the delta of the first field that changed.
///
/// This method adds some dummy operations to ensure that none of these range checks will be too
/// large, i.e. that they will all be smaller than the number of rows, allowing them to be
/// checked easily with a single lookup.
///
/// For example, say there are 32 memory operations, and a particular address is accessed at
/// timestamps 20 and 100. 80 would fail the range check, so this method would add two dummy
/// reads to the same address, say at timestamps 50 and 80.
fn fill_gaps(memory_ops: &mut Vec<MemoryOp>) {
    // Largest delta a single lookup into the 0..num_rows counter can verify.
    // (num_rows ends up being the padded power of two.)
    let max_rc = memory_ops.len().next_power_of_two() - 1;
    // Iterate over a snapshot of consecutive pairs; dummy ops are appended
    // out of order and the caller re-sorts afterwards.
    for (mut curr, next) in memory_ops.clone().into_iter().tuple_windows() {
        if curr.address.context != next.address.context
            || curr.address.segment != next.address.segment
        {
            // We won't bother to check if there's a large context gap, because there can't be
            // more than 500 contexts or so, as explained here:
            // https://notes.ethereum.org/@vbuterin/proposals_to_adjust_memory_gas_costs
            // Similarly, the number of possible segments is a small constant, so any gap must
            // be small. max_rc will always be much larger, as just bootloading the kernel will
            // trigger thousands of memory operations.
        } else if curr.address.virt != next.address.virt {
            // Bridge a large virtual-address gap with dummy reads spaced
            // `max_rc + 1` apart, so each virt delta passes the range check.
            // NOTE(review): the dummy read is created with timestamp 0 and
            // value 0 — presumably acceptable because the timestamp delta is
            // only range-checked when the address is unchanged; confirm
            // against the ordering constraints in `eval_packed_generic`.
            while next.address.virt - curr.address.virt - 1 > max_rc {
                let mut dummy_address = curr.address;
                dummy_address.virt += max_rc + 1;
                let dummy_read = MemoryOp::new_dummy_read(dummy_address, 0, 0);
                memory_ops.push(dummy_read);
                curr = dummy_read;
            }
        } else {
            // Same address: bridge a large timestamp gap, repeating the
            // current value so the read-consistency constraint still holds.
            while next.timestamp - curr.timestamp > max_rc {
                let dummy_read =
                    MemoryOp::new_dummy_read(curr.address, curr.timestamp + max_rc, curr.value);
                memory_ops.push(dummy_read);
                curr = dummy_read;
            }
        }
    }
}
/// Pads the operation list up to the next power of two by replicating the
/// final operation as a dummy read: the filter is cleared so the row is
/// ignored by CTLs, and the kind is forced to `Read` since dummy operations
/// must be reads.
fn pad_memory_ops(memory_ops: &mut Vec<MemoryOp>) {
    let &last_op = memory_ops.last().expect("No memory ops?");
    let padding_op = MemoryOp {
        filter: false,
        kind: Read,
        ..last_op
    };
    let padded_len = memory_ops.len().next_power_of_two();
    memory_ops.resize(padded_len, padding_op);
}
/// Builds the full memory trace as per-column polynomial values.
///
/// The trace is first generated row-major, then transposed so the final
/// columns (counter / range-check frequencies) can be filled column-major.
pub(crate) fn generate_trace(
    &self,
    memory_ops: &mut Vec<MemoryOp>,
) -> Vec<PolynomialValues<F>> {
    let rows = self.generate_trace_row_major(memory_ops);
    let row_vecs: Vec<Vec<F>> = rows.into_iter().map(|row| row.to_vec()).collect();
    // Switch to column-major form for the remaining generation steps.
    let mut col_vecs = transpose(&row_vecs);
    Self::generate_trace_col_major(&mut col_vecs);
    col_vecs.into_iter().map(PolynomialValues::new).collect()
}
}
impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for MemoryStark<F, D> {
    type EvaluationFrame<FE, P, const D2: usize>
        = StarkFrame<P, NUM_COLUMNS>
    where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>;
    type EvaluationFrameTarget = StarkFrame<ExtensionTarget<D>, NUM_COLUMNS>;

    /// Native (packed) evaluation of the memory-ordering constraints over a
    /// window of two consecutive rows. Must stay in sync with
    /// `eval_ext_circuit` below, which builds the same constraints in the
    /// recursive circuit.
    fn eval_packed_generic<FE, P, const D2: usize>(
        &self,
        vars: &Self::EvaluationFrame<FE, P, D2>,
        yield_constr: &mut ConstraintConsumer<P>,
    ) where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        let one = P::from(FE::ONE);
        let local_values = vars.get_local_values();
        let next_values = vars.get_next_values();
        let timestamp = local_values[TIMESTAMP];
        let addr_context = local_values[ADDR_CONTEXT];
        let addr_segment = local_values[ADDR_SEGMENT];
        let addr_virtual = local_values[ADDR_VIRTUAL];
        let value_limbs: Vec<_> = (0..VALUE_LIMBS)
            .map(|i| local_values[value_limb(i)])
            .collect();
        let next_timestamp = next_values[TIMESTAMP];
        let next_is_read = next_values[IS_READ];
        let next_addr_context = next_values[ADDR_CONTEXT];
        let next_addr_segment = next_values[ADDR_SEGMENT];
        let next_addr_virtual = next_values[ADDR_VIRTUAL];
        let next_values_limbs: Vec<_> = (0..VALUE_LIMBS)
            .map(|i| next_values[value_limb(i)])
            .collect();
        // The filter must be 0 or 1.
        let filter = local_values[FILTER];
        yield_constr.constraint(filter * (filter - P::ONES));
        // If this is a dummy row (filter is off), it must be a read. This means the prover can
        // insert reads which never appear in the CPU trace (which are harmless), but not writes.
        // FIXME: 0 register also set filter false
        /*
        let is_dummy = P::ONES - filter;
        let is_write = P::ONES - local_values[IS_READ];
        yield_constr.constraint(is_dummy * is_write);
        */
        let context_first_change = local_values[CONTEXT_FIRST_CHANGE];
        let segment_first_change = local_values[SEGMENT_FIRST_CHANGE];
        let virtual_first_change = local_values[VIRTUAL_FIRST_CHANGE];
        // On a valid row exactly one of the three first-change flags or
        // `address_unchanged` is 1 (enforced by the booleanity checks below).
        let address_unchanged =
            one - context_first_change - segment_first_change - virtual_first_change;
        let range_check = local_values[RANGE_CHECK];
        let not_context_first_change = one - context_first_change;
        let not_segment_first_change = one - segment_first_change;
        let not_virtual_first_change = one - virtual_first_change;
        let not_address_unchanged = one - address_unchanged;
        // First set of ordering constraint: first_change flags are boolean.
        yield_constr.constraint(context_first_change * not_context_first_change);
        yield_constr.constraint(segment_first_change * not_segment_first_change);
        yield_constr.constraint(virtual_first_change * not_virtual_first_change);
        yield_constr.constraint(address_unchanged * not_address_unchanged);
        // Second set of ordering constraints: no change before the column corresponding to the nonzero first_change flag.
        yield_constr
            .constraint_transition(segment_first_change * (next_addr_context - addr_context));
        yield_constr
            .constraint_transition(virtual_first_change * (next_addr_context - addr_context));
        yield_constr
            .constraint_transition(virtual_first_change * (next_addr_segment - addr_segment));
        yield_constr.constraint_transition(address_unchanged * (next_addr_context - addr_context));
        yield_constr.constraint_transition(address_unchanged * (next_addr_segment - addr_segment));
        yield_constr.constraint_transition(address_unchanged * (next_addr_virtual - addr_virtual));
        // Third set of ordering constraints: range-check difference in the column that should be increasing.
        let computed_range_check = context_first_change * (next_addr_context - addr_context - one)
            + segment_first_change * (next_addr_segment - addr_segment - one)
            + virtual_first_change * (next_addr_virtual - addr_virtual - one)
            + address_unchanged * (next_timestamp - timestamp);
        yield_constr.constraint_transition(range_check - computed_range_check);
        // Enumerate purportedly-ordered log.
        // A read from an unchanged address must return the previous value.
        for i in 0..VALUE_LIMBS {
            yield_constr.constraint_transition(
                next_is_read * address_unchanged * (next_values_limbs[i] - value_limbs[i]),
            );
        }
    }

    /// Recursive-circuit mirror of `eval_packed_generic`; every constraint
    /// here must match its native counterpart exactly, or recursive proofs
    /// will diverge from native verification.
    fn eval_ext_circuit(
        &self,
        builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
        vars: &Self::EvaluationFrameTarget,
        yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    ) {
        let one = builder.one_extension();
        let local_values = vars.get_local_values();
        let next_values = vars.get_next_values();
        let addr_context = local_values[ADDR_CONTEXT];
        let addr_segment = local_values[ADDR_SEGMENT];
        let addr_virtual = local_values[ADDR_VIRTUAL];
        let value_limbs: Vec<_> = (0..VALUE_LIMBS)
            .map(|i| local_values[value_limb(i)])
            .collect();
        let timestamp = local_values[TIMESTAMP];
        let next_addr_context = next_values[ADDR_CONTEXT];
        let next_addr_segment = next_values[ADDR_SEGMENT];
        let next_addr_virtual = next_values[ADDR_VIRTUAL];
        let next_values_limbs: Vec<_> = (0..VALUE_LIMBS)
            .map(|i| next_values[value_limb(i)])
            .collect();
        let next_is_read = next_values[IS_READ];
        let next_timestamp = next_values[TIMESTAMP];
        // The filter must be 0 or 1: filter * filter - filter == 0.
        let filter = local_values[FILTER];
        let constraint = builder.mul_sub_extension(filter, filter, filter);
        yield_constr.constraint(builder, constraint);
        // If this is a dummy row (filter is off), it must be a read. This means the prover can
        // insert reads which never appear in the CPU trace (which are harmless), but not writes.
        // FIXME: 0 register also set filter false
        /*
        let is_dummy = builder.sub_extension(one, filter);
        let is_write = builder.sub_extension(one, local_values[IS_READ]);
        let is_dummy_write = builder.mul_extension(is_dummy, is_write);
        yield_constr.constraint(builder, is_dummy_write);
        */
        let context_first_change = local_values[CONTEXT_FIRST_CHANGE];
        let segment_first_change = local_values[SEGMENT_FIRST_CHANGE];
        let virtual_first_change = local_values[VIRTUAL_FIRST_CHANGE];
        // address_unchanged = 1 - context_first_change - segment_first_change
        //                       - virtual_first_change
        let address_unchanged = {
            let mut cur = builder.sub_extension(one, context_first_change);
            cur = builder.sub_extension(cur, segment_first_change);
            builder.sub_extension(cur, virtual_first_change)
        };
        let range_check = local_values[RANGE_CHECK];
        let not_context_first_change = builder.sub_extension(one, context_first_change);
        let not_segment_first_change = builder.sub_extension(one, segment_first_change);
        let not_virtual_first_change = builder.sub_extension(one, virtual_first_change);
        let not_address_unchanged = builder.sub_extension(one, address_unchanged);
        let addr_context_diff = builder.sub_extension(next_addr_context, addr_context);
        let addr_segment_diff = builder.sub_extension(next_addr_segment, addr_segment);
        let addr_virtual_diff = builder.sub_extension(next_addr_virtual, addr_virtual);
        // First set of ordering constraint: traces are boolean.
        let context_first_change_bool =
            builder.mul_extension(context_first_change, not_context_first_change);
        yield_constr.constraint(builder, context_first_change_bool);
        let segment_first_change_bool =
            builder.mul_extension(segment_first_change, not_segment_first_change);
        yield_constr.constraint(builder, segment_first_change_bool);
        let virtual_first_change_bool =
            builder.mul_extension(virtual_first_change, not_virtual_first_change);
        yield_constr.constraint(builder, virtual_first_change_bool);
        let address_unchanged_bool =
            builder.mul_extension(address_unchanged, not_address_unchanged);
        yield_constr.constraint(builder, address_unchanged_bool);
        // Second set of ordering constraints: no change before the column corresponding to the nonzero first_change flag.
        let segment_first_change_check =
            builder.mul_extension(segment_first_change, addr_context_diff);
        yield_constr.constraint_transition(builder, segment_first_change_check);
        let virtual_first_change_check_1 =
            builder.mul_extension(virtual_first_change, addr_context_diff);
        yield_constr.constraint_transition(builder, virtual_first_change_check_1);
        let virtual_first_change_check_2 =
            builder.mul_extension(virtual_first_change, addr_segment_diff);
        yield_constr.constraint_transition(builder, virtual_first_change_check_2);
        let address_unchanged_check_1 = builder.mul_extension(address_unchanged, addr_context_diff);
        yield_constr.constraint_transition(builder, address_unchanged_check_1);
        let address_unchanged_check_2 = builder.mul_extension(address_unchanged, addr_segment_diff);
        yield_constr.constraint_transition(builder, address_unchanged_check_2);
        let address_unchanged_check_3 = builder.mul_extension(address_unchanged, addr_virtual_diff);
        yield_constr.constraint_transition(builder, address_unchanged_check_3);
        // Third set of ordering constraints: range-check difference in the column that should be increasing.
        let context_diff = {
            let diff = builder.sub_extension(next_addr_context, addr_context);
            builder.sub_extension(diff, one)
        };
        let segment_diff = {
            let diff = builder.sub_extension(next_addr_segment, addr_segment);
            builder.sub_extension(diff, one)
        };
        let segment_range_check = builder.mul_extension(segment_first_change, segment_diff);
        let virtual_diff = {
            let diff = builder.sub_extension(next_addr_virtual, addr_virtual);
            builder.sub_extension(diff, one)
        };
        let virtual_range_check = builder.mul_extension(virtual_first_change, virtual_diff);
        let timestamp_diff = builder.sub_extension(next_timestamp, timestamp);
        let timestamp_range_check = builder.mul_extension(address_unchanged, timestamp_diff);
        let computed_range_check = {
            // context_range_check = context_first_change * context_diff
            let mut sum =
                builder.mul_add_extension(context_first_change, context_diff, segment_range_check);
            sum = builder.add_extension(sum, virtual_range_check);
            builder.add_extension(sum, timestamp_range_check)
        };
        let range_check_diff = builder.sub_extension(range_check, computed_range_check);
        yield_constr.constraint_transition(builder, range_check_diff);
        // Enumerate purportedly-ordered log.
        // A read from an unchanged address must return the previous value.
        for i in 0..VALUE_LIMBS {
            let value_diff = builder.sub_extension(next_values_limbs[i], value_limbs[i]);
            let zero_if_read = builder.mul_extension(address_unchanged, value_diff);
            let read_constraint = builder.mul_extension(next_is_read, zero_if_read);
            yield_constr.constraint_transition(builder, read_constraint);
        }
    }

    /// Maximum constraint degree: the read-consistency constraint is a
    /// product of three trace columns.
    fn constraint_degree(&self) -> usize {
        3
    }

    /// Range-check lookup: every RANGE_CHECK value must appear in the
    /// COUNTER column (0..num_rows), with multiplicities in FREQUENCIES.
    fn lookups(&self) -> Vec<Lookup<F>> {
        vec![Lookup {
            columns: vec![Column::single(RANGE_CHECK)],
            table_column: Column::single(COUNTER),
            frequencies_column: Column::single(FREQUENCIES),
            filter_columns: vec![None],
        }]
    }
}
#[cfg(test)]
pub(crate) mod tests {
    use anyhow::Result;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
    use crate::memory::memory_stark::MemoryStark;
    use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};

    /// Runs the shared low-degree check on the memory STARK, verifying the
    /// declared `constraint_degree` against the actual constraints.
    #[test]
    fn test_stark_degree() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = MemoryStark<F, D>;
        let stark = S {
            f: Default::default(),
        };
        test_stark_low_degree(stark)
    }

    /// Runs the shared check that the recursive-circuit constraints agree
    /// with the native (packed) constraints.
    #[test]
    fn test_stark_circuit() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = MemoryStark<F, D>;
        let stark = S {
            f: Default::default(),
        };
        test_stark_circuit_constraints::<F, C, S, D>(stark)
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/memory/segments.rs | prover/src/memory/segments.rs | /// Use segment for possible register reduce optimization
#[allow(dead_code)]
#[derive(Copy, Clone, Eq, PartialEq, Hash, Ord, PartialOrd, Debug)]
pub enum Segment {
    /// Code.
    Code = 0,
    /// General purpose kernel memory, used by various kernel functions.
    /// In general, calling a helper function can result in this memory being clobbered.
    KernelGeneral = 1,
    /// Another segment for general purpose kernel use.
    KernelGeneral2 = 2,
    /// Shift table; initialised by `kernel/asm/shift.asm::init_shift_table()`.
    ShiftTable = 3,
    /// Register file.
    RegisterFile = 4,
}
impl Segment {
    /// Total number of segment kinds; must match the arity of `all()`.
    pub(crate) const COUNT: usize = 5;

    /// Returns every segment, in discriminant order.
    pub(crate) fn all() -> [Self; Self::COUNT] {
        [
            Self::Code,
            Self::KernelGeneral,
            Self::KernelGeneral2,
            Self::ShiftTable,
            Self::RegisterFile,
        ]
    }

    /// The variable name that gets passed into kernel assembly code.
    pub(crate) fn var_name(&self) -> &'static str {
        match self {
            Segment::Code => "SEGMENT_CODE",
            Segment::KernelGeneral => "SEGMENT_KERNEL_GENERAL",
            Segment::KernelGeneral2 => "SEGMENT_KERNEL_GENERAL_2",
            Segment::ShiftTable => "SEGMENT_SHIFT_TABLE",
            // NOTE(review): the string is inconsistent with the variant name
            // ("REGISTERFILE_TABLE" vs RegisterFile); kernel assembly
            // presumably depends on this exact string, so do not "fix" it
            // here without updating the asm side.
            Segment::RegisterFile => "SEGMENT_REGISTERFILE_TABLE",
        }
    }

    /// Bit width associated with values in this segment.
    /// NOTE(review): 256 for the kernel-general segments suggests values
    /// wider than one machine word — confirm against the memory model.
    #[allow(dead_code)]
    pub(crate) fn bit_range(&self) -> usize {
        match self {
            Segment::Code => 32,
            Segment::KernelGeneral => 256,
            Segment::KernelGeneral2 => 256,
            Segment::ShiftTable => 32,
            Segment::RegisterFile => 32,
        }
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/memory/mod.rs | prover/src/memory/mod.rs | pub mod columns;
pub mod memory_stark;
pub mod segments;
// TODO: Move to CPU module, now that channels have been removed from the memory table.
/// Number of memory bus channels; mirrors the CPU-side constant.
pub(crate) const NUM_CHANNELS: usize = crate::cpu::membus::NUM_CHANNELS;
/// Number of limbs used to represent one memory value.
pub(crate) const VALUE_LIMBS: usize = 1;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_compress/columns.rs | prover/src/sha_compress/columns.rs | use crate::sha_compress::not_operation::NotOperation;
use crate::sha_compress::wrapping_add_2::WrappingAdd2Op;
use crate::sha_compress::wrapping_add_5::WrappingAdd5Op;
use crate::sha_extend::rotate_right::RotateRightOp;
use crate::util::{indices_arr, transmute_no_compile_time_size_checks};
use std::borrow::{Borrow, BorrowMut};
use std::mem::transmute;
/// Column layout for the SHA-256 compress STARK: a structured view over one
/// flat trace row of `NUM_SHA_COMPRESS_COLUMNS` cells.
///
/// `#[repr(C)]` pins the fields to declaration order. The conversions below
/// `transmute` between this view and `[T; NUM_SHA_COMPRESS_COLUMNS]`; with
/// the default representation the compiler is free to reorder fields, so the
/// layout must be fixed explicitly for those transmutes to be well-defined.
/// NOTE(review): for full rigor the nested op structs (`NotOperation`,
/// `RotateRightOp`, `WrappingAdd*Op`) should carry `#[repr(C)]` as well.
#[repr(C)]
pub(crate) struct ShaCompressColumnsView<T: Copy> {
    /// a,b,c,d,e,f,g,h in le bytes form
    pub state: [T; 32],
    pub e_not: NotOperation<T>,
    /// w[i] and key[i]
    pub w_i: [T; 4],
    pub k_i: [T; 4],
    /// Intermediate values
    pub s_1_inter: [T; 4],
    pub s_1: [T; 4],
    pub e_and_f: [T; 4],
    pub e_not_and_g: [T; 4],
    pub ch: [T; 4],
    pub s_0_inter: [T; 4],
    pub s_0: [T; 4],
    pub a_and_b: [T; 4],
    pub a_and_c: [T; 4],
    pub b_and_c: [T; 4],
    pub maj_inter: [T; 4],
    pub maj: [T; 4],
    pub e_rr_6: RotateRightOp<T>,
    pub e_rr_11: RotateRightOp<T>,
    pub e_rr_25: RotateRightOp<T>,
    pub a_rr_2: RotateRightOp<T>,
    pub a_rr_13: RotateRightOp<T>,
    pub a_rr_22: RotateRightOp<T>,
    pub temp2: WrappingAdd2Op<T>,
    pub d_add_temp1: WrappingAdd2Op<T>,
    pub temp1_add_temp2: WrappingAdd2Op<T>,
    // The timestamp at which inputs should be read from memory.
    pub timestamp: T,
    pub segment: T,
    pub context: T,
    pub w_i_virt: T,
    pub temp1: WrappingAdd5Op<T>,
    // round number (one-hot)
    pub round: [T; 65],
}
/// Number of trace columns: the byte-size of the view instantiated with
/// `u8`, i.e. one byte per cell.
pub const NUM_SHA_COMPRESS_COLUMNS: usize = size_of::<ShaCompressColumnsView<u8>>();
impl<T: Copy> From<[T; NUM_SHA_COMPRESS_COLUMNS]> for ShaCompressColumnsView<T> {
    /// Reinterprets a flat row of cells as the structured column view.
    fn from(value: [T; NUM_SHA_COMPRESS_COLUMNS]) -> Self {
        // SAFETY: the view and the array contain the same number of `T`
        // cells; the size/layout assumption is shared with every other
        // transmute in this file.
        unsafe { transmute_no_compile_time_size_checks(value) }
    }
}
impl<T: Copy> From<ShaCompressColumnsView<T>> for [T; NUM_SHA_COMPRESS_COLUMNS] {
    /// Flattens the structured column view back into a raw trace row.
    fn from(value: ShaCompressColumnsView<T>) -> Self {
        // SAFETY: same size/layout assumption as the inverse conversion above.
        unsafe { transmute_no_compile_time_size_checks(value) }
    }
}
impl<T: Copy> Borrow<ShaCompressColumnsView<T>> for [T; NUM_SHA_COMPRESS_COLUMNS] {
    /// Borrows a raw row as the structured view without copying.
    fn borrow(&self) -> &ShaCompressColumnsView<T> {
        // SAFETY: reference-to-reference transmute of two equally-sized types.
        unsafe { transmute(self) }
    }
}
impl<T: Copy> BorrowMut<ShaCompressColumnsView<T>> for [T; NUM_SHA_COMPRESS_COLUMNS] {
    /// Mutably borrows a raw row as the structured view without copying.
    fn borrow_mut(&mut self) -> &mut ShaCompressColumnsView<T> {
        // SAFETY: reference-to-reference transmute of two equally-sized types.
        unsafe { transmute(self) }
    }
}
impl<T: Copy> Borrow<[T; NUM_SHA_COMPRESS_COLUMNS]> for ShaCompressColumnsView<T> {
    /// Borrows the structured view as a raw row without copying.
    fn borrow(&self) -> &[T; NUM_SHA_COMPRESS_COLUMNS] {
        // SAFETY: reference-to-reference transmute of two equally-sized types.
        unsafe { transmute(self) }
    }
}
impl<T: Copy> BorrowMut<[T; NUM_SHA_COMPRESS_COLUMNS]> for ShaCompressColumnsView<T> {
    /// Mutably borrows the structured view as a raw row without copying.
    fn borrow_mut(&mut self) -> &mut [T; NUM_SHA_COMPRESS_COLUMNS] {
        // SAFETY: reference-to-reference transmute of two equally-sized types.
        unsafe { transmute(self) }
    }
}
impl<T: Copy + Default> Default for ShaCompressColumnsView<T> {
    /// A view with every cell set to `T::default()`.
    fn default() -> Self {
        [T::default(); NUM_SHA_COMPRESS_COLUMNS].into()
    }
}
/// Builds the column-index map: a view in which every cell holds its own flat
/// column index, obtained by transmuting the index array from `indices_arr`.
const fn make_col_map() -> ShaCompressColumnsView<usize> {
    let indices_arr = indices_arr::<NUM_SHA_COMPRESS_COLUMNS>();
    // SAFETY: same size/layout assumption as the `From` conversions above.
    unsafe {
        transmute::<[usize; NUM_SHA_COMPRESS_COLUMNS], ShaCompressColumnsView<usize>>(indices_arr)
    }
}
/// Map from named columns to flat column indices, shared by the CTL helpers.
pub(crate) const SHA_COMPRESS_COL_MAP: ShaCompressColumnsView<usize> = make_col_map();
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_compress/wrapping_add_2.rs | prover/src/sha_compress/wrapping_add_2.rs | use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
/// Witness columns for one 32-bit wrapping addition `a + b`.
pub struct WrappingAdd2Op<T> {
    /// The result of `a + b`, stored as little-endian bytes.
    pub value: [T; 4],
    /// One-hot encoding of the carry out of bit 31: carry[c] == 1 iff the
    /// carry equals c (0 or 1).
    pub carry: [T; 2],
}
impl<F: Field> WrappingAdd2Op<F> {
    /// Populates the witness columns for `a.wrapping_add(b)` and returns the
    /// wrapped 32-bit sum.
    pub fn generate_trace(&mut self, a: u32, b: u32) -> u32 {
        let expected = a.wrapping_add(b);
        // Exact (non-wrapping) sum in u64, used to extract the carry bit.
        let overflowed_result = a as u64 + b as u64;
        let carry = overflowed_result >> 32;
        // Sanity check: carry * 2^32 + wrapped sum reconstructs the exact sum.
        assert_eq!(carry * 2_u64.pow(32) + expected as u64, overflowed_result);
        assert!(carry < 2);
        // One-hot encode the carry.
        self.carry = [F::ZERO; 2];
        self.carry[carry as usize] = F::ONE;
        // Result stored as little-endian bytes, one field element per byte.
        self.value = expected.to_le_bytes().map(F::from_canonical_u8);
        expected
    }
}
/// Constraints enforcing that `cols` is a valid witness for the 32-bit
/// wrapping addition of `a` and `b` (both given as little-endian bytes).
/// Returns one polynomial per constraint; each must evaluate to zero.
pub(crate) fn wrapping_add_2_packed_constraints<P: PackedField>(
    a: [P; 4],
    b: [P; 4],
    cols: &WrappingAdd2Op<P>,
) -> Vec<P> {
    let mut result = vec![];
    let two_pow_8 = P::from(P::Scalar::from_canonical_u32(2u32.pow(8)));
    let two_pow_16 = P::from(P::Scalar::from_canonical_u32(2u32.pow(16)));
    let two_pow_24 = P::from(P::Scalar::from_canonical_u32(2u32.pow(24)));
    let two_pow_32 = P::from(P::Scalar::from_canonical_u64(2u64.pow(32)));
    // Recombine the claimed result bytes into a single 32-bit value.
    let wrapping_added_result = cols.value[0]
        + two_pow_8 * cols.value[1]
        + two_pow_16 * cols.value[2]
        + two_pow_24 * cols.value[3];
    // Each value in carry_{0,1} is 0 or 1, and exactly one of them is 1 per digit.
    for i in 0..2 {
        result.push(cols.carry[i] * (P::ONES - cols.carry[i]));
    }
    result.push(cols.carry[0] + cols.carry[1] - P::ONES);
    // Calculates carry from carry_{0,1}.
    let carry = cols.carry[1];
    // Wrapping added constraint:
    // (a + b), byte-recombined, must equal result + carry * 2^32.
    let overflowed_result = (a[0] + b[0])
        + (a[1] + b[1]) * two_pow_8
        + (a[2] + b[2]) * two_pow_16
        + (a[3] + b[3]) * two_pow_24;
    let constraint = overflowed_result - carry * two_pow_32 - wrapping_added_result;
    result.push(constraint);
    result
}
/// Recursive-circuit version of `wrapping_add_2_packed_constraints`; must
/// build exactly the same constraints, gate by gate.
pub(crate) fn wrapping_add_2_ext_circuit_constraints<
    F: RichField + Extendable<D>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    a: [ExtensionTarget<D>; 4],
    b: [ExtensionTarget<D>; 4],
    cols: &WrappingAdd2Op<ExtensionTarget<D>>,
) -> Vec<ExtensionTarget<D>> {
    let mut result = vec![];
    let one = builder.one_extension();
    let two_pow_8 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(8)));
    let two_pow_16 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(16)));
    let two_pow_24 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(24)));
    let two_pow_32 = builder.constant_extension(F::Extension::from_canonical_u64(2u64.pow(32)));
    // Recombine the claimed result bytes into a single 32-bit value.
    let tmp = builder.mul_extension(cols.value[1], two_pow_8);
    let tmp2 = builder.mul_extension(cols.value[2], two_pow_16);
    let tmp3 = builder.mul_extension(cols.value[3], two_pow_24);
    let wrapping_added_result = builder.add_many_extension([cols.value[0], tmp, tmp2, tmp3]);
    // Each value in carry_{0,1} is 0 or 1, and exactly one of them is 1 per digit.
    for i in 0..2 {
        let tmp = builder.sub_extension(one, cols.carry[i]);
        result.push(builder.mul_extension(cols.carry[i], tmp));
    }
    let tmp = builder.add_many_extension(cols.carry);
    result.push(builder.sub_extension(tmp, one));
    // Calculates carry from carry_{0,1}.
    let carry = cols.carry[1];
    // Wrapping added constraint:
    // (a + b), byte-recombined, must equal result + carry * 2^32.
    let byte_0 = builder.add_many_extension([a[0], b[0]]);
    let byte_1 = builder.add_many_extension([a[1], b[1]]);
    let byte_2 = builder.add_many_extension([a[2], b[2]]);
    let byte_3 = builder.add_many_extension([a[3], b[3]]);
    let tmp1 = builder.mul_extension(byte_1, two_pow_8);
    let tmp2 = builder.mul_extension(byte_2, two_pow_16);
    let tmp3 = builder.mul_extension(byte_3, two_pow_24);
    let overflowed_result = builder.add_many_extension([byte_0, tmp1, tmp2, tmp3]);
    let carry_mul = builder.mul_extension(carry, two_pow_32);
    let computed_overflowed_result = builder.add_extension(carry_mul, wrapping_added_result);
    let constraint = builder.sub_extension(overflowed_result, computed_overflowed_result);
    result.push(constraint);
    result
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_compress/sha_compress_stark.rs | prover/src/sha_compress/sha_compress_stark.rs | use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::sha_compress::columns::{
ShaCompressColumnsView, NUM_SHA_COMPRESS_COLUMNS, SHA_COMPRESS_COL_MAP,
};
use crate::sha_compress::logic::{equal_ext_circuit_constraints, equal_packed_constraint};
use crate::sha_compress::not_operation::{
not_operation_ext_circuit_constraints, not_operation_packed_constraints,
};
use crate::sha_compress::wrapping_add_2::{
wrapping_add_2_ext_circuit_constraints, wrapping_add_2_packed_constraints,
};
use crate::sha_compress::wrapping_add_5::{
wrapping_add_5_ext_circuit_constraints, wrapping_add_5_packed_constraints,
};
use crate::sha_compress_sponge::constants::{NUM_COMPRESS_ROWS, SHA_COMPRESS_K_LE_BYTES};
use crate::sha_extend::logic::get_input_range_4;
use crate::sha_extend::rotate_right::{
rotate_right_ext_circuit_constraint, rotate_right_packed_constraints,
};
use crate::stark::Stark;
use crate::util::trace_rows_to_poly_values;
use crate::witness::memory::MemoryAddress;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use std::borrow::Borrow;
use std::marker::PhantomData;
/// Number of one-hot `round` columns: 64 compression rounds plus one extra
/// row (the output row selected by `ctl_filter_outputs`).
pub const NUM_ROUND_CONSTANTS: usize = 65;
/// Bytes of input per compression row: 8 state words + w_i + k_i (4 bytes
/// each) plus one byte for the round index (read from `inputs[40]`).
pub const NUM_INPUTS: usize = 10 * 4 + 1; // (states (a, b, ..., h) + w_i + key_i) + i
/// CTL data for the input side: the 32 state bytes followed by the memory
/// coordinates (timestamp, segment, context, w_i address) of the w_i read.
///
/// Uses `Column::singles(&cols.state)` directly (as `ctl_data_outputs`
/// already does); the previous `[cols.state.as_slice()].concat()` allocated
/// an intermediate `Vec` just to iterate a single slice.
pub fn ctl_data_inputs<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    let mut res: Vec<_> = Column::singles(&cols.state).collect();
    res.extend(Column::singles([
        cols.timestamp,
        cols.segment,
        cols.context,
        cols.w_i_virt,
    ]));
    res
}
/// CTL data for the output side: the 32 updated state bytes, then the
/// timestamp.
pub fn ctl_data_outputs<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    Column::singles(&cols.state)
        .chain(std::iter::once(Column::single(cols.timestamp)))
        .collect()
}
// logic
/// Logic-table lookup: s_1_inter = (e >>> 6) XOR (e >>> 11).
pub(crate) fn ctl_s_1_inter_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
        Column::le_bytes(cols.e_rr_6.value),
        Column::le_bytes(cols.e_rr_11.value),
        Column::le_bytes(cols.s_1_inter),
    ]
}
/// Logic-table lookup: s_1 = s_1_inter XOR (e >>> 25).
pub(crate) fn ctl_s_1_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
        Column::le_bytes(cols.s_1_inter),
        Column::le_bytes(cols.e_rr_25.value),
        Column::le_bytes(cols.s_1),
    ]
}
/// Logic-table lookup: e_and_f = e AND f.
pub(crate) fn ctl_e_and_f_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100100 * (1 << 6))), // is_and
        Column::le_bytes(&cols.state[get_input_range_4(4)]),
        Column::le_bytes(&cols.state[get_input_range_4(5)]),
        Column::le_bytes(cols.e_and_f),
    ]
}
/// Logic-table lookup: e_not_and_g = (NOT e) AND g.
pub(crate) fn ctl_not_e_and_g_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100100 * (1 << 6))), // is_and
        Column::le_bytes(cols.e_not.value),
        Column::le_bytes(&cols.state[get_input_range_4(6)]),
        Column::le_bytes(cols.e_not_and_g),
    ]
}
/// Logic-table lookup: ch = e_and_f XOR e_not_and_g.
pub(crate) fn ctl_ch_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
        Column::le_bytes(cols.e_and_f),
        Column::le_bytes(cols.e_not_and_g),
        Column::le_bytes(cols.ch),
    ]
}
/// Logic-table lookup: s_0_inter = (a >>> 2) XOR (a >>> 13).
pub(crate) fn ctl_s_0_inter_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
        Column::le_bytes(cols.a_rr_2.value),
        Column::le_bytes(cols.a_rr_13.value),
        Column::le_bytes(cols.s_0_inter),
    ]
}
/// Logic-table lookup: s_0 = s_0_inter XOR (a >>> 22).
pub(crate) fn ctl_s_0_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
        Column::le_bytes(cols.s_0_inter),
        Column::le_bytes(cols.a_rr_22.value),
        Column::le_bytes(cols.s_0),
    ]
}
/// Logic-table lookup: a_and_b = a AND b.
pub(crate) fn ctl_a_and_b_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100100 * (1 << 6))), // is_and
        Column::le_bytes(&cols.state[get_input_range_4(0)]),
        Column::le_bytes(&cols.state[get_input_range_4(1)]),
        Column::le_bytes(cols.a_and_b),
    ]
}
/// Logic-table lookup: a_and_c = a AND c.
pub(crate) fn ctl_a_and_c_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100100 * (1 << 6))), // is_and
        Column::le_bytes(&cols.state[get_input_range_4(0)]),
        Column::le_bytes(&cols.state[get_input_range_4(2)]),
        Column::le_bytes(cols.a_and_c),
    ]
}
/// Logic-table lookup: b_and_c = b AND c.
pub(crate) fn ctl_b_and_c_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100100 * (1 << 6))), // is_and
        Column::le_bytes(&cols.state[get_input_range_4(1)]),
        Column::le_bytes(&cols.state[get_input_range_4(2)]),
        Column::le_bytes(cols.b_and_c),
    ]
}
/// Logic-table lookup: maj_inter = a_and_b XOR a_and_c.
pub(crate) fn ctl_maj_inter_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
        Column::le_bytes(cols.a_and_b),
        Column::le_bytes(cols.a_and_c),
        Column::le_bytes(cols.maj_inter),
    ]
}
/// Logic-table lookup: maj = maj_inter XOR b_and_c.
pub(crate) fn ctl_maj_looking_logic<F: Field>() -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    vec![
        Column::constant(F::from_canonical_u32(0b100110 * (1 << 6))), // is_xor
        Column::le_bytes(cols.maj_inter),
        Column::le_bytes(cols.b_and_c),
        Column::le_bytes(cols.maj),
    ]
}
// read w_i ctl
/// CTL into the memory table for the w_i read. Column order must match the
/// memory table's shape: is_read, context, segment, virt, value, timestamp
/// (checked against `memory_stark::ctl_data` at runtime).
pub(crate) fn ctl_looking_memory<F: Field>(_: usize) -> Vec<Column<F>> {
    let cols = SHA_COMPRESS_COL_MAP;
    let res = vec![
        Column::constant(F::ONE), // is_read
        Column::single(cols.context),
        Column::single(cols.segment),
        Column::single(cols.w_i_virt),
        Column::le_bytes(cols.w_i),
        Column::single(cols.timestamp),
    ];
    assert_eq!(
        res.len(),
        crate::memory::memory_stark::ctl_data::<F>().len()
    );
    res
}
/// Filter selecting the first row of each compression instance, where the
/// inputs are read.
pub fn ctl_filter_inputs<F: Field>() -> Filter<F> {
    Filter::new_simple(Column::single(SHA_COMPRESS_COL_MAP.round[0]))
}
/// Filter selecting the final row of each compression instance, where the
/// outputs are exposed.
pub fn ctl_filter_outputs<F: Field>() -> Filter<F> {
    Filter::new_simple(Column::single(
        SHA_COMPRESS_COL_MAP.round[NUM_COMPRESS_ROWS - 1],
    ))
}
/// Filter for the logic CTLs: active on every real compression round,
/// i.e. excludes the final output row and all-zero padding rows (which have
/// no round flag set).
pub fn ctl_logic_filter<F: Field>() -> Filter<F> {
    Filter::new_simple(Column::sum(
        &SHA_COMPRESS_COL_MAP.round[..NUM_COMPRESS_ROWS - 1],
    ))
}
/// STARK for the SHA-256 compression function, one trace row per round.
#[derive(Copy, Clone, Default)]
pub struct ShaCompressStark<F, const D: usize> {
    // Zero-sized marker tying the STARK to its field type.
    pub(crate) f: PhantomData<F>,
}
impl<F: RichField + Extendable<D>, const D: usize> ShaCompressStark<F, D> {
/// Builds the trace as per-column polynomial values from the given
/// (input bytes, w_i address, timestamp) tuples.
pub(crate) fn generate_trace(
    &self,
    inputs: Vec<([u8; NUM_INPUTS], MemoryAddress, usize)>,
    min_rows: usize,
) -> Vec<PolynomialValues<F>> {
    // Generate row-major witness rows, then transpose into columns.
    trace_rows_to_poly_values(self.generate_trace_rows(inputs, min_rows))
}
/// Generates one witness row per input and pads the row count up to
/// `max(len, min_rows)` rounded to the next power of two.
fn generate_trace_rows(
    &self,
    inputs_and_timestamps: Vec<([u8; NUM_INPUTS], MemoryAddress, usize)>,
    min_rows: usize,
) -> Vec<[F; NUM_SHA_COMPRESS_COLUMNS]> {
    let num_rows = inputs_and_timestamps
        .len()
        .max(min_rows)
        .next_power_of_two();
    let mut rows: Vec<[F; NUM_SHA_COMPRESS_COLUMNS]> = inputs_and_timestamps
        .iter()
        .map(|input| self.generate_trace_rows_for_compress(*input))
        .collect();
    // All-zero padding rows: no `round` flag is set, so the CTL filters
    // (which read the round columns) are off for them.
    rows.resize(num_rows, [F::ZERO; NUM_SHA_COMPRESS_COLUMNS]);
    rows
}
fn generate_trace_rows_for_compress(
&self,
input_and_timestamp: ([u8; NUM_INPUTS], MemoryAddress, usize),
) -> [F; NUM_SHA_COMPRESS_COLUMNS] {
let timestamp = input_and_timestamp.2;
let w_i_address = input_and_timestamp.1;
let inputs = input_and_timestamp.0;
let mut row = ShaCompressColumnsView::<F>::default();
row.timestamp = F::from_canonical_usize(timestamp);
row.segment = F::from_canonical_usize(w_i_address.segment);
row.context = F::from_canonical_usize(w_i_address.context);
row.w_i_virt = F::from_canonical_usize(w_i_address.virt);
let i = inputs[40] as usize;
row.round = [F::ZERO; NUM_ROUND_CONSTANTS];
row.round[i] = F::ONE;
// read inputs
row.state = inputs[0..32]
.iter()
.map(|x| F::from_canonical_u8(*x))
.collect::<Vec<F>>()
.try_into()
.unwrap();
row.w_i = inputs[get_input_range_4(8)]
.iter()
.map(|x| F::from_canonical_u8(*x))
.collect::<Vec<F>>()
.try_into()
.unwrap();
row.k_i = inputs[get_input_range_4(9)]
.iter()
.map(|x| F::from_canonical_u8(*x))
.collect::<Vec<F>>()
.try_into()
.unwrap();
// compute
let e_rr_6 = row
.e_rr_6
.generate_trace(inputs[get_input_range_4(4)].try_into().unwrap(), 6);
let e_rr_11 = row
.e_rr_11
.generate_trace(inputs[get_input_range_4(4)].try_into().unwrap(), 11);
let e_rr_25 = row
.e_rr_25
.generate_trace(inputs[get_input_range_4(4)].try_into().unwrap(), 25);
let s_1_inter = e_rr_6 ^ e_rr_11;
// log::info!("GENE: e_rr_6: {:?}, e_rr_11: {:?}, s_1_inter {:?}", e_rr_6, e_rr_11, s_1_inter);
row.s_1_inter = s_1_inter.to_le_bytes().map(F::from_canonical_u8);
let s_1 = s_1_inter ^ e_rr_25;
row.s_1 = s_1.to_le_bytes().map(F::from_canonical_u8);
let e = u32::from_le_bytes(inputs[get_input_range_4(4)].try_into().unwrap());
let f = u32::from_le_bytes(inputs[get_input_range_4(5)].try_into().unwrap());
let e_and_f = e & f;
row.e_and_f = e_and_f.to_le_bytes().map(F::from_canonical_u8);
let e_not = row
.e_not
.generate_trace(inputs[get_input_range_4(4)].try_into().unwrap());
let g = u32::from_le_bytes(inputs[get_input_range_4(6)].try_into().unwrap());
let e_not_and_g = e_not & g;
row.e_not_and_g = e_not_and_g.to_le_bytes().map(F::from_canonical_u8);
let ch = e_and_f ^ e_not_and_g;
row.ch = ch.to_le_bytes().map(F::from_canonical_u8);
let temp1 = row.temp1.generate_trace(
u32::from_le_bytes(inputs[get_input_range_4(7)].try_into().unwrap()),
s_1,
ch,
u32::from_le_bytes(inputs[get_input_range_4(9)].try_into().unwrap()),
u32::from_le_bytes(inputs[get_input_range_4(8)].try_into().unwrap()),
);
let a_rr_2 = row
.a_rr_2
.generate_trace(inputs[get_input_range_4(0)].try_into().unwrap(), 2);
let a_rr_13 = row
.a_rr_13
.generate_trace(inputs[get_input_range_4(0)].try_into().unwrap(), 13);
let a_rr_22 = row
.a_rr_22
.generate_trace(inputs[get_input_range_4(0)].try_into().unwrap(), 22);
let s_0_inter = a_rr_2 ^ a_rr_13;
let s_0 = s_0_inter ^ a_rr_22;
row.s_0_inter = s_0_inter.to_le_bytes().map(F::from_canonical_u8);
row.s_0 = s_0.to_le_bytes().map(F::from_canonical_u8);
let a = u32::from_le_bytes(inputs[get_input_range_4(0)].try_into().unwrap());
let b = u32::from_le_bytes(inputs[get_input_range_4(1)].try_into().unwrap());
let c = u32::from_le_bytes(inputs[get_input_range_4(2)].try_into().unwrap());
let a_and_b = a & b;
row.a_and_b = a_and_b.to_le_bytes().map(F::from_canonical_u8);
let a_and_c = a & c;
row.a_and_c = a_and_c.to_le_bytes().map(F::from_canonical_u8);
let b_and_c = b & c;
row.b_and_c = b_and_c.to_le_bytes().map(F::from_canonical_u8);
let maj_inter = a_and_b ^ a_and_c;
let maj = maj_inter ^ b_and_c;
row.maj_inter = maj_inter.to_le_bytes().map(F::from_canonical_u8);
row.maj = maj.to_le_bytes().map(F::from_canonical_u8);
let temp2 = row.temp2.generate_trace(s_0, maj);
// next value of e
let _ = row.d_add_temp1.generate_trace(
u32::from_le_bytes(inputs[get_input_range_4(3)].try_into().unwrap()),
temp1,
);
// next value of a
let _ = row.temp1_add_temp2.generate_trace(temp1, temp2);
row.into()
}
}
impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for ShaCompressStark<F, D> {
    type EvaluationFrame<FE, P, const D2: usize>
        = StarkFrame<P, NUM_SHA_COMPRESS_COLUMNS>
    where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>;

    type EvaluationFrameTarget = StarkFrame<ExtensionTarget<D>, NUM_SHA_COMPRESS_COLUMNS>;

    /// Native constraint evaluation over packed field values. The recursive
    /// verifier version `eval_ext_circuit` must mirror these constraints
    /// exactly.
    fn eval_packed_generic<FE, P, const D2: usize>(
        &self,
        vars: &Self::EvaluationFrame<FE, P, D2>,
        yield_constr: &mut ConstraintConsumer<P>,
    ) where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        let local_values: &[P; NUM_SHA_COMPRESS_COLUMNS] =
            vars.get_local_values().try_into().unwrap();
        let local_values: &ShaCompressColumnsView<P> = local_values.borrow();
        let next_values: &[P; NUM_SHA_COMPRESS_COLUMNS] =
            vars.get_next_values().try_into().unwrap();
        let next_values: &ShaCompressColumnsView<P> = next_values.borrow();
        // filter
        // `is_final` marks the last compression round; it must be boolean.
        let is_final = local_values.round[NUM_COMPRESS_ROWS - 1];
        yield_constr.constraint(is_final * (is_final - P::ONES));
        let not_final = P::ONES - is_final;
        // At most one round flag is set per row; an all-zero row is padding.
        let sum_round_flags = (0..NUM_COMPRESS_ROWS)
            .map(|i| local_values.round[i])
            .sum::<P>();
        yield_constr.constraint(sum_round_flags * (sum_round_flags - P::ONES));
        // check the value of k_i
        // Each byte of k_i must equal the round constant selected by the
        // one-hot round flags (enforced on non-final rounds).
        for i in 0..4 {
            let mut bit_i = P::ZEROS;
            for j in 0..64 {
                bit_i +=
                    local_values.round[j] * FE::from_canonical_u8(SHA_COMPRESS_K_LE_BYTES[j][i])
            }
            let diff = local_values.k_i[i] - bit_i;
            yield_constr.constraint(sum_round_flags * not_final * diff);
        }
        // check the rotation
        // e >>> 6, e >>> 11, e >>> 25 and a >>> 2, a >>> 13, a >>> 22.
        rotate_right_packed_constraints(
            local_values.state[get_input_range_4(4)].try_into().unwrap(),
            &local_values.e_rr_6,
            6,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(c));
        rotate_right_packed_constraints(
            local_values.state[get_input_range_4(4)].try_into().unwrap(),
            &local_values.e_rr_11,
            11,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(c));
        rotate_right_packed_constraints(
            local_values.state[get_input_range_4(4)].try_into().unwrap(),
            &local_values.e_rr_25,
            25,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(c));
        rotate_right_packed_constraints(
            local_values.state[get_input_range_4(0)].try_into().unwrap(),
            &local_values.a_rr_2,
            2,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(c));
        rotate_right_packed_constraints(
            local_values.state[get_input_range_4(0)].try_into().unwrap(),
            &local_values.a_rr_13,
            13,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(c));
        rotate_right_packed_constraints(
            local_values.state[get_input_range_4(0)].try_into().unwrap(),
            &local_values.a_rr_22,
            22,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(c));
        // The XOR, AND checks are in the logic table
        // The NOT check
        not_operation_packed_constraints(
            local_values.state[get_input_range_4(4)].try_into().unwrap(),
            &local_values.e_not,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * c));
        // wrapping add constraints
        // temp1 = h + s_1 + ch + k_i + w_i
        wrapping_add_5_packed_constraints(
            local_values.state[get_input_range_4(7)].try_into().unwrap(),
            local_values.s_1,
            local_values.ch,
            local_values.k_i,
            local_values.w_i,
            &local_values.temp1,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * c));
        // temp2 = s_0 + maj
        wrapping_add_2_packed_constraints(local_values.s_0, local_values.maj, &local_values.temp2)
            .into_iter()
            .for_each(|c| yield_constr.constraint(sum_round_flags * c));
        // d + temp1 (next value of e)
        wrapping_add_2_packed_constraints(
            local_values.state[get_input_range_4(3)].try_into().unwrap(),
            local_values.temp1.value,
            &local_values.d_add_temp1,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * c));
        // temp1 + temp2 (next value of a)
        wrapping_add_2_packed_constraints(
            local_values.temp1.value,
            local_values.temp2.value,
            &local_values.temp1_add_temp2,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * c));
        // If this is not the final step or a padding row:
        // the local and next timestamps must match.
        yield_constr.constraint(
            sum_round_flags * not_final * (next_values.timestamp - local_values.timestamp),
        );
        // the address of w_i must be increased by 4
        yield_constr.constraint(
            sum_round_flags
                * not_final
                * (next_values.w_i_virt - local_values.w_i_virt - FE::from_canonical_u8(4)),
        );
        // Output constraint when it is not the final round or padding row
        // local.temp1 + local.temp2 = next.a
        equal_packed_constraint::<P, 4>(
            local_values.temp1_add_temp2.value,
            next_values.state[get_input_range_4(0)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * not_final * c));
        // local.a = next.b
        equal_packed_constraint::<P, 4>(
            local_values.state[get_input_range_4(0)].try_into().unwrap(),
            next_values.state[get_input_range_4(1)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * not_final * c));
        // local.b = next.c
        equal_packed_constraint::<P, 4>(
            local_values.state[get_input_range_4(1)].try_into().unwrap(),
            next_values.state[get_input_range_4(2)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * not_final * c));
        // local.c = next.d
        equal_packed_constraint::<P, 4>(
            local_values.state[get_input_range_4(2)].try_into().unwrap(),
            next_values.state[get_input_range_4(3)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * not_final * c));
        // local.d + local.temp1 = next.e
        equal_packed_constraint::<P, 4>(
            local_values.d_add_temp1.value,
            next_values.state[get_input_range_4(4)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * not_final * c));
        // local.e = next.f
        equal_packed_constraint::<P, 4>(
            local_values.state[get_input_range_4(4)].try_into().unwrap(),
            next_values.state[get_input_range_4(5)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * not_final * c));
        // local.f = next.g
        equal_packed_constraint::<P, 4>(
            local_values.state[get_input_range_4(5)].try_into().unwrap(),
            next_values.state[get_input_range_4(6)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * not_final * c));
        // local.g = next.h
        equal_packed_constraint::<P, 4>(
            local_values.state[get_input_range_4(6)].try_into().unwrap(),
            next_values.state[get_input_range_4(7)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(sum_round_flags * not_final * c));
    }

    /// Recursive-circuit mirror of `eval_packed_generic`; the constraint set
    /// must stay in one-to-one correspondence with the packed version.
    fn eval_ext_circuit(
        &self,
        builder: &mut CircuitBuilder<F, D>,
        vars: &Self::EvaluationFrameTarget,
        yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    ) {
        let local_values: &[ExtensionTarget<D>; NUM_SHA_COMPRESS_COLUMNS] =
            vars.get_local_values().try_into().unwrap();
        let local_values: &ShaCompressColumnsView<ExtensionTarget<D>> = local_values.borrow();
        let next_values: &[ExtensionTarget<D>; NUM_SHA_COMPRESS_COLUMNS] =
            vars.get_next_values().try_into().unwrap();
        let next_values: &ShaCompressColumnsView<ExtensionTarget<D>> = next_values.borrow();
        let one_extension = builder.one_extension();
        // filter
        // is_final must be boolean: is_final * is_final - is_final == 0.
        let is_final = local_values.round[NUM_COMPRESS_ROWS - 1];
        let constraint = builder.mul_sub_extension(is_final, is_final, is_final);
        yield_constr.constraint(builder, constraint);
        let not_final = builder.sub_extension(one_extension, is_final);
        let sum_round_flags =
            builder.add_many_extension((0..NUM_COMPRESS_ROWS).map(|i| local_values.round[i]));
        let constraint =
            builder.mul_sub_extension(sum_round_flags, sum_round_flags, sum_round_flags);
        yield_constr.constraint(builder, constraint);
        // check the value of k_i
        for i in 0..4 {
            let bit_i_comp: Vec<_> = (0..64)
                .map(|j| {
                    let k_j_i = builder.constant_extension(F::Extension::from_canonical_u8(
                        SHA_COMPRESS_K_LE_BYTES[j][i],
                    ));
                    builder.mul_extension(local_values.round[j], k_j_i)
                })
                .collect();
            let bit_i = builder.add_many_extension(bit_i_comp);
            let diff = builder.sub_extension(local_values.k_i[i], bit_i);
            let constraint = builder.mul_many_extension([sum_round_flags, not_final, diff]);
            yield_constr.constraint(builder, constraint);
        }
        // check the rotation
        rotate_right_ext_circuit_constraint(
            builder,
            local_values.state[get_input_range_4(4)].try_into().unwrap(),
            &local_values.e_rr_6,
            6,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(builder, c));
        rotate_right_ext_circuit_constraint(
            builder,
            local_values.state[get_input_range_4(4)].try_into().unwrap(),
            &local_values.e_rr_11,
            11,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(builder, c));
        rotate_right_ext_circuit_constraint(
            builder,
            local_values.state[get_input_range_4(4)].try_into().unwrap(),
            &local_values.e_rr_25,
            25,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(builder, c));
        rotate_right_ext_circuit_constraint(
            builder,
            local_values.state[get_input_range_4(0)].try_into().unwrap(),
            &local_values.a_rr_2,
            2,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(builder, c));
        rotate_right_ext_circuit_constraint(
            builder,
            local_values.state[get_input_range_4(0)].try_into().unwrap(),
            &local_values.a_rr_13,
            13,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(builder, c));
        rotate_right_ext_circuit_constraint(
            builder,
            local_values.state[get_input_range_4(0)].try_into().unwrap(),
            &local_values.a_rr_22,
            22,
        )
        .into_iter()
        .for_each(|c| yield_constr.constraint(builder, c));
        // The XOR, AND checks are in the logic table
        // The NOT check
        not_operation_ext_circuit_constraints(
            builder,
            local_values.state[get_input_range_4(4)].try_into().unwrap(),
            &local_values.e_not,
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(sum_round_flags, c);
            yield_constr.constraint(builder, constraint)
        });
        // wrapping add constraints
        wrapping_add_5_ext_circuit_constraints(
            builder,
            local_values.state[get_input_range_4(7)].try_into().unwrap(),
            local_values.s_1,
            local_values.ch,
            local_values.k_i,
            local_values.w_i,
            &local_values.temp1,
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(c, sum_round_flags);
            yield_constr.constraint(builder, constraint)
        });
        wrapping_add_2_ext_circuit_constraints(
            builder,
            local_values.s_0,
            local_values.maj,
            &local_values.temp2,
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(c, sum_round_flags);
            yield_constr.constraint(builder, constraint)
        });
        wrapping_add_2_ext_circuit_constraints(
            builder,
            local_values.state[get_input_range_4(3)].try_into().unwrap(),
            local_values.temp1.value,
            &local_values.d_add_temp1,
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(c, sum_round_flags);
            yield_constr.constraint(builder, constraint)
        });
        wrapping_add_2_ext_circuit_constraints(
            builder,
            local_values.temp1.value,
            local_values.temp2.value,
            &local_values.temp1_add_temp2,
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(c, sum_round_flags);
            yield_constr.constraint(builder, constraint)
        });
        // If this is not the final step or a padding row:
        let normal_round = builder.mul_extension(sum_round_flags, not_final);
        // the local and next timestamps must match.
        let diff = builder.sub_extension(next_values.timestamp, local_values.timestamp);
        let constraint = builder.mul_many_extension([sum_round_flags, not_final, diff]);
        yield_constr.constraint(builder, constraint);
        // the address of w_i must be increased by 4, except the last round
        let four_ext = builder.constant_extension(F::Extension::from_canonical_u8(4));
        let increment = builder.sub_extension(next_values.w_i_virt, local_values.w_i_virt);
        let address_increment = builder.sub_extension(increment, four_ext);
        let constraint = builder.mul_extension(normal_round, address_increment);
        yield_constr.constraint(builder, constraint);
        // Output constraint when it is not the final round or padding row
        // local.temp1 + local.temp2 = next.a
        equal_ext_circuit_constraints::<F, D, 4>(
            builder,
            local_values.temp1_add_temp2.value,
            next_values.state[get_input_range_4(0)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(normal_round, c);
            yield_constr.constraint(builder, constraint)
        });
        // local.a = next.b
        equal_ext_circuit_constraints::<F, D, 4>(
            builder,
            local_values.state[get_input_range_4(0)].try_into().unwrap(),
            next_values.state[get_input_range_4(1)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(normal_round, c);
            yield_constr.constraint(builder, constraint)
        });
        // local.b = next.c
        equal_ext_circuit_constraints::<F, D, 4>(
            builder,
            local_values.state[get_input_range_4(1)].try_into().unwrap(),
            next_values.state[get_input_range_4(2)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(normal_round, c);
            yield_constr.constraint(builder, constraint)
        });
        // local.c = next.d
        equal_ext_circuit_constraints::<F, D, 4>(
            builder,
            local_values.state[get_input_range_4(2)].try_into().unwrap(),
            next_values.state[get_input_range_4(3)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(normal_round, c);
            yield_constr.constraint(builder, constraint)
        });
        // local.d + local.temp1 = next.e
        equal_ext_circuit_constraints::<F, D, 4>(
            builder,
            local_values.d_add_temp1.value,
            next_values.state[get_input_range_4(4)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(normal_round, c);
            yield_constr.constraint(builder, constraint)
        });
        // local.e = next.f
        equal_ext_circuit_constraints::<F, D, 4>(
            builder,
            local_values.state[get_input_range_4(4)].try_into().unwrap(),
            next_values.state[get_input_range_4(5)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(normal_round, c);
            yield_constr.constraint(builder, constraint)
        });
        // local.f = next.g
        equal_ext_circuit_constraints::<F, D, 4>(
            builder,
            local_values.state[get_input_range_4(5)].try_into().unwrap(),
            next_values.state[get_input_range_4(6)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(normal_round, c);
            yield_constr.constraint(builder, constraint)
        });
        // local.g = next.h
        equal_ext_circuit_constraints::<F, D, 4>(
            builder,
            local_values.state[get_input_range_4(6)].try_into().unwrap(),
            next_values.state[get_input_range_4(7)].try_into().unwrap(),
        )
        .into_iter()
        .for_each(|c| {
            let constraint = builder.mul_extension(normal_round, c);
            yield_constr.constraint(builder, constraint)
        });
    }

    /// Highest constraint degree above, e.g.
    /// `sum_round_flags * not_final * diff`.
    fn constraint_degree(&self) -> usize {
        3
    }
}
#[cfg(test)]
mod test {
use crate::config::StarkConfig;
use crate::cross_table_lookup::{
Column, CtlData, CtlZData, Filter, GrandProductChallenge, GrandProductChallengeSet,
};
use crate::memory::segments::Segment;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | true |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_compress/mod.rs | prover/src/sha_compress/mod.rs | pub mod columns;
pub mod logic;
pub mod not_operation;
pub mod sha_compress_stark;
pub mod wrapping_add_2;
pub mod wrapping_add_5;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_compress/not_operation.rs | prover/src/sha_compress/not_operation.rs | use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
/// Trace columns holding the byte decomposition of a bitwise-NOT result.
pub struct NotOperation<T: Copy> {
    /// The result of `!x`.
    pub value: [T; 4],
}
impl<F: Field> NotOperation<F> {
    /// Fills the columns with the little-endian bytes of `!x` and returns
    /// `!x` as a `u32`.
    pub fn generate_trace(&mut self, x: [u8; 4]) -> u32 {
        let negated = !u32::from_le_bytes(x);
        for (col, byte) in self.value.iter_mut().zip(negated.to_le_bytes()) {
            *col = F::from_canonical_u8(byte);
        }
        negated
    }
}
/// For each byte lane, `x + !x == 0xff`; every returned expression must be
/// zero when the NOT columns are consistent with `original_value`.
pub(crate) fn not_operation_packed_constraints<P: PackedField>(
    original_value: [P; 4],
    cols: &NotOperation<P>,
) -> Vec<P> {
    let u8_max = P::from(P::Scalar::from_canonical_u8(255));
    original_value
        .iter()
        .zip(cols.value.iter())
        .map(|(&byte, &not_byte)| byte + not_byte - u8_max)
        .collect()
}
/// Recursive-circuit version of `not_operation_packed_constraints`:
/// emits one `x + !x - 0xff` target per byte lane.
pub(crate) fn not_operation_ext_circuit_constraints<
    F: RichField + Extendable<D>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    original_value: [ExtensionTarget<D>; 4],
    cols: &NotOperation<ExtensionTarget<D>>,
) -> Vec<ExtensionTarget<D>> {
    let u8_max = builder.constant_extension(F::Extension::from_canonical_u8(255));
    (0..4)
        .map(|i| {
            let byte_sum = builder.add_extension(original_value[i], cols.value[i]);
            builder.sub_extension(byte_sum, u8_max)
        })
        .collect()
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_compress/wrapping_add_5.rs | prover/src/sha_compress/wrapping_add_5.rs | use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::Field;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
/// Trace columns for a five-operand wrapping `u32` addition.
pub struct WrappingAdd5Op<T> {
    /// The result of `a + b + c + d + e`.
    pub value: [T; 4],
    /// The carry. Each digit is carry == 0, 1, 2, 3 or 4.
    pub carry: [T; 5],
}
impl<F: Field> WrappingAdd5Op<F> {
    /// Populates the one-hot carry and result bytes for
    /// `a + b + c + d + e (mod 2^32)` and returns the wrapped sum.
    pub fn generate_trace(&mut self, a: u32, b: u32, c: u32, d: u32, e: u32) -> u32 {
        // The exact sum of five u32s fits in a u64; the low 32 bits are the
        // wrapped result and the high bits are the carry (at most 4).
        let full_sum: u64 = [a, b, c, d, e].iter().map(|&x| x as u64).sum();
        let wrapped = full_sum as u32;
        let carry = full_sum >> 32;
        assert_eq!(carry * 2_u64.pow(32) + wrapped as u64, full_sum);
        assert!(carry < 5);
        self.carry = [F::ZERO; 5];
        self.carry[carry as usize] = F::ONE;
        self.value = wrapped.to_le_bytes().map(F::from_canonical_u8);
        wrapped
    }
}
/// Constraints for `WrappingAdd5Op`: the byte columns of the five operands,
/// combined as little-endian u32s, must equal `value + carry * 2^32`, with
/// `carry` selected by the one-hot carry columns.
pub(crate) fn wrapping_add_5_packed_constraints<P: PackedField>(
    a: [P; 4],
    b: [P; 4],
    c: [P; 4],
    d: [P; 4],
    e: [P; 4],
    cols: &WrappingAdd5Op<P>,
) -> Vec<P> {
    let mut result = vec![];
    let two_pow_8 = P::from(P::Scalar::from_canonical_u32(2u32.pow(8)));
    let two_pow_16 = P::from(P::Scalar::from_canonical_u32(2u32.pow(16)));
    let two_pow_24 = P::from(P::Scalar::from_canonical_u32(2u32.pow(24)));
    let two_pow_32 = P::from(P::Scalar::from_canonical_u64(2u64.pow(32)));
    // Recombine the result bytes into a single field element.
    let wrapping_added_result = cols.value[0]
        + two_pow_8 * cols.value[1]
        + two_pow_16 * cols.value[2]
        + two_pow_24 * cols.value[3];
    // Each value in carry_{0,1,2,3,4} is 0 or 1, and exactly one of them is 1 per digit.
    for i in 0..5 {
        result.push(cols.carry[i] * (P::ONES - cols.carry[i]));
    }
    result.push(
        cols.carry[0] + cols.carry[1] + cols.carry[2] + cols.carry[3] + cols.carry[4] - P::ONES,
    );
    // Calculates carry from carry_{0,1,2,3,4}.
    let one = P::ONES;
    let two = P::from(P::Scalar::from_canonical_u32(2));
    let three = P::from(P::Scalar::from_canonical_u32(3));
    let four = P::from(P::Scalar::from_canonical_u32(4));
    let carry =
        cols.carry[1] * one + cols.carry[2] * two + cols.carry[3] * three + cols.carry[4] * four;
    // Wrapping added constraint
    // Sum the operands byte-wise; no byte-level carries are needed because
    // the field is large enough to hold the full sum exactly.
    let overflowed_result = (a[0] + b[0] + c[0] + d[0] + e[0])
        + (a[1] + b[1] + c[1] + d[1] + e[1]) * two_pow_8
        + (a[2] + b[2] + c[2] + d[2] + e[2]) * two_pow_16
        + (a[3] + b[3] + c[3] + d[3] + e[3]) * two_pow_24;
    let constraint = overflowed_result - carry * two_pow_32 - wrapping_added_result;
    result.push(constraint);
    result
}
/// Recursive-circuit mirror of `wrapping_add_5_packed_constraints`; the
/// emitted constraints correspond one-to-one with the packed version.
pub(crate) fn wrapping_add_5_ext_circuit_constraints<
    F: RichField + Extendable<D>,
    const D: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    a: [ExtensionTarget<D>; 4],
    b: [ExtensionTarget<D>; 4],
    c: [ExtensionTarget<D>; 4],
    d: [ExtensionTarget<D>; 4],
    e: [ExtensionTarget<D>; 4],
    cols: &WrappingAdd5Op<ExtensionTarget<D>>,
) -> Vec<ExtensionTarget<D>> {
    let mut result = vec![];
    let one = builder.one_extension();
    let two = builder.constant_extension(F::Extension::from_canonical_u32(2));
    let three = builder.constant_extension(F::Extension::from_canonical_u32(3));
    let four = builder.constant_extension(F::Extension::from_canonical_u32(4));
    let two_pow_8 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(8)));
    let two_pow_16 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(16)));
    let two_pow_24 = builder.constant_extension(F::Extension::from_canonical_u32(2u32.pow(24)));
    let two_pow_32 = builder.constant_extension(F::Extension::from_canonical_u64(2u64.pow(32)));
    // Recombine the result bytes into a single extension target.
    let tmp = builder.mul_extension(cols.value[1], two_pow_8);
    let tmp2 = builder.mul_extension(cols.value[2], two_pow_16);
    let tmp3 = builder.mul_extension(cols.value[3], two_pow_24);
    let wrapping_added_result = builder.add_many_extension([cols.value[0], tmp, tmp2, tmp3]);
    // Each value in carry_{0,1,2,3,4} is 0 or 1, and exactly one of them is 1 per digit.
    for i in 0..5 {
        let tmp = builder.sub_extension(one, cols.carry[i]);
        result.push(builder.mul_extension(cols.carry[i], tmp));
    }
    let tmp = builder.add_many_extension(cols.carry);
    result.push(builder.sub_extension(tmp, one));
    // Calculates carry from carry_{0,1,2,3,4}.
    let tmp = builder.mul_extension(cols.carry[1], one);
    let tmp2 = builder.mul_extension(cols.carry[2], two);
    let tmp3 = builder.mul_extension(cols.carry[3], three);
    let tmp4 = builder.mul_extension(cols.carry[4], four);
    let carry = builder.add_many_extension([tmp, tmp2, tmp3, tmp4]);
    // Wrapping added constraint
    let byte_0 = builder.add_many_extension([a[0], b[0], c[0], d[0], e[0]]);
    let byte_1 = builder.add_many_extension([a[1], b[1], c[1], d[1], e[1]]);
    let byte_2 = builder.add_many_extension([a[2], b[2], c[2], d[2], e[2]]);
    let byte_3 = builder.add_many_extension([a[3], b[3], c[3], d[3], e[3]]);
    let tmp1 = builder.mul_extension(byte_1, two_pow_8);
    let tmp2 = builder.mul_extension(byte_2, two_pow_16);
    let tmp3 = builder.mul_extension(byte_3, two_pow_24);
    let overflowed_result = builder.add_many_extension([byte_0, tmp1, tmp2, tmp3]);
    let carry_mul = builder.mul_extension(carry, two_pow_32);
    let computed_overflowed_result = builder.add_extension(carry_mul, wrapping_added_result);
    let constraint = builder.sub_extension(overflowed_result, computed_overflowed_result);
    result.push(constraint);
    result
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/sha_compress/logic.rs | prover/src/sha_compress/logic.rs | use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
/// Element-wise equality: returns `x[i] - y[i]` for every lane; each must
/// evaluate to zero when the two arrays agree.
pub(crate) fn equal_packed_constraint<P: PackedField, const N: usize>(
    x: [P; N],
    y: [P; N],
) -> Vec<P> {
    x.iter().zip(y.iter()).map(|(&xi, &yi)| xi - yi).collect()
}
/// Recursive-circuit version of `equal_packed_constraint`: one
/// `x[i] - y[i]` target per lane.
pub(crate) fn equal_ext_circuit_constraints<
    F: RichField + Extendable<D>,
    const D: usize,
    const N: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    x: [ExtensionTarget<D>; N],
    y: [ExtensionTarget<D>; N],
) -> Vec<ExtensionTarget<D>> {
    (0..N)
        .map(|i| builder.sub_extension(x[i], y[i]))
        .collect()
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/poseidon_sponge/columns.rs | prover/src/poseidon_sponge/columns.rs | use crate::poseidon::constants::{SPONGE_CAPACITY, SPONGE_RATE, SPONGE_WIDTH};
use std::borrow::{Borrow, BorrowMut};
use std::mem::{size_of, transmute};
use crate::util::{indices_arr, transmute_no_compile_time_size_checks};
// Sizes of the Poseidon sponge pieces, expressed in bytes and 4-byte limbs.
pub(crate) const POSEIDON_WIDTH_BYTES: usize = 48; // 12 * 4
pub(crate) const POSEIDON_WIDTH_U32S: usize = POSEIDON_WIDTH_BYTES / 4;
// State limbs that are not part of the digest.
pub(crate) const POSEIDON_WIDTH_MINUS_DIGEST: usize = SPONGE_WIDTH - POSEIDON_DIGEST;
pub(crate) const POSEIDON_RATE_BYTES: usize = SPONGE_RATE * 4;
pub(crate) const POSEIDON_RATE_U32S: usize = POSEIDON_RATE_BYTES / 4;
// NOTE(review): hard-coded rather than derived from SPONGE_CAPACITY —
// confirm this stays in sync with the sponge parameters.
pub(crate) const POSEIDON_CAPACITY_BYTES: usize = 64;
pub(crate) const POSEIDON_CAPACITY_U32S: usize = POSEIDON_CAPACITY_BYTES / 4;
pub(crate) const POSEIDON_DIGEST_BYTES: usize = 32;
// Number of state limbs that form the digest.
pub(crate) const POSEIDON_DIGEST: usize = 4;
/// Column layout for the Poseidon sponge STARK.
///
/// `#[repr(C)]` is required: the `Borrow`/`From` impls in this module
/// reinterpret the struct as a flat `[T; NUM_POSEIDON_SPONGE_COLUMNS]` via
/// `transmute`, which relies on field order and the absence of padding.
#[repr(C)]
#[derive(Eq, PartialEq, Debug)]
pub(crate) struct PoseidonSpongeColumnsView<T: Copy> {
    /// 1 if this row represents a full input block, i.e. one in which each byte is an input byte,
    /// not a padding byte; 0 otherwise.
    pub is_full_input_block: T,
    // The base address at which we will read the input block.
    pub context: T,
    pub segment: T,
    // address
    pub virt: [T; SPONGE_RATE],
    /// The timestamp at which inputs should be read from memory.
    pub timestamp: T,
    /// The length of the original input, in bytes.
    pub len: T,
    /// The number of input bytes that have already been absorbed prior to this block.
    pub already_absorbed_bytes: T,
    /// If this row represents a final block row, the `i`th entry should be 1 if the final chunk of
    /// input has length `i` (in other words if `len - already_absorbed == i`), otherwise 0.
    ///
    /// If this row represents a full input block, this should contain all 0s.
    pub is_final_input_len: [T; POSEIDON_RATE_BYTES],
    /// The initial rate part of the sponge, at the start of this step.
    pub original_rate: [T; SPONGE_RATE],
    /// The capacity part of the sponge, at the start of this step.
    pub original_capacity: [T; SPONGE_CAPACITY],
    /// The block being absorbed, which may contain input bytes and/or padding bytes.
    pub block_bytes: [T; POSEIDON_RATE_BYTES],
    /// The rate part of the sponge, that is the current block, before add round constant
    pub new_rate: [T; SPONGE_RATE],
    /// The entire state (rate + capacity) of the sponge, after the
    /// permutation is applied, minus the first limbs where the digest is extracted from.
    /// Those missing limbs can be recomputed from `updated_digest_state`.
    pub partial_updated_state: [T; POSEIDON_WIDTH_MINUS_DIGEST],
    /// The first part of the state of the sponge, after the permutation is applied.
    /// This also represents the output digest of the Poseidon sponge during the squeezing phase.
    pub updated_digest_state: [T; POSEIDON_DIGEST],
}
// `u8` is guaranteed to have a `size_of` of 1.
// Instantiating the view at `u8` makes its byte size equal the column count.
pub const NUM_POSEIDON_SPONGE_COLUMNS: usize = size_of::<PoseidonSpongeColumnsView<u8>>();
// Conversions between the named column view and a flat column array.
// SAFETY: sound because `PoseidonSpongeColumnsView` is `#[repr(C)]` and
// consists of exactly NUM_POSEIDON_SPONGE_COLUMNS copies of `T`.
impl<T: Copy> From<[T; NUM_POSEIDON_SPONGE_COLUMNS]> for PoseidonSpongeColumnsView<T> {
    fn from(value: [T; NUM_POSEIDON_SPONGE_COLUMNS]) -> Self {
        unsafe { transmute_no_compile_time_size_checks(value) }
    }
}
impl<T: Copy> From<PoseidonSpongeColumnsView<T>> for [T; NUM_POSEIDON_SPONGE_COLUMNS] {
    fn from(value: PoseidonSpongeColumnsView<T>) -> Self {
        unsafe { transmute_no_compile_time_size_checks(value) }
    }
}
impl<T: Copy> Borrow<PoseidonSpongeColumnsView<T>> for [T; NUM_POSEIDON_SPONGE_COLUMNS] {
    fn borrow(&self) -> &PoseidonSpongeColumnsView<T> {
        unsafe { transmute(self) }
    }
}
impl<T: Copy> BorrowMut<PoseidonSpongeColumnsView<T>> for [T; NUM_POSEIDON_SPONGE_COLUMNS] {
    fn borrow_mut(&mut self) -> &mut PoseidonSpongeColumnsView<T> {
        unsafe { transmute(self) }
    }
}
impl<T: Copy> Borrow<[T; NUM_POSEIDON_SPONGE_COLUMNS]> for PoseidonSpongeColumnsView<T> {
    fn borrow(&self) -> &[T; NUM_POSEIDON_SPONGE_COLUMNS] {
        unsafe { transmute(self) }
    }
}
impl<T: Copy> BorrowMut<[T; NUM_POSEIDON_SPONGE_COLUMNS]> for PoseidonSpongeColumnsView<T> {
    fn borrow_mut(&mut self) -> &mut [T; NUM_POSEIDON_SPONGE_COLUMNS] {
        unsafe { transmute(self) }
    }
}
impl<T: Copy + Default> Default for PoseidonSpongeColumnsView<T> {
    fn default() -> Self {
        [T::default(); NUM_POSEIDON_SPONGE_COLUMNS].into()
    }
}
/// Builds the column-name → column-index map at compile time by
/// reinterpreting the index array `[0, 1, ..]` through the column view.
const fn make_col_map() -> PoseidonSpongeColumnsView<usize> {
    let indices_arr = indices_arr::<NUM_POSEIDON_SPONGE_COLUMNS>();
    unsafe {
        transmute::<[usize; NUM_POSEIDON_SPONGE_COLUMNS], PoseidonSpongeColumnsView<usize>>(
            indices_arr,
        )
    }
}
// Maps each named column to its index in a flat trace row.
pub(crate) const POSEIDON_SPONGE_COL_MAP: PoseidonSpongeColumnsView<usize> = make_col_map();
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/poseidon_sponge/mod.rs | prover/src/poseidon_sponge/mod.rs | pub mod columns;
pub mod poseidon_sponge_stark;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/poseidon_sponge/poseidon_sponge_stark.rs | prover/src/poseidon_sponge/poseidon_sponge_stark.rs | use std::borrow::Borrow;
use std::cmp::min;
use std::iter::once;
use std::marker::PhantomData;
use itertools::Itertools;
use plonky2::field::extension::{Extendable, FieldExtension};
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialValues;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use crate::cross_table_lookup::{Column, Filter};
use crate::evaluation_frame::{StarkEvaluationFrame, StarkFrame};
use crate::memory::segments::Segment;
use crate::poseidon::constants::{SPONGE_RATE, SPONGE_WIDTH};
use crate::poseidon::poseidon_stark::poseidon_with_witness;
use crate::poseidon_sponge::columns::*;
use crate::stark::Stark;
use crate::util::trace_rows_to_poly_values;
use crate::witness::memory::MemoryAddress;
// Bytes packed into each memory CTL entry (one 32-bit word per read).
pub const U8S_PER_CTL: usize = 4;
// u32 limbs carried per memory CTL entry.
pub const U32S_PER_CTL: usize = 1;
/// Columns exposed to the table that looks up this sponge's final output:
/// the read location (context, segment, first word address), the input
/// length, the timestamp, and the digest limbs.
pub(crate) fn ctl_looked_data<F: Field>() -> Vec<Column<F>> {
    let cols = POSEIDON_SPONGE_COL_MAP;
    let mut res: Vec<_> = Column::singles([
        cols.context,
        cols.segment,
        cols.virt[0],
        cols.len,
        cols.timestamp,
    ])
    .collect();
    res.extend(Column::singles(&cols.updated_digest_state));
    res
}
/// Columns sent to the Poseidon permutation table as its input state
/// (new rate limbs, then original capacity limbs), plus the timestamp.
pub(crate) fn ctl_looking_poseidon_inputs<F: Field>() -> Vec<Column<F>> {
    let cols = POSEIDON_SPONGE_COL_MAP;
    let mut res = Column::singles(&cols.new_rate).collect_vec();
    res.extend(Column::singles(&cols.original_capacity));
    res.push(Column::single(cols.timestamp));
    res
}
/// Columns sent to the Poseidon permutation table as its output state
/// (digest limbs first, then the remaining limbs), plus the timestamp.
pub(crate) fn ctl_looking_poseidon_outputs<F: Field>() -> Vec<Column<F>> {
    let cols = POSEIDON_SPONGE_COL_MAP;
    let state = cols
        .updated_digest_state
        .iter()
        .chain(cols.partial_updated_state.iter());
    let mut res: Vec<_> = Column::singles(state).collect();
    res.push(Column::single(cols.timestamp));
    res
}
/// Columns for the memory-table CTL entry covering the `i`th input byte.
pub(crate) fn ctl_looking_memory<F: Field>(i: usize) -> Vec<Column<F>> {
    let cols = POSEIDON_SPONGE_COL_MAP;
    let mut res = vec![Column::constant(F::ONE)]; // is_read
    res.extend(Column::singles([cols.context, cols.segment]));
    // The address of the byte being read is `virt + already_absorbed_bytes + i`.
    /*
    res.push(Column::linear_combination_with_constant(
        [(cols.virt, F::ONE), (cols.already_absorbed_bytes, F::ONE)],
        F::from_canonical_usize(i),
    ));
    */
    // Memory is addressed per 32-bit word, so byte `i` lives in word `i / 4`.
    res.push(Column::single(cols.virt[i / 4]));
    // The u32 of i'th input byte being read.
    let start = (i / 4) * 4;
    // NOTE(review): the bytes are listed most-significant-first into
    // `le_bytes`, i.e. the word is rebuilt big-endian from the block bytes —
    // confirm this matches the memory table's word encoding.
    let lc: Column<F> = Column::le_bytes([
        cols.block_bytes[start + 3],
        cols.block_bytes[start + 2],
        cols.block_bytes[start + 1],
        cols.block_bytes[start],
    ]);
    res.push(lc);
    // Since we're reading a single byte, the higher limbs must be zero.
    // res.extend((1..8).map(|_| Column::zero()));
    res.push(Column::single(cols.timestamp));
    // Sanity-check against the memory table's expected CTL schema length.
    assert_eq!(
        res.len(),
        crate::memory::memory_stark::ctl_data::<F>().len()
    );
    res
}
/// CTL filter selecting final-block rows only.
pub(crate) fn ctl_looked_filter<F: Field>() -> Filter<F> {
    // The CPU table is only interested in our final-block rows, since those
    // contain the final sponge output. At most one `is_final_input_len`
    // flag is set per row, so their sum is a 0/1 selector.
    let cols = POSEIDON_SPONGE_COL_MAP;
    Filter::new_simple(Column::sum(cols.is_final_input_len))
}
/// CTL filter for reading the `i`th byte of input from memory.
pub(crate) fn ctl_looking_memory_filter<F: Field>(i: usize) -> Filter<F> {
    // We perform the `i`th read if either
    // - this is a full input block, or
    // - this is a final block of length `i` or greater
    let cols = POSEIDON_SPONGE_COL_MAP;
    let selector = if i == POSEIDON_RATE_BYTES - 1 {
        // The last byte is only ever read on a full block.
        Column::single(cols.is_full_input_block)
    } else {
        Column::sum(once(&cols.is_full_input_block).chain(&cols.is_final_input_len[i + 1..]))
    };
    Filter::new_simple(selector)
}
/// CTL filter selecting every row that runs a Poseidon permutation:
/// full-input blocks and final blocks of any length.
pub(crate) fn ctl_looking_poseidon_filter<F: Field>() -> Filter<F> {
    let cols = POSEIDON_SPONGE_COL_MAP;
    let flags = once(&cols.is_full_input_block).chain(cols.is_final_input_len.iter());
    Filter::new_simple(Column::sum(flags))
}
/// Computes the Poseidon sponge hash of `inputs`, returning the first
/// `POSEIDON_DIGEST` limbs of the final state as canonical `u64`s.
///
/// The input is padded with the pad10*1 rule up to a whole number of
/// `POSEIDON_RATE_BYTES` blocks; when the input length is already a
/// multiple of the rate, a full padding block is appended.
pub fn poseidon<F: PrimeField64>(inputs: &[u8]) -> [u64; POSEIDON_DIGEST] {
    let l = inputs.len();
    let chunks = l / POSEIDON_RATE_BYTES + 1;
    let mut input = inputs.to_owned();
    input.resize(chunks * POSEIDON_RATE_BYTES, 0);
    // pad10*1 rule
    if l % POSEIDON_RATE_BYTES == POSEIDON_RATE_BYTES - 1 {
        // Both 1s are placed in the same byte.
        input[l] = 0b10000001;
    } else {
        input[l] = 1;
        input[chunks * POSEIDON_RATE_BYTES - 1] = 0b10000000;
    }
    let mut state = [F::ZEROS; SPONGE_WIDTH];
    for block in input.chunks(POSEIDON_RATE_BYTES) {
        // Absorb the block into the rate part of the state as little-endian
        // u32 limbs. The 4-byte slice converts directly into `[u8; 4]` via
        // `try_into`; the previous `to_vec()` round-trip allocated a Vec
        // per limb for nothing.
        let block_u32s = (0..SPONGE_RATE)
            .map(|i| {
                F::from_canonical_u32(u32::from_le_bytes(
                    block[i * 4..(i + 1) * 4].try_into().unwrap(),
                ))
            })
            .collect_vec();
        state[..SPONGE_RATE].copy_from_slice(&block_u32s);
        let (output, _) = poseidon_with_witness(&state);
        state.copy_from_slice(&output);
    }
    // Truncate the final state to the digest limbs.
    let hash = state
        .iter()
        .take(POSEIDON_DIGEST)
        .map(|x| x.to_canonical_u64())
        .collect_vec();
    hash.try_into().unwrap()
}
/// Information about a Poseidon sponge operation needed for witness generation.
#[derive(Clone, Debug)]
pub(crate) struct PoseidonSpongeOp {
    /// The base address at which inputs are read.
    /// NOTE(review): appears to hold one address per 32-bit input word
    /// (see `generate_common_fields`) — confirm against callers.
    pub(crate) base_address: Vec<MemoryAddress>,
    /// The timestamp at which inputs are read.
    pub(crate) timestamp: usize,
    /// The input that was read.
    pub(crate) input: Vec<u8>,
}
/// STARK for the Poseidon sponge: absorbs memory-read input blocks and
/// exposes the permutation inputs/outputs to other tables via CTLs.
#[derive(Copy, Clone, Default)]
pub struct PoseidonSpongeStark<F, const D: usize> {
    // Marker only; the stark itself is stateless.
    f: PhantomData<F>,
}
impl<F: RichField + Extendable<D>, const D: usize> PoseidonSpongeStark<F, D> {
    /// Converts a list of sponge operations into the table's trace polynomials.
    pub(crate) fn generate_trace(
        &self,
        operations: &Vec<PoseidonSpongeOp>,
        min_rows: usize,
    ) -> Vec<PolynomialValues<F>> {
        // Generate the witness row-wise.
        let trace_rows = self.generate_trace_rows(operations, min_rows);
        trace_rows_to_poly_values(trace_rows)
    }
    /// Generates all witness rows and pads with dummy rows up to a
    /// power-of-two height of at least `min_rows`.
    fn generate_trace_rows(
        &self,
        operations: &Vec<PoseidonSpongeOp>,
        min_rows: usize,
    ) -> Vec<[F; NUM_POSEIDON_SPONGE_COLUMNS]> {
        // Each op contributes `input.len() / POSEIDON_RATE_BYTES` full rows
        // plus exactly one final row.
        let base_len: usize = operations
            .iter()
            .map(|op| op.input.len() / POSEIDON_RATE_BYTES + 1)
            .sum();
        let mut rows = Vec::with_capacity(base_len.max(min_rows).next_power_of_two());
        for op in operations {
            rows.extend(self.generate_rows_for_op(op));
        }
        let padded_rows = rows.len().max(min_rows).next_power_of_two();
        for _ in rows.len()..padded_rows {
            rows.push(self.generate_padding_row());
        }
        rows
    }
    /// Rows for a single sponge operation: one row per full rate-sized
    /// block, then one final row absorbing the (padded) remainder.
    fn generate_rows_for_op(&self, op: &PoseidonSpongeOp) -> Vec<[F; NUM_POSEIDON_SPONGE_COLUMNS]> {
        let mut rows = Vec::with_capacity(op.input.len() / POSEIDON_RATE_BYTES + 1);
        let mut sponge_state = [F::ZEROS; SPONGE_WIDTH];
        let mut input_blocks = op.input.chunks_exact(POSEIDON_RATE_BYTES);
        let mut already_absorbed_bytes = 0;
        for block in input_blocks.by_ref() {
            let row = self.generate_full_input_row(
                op,
                already_absorbed_bytes,
                sponge_state,
                block.try_into().unwrap(),
            );
            // Carry the permutation output forward as the next block's
            // "before" state: digest limbs first, then the partial limbs.
            sponge_state[..POSEIDON_DIGEST].copy_from_slice(&row.updated_digest_state);
            sponge_state[POSEIDON_DIGEST..].copy_from_slice(&row.partial_updated_state);
            rows.push(row.into());
            already_absorbed_bytes += POSEIDON_RATE_BYTES;
        }
        rows.push(
            self.generate_final_row(
                op,
                already_absorbed_bytes,
                sponge_state,
                input_blocks.remainder(),
            )
            .into(),
        );
        rows
    }
    /// Row for a block consisting entirely of input bytes (no padding).
    fn generate_full_input_row(
        &self,
        op: &PoseidonSpongeOp,
        already_absorbed_bytes: usize,
        sponge_state: [F; SPONGE_WIDTH],
        block: [u8; POSEIDON_RATE_BYTES],
    ) -> PoseidonSpongeColumnsView<F> {
        let mut row = PoseidonSpongeColumnsView {
            is_full_input_block: F::ONE,
            ..Default::default()
        };
        row.block_bytes = block.map(F::from_canonical_u8);
        Self::generate_common_fields(&mut row, op, already_absorbed_bytes, sponge_state);
        row
    }
    /// Row for the last block of an operation; applies pad10*1 padding and
    /// sets the `is_final_input_len[len]` flag.
    fn generate_final_row(
        &self,
        op: &PoseidonSpongeOp,
        already_absorbed_bytes: usize,
        sponge_state: [F; SPONGE_WIDTH],
        final_inputs: &[u8],
    ) -> PoseidonSpongeColumnsView<F> {
        assert_eq!(already_absorbed_bytes + final_inputs.len(), op.input.len());
        let mut row = PoseidonSpongeColumnsView::default();
        for (block_byte, input_byte) in row.block_bytes.iter_mut().zip(final_inputs) {
            *block_byte = F::from_canonical_u8(*input_byte);
        }
        // pad10*1 rule
        if final_inputs.len() == POSEIDON_RATE_BYTES - 1 {
            // Both 1s are placed in the same byte.
            row.block_bytes[final_inputs.len()] = F::from_canonical_u8(0b10000001);
        } else {
            row.block_bytes[final_inputs.len()] = F::ONE;
            row.block_bytes[POSEIDON_RATE_BYTES - 1] = F::from_canonical_u8(0b10000000);
        }
        // One-hot flag recording the remainder length of this final block.
        row.is_final_input_len[final_inputs.len()] = F::ONE;
        Self::generate_common_fields(&mut row, op, already_absorbed_bytes, sponge_state);
        row
    }
    /// Generate fields that are common to both full-input-block rows and final-block rows.
    /// Also updates the sponge state with a single absorption.
    /// NOTE(review): `sponge_state` is taken by value, so the caller's copy
    /// is NOT updated here — callers read the new state back out of
    /// `row.updated_digest_state` / `row.partial_updated_state` instead,
    /// and the final `copy_from_slice` into the local is effectively a
    /// dead store.
    fn generate_common_fields(
        row: &mut PoseidonSpongeColumnsView<F>,
        op: &PoseidonSpongeOp,
        already_absorbed_bytes: usize,
        mut sponge_state: [F; SPONGE_WIDTH],
    ) {
        // One memory address per 32-bit word of this block; clamp to the
        // number of addresses actually provided and zero-pad the rest.
        let idx = already_absorbed_bytes / 4;
        let end_index = min(
            (already_absorbed_bytes + POSEIDON_RATE_BYTES) / 4,
            op.base_address.len(),
        );
        let mut virt = (idx..end_index)
            .map(|i| op.base_address[i].virt)
            .collect_vec();
        virt.resize(SPONGE_RATE, 0);
        let virt: [usize; SPONGE_RATE] = virt.try_into().unwrap();
        row.context = F::from_canonical_usize(op.base_address[0].context);
        // NOTE(review): the segment is read from index `Segment::Code as
        // usize` — looks like this is just index 0; confirm intent.
        row.segment = F::from_canonical_usize(op.base_address[Segment::Code as usize].segment);
        row.virt = virt.map(F::from_canonical_usize);
        row.timestamp = F::from_canonical_usize(op.timestamp);
        row.len = F::from_canonical_usize(op.input.len());
        row.already_absorbed_bytes = F::from_canonical_usize(already_absorbed_bytes);
        row.original_rate
            .copy_from_slice(&sponge_state[..SPONGE_RATE]);
        row.original_capacity
            .copy_from_slice(&sponge_state[SPONGE_RATE..]);
        // Pack the block bytes into little-endian u32 limbs — the new rate.
        let block_u32s = (0..SPONGE_RATE)
            .map(|i| {
                F::from_canonical_u32(u32::from_le_bytes(
                    row.block_bytes[i * 4..(i + 1) * 4]
                        .iter()
                        .map(|x| x.to_canonical_u64() as u8)
                        .collect_vec()
                        .try_into()
                        .unwrap(),
                ))
            })
            .collect_vec();
        row.new_rate.copy_from_slice(&block_u32s);
        sponge_state[..SPONGE_RATE].copy_from_slice(&block_u32s);
        let (output, _) = poseidon_with_witness(&sponge_state);
        sponge_state.copy_from_slice(&output);
        // Store all but the first `POSEIDON_DIGEST` limbs in the updated state.
        // Those missing limbs will be stored separately.
        row.partial_updated_state
            .copy_from_slice(&output[POSEIDON_DIGEST..]);
        row.updated_digest_state
            .copy_from_slice(&output[..POSEIDON_DIGEST]);
    }
    fn generate_padding_row(&self) -> [F; NUM_POSEIDON_SPONGE_COLUMNS] {
        // The default instance has is_full_input_block = is_final_block = 0,
        // indicating that it's a dummy/padding row.
        PoseidonSpongeColumnsView::default().into()
    }
}
impl<F: RichField + Extendable<D>, const D: usize> Stark<F, D> for PoseidonSpongeStark<F, D> {
    type EvaluationFrame<FE, P, const D2: usize>
        = StarkFrame<P, NUM_POSEIDON_SPONGE_COLUMNS>
    where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>;
    type EvaluationFrameTarget = StarkFrame<ExtensionTarget<D>, NUM_POSEIDON_SPONGE_COLUMNS>;
    /// Native (packed-field) constraint evaluation for the sponge table.
    fn eval_packed_generic<FE, P, const D2: usize>(
        &self,
        vars: &Self::EvaluationFrame<FE, P, D2>,
        yield_constr: &mut ConstraintConsumer<P>,
    ) where
        FE: FieldExtension<D2, BaseField = F>,
        P: PackedField<Scalar = FE>,
    {
        let local_values: &[P; NUM_POSEIDON_SPONGE_COLUMNS] =
            vars.get_local_values().try_into().unwrap();
        let local_values: &PoseidonSpongeColumnsView<P> = local_values.borrow();
        let next_values: &[P; NUM_POSEIDON_SPONGE_COLUMNS] =
            vars.get_next_values().try_into().unwrap();
        let next_values: &PoseidonSpongeColumnsView<P> = next_values.borrow();
        // Each flag (full-input block, final block or implied dummy flag) must be boolean.
        let is_full_input_block = local_values.is_full_input_block;
        yield_constr.constraint(is_full_input_block * (is_full_input_block - P::ONES));
        let is_final_block: P = local_values.is_final_input_len.iter().copied().sum();
        yield_constr.constraint(is_final_block * (is_final_block - P::ONES));
        for &is_final_len in local_values.is_final_input_len.iter() {
            yield_constr.constraint(is_final_len * (is_final_len - P::ONES));
        }
        // Ensure that full-input block and final block flags are not set to 1 at the same time.
        yield_constr.constraint(is_final_block * is_full_input_block);
        // If this is the first row, the original sponge state should be 0 and already_absorbed_bytes = 0.
        let already_absorbed_bytes = local_values.already_absorbed_bytes;
        yield_constr.constraint_first_row(already_absorbed_bytes);
        for &original_rate_elem in local_values.original_rate.iter() {
            yield_constr.constraint_first_row(original_rate_elem);
        }
        for &original_capacity_elem in local_values.original_capacity.iter() {
            yield_constr.constraint_first_row(original_capacity_elem);
        }
        // If this is a final block, the next row's original sponge state should be 0 and already_absorbed_bytes = 0.
        yield_constr.constraint_transition(is_final_block * next_values.already_absorbed_bytes);
        for &original_rate_elem in next_values.original_rate.iter() {
            yield_constr.constraint_transition(is_final_block * original_rate_elem);
        }
        for &original_capacity_elem in next_values.original_capacity.iter() {
            yield_constr.constraint_transition(is_final_block * original_capacity_elem);
        }
        // If this is a full-input block, the next row's address, time and len must match as well as its timestamp.
        yield_constr.constraint_transition(
            is_full_input_block * (local_values.context - next_values.context),
        );
        yield_constr.constraint_transition(
            is_full_input_block * (local_values.segment - next_values.segment),
        );
        yield_constr.constraint_transition(
            is_full_input_block * (local_values.timestamp - next_values.timestamp),
        );
        // If this is a full-input block, the next row's "before" should match our "after" state.
        for (current_after, next_before) in local_values
            .updated_digest_state
            .iter()
            .zip_eq(&next_values.original_rate[..POSEIDON_DIGEST])
        {
            yield_constr
                .constraint_transition(is_full_input_block * (*next_before - *current_after));
        }
        // Fixed: `&current_after` had been corrupted to `¤t_after`
        // (HTML-entity mangling of `&curren`), which does not compile.
        for (&current_after, &next_before) in local_values
            .partial_updated_state
            .iter()
            .zip(next_values.original_rate[POSEIDON_DIGEST..].iter())
        {
            yield_constr.constraint_transition(is_full_input_block * (next_before - current_after));
        }
        for (&current_after, &next_before) in local_values
            .partial_updated_state
            .iter()
            .skip(SPONGE_RATE - POSEIDON_DIGEST)
            .zip(next_values.original_capacity.iter())
        {
            yield_constr.constraint_transition(is_full_input_block * (next_before - current_after));
        }
        // If this is a full-input block, the next row's already_absorbed_bytes should be ours plus `POSEIDON_RATE_BYTES`.
        yield_constr.constraint_transition(
            is_full_input_block
                * (already_absorbed_bytes + P::from(FE::from_canonical_usize(POSEIDON_RATE_BYTES))
                    - next_values.already_absorbed_bytes),
        );
        // A dummy row is always followed by another dummy row, so the prover can't put dummy rows "in between" to avoid the above checks.
        let is_dummy = P::ONES - is_full_input_block - is_final_block;
        let next_is_final_block: P = next_values.is_final_input_len.iter().copied().sum();
        yield_constr.constraint_transition(
            is_dummy * (next_values.is_full_input_block + next_is_final_block),
        );
        // If this is a final block, is_final_input_len implies `len - already_absorbed == i`.
        let offset = local_values.len - already_absorbed_bytes;
        for (i, &is_final_len) in local_values.is_final_input_len.iter().enumerate() {
            let entry_match = offset - P::from(FE::from_canonical_usize(i));
            yield_constr.constraint(is_final_len * entry_match);
        }
    }
    /// Recursive-circuit mirror of `eval_packed_generic`; the constraints
    /// must stay in exact correspondence with the packed evaluator above.
    fn eval_ext_circuit(
        &self,
        builder: &mut plonky2::plonk::circuit_builder::CircuitBuilder<F, D>,
        vars: &Self::EvaluationFrameTarget,
        yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    ) {
        let local_values: &[ExtensionTarget<D>; NUM_POSEIDON_SPONGE_COLUMNS] =
            vars.get_local_values().try_into().unwrap();
        let local_values: &PoseidonSpongeColumnsView<ExtensionTarget<D>> = local_values.borrow();
        let next_values: &[ExtensionTarget<D>; NUM_POSEIDON_SPONGE_COLUMNS] =
            vars.get_next_values().try_into().unwrap();
        let next_values: &PoseidonSpongeColumnsView<ExtensionTarget<D>> = next_values.borrow();
        let one = builder.one_extension();
        // Each flag (full-input block, final block or implied dummy flag) must be boolean.
        let is_full_input_block = local_values.is_full_input_block;
        let constraint = builder.mul_sub_extension(
            is_full_input_block,
            is_full_input_block,
            is_full_input_block,
        );
        yield_constr.constraint(builder, constraint);
        let is_final_block = builder.add_many_extension(local_values.is_final_input_len);
        let constraint = builder.mul_sub_extension(is_final_block, is_final_block, is_final_block);
        yield_constr.constraint(builder, constraint);
        for &is_final_len in local_values.is_final_input_len.iter() {
            let constraint = builder.mul_sub_extension(is_final_len, is_final_len, is_final_len);
            yield_constr.constraint(builder, constraint);
        }
        // Ensure that full-input block and final block flags are not set to 1 at the same time.
        let constraint = builder.mul_extension(is_final_block, is_full_input_block);
        yield_constr.constraint(builder, constraint);
        // If this is the first row, the original sponge state should be 0 and already_absorbed_bytes = 0.
        let already_absorbed_bytes = local_values.already_absorbed_bytes;
        yield_constr.constraint_first_row(builder, already_absorbed_bytes);
        for &original_rate_elem in local_values.original_rate.iter() {
            yield_constr.constraint_first_row(builder, original_rate_elem);
        }
        for &original_capacity_elem in local_values.original_capacity.iter() {
            yield_constr.constraint_first_row(builder, original_capacity_elem);
        }
        // If this is a final block, the next row's original sponge state should be 0 and already_absorbed_bytes = 0.
        let constraint = builder.mul_extension(is_final_block, next_values.already_absorbed_bytes);
        yield_constr.constraint_transition(builder, constraint);
        for &original_rate_elem in next_values.original_rate.iter() {
            let constraint = builder.mul_extension(is_final_block, original_rate_elem);
            yield_constr.constraint_transition(builder, constraint);
        }
        for &original_capacity_elem in next_values.original_capacity.iter() {
            let constraint = builder.mul_extension(is_final_block, original_capacity_elem);
            yield_constr.constraint_transition(builder, constraint);
        }
        // If this is a full-input block, the next row's address, time and len must match as well as its timestamp.
        let context_diff = builder.sub_extension(local_values.context, next_values.context);
        let constraint = builder.mul_extension(is_full_input_block, context_diff);
        yield_constr.constraint_transition(builder, constraint);
        let segment_diff = builder.sub_extension(local_values.segment, next_values.segment);
        let constraint = builder.mul_extension(is_full_input_block, segment_diff);
        yield_constr.constraint_transition(builder, constraint);
        let timestamp_diff = builder.sub_extension(local_values.timestamp, next_values.timestamp);
        let constraint = builder.mul_extension(is_full_input_block, timestamp_diff);
        yield_constr.constraint_transition(builder, constraint);
        // If this is a full-input block, the next row's "before" should match our "after" state.
        for (current_after, next_before) in local_values
            .updated_digest_state
            .iter()
            .zip_eq(&next_values.original_rate[..POSEIDON_DIGEST])
        {
            let diff = builder.sub_extension(*next_before, *current_after);
            let constraint = builder.mul_extension(is_full_input_block, diff);
            yield_constr.constraint_transition(builder, constraint);
        }
        // Fixed: same `&current_after` HTML-entity corruption as in the
        // packed evaluator above.
        for (&current_after, &next_before) in local_values
            .partial_updated_state
            .iter()
            .zip(next_values.original_rate[POSEIDON_DIGEST..].iter())
        {
            let diff = builder.sub_extension(next_before, current_after);
            let constraint = builder.mul_extension(is_full_input_block, diff);
            yield_constr.constraint_transition(builder, constraint);
        }
        for (&current_after, &next_before) in local_values
            .partial_updated_state
            .iter()
            .skip(SPONGE_RATE - POSEIDON_DIGEST)
            .zip(next_values.original_capacity.iter())
        {
            let diff = builder.sub_extension(next_before, current_after);
            let constraint = builder.mul_extension(is_full_input_block, diff);
            yield_constr.constraint_transition(builder, constraint);
        }
        // If this is a full-input block, the next row's already_absorbed_bytes should be ours plus `POSEIDON_RATE_BYTES`.
        let absorbed_bytes = builder.add_const_extension(
            already_absorbed_bytes,
            F::from_canonical_usize(POSEIDON_RATE_BYTES),
        );
        let absorbed_diff =
            builder.sub_extension(absorbed_bytes, next_values.already_absorbed_bytes);
        let constraint = builder.mul_extension(is_full_input_block, absorbed_diff);
        yield_constr.constraint_transition(builder, constraint);
        // A dummy row is always followed by another dummy row, so the prover can't put dummy rows "in between" to avoid the above checks.
        let is_dummy = {
            let tmp = builder.sub_extension(one, is_final_block);
            builder.sub_extension(tmp, is_full_input_block)
        };
        let next_is_final_block = builder.add_many_extension(next_values.is_final_input_len);
        let constraint = {
            let tmp = builder.add_extension(next_is_final_block, next_values.is_full_input_block);
            builder.mul_extension(is_dummy, tmp)
        };
        yield_constr.constraint_transition(builder, constraint);
        // If this is a final block, is_final_input_len implies `len - already_absorbed == i`.
        let offset = builder.sub_extension(local_values.len, already_absorbed_bytes);
        for (i, &is_final_len) in local_values.is_final_input_len.iter().enumerate() {
            let index = builder.constant_extension(F::from_canonical_usize(i).into());
            let entry_match = builder.sub_extension(offset, index);
            let constraint = builder.mul_extension(is_final_len, entry_match);
            yield_constr.constraint(builder, constraint);
        }
    }
    /// Maximum degree of any constraint emitted above.
    fn constraint_degree(&self) -> usize {
        3
    }
}
#[cfg(test)]
mod tests {
    use std::borrow::Borrow;
    use anyhow::Result;
    use itertools::Itertools;
    use plonky2::field::goldilocks_field::GoldilocksField;
    use plonky2::field::types::PrimeField64;
    use plonky2::plonk::config::{GenericConfig, PoseidonGoldilocksConfig};
    use crate::memory::segments::Segment;
    use crate::poseidon_sponge::columns::PoseidonSpongeColumnsView;
    use crate::poseidon_sponge::poseidon_sponge_stark::{
        poseidon, PoseidonSpongeOp, PoseidonSpongeStark,
    };
    use crate::stark_testing::{test_stark_circuit_constraints, test_stark_low_degree};
    use crate::witness::memory::MemoryAddress;
    // Checks the constraints stay within the declared degree bound.
    #[test]
    fn test_stark_degree() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = PoseidonSpongeStark<F, D>;
        let stark = S::default();
        test_stark_low_degree(stark)
    }
    // Checks the recursive circuit agrees with the packed evaluator.
    #[test]
    fn test_stark_circuit() -> Result<()> {
        const D: usize = 2;
        type C = PoseidonGoldilocksConfig;
        type F = <C as GenericConfig<D>>::F;
        type S = PoseidonSpongeStark<F, D>;
        let stark = S::default();
        test_stark_circuit_constraints::<F, C, S, D>(stark)
    }
    // Witness generation for a short input must yield a single final row
    // whose digest matches the reference `poseidon` hash.
    #[test]
    fn test_generation() -> Result<()> {
        const D: usize = 2;
        type F = GoldilocksField;
        type S = PoseidonSpongeStark<F, D>;
        let input = vec![1, 2, 3];
        let expected_output = poseidon::<F>(&input);
        let op = PoseidonSpongeOp {
            base_address: vec![MemoryAddress {
                context: 0,
                segment: Segment::Code as usize,
                virt: 0,
            }],
            timestamp: 0,
            input,
        };
        let stark = S::default();
        let rows = stark.generate_rows_for_op(&op);
        // 3 bytes < one rate block, so only the final row is produced.
        assert_eq!(rows.len(), 1);
        let last_row: &PoseidonSpongeColumnsView<F> = rows.last().unwrap().borrow();
        let output = last_row
            .updated_digest_state
            .iter()
            .map(|x| x.to_canonical_u64())
            .collect_vec();
        assert_eq!(output, expected_output);
        Ok(())
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/arithmetic/slt.rs | prover/src/arithmetic/slt.rs | use crate::arithmetic::columns::*;
use crate::arithmetic::utils::u32_to_array;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
/// Generate row for SLT operations.
///
/// Fills the input, aux and output registers of `lv` for a set-less-than
/// operation; `rd` holds the comparison result computed by the CPU.
pub(crate) fn generate<F: PrimeField64>(
    lv: &mut [F],
    filter: usize,
    left_in: u32,
    right_in: u32,
    rd: u32,
) {
    u32_to_array(&mut lv[INPUT_REGISTER_0], left_in);
    u32_to_array(&mut lv[INPUT_REGISTER_1], right_in);
    u32_to_array(&mut lv[INPUT_REGISTER_2], 0);
    match filter {
        IS_SLT | IS_SLTI => {
            // Signed comparison: start from the unsigned borrow of
            // `left - right`.
            let (diff, cy) = left_in.overflowing_sub(right_in);
            let mut cy_val = cy as u32;
            if (left_in & 0x80000000u32) != (right_in & 0x80000000u32) {
                // Operands have different signs, so the unsigned borrow is
                // inverted; the extra `1 << 16` lands in the second 16-bit
                // aux limb (LIMB_BITS = 16), marking the sign-mismatch case
                // for the constraints in `eval_packed_generic_slt`.
                cy_val = (1u32 << 16) | (!cy as u32);
            }
            u32_to_array(&mut lv[AUX_INPUT_REGISTER_0], diff);
            u32_to_array(&mut lv[AUX_INPUT_REGISTER_1], cy_val);
            u32_to_array(&mut lv[OUTPUT_REGISTER], rd);
        }
        IS_SLTU | IS_SLTIU => {
            // Unsigned comparison: the borrow bit is the result itself.
            let (diff, cy) = left_in.overflowing_sub(right_in);
            u32_to_array(&mut lv[AUX_INPUT_REGISTER_0], diff);
            u32_to_array(&mut lv[AUX_INPUT_REGISTER_1], cy as u32);
            u32_to_array(&mut lv[OUTPUT_REGISTER], rd);
        }
        _ => panic!("unexpected operation filter"),
    };
}
/// 2^-16 mod (2^64 - 2^32 + 1)
/// (the multiplicative inverse of 2^LIMB_BITS in the Goldilocks field; used
/// to divide out limb overflows without increasing constraint degree).
const GOLDILOCKS_INVERSE_65536: u64 = 18446462594437939201;
/// Evaluates the packed constraints for all four SLT variants by wiring
/// the relevant register slices into `eval_packed_generic_slt`.
pub fn eval_packed_generic<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // Active for any of SLT/SLTU/SLTI/SLTIU; `is_sign` selects the two
    // signed variants.
    let reg_ops = lv[IS_SLT] + lv[IS_SLTU];
    let imm_ops = lv[IS_SLTI] + lv[IS_SLTIU];
    let filter = reg_ops + imm_ops;
    let sign = lv[IS_SLT] + lv[IS_SLTI];
    eval_packed_generic_slt(
        yield_constr,
        filter,
        sign,
        &lv[INPUT_REGISTER_1],     // right operand
        &lv[AUX_INPUT_REGISTER_0], // left - right
        &lv[INPUT_REGISTER_0],     // left operand
        &lv[AUX_INPUT_REGISTER_1], // carry/borrow limbs
        &lv[OUTPUT_REGISTER],      // rd
    );
}
/// Core packed constraints for SLT: verifies limb-wise that `x + y == z`
/// (i.e. `right + diff == left`) with 16-bit carries, and that the exposed
/// carry/borrow limbs and `rd` encode the comparison result.
pub(crate) fn eval_packed_generic_slt<P: PackedField>(
    yield_constr: &mut ConstraintConsumer<P>,
    filter: P,
    sign: P,
    x: &[P],        // right
    y: &[P],        // diff (left-right)
    z: &[P],        // left
    given_cy: &[P], // out
    rd: &[P],       // rd
) {
    debug_assert!(
        x.len() == N_LIMBS && y.len() == N_LIMBS && z.len() == N_LIMBS && given_cy.len() == N_LIMBS
    );
    let overflow = P::Scalar::from_canonical_u64(1u64 << LIMB_BITS);
    let overflow_inv = P::Scalar::from_canonical_u64(GOLDILOCKS_INVERSE_65536);
    debug_assert!(
        overflow * overflow_inv == P::Scalar::ONE,
        "only works with LIMB_BITS=16 and F=Goldilocks"
    );
    let mut cy = P::ZEROS;
    for ((&xi, &yi), &zi) in x.iter().zip_eq(y).zip_eq(z) {
        // Verify that (xi + yi) - zi is either 0 or 2^LIMB_BITS (right[i]+aux[i]-left[i])
        let t = cy + xi + yi - zi;
        yield_constr.constraint(filter * t * (overflow - t));
        // cy <-- 0 or 1 le:cy=0 gt:cy=1
        // NB: this is multiplication by a constant, so doesn't
        // increase the degree of the constraint.
        cy = t * overflow_inv; // (right[i]+aux[i]-left[i])/overflow
    }
    {
        // given_cy[0] must be boolean (it is the comparison result bit).
        yield_constr.constraint(filter * given_cy[0] * (given_cy[0] - P::ONES));
        // Unsigned case (sign = 0): result equals the final borrow.
        yield_constr.constraint(filter * (cy - given_cy[0]) * (P::ONES - sign));
        // Sign-mismatch marker limb (see `generate`): when set, the result
        // must be the inverted borrow.
        yield_constr.constraint(filter * given_cy[1] * (P::ONES - cy - given_cy[0]));
        // rd mirrors the result bit in its low limb; high limbs are zero.
        yield_constr.constraint_transition(filter * (rd[0] - given_cy[0]));
        for i in 1..N_LIMBS {
            // For unsigned ops the higher carry limbs must be zero.
            yield_constr.constraint(filter * given_cy[i] * (P::ONES - sign));
            yield_constr.constraint_transition(filter * rd[i]);
        }
    }
}
/// Recursive-circuit counterpart of `eval_packed_generic`: builds the same
/// filter/sign sums with builder gates and delegates to
/// `eval_ext_circuit_slt`.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    // Same gate order as the packed evaluator: (SLT+SLTU) + (SLTI+SLTIU).
    let reg_sum = builder.add_extension(lv[IS_SLT], lv[IS_SLTU]);
    let imm_sum = builder.add_extension(lv[IS_SLTI], lv[IS_SLTIU]);
    let filter = builder.add_extension(reg_sum, imm_sum);
    let sign = builder.add_extension(lv[IS_SLT], lv[IS_SLTI]);
    eval_ext_circuit_slt(
        builder,
        yield_constr,
        filter,
        sign,
        &lv[INPUT_REGISTER_1],     // right operand
        &lv[AUX_INPUT_REGISTER_0], // left - right
        &lv[INPUT_REGISTER_0],     // left operand
        &lv[AUX_INPUT_REGISTER_1], // carry/borrow limbs
        &lv[OUTPUT_REGISTER],      // rd
    );
}
#[allow(clippy::needless_collect)]
/// Circuit version of `eval_packed_generic_slt`; must emit exactly the
/// same constraints as the packed evaluator.
pub(crate) fn eval_ext_circuit_slt<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    filter: ExtensionTarget<D>,
    sign: ExtensionTarget<D>,
    x: &[ExtensionTarget<D>],
    y: &[ExtensionTarget<D>],
    z: &[ExtensionTarget<D>],
    given_cy: &[ExtensionTarget<D>],
    rd: &[ExtensionTarget<D>],
) {
    debug_assert!(
        x.len() == N_LIMBS && y.len() == N_LIMBS && z.len() == N_LIMBS && given_cy.len() == N_LIMBS
    );
    // 2^LIMB_BITS in the base field
    let overflow_base = F::from_canonical_u64(1 << LIMB_BITS);
    // 2^LIMB_BITS in the extension field as an ExtensionTarget
    let overflow = builder.constant_extension(F::Extension::from(overflow_base));
    // 2^-LIMB_BITS in the base field.
    let overflow_inv = F::from_canonical_u64(GOLDILOCKS_INVERSE_65536);
    let mut cy = builder.zero_extension();
    let one = builder.one_extension();
    let not_sign = builder.sub_extension(one, sign);
    // Limb-wise check that (cy + xi + yi - zi) is 0 or 2^LIMB_BITS,
    // carrying the normalized value into the next limb.
    for ((&xi, &yi), &zi) in x.iter().zip_eq(y).zip_eq(z) {
        // t0 = cy + xi + yi
        let t0 = builder.add_many_extension([cy, xi, yi]);
        // t = t0 - zi
        let t = builder.sub_extension(t0, zi);
        // t1 = overflow - t
        let t1 = builder.sub_extension(overflow, t);
        // t2 = t * t1
        let t2 = builder.mul_extension(t, t1);
        let filtered_limb_constraint = builder.mul_extension(filter, t2);
        yield_constr.constraint(builder, filtered_limb_constraint);
        cy = builder.mul_const_extension(overflow_inv, t);
    }
    // Unsigned case: result bit equals the final borrow.
    let good_cy1 = builder.sub_extension(cy, given_cy[0]);
    let cy_filter1 = builder.mul_extension(good_cy1, not_sign);
    let cy_filter1 = builder.mul_extension(filter, cy_filter1);
    // Sign-mismatch marker limb: result must be the inverted borrow.
    let good_cy2 = builder.sub_extension(one, cy);
    let good_cy2 = builder.sub_extension(good_cy2, given_cy[0]);
    let cy_filter2 = builder.mul_extension(given_cy[1], good_cy2);
    let cy_filter2 = builder.mul_extension(filter, cy_filter2);
    // Check given carry is one bit
    let bit_constr = builder.mul_sub_extension(given_cy[0], given_cy[0], given_cy[0]);
    let bit_filter = builder.mul_extension(filter, bit_constr);
    {
        yield_constr.constraint(builder, bit_filter);
        yield_constr.constraint(builder, cy_filter1);
        yield_constr.constraint(builder, cy_filter2);
        // rd mirrors the result bit in its low limb; high limbs are zero.
        let rd_filter = builder.sub_extension(rd[0], given_cy[0]);
        let rd_filter = builder.mul_extension(filter, rd_filter);
        yield_constr.constraint_transition(builder, rd_filter);
        for i in 1..N_LIMBS {
            let t = builder.mul_extension(filter, given_cy[i]);
            let t = builder.mul_extension(t, not_sign);
            yield_constr.constraint(builder, t);
            let r = builder.mul_extension(filter, rd[i]);
            yield_constr.constraint_transition(builder, r);
        }
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/arithmetic/sra.rs | prover/src/arithmetic/sra.rs | //! Support for the MIPS SRA(V) instructions.
//!
//! This crate verifies an MIPS shift instruction, which takes two
//! 32-bit inputs S and A, and produces a 32-bit output C satisfying
//!
//! C = A >> S (mod 2^32)
use itertools::Itertools;
use plonky2::field::extension::Extendable;
use plonky2::field::interpolation::interpolant;
use plonky2::field::packed::PackedField;
use plonky2::field::polynomial::PolynomialCoeffs;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::arithmetic::columns::*;
use crate::arithmetic::div::{
eval_ext_circuit_divmod_helper, eval_packed_div_helper, generate_divu_helper,
};
use crate::arithmetic::utils::{read_value, u32_to_array};
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
/// Generates a shift operation (SRA(V).
///
/// The inputs are stored in the form `(shift, input, 1 >> shift)`.
/// NB: if `shift >= 32`, then the third register holds 0.
/// We leverage the functions in div.rs to carry out
/// the computation.
/// NOTE(review): several expressions below (`input >> shift`,
/// `1 << shift`, `shift * shift`) assume `shift < 32`; presumably callers
/// guarantee this (the MIPS shift amount is 5 bits) — confirm.
pub fn generate<F: PrimeField64>(
    lv: &mut [F],
    nv: &mut [F],
    filter: usize,
    shift: u32,
    input: u32,
    result: u32,
) {
    // We use the multiplication logic to generate SLL(V)
    // TODO: It would probably be clearer/cleaner to read the U32
    // into an [i64;N] and then copy that to the lv table.
    // The first input is the shift we need to apply.
    u32_to_array(&mut lv[INPUT_REGISTER_0], shift);
    // The second register holds the input which needs shifting.
    u32_to_array(&mut lv[INPUT_REGISTER_1], input);
    u32_to_array(&mut lv[OUTPUT_REGISTER], result);
    // Compute 1 << shift and store it in the third input register.
    let shifted_displacement = 1u32 << (shift & 0x1F);
    u32_to_array(&mut lv[INPUT_REGISTER_2], shifted_displacement);
    // input >> shift
    u32_to_array(&mut lv[AUX_INPUT_REGISTER_2], input >> shift);
    // Set lv[AUX_INPUT_REGISTER_2.end] = (input_high_16 + 2^15) % 2^16
    lv[AUX_INPUT_REGISTER_2.end] = F::from_canonical_u32((input >> 16) ^ 0x8000);
    // Set lv[AUX_INPUT_REGISTER_2.end+1] = 1 if neg otherwise 0.
    lv[AUX_INPUT_REGISTER_2.end + 1] = F::from_canonical_u32(input >> 31);
    // set aux data in lv[SRA_EXTRA] and nv[SRA_EXTRA]
    // We do not check if shift < 32.
    let aux_data = eval_aux_sign_extend(F::from_canonical_u32(shift));
    lv[AUX_EXTRA].copy_from_slice(&aux_data[..8]);
    nv[AUX_EXTRA].copy_from_slice(&aux_data[8..]);
    // This equals to nv[SRA_EXTRA.end-1]
    // Mask of `shift` high bits: the bits filled in by sign extension.
    u32_to_array(
        &mut nv[AUX_INPUT_REGISTER_2],
        ((1 << shift) - 1) << ((32 - shift) % 32),
    );
    // shift * shift
    nv[AUX_INPUT_REGISTER_2.end] = F::from_canonical_u32(shift * shift);
    match filter {
        IS_SRA | IS_SRAV => {
            // Delegate the `input / 2^shift` part to the division helper.
            generate_divu_helper(
                lv,
                nv,
                filter,
                INPUT_REGISTER_1,
                INPUT_REGISTER_2,
                AUX_INPUT_REGISTER_2,
                None,
            );
        }
        _ => panic!("expected filter to be IS_SRA(V), but it was {filter}"),
    }
}
/// Evaluates the constraints for an SRA(V) opcode.
/// We use div and add to impl the opcode.
pub fn eval_packed_generic<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    nv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let filter = lv[IS_SRA] + lv[IS_SRAV];
    let shift = &lv[INPUT_REGISTER_0];
    // The high limbs should be 0 (the shift amount fits in one 16-bit limb).
    for i in shift.iter().skip(1) {
        yield_constr.constraint_transition(filter * *i);
    }
    // check is_neg is bool
    let is_neg = lv[AUX_INPUT_REGISTER_2.end + 1];
    yield_constr.constraint_transition(filter * is_neg * (P::ONES - is_neg));
    // check input is negative or not. We just check the most significant bit in significant limb:
    // input_hi + 2^15 == sum + is_neg * 2^16, where `sum` was generated as the
    // high limb with its top bit flipped, i.e. (input_hi + 2^15) mod 2^16.
    let over_flow = P::Scalar::from_canonical_u64(1 << LIMB_BITS);
    let add = P::Scalar::from_canonical_u64(1 << (LIMB_BITS - 1));
    let sum = lv[AUX_INPUT_REGISTER_2.end];
    let input_hi = lv[INPUT_REGISTER_1.end - 1];
    yield_constr.constraint_transition(filter * (input_hi + add - sum - is_neg * over_flow));
    // shift_sq == shift * shift
    let shift_sq = nv[AUX_INPUT_REGISTER_2.end];
    yield_constr.constraint_transition(filter * (shift_sq - shift[0] * shift[0]));
    // Compute the added number if negative: Horner-evaluate the sign-extension
    // polynomial two coefficients at a time, checking each intermediate
    // accumulator against the witnessed values in lv/nv[AUX_EXTRA].
    let intermediate1 = lv[AUX_EXTRA].to_vec();
    let intermediate2 = nv[AUX_EXTRA].to_vec();
    let mut coeffs = sign_extend_poly::<P::Scalar>().coeffs;
    coeffs.reverse();
    let mut acc = P::ZEROS;
    for (w, j) in intermediate1
        .into_iter()
        .chain(intermediate2.into_iter())
        .zip(coeffs.chunks(2))
    {
        // w == acc * shift^2 + j[0] * shift + j[1]; the coefficients were
        // reversed, so j[0] is the higher-degree coefficient of the pair.
        yield_constr.constraint_transition(filter * (acc * shift_sq + j[0] * shift[0] + j[1] - w));
        acc = w;
    }
    // acc == nv[AUX_INPUT_REGISTER_2]: the final accumulator must equal the
    // two 16-bit limbs of the witnessed sign-extension mask.
    let acc_lo = nv[AUX_INPUT_REGISTER_2.start];
    let acc_hi = nv[AUX_INPUT_REGISTER_2.start + 1];
    yield_constr.constraint_transition(filter * (acc_hi * over_flow + acc_lo - acc));
    // check input >> shift == lv[AUX_INPUT_REGISTER_2]
    eval_packed_div_helper(
        lv,
        nv,
        yield_constr,
        filter,
        INPUT_REGISTER_1,
        INPUT_REGISTER_2,
        AUX_INPUT_REGISTER_2,
        AUX_INPUT_REGISTER_0,
    );
    // next will check lv[AUX_INPUT_REGISTER_2] + is_neg * nv[AUX_INPUT_REGISTER_2] == lv[OUTPUT_REGISTER]
    // There is not overflow for each added limb
    let logic_shifted_input = &lv[AUX_INPUT_REGISTER_2];
    let output = &lv[OUTPUT_REGISTER];
    for (x, (y, z)) in logic_shifted_input
        .iter()
        .zip(([acc_lo, acc_hi].iter()).zip(output.iter()))
    {
        yield_constr.constraint_transition(filter * (*x + *y * is_neg - *z));
    }
}
/// Recursive-circuit counterpart of `eval_packed_generic`: encodes the same
/// SRA(V) constraints as plonky2 gates.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let filter = builder.add_extension(lv[IS_SRA], lv[IS_SRAV]);
    let shift: [ExtensionTarget<D>; N_LIMBS] = read_value(lv, INPUT_REGISTER_0);
    // The high limbs should be 0 (the shift amount fits in one 16-bit limb).
    for i in shift.iter().skip(1) {
        let t = builder.mul_extension(filter, *i);
        yield_constr.constraint_transition(builder, t);
    }
    // check is_neg is bool
    let is_neg = lv[AUX_INPUT_REGISTER_2.end + 1];
    {
        let one = builder.one_extension();
        let t = builder.sub_extension(one, is_neg);
        let multi_t = builder.mul_many_extension([filter, is_neg, t]);
        yield_constr.constraint_transition(builder, multi_t);
    }
    // check input is negative or not. We just check the most significant bit in significant limb
    let over_flow = builder.constant_extension(F::Extension::from_canonical_u64(1 << LIMB_BITS));
    {
        let add =
            builder.constant_extension(F::Extension::from_canonical_u64(1 << (LIMB_BITS - 1)));
        let sum = lv[AUX_INPUT_REGISTER_2.end];
        let input_hi = lv[INPUT_REGISTER_1.end - 1];
        let t0 = builder.add_extension(input_hi, add);
        let t1 = builder.sub_extension(t0, sum);
        let t2 = builder.mul_extension(over_flow, is_neg);
        let t3 = builder.sub_extension(t1, t2);
        let t = builder.mul_extension(filter, t3);
        yield_constr.constraint_transition(builder, t); //filter * (input_hi + add - sum - is_neg * over_flow
    }
    // shift_sq == shift * shift
    let shift_sq = nv[AUX_INPUT_REGISTER_2.end];
    let sq = builder.square_extension(shift[0]);
    let t0 = builder.sub_extension(shift_sq, sq);
    let t = builder.mul_extension(filter, t0);
    yield_constr.constraint_transition(builder, t);
    // Compute the added number if negative: pairwise Horner evaluation of the
    // sign-extension polynomial, checked against the witnessed accumulators.
    let mut acc = builder.zero_extension();
    {
        let intermediate1 = lv[AUX_EXTRA].to_vec();
        let intermediate2 = nv[AUX_EXTRA].to_vec();
        let coeffs = sign_extend_poly::<F>()
            .coeffs
            .into_iter()
            .map(|c| F::Extension::from(c))
            .map(|c| builder.constant_extension(c))
            .rev()
            .collect_vec();
        for (w, j) in intermediate1
            .into_iter()
            .chain(intermediate2.into_iter())
            .zip(coeffs.chunks(2))
        {
            // w == acc * shift^2 + j[0] * shift + j[1]
            let t0 = builder.wide_arithmetic_extension(acc, shift_sq, j[0], shift[0], j[1]);
            let t = builder.sub_extension(t0, w);
            let constr = builder.mul_extension(filter, t);
            yield_constr.constraint_transition(builder, constr);
            acc = w;
        }
    }
    // acc == nv[AUX_INPUT_REGISTER_2] (recombined from its two 16-bit limbs)
    let acc_lo = nv[AUX_INPUT_REGISTER_2.start];
    let acc_hi = nv[AUX_INPUT_REGISTER_2.start + 1];
    {
        let t0 = builder.sub_extension(acc_lo, acc);
        let t1 = builder.mul_add_extension(over_flow, acc_hi, t0);
        let t = builder.mul_extension(filter, t1);
        yield_constr.constraint_transition(builder, t);
    }
    // check input >> shift == lv[AUX_INPUT_REGISTER_2]
    eval_ext_circuit_divmod_helper(
        builder,
        lv,
        nv,
        yield_constr,
        filter,
        INPUT_REGISTER_1,
        INPUT_REGISTER_2,
        AUX_INPUT_REGISTER_2,
        AUX_INPUT_REGISTER_0,
    );
    // next will check lv[AUX_INPUT_REGISTER_2] + is_neg * nv[AUX_INPUT_REGISTER_2] == lv[OUTPUT_REGISTER]
    // There is not overflow for each added limb
    let logic_shifted_input = &lv[AUX_INPUT_REGISTER_2];
    let output = &lv[OUTPUT_REGISTER];
    for (x, (y, z)) in logic_shifted_input
        .iter()
        .zip(([acc_lo, acc_hi].iter()).zip(output.iter()))
    {
        let t0 = builder.sub_extension(*x, *z);
        let t1 = builder.mul_add_extension(*y, is_neg, t0);
        let t = builder.mul_extension(filter, t1);
        yield_constr.constraint_transition(builder, t);
    }
}
/// Compute the interpolating polynomial f through the points
/// (0, 0), (1, 2^31), (2, 2^31 + 2^30), ..., (31, 2^31 + 2^30 + ... + 2^1),
/// i.e. f(s) is the 32-bit mask with the top `s` bits set.
fn sign_extend_poly<F: Field>() -> PolynomialCoeffs<F> {
    let mut running = 0u64;
    let points: Vec<(F, F)> = (0u64..32)
        .map(|i| {
            if i > 0 {
                running += 1u64 << (32 - i);
            }
            (F::from_canonical_u64(i), F::from_canonical_u64(running))
        })
        .collect();
    interpolant(&points)
}
/// Evaluate `poly` at `x`, returning every intermediate accumulator of a
/// Horner-style evaluation that consumes the coefficients two at a time
/// (highest degree first). The final entry equals `poly.eval(x)`.
fn eval_poly<F: Field>(poly: PolynomialCoeffs<F>, x: F) -> Vec<F> {
    debug_assert_eq!(poly.len() % 2, 0);
    let expected = poly.eval(x);
    let mut partials = Vec::with_capacity(poly.len() / 2);
    let mut acc = F::ZERO;
    for pair in poly.coeffs.chunks(2).rev() {
        // acc <- acc * x^2 + c_hi * x + c_lo for the pair (c_lo, c_hi).
        for &coeff in pair.iter().rev() {
            acc = acc * x + coeff;
        }
        partials.push(acc);
    }
    debug_assert_eq!(partials.len(), poly.len() / 2);
    debug_assert_eq!(expected, partials[poly.len() / 2 - 1]);
    partials
}
/// Partial Horner accumulators of the sign-extension polynomial at `x`.
fn eval_aux_sign_extend<F: Field>(x: F) -> Vec<F> {
    eval_poly(sign_extend_poly::<F>(), x)
}
#[cfg(test)]
mod tests {
    use crate::arithmetic::columns::{IS_SRA, IS_SRAV, NUM_ARITH_COLUMNS};
    use crate::arithmetic::sra::{eval_packed_generic, eval_poly, generate, sign_extend_poly};
    use crate::constraint_consumer::ConstraintConsumer;
    use plonky2::field::goldilocks_field::GoldilocksField;
    use plonky2::field::types::{Field, Sample};
    use rand::{Rng, SeedableRng};
    use rand_chacha::ChaCha8Rng;
    const N_RND_TESTS: usize = 1000;
    const SRA_OPS: [usize; 2] = [IS_SRA, IS_SRAV];
    /// Pin the partial Horner accumulators of the sign-extension polynomial
    /// at x = 2 against precomputed Goldilocks values.
    #[test]
    fn test_poly() {
        type F = GoldilocksField;
        let x = F::from_canonical_u64(2);
        let poly = sign_extend_poly();
        println!("{:?}", poly);
        let res = eval_poly(poly, x);
        // Expected accumulators; the last equals poly.eval(2) = 3221225472 = 0xC0000000.
        let expected = [
            18260604987135149276u64,
            6641582332263005918,
            4185170977706284464,
            8069729718270694767,
            2720953444603644942,
            9143808191498674830,
            14156617978482227317,
            2619661922624664514,
            9865344867852737688,
            7289981648341815148,
            14234318509450809877,
            15083771169118776894,
            2211192019722872880,
            5624745679944178802,
            15168639727975586488,
            3221225472,
        ]
        .map(F::from_noncanonical_u64);
        assert_eq!(expected.to_vec(), res);
    }
    #[test]
    fn generate_eval_consistency_not_sra() {
        type F = GoldilocksField;
        let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
        let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
        let nv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
        // if `IS_SRA, IS_SRAV == 0`, then the constraints should be met even
        // if all values are garbage.
        lv[IS_SRA] = F::ZERO;
        lv[IS_SRAV] = F::ZERO;
        let mut constraint_consumer = ConstraintConsumer::new(
            vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
            GoldilocksField::ONE,
            GoldilocksField::ONE,
            GoldilocksField::ONE,
        );
        eval_packed_generic(&lv, &nv, &mut constraint_consumer);
        for &acc in &constraint_consumer.constraint_accs {
            assert_eq!(acc, GoldilocksField::ZERO);
        }
    }
    /// Random SRA(V) rows generated by `generate` must satisfy every
    /// packed constraint.
    #[test]
    fn generate_eval_consistency() {
        type F = GoldilocksField;
        let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
        for op_filter in SRA_OPS {
            for _ in 0..N_RND_TESTS {
                // set inputs to random values
                let mut lv = [F::default(); NUM_ARITH_COLUMNS]
                    .map(|_| F::from_canonical_u16(rng.gen::<u16>()));
                let mut nv = [F::default(); NUM_ARITH_COLUMNS]
                    .map(|_| F::from_canonical_u16(rng.gen::<u16>()));
                // Reset operation columns, then select one
                for op in SRA_OPS {
                    lv[op] = F::ZERO;
                }
                lv[op_filter] = F::ONE;
                let input0: u32 = rng.gen();
                let input1: u32 = rng.gen_range(0..32);
                // Reference result: arithmetic (sign-preserving) shift right.
                let result = ((input0 as i32) >> input1) as u32;
                generate(&mut lv, &mut nv, op_filter, input1, input0, result);
                let mut constraint_consumer = ConstraintConsumer::new(
                    vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
                    GoldilocksField::ONE,
                    GoldilocksField::ZERO,
                    GoldilocksField::ZERO,
                );
                eval_packed_generic(&lv, &nv, &mut constraint_consumer);
                for &acc in &constraint_consumer.constraint_accs {
                    assert_eq!(acc, GoldilocksField::ZERO);
                }
            }
        }
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/arithmetic/lo_hi.rs | prover/src/arithmetic/lo_hi.rs | //! Support for MIPS instructions MFHI, MTHI, MFLO, MTLO
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::PrimeField64;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::arithmetic::columns::*;
use crate::arithmetic::utils::u32_to_array;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
/// Generate row for MFHI, MTHI, MFLO, and MTLO operations.
///
/// Writes `input` to the input register and `result` to the output register;
/// panics if `filter` is not one of the four LO/HI move filters.
pub(crate) fn generate<F: PrimeField64>(lv: &mut [F], filter: usize, input: u32, result: u32) {
    u32_to_array(&mut lv[INPUT_REGISTER_0], input);
    assert!(
        matches!(filter, IS_MFHI | IS_MTHI | IS_MFLO | IS_MTLO),
        "unexpected operation filter"
    );
    u32_to_array(&mut lv[OUTPUT_REGISTER], result);
}
/// Packed constraints for the LO/HI moves: under any of the four filters,
/// every output limb must equal the corresponding input limb.
pub fn eval_packed_generic<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // Active whenever any of the four LO/HI move operations is selected.
    let filter = [IS_MFHI, IS_MTHI, IS_MFLO, IS_MTLO]
        .iter()
        .fold(P::ZEROS, |acc, &op| acc + lv[op]);
    for (&inp, &out) in lv[INPUT_REGISTER_0].iter().zip(lv[OUTPUT_REGISTER].iter()) {
        yield_constr.constraint(filter * (inp - out));
    }
}
/// Recursive-circuit counterpart of `eval_packed_generic` for the LO/HI moves.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    // Filter selecting any of the four LO/HI move operations.
    let filter = builder.add_many_extension([lv[IS_MFHI], lv[IS_MTHI], lv[IS_MFLO], lv[IS_MTLO]]);
    // Constrain every output limb to equal the matching input limb.
    for (&inp, &out) in lv[INPUT_REGISTER_0].iter().zip(lv[OUTPUT_REGISTER].iter()) {
        let diff = builder.sub_extension(inp, out);
        let constr = builder.mul_extension(filter, diff);
        yield_constr.constraint(builder, constr);
    }
}
#[cfg(test)]
mod tests {
    use plonky2::field::goldilocks_field::GoldilocksField;
    use plonky2::field::types::{Field, Sample};
    use rand::{Rng, SeedableRng};
    use rand_chacha::ChaCha8Rng;
    use super::*;
    /// Filter columns for the four LO/HI move operations under test.
    const OPS: [usize; 4] = [IS_MFHI, IS_MTHI, IS_MFLO, IS_MTLO];
    #[test]
    fn generate_eval_consistency_not_lo_hi() {
        type F = GoldilocksField;
        let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
        let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
        // if the operation filters are all zero, then the constraints
        // should be met even if all values are garbage.
        OPS.map(|i| lv[i] = F::ZERO);
        let mut constraint_consumer = ConstraintConsumer::new(
            vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
            F::ONE,
            F::ONE,
            F::ONE,
        );
        eval_packed_generic(&lv, &mut constraint_consumer);
        for &acc in &constraint_consumer.constraint_accs {
            assert_eq!(acc, F::ZERO);
        }
    }
    /// Rows produced by `generate` must satisfy all packed constraints.
    /// (Renamed from `generate_eval_consistency_addcy`, a stale copy-paste
    /// name from the addcy module.)
    #[test]
    fn generate_eval_consistency_lo_hi() {
        type F = GoldilocksField;
        let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
        const N_ITERS: usize = 1000;
        for _ in 0..N_ITERS {
            for op_filter in OPS {
                // set entire row to random 16-bit values
                let mut lv = [F::default(); NUM_ARITH_COLUMNS]
                    .map(|_| F::from_canonical_u16(rng.gen::<u16>()));
                // set operation filter and ensure all constraints are
                // satisfied. We have to explicitly set the other
                // operation filters to zero since all are treated by
                // the call.
                OPS.map(|i| lv[i] = F::ZERO);
                lv[op_filter] = F::ONE;
                let input = rng.gen::<u32>();
                generate(&mut lv, op_filter, input, input);
                let mut constraint_consumer = ConstraintConsumer::new(
                    vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
                    F::ONE,
                    F::ONE,
                    F::ONE,
                );
                eval_packed_generic(&lv, &mut constraint_consumer);
                for &acc in &constraint_consumer.constraint_accs {
                    assert_eq!(acc, F::ZERO);
                }
                // The output register must hold the limb decomposition of `input`.
                let mut expected_limbs = [F::ZERO; N_LIMBS];
                u32_to_array(&mut expected_limbs, input);
                assert!(expected_limbs
                    .iter()
                    .zip(&lv[OUTPUT_REGISTER])
                    .all(|(x, y)| x == y));
            }
        }
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/arithmetic/mult.rs | prover/src/arithmetic/mult.rs | //! Support for the MIPS MULT/MULTU instruction.
//!
//! This crate verifies an MIPS MULT/MULTU instruction, which takes two
//! 32-bit inputs A and B, and produces two 32-bit output H and L satisfying
//!
//! (H,L)=A*B
//!
//! i.e. H is the higher half of the usual long multiplication
//! A*B and L is the lower half. Inputs A and B, and output H and L, are given as arrays of 16-bit
//! limbs. For example, if the limbs of A are a[0],a[1], then
//!
//! A = \sum_{i=0}^1 a[i] β^i,
//!
//! where β = 2^16 = 2^LIMB_BITS. To verify that A, B and H, L satisfy
//! the equation we proceed as follows. Define
//!
//! a(x) = \sum_{i=0}^1 a[i] x^i
//!
//! (so A = a(β)) and similarly for b(x), h(x) and l(x). Then A*B = (H,L)
//! if and only if the polynomial
//!
//! a(x) * b(x) - [h,l](x)
//!
//! is zero when evaluated at x = β, i.e. it is divisible by (x - β);
//! equivalently, there exists a polynomial s (representing the
//! carries from the long multiplication) such that
//!
//! a(x) * b(x) - [h,l](x) - (x - β) * s(x) == 0
//!
//! In the code below, this "constraint polynomial" is constructed in
//! the variable `constr_poly`. It must be identically zero for the
//! multiplication operation to be verified, or, equivalently, each of
//! its coefficients must be zero. The variable names of the
//! constituent polynomials are (writing N for N_LIMBS=2):
//!
//! a(x) = \sum_{i=0}^{N-1} input0[i] * x^i
//! b(x) = \sum_{i=0}^{N-1} input1[i] * x^i
//! h(x) = \sum_{i=0}^{N-1} output0[i] * x^i
//! l(x) = \sum_{i=0}^{N-1} output1[i] * x^i
//! s(x) = \sum_i^{2N-2} aux[i] * x^i
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::arithmetic::columns::*;
use crate::arithmetic::utils::*;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
/// Given the two limbs of `input0` and `input1`, computes `input0 * input1`
/// and fills the trace row for the variant selected by `filter`.
///
/// Panics with a descriptive message (consistent with the other arithmetic
/// modules) when `filter` is neither `IS_MULT` nor `IS_MULTU`.
pub(crate) fn generate<F: PrimeField64>(lv: &mut [F], filter: usize, input0: u32, input1: u32) {
    u32_to_array(&mut lv[INPUT_REGISTER_0], input0);
    u32_to_array(&mut lv[INPUT_REGISTER_1], input1);
    match filter {
        // Signed multiply reads the raw u32 inputs itself.
        IS_MULT => generate_mult(lv, input0, input1),
        IS_MULTU => {
            // Only the unsigned path consumes the limb decomposition directly,
            // so read it back from the registers just written above.
            let left_in = read_value_i64_limbs(lv, INPUT_REGISTER_0);
            let right_in = read_value_i64_limbs(lv, INPUT_REGISTER_1);
            generate_multu(lv, left_in, right_in);
        }
        _ => panic!("expected filter to be IS_MULT or IS_MULTU, but it was {filter}"),
    }
}
/// Fill the trace row for a signed MULT: record the operand sign bits and
/// the witness data for the sign checks, then multiply the operands
/// sign-extended to 2 * N_LIMBS limbs.
pub(crate) fn generate_mult<F: PrimeField64>(lv: &mut [F], input0: u32, input1: u32) {
    log::debug!("generate_mult");
    let is_input0_neg = (input0 as i32) < 0;
    let is_input1_neg = (input1 as i32) < 0;
    // Boolean sign flags; the evaluator constrains these to be boolean.
    lv[AUX_EXTRA.start] = F::from_bool(is_input0_neg);
    lv[AUX_EXTRA.start + 1] = F::from_bool(is_input1_neg);
    // High limb with its top bit flipped, i.e. (hi + 2^15) mod 2^16; matched
    // by the sign constraint in the evaluator.
    lv[INPUT_REGISTER_2.start] = F::from_canonical_u32((input0 >> LIMB_BITS) ^ 0x8000);
    lv[INPUT_REGISTER_2.start + 1] = F::from_canonical_u32((input1 >> LIMB_BITS) ^ 0x8000);
    // Widen an operand to 2 * N_LIMBS limbs, padding with 0xffff limbs when
    // negative (two's-complement sign extension in 16-bit limbs).
    let sign_extend = |is_neg, range| {
        let input = read_value_i64_limbs::<N_LIMBS, _>(lv, range);
        let pad = [if is_neg { u16::MAX as i64 } else { 0 }; N_LIMBS];
        let mut result = [0; 2 * N_LIMBS];
        result[..N_LIMBS].clone_from_slice(&input);
        result[N_LIMBS..].clone_from_slice(&pad);
        result
    };
    let left_in = sign_extend(is_input0_neg, INPUT_REGISTER_0);
    let right_in = sign_extend(is_input1_neg, INPUT_REGISTER_1);
    generate_mult_helper(lv, left_in, right_in);
}
/// Fill the trace row for an unsigned MULTU: zero-extend both operands to
/// 2 * N_LIMBS limbs and run the shared multiplication generator.
pub(crate) fn generate_multu<F: PrimeField64>(
    lv: &mut [F],
    input0: [i64; N_LIMBS],
    input1: [i64; N_LIMBS],
) {
    // Zero-extension: low limbs are the operand, high limbs stay 0.
    let widen = |narrow: [i64; N_LIMBS]| {
        let mut wide = [0i64; 2 * N_LIMBS];
        wide[..N_LIMBS].copy_from_slice(&narrow);
        wide
    };
    generate_mult_helper(lv, widen(input0), widen(input1));
}
/// Column-wise long multiplication of two 2*N_LIMBS-limb operands: writes the
/// low/high halves of the product and the carry-witness (aux) columns to `lv`.
pub(crate) fn generate_mult_helper<F: PrimeField64>(
    lv: &mut [F],
    left_in: [i64; 2 * N_LIMBS],
    right_in: [i64; 2 * N_LIMBS],
) {
    const MASK: i64 = (1i64 << LIMB_BITS) - 1i64;
    // Input and output have 16-bit limbs
    let mut output_limbs = [0i64; 2 * N_LIMBS];
    // Column-wise pen-and-paper long multiplication on 16-bit limbs.
    // First calculate the coefficients of a(x)*b(x) (in unreduced_prod),
    // then do carry propagation to obtain C = c(β) = a(β)*b(β).
    let mut cy = 0i64;
    let mut unreduced_prod = pol_mul_lo(left_in, right_in);
    for col in 0..2 * N_LIMBS {
        let t = unreduced_prod[col] + cy;
        cy = t >> LIMB_BITS;
        output_limbs[col] = t & MASK;
    }
    // Low half of the product.
    lv[OUTPUT_REGISTER_LO].copy_from_slice(
        &output_limbs[..N_LIMBS]
            .iter()
            .map(|c| F::from_canonical_i64(*c))
            .collect::<Vec<_>>(),
    );
    // High half of the product.
    lv[OUTPUT_REGISTER_HI].copy_from_slice(
        &output_limbs[N_LIMBS..]
            .iter()
            .map(|c| F::from_canonical_i64(*c))
            .collect::<Vec<_>>(),
    );
    // The carry polynomial s(x): a(x)*b(x) - c(x) divided by (x - β).
    pol_sub_assign(&mut unreduced_prod, &output_limbs);
    let mut aux_limbs = pol_remove_root_2exp::<LIMB_BITS, _, { 2 * N_LIMBS }>(unreduced_prod);
    aux_limbs[2 * N_LIMBS - 1] = -cy;
    // Offset each aux coefficient by AUX_COEFF_ABS_MAX so it is non-negative
    // and can be stored as two unsigned 16-bit halves below.
    for c in aux_limbs.iter_mut() {
        *c += AUX_COEFF_ABS_MAX;
    }
    debug_assert!(aux_limbs.iter().all(|&c| c.abs() <= 2 * AUX_COEFF_ABS_MAX));
    lv[MULT_AUX_LO].copy_from_slice(&aux_limbs.map(|c| F::from_canonical_u16(c as u16)));
    lv[MULT_AUX_HI].copy_from_slice(&aux_limbs.map(|c| F::from_canonical_u16((c >> 16) as u16)));
}
/// Packed constraints for MULT and MULTU: reassemble the double-width product
/// from its LO/HI halves and check both variants under their own filters.
pub fn eval_packed_generic<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // Operands and product limbs read from the trace row.
    let left = read_value::<N_LIMBS, _>(lv, INPUT_REGISTER_0);
    let right = read_value::<N_LIMBS, _>(lv, INPUT_REGISTER_1);
    let lo = read_value::<N_LIMBS, _>(lv, OUTPUT_REGISTER_LO);
    let hi = read_value::<N_LIMBS, _>(lv, OUTPUT_REGISTER_HI);
    let mut product = [P::ZEROS; 2 * N_LIMBS];
    product[..N_LIMBS].copy_from_slice(&lo);
    product[N_LIMBS..].copy_from_slice(&hi);
    // Signed and unsigned checks, each gated by its own filter column.
    eval_packed_generic_mult(lv, lv[IS_MULT], left, right, product, yield_constr);
    eval_packed_generic_multu(lv, lv[IS_MULTU], left, right, product, yield_constr);
}
/// Packed constraints for the signed MULT: verify the witnessed sign flags,
/// sign-extend both operands to 2 * N_LIMBS limbs, then run the shared
/// multiplication check.
pub(crate) fn eval_packed_generic_mult<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    filter: P,
    left_in_limbs: [P; N_LIMBS],
    right_in_limbs: [P; N_LIMBS],
    output_limbs: [P; 2 * N_LIMBS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let base = P::Scalar::from_canonical_u64(1 << LIMB_BITS);
    // Verify the sign flag at lv[is_neg_idx] against the witnessed
    // (hi + 2^15) mod 2^16 value at lv[sum_idx], then return the operand
    // sign-extended to 2 * N_LIMBS limbs (0xffff pad limbs when negative).
    let sign_extend = |is_neg_idx: usize,
                       sum_idx: usize,
                       input: &[P; N_LIMBS],
                       yield_constr: &mut ConstraintConsumer<P>| {
        let is_neg = lv[is_neg_idx];
        // is_neg must be boolean.
        yield_constr.constraint(filter * is_neg * (P::ONES - is_neg));
        // input_hi + 2^15 == sum + is_neg * 2^16.
        let add = P::Scalar::from_canonical_u64(1 << (LIMB_BITS - 1));
        let sum = lv[sum_idx];
        let input_hi = input[N_LIMBS - 1];
        yield_constr.constraint(filter * (input_hi + add - sum - is_neg * base));
        // Let's begin to extend
        let mut result = [P::ZEROS; 2 * N_LIMBS];
        let pad = [is_neg * P::Scalar::from_canonical_u16(u16::MAX); N_LIMBS];
        result[..N_LIMBS].clone_from_slice(input);
        result[N_LIMBS..].clone_from_slice(&pad);
        result
    };
    let left_in_limbs = sign_extend(
        AUX_EXTRA.start,
        INPUT_REGISTER_2.start,
        &left_in_limbs,
        yield_constr,
    );
    let right_in_limbs = sign_extend(
        AUX_EXTRA.start + 1,
        INPUT_REGISTER_2.start + 1,
        &right_in_limbs,
        yield_constr,
    );
    eval_packed_generic_mult_helper(
        lv,
        filter,
        left_in_limbs,
        right_in_limbs,
        output_limbs,
        yield_constr,
    );
}
/// Packed constraints for the unsigned MULTU: zero-extend both operands to
/// 2 * N_LIMBS limbs and run the shared multiplication check.
pub(crate) fn eval_packed_generic_multu<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    filter: P,
    left_in_limbs: [P; N_LIMBS],
    right_in_limbs: [P; N_LIMBS],
    output_limbs: [P; 2 * N_LIMBS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // Zero-extension: low limbs are the operand, high limbs stay zero.
    let widen = |narrow: [P; N_LIMBS]| {
        let mut wide = [P::ZEROS; 2 * N_LIMBS];
        wide[..N_LIMBS].copy_from_slice(&narrow);
        wide
    };
    eval_packed_generic_mult_helper(
        lv,
        filter,
        widen(left_in_limbs),
        widen(right_in_limbs),
        output_limbs,
        yield_constr,
    );
}
/// Shared packed multiplication check: verifies that
/// a(x)*b(x) - [h,l](x) == (x - β) * s(x) coefficient-by-coefficient, where
/// s is the carry polynomial witnessed in MULT_AUX_LO/HI.
pub(crate) fn eval_packed_generic_mult_helper<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    filter: P,
    left_in_limbs: [P; 2 * N_LIMBS],
    right_in_limbs: [P; 2 * N_LIMBS],
    output_limbs: [P; 2 * N_LIMBS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let base = P::Scalar::from_canonical_u64(1 << LIMB_BITS);
    let aux_limbs = {
        // The aux limbs were offset by AUX_COEFF_ABS_MAX in generation (see
        // generate_mult_helper) and split into 16-bit halves, so we
        // recombine and undo the offset here.
        let offset = P::Scalar::from_canonical_u64(AUX_COEFF_ABS_MAX as u64);
        let mut aux_limbs = read_value::<{ 2 * N_LIMBS }, _>(lv, MULT_AUX_LO);
        let aux_limbs_hi = &lv[MULT_AUX_HI];
        for (lo, &hi) in aux_limbs.iter_mut().zip(aux_limbs_hi) {
            *lo += hi * base - offset;
        }
        aux_limbs
    };
    // Constraint poly holds the coefficients of the polynomial that
    // must be identically zero for this multiplication to be
    // verified.
    //
    // These two lines set constr_poly to the polynomial a(x)b(x) - [h,l](x),
    // where a, b, h and l are the polynomials
    //
    //   a(x) = \sum_i input0_limbs[i] * x^i
    //   b(x) = \sum_i input1_limbs[i] * x^i
    //   [h,l](x) = \sum_i output_limbs[i] * x^i
    //
    // This polynomial should equal (x - β)*s(x) where s is
    //
    //   s(x) = \sum_i aux_limbs[i] * x^i
    //
    let mut constr_poly = pol_mul_lo(left_in_limbs, right_in_limbs);
    pol_sub_assign(&mut constr_poly, &output_limbs);
    // This subtracts (x - β) * s(x) from constr_poly.
    pol_sub_assign(&mut constr_poly, &pol_adjoin_root(aux_limbs, base));
    // At this point constr_poly holds the coefficients of the
    // polynomial a(x)b(x) - [h,l](x) - (x - β)*s(x). The
    // multiplication is valid if and only if all of those
    // coefficients are zero.
    for &c in &constr_poly {
        yield_constr.constraint(filter * c);
    }
}
/// Recursive-circuit counterpart of `eval_packed_generic_mult`: verifies the
/// sign flags and sign-extends both operands before the shared check.
pub(crate) fn eval_ext_mult_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    filter: ExtensionTarget<D>,
    left_in_limbs: [ExtensionTarget<D>; N_LIMBS],
    right_in_limbs: [ExtensionTarget<D>; N_LIMBS],
    output_limbs: [ExtensionTarget<D>; 2 * N_LIMBS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let base = builder.constant_extension(F::Extension::from_canonical_u64(1 << LIMB_BITS));
    let one = builder.one_extension();
    // Circuit version of the packed `sign_extend` closure: constrain the sign
    // flag and witnessed sum, then pad the operand to 2 * N_LIMBS limbs.
    let sign_extend = |builder: &mut CircuitBuilder<F, D>,
                       is_neg_idx: usize,
                       sum_idx: usize,
                       input: &[ExtensionTarget<D>; N_LIMBS],
                       yield_constr: &mut RecursiveConstraintConsumer<F, D>| {
        let is_neg = lv[is_neg_idx];
        // is_neg must be boolean: filter * is_neg * (1 - is_neg) == 0.
        let t0 = builder.sub_extension(one, is_neg);
        let t = builder.mul_many_extension([filter, is_neg, t0]);
        yield_constr.constraint(builder, t);
        let add =
            builder.constant_extension(F::Extension::from_canonical_u64(1 << (LIMB_BITS - 1)));
        let sum = lv[sum_idx];
        let input_hi = input[N_LIMBS - 1];
        // filter * (input_hi + add - sum - is_neg * base) == 0.
        let t0 = builder.add_extension(input_hi, add);
        let t1 = builder.sub_extension(t0, sum);
        let t2 = builder.mul_extension(is_neg, base);
        let t3 = builder.sub_extension(t1, t2);
        let t = builder.mul_extension(filter, t3);
        yield_constr.constraint(builder, t);
        // Let's begin to extend
        let mut result = [ExtensionTarget::default(); 2 * N_LIMBS];
        let u16_max = builder.constant_extension(F::Extension::from_canonical_u16(u16::MAX));
        let pad = builder.mul_extension(is_neg, u16_max);
        result[..N_LIMBS].clone_from_slice(input);
        result[N_LIMBS..].clone_from_slice(&[pad; N_LIMBS]);
        result
    };
    let left_in_limbs = sign_extend(
        builder,
        AUX_EXTRA.start,
        INPUT_REGISTER_2.start,
        &left_in_limbs,
        yield_constr,
    );
    let right_in_limbs = sign_extend(
        builder,
        AUX_EXTRA.start + 1,
        INPUT_REGISTER_2.start + 1,
        &right_in_limbs,
        yield_constr,
    );
    eval_ext_mult_helper_circuit(
        builder,
        lv,
        filter,
        left_in_limbs,
        right_in_limbs,
        output_limbs,
        yield_constr,
    );
}
/// Recursive-circuit counterpart of `eval_packed_generic_multu`: zero-extend
/// both operands to 2 * N_LIMBS limbs, then run the shared check.
pub(crate) fn eval_ext_multu_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    filter: ExtensionTarget<D>,
    left_in_limbs: [ExtensionTarget<D>; N_LIMBS],
    right_in_limbs: [ExtensionTarget<D>; N_LIMBS],
    output_limbs: [ExtensionTarget<D>; 2 * N_LIMBS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    // Zero-extend an operand from N_LIMBS to 2 * N_LIMBS limbs.
    fn widen<F: RichField + Extendable<D>, const D: usize>(
        builder: &mut CircuitBuilder<F, D>,
        narrow: [ExtensionTarget<D>; N_LIMBS],
    ) -> [ExtensionTarget<D>; 2 * N_LIMBS] {
        let mut wide = [builder.zero_extension(); 2 * N_LIMBS];
        wide[..N_LIMBS].copy_from_slice(&narrow);
        wide
    }
    let left = widen(builder, left_in_limbs);
    let right = widen(builder, right_in_limbs);
    eval_ext_mult_helper_circuit(builder, lv, filter, left, right, output_limbs, yield_constr);
}
/// Recursive-circuit counterpart of `eval_packed_generic_mult_helper`:
/// checks a(x)*b(x) - [h,l](x) - (x - β)*s(x) == 0 coefficient-wise.
pub(crate) fn eval_ext_mult_helper_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    filter: ExtensionTarget<D>,
    left_in_limbs: [ExtensionTarget<D>; 2 * N_LIMBS],
    right_in_limbs: [ExtensionTarget<D>; 2 * N_LIMBS],
    output_limbs: [ExtensionTarget<D>; 2 * N_LIMBS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let base = builder.constant_extension(F::Extension::from_canonical_u64(1 << LIMB_BITS));
    let aux_limbs = {
        // Recombine the 16-bit halves and undo the AUX_COEFF_ABS_MAX offset
        // applied during generation.
        let offset =
            builder.constant_extension(F::Extension::from_canonical_u64(AUX_COEFF_ABS_MAX as u64));
        let mut aux_limbs = read_value::<{ 2 * N_LIMBS }, _>(lv, MULT_AUX_LO);
        let aux_limbs_hi = &lv[MULT_AUX_HI];
        for (lo, &hi) in aux_limbs.iter_mut().zip(aux_limbs_hi) {
            //*lo = lo + hi * base - offset;
            let t = builder.mul_sub_extension(hi, base, offset);
            *lo = builder.add_extension(*lo, t);
        }
        aux_limbs
    };
    // constr_poly = a(x)*b(x) - [h,l](x) - (x - β)*s(x); every coefficient
    // must vanish under the filter.
    let mut constr_poly = pol_mul_lo_ext_circuit(builder, left_in_limbs, right_in_limbs);
    pol_sub_assign_ext_circuit(builder, &mut constr_poly, &output_limbs);
    let rhs = pol_adjoin_root_ext_circuit(builder, aux_limbs, base);
    pol_sub_assign_ext_circuit(builder, &mut constr_poly, &rhs);
    for &c in &constr_poly {
        let filter = builder.mul_extension(filter, c);
        yield_constr.constraint(builder, filter);
    }
}
/// Recursive-circuit entry point for MULT/MULTU: reassemble the double-width
/// product from its LO/HI halves and check both variants under their filters.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    // Operands and product limbs read from the trace row.
    let left = read_value::<N_LIMBS, _>(lv, INPUT_REGISTER_0);
    let right = read_value::<N_LIMBS, _>(lv, INPUT_REGISTER_1);
    let lo = read_value::<N_LIMBS, _>(lv, OUTPUT_REGISTER_LO);
    let hi = read_value::<N_LIMBS, _>(lv, OUTPUT_REGISTER_HI);
    let mut product = [ExtensionTarget::default(); 2 * N_LIMBS];
    product[..N_LIMBS].copy_from_slice(&lo);
    product[N_LIMBS..].copy_from_slice(&hi);
    // Signed and unsigned checks, each gated by its own filter column.
    eval_ext_mult_circuit(builder, lv, lv[IS_MULT], left, right, product, yield_constr);
    eval_ext_multu_circuit(builder, lv, lv[IS_MULTU], left, right, product, yield_constr);
}
#[cfg(test)]
mod tests {
    use plonky2::field::goldilocks_field::GoldilocksField;
    use plonky2::field::types::Sample;
    use rand::{Rng, SeedableRng};
    use rand_chacha::ChaCha8Rng;
    use super::*;
    const N_RND_TESTS: usize = 100000;
    const OPS: [usize; 2] = [IS_MULT, IS_MULTU];
    #[test]
    fn generate_eval_consistency_not_mult() {
        type F = GoldilocksField;
        let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
        let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
        // if `IS_MULT and IS_MULTU == 0`, then the constraints should be met even
        // if all values are garbage.
        lv[IS_MULT] = F::ZERO;
        lv[IS_MULTU] = F::ZERO;
        let mut constraint_consumer = ConstraintConsumer::new(
            vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
            GoldilocksField::ONE,
            GoldilocksField::ONE,
            GoldilocksField::ONE,
        );
        eval_packed_generic(&lv, &mut constraint_consumer);
        for &acc in &constraint_consumer.constraint_accs {
            assert_eq!(acc, GoldilocksField::ZERO);
        }
    }
    /// Random MULT/MULTU rows produced by `generate` must satisfy every
    /// packed constraint; the second half of the runs forces a negative
    /// left operand to exercise the sign-extension path.
    #[test]
    fn generate_eval_consistency_mult() {
        type F = GoldilocksField;
        let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
        let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
        for op_filter in OPS {
            for op in OPS {
                lv[op] = F::ZEROS;
            }
            lv[op_filter] = F::ONES;
            for i in 0..N_RND_TESTS {
                // set inputs to random values
                for (ai, bi) in INPUT_REGISTER_0.zip(INPUT_REGISTER_1) {
                    lv[ai] = F::from_canonical_u16(rng.gen());
                    lv[bi] = F::from_canonical_u16(rng.gen());
                }
                let mut left_in = rng.gen::<u32>();
                // Force the sign bit in the second half of the iterations.
                if i > N_RND_TESTS / 2 {
                    left_in |= 0x80000000;
                }
                let right_in = rng.gen::<u32>();
                generate(&mut lv, op_filter, left_in, right_in);
                let mut constraint_consumer = ConstraintConsumer::new(
                    vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
                    GoldilocksField::ONE,
                    GoldilocksField::ONE,
                    GoldilocksField::ONE,
                );
                eval_packed_generic(&lv, &mut constraint_consumer);
                for &acc in &constraint_consumer.constraint_accs {
                    assert_eq!(acc, GoldilocksField::ZERO);
                }
            }
        }
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/arithmetic/columns.rs | prover/src/arithmetic/columns.rs | //! Arithmetic unit
use std::ops::Range;
/// Number of bits in each arithmetic-table limb.
pub const LIMB_BITS: usize = 16;
// Width of a MIPS general-purpose register in bits.
const ZKM_REGISTER_BITS: usize = 32;
/// Return the number of LIMB_BITS limbs that are in one MIPS
/// register-sized number, panicking if LIMB_BITS doesn't divide
/// the MIPS register size.
const fn n_limbs() -> usize {
    if ZKM_REGISTER_BITS % LIMB_BITS != 0 {
        panic!("limb size must divide MIPS register size");
    }
    let n = ZKM_REGISTER_BITS / LIMB_BITS;
    if n % 2 == 1 {
        panic!("number of limbs must be even");
    }
    n
}
/// Number of LIMB_BITS limbs that are in one MIPS register-sized number.
pub const N_LIMBS: usize = n_limbs();
// Operation filter columns, one boolean column per supported arithmetic
// instruction, allocated contiguously starting at column 0. Each index is
// defined relative to the previous one so the block stays contiguous;
// `START_SHARED_COLS` marks the first column after the filters.
pub(crate) const IS_ADD: usize = 0;
pub(crate) const IS_ADDU: usize = IS_ADD + 1;
pub(crate) const IS_ADDI: usize = IS_ADDU + 1;
pub(crate) const IS_ADDIU: usize = IS_ADDI + 1;
pub(crate) const IS_SUB: usize = IS_ADDIU + 1;
pub(crate) const IS_SUBU: usize = IS_SUB + 1;
pub(crate) const IS_MULT: usize = IS_SUBU + 1;
pub(crate) const IS_MULTU: usize = IS_MULT + 1;
pub(crate) const IS_MUL: usize = IS_MULTU + 1;
pub(crate) const IS_DIV: usize = IS_MUL + 1;
pub(crate) const IS_DIVU: usize = IS_DIV + 1;
pub(crate) const IS_SLLV: usize = IS_DIVU + 1;
pub(crate) const IS_SRLV: usize = IS_SLLV + 1;
pub(crate) const IS_SRAV: usize = IS_SRLV + 1;
pub(crate) const IS_SLL: usize = IS_SRAV + 1;
pub(crate) const IS_SRL: usize = IS_SLL + 1;
pub(crate) const IS_SRA: usize = IS_SRL + 1;
pub(crate) const IS_SLT: usize = IS_SRA + 1;
pub(crate) const IS_SLTU: usize = IS_SLT + 1;
pub(crate) const IS_SLTI: usize = IS_SLTU + 1;
pub(crate) const IS_SLTIU: usize = IS_SLTI + 1;
pub(crate) const IS_LUI: usize = IS_SLTIU + 1;
pub(crate) const IS_MFHI: usize = IS_LUI + 1;
pub(crate) const IS_MTHI: usize = IS_MFHI + 1;
pub(crate) const IS_MFLO: usize = IS_MTHI + 1;
pub(crate) const IS_MTLO: usize = IS_MFLO + 1;
// First column after the operation filters; the shared scratch region starts here.
pub(crate) const START_SHARED_COLS: usize = IS_MTLO + 1;
/// Within the Arithmetic Unit, there are shared columns which can be
/// used by any arithmetic circuit, depending on which one is active
/// this cycle.
///
/// Modular arithmetic takes 11 * N_LIMBS columns which is split across
/// two rows, the first with 6 * N_LIMBS columns and the second with
/// 5 * N_LIMBS columns. (There are hence N_LIMBS "wasted columns" in
/// the second row.)
// NOTE(review): `NUM_SHARED_COLS` below is 9 * N_LIMBS, not 11 * N_LIMBS as
// the paragraph above suggests — confirm the doc comment is still accurate.
pub(crate) const NUM_SHARED_COLS: usize = 9 * N_LIMBS;
pub(crate) const SHARED_COLS: Range<usize> = START_SHARED_COLS..START_SHARED_COLS + NUM_SHARED_COLS;
// First-row layout: three input registers followed by the output register,
// each N_LIMBS wide and contiguous.
pub(crate) const INPUT_REGISTER_0: Range<usize> = START_SHARED_COLS..START_SHARED_COLS + N_LIMBS;
pub(crate) const INPUT_REGISTER_1: Range<usize> =
    INPUT_REGISTER_0.end..INPUT_REGISTER_0.end + N_LIMBS;
pub(crate) const INPUT_REGISTER_2: Range<usize> =
    INPUT_REGISTER_1.end..INPUT_REGISTER_1.end + N_LIMBS;
pub(crate) const OUTPUT_REGISTER: Range<usize> =
    INPUT_REGISTER_2.end..INPUT_REGISTER_2.end + N_LIMBS;
// NB: Only one of AUX_INPUT_REGISTER_[01] or AUX_INPUT_REGISTER_DBL
// will be used for a given operation since they overlap
pub(crate) const AUX_INPUT_REGISTER_0: Range<usize> =
    OUTPUT_REGISTER.end..OUTPUT_REGISTER.end + N_LIMBS;
pub(crate) const AUX_INPUT_REGISTER_1: Range<usize> =
    AUX_INPUT_REGISTER_0.end..AUX_INPUT_REGISTER_0.end + N_LIMBS;
// Double-width alias covering AUX_INPUT_REGISTER_0 and _1 together.
pub(crate) const AUX_INPUT_REGISTER_DBL: Range<usize> =
    OUTPUT_REGISTER.end..OUTPUT_REGISTER.end + 2 * N_LIMBS;
pub(crate) const AUX_INPUT_REGISTER_2: Range<usize> =
    AUX_INPUT_REGISTER_1.end..AUX_INPUT_REGISTER_1.end + N_LIMBS;
// The auxiliary input columns overlap the general input columns
// because they correspond to the values in the second row for modular
// operations.
const AUX_REGISTER_0: Range<usize> = START_SHARED_COLS..START_SHARED_COLS + N_LIMBS;
const AUX_REGISTER_1: Range<usize> = AUX_REGISTER_0.end..AUX_REGISTER_0.end + 2 * N_LIMBS;
const AUX_REGISTER_2: Range<usize> = AUX_REGISTER_1.end..AUX_REGISTER_1.end + 2 * N_LIMBS - 1;
// Each element c of {MUL,MODULAR}_AUX_REGISTER is -2^20 <= c <= 2^20;
// this value is used as an offset so that everything is positive in
// the range checks.
pub(crate) const AUX_COEFF_ABS_MAX: i64 = 1 << 20;
// Aliases giving operation-specific names to the shared ranges above.
pub(crate) const MUL_AUX_INPUT_LO: Range<usize> = AUX_INPUT_REGISTER_0;
pub(crate) const MUL_AUX_INPUT_HI: Range<usize> = AUX_INPUT_REGISTER_1;
pub(crate) const MODULAR_INPUT_0: Range<usize> = INPUT_REGISTER_0;
pub(crate) const MODULAR_INPUT_1: Range<usize> = INPUT_REGISTER_1;
pub(crate) const MODULAR_MODULUS: Range<usize> = INPUT_REGISTER_2;
pub(crate) const MODULAR_OUTPUT: Range<usize> = OUTPUT_REGISTER;
pub(crate) const MODULAR_QUO_INPUT: Range<usize> = AUX_INPUT_REGISTER_DBL;
pub(crate) const MODULAR_OUT_AUX_RED: Range<usize> = AUX_REGISTER_0;
// NB: Last value is not used in AUX, it is used in MOD_IS_ZERO
pub(crate) const MODULAR_MOD_IS_ZERO: usize = AUX_REGISTER_1.start;
pub(crate) const MODULAR_AUX_INPUT_LO: Range<usize> = AUX_REGISTER_1.start + 1..AUX_REGISTER_1.end;
pub(crate) const MODULAR_AUX_INPUT_HI: Range<usize> = AUX_REGISTER_2;
// Must be set to MOD_IS_ZERO for DIV and SHR operations i.e. MOD_IS_ZERO * (lv[IS_DIV] + lv[IS_SHR]).
pub(crate) const MODULAR_DIV_DENOM_IS_ZERO: usize = AUX_REGISTER_2.end;
/// The counter column (used for the range check) starts from 0 and increments.
pub(crate) const RANGE_COUNTER: usize = START_SHARED_COLS + NUM_SHARED_COLS;
/// The frequencies column used in logUp.
pub(crate) const RC_FREQUENCIES: usize = RANGE_COUNTER + 1;
// These counter columns only used in SRA(V) and DIV, and do not check range
pub(crate) const AUX_EXTRA: Range<usize> = RC_FREQUENCIES + 1..RC_FREQUENCIES + 9;
// Total: shared columns + RANGE_COUNTER + RC_FREQUENCIES + the 8 AUX_EXTRA
// columns (1 + 1 + 8 = 10 beyond the shared block).
pub const NUM_ARITH_COLUMNS: usize = START_SHARED_COLS + NUM_SHARED_COLS + 10;
// These counters are only be used in mul and div that use LO and HI.
pub(crate) const OUTPUT_REGISTER_LO: Range<usize> = OUTPUT_REGISTER;
pub(crate) const OUTPUT_REGISTER_HI: Range<usize> =
    OUTPUT_REGISTER.end..OUTPUT_REGISTER.end + N_LIMBS;
pub(crate) const MULT_AUX_LO: Range<usize> =
    OUTPUT_REGISTER_HI.end..OUTPUT_REGISTER_HI.end + 2 * N_LIMBS;
pub(crate) const MULT_AUX_HI: Range<usize> = MULT_AUX_LO.end..MULT_AUX_LO.end + 2 * N_LIMBS;
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/arithmetic/div.rs | prover/src/arithmetic/div.rs | //! Support for MIPS instructions DIV and DIVU.
use std::ops::Range;
use num::{One, Zero};
use num_bigint::{BigInt, Sign};
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use crate::arithmetic::addcy::{eval_ext_circuit_addcy, eval_packed_generic_addcy};
use crate::arithmetic::columns::*;
use crate::arithmetic::utils::*;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
// Column ranges holding |quotient| and |remainder| for signed DIV; they sit
// immediately after AUX_INPUT_REGISTER_2 in the shared-column region.
const QUOT_ABS: Range<usize> = AUX_INPUT_REGISTER_2.end..AUX_INPUT_REGISTER_2.end + N_LIMBS;
const REM_ABS: Range<usize> = QUOT_ABS.end..QUOT_ABS.end + N_LIMBS;
/// Generate the output and auxiliary values for div/divu operations.
///
/// Writes the operands and the (quotient, remainder) pair into their fixed
/// register columns, then fills in the operation-specific witness columns.
/// Panics if `filter` is neither `IS_DIV` nor `IS_DIVU`.
pub(crate) fn generate<F: PrimeField64>(
    lv: &mut [F],
    nv: &mut [F],
    filter: usize,
    input0: u32,
    input1: u32,
    quot: u32,
    rem: u32,
) {
    debug_assert!(lv.len() == NUM_ARITH_COLUMNS);
    // Lay the four values down in their register columns first; both the
    // signed and the unsigned generators read these back.
    for (register, value) in [
        (INPUT_REGISTER_0, input0),
        (INPUT_REGISTER_1, input1),
        (OUTPUT_REGISTER_LO, quot),
        (OUTPUT_REGISTER_HI, rem),
    ] {
        u32_to_array(&mut lv[register], value);
    }
    // Dispatch on the operation filter.
    match filter {
        IS_DIV => generate_div(lv, nv, input0, input1, quot, rem),
        IS_DIVU => generate_divu(lv, nv),
        _ => panic!(),
    }
}
/// Fill in the modular-division witness for an unsigned DIVU row.
///
/// The numerator and denominator come from the two input registers; the
/// quotient/remainder were already written to the LO/HI output registers
/// by `generate` before this is called.
pub(crate) fn generate_divu<F: PrimeField64>(lv: &mut [F], nv: &mut [F]) {
    generate_divu_helper(
        lv,
        nv,
        IS_DIVU,
        INPUT_REGISTER_0,
        INPUT_REGISTER_1,
        OUTPUT_REGISTER_LO,
        Some(OUTPUT_REGISTER_HI),
    );
}
/// Fill in the witness columns for a signed DIV row.
///
/// Signed division is reduced to the unsigned case: the absolute values of
/// the dividend, divisor, quotient and remainder are written to dedicated
/// column ranges (together with per-value sign/borrow helper columns on the
/// next row), and `generate_divu_helper` then runs on those absolute values.
pub(crate) fn generate_div<F: PrimeField64>(
    lv: &mut [F],
    nv: &mut [F],
    input0: u32,
    input1: u32,
    quot: u32,
    rem: u32,
) {
    // For one value: record (on the next row) its sign bit, a copy of its
    // high limb with the sign bit flipped (used by the sign-check
    // constraint), and the borrow used when negating the low limb; write
    // |value| into `abs_idx` on the current row. Returns whether the value
    // is negative.
    let fill_columns = |lv: &mut [F],
                        nv: &mut [F],
                        input,
                        abs_idx: Range<usize>,
                        sum_idx,
                        is_neg_idx,
                        lo_borrow_idx| {
        let is_neg = (input as i32) < 0;
        nv[is_neg_idx] = F::from_bool(is_neg);
        // XOR with 0x8000 flips the sign bit of the high limb; the
        // constraint side recomputes this as input_hi + 2^15 - is_neg * 2^16
        // (see `check_abs` in `eval_packed_div`).
        let sum = (input >> LIMB_BITS) ^ 0x8000;
        nv[sum_idx] = F::from_canonical_u32(sum);
        let lo_borrow = input & 0xffff > 0;
        nv[lo_borrow_idx] = F::from_bool(lo_borrow);
        debug_assert_eq!(abs_idx.len(), N_LIMBS);
        u32_to_array(&mut lv[abs_idx], (input as i32).unsigned_abs());
        is_neg
    };
    let is_input0_neg = fill_columns(
        lv,
        nv,
        input0,
        INPUT_REGISTER_2,
        MODULAR_DIV_DENOM_IS_ZERO + 1,
        MODULAR_DIV_DENOM_IS_ZERO + 5,
        MODULAR_DIV_DENOM_IS_ZERO + 6,
    );
    let is_input1_neg = fill_columns(
        lv,
        nv,
        input1,
        AUX_INPUT_REGISTER_2,
        MODULAR_DIV_DENOM_IS_ZERO + 2,
        MODULAR_DIV_DENOM_IS_ZERO + 7,
        MODULAR_DIV_DENOM_IS_ZERO + 8,
    );
    // The quotient's expected sign: sign(input0) XOR sign(input1).
    nv[RC_FREQUENCIES + 5] = F::from_bool(is_input0_neg ^ is_input1_neg);
    fill_columns(
        lv,
        nv,
        quot,
        QUOT_ABS,
        MODULAR_DIV_DENOM_IS_ZERO + 3,
        RC_FREQUENCIES + 1,
        RC_FREQUENCIES + 2,
    );
    fill_columns(
        lv,
        nv,
        rem,
        REM_ABS,
        MODULAR_DIV_DENOM_IS_ZERO + 4,
        RC_FREQUENCIES + 3,
        RC_FREQUENCIES + 4,
    );
    // Run the unsigned-division witness generation on the absolute values.
    generate_divu_helper(
        lv,
        nv,
        IS_DIV,
        INPUT_REGISTER_2,
        AUX_INPUT_REGISTER_2,
        QUOT_ABS,
        Some(REM_ABS),
    );
}
/// Generates the output and auxiliary values for modular operations,
/// assuming the input, modular and output limbs are already set.
///
/// When `rem_range` is `Some`, the remainder computed by the modular op is
/// checked (in debug builds) against the limbs already stored there;
/// otherwise the remainder is written into `AUX_INPUT_REGISTER_0`.
///
/// Panics if `filter` is not one of the DIV/DIVU/shift-right filters.
pub(crate) fn generate_divu_helper<F: PrimeField64>(
    lv: &mut [F],
    nv: &mut [F],
    filter: usize,
    input_limbs_range: Range<usize>,
    modulus_range: Range<usize>,
    output_range: Range<usize>,
    rem_range: Option<Range<usize>>,
) {
    let input_limbs = read_value_i64_limbs::<N_LIMBS, _>(lv, input_limbs_range);
    let pol_input = pol_extend(input_limbs);
    let (out, quo_input) = generate_modular_op(lv, nv, filter, pol_input, modulus_range);
    // The quotient of a u32 division fits in N_LIMBS limbs.
    debug_assert!(
        quo_input[N_LIMBS..].iter().all(|&x| x == F::ZERO),
        "expected top half of quo_input to be zero"
    );
    match filter {
        IS_DIV | IS_DIVU | IS_SRL | IS_SRLV | IS_SRA | IS_SRAV => {
            debug_assert!(
                lv[output_range]
                    .iter()
                    .zip(&quo_input[..N_LIMBS])
                    .all(|(x, y)| x == y),
                "computed output doesn't match expected"
            );
            if let Some(rem_range) = rem_range {
                // Caller supplied a remainder register: check it against the
                // value computed by the modular op.
                debug_assert!(
                    lv[rem_range].iter().zip(out).all(|(x, y)| x == &y),
                    "computed rem doesn't match expected"
                );
            } else {
                // No remainder register: stash the remainder in the first
                // auxiliary input register instead.
                lv[AUX_INPUT_REGISTER_0].copy_from_slice(&out);
            }
        }
        _ => panic!("expected filter to be IS_DIV, IS_DIVU, IS_SRL, SRLV, IS_SRA or IS_SRAV, but it was {filter}"),
    };
}
/// Generate the output and auxiliary values for given `operation`.
///
/// NB: `operation` can set the higher order elements in its result to
/// zero if they are not used.
pub(crate) fn generate_modular_op<F: PrimeField64>(
lv: &[F],
nv: &mut [F],
filter: usize,
pol_input: [i64; 2 * N_LIMBS - 1],
modulus_range: Range<usize>,
) -> ([F; N_LIMBS], [F; 2 * N_LIMBS]) {
assert_eq!(modulus_range.len(), N_LIMBS);
let mut modulus_limbs = read_value_i64_limbs(lv, modulus_range);
// BigInts are just used to avoid having to implement modular
// reduction.
let mut modulus = columns_to_bigint(&modulus_limbs);
// constr_poly is initialised to the input calculation as
// polynomials, and is used as such for the BigInt reduction;
// later, other values are added/subtracted, which is where its
// meaning as the "constraint polynomial" comes in.
let mut constr_poly = [0i64; 2 * N_LIMBS];
constr_poly[..2 * N_LIMBS - 1].copy_from_slice(&pol_input);
// two_exp_32 == 2^32
let two_exp_32 = {
let mut t = BigInt::zero();
t.set_bit(32, true);
t
};
let mut mod_is_zero = F::ZERO;
if modulus.is_zero() {
if [IS_DIV, IS_DIVU, IS_SRL, IS_SRLV].contains(&filter) {
// set modulus = 2^32; the condition above means we know
// it's zero at this point, so we can just set bit 32.
modulus.set_bit(32, true);
// modulus_limbs don't play a role below
} else {
// set modulus = 1
modulus = BigInt::one();
modulus_limbs[0] = 1i64;
}
mod_is_zero = F::ONE;
}
let input = columns_to_bigint(&constr_poly);
// modulus != 0 here, because, if the given modulus was zero, then
// it was set to 1 or 2^32 above
let mut output = &input % &modulus;
// output will be -ve (but > -modulus) if input was -ve, so we can
// add modulus to obtain a "canonical" +ve output.
if output.sign() == Sign::Minus {
output += &modulus;
}
let output_limbs = bigint_to_columns::<N_LIMBS>(&output);
// exact division; can be -ve for SUB* operations.
let quot = (&input - &output) / &modulus;
if quot.sign() == Sign::Minus {
// TODO: check if any op use?
unimplemented!();
}
let quot_limbs = bigint_to_columns::<{ 2 * N_LIMBS }>(");
// output < modulus here; the proof requires (output - modulus) % 2^32:
let out_aux_red = bigint_to_columns::<N_LIMBS>(&(two_exp_32 - modulus + output));
// constr_poly is the array of coefficients of the polynomial
//
// operation(a(x), b(x)) - c(x) - s(x)*m(x).
//
pol_sub_assign(&mut constr_poly, &output_limbs);
let prod = pol_mul_wide2(quot_limbs, modulus_limbs);
pol_sub_assign(&mut constr_poly, &prod[0..2 * N_LIMBS]);
// Higher order terms of the product must be zero for valid quot and modulus:
debug_assert!(&prod[2 * N_LIMBS..].iter().all(|&x| x == 0i64));
// constr_poly must be zero when evaluated at x = β :=
// 2^LIMB_BITS, hence it's divisible by (x - β). `aux_limbs` is
// the result of removing that root.
let mut aux_limbs = pol_remove_root_2exp::<LIMB_BITS, _, { 2 * N_LIMBS }>(constr_poly);
for c in aux_limbs.iter_mut() {
// we store the unsigned offset value c + 2^20.
*c += AUX_COEFF_ABS_MAX;
}
debug_assert!(aux_limbs.iter().all(|&c| c.abs() <= 2 * AUX_COEFF_ABS_MAX));
for (i, &c) in MODULAR_AUX_INPUT_LO.zip(&aux_limbs[..2 * N_LIMBS - 1]) {
nv[i] = F::from_canonical_u16(c as u16);
}
for (i, &c) in MODULAR_AUX_INPUT_HI.zip(&aux_limbs[..2 * N_LIMBS - 1]) {
nv[i] = F::from_canonical_u16((c >> 16) as u16);
}
// quo_input can be negative for SUB* operations, so we offset it
// to ensure i
nv[MODULAR_MOD_IS_ZERO] = mod_is_zero;
nv[MODULAR_OUT_AUX_RED].copy_from_slice(&out_aux_red.map(F::from_canonical_i64));
nv[MODULAR_DIV_DENOM_IS_ZERO] =
mod_is_zero * (lv[IS_DIV] + lv[IS_DIVU] + lv[IS_SRL] + lv[IS_SRLV]);
(
output_limbs.map(F::from_canonical_i64),
quot_limbs.map(F::from_noncanonical_i64),
)
}
/// Convert the base-2^16 representation of a number into a BigInt.
///
/// Given `N` signed (16 + ε)-bit values in `limbs`, return the BigInt
///
///   \sum_{i=0}^{N-1} limbs[i] * β^i.
///
/// This is basically "evaluate the given polynomial at β". Although
/// the input type is i64, the values must always be in (-2^16 - ε,
/// 2^16 + ε) because of the caller's range check on the inputs (the ε
/// allows us to convert calculated output, which can be bigger than
/// 2^16).
fn columns_to_bigint<const N: usize>(limbs: &[i64; N]) -> BigInt {
    const BASE: i64 = 1i64 << LIMB_BITS;
    // Accumulate the positive and negative contributions as separate
    // non-negative u32-digit magnitudes, then subtract at the end.
    let mut pos_limbs_u32 = Vec::with_capacity(N / 2 + 1);
    let mut neg_limbs_u32 = Vec::with_capacity(N / 2 + 1);
    let mut cy = 0i64; // cy is necessary to handle ε > 0
    for i in 0..(N / 2) {
        // Combine two 16-bit limbs into one signed 32-bit digit plus carry.
        let t = cy + limbs[2 * i] + BASE * limbs[2 * i + 1];
        pos_limbs_u32.push(if t > 0 { t as u32 } else { 0u32 });
        neg_limbs_u32.push(if t < 0 { -t as u32 } else { 0u32 });
        cy = t / (1i64 << 32);
    }
    if N & 1 != 0 {
        // If N is odd we need to add the last limb on its own
        let t = cy + limbs[N - 1];
        pos_limbs_u32.push(if t > 0 { t as u32 } else { 0u32 });
        neg_limbs_u32.push(if t < 0 { -t as u32 } else { 0u32 });
        cy = t / (1i64 << 32);
    }
    // The final carry becomes the most significant digit.
    pos_limbs_u32.push(if cy > 0 { cy as u32 } else { 0u32 });
    neg_limbs_u32.push(if cy < 0 { -cy as u32 } else { 0u32 });
    let pos = BigInt::from_slice(Sign::Plus, &pos_limbs_u32);
    let neg = BigInt::from_slice(Sign::Plus, &neg_limbs_u32);
    pos - neg
}
/// Convert a BigInt into a base-2^16 representation.
///
/// Given a BigInt `num`, return an array of `N` signed 16-bit
/// values, say `limbs`, such that
///
///   num = \sum_{i=0}^{N-1} limbs[i] * β^i.
///
/// Note that `N` must be at least ceil(log2(num)/16) in order to be
/// big enough to hold `num`.
fn bigint_to_columns<const N: usize>(num: &BigInt) -> [i64; N] {
    assert!(num.bits() <= 16 * N as u64);
    let mut output = [0i64; N];
    // Each u32 digit of the magnitude yields two 16-bit limbs.
    for (i, limb) in num.iter_u32_digits().enumerate() {
        output[2 * i] = limb as u16 as i64;
        output[2 * i + 1] = (limb >> LIMB_BITS) as i64;
    }
    // For a negative number, negate every limb so the signed-limb sum in the
    // doc comment still evaluates to `num`.
    if num.sign() == Sign::Minus {
        for c in output.iter_mut() {
            *c = -*c;
        }
    }
    output
}
/// Constrain that `output < modulus` (i.e. the modular output is fully
/// reduced), using the `MODULAR_OUT_AUX_RED` witness columns on the next row.
/// The comparison is skipped (via `is_less_than = 0`) exactly when the
/// modulus was zero for a DIV/DIVU/shift operation.
pub(crate) fn check_reduced<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    nv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
    filter: P,
    output: [P; N_LIMBS],
    modulus: [P; N_LIMBS],
    mod_is_zero: P,
) {
    // Verify that the output is reduced, i.e. output < modulus.
    let out_aux_red = &nv[MODULAR_OUT_AUX_RED];
    // This sets is_less_than to 1 unless we get mod_is_zero when
    // doing a DIV, DIVU or a shift; in that case, we need is_less_than=0,
    // since eval_packed_generic_addcy checks
    //
    //   modulus + out_aux_red == output + is_less_than*2^64
    //
    // (NOTE(review): for 32-bit registers this is presumably 2^32 — confirm)
    // and we are given output = out_aux_red when modulus is zero.
    let mut is_less_than = [P::ZEROS; N_LIMBS];
    is_less_than[0] = P::ONES
        - mod_is_zero
            * (lv[IS_DIV] + lv[IS_DIVU] + lv[IS_SRL] + lv[IS_SRLV] + lv[IS_SRA] + lv[IS_SRAV]);
    // NB: output and modulus in lv while out_aux_red and
    // is_less_than (via mod_is_zero) depend on nv, hence the
    // 'is_two_row_op' argument is set to 'true'.
    eval_packed_generic_addcy(
        yield_constr,
        filter,
        &modulus,
        out_aux_red,
        &output,
        &is_less_than,
        true,
    );
}
/// Build the part of the constraint polynomial that applies to the
/// DIV, DIVU operations and perform the common verifications.
///
/// Specifically, with the notation above, build the polynomial
///
///   c(x) + q(x) * m(x) + (x - β) * s(x)
///
/// and check consistency when m = 0, and that c is reduced. Note that
/// q(x) CANNOT be negative here.
pub(crate) fn modular_constr_poly<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    nv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
    filter: P,
    mut output: [P; N_LIMBS],
    mut modulus: [P; N_LIMBS],
    quot: [P; 2 * N_LIMBS],
) -> [P; 2 * N_LIMBS] {
    let mod_is_zero = nv[MODULAR_MOD_IS_ZERO];
    // Check that mod_is_zero is zero or one
    yield_constr.constraint_transition(filter * (mod_is_zero * mod_is_zero - mod_is_zero));
    // Check that mod_is_zero is zero if modulus is not zero (they
    // could both be zero)
    let limb_sum = modulus.into_iter().sum::<P>();
    yield_constr.constraint_transition(filter * limb_sum * mod_is_zero);
    // See the file documentation for why this suffices to handle
    // modulus = 0.
    modulus[0] += mod_is_zero;
    // Is 1 iff the operation is DIV, DIVU or a right shift and the
    // denominator is zero.
    let div_denom_is_zero = nv[MODULAR_DIV_DENOM_IS_ZERO];
    yield_constr.constraint_transition(
        filter
            * (mod_is_zero
                * (lv[IS_DIV] + lv[IS_DIVU] + lv[IS_SRL] + lv[IS_SRLV] + lv[IS_SRA] + lv[IS_SRAV])
                - div_denom_is_zero),
    );
    // Needed to compensate for adding mod_is_zero to modulus above,
    // since the call eval_packed_generic_addcy() below subtracts modulus
    // to verify in the case of a DIV or SHR.
    output[0] += div_denom_is_zero;
    check_reduced(lv, nv, yield_constr, filter, output, modulus, mod_is_zero);
    // restore output[0]
    output[0] -= div_denom_is_zero;
    // prod = q(x) * m(x)
    let prod = pol_mul_wide2(quot, modulus);
    // higher order terms must be zero
    for &x in prod[2 * N_LIMBS..].iter() {
        yield_constr.constraint_transition(filter * x);
    }
    // constr_poly = c(x) + q(x) * m(x)
    let mut constr_poly: [_; 2 * N_LIMBS] = prod[0..2 * N_LIMBS].try_into().unwrap();
    pol_add_assign(&mut constr_poly, &output);
    let base = P::Scalar::from_canonical_u64(1 << LIMB_BITS);
    // TODO: change AUX_COEFF_ABS_MAX?
    let offset = P::Scalar::from_canonical_u64(AUX_COEFF_ABS_MAX as u64);
    // constr_poly = c(x) + q(x) * m(x) + (x - β) * s(x)
    let mut aux = [P::ZEROS; 2 * N_LIMBS];
    for (c, i) in aux.iter_mut().zip(MODULAR_AUX_INPUT_LO) {
        // MODULAR_AUX_INPUT elements were offset by 2^20 in
        // generation, so we undo that here.
        *c = nv[i] - offset;
    }
    // add high 16-bits of aux input
    for (c, j) in aux.iter_mut().zip(MODULAR_AUX_INPUT_HI) {
        *c += base * nv[j];
    }
    pol_add_assign(&mut constr_poly, &pol_adjoin_root(aux, base));
    constr_poly
}
/// Packed-field constraint evaluation entry point for the division
/// operations: evaluates the DIVU constraints, then the signed DIV ones.
pub(crate) fn eval_packed<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    nv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    eval_packed_divu(lv, nv, yield_constr);
    eval_packed_div(lv, nv, yield_constr);
}
/// Constrain an unsigned DIVU row: numerator in `INPUT_REGISTER_0`,
/// denominator in `INPUT_REGISTER_1`, quotient in `OUTPUT_REGISTER` and
/// remainder in `AUX_INPUT_REGISTER_0` (where generation stashed it).
pub(crate) fn eval_packed_divu<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    nv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    eval_packed_div_helper(
        lv,
        nv,
        yield_constr,
        lv[IS_DIVU],
        INPUT_REGISTER_0,
        INPUT_REGISTER_1,
        OUTPUT_REGISTER,
        AUX_INPUT_REGISTER_0,
    );
}
/// Constrain a signed DIV row.
///
/// Verifies, for each of the four values (dividend, divisor, quotient,
/// remainder), that its recorded absolute-value limbs and sign/borrow
/// helper columns are consistent; checks the sign relations between
/// quotient/remainder and the inputs; then runs the unsigned divmod check
/// on the absolute values.
pub(crate) fn eval_packed_div<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    nv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    let filter = lv[IS_DIV];
    let over_flow = P::Scalar::from_canonical_u64(1 << LIMB_BITS);
    let add = P::Scalar::from_canonical_u64(1 << (LIMB_BITS - 1));
    // Constrain one value's |·| columns against its original limbs and the
    // helper columns written by `generate_div`; returns the is_neg column.
    let mut check_abs = |input_idx: Range<usize>, abs_idx, sum_idx, is_neg_idx, lo_borrow_idx| {
        // check is_neg bool
        let is_neg = nv[is_neg_idx];
        yield_constr.constraint_transition(filter * is_neg * (P::ONES - is_neg));
        // check input is negative or not. We just check the most significant bit in significant limb
        let sum = nv[sum_idx];
        let input_hi = lv[input_idx.end - 1];
        yield_constr.constraint_transition(filter * (input_hi + add - sum - is_neg * over_flow));
        // input_lo_borrow
        let input_lo_borrow = nv[lo_borrow_idx];
        yield_constr.constraint_transition(filter * input_lo_borrow * (P::ONES - input_lo_borrow));
        // Limb-wise two's-complement negation of the input, using the
        // recorded low-limb borrow.
        let neg_inputs = vec![
            input_lo_borrow * over_flow - lv[input_idx.start],
            over_flow - lv[input_idx.start + 1] - input_lo_borrow,
        ];
        // abs = is_neg ? -input : input, limb by limb.
        for ((i, j), neg_input) in input_idx.zip(abs_idx).zip(neg_inputs) {
            yield_constr.constraint_transition(
                filter * (is_neg * neg_input + (P::ONES - is_neg) * lv[i] - lv[j]),
            );
        }
        is_neg
    };
    let is_input0_neg = check_abs(
        INPUT_REGISTER_0,
        INPUT_REGISTER_2,
        MODULAR_DIV_DENOM_IS_ZERO + 1,
        MODULAR_DIV_DENOM_IS_ZERO + 5,
        MODULAR_DIV_DENOM_IS_ZERO + 6,
    );
    let is_input1_neg = check_abs(
        INPUT_REGISTER_1,
        AUX_INPUT_REGISTER_2,
        MODULAR_DIV_DENOM_IS_ZERO + 2,
        MODULAR_DIV_DENOM_IS_ZERO + 7,
        MODULAR_DIV_DENOM_IS_ZERO + 8,
    );
    let is_quot_neg = check_abs(
        OUTPUT_REGISTER_LO,
        QUOT_ABS,
        MODULAR_DIV_DENOM_IS_ZERO + 3,
        RC_FREQUENCIES + 1,
        RC_FREQUENCIES + 2,
    );
    let is_rem_neg = check_abs(
        OUTPUT_REGISTER_HI,
        REM_ABS,
        MODULAR_DIV_DENOM_IS_ZERO + 4,
        RC_FREQUENCIES + 3,
        RC_FREQUENCIES + 4,
    );
    // check sign of quot
    // sign(quot) == sign(input0) xor sign(input1) or quot==0
    let is_same_sign = nv[RC_FREQUENCIES + 5];
    yield_constr.constraint_transition(
        filter
            * (is_input0_neg + is_input1_neg
                - P::Scalar::from_canonical_u8(2) * is_input0_neg * is_input1_neg
                - is_same_sign),
    );
    let quot_limbs_sum: P = OUTPUT_REGISTER_LO.map(|i| lv[i]).sum();
    yield_constr.constraint_transition(filter * (is_quot_neg - is_same_sign) * quot_limbs_sum);
    // check sign of rem
    // sign(rem) == sign(input0) or rem==0
    let rem_limbs_sum: P = OUTPUT_REGISTER_HI.map(|i| lv[i]).sum();
    yield_constr.constraint_transition(filter * (is_rem_neg - is_input0_neg) * rem_limbs_sum);
    // |input0| = |quot| * |input1| + |rem| via the unsigned helper.
    eval_packed_div_helper(
        lv,
        nv,
        yield_constr,
        filter,
        INPUT_REGISTER_2,
        AUX_INPUT_REGISTER_2,
        QUOT_ABS,
        REM_ABS,
    );
}
/// Verify that num = quo * den + rem and 0 <= rem < den.
pub(crate) fn eval_packed_div_helper<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    nv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
    filter: P,
    num_range: Range<usize>,
    den_range: Range<usize>,
    quo_range: Range<usize>,
    rem_range: Range<usize>,
) {
    debug_assert!(quo_range.len() == N_LIMBS);
    debug_assert!(rem_range.len() == N_LIMBS);
    // Two-row operation: the filter must vanish on the last row, where there
    // is no next row to hold the auxiliary columns.
    yield_constr.constraint_last_row(filter);
    let num = &lv[num_range];
    let den = read_value(lv, den_range);
    // Pad the N_LIMBS quotient to double width, as required by the
    // constraint-polynomial builder.
    let quo = {
        let mut quo = [P::ZEROS; 2 * N_LIMBS];
        quo[..N_LIMBS].copy_from_slice(&lv[quo_range]);
        quo
    };
    let rem = read_value(lv, rem_range);
    // Build c(x) + q(x)*m(x) + (x - β)*s(x) with c = rem, m = den, q = quo …
    let mut constr_poly = modular_constr_poly(lv, nv, yield_constr, filter, rem, den, quo);
    let input = num;
    // … and require it to equal the numerator, coefficient-wise.
    pol_sub_assign(&mut constr_poly, input);
    for &c in constr_poly.iter() {
        yield_constr.constraint_transition(filter * c);
    }
}
/// Recursive-circuit counterpart of `modular_constr_poly`: builds the
/// constraint polynomial c(x) + q(x)*m(x) + (x - β)*s(x) inside the plonky2
/// circuit builder, performing the same zero-modulus and reducedness checks.
pub(crate) fn modular_constr_poly_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    builder: &mut CircuitBuilder<F, D>,
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    filter: ExtensionTarget<D>,
    mut output: [ExtensionTarget<D>; N_LIMBS],
    mut modulus: [ExtensionTarget<D>; N_LIMBS],
    quot: [ExtensionTarget<D>; 2 * N_LIMBS],
) -> [ExtensionTarget<D>; 2 * N_LIMBS] {
    let mod_is_zero = nv[MODULAR_MOD_IS_ZERO];
    // Check that mod_is_zero is zero or one
    let t = builder.mul_sub_extension(mod_is_zero, mod_is_zero, mod_is_zero);
    let t = builder.mul_extension(filter, t);
    yield_constr.constraint_transition(builder, t);
    // Check that mod_is_zero is zero if modulus is not zero (they
    // could both be zero)
    let limb_sum = builder.add_many_extension(modulus);
    let t = builder.mul_extension(limb_sum, mod_is_zero);
    let t = builder.mul_extension(filter, t);
    yield_constr.constraint_transition(builder, t);
    modulus[0] = builder.add_extension(modulus[0], mod_is_zero);
    // Is 1 iff the operation is DIV, DIVU or a right shift and the
    // denominator is zero.
    let div_denom_is_zero = nv[MODULAR_DIV_DENOM_IS_ZERO];
    let div_shr_filter = builder.add_many_extension([
        lv[IS_DIV],
        lv[IS_DIVU],
        lv[IS_SRL],
        lv[IS_SRLV],
        lv[IS_SRA],
        lv[IS_SRAV],
    ]);
    let t = builder.mul_sub_extension(mod_is_zero, div_shr_filter, div_denom_is_zero);
    let t = builder.mul_extension(filter, t);
    yield_constr.constraint_transition(builder, t);
    // Needed to compensate for adding mod_is_zero to modulus above,
    // since the call eval_packed_generic_addcy() below subtracts modulus
    // to verify in the case of a DIV or DIVU.
    output[0] = builder.add_extension(output[0], div_denom_is_zero);
    // Verify that the output is reduced, i.e. output < modulus.
    let out_aux_red = &nv[MODULAR_OUT_AUX_RED];
    let one = builder.one_extension();
    let zero = builder.zero_extension();
    let mut is_less_than = [zero; N_LIMBS];
    // is_less_than[0] = 1 - mod_is_zero * div_shr_filter.
    is_less_than[0] =
        builder.arithmetic_extension(F::NEG_ONE, F::ONE, mod_is_zero, div_shr_filter, one);
    eval_ext_circuit_addcy(
        builder,
        yield_constr,
        filter,
        &modulus,
        out_aux_red,
        &output,
        &is_less_than,
        true,
    );
    // restore output[0]
    output[0] = builder.sub_extension(output[0], div_denom_is_zero);
    // prod = q(x) * m(x)
    let prod = pol_mul_wide2_ext_circuit(builder, quot, modulus);
    // higher order terms must be zero
    for &x in prod[2 * N_LIMBS..].iter() {
        let t = builder.mul_extension(filter, x);
        yield_constr.constraint_transition(builder, t);
    }
    // constr_poly = c(x) + q(x) * m(x)
    let mut constr_poly: [_; 2 * N_LIMBS] = prod[0..2 * N_LIMBS].try_into().unwrap();
    pol_add_assign_ext_circuit(builder, &mut constr_poly, &output);
    let offset =
        builder.constant_extension(F::Extension::from_canonical_u64(AUX_COEFF_ABS_MAX as u64));
    let zero = builder.zero_extension();
    // Reassemble s(x): undo the 2^20 offset on the low halves …
    let mut aux = [zero; 2 * N_LIMBS];
    for (c, i) in aux.iter_mut().zip(MODULAR_AUX_INPUT_LO) {
        *c = builder.sub_extension(nv[i], offset);
    }
    // add high 16-bits of aux input
    let base = F::from_canonical_u64(1u64 << LIMB_BITS);
    for (c, j) in aux.iter_mut().zip(MODULAR_AUX_INPUT_HI) {
        *c = builder.mul_const_add_extension(base, nv[j], *c);
    }
    // … and fold in the (x - β) factor.
    let base = builder.constant_extension(base.into());
    let t = pol_adjoin_root_ext_circuit(builder, aux, base);
    pol_add_assign_ext_circuit(builder, &mut constr_poly, &t);
    constr_poly
}
/// Recursive-circuit counterpart of `eval_packed_div_helper`: verify that
/// num = quo * den + rem (with rem reduced) inside the plonky2 circuit.
pub(crate) fn eval_ext_circuit_divmod_helper<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
    filter: ExtensionTarget<D>,
    num_range: Range<usize>,
    den_range: Range<usize>,
    quo_range: Range<usize>,
    rem_range: Range<usize>,
) {
    // Two-row operation: filter must vanish on the last row.
    yield_constr.constraint_last_row(builder, filter);
    let num = &lv[num_range];
    let den = read_value(lv, den_range);
    // Pad the quotient to double width for the constraint-polynomial builder.
    let quo = {
        let zero = builder.zero_extension();
        let mut quo = [zero; 2 * N_LIMBS];
        quo[..N_LIMBS].copy_from_slice(&lv[quo_range]);
        quo
    };
    let rem = read_value(lv, rem_range);
    let mut constr_poly =
        modular_constr_poly_ext_circuit(lv, nv, builder, yield_constr, filter, rem, den, quo);
    let input = num;
    // Require constr_poly == num, coefficient-wise.
    pol_sub_assign_ext_circuit(builder, &mut constr_poly, input);
    for &c in constr_poly.iter() {
        let t = builder.mul_extension(filter, c);
        yield_constr.constraint_transition(builder, t);
    }
}
/// Recursive-circuit constraint evaluation entry point for the division
/// operations: evaluates the DIVU constraints, then the signed DIV ones.
pub(crate) fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    eval_divu_ext_circuit(builder, lv, nv, yield_constr);
    eval_div_ext_circuit(builder, lv, nv, yield_constr);
}
/// Recursive-circuit counterpart of `eval_packed_divu`: constrain an
/// unsigned DIVU row using the same register layout.
pub(crate) fn eval_divu_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    eval_ext_circuit_divmod_helper(
        builder,
        lv,
        nv,
        yield_constr,
        lv[IS_DIVU],
        INPUT_REGISTER_0,
        INPUT_REGISTER_1,
        OUTPUT_REGISTER,
        AUX_INPUT_REGISTER_0,
    );
}
/// Recursive-circuit counterpart of `eval_packed_div`: constrain a signed
/// DIV row by checking the absolute-value/sign helper columns for all four
/// values, the sign relations, and finally the unsigned divmod identity on
/// the absolute values.
pub(crate) fn eval_div_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    let filter = lv[IS_DIV];
    let over_flow = builder.constant_extension(F::Extension::from_canonical_u64(1 << LIMB_BITS));
    let add = builder.constant_extension(F::Extension::from_canonical_u64(1 << (LIMB_BITS - 1)));
    // Circuit version of the packed `check_abs` closure; returns the
    // is_neg column target.
    let mut check_abs = |input_idx: Range<usize>, abs_idx, sum_idx, is_neg_idx, lo_borrow_idx| {
        // check is_neg bool
        let is_neg = nv[is_neg_idx];
        let one = builder.one_extension();
        {
            let t = builder.sub_extension(one, is_neg);
            let multi_t = builder.mul_many_extension([filter, is_neg, t]);
            yield_constr.constraint_transition(builder, multi_t);
        }
        // check input is negative or not. We just check the most significant bit in significant limb
        {
            let sum = nv[sum_idx];
            let input_hi = lv[input_idx.end - 1];
            let t0 = builder.add_extension(input_hi, add);
            let t1 = builder.sub_extension(t0, sum);
            let t2 = builder.mul_extension(over_flow, is_neg);
            let t3 = builder.sub_extension(t1, t2);
            let t = builder.mul_extension(filter, t3);
            yield_constr.constraint_transition(builder, t); // filter * (input_hi + add - sum - is_neg * over_flow)
        }
        // input_lo_borrow
        let input_lo_borrow = nv[lo_borrow_idx];
        let t0 = builder.sub_extension(one, input_lo_borrow);
        let t = builder.mul_many_extension([filter, input_lo_borrow, t0]);
        yield_constr.constraint_transition(builder, t);
        // Limb-wise two's-complement negation of the input, using the
        // recorded low-limb borrow.
        let neg_inputs = {
            let lo = builder.mul_extension(input_lo_borrow, over_flow);
            let lo = builder.sub_extension(lo, lv[input_idx.start]);
            let hi = builder.sub_extension(over_flow, lv[input_idx.start + 1]);
            let hi = builder.sub_extension(hi, input_lo_borrow);
            [lo, hi]
        };
        // abs = is_neg ? -input : input, limb by limb.
        for ((i, j), neg_input) in input_idx.zip(abs_idx).zip(neg_inputs) {
            let t0 = builder.mul_extension(is_neg, neg_input);
            let t1 = builder.sub_extension(one, is_neg);
            let t2 = builder.mul_extension(t1, lv[i]);
            let t3 = builder.sub_extension(t2, lv[j]);
            let t4 = builder.add_extension(t0, t3);
            let t = builder.mul_extension(filter, t4);
            yield_constr.constraint_transition(builder, t);
        }
        is_neg
    };
    let is_input0_neg = check_abs(
        INPUT_REGISTER_0,
        INPUT_REGISTER_2,
        MODULAR_DIV_DENOM_IS_ZERO + 1,
        MODULAR_DIV_DENOM_IS_ZERO + 5,
        MODULAR_DIV_DENOM_IS_ZERO + 6,
    );
    let is_input1_neg = check_abs(
        INPUT_REGISTER_1,
        AUX_INPUT_REGISTER_2,
        MODULAR_DIV_DENOM_IS_ZERO + 2,
        MODULAR_DIV_DENOM_IS_ZERO + 7,
        MODULAR_DIV_DENOM_IS_ZERO + 8,
    );
    let is_quot_neg = check_abs(
        OUTPUT_REGISTER_LO,
        QUOT_ABS,
        MODULAR_DIV_DENOM_IS_ZERO + 3,
        RC_FREQUENCIES + 1,
        RC_FREQUENCIES + 2,
    );
    let is_rem_neg = check_abs(
        OUTPUT_REGISTER_HI,
        REM_ABS,
        MODULAR_DIV_DENOM_IS_ZERO + 4,
        RC_FREQUENCIES + 3,
        RC_FREQUENCIES + 4,
    );
    // check sign of quot
    // sign(quot) == sign(input0) xor sign(input1) or quot==0
    let is_same_sign = nv[RC_FREQUENCIES + 5];
    {
        let t0 = builder.add_extension(is_input0_neg, is_input1_neg);
        let two = builder.constant_extension(F::Extension::from_canonical_u8(2));
        let t1 = builder.mul_many_extension([two, is_input0_neg, is_input1_neg]);
        let t2 = builder.sub_extension(t0, t1);
        let t3 = builder.sub_extension(t2, is_same_sign);
        let t = builder.mul_extension(filter, t3);
        yield_constr.constraint_transition(builder, t);
    }
    let quot_limbs_sum = OUTPUT_REGISTER_LO.fold(builder.zero_extension(), |acc, i| {
        builder.add_extension(acc, lv[i])
    });
    let t0 = builder.sub_extension(is_quot_neg, is_same_sign);
    let t = builder.mul_many_extension([filter, t0, quot_limbs_sum]);
    yield_constr.constraint_transition(builder, t);
    // check sign of rem
    // sign(rem) == sign(input0) or rem==0
    let rem_limbs_sum = OUTPUT_REGISTER_HI.fold(builder.zero_extension(), |acc, i| {
        builder.add_extension(acc, lv[i])
    });
    let t0 = builder.sub_extension(is_rem_neg, is_input0_neg);
    let t = builder.mul_many_extension([filter, t0, rem_limbs_sum]);
    yield_constr.constraint_transition(builder, t);
    // |input0| = |quot| * |input1| + |rem| via the unsigned helper.
    eval_ext_circuit_divmod_helper(
        builder,
        lv,
        nv,
        yield_constr,
        filter,
        INPUT_REGISTER_2,
        AUX_INPUT_REGISTER_2,
        QUOT_ABS,
        REM_ABS,
    );
}
#[cfg(test)]
mod tests {
use plonky2::field::goldilocks_field::GoldilocksField;
use plonky2::field::types::Sample;
use rand::{Rng, SeedableRng};
use rand_chacha::ChaCha8Rng;
use super::*;
const N_RND_TESTS: usize = 1000;
const MODULAR_OPS: [usize; 2] = [IS_DIV, IS_DIVU];
#[test]
fn generate_eval_consistency_not_modular() {
type F = GoldilocksField;
let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
let nv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
// if `IS_MOD == 0`, then the constraints should be met even
// if all values are garbage (and similarly for the other operations).
for op in MODULAR_OPS {
lv[op] = F::ZERO;
}
let mut constraint_consumer = ConstraintConsumer::new(
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | true |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/arithmetic/shift.rs | prover/src/arithmetic/shift.rs | //! Support for the MIPS SLL(V), and SRL(V) instructions.
//!
//! This crate verifies an MIPS shift instruction, which takes two
//! 32-bit inputs S and A, and produces a 32-bit output C satisfying
//!
//! C = A << S (mod 2^32) for SLL(V) or
//! C = A >> S (mod 2^32) for SRL(V).
//!
//! The way this computation is carried is by providing a third input
//! B = 1 << S (mod 2^32)
//! and then computing:
//! C = A * B (mod 2^32) for SLL(V) or
//! C = A / B (mod 2^32) for SRL(V)
//!
//! Inputs A, S, and B, and output C, are given as arrays of 16-bit
//! limbs. For example, if the limbs of A are a[0].a[1], then
//!
//! A = \sum_{i=0}^1 a[i] β^i,
//!
//! where β = 2^16 = 2^LIMB_BITS. To verify that A, S, B and C satisfy
//! the equations, we proceed similarly to MUL for SLL(V) and to DIV for SRL(V).
use plonky2::field::extension::Extendable;
use plonky2::field::packed::PackedField;
use plonky2::field::types::PrimeField64;
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use super::{div, mul};
use crate::arithmetic::columns::*;
use crate::arithmetic::utils::{read_value, read_value_i64_limbs, u32_to_array};
// use crate::arithmetic::utils::*;
use crate::constraint_consumer::{ConstraintConsumer, RecursiveConstraintConsumer};
/// Generates a shift operation (either SLL(V) or SRL(V)).
///
/// The inputs are stored in the form `(shift, input, 1 << (shift % 32))`.
/// NB: only the low 5 bits of `shift` are used (see `shift & 0x1F`
/// below, matching MIPS semantics), so the third register always holds
/// a power of two — it is never 0, even when `shift >= 32`.
/// We leverage the functions in mul.rs and div.rs to carry out
/// the computation.
pub fn generate<F: PrimeField64>(
    lv: &mut [F],  // current row of the arithmetic table, written in place
    nv: &mut [F],  // next row, passed through to the DIV helper for SRL(V)
    filter: usize, // opcode column index: IS_SLL, IS_SLLV, IS_SRL or IS_SRLV
    shift: u32,    // shift amount (only the low 5 bits are significant)
    input: u32,    // value being shifted
    result: u32,   // 32-bit result of the shift
) {
    // We use the multiplication logic to generate SLL(V)
    // TODO: It would probably be clearer/cleaner to read the U32
    // into an [i64;N] and then copy that to the lv table.
    // The first input is the shift we need to apply.
    u32_to_array(&mut lv[INPUT_REGISTER_0], shift);
    // The second register holds the input which needs shifting.
    u32_to_array(&mut lv[INPUT_REGISTER_1], input);
    u32_to_array(&mut lv[OUTPUT_REGISTER], result);
    // Only the low 5 bits of the shift amount are used (MIPS semantics):
    // compute 1 << (shift % 32) and store it in the third input register.
    let shifted_displacement = 1 << (shift & 0x1F);
    u32_to_array(&mut lv[INPUT_REGISTER_2], shifted_displacement);
    let input0 = read_value_i64_limbs(lv, INPUT_REGISTER_1); // input
    let input1 = read_value_i64_limbs(lv, INPUT_REGISTER_2); // 1 << shift
    match filter {
        IS_SLL | IS_SLLV => {
            // We generate the multiplication input0 * input1 using mul.rs.
            mul::generate_mul(lv, input0, input1);
        }
        IS_SRL | IS_SRLV => {
            // For IS_SRL(IS_SRLV) we compute `input / shifted_displacement`
            // using the DIVU logic in div.rs; since the divisor is
            // 1 << (shift % 32), the quotient is exactly the shifted result.
            div::generate_divu_helper(
                lv,
                nv,
                filter,
                INPUT_REGISTER_1,
                INPUT_REGISTER_2,
                OUTPUT_REGISTER,
                None,
            );
        }
        _ => panic!("expected filter to be IS_SLL(V), or IS_SRL(V) but it was {filter}"),
    }
}
/// Evaluates the constraints for an SLL(V) opcode.
///
/// Identical to the MUL constraints, except that the operands live in
/// `INPUT_REGISTER_1` (the value) and `INPUT_REGISTER_2` (`1 << shift`)
/// instead of registers 0 and 1.
fn eval_packed_sll<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // Active whenever either shift-left flag is set.
    let is_shift_left = lv[IS_SLL] + lv[IS_SLLV];
    // Operand limbs: the value being shifted and the power-of-two multiplier.
    let value_limbs = read_value::<N_LIMBS, _>(lv, INPUT_REGISTER_1);
    let pow2_limbs = read_value::<N_LIMBS, _>(lv, INPUT_REGISTER_2);
    mul::eval_packed_generic_mul(lv, is_shift_left, value_limbs, pow2_limbs, yield_constr);
}
/// Evaluates the constraints for an SRL(V) opcode.
///
/// Reuses the DIV constraint logic: the quotient of
/// `INPUT_REGISTER_1 / INPUT_REGISTER_2` (register 2 holds `1 << shift`)
/// is checked against `OUTPUT_REGISTER`, with the remainder carried in
/// `AUX_INPUT_REGISTER_0`.
fn eval_packed_srl<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    nv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // Active whenever either shift-right-logical flag is set.
    let is_shift_right = lv[IS_SRL] + lv[IS_SRLV];
    div::eval_packed_div_helper(
        lv,
        nv,
        yield_constr,
        is_shift_right,
        INPUT_REGISTER_1,
        INPUT_REGISTER_2,
        OUTPUT_REGISTER,
        AUX_INPUT_REGISTER_0,
    );
}
/// Evaluates all shift constraints (SLL(V) and SRL(V)) over a packed field.
///
/// Each sub-evaluator is gated by its own opcode flags, so both can be run
/// unconditionally. NOTE(review): the call order here should mirror
/// `eval_ext_circuit`, since the native and recursive verifiers must
/// consume constraints in the same sequence — confirm before reordering.
pub fn eval_packed_generic<P: PackedField>(
    lv: &[P; NUM_ARITH_COLUMNS],
    nv: &[P; NUM_ARITH_COLUMNS],
    yield_constr: &mut ConstraintConsumer<P>,
) {
    // SLL(V): multiplication-style constraints, current row only.
    eval_packed_sll(lv, yield_constr);
    // SRL(V): division-style constraints; also reads the next row `nv`.
    eval_packed_srl(lv, nv, yield_constr);
}
/// Recursive-circuit version of the SLL(V) constraints.
///
/// Mirrors `eval_packed_sll`: MUL constraints with operands taken from
/// `INPUT_REGISTER_1` (value) and `INPUT_REGISTER_2` (`1 << shift`).
fn eval_ext_circuit_sll<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    // Active when either shift-left flag is set.
    let is_shift_left = builder.add_extension(lv[IS_SLL], lv[IS_SLLV]);
    // Operand limbs: the value being shifted and the power-of-two multiplier.
    let value_limbs = read_value::<N_LIMBS, _>(lv, INPUT_REGISTER_1);
    let pow2_limbs = read_value::<N_LIMBS, _>(lv, INPUT_REGISTER_2);
    mul::eval_ext_mul_circuit(
        builder,
        lv,
        is_shift_left,
        value_limbs,
        pow2_limbs,
        yield_constr,
    );
}
/// Recursive-circuit version of the SRL(V) constraints.
///
/// Mirrors `eval_packed_srl`: the DIV constraint logic is applied with the
/// numerator in `INPUT_REGISTER_1`, the divisor (`1 << shift`) in
/// `INPUT_REGISTER_2`, the quotient in `OUTPUT_REGISTER` and the remainder
/// in `AUX_INPUT_REGISTER_0`.
fn eval_ext_circuit_srl<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    // Active when either shift-right-logical flag is set.
    let is_shift_right = builder.add_extension(lv[IS_SRL], lv[IS_SRLV]);
    div::eval_ext_circuit_divmod_helper(
        builder,
        lv,
        nv,
        yield_constr,
        is_shift_right,
        INPUT_REGISTER_1,
        INPUT_REGISTER_2,
        OUTPUT_REGISTER,
        AUX_INPUT_REGISTER_0,
    );
}
/// Recursive-circuit counterpart of `eval_packed_generic`: evaluates all
/// shift constraints (SLL(V) and SRL(V)) inside a plonky2 circuit.
///
/// NOTE(review): the call order should match `eval_packed_generic` so the
/// recursive verifier consumes constraints in the same sequence as the
/// native evaluator — confirm before reordering.
pub fn eval_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    lv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    nv: &[ExtensionTarget<D>; NUM_ARITH_COLUMNS],
    yield_constr: &mut RecursiveConstraintConsumer<F, D>,
) {
    // SLL(V): multiplication-style constraints (current row only).
    eval_ext_circuit_sll(builder, lv, yield_constr);
    // SRL(V): division-style constraints (also uses the next row).
    eval_ext_circuit_srl(builder, lv, nv, yield_constr);
}
#[cfg(test)]
mod tests {
    use super::*;
    use plonky2::field::goldilocks_field::GoldilocksField;
    use plonky2::field::types::{Field, Sample};
    use rand::{Rng, SeedableRng};
    use rand_chacha::ChaCha8Rng;

    /// Number of random trials per opcode.
    const N_RND_TESTS: usize = 1;

    /// With all four shift flags cleared, the shift constraints must vanish
    /// even on completely random (garbage) rows.
    #[test]
    fn generate_eval_consistency_not_shift() {
        type F = GoldilocksField;
        let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
        let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
        let nv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
        // if `IS_SLL, IS_SLLV, IS_SRL, IS_SRLV == 0`, then the constraints
        // should be met even if all values are garbage.
        for flag in [IS_SLL, IS_SLLV, IS_SRL, IS_SRLV] {
            lv[flag] = F::ZERO;
        }
        let mut constraint_consumer = ConstraintConsumer::new(
            vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
            GoldilocksField::ONE,
            GoldilocksField::ONE,
            GoldilocksField::ONE,
        );
        eval_packed_generic(&lv, &nv, &mut constraint_consumer);
        for &acc in &constraint_consumer.constraint_accs {
            assert_eq!(acc, GoldilocksField::ZERO);
        }
    }

    /// Generates a trace row for `filter` with a random `shift < 32` and a
    /// random 32-bit input, then checks that the packed constraints vanish.
    fn generate_eval_consistency_shift(filter: usize) {
        type F = GoldilocksField;
        let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
        let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
        let mut nv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
        // Clear every shift flag, then enable only the opcode under test.
        for flag in [IS_SLL, IS_SLLV, IS_SRL, IS_SRLV] {
            lv[flag] = F::ZERO;
        }
        lv[filter] = F::ONE;
        if filter == IS_SRL || filter == IS_SRLV {
            // SRL(V) is implemented via the DIV logic, so clear both division
            // flags rather than leaving them as garbage (kept consistent with
            // `generate_eval_consistency_shift_over_32` below).
            lv[IS_DIV] = F::ZERO;
            lv[IS_DIVU] = F::ZERO;
        }
        for _i in 0..N_RND_TESTS {
            let shift: u32 = rng.gen_range(0..32);
            let mut full_input = 0;
            // Fill the input register with random 16-bit limbs; `full_input`
            // only serves as the random 32-bit operand handed to `generate`,
            // which rewrites the register with its canonical limbs anyway.
            for ai in INPUT_REGISTER_1 {
                lv[ai] = F::from_canonical_u16(rng.gen());
                full_input = lv[ai].to_canonical_u64() as u32 + full_input * (1 << 16);
            }
            let output = if filter == IS_SLL || filter == IS_SLLV {
                full_input << shift
            } else {
                full_input >> shift
            };
            generate(&mut lv, &mut nv, filter, shift, full_input, output);
            let mut constraint_consumer = ConstraintConsumer::new(
                vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
                GoldilocksField::ONE,
                GoldilocksField::ONE,
                GoldilocksField::ZERO,
            );
            eval_packed_generic(&lv, &nv, &mut constraint_consumer);
            for &acc in &constraint_consumer.constraint_accs {
                assert_eq!(acc, GoldilocksField::ZERO);
            }
        }
    }

    #[test]
    fn generate_eval_consistency() {
        generate_eval_consistency_shift(IS_SLL);
        generate_eval_consistency_shift(IS_SLLV);
        generate_eval_consistency_shift(IS_SRL);
        generate_eval_consistency_shift(IS_SRLV);
    }

    /// Same as `generate_eval_consistency_shift`, but with `shift >= 32`:
    /// only the low 5 bits of the shift amount are significant.
    fn generate_eval_consistency_shift_over_32(filter: usize) {
        type F = GoldilocksField;
        let mut rng = ChaCha8Rng::seed_from_u64(0x6feb51b7ec230f25);
        let mut lv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
        let mut nv = [F::default(); NUM_ARITH_COLUMNS].map(|_| F::sample(&mut rng));
        // Clear every shift-related flag, then enable only the opcode under test.
        for flag in [IS_SLL, IS_SLLV, IS_SRL, IS_SRLV, IS_SRA, IS_SRAV] {
            lv[flag] = F::ZERO;
        }
        lv[filter] = F::ONE;
        if filter == IS_SRL || filter == IS_SRLV {
            // SRL(V) is implemented via the DIV logic, so clear both division
            // flags rather than leaving them as garbage.
            lv[IS_DIV] = F::ZERO;
            lv[IS_DIVU] = F::ZERO;
        }
        for _i in 0..N_RND_TESTS {
            let shift: u32 = rng.gen_range(32..=u32::MAX);
            let mut full_input = 0;
            // set inputs to random values
            for ai in INPUT_REGISTER_1 {
                lv[ai] = F::from_canonical_u16(rng.gen());
                full_input = lv[ai].to_canonical_u64() as u32 + full_input * (1 << 16);
            }
            // The expected result uses only the low 5 bits of the shift.
            let output = if filter == IS_SLL || filter == IS_SLLV {
                full_input << (shift & 0x1F)
            } else {
                full_input >> (shift & 0x1F)
            };
            generate(&mut lv, &mut nv, filter, shift, full_input, output);
            let mut constraint_consumer = ConstraintConsumer::new(
                vec![GoldilocksField(2), GoldilocksField(3), GoldilocksField(5)],
                GoldilocksField::ONE,
                GoldilocksField::ONE,
                GoldilocksField::ZERO,
            );
            eval_packed_generic(&lv, &nv, &mut constraint_consumer);
            for &acc in &constraint_consumer.constraint_accs {
                assert_eq!(acc, GoldilocksField::ZERO);
            }
        }
    }

    #[test]
    fn generate_eval_consistency_over_32() {
        generate_eval_consistency_shift_over_32(IS_SLL);
        generate_eval_consistency_shift_over_32(IS_SLLV);
        generate_eval_consistency_shift_over_32(IS_SRL);
        generate_eval_consistency_shift_over_32(IS_SRLV);
    }
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
ProjectZKM/zkm | https://github.com/ProjectZKM/zkm/blob/eb09cf6c81bed9717bc98e4a4c9a61f54787a351/prover/src/arithmetic/utils.rs | prover/src/arithmetic/utils.rs | use std::ops::{Add, AddAssign, Mul, Neg, Range, Shr, Sub, SubAssign};
use plonky2::field::extension::Extendable;
use plonky2::field::types::{Field, PrimeField64};
use plonky2::hash::hash_types::RichField;
use plonky2::iop::ext_target::ExtensionTarget;
use plonky2::plonk::circuit_builder::CircuitBuilder;
use static_assertions::const_assert;
use crate::arithmetic::columns::{LIMB_BITS, N_LIMBS};
/// Return an array of `N` zeros of type `T`.
///
/// "Zero" here means `T::default()`; see the note below on why `Default`
/// is used instead of `num::Zero`.
pub(crate) fn pol_zero<T, const N: usize>() -> [T; N]
where
    T: Copy + Default,
{
    // NOTE: ideally this would be `T::zero()` from `num::Zero`, since
    // `default()` does not guarantee zero in general (though it does for
    // every `T` used in this crate). Switching would ripple through the
    // whole crate (`Field::is_zero` vs `num::Zero::is_zero`), hence
    // `Default` it is.
    std::array::from_fn(|_| T::default())
}
/// a(x) += b(x); requires deg(a) >= deg(b) (checked in debug builds only).
pub(crate) fn pol_add_assign<T>(a: &mut [T], b: &[T])
where
    T: AddAssign + Copy + Default,
{
    debug_assert!(a.len() >= b.len(), "expected {} >= {}", a.len(), b.len());
    // Coefficient-wise add; `zip` stops after b.len() terms, leaving the
    // higher-degree coefficients of `a` untouched.
    a.iter_mut().zip(b.iter()).for_each(|(ai, bi)| *ai += *bi);
}
/// Circuit analogue of `pol_add_assign`: a(x) += b(x) over extension
/// targets; requires deg(a) >= deg(b) (checked in debug builds only).
pub(crate) fn pol_add_assign_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    a: &mut [ExtensionTarget<D>],
    b: &[ExtensionTarget<D>],
) {
    debug_assert!(a.len() >= b.len(), "expected {} >= {}", a.len(), b.len());
    // One add gate per coefficient of `b`; `zip` stops at the shorter
    // slice, so the higher-degree coefficients of `a` are untouched.
    for (a_item, b_item) in a.iter_mut().zip(b) {
        *a_item = builder.add_extension(*a_item, *b_item);
    }
}
/// Return a(x) + b(x); the result is zero-padded to 2*N_LIMBS-1
/// coefficients so the interface matches `pol_mul_wide`.
pub(crate) fn pol_add<T>(a: [T; N_LIMBS], b: [T; N_LIMBS]) -> [T; 2 * N_LIMBS - 1]
where
    T: Add<Output = T> + Copy + Default,
{
    let mut sum = pol_zero();
    // Coefficient-wise sum over the first N_LIMBS slots; `zip` stops at the
    // shorter input, so the padding slots keep their zero value.
    for ((dst, &x), &y) in sum.iter_mut().zip(a.iter()).zip(b.iter()) {
        *dst = x + y;
    }
    sum
}
/// Circuit analogue of `pol_add`: return a(x) + b(x), zero-padded to
/// 2*N_LIMBS-1 targets to match the `pol_mul_wide` interface.
pub(crate) fn pol_add_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    a: [ExtensionTarget<D>; N_LIMBS],
    b: [ExtensionTarget<D>; N_LIMBS],
) -> [ExtensionTarget<D>; 2 * N_LIMBS - 1] {
    let zero = builder.zero_extension();
    let mut sum = [zero; 2 * N_LIMBS - 1];
    // One add gate per coefficient; the padding slots stay at zero.
    for i in 0..N_LIMBS {
        sum[i] = builder.add_extension(a[i], b[i]);
    }
    sum
}
/// Return a(x) - b(x); the result is zero-padded to 2*N_LIMBS-1
/// coefficients so the interface matches `pol_mul_wide`.
pub(crate) fn pol_sub<T>(a: [T; N_LIMBS], b: [T; N_LIMBS]) -> [T; 2 * N_LIMBS - 1]
where
    T: Sub<Output = T> + Copy + Default,
{
    let mut diff = pol_zero();
    // Coefficient-wise difference over the first N_LIMBS slots; `zip` stops
    // at the shorter input, so the padding slots keep their zero value.
    for ((dst, &x), &y) in diff.iter_mut().zip(a.iter()).zip(b.iter()) {
        *dst = x - y;
    }
    diff
}
/// Circuit analogue of `pol_sub`: return a(x) - b(x), zero-padded to
/// 2*N_LIMBS-1 targets to match the `pol_mul_wide` interface.
pub(crate) fn pol_sub_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    a: [ExtensionTarget<D>; N_LIMBS],
    b: [ExtensionTarget<D>; N_LIMBS],
) -> [ExtensionTarget<D>; 2 * N_LIMBS - 1] {
    let zero = builder.zero_extension();
    let mut diff = [zero; 2 * N_LIMBS - 1];
    // One sub gate per coefficient; the padding slots stay at zero.
    for i in 0..N_LIMBS {
        diff[i] = builder.sub_extension(a[i], b[i]);
    }
    diff
}
/// a(x) -= b(x); requires deg(a) >= deg(b) (checked in debug builds only).
pub(crate) fn pol_sub_assign<T>(a: &mut [T], b: &[T])
where
    T: SubAssign + Copy,
{
    debug_assert!(a.len() >= b.len(), "expected {} >= {}", a.len(), b.len());
    // Coefficient-wise subtract; `zip` stops after b.len() terms, leaving
    // the higher-degree coefficients of `a` untouched.
    a.iter_mut().zip(b.iter()).for_each(|(ai, bi)| *ai -= *bi);
}
/// Circuit analogue of `pol_sub_assign`: a(x) -= b(x) over extension
/// targets; requires deg(a) >= deg(b) (checked in debug builds only).
pub(crate) fn pol_sub_assign_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    a: &mut [ExtensionTarget<D>],
    b: &[ExtensionTarget<D>],
) {
    debug_assert!(a.len() >= b.len(), "expected {} >= {}", a.len(), b.len());
    // One sub gate per coefficient of `b`; `zip` stops at the shorter
    // slice, so the higher-degree coefficients of `a` are untouched.
    for (a_item, b_item) in a.iter_mut().zip(b) {
        *a_item = builder.sub_extension(*a_item, *b_item);
    }
}
/// Given polynomials a(x) and b(x), return the full product a(x)*b(x)
/// (schoolbook multiplication, 2*N_LIMBS-1 output coefficients).
///
/// NB: The caller is responsible for ensuring that no undesired
/// overflow occurs while accumulating the coefficients of the product.
pub(crate) fn pol_mul_wide<T>(a: [T; N_LIMBS], b: [T; N_LIMBS]) -> [T; 2 * N_LIMBS - 1]
where
    T: AddAssign + Copy + Mul<Output = T> + Default,
{
    let mut res = [T::default(); 2 * N_LIMBS - 1];
    for i in 0..N_LIMBS {
        for j in 0..N_LIMBS {
            // The term a[i]*x^i * b[j]*x^j contributes to degree i+j.
            res[i + j] += a[i] * b[j];
        }
    }
    res
}
/// Circuit analogue of `pol_mul_wide`: schoolbook product a(x)*b(x) with
/// 2*N_LIMBS-1 output coefficients.
pub(crate) fn pol_mul_wide_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    a: [ExtensionTarget<D>; N_LIMBS],
    b: [ExtensionTarget<D>; N_LIMBS],
) -> [ExtensionTarget<D>; 2 * N_LIMBS - 1] {
    let zero = builder.zero_extension();
    let mut res = [zero; 2 * N_LIMBS - 1];
    for (i, &ai) in a.iter().enumerate() {
        for (j, &bj) in b.iter().enumerate() {
            // res[i+j] += a[i] * b[j], expressed as a single mul-add gate.
            res[i + j] = builder.mul_add_extension(ai, bj, res[i + j]);
        }
    }
    res
}
/// As for `pol_mul_wide`, but the first argument has 2*N_LIMBS elements,
/// so the product has 3*N_LIMBS-1 coefficients.
///
/// NB: The caller is responsible for ensuring that no undesired
/// overflow occurs while accumulating the coefficients of the product.
pub(crate) fn pol_mul_wide2<T>(a: [T; 2 * N_LIMBS], b: [T; N_LIMBS]) -> [T; 3 * N_LIMBS - 1]
where
    T: AddAssign + Copy + Mul<Output = T> + Default,
{
    let mut res = [T::default(); 3 * N_LIMBS - 1];
    for i in 0..2 * N_LIMBS {
        for j in 0..N_LIMBS {
            // The term a[i]*x^i * b[j]*x^j contributes to degree i+j.
            res[i + j] += a[i] * b[j];
        }
    }
    res
}
/// Circuit analogue of `pol_mul_wide2`: product of a 2*N_LIMBS-coefficient
/// polynomial with an N_LIMBS-coefficient one, giving 3*N_LIMBS-1
/// coefficients.
pub(crate) fn pol_mul_wide2_ext_circuit<F: RichField + Extendable<D>, const D: usize>(
    builder: &mut CircuitBuilder<F, D>,
    a: [ExtensionTarget<D>; 2 * N_LIMBS],
    b: [ExtensionTarget<D>; N_LIMBS],
) -> [ExtensionTarget<D>; 3 * N_LIMBS - 1] {
    let zero = builder.zero_extension();
    let mut res = [zero; 3 * N_LIMBS - 1];
    for (i, &ai) in a.iter().enumerate() {
        for (j, &bj) in b.iter().enumerate() {
            // res[i+j] += a[i] * b[j], expressed as a single mul-add gate.
            res[i + j] = builder.mul_add_extension(ai, bj, res[i + j]);
        }
    }
    res
}
/// Given a(x) and b(x), return their product truncated to the first N
/// coefficients, i.e. a(x)*b(x) mod x^N (with 16-bit limbs and N = 2
/// this is the product mod 2^32).
pub(crate) fn pol_mul_lo<T, const N: usize>(a: [T; N], b: [T; N]) -> [T; N]
where
    T: AddAssign + Copy + Default + Mul<Output = T>,
{
    let mut res = pol_zero();
    // Accumulate every product a[i]*b[j] whose degree i+j stays below N;
    // all higher-degree terms are discarded by construction.
    for i in 0..N {
        for j in 0..N - i {
            res[i + j] += a[i] * b[j];
        }
    }
    res
}
/// Circuit analogue of `pol_mul_lo`: the product a(x)*b(x) truncated to
/// its first N coefficients (i.e. reduced mod x^N).
pub(crate) fn pol_mul_lo_ext_circuit<
    F: RichField + Extendable<D>,
    const D: usize,
    const N: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    a: [ExtensionTarget<D>; N],
    b: [ExtensionTarget<D>; N],
) -> [ExtensionTarget<D>; N] {
    let zero = builder.zero_extension();
    let mut res = [zero; N];
    for deg in 0..N {
        // Invariant: i + j = deg, so only terms of degree < N are kept.
        for i in 0..=deg {
            let j = deg - i;
            res[deg] = builder.mul_add_extension(a[i], b[j], res[deg]);
        }
    }
    res
}
/// Zero-extend `a` to length M: [a[0], a[1], ..., a[N-1], 0, ..., 0].
/// Only M == 2*N - 1 (the `pol_mul_wide` output size) is supported.
pub(crate) fn pol_extend<T, const N: usize, const M: usize>(a: [T; N]) -> [T; M]
where
    T: Copy + Default,
{
    assert_eq!(M, 2 * N - 1);
    // The first N slots carry a's coefficients; the remaining M - N slots
    // keep the zero produced by `pol_zero`.
    let mut padded: [T; M] = pol_zero();
    padded[..N].copy_from_slice(&a);
    padded
}
/// Given polynomial a(x) = \sum_{i=0}^{N-2} a[i] x^i and an element
/// `root`, return b(x) = (x - root) * a(x).
///
/// Note the result has the same fixed size N: the coefficient that would
/// multiply x^N (i.e. a[N-1]) does not appear, so a[N-1] is expected to
/// be zero.
pub(crate) fn pol_adjoin_root<T, U, const N: usize>(a: [T; N], root: U) -> [T; N]
where
    T: Add<Output = T> + Copy + Default + Mul<Output = T> + Sub<Output = T>,
    U: Copy + Mul<T, Output = T> + Neg<Output = U>,
{
    // Comparing coefficients in (x - root) * a(x):
    //   res[0]   = -root * a[0]
    //   res[deg] = a[deg - 1] - root * a[deg]   for deg >= 1
    std::array::from_fn(|deg| {
        if deg == 0 {
            -root * a[0]
        } else {
            a[deg - 1] - (root * a[deg])
        }
    })
}
/// Circuit analogue of `pol_adjoin_root`: return (x - root) * a(x) for a
/// polynomial a(x) = \sum_{i=0}^{N-2} a[i] x^i (the x^N coefficient is
/// not represented, so a[N-1] is expected to be zero).
pub(crate) fn pol_adjoin_root_ext_circuit<
    F: RichField + Extendable<D>,
    const D: usize,
    const N: usize,
>(
    builder: &mut CircuitBuilder<F, D>,
    a: [ExtensionTarget<D>; N],
    root: ExtensionTarget<D>,
) -> [ExtensionTarget<D>; N] {
    let zero = builder.zero_extension();
    let mut res = [zero; N];
    // res[0] = NEG_ONE * root * a[0] + ZERO * zero
    res[0] = builder.mul_extension_with_const(F::NEG_ONE, root, a[0]);
    for deg in 1..N {
        // res[deg] = NEG_ONE * root * a[deg] + ONE * a[deg - 1]
        res[deg] = builder.arithmetic_extension(F::NEG_ONE, F::ONE, root, a[deg], a[deg - 1]);
    }
    res
}
/// Divide a(x) = \sum_{i=0}^{N-1} a[i] x^i by (x - 2^EXP), assuming
/// 2^EXP is a root of `a`.
///
/// Returns q(x) with a(x) = (x - 2^EXP) * q(x). The quotient needs at
/// most N-1 coefficients, but is returned zero-extended to N because the
/// calling code wants it that way.
///
/// NB: We do not verify that a(2^EXP) = 0; if that doesn't hold the
/// result is basically junk.
pub(crate) fn pol_remove_root_2exp<const EXP: usize, T, const N: usize>(a: [T; N]) -> [T; N]
where
    T: Copy + Default + Neg<Output = T> + Shr<usize, Output = T> + Sub<Output = T>,
{
    // Write β = 2^EXP. Comparing coefficients in a(x) = (x - β) q(x) gives
    //
    //   q[0] = -a[0] / β   and   q[i] = (q[i-1] - a[i]) / β
    //
    // for 0 < i <= N-1, where every division is exact (by the root
    // assumption) and therefore realizable as a right shift by EXP.
    let mut q = [T::default(); N];
    let mut carry = -(a[0] >> EXP);
    q[0] = carry;
    // NB: q[N-1] is deliberately left at zero (see above).
    for deg in 1..N - 1 {
        carry = (carry - a[deg]) >> EXP;
        q[deg] = carry;
    }
    q
}
/// Copy the `value_idxs` slice of `lv` into a fixed-size array of length
/// `N`. Panics if the range does not contain exactly `N` elements.
pub(crate) fn read_value<const N: usize, T: Copy>(lv: &[T], value_idxs: Range<usize>) -> [T; N] {
    let window = &lv[value_idxs];
    window.try_into().unwrap()
}
/// Copy the `value_idxs` slice of `lv` into an `[i64; N]`, converting
/// each field element through its canonical `u64` representative.
/// Panics if the range does not contain exactly `N` elements.
pub(crate) fn read_value_i64_limbs<const N: usize, F: PrimeField64>(
    lv: &[F],
    value_idxs: Range<usize>,
) -> [i64; N] {
    let limbs: [F; N] = lv[value_idxs].try_into().unwrap();
    limbs.map(|limb| limb.to_canonical_u64() as i64)
}
/// Split a `u32` into two 16-bit limbs, little-endian:
/// out[0] = low 16 bits, out[1] = high 16 bits.
#[inline]
pub fn u32_to_array<F: Field>(out: &mut [F], x: u32) {
    // The limb layout below is only valid for 16-bit limbs.
    const_assert!(LIMB_BITS == 16);
    debug_assert!(out.len() == 2);
    let lo = x as u16;
    let hi = (x >> 16) as u16;
    out[0] = F::from_canonical_u16(lo);
    out[1] = F::from_canonical_u16(hi);
}
| rust | MIT | eb09cf6c81bed9717bc98e4a4c9a61f54787a351 | 2026-01-04T20:21:15.281276Z | false |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.