From 61a3e9b7ffab4299fd37e9b8af83363610df7e2a Mon Sep 17 00:00:00 2001 From: Artem Storozhuk Date: Fri, 8 Aug 2025 17:42:20 +0300 Subject: [PATCH 1/5] feat: Initial Utility chip (U32FromLeBytes + ByteChip) --- src/chips/blake3_circuit.rs | 20 +- src/chips/mod.rs | 1 + src/chips/utility.rs | 395 ++++++++++++++++++++++++++++++++++++ 3 files changed, 406 insertions(+), 10 deletions(-) create mode 100644 src/chips/utility.rs diff --git a/src/chips/blake3_circuit.rs b/src/chips/blake3_circuit.rs index cfb4b24..6005953 100644 --- a/src/chips/blake3_circuit.rs +++ b/src/chips/blake3_circuit.rs @@ -1209,13 +1209,13 @@ mod tests { match claim[0].as_canonical_u64() { 0u64 => { // This is our U8Xor claim. We should have chip_idx, A, B, A xor B (where A, B are bytes) - assert!(claim.len() == 4, "[U8Xor] wrong claim format"); + assert_eq!(claim.len(), 4, "[U8Xor] wrong claim format"); byte_xor_values_from_claims.push((claim[1], claim[2], claim[3])); } 1u64 => { /* This is our U32Xor claim. We should have chip_idx, A, B, A xor B (where A, B are u32) */ - assert!(claim.len() == 4, "[U32Xor] wrong claim format"); + assert_eq!(claim.len(), 4, "[U32Xor] wrong claim format"); let a_u32 = u32::try_from(claim[1].as_canonical_u64()).unwrap(); let b_u32 = u32::try_from(claim[2].as_canonical_u64()).unwrap(); let xor_u32 = u32::try_from(claim[3].as_canonical_u64()).unwrap(); @@ -1226,7 +1226,7 @@ mod tests { 2u64 => { /* This is our U32Add claim. We should have chip_idx, A, B, A + B (where A, B are u32) */ - assert!(claim.len() == 4, "[U32Add] wrong claim format"); + assert_eq!(claim.len(), 4, "[U32Add] wrong claim format"); let a_u32 = u32::try_from(claim[1].as_canonical_u64()).unwrap(); let b_u32 = u32::try_from(claim[2].as_canonical_u64()).unwrap(); let add_u32 = u32::try_from(claim[3].as_canonical_u64()).unwrap(); @@ -1236,7 +1236,7 @@ mod tests { 3u64 => { /* This is our U32RotateRight8 claim. We should have chip_idx, A, A_rot */ - assert!(claim.len() == 3, "[U32RightRotate8] wrong claim format"); + assert_eq!(claim.len(), 3, "[U32RightRotate8] wrong claim format"); let a_u32 = u32::try_from(claim[1].as_canonical_u64()).unwrap(); let rot_u32 = u32::try_from(claim[2].as_canonical_u64()).unwrap(); @@ -1245,7 +1245,7 @@ mod tests { 4u64 => { /* This is our U32RotateRight16 claim. We should have chip_idx, A, A_rot */ - assert!(claim.len() == 3, "[U32RightRotate16] wrong claim format"); + assert_eq!(claim.len(), 3, "[U32RightRotate16] wrong claim format"); let a_u32 = u32::try_from(claim[1].as_canonical_u64()).unwrap(); let rot_u32 = u32::try_from(claim[2].as_canonical_u64()).unwrap(); @@ -1254,7 +1254,7 @@ mod tests { 5u64 => { /* This is our U32RotateRight12 claim. We should have chip_idx, A, A_rot */ - assert!(claim.len() == 3, "[U32RightRotate12] wrong claim format"); + assert_eq!(claim.len(), 3, "[U32RightRotate12] wrong claim format"); let a_u32 = u32::try_from(claim[1].as_canonical_u64()).unwrap(); let rot_u32 = u32::try_from(claim[2].as_canonical_u64()).unwrap(); @@ -1263,7 +1263,7 @@ mod tests { 6u64 => { /* This is our U32RotateRight7 claim. We should have chip_idx, A, A_rot */ - assert!(claim.len() == 3, "[U32RightRotate7] wrong claim format"); + assert_eq!(claim.len(), 3, "[U32RightRotate7] wrong claim format"); let a_u32 = u32::try_from(claim[1].as_canonical_u64()).unwrap(); let rot_u32 = u32::try_from(claim[2].as_canonical_u64()).unwrap(); @@ -1273,13 +1273,13 @@ mod tests { 7u64 => { /* This is our U8PairRangeCheck claim. 
We should have chip_idx, A, B */ - assert!(claim.len() == 3, "[U8Xor] wrong claim format"); + assert_eq!(claim.len(), 3, "[U8PairRangeCheck] wrong claim format"); byte_range_check_values_from_claims.push((claim[1], claim[2])); } 8u64 => { /* This is our GFunction claim. We should have chip_idx, A, B, C, D, MX_IN, MY_IN, A1, D1, C1, B1 */ - assert!(claim.len() == 11, "[GFunction] wrong claim format"); + assert_eq!(claim.len(), 11, "[GFunction] wrong claim format"); let a_in = u32::try_from(claim[1].as_canonical_u64()).unwrap(); let b_in = u32::try_from(claim[2].as_canonical_u64()).unwrap(); @@ -1298,7 +1298,7 @@ mod tests { 9u64 => { /* This is our StateTransition claim. We should have chip_idx, state_in[32], state_out[16] */ - assert!(claim.len() == 49, "[StateTransition] wrong claim format"); + assert_eq!(claim.len(), 49, "[StateTransition] wrong claim format"); let state_in: [u32; 32] = array::from_fn(|i| { u32::try_from(claim[i + 1].as_canonical_u64()).unwrap() diff --git a/src/chips/mod.rs b/src/chips/mod.rs index 16e7cf5..39fa366 100644 --- a/src/chips/mod.rs +++ b/src/chips/mod.rs @@ -7,6 +7,7 @@ mod blake3_circuit; mod byte_operations; mod u32_add; +mod utility; use crate::builder::symbolic::SymbolicExpression; use crate::types::Val; diff --git a/src/chips/utility.rs b/src/chips/utility.rs new file mode 100644 index 0000000..f02530b --- /dev/null +++ b/src/chips/utility.rs @@ -0,0 +1,395 @@ +#[cfg(test)] +mod tests { + use crate::builder::symbolic::{preprocessed_var, var}; + use crate::chips::SymbExpr; + use crate::lookup::{Lookup, LookupAir}; + use crate::system::{System, SystemWitness}; + use crate::types::{CommitmentParameters, FriParameters, Val}; + use p3_air::{Air, AirBuilder, BaseAir}; + use p3_field::{Field, PrimeCharacteristicRing, PrimeField64}; + use p3_matrix::Matrix; + use p3_matrix::dense::RowMajorMatrix; + use std::array; + + const BYTE_VALUES_NUM: usize = 256; + + // Preprocessed columns are: [A, B, A or B], where A and B are bytes + const PREPROCESSED_TRACE_WIDTH: usize = 3; + + // multiplicity, byte0, byte1, bytes2, byte3 + const U32_FROM_LE_BYTES_TRACE_WIDTH: usize = 5; + + // Main trace consists of multiplicity for 'u8_or' and 'range_check' operations + const U8_OR_PAIR_RANGE_CHECK_TRACE_WIDTH: usize = 2; + + enum UtilityChip { + U32FromLeBytes, + U32Or, + U8Or, + U8PairRangeCheck, + } + + impl UtilityChip { + fn position(&self) -> usize { + match self { + Self::U32FromLeBytes => 0, + Self::U32Or => 1, + Self::U8Or => 2, + Self::U8PairRangeCheck => 3, + } + } + } + + impl BaseAir for UtilityChip { + fn width(&self) -> usize { + match self { + Self::U32FromLeBytes => U32_FROM_LE_BYTES_TRACE_WIDTH, + Self::U32Or => todo!(), + Self::U8Or | Self::U8PairRangeCheck => U8_OR_PAIR_RANGE_CHECK_TRACE_WIDTH, + } + } + + fn preprocessed_trace(&self) -> Option> { + match self { + Self::U8Or | Self::U8PairRangeCheck => { + let bytes: [u8; BYTE_VALUES_NUM] = + array::from_fn(|idx| u8::try_from(idx).unwrap()); + let mut trace_values = Vec::with_capacity( + BYTE_VALUES_NUM * BYTE_VALUES_NUM * PREPROCESSED_TRACE_WIDTH, + ); + for i in 0..BYTE_VALUES_NUM { + for j in 0..BYTE_VALUES_NUM { + trace_values.push(F::from_u8(bytes[i])); + trace_values.push(F::from_u8(bytes[j])); + trace_values.push(F::from_u8(bytes[i] | bytes[j])); + } + } + Some(RowMajorMatrix::new(trace_values, PREPROCESSED_TRACE_WIDTH)) + } + Self::U32FromLeBytes => None, + Self::U32Or => None, + } + } + } + + impl Air for UtilityChip + where + AB: AirBuilder, + AB::Var: Copy, + { + fn eval(&self, _builder: &mut AB) { 
+ match self { + Self::U32FromLeBytes => {} + Self::U32Or => {} + Self::U8Or => {} + Self::U8PairRangeCheck => {} + } + } + } + + impl UtilityChip { + fn lookups(&self) -> Vec> { + let u32_from_le_bytes_idx = Self::U32FromLeBytes.position(); + // let u32_or_idx = Self::U32Or.position(); + let u8_or_idx = Self::U8Or.position(); + let u8_pair_range_check_idx = Self::U8PairRangeCheck.position(); + + match self { + Self::U32FromLeBytes => { + vec![ + Lookup::pull( + var(0), + vec![ + SymbExpr::from_usize(u32_from_le_bytes_idx), + var(1), + var(2), + var(3), + var(4), + var(1) + + var(2) * SymbExpr::from_u32(256) + + var(3) * SymbExpr::from_u32(256 * 256) + + var(4) * SymbExpr::from_u32(256 * 256 * 256), + ], + ), + Lookup::push( + SymbExpr::ONE, + vec![ + SymbExpr::from_usize(u8_pair_range_check_idx), + var(1), + var(2), + ], + ), + Lookup::push( + SymbExpr::ONE, + vec![ + SymbExpr::from_usize(u8_pair_range_check_idx), + var(3), + var(4), + ], + ), + ] + } + Self::U32Or => { + todo!(); + } + + Self::U8Or | Self::U8PairRangeCheck => { + vec![ + Lookup::pull( + var(0), + vec![ + SymbExpr::from_usize(u8_or_idx), + preprocessed_var(0), + preprocessed_var(1), + preprocessed_var(2), + ], + ), + Lookup::pull( + var(1), + vec![ + SymbExpr::from_usize(u8_pair_range_check_idx), + preprocessed_var(0), + preprocessed_var(1), + ], + ), + ] + } + } + } + } + + struct UtilityChipClaims { + claims: Vec>, + } + + impl UtilityChipClaims { + fn witness( + &self, + system: &System, + ) -> (Vec>, SystemWitness) { + let mut byte_or_values_from_claims = vec![]; + let mut byte_range_check_values_from_claims = vec![]; + let mut u32_from_le_bytes_values_from_claims = vec![]; + + for claim in self.claims.clone() { + // we should have at least chip index + assert!(!claim.is_empty(), "wrong claim format"); + match claim[0].as_canonical_u64() { + 0u64 => { + // this is out u32_from_le_bytes chip. We should have chip_idx, byte0, byte1, byte2, byte3, u32 + assert_eq!(claim.len(), 6); + + let byte0_val = u8::try_from(claim[1].as_canonical_u64()).unwrap(); + let byte1_val = u8::try_from(claim[2].as_canonical_u64()).unwrap(); + let byte2_val = u8::try_from(claim[3].as_canonical_u64()).unwrap(); + let byte3_val = u8::try_from(claim[4].as_canonical_u64()).unwrap(); + + let u32_val = u32::try_from(claim[5].as_canonical_u64()).unwrap(); + + u32_from_le_bytes_values_from_claims + .push((byte0_val, byte1_val, byte2_val, byte3_val, u32_val)); + } + 1u64 => { + // u32 or + todo!(); + } + + 2u64 => { + // This is our U8Or claim. We should have chip_idx, A, B, A or B (where A, B are bytes) + assert_eq!(claim.len(), 4, "[U8Or] wrong claim format"); + byte_or_values_from_claims.push((claim[1], claim[2], claim[3])); + } + + 3u64 => { + /* This is our U8PairRangeCheck claim. 
We should have chip_idx, A, B */ + + assert_eq!(claim.len(), 3, "[U8PairRangeCheck] wrong claim format"); + byte_range_check_values_from_claims.push((claim[1], claim[2])); + } + + _ => { + panic!("unsupported chip") + } + } + } + + let mut u32_from_le_bytes_trace_values = + Vec::::with_capacity(u32_from_le_bytes_values_from_claims.len()); + if u32_from_le_bytes_values_from_claims.is_empty() { + u32_from_le_bytes_trace_values = Val::zero_vec(U32_FROM_LE_BYTES_TRACE_WIDTH); + + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + } else { + for (byte0, byte1, byte2, byte3, u32_val) in u32_from_le_bytes_values_from_claims { + let bytes: [u8; 4] = [byte0, byte1, byte2, byte3]; + let computed = u32::from_le_bytes(bytes); + debug_assert_eq!(u32_val, computed); + + u32_from_le_bytes_trace_values.push(Val::ONE); // multiplicity + u32_from_le_bytes_trace_values + .extend_from_slice(bytes.map(Val::from_u8).as_slice()); + + /* we send decomposed bytes to U8PairRangeCheck chip, relying on lookup constraining */ + + byte_range_check_values_from_claims + .push((Val::from_u8(byte0), Val::from_u8(byte1))); + byte_range_check_values_from_claims + .push((Val::from_u8(byte2), Val::from_u8(byte3))); + } + } + + let mut u32_from_le_bytes_trace = RowMajorMatrix::new( + u32_from_le_bytes_trace_values, + U32_FROM_LE_BYTES_TRACE_WIDTH, + ); + let height = u32_from_le_bytes_trace.height().next_power_of_two(); + let zero_rows = height - u32_from_le_bytes_trace.height(); + for _ in 0..zero_rows { + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + } + u32_from_le_bytes_trace.pad_to_height(height, Val::ZERO); + + // finally build U8Or / U8PairRangeCheck trace (columns: multiplicity_u8_or, multiplicity_pair_range_check) + // since this it "lowest-level" trace, its multiplicities could be updated by other chips previously + let mut u8_or_range_check_trace_values = Vec::::with_capacity( + BYTE_VALUES_NUM * BYTE_VALUES_NUM * U8_OR_PAIR_RANGE_CHECK_TRACE_WIDTH, + ); + for i in 0..BYTE_VALUES_NUM { + for j in 0..BYTE_VALUES_NUM { + let mut multiplicity_u8_or = Val::ZERO; + let mut multiplicity_u8_pair_range_check = Val::ZERO; + + for vals in byte_or_values_from_claims.clone() { + if vals.0 == Val::from_usize(i) + && vals.1 == Val::from_usize(j) + && vals.2 == Val::from_usize(i | j) + { + multiplicity_u8_or += Val::ONE; + } + } + + for vals in byte_range_check_values_from_claims.clone() { + if vals.0 == Val::from_usize(i) && vals.1 == Val::from_usize(j) { + multiplicity_u8_pair_range_check += Val::ONE; + } + } + + u8_or_range_check_trace_values.push(multiplicity_u8_or); + u8_or_range_check_trace_values.push(multiplicity_u8_pair_range_check); + } + } + + let traces = vec![ + RowMajorMatrix::new( + u8_or_range_check_trace_values, + U8_OR_PAIR_RANGE_CHECK_TRACE_WIDTH, + ), + u32_from_le_bytes_trace, + ]; + + (traces.clone(), SystemWitness::from_stage_1(traces, system)) + } + } + + #[test] + fn test_u32_to_le_bytes() { + let byte0 = 0xfe; + let byte1 = 0xac; + let byte2 = 0x68; + let byte3 = 0x01; + let u32_val = u32::from_le_bytes([byte0, byte1, byte2, byte3]); + + let commitment_parameters = CommitmentParameters { log_blowup: 1 }; + let u8_circuit = LookupAir::new(UtilityChip::U8Or, UtilityChip::U8Or.lookups()); + let u32_from_le_bytes_circuit = LookupAir::new( + UtilityChip::U32FromLeBytes, + UtilityChip::U32FromLeBytes.lookups(), + ); + + let (system, 
prover_key) = System::new( + commitment_parameters, + vec![u8_circuit, u32_from_le_bytes_circuit], + ); + + let claims = UtilityChipClaims { + claims: vec![ + [ + vec![Val::from_usize(UtilityChip::U32FromLeBytes.position())], + vec![ + Val::from_u8(byte0), + Val::from_u8(byte1), + Val::from_u8(byte2), + Val::from_u8(byte3), + ], + vec![Val::from_u32(u32_val)], + ] + .concat(), + ], + }; + + let (_traces, witness) = claims.witness(&system); + + let claims_slice: Vec<&[Val]> = claims.claims.iter().map(|v| v.as_slice()).collect(); + let claims_slice: &[&[Val]] = &claims_slice; + + let fri_parameters = FriParameters { + log_final_poly_len: 0, + num_queries: 64, + proof_of_work_bits: 0, + }; + + let proof = + system.prove_multiple_claims(fri_parameters, &prover_key, claims_slice, witness); + system + .verify_multiple_claims(fri_parameters, claims_slice, &proof) + .expect("verification issue"); + } + + #[test] + fn test_u8_or() { + let byte0 = 0xfe; + let byte1 = 0xac; + let or = byte0 | byte1; + + let commitment_parameters = CommitmentParameters { log_blowup: 1 }; + let u8_circuit = LookupAir::new(UtilityChip::U8Or, UtilityChip::U8Or.lookups()); + let u32_from_le_bytes_circuit = LookupAir::new( + UtilityChip::U32FromLeBytes, + UtilityChip::U32FromLeBytes.lookups(), + ); + + let (system, prover_key) = System::new( + commitment_parameters, + vec![u8_circuit, u32_from_le_bytes_circuit], + ); + + let claims = UtilityChipClaims { + claims: vec![ + [ + vec![Val::from_usize(UtilityChip::U8Or.position())], + vec![Val::from_u8(byte0), Val::from_u8(byte1), Val::from_u8(or)], + ] + .concat(), + ], + }; + + let (_traces, witness) = claims.witness(&system); + + let claims_slice: Vec<&[Val]> = claims.claims.iter().map(|v| v.as_slice()).collect(); + let claims_slice: &[&[Val]] = &claims_slice; + + let fri_parameters = FriParameters { + log_final_poly_len: 0, + num_queries: 64, + proof_of_work_bits: 0, + }; + + let proof = + system.prove_multiple_claims(fri_parameters, &prover_key, claims_slice, witness); + system + .verify_multiple_claims(fri_parameters, claims_slice, &proof) + .expect("verification issue"); + } +} From dc8d5a0acd4560dd0c09e63f6cd9a4b359045d26 Mon Sep 17 00:00:00 2001 From: Artem Storozhuk Date: Thu, 14 Aug 2025 00:22:54 +0300 Subject: [PATCH 2/5] feat: Implement U32Or chip --- src/chips/utility.rs | 201 ++++++++++++++++++++++++++++++++++++------- 1 file changed, 170 insertions(+), 31 deletions(-) diff --git a/src/chips/utility.rs b/src/chips/utility.rs index f02530b..55fd9b6 100644 --- a/src/chips/utility.rs +++ b/src/chips/utility.rs @@ -22,6 +22,9 @@ mod tests { // Main trace consists of multiplicity for 'u8_or' and 'range_check' operations const U8_OR_PAIR_RANGE_CHECK_TRACE_WIDTH: usize = 2; + // multiplicity, left0, left1, left2, left3, right0, right1, right2, right3, or0, or1, or2, or3 + const U32_OR_TRACE_WIDTH: usize = 13; + enum UtilityChip { U32FromLeBytes, U32Or, @@ -32,10 +35,10 @@ mod tests { impl UtilityChip { fn position(&self) -> usize { match self { - Self::U32FromLeBytes => 0, - Self::U32Or => 1, - Self::U8Or => 2, - Self::U8PairRangeCheck => 3, + Self::U8Or => 0, + Self::U8PairRangeCheck => 1, + Self::U32FromLeBytes => 2, + Self::U32Or => 3, } } } @@ -44,7 +47,7 @@ mod tests { fn width(&self) -> usize { match self { Self::U32FromLeBytes => U32_FROM_LE_BYTES_TRACE_WIDTH, - Self::U32Or => todo!(), + Self::U32Or => U32_OR_TRACE_WIDTH, Self::U8Or | Self::U8PairRangeCheck => U8_OR_PAIR_RANGE_CHECK_TRACE_WIDTH, } } @@ -66,8 +69,7 @@ mod tests { } 
Some(RowMajorMatrix::new(trace_values, PREPROCESSED_TRACE_WIDTH)) } - Self::U32FromLeBytes => None, - Self::U32Or => None, + Self::U32FromLeBytes | Self::U32Or => None, } } } @@ -79,20 +81,17 @@ mod tests { { fn eval(&self, _builder: &mut AB) { match self { - Self::U32FromLeBytes => {} - Self::U32Or => {} - Self::U8Or => {} - Self::U8PairRangeCheck => {} + Self::U32FromLeBytes | Self::U32Or | Self::U8Or | Self::U8PairRangeCheck => {} } } } impl UtilityChip { fn lookups(&self) -> Vec> { - let u32_from_le_bytes_idx = Self::U32FromLeBytes.position(); - // let u32_or_idx = Self::U32Or.position(); let u8_or_idx = Self::U8Or.position(); let u8_pair_range_check_idx = Self::U8PairRangeCheck.position(); + let u32_from_le_bytes_idx = Self::U32FromLeBytes.position(); + let u32_or_idx = Self::U32Or.position(); match self { Self::U32FromLeBytes => { @@ -130,9 +129,37 @@ mod tests { ] } Self::U32Or => { - todo!(); + let mut lookups = vec![Lookup::pull( + var(0), + vec![ + SymbExpr::from_usize(u32_or_idx), + var(1) + + var(2) * SymbExpr::from_u32(256) + + var(3) * SymbExpr::from_u32(256 * 256) + + var(4) * SymbExpr::from_u32(256 * 256 * 256), + var(5) + + var(6) * SymbExpr::from_u32(256) + + var(7) * SymbExpr::from_u32(256 * 256) + + var(8) * SymbExpr::from_u32(256 * 256 * 256), + var(9) + + var(10) * SymbExpr::from_u32(256) + + var(11) * SymbExpr::from_u32(256 * 256) + + var(12) * SymbExpr::from_u32(256 * 256 * 256), + ], + )]; + + lookups.extend((0..6).map(|i| { + Lookup::push( + SymbExpr::ONE, + vec![ + SymbExpr::from_usize(u8_pair_range_check_idx), + var(i + 1), + var(i + 7), + ], + ) + })); + lookups } - Self::U8Or | Self::U8PairRangeCheck => { vec![ Lookup::pull( @@ -170,12 +197,24 @@ mod tests { let mut byte_or_values_from_claims = vec![]; let mut byte_range_check_values_from_claims = vec![]; let mut u32_from_le_bytes_values_from_claims = vec![]; + let mut u32_or_values_from_claims = vec![]; for claim in self.claims.clone() { // we should have at least chip index assert!(!claim.is_empty(), "wrong claim format"); match claim[0].as_canonical_u64() { 0u64 => { + // This is our U8Or claim. We should have chip_idx, A, B, A or B (where A, B are bytes) + assert_eq!(claim.len(), 4, "[U8Or] wrong claim format"); + byte_or_values_from_claims.push((claim[1], claim[2], claim[3])); + } + 1u64 => { + /* This is our U8PairRangeCheck claim. We should have chip_idx, A, B */ + + assert_eq!(claim.len(), 3, "[U8PairRangeCheck] wrong claim format"); + byte_range_check_values_from_claims.push((claim[1], claim[2])); + } + 2u64 => { // this is out u32_from_le_bytes chip. We should have chip_idx, byte0, byte1, byte2, byte3, u32 assert_eq!(claim.len(), 6); @@ -189,22 +228,15 @@ mod tests { u32_from_le_bytes_values_from_claims .push((byte0_val, byte1_val, byte2_val, byte3_val, u32_val)); } - 1u64 => { - // u32 or - todo!(); - } - - 2u64 => { - // This is our U8Or claim. We should have chip_idx, A, B, A or B (where A, B are bytes) - assert_eq!(claim.len(), 4, "[U8Or] wrong claim format"); - byte_or_values_from_claims.push((claim[1], claim[2], claim[3])); - } - 3u64 => { - /* This is our U8PairRangeCheck claim. We should have chip_idx, A, B */ + // this is out u32_or chip. 
We should have: chip_idx, left, right, or + assert_eq!(claim.len(), 4); - assert_eq!(claim.len(), 3, "[U8PairRangeCheck] wrong claim format"); - byte_range_check_values_from_claims.push((claim[1], claim[2])); + let left = u32::try_from(claim[1].as_canonical_u64()).unwrap(); + let right = u32::try_from(claim[2].as_canonical_u64()).unwrap(); + let or = u32::try_from(claim[3].as_canonical_u64()).unwrap(); + + u32_or_values_from_claims.push((left, right, or)); } _ => { @@ -213,6 +245,62 @@ mod tests { } } + // u32_or + let mut u32_or_trace_values = + Vec::::with_capacity(u32_or_values_from_claims.len()); + if u32_or_values_from_claims.is_empty() { + u32_or_trace_values = Val::zero_vec(U32_OR_TRACE_WIDTH); + + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + } else { + for (left, right, or) in u32_or_values_from_claims { + let computed = left | right; + debug_assert_eq!(or, computed); + + let left_bytes: [u8; 4] = left.to_le_bytes(); + let right_bytes: [u8; 4] = right.to_le_bytes(); + let or_bytes: [u8; 4] = or.to_le_bytes(); + + u32_or_trace_values.push(Val::ONE); // multiplicity + u32_or_trace_values.extend_from_slice(left_bytes.map(Val::from_u8).as_slice()); + u32_or_trace_values.extend_from_slice(right_bytes.map(Val::from_u8).as_slice()); + u32_or_trace_values.extend_from_slice(or_bytes.map(Val::from_u8).as_slice()); + + /* we send decomposed bytes to U8PairRangeCheck chip, relying on lookup constraining */ + + byte_range_check_values_from_claims + .push((Val::from_u8(left_bytes[0]), Val::from_u8(right_bytes[2]))); + byte_range_check_values_from_claims + .push((Val::from_u8(left_bytes[1]), Val::from_u8(right_bytes[3]))); + byte_range_check_values_from_claims + .push((Val::from_u8(left_bytes[2]), Val::from_u8(or_bytes[0]))); + byte_range_check_values_from_claims + .push((Val::from_u8(left_bytes[3]), Val::from_u8(or_bytes[1]))); + byte_range_check_values_from_claims + .push((Val::from_u8(right_bytes[0]), Val::from_u8(or_bytes[2]))); + byte_range_check_values_from_claims + .push((Val::from_u8(right_bytes[1]), Val::from_u8(or_bytes[3]))); + } + } + let mut u32_or_trace = RowMajorMatrix::new(u32_or_trace_values, U32_OR_TRACE_WIDTH); + let height = u32_or_trace.height().next_power_of_two(); + let zero_rows = height - u32_or_trace.height(); + for _ in 0..zero_rows { + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + } + u32_or_trace.pad_to_height(height, Val::ZERO); + + // u32_from_le let mut u32_from_le_bytes_trace_values = Vec::::with_capacity(u32_from_le_bytes_values_from_claims.len()); if u32_from_le_bytes_values_from_claims.is_empty() { @@ -286,7 +374,9 @@ mod tests { u8_or_range_check_trace_values, U8_OR_PAIR_RANGE_CHECK_TRACE_WIDTH, ), + // range check trace is entirely preprocessed, so it is "free" u32_from_le_bytes_trace, + u32_or_trace, ]; (traces.clone(), 
SystemWitness::from_stage_1(traces, system)) @@ -307,10 +397,11 @@ mod tests { UtilityChip::U32FromLeBytes, UtilityChip::U32FromLeBytes.lookups(), ); + let u32_or_circuit = LookupAir::new(UtilityChip::U32Or, UtilityChip::U32Or.lookups()); let (system, prover_key) = System::new( commitment_parameters, - vec![u8_circuit, u32_from_le_bytes_circuit], + vec![u8_circuit, u32_from_le_bytes_circuit, u32_or_circuit], // order matters ); let claims = UtilityChipClaims { @@ -359,10 +450,11 @@ mod tests { UtilityChip::U32FromLeBytes, UtilityChip::U32FromLeBytes.lookups(), ); + let u32_or_circuit = LookupAir::new(UtilityChip::U32Or, UtilityChip::U32Or.lookups()); let (system, prover_key) = System::new( commitment_parameters, - vec![u8_circuit, u32_from_le_bytes_circuit], + vec![u8_circuit, u32_from_le_bytes_circuit, u32_or_circuit], // order matters! ); let claims = UtilityChipClaims { @@ -392,4 +484,51 @@ mod tests { .verify_multiple_claims(fri_parameters, claims_slice, &proof) .expect("verification issue"); } + + #[test] + fn test_u32_or() { + let left = 0xabcd1234u32; + let right = 0x998123ffu32; + let or = left | right; + + let commitment_parameters = CommitmentParameters { log_blowup: 1 }; + let u8_circuit = LookupAir::new(UtilityChip::U8Or, UtilityChip::U8Or.lookups()); + let u32_or_circuit = LookupAir::new(UtilityChip::U32Or, UtilityChip::U32Or.lookups()); + let u32_from_le_bytes_circuit = LookupAir::new( + UtilityChip::U32FromLeBytes, + UtilityChip::U32FromLeBytes.lookups(), + ); + + let (system, prover_key) = System::new( + commitment_parameters, + vec![u8_circuit, u32_from_le_bytes_circuit, u32_or_circuit], // order matters + ); + + let claims = UtilityChipClaims { + claims: vec![ + [ + vec![Val::from_usize(UtilityChip::U32Or.position())], + vec![Val::from_u32(left), Val::from_u32(right), Val::from_u32(or)], + ] + .concat(), + ], + }; + + let (_traces, witness) = claims.witness(&system); + + let claims_slice: Vec<&[Val]> = claims.claims.iter().map(|v| v.as_slice()).collect(); + let claims_slice: &[&[Val]] = &claims_slice; + + let fri_parameters = FriParameters { + log_final_poly_len: 0, + num_queries: 64, + proof_of_work_bits: 0, + }; + + let proof = + system.prove_multiple_claims(fri_parameters, &prover_key, claims_slice, witness); + system + .verify_multiple_claims(fri_parameters, claims_slice, &proof) + .expect("verification issue"); + } } From c094cd5dc52764128bbaf2c1eff464f6be664800 Mon Sep 17 00:00:00 2001 From: Artem Storozhuk Date: Sat, 16 Aug 2025 14:26:57 +0300 Subject: [PATCH 3/5] chore: Update to latest main --- src/chips/utility.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/chips/utility.rs b/src/chips/utility.rs index 55fd9b6..b00dcae 100644 --- a/src/chips/utility.rs +++ b/src/chips/utility.rs @@ -78,6 +78,7 @@ mod tests { where AB: AirBuilder, AB::Var: Copy, + AB::F: Field, { fn eval(&self, _builder: &mut AB) { match self { From 1c95b632bf2f14841f7c6b3981da22a6710d87a5 Mon Sep 17 00:00:00 2001 From: Artem Storozhuk Date: Sat, 16 Aug 2025 16:30:58 +0300 Subject: [PATCH 4/5] feat: Implement u64_to_u32 chips --- src/chips/utility.rs | 394 ++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 385 insertions(+), 9 deletions(-) diff --git a/src/chips/utility.rs b/src/chips/utility.rs index b00dcae..d036693 100644 --- a/src/chips/utility.rs +++ b/src/chips/utility.rs @@ -25,11 +25,16 @@ mod tests { // multiplicity, left0, left1, left2, left3, right0, right1, right2, right3, or0, or1, or2, or3 const U32_OR_TRACE_WIDTH: usize = 13; + // multiplicity, in_byte0, 
in_byte1, in_byte2, in_byte3, in_byte4, in_byte5, in_byte6, in_byte7 + const U64_SHIFT_32_TRACE_WIDTH: usize = 9; + enum UtilityChip { U32FromLeBytes, U32Or, U8Or, U8PairRangeCheck, + U64ShiftRight32AsU32, + U64AsU32, } impl UtilityChip { @@ -39,6 +44,8 @@ mod tests { Self::U8PairRangeCheck => 1, Self::U32FromLeBytes => 2, Self::U32Or => 3, + Self::U64ShiftRight32AsU32 => 4, + Self::U64AsU32 => 5, } } } @@ -49,6 +56,7 @@ mod tests { Self::U32FromLeBytes => U32_FROM_LE_BYTES_TRACE_WIDTH, Self::U32Or => U32_OR_TRACE_WIDTH, Self::U8Or | Self::U8PairRangeCheck => U8_OR_PAIR_RANGE_CHECK_TRACE_WIDTH, + Self::U64ShiftRight32AsU32 | Self::U64AsU32 => U64_SHIFT_32_TRACE_WIDTH, } } @@ -69,7 +77,10 @@ mod tests { } Some(RowMajorMatrix::new(trace_values, PREPROCESSED_TRACE_WIDTH)) } - Self::U32FromLeBytes | Self::U32Or => None, + Self::U32FromLeBytes + | Self::U32Or + | Self::U64ShiftRight32AsU32 + | Self::U64AsU32 => None, } } } @@ -82,7 +93,12 @@ mod tests { { fn eval(&self, _builder: &mut AB) { match self { - Self::U32FromLeBytes | Self::U32Or | Self::U8Or | Self::U8PairRangeCheck => {} + Self::U32FromLeBytes + | Self::U32Or + | Self::U8Or + | Self::U8PairRangeCheck + | Self::U64ShiftRight32AsU32 + | Self::U64AsU32 => {} } } } @@ -93,6 +109,8 @@ mod tests { let u8_pair_range_check_idx = Self::U8PairRangeCheck.position(); let u32_from_le_bytes_idx = Self::U32FromLeBytes.position(); let u32_or_idx = Self::U32Or.position(); + let u64_shift_right_32_idx = Self::U64ShiftRight32AsU32.position(); + let u64_shift_left_32_idx = Self::U64AsU32.position(); match self { Self::U32FromLeBytes => { @@ -182,6 +200,75 @@ mod tests { ), ] } + Self::U64ShiftRight32AsU32 => { + let mut lookups = vec![Lookup::pull( + var(0), + vec![ + SymbExpr::from_usize(u64_shift_right_32_idx), + var(1) + + var(2) * SymbExpr::from_u64(256) + + var(3) * SymbExpr::from_u64(256 * 256) + + var(4) * SymbExpr::from_u64(256 * 256 * 256) + + var(5) * SymbExpr::from_u64(256 * 256 * 256 * 256) + + var(6) * SymbExpr::from_u64(256 * 256 * 256 * 256 * 256) + + var(7) * SymbExpr::from_u64(256 * 256 * 256 * 256 * 256 * 256) + + var(8) + * SymbExpr::from_u64(256 * 256 * 256 * 256 * 256 * 256 * 256), // we are ok here, since Goldilock has 64 bits + var(5) + + var(6) * SymbExpr::from_u64(256) + + var(7) * SymbExpr::from_u64(256 * 256) + + var(8) * SymbExpr::from_u64(256 * 256 * 256), // 32 bits shifting to the right means that 4 least significant bytes are simply ignored + ], + )]; + + lookups.extend((0..4).map(|i| { + Lookup::push( + SymbExpr::ONE, + vec![ + SymbExpr::from_usize(u8_pair_range_check_idx), + var(i + 1), + var(i + 5), + ], + ) + })); + + lookups + } + + Self::U64AsU32 => { + let mut lookups = vec![Lookup::pull( + var(0), + vec![ + SymbExpr::from_usize(u64_shift_left_32_idx), + var(1) + + var(2) * SymbExpr::from_u64(256) + + var(3) * SymbExpr::from_u64(256 * 256) + + var(4) * SymbExpr::from_u64(256 * 256 * 256) + + var(5) * SymbExpr::from_u64(256 * 256 * 256 * 256) + + var(6) * SymbExpr::from_u64(256 * 256 * 256 * 256 * 256) + + var(7) * SymbExpr::from_u64(256 * 256 * 256 * 256 * 256 * 256) + + var(8) + * SymbExpr::from_u64(256 * 256 * 256 * 256 * 256 * 256 * 256), // we are ok here, since Goldilock has 64 bits + var(1) + + var(2) * SymbExpr::from_u64(256) + + var(3) * SymbExpr::from_u64(256 * 256) + + var(4) * SymbExpr::from_u64(256 * 256 * 256), // u64 as u32 means that we do 32 bits shifting to the left and 4 most significant bytes are simply ignored + ], + )]; + + lookups.extend((0..4).map(|i| { + Lookup::push( + SymbExpr::ONE, + 
vec![ + SymbExpr::from_usize(u8_pair_range_check_idx), + var(i + 1), + var(i + 5), + ], + ) + })); + + lookups + } } } } @@ -199,6 +286,8 @@ mod tests { let mut byte_range_check_values_from_claims = vec![]; let mut u32_from_le_bytes_values_from_claims = vec![]; let mut u32_or_values_from_claims = vec![]; + let mut u64_shift_right_32_values_from_claims = vec![]; + let mut u64_to_u32_values_from_claims = vec![]; for claim in self.claims.clone() { // we should have at least chip index @@ -216,7 +305,7 @@ mod tests { byte_range_check_values_from_claims.push((claim[1], claim[2])); } 2u64 => { - // this is out u32_from_le_bytes chip. We should have chip_idx, byte0, byte1, byte2, byte3, u32 + // this is our u32_from_le_bytes chip. We should have chip_idx, byte0, byte1, byte2, byte3, u32 assert_eq!(claim.len(), 6); let byte0_val = u8::try_from(claim[1].as_canonical_u64()).unwrap(); @@ -230,7 +319,7 @@ mod tests { .push((byte0_val, byte1_val, byte2_val, byte3_val, u32_val)); } 3u64 => { - // this is out u32_or chip. We should have: chip_idx, left, right, or + // this is our u32_or chip. We should have: chip_idx, left, right, or assert_eq!(claim.len(), 4); let left = u32::try_from(claim[1].as_canonical_u64()).unwrap(); @@ -240,12 +329,139 @@ mod tests { u32_or_values_from_claims.push((left, right, or)); } + 4u64 => { + // this is our u64_shift_right_32. We should have: chip_idx, u64 (input), u32 (output) + assert_eq!(claim.len(), 3); + + let u64_val = claim[1].as_canonical_u64(); + let shifted = u32::try_from(claim[2].as_canonical_u64()).unwrap(); + + u64_shift_right_32_values_from_claims.push((u64_val, shifted)); + } + 5u64 => { + // this is our u64_to_u32. We should have: chip_idx, u64 (input), u32 (output) + assert_eq!(claim.len(), 3); + + let u64_val = claim[1].as_canonical_u64(); + let shifted = u32::try_from(claim[2].as_canonical_u64()).unwrap(); + + u64_to_u32_values_from_claims.push((u64_val, shifted)); + } + _ => { panic!("unsupported chip") } } } + fn u64_to_u32( + values_from_claims: &Vec<(u64, u32)>, + range_check_values: &mut Vec<(Val, Val)>, + shift_right: bool, + ) -> RowMajorMatrix { + let mut u64_to_u32_trace_values = + Vec::::with_capacity(values_from_claims.len()); + if values_from_claims.is_empty() { + u64_to_u32_trace_values = Val::zero_vec(U64_SHIFT_32_TRACE_WIDTH); + + range_check_values.push((Val::ZERO, Val::ZERO)); + range_check_values.push((Val::ZERO, Val::ZERO)); + range_check_values.push((Val::ZERO, Val::ZERO)); + range_check_values.push((Val::ZERO, Val::ZERO)); + } else { + for (u64_val, shifted) in values_from_claims { + let computed: u32 = if shift_right { + u32::try_from(*u64_val >> 32).unwrap() + } else { + *u64_val as u32 + }; + debug_assert_eq!(computed, *shifted); + + let u64_bytes: [u8; 8] = u64_val.to_le_bytes(); + u64_to_u32_trace_values.push(Val::ONE); // multiplicity + u64_to_u32_trace_values + .extend_from_slice(u64_bytes.map(Val::from_u8).as_slice()); + + range_check_values + .push((Val::from_u8(u64_bytes[0]), Val::from_u8(u64_bytes[4]))); + range_check_values + .push((Val::from_u8(u64_bytes[1]), Val::from_u8(u64_bytes[5]))); + range_check_values + .push((Val::from_u8(u64_bytes[2]), Val::from_u8(u64_bytes[6]))); + range_check_values + .push((Val::from_u8(u64_bytes[3]), Val::from_u8(u64_bytes[7]))); + } + } + let mut u64_to_u32_trace = + RowMajorMatrix::new(u64_to_u32_trace_values, U64_SHIFT_32_TRACE_WIDTH); + let height = u64_to_u32_trace.height().next_power_of_two(); + let zero_rows = height - u64_to_u32_trace.height(); + for _ in 0..zero_rows { + 
range_check_values.push((Val::ZERO, Val::ZERO)); + range_check_values.push((Val::ZERO, Val::ZERO)); + range_check_values.push((Val::ZERO, Val::ZERO)); + range_check_values.push((Val::ZERO, Val::ZERO)); + } + u64_to_u32_trace.pad_to_height(height, Val::ZERO); + u64_to_u32_trace + } + + // u64_to_32 + let u64_to_u32_trace = u64_to_u32( + &u64_to_u32_values_from_claims, + &mut byte_range_check_values_from_claims, + false, + ); + + // u64_shift_right_32 + let u64_shift_right_32_trace = u64_to_u32( + &u64_shift_right_32_values_from_claims, + &mut byte_range_check_values_from_claims, + true, + ); + + // // u64_shift_right_32 + // let mut u64_shift_right_32_trace_values = + // Vec::::with_capacity(u64_shift_right_32_values_from_claims.len()); + // if u64_shift_right_32_values_from_claims.is_empty() { + // u64_shift_right_32_trace_values = Val::zero_vec(U64_SHIFT_32_TRACE_WIDTH); + // + // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + // } else { + // for (u64_val, shifted) in u64_shift_right_32_values_from_claims { + // let computed = (u64_val >> 32) as u32; + // debug_assert_eq!(computed, shifted); + // + // let u64_bytes: [u8; 8] = u64_val.to_le_bytes(); + // u64_shift_right_32_trace_values.push(Val::ONE); // multiplicity + // u64_shift_right_32_trace_values + // .extend_from_slice(u64_bytes.map(Val::from_u8).as_slice()); + // + // byte_range_check_values_from_claims + // .push((Val::from_u8(u64_bytes[0]), Val::from_u8(u64_bytes[4]))); + // byte_range_check_values_from_claims + // .push((Val::from_u8(u64_bytes[1]), Val::from_u8(u64_bytes[5]))); + // byte_range_check_values_from_claims + // .push((Val::from_u8(u64_bytes[2]), Val::from_u8(u64_bytes[6]))); + // byte_range_check_values_from_claims + // .push((Val::from_u8(u64_bytes[3]), Val::from_u8(u64_bytes[7]))); + // } + // } + // let mut u64_shift_right_32_trace = + // RowMajorMatrix::new(u64_shift_right_32_trace_values, U64_SHIFT_32_TRACE_WIDTH); + // let height = u64_shift_right_32_trace.height().next_power_of_two(); + // let zero_rows = height - u64_shift_right_32_trace.height(); + // for _ in 0..zero_rows { + // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); + // } + // u64_shift_right_32_trace.pad_to_height(height, Val::ZERO); + // u32_or let mut u32_or_trace_values = Vec::::with_capacity(u32_or_values_from_claims.len()); @@ -378,6 +594,8 @@ mod tests { // range check trace is entirely preprocessed, so it is "free" u32_from_le_bytes_trace, u32_or_trace, + u64_shift_right_32_trace, + u64_to_u32_trace, ]; (traces.clone(), SystemWitness::from_stage_1(traces, system)) @@ -394,15 +612,27 @@ mod tests { let commitment_parameters = CommitmentParameters { log_blowup: 1 }; let u8_circuit = LookupAir::new(UtilityChip::U8Or, UtilityChip::U8Or.lookups()); + let u32_or_circuit = LookupAir::new(UtilityChip::U32Or, UtilityChip::U32Or.lookups()); let u32_from_le_bytes_circuit = LookupAir::new( UtilityChip::U32FromLeBytes, UtilityChip::U32FromLeBytes.lookups(), ); - let u32_or_circuit = LookupAir::new(UtilityChip::U32Or, UtilityChip::U32Or.lookups()); + let u64_shift_right_32_circuit = 
LookupAir::new( + UtilityChip::U64ShiftRight32AsU32, + UtilityChip::U64ShiftRight32AsU32.lookups(), + ); + let u64_as_u32_circuit = + LookupAir::new(UtilityChip::U64AsU32, UtilityChip::U64AsU32.lookups()); let (system, prover_key) = System::new( commitment_parameters, - vec![u8_circuit, u32_from_le_bytes_circuit, u32_or_circuit], // order matters + vec![ + u8_circuit, + u32_from_le_bytes_circuit, + u32_or_circuit, + u64_shift_right_32_circuit, + u64_as_u32_circuit, + ], // order matters ); let claims = UtilityChipClaims { @@ -447,15 +677,27 @@ mod tests { let commitment_parameters = CommitmentParameters { log_blowup: 1 }; let u8_circuit = LookupAir::new(UtilityChip::U8Or, UtilityChip::U8Or.lookups()); + let u32_or_circuit = LookupAir::new(UtilityChip::U32Or, UtilityChip::U32Or.lookups()); let u32_from_le_bytes_circuit = LookupAir::new( UtilityChip::U32FromLeBytes, UtilityChip::U32FromLeBytes.lookups(), ); - let u32_or_circuit = LookupAir::new(UtilityChip::U32Or, UtilityChip::U32Or.lookups()); + let u64_shift_right_32_circuit = LookupAir::new( + UtilityChip::U64ShiftRight32AsU32, + UtilityChip::U64ShiftRight32AsU32.lookups(), + ); + let u64_as_u32_circuit = + LookupAir::new(UtilityChip::U64AsU32, UtilityChip::U64AsU32.lookups()); let (system, prover_key) = System::new( commitment_parameters, - vec![u8_circuit, u32_from_le_bytes_circuit, u32_or_circuit], // order matters! + vec![ + u8_circuit, + u32_from_le_bytes_circuit, + u32_or_circuit, + u64_shift_right_32_circuit, + u64_as_u32_circuit, + ], // order matters ); let claims = UtilityChipClaims { @@ -499,10 +741,22 @@ mod tests { UtilityChip::U32FromLeBytes, UtilityChip::U32FromLeBytes.lookups(), ); + let u64_shift_right_32_circuit = LookupAir::new( + UtilityChip::U64ShiftRight32AsU32, + UtilityChip::U64ShiftRight32AsU32.lookups(), + ); + let u64_as_u32_circuit = + LookupAir::new(UtilityChip::U64AsU32, UtilityChip::U64AsU32.lookups()); let (system, prover_key) = System::new( commitment_parameters, - vec![u8_circuit, u32_from_le_bytes_circuit, u32_or_circuit], // order matters + vec![ + u8_circuit, + u32_from_le_bytes_circuit, + u32_or_circuit, + u64_shift_right_32_circuit, + u64_as_u32_circuit, + ], // order matters ); let claims = UtilityChipClaims { @@ -532,4 +786,126 @@ mod tests { .verify_multiple_claims(fri_parameters, claims_slice, &proof) .expect("verification issue"); } + + #[test] + fn test_u64_shift_right_32_as_u32() { + let u64_val = 0xabcd12341f1f1f1fu64; + let u64_val_right_shifted = u64_val >> 32; + + let commitment_parameters = CommitmentParameters { log_blowup: 1 }; + let u8_circuit = LookupAir::new(UtilityChip::U8Or, UtilityChip::U8Or.lookups()); + let u32_or_circuit = LookupAir::new(UtilityChip::U32Or, UtilityChip::U32Or.lookups()); + let u32_from_le_bytes_circuit = LookupAir::new( + UtilityChip::U32FromLeBytes, + UtilityChip::U32FromLeBytes.lookups(), + ); + let u64_shift_right_32_circuit = LookupAir::new( + UtilityChip::U64ShiftRight32AsU32, + UtilityChip::U64ShiftRight32AsU32.lookups(), + ); + let u64_as_u32_circuit = + LookupAir::new(UtilityChip::U64AsU32, UtilityChip::U64AsU32.lookups()); + + let (system, prover_key) = System::new( + commitment_parameters, + vec![ + u8_circuit, + u32_from_le_bytes_circuit, + u32_or_circuit, + u64_shift_right_32_circuit, + u64_as_u32_circuit, + ], // order matters + ); + + let claims = UtilityChipClaims { + claims: vec![ + [ + vec![Val::from_usize( + UtilityChip::U64ShiftRight32AsU32.position(), + )], + vec![ + Val::from_u64(u64_val), + Val::from_u32(u64_val_right_shifted as u32), + ], 
+ ] + .concat(), + ], + }; + + let (_traces, witness) = claims.witness(&system); + + let claims_slice: Vec<&[Val]> = claims.claims.iter().map(|v| v.as_slice()).collect(); + let claims_slice: &[&[Val]] = &claims_slice; + + let fri_parameters = FriParameters { + log_final_poly_len: 0, + num_queries: 64, + proof_of_work_bits: 0, + }; + + let proof = + system.prove_multiple_claims(fri_parameters, &prover_key, claims_slice, witness); + system + .verify_multiple_claims(fri_parameters, claims_slice, &proof) + .expect("verification issue"); + } + + #[test] + fn test_u64_as_u32() { + let u64_val = 0xabcd12341f1f1f1fu64; + let u32_val = u64_val as u32; + assert_eq!(u32_val, 0x1f1f1f1fu32); + + let commitment_parameters = CommitmentParameters { log_blowup: 1 }; + let u8_circuit = LookupAir::new(UtilityChip::U8Or, UtilityChip::U8Or.lookups()); + let u32_or_circuit = LookupAir::new(UtilityChip::U32Or, UtilityChip::U32Or.lookups()); + let u32_from_le_bytes_circuit = LookupAir::new( + UtilityChip::U32FromLeBytes, + UtilityChip::U32FromLeBytes.lookups(), + ); + let u64_shift_right_32_circuit = LookupAir::new( + UtilityChip::U64ShiftRight32AsU32, + UtilityChip::U64ShiftRight32AsU32.lookups(), + ); + let u64_as_u32_circuit = + LookupAir::new(UtilityChip::U64AsU32, UtilityChip::U64AsU32.lookups()); + + let (system, prover_key) = System::new( + commitment_parameters, + vec![ + u8_circuit, + u32_from_le_bytes_circuit, + u32_or_circuit, + u64_shift_right_32_circuit, + u64_as_u32_circuit, + ], // order matters + ); + + let claims = UtilityChipClaims { + claims: vec![ + [ + vec![Val::from_usize(UtilityChip::U64AsU32.position())], + vec![Val::from_u64(u64_val), Val::from_u32(u32_val)], + ] + .concat(), + ], + }; + + let (_traces, witness) = claims.witness(&system); + + let claims_slice: Vec<&[Val]> = claims.claims.iter().map(|v| v.as_slice()).collect(); + let claims_slice: &[&[Val]] = &claims_slice; + + let fri_parameters = FriParameters { + log_final_poly_len: 0, + num_queries: 64, + proof_of_work_bits: 0, + }; + + let proof = + system.prove_multiple_claims(fri_parameters, &prover_key, claims_slice, witness); + system + .verify_multiple_claims(fri_parameters, claims_slice, &proof) + .expect("verification issue"); + } } From 4b0fe093620c6fce0df7a8f9674ee87baafd2945 Mon Sep 17 00:00:00 2001 From: Artem Storozhuk Date: Tue, 19 Aug 2025 01:06:19 +0300 Subject: [PATCH 5/5] feat: Implement 1-bit right shift of u64 using bit decomposition --- src/chips/utility.rs | 221 +++++++++++++++++++++++++++++++++---------- 1 file changed, 172 insertions(+), 49 deletions(-) diff --git a/src/chips/utility.rs b/src/chips/utility.rs index d036693..3484622 100644 --- a/src/chips/utility.rs +++ b/src/chips/utility.rs @@ -10,6 +10,7 @@ mod tests { use p3_matrix::Matrix; use p3_matrix::dense::RowMajorMatrix; use std::array; + use std::ops::Range; const BYTE_VALUES_NUM: usize = 256; @@ -28,6 +29,9 @@ mod tests { // multiplicity, in_byte0, in_byte1, in_byte2, in_byte3, in_byte4, in_byte5, in_byte6, in_byte7 const U64_SHIFT_32_TRACE_WIDTH: usize = 9; + // multiplicity, in_bit_0 ... 
in_bit_63 + const U64_SHIFT_RIGHT_1: usize = 65; + enum UtilityChip { U32FromLeBytes, U32Or, @@ -35,6 +39,7 @@ mod tests { U8PairRangeCheck, U64ShiftRight32AsU32, U64AsU32, + U64ShiftRight1, } impl UtilityChip { @@ -46,6 +51,7 @@ mod tests { Self::U32Or => 3, Self::U64ShiftRight32AsU32 => 4, Self::U64AsU32 => 5, + Self::U64ShiftRight1 => 6, } } } @@ -57,6 +63,7 @@ mod tests { Self::U32Or => U32_OR_TRACE_WIDTH, Self::U8Or | Self::U8PairRangeCheck => U8_OR_PAIR_RANGE_CHECK_TRACE_WIDTH, Self::U64ShiftRight32AsU32 | Self::U64AsU32 => U64_SHIFT_32_TRACE_WIDTH, + Self::U64ShiftRight1 => U64_SHIFT_RIGHT_1, } } @@ -80,7 +87,8 @@ mod tests { Self::U32FromLeBytes | Self::U32Or | Self::U64ShiftRight32AsU32 - | Self::U64AsU32 => None, + | Self::U64AsU32 + | Self::U64ShiftRight1 => None, } } } @@ -91,7 +99,7 @@ mod tests { AB::Var: Copy, AB::F: Field, { - fn eval(&self, _builder: &mut AB) { + fn eval(&self, builder: &mut AB) { match self { Self::U32FromLeBytes | Self::U32Or @@ -99,6 +107,13 @@ mod tests { | Self::U8PairRangeCheck | Self::U64ShiftRight32AsU32 | Self::U64AsU32 => {} + Self::U64ShiftRight1 => { + let main = builder.main(); + let local = main.row_slice(0).unwrap(); + for i in 0..64 { + builder.assert_bool(local[i + 1]); + } + } } } } @@ -111,6 +126,7 @@ mod tests { let u32_or_idx = Self::U32Or.position(); let u64_shift_right_32_idx = Self::U64ShiftRight32AsU32.position(); let u64_shift_left_32_idx = Self::U64AsU32.position(); + let u64_shift_right_1_idx = Self::U64ShiftRight1.position(); match self { Self::U32FromLeBytes => { @@ -269,6 +285,27 @@ mod tests { lookups } + + Self::U64ShiftRight1 => { + fn bit_decomposition(bit_range: Range) -> SymbExpr { + let powers_of_two: Vec = + (0..63).map(|i| SymbExpr::from_u64(1u64 << i)).collect(); + let vars: Vec = bit_range.map(var).collect(); + + vars.into_iter() + .zip(powers_of_two) + .fold(SymbExpr::ZERO, |acc, (v, pow_2)| acc + v * pow_2) + } + + vec![Lookup::pull( + var(0), + vec![ + SymbExpr::from_usize(u64_shift_right_1_idx), + bit_decomposition(1..64), + bit_decomposition(2..64), + ], + )] + } } } } @@ -288,6 +325,7 @@ mod tests { let mut u32_or_values_from_claims = vec![]; let mut u64_shift_right_32_values_from_claims = vec![]; let mut u64_to_u32_values_from_claims = vec![]; + let mut u64_shift_right_1_values_from_claims = vec![]; for claim in self.claims.clone() { // we should have at least chip index @@ -306,7 +344,7 @@ mod tests { } 2u64 => { // this is our u32_from_le_bytes chip. We should have chip_idx, byte0, byte1, byte2, byte3, u32 - assert_eq!(claim.len(), 6); + assert_eq!(claim.len(), 6, "[U32FromLeBytes] wrong claim format"); let byte0_val = u8::try_from(claim[1].as_canonical_u64()).unwrap(); let byte1_val = u8::try_from(claim[2].as_canonical_u64()).unwrap(); @@ -320,7 +358,7 @@ mod tests { } 3u64 => { // this is our u32_or chip. We should have: chip_idx, left, right, or - assert_eq!(claim.len(), 4); + assert_eq!(claim.len(), 4, "[U32Or] wrong claim format"); let left = u32::try_from(claim[1].as_canonical_u64()).unwrap(); let right = u32::try_from(claim[2].as_canonical_u64()).unwrap(); @@ -331,7 +369,7 @@ mod tests { 4u64 => { // this is our u64_shift_right_32. We should have: chip_idx, u64 (input), u32 (output) - assert_eq!(claim.len(), 3); + assert_eq!(claim.len(), 3, "[U64ShiftRight32] wrong claim format"); let u64_val = claim[1].as_canonical_u64(); let shifted = u32::try_from(claim[2].as_canonical_u64()).unwrap(); @@ -340,7 +378,7 @@ mod tests { } 5u64 => { // this is our u64_to_u32. 
We should have: chip_idx, u64 (input), u32 (output) - assert_eq!(claim.len(), 3); + assert_eq!(claim.len(), 3, "[U64ToU32] wrong claim format"); let u64_val = claim[1].as_canonical_u64(); let shifted = u32::try_from(claim[2].as_canonical_u64()).unwrap(); @@ -348,12 +386,50 @@ mod tests { u64_to_u32_values_from_claims.push((u64_val, shifted)); } + 6u64 => { + // this is our u64_shift_right_1. We should have: chip_idx, u64 (input), u64 (output) + assert_eq!(claim.len(), 3, "[U64ShiftRight1] wrong claim format"); + let u64_val = claim[1].as_canonical_u64(); + + // technical limitation since Goldilock can't be instantiated from u64::MAX + assert!(u64_val < u64::MAX); + + let u64_shifted = claim[2].as_canonical_u64(); + + u64_shift_right_1_values_from_claims.push((u64_val, u64_shifted)); + } + _ => { panic!("unsupported chip") } } } + // u64_shift_right_1 + let mut u64_shift_right_1_trace_values = + Vec::::with_capacity(u64_shift_right_1_values_from_claims.len()); + if u64_shift_right_1_values_from_claims.is_empty() { + u64_shift_right_1_trace_values = Val::zero_vec(U64_SHIFT_RIGHT_1); + } else { + for (u64_val, u64_val_shifted_1) in u64_shift_right_1_values_from_claims { + debug_assert_eq!(u64_val_shifted_1, u64_val >> 1); + + u64_shift_right_1_trace_values.push(Val::ONE); // multiplicity + for i in 0..64 { + if ((u64_val >> i) & 1) == 1 { + u64_shift_right_1_trace_values.push(Val::ONE) + } else { + u64_shift_right_1_trace_values.push(Val::ZERO) + } + } + } + } + let mut u64_shift_right_1_trace = + RowMajorMatrix::new(u64_shift_right_1_trace_values, U64_SHIFT_RIGHT_1); + let height = u64_shift_right_1_trace.height().next_power_of_two(); + u64_shift_right_1_trace.pad_to_height(height, Val::ZERO); + + // u64_to_32 fn u64_to_u32( values_from_claims: &Vec<(u64, u32)>, range_check_values: &mut Vec<(Val, Val)>, @@ -406,7 +482,6 @@ mod tests { u64_to_u32_trace } - // u64_to_32 let u64_to_u32_trace = u64_to_u32( &u64_to_u32_values_from_claims, &mut byte_range_check_values_from_claims, @@ -420,48 +495,6 @@ mod tests { true, ); - // // u64_shift_right_32 - // let mut u64_shift_right_32_trace_values = - // Vec::::with_capacity(u64_shift_right_32_values_from_claims.len()); - // if u64_shift_right_32_values_from_claims.is_empty() { - // u64_shift_right_32_trace_values = Val::zero_vec(U64_SHIFT_32_TRACE_WIDTH); - // - // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); - // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); - // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); - // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); - // } else { - // for (u64_val, shifted) in u64_shift_right_32_values_from_claims { - // let computed = (u64_val >> 32) as u32; - // debug_assert_eq!(computed, shifted); - // - // let u64_bytes: [u8; 8] = u64_val.to_le_bytes(); - // u64_shift_right_32_trace_values.push(Val::ONE); // multiplicity - // u64_shift_right_32_trace_values - // .extend_from_slice(u64_bytes.map(Val::from_u8).as_slice()); - // - // byte_range_check_values_from_claims - // .push((Val::from_u8(u64_bytes[0]), Val::from_u8(u64_bytes[4]))); - // byte_range_check_values_from_claims - // .push((Val::from_u8(u64_bytes[1]), Val::from_u8(u64_bytes[5]))); - // byte_range_check_values_from_claims - // .push((Val::from_u8(u64_bytes[2]), Val::from_u8(u64_bytes[6]))); - // byte_range_check_values_from_claims - // .push((Val::from_u8(u64_bytes[3]), Val::from_u8(u64_bytes[7]))); - // } - // } - // let mut u64_shift_right_32_trace = - // 
RowMajorMatrix::new(u64_shift_right_32_trace_values, U64_SHIFT_32_TRACE_WIDTH); - // let height = u64_shift_right_32_trace.height().next_power_of_two(); - // let zero_rows = height - u64_shift_right_32_trace.height(); - // for _ in 0..zero_rows { - // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); - // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); - // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); - // byte_range_check_values_from_claims.push((Val::ZERO, Val::ZERO)); - // } - // u64_shift_right_32_trace.pad_to_height(height, Val::ZERO); - // u32_or let mut u32_or_trace_values = Vec::::with_capacity(u32_or_values_from_claims.len()); @@ -596,6 +629,7 @@ mod tests { u32_or_trace, u64_shift_right_32_trace, u64_to_u32_trace, + u64_shift_right_1_trace, ]; (traces.clone(), SystemWitness::from_stage_1(traces, system)) @@ -623,6 +657,10 @@ mod tests { ); let u64_as_u32_circuit = LookupAir::new(UtilityChip::U64AsU32, UtilityChip::U64AsU32.lookups()); + let u64_shift_right_1_circuit = LookupAir::new( + UtilityChip::U64ShiftRight1, + UtilityChip::U64ShiftRight1.lookups(), + ); let (system, prover_key) = System::new( commitment_parameters, @@ -632,6 +670,7 @@ mod tests { u32_or_circuit, u64_shift_right_32_circuit, u64_as_u32_circuit, + u64_shift_right_1_circuit, ], // order matters ); @@ -688,6 +727,10 @@ mod tests { ); let u64_as_u32_circuit = LookupAir::new(UtilityChip::U64AsU32, UtilityChip::U64AsU32.lookups()); + let u64_shift_right_1_circuit = LookupAir::new( + UtilityChip::U64ShiftRight1, + UtilityChip::U64ShiftRight1.lookups(), + ); let (system, prover_key) = System::new( commitment_parameters, @@ -697,6 +740,7 @@ mod tests { u32_or_circuit, u64_shift_right_32_circuit, u64_as_u32_circuit, + u64_shift_right_1_circuit, ], // order matters ); @@ -747,6 +791,10 @@ mod tests { ); let u64_as_u32_circuit = LookupAir::new(UtilityChip::U64AsU32, UtilityChip::U64AsU32.lookups()); + let u64_shift_right_1_circuit = LookupAir::new( + UtilityChip::U64ShiftRight1, + UtilityChip::U64ShiftRight1.lookups(), + ); let (system, prover_key) = System::new( commitment_parameters, @@ -756,6 +804,7 @@ mod tests { u32_or_circuit, u64_shift_right_32_circuit, u64_as_u32_circuit, + u64_shift_right_1_circuit, ], // order matters ); @@ -805,6 +854,10 @@ mod tests { ); let u64_as_u32_circuit = LookupAir::new(UtilityChip::U64AsU32, UtilityChip::U64AsU32.lookups()); + let u64_shift_right_1_circuit = LookupAir::new( + UtilityChip::U64ShiftRight1, + UtilityChip::U64ShiftRight1.lookups(), + ); let (system, prover_key) = System::new( commitment_parameters, @@ -814,6 +867,7 @@ mod tests { u32_or_circuit, u64_shift_right_32_circuit, u64_as_u32_circuit, + u64_shift_right_1_circuit, ], // order matters ); @@ -869,6 +923,10 @@ mod tests { ); let u64_as_u32_circuit = LookupAir::new(UtilityChip::U64AsU32, UtilityChip::U64AsU32.lookups()); + let u64_shift_right_1_circuit = LookupAir::new( + UtilityChip::U64ShiftRight1, + UtilityChip::U64ShiftRight1.lookups(), + ); let (system, prover_key) = System::new( commitment_parameters, @@ -878,6 +936,7 @@ mod tests { u32_or_circuit, u64_shift_right_32_circuit, u64_as_u32_circuit, + u64_shift_right_1_circuit, ], // order matters ); @@ -908,4 +967,68 @@ mod tests { .verify_multiple_claims(fri_parameters, claims_slice, &proof) .expect("verification issue"); } + + #[test] + fn test_u64_shift_right_1() { + let u64_val = 0x7fffffffffffffffu64; + let u64_val_right_shifted = u64_val >> 1; + assert_eq!(u64_val_right_shifted, 0x3fffffffffffffffu64); + + 
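+        // Claim format for U64ShiftRight1 is [chip_idx, input, input >> 1]; the chip
+        // witnesses the full bit decomposition of the input (one column per bit,
+        // least significant bit first) and constrains each bit column to be boolean
+        // in `eval`, while the lookup re-packs the bits into input and input >> 1.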
let commitment_parameters = CommitmentParameters { log_blowup: 1 }; + let u8_circuit = LookupAir::new(UtilityChip::U8Or, UtilityChip::U8Or.lookups()); + let u32_or_circuit = LookupAir::new(UtilityChip::U32Or, UtilityChip::U32Or.lookups()); + let u32_from_le_bytes_circuit = LookupAir::new( + UtilityChip::U32FromLeBytes, + UtilityChip::U32FromLeBytes.lookups(), + ); + let u64_shift_right_32_circuit = LookupAir::new( + UtilityChip::U64ShiftRight32AsU32, + UtilityChip::U64ShiftRight32AsU32.lookups(), + ); + let u64_as_u32_circuit = + LookupAir::new(UtilityChip::U64AsU32, UtilityChip::U64AsU32.lookups()); + let u64_shift_right_1_circuit = LookupAir::new( + UtilityChip::U64ShiftRight1, + UtilityChip::U64ShiftRight1.lookups(), + ); + + let (system, prover_key) = System::new( + commitment_parameters, + vec![ + u8_circuit, + u32_from_le_bytes_circuit, + u32_or_circuit, + u64_shift_right_32_circuit, + u64_as_u32_circuit, + u64_shift_right_1_circuit, + ], // order matters + ); + + let claims = UtilityChipClaims { + claims: vec![ + [ + vec![Val::from_usize(UtilityChip::U64ShiftRight1.position())], + vec![Val::from_u64(u64_val), Val::from_u64(u64_val_right_shifted)], + ] + .concat(), + ], + }; + + let (_traces, witness) = claims.witness(&system); + + let claims_slice: Vec<&[Val]> = claims.claims.iter().map(|v| v.as_slice()).collect(); + let claims_slice: &[&[Val]] = &claims_slice; + + let fri_parameters = FriParameters { + log_final_poly_len: 0, + num_queries: 64, + proof_of_work_bits: 0, + }; + + let proof = + system.prove_multiple_claims(fri_parameters, &prover_key, claims_slice, witness); + system + .verify_multiple_claims(fri_parameters, claims_slice, &proof) + .expect("verification issue"); + } }
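A minimal standalone sketch (illustrative only, not part of the patches above) of the little-endian limb packing that the U32FromLeBytes, U64AsU32 and U64ShiftRight32AsU32 lookups express with powers of 256. The helper name pack_u32_le is hypothetical, and the snippet assumes only the Rust standard library.

// Recombine little-endian bytes the same way the lookup arguments do:
// b0 + b1*256 + b2*256^2 + b3*256^3 (illustrative helper, not in the codebase).
fn pack_u32_le(bytes: [u8; 4]) -> u32 {
    bytes.iter().rev().fold(0u32, |acc, &b| acc * 256 + u32::from(b))
}

fn main() {
    let x: u64 = 0xabcd_1234_1f1f_1f1f;
    let b = x.to_le_bytes();

    // U64AsU32 keeps the four least significant bytes of the input...
    assert_eq!(x as u32, pack_u32_le([b[0], b[1], b[2], b[3]]));
    // ...while U64ShiftRight32AsU32 keeps the four most significant bytes.
    assert_eq!((x >> 32) as u32, pack_u32_le([b[4], b[5], b[6], b[7]]));
}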