From 789354a1e05cf2d94fcf1c7b87d7ed9e90bc4735 Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 24 May 2023 16:53:57 +0800 Subject: [PATCH 001/100] Add utils for hadamard and matrix vector product --- src/lib.rs | 2 +- src/utils.rs | 81 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 82 insertions(+), 1 deletion(-) create mode 100644 src/utils.rs diff --git a/src/lib.rs b/src/lib.rs index bc48c9323..1844b72f2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -17,7 +17,7 @@ mod circuit; mod constants; mod nifs; mod r1cs; - +mod utils; // public modules pub mod errors; pub mod gadgets; diff --git a/src/utils.rs b/src/utils.rs new file mode 100644 index 000000000..0cd6f8c71 --- /dev/null +++ b/src/utils.rs @@ -0,0 +1,81 @@ +//! Basic utils +use crate::{ + errors::NovaError +}; +use ff::PrimeField; + +#[allow(dead_code)] +pub fn matrix_vector_product( + matrix: &Vec>, + vector: &Vec, +) -> Result, NovaError> { + if matrix.len() == 0 || matrix[0].len() == 0 { + return Err(NovaError::InvalidIndex); + } + + if matrix[0].len() != vector.len() { + return Err(NovaError::InvalidIndex); + } + + let mut res = Vec::with_capacity(matrix.len()); + for i in 0..matrix.len() { + let mut sum = F::ZERO; + for j in 0..matrix[i].len() { + sum += matrix[i][j] * vector[j]; + } + res.push(sum); + } + + Ok(res) +} + +#[allow(dead_code)] +pub fn hadamard_product(a: &Vec, b: &Vec) -> Result, NovaError> { + if a.len() != b.len() { + return Err(NovaError::InvalidIndex); + } + + let mut res = Vec::with_capacity(a.len()); + for i in 0..a.len() { + res.push(a[i] * b[i]); + } + + Ok(res) +} + +#[allow(dead_code)] +pub fn to_F_vec(v: Vec) -> Vec { + v.iter().map(|x| F::from(*x)).collect() +} + +#[allow(dead_code)] +pub fn to_F_matrix(m: Vec>) -> Vec> { + m.iter().map(|x| to_F_vec(x.clone())).collect() +} + +#[cfg(test)] +mod tests { + use super::*; + use pasta_curves::Fq; + + #[test] + fn test_matrix_vector_product() { + + let matrix = vec![vec![1, 2, 3], vec![4, 5, 6]]; + let vector = vec![1, 2, 3]; + let A = to_F_matrix::(matrix); + let z = to_F_vec::(vector); + let res = matrix_vector_product(&A, &z).unwrap(); + + assert_eq!(res, to_F_vec::(vec![14, 32])); + } + + #[test] + fn test_hadamard_product() { + let a = to_F_vec::(vec![1, 2, 3]); + let b = to_F_vec::(vec![4, 5, 6]); + let res = hadamard_product(&a, &b).unwrap(); + assert_eq!(res, to_F_vec::(vec![4, 10, 18])); + } + +} \ No newline at end of file From 5a025c6d43ead6572702926f12d537eb276ba792 Mon Sep 17 00:00:00 2001 From: oskarth Date: Thu, 25 May 2023 16:34:38 +0800 Subject: [PATCH 002/100] utils: Add more utils, sparse matrix vector product etc --- src/utils.rs | 120 +++++++++++++++++++++++++++++++++++++++++++-------- 1 file changed, 102 insertions(+), 18 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 0cd6f8c71..08c0317d8 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,9 +1,31 @@ //! 
Basic utils -use crate::{ - errors::NovaError -}; +use crate::errors::NovaError; use ff::PrimeField; +#[allow(dead_code)] +pub fn vector_add(a: &Vec, b: &Vec) -> Result, NovaError> { + if a.len() != b.len() { + return Err(NovaError::InvalidIndex); + } + + let mut res = Vec::with_capacity(a.len()); + for i in 0..a.len() { + res.push(a[i] + b[i]); + } + + Ok(res) +} + +#[allow(dead_code)] +pub fn vector_elem_product(a: &Vec, e: &F) -> Result, NovaError> { + let mut res = Vec::with_capacity(a.len()); + for i in 0..a.len() { + res.push(a[i] * e); + } + + Ok(res) +} + #[allow(dead_code)] pub fn matrix_vector_product( matrix: &Vec>, @@ -29,6 +51,31 @@ pub fn matrix_vector_product( Ok(res) } +// Matrix vector product where matrix is sparse +// First element is row index, second column, third value stored +#[allow(dead_code)] +pub fn matrix_vector_product_sparse( + matrix: &Vec<(usize, usize, F)>, + vector: &Vec, +) -> Result, NovaError> { + if matrix.len() == 0 { + return Err(NovaError::InvalidIndex); + } + + // Find the maximum row index in the matrix + let max_row = matrix.iter().map(|r| r.0).max().unwrap() + 1; + if max_row > vector.len() { + return Err(NovaError::InvalidIndex); + } + + let mut res = vec![F::ZERO; max_row]; + for &(row, col, value) in matrix { + res[row] += value * vector[col]; + } + + Ok(res) +} + #[allow(dead_code)] pub fn hadamard_product(a: &Vec, b: &Vec) -> Result, NovaError> { if a.len() != b.len() { @@ -53,29 +100,66 @@ pub fn to_F_matrix(m: Vec>) -> Vec> { m.iter().map(|x| to_F_vec(x.clone())).collect() } +#[allow(dead_code)] +pub fn to_F_matrix_sparse(m: Vec<(usize, usize, u64)>) -> Vec<(usize, usize, F)> { + m.iter().map(|x| (x.0, x.1, F::from(x.2))).collect() +} + #[cfg(test)] mod tests { - use super::*; - use pasta_curves::Fq; + use super::*; + use pasta_curves::Fq; - #[test] - fn test_matrix_vector_product() { + #[test] + fn test_vector_add() { + let a = to_F_vec::(vec![1, 2, 3]); + let b = to_F_vec::(vec![4, 5, 6]); + let res = vector_add(&a, &b).unwrap(); + assert_eq!(res, to_F_vec::(vec![5, 7, 9])); + } - let matrix = vec![vec![1, 2, 3], vec![4, 5, 6]]; - let vector = vec![1, 2, 3]; - let A = to_F_matrix::(matrix); - let z = to_F_vec::(vector); - let res = matrix_vector_product(&A, &z).unwrap(); + #[test] + fn test_vector_elem_product() { + let a = to_F_vec::(vec![1, 2, 3]); + let e = Fq::from(2); + let res = vector_elem_product(&a, &e).unwrap(); + assert_eq!(res, to_F_vec::(vec![2, 4, 6])); + } - assert_eq!(res, to_F_vec::(vec![14, 32])); + #[test] + fn test_matrix_vector_product() { + let matrix = vec![vec![1, 2, 3], vec![4, 5, 6]]; + let vector = vec![1, 2, 3]; + let A = to_F_matrix::(matrix); + let z = to_F_vec::(vector); + let res = matrix_vector_product(&A, &z).unwrap(); + + assert_eq!(res, to_F_vec::(vec![14, 32])); } #[test] fn test_hadamard_product() { - let a = to_F_vec::(vec![1, 2, 3]); - let b = to_F_vec::(vec![4, 5, 6]); - let res = hadamard_product(&a, &b).unwrap(); - assert_eq!(res, to_F_vec::(vec![4, 10, 18])); + let a = to_F_vec::(vec![1, 2, 3]); + let b = to_F_vec::(vec![4, 5, 6]); + let res = hadamard_product(&a, &b).unwrap(); + assert_eq!(res, to_F_vec::(vec![4, 10, 18])); } -} \ No newline at end of file + #[test] + fn test_matrix_vector_product_sparse() { + let matrix = vec![ + (0, 0, 1), + (0, 1, 2), + (0, 2, 3), + (1, 0, 4), + (1, 1, 5), + (1, 2, 6), + ]; + let vector = vec![1, 2, 3]; + let A = to_F_matrix_sparse::(matrix); + let z = to_F_vec::(vector); + let res = matrix_vector_product_sparse(&A, &z).unwrap(); + + assert_eq!(res, 
to_F_vec::(vec![14, 32])); + } +} From a3e21822445cdada2a0a94ac341b6bef6c2b7d34 Mon Sep 17 00:00:00 2001 From: oskarth Date: Fri, 26 May 2023 12:19:45 +0800 Subject: [PATCH 003/100] feat: First cut of CCS (#14) Still quite rough and lots of bugs --- src/ccs.rs | 540 +++++++++++++++++++++++++++++++++++++++++++++++++++++ src/lib.rs | 2 + 2 files changed, 542 insertions(+) create mode 100644 src/ccs.rs diff --git a/src/ccs.rs b/src/ccs.rs new file mode 100644 index 000000000..3cc22acd4 --- /dev/null +++ b/src/ccs.rs @@ -0,0 +1,540 @@ +//! This module defines CCS related types and functions. +#![allow(unused_imports)] +#![allow(dead_code)] +#![allow(clippy::type_complexity)] +use crate::{ + constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_HASH_BITS}, + errors::NovaError, + gadgets::{ + nonnative::{bignat::nat_to_limbs, util::f_to_nat}, + utils::scalar_as_base, + }, + traits::{ + commitment::CommitmentEngineTrait, AbsorbInROTrait, Group, ROTrait, TranscriptReprTrait, + }, + utils::*, + Commitment, CommitmentKey, CE, +}; +use bitvec::vec; +use core::{cmp::max, marker::PhantomData}; +use ff::Field; +use flate2::{write::ZlibEncoder, Compression}; +use itertools::concat; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; +use sha3::{Digest, Sha3_256}; + +// TODO, based on r1cs.rs: +// x CCS struct +// x CCS basic impl +// x CCS basic is_sat +// - Clean up old R1CS stuff we don't need +// - Get rid of hardcoded R1CS +// - Linearized/Committed CCS +// - R1CS to CCS + +/// Public parameters for a given CCS +#[derive(Clone, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CCS { + _p: PhantomData, +} + +// A type that holds the shape of a CCS instance +// Unlike R1CS we have a list of matrices M instead of only A, B, C +// We also have t, q, d constants and c (vector), S (set) +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct CCSShape { + pub(crate) num_cons: usize, + pub(crate) num_vars: usize, + pub(crate) num_io: usize, + pub(crate) M: Vec>, + pub(crate) t: usize, + pub(crate) q: usize, + pub(crate) d: usize, + pub(crate) S: Vec>, + pub(crate) c: Vec, + digest: G::Scalar, // digest of the rest of CCSShape +} + +/// A type that holds a witness for a given CCS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct CCSWitness { + W: Vec, +} + +/// A type that holds an CCS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CCSInstance { + pub(crate) comm_W: Commitment, + pub(crate) X: Vec, +} + +// TODO: Type for other CCS types, eqv to RelaxedR1CS + +// TODO: Function to convert R1CS to CCS +// Put here or in r1cs module + +// TODO: Util fn to create new CCSShape for R1CS with following values +// n=n, m=m, N=N, l=l, t=3, q=2, d=2 +// M={A,B,C}, S={{0, 1}, {2}}, c={1,−1} + +impl CCSShape { + /// Create an object of type `CCSSShape` from the explicitly specified CCS matrices + pub fn new( + num_cons: usize, + num_vars: usize, + num_io: usize, + M: &[Vec<(usize, usize, G::Scalar)>], + ) -> Result, NovaError> { + // NOTE: We assume the following constants for R1CS-to-CCS + // TODO: Add as parameters to new once all hardcoding is removed + const T: usize = 3; + const Q: usize = 2; + const D: usize = 2; + const S1: [usize; 2] = [0, 1]; + const S2: [usize; 1] = [2]; + const C0: i32 = 1; + const C1: i32 = -1; + + let is_valid = |num_cons: usize, + num_vars: usize, + num_io: usize, + matrix: &[(usize, usize, G::Scalar)]| + -> Result<(), NovaError> { + let res = (0..matrix.len()) + 
.map(|i| { + let (row, col, _val) = matrix[i]; + if row >= num_cons || col > num_io + num_vars { + Err(NovaError::InvalidIndex) + } else { + Ok(()) + } + }) + .collect::, NovaError>>(); + + if res.is_err() { + Err(NovaError::InvalidIndex) + } else { + Ok(()) + } + }; + + // Check that the row and column indexes are within the range of the number of constraints and variables + let res_M = M + .iter() + .map(|m| is_valid(num_cons, num_vars, num_io, m)) + .collect::, NovaError>>(); + + // If any of the matricies are invalid, return an error + if res_M.is_err() { + return Err(NovaError::InvalidIndex); + } + + // We require the number of public inputs/outputs to be even + if num_io % 2 != 0 { + return Err(NovaError::OddInputLength); + } + + let digest = Self::compute_digest(num_cons, num_vars, num_io, M); + + let shape = CCSShape { + num_cons, + num_vars, + num_io, + M: M.to_vec(), + t: T, + q: Q, + d: D, + S: vec![S1.to_vec(), S2.to_vec()], + c: vec![C0 as usize, C1 as usize], + digest, + }; + + Ok(shape) + } + + // NOTE: Not currently used + // TODO This has to be updated for CCS to not just return Az, Bz, Cz + // pub fn multiply_vec( + // &self, + // z: &[G::Scalar], + // ) -> Result>, NovaError> { + // if z.len() != self.num_io + self.num_vars + 1 { + // return Err(NovaError::InvalidWitnessLength); + // } + + // // computes a product between a sparse matrix `matrix` and a vector `z` + // // This does not perform any validation of entries in M (e.g., if entries in `M` reference indexes outside the range of `z`) + // // This is safe since we know that `M` is valid + // let sparse_matrix_vec_product = + // |matrix: &Vec<(usize, usize, G::Scalar)>, num_rows: usize, z: &[G::Scalar]| -> Vec { + // (0..matrix.len()) + // .map(|i| { + // let (row, col, val) =matrix[i]; + // (row, val * z[col]) + // }) + // .fold(vec![G::Scalar::ZERO; num_rows], |mut Mz, (r, v)| { + // Mz[r] += v; + // Mz + // }) + // }; + + // // // XXX: Hacky, assumes M is A, B, C (true for R1CS) + // // let A = self.M[0].clone(); + // // let B = self.M[1].clone(); + // // let C = self.M[2].clone(); + + // // let (Az, (Bz, Cz)) = rayon::join( + // // || sparse_matrix_vec_product(&A, self.num_cons, z), + // // || { + // // rayon::join( + // // || sparse_matrix_vec_product(&B, self.num_cons, z), + // // || sparse_matrix_vec_product(&C, self.num_cons, z), + // // ) + // // }, + // // ); + + // // TODO Use rayon to parallelize + // let Mzs = self.M.iter().map(|m| sparse_matrix_vec_product(m, self.num_cons, z)).collect::>(); + + // Ok(Mzs) + // } + + /// Checks if the Relaxed R1CS instance is satisfiable given a witness and its shape + // pub fn is_sat_relaxed( + // &self, + // ck: &CommitmentKey, + // U: &RelaxedR1CSInstance, + // W: &RelaxedR1CSWitness, + // ) -> Result<(), NovaError> { + // assert_eq!(W.W.len(), self.num_vars); + // assert_eq!(W.E.len(), self.num_cons); + // assert_eq!(U.X.len(), self.num_io); + + // // verify if Az * Bz = u*Cz + E + // let res_eq: bool = { + // let z = concat(vec![W.W.clone(), vec![U.u], U.X.clone()]); + // let (Az, Bz, Cz) = self.multiply_vec(&z)?; + // assert_eq!(Az.len(), self.num_cons); + // assert_eq!(Bz.len(), self.num_cons); + // assert_eq!(Cz.len(), self.num_cons); + + // let res: usize = (0..self.num_cons) + // .map(|i| usize::from(Az[i] * Bz[i] != U.u * Cz[i] + W.E[i])) + // .sum(); + + // res == 0 + // }; + + // // verify if comm_E and comm_W are commitments to E and W + // let res_comm: bool = { + // let (comm_W, comm_E) = + // rayon::join(|| CE::::commit(ck, &W.W), || CE::::commit(ck, 
&W.E)); + // U.comm_W == comm_W && U.comm_E == comm_E + // }; + + // if res_eq && res_comm { + // Ok(()) + // } else { + // Err(NovaError::UnSat) + // } + // } + + /// Checks if the CCS instance is satisfiable given a witness and its shape + pub fn is_sat( + &self, + ck: &CommitmentKey, + U: &CCSInstance, + W: &CCSWitness, + ) -> Result<(), NovaError> { + assert_eq!(W.W.len(), self.num_vars); + assert_eq!(U.X.len(), self.num_io); + + let m = self.M[0].len(); + + // Sage code to check CCS relation: + // + // r = [F(0)] * m + // for i in range(0, q): + // hadamard_output = [F(1)]*m + // for j in S[i]: + // hadamard_output = hadamard_product(hadamard_output, + // matrix_vector_product(M[j], z)) + // + // r = vec_add(r, vec_elem_mul(hadamard_output, c[i])) + // print("\nCCS relation check (∑ cᵢ ⋅ ◯ Mⱼ z == 0):", r == [0]*m) + // + // verify if ∑ cᵢ ⋅ ◯ Mⱼ z == 0 + let res_eq: bool = { + let mut r = vec![G::Scalar::ZERO; m]; + let z = concat(vec![W.W.clone(), vec![G::Scalar::ONE], U.X.clone()]); + + for i in 0..self.q { + let mut hadamard_output = vec![G::Scalar::ONE; m]; + for j in &self.S[i] { + let mvp = matrix_vector_product_sparse(&self.M[*j], &z)?; + hadamard_output = hadamard_product(&hadamard_output, &mvp)?; + } + + // XXX Problem if c[i] is F? + let civ = G::Scalar::from(self.c[i] as u64); + let vep = vector_elem_product(&hadamard_output, &civ)?; + + r = vector_add(&r, &vep)?; + } + r == vec![G::Scalar::ZERO; m] + }; + + // NOTE: Previous R1CS code for reference + // // verify if Az * Bz = u*Cz + // let res_eq: bool = { + // let z = concat(vec![W.W.clone(), vec![G::Scalar::ONE], U.X.clone()]); + // let (Az, Bz, Cz) = self.multiply_vec(&z)?; + // assert_eq!(Az.len(), self.num_cons); + // assert_eq!(Bz.len(), self.num_cons); + // assert_eq!(Cz.len(), self.num_cons); + + // let res: usize = (0..self.num_cons) + // .map(|i| usize::from(Az[i] * Bz[i] != Cz[i])) + // .sum(); + + // res == 0 + // }; + + // verify if comm_W is a commitment to W + let res_comm: bool = U.comm_W == CE::::commit(ck, &W.W); + + if res_eq && res_comm { + Ok(()) + } else { + Err(NovaError::UnSat) + } + } + + /// A method to compute a commitment to the cross-term `T` given a + /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair + // pub fn commit_T( + // &self, + // ck: &CommitmentKey, + // U1: &RelaxedR1CSInstance, + // W1: &RelaxedR1CSWitness, + // U2: &R1CSInstance, + // W2: &R1CSWitness, + // ) -> Result<(Vec, Commitment), NovaError> { + // let (AZ_1, BZ_1, CZ_1) = { + // let Z1 = concat(vec![W1.W.clone(), vec![U1.u], U1.X.clone()]); + // self.multiply_vec(&Z1)? + // }; + + // let (AZ_2, BZ_2, CZ_2) = { + // let Z2 = concat(vec![W2.W.clone(), vec![G::Scalar::ONE], U2.X.clone()]); + // self.multiply_vec(&Z2)? 
+ // }; + + // let AZ_1_circ_BZ_2 = (0..AZ_1.len()) + // .into_par_iter() + // .map(|i| AZ_1[i] * BZ_2[i]) + // .collect::>(); + // let AZ_2_circ_BZ_1 = (0..AZ_2.len()) + // .into_par_iter() + // .map(|i| AZ_2[i] * BZ_1[i]) + // .collect::>(); + // let u_1_cdot_CZ_2 = (0..CZ_2.len()) + // .into_par_iter() + // .map(|i| U1.u * CZ_2[i]) + // .collect::>(); + // let u_2_cdot_CZ_1 = (0..CZ_1.len()) + // .into_par_iter() + // .map(|i| CZ_1[i]) + // .collect::>(); + + // let T = AZ_1_circ_BZ_2 + // .par_iter() + // .zip(&AZ_2_circ_BZ_1) + // .zip(&u_1_cdot_CZ_2) + // .zip(&u_2_cdot_CZ_1) + // .map(|(((a, b), c), d)| *a + *b - *c - *d) + // .collect::>(); + + // let comm_T = CE::::commit(ck, &T); + + // Ok((T, comm_T)) + // } + + /// returns the digest of R1CSShape + pub fn get_digest(&self) -> G::Scalar { + self.digest + } + + fn compute_digest( + num_cons: usize, + num_vars: usize, + num_io: usize, + M: &[Vec<(usize, usize, G::Scalar)>], + ) -> G::Scalar { + #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] + struct CCSShapeWithoutDigest { + num_cons: usize, + num_vars: usize, + num_io: usize, + M: Vec>, + } + + let shape = CCSShapeWithoutDigest:: { + num_cons, + num_vars, + num_io, + M: M.to_vec(), + }; + + // obtain a vector of bytes representing the CCS shape + let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default()); + bincode::serialize_into(&mut encoder, &shape).unwrap(); + let shape_bytes = encoder.finish().unwrap(); + + // convert shape_bytes into a short digest + let mut hasher = Sha3_256::new(); + hasher.input(&shape_bytes); + let digest = hasher.result(); + + // truncate the digest to 250 bits + let bv = (0..NUM_HASH_BITS).map(|i| { + let (byte_pos, bit_pos) = (i / 8, i % 8); + let bit = (digest[byte_pos] >> bit_pos) & 1; + bit == 1 + }); + + // turn the bit vector into a scalar + let mut res = G::Scalar::ZERO; + let mut coeff = G::Scalar::ONE; + for bit in bv { + if bit { + res += coeff; + } + coeff += coeff; + } + res + } + + /// Pads the R1CSShape so that the number of variables is a power of two + /// Renumbers variables to accomodate padded variables + pub fn pad(&self) -> Self { + // equalize the number of variables and constraints + let m = max(self.num_vars, self.num_cons).next_power_of_two(); + + // check if the provided R1CSShape is already as required + if self.num_vars == m && self.num_cons == m { + return self.clone(); + } + + // check if the number of variables are as expected, then + // we simply set the number of constraints to the next power of two + if self.num_vars == m { + let digest = Self::compute_digest(m, self.num_vars, self.num_io, &self.M); + + // NOTE: We assume the following constants for R1CS-to-CCS + const T: usize = 3; + const Q: usize = 2; + const D: usize = 2; + const S: [[usize; 2]; 1] = [[0, 1]]; + const S2: [usize; 1] = [2]; + const C0: i32 = 1; + const C1: i32 = -1; + + return CCSShape { + num_cons: m, + num_vars: m, + num_io: self.num_io, + M: self.M.clone(), + t: T, + q: Q, + d: D, + S: vec![S[0].to_vec(), S2.to_vec()], + c: vec![C0 as usize, C1 as usize], + digest, + }; + } + + // otherwise, we need to pad the number of variables and renumber variable accesses + let num_vars_padded = m; + let num_cons_padded = m; + let apply_pad = |M: &[(usize, usize, G::Scalar)]| -> Vec<(usize, usize, G::Scalar)> { + M.par_iter() + .map(|(r, c, v)| { + ( + *r, + if c >= &self.num_vars { + c + num_vars_padded - self.num_vars + } else { + *c + }, + *v, + ) + }) + .collect::>() + }; + + // Apply pad for each matrix in M + let M_padded = 
self.M.iter().map(|m| apply_pad(m)).collect::>(); + + let digest = Self::compute_digest(num_cons_padded, num_vars_padded, self.num_io, &M_padded); + + // NOTE: We assume the following constants for R1CS-to-CCS + const T: usize = 3; + const Q: usize = 2; + const D: usize = 2; + const S: [[usize; 2]; 1] = [[0, 1]]; + const S2: [usize; 1] = [2]; + const C0: i32 = 1; + const C1: i32 = -1; + + CCSShape { + num_cons: num_cons_padded, + num_vars: num_vars_padded, + num_io: self.num_io, + M: M_padded, + t: T, + q: Q, + d: D, + S: vec![S[0].to_vec(), S2.to_vec()], + c: vec![C0 as usize, C1 as usize], + digest, + } + } +} + +impl CCSWitness { + /// A method to create a witness object using a vector of scalars + pub fn new(S: &CCSShape, W: &[G::Scalar]) -> Result, NovaError> { + if S.num_vars != W.len() { + Err(NovaError::InvalidWitnessLength) + } else { + Ok(CCSWitness { W: W.to_owned() }) + } + } + + /// Commits to the witness using the supplied generators + pub fn commit(&self, ck: &CommitmentKey) -> Commitment { + CE::::commit(ck, &self.W) + } +} + +impl CCSInstance { + /// A method to create an instance object using consitituent elements + pub fn new( + S: &CCSShape, + comm_W: &Commitment, + X: &[G::Scalar], + ) -> Result, NovaError> { + if S.num_io != X.len() { + Err(NovaError::InvalidInputLength) + } else { + Ok(CCSInstance { + comm_W: *comm_W, + X: X.to_owned(), + }) + } + } +} diff --git a/src/lib.rs b/src/lib.rs index 1844b72f2..5bd71438e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,11 +13,13 @@ // private modules mod bellperson; +mod ccs; mod circuit; mod constants; mod nifs; mod r1cs; mod utils; + // public modules pub mod errors; pub mod gadgets; From 5d4b1104e40dcb2c7cc63c5d4486e08506f9aa6c Mon Sep 17 00:00:00 2001 From: oskarth Date: Fri, 26 May 2023 12:38:03 +0800 Subject: [PATCH 004/100] refactor: Remove digest Simplified in recent commit https://github.com/microsoft/Nova/commit/b28aaf70a854d540e48607705e880aa61505ae2a --- src/ccs.rs | 65 ------------------------------------------------------ 1 file changed, 65 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 3cc22acd4..de5366965 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -54,7 +54,6 @@ pub struct CCSShape { pub(crate) d: usize, pub(crate) S: Vec>, pub(crate) c: Vec, - digest: G::Scalar, // digest of the rest of CCSShape } /// A type that holds a witness for a given CCS instance @@ -137,8 +136,6 @@ impl CCSShape { return Err(NovaError::OddInputLength); } - let digest = Self::compute_digest(num_cons, num_vars, num_io, M); - let shape = CCSShape { num_cons, num_vars, @@ -149,7 +146,6 @@ impl CCSShape { d: D, S: vec![S1.to_vec(), S2.to_vec()], c: vec![C0 as usize, C1 as usize], - digest, }; Ok(shape) @@ -363,61 +359,6 @@ impl CCSShape { // Ok((T, comm_T)) // } - /// returns the digest of R1CSShape - pub fn get_digest(&self) -> G::Scalar { - self.digest - } - - fn compute_digest( - num_cons: usize, - num_vars: usize, - num_io: usize, - M: &[Vec<(usize, usize, G::Scalar)>], - ) -> G::Scalar { - #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] - struct CCSShapeWithoutDigest { - num_cons: usize, - num_vars: usize, - num_io: usize, - M: Vec>, - } - - let shape = CCSShapeWithoutDigest:: { - num_cons, - num_vars, - num_io, - M: M.to_vec(), - }; - - // obtain a vector of bytes representing the CCS shape - let mut encoder = ZlibEncoder::new(Vec::new(), Compression::default()); - bincode::serialize_into(&mut encoder, &shape).unwrap(); - let shape_bytes = encoder.finish().unwrap(); - - // convert shape_bytes into a short 
digest - let mut hasher = Sha3_256::new(); - hasher.input(&shape_bytes); - let digest = hasher.result(); - - // truncate the digest to 250 bits - let bv = (0..NUM_HASH_BITS).map(|i| { - let (byte_pos, bit_pos) = (i / 8, i % 8); - let bit = (digest[byte_pos] >> bit_pos) & 1; - bit == 1 - }); - - // turn the bit vector into a scalar - let mut res = G::Scalar::ZERO; - let mut coeff = G::Scalar::ONE; - for bit in bv { - if bit { - res += coeff; - } - coeff += coeff; - } - res - } - /// Pads the R1CSShape so that the number of variables is a power of two /// Renumbers variables to accomodate padded variables pub fn pad(&self) -> Self { @@ -432,8 +373,6 @@ impl CCSShape { // check if the number of variables are as expected, then // we simply set the number of constraints to the next power of two if self.num_vars == m { - let digest = Self::compute_digest(m, self.num_vars, self.num_io, &self.M); - // NOTE: We assume the following constants for R1CS-to-CCS const T: usize = 3; const Q: usize = 2; @@ -453,7 +392,6 @@ impl CCSShape { d: D, S: vec![S[0].to_vec(), S2.to_vec()], c: vec![C0 as usize, C1 as usize], - digest, }; } @@ -479,8 +417,6 @@ impl CCSShape { // Apply pad for each matrix in M let M_padded = self.M.iter().map(|m| apply_pad(m)).collect::>(); - let digest = Self::compute_digest(num_cons_padded, num_vars_padded, self.num_io, &M_padded); - // NOTE: We assume the following constants for R1CS-to-CCS const T: usize = 3; const Q: usize = 2; @@ -500,7 +436,6 @@ impl CCSShape { d: D, S: vec![S[0].to_vec(), S2.to_vec()], c: vec![C0 as usize, C1 as usize], - digest, } } } From 60bbec2458eb9404d133978bb8beb28bc248ccc7 Mon Sep 17 00:00:00 2001 From: oskarth Date: Fri, 26 May 2023 12:51:51 +0800 Subject: [PATCH 005/100] feat(CCS): Add from_r1cs and get rid of hardcoded constants --- src/ccs.rs | 93 +++++++++++++++++++++++++++++------------------------- 1 file changed, 50 insertions(+), 43 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index de5366965..d9ec27325 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -9,6 +9,7 @@ use crate::{ nonnative::{bignat::nat_to_limbs, util::f_to_nat}, utils::scalar_as_base, }, + r1cs::R1CSShape, traits::{ commitment::CommitmentEngineTrait, AbsorbInROTrait, Group, ROTrait, TranscriptReprTrait, }, @@ -40,9 +41,12 @@ pub struct CCS { _p: PhantomData, } +// TODO Pull out matrix type? + // A type that holds the shape of a CCS instance // Unlike R1CS we have a list of matrices M instead of only A, B, C // We also have t, q, d constants and c (vector), S (set) +// TODO Add m, n, or infer from M? 
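// The shape encodes the CCS relation that is_sat checks below:
//   ∑ cᵢ ⋅ ◯_{j ∈ Sᵢ} Mⱼ z == 0, where z = (W, 1, X).
// With the R1CS-to-CCS mapping used by from_r1cs (t=3, q=2, d=2, M={A,B,C},
// S={{0,1},{2}}, c={1,-1}), this is intended to reduce to Az ∘ Bz - Cz == 0.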
#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct CCSShape { pub(crate) num_cons: usize, @@ -86,17 +90,12 @@ impl CCSShape { num_vars: usize, num_io: usize, M: &[Vec<(usize, usize, G::Scalar)>], + t: usize, + q: usize, + d: usize, + S: Vec>, + c: Vec, ) -> Result, NovaError> { - // NOTE: We assume the following constants for R1CS-to-CCS - // TODO: Add as parameters to new once all hardcoding is removed - const T: usize = 3; - const Q: usize = 2; - const D: usize = 2; - const S1: [usize; 2] = [0, 1]; - const S2: [usize; 1] = [2]; - const C0: i32 = 1; - const C1: i32 = -1; - let is_valid = |num_cons: usize, num_vars: usize, num_io: usize, @@ -141,11 +140,13 @@ impl CCSShape { num_vars, num_io, M: M.to_vec(), - t: T, - q: Q, - d: D, - S: vec![S1.to_vec(), S2.to_vec()], - c: vec![C0 as usize, C1 as usize], + t, + q, + d, + S, + c, + // S: vec![S1.to_vec(), S2.to_vec()], + // c: vec![C0 as usize, C1 as usize], }; Ok(shape) @@ -309,6 +310,29 @@ impl CCSShape { } } + pub fn from_r1cs(r1cs: R1CSShape) -> Self { + // These contants are used for R1CS-to-CCS, see the paper for more details + const T: usize = 3; + const Q: usize = 2; + const D: usize = 2; + const S1: [usize; 2] = [0, 1]; + const S2: [usize; 1] = [2]; + const C0: i32 = 1; + const C1: i32 = -1; + + Self { + num_cons: r1cs.num_cons, + num_vars: r1cs.num_vars, + num_io: r1cs.num_io, + M: vec![r1cs.A, r1cs.B, r1cs.C], + t: T, + q: Q, + d: D, + S: vec![S1.to_vec(), S2.to_vec()], + c: vec![C0 as usize, C1 as usize], + } + } + /// A method to compute a commitment to the cross-term `T` given a /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair // pub fn commit_T( @@ -373,25 +397,16 @@ impl CCSShape { // check if the number of variables are as expected, then // we simply set the number of constraints to the next power of two if self.num_vars == m { - // NOTE: We assume the following constants for R1CS-to-CCS - const T: usize = 3; - const Q: usize = 2; - const D: usize = 2; - const S: [[usize; 2]; 1] = [[0, 1]]; - const S2: [usize; 1] = [2]; - const C0: i32 = 1; - const C1: i32 = -1; - return CCSShape { num_cons: m, num_vars: m, num_io: self.num_io, M: self.M.clone(), - t: T, - q: Q, - d: D, - S: vec![S[0].to_vec(), S2.to_vec()], - c: vec![C0 as usize, C1 as usize], + t: self.t, + q: self.q, + d: self.d, + S: self.S.clone(), + c: self.c.clone(), }; } @@ -417,25 +432,17 @@ impl CCSShape { // Apply pad for each matrix in M let M_padded = self.M.iter().map(|m| apply_pad(m)).collect::>(); - // NOTE: We assume the following constants for R1CS-to-CCS - const T: usize = 3; - const Q: usize = 2; - const D: usize = 2; - const S: [[usize; 2]; 1] = [[0, 1]]; - const S2: [usize; 1] = [2]; - const C0: i32 = 1; - const C1: i32 = -1; - + // XXX: Check if CCS padding is correct here CCSShape { num_cons: num_cons_padded, num_vars: num_vars_padded, num_io: self.num_io, M: M_padded, - t: T, - q: Q, - d: D, - S: vec![S[0].to_vec(), S2.to_vec()], - c: vec![C0 as usize, C1 as usize], + t: self.t, + q: self.q, + d: self.d, + S: self.S.clone(), + c: self.c.clone(), } } } From 4f5e78493dbf33bca945cce33275b2dd20edc8d5 Mon Sep 17 00:00:00 2001 From: oskarth Date: Fri, 26 May 2023 14:00:23 +0800 Subject: [PATCH 006/100] fix(CCS): Add commitment key and CCS is_sat test --- src/ccs.rs | 148 ++++++++++++++++++++++++++++++++++++++++++++++++++--- 1 file changed, 142 insertions(+), 6 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index d9ec27325..76f9624d1 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -2,16 +2,17 @@ 
#![allow(unused_imports)] #![allow(dead_code)] #![allow(clippy::type_complexity)] + use crate::{ - constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_HASH_BITS}, + constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, gadgets::{ nonnative::{bignat::nat_to_limbs, util::f_to_nat}, utils::scalar_as_base, }, - r1cs::R1CSShape, + r1cs::{R1CSInstance, R1CSShape, R1CSWitness, R1CS}, traits::{ - commitment::CommitmentEngineTrait, AbsorbInROTrait, Group, ROTrait, TranscriptReprTrait, + commitment::CommitmentEngineTrait, commitment::CommitmentTrait, AbsorbInROTrait, Group, ROTrait, }, utils::*, Commitment, CommitmentKey, CE, @@ -30,7 +31,7 @@ use sha3::{Digest, Sha3_256}; // x CCS basic impl // x CCS basic is_sat // - Clean up old R1CS stuff we don't need -// - Get rid of hardcoded R1CS +// x Get rid of hardcoded R1CS // - Linearized/Committed CCS // - R1CS to CCS @@ -76,8 +77,17 @@ pub struct CCSInstance { // TODO: Type for other CCS types, eqv to RelaxedR1CS -// TODO: Function to convert R1CS to CCS -// Put here or in r1cs module +// TODO: Update variables here? This is currently based on R1CS with M length, not sanity checked +impl CCS { + /// Samples public parameters for the specified number of constraints and variables in an CCS + pub fn commitment_key(S: &CCSShape) -> CommitmentKey { + let num_cons = S.num_cons; + let num_vars = S.num_vars; + let total_nz = S.M.iter().fold(0, |acc, m| acc + m.len()); + + G::CE::setup(b"ck", max(max(num_cons, num_vars), total_nz)) + } +} // TODO: Util fn to create new CCSShape for R1CS with following values // n=n, m=m, N=N, l=l, t=3, q=2, d=2 @@ -383,6 +393,9 @@ impl CCSShape { // Ok((T, comm_T)) // } + // TODO: Compute linearized form + // Requires MLE + /// Pads the R1CSShape so that the number of variables is a power of two /// Renumbers variables to accomodate padded variables pub fn pad(&self) -> Self { @@ -480,3 +493,126 @@ impl CCSInstance { } } } + +#[cfg(test)] +pub mod test { + use super::*; + use crate::{ + r1cs::R1CS, + traits::{Group, ROConstantsTrait}, + }; + use ::bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError}; + use ff::{Field, PrimeField}; + use rand::rngs::OsRng; + + type S = pasta_curves::pallas::Scalar; + type G = pasta_curves::pallas::Point; + + #[test] + fn test_tiny_ccs() { + // 1. Generate valid R1CS Shape + // 2. Convert to CCS + // 3. Test that it is satisfiable + + let one = S::one(); + let (num_cons, num_vars, num_io, A, B, C) = { + let num_cons = 4; + let num_vars = 4; + let num_io = 2; + + // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are respectively the input and output. 
+ // The R1CS for this problem consists of the following constraints: + // `I0 * I0 - Z0 = 0` + // `Z0 * I0 - Z1 = 0` + // `(Z1 + I0) * 1 - Z2 = 0` + // `(Z2 + 5) * 1 - I1 = 0` + + // Relaxed R1CS is a set of three sparse matrices (A B C), where there is a row for every + // constraint and a column for every entry in z = (vars, u, inputs) + // An R1CS instance is satisfiable iff: + // Az \circ Bz = u \cdot Cz + E, where z = (vars, 1, inputs) + let mut A: Vec<(usize, usize, S)> = Vec::new(); + let mut B: Vec<(usize, usize, S)> = Vec::new(); + let mut C: Vec<(usize, usize, S)> = Vec::new(); + + // constraint 0 entries in (A,B,C) + // `I0 * I0 - Z0 = 0` + A.push((0, num_vars + 1, one)); + B.push((0, num_vars + 1, one)); + C.push((0, 0, one)); + + // constraint 1 entries in (A,B,C) + // `Z0 * I0 - Z1 = 0` + A.push((1, 0, one)); + B.push((1, num_vars + 1, one)); + C.push((1, 1, one)); + + // constraint 2 entries in (A,B,C) + // `(Z1 + I0) * 1 - Z2 = 0` + A.push((2, 1, one)); + A.push((2, num_vars + 1, one)); + B.push((2, num_vars, one)); + C.push((2, 2, one)); + + // constraint 3 entries in (A,B,C) + // `(Z2 + 5) * 1 - I1 = 0` + A.push((3, 2, one)); + A.push((3, num_vars, one + one + one + one + one)); + B.push((3, num_vars, one)); + C.push((3, num_vars + 2, one)); + + (num_cons, num_vars, num_io, A, B, C) + }; + + // create a R1CS shape object + let S = { + let res = R1CSShape::new(num_cons, num_vars, num_io, &A, &B, &C); + assert!(res.is_ok()); + res.unwrap() + }; + + // 2. Take R1CS and convert to CCS + let S = CCSShape::from_r1cs(S); + + // generate generators and ro constants + let _ck = CCS::::commitment_key(&S); + let _ro_consts = + <::RO as ROTrait<::Base, ::Scalar>>::Constants::new(); + + // 3. Test that CCS is satisfiable + let _rand_inst_witness_generator = + |ck: &CommitmentKey, I: &S| -> (S, CCSInstance, CCSWitness) { + let i0 = *I; + + // compute a satisfying (vars, X) tuple + let (O, vars, X) = { + let z0 = i0 * i0; // constraint 0 + let z1 = i0 * z0; // constraint 1 + let z2 = z1 + i0; // constraint 2 + let i1 = z2 + one + one + one + one + one; // constraint 3 + + // store the witness and IO for the instance + let W = vec![z0, z1, z2, S::zero()]; + let X = vec![i0, i1]; + (i1, W, X) + }; + + let W = { + let res = CCSWitness::new(&S, &vars); + assert!(res.is_ok()); + res.unwrap() + }; + let U = { + let comm_W = W.commit(ck); + let res = CCSInstance::new(&S, &comm_W, &X); + assert!(res.is_ok()); + res.unwrap() + }; + + // check that generated instance is satisfiable + assert!(S.is_sat(ck, &U, &W).is_ok()); + + (O, U, W) + }; + } +} From f72c49b244796b802e99254f9f098fbec001b31d Mon Sep 17 00:00:00 2001 From: oskarth Date: Fri, 2 Jun 2023 12:06:48 +0800 Subject: [PATCH 007/100] style(ccs): Remove old comments and unused code, clarify todos --- src/ccs.rs | 195 ++++----------------------------------------------- src/utils.rs | 4 -- 2 files changed, 15 insertions(+), 184 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 76f9624d1..6a0cf307f 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -26,14 +26,9 @@ use rayon::prelude::*; use serde::{Deserialize, Serialize}; use sha3::{Digest, Sha3_256}; -// TODO, based on r1cs.rs: -// x CCS struct -// x CCS basic impl -// x CCS basic is_sat -// - Clean up old R1CS stuff we don't need -// x Get rid of hardcoded R1CS -// - Linearized/Committed CCS -// - R1CS to CCS +// TODO: Create a SparseMatrix type? 
Vec<(usize, usize, G::Scalar)> +// TODO: Committed CCS using MLE (see src/spartan/pp.rs) +// TODO: Linearized CCS struct and methods, separate struct similar to RelaxedR1CS /// Public parameters for a given CCS #[derive(Clone, Serialize, Deserialize)] @@ -42,12 +37,10 @@ pub struct CCS { _p: PhantomData, } -// TODO Pull out matrix type? - -// A type that holds the shape of a CCS instance -// Unlike R1CS we have a list of matrices M instead of only A, B, C -// We also have t, q, d constants and c (vector), S (set) -// TODO Add m, n, or infer from M? +// NOTE: Currently m, n are implicit, could possibly infer from M +/// A type that holds the shape of a CCS instance +/// Unlike R1CS we have a list of matrices M instead of only A, B, C +/// We also have t, q, d constants and c (vector), S (set) #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct CCSShape { pub(crate) num_cons: usize, @@ -67,6 +60,7 @@ pub struct CCSWitness { W: Vec, } +// TODO: Make sure this is in the right form for committed CCS using MLE, possibly a separate type? /// A type that holds an CCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] @@ -75,10 +69,8 @@ pub struct CCSInstance { pub(crate) X: Vec, } -// TODO: Type for other CCS types, eqv to RelaxedR1CS - -// TODO: Update variables here? This is currently based on R1CS with M length, not sanity checked impl CCS { + // TODO: Update commitment_key variables here? This is currently based on R1CS with M length /// Samples public parameters for the specified number of constraints and variables in an CCS pub fn commitment_key(S: &CCSShape) -> CommitmentKey { let num_cons = S.num_cons; @@ -89,10 +81,6 @@ impl CCS { } } -// TODO: Util fn to create new CCSShape for R1CS with following values -// n=n, m=m, N=N, l=l, t=3, q=2, d=2 -// M={A,B,C}, S={{0, 1}, {2}}, c={1,−1} - impl CCSShape { /// Create an object of type `CCSSShape` from the explicitly specified CCS matrices pub fn new( @@ -155,99 +143,15 @@ impl CCSShape { d, S, c, - // S: vec![S1.to_vec(), S2.to_vec()], - // c: vec![C0 as usize, C1 as usize], }; Ok(shape) } - // NOTE: Not currently used - // TODO This has to be updated for CCS to not just return Az, Bz, Cz - // pub fn multiply_vec( - // &self, - // z: &[G::Scalar], - // ) -> Result>, NovaError> { - // if z.len() != self.num_io + self.num_vars + 1 { - // return Err(NovaError::InvalidWitnessLength); - // } - - // // computes a product between a sparse matrix `matrix` and a vector `z` - // // This does not perform any validation of entries in M (e.g., if entries in `M` reference indexes outside the range of `z`) - // // This is safe since we know that `M` is valid - // let sparse_matrix_vec_product = - // |matrix: &Vec<(usize, usize, G::Scalar)>, num_rows: usize, z: &[G::Scalar]| -> Vec { - // (0..matrix.len()) - // .map(|i| { - // let (row, col, val) =matrix[i]; - // (row, val * z[col]) - // }) - // .fold(vec![G::Scalar::ZERO; num_rows], |mut Mz, (r, v)| { - // Mz[r] += v; - // Mz - // }) - // }; - - // // // XXX: Hacky, assumes M is A, B, C (true for R1CS) - // // let A = self.M[0].clone(); - // // let B = self.M[1].clone(); - // // let C = self.M[2].clone(); - - // // let (Az, (Bz, Cz)) = rayon::join( - // // || sparse_matrix_vec_product(&A, self.num_cons, z), - // // || { - // // rayon::join( - // // || sparse_matrix_vec_product(&B, self.num_cons, z), - // // || sparse_matrix_vec_product(&C, self.num_cons, z), - // // ) - // // }, - // // ); - - // // TODO Use rayon to parallelize - // let Mzs = 
self.M.iter().map(|m| sparse_matrix_vec_product(m, self.num_cons, z)).collect::>(); - - // Ok(Mzs) - // } - - /// Checks if the Relaxed R1CS instance is satisfiable given a witness and its shape - // pub fn is_sat_relaxed( - // &self, - // ck: &CommitmentKey, - // U: &RelaxedR1CSInstance, - // W: &RelaxedR1CSWitness, - // ) -> Result<(), NovaError> { - // assert_eq!(W.W.len(), self.num_vars); - // assert_eq!(W.E.len(), self.num_cons); - // assert_eq!(U.X.len(), self.num_io); - - // // verify if Az * Bz = u*Cz + E - // let res_eq: bool = { - // let z = concat(vec![W.W.clone(), vec![U.u], U.X.clone()]); - // let (Az, Bz, Cz) = self.multiply_vec(&z)?; - // assert_eq!(Az.len(), self.num_cons); - // assert_eq!(Bz.len(), self.num_cons); - // assert_eq!(Cz.len(), self.num_cons); - - // let res: usize = (0..self.num_cons) - // .map(|i| usize::from(Az[i] * Bz[i] != U.u * Cz[i] + W.E[i])) - // .sum(); - - // res == 0 - // }; - - // // verify if comm_E and comm_W are commitments to E and W - // let res_comm: bool = { - // let (comm_W, comm_E) = - // rayon::join(|| CE::::commit(ck, &W.W), || CE::::commit(ck, &W.E)); - // U.comm_W == comm_W && U.comm_E == comm_E - // }; - - // if res_eq && res_comm { - // Ok(()) - // } else { - // Err(NovaError::UnSat) - // } - // } + // NOTE: Not using previous used multiply_vec (r1cs.rs), see utils.rs + + // NOTE: Equivalent to is_sat_relaxed (r1cs.rs) but for CCCSS/LCCCS? + // Either here or as a separate method on LCCCS struct /// Checks if the CCS instance is satisfiable given a witness and its shape pub fn is_sat( @@ -285,7 +189,7 @@ impl CCSShape { hadamard_output = hadamard_product(&hadamard_output, &mvp)?; } - // XXX Problem if c[i] is F? + // XXX: Problem if c[i] is F? let civ = G::Scalar::from(self.c[i] as u64); let vep = vector_elem_product(&hadamard_output, &civ)?; @@ -294,22 +198,6 @@ impl CCSShape { r == vec![G::Scalar::ZERO; m] }; - // NOTE: Previous R1CS code for reference - // // verify if Az * Bz = u*Cz - // let res_eq: bool = { - // let z = concat(vec![W.W.clone(), vec![G::Scalar::ONE], U.X.clone()]); - // let (Az, Bz, Cz) = self.multiply_vec(&z)?; - // assert_eq!(Az.len(), self.num_cons); - // assert_eq!(Bz.len(), self.num_cons); - // assert_eq!(Cz.len(), self.num_cons); - - // let res: usize = (0..self.num_cons) - // .map(|i| usize::from(Az[i] * Bz[i] != Cz[i])) - // .sum(); - - // res == 0 - // }; - // verify if comm_W is a commitment to W let res_comm: bool = U.comm_W == CE::::commit(ck, &W.W); @@ -343,59 +231,6 @@ impl CCSShape { } } - /// A method to compute a commitment to the cross-term `T` given a - /// Relaxed R1CS instance-witness pair and an R1CS instance-witness pair - // pub fn commit_T( - // &self, - // ck: &CommitmentKey, - // U1: &RelaxedR1CSInstance, - // W1: &RelaxedR1CSWitness, - // U2: &R1CSInstance, - // W2: &R1CSWitness, - // ) -> Result<(Vec, Commitment), NovaError> { - // let (AZ_1, BZ_1, CZ_1) = { - // let Z1 = concat(vec![W1.W.clone(), vec![U1.u], U1.X.clone()]); - // self.multiply_vec(&Z1)? - // }; - - // let (AZ_2, BZ_2, CZ_2) = { - // let Z2 = concat(vec![W2.W.clone(), vec![G::Scalar::ONE], U2.X.clone()]); - // self.multiply_vec(&Z2)? 
- // }; - - // let AZ_1_circ_BZ_2 = (0..AZ_1.len()) - // .into_par_iter() - // .map(|i| AZ_1[i] * BZ_2[i]) - // .collect::>(); - // let AZ_2_circ_BZ_1 = (0..AZ_2.len()) - // .into_par_iter() - // .map(|i| AZ_2[i] * BZ_1[i]) - // .collect::>(); - // let u_1_cdot_CZ_2 = (0..CZ_2.len()) - // .into_par_iter() - // .map(|i| U1.u * CZ_2[i]) - // .collect::>(); - // let u_2_cdot_CZ_1 = (0..CZ_1.len()) - // .into_par_iter() - // .map(|i| CZ_1[i]) - // .collect::>(); - - // let T = AZ_1_circ_BZ_2 - // .par_iter() - // .zip(&AZ_2_circ_BZ_1) - // .zip(&u_1_cdot_CZ_2) - // .zip(&u_2_cdot_CZ_1) - // .map(|(((a, b), c), d)| *a + *b - *c - *d) - // .collect::>(); - - // let comm_T = CE::::commit(ck, &T); - - // Ok((T, comm_T)) - // } - - // TODO: Compute linearized form - // Requires MLE - /// Pads the R1CSShape so that the number of variables is a power of two /// Renumbers variables to accomodate padded variables pub fn pad(&self) -> Self { @@ -445,7 +280,7 @@ impl CCSShape { // Apply pad for each matrix in M let M_padded = self.M.iter().map(|m| apply_pad(m)).collect::>(); - // XXX: Check if CCS padding is correct here + // TODO: Sanity check if CCS padding is correct here CCSShape { num_cons: num_cons_padded, num_vars: num_vars_padded, diff --git a/src/utils.rs b/src/utils.rs index 08c0317d8..432c48955 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -2,7 +2,6 @@ use crate::errors::NovaError; use ff::PrimeField; -#[allow(dead_code)] pub fn vector_add(a: &Vec, b: &Vec) -> Result, NovaError> { if a.len() != b.len() { return Err(NovaError::InvalidIndex); @@ -16,7 +15,6 @@ pub fn vector_add(a: &Vec, b: &Vec) -> Result, NovaE Ok(res) } -#[allow(dead_code)] pub fn vector_elem_product(a: &Vec, e: &F) -> Result, NovaError> { let mut res = Vec::with_capacity(a.len()); for i in 0..a.len() { @@ -53,7 +51,6 @@ pub fn matrix_vector_product( // Matrix vector product where matrix is sparse // First element is row index, second column, third value stored -#[allow(dead_code)] pub fn matrix_vector_product_sparse( matrix: &Vec<(usize, usize, F)>, vector: &Vec, @@ -76,7 +73,6 @@ pub fn matrix_vector_product_sparse( Ok(res) } -#[allow(dead_code)] pub fn hadamard_product(a: &Vec, b: &Vec) -> Result, NovaError> { if a.len() != b.len() { return Err(NovaError::InvalidIndex); From 4470d92d4ac00cab299e3abf78b94d41d21c0f10 Mon Sep 17 00:00:00 2001 From: oskarth Date: Fri, 2 Jun 2023 12:22:16 +0800 Subject: [PATCH 008/100] refactor(ccs): Add experimental hypernova feature flag (on by default) Also add docs to README --- Cargo.toml | 3 ++- README.md | 9 +++++++++ src/lib.rs | 9 ++++++--- 3 files changed, 17 insertions(+), 4 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index 7269a0ed0..d429d6002 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,9 +66,10 @@ name = "sha256" harness = false [features] -default = [] +default = ["hypernova"] # Compiles in portable mode, w/o ISA extensions => binary can be executed on all systems. 
portable = ["pasta-msm/portable"] cuda = ["neptune/cuda", "neptune/pasta", "neptune/arity24"] opencl = ["neptune/opencl", "neptune/pasta", "neptune/arity24"] +hypernova = [] flamegraph = ["pprof/flamegraph", "pprof/criterion"] diff --git a/README.md b/README.md index feca29336..84d6a41d5 100644 --- a/README.md +++ b/README.md @@ -51,6 +51,15 @@ This project has adopted the [Microsoft Open Source Code of Conduct](https://ope For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments. + +### Experimental features + +To run early experimental work on CCS and HyperNova, use the `hypernova` feature flag: + +```text +cargo test --features hypernova ccs +``` + ## Trademarks This project may contain trademarks or logos for projects, products, or services. Authorized use of Microsoft diff --git a/src/lib.rs b/src/lib.rs index 5bd71438e..959bdcbe1 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -13,13 +13,10 @@ // private modules mod bellperson; -mod ccs; mod circuit; mod constants; mod nifs; mod r1cs; -mod utils; - // public modules pub mod errors; pub mod gadgets; @@ -27,6 +24,12 @@ pub mod provider; pub mod spartan; pub mod traits; +// experimental modules +#[cfg(feature = "hypernova")] +mod ccs; +#[cfg(feature = "hypernova")] +mod utils; + use crate::bellperson::{ r1cs::{NovaShape, NovaWitness}, shape_cs::ShapeCS, From 9182cc3334b370fdc9152e430aefa3193b73d189 Mon Sep 17 00:00:00 2001 From: oskarth Date: Fri, 2 Jun 2023 12:28:29 +0800 Subject: [PATCH 009/100] chore(ccs): Turn off hypernova feature flag by default --- Cargo.toml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d429d6002..9b2423a33 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ rand_chacha = "0.3" itertools = "0.9.0" subtle = "2.4" pasta_curves = { version = "0.5", features = ["repr-c", "serde"] } -neptune = { version = "10.0.0", default-features = false } +neptune = { version = "9.0.0", default-features = false } generic-array = "0.14.4" num-bigint = { version = "0.4", features = ["serde", "rand"] } num-traits = "0.2" @@ -66,10 +66,11 @@ name = "sha256" harness = false [features] -default = ["hypernova"] +default = [] # Compiles in portable mode, w/o ISA extensions => binary can be executed on all systems. 
portable = ["pasta-msm/portable"] cuda = ["neptune/cuda", "neptune/pasta", "neptune/arity24"] opencl = ["neptune/opencl", "neptune/pasta", "neptune/arity24"] hypernova = [] + flamegraph = ["pprof/flamegraph", "pprof/criterion"] From db6add06b3ab0e2082d1afcf3b82f3cc25c51742 Mon Sep 17 00:00:00 2001 From: oskarth Date: Fri, 2 Jun 2023 12:36:30 +0800 Subject: [PATCH 010/100] chore: update dep --- Cargo.toml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Cargo.toml b/Cargo.toml index 9b2423a33..f1de5681b 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -21,7 +21,7 @@ rand_chacha = "0.3" itertools = "0.9.0" subtle = "2.4" pasta_curves = { version = "0.5", features = ["repr-c", "serde"] } -neptune = { version = "9.0.0", default-features = false } +neptune = { version = "10.0.0", default-features = false } generic-array = "0.14.4" num-bigint = { version = "0.4", features = ["serde", "rand"] } num-traits = "0.2" From 2a59d808a0b0ce9b5828ed048c666bbb7622255c Mon Sep 17 00:00:00 2001 From: oskarth Date: Mon, 5 Jun 2023 12:30:59 +0800 Subject: [PATCH 011/100] chore: Cargo.toml --- Cargo.toml | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index f1de5681b..d429d6002 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -66,11 +66,10 @@ name = "sha256" harness = false [features] -default = [] +default = ["hypernova"] # Compiles in portable mode, w/o ISA extensions => binary can be executed on all systems. portable = ["pasta-msm/portable"] cuda = ["neptune/cuda", "neptune/pasta", "neptune/arity24"] opencl = ["neptune/opencl", "neptune/pasta", "neptune/arity24"] hypernova = [] - flamegraph = ["pprof/flamegraph", "pprof/criterion"] From 85f4e22f21ba688e19b89f86e1bb29beba86bffc Mon Sep 17 00:00:00 2001 From: wangtsiao Date: Sun, 28 May 2023 00:06:32 +0800 Subject: [PATCH 012/100] add doc and tests for spartan/polynomial.rs 1. Add documentation and test for spartan/polynomial.rs. - EqPolynomial: $\tilde{eq}$ - MultilinearPolynomial: multilinear polynomial - SparsePolynomial 2. Remove duplicate get_bits in SparsePolynomial::evaluate. --- src/gadgets/ecc.rs | 1 + src/spartan/polynomial.rs | 120 +++++++++++++++++++++++++++++++++++--- 2 files changed, 113 insertions(+), 8 deletions(-) diff --git a/src/gadgets/ecc.rs b/src/gadgets/ecc.rs index 0f982cc0b..04495e500 100644 --- a/src/gadgets/ecc.rs +++ b/src/gadgets/ecc.rs @@ -423,6 +423,7 @@ where Ok(Self { x, y, is_infinity }) } + #[allow(rustdoc::bare_urls)] /// A gadget for scalar multiplication, optimized to use incomplete addition law. /// The optimization here is analogous to https://github.com/arkworks-rs/r1cs-std/blob/6d64f379a27011b3629cf4c9cb38b7b7b695d5a0/src/groups/curves/short_weierstrass/mod.rs#L295, /// except we use complete addition law over affine coordinates instead of projective coordinates for the tail bits diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 18387f19f..5f2948cd1 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -4,6 +4,19 @@ use ff::PrimeField; use rayon::prelude::*; use serde::{Deserialize, Serialize}; +use crate::spartan::math::Math; + +/// The multilinear extension polynomial, denoted as $\tilde{eq}$, is defined as follows: +/// +/// $$ +/// \tilde{eq}(x, e) = \prod_{i=0}^m(e_i * x_i + (1 - e_i) * (1 - x_i)) +/// $$ +/// +/// This polynomial evaluates to 1 only when each component $x_i$ is equal to its corresponding component $e_i$. +/// Otherwise, it evaluates to 0. 
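+/// For instance, with $e = (1, 1, 0)$ the polynomial is $x_1 \cdot x_2 \cdot (1 - x_3)$.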
+///
+/// The vector r contains all the values of e_i, where e_i represents the individual bits of a binary representation of e.
+/// For example, let's consider e = 6, which in binary is 0b110. In this case, the vector r would be [1, 1, 0].
 pub(crate) struct EqPolynomial<Scalar: PrimeField> {
   r: Vec<Scalar>,
 }
@@ -22,6 +35,8 @@ impl<Scalar: PrimeField> EqPolynomial<Scalar> {
       .fold(Scalar::ONE, |acc, item| acc * item)
   }
 
+  /// Evaluates the polynomial at all the `2^|r|` points, ranging from 0 to `2^|r| - 1`.
+  /// TODO: document this function
   pub fn evals(&self) -> Vec<Scalar> {
     let ell = self.r.len();
     let mut evals: Vec<Scalar> = vec![Scalar::ZERO; (2_usize).pow(ell as u32)];
@@ -46,6 +61,22 @@ impl<Scalar: PrimeField> EqPolynomial<Scalar> {
   }
 }
 
+/// A multilinear extension of a polynomial $Z(\cdot)$, denoted $\tilde{Z}(x_1, ..., x_m)$,
+/// where the degree of each variable is at most one.
+///
+/// This is the dense representation of a multilinear polynomial.
+/// Let it be $\mathbb{G}(\cdot): \mathbb{F}^m \rightarrow \mathbb{F}$; it can be represented uniquely by the list of
+/// evaluations of $\mathbb{G}(\cdot)$ over the Boolean hypercube $\{0, 1\}^m$.
+///
+/// For example, a 3-variable multilinear polynomial can be represented by its evaluations
+/// at the points indexed $0$ to $2^3-1$.
+///
+/// The implementation follows
+/// $$
+/// \tilde{Z}(x_1, ..., x_m) = \sum_{e\in \{0,1\}^m}Z(e)\cdot \prod_{i=1}^m(x_i\cdot e_i + (1-x_i)\cdot(1-e_i))
+/// $$
+///
+/// Vector $Z$ indicates $Z(e)$ where $e$ ranges from $0$ to $2^m-1$.
 #[derive(Debug, Clone, Serialize, Deserialize)]
 pub struct MultilinearPolynomial<Scalar: PrimeField> {
   num_vars: usize, // the number of variables in the multilinear polynomial
@@ -69,6 +100,7 @@ impl<Scalar: PrimeField> MultilinearPolynomial<Scalar> {
     self.Z.len()
   }
 
+  /// TODO: document and test this function
   pub fn bound_poly_var_top(&mut self, r: &Scalar) {
     let n = self.len() / 2;
 
@@ -118,6 +150,12 @@ impl<Scalar: PrimeField> Index<usize> for MultilinearPolynomial<Scalar> {
   }
 }
+/// Sparse multilinear polynomial, meaning that $Z(\cdot)$ is zero at most points.
+/// So we do not have to store every evaluation of $Z(\cdot)$; we only store the non-zero points.
+///
+/// For example, the evaluations are [0, 0, 0, 1, 0, 1, 0, 2].
+/// The sparse polynomial only stores the non-zero values, [(3, 1), (5, 1), (7, 2)].
+/// In each tuple, the first entry is the index and the second is the value.
 pub(crate) struct SparsePolynomial<Scalar: PrimeField> {
   num_vars: usize,
   Z: Vec<(usize, Scalar)>,
 }
@@ -128,6 +166,8 @@ impl<Scalar: PrimeField> SparsePolynomial<Scalar> {
     SparsePolynomial { num_vars, Z }
   }
 
+  /// Computes the $\tilde{eq}$ extension polynomial.
+  /// Returns 1 when a == r, and 0 otherwise.
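+  /// For example, with a = [true, false] and r = [1, 0] the product is r[0] * (1 - r[1]) = 1.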
fn compute_chi(a: &[bool], r: &[Scalar]) -> Scalar { assert_eq!(a.len(), r.len()); let mut chi_i = Scalar::ONE; @@ -145,19 +185,83 @@ impl SparsePolynomial { pub fn evaluate(&self, r: &[Scalar]) -> Scalar { assert_eq!(self.num_vars, r.len()); - let get_bits = |num: usize, num_bits: usize| -> Vec { - (0..num_bits) - .into_par_iter() - .map(|shift_amount| ((num & (1 << (num_bits - shift_amount - 1))) > 0)) - .collect::>() - }; - (0..self.Z.len()) .into_par_iter() .map(|i| { - let bits = get_bits(self.Z[i].0, r.len()); + let bits = (self.Z[i].0).get_bits(r.len()); SparsePolynomial::compute_chi(&bits, r) * self.Z[i].1 }) .reduce(|| Scalar::ZERO, |x, y| x + y) } } + +#[cfg(test)] +mod tests { + use super::*; + + #[derive(PrimeField)] + #[PrimeFieldModulus = "52435875175126190479447740508185965837690552500527637822603658699938581184513"] + #[PrimeFieldGenerator = "7"] + #[PrimeFieldReprEndianness = "little"] + struct Fp([u64; 4]); + + #[test] + fn test_eq_polynomial() { + let ZERO = Fp::from(0); + let ONE = Fp::from(1); + + let eq_poly = EqPolynomial::::new(vec![ONE, ZERO, ONE]); + let y = eq_poly.evaluate(vec![ONE, ONE, ONE].as_slice()); + assert_eq!(y, ZERO); + + let y = eq_poly.evaluate(vec![ONE, ZERO, ONE].as_slice()); + assert_eq!(y, ONE); + + let eval_list = eq_poly.evals(); + for i in 0..(2_usize).pow(3) { + if i == 5 { + assert_eq!(eval_list[i], ONE); + } else { + assert_eq!(eval_list[i], ZERO); + } + } + } + + #[test] + fn test_multilinear_polynomial() { + // Let the polynomial has 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * x_3 + // Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, 0, 1, 0, 2]. + + let ZERO = Fp::from(0); + let ONE = Fp::from(1); + let TWO = Fp::from(2); + + let Z = vec![ZERO, ZERO, ZERO, ONE, ZERO, ONE, ZERO, TWO]; + let m_poly = MultilinearPolynomial::::new(Z.clone()); + assert_eq!(m_poly.get_num_vars(), 3); + + let x = vec![ONE, ONE, ONE]; + assert_eq!(m_poly.evaluate(x.as_slice()), TWO); + + let y = MultilinearPolynomial::::evaluate_with(Z.as_slice(), x.as_slice()); + assert_eq!(y, TWO); + } + + #[test] + fn test_sparse_polynomial() { + // Let the polynomial has 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * x_3 + // Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, 0, 1, 0, 2]. + + let ZERO = Fp::from(0); + let ONE = Fp::from(1); + let TWO = Fp::from(2); + let Z = vec![(3, ONE), (5, ONE), (7, TWO)]; + let m_poly = SparsePolynomial::::new(3, Z); + + let x = vec![ONE, ONE, ONE]; + assert_eq!(m_poly.evaluate(x.as_slice()), TWO); + + let x = vec![ONE, ZERO, ONE]; + assert_eq!(m_poly.evaluate(x.as_slice()), ONE); + } +} \ No newline at end of file From 5223feaaafd6a5065fec28bb8f5b12fb219abcf3 Mon Sep 17 00:00:00 2001 From: wangtsiao Date: Wed, 31 May 2023 22:17:08 +0800 Subject: [PATCH 013/100] use pasta instead of defining new fp --- src/gadgets/ecc.rs | 3 +-- src/spartan/polynomial.rs | 46 ++++++++++++++------------------------- 2 files changed, 17 insertions(+), 32 deletions(-) diff --git a/src/gadgets/ecc.rs b/src/gadgets/ecc.rs index 04495e500..250449401 100644 --- a/src/gadgets/ecc.rs +++ b/src/gadgets/ecc.rs @@ -423,9 +423,8 @@ where Ok(Self { x, y, is_infinity }) } - #[allow(rustdoc::bare_urls)] /// A gadget for scalar multiplication, optimized to use incomplete addition law. 
- /// The optimization here is analogous to https://github.com/arkworks-rs/r1cs-std/blob/6d64f379a27011b3629cf4c9cb38b7b7b695d5a0/src/groups/curves/short_weierstrass/mod.rs#L295, + /// The optimization here is analogous to github.com/arkworks-rs/r1cs-std/blob/6d64f379a27011b3629cf4c9cb38b7b7b695d5a0/src/groups/curves/short_weierstrass/mod.rs#L295, /// except we use complete addition law over affine coordinates instead of projective coordinates for the tail bits pub fn scalar_mul>( &self, diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 5f2948cd1..27cd456a9 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -36,7 +36,6 @@ impl EqPolynomial { } /// Evaluates the polynomial at all the `2^|r|` points, ranging from 0 to `2^|r| - 1`. - /// TODO: document this function pub fn evals(&self) -> Vec { let ell = self.r.len(); let mut evals: Vec = vec![Scalar::ZERO; (2_usize).pow(ell as u32)]; @@ -100,7 +99,6 @@ impl MultilinearPolynomial { self.Z.len() } - /// TODO: document and test this function pub fn bound_poly_var_top(&mut self, r: &Scalar) { let n = self.len() / 2; @@ -198,31 +196,23 @@ impl SparsePolynomial { #[cfg(test)] mod tests { use super::*; - - #[derive(PrimeField)] - #[PrimeFieldModulus = "52435875175126190479447740508185965837690552500527637822603658699938581184513"] - #[PrimeFieldGenerator = "7"] - #[PrimeFieldReprEndianness = "little"] - struct Fp([u64; 4]); + use pasta_curves::Fp; #[test] fn test_eq_polynomial() { - let ZERO = Fp::from(0); - let ONE = Fp::from(1); - - let eq_poly = EqPolynomial::::new(vec![ONE, ZERO, ONE]); - let y = eq_poly.evaluate(vec![ONE, ONE, ONE].as_slice()); - assert_eq!(y, ZERO); + let eq_poly = EqPolynomial::::new(vec![Fp::one(), Fp::zero(), Fp::one()]); + let y = eq_poly.evaluate(vec![Fp::one(), Fp::one(), Fp::one()].as_slice()); + assert_eq!(y, Fp::zero()); - let y = eq_poly.evaluate(vec![ONE, ZERO, ONE].as_slice()); - assert_eq!(y, ONE); + let y = eq_poly.evaluate(vec![Fp::one(), Fp::zero(), Fp::one()].as_slice()); + assert_eq!(y, Fp::one()); let eval_list = eq_poly.evals(); for i in 0..(2_usize).pow(3) { if i == 5 { - assert_eq!(eval_list[i], ONE); + assert_eq!(eval_list[i], Fp::one()); } else { - assert_eq!(eval_list[i], ZERO); + assert_eq!(eval_list[i], Fp::zero()); } } } @@ -232,15 +222,13 @@ mod tests { // Let the polynomial has 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * x_3 // Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, 0, 1, 0, 2]. - let ZERO = Fp::from(0); - let ONE = Fp::from(1); let TWO = Fp::from(2); - let Z = vec![ZERO, ZERO, ZERO, ONE, ZERO, ONE, ZERO, TWO]; + let Z = vec![Fp::zero(), Fp::zero(), Fp::zero(), Fp::one(), Fp::zero(), Fp::one(), Fp::zero(), TWO]; let m_poly = MultilinearPolynomial::::new(Z.clone()); assert_eq!(m_poly.get_num_vars(), 3); - let x = vec![ONE, ONE, ONE]; + let x = vec![Fp::one(), Fp::one(), Fp::one()]; assert_eq!(m_poly.evaluate(x.as_slice()), TWO); let y = MultilinearPolynomial::::evaluate_with(Z.as_slice(), x.as_slice()); @@ -252,16 +240,14 @@ mod tests { // Let the polynomial has 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * x_3 // Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, 0, 1, 0, 2]. 
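    // Worked check (sketch), assuming the most-significant-bit-first index decomposition
    // used by `get_bits`: i = 3 -> (0, 1, 1), i = 5 -> (1, 0, 1), i = 7 -> (1, 1, 1), so
    // p evaluates to (0 + 1) * 1 = 1, (1 + 0) * 1 = 1 and (1 + 1) * 1 = 2 respectively,
    // matching the evaluation vector [0, 0, 0, 1, 0, 1, 0, 2].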
- let ZERO = Fp::from(0); - let ONE = Fp::from(1); let TWO = Fp::from(2); - let Z = vec![(3, ONE), (5, ONE), (7, TWO)]; + let Z = vec![(3, Fp::one()), (5, Fp::one()), (7, TWO)]; let m_poly = SparsePolynomial::::new(3, Z); - - let x = vec![ONE, ONE, ONE]; + + let x = vec![Fp::one(), Fp::one(), Fp::one()]; assert_eq!(m_poly.evaluate(x.as_slice()), TWO); - let x = vec![ONE, ZERO, ONE]; - assert_eq!(m_poly.evaluate(x.as_slice()), ONE); + let x = vec![Fp::one(), Fp::zero(), Fp::one()]; + assert_eq!(m_poly.evaluate(x.as_slice()), Fp::one()); } -} \ No newline at end of file +} From 6a454891d95bf6a6b12c546c9922b9d2d3520821 Mon Sep 17 00:00:00 2001 From: wangtsiao Date: Thu, 1 Jun 2023 10:55:28 +0800 Subject: [PATCH 014/100] wrap link with <> in comment --- src/gadgets/ecc.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/gadgets/ecc.rs b/src/gadgets/ecc.rs index 250449401..bb17ee7f4 100644 --- a/src/gadgets/ecc.rs +++ b/src/gadgets/ecc.rs @@ -424,7 +424,7 @@ where } /// A gadget for scalar multiplication, optimized to use incomplete addition law. - /// The optimization here is analogous to github.com/arkworks-rs/r1cs-std/blob/6d64f379a27011b3629cf4c9cb38b7b7b695d5a0/src/groups/curves/short_weierstrass/mod.rs#L295, + /// The optimization here is analogous to , /// except we use complete addition law over affine coordinates instead of projective coordinates for the tail bits pub fn scalar_mul>( &self, From 9598d1d03ef1163af2f0f01627320c3b095a5fe5 Mon Sep 17 00:00:00 2001 From: oskarth Date: Mon, 5 Jun 2023 12:37:28 +0800 Subject: [PATCH 015/100] fmt(spartan): cargo fmt --- Cargo.toml | 1 + src/spartan/polynomial.rs | 27 ++++++++++++++++++--------- 2 files changed, 19 insertions(+), 9 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index d429d6002..6eae31f6d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -72,4 +72,5 @@ portable = ["pasta-msm/portable"] cuda = ["neptune/cuda", "neptune/pasta", "neptune/arity24"] opencl = ["neptune/opencl", "neptune/pasta", "neptune/arity24"] hypernova = [] + flamegraph = ["pprof/flamegraph", "pprof/criterion"] diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 27cd456a9..719db145a 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -14,7 +14,7 @@ use crate::spartan::math::Math; /// /// This polynomial evaluates to 1 only when each component $x_i$ is equal to its corresponding component $e_i$. /// Otherwise, it evaluates to 0. -/// +/// /// The vector r contains all the values of e_i, where e_i represents the individual bits of a binary representation of e. /// For example, let's consider e = 6, which in binary is 0b110. In this case, the vector r would be [1, 1, 0]. pub(crate) struct EqPolynomial { @@ -62,19 +62,19 @@ impl EqPolynomial { /// A multilinear extension of a polynomial $Z(\cdot)$, donate it as $\tilde{Z}(x_1, ..., x_m)$ /// where the degree of each variable is at most one. -/// +/// /// This is the dense representation of a multilinear poynomial. -/// Let it be $\mathbb{G}(\cdot): \mathbb{F}^m \rightarrow \mathbb{F}$, it can be represented uniquely by the list of +/// Let it be $\mathbb{G}(\cdot): \mathbb{F}^m \rightarrow \mathbb{F}$, it can be represented uniquely by the list of /// evaluations of $\mathbb{G}(\cdot)$ over the Boolean hypercube $\{0, 1\}^m$. -/// +/// /// For example, a 3 variables multilinear polynomial can be represented by evaluation /// at points $[0, 2^3-1]$. 
-/// -/// The implementation follows +/// +/// The implementation follows /// $$ /// \tilde{Z}(x_1, ..., x_m) = \sum_{e\in {0,1}^m}Z(e)\cdot \prod_{i=0}^m(x_i\cdot e_i)\cdot (1-e_i) /// $$ -/// +/// /// Vector $Z$ indicates $Z(e)$ where $e$ ranges from $0$ to $2^m-1$. #[derive(Debug, Clone, Serialize, Deserialize)] pub struct MultilinearPolynomial { @@ -150,7 +150,7 @@ impl Index for MultilinearPolynomial { /// Sparse multilinear polynomial, which means the $Z(\cdot)$ is zero at most points. /// So we do not have to store every evaluations of $Z(\cdot)$, only store the non-zero points. -/// +/// /// For example, the evaluations are [0, 0, 0, 1, 0, 1, 0, 2]. /// The sparse polynomial only store the non-zero values, [(3, 1), (5, 1), (7, 2)]. /// In the tuple, the first is index, the second is value. @@ -224,7 +224,16 @@ mod tests { let TWO = Fp::from(2); - let Z = vec![Fp::zero(), Fp::zero(), Fp::zero(), Fp::one(), Fp::zero(), Fp::one(), Fp::zero(), TWO]; + let Z = vec![ + Fp::zero(), + Fp::zero(), + Fp::zero(), + Fp::one(), + Fp::zero(), + Fp::one(), + Fp::zero(), + TWO, + ]; let m_poly = MultilinearPolynomial::::new(Z.clone()); assert_eq!(m_poly.get_num_vars(), 3); From 64b7bd7931705c8318b4e8c4e53fc6e40bfc0f4b Mon Sep 17 00:00:00 2001 From: CPerezz Date: Mon, 5 Jun 2023 12:20:07 +0200 Subject: [PATCH 016/100] Remove: Clippy unused fn lint --- src/lib.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/lib.rs b/src/lib.rs index 959bdcbe1..b4057c7a2 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,7 @@ //! This library implements Nova, a high-speed recursive SNARK. #![deny( warnings, - unused, + //unused, future_incompatible, nonstandard_style, rust_2018_idioms, From 8db47a5984b35f7d5b400a538b73059fad3abb2f Mon Sep 17 00:00:00 2001 From: CPerezz Date: Mon, 5 Jun 2023 12:20:46 +0200 Subject: [PATCH 017/100] change: Impl SparseMatrix type --- src/utils.rs | 47 ++++++++++++++++++++++++++++++++++++++--------- 1 file changed, 38 insertions(+), 9 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 432c48955..524c016af 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,6 +1,35 @@ //! Basic utils use crate::errors::NovaError; -use ff::PrimeField; +use crate::traits::Group; +use ff::{Field, PrimeField}; +use serde::{Deserialize, Serialize}; + +/// A matrix structure represented on a sparse form. 
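// A quick illustration of the triplet encoding (sketch): the 2x3 matrix
// [[1, 0, 2], [0, 3, 0]] would be stored as [(0, 0, 1), (0, 2, 2), (1, 1, 3)],
// one (row index, column index, value) entry per non-zero element.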
+#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct SparseMatrix(pub(crate) Vec<(usize, usize, G::Scalar)>); + +impl SparseMatrix { + pub fn new() -> Self { + Self(vec![]) + } + + pub fn with_capacity(n: usize) -> Self { + Self(Vec::with_capacity(n)) + } +} + +impl From> for SparseMatrix { + fn from(matrix: Vec<(usize, usize, G::Scalar)>) -> SparseMatrix { + SparseMatrix(matrix) + } +} + +impl From<&Vec<(usize, usize, G::Scalar)>> for SparseMatrix { + fn from(matrix: &Vec<(usize, usize, G::Scalar)>) -> SparseMatrix { + SparseMatrix(matrix.clone()) + } +} pub fn vector_add(a: &Vec, b: &Vec) -> Result, NovaError> { if a.len() != b.len() { @@ -51,22 +80,22 @@ pub fn matrix_vector_product( // Matrix vector product where matrix is sparse // First element is row index, second column, third value stored -pub fn matrix_vector_product_sparse( - matrix: &Vec<(usize, usize, F)>, - vector: &Vec, -) -> Result, NovaError> { - if matrix.len() == 0 { +pub fn matrix_vector_product_sparse( + matrix: &SparseMatrix, + vector: &Vec, +) -> Result, NovaError> { + if matrix.0.len() == 0 { return Err(NovaError::InvalidIndex); } // Find the maximum row index in the matrix - let max_row = matrix.iter().map(|r| r.0).max().unwrap() + 1; + let max_row = matrix.0.iter().map(|r| r.0).max().unwrap() + 1; if max_row > vector.len() { return Err(NovaError::InvalidIndex); } - let mut res = vec![F::ZERO; max_row]; - for &(row, col, value) in matrix { + let mut res = vec![G::Scalar::ZERO; max_row]; + for &(row, col, value) in matrix.0.iter() { res[row] += value * vector[col]; } From ac57cb62523eaa8b9b55a2c8ead41904065981ca Mon Sep 17 00:00:00 2001 From: CPerezz Date: Mon, 5 Jun 2023 12:23:38 +0200 Subject: [PATCH 018/100] change: Adopt SparseMatrix type and make `pad` recieve &mut --- src/ccs.rs | 57 +++++++++++++++++++++--------------------------------- src/lib.rs | 2 +- 2 files changed, 23 insertions(+), 36 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 6a0cf307f..aeae52c33 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -42,11 +42,12 @@ pub struct CCS { /// Unlike R1CS we have a list of matrices M instead of only A, B, C /// We also have t, q, d constants and c (vector), S (set) #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] pub struct CCSShape { pub(crate) num_cons: usize, pub(crate) num_vars: usize, pub(crate) num_io: usize, - pub(crate) M: Vec>, + pub(crate) M: Vec>, pub(crate) t: usize, pub(crate) q: usize, pub(crate) d: usize, @@ -75,7 +76,7 @@ impl CCS { pub fn commitment_key(S: &CCSShape) -> CommitmentKey { let num_cons = S.num_cons; let num_vars = S.num_vars; - let total_nz = S.M.iter().fold(0, |acc, m| acc + m.len()); + let total_nz = S.M.iter().fold(0, |acc, m| acc + m.0.len()); G::CE::setup(b"ck", max(max(num_cons, num_vars), total_nz)) } @@ -133,11 +134,14 @@ impl CCSShape { return Err(NovaError::OddInputLength); } + // We collect the matrixes. 
+ let M: Vec> = M.iter().map(|m| SparseMatrix::from(m)).collect(); + let shape = CCSShape { num_cons, num_vars, num_io, - M: M.to_vec(), + M, t, q, d, @@ -163,7 +167,7 @@ impl CCSShape { assert_eq!(W.W.len(), self.num_vars); assert_eq!(U.X.len(), self.num_io); - let m = self.M[0].len(); + let m = self.M[0].0.len(); // Sage code to check CCS relation: // @@ -222,7 +226,7 @@ impl CCSShape { num_cons: r1cs.num_cons, num_vars: r1cs.num_vars, num_io: r1cs.num_io, - M: vec![r1cs.A, r1cs.B, r1cs.C], + M: vec![r1cs.A.into(), r1cs.B.into(), r1cs.C.into()], t: T, q: Q, d: D, @@ -233,19 +237,19 @@ impl CCSShape { /// Pads the R1CSShape so that the number of variables is a power of two /// Renumbers variables to accomodate padded variables - pub fn pad(&self) -> Self { + pub fn pad(&mut self) { // equalize the number of variables and constraints let m = max(self.num_vars, self.num_cons).next_power_of_two(); // check if the provided R1CSShape is already as required if self.num_vars == m && self.num_cons == m { - return self.clone(); + return; } // check if the number of variables are as expected, then // we simply set the number of constraints to the next power of two if self.num_vars == m { - return CCSShape { + *self = CCSShape { num_cons: m, num_vars: m, num_io: self.num_io, @@ -260,38 +264,21 @@ impl CCSShape { // otherwise, we need to pad the number of variables and renumber variable accesses let num_vars_padded = m; - let num_cons_padded = m; - let apply_pad = |M: &[(usize, usize, G::Scalar)]| -> Vec<(usize, usize, G::Scalar)> { - M.par_iter() - .map(|(r, c, v)| { - ( - *r, - if c >= &self.num_vars { - c + num_vars_padded - self.num_vars - } else { - *c - }, - *v, - ) - }) - .collect::>() + let apply_pad = |M: &mut SparseMatrix| { + M.0.par_iter_mut().for_each(|(_, c, _)| { + *c = if *c >= self.num_vars { + *c + num_vars_padded - self.num_vars + } else { + *c + }; + }); }; // Apply pad for each matrix in M - let M_padded = self.M.iter().map(|m| apply_pad(m)).collect::>(); + let mut M_padded = self.M.clone(); + M_padded.iter_mut().for_each(|m| apply_pad(m)); // TODO: Sanity check if CCS padding is correct here - CCSShape { - num_cons: num_cons_padded, - num_vars: num_vars_padded, - num_io: self.num_io, - M: M_padded, - t: self.t, - q: self.q, - d: self.d, - S: self.S.clone(), - c: self.c.clone(), - } } } diff --git a/src/lib.rs b/src/lib.rs index b4057c7a2..7e9e0404e 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,6 +1,6 @@ //! This library implements Nova, a high-speed recursive SNARK. #![deny( - warnings, + //warnings, //unused, future_incompatible, nonstandard_style, From d3737537c8ac033381a7aaf368ef90d6fc02a6d1 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Mon, 5 Jun 2023 14:09:10 +0200 Subject: [PATCH 019/100] Fix: Update tests to match new trait bounds --- src/ccs.rs | 1 - src/utils.rs | 4 ++-- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index aeae52c33..aba383290 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -26,7 +26,6 @@ use rayon::prelude::*; use serde::{Deserialize, Serialize}; use sha3::{Digest, Sha3_256}; -// TODO: Create a SparseMatrix type? 
Vec<(usize, usize, G::Scalar)> // TODO: Committed CCS using MLE (see src/spartan/pp.rs) // TODO: Linearized CCS struct and methods, separate struct similar to RelaxedR1CS diff --git a/src/utils.rs b/src/utils.rs index 524c016af..9a36578ea 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -133,7 +133,7 @@ pub fn to_F_matrix_sparse(m: Vec<(usize, usize, u64)>) -> Vec<(us #[cfg(test)] mod tests { use super::*; - use pasta_curves::Fq; + use pasta_curves::{Ep, Fq}; #[test] fn test_vector_add() { @@ -183,7 +183,7 @@ mod tests { let vector = vec![1, 2, 3]; let A = to_F_matrix_sparse::(matrix); let z = to_F_vec::(vector); - let res = matrix_vector_product_sparse(&A, &z).unwrap(); + let res = matrix_vector_product_sparse::(&(A.into()), &z).unwrap(); assert_eq!(res, to_F_vec::(vec![14, 32])); } From ed36b4aecc9e48e6a593a81f24d75989180bd2b0 Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 7 Jun 2023 10:17:38 +0800 Subject: [PATCH 020/100] feat(ccs): Sketch out basic CCCS/LCCCS associated data structures Also add some comments --- src/ccs.rs | 41 +++++++++++++++++++++++++++++++++++++++++ 1 file changed, 41 insertions(+) diff --git a/src/ccs.rs b/src/ccs.rs index aba383290..76418d7df 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -57,6 +57,7 @@ pub struct CCSShape { /// A type that holds a witness for a given CCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct CCSWitness { + // Vector W in F^{n - l - 1} W: Vec, } @@ -65,8 +66,48 @@ pub struct CCSWitness { #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] pub struct CCSInstance { + // XXX: Move commitment out of CCSInstance for more clean conceptual separation? + // (Pedersen) Commitment to a witness pub(crate) comm_W: Commitment, + + // Public input x in F^l + pub(crate) X: Vec, +} + +// NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` +/// A type that holds a CCCS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CCCSInstance { + // Commitment to a multilinear polynomial in s' - 1 variables + pub(crate) C: Commitment, + + // $x in F^l$ + pub(crate) X: Vec, +} + +/// A type that holds a witness for a given CCCS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct CCCSWitness { + // Multilinear polynomial w_mle in s' - 1 variables + pub(crate) w_mle: Vec, +} + +// NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` +/// A type that holds a LCCCS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct LCCCSInstance { + pub(crate) C: Commitment, pub(crate) X: Vec, + pub(crate) u: G::Scalar, + pub(crate) v: Vec, +} + +/// A type that holds a witness for a given LCCCS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct LCCCSWitness { + pub(crate) w_mle: Vec, } impl CCS { From 0262dc6673ac3c57170813f23169ea40b986882c Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 7 Jun 2023 11:18:06 +0800 Subject: [PATCH 021/100] feat(utils): Add SparseMatrix n_rows/n_cols utils --- src/ccs.rs | 1 + src/utils.rs | 39 +++++++++++++++++++++++++++++++++++++++ 2 files changed, 40 insertions(+) diff --git a/src/ccs.rs b/src/ccs.rs index 76418d7df..130faf328 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -207,6 +207,7 @@ impl CCSShape { assert_eq!(W.W.len(), self.num_vars); assert_eq!(U.X.len(), self.num_io); + // FIXME: Think this is wrong? 
With a SparseMatrix representation of M we need to do something different here let m = self.M[0].0.len(); // Sage code to check CCS relation: diff --git a/src/utils.rs b/src/utils.rs index 9a36578ea..685c21c07 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -5,6 +5,7 @@ use ff::{Field, PrimeField}; use serde::{Deserialize, Serialize}; /// A matrix structure represented on a sparse form. +/// First element is row index, second column, third value stored #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] pub struct SparseMatrix(pub(crate) Vec<(usize, usize, G::Scalar)>); @@ -17,6 +18,16 @@ impl SparseMatrix { pub fn with_capacity(n: usize) -> Self { Self(Vec::with_capacity(n)) } + + // Find the maximum row index in the matrix + pub fn n_rows(&self) -> usize { + self.0.iter().map(|r| r.0).max().unwrap() + 1 + } + + // Find the maximum column index in the matrix + pub fn n_cols(&self) -> usize { + self.0.iter().map(|r| r.1).max().unwrap() + 1 + } } impl From> for SparseMatrix { @@ -187,4 +198,32 @@ mod tests { assert_eq!(res, to_F_vec::(vec![14, 32])); } + + #[test] + fn test_sparse_matrix_n_rows() { + let matrix = vec![ + (0, 0, 1), + (0, 1, 2), + (0, 2, 3), + (1, 0, 4), + (1, 1, 5), + (1, 2, 6), + ]; + let A: SparseMatrix = to_F_matrix_sparse::(matrix).into(); + assert_eq!(A.n_rows(), 2); + } + + #[test] + fn test_sparse_matrix_n_cols() { + let matrix = vec![ + (0, 0, 1), + (0, 1, 2), + (0, 2, 3), + (1, 0, 4), + (1, 1, 5), + (1, 2, 6), + ]; + let A: SparseMatrix = to_F_matrix_sparse::(matrix).into(); + assert_eq!(A.n_cols(), 3); + } } From f1a8834c8c5eab074e0d5fbe76cdbf1e1c7e3344 Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 7 Jun 2023 11:30:16 +0800 Subject: [PATCH 022/100] fix(ccs): Calculcate m n_rows correctly for a set of sparse matrices --- src/ccs.rs | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 130faf328..ee5f8fad7 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -207,8 +207,12 @@ impl CCSShape { assert_eq!(W.W.len(), self.num_vars); assert_eq!(U.X.len(), self.num_io); - // FIXME: Think this is wrong? 
With a SparseMatrix representation of M we need to do something different here - let m = self.M[0].0.len(); + // NOTE: All matricies have the same number of rows, but in a SparseMatrix we need to check all of them + // Can probably be made more efficient by keeping track fo n_rows/n_cols at creation/insert time + let m = self + .M + .iter() + .fold(0, |acc, matrix| max(acc, matrix.n_rows())); // Sage code to check CCS relation: // From d1e63a0a2c2e78473769e395db39899c3d0d45bc Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 7 Jun 2023 12:19:39 +0800 Subject: [PATCH 023/100] feat(ccs): Add m, n, s and s_prime to CCS struct --- src/ccs.rs | 47 +++++++++++++++++++++++++++++++++++++++++++++- src/spartan/mod.rs | 4 ---- 2 files changed, 46 insertions(+), 5 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index ee5f8fad7..60ee0419c 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -3,6 +3,7 @@ #![allow(dead_code)] #![allow(clippy::type_complexity)] +use crate::spartan::math::Math; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, @@ -36,22 +37,32 @@ pub struct CCS { _p: PhantomData, } -// NOTE: Currently m, n are implicit, could possibly infer from M /// A type that holds the shape of a CCS instance /// Unlike R1CS we have a list of matrices M instead of only A, B, C /// We also have t, q, d constants and c (vector), S (set) +/// As well as m, n, s, s_prime #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] pub struct CCSShape { pub(crate) num_cons: usize, pub(crate) num_vars: usize, pub(crate) num_io: usize, + pub(crate) M: Vec>, pub(crate) t: usize, pub(crate) q: usize, pub(crate) d: usize, pub(crate) S: Vec>, pub(crate) c: Vec, + + // m is the number of columns in M_i + pub(crate) m: usize, + // n is the number of rows in M_i + pub(crate) n: usize, + // s = log m + pub(crate) s: usize, + // s_prime = log n + pub(crate) s_prime: usize, } /// A type that holds a witness for a given CCS instance @@ -177,6 +188,14 @@ impl CCSShape { // We collect the matrixes. 
let M: Vec> = M.iter().map(|m| SparseMatrix::from(m)).collect(); + // NOTE: All matricies have the same number of rows, but in a SparseMatrix we need to check all of them + // Can probably be made more efficient by keeping track fo n_rows/n_cols at creation/insert time + let m = M.iter().fold(0, |acc, matrix| max(acc, matrix.n_rows())); + let n = M.iter().fold(0, |acc, matrix| max(acc, matrix.n_cols())); + + let s = m.log_2() as usize; + let s_prime = n.log_2() as usize; + let shape = CCSShape { num_cons, num_vars, @@ -187,6 +206,10 @@ impl CCSShape { d, S, c, + m, + n, + s, + s_prime, }; Ok(shape) @@ -267,6 +290,19 @@ impl CCSShape { const C0: i32 = 1; const C1: i32 = -1; + // NOTE: All matricies have the same number of rows, but in a SparseMatrix we need to check all of them + // TODO: Consider using SparseMatrix type in R1CSShape too + // XXX: This can probably be made a lot better + let A: SparseMatrix = r1cs.A.clone().into(); + let B: SparseMatrix = r1cs.B.clone().into(); + let C: SparseMatrix = r1cs.C.clone().into(); + + let m = max(A.n_rows(), max(B.n_rows(), C.n_rows())); + let n = max(A.n_cols(), max(B.n_cols(), C.n_cols())); + + let s = m.log_2() as usize; + let s_prime = n.log_2() as usize; + Self { num_cons: r1cs.num_cons, num_vars: r1cs.num_vars, @@ -277,12 +313,17 @@ impl CCSShape { d: D, S: vec![S1.to_vec(), S2.to_vec()], c: vec![C0 as usize, C1 as usize], + m: m, + n: n, + s: s, + s_prime: s_prime, } } /// Pads the R1CSShape so that the number of variables is a power of two /// Renumbers variables to accomodate padded variables pub fn pad(&mut self) { + // XXX: Is this definitely always the same as m number of rows? // equalize the number of variables and constraints let m = max(self.num_vars, self.num_cons).next_power_of_two(); @@ -304,6 +345,10 @@ impl CCSShape { d: self.d, S: self.S.clone(), c: self.c.clone(), + m: self.m, + n: self.n, + s: self.s, + s_prime: self.s_prime, }; } diff --git a/src/spartan/mod.rs b/src/spartan/mod.rs index a8ef0223c..5edbddf01 100644 --- a/src/spartan/mod.rs +++ b/src/spartan/mod.rs @@ -1,9 +1,5 @@ //! This module implements RelaxedR1CSSNARKTrait using Spartan that is generic //! over the polynomial commitment and evaluation argument (i.e., a PCS) -//! We provide two implementations, one in snark.rs (which does not use any preprocessing) -//! and another in ppsnark.rs (which uses preprocessing to keep the verifier's state small if the PCS scheme provides a succinct verifier) -//! We also provide direct.rs that allows proving a step circuit directly with either of the two SNARKs. -pub mod direct; mod math; pub(crate) mod polynomial; pub mod ppsnark; From 0192d6362c4e75df4ec338574855b102d0bbba68 Mon Sep 17 00:00:00 2001 From: oskarth Date: Mon, 12 Jun 2023 18:08:43 +0800 Subject: [PATCH 024/100] WIP CCCS --- src/ccs.rs | 25 ++++++++++++++++++++++++- 1 file changed, 24 insertions(+), 1 deletion(-) diff --git a/src/ccs.rs b/src/ccs.rs index 60ee0419c..3916f8ba6 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -72,7 +72,7 @@ pub struct CCSWitness { W: Vec, } -// TODO: Make sure this is in the right form for committed CCS using MLE, possibly a separate type? 
+// TODO: Make sure this is in the right form for committed CCS using MLE, possibly a separate type /// A type that holds an CCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] @@ -85,6 +85,29 @@ pub struct CCSInstance { pub(crate) X: Vec, } + +/// A type that holds the shape of a Committed CCS (CCCS) instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CCCSShape { + // Sequence of sparse MLE polynomials in s+s' variables M_MLE1, ..., M_MLEt + // TODO This should be MLE + // XXX Here atm - look at other example see how it is + pub(crate) M_MLE: Vec>, + + + // XXX Embed CCS directly here or do a flat structure? + // pub(crate) ccs: CCS, + + // q multisets S (same as CCS) + // q constants c (same as CCS) + +} + +/// CCCS Instance is (C, x) +/// CCCS Witness is w _mle + + // NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` /// A type that holds a CCCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] From 49455f8268628c1a885fe23fd8996a97b2230505 Mon Sep 17 00:00:00 2001 From: oskarth Date: Tue, 13 Jun 2023 13:26:41 +0800 Subject: [PATCH 025/100] WIP: CCS MLE stuff --- src/ccs.rs | 49 ++++++++++++++++++++++++++++++++++++++++--------- src/utils.rs | 49 +++++++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 89 insertions(+), 9 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 3916f8ba6..b2a7f30ba 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -4,6 +4,7 @@ #![allow(clippy::type_complexity)] use crate::spartan::math::Math; +use crate::spartan::polynomial::MultilinearPolynomial; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, @@ -85,29 +86,24 @@ pub struct CCSInstance { pub(crate) X: Vec, } - /// A type that holds the shape of a Committed CCS (CCCS) instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] -pub struct CCCSShape { +pub struct CCCSShape { // Sequence of sparse MLE polynomials in s+s' variables M_MLE1, ..., M_MLEt - // TODO This should be MLE - // XXX Here atm - look at other example see how it is + // TODO This should be MLE, but possible we don't have to keep in struct? + // Exists in paper but not multifolding-poc pub(crate) M_MLE: Vec>, - // XXX Embed CCS directly here or do a flat structure? - // pub(crate) ccs: CCS, - + pub(crate) ccs: CCSShape, // q multisets S (same as CCS) // q constants c (same as CCS) - } /// CCCS Instance is (C, x) /// CCCS Witness is w _mle - // NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` /// A type that holds a CCCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -429,6 +425,41 @@ impl CCSInstance { } } +use std::fmt::Debug; + +impl CCSShape { + pub fn multiply_matrices(&self, z: &Vec) -> Result>, NovaError> { + let mut Mz: Vec> = Vec::new(); + for matrix in &self.M { + let product = matrix_vector_product_sparse(matrix, z)?; + Mz.push(product); + } + Ok(Mz) + } +} + +impl CCCSShape { + // TODO: compute_g but based on MLE in `pp.rs` + // Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) + // polynomial over x + //pub fn compute_q(&self, z: &Vec) -> VirtualPolynomial {} + + pub fn compute_q(&self, z: &Vec) -> MultilinearPolynomial { + // XXX: Do we need to instrument this to use s_prime as n_vars somehow? 
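    // For intuition (sketch): with the parameters produced by the R1CS-to-CCS conversion
    // in this file (S = [[0, 1], [2]], c = [1, -1]), the formula above specialises to
    // q(x) = (M_0 z)(x) * (M_1 z)(x) - (M_2 z)(x), i.e. the Hadamard-style Az * Bz - Cz check.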
+ let z_mle = MultilinearPolynomial::new(z.clone()); + assert_eq!(z_mle.get_num_vars(), self.ccs.s_prime); + + // Use matrix_vector_product_sparse to multiple M_i with z + // util Use sparse_matrix_to_mlp; + + // Similar logic in Spartan + // let (mut Az, mut Bz, mut Cz) = pk.S.multiply_vec(&z)?; + //poly_Az: MultilinearPolynomial::new(Az.clone()), + + return MultilinearPolynomial::new(vec![]); + } +} + #[cfg(test)] pub mod test { use super::*; diff --git a/src/utils.rs b/src/utils.rs index 685c21c07..f4fb6ad47 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,5 +1,6 @@ //! Basic utils use crate::errors::NovaError; +use crate::spartan::polynomial::MultilinearPolynomial; use crate::traits::Group; use ff::{Field, PrimeField}; use serde::{Deserialize, Serialize}; @@ -141,6 +142,25 @@ pub fn to_F_matrix_sparse(m: Vec<(usize, usize, u64)>) -> Vec<(us m.iter().map(|x| (x.0, x.1, F::from(x.2))).collect() } +fn sparse_matrix_to_mlp(matrix: &SparseMatrix) -> MultilinearPolynomial { + let n_rows = matrix.n_rows(); + let n_cols = matrix.n_cols(); + + let n_vars: usize = (n_rows + n_cols).next_power_of_two().trailing_zeros() as usize; + + // Create a vector of zeros with size 2^n_vars + let mut vec: Vec = vec![G::Scalar::ZERO; 2_usize.pow(n_vars as u32)]; + + // Assign non-zero entries from the sparse matrix to the vector + for &(i, j, val) in matrix.0.iter() { + let index = i * n_cols + j; // Convert (i, j) into an index for a flat vector + vec[index] = val; + } + + // Convert this vector into a MultilinearPolynomial + MultilinearPolynomial::new(vec) +} + #[cfg(test)] mod tests { use super::*; @@ -226,4 +246,33 @@ mod tests { let A: SparseMatrix = to_F_matrix_sparse::(matrix).into(); assert_eq!(A.n_cols(), 3); } + + // FIXME: Currently fails with thread 'utils::tests::test_sparse_matrix_to_mlp' panicked at 'index out of bounds: the len is 8 but the index is 8', src/utils.rs:157:5 + #[test] + fn test_sparse_matrix_to_mlp() { + let matrix = vec![ + (0, 0, 2), + (0, 1, 3), + (0, 2, 4), + (0, 3, 4), + (1, 0, 4), + (1, 1, 11), + (1, 2, 14), + (1, 3, 14), + (2, 0, 2), + (2, 1, 8), + (2, 2, 17), + (2, 3, 17), + (3, 0, 420), + (3, 1, 4), + (3, 2, 2), + ]; + let A: SparseMatrix = to_F_matrix_sparse::(matrix).into(); + + // Convert the sparse matrix to a multilinear polynomial + let mlp = sparse_matrix_to_mlp(&A); + + // A 4x4 matrix, thus 2bit x 2bit, thus 2^4=16 evals + assert_eq!(mlp.len(), 16); + } } From 1a9244e77f87d97fcb15802e30a13123deda7bca Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 14 Jun 2023 12:15:23 +0800 Subject: [PATCH 026/100] fix(utils): fix sparse_matrix_to_mlp test --- src/utils.rs | 21 +++++++++++++++++---- 1 file changed, 17 insertions(+), 4 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index f4fb6ad47..67bb61627 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -142,11 +142,16 @@ pub fn to_F_matrix_sparse(m: Vec<(usize, usize, u64)>) -> Vec<(us m.iter().map(|x| (x.0, x.1, F::from(x.2))).collect() } -fn sparse_matrix_to_mlp(matrix: &SparseMatrix) -> MultilinearPolynomial { +pub fn sparse_matrix_to_mlp( + matrix: &SparseMatrix, +) -> MultilinearPolynomial { let n_rows = matrix.n_rows(); let n_cols = matrix.n_cols(); - let n_vars: usize = (n_rows + n_cols).next_power_of_two().trailing_zeros() as usize; + // Since n_rows and n_cols already account for 0 indexing, + // The total number of elements would be n_rows * n_cols + let total_elements: usize = n_rows * n_cols; + let n_vars: usize = total_elements.next_power_of_two().trailing_zeros() as usize; // Create a vector of zeros 
with size 2^n_vars let mut vec: Vec = vec![G::Scalar::ZERO; 2_usize.pow(n_vars as u32)]; @@ -157,8 +162,17 @@ fn sparse_matrix_to_mlp(matrix: &SparseMatrix) -> MultilinearPolyno vec[index] = val; } + // Pad to 2^n_vars + let vec_padded: Vec = [ + vec.clone(), + std::iter::repeat(G::Scalar::ZERO) + .take((1 << n_vars) - vec.len()) + .collect(), + ] + .concat(); + // Convert this vector into a MultilinearPolynomial - MultilinearPolynomial::new(vec) + MultilinearPolynomial::new(vec_padded) } #[cfg(test)] @@ -247,7 +261,6 @@ mod tests { assert_eq!(A.n_cols(), 3); } - // FIXME: Currently fails with thread 'utils::tests::test_sparse_matrix_to_mlp' panicked at 'index out of bounds: the len is 8 but the index is 8', src/utils.rs:157:5 #[test] fn test_sparse_matrix_to_mlp() { let matrix = vec![ From 93fc0619fe04dd0ab41ecc30db26ca8b4cce882f Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 14 Jun 2023 13:39:42 +0800 Subject: [PATCH 027/100] refactor(ccs): change c to be Scalar Makes some operations easier --- src/ccs.rs | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index b2a7f30ba..51d06486d 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -54,7 +54,9 @@ pub struct CCSShape { pub(crate) q: usize, pub(crate) d: usize, pub(crate) S: Vec>, - pub(crate) c: Vec, + + // Was: usize + pub(crate) c: Vec, // m is the number of columns in M_i pub(crate) m: usize, @@ -163,7 +165,7 @@ impl CCSShape { q: usize, d: usize, S: Vec>, - c: Vec, + c: Vec, ) -> Result, NovaError> { let is_valid = |num_cons: usize, num_vars: usize, @@ -280,9 +282,7 @@ impl CCSShape { hadamard_output = hadamard_product(&hadamard_output, &mvp)?; } - // XXX: Problem if c[i] is F? - let civ = G::Scalar::from(self.c[i] as u64); - let vep = vector_elem_product(&hadamard_output, &civ)?; + let vep = vector_elem_product(&hadamard_output, &self.c[i])?; r = vector_add(&r, &vep)?; } @@ -306,8 +306,9 @@ impl CCSShape { const D: usize = 2; const S1: [usize; 2] = [0, 1]; const S2: [usize; 1] = [2]; - const C0: i32 = 1; - const C1: i32 = -1; + + let C0 = G::Scalar::ONE; + let C1 = -G::Scalar::ONE; // NOTE: All matricies have the same number of rows, but in a SparseMatrix we need to check all of them // TODO: Consider using SparseMatrix type in R1CSShape too @@ -331,7 +332,7 @@ impl CCSShape { q: Q, d: D, S: vec![S1.to_vec(), S2.to_vec()], - c: vec![C0 as usize, C1 as usize], + c: vec![C0, C1], m: m, n: n, s: s, From dbc0c2278fe29ea350cb8b2c8048d8ad0b27bcae Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 14 Jun 2023 13:56:12 +0800 Subject: [PATCH 028/100] feat(polynomial): Add convenience functions for MLP --- src/spartan/polynomial.rs | 79 ++++++++++++++++++++++++++++++++++++++- 1 file changed, 78 insertions(+), 1 deletion(-) diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 719db145a..15908f182 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -137,6 +137,47 @@ impl MultilinearPolynomial { .map(|(a, b)| a * b) .reduce(|| Scalar::ZERO, |x, y| x + y) } + + // Adds another multilinear polynomial to `self`. + // Assumes the two polynomials have the same number of variables. + pub fn add(&self, other: &Self) -> Result { + if self.get_num_vars() != other.get_num_vars() { + return Err("The two polynomials must have the same number of variables"); + } + + let sum = self + .Z + .par_iter() + .zip(&other.Z) + .map(|(a, b)| *a + b) + .collect(); + + Ok(MultilinearPolynomial::new(sum)) + } + + // Multiplies `self` by a scalar. 
+ pub fn scalar_mul(&self, scalar: &Scalar) -> Self { + let mut new_poly = self.clone(); + for z in &mut new_poly.Z { + *z *= scalar; + } + new_poly + } + + // Multiplies `self` by another multilinear polynomial. + // Assumes the two polynomials have the same number of variables. + pub fn mul(&self, other: &Self) -> Result { + if self.num_vars != other.num_vars { + return Err("The two polynomials must have the same number of variables"); + } + let product = self + .Z + .par_iter() + .zip(&other.Z) + .map(|(a, b)| *a * *b) + .collect(); + Ok(Self::new(product)) + } } impl Index for MultilinearPolynomial { @@ -198,6 +239,13 @@ mod tests { use super::*; use pasta_curves::Fp; + fn make_mlp(len: usize, value: Fp) -> MultilinearPolynomial { + MultilinearPolynomial { + num_vars: len.count_ones() as usize, + Z: vec![value; len], + } +} + #[test] fn test_eq_polynomial() { let eq_poly = EqPolynomial::::new(vec![Fp::one(), Fp::zero(), Fp::one()]); @@ -259,4 +307,33 @@ mod tests { let x = vec![Fp::one(), Fp::zero(), Fp::one()]; assert_eq!(m_poly.evaluate(x.as_slice()), Fp::one()); } -} + + #[test] + fn test_mlp_add() { + let mlp1 = make_mlp(4, Fp::from(3)); + let mlp2 = make_mlp(4, Fp::from(7)); + + let mlp3 = mlp1.add(&mlp2).unwrap(); + + assert_eq!(mlp3.Z, vec![Fp::from(10); 4]); + } + + #[test] + fn test_mlp_scalar_mul() { + let mlp = make_mlp(4, Fp::from(3)); + + let mlp2 = mlp.scalar_mul(&Fp::from(2)); + + assert_eq!(mlp2.Z, vec![Fp::from(6); 4]); + } + + #[test] + fn test_mlp_mul() { + let mlp1 = make_mlp(4, Fp::from(3)); + let mlp2 = make_mlp(4, Fp::from(7)); + + let mlp3 = mlp1.mul(&mlp2).unwrap(); + + assert_eq!(mlp3.Z, vec![Fp::from(21); 4]); + } +} \ No newline at end of file From 48c1ac9056d603ca168fb6013c63228f3334a300 Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 14 Jun 2023 13:56:34 +0800 Subject: [PATCH 029/100] wip(ccs): compute_g function --- src/ccs.rs | 40 +++++++++++++++++++++++++++++++--------- src/utils.rs | 8 ++++++++ 2 files changed, 39 insertions(+), 9 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 51d06486d..68d296ad9 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -440,24 +440,46 @@ impl CCSShape { } impl CCCSShape { - // TODO: compute_g but based on MLE in `pp.rs` + // XXX: Take below and util functions with a grain of salt, need to sanity check + // Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) // polynomial over x - //pub fn compute_q(&self, z: &Vec) -> VirtualPolynomial {} - - pub fn compute_q(&self, z: &Vec) -> MultilinearPolynomial { + pub fn compute_q( + &self, + z: &Vec, + ) -> Result, &'static str> { // XXX: Do we need to instrument this to use s_prime as n_vars somehow? 
let z_mle = MultilinearPolynomial::new(z.clone()); - assert_eq!(z_mle.get_num_vars(), self.ccs.s_prime); + if z_mle.get_num_vars() != self.ccs.s_prime { + return Err("z_mle number of variables does not match ccs.s_prime"); + } + let mut q = MultilinearPolynomial::new(vec![G::Scalar::ZERO; self.ccs.s]); - // Use matrix_vector_product_sparse to multiple M_i with z - // util Use sparse_matrix_to_mlp; + for i in 0..self.ccs.q { + let mut prod = MultilinearPolynomial::new(vec![G::Scalar::ONE; self.ccs.s]); + + for j in &self.ccs.S[i] { + let M_j = sparse_matrix_to_mlp(&self.ccs.M[*j]); + + // TODO: We need to implement this function + let sum_Mz = compute_sum_Mz::(M_j, &z_mle)?; + + // Fold this sum into the running product + prod = prod.mul(&sum_Mz)?; + } + + // Multiply the product by the coefficient c_i + prod = prod.scalar_mul(&self.ccs.c[i]); + + // Add it to the running sum + q = q.add(&prod)?; + } + + Ok(q) // Similar logic in Spartan // let (mut Az, mut Bz, mut Cz) = pk.S.multiply_vec(&z)?; //poly_Az: MultilinearPolynomial::new(Az.clone()), - - return MultilinearPolynomial::new(vec![]); } } diff --git a/src/utils.rs b/src/utils.rs index 67bb61627..70e6cc14f 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -175,6 +175,14 @@ pub fn sparse_matrix_to_mlp( MultilinearPolynomial::new(vec_padded) } +// FIXME: Dummy function +pub fn compute_sum_Mz( + _M_j: MultilinearPolynomial, + _z_mle: &Z, +) -> Result::Scalar>, &'static str> { + Ok(MultilinearPolynomial::new(vec![])) +} + #[cfg(test)] mod tests { use super::*; From 0a473b054f3a222ec608198f45d9ae8b28d1244e Mon Sep 17 00:00:00 2001 From: oskarth Date: Thu, 15 Jun 2023 12:48:38 +0800 Subject: [PATCH 030/100] refactor: use shorthand syntax for add and mul operations --- src/ccs.rs | 5 +- src/spartan/polynomial.rs | 105 +++++++++++++++++++++----------------- 2 files changed, 61 insertions(+), 49 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 68d296ad9..15749532f 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -27,6 +27,7 @@ use itertools::concat; use rayon::prelude::*; use serde::{Deserialize, Serialize}; use sha3::{Digest, Sha3_256}; +use std::ops::{Add, Mul}; // TODO: Committed CCS using MLE (see src/spartan/pp.rs) // TODO: Linearized CCS struct and methods, separate struct similar to RelaxedR1CS @@ -465,14 +466,14 @@ impl CCCSShape { let sum_Mz = compute_sum_Mz::(M_j, &z_mle)?; // Fold this sum into the running product - prod = prod.mul(&sum_Mz)?; + prod = prod.mul(sum_Mz)?; } // Multiply the product by the coefficient c_i prod = prod.scalar_mul(&self.ccs.c[i]); // Add it to the running sum - q = q.add(&prod)?; + q = q.add(prod)?; } Ok(q) diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 15908f182..42b1eb442 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -3,6 +3,7 @@ use core::ops::Index; use ff::PrimeField; use rayon::prelude::*; use serde::{Deserialize, Serialize}; +use std::ops::{Add, Mul}; use crate::spartan::math::Math; @@ -138,23 +139,6 @@ impl MultilinearPolynomial { .reduce(|| Scalar::ZERO, |x, y| x + y) } - // Adds another multilinear polynomial to `self`. - // Assumes the two polynomials have the same number of variables. - pub fn add(&self, other: &Self) -> Result { - if self.get_num_vars() != other.get_num_vars() { - return Err("The two polynomials must have the same number of variables"); - } - - let sum = self - .Z - .par_iter() - .zip(&other.Z) - .map(|(a, b)| *a + b) - .collect(); - - Ok(MultilinearPolynomial::new(sum)) - } - // Multiplies `self` by a scalar. 
pub fn scalar_mul(&self, scalar: &Scalar) -> Self { let mut new_poly = self.clone(); @@ -163,21 +147,6 @@ impl MultilinearPolynomial { } new_poly } - - // Multiplies `self` by another multilinear polynomial. - // Assumes the two polynomials have the same number of variables. - pub fn mul(&self, other: &Self) -> Result { - if self.num_vars != other.num_vars { - return Err("The two polynomials must have the same number of variables"); - } - let product = self - .Z - .par_iter() - .zip(&other.Z) - .map(|(a, b)| *a * *b) - .collect(); - Ok(Self::new(product)) - } } impl Index for MultilinearPolynomial { @@ -234,6 +203,48 @@ impl SparsePolynomial { } } +/// Adds another multilinear polynomial to `self`. +/// Assumes the two polynomials have the same number of variables. +impl Add for MultilinearPolynomial { + type Output = Result; + + fn add(self, other: Self) -> Self::Output { + if self.get_num_vars() != other.get_num_vars() { + return Err("The two polynomials must have the same number of variables"); + } + + let sum: Vec = self + .Z + .iter() + .zip(other.Z.iter()) + .map(|(a, b)| *a + *b) + .collect(); + + Ok(MultilinearPolynomial::new(sum)) + } +} + +/// Multiplies `self` by another multilinear polynomial. +/// Assumes the two polynomials have the same number of variables. +impl Mul for MultilinearPolynomial { + type Output = Result; + + fn mul(self, other: Self) -> Self::Output { + if self.get_num_vars() != other.get_num_vars() { + return Err("The two polynomials must have the same number of variables"); + } + + let product: Vec = self + .Z + .iter() + .zip(other.Z.iter()) + .map(|(a, b)| *a * *b) + .collect(); + + Ok(MultilinearPolynomial::new(product)) + } +} + #[cfg(test)] mod tests { use super::*; @@ -241,10 +252,10 @@ mod tests { fn make_mlp(len: usize, value: Fp) -> MultilinearPolynomial { MultilinearPolynomial { - num_vars: len.count_ones() as usize, - Z: vec![value; len], + num_vars: len.count_ones() as usize, + Z: vec![value; len], } -} + } #[test] fn test_eq_polynomial() { @@ -310,30 +321,30 @@ mod tests { #[test] fn test_mlp_add() { - let mlp1 = make_mlp(4, Fp::from(3)); - let mlp2 = make_mlp(4, Fp::from(7)); + let mlp1 = make_mlp(4, Fp::from(3)); + let mlp2 = make_mlp(4, Fp::from(7)); - let mlp3 = mlp1.add(&mlp2).unwrap(); + let mlp3 = mlp1.add(mlp2).unwrap(); - assert_eq!(mlp3.Z, vec![Fp::from(10); 4]); + assert_eq!(mlp3.Z, vec![Fp::from(10); 4]); } #[test] fn test_mlp_scalar_mul() { - let mlp = make_mlp(4, Fp::from(3)); + let mlp = make_mlp(4, Fp::from(3)); - let mlp2 = mlp.scalar_mul(&Fp::from(2)); + let mlp2 = mlp.scalar_mul(&Fp::from(2)); - assert_eq!(mlp2.Z, vec![Fp::from(6); 4]); + assert_eq!(mlp2.Z, vec![Fp::from(6); 4]); } #[test] fn test_mlp_mul() { - let mlp1 = make_mlp(4, Fp::from(3)); - let mlp2 = make_mlp(4, Fp::from(7)); + let mlp1 = make_mlp(4, Fp::from(3)); + let mlp2 = make_mlp(4, Fp::from(7)); - let mlp3 = mlp1.mul(&mlp2).unwrap(); + let mlp3 = mlp1.mul(mlp2).unwrap(); - assert_eq!(mlp3.Z, vec![Fp::from(21); 4]); + assert_eq!(mlp3.Z, vec![Fp::from(21); 4]); } -} \ No newline at end of file +} From e40474276685c374c9c3a5d6712baad821e78f97 Mon Sep 17 00:00:00 2001 From: oskarth Date: Thu, 15 Jun 2023 13:37:21 +0800 Subject: [PATCH 031/100] feat: Add BooleanHypercube to evaluate multilinear polynomial at given point --- src/spartan/polynomial.rs | 86 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 86 insertions(+) diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 42b1eb442..015755466 100644 --- a/src/spartan/polynomial.rs +++ 
b/src/spartan/polynomial.rs @@ -245,6 +245,68 @@ impl Mul for MultilinearPolynomial { } } +#[derive(Debug)] +pub struct BooleanHypercube { + dimensions: usize, + current: u64, + max: u64, + coefficients: Vec, +} + +impl BooleanHypercube { + pub fn new(dimensions: usize, coefficients: Vec) -> Self { + assert!(coefficients.len() == 2_usize.pow(dimensions as u32)); + + BooleanHypercube { + dimensions, + current: 0, + max: 2_u32.pow(dimensions as u32) as u64, + coefficients, + } + } + + // Evaluate the multilinear polynomial at the given point + pub fn evaluate(&self, point: &[Scalar]) -> Scalar { + assert!(point.len() == self.dimensions); + + let mut result = Scalar::ZERO; + + for i in 0..self.max as usize { + let monomial = self.monomial(i, point); + result = result + self.coefficients[i] * monomial; + } + + result + } + + // This calculates a single monomial of the multilinear polynomial + fn monomial(&self, i: usize, point: &[Scalar]) -> Scalar { + assert!(i < self.max as usize); + let mut result = Scalar::ONE; + + let bits = bit_decompose(i as u64, self.dimensions); + + for j in 0..self.dimensions { + if bits[j] { + result = result * point[j]; + } + } + + result + } +} + +/// Decompose an integer into a binary vector in little endian. +pub fn bit_decompose(input: u64, num_var: usize) -> Vec { + let mut res = Vec::with_capacity(num_var); + let mut i = input; + for _ in 0..num_var { + res.push(i & 1 == 1); + i >>= 1; + } + res +} + #[cfg(test)] mod tests { use super::*; @@ -347,4 +409,28 @@ mod tests { assert_eq!(mlp3.Z, vec![Fp::from(21); 4]); } + + #[test] + fn test_evaluate() { + // Declare the coefficients in the order 1, x, y, xy, z, xz, yz, xyz. + let poly = BooleanHypercube::::new( + 3, + vec![ + Fp::from(0u64), + Fp::from(4u64), + Fp::from(2u64), + Fp::from(0u64), + Fp::from(1u64), + Fp::from(0u64), + Fp::from(0u64), + Fp::from(0u64), + ], + ); + + let point = vec![Fp::from(1u64), Fp::from(1u64), Fp::from(1u64)]; + + // The polynomial would be f(x, y, z) = 4x + 2y + z. + // So, f(1, 1, 1) = 4*1 + 2*1 + 1 = 7. + assert_eq!(poly.evaluate(&point), Fp::from(7u64)); + } } From 987359b3940d9263fd6ef1b2b711c30551b28939 Mon Sep 17 00:00:00 2001 From: oskarth Date: Thu, 15 Jun 2023 13:47:09 +0800 Subject: [PATCH 032/100] refactor: Move Boolean hypercube to separate module --- src/hypercube.rs | 94 +++++++++++++++++++++++++++++++++++++++ src/lib.rs | 2 + src/spartan/polynomial.rs | 86 ----------------------------------- 3 files changed, 96 insertions(+), 86 deletions(-) create mode 100644 src/hypercube.rs diff --git a/src/hypercube.rs b/src/hypercube.rs new file mode 100644 index 000000000..12a50fbd6 --- /dev/null +++ b/src/hypercube.rs @@ -0,0 +1,94 @@ +//! This module defines basic types related to Boolean hypercubes. +/// There's some overlap with polynomial.rs. 
+use ff::PrimeField; + +#[derive(Debug)] +pub struct BooleanHypercube { + dimensions: usize, + current: u64, + max: u64, + coefficients: Vec, +} + +impl BooleanHypercube { + pub fn new(dimensions: usize, coefficients: Vec) -> Self { + assert!(coefficients.len() == 2_usize.pow(dimensions as u32)); + + BooleanHypercube { + dimensions, + current: 0, + max: 2_u32.pow(dimensions as u32) as u64, + coefficients, + } + } + + // Evaluate the multilinear polynomial at the given point + pub fn evaluate(&self, point: &[Scalar]) -> Scalar { + assert!(point.len() == self.dimensions); + + let mut result = Scalar::ZERO; + + for i in 0..self.max as usize { + let monomial = self.monomial(i, point); + result = result + self.coefficients[i] * monomial; + } + + result + } + + // This calculates a single monomial of the multilinear polynomial + fn monomial(&self, i: usize, point: &[Scalar]) -> Scalar { + assert!(i < self.max as usize); + let mut result = Scalar::ONE; + + let bits = bit_decompose(i as u64, self.dimensions); + + for j in 0..self.dimensions { + if bits[j] { + result = result * point[j]; + } + } + + result + } +} + +/// Decompose an integer into a binary vector in little endian. +pub fn bit_decompose(input: u64, num_var: usize) -> Vec { + let mut res = Vec::with_capacity(num_var); + let mut i = input; + for _ in 0..num_var { + res.push(i & 1 == 1); + i >>= 1; + } + res +} + +mod tests { + use super::*; + use pasta_curves::Fp; + + #[test] + fn test_evaluate() { + // Declare the coefficients in the order 1, x, y, xy, z, xz, yz, xyz. + let poly = BooleanHypercube::::new( + 3, + vec![ + Fp::from(0u64), + Fp::from(4u64), + Fp::from(2u64), + Fp::from(0u64), + Fp::from(1u64), + Fp::from(0u64), + Fp::from(0u64), + Fp::from(0u64), + ], + ); + + let point = vec![Fp::from(1u64), Fp::from(1u64), Fp::from(1u64)]; + + // The polynomial would be f(x, y, z) = 4x + 2y + z. + // So, f(1, 1, 1) = 4*1 + 2*1 + 1 = 7. 
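    // For intuition (sketch): since bit_decompose is little endian, coefficient index 1
    // (bits [1, 0, 0]) is the x term, index 2 the y term and index 4 the z term, so the
    // non-zero coefficients 4, 2 and 1 above give exactly f(x, y, z) = 4x + 2y + z.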
+ assert_eq!(poly.evaluate(&point), Fp::from(7u64)); + } +} diff --git a/src/lib.rs b/src/lib.rs index 7e9e0404e..5eb178b9f 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -28,6 +28,8 @@ pub mod traits; #[cfg(feature = "hypernova")] mod ccs; #[cfg(feature = "hypernova")] +mod hypercube; +#[cfg(feature = "hypernova")] mod utils; use crate::bellperson::{ diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 015755466..42b1eb442 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -245,68 +245,6 @@ impl Mul for MultilinearPolynomial { } } -#[derive(Debug)] -pub struct BooleanHypercube { - dimensions: usize, - current: u64, - max: u64, - coefficients: Vec, -} - -impl BooleanHypercube { - pub fn new(dimensions: usize, coefficients: Vec) -> Self { - assert!(coefficients.len() == 2_usize.pow(dimensions as u32)); - - BooleanHypercube { - dimensions, - current: 0, - max: 2_u32.pow(dimensions as u32) as u64, - coefficients, - } - } - - // Evaluate the multilinear polynomial at the given point - pub fn evaluate(&self, point: &[Scalar]) -> Scalar { - assert!(point.len() == self.dimensions); - - let mut result = Scalar::ZERO; - - for i in 0..self.max as usize { - let monomial = self.monomial(i, point); - result = result + self.coefficients[i] * monomial; - } - - result - } - - // This calculates a single monomial of the multilinear polynomial - fn monomial(&self, i: usize, point: &[Scalar]) -> Scalar { - assert!(i < self.max as usize); - let mut result = Scalar::ONE; - - let bits = bit_decompose(i as u64, self.dimensions); - - for j in 0..self.dimensions { - if bits[j] { - result = result * point[j]; - } - } - - result - } -} - -/// Decompose an integer into a binary vector in little endian. -pub fn bit_decompose(input: u64, num_var: usize) -> Vec { - let mut res = Vec::with_capacity(num_var); - let mut i = input; - for _ in 0..num_var { - res.push(i & 1 == 1); - i >>= 1; - } - res -} - #[cfg(test)] mod tests { use super::*; @@ -409,28 +347,4 @@ mod tests { assert_eq!(mlp3.Z, vec![Fp::from(21); 4]); } - - #[test] - fn test_evaluate() { - // Declare the coefficients in the order 1, x, y, xy, z, xz, yz, xyz. - let poly = BooleanHypercube::::new( - 3, - vec![ - Fp::from(0u64), - Fp::from(4u64), - Fp::from(2u64), - Fp::from(0u64), - Fp::from(1u64), - Fp::from(0u64), - Fp::from(0u64), - Fp::from(0u64), - ], - ); - - let point = vec![Fp::from(1u64), Fp::from(1u64), Fp::from(1u64)]; - - // The polynomial would be f(x, y, z) = 4x + 2y + z. - // So, f(1, 1, 1) = 4*1 + 2*1 + 1 = 7. 
- assert_eq!(poly.evaluate(&point), Fp::from(7u64)); - } } From 5ec547a81af8db169f058674d51aeaa4f5d28ab1 Mon Sep 17 00:00:00 2001 From: oskarth Date: Thu, 15 Jun 2023 14:15:02 +0800 Subject: [PATCH 033/100] feat(ccs): compute_sum_Mz --- src/ccs.rs | 32 ++++++++++++++++++++++++++++++-- src/hypercube.rs | 21 +++++++++++++-------- src/utils.rs | 25 +++++++++++++++++++------ 3 files changed, 62 insertions(+), 16 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 15749532f..7955f340b 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -3,8 +3,10 @@ #![allow(dead_code)] #![allow(clippy::type_complexity)] +use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; use crate::spartan::polynomial::MultilinearPolynomial; +use crate::utils::bit_decompose; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, @@ -441,6 +443,33 @@ impl CCSShape { } impl CCCSShape { + // TODO: Sanity check this + pub fn compute_sum_Mz( + &self, + M_j: &MultilinearPolynomial, + z: &MultilinearPolynomial, + s_prime: usize, + ) -> MultilinearPolynomial { + assert_eq!(M_j.get_num_vars(), s_prime); + assert_eq!(z.get_num_vars(), s_prime); + + let num_vars = M_j.get_num_vars(); + let two_to_num_vars = (2_usize).pow(num_vars as u32); + let mut result_coefficients = Vec::with_capacity(two_to_num_vars); + + for i in 0..two_to_num_vars { + let r = bit_decompose(i as u64, num_vars) + .into_iter() + .map(|bit| G::Scalar::from(if bit { 1 } else { 0 })) + .collect::>(); + + let value = M_j.evaluate(&r) * z.evaluate(&r); + result_coefficients.push(value); + } + + MultilinearPolynomial::new(result_coefficients) + } + // XXX: Take below and util functions with a grain of salt, need to sanity check // Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) @@ -462,8 +491,7 @@ impl CCCSShape { for j in &self.ccs.S[i] { let M_j = sparse_matrix_to_mlp(&self.ccs.M[*j]); - // TODO: We need to implement this function - let sum_Mz = compute_sum_Mz::(M_j, &z_mle)?; + let sum_Mz = self.compute_sum_Mz(&M_j, &z_mle, self.ccs.s_prime); // Fold this sum into the running product prod = prod.mul(sum_Mz)?; diff --git a/src/hypercube.rs b/src/hypercube.rs index 12a50fbd6..9d8bfc093 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -1,4 +1,5 @@ //! This module defines basic types related to Boolean hypercubes. +use crate::utils::*; /// There's some overlap with polynomial.rs. use ff::PrimeField; @@ -53,15 +54,19 @@ impl BooleanHypercube { } } -/// Decompose an integer into a binary vector in little endian. 
-pub fn bit_decompose(input: u64, num_var: usize) -> Vec { - let mut res = Vec::with_capacity(num_var); - let mut i = input; - for _ in 0..num_var { - res.push(i & 1 == 1); - i >>= 1; +impl Iterator for BooleanHypercube { + type Item = Vec; + + fn next(&mut self) -> Option { + if self.current > self.max { + None + } else { + let bits = bit_decompose(self.current, self.dimensions); + let point: Vec = bits.iter().map(|&bit| Scalar::from(bit as u64)).collect(); + self.current += 1; + Some(point) + } } - res } mod tests { diff --git a/src/utils.rs b/src/utils.rs index 70e6cc14f..db4d02c7c 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -175,12 +175,25 @@ pub fn sparse_matrix_to_mlp( MultilinearPolynomial::new(vec_padded) } -// FIXME: Dummy function -pub fn compute_sum_Mz( - _M_j: MultilinearPolynomial, - _z_mle: &Z, -) -> Result::Scalar>, &'static str> { - Ok(MultilinearPolynomial::new(vec![])) +pub fn bit_to_index(bits: &[bool]) -> usize { + let mut index = 0; + for (i, &bit) in bits.iter().enumerate() { + if bit { + index += 1 << i; + } + } + index +} + +/// Decompose an integer into a binary vector in little endian. +pub fn bit_decompose(input: u64, num_var: usize) -> Vec { + let mut res = Vec::with_capacity(num_var); + let mut i = input; + for _ in 0..num_var { + res.push(i & 1 == 1); + i >>= 1; + } + res } #[cfg(test)] From ab70853b4cb5dcb352ab18fe655852d264f44d80 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 21 Jun 2023 09:49:19 +0200 Subject: [PATCH 034/100] fix: Correct SparseMatrix aux methods This updates and makes compile the `n_rows`/`n_cols` fns of SparseMatrix. It also updates `is_valid` which is transfromed from a closure into an associated fn for SparseMatrix. --- src/utils.rs | 49 +++++++++++++++++++++++++++++++++++++++++++------ 1 file changed, 43 insertions(+), 6 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index db4d02c7c..ded855397 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -22,12 +22,50 @@ impl SparseMatrix { // Find the maximum row index in the matrix pub fn n_rows(&self) -> usize { - self.0.iter().map(|r| r.0).max().unwrap() + 1 + let max_row_idx = self + .0 + .iter() + .copied() + .map(|r| r.0) + .fold(std::usize::MIN, |a, b| a.max(b)); + max_row_idx + 1 } // Find the maximum column index in the matrix pub fn n_cols(&self) -> usize { - self.0.iter().map(|r| r.1).max().unwrap() + 1 + let max_col_idx = self + .0 + .iter() + .copied() + .map(|r| r.1) + .fold(std::usize::MIN, |a, b| a.max(b)); + max_col_idx + 1 + } + + pub(crate) fn is_valid( + &self, + num_cons: usize, + num_vars: usize, + num_io: usize, + ) -> Result<(), NovaError> { + let res = self + .0 + .iter() + .copied() + .map(|(row, col, _val)| { + if row >= num_cons || col > num_io + num_vars { + Err(NovaError::InvalidIndex) + } else { + Ok(()) + } + }) + .collect::, NovaError>>(); + + if res.is_err() { + Err(NovaError::InvalidIndex) + } else { + Ok(()) + } } } @@ -100,13 +138,12 @@ pub fn matrix_vector_product_sparse( return Err(NovaError::InvalidIndex); } - // Find the maximum row index in the matrix - let max_row = matrix.0.iter().map(|r| r.0).max().unwrap() + 1; - if max_row > vector.len() { + // Ensure we can perform the Matrix x Vec multiplication. 
+ if matrix.n_rows() != vector.len() { return Err(NovaError::InvalidIndex); } - let mut res = vec![G::Scalar::ZERO; max_row]; + let mut res = vec![G::Scalar::ZERO; vector.len()]; for &(row, col, value) in matrix.0.iter() { res[row] += value * vector[col]; } From d0a5160cf4b083be49f845396a5dd967e93b21b2 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 21 Jun 2023 10:11:02 +0200 Subject: [PATCH 035/100] fix: Update CCS-related structs & constructor This commit: - Removes attribute duplicity from `CCSShape`. - Fixes the reversed naming for col&row number. - Removes leftover comments that no longer apply. - Update the `CCSShape::new` to use `is_valid` on the new format as well as remove useless checks & update to new `CCSSape` format. --- src/ccs.rs | 96 ++++++++++++--------------------------- src/spartan/polynomial.rs | 2 +- 2 files changed, 30 insertions(+), 68 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 7955f340b..93aaa866a 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -49,11 +49,12 @@ pub struct CCS { #[serde(bound = "")] pub struct CCSShape { pub(crate) num_cons: usize, - pub(crate) num_vars: usize, - pub(crate) num_io: usize, pub(crate) M: Vec>, + // Num vars pub(crate) t: usize, + // Number of public witness + pub(crate) l: usize, pub(crate) q: usize, pub(crate) d: usize, pub(crate) S: Vec>, @@ -61,10 +62,10 @@ pub struct CCSShape { // Was: usize pub(crate) c: Vec, - // m is the number of columns in M_i - pub(crate) m: usize, - // n is the number of rows in M_i + // n is the number of columns in M_i pub(crate) n: usize, + // m is the number of rows in M_i + pub(crate) m: usize, // s = log m pub(crate) s: usize, // s_prime = log n @@ -78,7 +79,7 @@ pub struct CCSWitness { W: Vec, } -// TODO: Make sure this is in the right form for committed CCS using MLE, possibly a separate type +// XXX: Not sure this type is needed if we do have CCCSInstance and LCCCSInstance. /// A type that holds an CCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] @@ -96,20 +97,11 @@ pub struct CCSInstance { #[serde(bound = "")] pub struct CCCSShape { // Sequence of sparse MLE polynomials in s+s' variables M_MLE1, ..., M_MLEt - // TODO This should be MLE, but possible we don't have to keep in struct? - // Exists in paper but not multifolding-poc - pub(crate) M_MLE: Vec>, + pub(crate) M_MLE: Vec>, - // XXX Embed CCS directly here or do a flat structure? 
pub(crate) ccs: CCSShape, - // q multisets S (same as CCS) - // q constants c (same as CCS) } -/// CCCS Instance is (C, x) -/// CCCS Witness is w _mle - -// NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` /// A type that holds a CCCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] @@ -121,6 +113,7 @@ pub struct CCCSInstance { pub(crate) X: Vec, } +// NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` /// A type that holds a witness for a given CCCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct CCCSWitness { @@ -128,7 +121,6 @@ pub struct CCCSWitness { pub(crate) w_mle: Vec, } -// NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` /// A type that holds a LCCCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] @@ -139,6 +131,7 @@ pub struct LCCCSInstance { pub(crate) v: Vec, } +// NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` /// A type that holds a witness for a given LCCCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct LCCCSWitness { @@ -150,7 +143,7 @@ impl CCS { /// Samples public parameters for the specified number of constraints and variables in an CCS pub fn commitment_key(S: &CCSShape) -> CommitmentKey { let num_cons = S.num_cons; - let num_vars = S.num_vars; + let num_vars = S.t; let total_nz = S.M.iter().fold(0, |acc, m| acc + m.0.len()); G::CE::setup(b"ck", max(max(num_cons, num_vars), total_nz)) @@ -158,74 +151,45 @@ impl CCS { } impl CCSShape { - /// Create an object of type `CCSSShape` from the explicitly specified CCS matrices + /// Create an object of type `CCSShape` from the explicitly specified CCS matrices pub fn new( num_cons: usize, - num_vars: usize, - num_io: usize, - M: &[Vec<(usize, usize, G::Scalar)>], + M: &[SparseMatrix], t: usize, + l: usize, q: usize, d: usize, S: Vec>, c: Vec, ) -> Result, NovaError> { - let is_valid = |num_cons: usize, - num_vars: usize, - num_io: usize, - matrix: &[(usize, usize, G::Scalar)]| - -> Result<(), NovaError> { - let res = (0..matrix.len()) - .map(|i| { - let (row, col, _val) = matrix[i]; - if row >= num_cons || col > num_io + num_vars { - Err(NovaError::InvalidIndex) - } else { - Ok(()) - } - }) - .collect::, NovaError>>(); - - if res.is_err() { - Err(NovaError::InvalidIndex) - } else { - Ok(()) - } - }; + // Check matrix validity // Check that the row and column indexes are within the range of the number of constraints and variables - let res_M = M - .iter() - .map(|m| is_valid(num_cons, num_vars, num_io, m)) - .collect::, NovaError>>(); - - // If any of the matricies are invalid, return an error - if res_M.is_err() { - return Err(NovaError::InvalidIndex); - } + M.iter() + .map(|m| m.is_valid(num_cons, t, l)) + .collect::, NovaError>>()?; // We require the number of public inputs/outputs to be even - if num_io % 2 != 0 { + if l % 2 != 0 { return Err(NovaError::OddInputLength); } - // We collect the matrixes. 
- let M: Vec> = M.iter().map(|m| SparseMatrix::from(m)).collect(); - - // NOTE: All matricies have the same number of rows, but in a SparseMatrix we need to check all of them // Can probably be made more efficient by keeping track fo n_rows/n_cols at creation/insert time - let m = M.iter().fold(0, |acc, matrix| max(acc, matrix.n_rows())); - let n = M.iter().fold(0, |acc, matrix| max(acc, matrix.n_cols())); + let m = M + .iter() + .fold(usize::MIN, |acc, matrix| max(acc, matrix.n_rows())); + let n = M + .iter() + .fold(usize::MIN, |acc, matrix| max(acc, matrix.n_cols())); let s = m.log_2() as usize; let s_prime = n.log_2() as usize; - let shape = CCSShape { + Ok(CCSShape { num_cons, - num_vars, - num_io, - M, + M: M.to_vec(), t, + l, q, d, S, @@ -234,9 +198,7 @@ impl CCSShape { n, s, s_prime, - }; - - Ok(shape) + }) } // NOTE: Not using previous used multiply_vec (r1cs.rs), see utils.rs diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 42b1eb442..8f45c2549 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -77,7 +77,7 @@ impl EqPolynomial { /// $$ /// /// Vector $Z$ indicates $Z(e)$ where $e$ ranges from $0$ to $2^m-1$. -#[derive(Debug, Clone, Serialize, Deserialize)] +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct MultilinearPolynomial { num_vars: usize, // the number of variables in the multilinear polynomial Z: Vec, // evaluations of the polynomial in all the 2^num_vars Boolean inputs From fa0376edde2999eb52de5f64272230175befe1c9 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 21 Jun 2023 14:33:14 +0200 Subject: [PATCH 036/100] change: Remove Result from Matrix/vec ops This removes the annoying results that were not needed from ops implementations and replaces them by leaving the "necessary" sanity-checks as assertions. --- src/utils.rs | 75 ++++++++++++++++++++++------------------------------ 1 file changed, 31 insertions(+), 44 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index ded855397..627902dde 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -81,41 +81,35 @@ impl From<&Vec<(usize, usize, G::Scalar)>> for SparseMatrix { } } -pub fn vector_add(a: &Vec, b: &Vec) -> Result, NovaError> { - if a.len() != b.len() { - return Err(NovaError::InvalidIndex); - } - +pub fn vector_add(a: &Vec, b: &Vec) -> Vec { + assert_eq!(a.len(), b.len(), "Vector addition with different lengths"); let mut res = Vec::with_capacity(a.len()); for i in 0..a.len() { res.push(a[i] + b[i]); } - Ok(res) + res } -pub fn vector_elem_product(a: &Vec, e: &F) -> Result, NovaError> { +pub fn vector_elem_product(a: &Vec, e: &F) -> Vec { let mut res = Vec::with_capacity(a.len()); for i in 0..a.len() { res.push(a[i] * e); } - Ok(res) + res } +// XXX: This could be implemented via Mul trait in the lib. We should consider as it will reduce imports. 
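// A minimal sketch of that idea (hypothetical, not something this change adds): `Mul` is a
// foreign trait, so the dense `Vec<Vec<F>>` form would need a newtype wrapper, but the
// local `SparseMatrix` type could expose the product as `&matrix * &vector` directly:
//
// use std::ops::Mul;
//
// impl<'a, G: Group> Mul<&'a Vec<G::Scalar>> for &'a SparseMatrix<G> {
//   type Output = Vec<G::Scalar>;
//
//   fn mul(self, vector: &'a Vec<G::Scalar>) -> Self::Output {
//     matrix_vector_product_sparse(self, vector)
//   }
// }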
#[allow(dead_code)] -pub fn matrix_vector_product( - matrix: &Vec>, - vector: &Vec, -) -> Result, NovaError> { - if matrix.len() == 0 || matrix[0].len() == 0 { - return Err(NovaError::InvalidIndex); - } - - if matrix[0].len() != vector.len() { - return Err(NovaError::InvalidIndex); - } - +pub fn matrix_vector_product(matrix: &Vec>, vector: &Vec) -> Vec { + assert_ne!(matrix.len(), 0, "empty-row matrix"); + assert_ne!(matrix[0].len(), 0, "empty-col matrix"); + assert_eq!( + matrix[0].len(), + vector.len(), + "matrix rows != vector length" + ); let mut res = Vec::with_capacity(matrix.len()); for i in 0..matrix.len() { let mut sum = F::ZERO; @@ -125,43 +119,36 @@ pub fn matrix_vector_product( res.push(sum); } - Ok(res) + res } // Matrix vector product where matrix is sparse // First element is row index, second column, third value stored +// XXX: This could be implemented via Mul trait in the lib. We should consider as it will reduce imports. pub fn matrix_vector_product_sparse( matrix: &SparseMatrix, vector: &Vec, -) -> Result, NovaError> { - if matrix.0.len() == 0 { - return Err(NovaError::InvalidIndex); - } - - // Ensure we can perform the Matrix x Vec multiplication. - if matrix.n_rows() != vector.len() { - return Err(NovaError::InvalidIndex); - } - +) -> Vec { + assert_eq!( + matrix.n_rows(), + vector.len(), + "matrix rows != vector length" + ); let mut res = vec![G::Scalar::ZERO; vector.len()]; for &(row, col, value) in matrix.0.iter() { res[row] += value * vector[col]; } - - Ok(res) + res } -pub fn hadamard_product(a: &Vec, b: &Vec) -> Result, NovaError> { - if a.len() != b.len() { - return Err(NovaError::InvalidIndex); - } - +pub fn hadamard_product(a: &Vec, b: &Vec) -> Vec { + assert_eq!(a.len(), b.len(), "Haddamard needs same len vectors"); let mut res = Vec::with_capacity(a.len()); for i in 0..a.len() { res.push(a[i] * b[i]); } - Ok(res) + res } #[allow(dead_code)] @@ -242,7 +229,7 @@ mod tests { fn test_vector_add() { let a = to_F_vec::(vec![1, 2, 3]); let b = to_F_vec::(vec![4, 5, 6]); - let res = vector_add(&a, &b).unwrap(); + let res = vector_add(&a, &b); assert_eq!(res, to_F_vec::(vec![5, 7, 9])); } @@ -250,7 +237,7 @@ mod tests { fn test_vector_elem_product() { let a = to_F_vec::(vec![1, 2, 3]); let e = Fq::from(2); - let res = vector_elem_product(&a, &e).unwrap(); + let res = vector_elem_product(&a, &e); assert_eq!(res, to_F_vec::(vec![2, 4, 6])); } @@ -260,7 +247,7 @@ mod tests { let vector = vec![1, 2, 3]; let A = to_F_matrix::(matrix); let z = to_F_vec::(vector); - let res = matrix_vector_product(&A, &z).unwrap(); + let res = matrix_vector_product(&A, &z); assert_eq!(res, to_F_vec::(vec![14, 32])); } @@ -269,7 +256,7 @@ mod tests { fn test_hadamard_product() { let a = to_F_vec::(vec![1, 2, 3]); let b = to_F_vec::(vec![4, 5, 6]); - let res = hadamard_product(&a, &b).unwrap(); + let res = hadamard_product(&a, &b); assert_eq!(res, to_F_vec::(vec![4, 10, 18])); } @@ -286,7 +273,7 @@ mod tests { let vector = vec![1, 2, 3]; let A = to_F_matrix_sparse::(matrix); let z = to_F_vec::(vector); - let res = matrix_vector_product_sparse::(&(A.into()), &z).unwrap(); + let res = matrix_vector_product_sparse::(&(A.into()), &z); assert_eq!(res, to_F_vec::(vec![14, 32])); } From 05596445371a0ce728cd7b6004233bfa6dc39a43 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 21 Jun 2023 14:45:23 +0200 Subject: [PATCH 037/100] change: Update CCS.is_sat() to use iterators The impl gets cleaner and also easier to update later. 
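For reference, the folded quantity is the relation already documented in the Sage
snippet inside `is_sat`, i.e. ∑ cᵢ ⋅ ◯ Mⱼ z == 0. As an illustrative special case,
with the R1CS-derived parameters set up by `from_r1cs` (q = 2, S = [[0, 1], [2]],
c = [1, -1]) the relation reduces to checking Az ∘ Bz - Cz == 0; a sketch using the
helpers from utils.rs:

    let az = matrix_vector_product_sparse(&self.M[0], &z);
    let bz = matrix_vector_product_sparse(&self.M[1], &z);
    let cz = matrix_vector_product_sparse(&self.M[2], &z);
    // r == 0  <=>  Az ∘ Bz - Cz == 0, the usual R1CS check
    let r = vector_add(
      &hadamard_product(&az, &bz),
      &vector_elem_product(&cz, &(-G::Scalar::ONE)),
    );
    assert_eq!(r, vec![G::Scalar::ZERO; self.m]);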
--- src/ccs.rs | 53 ++++++++++++++++++++++++----------------------------- 1 file changed, 24 insertions(+), 29 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 93aaa866a..8369aec77 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -76,7 +76,7 @@ pub struct CCSShape { #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] pub struct CCSWitness { // Vector W in F^{n - l - 1} - W: Vec, + w: Vec, } // XXX: Not sure this type is needed if we do have CCCSInstance and LCCCSInstance. @@ -89,7 +89,7 @@ pub struct CCSInstance { pub(crate) comm_W: Commitment, // Public input x in F^l - pub(crate) X: Vec, + pub(crate) x: Vec, } /// A type that holds the shape of a Committed CCS (CCCS) instance @@ -110,7 +110,7 @@ pub struct CCCSInstance { pub(crate) C: Commitment, // $x in F^l$ - pub(crate) X: Vec, + pub(crate) x: Vec, } // NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` @@ -126,7 +126,7 @@ pub struct CCCSWitness { #[serde(bound = "")] pub struct LCCCSInstance { pub(crate) C: Commitment, - pub(crate) X: Vec, + pub(crate) x: Vec, pub(crate) u: G::Scalar, pub(crate) v: Vec, } @@ -213,15 +213,8 @@ impl CCSShape { U: &CCSInstance, W: &CCSWitness, ) -> Result<(), NovaError> { - assert_eq!(W.W.len(), self.num_vars); - assert_eq!(U.X.len(), self.num_io); - - // NOTE: All matricies have the same number of rows, but in a SparseMatrix we need to check all of them - // Can probably be made more efficient by keeping track fo n_rows/n_cols at creation/insert time - let m = self - .M - .iter() - .fold(0, |acc, matrix| max(acc, matrix.n_rows())); + assert_eq!(W.w.len(), self.t); + assert_eq!(U.x.len(), self.l); // Sage code to check CCS relation: // @@ -236,28 +229,30 @@ impl CCSShape { // print("\nCCS relation check (∑ cᵢ ⋅ ◯ Mⱼ z == 0):", r == [0]*m) // // verify if ∑ cᵢ ⋅ ◯ Mⱼ z == 0 - let res_eq: bool = { - let mut r = vec![G::Scalar::ZERO; m]; - let z = concat(vec![W.W.clone(), vec![G::Scalar::ONE], U.X.clone()]); - for i in 0..self.q { - let mut hadamard_output = vec![G::Scalar::ONE; m]; - for j in &self.S[i] { - let mvp = matrix_vector_product_sparse(&self.M[*j], &z)?; - hadamard_output = hadamard_product(&hadamard_output, &mvp)?; - } + let z = concat(vec![vec![G::Scalar::ONE], U.x.clone(), W.w.clone()]); - let vep = vector_elem_product(&hadamard_output, &self.c[i])?; + let r = (0..self.q) + .into_iter() + .fold(vec![G::Scalar::ZERO; self.m], |r, idx| { + let hadamard_output = self.S[idx] + .iter() + .fold(vec![G::Scalar::ZERO; self.m], |acc, j| { + let mvp = matrix_vector_product_sparse(&self.M[*j], &z); + hadamard_product(&acc, &mvp) + }); - r = vector_add(&r, &vep)?; - } - r == vec![G::Scalar::ZERO; m] - }; + // Multiply by the coefficient of this step + let c_M_j_z: Vec<::Scalar> = + vector_elem_product(&hadamard_output, &self.c[idx]); + + vector_add(&r, &c_M_j_z) + }); // verify if comm_W is a commitment to W - let res_comm: bool = U.comm_W == CE::::commit(ck, &W.W); + let res_comm: bool = U.comm_W == CE::::commit(ck, &W.w); - if res_eq && res_comm { + if r == vec![G::Scalar::ZERO; self.m] && res_comm { Ok(()) } else { Err(NovaError::UnSat) From 10f1b9fd91889155badd5bc2a320dca3f9607688 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 21 Jun 2023 15:58:25 +0200 Subject: [PATCH 038/100] change: Move matrix padding to SparseMatrix --- src/utils.rs | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/src/utils.rs b/src/utils.rs index 627902dde..3e046e649 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -3,6 +3,7 @@ use 
crate::errors::NovaError; use crate::spartan::polynomial::MultilinearPolynomial; use crate::traits::Group; use ff::{Field, PrimeField}; +use rayon::prelude::{IntoParallelRefMutIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; /// A matrix structure represented on a sparse form. @@ -67,6 +68,13 @@ impl SparseMatrix { Ok(()) } } + + pub(crate) fn pad(&mut self, n: usize) { + let prev_n = self.n_cols(); + self.0.par_iter_mut().for_each(|(_, c, _)| { + *c = if *c >= prev_n { *c + n - prev_n } else { *c }; + }); + } } impl From> for SparseMatrix { From 659ef7420574c7b1ab567d757949a7790d3c6772 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 21 Jun 2023 15:59:50 +0200 Subject: [PATCH 039/100] change: Update `from_r1cs` & `pad` CCSShape fns --- src/ccs.rs | 65 ++++++++++++------------------------------------------ 1 file changed, 14 insertions(+), 51 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 8369aec77..c30463b45 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -267,9 +267,7 @@ impl CCSShape { const S1: [usize; 2] = [0, 1]; const S2: [usize; 1] = [2]; - let C0 = G::Scalar::ONE; - let C1 = -G::Scalar::ONE; - + let l = r1cs.num_io; // NOTE: All matricies have the same number of rows, but in a SparseMatrix we need to check all of them // TODO: Consider using SparseMatrix type in R1CSShape too // XXX: This can probably be made a lot better @@ -285,70 +283,35 @@ impl CCSShape { Self { num_cons: r1cs.num_cons, - num_vars: r1cs.num_vars, - num_io: r1cs.num_io, M: vec![r1cs.A.into(), r1cs.B.into(), r1cs.C.into()], t: T, + l, q: Q, d: D, S: vec![S1.to_vec(), S2.to_vec()], - c: vec![C0, C1], - m: m, - n: n, - s: s, - s_prime: s_prime, + c: vec![G::Scalar::ONE, -G::Scalar::ONE], + m, + n, + s, + s_prime, } } /// Pads the R1CSShape so that the number of variables is a power of two /// Renumbers variables to accomodate padded variables pub fn pad(&mut self) { - // XXX: Is this definitely always the same as m number of rows? - // equalize the number of variables and constraints - let m = max(self.num_vars, self.num_cons).next_power_of_two(); - - // check if the provided R1CSShape is already as required - if self.num_vars == m && self.num_cons == m { - return; - } + let (padded_m, padded_n) = (self.m.next_power_of_two(), self.n.next_power_of_two()); // check if the number of variables are as expected, then // we simply set the number of constraints to the next power of two - if self.num_vars == m { - *self = CCSShape { - num_cons: m, - num_vars: m, - num_io: self.num_io, - M: self.M.clone(), - t: self.t, - q: self.q, - d: self.d, - S: self.S.clone(), - c: self.c.clone(), - m: self.m, - n: self.n, - s: self.s, - s_prime: self.s_prime, - }; + if self.n != padded_n { + // Apply pad for each matrix in M + self.M.iter_mut().for_each(|m| m.pad(padded_n)); + self.n = padded_n; } - // otherwise, we need to pad the number of variables and renumber variable accesses - let num_vars_padded = m; - let apply_pad = |M: &mut SparseMatrix| { - M.0.par_iter_mut().for_each(|(_, c, _)| { - *c = if *c >= self.num_vars { - *c + num_vars_padded - self.num_vars - } else { - *c - }; - }); - }; - - // Apply pad for each matrix in M - let mut M_padded = self.M.clone(); - M_padded.iter_mut().for_each(|m| apply_pad(m)); - - // TODO: Sanity check if CCS padding is correct here + // We always update `m` even if it is the same (no need for `if`s). 
+ self.m = padded_m; } } From ff4b44bb72fc7019cb3ec7549c4c0f1a5c56dfd9 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 21 Jun 2023 16:13:03 +0200 Subject: [PATCH 040/100] change: Standarize `new` methods & remove CAPS --- src/ccs.rs | 35 +++++++++++++---------------------- 1 file changed, 13 insertions(+), 22 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index c30463b45..348d6fcab 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -86,7 +86,7 @@ pub struct CCSWitness { pub struct CCSInstance { // XXX: Move commitment out of CCSInstance for more clean conceptual separation? // (Pedersen) Commitment to a witness - pub(crate) comm_W: Commitment, + pub(crate) comm_w: Commitment, // Public input x in F^l pub(crate) x: Vec, @@ -250,7 +250,7 @@ impl CCSShape { }); // verify if comm_W is a commitment to W - let res_comm: bool = U.comm_W == CE::::commit(ck, &W.w); + let res_comm: bool = U.comm_w == CE::::commit(ck, &W.w); if r == vec![G::Scalar::ZERO; self.m] && res_comm { Ok(()) @@ -317,40 +317,31 @@ impl CCSShape { impl CCSWitness { /// A method to create a witness object using a vector of scalars - pub fn new(S: &CCSShape, W: &[G::Scalar]) -> Result, NovaError> { - if S.num_vars != W.len() { - Err(NovaError::InvalidWitnessLength) - } else { - Ok(CCSWitness { W: W.to_owned() }) - } + pub fn new(S: &CCSShape, witness: Vec) -> CCSWitness { + assert_eq!(S.t, witness.len()); + + Self { w: witness } } /// Commits to the witness using the supplied generators pub fn commit(&self, ck: &CommitmentKey) -> Commitment { - CE::::commit(ck, &self.W) + CE::::commit(ck, &self.w) } } impl CCSInstance { /// A method to create an instance object using consitituent elements pub fn new( - S: &CCSShape, - comm_W: &Commitment, - X: &[G::Scalar], + s: &CCSShape, + w_comm: &Commitment, + x: Vec, ) -> Result, NovaError> { - if S.num_io != X.len() { - Err(NovaError::InvalidInputLength) - } else { - Ok(CCSInstance { - comm_W: *comm_W, - X: X.to_owned(), - }) - } + assert_eq!(s.l, x.len()); + + Ok(CCSInstance { comm_w: *w_comm, x }) } } -use std::fmt::Debug; - impl CCSShape { pub fn multiply_matrices(&self, z: &Vec) -> Result>, NovaError> { let mut Mz: Vec> = Vec::new(); From af1e3d574e4b8114a8317345f815c03d9eaf50f4 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 21 Jun 2023 16:13:37 +0200 Subject: [PATCH 041/100] remove: Unused CCShape::matrix_mul fn --- src/ccs.rs | 11 ----------- 1 file changed, 11 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index 348d6fcab..c6370b045 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -342,17 +342,6 @@ impl CCSInstance { } } -impl CCSShape { - pub fn multiply_matrices(&self, z: &Vec) -> Result>, NovaError> { - let mut Mz: Vec> = Vec::new(); - for matrix in &self.M { - let product = matrix_vector_product_sparse(matrix, z)?; - Mz.push(product); - } - Ok(Mz) - } -} - impl CCCSShape { // TODO: Sanity check this pub fn compute_sum_Mz( From 9b2f3c53e05307103b504747d85a20bebf69fe11 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 21 Jun 2023 18:04:15 +0200 Subject: [PATCH 042/100] fix: Move test-only fns to test mod & fix sparse tests --- src/utils.rs | 45 ++++++++++++++++----------------------------- 1 file changed, 16 insertions(+), 29 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 3e046e649..b0b8553eb 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -28,7 +28,7 @@ impl SparseMatrix { .iter() .copied() .map(|r| r.0) - .fold(std::usize::MIN, |a, b| a.max(b)); + .fold(usize::MIN, |a, b| a.max(b)); max_row_idx + 1 } @@ -138,9 +138,9 @@ pub fn matrix_vector_product_sparse( vector: 
&Vec, ) -> Vec { assert_eq!( - matrix.n_rows(), + matrix.n_cols(), vector.len(), - "matrix rows != vector length" + "matrix cols != vector length" ); let mut res = vec![G::Scalar::ZERO; vector.len()]; for &(row, col, value) in matrix.0.iter() { @@ -159,21 +159,6 @@ pub fn hadamard_product(a: &Vec, b: &Vec) -> Vec { res } -#[allow(dead_code)] -pub fn to_F_vec(v: Vec) -> Vec { - v.iter().map(|x| F::from(*x)).collect() -} - -#[allow(dead_code)] -pub fn to_F_matrix(m: Vec>) -> Vec> { - m.iter().map(|x| to_F_vec(x.clone())).collect() -} - -#[allow(dead_code)] -pub fn to_F_matrix_sparse(m: Vec<(usize, usize, u64)>) -> Vec<(usize, usize, F)> { - m.iter().map(|x| (x.0, x.1, F::from(x.2))).collect() -} - pub fn sparse_matrix_to_mlp( matrix: &SparseMatrix, ) -> MultilinearPolynomial { @@ -207,16 +192,6 @@ pub fn sparse_matrix_to_mlp( MultilinearPolynomial::new(vec_padded) } -pub fn bit_to_index(bits: &[bool]) -> usize { - let mut index = 0; - for (i, &bit) in bits.iter().enumerate() { - if bit { - index += 1 << i; - } - } - index -} - /// Decompose an integer into a binary vector in little endian. pub fn bit_decompose(input: u64, num_var: usize) -> Vec { let mut res = Vec::with_capacity(num_var); @@ -233,6 +208,18 @@ mod tests { use super::*; use pasta_curves::{Ep, Fq}; + fn to_F_vec(v: Vec) -> Vec { + v.iter().map(|x| F::from(*x)).collect() + } + + fn to_F_matrix(m: Vec>) -> Vec> { + m.iter().map(|x| to_F_vec(x.clone())).collect() + } + + fn to_F_matrix_sparse(m: Vec<(usize, usize, u64)>) -> Vec<(usize, usize, F)> { + m.iter().map(|x| (x.0, x.1, F::from(x.2))).collect() + } + #[test] fn test_vector_add() { let a = to_F_vec::(vec![1, 2, 3]); @@ -283,7 +270,7 @@ mod tests { let z = to_F_vec::(vector); let res = matrix_vector_product_sparse::(&(A.into()), &z); - assert_eq!(res, to_F_vec::(vec![14, 32])); + assert_eq!(res, to_F_vec::(vec![14, 32, 0])); } #[test] From 2a79b9e93714243c1b22d8abaa1abd8982a1615d Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 21 Jun 2023 18:07:37 +0200 Subject: [PATCH 043/100] change: Remove num_cons & unneeded Results --- src/ccs.rs | 58 +++++++++++++++++++++--------------------------------- 1 file changed, 22 insertions(+), 36 deletions(-) diff --git a/src/ccs.rs b/src/ccs.rs index c6370b045..357b4d830 100644 --- a/src/ccs.rs +++ b/src/ccs.rs @@ -48,8 +48,6 @@ pub struct CCS { #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] pub struct CCSShape { - pub(crate) num_cons: usize, - pub(crate) M: Vec>, // Num vars pub(crate) t: usize, @@ -79,12 +77,10 @@ pub struct CCSWitness { w: Vec, } -// XXX: Not sure this type is needed if we do have CCCSInstance and LCCCSInstance. /// A type that holds an CCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] pub struct CCSInstance { - // XXX: Move commitment out of CCSInstance for more clean conceptual separation? // (Pedersen) Commitment to a witness pub(crate) comm_w: Commitment, @@ -141,19 +137,16 @@ pub struct LCCCSWitness { impl CCS { // TODO: Update commitment_key variables here? 
This is currently based on R1CS with M length /// Samples public parameters for the specified number of constraints and variables in an CCS - pub fn commitment_key(S: &CCSShape) -> CommitmentKey { - let num_cons = S.num_cons; - let num_vars = S.t; - let total_nz = S.M.iter().fold(0, |acc, m| acc + m.0.len()); + pub fn commitment_key(shape: &CCSShape) -> CommitmentKey { + let total_nz = shape.M.iter().fold(0, |acc, m| acc + m.0.len()); - G::CE::setup(b"ck", max(max(num_cons, num_vars), total_nz)) + G::CE::setup(b"ck", max(max(shape.m, shape.t), total_nz)) } } impl CCSShape { /// Create an object of type `CCSShape` from the explicitly specified CCS matrices pub fn new( - num_cons: usize, M: &[SparseMatrix], t: usize, l: usize, @@ -161,19 +154,7 @@ impl CCSShape { d: usize, S: Vec>, c: Vec, - ) -> Result, NovaError> { - // Check matrix validity - - // Check that the row and column indexes are within the range of the number of constraints and variables - M.iter() - .map(|m| m.is_valid(num_cons, t, l)) - .collect::, NovaError>>()?; - - // We require the number of public inputs/outputs to be even - if l % 2 != 0 { - return Err(NovaError::OddInputLength); - } - + ) -> CCSShape { // Can probably be made more efficient by keeping track fo n_rows/n_cols at creation/insert time let m = M .iter() @@ -182,11 +163,20 @@ impl CCSShape { .iter() .fold(usize::MIN, |acc, matrix| max(acc, matrix.n_cols())); + // Check that the row and column indexes are within the range of the number of constraints and variables + assert!(M + .iter() + .map(|matrix| matrix.is_valid(m, t, l)) + .collect::, NovaError>>() + .is_ok()); + + // We require the number of public inputs/outputs to be even + assert_ne!(l % 2, 0, " number of public i/o has to be even"); + let s = m.log_2() as usize; let s_prime = n.log_2() as usize; - Ok(CCSShape { - num_cons, + CCSShape { M: M.to_vec(), t, l, @@ -198,7 +188,7 @@ impl CCSShape { n, s, s_prime, - }) + } } // NOTE: Not using previous used multiply_vec (r1cs.rs), see utils.rs @@ -282,7 +272,6 @@ impl CCSShape { let s_prime = n.log_2() as usize; Self { - num_cons: r1cs.num_cons, M: vec![r1cs.A.into(), r1cs.B.into(), r1cs.C.into()], t: T, l, @@ -515,22 +504,19 @@ pub mod test { (i1, W, X) }; - let W = { - let res = CCSWitness::new(&S, &vars); - assert!(res.is_ok()); - res.unwrap() - }; + let ccs_w = CCSWitness::new(&S, vars); + let U = { - let comm_W = W.commit(ck); - let res = CCSInstance::new(&S, &comm_W, &X); + let comm_W = ccs_w.commit(ck); + let res = CCSInstance::new(&S, &comm_W, X); assert!(res.is_ok()); res.unwrap() }; // check that generated instance is satisfiable - assert!(S.is_sat(ck, &U, &W).is_ok()); + assert!(S.is_sat(ck, &U, &ccs_w).is_ok()); - (O, U, W) + (O, U, ccs_w) }; } } From 1b8bba8afa24388ac98a74a23ae50404eed368b8 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 21 Jun 2023 18:08:03 +0200 Subject: [PATCH 044/100] fix: Add test feature flag to HyperCube tests mod --- src/hypercube.rs | 1 + 1 file changed, 1 insertion(+) diff --git a/src/hypercube.rs b/src/hypercube.rs index 9d8bfc093..b1eec086b 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -69,6 +69,7 @@ impl Iterator for BooleanHypercube { } } +#[cfg(test)] mod tests { use super::*; use pasta_curves::Fp; From 8075c15b7055b2f7e29b024389ddd060e7a885d5 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Thu, 22 Jun 2023 08:34:45 +0200 Subject: [PATCH 045/100] add: Migrate VirtualPolynomial from espresso/Hyperplonk --- src/utils.rs | 326 ++++++++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 325 
insertions(+), 1 deletion(-) diff --git a/src/utils.rs b/src/utils.rs index b0b8553eb..f8f0f6103 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,9 +1,11 @@ //! Basic utils +use std::{cmp::max, collections::HashMap, marker::PhantomData, ops::Add, sync::Arc}; + use crate::errors::NovaError; use crate::spartan::polynomial::MultilinearPolynomial; use crate::traits::Group; use ff::{Field, PrimeField}; -use rayon::prelude::{IntoParallelRefMutIterator, ParallelIterator}; +use rayon::prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; /// A matrix structure represented on a sparse form. @@ -203,6 +205,328 @@ pub fn bit_decompose(input: u64, num_var: usize) -> Vec { res } +// A bit of collage-programming here. +// As a tmp way to have multilinear polynomial product+addition. +// The idea is to re-evaluate once everything works and decide if we replace this code +// by something else. +// +// THIS CODE HAS BEEN TAKEN FROM THE ESPRESSO SYSTEMS LIB: +// +// +#[rustfmt::skip] +/// A virtual polynomial is a sum of products of multilinear polynomials; +/// where the multilinear polynomials are stored via their multilinear +/// extensions: `(coefficient, DenseMultilinearExtension)` +/// +/// * Number of products n = `polynomial.products.len()`, +/// * Number of multiplicands of ith product m_i = +/// `polynomial.products[i].1.len()`, +/// * Coefficient of ith product c_i = `polynomial.products[i].0` +/// +/// The resulting polynomial is +/// +/// $$ \sum_{i=0}^{n} c_i \cdot \prod_{j=0}^{m_i} P_{ij} $$ +/// +/// Example: +/// f = c0 * f0 * f1 * f2 + c1 * f3 * f4 +/// where f0 ... f4 are multilinear polynomials +/// +/// - flattened_ml_extensions stores the multilinear extension representation of +/// f0, f1, f2, f3 and f4 +/// - products is +/// \[ +/// (c0, \[0, 1, 2\]), +/// (c1, \[3, 4\]) +/// \] +/// - raw_pointers_lookup_table maps fi to i +/// +#[derive(Clone, Debug, Default, PartialEq)] +pub struct VirtualPolynomial { + /// Aux information about the multilinear polynomial + pub aux_info: VPAuxInfo, + /// list of reference to products (as usize) of multilinear extension + pub products: Vec<(F, Vec)>, + /// Stores multilinear extensions in which product multiplicand can refer + /// to. + pub flattened_ml_extensions: Vec>>, + /// Pointers to the above poly extensions + raw_pointers_lookup_table: HashMap<*const MultilinearPolynomial, usize>, +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +/// Auxiliary information about the multilinear polynomial +pub struct VPAuxInfo { + /// max number of multiplicands in each product + pub max_degree: usize, + /// number of variables of the polynomial + pub num_variables: usize, + /// Associated field + #[doc(hidden)] + pub phantom: PhantomData, +} + +impl Add for &VirtualPolynomial { + type Output = VirtualPolynomial; + fn add(self, other: &VirtualPolynomial) -> Self::Output { + let mut res = self.clone(); + for products in other.products.iter() { + let cur: Vec>> = products + .1 + .iter() + .map(|&x| other.flattened_ml_extensions[x].clone()) + .collect(); + + res + .add_mle_list(cur, products.0) + .expect("add product failed"); + } + res + } +} + +// TODO: convert this into a trait +impl VirtualPolynomial { + /// Creates an empty virtual polynomial with `num_variables`. 
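  /// A usage sketch (hypothetical names: `f0`, `f1`, `f2` are assumed to be
  /// `Arc<MultilinearPolynomial<F>>` over `nv` variables, `c0`, `c1` scalars):
  ///
  ///   let mut vp = VirtualPolynomial::<F>::new(nv);
  ///   vp.add_mle_list([f0.clone(), f1.clone()], c0)?; // vp += c0 * f0 * f1
  ///   vp.add_mle_list([f2.clone()], c1)?;             // vp += c1 * f2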
+ pub fn new(num_variables: usize) -> Self { + VirtualPolynomial { + aux_info: VPAuxInfo { + max_degree: 0, + num_variables, + phantom: PhantomData::default(), + }, + products: Vec::new(), + flattened_ml_extensions: Vec::new(), + raw_pointers_lookup_table: HashMap::new(), + } + } + + /// Creates an new virtual polynomial from a MLE and its coefficient. + pub fn new_from_mle(mle: &Arc>, coefficient: F) -> Self { + let mle_ptr: *const MultilinearPolynomial = Arc::as_ptr(mle); + let mut hm = HashMap::new(); + hm.insert(mle_ptr, 0); + + VirtualPolynomial { + aux_info: VPAuxInfo { + // The max degree is the max degree of any individual variable + max_degree: 1, + num_variables: mle.get_num_vars(), + phantom: PhantomData::default(), + }, + // here `0` points to the first polynomial of `flattened_ml_extensions` + products: vec![(coefficient, vec![0])], + flattened_ml_extensions: vec![mle.clone()], + raw_pointers_lookup_table: hm, + } + } + + /// Add a product of list of multilinear extensions to self + /// Returns an error if the list is empty, or the MLE has a different + /// `num_vars` from self. + /// + /// The MLEs will be multiplied together, and then multiplied by the scalar + /// `coefficient`. + pub fn add_mle_list( + &mut self, + mle_list: impl IntoIterator>>, + coefficient: F, + ) -> Result<(), NovaError> { + let mle_list: Vec>> = mle_list.into_iter().collect(); + let mut indexed_product = Vec::with_capacity(mle_list.len()); + + if mle_list.is_empty() { + return Err(NovaError::VpArith); + } + + self.aux_info.max_degree = max(self.aux_info.max_degree, mle_list.len()); + + for mle in mle_list { + if mle.get_num_vars() != self.aux_info.num_variables { + return Err(NovaError::VpArith); + } + + let mle_ptr: *const MultilinearPolynomial = Arc::as_ptr(&mle); + if let Some(index) = self.raw_pointers_lookup_table.get(&mle_ptr) { + indexed_product.push(*index) + } else { + let curr_index = self.flattened_ml_extensions.len(); + self.flattened_ml_extensions.push(mle.clone()); + self.raw_pointers_lookup_table.insert(mle_ptr, curr_index); + indexed_product.push(curr_index); + } + } + self.products.push((coefficient, indexed_product)); + Ok(()) + } + + /// Multiple the current VirtualPolynomial by an MLE: + /// - add the MLE to the MLE list; + /// - multiple each product by MLE and its coefficient. + /// Returns an error if the MLE has a different `num_vars` from self. + pub fn mul_by_mle( + &mut self, + mle: Arc>, + coefficient: F, + ) -> Result<(), NovaError> { + if mle.get_num_vars() != self.aux_info.num_variables { + return Err(NovaError::VpArith); + } + + let mle_ptr: *const MultilinearPolynomial = Arc::as_ptr(&mle); + + // check if this mle already exists in the virtual polynomial + let mle_index = match self.raw_pointers_lookup_table.get(&mle_ptr) { + Some(&p) => p, + None => { + self + .raw_pointers_lookup_table + .insert(mle_ptr, self.flattened_ml_extensions.len()); + self.flattened_ml_extensions.push(mle); + self.flattened_ml_extensions.len() - 1 + } + }; + + for (prod_coef, indices) in self.products.iter_mut() { + // - add the MLE to the MLE list; + // - multiple each product by MLE and its coefficient. + indices.push(mle_index); + *prod_coef *= coefficient; + } + + // increase the max degree by one as the MLE has degree 1. 
+ self.aux_info.max_degree += 1; + + Ok(()) + } + + /// Given virtual polynomial `p(x)` and scalar `s`, compute `s*p(x)` + pub fn scalar_mul(&mut self, s: &F) { + for (prod_coef, _) in self.products.iter_mut() { + *prod_coef *= s; + } + } + + /// Evaluate the virtual polynomial at point `point`. + /// Returns an error is point.len() does not match `num_variables`. + pub fn evaluate(&self, point: &[F]) -> Result { + if self.aux_info.num_variables != point.len() { + return Err(NovaError::VpArith); + } + + // Evaluate all the MLEs at `point` + let evals: Vec = self + .flattened_ml_extensions + .iter() + .map(|x| x.evaluate(point)) + .collect(); + + let res = self + .products + .iter() + .map(|(c, p)| *c * p.iter().map(|&i| evals[i]).product::()) + .sum(); + + Ok(res) + } + + // Input poly f(x) and a random vector r, output + // \hat f(x) = \sum_{x_i \in eval_x} f(x_i) eq(x, r) + // where + // eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) + // + // This function is used in ZeroCheck. + pub fn build_f_hat(&self, r: &[F]) -> Result { + if self.aux_info.num_variables != r.len() { + return Err(NovaError::VpArith); + } + + let eq_x_r = build_eq_x_r(r)?; + let mut res = self.clone(); + res.mul_by_mle(eq_x_r, F::ONE)?; + + Ok(res) + } +} + +/// This function build the eq(x, r) polynomial for any given r. +/// +/// Evaluate +/// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) +/// over r, which is +/// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) +pub fn build_eq_x_r(r: &[F]) -> Result>, NovaError> { + let evals = build_eq_x_r_vec(r)?; + let mle = MultilinearPolynomial::new(evals); + + Ok(Arc::new(mle)) +} + +/// This function build the eq(x, r) polynomial for any given r, and output the +/// evaluation of eq(x, r) in its vector form. +/// +/// Evaluate +/// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) +/// over r, which is +/// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) +pub fn build_eq_x_r_vec(r: &[F]) -> Result, NovaError> { + // we build eq(x,r) from its evaluations + // we want to evaluate eq(x,r) over x \in {0, 1}^num_vars + // for example, with num_vars = 4, x is a binary vector of 4, then + // 0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3) + // 1 0 0 0 -> r0 * (1-r1) * (1-r2) * (1-r3) + // 0 1 0 0 -> (1-r0) * r1 * (1-r2) * (1-r3) + // 1 1 0 0 -> r0 * r1 * (1-r2) * (1-r3) + // .... + // 1 1 1 1 -> r0 * r1 * r2 * r3 + // we will need 2^num_var evaluations + + let mut eval = Vec::new(); + build_eq_x_r_helper(r, &mut eval)?; + + Ok(eval) +} + +/// A helper function to build eq(x, r) recursively. +/// This function takes `r.len()` steps, and for each step it requires a maximum +/// `r.len()-1` multiplications. 
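/// For example, with r = [r0, r1] the recursion first builds [1 - r1, r1] from the tail,
/// and the expansion step with r0 then yields
/// [(1 - r0)(1 - r1), r0 (1 - r1), (1 - r0) r1, r0 r1],
/// matching the bit ordering (x_0 in the low index bit) described in `build_eq_x_r_vec`.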
+fn build_eq_x_r_helper(r: &[F], buf: &mut Vec) -> Result<(), NovaError> { + if r.is_empty() { + return Err(NovaError::VpArith); + } else if r.len() == 1 { + // initializing the buffer with [1-r_0, r_0] + buf.push(F::ONE - r[0]); + buf.push(r[0]); + } else { + build_eq_x_r_helper(&r[1..], buf)?; + + // suppose at the previous step we received [b_1, ..., b_k] + // for the current step we will need + // if x_0 = 0: (1-r0) * [b_1, ..., b_k] + // if x_0 = 1: r0 * [b_1, ..., b_k] + // let mut res = vec![]; + // for &b_i in buf.iter() { + // let tmp = r[0] * b_i; + // res.push(b_i - tmp); + // res.push(tmp); + // } + // *buf = res; + + let mut res = vec![F::ZERO; buf.len() << 1]; + res.par_iter_mut().enumerate().for_each(|(i, val)| { + let bi = buf[i >> 1]; + let tmp = r[0] * bi; + if i & 1 == 0 { + *val = bi - tmp; + } else { + *val = tmp; + } + }); + *buf = res; + } + + Ok(()) +} + #[cfg(test)] mod tests { use super::*; From b81e432322627f0792b2da437ffcca402d5a67cc Mon Sep 17 00:00:00 2001 From: CPerezz Date: Thu, 22 Jun 2023 08:35:13 +0200 Subject: [PATCH 046/100] fix: Include err variant to handle VirtualPolynomial --- src/errors.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/errors.rs b/src/errors.rs index 3a2eac21d..bcc578b6d 100644 --- a/src/errors.rs +++ b/src/errors.rs @@ -53,4 +53,8 @@ pub enum NovaError { /// returned when the consistency with public IO and assignment used fails #[error("IncorrectWitness")] IncorrectWitness, + + /// Tmp error for VirtualPolynomial artih error + #[error("VpArith")] + VpArith, } From 0f26a234c96457b3468df4b8a95a1d89b6c7433a Mon Sep 17 00:00:00 2001 From: CPerezz Date: Thu, 22 Jun 2023 11:46:02 +0200 Subject: [PATCH 047/100] change: Move CCS stuff to it's own module --- src/ccs/cccs.rs | 141 ++++++++++++++ src/{ccs.rs => ccs/mod.rs} | 102 +---------- src/utils.rs | 364 ++++++------------------------------- 3 files changed, 195 insertions(+), 412 deletions(-) create mode 100644 src/ccs/cccs.rs rename src/{ccs.rs => ccs/mod.rs} (79%) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs new file mode 100644 index 000000000..3d27ee781 --- /dev/null +++ b/src/ccs/cccs.rs @@ -0,0 +1,141 @@ +use crate::hypercube::BooleanHypercube; +use crate::spartan::math::Math; +use crate::spartan::polynomial::MultilinearPolynomial; +use crate::utils::bit_decompose; +use crate::{ + constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, + errors::NovaError, + gadgets::{ + nonnative::{bignat::nat_to_limbs, util::f_to_nat}, + utils::scalar_as_base, + }, + r1cs::{R1CSInstance, R1CSShape, R1CSWitness, R1CS}, + traits::{ + commitment::CommitmentEngineTrait, commitment::CommitmentTrait, AbsorbInROTrait, Group, ROTrait, + }, + utils::*, + Commitment, CommitmentKey, CE, +}; +use bitvec::vec; +use core::{cmp::max, marker::PhantomData}; +use ff::Field; +use flate2::{write::ZlibEncoder, Compression}; +use itertools::concat; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; +use sha3::{Digest, Sha3_256}; +use std::ops::{Add, Mul}; +use std::sync::Arc; + +use super::virtual_poly::VirtualPolynomial; +use super::CCSShape; + +/// A type that holds the shape of a Committed CCS (CCCS) instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CCCSShape { + // Sequence of sparse MLE polynomials in s+s' variables M_MLE1, ..., M_MLEt + pub(crate) M_MLE: Vec>, + + pub(crate) ccs: CCSShape, +} + +/// A type that holds a CCCS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] 
+pub struct CCCSInstance { + // Commitment to a multilinear polynomial in s' - 1 variables + pub(crate) C: Commitment, + + // $x in F^l$ + pub(crate) x: Vec, +} + +// NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` +/// A type that holds a witness for a given CCCS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct CCCSWitness { + // Multilinear polynomial w_mle in s' - 1 variables + pub(crate) w_mle: Vec, +} + +impl CCCSShape { + // TODO: Sanity check this + pub fn compute_sum_Mz( + &self, + M_j: &MultilinearPolynomial, + z: &MultilinearPolynomial, + s_prime: usize, + ) -> MultilinearPolynomial { + assert_eq!(M_j.get_num_vars(), s_prime); + assert_eq!(z.get_num_vars(), s_prime); + + let num_vars = M_j.get_num_vars(); + let two_to_num_vars = (2_usize).pow(num_vars as u32); + let mut result_coefficients = Vec::with_capacity(two_to_num_vars); + + for i in 0..two_to_num_vars { + let r = bit_decompose(i as u64, num_vars) + .into_iter() + .map(|bit| G::Scalar::from(if bit { 1 } else { 0 })) + .collect::>(); + + let value = M_j.evaluate(&r) * z.evaluate(&r); + result_coefficients.push(value); + } + + MultilinearPolynomial::new(result_coefficients) + } + + // Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) + // polynomial over x + pub fn compute_q( + &self, + z: &Vec, + ) -> Result, &'static str> { + // XXX: Do we need to instrument this to use s_prime as n_vars somehow? + let z_mle = MultilinearPolynomial::new(z.clone()); + if z_mle.get_num_vars() != self.ccs.s_prime { + return Err("z_mle number of variables does not match ccs.s_prime"); + } + let mut q = MultilinearPolynomial::new(vec![G::Scalar::ZERO; self.ccs.s]); + + for i in 0..self.ccs.q { + let mut prod = MultilinearPolynomial::new(vec![G::Scalar::ONE; self.ccs.s]); + + for j in &self.ccs.S[i] { + let M_j = sparse_matrix_to_mlp(&self.ccs.M[*j]); + + let sum_Mz = self.compute_sum_Mz(&M_j, &z_mle, self.ccs.s_prime); + + // Fold this sum into the running product + prod = prod.mul(sum_Mz)?; + } + + // Multiply the product by the coefficient c_i + prod = prod.scalar_mul(&self.ccs.c[i]); + + // Add it to the running sum + q = q.add(prod)?; + } + + Ok(q) + + // Similar logic in Spartan + // let (mut Az, mut Bz, mut Cz) = pk.S.multiply_vec(&z)?; + //poly_Az: MultilinearPolynomial::new(Az.clone()), + } +} + + /// Computes Q(x) = eq(beta, x) * q(x) + /// = eq(beta, x) * \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) + /// polynomial over x + pub fn compute_Q( + &self, + z: &Vec, + beta: &[G::Scalar], + ) -> Result, NovaError> { + let q = self.compute_q(z)?; + q.build_f_hat(beta) + } +} diff --git a/src/ccs.rs b/src/ccs/mod.rs similarity index 79% rename from src/ccs.rs rename to src/ccs/mod.rs index 357b4d830..fb0c84327 100644 --- a/src/ccs.rs +++ b/src/ccs/mod.rs @@ -31,6 +31,9 @@ use serde::{Deserialize, Serialize}; use sha3::{Digest, Sha3_256}; use std::ops::{Add, Mul}; +mod cccs; +pub(crate) mod virtual_poly; + // TODO: Committed CCS using MLE (see src/spartan/pp.rs) // TODO: Linearized CCS struct and methods, separate struct similar to RelaxedR1CS @@ -88,35 +91,6 @@ pub struct CCSInstance { pub(crate) x: Vec, } -/// A type that holds the shape of a Committed CCS (CCCS) instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct CCCSShape { - // Sequence of sparse MLE polynomials in s+s' variables M_MLE1, ..., M_MLEt - pub(crate) M_MLE: Vec>, - 
- pub(crate) ccs: CCSShape, -} - -/// A type that holds a CCCS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct CCCSInstance { - // Commitment to a multilinear polynomial in s' - 1 variables - pub(crate) C: Commitment, - - // $x in F^l$ - pub(crate) x: Vec, -} - -// NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` -/// A type that holds a witness for a given CCCS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct CCCSWitness { - // Multilinear polynomial w_mle in s' - 1 variables - pub(crate) w_mle: Vec, -} - /// A type that holds a LCCCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] @@ -331,76 +305,6 @@ impl CCSInstance { } } -impl CCCSShape { - // TODO: Sanity check this - pub fn compute_sum_Mz( - &self, - M_j: &MultilinearPolynomial, - z: &MultilinearPolynomial, - s_prime: usize, - ) -> MultilinearPolynomial { - assert_eq!(M_j.get_num_vars(), s_prime); - assert_eq!(z.get_num_vars(), s_prime); - - let num_vars = M_j.get_num_vars(); - let two_to_num_vars = (2_usize).pow(num_vars as u32); - let mut result_coefficients = Vec::with_capacity(two_to_num_vars); - - for i in 0..two_to_num_vars { - let r = bit_decompose(i as u64, num_vars) - .into_iter() - .map(|bit| G::Scalar::from(if bit { 1 } else { 0 })) - .collect::>(); - - let value = M_j.evaluate(&r) * z.evaluate(&r); - result_coefficients.push(value); - } - - MultilinearPolynomial::new(result_coefficients) - } - - // XXX: Take below and util functions with a grain of salt, need to sanity check - - // Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) - // polynomial over x - pub fn compute_q( - &self, - z: &Vec, - ) -> Result, &'static str> { - // XXX: Do we need to instrument this to use s_prime as n_vars somehow? - let z_mle = MultilinearPolynomial::new(z.clone()); - if z_mle.get_num_vars() != self.ccs.s_prime { - return Err("z_mle number of variables does not match ccs.s_prime"); - } - let mut q = MultilinearPolynomial::new(vec![G::Scalar::ZERO; self.ccs.s]); - - for i in 0..self.ccs.q { - let mut prod = MultilinearPolynomial::new(vec![G::Scalar::ONE; self.ccs.s]); - - for j in &self.ccs.S[i] { - let M_j = sparse_matrix_to_mlp(&self.ccs.M[*j]); - - let sum_Mz = self.compute_sum_Mz(&M_j, &z_mle, self.ccs.s_prime); - - // Fold this sum into the running product - prod = prod.mul(sum_Mz)?; - } - - // Multiply the product by the coefficient c_i - prod = prod.scalar_mul(&self.ccs.c[i]); - - // Add it to the running sum - q = q.add(prod)?; - } - - Ok(q) - - // Similar logic in Spartan - // let (mut Az, mut Bz, mut Cz) = pk.S.multiply_vec(&z)?; - //poly_Az: MultilinearPolynomial::new(Az.clone()), - } -} - #[cfg(test)] pub mod test { use super::*; diff --git a/src/utils.rs b/src/utils.rs index f8f0f6103..19f24f30a 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,11 +1,12 @@ //! Basic utils -use std::{cmp::max, collections::HashMap, marker::PhantomData, ops::Add, sync::Arc}; +use std::sync::Arc; use crate::errors::NovaError; use crate::spartan::polynomial::MultilinearPolynomial; use crate::traits::Group; use ff::{Field, PrimeField}; -use rayon::prelude::{IndexedParallelIterator, IntoParallelRefMutIterator, ParallelIterator}; +use rand_core::RngCore; +use rayon::prelude::{IntoParallelRefMutIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; /// A matrix structure represented on a sparse form. 
@@ -205,326 +206,63 @@ pub fn bit_decompose(input: u64, num_var: usize) -> Vec { res } -// A bit of collage-programming here. -// As a tmp way to have multilinear polynomial product+addition. -// The idea is to re-evaluate once everything works and decide if we replace this code -// by something else. -// -// THIS CODE HAS BEEN TAKEN FROM THE ESPRESSO SYSTEMS LIB: -// -// -#[rustfmt::skip] -/// A virtual polynomial is a sum of products of multilinear polynomials; -/// where the multilinear polynomials are stored via their multilinear -/// extensions: `(coefficient, DenseMultilinearExtension)` -/// -/// * Number of products n = `polynomial.products.len()`, -/// * Number of multiplicands of ith product m_i = -/// `polynomial.products[i].1.len()`, -/// * Coefficient of ith product c_i = `polynomial.products[i].0` -/// -/// The resulting polynomial is -/// -/// $$ \sum_{i=0}^{n} c_i \cdot \prod_{j=0}^{m_i} P_{ij} $$ -/// -/// Example: -/// f = c0 * f0 * f1 * f2 + c1 * f3 * f4 -/// where f0 ... f4 are multilinear polynomials -/// -/// - flattened_ml_extensions stores the multilinear extension representation of -/// f0, f1, f2, f3 and f4 -/// - products is -/// \[ -/// (c0, \[0, 1, 2\]), -/// (c1, \[3, 4\]) -/// \] -/// - raw_pointers_lookup_table maps fi to i -/// -#[derive(Clone, Debug, Default, PartialEq)] -pub struct VirtualPolynomial { - /// Aux information about the multilinear polynomial - pub aux_info: VPAuxInfo, - /// list of reference to products (as usize) of multilinear extension - pub products: Vec<(F, Vec)>, - /// Stores multilinear extensions in which product multiplicand can refer - /// to. - pub flattened_ml_extensions: Vec>>, - /// Pointers to the above poly extensions - raw_pointers_lookup_table: HashMap<*const MultilinearPolynomial, usize>, -} - -#[derive(Clone, Debug, Default, PartialEq, Eq)] -/// Auxiliary information about the multilinear polynomial -pub struct VPAuxInfo { - /// max number of multiplicands in each product - pub max_degree: usize, - /// number of variables of the polynomial - pub num_variables: usize, - /// Associated field - #[doc(hidden)] - pub phantom: PhantomData, -} - -impl Add for &VirtualPolynomial { - type Output = VirtualPolynomial; - fn add(self, other: &VirtualPolynomial) -> Self::Output { - let mut res = self.clone(); - for products in other.products.iter() { - let cur: Vec>> = products - .1 - .iter() - .map(|&x| other.flattened_ml_extensions[x].clone()) - .collect(); - - res - .add_mle_list(cur, products.0) - .expect("add product failed"); - } - res - } -} - -// TODO: convert this into a trait -impl VirtualPolynomial { - /// Creates an empty virtual polynomial with `num_variables`. - pub fn new(num_variables: usize) -> Self { - VirtualPolynomial { - aux_info: VPAuxInfo { - max_degree: 0, - num_variables, - phantom: PhantomData::default(), - }, - products: Vec::new(), - flattened_ml_extensions: Vec::new(), - raw_pointers_lookup_table: HashMap::new(), - } - } - - /// Creates an new virtual polynomial from a MLE and its coefficient. 
- pub fn new_from_mle(mle: &Arc>, coefficient: F) -> Self { - let mle_ptr: *const MultilinearPolynomial = Arc::as_ptr(mle); - let mut hm = HashMap::new(); - hm.insert(mle_ptr, 0); - - VirtualPolynomial { - aux_info: VPAuxInfo { - // The max degree is the max degree of any individual variable - max_degree: 1, - num_variables: mle.get_num_vars(), - phantom: PhantomData::default(), - }, - // here `0` points to the first polynomial of `flattened_ml_extensions` - products: vec![(coefficient, vec![0])], - flattened_ml_extensions: vec![mle.clone()], - raw_pointers_lookup_table: hm, - } - } - - /// Add a product of list of multilinear extensions to self - /// Returns an error if the list is empty, or the MLE has a different - /// `num_vars` from self. - /// - /// The MLEs will be multiplied together, and then multiplied by the scalar - /// `coefficient`. - pub fn add_mle_list( - &mut self, - mle_list: impl IntoIterator>>, - coefficient: F, - ) -> Result<(), NovaError> { - let mle_list: Vec>> = mle_list.into_iter().collect(); - let mut indexed_product = Vec::with_capacity(mle_list.len()); - - if mle_list.is_empty() { - return Err(NovaError::VpArith); - } - - self.aux_info.max_degree = max(self.aux_info.max_degree, mle_list.len()); - - for mle in mle_list { - if mle.get_num_vars() != self.aux_info.num_variables { - return Err(NovaError::VpArith); - } - - let mle_ptr: *const MultilinearPolynomial = Arc::as_ptr(&mle); - if let Some(index) = self.raw_pointers_lookup_table.get(&mle_ptr) { - indexed_product.push(*index) - } else { - let curr_index = self.flattened_ml_extensions.len(); - self.flattened_ml_extensions.push(mle.clone()); - self.raw_pointers_lookup_table.insert(mle_ptr, curr_index); - indexed_product.push(curr_index); - } - } - self.products.push((coefficient, indexed_product)); - Ok(()) - } - - /// Multiple the current VirtualPolynomial by an MLE: - /// - add the MLE to the MLE list; - /// - multiple each product by MLE and its coefficient. - /// Returns an error if the MLE has a different `num_vars` from self. - pub fn mul_by_mle( - &mut self, - mle: Arc>, - coefficient: F, - ) -> Result<(), NovaError> { - if mle.get_num_vars() != self.aux_info.num_variables { - return Err(NovaError::VpArith); - } - - let mle_ptr: *const MultilinearPolynomial = Arc::as_ptr(&mle); - - // check if this mle already exists in the virtual polynomial - let mle_index = match self.raw_pointers_lookup_table.get(&mle_ptr) { - Some(&p) => p, - None => { - self - .raw_pointers_lookup_table - .insert(mle_ptr, self.flattened_ml_extensions.len()); - self.flattened_ml_extensions.push(mle); - self.flattened_ml_extensions.len() - 1 - } - }; - - for (prod_coef, indices) in self.products.iter_mut() { - // - add the MLE to the MLE list; - // - multiple each product by MLE and its coefficient. - indices.push(mle_index); - *prod_coef *= coefficient; - } - - // increase the max degree by one as the MLE has degree 1. - self.aux_info.max_degree += 1; - - Ok(()) - } - - /// Given virtual polynomial `p(x)` and scalar `s`, compute `s*p(x)` - pub fn scalar_mul(&mut self, s: &F) { - for (prod_coef, _) in self.products.iter_mut() { - *prod_coef *= s; +/// Sample a random list of multilinear polynomials. +/// Returns +/// - the list of polynomials, +/// - its sum of polynomial evaluations over the boolean hypercube. 
+pub fn random_mle_list( + nv: usize, + degree: usize, + mut rng: &mut R, +) -> (Vec>>, F) { + let mut multiplicands = Vec::with_capacity(degree); + for _ in 0..degree { + multiplicands.push(Vec::with_capacity(1 << nv)) + } + let mut sum = F::ZERO; + + for _ in 0..(1 << nv) { + let mut product = F::ONE; + + for e in multiplicands.iter_mut() { + let val = F::random(&mut rng); + e.push(val); + product *= val; } + sum += product; } - /// Evaluate the virtual polynomial at point `point`. - /// Returns an error is point.len() does not match `num_variables`. - pub fn evaluate(&self, point: &[F]) -> Result { - if self.aux_info.num_variables != point.len() { - return Err(NovaError::VpArith); - } - - // Evaluate all the MLEs at `point` - let evals: Vec = self - .flattened_ml_extensions - .iter() - .map(|x| x.evaluate(point)) - .collect(); - - let res = self - .products - .iter() - .map(|(c, p)| *c * p.iter().map(|&i| evals[i]).product::()) - .sum(); + let list = multiplicands + .into_iter() + .map(|x| Arc::new(MultilinearPolynomial::new(x))) + .collect(); - Ok(res) - } + (list, sum) +} - // Input poly f(x) and a random vector r, output - // \hat f(x) = \sum_{x_i \in eval_x} f(x_i) eq(x, r) - // where - // eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) - // - // This function is used in ZeroCheck. - pub fn build_f_hat(&self, r: &[F]) -> Result { - if self.aux_info.num_variables != r.len() { - return Err(NovaError::VpArith); +// Build a randomize list of mle-s whose sum is zero. +pub fn random_zero_mle_list( + nv: usize, + degree: usize, + mut rng: &mut R, +) -> Vec>> { + let mut multiplicands = Vec::with_capacity(degree); + for _ in 0..degree { + multiplicands.push(Vec::with_capacity(1 << nv)) + } + for _ in 0..(1 << nv) { + multiplicands[0].push(F::ZERO); + for e in multiplicands.iter_mut().skip(1) { + e.push(F::random(&mut rng)); } - - let eq_x_r = build_eq_x_r(r)?; - let mut res = self.clone(); - res.mul_by_mle(eq_x_r, F::ONE)?; - - Ok(res) } -} -/// This function build the eq(x, r) polynomial for any given r. -/// -/// Evaluate -/// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) -/// over r, which is -/// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) -pub fn build_eq_x_r(r: &[F]) -> Result>, NovaError> { - let evals = build_eq_x_r_vec(r)?; - let mle = MultilinearPolynomial::new(evals); - - Ok(Arc::new(mle)) -} - -/// This function build the eq(x, r) polynomial for any given r, and output the -/// evaluation of eq(x, r) in its vector form. -/// -/// Evaluate -/// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) -/// over r, which is -/// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) -pub fn build_eq_x_r_vec(r: &[F]) -> Result, NovaError> { - // we build eq(x,r) from its evaluations - // we want to evaluate eq(x,r) over x \in {0, 1}^num_vars - // for example, with num_vars = 4, x is a binary vector of 4, then - // 0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3) - // 1 0 0 0 -> r0 * (1-r1) * (1-r2) * (1-r3) - // 0 1 0 0 -> (1-r0) * r1 * (1-r2) * (1-r3) - // 1 1 0 0 -> r0 * r1 * (1-r2) * (1-r3) - // .... - // 1 1 1 1 -> r0 * r1 * r2 * r3 - // we will need 2^num_var evaluations - - let mut eval = Vec::new(); - build_eq_x_r_helper(r, &mut eval)?; - - Ok(eval) -} - -/// A helper function to build eq(x, r) recursively. -/// This function takes `r.len()` steps, and for each step it requires a maximum -/// `r.len()-1` multiplications. 
-fn build_eq_x_r_helper(r: &[F], buf: &mut Vec) -> Result<(), NovaError> { - if r.is_empty() { - return Err(NovaError::VpArith); - } else if r.len() == 1 { - // initializing the buffer with [1-r_0, r_0] - buf.push(F::ONE - r[0]); - buf.push(r[0]); - } else { - build_eq_x_r_helper(&r[1..], buf)?; - - // suppose at the previous step we received [b_1, ..., b_k] - // for the current step we will need - // if x_0 = 0: (1-r0) * [b_1, ..., b_k] - // if x_0 = 1: r0 * [b_1, ..., b_k] - // let mut res = vec![]; - // for &b_i in buf.iter() { - // let tmp = r[0] * b_i; - // res.push(b_i - tmp); - // res.push(tmp); - // } - // *buf = res; - - let mut res = vec![F::ZERO; buf.len() << 1]; - res.par_iter_mut().enumerate().for_each(|(i, val)| { - let bi = buf[i >> 1]; - let tmp = r[0] * bi; - if i & 1 == 0 { - *val = bi - tmp; - } else { - *val = tmp; - } - }); - *buf = res; - } + let list = multiplicands + .into_iter() + .map(|x| Arc::new(MultilinearPolynomial::new(x))) + .collect(); - Ok(()) + list } #[cfg(test)] From 68afd5ea04e3a9207db08cd237d508ea020b7e8e Mon Sep 17 00:00:00 2001 From: CPerezz Date: Thu, 22 Jun 2023 11:46:23 +0200 Subject: [PATCH 048/100] add: Migrate VirtualPolynomial from espresso/HyperPlonk --- Cargo.toml | 1 + src/ccs/virtual_poly.rs | 498 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 499 insertions(+) create mode 100644 src/ccs/virtual_poly.rs diff --git a/Cargo.toml b/Cargo.toml index 6eae31f6d..c91d7895a 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -32,6 +32,7 @@ flate2 = "1.0" bitvec = "1.0" byteorder = "1.4.3" thiserror = "1.0" +rand = "0.8.4" halo2curves = { version="0.1.0", features = [ "derive_serde" ] } [target.'cfg(any(target_arch = "x86_64", target_arch = "aarch64"))'.dependencies] diff --git a/src/ccs/virtual_poly.rs b/src/ccs/virtual_poly.rs new file mode 100644 index 000000000..9a2873e0f --- /dev/null +++ b/src/ccs/virtual_poly.rs @@ -0,0 +1,498 @@ +use crate::hypercube::BooleanHypercube; +use crate::spartan::math::Math; +use crate::spartan::polynomial::MultilinearPolynomial; +use crate::utils::bit_decompose; +use crate::{ + constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, + errors::NovaError, + gadgets::{ + nonnative::{bignat::nat_to_limbs, util::f_to_nat}, + utils::scalar_as_base, + }, + r1cs::{R1CSInstance, R1CSShape, R1CSWitness, R1CS}, + traits::{ + commitment::CommitmentEngineTrait, commitment::CommitmentTrait, AbsorbInROTrait, Group, ROTrait, + }, + utils::*, + Commitment, CommitmentKey, CE, +}; +use bitvec::vec; +use core::{cmp::max, marker::PhantomData}; +use ff::{Field, PrimeField}; +use flate2::{write::ZlibEncoder, Compression}; +use itertools::concat; +use rand::Rng; +use rand_core::RngCore; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; +use sha3::{Digest, Sha3_256}; +use std::collections::HashMap; +use std::ops::{Add, Mul}; +use std::sync::Arc; + +// A bit of collage-programming here. +// As a tmp way to have multilinear polynomial product+addition. +// The idea is to re-evaluate once everything works and decide if we replace this code +// by something else. 
+// +// THIS CODE HAS BEEN TAKEN FpOM THE ESPRESSO SYSTEMS LIB: +// +// +#[rustfmt::skip] +/// A virtual polynomial is a sum of products of multilinear polynomials; +/// where the multilinear polynomials are stored via their multilinear +/// extensions: `(coefficient, DenseMultilinearExtension)` +/// +/// * Number of products n = `polynomial.products.len()`, +/// * Number of multiplicands of ith product m_i = +/// `polynomial.products[i].1.len()`, +/// * Coefficient of ith product c_i = `polynomial.products[i].0` +/// +/// The resulting polynomial is +/// +/// $$ \sum_{i=0}^{n} c_i \cdot \prod_{j=0}^{m_i} P_{ij} $$ +/// +/// Example: +/// f = c0 * f0 * f1 * f2 + c1 * f3 * f4 +/// where f0 ... f4 are multilinear polynomials +/// +/// - flattened_ml_extensions stores the multilinear extension representation of +/// f0, f1, f2, f3 and f4 +/// - products is +/// \[ +/// (c0, \[0, 1, 2\]), +/// (c1, \[3, 4\]) +/// \] +/// - raw_pointers_lookup_table maps fi to i +/// +#[derive(Clone, Debug, Default, PartialEq)] +pub struct VirtualPolynomial { + /// Aux information about the multilinear polynomial + pub aux_info: VPAuxInfo, + /// list of reference to products (as usize) of multilinear extension + pub products: Vec<(F, Vec)>, + /// Stores multilinear extensions in which product multiplicand can refer + /// to. + pub flattened_ml_extensions: Vec>>, + /// Pointers to the above poly extensions + raw_pointers_lookup_table: HashMap<*const MultilinearPolynomial, usize>, +} + +#[derive(Clone, Debug, Default, PartialEq, Eq)] +/// Auxiliary information about the multilinear polynomial +pub struct VPAuxInfo { + /// max number of multiplicands in each product + pub max_degree: usize, + /// number of variables of the polynomial + pub num_variables: usize, + /// Associated field + #[doc(hidden)] + pub phantom: PhantomData, +} + +impl Add for &VirtualPolynomial { + type Output = VirtualPolynomial; + fn add(self, other: &VirtualPolynomial) -> Self::Output { + let mut res = self.clone(); + for products in other.products.iter() { + let cur: Vec>> = products + .1 + .iter() + .map(|&x| other.flattened_ml_extensions[x].clone()) + .collect(); + + res + .add_mle_list(cur, products.0) + .expect("add product failed"); + } + res + } +} + +// TODO: convert this into a trait +impl VirtualPolynomial { + /// Creates an empty virtual polynomial with `num_variables`. + pub fn new(num_variables: usize) -> Self { + VirtualPolynomial { + aux_info: VPAuxInfo { + max_degree: 0, + num_variables, + phantom: PhantomData::default(), + }, + products: Vec::new(), + flattened_ml_extensions: Vec::new(), + raw_pointers_lookup_table: HashMap::new(), + } + } + + /// Creates an new virtual polynomial Fpom a MLE and its coefficient. + pub fn new_from_mle(mle: &Arc>, coefficient: F) -> Self { + let mle_ptr: *const MultilinearPolynomial = Arc::as_ptr(mle); + let mut hm = HashMap::new(); + hm.insert(mle_ptr, 0); + + VirtualPolynomial { + aux_info: VPAuxInfo { + // The max degree is the max degree of any individual variable + max_degree: 1, + num_variables: mle.get_num_vars(), + phantom: PhantomData::default(), + }, + // here `0` points to the first polynomial of `flattened_ml_extensions` + products: vec![(coefficient, vec![0])], + flattened_ml_extensions: vec![mle.clone()], + raw_pointers_lookup_table: hm, + } + } + + /// Add a product of list of multilinear extensions to self + /// Returns an error if the list is empty, or the MLE has a different + /// `num_vars` Fpom self. 
+ /// + /// The MLEs will be multiplied together, and then multiplied by the scalar + /// `coefficient`. + pub fn add_mle_list( + &mut self, + mle_list: impl IntoIterator>>, + coefficient: F, + ) -> Result<(), NovaError> { + let mle_list: Vec>> = mle_list.into_iter().collect(); + let mut indexed_product = Vec::with_capacity(mle_list.len()); + + if mle_list.is_empty() { + return Err(NovaError::VpArith); + } + + self.aux_info.max_degree = max(self.aux_info.max_degree, mle_list.len()); + + for mle in mle_list { + if mle.get_num_vars() != self.aux_info.num_variables { + return Err(NovaError::VpArith); + } + + let mle_ptr: *const MultilinearPolynomial = Arc::as_ptr(&mle); + if let Some(index) = self.raw_pointers_lookup_table.get(&mle_ptr) { + indexed_product.push(*index) + } else { + let curr_index = self.flattened_ml_extensions.len(); + self.flattened_ml_extensions.push(mle.clone()); + self.raw_pointers_lookup_table.insert(mle_ptr, curr_index); + indexed_product.push(curr_index); + } + } + self.products.push((coefficient, indexed_product)); + Ok(()) + } + + /// Multiple the current VirtualPolynomial by an MLE: + /// - add the MLE to the MLE list; + /// - multiple each product by MLE and its coefficient. + /// Returns an error if the MLE has a different `num_vars` Fpom self. + pub fn mul_by_mle( + &mut self, + mle: Arc>, + coefficient: F, + ) -> Result<(), NovaError> { + if mle.get_num_vars() != self.aux_info.num_variables { + return Err(NovaError::VpArith); + } + + let mle_ptr: *const MultilinearPolynomial = Arc::as_ptr(&mle); + + // check if this mle already exists in the virtual polynomial + let mle_index = match self.raw_pointers_lookup_table.get(&mle_ptr) { + Some(&p) => p, + None => { + self + .raw_pointers_lookup_table + .insert(mle_ptr, self.flattened_ml_extensions.len()); + self.flattened_ml_extensions.push(mle); + self.flattened_ml_extensions.len() - 1 + } + }; + + for (prod_coef, indices) in self.products.iter_mut() { + // - add the MLE to the MLE list; + // - multiple each product by MLE and its coefficient. + indices.push(mle_index); + *prod_coef *= coefficient; + } + + // increase the max degree by one as the MLE has degree 1. + self.aux_info.max_degree += 1; + + Ok(()) + } + + /// Given virtual polynomial `p(x)` and scalar `s`, compute `s*p(x)` + pub fn scalar_mul(&mut self, s: &F) { + for (prod_coef, _) in self.products.iter_mut() { + *prod_coef *= s; + } + } + + /// Evaluate the virtual polynomial at point `point`. + /// Returns an error is point.len() does not match `num_variables`. + pub fn evaluate(&self, point: &[F]) -> Result { + if self.aux_info.num_variables != point.len() { + return Err(NovaError::VpArith); + } + + // Evaluate all the MLEs at `point` + let evals: Vec = self + .flattened_ml_extensions + .iter() + .map(|x| x.evaluate(point)) + .collect(); + + let res = self + .products + .iter() + .map(|(c, p)| *c * p.iter().map(|&i| evals[i]).product::()) + .sum(); + + Ok(res) + } + + /// Sample a random virtual polynomial, return the polynomial and its sum. 
+ pub fn rand( + nv: usize, + num_multiplicands_range: (usize, usize), + num_products: usize, + mut rng: &mut R, + ) -> Result<(Self, F), NovaError> { + let mut sum = F::ZERO; + let mut poly = VirtualPolynomial::new(nv); + for _ in 0..num_products { + let coefficient = F::random(&mut rng); + let num_multiplicands = rng.gen_range(num_multiplicands_range.0..num_multiplicands_range.1); + let (product, product_sum) = random_mle_list(nv, num_multiplicands, rng); + + poly.add_mle_list(product.into_iter(), coefficient)?; + sum += product_sum * coefficient; + } + Ok((poly, sum)) + } + + /// Sample a random virtual polynomial that evaluates to zero everywhere + /// over the boolean hypercube. + pub fn rand_zero( + nv: usize, + num_multiplicands_range: (usize, usize), + num_products: usize, + mut rng: &mut R, + ) -> Result { + let coefficient = F::random(&mut rng); + let mut poly = VirtualPolynomial::new(nv); + for _ in 0..num_products { + let num_multiplicands = rng.gen_range(num_multiplicands_range.0..num_multiplicands_range.1); + let product = random_zero_mle_list(nv, num_multiplicands, rng); + + poly.add_mle_list(product.into_iter(), coefficient)?; + } + + Ok(poly) + } + + // Input poly f(x) and a random vector r, output + // \hat f(x) = \sum_{x_i \in eval_x} f(x_i) eq(x, r) + // where + // eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) + // + // This function is used in ZeroCheck. + pub fn build_f_hat(&self, r: &[F]) -> Result { + if self.aux_info.num_variables != r.len() { + return Err(NovaError::VpArith); + } + + let eq_x_r = build_eq_x_r(r)?; + let mut res = self.clone(); + res.mul_by_mle(eq_x_r, F::ONE)?; + + Ok(res) + } +} + +/// This function build the eq(x, r) polynomial for any given r. +/// +/// Evaluate +/// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) +/// over r, which is +/// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) +pub fn build_eq_x_r(r: &[F]) -> Result>, NovaError> { + let evals = build_eq_x_r_vec(r)?; + let mle = MultilinearPolynomial::new(evals); + + Ok(Arc::new(mle)) +} + +/// This function build the eq(x, r) polynomial for any given r, and output the +/// evaluation of eq(x, r) in its vector form. +/// +/// Evaluate +/// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) +/// over r, which is +/// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) +pub fn build_eq_x_r_vec(r: &[F]) -> Result, NovaError> { + // we build eq(x,r) Fpom its evaluations + // we want to evaluate eq(x,r) over x \in {0, 1}^num_vars + // for example, with num_vars = 4, x is a binary vector of 4, then + // 0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3) + // 1 0 0 0 -> r0 * (1-r1) * (1-r2) * (1-r3) + // 0 1 0 0 -> (1-r0) * r1 * (1-r2) * (1-r3) + // 1 1 0 0 -> r0 * r1 * (1-r2) * (1-r3) + // .... + // 1 1 1 1 -> r0 * r1 * r2 * r3 + // we will need 2^num_var evaluations + + let mut eval = Vec::new(); + build_eq_x_r_helper(r, &mut eval)?; + + Ok(eval) +} + +/// A helper function to build eq(x, r) recursively. +/// This function takes `r.len()` steps, and for each step it requires a maximum +/// `r.len()-1` multiplications. 
+fn build_eq_x_r_helper(r: &[F], buf: &mut Vec) -> Result<(), NovaError> { + if r.is_empty() { + return Err(NovaError::VpArith); + } else if r.len() == 1 { + // initializing the buffer with [1-r_0, r_0] + buf.push(F::ONE - r[0]); + buf.push(r[0]); + } else { + build_eq_x_r_helper(&r[1..], buf)?; + + // suppose at the previous step we received [b_1, ..., b_k] + // for the current step we will need + // if x_0 = 0: (1-r0) * [b_1, ..., b_k] + // if x_0 = 1: r0 * [b_1, ..., b_k] + // let mut res = vec![]; + // for &b_i in buf.iter() { + // let tmp = r[0] * b_i; + // res.push(b_i - tmp); + // res.push(tmp); + // } + // *buf = res; + + let mut res = vec![F::ZERO; buf.len() << 1]; + res.par_iter_mut().enumerate().for_each(|(i, val)| { + let bi = buf[i >> 1]; + let tmp = r[0] * bi; + if i & 1 == 0 { + *val = bi - tmp; + } else { + *val = tmp; + } + }); + *buf = res; + } + + Ok(()) +} + +#[cfg(test)] +mod test { + use super::*; + use pasta_curves::Fp; + use rand_core::OsRng; + + #[test] + fn test_virtual_polynomial_additions() -> Result<(), NovaError> { + let mut rng = OsRng; + for nv in 2..5 { + for num_products in 2..5 { + let base: Vec = (0..nv).map(|_| Fp::random(&mut rng)).collect(); + + let (a, _a_sum) = VirtualPolynomial::::rand(nv, (2, 3), num_products, &mut rng)?; + let (b, _b_sum) = VirtualPolynomial::::rand(nv, (2, 3), num_products, &mut rng)?; + let c = &a + &b; + + assert_eq!( + a.evaluate(base.as_ref())? + b.evaluate(base.as_ref())?, + c.evaluate(base.as_ref())? + ); + } + } + + Ok(()) + } + + #[test] + fn test_virtual_polynomial_mul_by_mle() -> Result<(), NovaError> { + let mut rng = OsRng; + for nv in 2..5 { + for num_products in 2..5 { + let base: Vec = (0..nv).map(|_| Fp::random(&mut rng)).collect(); + + let (a, _a_sum) = VirtualPolynomial::::rand(nv, (2, 3), num_products, &mut rng)?; + let (b, _b_sum) = random_mle_list(nv, 1, &mut rng); + let b_mle = b[0].clone(); + let coeff = Fp::random(&mut rng); + let b_vp = VirtualPolynomial::new_from_mle(&b_mle, coeff); + + let mut c = a.clone(); + + c.mul_by_mle(b_mle, coeff)?; + + assert_eq!( + a.evaluate(base.as_ref())? * b_vp.evaluate(base.as_ref())?, + c.evaluate(base.as_ref())? + ); + } + } + + Ok(()) + } + + #[test] + fn test_eq_xr() { + let mut rng = OsRng; + for nv in 4..10 { + let r: Vec = (0..nv).map(|_| Fp::random(&mut rng)).collect(); + let eq_x_r = build_eq_x_r(r.as_ref()).unwrap(); + let eq_x_r2 = build_eq_x_r_for_test(r.as_ref()); + assert_eq!(eq_x_r, eq_x_r2); + } + } + + /// Naive method to build eq(x, r). + /// Only used for testing purpose. + // Evaluate + // eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) + // over r, which is + // eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) + fn build_eq_x_r_for_test(r: &[F]) -> Arc> { + // we build eq(x,r) Fpom its evaluations + // we want to evaluate eq(x,r) over x \in {0, 1}^num_vars + // for example, with num_vars = 4, x is a binary vector of 4, then + // 0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3) + // 1 0 0 0 -> r0 * (1-r1) * (1-r2) * (1-r3) + // 0 1 0 0 -> (1-r0) * r1 * (1-r2) * (1-r3) + // 1 1 0 0 -> r0 * r1 * (1-r2) * (1-r3) + // .... 
+ // 1 1 1 1 -> r0 * r1 * r2 * r3 + // we will need 2^num_var evaluations + + // First, we build array for {1 - r_i} + let one_minus_r: Vec = r.iter().map(|ri| F::ONE - ri).collect(); + + let num_var = r.len(); + let mut eval = vec![]; + + for i in 0..1 << num_var { + let mut current_eval = F::ONE; + let bit_sequence = bit_decompose(i, num_var); + + for (&bit, (ri, one_minus_ri)) in bit_sequence.iter().zip(r.iter().zip(one_minus_r.iter())) { + current_eval *= if bit { *ri } else { *one_minus_ri }; + } + eval.push(current_eval); + } + + let mle = MultilinearPolynomial::new(eval); + + Arc::new(mle) + } +} From f48dda8041b3b29341365c2d66207733b2930221 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Thu, 22 Jun 2023 11:55:38 +0200 Subject: [PATCH 049/100] fix: Correct q_computation & update to VirtualPoly usage --- src/ccs/cccs.rs | 64 ++++++++++++++++++++++++------------------------- 1 file changed, 32 insertions(+), 32 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index 3d27ee781..d1442582b 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -89,43 +89,43 @@ impl CCCSShape { // Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) // polynomial over x - pub fn compute_q( - &self, - z: &Vec, - ) -> Result, &'static str> { - // XXX: Do we need to instrument this to use s_prime as n_vars somehow? + pub fn compute_q(&self, z: &Vec) -> Result, NovaError> { let z_mle = MultilinearPolynomial::new(z.clone()); if z_mle.get_num_vars() != self.ccs.s_prime { - return Err("z_mle number of variables does not match ccs.s_prime"); + return Err(NovaError::VpArith); } - let mut q = MultilinearPolynomial::new(vec![G::Scalar::ZERO; self.ccs.s]); - - for i in 0..self.ccs.q { - let mut prod = MultilinearPolynomial::new(vec![G::Scalar::ONE; self.ccs.s]); - - for j in &self.ccs.S[i] { - let M_j = sparse_matrix_to_mlp(&self.ccs.M[*j]); - - let sum_Mz = self.compute_sum_Mz(&M_j, &z_mle, self.ccs.s_prime); - - // Fold this sum into the running product - prod = prod.mul(sum_Mz)?; - } - // Multiply the product by the coefficient c_i - prod = prod.scalar_mul(&self.ccs.c[i]); - - // Add it to the running sum - q = q.add(prod)?; - } - - Ok(q) - - // Similar logic in Spartan - // let (mut Az, mut Bz, mut Cz) = pk.S.multiply_vec(&z)?; - //poly_Az: MultilinearPolynomial::new(Az.clone()), + // Using `fold` requires to not have results inside. So we unwrap for now but + // a better approach is needed (we ca just keep the for loop otherwise.) + Ok((0..self.ccs.q).into_iter().fold( + VirtualPolynomial::::new(self.ccs.s), + |q, idx| { + let mut prod = VirtualPolynomial::::new(self.ccs.s); + + for j in &self.ccs.S[idx] { + let M_j = sparse_matrix_to_mlp(&self.ccs.M[*j]); + + let sum_Mz = self.compute_sum_Mz(&M_j, &z_mle, self.ccs.s_prime); + + // Fold this sum into the running product + if prod.products.is_empty() { + // If this is the first time we are adding something to this virtual polynomial, we need to + // explicitly add the products using add_mle_list() + // XXX is this true? 
improve API + prod + .add_mle_list([Arc::new(sum_Mz)], G::Scalar::ONE) + .unwrap(); + } else { + prod.mul_by_mle(Arc::new(sum_Mz), G::Scalar::ONE).unwrap(); + } + } + // Multiply by the product by the coefficient c_i + prod.scalar_mul(&self.ccs.c[idx]); + // Add it to the running sum + q.add(&prod) + }, + )) } -} /// Computes Q(x) = eq(beta, x) * q(x) /// = eq(beta, x) * \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) From 71668c71238c9b5fa8fc316119f5392af8cf0f0e Mon Sep 17 00:00:00 2001 From: CPerezz Date: Thu, 22 Jun 2023 22:03:57 +0200 Subject: [PATCH 050/100] add: Add CCS -> CCCS conversions --- src/ccs/mod.rs | 47 ++++++++++++++++++++++++++++++++++++++++++----- 1 file changed, 42 insertions(+), 5 deletions(-) diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index fb0c84327..f36907cb0 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -26,11 +26,14 @@ use core::{cmp::max, marker::PhantomData}; use ff::Field; use flate2::{write::ZlibEncoder, Compression}; use itertools::concat; +use rand_core::RngCore; use rayon::prelude::*; use serde::{Deserialize, Serialize}; use sha3::{Digest, Sha3_256}; use std::ops::{Add, Mul}; +use self::cccs::{CCCSInstance, CCCSShape, CCCSWitness}; + mod cccs; pub(crate) mod virtual_poly; @@ -108,13 +111,47 @@ pub struct LCCCSWitness { pub(crate) w_mle: Vec, } -impl CCS { +impl CCSShape { + pub(crate) fn to_cccs_shape(&self) -> CCCSShape { + let M_mle = self + .M + .iter() + .map(|matrix| sparse_matrix_to_mlp(matrix)) + .collect(); + CCCSShape { + M_MLE: M_mle, + ccs: self.clone(), + } + } + + // Transform the CCS instance into a CCCS instance by providing the required Commitment key. + pub fn to_cccs_artifacts( + &self, + rng: &mut R, + ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, + z: &[G::Scalar], + ) -> (CCCSInstance, CCCSWitness, CCCSShape) { + let w: Vec = z[(1 + self.l)..].to_vec(); + // XXX: API doesn't offer a way to handle this apparently? + // Need to investigate + let _r_w = G::Scalar::random(rng); + let C = <::CE as CommitmentEngineTrait>::commit(ck, &w); + + ( + CCCSInstance { + C, + x: z[1..(1 + self.l)].to_vec(), + }, + CCCSWitness { w_mle: w }, + self.to_cccs_shape(), + ) + } // TODO: Update commitment_key variables here? 
This is currently based on R1CS with M length /// Samples public parameters for the specified number of constraints and variables in an CCS - pub fn commitment_key(shape: &CCSShape) -> CommitmentKey { - let total_nz = shape.M.iter().fold(0, |acc, m| acc + m.0.len()); + pub fn commitment_key(&self) -> CommitmentKey { + let total_nz = self.M.iter().fold(0, |acc, m| acc + m.0.len()); - G::CE::setup(b"ck", max(max(shape.m, shape.t), total_nz)) + G::CE::setup(b"ck", max(max(self.m, self.t), total_nz)) } } @@ -386,7 +423,7 @@ pub mod test { let S = CCSShape::from_r1cs(S); // generate generators and ro constants - let _ck = CCS::::commitment_key(&S); + let _ck = S.commitment_key(); let _ro_consts = <::RO as ROTrait<::Base, ::Scalar>>::Constants::new(); From b2e8a6b7de491f6baefb27546b7c3be3fdba0f98 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Thu, 22 Jun 2023 23:47:32 +0200 Subject: [PATCH 051/100] change: Update/Improve a bit the HyperCube API --- src/hypercube.rs | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/src/hypercube.rs b/src/hypercube.rs index b1eec086b..5a96a7e39 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -4,18 +4,18 @@ use crate::utils::*; use ff::PrimeField; #[derive(Debug)] -pub struct BooleanHypercube { +pub struct BooleanHypercube { dimensions: usize, current: u64, max: u64, - coefficients: Vec, + coefficients: Vec, } -impl BooleanHypercube { - pub fn new(dimensions: usize, coefficients: Vec) -> Self { +impl BooleanHypercube { + pub fn new(dimensions: usize, coefficients: Vec) -> Self { assert!(coefficients.len() == 2_usize.pow(dimensions as u32)); - BooleanHypercube { + Self { dimensions, current: 0, max: 2_u32.pow(dimensions as u32) as u64, @@ -24,10 +24,10 @@ impl BooleanHypercube { } // Evaluate the multilinear polynomial at the given point - pub fn evaluate(&self, point: &[Scalar]) -> Scalar { + pub fn evaluate_at(&self, point: &[F]) -> F { assert!(point.len() == self.dimensions); - let mut result = Scalar::ZERO; + let mut result = F::ZERO; for i in 0..self.max as usize { let monomial = self.monomial(i, point); @@ -38,9 +38,9 @@ impl BooleanHypercube { } // This calculates a single monomial of the multilinear polynomial - fn monomial(&self, i: usize, point: &[Scalar]) -> Scalar { + fn monomial(&self, i: usize, point: &[F]) -> F { assert!(i < self.max as usize); - let mut result = Scalar::ONE; + let mut result = F::ONE; let bits = bit_decompose(i as u64, self.dimensions); @@ -95,6 +95,6 @@ mod tests { // The polynomial would be f(x, y, z) = 4x + 2y + z. // So, f(1, 1, 1) = 4*1 + 2*1 + 1 = 7. - assert_eq!(poly.evaluate(&point), Fp::from(7u64)); + assert_eq!(poly.evaluate_at(&point), Fp::from(7u64)); } } From 578431c7dc8e868e9d574cb65c297e9608f7c67e Mon Sep 17 00:00:00 2001 From: CPerezz Date: Fri, 23 Jun 2023 14:10:55 +0200 Subject: [PATCH 052/100] fix: Include n_cos & n_rows info in SparseMatrix This prevents having issues as shown in the tests now failing for MLE conversions. The issue is that empty rows/columns at the end of the matrixes cause the algorithms to actually fail. As we have a sense of the matrix dimensions only when we are in the context of CCS. But we don't when we're outside of it. 
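
To make the failure mode concrete, the following is a minimal, standalone
sketch (illustrative only, not part of this patch; `inferred_dims` is a
made-up helper) of how dimensions inferred from the largest populated index
under-count a matrix whose trailing rows and columns are all zero:

  // Infers (n_rows, n_cols) of a sparse matrix from its non-zero entries only.
  fn inferred_dims(coeffs: &[(usize, usize, u64)]) -> (usize, usize) {
    let rows = coeffs.iter().map(|c| c.0).max().map_or(0, |r| r + 1);
    let cols = coeffs.iter().map(|c| c.1).max().map_or(0, |c| c + 1);
    (rows, cols)
  }

  fn main() {
    // Non-zero entries of a matrix that is *meant* to be 4x6: the last row
    // and the last column carry no entries, so they are invisible here.
    let coeffs = vec![(0, 1, 1u64), (1, 3, 1), (2, 1, 1), (2, 4, 5)];
    assert_eq!(inferred_dims(&coeffs), (3, 5)); // under-counted as 3x5
  }

Carrying (n_rows, n_cols) explicitly next to the coefficients avoids this, at
the cost of having to supply them from the CCS context.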
--- src/utils.rs | 222 ++++++++++++++++++++++++--------------------------- 1 file changed, 103 insertions(+), 119 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 19f24f30a..ca7fb19b1 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -13,37 +13,52 @@ use serde::{Deserialize, Serialize}; /// First element is row index, second column, third value stored #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] -pub struct SparseMatrix(pub(crate) Vec<(usize, usize, G::Scalar)>); +pub struct SparseMatrix { + n_rows: usize, + n_cols: usize, + coeffs: Vec<(usize, usize, G::Scalar)>, +} impl SparseMatrix { - pub fn new() -> Self { - Self(vec![]) + pub fn new(n_rows: usize, n_cols: usize) -> Self { + Self { + n_rows, + n_cols, + coeffs: vec![], + } } - pub fn with_capacity(n: usize) -> Self { - Self(Vec::with_capacity(n)) + pub fn with_coeffs(n_rows: usize, n_cols: usize, coeffs: Vec<(usize, usize, G::Scalar)>) -> Self { + Self { + n_rows, + n_cols, + coeffs, + } } - // Find the maximum row index in the matrix + // Return the number of rows of this matrix. pub fn n_rows(&self) -> usize { - let max_row_idx = self - .0 - .iter() - .copied() - .map(|r| r.0) - .fold(usize::MIN, |a, b| a.max(b)); - max_row_idx + 1 + self.n_rows + } + + // Returns a mutable reference to the number of rows of this matrix. + pub fn n_rows_mut(&mut self) -> &mut usize { + &mut self.n_rows } - // Find the maximum column index in the matrix + // Return the number of cols of this matrix. pub fn n_cols(&self) -> usize { - let max_col_idx = self - .0 - .iter() - .copied() - .map(|r| r.1) - .fold(std::usize::MIN, |a, b| a.max(b)); - max_col_idx + 1 + self.n_cols + } + + // Returns a mutable reference to the number of cols of this matrix. + pub fn n_cols_mut(&mut self) -> &mut usize { + &mut self.n_cols + } + + // Return the non-0 coefficients of this matrix. 
+ pub fn coeffs(&self) -> &[(usize, usize, G::Scalar)] { + self.coeffs.as_slice() } pub(crate) fn is_valid( @@ -52,46 +67,22 @@ impl SparseMatrix { num_vars: usize, num_io: usize, ) -> Result<(), NovaError> { - let res = self - .0 - .iter() - .copied() - .map(|(row, col, _val)| { - if row >= num_cons || col > num_io + num_vars { - Err(NovaError::InvalidIndex) - } else { - Ok(()) - } - }) - .collect::, NovaError>>(); - - if res.is_err() { + if self.n_rows >= num_cons || self.n_cols > num_io + num_vars { Err(NovaError::InvalidIndex) } else { Ok(()) } } + // XXX: Double check this pub(crate) fn pad(&mut self, n: usize) { - let prev_n = self.n_cols(); - self.0.par_iter_mut().for_each(|(_, c, _)| { + let prev_n = self.n_cols; + self.coeffs.par_iter_mut().for_each(|(_, c, _)| { *c = if *c >= prev_n { *c + n - prev_n } else { *c }; }); } } -impl From> for SparseMatrix { - fn from(matrix: Vec<(usize, usize, G::Scalar)>) -> SparseMatrix { - SparseMatrix(matrix) - } -} - -impl From<&Vec<(usize, usize, G::Scalar)>> for SparseMatrix { - fn from(matrix: &Vec<(usize, usize, G::Scalar)>) -> SparseMatrix { - SparseMatrix(matrix.clone()) - } -} - pub fn vector_add(a: &Vec, b: &Vec) -> Vec { assert_eq!(a.len(), b.len(), "Vector addition with different lengths"); let mut res = Vec::with_capacity(a.len()); @@ -140,13 +131,8 @@ pub fn matrix_vector_product_sparse( matrix: &SparseMatrix, vector: &Vec, ) -> Vec { - assert_eq!( - matrix.n_cols(), - vector.len(), - "matrix cols != vector length" - ); - let mut res = vec![G::Scalar::ZERO; vector.len()]; - for &(row, col, value) in matrix.0.iter() { + let mut res = vec![G::Scalar::ZERO; 4]; + for &(row, col, value) in matrix.coeffs.iter() { res[row] += value * vector[col]; } res @@ -165,23 +151,14 @@ pub fn hadamard_product(a: &Vec, b: &Vec) -> Vec { pub fn sparse_matrix_to_mlp( matrix: &SparseMatrix, ) -> MultilinearPolynomial { - let n_rows = matrix.n_rows(); - let n_cols = matrix.n_cols(); + let n_rows = 4; + let n_cols = 6usize; - // Since n_rows and n_cols already account for 0 indexing, - // The total number of elements would be n_rows * n_cols - let total_elements: usize = n_rows * n_cols; - let n_vars: usize = total_elements.next_power_of_two().trailing_zeros() as usize; + let n_vars: usize = n_cols.next_power_of_two().trailing_zeros() as usize; // Create a vector of zeros with size 2^n_vars let mut vec: Vec = vec![G::Scalar::ZERO; 2_usize.pow(n_vars as u32)]; - // Assign non-zero entries from the sparse matrix to the vector - for &(i, j, val) in matrix.0.iter() { - let index = i * n_cols + j; // Convert (i, j) into an index for a flat vector - vec[index] = val; - } - // Pad to 2^n_vars let vec_padded: Vec = [ vec.clone(), @@ -195,6 +172,8 @@ pub fn sparse_matrix_to_mlp( MultilinearPolynomial::new(vec_padded) } +// XXX: Create vec_to_mlp method and estract the padd + /// Decompose an integer into a binary vector in little endian. 
pub fn bit_decompose(input: u64, num_var: usize) -> Vec { let mut res = Vec::with_capacity(num_var); @@ -278,8 +257,19 @@ mod tests { m.iter().map(|x| to_F_vec(x.clone())).collect() } - fn to_F_matrix_sparse(m: Vec<(usize, usize, u64)>) -> Vec<(usize, usize, F)> { - m.iter().map(|x| (x.0, x.1, F::from(x.2))).collect() + #[test] + fn test_n_cols_sparse_matrix() { + let one = Fq::ONE; + let A = vec![ + (0, 1, one), + (1, 3, one), + (2, 1, one), + (2, 4, one), + (3, 0, Fq::from(5u64)), + (3, 5, one), + ]; + + assert_eq!(6, SparseMatrix::::with_coeffs(4, 6, A).n_cols()); } #[test] @@ -320,69 +310,63 @@ mod tests { #[test] fn test_matrix_vector_product_sparse() { let matrix = vec![ - (0, 0, 1), - (0, 1, 2), - (0, 2, 3), - (1, 0, 4), - (1, 1, 5), - (1, 2, 6), + (0, 0, Fq::from(1)), + (0, 1, Fq::from(2)), + (0, 2, Fq::from(3)), + (1, 0, Fq::from(4)), + (1, 1, Fq::from(5)), + (1, 2, Fq::from(6)), ]; - let vector = vec![1, 2, 3]; - let A = to_F_matrix_sparse::(matrix); - let z = to_F_vec::(vector); - let res = matrix_vector_product_sparse::(&(A.into()), &z); - assert_eq!(res, to_F_vec::(vec![14, 32, 0])); - } + let z = to_F_vec::(vec![1, 2, 3]); + let res = + matrix_vector_product_sparse::(&SparseMatrix::::with_coeffs(2, 3, matrix), &z); - #[test] - fn test_sparse_matrix_n_rows() { - let matrix = vec![ - (0, 0, 1), - (0, 1, 2), - (0, 2, 3), - (1, 0, 4), - (1, 1, 5), - (1, 2, 6), - ]; - let A: SparseMatrix = to_F_matrix_sparse::(matrix).into(); - assert_eq!(A.n_rows(), 2); + assert_eq!(res, to_F_vec::(vec![14, 32, 0])); } #[test] - fn test_sparse_matrix_n_cols() { + fn test_sparse_matrix_n_cols_rows() { let matrix = vec![ - (0, 0, 1), - (0, 1, 2), - (0, 2, 3), - (1, 0, 4), - (1, 1, 5), - (1, 2, 6), + (0, 0, Fq::from(1u64)), + (0, 1, Fq::from(2u64)), + (0, 2, Fq::from(3u64)), + (1, 0, Fq::from(4u64)), + (1, 1, Fq::from(5u64)), + (1, 2, Fq::from(6u64)), + (4, 5, Fq::from(1u64)), ]; - let A: SparseMatrix = to_F_matrix_sparse::(matrix).into(); - assert_eq!(A.n_cols(), 3); + let A = SparseMatrix::::with_coeffs(5, 6, matrix.clone()); + assert_eq!(A.n_cols(), 6); + assert_eq!(A.n_rows(), 5); + + // Since is sparse, the empty rows/cols at the end are not accounted unless we provide the info. + let A = SparseMatrix::::with_coeffs(10, 10, matrix); + assert_eq!(A.n_cols(), 10); + assert_eq!(A.n_rows(), 10); } + // XXX this test is not really testing much. Improve. #[test] fn test_sparse_matrix_to_mlp() { let matrix = vec![ - (0, 0, 2), - (0, 1, 3), - (0, 2, 4), - (0, 3, 4), - (1, 0, 4), - (1, 1, 11), - (1, 2, 14), - (1, 3, 14), - (2, 0, 2), - (2, 1, 8), - (2, 2, 17), - (2, 3, 17), - (3, 0, 420), - (3, 1, 4), - (3, 2, 2), + (0, 0, Fq::from(2)), + (0, 1, Fq::from(3)), + (0, 2, Fq::from(4)), + (0, 3, Fq::from(4)), + (1, 0, Fq::from(4)), + (1, 1, Fq::from(11)), + (1, 2, Fq::from(14)), + (1, 3, Fq::from(14)), + (2, 0, Fq::from(2)), + (2, 1, Fq::from(8)), + (2, 2, Fq::from(17)), + (2, 3, Fq::from(17)), + (3, 0, Fq::from(420)), + (3, 1, Fq::from(4)), + (3, 2, Fq::from(2)), ]; - let A: SparseMatrix = to_F_matrix_sparse::(matrix).into(); + let A = SparseMatrix::::with_coeffs(4, 4, matrix); // Convert the sparse matrix to a multilinear polynomial let mlp = sparse_matrix_to_mlp(&A); From bc94cb779f1112cef79c6c3ff7ffe048f69c719e Mon Sep 17 00:00:00 2001 From: CPerezz Date: Fri, 23 Jun 2023 14:13:38 +0200 Subject: [PATCH 053/100] change: Update `pad` and `from_r1cs` CCS methods. This solves some minor errors & issues with the CCS methods as well as updates the SparseMatrix calls to the new API/params used. 
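
For reference (informal, using the same notation as the in-code comments), the
CCS relation that `from_r1cs` instantiates is

  \sum_{i=1}^{q} c_i * \bigcirc_{j \in S_i} (M_j * z) = 0

where \bigcirc denotes the entry-wise (Hadamard) product of the vectors M_j * z.
With the constants chosen here -- M = [A, B, C], S = [{0, 1}, {2}],
c = (1, -1), q = 2, d = 2 -- this specializes to

  (A * z) o (B * z) - (C * z) = 0

which is exactly the R1CS relation, so a satisfying R1CS witness carries over
to the resulting CCS shape unchanged.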
--- src/ccs/mod.rs | 78 +++++++++++++++++++++++--------------------------- 1 file changed, 36 insertions(+), 42 deletions(-) diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index f36907cb0..30024b72b 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -83,6 +83,17 @@ pub struct CCSWitness { w: Vec, } +impl CCSWitness { + /// Create a CCSWitness instance from the witness vector. + pub fn new(witness: Vec) -> Self { + Self { w: witness } + } + + /// Commits to the witness using the supplied generators + pub fn commit(&self, ck: &CommitmentKey) -> Commitment { + CE::::commit(ck, &self.w) + } +} /// A type that holds an CCS instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] @@ -146,10 +157,10 @@ impl CCSShape { self.to_cccs_shape(), ) } - // TODO: Update commitment_key variables here? This is currently based on R1CS with M length + // XXX: Update commitment_key variables here? This is currently based on R1CS with M length /// Samples public parameters for the specified number of constraints and variables in an CCS pub fn commitment_key(&self) -> CommitmentKey { - let total_nz = self.M.iter().fold(0, |acc, m| acc + m.0.len()); + let total_nz = self.M.iter().fold(0, |acc, m| acc + m.coeffs().len()); G::CE::setup(b"ck", max(max(self.m, self.t), total_nz)) } @@ -214,7 +225,7 @@ impl CCSShape { U: &CCSInstance, W: &CCSWitness, ) -> Result<(), NovaError> { - assert_eq!(W.w.len(), self.t); + assert_eq!(W.w.len(), self.n - self.l - 1); assert_eq!(U.x.len(), self.l); // Sage code to check CCS relation: @@ -268,64 +279,47 @@ impl CCSShape { const S1: [usize; 2] = [0, 1]; const S2: [usize; 1] = [2]; - let l = r1cs.num_io; - // NOTE: All matricies have the same number of rows, but in a SparseMatrix we need to check all of them - // TODO: Consider using SparseMatrix type in R1CSShape too - // XXX: This can probably be made a lot better - let A: SparseMatrix = r1cs.A.clone().into(); - let B: SparseMatrix = r1cs.B.clone().into(); - let C: SparseMatrix = r1cs.C.clone().into(); - - let m = max(A.n_rows(), max(B.n_rows(), C.n_rows())); - let n = max(A.n_cols(), max(B.n_cols(), C.n_cols())); + // Generate the SparseMatrix vec + let A = SparseMatrix::with_coeffs(r1cs.num_cons, r1cs.num_vars, r1cs.A); + let B = SparseMatrix::with_coeffs(r1cs.num_cons, r1cs.num_vars, r1cs.B); + let C = SparseMatrix::with_coeffs(r1cs.num_cons, r1cs.num_vars, r1cs.C); - let s = m.log_2() as usize; - let s_prime = n.log_2() as usize; + // Assert all matrixes have the same row/column length. 
+ assert_eq!(A.n_cols(), B.n_cols()); + assert_eq!(B.n_cols(), C.n_cols()); + assert_eq!(A.n_rows(), B.n_rows()); + assert_eq!(B.n_rows(), C.n_rows()); Self { - M: vec![r1cs.A.into(), r1cs.B.into(), r1cs.C.into()], + M: vec![A, B, C], t: T, - l, + l: r1cs.num_io, q: Q, d: D, S: vec![S1.to_vec(), S2.to_vec()], c: vec![G::Scalar::ONE, -G::Scalar::ONE], - m, - n, - s, - s_prime, + m: r1cs.num_cons, + n: r1cs.num_vars, + s: r1cs.num_cons.log_2() as usize, + s_prime: r1cs.num_vars.log_2() as usize, } } - /// Pads the R1CSShape so that the number of variables is a power of two + /// Pads the CCSShape so that the number of variables is a power of two /// Renumbers variables to accomodate padded variables pub fn pad(&mut self) { - let (padded_m, padded_n) = (self.m.next_power_of_two(), self.n.next_power_of_two()); + let padded_n = self.n.next_power_of_two(); // check if the number of variables are as expected, then // we simply set the number of constraints to the next power of two if self.n != padded_n { // Apply pad for each matrix in M - self.M.iter_mut().for_each(|m| m.pad(padded_n)); + self.M.iter_mut().for_each(|m| { + m.pad(padded_n); + *m.n_rows_mut() = padded_n + }); self.n = padded_n; } - - // We always update `m` even if it is the same (no need for `if`s). - self.m = padded_m; - } -} - -impl CCSWitness { - /// A method to create a witness object using a vector of scalars - pub fn new(S: &CCSShape, witness: Vec) -> CCSWitness { - assert_eq!(S.t, witness.len()); - - Self { w: witness } - } - - /// Commits to the witness using the supplied generators - pub fn commit(&self, ck: &CommitmentKey) -> Commitment { - CE::::commit(ck, &self.w) } } @@ -445,7 +439,7 @@ pub mod test { (i1, W, X) }; - let ccs_w = CCSWitness::new(&S, vars); + let ccs_w = CCSWitness::new(vars); let U = { let comm_W = ccs_w.commit(ck); From dec29ae88488ceeaf491bf0285643f5d337704ce Mon Sep 17 00:00:00 2001 From: CPerezz Date: Sun, 25 Jun 2023 13:31:41 +0200 Subject: [PATCH 054/100] remove: coefficients field from HyperCube & refactor This helps to reduce the complexity of the implementation while at the same time removing the possibility of handling a boolean hypercube with coefficients which is not needed now at all. We can always bring this back later. --- src/hypercube.rs | 76 +++++++++++++----------------------------------- 1 file changed, 20 insertions(+), 56 deletions(-) diff --git a/src/hypercube.rs b/src/hypercube.rs index 5a96a7e39..13c39e758 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -1,56 +1,33 @@ //! This module defines basic types related to Boolean hypercubes. +use std::marker::PhantomData; + use crate::utils::*; /// There's some overlap with polynomial.rs. 
use ff::PrimeField; #[derive(Debug)] -pub struct BooleanHypercube { - dimensions: usize, +pub(crate) struct BooleanHypercube { + n_vars: usize, current: u64, max: u64, - coefficients: Vec, + _f: PhantomData, } impl BooleanHypercube { - pub fn new(dimensions: usize, coefficients: Vec) -> Self { - assert!(coefficients.len() == 2_usize.pow(dimensions as u32)); - + pub(crate) fn new(n_vars: usize) -> Self { Self { - dimensions, + _f: PhantomData::, + n_vars, current: 0, - max: 2_u32.pow(dimensions as u32) as u64, - coefficients, + max: 2_u32.pow(n_vars as u32) as u64, } } - // Evaluate the multilinear polynomial at the given point - pub fn evaluate_at(&self, point: &[F]) -> F { - assert!(point.len() == self.dimensions); - - let mut result = F::ZERO; - - for i in 0..self.max as usize { - let monomial = self.monomial(i, point); - result = result + self.coefficients[i] * monomial; - } - - result - } - - // This calculates a single monomial of the multilinear polynomial - fn monomial(&self, i: usize, point: &[F]) -> F { + /// returns the entry at given i (which is the little-endian bit representation of i) + pub(crate) fn evaluate_at(&self, i: usize) -> Vec { assert!(i < self.max as usize); - let mut result = F::ONE; - - let bits = bit_decompose(i as u64, self.dimensions); - - for j in 0..self.dimensions { - if bits[j] { - result = result * point[j]; - } - } - - result + let bits = bit_decompose((i) as u64, self.n_vars); + bits.iter().map(|&x| F::from(x as u64)).collect() } } @@ -61,7 +38,7 @@ impl Iterator for BooleanHypercube { if self.current > self.max { None } else { - let bits = bit_decompose(self.current, self.dimensions); + let bits = bit_decompose(self.current, self.n_vars); let point: Vec = bits.iter().map(|&bit| Scalar::from(bit as u64)).collect(); self.current += 1; Some(point) @@ -72,29 +49,16 @@ impl Iterator for BooleanHypercube { #[cfg(test)] mod tests { use super::*; - use pasta_curves::Fp; + use ff::Field; + use pasta_curves::Fq; #[test] fn test_evaluate() { // Declare the coefficients in the order 1, x, y, xy, z, xz, yz, xyz. - let poly = BooleanHypercube::::new( - 3, - vec![ - Fp::from(0u64), - Fp::from(4u64), - Fp::from(2u64), - Fp::from(0u64), - Fp::from(1u64), - Fp::from(0u64), - Fp::from(0u64), - Fp::from(0u64), - ], - ); - - let point = vec![Fp::from(1u64), Fp::from(1u64), Fp::from(1u64)]; + let poly = BooleanHypercube::::new(3); - // The polynomial would be f(x, y, z) = 4x + 2y + z. - // So, f(1, 1, 1) = 4*1 + 2*1 + 1 = 7. - assert_eq!(poly.evaluate_at(&point), Fp::from(7u64)); + let point = 7usize; + // So, f(1, 1, 1) = 5. 
+ assert_eq!(poly.evaluate_at(point), vec![Fq::ONE, Fq::ONE, Fq::ONE]); } } From a11cf9c049a7adb43b0a594f4d9e41d743796700 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Sun, 25 Jun 2023 15:38:14 +0200 Subject: [PATCH 055/100] change: Change HyperCube `evaluate_at` endianness --- src/hypercube.rs | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/src/hypercube.rs b/src/hypercube.rs index 13c39e758..590042029 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -23,11 +23,12 @@ impl BooleanHypercube { } } - /// returns the entry at given i (which is the little-endian bit representation of i) + /// returns the entry at given i (which is the big-endian bit representation of i) pub(crate) fn evaluate_at(&self, i: usize) -> Vec { assert!(i < self.max as usize); let bits = bit_decompose((i) as u64, self.n_vars); - bits.iter().map(|&x| F::from(x as u64)).collect() + dbg!(bits.clone()); + bits.iter().map(|&x| F::from(x as u64)).rev().collect() } } From 055751e5ce9746bed4ab027037c2f6c7d3fc59f5 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Sun, 25 Jun 2023 16:03:56 +0200 Subject: [PATCH 056/100] add: Port MLE support & tests passing --- src/ccs/mod.rs | 9 +-- src/hypercube.rs | 1 - src/utils.rs | 151 ++++++++++++++++++++++++----------------------- 3 files changed, 80 insertions(+), 81 deletions(-) diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 30024b72b..b602dd85e 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -124,11 +124,7 @@ pub struct LCCCSWitness { impl CCSShape { pub(crate) fn to_cccs_shape(&self) -> CCCSShape { - let M_mle = self - .M - .iter() - .map(|matrix| sparse_matrix_to_mlp(matrix)) - .collect(); + let M_mle = self.M.iter().map(|matrix| matrix.to_mle()).collect(); CCCSShape { M_MLE: M_mle, ccs: self.clone(), @@ -305,6 +301,7 @@ impl CCSShape { } } + // XXX: Review thiss /// Pads the CCSShape so that the number of variables is a power of two /// Renumbers variables to accomodate padded variables pub fn pad(&mut self) { @@ -315,7 +312,7 @@ impl CCSShape { if self.n != padded_n { // Apply pad for each matrix in M self.M.iter_mut().for_each(|m| { - m.pad(padded_n); + m.pad(); *m.n_rows_mut() = padded_n }); self.n = padded_n; diff --git a/src/hypercube.rs b/src/hypercube.rs index 590042029..ef0517985 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -27,7 +27,6 @@ impl BooleanHypercube { pub(crate) fn evaluate_at(&self, i: usize) -> Vec { assert!(i < self.max as usize); let bits = bit_decompose((i) as u64, self.n_vars); - dbg!(bits.clone()); bits.iter().map(|&x| F::from(x as u64)).rev().collect() } } diff --git a/src/utils.rs b/src/utils.rs index ca7fb19b1..04e5f99f1 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -74,13 +74,40 @@ impl SparseMatrix { } } - // XXX: Double check this - pub(crate) fn pad(&mut self, n: usize) { - let prev_n = self.n_cols; - self.coeffs.par_iter_mut().for_each(|(_, c, _)| { - *c = if *c >= prev_n { *c + n - prev_n } else { *c }; - }); + /// Pad matrix so that its columns and rows are powers of two + pub(crate) fn pad(&mut self) { + // Find the desired dimensions after padding + let rows = self.n_rows(); + let cols = self.n_cols(); + + // Since we padd with 0's and our matrix repr is sparse, we just need + // to update the rows and cols attrs of the matrix. + *self.n_rows_mut() = rows.next_power_of_two(); + *self.n_cols_mut() = cols.next_power_of_two(); } + + // Gives the MLE of the given matrix. 
+ pub fn to_mle(&self) -> MultilinearPolynomial { + // Matrices might need to get padded before turned into an MLE + let mut padded_matrix = self.clone(); + padded_matrix.pad(); + + sparse_vec_to_mle::(self.n_rows(), self.n_cols(), padded_matrix.coeffs()) + } +} + +pub fn sparse_vec_to_mle( + n_rows: usize, + n_cols: usize, + v: &[(usize, usize, F)], +) -> MultilinearPolynomial { + let n_vars: usize = (log2(n_rows) + log2(n_cols)) as usize; // n_vars = s + s' + let mut padded_vec = vec![F::ZERO; 1 << n_vars]; + v.iter().copied().for_each(|(row, col, coeff)| { + padded_vec[(n_cols * row) + col] = coeff; + }); + + MultilinearPolynomial::new(padded_vec) } pub fn vector_add(a: &Vec, b: &Vec) -> Vec { @@ -131,7 +158,7 @@ pub fn matrix_vector_product_sparse( matrix: &SparseMatrix, vector: &Vec, ) -> Vec { - let mut res = vec![G::Scalar::ZERO; 4]; + let mut res = vec![G::Scalar::ZERO; matrix.n_cols()]; for &(row, col, value) in matrix.coeffs.iter() { res[row] += value * vector[col]; } @@ -148,32 +175,6 @@ pub fn hadamard_product(a: &Vec, b: &Vec) -> Vec { res } -pub fn sparse_matrix_to_mlp( - matrix: &SparseMatrix, -) -> MultilinearPolynomial { - let n_rows = 4; - let n_cols = 6usize; - - let n_vars: usize = n_cols.next_power_of_two().trailing_zeros() as usize; - - // Create a vector of zeros with size 2^n_vars - let mut vec: Vec = vec![G::Scalar::ZERO; 2_usize.pow(n_vars as u32)]; - - // Pad to 2^n_vars - let vec_padded: Vec = [ - vec.clone(), - std::iter::repeat(G::Scalar::ZERO) - .take((1 << n_vars) - vec.len()) - .collect(), - ] - .concat(); - - // Convert this vector into a MultilinearPolynomial - MultilinearPolynomial::new(vec_padded) -} - -// XXX: Create vec_to_mlp method and estract the padd - /// Decompose an integer into a binary vector in little endian. pub fn bit_decompose(input: u64, num_var: usize) -> Vec { let mut res = Vec::with_capacity(num_var); @@ -244,8 +245,20 @@ pub fn random_zero_mle_list( list } +pub(crate) fn log2(x: usize) -> u32 { + if x == 0 { + 0 + } else if x.is_power_of_two() { + 1usize.leading_zeros() - x.leading_zeros() + } else { + 0usize.leading_zeros() - x.leading_zeros() + } +} + #[cfg(test)] mod tests { + use crate::hypercube::BooleanHypercube; + use super::*; use pasta_curves::{Ep, Fq}; @@ -257,21 +270,6 @@ mod tests { m.iter().map(|x| to_F_vec(x.clone())).collect() } - #[test] - fn test_n_cols_sparse_matrix() { - let one = Fq::ONE; - let A = vec![ - (0, 1, one), - (1, 3, one), - (2, 1, one), - (2, 4, one), - (3, 0, Fq::from(5u64)), - (3, 5, one), - ]; - - assert_eq!(6, SparseMatrix::::with_coeffs(4, 6, A).n_cols()); - } - #[test] fn test_vector_add() { let a = to_F_vec::(vec![1, 2, 3]); @@ -346,32 +344,37 @@ mod tests { assert_eq!(A.n_rows(), 10); } - // XXX this test is not really testing much. Improve. 
#[test] - fn test_sparse_matrix_to_mlp() { - let matrix = vec![ - (0, 0, Fq::from(2)), - (0, 1, Fq::from(3)), - (0, 2, Fq::from(4)), - (0, 3, Fq::from(4)), - (1, 0, Fq::from(4)), - (1, 1, Fq::from(11)), - (1, 2, Fq::from(14)), - (1, 3, Fq::from(14)), - (2, 0, Fq::from(2)), - (2, 1, Fq::from(8)), - (2, 2, Fq::from(17)), - (2, 3, Fq::from(17)), - (3, 0, Fq::from(420)), - (3, 1, Fq::from(4)), - (3, 2, Fq::from(2)), - ]; - let A = SparseMatrix::::with_coeffs(4, 4, matrix); - - // Convert the sparse matrix to a multilinear polynomial - let mlp = sparse_matrix_to_mlp(&A); - - // A 4x4 matrix, thus 2bit x 2bit, thus 2^4=16 evals - assert_eq!(mlp.len(), 16); + fn test_matrix_to_mle() { + let matrix = SparseMatrix::::with_coeffs( + 5, + 5, + vec![ + (0usize, 0usize, Fq::from(1u64)), + (0, 1, Fq::from(2u64)), + (0, 2, Fq::from(3u64)), + (1, 0, Fq::from(4u64)), + (1, 1, Fq::from(5u64)), + (1, 2, Fq::from(6u64)), + (3, 4, Fq::from(1u64)), + ], + ); + + let A_mle = matrix.to_mle(); + assert_eq!(A_mle.len(), 64); // 5x5 matrix, thus 3bit x 3bit, thus 2^6=64 evals + + // check that the A_mle evaluated over the boolean hypercube equals the matrix A_i_j values + let bhc = BooleanHypercube::::new(A_mle.get_num_vars()); + + let mut padded_matrix = matrix.clone(); + padded_matrix.pad(); + padded_matrix + .coeffs() + .iter() + .copied() + .for_each(|(i, j, coeff)| { + let s_i_j = bhc.evaluate_at(i * matrix.n_cols() + j); + assert_eq!(A_mle.evaluate(&s_i_j), coeff); + }) } } From 145087579f435634701d9c1d1100ab565537b7a1 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Sun, 25 Jun 2023 16:40:36 +0200 Subject: [PATCH 057/100] fix: Vec * Sparse Matrix prod outp len --- src/utils.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 04e5f99f1..537b74a73 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -158,7 +158,7 @@ pub fn matrix_vector_product_sparse( matrix: &SparseMatrix, vector: &Vec, ) -> Vec { - let mut res = vec![G::Scalar::ZERO; matrix.n_cols()]; + let mut res = vec![G::Scalar::ZERO; matrix.n_rows()]; for &(row, col, value) in matrix.coeffs.iter() { res[row] += value * vector[col]; } @@ -166,7 +166,7 @@ pub fn matrix_vector_product_sparse( } pub fn hadamard_product(a: &Vec, b: &Vec) -> Vec { - assert_eq!(a.len(), b.len(), "Haddamard needs same len vectors"); + assert_eq!(a.len(), b.len(), "Hadamard needs same len vectors"); let mut res = Vec::with_capacity(a.len()); for i in 0..a.len() { res.push(a[i] * b[i]); From a057978a5d8c11186733f4f0101277c927d29027 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Sun, 25 Jun 2023 22:42:08 +0200 Subject: [PATCH 058/100] fix: Split Matrix_Vec prod into dense+sparse --- src/utils.rs | 30 +++++++++++++++++++++++------- 1 file changed, 23 insertions(+), 7 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 537b74a73..d8cb2b4e3 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -92,22 +92,38 @@ impl SparseMatrix { let mut padded_matrix = self.clone(); padded_matrix.pad(); - sparse_vec_to_mle::(self.n_rows(), self.n_cols(), padded_matrix.coeffs()) + sparse_vec_to_mle::( + self.n_rows(), + self.n_cols(), + padded_matrix.coeffs().to_vec(), + ) } } -pub fn sparse_vec_to_mle( +pub fn sparse_vec_to_mle( n_rows: usize, n_cols: usize, - v: &[(usize, usize, F)], -) -> MultilinearPolynomial { + v: Vec<(usize, usize, G::Scalar)>, +) -> MultilinearPolynomial { let n_vars: usize = (log2(n_rows) + log2(n_cols)) as usize; // n_vars = s + s' - let mut padded_vec = vec![F::ZERO; 1 << n_vars]; + let mut padded_vec = vec![G::Scalar::ZERO; 1 
<< n_vars]; v.iter().copied().for_each(|(row, col, coeff)| { padded_vec[(n_cols * row) + col] = coeff; }); - MultilinearPolynomial::new(padded_vec) + dense_vec_to_mle(n_vars, &padded_vec) +} + +pub fn dense_vec_to_mle(n_vars: usize, v: &Vec) -> MultilinearPolynomial { + // Pad to 2^n_vars + let v_padded: Vec = [ + v.clone(), + std::iter::repeat(F::ZERO) + .take((1 << n_vars) - v.len()) + .collect(), + ] + .concat(); + MultilinearPolynomial::new(v_padded) } pub fn vector_add(a: &Vec, b: &Vec) -> Vec { @@ -320,7 +336,7 @@ mod tests { let res = matrix_vector_product_sparse::(&SparseMatrix::::with_coeffs(2, 3, matrix), &z); - assert_eq!(res, to_F_vec::(vec![14, 32, 0])); + assert_eq!(res, to_F_vec::(vec![14, 32])); } #[test] From 1b60494e902e4284ab913323a81dfcccda925bc7 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Mon, 26 Jun 2023 18:50:21 +0200 Subject: [PATCH 059/100] this code is cursed --- src/ccs/cccs.rs | 375 +++++++++++++++++++++++++++++++++++--- src/ccs/mod.rs | 1 + src/hypercube.rs | 11 +- src/r1cs.rs | 8 +- src/spartan/polynomial.rs | 5 +- 5 files changed, 370 insertions(+), 30 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index d1442582b..a8c1e49e5 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -60,37 +60,48 @@ pub struct CCCSWitness { } impl CCCSShape { - // TODO: Sanity check this + /// Computes the MLE of the CCS's Matrix at index `j` and executes the reduction of it summing over the given z. pub fn compute_sum_Mz( &self, - M_j: &MultilinearPolynomial, + m_idx: usize, z: &MultilinearPolynomial, - s_prime: usize, ) -> MultilinearPolynomial { - assert_eq!(M_j.get_num_vars(), s_prime); + let s_prime = self.ccs.s_prime; + let M_j_mle = self.ccs.M[m_idx].to_mle(); assert_eq!(z.get_num_vars(), s_prime); + //dbg!(M_j_mle.clone()); + let mut sum_Mz = MultilinearPolynomial::new(vec![ + G::Scalar::ZERO; + 1 << (M_j_mle.get_num_vars() - s_prime) + ]); - let num_vars = M_j.get_num_vars(); - let two_to_num_vars = (2_usize).pow(num_vars as u32); - let mut result_coefficients = Vec::with_capacity(two_to_num_vars); + let bhc = BooleanHypercube::::new(s_prime); + bhc.into_iter().for_each(|bit_vec| { + // Perform the reduction + dbg!(bit_vec.clone()); + let mut M_j_y: MultilinearPolynomial<::Scalar> = M_j_mle.clone(); - for i in 0..two_to_num_vars { - let r = bit_decompose(i as u64, num_vars) - .into_iter() - .map(|bit| G::Scalar::from(if bit { 1 } else { 0 })) - .collect::>(); + for bit in bit_vec.iter() { + M_j_y.bound_poly_var_top(bit); + dbg!(M_j_y.clone()); + } - let value = M_j.evaluate(&r) * z.evaluate(&r); - result_coefficients.push(value); - } + let z_y = z.evaluate(&bit_vec); + // dbg!(z_y.clone()); + let M_j_z = M_j_y.scalar_mul(&z_y); + // dbg!(M_j_z.clone()); + // XXX: It's crazy to have results in the ops impls. Remove them! 
+ sum_Mz = sum_Mz.clone().add(M_j_z).expect("This should not fail"); + // dbg!(sum_Mz.clone()); + }); - MultilinearPolynomial::new(result_coefficients) + sum_Mz } // Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) // polynomial over x pub fn compute_q(&self, z: &Vec) -> Result, NovaError> { - let z_mle = MultilinearPolynomial::new(z.clone()); + let z_mle = dense_vec_to_mle::(6, z); if z_mle.get_num_vars() != self.ccs.s_prime { return Err(NovaError::VpArith); } @@ -102,10 +113,8 @@ impl CCCSShape { |q, idx| { let mut prod = VirtualPolynomial::::new(self.ccs.s); - for j in &self.ccs.S[idx] { - let M_j = sparse_matrix_to_mlp(&self.ccs.M[*j]); - - let sum_Mz = self.compute_sum_Mz(&M_j, &z_mle, self.ccs.s_prime); + for &j in &self.ccs.S[idx] { + let sum_Mz = self.compute_sum_Mz(j, &z_mle); // Fold this sum into the running product if prod.products.is_empty() { @@ -139,3 +148,327 @@ impl CCCSShape { q.build_f_hat(beta) } } + +#[cfg(test)] +mod tests { + + use crate::ccs::CCSInstance; + use crate::ccs::CCSWitness; + use crate::ccs::CCS; + + use super::*; + use ff::PrimeField; + use pasta_curves::pallas::Scalar; + use pasta_curves::Ep; + use pasta_curves::Fp; + use pasta_curves::Fq; + use rand_core::OsRng; + use rand_core::RngCore; + + // Deduplicate this + fn to_F_matrix(m: Vec>) -> Vec> { + m.iter().map(|x| to_F_vec(x.clone())).collect() + } + + // Deduplicate this + fn to_F_vec(v: Vec) -> Vec { + v.iter().map(|x| F::from(*x)).collect() + } + + fn vecs_to_slices(vecs: &[Vec]) -> Vec<&[T]> { + vecs.iter().map(Vec::as_slice).collect() + } + + fn gen_test_ccs( + z: &Vec, + rng: &mut R, + ) -> (CCSShape, CCSWitness, CCSInstance) { + let one = G::Scalar::ONE; + let A = vec![ + (0, 1, one), + (1, 3, one), + (2, 1, one), + (2, 4, one), + (3, 0, G::Scalar::from(5u64)), + (3, 5, one), + ]; + + let B = vec![(0, 1, one), (1, 1, one), (2, 0, one), (3, 0, one)]; + let C = vec![(0, 3, one), (1, 4, one), (2, 5, one), (3, 2, one)]; + + // 2. Take R1CS and convert to CCS + let ccs = CCSShape::from_r1cs(R1CSShape::new(4, 6, 1, &A, &B, &C).unwrap()); + // Generate other artifacts + let ck = CCSShape::::commitment_key(&ccs); + let ccs_w = CCSWitness::new(z[2..].to_vec()); + let ccs_instance = CCSInstance::new(&ccs, &ccs_w.commit(&ck), vec![z[1]]).unwrap(); + + ccs + .is_sat(&ck, &ccs_instance, &ccs_w) + .expect("This does not fail"); + (ccs, ccs_w, ccs_instance) + } + + /// Computes the z vector for the given input for Vitalik's equation. 
+ pub fn get_test_z(input: u64) -> Vec { + // z = (1, io, w) + to_F_vec(vec![ + 1, + input, + input * input * input + input + 5, // x^3 + x + 5 + input * input, // x^2 + input * input * input, // x^2 * x + input * input * input + input, // x^3 + x + ]) + } + + #[test] + fn test_compute_sum_Mz_over_boolean_hypercube() -> () { + let z = get_test_z::(3); + let (ccs, _, _) = gen_test_ccs::(&z, &mut OsRng); + + // Generate other artifacts + let ck = CCSShape::::commitment_key(&ccs); + let (_, _, cccs) = ccs.to_cccs_artifacts(&mut OsRng, &ck, &z); + + let z_mle = dense_vec_to_mle(ccs.s_prime, &z); + // dbg!(z_mle.clone()); + + // check that evaluating over all the values x over the boolean hypercube, the result of + // the next for loop is equal to 0 + let mut r = Fq::zero(); + let bch = BooleanHypercube::new(ccs.s); + bch.into_iter().for_each(|x| { + // dbg!(x.clone()); + for i in 0..ccs.q { + let mut Sj_prod = Fq::one(); + for j in ccs.S[i].clone() { + let sum_Mz = cccs.compute_sum_Mz(j, &z_mle); + dbg!(sum_Mz.clone()); + let sum_Mz_x = sum_Mz.evaluate(&x); + dbg!(sum_Mz_x.clone()); + Sj_prod *= sum_Mz_x; + dbg!(Sj_prod.clone()); + } + r += (Sj_prod * ccs.c[i]); + } + // dbg!(r.clone()); + assert_eq!(r, Fq::ZERO); + }); + } + + /// Do some sanity checks on q(x). It's a multivariable polynomial and it should evaluate to zero inside the + /// hypercube, but to not-zero outside the hypercube. + #[test] + fn test_compute_q() { + let mut rng = OsRng; + + let z = get_test_z::(3); + let (ccs_shape, ccs_witness, ccs_instance) = gen_test_ccs(&z, &mut rng); + + // generate ck + let ck = CCSShape::::commitment_key(&ccs_shape); + // ensure CCS is satisfied + ccs_shape.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); + + // Generate CCCS artifacts + let cccs_shape = ccs_shape.to_cccs_shape(); + + let q = cccs_shape.compute_q(&z).unwrap(); + + // Evaluate inside the hypercube + for x in BooleanHypercube::new(ccs_shape.s).into_iter() { + assert_eq!(Fq::zero(), q.evaluate(&x).unwrap()); + } + + // Evaluate outside the hypercube + let beta: Vec = (0..ccs_shape.s).map(|_| Fq::random(&mut rng)).collect(); + assert_ne!(Fq::zero(), q.evaluate(&beta).unwrap()); + } + + #[test] + fn test_compute_Q() { + let mut rng = OsRng; + + let z = get_test_z::(3); + let (ccs_shape, ccs_witness, ccs_instance) = gen_test_ccs(&z, &mut rng); + + // generate ck + let ck = CCSShape::::commitment_key(&ccs_shape); + // ensure CCS is satisfied + ccs_shape.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); + + // Generate CCCS artifacts + let cccs_shape = ccs_shape.to_cccs_shape(); + + let beta: Vec = (0..ccs_shape.s).map(|_| Fq::random(&mut rng)).collect(); + + // Compute Q(x) = eq(beta, x) * q(x). + let Q = cccs_shape + .compute_Q(&z, &beta) + .expect("Computation of Q should not fail"); + + // Let's consider the multilinear polynomial G(x) = \sum_{y \in {0, 1}^s} eq(x, y) q(y) + // which interpolates the multivariate polynomial q(x) inside the hypercube. + // + // Observe that summing Q(x) inside the hypercube, directly computes G(\beta). + // + // Now, G(x) is multilinear and agrees with q(x) inside the hypercube. Since q(x) vanishes inside the + // hypercube, this means that G(x) also vanishes in the hypercube. Since G(x) is multilinear and vanishes + // inside the hypercube, this makes it the zero polynomial. + // + // Hence, evaluating G(x) at a random beta should give zero. 
+ + // Now sum Q(x) evaluations in the hypercube and expect it to be 0 + let r = BooleanHypercube::new(ccs_shape.s) + .into_iter() + .map(|x| Q.evaluate(&x).unwrap()) + .fold(Fq::zero(), |acc, result| acc + result); + assert_eq!(r, Fq::zero()); + } + + /// The polynomial G(x) (see above) interpolates q(x) inside the hypercube. + /// Summing Q(x) over the hypercube is equivalent to evaluating G(x) at some point. + /// This test makes sure that G(x) agrees with q(x) inside the hypercube, but not outside + #[test] + fn test_Q_against_q() -> () { + let mut rng = OsRng; + + let z = get_test_z::(3); + let (ccs_shape, ccs_witness, ccs_instance) = gen_test_ccs(&z, &mut rng); + + // generate ck + let ck = CCSShape::::commitment_key(&ccs_shape); + // ensure CCS is satisfied + ccs_shape.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); + + // Generate CCCS artifacts + let cccs_shape = ccs_shape.to_cccs_shape(); + + // Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which + // should be equal to q(d), since G(x) interpolates q(x) inside the hypercube + let q = cccs_shape + .compute_q(&z) + .expect("Computing q shoud not fail"); + for d in BooleanHypercube::new(ccs_shape.s) { + let Q_at_d = cccs_shape + .compute_Q(&z, &d) + .expect("Computing Q_at_d shouldn't fail"); + + // Get G(d) by summing over Q_d(x) over the hypercube + let G_at_d = BooleanHypercube::new(ccs_shape.s) + .into_iter() + .map(|x| Q_at_d.evaluate(&x).unwrap()) + .fold(Fq::zero(), |acc, result| acc + result); + assert_eq!(G_at_d, q.evaluate(&d).unwrap()); + } + + // Now test that they should disagree outside of the hypercube + let r: Vec = (0..ccs_shape.s).map(|_| Fq::random(&mut rng)).collect(); + let Q_at_r = cccs_shape + .compute_Q(&z, &r) + .expect("Computing Q_at_r shouldn't fail"); + + // Get G(d) by summing over Q_d(x) over the hypercube + let G_at_r = BooleanHypercube::new(ccs_shape.s) + .into_iter() + .map(|x| Q_at_r.evaluate(&x).unwrap()) + .fold(Fq::zero(), |acc, result| acc + result); + assert_ne!(G_at_r, q.evaluate(&r).unwrap()); + } + + #[test] + fn test_fix_variables() { + // + // + // + pub fn fix_variables( + poly: &MultilinearPolynomial, + partial_point: &[F], + ) -> MultilinearPolynomial { + assert!( + partial_point.len() <= poly.get_num_vars(), + "invalid size of partial point" + ); + let nv = poly.get_num_vars(); + let mut poly = poly.Z.to_vec(); + let dim = partial_point.len(); + // evaluate single variable of partial point from left to right + for (i, point) in partial_point.iter().enumerate().take(dim) { + poly = fix_one_variable_helper(&poly, nv - i, point); + } + + MultilinearPolynomial::::new(poly[..(1 << (nv - dim))].to_vec()) + } + + fn fix_one_variable_helper(data: &[F], nv: usize, point: &F) -> Vec { + let mut res = vec![F::ZERO; 1 << (nv - 1)]; + + for i in 0..(1 << (nv - 1)) { + res[i] = data[i] + (data[(i << 1) + 1] - data[i << 1]) * point; + } + + res + } + // + // + // + + let A = SparseMatrix::::with_coeffs( + 4, + 4, + vec![ + (0, 0, Fq::from(2u64)), + (0, 1, Fq::from(3u64)), + (0, 2, Fq::from(4u64)), + (0, 3, Fq::from(4u64)), + (1, 0, Fq::from(4u64)), + (1, 1, Fq::from(11u64)), + (1, 2, Fq::from(14u64)), + (1, 3, Fq::from(14u64)), + (2, 0, Fq::from(2u64)), + (2, 1, Fq::from(8u64)), + (2, 2, Fq::from(17u64)), + (2, 3, Fq::from(17u64)), + (3, 0, Fq::from(420u64)), + (3, 1, Fq::from(4u64)), + (3, 2, Fq::from(2u64)), + (3, 3, Fq::ZERO), + ], + ); + + let A_mle = A.to_mle(); + dbg!(&A_mle); + let bhc = BooleanHypercube::::new(2); + for (i, y) in 
bhc.enumerate() { + let mut A_mle_op = A_mle.clone(); + + // for bit in y.clone() { + // A_mle_op.bound_poly_var_top(&bit) + // } + + A_mle_op = fix_variables(&A_mle, &y); + dbg!(A_mle_op.clone()); + println!("{:?}", y.clone()); + // Check that fixing first variables pins down a column + // i.e. fixing x to 0 will return the first column + // fixing x to 1 will return the second column etc. + let column_i: Vec = A + .clone() + .coeffs() + .iter() + .copied() + .filter_map(|(_, col, coeff)| if col == i { Some(coeff) } else { None }) + .collect(); + + assert_eq!(A_mle_op.Z, column_i); + + // // Now check that fixing last variables pins down a row + // // i.e. fixing y to 0 will return the first row + // // fixing y to 1 will return the second row etc. + // let row_i: Vec = A[i].clone(); + // let fix_right = fix_last_variables(&A_mle, &y); + // assert_eq!(fix_right.evaluations, row_i); + } + } +} diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index b602dd85e..a59d00891 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -153,6 +153,7 @@ impl CCSShape { self.to_cccs_shape(), ) } + // XXX: Update commitment_key variables here? This is currently based on R1CS with M length /// Samples public parameters for the specified number of constraints and variables in an CCS pub fn commitment_key(&self) -> CommitmentKey { diff --git a/src/hypercube.rs b/src/hypercube.rs index ef0517985..590710df1 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -4,10 +4,11 @@ use std::marker::PhantomData; use crate::utils::*; /// There's some overlap with polynomial.rs. use ff::PrimeField; +use itertools::Itertools; #[derive(Debug)] pub(crate) struct BooleanHypercube { - n_vars: usize, + pub(crate) n_vars: usize, current: u64, max: u64, _f: PhantomData, @@ -35,11 +36,15 @@ impl Iterator for BooleanHypercube { type Item = Vec; fn next(&mut self) -> Option { - if self.current > self.max { + if self.current >= self.max { None } else { let bits = bit_decompose(self.current, self.n_vars); - let point: Vec = bits.iter().map(|&bit| Scalar::from(bit as u64)).collect(); + let point: Vec = bits + .iter() + .map(|&bit| Scalar::from(bit as u64)) + .rev() + .collect(); self.current += 1; Some(point) } diff --git a/src/r1cs.rs b/src/r1cs.rs index 4b204e9eb..36133f5cc 100644 --- a/src/r1cs.rs +++ b/src/r1cs.rs @@ -118,10 +118,10 @@ impl R1CSShape { return Err(NovaError::InvalidIndex); } - // We require the number of public inputs/outputs to be even - if num_io % 2 != 0 { - return Err(NovaError::OddInputLength); - } + // // We require the number of public inputs/outputs to be even + // if num_io % 2 != 0 { + // return Err(NovaError::OddInputLength); + // } Ok(R1CSShape { num_cons, diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 8f45c2549..3c9f872d0 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -79,8 +79,8 @@ impl EqPolynomial { /// Vector $Z$ indicates $Z(e)$ where $e$ ranges from $0$ to $2^m-1$. 
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct MultilinearPolynomial { - num_vars: usize, // the number of variables in the multilinear polynomial - Z: Vec, // evaluations of the polynomial in all the 2^num_vars Boolean inputs + num_vars: usize, // the number of variables in the multilinear polynomial + pub(crate) Z: Vec, // evaluations of the polynomial in all the 2^num_vars Boolean inputs } impl MultilinearPolynomial { @@ -104,6 +104,7 @@ impl MultilinearPolynomial { let n = self.len() / 2; let (left, right) = self.Z.split_at_mut(n); + // XXX: This literally does nothing at all.. What is this? let (right, _) = right.split_at(n); left From 827a1fffaf2a611a361f253ecc27898fc72d730e Mon Sep 17 00:00:00 2001 From: arnaucube Date: Mon, 26 Jun 2023 23:11:37 +0200 Subject: [PATCH 060/100] lift the curse: - BooleanHypercube iterator was reversing the bit representation - bound_poly_var_top is not fixing first variables but fixing last variables (=Espresso/hyperplonk's fix_last_variables method) - fix method fix_one_variable_helper Add also a couple of notes regarding sparse_vec_to_mle & bound_poly_var_top methods. Note: the title of this commit is in reference to the commit message of 22b3f6f3a728807a30ac47ff4ea105f52f541e6b --- src/ccs/cccs.rs | 96 +++++++++++++++++++-------------------- src/hypercube.rs | 6 +-- src/spartan/polynomial.rs | 1 + src/utils.rs | 5 ++ 4 files changed, 54 insertions(+), 54 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index a8c1e49e5..fce8f8549 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -18,7 +18,7 @@ use crate::{ }; use bitvec::vec; use core::{cmp::max, marker::PhantomData}; -use ff::Field; +use ff::{Field, PrimeField}; use flate2::{write::ZlibEncoder, Compression}; use itertools::concat; use rayon::prelude::*; @@ -149,6 +149,35 @@ impl CCCSShape { } } +pub fn fix_variables( + poly: &MultilinearPolynomial, + partial_point: &[F], +) -> MultilinearPolynomial { + assert!( + partial_point.len() <= poly.get_num_vars(), + "invalid size of partial point" + ); + let nv = poly.get_num_vars(); + let mut poly = poly.Z.to_vec(); + let dim = partial_point.len(); + // evaluate single variable of partial point from left to right + for (i, point) in partial_point.iter().enumerate() { + poly = fix_one_variable_helper(&poly, nv - i, point); + } + + MultilinearPolynomial::::new(poly[..(1 << (nv - dim))].to_vec()) +} + +fn fix_one_variable_helper(data: &[F], nv: usize, point: &F) -> Vec { + let mut res = vec![F::ZERO; 1 << (nv - 1)]; + + for i in 0..(1 << (nv - 1)) { + res[i] = data[i << 1] + (data[(i << 1) + 1] - data[i << 1]) * point; + } + + res +} + #[cfg(test)] mod tests { @@ -379,41 +408,6 @@ mod tests { #[test] fn test_fix_variables() { - // - // - // - pub fn fix_variables( - poly: &MultilinearPolynomial, - partial_point: &[F], - ) -> MultilinearPolynomial { - assert!( - partial_point.len() <= poly.get_num_vars(), - "invalid size of partial point" - ); - let nv = poly.get_num_vars(); - let mut poly = poly.Z.to_vec(); - let dim = partial_point.len(); - // evaluate single variable of partial point from left to right - for (i, point) in partial_point.iter().enumerate().take(dim) { - poly = fix_one_variable_helper(&poly, nv - i, point); - } - - MultilinearPolynomial::::new(poly[..(1 << (nv - dim))].to_vec()) - } - - fn fix_one_variable_helper(data: &[F], nv: usize, point: &F) -> Vec { - let mut res = vec![F::ZERO; 1 << (nv - 1)]; - - for i in 0..(1 << (nv - 1)) { - res[i] = data[i] + (data[(i << 1) + 1] - data[i << 1]) * point; - } 
- - res - } - // - // - // - let A = SparseMatrix::::with_coeffs( 4, 4, @@ -438,18 +432,10 @@ mod tests { ); let A_mle = A.to_mle(); - dbg!(&A_mle); let bhc = BooleanHypercube::::new(2); for (i, y) in bhc.enumerate() { - let mut A_mle_op = A_mle.clone(); - - // for bit in y.clone() { - // A_mle_op.bound_poly_var_top(&bit) - // } - - A_mle_op = fix_variables(&A_mle, &y); - dbg!(A_mle_op.clone()); - println!("{:?}", y.clone()); + let A_mle_op = fix_variables(&A_mle, &y); + // Check that fixing first variables pins down a column // i.e. fixing x to 0 will return the first column // fixing x to 1 will return the second column etc. @@ -466,9 +452,21 @@ mod tests { // // Now check that fixing last variables pins down a row // // i.e. fixing y to 0 will return the first row // // fixing y to 1 will return the second row etc. - // let row_i: Vec = A[i].clone(); - // let fix_right = fix_last_variables(&A_mle, &y); - // assert_eq!(fix_right.evaluations, row_i); + let row_i: Vec = A + .clone() + .coeffs() + .iter() + .copied() + .filter_map(|(row, _, coeff)| if row == i { Some(coeff) } else { None }) + .collect(); + + let mut last_vars_fixed = A_mle.clone(); + // this is equivalent to Espresso/hyperplonk's 'fix_last_variables' mehthod + for bit in y.clone().iter().rev() { + last_vars_fixed.bound_poly_var_top(&bit) + } + + assert_eq!(last_vars_fixed.Z, row_i); } } } diff --git a/src/hypercube.rs b/src/hypercube.rs index 590710df1..2e3509607 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -40,11 +40,7 @@ impl Iterator for BooleanHypercube { None } else { let bits = bit_decompose(self.current, self.n_vars); - let point: Vec = bits - .iter() - .map(|&bit| Scalar::from(bit as u64)) - .rev() - .collect(); + let point: Vec = bits.iter().map(|&bit| Scalar::from(bit as u64)).collect(); self.current += 1; Some(point) } diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 3c9f872d0..dd2db921c 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -100,6 +100,7 @@ impl MultilinearPolynomial { self.Z.len() } + // NOTE: this is equivalent to Espresso/hyperplonk's 'fix_last_variables' mehthod pub fn bound_poly_var_top(&mut self, r: &Scalar) { let n = self.len() / 2; diff --git a/src/utils.rs b/src/utils.rs index d8cb2b4e3..c2272ca4a 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -100,6 +100,11 @@ impl SparseMatrix { } } +// NOTE: the method is called "sparse_vec_to_mle", but inputs are n_rows & n_cols, and a normal +// vector does not have rows&cols. This is because in this case the vec comes from matrix +// coefficients, maybe the method should be renamed, because is not to convert 'any' vector but a +// vector of matrix coefficients. A better option probably is to replace the two inputs n_rows & +// n_cols by directly the n_vars. 
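+// (Note: with the matrix padded to powers of two, n_vars = log2(n_rows) + log2(n_cols);
+// e.g. a 5x5 matrix is padded to 8x8, giving a 3 + 3 = 6 variable MLE with 2^6 = 64 evaluations.)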
pub fn sparse_vec_to_mle( n_rows: usize, n_cols: usize, From fe14cddb58c7b3ec710891f3ebe64575629cc4b2 Mon Sep 17 00:00:00 2001 From: arnaucube Date: Tue, 27 Jun 2023 10:44:46 +0200 Subject: [PATCH 061/100] fix SparseMatrix.to_mle() method, fix test_matrix_to_mle --- src/utils.rs | 62 +++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 47 insertions(+), 15 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index c2272ca4a..fe0cca5ac 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -93,8 +93,8 @@ impl SparseMatrix { padded_matrix.pad(); sparse_vec_to_mle::( - self.n_rows(), - self.n_cols(), + padded_matrix.n_rows(), + padded_matrix.n_cols(), padded_matrix.coeffs().to_vec(), ) } @@ -367,7 +367,7 @@ mod tests { #[test] fn test_matrix_to_mle() { - let matrix = SparseMatrix::::with_coeffs( + let A = SparseMatrix::::with_coeffs( 5, 5, vec![ @@ -381,21 +381,53 @@ mod tests { ], ); - let A_mle = matrix.to_mle(); + let A_mle = A.to_mle(); assert_eq!(A_mle.len(), 64); // 5x5 matrix, thus 3bit x 3bit, thus 2^6=64 evals + // hardcoded testvector to ensure that in the future the SparseMatrix.to_mle method holds + let expected = vec![ + Fq::from(1u64), + Fq::from(2u64), + Fq::from(3u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(4u64), + Fq::from(5u64), + Fq::from(6u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(0u64), + Fq::from(1u64), + // the rest are zeroes + ]; + assert_eq!(A_mle.Z[..29], expected); + assert_eq!(A_mle.Z[29..], vec![Fq::ZERO; 64 - 29]); + // check that the A_mle evaluated over the boolean hypercube equals the matrix A_i_j values let bhc = BooleanHypercube::::new(A_mle.get_num_vars()); - - let mut padded_matrix = matrix.clone(); - padded_matrix.pad(); - padded_matrix - .coeffs() - .iter() - .copied() - .for_each(|(i, j, coeff)| { - let s_i_j = bhc.evaluate_at(i * matrix.n_cols() + j); - assert_eq!(A_mle.evaluate(&s_i_j), coeff); - }) + let mut A_padded = A.clone(); + A_padded.pad(); + for term in A_padded.coeffs.iter() { + let (i, j, coeff) = term; + let s_i_j = bhc.evaluate_at(i * A_padded.n_cols + j); + assert_eq!(&A_mle.evaluate(&s_i_j), coeff) + } } } From ce646e6fd574af153c61850c1fcd0c34ffe88e7d Mon Sep 17 00:00:00 2001 From: arnaucube Date: Tue, 27 Jun 2023 13:03:47 +0200 Subject: [PATCH 062/100] fix cccs.compute_sum_Mz method, small fix in compute_q, so by consequence compute_q & compute_Q now work fine --- src/ccs/cccs.rs | 37 ++++++++++++------------------------- 1 file changed, 12 insertions(+), 25 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index fce8f8549..a97d6c217 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -69,31 +69,23 @@ impl CCCSShape { let s_prime = self.ccs.s_prime; let M_j_mle = self.ccs.M[m_idx].to_mle(); assert_eq!(z.get_num_vars(), s_prime); - //dbg!(M_j_mle.clone()); + let mut sum_Mz = MultilinearPolynomial::new(vec![ G::Scalar::ZERO; 1 << (M_j_mle.get_num_vars() - s_prime) ]); let bhc = BooleanHypercube::::new(s_prime); - bhc.into_iter().for_each(|bit_vec| { - // Perform the reduction - dbg!(bit_vec.clone()); - let mut M_j_y: MultilinearPolynomial<::Scalar> = M_j_mle.clone(); - - for bit in bit_vec.iter() { - M_j_y.bound_poly_var_top(bit); - dbg!(M_j_y.clone()); - } + for y in bhc.into_iter() { + let 
M_j_y = fix_variables(&M_j_mle, &y); - let z_y = z.evaluate(&bit_vec); - // dbg!(z_y.clone()); + // reverse y to match spartan/polynomial evaluate + let y_rev: Vec = y.into_iter().rev().collect(); + let z_y = z.evaluate(&y_rev); let M_j_z = M_j_y.scalar_mul(&z_y); - // dbg!(M_j_z.clone()); // XXX: It's crazy to have results in the ops impls. Remove them! sum_Mz = sum_Mz.clone().add(M_j_z).expect("This should not fail"); - // dbg!(sum_Mz.clone()); - }); + } sum_Mz } @@ -101,8 +93,9 @@ impl CCCSShape { // Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) // polynomial over x pub fn compute_q(&self, z: &Vec) -> Result, NovaError> { - let z_mle = dense_vec_to_mle::(6, z); + let z_mle = dense_vec_to_mle::(self.ccs.s_prime, z); if z_mle.get_num_vars() != self.ccs.s_prime { + // this check if redundant if dense_vec_to_mle is correct return Err(NovaError::VpArith); } @@ -261,29 +254,23 @@ mod tests { let (_, _, cccs) = ccs.to_cccs_artifacts(&mut OsRng, &ck, &z); let z_mle = dense_vec_to_mle(ccs.s_prime, &z); - // dbg!(z_mle.clone()); // check that evaluating over all the values x over the boolean hypercube, the result of // the next for loop is equal to 0 let mut r = Fq::zero(); let bch = BooleanHypercube::new(ccs.s); - bch.into_iter().for_each(|x| { - // dbg!(x.clone()); + for x in bch.into_iter() { for i in 0..ccs.q { let mut Sj_prod = Fq::one(); for j in ccs.S[i].clone() { let sum_Mz = cccs.compute_sum_Mz(j, &z_mle); - dbg!(sum_Mz.clone()); let sum_Mz_x = sum_Mz.evaluate(&x); - dbg!(sum_Mz_x.clone()); Sj_prod *= sum_Mz_x; - dbg!(Sj_prod.clone()); } - r += (Sj_prod * ccs.c[i]); + r += Sj_prod * ccs.c[i]; } - // dbg!(r.clone()); assert_eq!(r, Fq::ZERO); - }); + } } /// Do some sanity checks on q(x). It's a multivariable polynomial and it should evaluate to zero inside the From af45a4cd6dc6f3813c1f0e1d4ee6b9176bcf8f3b Mon Sep 17 00:00:00 2001 From: CPerezz Date: Tue, 27 Jun 2023 14:18:22 +0200 Subject: [PATCH 063/100] change: Get M_j_mle from precomputed CCCS M's --- src/ccs/cccs.rs | 2 +- src/ccs/mod.rs | 1 - 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index a97d6c217..26ceb439d 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -67,7 +67,7 @@ impl CCCSShape { z: &MultilinearPolynomial, ) -> MultilinearPolynomial { let s_prime = self.ccs.s_prime; - let M_j_mle = self.ccs.M[m_idx].to_mle(); + let M_j_mle = &self.M_MLE[m_idx]; assert_eq!(z.get_num_vars(), s_prime); let mut sum_Mz = MultilinearPolynomial::new(vec![ diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index a59d00891..8f725268c 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -302,7 +302,6 @@ impl CCSShape { } } - // XXX: Review thiss /// Pads the CCSShape so that the number of variables is a power of two /// Renumbers variables to accomodate padded variables pub fn pad(&mut self) { From c800b39b419a99f4219f45efb397cd0f6521dac3 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Tue, 27 Jun 2023 15:11:26 +0200 Subject: [PATCH 064/100] remove: CCS struct which is unused --- src/ccs/cccs.rs | 1 - src/ccs/mod.rs | 11 +---------- 2 files changed, 1 insertion(+), 11 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index 26ceb439d..674075b7e 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -176,7 +176,6 @@ mod tests { use crate::ccs::CCSInstance; use crate::ccs::CCSWitness; - use crate::ccs::CCS; use super::*; use ff::PrimeField; diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 8f725268c..7c850dbfd 100644 --- a/src/ccs/mod.rs +++ 
b/src/ccs/mod.rs @@ -35,18 +35,9 @@ use std::ops::{Add, Mul}; use self::cccs::{CCCSInstance, CCCSShape, CCCSWitness}; mod cccs; +mod lcccs; pub(crate) mod virtual_poly; -// TODO: Committed CCS using MLE (see src/spartan/pp.rs) -// TODO: Linearized CCS struct and methods, separate struct similar to RelaxedR1CS - -/// Public parameters for a given CCS -#[derive(Clone, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct CCS { - _p: PhantomData, -} - /// A type that holds the shape of a CCS instance /// Unlike R1CS we have a list of matrices M instead of only A, B, C /// We also have t, q, d constants and c (vector), S (set) From b714a20f0fc31c49c55bc2f8c2974bc98ac44cec Mon Sep 17 00:00:00 2001 From: CPerezz Date: Tue, 27 Jun 2023 15:13:19 +0200 Subject: [PATCH 065/100] change: Plce impl blocks under their structs --- src/ccs/mod.rs | 155 ++++++++++++++++++++++--------------------------- 1 file changed, 68 insertions(+), 87 deletions(-) diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 7c850dbfd..2d7f07744 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -67,93 +67,6 @@ pub struct CCSShape { pub(crate) s_prime: usize, } -/// A type that holds a witness for a given CCS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct CCSWitness { - // Vector W in F^{n - l - 1} - w: Vec, -} - -impl CCSWitness { - /// Create a CCSWitness instance from the witness vector. - pub fn new(witness: Vec) -> Self { - Self { w: witness } - } - - /// Commits to the witness using the supplied generators - pub fn commit(&self, ck: &CommitmentKey) -> Commitment { - CE::::commit(ck, &self.w) - } -} -/// A type that holds an CCS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct CCSInstance { - // (Pedersen) Commitment to a witness - pub(crate) comm_w: Commitment, - - // Public input x in F^l - pub(crate) x: Vec, -} - -/// A type that holds a LCCCS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct LCCCSInstance { - pub(crate) C: Commitment, - pub(crate) x: Vec, - pub(crate) u: G::Scalar, - pub(crate) v: Vec, -} - -// NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` -/// A type that holds a witness for a given LCCCS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct LCCCSWitness { - pub(crate) w_mle: Vec, -} - -impl CCSShape { - pub(crate) fn to_cccs_shape(&self) -> CCCSShape { - let M_mle = self.M.iter().map(|matrix| matrix.to_mle()).collect(); - CCCSShape { - M_MLE: M_mle, - ccs: self.clone(), - } - } - - // Transform the CCS instance into a CCCS instance by providing the required Commitment key. - pub fn to_cccs_artifacts( - &self, - rng: &mut R, - ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, - z: &[G::Scalar], - ) -> (CCCSInstance, CCCSWitness, CCCSShape) { - let w: Vec = z[(1 + self.l)..].to_vec(); - // XXX: API doesn't offer a way to handle this apparently? - // Need to investigate - let _r_w = G::Scalar::random(rng); - let C = <::CE as CommitmentEngineTrait>::commit(ck, &w); - - ( - CCCSInstance { - C, - x: z[1..(1 + self.l)].to_vec(), - }, - CCCSWitness { w_mle: w }, - self.to_cccs_shape(), - ) - } - - // XXX: Update commitment_key variables here? 
This is currently based on R1CS with M length - /// Samples public parameters for the specified number of constraints and variables in an CCS - pub fn commitment_key(&self) -> CommitmentKey { - let total_nz = self.M.iter().fold(0, |acc, m| acc + m.coeffs().len()); - - G::CE::setup(b"ck", max(max(self.m, self.t), total_nz)) - } -} - impl CCSShape { /// Create an object of type `CCSShape` from the explicitly specified CCS matrices pub fn new( @@ -201,6 +114,45 @@ impl CCSShape { } } + pub(crate) fn to_cccs_shape(&self) -> CCCSShape { + let M_mle = self.M.iter().map(|matrix| matrix.to_mle()).collect(); + CCCSShape { + M_MLE: M_mle, + ccs: self.clone(), + } + } + + // Transform the CCS instance into a CCCS instance by providing the required Commitment key. + pub fn to_cccs_artifacts( + &self, + rng: &mut R, + ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, + z: &[G::Scalar], + ) -> (CCCSInstance, CCCSWitness, CCCSShape) { + let w: Vec = z[(1 + self.l)..].to_vec(); + // XXX: API doesn't offer a way to handle this apparently? + // Need to investigate + let _r_w = G::Scalar::random(rng); + let C = <::CE as CommitmentEngineTrait>::commit(ck, &w); + + ( + CCCSInstance { + C, + x: z[1..(1 + self.l)].to_vec(), + }, + CCCSWitness { w_mle: w }, + self.to_cccs_shape(), + ) + } + + // XXX: Update commitment_key variables here? This is currently based on R1CS with M length + /// Samples public parameters for the specified number of constraints and variables in an CCS + pub fn commitment_key(&self) -> CommitmentKey { + let total_nz = self.M.iter().fold(0, |acc, m| acc + m.coeffs().len()); + + G::CE::setup(b"ck", max(max(self.m, self.t), total_nz)) + } + // NOTE: Not using previous used multiply_vec (r1cs.rs), see utils.rs // NOTE: Equivalent to is_sat_relaxed (r1cs.rs) but for CCCSS/LCCCS? @@ -311,6 +263,35 @@ impl CCSShape { } } +/// A type that holds a witness for a given CCS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +pub struct CCSWitness { + // Vector W in F^{n - l - 1} + w: Vec, +} + +impl CCSWitness { + /// Create a CCSWitness instance from the witness vector. 
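+  /// (The witness is the w part of z = (1, io, w), i.e. the private assignment in F^{n - l - 1}.)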
+ pub fn new(witness: Vec) -> Self { + Self { w: witness } + } + + /// Commits to the witness using the supplied generators + pub fn commit(&self, ck: &CommitmentKey) -> Commitment { + CE::::commit(ck, &self.w) + } +} +/// A type that holds an CCS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct CCSInstance { + // (Pedersen) Commitment to a witness + pub(crate) comm_w: Commitment, + + // Public input x in F^l + pub(crate) x: Vec, +} + impl CCSInstance { /// A method to create an instance object using consitituent elements pub fn new( From d436f8784cba25ab11d2e8d62c73a857c6fa83a8 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Tue, 27 Jun 2023 18:28:06 +0200 Subject: [PATCH 066/100] change: Create utils folder under ccs --- src/ccs/cccs.rs | 2 +- src/ccs/mod.rs | 7 +------ src/ccs/util/mod.rs | 2 ++ src/ccs/{ => util}/virtual_poly.rs | 0 4 files changed, 4 insertions(+), 7 deletions(-) create mode 100644 src/ccs/util/mod.rs rename src/ccs/{ => util}/virtual_poly.rs (100%) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index 674075b7e..7544e166e 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -27,7 +27,7 @@ use sha3::{Digest, Sha3_256}; use std::ops::{Add, Mul}; use std::sync::Arc; -use super::virtual_poly::VirtualPolynomial; +use super::util::virtual_poly::VirtualPolynomial; use super::CCSShape; /// A type that holds the shape of a Committed CCS (CCCS) instance diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 2d7f07744..91c233a4e 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -36,7 +36,7 @@ use self::cccs::{CCCSInstance, CCCSShape, CCCSWitness}; mod cccs; mod lcccs; -pub(crate) mod virtual_poly; +mod util; /// A type that holds the shape of a CCS instance /// Unlike R1CS we have a list of matrices M instead of only A, B, C @@ -153,11 +153,6 @@ impl CCSShape { G::CE::setup(b"ck", max(max(self.m, self.t), total_nz)) } - // NOTE: Not using previous used multiply_vec (r1cs.rs), see utils.rs - - // NOTE: Equivalent to is_sat_relaxed (r1cs.rs) but for CCCSS/LCCCS? - // Either here or as a separate method on LCCCS struct - /// Checks if the CCS instance is satisfiable given a witness and its shape pub fn is_sat( &self, diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs new file mode 100644 index 000000000..80bf80432 --- /dev/null +++ b/src/ccs/util/mod.rs @@ -0,0 +1,2 @@ +pub(crate) mod virtual_poly; +pub(crate) use virtual_poly::VirtualPolynomial; diff --git a/src/ccs/virtual_poly.rs b/src/ccs/util/virtual_poly.rs similarity index 100% rename from src/ccs/virtual_poly.rs rename to src/ccs/util/virtual_poly.rs From 8e2d99c1ca056b8f7013c7ead98e2fde1291f680 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 28 Jun 2023 09:20:54 +0200 Subject: [PATCH 067/100] change: Move Compute_sum_Mz into util mode This method was part of CCCS associated methods. But it doesn't make sense as LCCCS also requires it aside from other standalone functions. Hence, it is moved to a util module. 
--- src/ccs/cccs.rs | 35 +++------------------------- src/ccs/util/mod.rs | 56 +++++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 59 insertions(+), 32 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index 7544e166e..85e32986c 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -27,6 +27,7 @@ use sha3::{Digest, Sha3_256}; use std::ops::{Add, Mul}; use std::sync::Arc; +use super::util::compute_sum_Mz; use super::util::virtual_poly::VirtualPolynomial; use super::CCSShape; @@ -60,36 +61,6 @@ pub struct CCCSWitness { } impl CCCSShape { - /// Computes the MLE of the CCS's Matrix at index `j` and executes the reduction of it summing over the given z. - pub fn compute_sum_Mz( - &self, - m_idx: usize, - z: &MultilinearPolynomial, - ) -> MultilinearPolynomial { - let s_prime = self.ccs.s_prime; - let M_j_mle = &self.M_MLE[m_idx]; - assert_eq!(z.get_num_vars(), s_prime); - - let mut sum_Mz = MultilinearPolynomial::new(vec![ - G::Scalar::ZERO; - 1 << (M_j_mle.get_num_vars() - s_prime) - ]); - - let bhc = BooleanHypercube::::new(s_prime); - for y in bhc.into_iter() { - let M_j_y = fix_variables(&M_j_mle, &y); - - // reverse y to match spartan/polynomial evaluate - let y_rev: Vec = y.into_iter().rev().collect(); - let z_y = z.evaluate(&y_rev); - let M_j_z = M_j_y.scalar_mul(&z_y); - // XXX: It's crazy to have results in the ops impls. Remove them! - sum_Mz = sum_Mz.clone().add(M_j_z).expect("This should not fail"); - } - - sum_Mz - } - // Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) // polynomial over x pub fn compute_q(&self, z: &Vec) -> Result, NovaError> { @@ -107,7 +78,7 @@ impl CCCSShape { let mut prod = VirtualPolynomial::::new(self.ccs.s); for &j in &self.ccs.S[idx] { - let sum_Mz = self.compute_sum_Mz(j, &z_mle); + let sum_Mz = compute_sum_Mz::(&self.M_MLE[j], &z_mle); // Fold this sum into the running product if prod.products.is_empty() { @@ -262,7 +233,7 @@ mod tests { for i in 0..ccs.q { let mut Sj_prod = Fq::one(); for j in ccs.S[i].clone() { - let sum_Mz = cccs.compute_sum_Mz(j, &z_mle); + let sum_Mz: MultilinearPolynomial = compute_sum_Mz::(&cccs.M_MLE[j], &z_mle); let sum_Mz_x = sum_Mz.evaluate(&x); Sj_prod *= sum_Mz_x; } diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index 80bf80432..8d4588a95 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -1,2 +1,58 @@ +use crate::hypercube::BooleanHypercube; +use crate::spartan::math::Math; +use crate::spartan::polynomial::MultilinearPolynomial; +use crate::utils::bit_decompose; +use crate::{ + constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, + errors::NovaError, + gadgets::{ + nonnative::{bignat::nat_to_limbs, util::f_to_nat}, + utils::scalar_as_base, + }, + r1cs::{R1CSInstance, R1CSShape, R1CSWitness, R1CS}, + traits::{ + commitment::CommitmentEngineTrait, commitment::CommitmentTrait, AbsorbInROTrait, Group, ROTrait, + }, + utils::*, + Commitment, CommitmentKey, CE, +}; +use bitvec::vec; +use core::{cmp::max, marker::PhantomData}; +use ff::{Field, PrimeField}; +use flate2::{write::ZlibEncoder, Compression}; +use itertools::concat; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; +use sha3::{Digest, Sha3_256}; +use std::ops::{Add, Mul}; +use std::sync::Arc; + +use super::cccs::fix_variables; +use super::CCSShape; pub(crate) mod virtual_poly; pub(crate) use virtual_poly::VirtualPolynomial; + +/// Computes the MLE of the CCS's Matrix at index `j` and executes the reduction of it summing over the given z. 
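+/// In other words, it returns the multilinear polynomial in x given by \sum_{y \in {0,1}^{s'}} M(x, y) * z(y).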
+pub fn compute_sum_Mz( + M_mle: &MultilinearPolynomial, + z: &MultilinearPolynomial, +) -> MultilinearPolynomial { + let mut sum_Mz = MultilinearPolynomial::new(vec![ + G::Scalar::ZERO; + 1 << (M_mle.get_num_vars() - z.get_num_vars()) + ]); + + let bhc = BooleanHypercube::::new(z.get_num_vars()); + for y in bhc.into_iter() { + let M_y = fix_variables(&M_mle, &y); + + // reverse y to match spartan/polynomial evaluate + let y_rev: Vec = y.into_iter().rev().collect(); + let z_y = z.evaluate(&y_rev); + let M_z = M_y.scalar_mul(&z_y); + // XXX: It's crazy to have results in the ops impls. Remove them! + sum_Mz = sum_Mz.clone().add(M_z).expect("This should not fail"); + } + + sum_Mz +} From 99679fb12a854d529e64cbcb421ff129567746a8 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 28 Jun 2023 09:41:19 +0200 Subject: [PATCH 068/100] change: Move `fix_variables` & util tests to util mod --- src/ccs/util/mod.rs | 130 +++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 129 insertions(+), 1 deletion(-) diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index 8d4588a95..443baadb1 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -27,7 +27,6 @@ use sha3::{Digest, Sha3_256}; use std::ops::{Add, Mul}; use std::sync::Arc; -use super::cccs::fix_variables; use super::CCSShape; pub(crate) mod virtual_poly; pub(crate) use virtual_poly::VirtualPolynomial; @@ -56,3 +55,132 @@ pub fn compute_sum_Mz( sum_Mz } + +pub(crate) fn fix_variables( + poly: &MultilinearPolynomial, + partial_point: &[F], +) -> MultilinearPolynomial { + assert!( + partial_point.len() <= poly.get_num_vars(), + "invalid size of partial point" + ); + let nv = poly.get_num_vars(); + let mut poly = poly.Z.to_vec(); + let dim = partial_point.len(); + // evaluate single variable of partial point from left to right + for (i, point) in partial_point.iter().enumerate() { + poly = fix_one_variable_helper(&poly, nv - i, point); + } + + MultilinearPolynomial::::new(poly[..(1 << (nv - dim))].to_vec()) +} + +fn fix_one_variable_helper(data: &[F], nv: usize, point: &F) -> Vec { + let mut res = vec![F::ZERO; 1 << (nv - 1)]; + + for i in 0..(1 << (nv - 1)) { + res[i] = data[i << 1] + (data[(i << 1) + 1] - data[i << 1]) * point; + } + + res +} + +#[cfg(test)] +mod tests { + use super::*; + use pasta_curves::{Ep, Fq}; + use rand_core::OsRng; + + #[test] + fn test_fix_variables() { + let A = SparseMatrix::::with_coeffs( + 4, + 4, + vec![ + (0, 0, Fq::from(2u64)), + (0, 1, Fq::from(3u64)), + (0, 2, Fq::from(4u64)), + (0, 3, Fq::from(4u64)), + (1, 0, Fq::from(4u64)), + (1, 1, Fq::from(11u64)), + (1, 2, Fq::from(14u64)), + (1, 3, Fq::from(14u64)), + (2, 0, Fq::from(2u64)), + (2, 1, Fq::from(8u64)), + (2, 2, Fq::from(17u64)), + (2, 3, Fq::from(17u64)), + (3, 0, Fq::from(420u64)), + (3, 1, Fq::from(4u64)), + (3, 2, Fq::from(2u64)), + (3, 3, Fq::ZERO), + ], + ); + + let A_mle = A.to_mle(); + let bhc = BooleanHypercube::::new(2); + for (i, y) in bhc.enumerate() { + let A_mle_op = fix_variables(&A_mle, &y); + + // Check that fixing first variables pins down a column + // i.e. fixing x to 0 will return the first column + // fixing x to 1 will return the second column etc. + let column_i: Vec = A + .clone() + .coeffs() + .iter() + .copied() + .filter_map(|(_, col, coeff)| if col == i { Some(coeff) } else { None }) + .collect(); + + assert_eq!(A_mle_op.Z, column_i); + + // // Now check that fixing last variables pins down a row + // // i.e. fixing y to 0 will return the first row + // // fixing y to 1 will return the second row etc. 
+ let row_i: Vec = A + .clone() + .coeffs() + .iter() + .copied() + .filter_map(|(row, _, coeff)| if row == i { Some(coeff) } else { None }) + .collect(); + + let mut last_vars_fixed = A_mle.clone(); + // this is equivalent to Espresso/hyperplonk's 'fix_last_variables' mehthod + for bit in y.clone().iter().rev() { + last_vars_fixed.bound_poly_var_top(&bit) + } + + assert_eq!(last_vars_fixed.Z, row_i); + } + } + + #[test] + fn test_compute_sum_Mz_over_boolean_hypercube() -> () { + let z = CCSShape::::get_test_z(3); + let (ccs, _, _) = CCSShape::::gen_test_ccs(&z, &mut OsRng); + + // Generate other artifacts + let ck = CCSShape::::commitment_key(&ccs); + let (_, _, cccs) = ccs.to_cccs_artifacts(&mut OsRng, &ck, &z); + + let z_mle = dense_vec_to_mle(ccs.s_prime, &z); + + // check that evaluating over all the values x over the boolean hypercube, the result of + // the next for loop is equal to 0 + let mut r = Fq::zero(); + let bch = BooleanHypercube::new(ccs.s); + for x in bch.into_iter() { + for i in 0..ccs.q { + let mut Sj_prod = Fq::one(); + for j in ccs.S[i].clone() { + let sum_Mz: MultilinearPolynomial = compute_sum_Mz::(&cccs.M_MLE[j], &z_mle); + let sum_Mz_x = sum_Mz.evaluate(&x); + Sj_prod *= sum_Mz_x; + } + r += Sj_prod * ccs.c[i]; + } + assert_eq!(r, Fq::ZERO); + } + } +} From 8b73f47686b2fab9efa8e7c585842c7ad33d16d4 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 28 Jun 2023 09:41:55 +0200 Subject: [PATCH 069/100] change: Move CCS test fns as associated fns --- src/ccs/cccs.rs | 177 ++---------------------------------------------- src/ccs/mod.rs | 46 +++++++++++++ 2 files changed, 52 insertions(+), 171 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index 85e32986c..4baee5bd3 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -113,35 +113,6 @@ impl CCCSShape { } } -pub fn fix_variables( - poly: &MultilinearPolynomial, - partial_point: &[F], -) -> MultilinearPolynomial { - assert!( - partial_point.len() <= poly.get_num_vars(), - "invalid size of partial point" - ); - let nv = poly.get_num_vars(); - let mut poly = poly.Z.to_vec(); - let dim = partial_point.len(); - // evaluate single variable of partial point from left to right - for (i, point) in partial_point.iter().enumerate() { - poly = fix_one_variable_helper(&poly, nv - i, point); - } - - MultilinearPolynomial::::new(poly[..(1 << (nv - dim))].to_vec()) -} - -fn fix_one_variable_helper(data: &[F], nv: usize, point: &F) -> Vec { - let mut res = vec![F::ZERO; 1 << (nv - 1)]; - - for i in 0..(1 << (nv - 1)) { - res[i] = data[i << 1] + (data[(i << 1) + 1] - data[i << 1]) * point; - } - - res -} - #[cfg(test)] mod tests { @@ -171,86 +142,14 @@ mod tests { vecs.iter().map(Vec::as_slice).collect() } - fn gen_test_ccs( - z: &Vec, - rng: &mut R, - ) -> (CCSShape, CCSWitness, CCSInstance) { - let one = G::Scalar::ONE; - let A = vec![ - (0, 1, one), - (1, 3, one), - (2, 1, one), - (2, 4, one), - (3, 0, G::Scalar::from(5u64)), - (3, 5, one), - ]; - - let B = vec![(0, 1, one), (1, 1, one), (2, 0, one), (3, 0, one)]; - let C = vec![(0, 3, one), (1, 4, one), (2, 5, one), (3, 2, one)]; - - // 2. 
Take R1CS and convert to CCS - let ccs = CCSShape::from_r1cs(R1CSShape::new(4, 6, 1, &A, &B, &C).unwrap()); - // Generate other artifacts - let ck = CCSShape::::commitment_key(&ccs); - let ccs_w = CCSWitness::new(z[2..].to_vec()); - let ccs_instance = CCSInstance::new(&ccs, &ccs_w.commit(&ck), vec![z[1]]).unwrap(); - - ccs - .is_sat(&ck, &ccs_instance, &ccs_w) - .expect("This does not fail"); - (ccs, ccs_w, ccs_instance) - } - - /// Computes the z vector for the given input for Vitalik's equation. - pub fn get_test_z(input: u64) -> Vec { - // z = (1, io, w) - to_F_vec(vec![ - 1, - input, - input * input * input + input + 5, // x^3 + x + 5 - input * input, // x^2 - input * input * input, // x^2 * x - input * input * input + input, // x^3 + x - ]) - } - - #[test] - fn test_compute_sum_Mz_over_boolean_hypercube() -> () { - let z = get_test_z::(3); - let (ccs, _, _) = gen_test_ccs::(&z, &mut OsRng); - - // Generate other artifacts - let ck = CCSShape::::commitment_key(&ccs); - let (_, _, cccs) = ccs.to_cccs_artifacts(&mut OsRng, &ck, &z); - - let z_mle = dense_vec_to_mle(ccs.s_prime, &z); - - // check that evaluating over all the values x over the boolean hypercube, the result of - // the next for loop is equal to 0 - let mut r = Fq::zero(); - let bch = BooleanHypercube::new(ccs.s); - for x in bch.into_iter() { - for i in 0..ccs.q { - let mut Sj_prod = Fq::one(); - for j in ccs.S[i].clone() { - let sum_Mz: MultilinearPolynomial = compute_sum_Mz::(&cccs.M_MLE[j], &z_mle); - let sum_Mz_x = sum_Mz.evaluate(&x); - Sj_prod *= sum_Mz_x; - } - r += Sj_prod * ccs.c[i]; - } - assert_eq!(r, Fq::ZERO); - } - } - /// Do some sanity checks on q(x). It's a multivariable polynomial and it should evaluate to zero inside the /// hypercube, but to not-zero outside the hypercube. 
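   /// (On a boolean point x, q(x) equals \sum_i c_i * \prod_{j \in S_i} (M_j z)[x], i.e. exactly the CCS
   /// constraint for row x, which is zero for a satisfying z; outside the hypercube there is no such guarantee.)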
#[test] fn test_compute_q() { let mut rng = OsRng; - let z = get_test_z::(3); - let (ccs_shape, ccs_witness, ccs_instance) = gen_test_ccs(&z, &mut rng); + let z = CCSShape::::get_test_z(3); + let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z, &mut rng); // generate ck let ck = CCSShape::::commitment_key(&ccs_shape); @@ -276,8 +175,8 @@ mod tests { fn test_compute_Q() { let mut rng = OsRng; - let z = get_test_z::(3); - let (ccs_shape, ccs_witness, ccs_instance) = gen_test_ccs(&z, &mut rng); + let z = CCSShape::::get_test_z(3); + let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z, &mut rng); // generate ck let ck = CCSShape::::commitment_key(&ccs_shape); @@ -320,8 +219,8 @@ mod tests { fn test_Q_against_q() -> () { let mut rng = OsRng; - let z = get_test_z::(3); - let (ccs_shape, ccs_witness, ccs_instance) = gen_test_ccs(&z, &mut rng); + let z = CCSShape::::get_test_z(3); + let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z, &mut rng); // generate ck let ck = CCSShape::::commitment_key(&ccs_shape); @@ -362,68 +261,4 @@ mod tests { .fold(Fq::zero(), |acc, result| acc + result); assert_ne!(G_at_r, q.evaluate(&r).unwrap()); } - - #[test] - fn test_fix_variables() { - let A = SparseMatrix::::with_coeffs( - 4, - 4, - vec![ - (0, 0, Fq::from(2u64)), - (0, 1, Fq::from(3u64)), - (0, 2, Fq::from(4u64)), - (0, 3, Fq::from(4u64)), - (1, 0, Fq::from(4u64)), - (1, 1, Fq::from(11u64)), - (1, 2, Fq::from(14u64)), - (1, 3, Fq::from(14u64)), - (2, 0, Fq::from(2u64)), - (2, 1, Fq::from(8u64)), - (2, 2, Fq::from(17u64)), - (2, 3, Fq::from(17u64)), - (3, 0, Fq::from(420u64)), - (3, 1, Fq::from(4u64)), - (3, 2, Fq::from(2u64)), - (3, 3, Fq::ZERO), - ], - ); - - let A_mle = A.to_mle(); - let bhc = BooleanHypercube::::new(2); - for (i, y) in bhc.enumerate() { - let A_mle_op = fix_variables(&A_mle, &y); - - // Check that fixing first variables pins down a column - // i.e. fixing x to 0 will return the first column - // fixing x to 1 will return the second column etc. - let column_i: Vec = A - .clone() - .coeffs() - .iter() - .copied() - .filter_map(|(_, col, coeff)| if col == i { Some(coeff) } else { None }) - .collect(); - - assert_eq!(A_mle_op.Z, column_i); - - // // Now check that fixing last variables pins down a row - // // i.e. fixing y to 0 will return the first row - // // fixing y to 1 will return the second row etc. - let row_i: Vec = A - .clone() - .coeffs() - .iter() - .copied() - .filter_map(|(row, _, coeff)| if row == i { Some(coeff) } else { None }) - .collect(); - - let mut last_vars_fixed = A_mle.clone(); - // this is equivalent to Espresso/hyperplonk's 'fix_last_variables' mehthod - for bit in y.clone().iter().rev() { - last_vars_fixed.bound_poly_var_top(&bit) - } - - assert_eq!(last_vars_fixed.Z, row_i); - } - } } diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 91c233a4e..67411e5a7 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -256,6 +256,52 @@ impl CCSShape { self.n = padded_n; } } + + #[cfg(test)] + fn gen_test_ccs( + z: &Vec, + rng: &mut R, + ) -> (CCSShape, CCSWitness, CCSInstance) { + let one = G::Scalar::ONE; + let A = vec![ + (0, 1, one), + (1, 3, one), + (2, 1, one), + (2, 4, one), + (3, 0, G::Scalar::from(5u64)), + (3, 5, one), + ]; + + let B = vec![(0, 1, one), (1, 1, one), (2, 0, one), (3, 0, one)]; + let C = vec![(0, 3, one), (1, 4, one), (2, 5, one), (3, 2, one)]; + + // 2. 
Take R1CS and convert to CCS + let ccs = CCSShape::from_r1cs(R1CSShape::new(4, 6, 1, &A, &B, &C).unwrap()); + // Generate other artifacts + let ck = CCSShape::::commitment_key(&ccs); + let ccs_w = CCSWitness::new(z[2..].to_vec()); + let ccs_instance = CCSInstance::new(&ccs, &ccs_w.commit(&ck), vec![z[1]]).unwrap(); + + ccs + .is_sat(&ck, &ccs_instance, &ccs_w) + .expect("This does not fail"); + (ccs, ccs_w, ccs_instance) + } + + #[cfg(test)] + /// Computes the z vector for the given input for Vitalik's equation. + pub fn get_test_z(input: u64) -> Vec { + // z = (1, io, w) + let input = G::Scalar::from(input); + vec![ + G::Scalar::ONE, + input, + input * input * input + input + G::Scalar::from(5u64), // x^3 + x + 5 + input * input, // x^2 + input * input * input, // x^2 * x + input * input * input + input, // x^3 + x + ] + } } /// A type that holds a witness for a given CCS instance From eaadaf220afc29350ad5e420032e8f545e221dd6 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 28 Jun 2023 09:57:13 +0200 Subject: [PATCH 070/100] Add: Compute_all_sum_Mz_evals fn to utils module --- src/ccs/util/mod.rs | 22 ++++++++++++++++++++++ 1 file changed, 22 insertions(+) diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index 443baadb1..612ee8f88 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -85,6 +85,28 @@ fn fix_one_variable_helper(data: &[F], nv: usize, point: &F) -> V res } +/// Return a vector of evaluations p_j(r) = \sum_{y \in {0,1}^s'} M_j(r, y) * z(y) +/// for all j values in 0..self.t +// XXX: This fn needs tests!! +pub fn compute_all_sum_Mz_evals( + M_x_y_mle: &[MultilinearPolynomial], + // XXX: Can we just get the MLE? + z: &Vec, + r: &[G::Scalar], + s_prime: usize, +) -> Vec { + // Convert z to MLE + let z_y_mle = dense_vec_to_mle(s_prime, z); + + let mut v = Vec::with_capacity(M_x_y_mle.len()); + for M_i in M_x_y_mle { + let sum_Mz = compute_sum_Mz::(M_i, &z_y_mle); + let v_i = sum_Mz.evaluate(r); + v.push(v_i); + } + v +} + #[cfg(test)] mod tests { use super::*; From e3303cd2a305b8a509afb57a92e21f26be7fabf2 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 28 Jun 2023 10:00:44 +0200 Subject: [PATCH 071/100] add: Ck computation & instance satisfaction fns for LCCCS --- src/ccs/lcccs.rs | 94 ++++++++++++++++++++++++++++++++++++++++++++++++ 1 file changed, 94 insertions(+) create mode 100644 src/ccs/lcccs.rs diff --git a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs new file mode 100644 index 000000000..4ec60514b --- /dev/null +++ b/src/ccs/lcccs.rs @@ -0,0 +1,94 @@ +use super::util::{compute_sum_Mz, VirtualPolynomial}; +use super::{CCSShape, CCSWitness}; +use crate::ccs::util::compute_all_sum_Mz_evals; +use crate::hypercube::BooleanHypercube; +use crate::spartan::math::Math; +use crate::spartan::polynomial::MultilinearPolynomial; +use crate::utils::bit_decompose; +use crate::{ + constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, + errors::NovaError, + gadgets::{ + nonnative::{bignat::nat_to_limbs, util::f_to_nat}, + utils::scalar_as_base, + }, + r1cs::{R1CSInstance, R1CSShape, R1CSWitness, R1CS}, + traits::{ + commitment::CommitmentEngineTrait, commitment::CommitmentTrait, AbsorbInROTrait, Group, ROTrait, + }, + utils::*, + Commitment, CommitmentKey, CE, +}; +use bitvec::vec; +use core::{cmp::max, marker::PhantomData}; +use ff::{Field, PrimeField}; +use flate2::{write::ZlibEncoder, Compression}; +use itertools::concat; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; +use sha3::{Digest, Sha3_256}; +use std::ops::{Add, Mul}; +use std::sync::Arc; + 
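+// "LCCCS" stands for Linearized Committed CCS: a committed CCS instance whose satisfiability
+// check has been reduced to claimed evaluations v_j = \sum_{y \in {0,1}^{s'}} M_j(r_x, y) * z(y)
+// at a fixed random point r_x, with z = (u, x, w).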
+/// A type that holds a LCCCS instance +#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[serde(bound = "")] +pub struct LCCCS { + pub(crate) C: Commitment, + pub(crate) x: Vec, + pub(crate) u: G::Scalar, + pub(crate) v: Vec, + // Random evaluation point for the v_i + pub r_x: Vec, + // This should not need to be here. Should be a reference only. + pub(crate) matrix_mles: Vec>, + pub(crate) ccs: CCSShape, +} + +impl LCCCS { + // XXX: Double check that this is indeed correct. + /// Samples public parameters for the specified number of constraints and variables in an CCS + pub fn commitment_key(&self) -> CommitmentKey { + let total_nz = self.ccs.M.iter().fold(0, |acc, m| acc + m.coeffs().len()); + + G::CE::setup(b"ck", max(max(self.ccs.m, self.ccs.t), total_nz)) + } + + /// Compute all L_j(x) polynomials + // Can we recieve the MLE of z directy? + pub fn compute_Ls(&self, z: &Vec) -> Vec> { + let z_mle = dense_vec_to_mle(self.ccs.s_prime, z); + + let mut vec_L_j_x = Vec::with_capacity(self.ccs.t); + for M_j in self.matrix_mles.iter() { + // Sanity check + assert_eq!(z_mle.get_num_vars(), self.ccs.s_prime); + let sum_Mz = compute_sum_Mz::(&M_j, &z_mle); + let sum_Mz_virtual = + VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz.clone()), G::Scalar::ONE); + let L_j_x = sum_Mz_virtual.build_f_hat(&self.r_x).unwrap(); + vec_L_j_x.push(L_j_x); + } + + vec_L_j_x + } + + /// Checks if the CCS instance is satisfiable given a witness and its shape + pub fn is_sat( + &self, + ck: &CommitmentKey, + lcccs: &LCCCS, + W: &CCSWitness, + ) -> Result<(), NovaError> { + // check that C is the commitment of w. Notice that this is not verifying a Pedersen + // opening, but checking that the Commmitment comes from committing to the witness. + assert_eq!(self.C, CE::::commit(ck, &W.w)); + + // check CCS relation + let z: Vec = [vec![self.u], self.x.clone(), W.w.to_vec()].concat(); + let computed_v = + compute_all_sum_Mz_evals::(&lcccs.matrix_mles, &z, &self.r_x, self.ccs.s_prime); + assert_eq!(computed_v, self.v); + Ok(()) + } +} From f5a13a4dc516e61879ce846281fe2b004b71e9fd Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 28 Jun 2023 10:10:20 +0200 Subject: [PATCH 072/100] remove: Rng param in gen_test_ccs instance fn --- src/ccs/cccs.rs | 6 +++--- src/ccs/mod.rs | 5 +---- src/ccs/util/mod.rs | 2 +- 3 files changed, 5 insertions(+), 8 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index 4baee5bd3..b2893a186 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -149,7 +149,7 @@ mod tests { let mut rng = OsRng; let z = CCSShape::::get_test_z(3); - let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z, &mut rng); + let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); // generate ck let ck = CCSShape::::commitment_key(&ccs_shape); @@ -176,7 +176,7 @@ mod tests { let mut rng = OsRng; let z = CCSShape::::get_test_z(3); - let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z, &mut rng); + let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); // generate ck let ck = CCSShape::::commitment_key(&ccs_shape); @@ -220,7 +220,7 @@ mod tests { let mut rng = OsRng; let z = CCSShape::::get_test_z(3); - let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z, &mut rng); + let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); // generate ck let ck = CCSShape::::commitment_key(&ccs_shape); diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 67411e5a7..1b7e5c7a9 100644 --- 
a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -258,10 +258,7 @@ impl CCSShape { } #[cfg(test)] - fn gen_test_ccs( - z: &Vec, - rng: &mut R, - ) -> (CCSShape, CCSWitness, CCSInstance) { + fn gen_test_ccs(z: &Vec) -> (CCSShape, CCSWitness, CCSInstance) { let one = G::Scalar::ONE; let A = vec![ (0, 1, one), diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index 612ee8f88..72993d7eb 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -180,7 +180,7 @@ mod tests { #[test] fn test_compute_sum_Mz_over_boolean_hypercube() -> () { let z = CCSShape::::get_test_z(3); - let (ccs, _, _) = CCSShape::::gen_test_ccs(&z, &mut OsRng); + let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); // Generate other artifacts let ck = CCSShape::::commitment_key(&ccs); From bf589be308f455f64e40fabce175b7af7cb020cc Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 28 Jun 2023 11:50:49 +0200 Subject: [PATCH 073/100] Add: CCS to LCCCS transformation & v_j s comp --- src/ccs/mod.rs | 45 ++++++++++++++++++++++++++++++++++++++++++++- 1 file changed, 44 insertions(+), 1 deletion(-) diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 1b7e5c7a9..e215dd97d 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -33,6 +33,8 @@ use sha3::{Digest, Sha3_256}; use std::ops::{Add, Mul}; use self::cccs::{CCCSInstance, CCCSShape, CCCSWitness}; +use self::lcccs::LCCCS; +use self::util::compute_all_sum_Mz_evals; mod cccs; mod lcccs; @@ -122,7 +124,7 @@ impl CCSShape { } } - // Transform the CCS instance into a CCCS instance by providing the required Commitment key. + // Transform the CCS instance into a CCCS instance by providing a commitment key. pub fn to_cccs_artifacts( &self, rng: &mut R, @@ -145,6 +147,47 @@ impl CCSShape { ) } + /// Transform the CCS instance into an LCCCS instance by providing a commitment key. + pub fn to_lcccs( + &self, + mut rng: &mut R, + ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, + z: &[G::Scalar], + ) -> (LCCCS, CCSWitness) { + let w: Vec = z[(1 + self.l)..].to_vec(); + let r_w = G::Scalar::random(&mut rng); + let C = <::CE as CommitmentEngineTrait>::commit(ck, &w); + + // XXX: API doesn't offer a way to handle this?? + let _r_x: Vec = (0..self.s).map(|_| G::Scalar::random(&mut rng)).collect(); + + let v = self.compute_v_j(z, &_r_x); + // XXX: Is absurd to compute these again here. We should take care of this. + let matrix_mles: Vec> = + self.M.iter().map(|matrix| matrix.to_mle()).collect(); + + ( + LCCCS:: { + ccs: self.clone(), + C, + u: G::Scalar::ONE, + x: z[1..(1 + self.l)].to_vec(), + r_x: _r_x, + v, + matrix_mles, + }, + CCSWitness:: { w }, + ) + } + + /// Compute v_j values of the linearized committed CCS form + /// Given `r`, compute: \sum_{y \in {0,1}^s'} M_j(r, y) * z(y) + fn compute_v_j(&self, z: &[G::Scalar], r: &[G::Scalar]) -> Vec { + let M_mle: Vec> = + self.M.iter().map(|matrix| matrix.to_mle()).collect(); + compute_all_sum_Mz_evals::(&M_mle, &z.to_vec(), r, self.s_prime) + } + // XXX: Update commitment_key variables here? 
This is currently based on R1CS with M length /// Samples public parameters for the specified number of constraints and variables in an CCS pub fn commitment_key(&self) -> CommitmentKey { From 45e8391423c05159d673e97b51639a8228d72598 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Wed, 28 Jun 2023 17:43:04 +0200 Subject: [PATCH 074/100] add: LCCCS tests with test_lcccs_v_j FAILING --- src/ccs/lcccs.rs | 129 ++++++++++++++++++++++++++++++++++++++------ src/ccs/util/mod.rs | 2 +- 2 files changed, 114 insertions(+), 17 deletions(-) diff --git a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs index 4ec60514b..7f4b687f1 100644 --- a/src/ccs/lcccs.rs +++ b/src/ccs/lcccs.rs @@ -54,6 +54,25 @@ impl LCCCS { G::CE::setup(b"ck", max(max(self.ccs.m, self.ccs.t), total_nz)) } + /// Checks if the CCS instance is satisfiable given a witness and its shape + pub fn is_sat(&self, ck: &CommitmentKey, W: &CCSWitness) -> Result<(), NovaError> { + // check that C is the commitment of w. Notice that this is not verifying a Pedersen + // opening, but checking that the Commmitment comes from committing to the witness. + let comm_eq = self.C == CE::::commit(ck, &W.w); + + // check CCS relation + let z: Vec = [vec![self.u], self.x.clone(), W.w.to_vec()].concat(); + let computed_v = + compute_all_sum_Mz_evals::(&self.matrix_mles, &z, &self.r_x, self.ccs.s_prime); + let vs_eq = computed_v == self.v; + + if vs_eq && comm_eq { + Ok(()) + } else { + Err(NovaError::UnSat) + } + } + /// Compute all L_j(x) polynomials // Can we recieve the MLE of z directy? pub fn compute_Ls(&self, z: &Vec) -> Vec> { @@ -63,6 +82,7 @@ impl LCCCS { for M_j in self.matrix_mles.iter() { // Sanity check assert_eq!(z_mle.get_num_vars(), self.ccs.s_prime); + let sum_Mz = compute_sum_Mz::(&M_j, &z_mle); let sum_Mz_virtual = VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz.clone()), G::Scalar::ONE); @@ -72,23 +92,100 @@ impl LCCCS { vec_L_j_x } +} - /// Checks if the CCS instance is satisfiable given a witness and its shape - pub fn is_sat( - &self, - ck: &CommitmentKey, - lcccs: &LCCCS, - W: &CCSWitness, - ) -> Result<(), NovaError> { - // check that C is the commitment of w. Notice that this is not verifying a Pedersen - // opening, but checking that the Commmitment comes from committing to the witness. - assert_eq!(self.C, CE::::commit(ck, &W.w)); +#[cfg(test)] +mod tests { + use pasta_curves::{Ep, Fq}; + use rand_core::OsRng; - // check CCS relation - let z: Vec = [vec![self.u], self.x.clone(), W.w.to_vec()].concat(); - let computed_v = - compute_all_sum_Mz_evals::(&lcccs.matrix_mles, &z, &self.r_x, self.ccs.s_prime); - assert_eq!(computed_v, self.v); - Ok(()) + use super::*; + + #[test] + fn satisfied_ccs_is_satisfied_lcccs() { + // Gen test vectors & artifacts + let z = CCSShape::::get_test_z(3); + let (ccs, witness, instance) = CCSShape::::gen_test_ccs(&z); + let ck = ccs.commitment_key(); + assert!(ccs.is_sat(&ck, &instance, &witness).is_ok()); + + // Wrong z so that the relation does not hold + let mut bad_z = z.clone(); + bad_z[3] = Fq::ZERO; + + // LCCCS with the correct z should pass + let (lcccs, _) = ccs.to_lcccs(&mut OsRng, &ck, &z); + assert!(lcccs.is_sat(&ck, &witness).is_ok()); + + // LCCCS with the wrong z should not pass `is_sat`. 
+ // LCCCS with the correct z should pass + let (lcccs, _) = ccs.to_lcccs(&mut OsRng, &ck, &bad_z); + assert!(lcccs.is_sat(&ck, &witness).is_err()); + } + + #[test] + /// Test linearized CCCS v_j against the L_j(x) + fn test_lcccs_v_j() -> () { + let mut rng = OsRng; + + // Gen test vectors & artifacts + let z = CCSShape::::get_test_z(3); + let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); + let ck = ccs.commitment_key(); + + // Get LCCCS + let (lcccs, _) = ccs.to_lcccs(&mut rng, &ck, &z); + + let vec_L_j_x = lcccs.compute_Ls(&z); + assert_eq!(vec_L_j_x.len(), lcccs.v.len()); + + for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { + let sum_L_j_x = BooleanHypercube::new(ccs.s) + .into_iter() + .map(|y| L_j_x.evaluate(&y).unwrap()) + .fold(Fq::ZERO, |acc, result| acc + result); + assert_eq!(v_i, sum_L_j_x); + } + } + + /// Given a bad z, check that the v_j should not match with the L_j(x) + #[test] + fn test_bad_v_j() -> () { + let mut rng = OsRng; + + // Gen test vectors & artifacts + let z = CCSShape::::get_test_z(3); + let (ccs, witness, instance) = CCSShape::::gen_test_ccs(&z); + let ck = ccs.commitment_key(); + + // Mutate z so that the relation does not hold + let mut bad_z = z.clone(); + bad_z[3] = Fq::ZERO; + + // Compute v_j with the right z + let (lcccs, _) = ccs.to_lcccs(&mut rng, &ck, &z); + // Assert LCCCS is satisfied with the original Z + assert!(lcccs.is_sat(&ck, &witness).is_ok()); + + // Bad compute L_j(x) with the bad z + let vec_L_j_x = lcccs.compute_Ls(&bad_z); + assert_eq!(vec_L_j_x.len(), lcccs.v.len()); + // Assert LCCCS is not satisfied with the bad Z + assert!(lcccs.is_sat(&ck, &CCSWitness { w: bad_z }).is_err()); + + // Make sure that the LCCCS is not satisfied given these L_j(x) + // i.e. summing L_j(x) over the hypercube should not give v_j for all j + let mut satisfied = true; + for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { + let sum_L_j_x = BooleanHypercube::new(ccs.s) + .into_iter() + .map(|y| L_j_x.evaluate(&y).unwrap()) + .fold(Fq::ZERO, |acc, result| acc + result); + if v_i != sum_L_j_x { + satisfied = false; + } + } + + assert_eq!(satisfied, false); } } diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index 72993d7eb..d32c603b3 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -180,7 +180,7 @@ mod tests { #[test] fn test_compute_sum_Mz_over_boolean_hypercube() -> () { let z = CCSShape::::get_test_z(3); - let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); + let (ccs, witness, _) = CCSShape::::gen_test_ccs(&z); // Generate other artifacts let ck = CCSShape::::commitment_key(&ccs); From a14d27df48d530d0a0364add6eb77f6468226f54 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Thu, 29 Jun 2023 15:46:06 +0200 Subject: [PATCH 075/100] fix: compute_all_sum_Mz_evals with test This adds a test for the `compute_all_sum_Mz_evals` fn and fixes it by reversing the order of iteration over `r_x` so that it matches the expectancies from Spartan/Polynomial lib impl. --- src/ccs/util/mod.rs | 29 ++++++++++++++++++++++++++--- 1 file changed, 26 insertions(+), 3 deletions(-) diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index d32c603b3..b7ccb7da7 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -87,7 +87,6 @@ fn fix_one_variable_helper(data: &[F], nv: usize, point: &F) -> V /// Return a vector of evaluations p_j(r) = \sum_{y \in {0,1}^s'} M_j(r, y) * z(y) /// for all j values in 0..self.t -// XXX: This fn needs tests!! pub fn compute_all_sum_Mz_evals( M_x_y_mle: &[MultilinearPolynomial], // XXX: Can we just get the MLE? 
@@ -101,7 +100,13 @@ pub fn compute_all_sum_Mz_evals( let mut v = Vec::with_capacity(M_x_y_mle.len()); for M_i in M_x_y_mle { let sum_Mz = compute_sum_Mz::(M_i, &z_y_mle); - let v_i = sum_Mz.evaluate(r); + + // XXX: We need a better way to do this. Sum_Mz has also the same issue. + // reverse the `r` given to evaluate to match Spartan/Nova endianness. + let mut r = r.to_vec(); + r.reverse(); + + let v_i = sum_Mz.evaluate(&r); v.push(v_i); } v @@ -180,7 +185,7 @@ mod tests { #[test] fn test_compute_sum_Mz_over_boolean_hypercube() -> () { let z = CCSShape::::get_test_z(3); - let (ccs, witness, _) = CCSShape::::gen_test_ccs(&z); + let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); // Generate other artifacts let ck = CCSShape::::commitment_key(&ccs); @@ -205,4 +210,22 @@ mod tests { assert_eq!(r, Fq::ZERO); } } + + #[test] + fn test_compute_all_sum_Mz_evals() { + let z = CCSShape::::get_test_z(3); + let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); + + // Generate other artifacts + let ck = CCSShape::::commitment_key(&ccs); + let (_, _, cccs) = ccs.to_cccs_artifacts(&mut OsRng, &ck, &z); + + let mut r = vec![Fq::ONE, Fq::ZERO]; + let res = compute_all_sum_Mz_evals::(cccs.M_MLE.as_slice(), &z, &r, ccs.s_prime); + assert_eq!(res, vec![Fq::from(9u64), Fq::from(3u64), Fq::from(27u64)]); + + r.reverse(); + let res = compute_all_sum_Mz_evals::(cccs.M_MLE.as_slice(), &z, &r, ccs.s_prime); + assert_eq!(res, vec![Fq::from(30u64), Fq::from(1u64), Fq::from(30u64)]) + } } From 3c1017c874da7d2a6104c1b6ff5038b7f208e46f Mon Sep 17 00:00:00 2001 From: CPerezz Date: Thu, 29 Jun 2023 15:52:22 +0200 Subject: [PATCH 076/100] fix: test_lcccs_v_j working --- src/ccs/lcccs.rs | 3 +-- src/ccs/mod.rs | 1 + 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs index 7f4b687f1..6dfd037a3 100644 --- a/src/ccs/lcccs.rs +++ b/src/ccs/lcccs.rs @@ -84,8 +84,7 @@ impl LCCCS { assert_eq!(z_mle.get_num_vars(), self.ccs.s_prime); let sum_Mz = compute_sum_Mz::(&M_j, &z_mle); - let sum_Mz_virtual = - VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz.clone()), G::Scalar::ONE); + let sum_Mz_virtual = VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz), G::Scalar::ONE); let L_j_x = sum_Mz_virtual.build_f_hat(&self.r_x).unwrap(); vec_L_j_x.push(L_j_x); } diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index e215dd97d..aa6d26099 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -183,6 +183,7 @@ impl CCSShape { /// Compute v_j values of the linearized committed CCS form /// Given `r`, compute: \sum_{y \in {0,1}^s'} M_j(r, y) * z(y) fn compute_v_j(&self, z: &[G::Scalar], r: &[G::Scalar]) -> Vec { + // XXX: Should these be MLE already? let M_mle: Vec> = self.M.iter().map(|matrix| matrix.to_mle()).collect(); compute_all_sum_Mz_evals::(&M_mle, &z.to_vec(), r, self.s_prime) From 2a1fa7a52d35d412a997ffa1b34fc17f89008857 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Fri, 30 Jun 2023 12:05:21 +0200 Subject: [PATCH 077/100] add: Basic multifolding functions & tests Includes fns to compute thetas & sigmas, C from thetas and sigmans and computation of `g` poly. It also includes tests for g computation. 
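For intuition, the right-hand side computed in step 5 (see `compute_c_from_sigmas_and_thetas` below) is c = (sum_j gamma^j * e1 * sigma_j) + gamma^(t+1) * e2 * (sum_i c_i * prod_{j in S_i} theta_j), with e1 = eq(r_x, r_x') and e2 = eq(beta, r_x'). The stand-alone sketch below is illustrative only and not part of this patch; it reuses the `ff`/`pasta_curves` dependencies already in the crate, and `toy_c` together with its inputs are made up for the example.

  use ff::Field;
  use pasta_curves::Fq;

  // Toy evaluation of c = (sum_j gamma^j * e1 * sigma_j)
  //                     + gamma^(t+1) * e2 * (sum_i c_i * prod_{j in S_i} theta_j).
  fn toy_c(
    sigmas: &[Fq],
    thetas: &[Fq],
    gamma: Fq,
    e1: Fq,
    e2: Fq,
    c: &[Fq],
    s: &[Vec<usize>],
  ) -> Fq {
    let t = sigmas.len();
    // sum_j gamma^j * e1 * sigma_j
    let mut acc = Fq::ZERO;
    for (j, sigma_j) in sigmas.iter().enumerate() {
      acc += gamma.pow([j as u64]) * e1 * sigma_j;
    }
    // sum_i c_i * prod_{j in S_i} theta_j
    let mut rhs = Fq::ZERO;
    for (c_i, s_i) in c.iter().zip(s) {
      rhs += *c_i * s_i.iter().fold(Fq::ONE, |prod, &j| prod * thetas[j]);
    }
    acc + gamma.pow([(t + 1) as u64]) * e2 * rhs
  }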
--- src/ccs/multifolding.rs | 197 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 197 insertions(+) create mode 100644 src/ccs/multifolding.rs diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs new file mode 100644 index 000000000..53f94f376 --- /dev/null +++ b/src/ccs/multifolding.rs @@ -0,0 +1,197 @@ +use super::cccs::CCCSShape; +use super::lcccs::LCCCS; +use super::util::{compute_sum_Mz, VirtualPolynomial}; +use super::{CCSShape, CCSWitness}; +use crate::ccs::util::compute_all_sum_Mz_evals; +use crate::hypercube::BooleanHypercube; +use crate::spartan::math::Math; +use crate::spartan::polynomial::MultilinearPolynomial; +use crate::utils::bit_decompose; +use crate::{ + constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, + errors::NovaError, + gadgets::{ + nonnative::{bignat::nat_to_limbs, util::f_to_nat}, + utils::scalar_as_base, + }, + r1cs::{R1CSInstance, R1CSShape, R1CSWitness, R1CS}, + traits::{ + commitment::CommitmentEngineTrait, commitment::CommitmentTrait, AbsorbInROTrait, Group, ROTrait, + }, + utils::*, + Commitment, CommitmentKey, CE, +}; +use bitvec::vec; +use core::{cmp::max, marker::PhantomData}; +use ff::{Field, PrimeField}; +use flate2::{write::ZlibEncoder, Compression}; +use itertools::concat; +use rayon::prelude::*; +use serde::{Deserialize, Serialize}; +use sha3::{Digest, Sha3_256}; +use std::ops::{Add, Mul}; +use std::sync::Arc; + +// XXX: THe idea is to have Multifolding as IVC instance in the future, holding the main CCS +// instances. Then the rest of CCS, CCCS, LCCCS hold references to it. +// Is our single source of data. +#[derive(Debug)] +pub struct Multifolding { + ccs: CCSShape, + ccs_mle: Vec>, +} + +impl Multifolding { + /// Compute sigma_i and theta_i from step 4 + pub fn compute_sigmas_and_thetas( + &self, + z1: &Vec, + z2: &Vec, + r_x_prime: &[G::Scalar], + ) -> (Vec, Vec) { + ( + // sigmas + compute_all_sum_Mz_evals::(&self.ccs_mle, z1, r_x_prime, self.ccs.s_prime), + // thetas + compute_all_sum_Mz_evals::(&self.ccs_mle, z2, r_x_prime, self.ccs.s_prime), + ) + } + + /// Compute the right-hand-side of step 5 of the multifolding scheme + pub fn compute_c_from_sigmas_and_thetas( + &self, + sigmas: &[G::Scalar], + thetas: &[G::Scalar], + gamma: G::Scalar, + beta: &[G::Scalar], + r_x: &[G::Scalar], + r_x_prime: &[G::Scalar], + ) -> G::Scalar { + let mut c = G::Scalar::ZERO; + + let e1 = eq_eval(r_x, r_x_prime); + let e2 = eq_eval(beta, r_x_prime); + + // (sum gamma^j * e1 * sigma_j) + for (j, sigma_j) in sigmas.iter().enumerate() { + let gamma_j = gamma.pow([j as u64]); + c += gamma_j * e1 * sigma_j; + } + + // + gamma^{t+1} * e2 * sum c_i * prod theta_j + let mut lhs = G::Scalar::ZERO; + for i in 0..self.ccs.q { + let mut prod = G::Scalar::ONE; + for j in self.ccs.S[i].clone() { + prod *= thetas[j]; + } + lhs += self.ccs.c[i] * prod; + } + let gamma_t1 = gamma.pow([(self.ccs.t + 1) as u64]); + c += gamma_t1 * e2 * lhs; + c + } + + /// Compute g(x) polynomial for the given inputs. 
+ pub fn compute_g( + running_instance: &LCCCS, + cccs_instance: &CCCSShape, + z1: &Vec, + z2: &Vec, + gamma: G::Scalar, + beta: &[G::Scalar], + ) -> VirtualPolynomial { + let mut vec_L = running_instance.compute_Ls(z1); + let mut Q = cccs_instance + .compute_Q(z2, beta) + .expect("TQ comp should not fail"); + let mut g = vec_L[0].clone(); + for (j, L_j) in vec_L.iter_mut().enumerate().skip(1) { + let gamma_j = gamma.pow([j as u64]); + L_j.scalar_mul(&gamma_j); + g = g.add(L_j); + } + let gamma_t1 = gamma.pow([(cccs_instance.ccs.t + 1) as u64]); + Q.scalar_mul(&gamma_t1); + g = g.add(&Q); + g + } +} + +/// Evaluate eq polynomial. +pub fn eq_eval(x: &[F], y: &[F]) -> F { + assert_eq!(x.len(), y.len()); + + let mut res = F::ONE; + for (&xi, &yi) in x.iter().zip(y.iter()) { + let xi_yi = xi * yi; + res *= xi_yi + xi_yi - xi - yi + F::ONE; + } + res +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::ccs::util::virtual_poly::build_eq_x_r; + use pasta_curves::{Ep, Fq}; + use rand_core::OsRng; + // NIMFS: Non Interactive Multifolding Scheme + type NIMFS = Multifolding; + + #[test] + fn test_compute_g() { + let z1 = CCSShape::::get_test_z(3); + let z2 = CCSShape::::get_test_z(4); + + let (_, ccs_witness_1, ccs_instance_1) = CCSShape::gen_test_ccs(&z2); + let (ccs, ccs_witness_2, ccs_instance_2) = CCSShape::gen_test_ccs(&z1); + let ck = ccs.commitment_key(); + + assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); + assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); + + let mut rng = OsRng; // TMP + let gamma: Fq = Fq::random(&mut rng); + let beta: Vec = (0..ccs.s).map(|_| Fq::random(&mut rng)).collect(); + + let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &ck, &z1); + let cccs_instance = ccs.to_cccs_shape(); + + let mut sum_v_j_gamma = Fq::zero(); + for j in 0..lcccs_instance.v.len() { + let gamma_j = gamma.pow([j as u64]); + sum_v_j_gamma += lcccs_instance.v[j] * gamma_j; + } + + // Compute g(x) with that r_x + let g = NIMFS::compute_g(&lcccs_instance, &cccs_instance, &z1, &z2, gamma, &beta); + + // evaluate g(x) over x \in {0,1}^s + let mut g_on_bhc = Fq::zero(); + for x in BooleanHypercube::new(ccs.s).into_iter() { + g_on_bhc += g.evaluate(&x).unwrap(); + } + + // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s + let mut sum_Lj_on_bhc = Fq::zero(); + let vec_L = lcccs_instance.compute_Ls(&z1); + for x in BooleanHypercube::new(ccs.s).into_iter() { + for j in 0..vec_L.len() { + let gamma_j = gamma.pow([j as u64]); + sum_Lj_on_bhc += vec_L[j].evaluate(&x).unwrap() * gamma_j; + } + } + + // Q(x) over bhc is assumed to be zero, as checked in the test 'test_compute_Q' + assert_ne!(g_on_bhc, Fq::zero()); + + // evaluating g(x) over the boolean hypercube should give the same result as evaluating the + // sum of gamma^j * Lj(x) over the boolean hypercube + assert_eq!(g_on_bhc, sum_Lj_on_bhc); + + // evaluating g(x) over the boolean hypercube should give the same result as evaluating the + // sum of gamma^j * v_j over j \in [t] + assert_eq!(g_on_bhc, sum_v_j_gamma); + } +} From a2d44082d08595f5a37d3135b596c219acee8e65 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Fri, 30 Jun 2023 12:19:48 +0200 Subject: [PATCH 078/100] add: Failing test for sigmas_and_thetas comp --- src/ccs/lcccs.rs | 2 ++ src/ccs/mod.rs | 6 +++-- src/ccs/multifolding.rs | 51 +++++++++++++++++++++++++++++++++++++++++ 3 files changed, 57 insertions(+), 2 deletions(-) diff --git a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs index 6dfd037a3..b6ed9d4ed 100644 --- a/src/ccs/lcccs.rs +++ 
b/src/ccs/lcccs.rs @@ -187,4 +187,6 @@ mod tests { assert_eq!(satisfied, false); } + + // XXX: Missing folding test. } diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index aa6d26099..6aacf3caf 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -38,6 +38,7 @@ use self::util::compute_all_sum_Mz_evals; mod cccs; mod lcccs; +mod multifolding; mod util; /// A type that holds the shape of a CCS instance @@ -198,6 +199,7 @@ impl CCSShape { } /// Checks if the CCS instance is satisfiable given a witness and its shape + // XXX: Probably is better to completelly remove the abstraction of Instance and witness and just deal with Z. pub fn is_sat( &self, ck: &CommitmentKey, @@ -302,7 +304,7 @@ impl CCSShape { } #[cfg(test)] - fn gen_test_ccs(z: &Vec) -> (CCSShape, CCSWitness, CCSInstance) { + pub(crate) fn gen_test_ccs(z: &Vec) -> (CCSShape, CCSWitness, CCSInstance) { let one = G::Scalar::ONE; let A = vec![ (0, 1, one), @@ -331,7 +333,7 @@ impl CCSShape { #[cfg(test)] /// Computes the z vector for the given input for Vitalik's equation. - pub fn get_test_z(input: u64) -> Vec { + pub(crate) fn get_test_z(input: u64) -> Vec { // z = (1, io, w) let input = G::Scalar::from(input); vec![ diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index 53f94f376..47d9e5390 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -41,6 +41,14 @@ pub struct Multifolding { ccs_mle: Vec>, } +impl Multifolding { + /// Generates a new Multifolding instance based on the given CCS. + pub fn new(ccs: CCSShape) -> Self { + let ccs_mle = ccs.M.iter().map(|matrix| matrix.to_mle()).collect(); + Self { ccs, ccs_mle } + } +} + impl Multifolding { /// Compute sigma_i and theta_i from step 4 pub fn compute_sigmas_and_thetas( @@ -194,4 +202,47 @@ mod tests { // sum of gamma^j * v_j over j \in [t] assert_eq!(g_on_bhc, sum_v_j_gamma); } + + #[test] + fn test_compute_sigmas_and_thetas() -> () { + let z1 = CCSShape::::get_test_z(3); + let z2 = CCSShape::::get_test_z(4); + + let (_, ccs_witness_1, ccs_instance_1) = CCSShape::gen_test_ccs(&z2); + let (ccs, ccs_witness_2, ccs_instance_2) = CCSShape::gen_test_ccs(&z1); + let ck = ccs.commitment_key(); + + assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); + assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); + + let mut rng = OsRng; + let gamma: Fq = Fq::random(&mut rng); + let beta: Vec = (0..ccs.s).map(|_| Fq::random(&mut rng)).collect(); + let r_x_prime: Vec = (0..ccs.s).map(|_| Fq::random(&mut rng)).collect(); + + // Initialize a multifolding object + let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &ck, &z1); + let (cccs_instance) = ccs.to_cccs_shape(); + + // Generate a new multifolding instance + let mf = NIMFS::new(ccs); + + let (sigmas, thetas) = mf.compute_sigmas_and_thetas(&z1, &z2, &r_x_prime); + + let g = NIMFS::compute_g(&lcccs_instance, &cccs_instance, &z1, &z2, gamma, &beta); + + // we expect g(r_x_prime) to be equal to: + // c = (sum gamma^j * e1 * sigma_j) + gamma^{t+1} * e2 * sum c_i * prod theta_j + // from compute_c_from_sigmas_and_thetas + let expected_c = g.evaluate(&r_x_prime).unwrap(); + let c = mf.compute_c_from_sigmas_and_thetas( + &sigmas, + &thetas, + gamma, + &beta, + &lcccs_instance.r_x, + &r_x_prime, + ); + assert_eq!(c, expected_c); + } } From 78a405924f7eb09bd269640b03d16943cbffce98 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Fri, 7 Jul 2023 15:19:07 +0200 Subject: [PATCH 079/100] fix: test_compute_sigmas_and_thetas This fixes the endianness of `r_x_prime` which was the issue that prevented the 
`test_compute_sigmas_and_thetas` to pass. --- src/ccs/multifolding.rs | 40 ++++++++++++++++++++++++++++++++++------ 1 file changed, 34 insertions(+), 6 deletions(-) diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index 47d9e5390..aec25abc3 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -159,7 +159,7 @@ mod tests { assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); - let mut rng = OsRng; // TMP + let mut rng = OsRng; let gamma: Fq = Fq::random(&mut rng); let beta: Vec = (0..ccs.s).map(|_| Fq::random(&mut rng)).collect(); @@ -225,17 +225,45 @@ mod tests { let (cccs_instance) = ccs.to_cccs_shape(); // Generate a new multifolding instance - let mf = NIMFS::new(ccs); + let nimfs = NIMFS::new(ccs.clone()); - let (sigmas, thetas) = mf.compute_sigmas_and_thetas(&z1, &z2, &r_x_prime); + let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&z1, &z2, &r_x_prime); let g = NIMFS::compute_g(&lcccs_instance, &cccs_instance, &z1, &z2, gamma, &beta); + // Assert `g` is correctly computed here. + { + // evaluate g(x) over x \in {0,1}^s + let mut g_on_bhc = Fq::zero(); + for x in BooleanHypercube::new(ccs.s).into_iter() { + g_on_bhc += g.evaluate(&x).unwrap(); + } + // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s + let mut sum_Lj_on_bhc = Fq::zero(); + let vec_L = lcccs_instance.compute_Ls(&z1); + for x in BooleanHypercube::new(ccs.s).into_iter() { + for j in 0..vec_L.len() { + let gamma_j = gamma.pow([j as u64]); + sum_Lj_on_bhc += vec_L[j].evaluate(&x).unwrap() * gamma_j; + } + } + + // evaluating g(x) over the boolean hypercube should give the same result as evaluating the + // sum of gamma^j * Lj(x) over the boolean hypercube + assert_eq!(g_on_bhc, sum_Lj_on_bhc); + }; + + // XXX: We need a better way to do this. Sum_Mz has also the same issue. + // reverse the `r` given to evaluate to match Spartan/Nova endianness. + let mut revsersed = r_x_prime.clone(); + revsersed.reverse(); + // we expect g(r_x_prime) to be equal to: // c = (sum gamma^j * e1 * sigma_j) + gamma^{t+1} * e2 * sum c_i * prod theta_j - // from compute_c_from_sigmas_and_thetas - let expected_c = g.evaluate(&r_x_prime).unwrap(); - let c = mf.compute_c_from_sigmas_and_thetas( + // from `compute_c_from_sigmas_and_thetas` + let expected_c = g.evaluate(&revsersed).unwrap(); + + let c = nimfs.compute_c_from_sigmas_and_thetas( &sigmas, &thetas, gamma, From d37afdb160ef9658430be6450dbb1d8f19f1f931 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Fri, 7 Jul 2023 17:31:01 +0200 Subject: [PATCH 080/100] add: Folding functions pending for tests to pass --- src/ccs/multifolding.rs | 110 +++++++++++++++++++++++++++++++++++++++- 1 file changed, 109 insertions(+), 1 deletion(-) diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index aec25abc3..a526d15ba 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -1,4 +1,4 @@ -use super::cccs::CCCSShape; +use super::cccs::{CCCSInstance, CCCSShape}; use super::lcccs::LCCCS; use super::util::{compute_sum_Mz, VirtualPolynomial}; use super::{CCSShape, CCSWitness}; @@ -124,6 +124,71 @@ impl Multifolding { g = g.add(&Q); g } + + // XXX: This might need to be mutable if we want to hold an LCCCS instance as the IVC inside the + // NIMFS object. 
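+ // Folding sketch (informal summary of the two functions below): given a running LCCCS
+ // instance (C1, u1, x1, r_x, v1) and a fresh CCCS instance (C2, x2), together with the
+ // sigmas/thetas evaluated at r_x', the folded LCCCS is
+ //   C' = C1 + rho * C2,  u' = u1 + rho,  x' = x1 + rho * x2,  v' = sigma + rho * theta,
+ // with r_x' as its new evaluation point, and the witnesses fold as w' = w1 + rho * w2
+ // (all vector operations taken entry-wise).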
+ pub fn fold( + &self, + lcccs1: &LCCCS, + cccs2: &CCCSInstance, + sigmas: &[G::Scalar], + thetas: &[G::Scalar], + r_x_prime: Vec, + rho: G::Scalar, + ) -> LCCCS { + let C = lcccs1.C + cccs2.C.mul(rho); + let u = lcccs1.u + rho; + let x: Vec = lcccs1 + .x + .iter() + .zip( + cccs2 + .x + .iter() + .map(|x_i| *x_i * rho) + .collect::>(), + ) + .map(|(a_i, b_i)| *a_i + b_i) + .collect(); + let v: Vec = sigmas + .iter() + .zip( + thetas + .iter() + .map(|x_i| *x_i * rho) + .collect::>(), + ) + .map(|(a_i, b_i)| *a_i + b_i) + .collect(); + + LCCCS { + matrix_mles: lcccs1.matrix_mles.clone(), + C, + ccs: lcccs1.ccs.clone(), + u, + x, + r_x: r_x_prime, + v, + } + } + + pub fn fold_witness(w1: &CCSWitness, w2: &CCSWitness, rho: G::Scalar) -> CCSWitness { + let w = w1 + .w + .iter() + .zip( + w2.w + .iter() + .map(|x_i| *x_i * rho) + .collect::>(), + ) + .map(|(a_i, b_i)| *a_i + b_i) + .collect(); + + // XXX: There's no handling of r_w atm. So we will ingore until all folding is implemented, + // let r_w = w1.r_w + rho * w2.r_w; + CCSWitness { w } + } } /// Evaluate eq polynomial. @@ -273,4 +338,47 @@ mod tests { ); assert_eq!(c, expected_c); } + + #[test] + fn test_lcccs_fold() { + let z1 = CCSShape::::get_test_z(3); + let z2 = CCSShape::::get_test_z(4); + + let (ccs, ccs_witness_1, ccs_instance_1) = CCSShape::gen_test_ccs(&z2); + let (_, ccs_witness_2, ccs_instance_2) = CCSShape::gen_test_ccs(&z1); + let ck = ccs.commitment_key(); + + assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); + assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); + + let mut rng = OsRng; + let r_x_prime: Vec = (0..ccs.s).map(|_| Fq::random(&mut rng)).collect(); + + // Generate a new multifolding instance + let mut nimfs = NIMFS::new(ccs.clone()); + + let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&z1, &z2, &r_x_prime); + + // Initialize a multifolding object + let (lcccs_instance, lcccs_witness) = ccs.to_lcccs(&mut rng, &ck, &z1); + assert!(lcccs_instance.is_sat(&ck, &lcccs_witness).is_ok()); + let (cccs_instance, _, _) = ccs.to_cccs_artifacts(&mut rng, &ck, &z2); + + let mut rng = OsRng; + let rho = Fq::random(&mut rng); + + let folded = nimfs.fold( + &lcccs_instance, + &cccs_instance, + &sigmas, + &thetas, + r_x_prime, + rho, + ); + + let w_folded = NIMFS::fold_witness(&ccs_witness_1, &ccs_witness_2, rho); + + // check lcccs relation + assert!(folded.is_sat(&ck, &w_folded).is_ok()); + } } From 09cef38aa78232e506b1a980f9f54ca72fa4e136 Mon Sep 17 00:00:00 2001 From: CPerezz Date: Mon, 10 Jul 2023 11:59:10 +0200 Subject: [PATCH 081/100] tmp: Check CCCS instance correctness failing --- src/ccs/cccs.rs | 27 ++++++++++++++++++++++++++- src/ccs/lcccs.rs | 5 +++-- src/ccs/multifolding.rs | 6 ++++-- 3 files changed, 33 insertions(+), 5 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index b2893a186..3fb716938 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -29,7 +29,7 @@ use std::sync::Arc; use super::util::compute_sum_Mz; use super::util::virtual_poly::VirtualPolynomial; -use super::CCSShape; +use super::{CCSShape, CCSWitness}; /// A type that holds the shape of a Committed CCS (CCCS) instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] @@ -111,6 +111,31 @@ impl CCCSShape { let q = self.compute_q(z)?; q.build_f_hat(beta) } + + /// Perform the check of the CCCS instance described at section 4.1 + pub fn is_sat( + &self, + ck: &CommitmentKey, + w: &CCSWitness, + x: &CCCSInstance, + ) -> Result<(), NovaError> { + // check that C is the commitment of w. 
Notice that this is not verifying a Pedersen + // opening, but checking that the Commitment comes from committing to the witness. + assert_eq!(x.C, CE::::commit(ck, &w.w)); + + // check CCCS relation + let z: Vec = [vec![G::Scalar::ONE], x.x.clone(), w.w.to_vec()].concat(); + + // A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in the hypercube + let q_x = self.compute_q(&z).unwrap(); + for x in BooleanHypercube::new(self.ccs.s) { + if q_x.evaluate(&x).unwrap().is_zero().unwrap_u8() == 0 { + return Err(NovaError::UnSat); + } + } + + Ok(()) + } } #[cfg(test)] diff --git a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs index b6ed9d4ed..fc7304592 100644 --- a/src/ccs/lcccs.rs +++ b/src/ccs/lcccs.rs @@ -66,6 +66,9 @@ impl LCCCS { compute_all_sum_Mz_evals::(&self.matrix_mles, &z, &self.r_x, self.ccs.s_prime); let vs_eq = computed_v == self.v; + dbg!(vs_eq); + dbg!(comm_eq); + if vs_eq && comm_eq { Ok(()) } else { @@ -187,6 +190,4 @@ mod tests { assert_eq!(satisfied, false); } - - // XXX: Missing folding test. } diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index a526d15ba..afb61a99e 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -362,9 +362,11 @@ mod tests { // Initialize a multifolding object let (lcccs_instance, lcccs_witness) = ccs.to_lcccs(&mut rng, &ck, &z1); assert!(lcccs_instance.is_sat(&ck, &lcccs_witness).is_ok()); - let (cccs_instance, _, _) = ccs.to_cccs_artifacts(&mut rng, &ck, &z2); + let (cccs_instance, cccs_witness, cccs_shape) = ccs.to_cccs_artifacts(&mut rng, &ck, &z2); + assert!(cccs_shape + .is_sat(&ck, &ccs_witness_2, &cccs_instance) + .is_ok()); - let mut rng = OsRng; let rho = Fq::random(&mut rng); let folded = nimfs.fold( From a33e7c3c20b5706b559a4cae0d05055b36b8722e Mon Sep 17 00:00:00 2001 From: oskarth Date: Tue, 11 Jul 2023 12:27:24 +0800 Subject: [PATCH 082/100] test(multifolding): Fix CCCS assert Was using wrong witness. Also change z order and make variables more explicit.
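The entry-wise combination a_i + rho * b_i appears throughout `fold` (for x and v) and again in `fold_witness` (for w). A minimal stand-alone sketch of that pattern, with a made-up helper name and toy values, not taken from this patch:

  use ff::Field;
  use pasta_curves::Fq;

  // Hypothetical helper: entry-wise a_i + rho * b_i, the combination used for x, v and w
  // when folding an LCCCS instance with a CCCS instance. For illustration only.
  fn lin_comb<F: Field>(a: &[F], b: &[F], rho: F) -> Vec<F> {
    assert_eq!(a.len(), b.len());
    a.iter().zip(b).map(|(a_i, b_i)| *a_i + rho * b_i).collect()
  }

  #[test]
  fn lin_comb_toy() {
    let a = vec![Fq::from(1u64), Fq::from(2u64)];
    let b = vec![Fq::from(3u64), Fq::from(4u64)];
    assert_eq!(
      lin_comb(&a, &b, Fq::from(10u64)),
      vec![Fq::from(31u64), Fq::from(42u64)]
    );
  }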
--- src/ccs/multifolding.rs | 24 +++++++++++++----------- 1 file changed, 13 insertions(+), 11 deletions(-) diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index afb61a99e..60003572f 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -344,27 +344,29 @@ mod tests { let z1 = CCSShape::::get_test_z(3); let z2 = CCSShape::::get_test_z(4); - let (ccs, ccs_witness_1, ccs_instance_1) = CCSShape::gen_test_ccs(&z2); - let (_, ccs_witness_2, ccs_instance_2) = CCSShape::gen_test_ccs(&z1); - let ck = ccs.commitment_key(); + let (ccs1, ccs_witness_1, ccs_instance_1) = CCSShape::gen_test_ccs(&z1); + let (_ccs2, ccs_witness_2, ccs_instance_2) = CCSShape::gen_test_ccs(&z2); + let ck = ccs1.commitment_key(); - assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); - assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); + assert!(ccs1.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); + assert!(ccs1.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); let mut rng = OsRng; - let r_x_prime: Vec = (0..ccs.s).map(|_| Fq::random(&mut rng)).collect(); + let r_x_prime: Vec = (0..ccs1.s).map(|_| Fq::random(&mut rng)).collect(); // Generate a new multifolding instance - let mut nimfs = NIMFS::new(ccs.clone()); + let mut nimfs = NIMFS::new(ccs1.clone()); - let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&z1, &z2, &r_x_prime); + let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&z2, &z1, &r_x_prime); // Initialize a multifolding object - let (lcccs_instance, lcccs_witness) = ccs.to_lcccs(&mut rng, &ck, &z1); + let (lcccs_instance, lcccs_witness) = ccs1.to_lcccs(&mut rng, &ck, &z2); assert!(lcccs_instance.is_sat(&ck, &lcccs_witness).is_ok()); - let (cccs_instance, cccs_witness, cccs_shape) = ccs.to_cccs_artifacts(&mut rng, &ck, &z2); + + let (cccs_instance, cccs_witness, cccs_shape) = ccs1.to_cccs_artifacts(&mut rng, &ck, &z1); + assert!(cccs_shape - .is_sat(&ck, &ccs_witness_2, &cccs_instance) + .is_sat(&ck, &ccs_witness_1, &cccs_instance) .is_ok()); let rho = Fq::random(&mut rng); From 216e0e61c90937056ab3ecc042c56586c07dfec5 Mon Sep 17 00:00:00 2001 From: oskarth Date: Tue, 11 Jul 2023 13:00:23 +0800 Subject: [PATCH 083/100] refactor(ccs): Remove special CCCSWitness Should be the same according to current logic and multifolding-poc. Also change to to_cccs. 
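With the dedicated CCCS witness gone, `to_lcccs` and `to_cccs` hand back the same `CCSWitness`, so a single witness type is committed, folded and checked against either relation. A condensed usage sketch, mirroring the existing test modules (the test name is made up; imports — `Ep`, `OsRng` and the CCS types — are as in those modules):

  #[test]
  fn one_witness_type_serves_both_relations() {
    let z = CCSShape::<Ep>::get_test_z(3);
    let (ccs, _, _) = CCSShape::<Ep>::gen_test_ccs(&z);
    let ck = ccs.commitment_key();

    // Both transformations return a plain CCSWitness alongside their instance data.
    let (lcccs, w_lcccs) = ccs.to_lcccs(&mut OsRng, &ck, &z);
    let (cccs, w_cccs, cccs_shape) = ccs.to_cccs(&mut OsRng, &ck, &z);

    assert!(lcccs.is_sat(&ck, &w_lcccs).is_ok());
    assert!(cccs_shape.is_sat(&ck, &w_cccs, &cccs).is_ok());
  }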
--- src/ccs/cccs.rs | 8 -------- src/ccs/mod.rs | 8 ++++---- src/ccs/multifolding.rs | 2 +- src/ccs/util/mod.rs | 4 ++-- 4 files changed, 7 insertions(+), 15 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index 3fb716938..052508cf6 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -52,14 +52,6 @@ pub struct CCCSInstance { pub(crate) x: Vec, } -// NOTE: We deal with `r` parameter later in `nimfs.rs` when running `execute_sequence` with `ro_consts` -/// A type that holds a witness for a given CCCS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -pub struct CCCSWitness { - // Multilinear polynomial w_mle in s' - 1 variables - pub(crate) w_mle: Vec, -} - impl CCCSShape { // Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) // polynomial over x diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 6aacf3caf..daf005bd2 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -32,7 +32,7 @@ use serde::{Deserialize, Serialize}; use sha3::{Digest, Sha3_256}; use std::ops::{Add, Mul}; -use self::cccs::{CCCSInstance, CCCSShape, CCCSWitness}; +use self::cccs::{CCCSInstance, CCCSShape}; use self::lcccs::LCCCS; use self::util::compute_all_sum_Mz_evals; @@ -126,12 +126,12 @@ impl CCSShape { } // Transform the CCS instance into a CCCS instance by providing a commitment key. - pub fn to_cccs_artifacts( + pub fn to_cccs( &self, rng: &mut R, ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, z: &[G::Scalar], - ) -> (CCCSInstance, CCCSWitness, CCCSShape) { + ) -> (CCCSInstance, CCSWitness, CCCSShape) { let w: Vec = z[(1 + self.l)..].to_vec(); // XXX: API doesn't offer a way to handle this apparently? // Need to investigate @@ -143,7 +143,7 @@ impl CCSShape { C, x: z[1..(1 + self.l)].to_vec(), }, - CCCSWitness { w_mle: w }, + CCSWitness { w: w }, self.to_cccs_shape(), ) } diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index 60003572f..da3cbc9d2 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -363,7 +363,7 @@ mod tests { let (lcccs_instance, lcccs_witness) = ccs1.to_lcccs(&mut rng, &ck, &z2); assert!(lcccs_instance.is_sat(&ck, &lcccs_witness).is_ok()); - let (cccs_instance, cccs_witness, cccs_shape) = ccs1.to_cccs_artifacts(&mut rng, &ck, &z1); + let (cccs_instance, cccs_witness, cccs_shape) = ccs1.to_cccs(&mut rng, &ck, &z1); assert!(cccs_shape .is_sat(&ck, &ccs_witness_1, &cccs_instance) diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index b7ccb7da7..b7b59a9c8 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -189,7 +189,7 @@ mod tests { // Generate other artifacts let ck = CCSShape::::commitment_key(&ccs); - let (_, _, cccs) = ccs.to_cccs_artifacts(&mut OsRng, &ck, &z); + let (_, _, cccs) = ccs.to_cccs(&mut OsRng, &ck, &z); let z_mle = dense_vec_to_mle(ccs.s_prime, &z); @@ -218,7 +218,7 @@ mod tests { // Generate other artifacts let ck = CCSShape::::commitment_key(&ccs); - let (_, _, cccs) = ccs.to_cccs_artifacts(&mut OsRng, &ck, &z); + let (_, _, cccs) = ccs.to_cccs(&mut OsRng, &ck, &z); let mut r = vec![Fq::ONE, Fq::ZERO]; let res = compute_all_sum_Mz_evals::(cccs.M_MLE.as_slice(), &z, &r, ccs.s_prime); From f3bcf68ddaf1b56735fcfcbc2f800be23c534d8d Mon Sep 17 00:00:00 2001 From: oskarth Date: Tue, 11 Jul 2023 13:05:17 +0800 Subject: [PATCH 084/100] tests(ccs): Fix test_lcccs_fold test Fix variable order and make used variables more explicit to correspond to multifolding-poc --- src/ccs/multifolding.rs | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 
deletions(-) diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index da3cbc9d2..9e19e93c2 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -344,29 +344,31 @@ mod tests { let z1 = CCSShape::::get_test_z(3); let z2 = CCSShape::::get_test_z(4); - let (ccs1, ccs_witness_1, ccs_instance_1) = CCSShape::gen_test_ccs(&z1); - let (_ccs2, ccs_witness_2, ccs_instance_2) = CCSShape::gen_test_ccs(&z2); - let ck = ccs1.commitment_key(); + // ccs stays the same regardless of z1 or z2 + let (ccs, ccs_witness_1, ccs_instance_1) = CCSShape::gen_test_ccs(&z1); + let (_, ccs_witness_2, ccs_instance_2) = CCSShape::gen_test_ccs(&z2); + let ck = ccs.commitment_key(); - assert!(ccs1.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); - assert!(ccs1.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); + assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); + assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); let mut rng = OsRng; - let r_x_prime: Vec = (0..ccs1.s).map(|_| Fq::random(&mut rng)).collect(); + let r_x_prime: Vec = (0..ccs.s).map(|_| Fq::random(&mut rng)).collect(); // Generate a new multifolding instance - let mut nimfs = NIMFS::new(ccs1.clone()); + let mut nimfs = NIMFS::new(ccs.clone()); - let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&z2, &z1, &r_x_prime); + let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&z1, &z2, &r_x_prime); // Initialize a multifolding object - let (lcccs_instance, lcccs_witness) = ccs1.to_lcccs(&mut rng, &ck, &z2); - assert!(lcccs_instance.is_sat(&ck, &lcccs_witness).is_ok()); + let (lcccs_instance, lcccs_witness) = ccs.to_lcccs(&mut rng, &ck, &z1); - let (cccs_instance, cccs_witness, cccs_shape) = ccs1.to_cccs(&mut rng, &ck, &z1); + let (cccs_instance, cccs_witness, cccs_shape) = ccs.to_cccs(&mut rng, &ck, &z2); + + assert!(lcccs_instance.is_sat(&ck, &lcccs_witness).is_ok()); assert!(cccs_shape - .is_sat(&ck, &ccs_witness_1, &cccs_instance) + .is_sat(&ck, &ccs_witness_2, &cccs_instance) .is_ok()); let rho = Fq::random(&mut rng); @@ -380,7 +382,7 @@ mod tests { rho, ); - let w_folded = NIMFS::fold_witness(&ccs_witness_1, &ccs_witness_2, rho); + let w_folded = NIMFS::fold_witness(&lcccs_witness, &cccs_witness, rho); // check lcccs relation assert!(folded.is_sat(&ck, &w_folded).is_ok()); From af959fbaaf22686f1b2c31e1ba79a1f043487c61 Mon Sep 17 00:00:00 2001 From: oskarth Date: Tue, 11 Jul 2023 13:23:12 +0800 Subject: [PATCH 085/100] chore: fix errors after rebase --- src/spartan/mod.rs | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/spartan/mod.rs b/src/spartan/mod.rs index 5edbddf01..bde284b1e 100644 --- a/src/spartan/mod.rs +++ b/src/spartan/mod.rs @@ -1,6 +1,6 @@ //! This module implements RelaxedR1CSSNARKTrait using Spartan that is generic //! over the polynomial commitment and evaluation argument (i.e., a PCS) -mod math; +pub(crate) mod math; pub(crate) mod polynomial; pub mod ppsnark; pub mod snark; From 4828a20977fcbc284c3032ddddd6b5c7e2261dea Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 12 Jul 2023 12:09:31 +0800 Subject: [PATCH 086/100] chore: fix merge artifact --- src/spartan/mod.rs | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/spartan/mod.rs b/src/spartan/mod.rs index bde284b1e..cfb3222bd 100644 --- a/src/spartan/mod.rs +++ b/src/spartan/mod.rs @@ -1,5 +1,9 @@ //! This module implements RelaxedR1CSSNARKTrait using Spartan that is generic //! over the polynomial commitment and evaluation argument (i.e., a PCS) +//! 
We provide two implementations, one in snark.rs (which does not use any preprocessing) +//! and another in ppsnark.rs (which uses preprocessing to keep the verifier's state small if the PCS scheme provides a succinct verifier) +//! We also provide direct.rs that allows proving a step circuit directly with either of the two SNARKs. +pub mod direct; pub(crate) mod math; pub(crate) mod polynomial; pub mod ppsnark; From 6f1ed227b16fb2a302db63272769ce641b78aa56 Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 12 Jul 2023 12:16:39 +0800 Subject: [PATCH 087/100] chore: Restore global warnings and unused Replace with local allow unused, works with hypernova flag on and off --- src/ccs/mod.rs | 1 + src/hypercube.rs | 1 + src/lib.rs | 4 ++-- src/spartan/polynomial.rs | 1 + src/utils.rs | 1 + 5 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index daf005bd2..6027d6551 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -1,6 +1,7 @@ //! This module defines CCS related types and functions. #![allow(unused_imports)] #![allow(dead_code)] +#![allow(unused)] #![allow(clippy::type_complexity)] use crate::hypercube::BooleanHypercube; diff --git a/src/hypercube.rs b/src/hypercube.rs index 2e3509607..bc13cde93 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -1,4 +1,5 @@ //! This module defines basic types related to Boolean hypercubes. +#![allow(unused)] use std::marker::PhantomData; use crate::utils::*; diff --git a/src/lib.rs b/src/lib.rs index f007e64cf..b24072643 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -1,7 +1,7 @@ //! This library implements Nova, a high-speed recursive SNARK. #![deny( - //warnings, - //unused, + warnings, + unused, future_incompatible, nonstandard_style, rust_2018_idioms, diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index dd2db921c..8c6f8dd9b 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -142,6 +142,7 @@ impl MultilinearPolynomial { } // Multiplies `self` by a scalar. + #[allow(unused)] pub fn scalar_mul(&self, scalar: &Scalar) -> Self { let mut new_poly = self.clone(); for z in &mut new_poly.Z { diff --git a/src/utils.rs b/src/utils.rs index fe0cca5ac..5d78281fb 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -1,4 +1,5 @@ //! Basic utils +#![allow(unused)] use std::sync::Arc; use crate::errors::NovaError; From 20ba46c808f1cc4e19e6e324318d8348ea5f05b0 Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 12 Jul 2023 12:26:38 +0800 Subject: [PATCH 088/100] refactor: Enable requirement of even public IO by default Hacky solution with feature flag for experimental hypernova for now, need to adjust test case to fix better --- src/ccs/mod.rs | 2 ++ src/r1cs.rs | 10 ++++++++-- 2 files changed, 10 insertions(+), 2 deletions(-) diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 6027d6551..2457b0bed 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -320,6 +320,8 @@ impl CCSShape { let C = vec![(0, 3, one), (1, 4, one), (2, 5, one), (3, 2, one)]; // 2. 
Take R1CS and convert to CCS + // TODO: The third argument should be 2 or similar, need to adjust test case + // See https://github.com/privacy-scaling-explorations/Nova/issues/30 let ccs = CCSShape::from_r1cs(R1CSShape::new(4, 6, 1, &A, &B, &C).unwrap()); // Generate other artifacts let ck = CCSShape::::commitment_key(&ccs); diff --git a/src/r1cs.rs b/src/r1cs.rs index 36133f5cc..76d93ed46 100644 --- a/src/r1cs.rs +++ b/src/r1cs.rs @@ -118,11 +118,17 @@ impl R1CSShape { return Err(NovaError::InvalidIndex); } - // // We require the number of public inputs/outputs to be even + // We require the number of public inputs/outputs to be even + #[cfg(not(feature = "hypernova"))] + if num_io % 2 != 0 { + return Err(NovaError::OddInputLength); + } + + // TODO: See https://github.com/privacy-scaling-explorations/Nova/issues/30 + #[cfg(feature = "hypernova")] // if num_io % 2 != 0 { // return Err(NovaError::OddInputLength); // } - Ok(R1CSShape { num_cons, num_vars, From cde23f39cb143cf2a327cf0fd0e78bb16b01cc07 Mon Sep 17 00:00:00 2001 From: oskarth Date: Fri, 14 Jul 2023 16:31:51 +0800 Subject: [PATCH 089/100] refactor(utils): SparseMatrix taking F instead of G (#33) Addresses https://github.com/privacy-scaling-explorations/Nova/issues/32 --- src/ccs/mod.rs | 4 ++-- src/ccs/util/mod.rs | 2 +- src/utils.rs | 41 ++++++++++++++++++++--------------------- 3 files changed, 23 insertions(+), 24 deletions(-) diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 2457b0bed..78b609962 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -49,7 +49,7 @@ mod util; #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] pub struct CCSShape { - pub(crate) M: Vec>, + pub(crate) M: Vec>, // Num vars pub(crate) t: usize, // Number of public witness @@ -74,7 +74,7 @@ pub struct CCSShape { impl CCSShape { /// Create an object of type `CCSShape` from the explicitly specified CCS matrices pub fn new( - M: &[SparseMatrix], + M: &[SparseMatrix], t: usize, l: usize, q: usize, diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index b7b59a9c8..e1168d358 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -120,7 +120,7 @@ mod tests { #[test] fn test_fix_variables() { - let A = SparseMatrix::::with_coeffs( + let A = SparseMatrix::::with_coeffs( 4, 4, vec![ diff --git a/src/utils.rs b/src/utils.rs index 5d78281fb..1646665de 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -13,14 +13,13 @@ use serde::{Deserialize, Serialize}; /// A matrix structure represented on a sparse form. /// First element is row index, second column, third value stored #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct SparseMatrix { +pub struct SparseMatrix { n_rows: usize, n_cols: usize, - coeffs: Vec<(usize, usize, G::Scalar)>, + coeffs: Vec<(usize, usize, F)>, } -impl SparseMatrix { +impl SparseMatrix { pub fn new(n_rows: usize, n_cols: usize) -> Self { Self { n_rows, @@ -29,7 +28,7 @@ impl SparseMatrix { } } - pub fn with_coeffs(n_rows: usize, n_cols: usize, coeffs: Vec<(usize, usize, G::Scalar)>) -> Self { + pub fn with_coeffs(n_rows: usize, n_cols: usize, coeffs: Vec<(usize, usize, F)>) -> Self { Self { n_rows, n_cols, @@ -58,7 +57,7 @@ impl SparseMatrix { } // Return the non-0 coefficients of this matrix. - pub fn coeffs(&self) -> &[(usize, usize, G::Scalar)] { + pub fn coeffs(&self) -> &[(usize, usize, F)] { self.coeffs.as_slice() } @@ -88,12 +87,12 @@ impl SparseMatrix { } // Gives the MLE of the given matrix. 
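  // Layout note (see `sparse_vec_to_mle` below): the dimensions are first padded to powers of
  // two, so the MLE lives in s + s' = log2(n_rows) + log2(n_cols) variables and its evaluation
  // table stores the entry (row, col) at index n_cols * row + col.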
- pub fn to_mle(&self) -> MultilinearPolynomial { + pub fn to_mle(&self) -> MultilinearPolynomial { // Matrices might need to get padded before turned into an MLE let mut padded_matrix = self.clone(); padded_matrix.pad(); - sparse_vec_to_mle::( + sparse_vec_to_mle::( padded_matrix.n_rows(), padded_matrix.n_cols(), padded_matrix.coeffs().to_vec(), @@ -106,13 +105,13 @@ impl SparseMatrix { // coefficients, maybe the method should be renamed, because is not to convert 'any' vector but a // vector of matrix coefficients. A better option probably is to replace the two inputs n_rows & // n_cols by directly the n_vars. -pub fn sparse_vec_to_mle( +pub fn sparse_vec_to_mle( n_rows: usize, n_cols: usize, - v: Vec<(usize, usize, G::Scalar)>, -) -> MultilinearPolynomial { + v: Vec<(usize, usize, F)>, +) -> MultilinearPolynomial { let n_vars: usize = (log2(n_rows) + log2(n_cols)) as usize; // n_vars = s + s' - let mut padded_vec = vec![G::Scalar::ZERO; 1 << n_vars]; + let mut padded_vec = vec![F::ZERO; 1 << n_vars]; v.iter().copied().for_each(|(row, col, coeff)| { padded_vec[(n_cols * row) + col] = coeff; }); @@ -176,11 +175,11 @@ pub fn matrix_vector_product(matrix: &Vec>, vector: &Vec( - matrix: &SparseMatrix, - vector: &Vec, -) -> Vec { - let mut res = vec![G::Scalar::ZERO; matrix.n_rows()]; +pub fn matrix_vector_product_sparse( + matrix: &SparseMatrix, + vector: &Vec, +) -> Vec { + let mut res = vec![F::ZERO; matrix.n_rows()]; for &(row, col, value) in matrix.coeffs.iter() { res[row] += value * vector[col]; } @@ -340,7 +339,7 @@ mod tests { let z = to_F_vec::(vec![1, 2, 3]); let res = - matrix_vector_product_sparse::(&SparseMatrix::::with_coeffs(2, 3, matrix), &z); + matrix_vector_product_sparse::(&SparseMatrix::::with_coeffs(2, 3, matrix), &z); assert_eq!(res, to_F_vec::(vec![14, 32])); } @@ -356,19 +355,19 @@ mod tests { (1, 2, Fq::from(6u64)), (4, 5, Fq::from(1u64)), ]; - let A = SparseMatrix::::with_coeffs(5, 6, matrix.clone()); + let A = SparseMatrix::::with_coeffs(5, 6, matrix.clone()); assert_eq!(A.n_cols(), 6); assert_eq!(A.n_rows(), 5); // Since is sparse, the empty rows/cols at the end are not accounted unless we provide the info. 
- let A = SparseMatrix::::with_coeffs(10, 10, matrix); + let A = SparseMatrix::::with_coeffs(10, 10, matrix); assert_eq!(A.n_cols(), 10); assert_eq!(A.n_rows(), 10); } #[test] fn test_matrix_to_mle() { - let A = SparseMatrix::::with_coeffs( + let A = SparseMatrix::::with_coeffs( 5, 5, vec![ From 3fdaeabed0bfee82049cba9c6cf12c50b76e9a36 Mon Sep 17 00:00:00 2001 From: oskarth Date: Thu, 13 Jul 2023 19:29:16 +0800 Subject: [PATCH 090/100] test(utils): Make generic over Field Addresses https://github.com/microsoft/Nova/pull/175#discussion_r1214691619 --- src/utils.rs | 185 +++++++++++++++++++++++++++++---------------------- 1 file changed, 107 insertions(+), 78 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index 1646665de..b99c622b2 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -278,9 +278,9 @@ pub(crate) fn log2(x: usize) -> u32 { #[cfg(test)] mod tests { + use super::*; use crate::hypercube::BooleanHypercube; - use super::*; use pasta_curves::{Ep, Fq}; fn to_F_vec(v: Vec) -> Vec { @@ -291,69 +291,63 @@ mod tests { m.iter().map(|x| to_F_vec(x.clone())).collect() } - #[test] - fn test_vector_add() { - let a = to_F_vec::(vec![1, 2, 3]); - let b = to_F_vec::(vec![4, 5, 6]); - let res = vector_add(&a, &b); - assert_eq!(res, to_F_vec::(vec![5, 7, 9])); + fn test_vector_add_with() { + let a = to_F_vec::(vec![1, 2, 3]); + let b = to_F_vec::(vec![4, 5, 6]); + let res = vector_add::(&a, &b); + assert_eq!(res, to_F_vec::(vec![5, 7, 9])); } - #[test] - fn test_vector_elem_product() { - let a = to_F_vec::(vec![1, 2, 3]); - let e = Fq::from(2); + fn test_vector_elem_product_with() { + let a = to_F_vec::(vec![1, 2, 3]); + let e = F::from(2); let res = vector_elem_product(&a, &e); - assert_eq!(res, to_F_vec::(vec![2, 4, 6])); + assert_eq!(res, to_F_vec::(vec![2, 4, 6])); } - #[test] - fn test_matrix_vector_product() { + fn test_matrix_vector_product_with() { let matrix = vec![vec![1, 2, 3], vec![4, 5, 6]]; let vector = vec![1, 2, 3]; - let A = to_F_matrix::(matrix); - let z = to_F_vec::(vector); + let A = to_F_matrix::(matrix); + let z = to_F_vec::(vector); let res = matrix_vector_product(&A, &z); - assert_eq!(res, to_F_vec::(vec![14, 32])); + assert_eq!(res, to_F_vec::(vec![14, 32])); } - #[test] - fn test_hadamard_product() { - let a = to_F_vec::(vec![1, 2, 3]); - let b = to_F_vec::(vec![4, 5, 6]); + fn test_hadamard_product_with() { + let a = to_F_vec::(vec![1, 2, 3]); + let b = to_F_vec::(vec![4, 5, 6]); let res = hadamard_product(&a, &b); - assert_eq!(res, to_F_vec::(vec![4, 10, 18])); + assert_eq!(res, to_F_vec::(vec![4, 10, 18])); } - #[test] - fn test_matrix_vector_product_sparse() { + fn test_matrix_vector_product_sparse_with() { let matrix = vec![ - (0, 0, Fq::from(1)), - (0, 1, Fq::from(2)), - (0, 2, Fq::from(3)), - (1, 0, Fq::from(4)), - (1, 1, Fq::from(5)), - (1, 2, Fq::from(6)), + (0, 0, G::Scalar::from(1u64)), + (0, 1, G::Scalar::from(2u64)), + (0, 2, G::Scalar::from(3u64)), + (1, 0, G::Scalar::from(4u64)), + (1, 1, G::Scalar::from(5u64)), + (1, 2, G::Scalar::from(6u64)), ]; let z = to_F_vec::(vec![1, 2, 3]); let res = matrix_vector_product_sparse::(&SparseMatrix::::with_coeffs(2, 3, matrix), &z); - assert_eq!(res, to_F_vec::(vec![14, 32])); + assert_eq!(res, to_F_vec::(vec![14, 32])); } - #[test] - fn test_sparse_matrix_n_cols_rows() { + fn test_sparse_matrix_n_cols_rows_with() { let matrix = vec![ - (0, 0, Fq::from(1u64)), - (0, 1, Fq::from(2u64)), - (0, 2, Fq::from(3u64)), - (1, 0, Fq::from(4u64)), - (1, 1, Fq::from(5u64)), - (1, 2, Fq::from(6u64)), - (4, 5, Fq::from(1u64)), + 
(0, 0, G::Scalar::from(1u64)), + (0, 1, G::Scalar::from(2u64)), + (0, 2, G::Scalar::from(3u64)), + (1, 0, G::Scalar::from(4u64)), + (1, 1, G::Scalar::from(5u64)), + (1, 2, G::Scalar::from(6u64)), + (4, 5, G::Scalar::from(1u64)), ]; let A = SparseMatrix::::with_coeffs(5, 6, matrix.clone()); assert_eq!(A.n_cols(), 6); @@ -371,13 +365,13 @@ mod tests { 5, 5, vec![ - (0usize, 0usize, Fq::from(1u64)), - (0, 1, Fq::from(2u64)), - (0, 2, Fq::from(3u64)), - (1, 0, Fq::from(4u64)), - (1, 1, Fq::from(5u64)), - (1, 2, Fq::from(6u64)), - (3, 4, Fq::from(1u64)), + (0usize, 0usize, G::Scalar::from(1u64)), + (0, 1, G::Scalar::from(2u64)), + (0, 2, G::Scalar::from(3u64)), + (1, 0, G::Scalar::from(4u64)), + (1, 1, G::Scalar::from(5u64)), + (1, 2, G::Scalar::from(6u64)), + (3, 4, G::Scalar::from(1u64)), ], ); @@ -386,42 +380,42 @@ mod tests { // hardcoded testvector to ensure that in the future the SparseMatrix.to_mle method holds let expected = vec![ - Fq::from(1u64), - Fq::from(2u64), - Fq::from(3u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(4u64), - Fq::from(5u64), - Fq::from(6u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(0u64), - Fq::from(1u64), + G::Scalar::from(1u64), + G::Scalar::from(2u64), + G::Scalar::from(3u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(4u64), + G::Scalar::from(5u64), + G::Scalar::from(6u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(0u64), + G::Scalar::from(1u64), // the rest are zeroes ]; assert_eq!(A_mle.Z[..29], expected); - assert_eq!(A_mle.Z[29..], vec![Fq::ZERO; 64 - 29]); + assert_eq!(A_mle.Z[29..], vec![G::Scalar::ZERO; 64 - 29]); // check that the A_mle evaluated over the boolean hypercube equals the matrix A_i_j values - let bhc = BooleanHypercube::::new(A_mle.get_num_vars()); + let bhc = BooleanHypercube::::new(A_mle.get_num_vars()); let mut A_padded = A.clone(); A_padded.pad(); for term in A_padded.coeffs.iter() { @@ -430,4 +424,39 @@ mod tests { assert_eq!(&A_mle.evaluate(&s_i_j), coeff) } } + + #[test] + fn test_vector_add() { + test_vector_add_with::(); + } + + #[test] + fn test_vector_elem_product() { + test_vector_elem_product_with::(); + } + + #[test] + fn test_matrix_vector_product() { + test_matrix_vector_product_with::(); + } + + #[test] + fn test_hadamard_product() { + test_hadamard_product_with::(); + } + + #[test] + fn test_matrix_vector_product_sparse() { + test_matrix_vector_product_sparse_with::(); + } + + #[test] + fn test_sparse_matrix_n_cols_rows() { + test_sparse_matrix_n_cols_rows_with::(); + } + + #[test] + fn test_matrix_to_mle() { + test_matrix_to_mle_with::(); + } } From 4ae0cf70141a9036c719e356a69fd36ab9295b02 Mon Sep 17 00:00:00 2001 From: oskarth Date: Fri, 14 Jul 2023 16:16:05 +0800 Subject: [PATCH 091/100] test(utils): Make generic over PrimeField for 
SparseMatrix --- src/utils.rs | 130 +++++++++++++++++++++++++-------------------------- 1 file changed, 64 insertions(+), 66 deletions(-) diff --git a/src/utils.rs b/src/utils.rs index b99c622b2..478ad961b 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -281,7 +281,7 @@ mod tests { use super::*; use crate::hypercube::BooleanHypercube; - use pasta_curves::{Ep, Fq}; + use pasta_curves::Fq; fn to_F_vec(v: Vec) -> Vec { v.iter().map(|x| F::from(*x)).collect() @@ -322,56 +322,54 @@ mod tests { assert_eq!(res, to_F_vec::(vec![4, 10, 18])); } - fn test_matrix_vector_product_sparse_with() { + fn test_matrix_vector_product_sparse_with() { let matrix = vec![ - (0, 0, G::Scalar::from(1u64)), - (0, 1, G::Scalar::from(2u64)), - (0, 2, G::Scalar::from(3u64)), - (1, 0, G::Scalar::from(4u64)), - (1, 1, G::Scalar::from(5u64)), - (1, 2, G::Scalar::from(6u64)), + (0, 0, F::from(1u64)), + (0, 1, F::from(2u64)), + (0, 2, F::from(3u64)), + (1, 0, F::from(4u64)), + (1, 1, F::from(5u64)), + (1, 2, F::from(6u64)), ]; - let z = to_F_vec::(vec![1, 2, 3]); - let res = - matrix_vector_product_sparse::(&SparseMatrix::::with_coeffs(2, 3, matrix), &z); + let z = to_F_vec::(vec![1, 2, 3]); + let res = matrix_vector_product_sparse::(&SparseMatrix::::with_coeffs(2, 3, matrix), &z); - assert_eq!(res, to_F_vec::(vec![14, 32])); + assert_eq!(res, to_F_vec::(vec![14, 32])); } - fn test_sparse_matrix_n_cols_rows_with() { + fn test_sparse_matrix_n_cols_rows_with() { let matrix = vec![ - (0, 0, G::Scalar::from(1u64)), - (0, 1, G::Scalar::from(2u64)), - (0, 2, G::Scalar::from(3u64)), - (1, 0, G::Scalar::from(4u64)), - (1, 1, G::Scalar::from(5u64)), - (1, 2, G::Scalar::from(6u64)), - (4, 5, G::Scalar::from(1u64)), + (0, 0, F::from(1u64)), + (0, 1, F::from(2u64)), + (0, 2, F::from(3u64)), + (1, 0, F::from(4u64)), + (1, 1, F::from(5u64)), + (1, 2, F::from(6u64)), + (4, 5, F::from(1u64)), ]; - let A = SparseMatrix::::with_coeffs(5, 6, matrix.clone()); + let A = SparseMatrix::::with_coeffs(5, 6, matrix.clone()); assert_eq!(A.n_cols(), 6); assert_eq!(A.n_rows(), 5); // Since is sparse, the empty rows/cols at the end are not accounted unless we provide the info. 
- let A = SparseMatrix::::with_coeffs(10, 10, matrix); + let A = SparseMatrix::::with_coeffs(10, 10, matrix); assert_eq!(A.n_cols(), 10); assert_eq!(A.n_rows(), 10); } - #[test] - fn test_matrix_to_mle() { - let A = SparseMatrix::::with_coeffs( + fn test_matrix_to_mle_with() { + let A = SparseMatrix::::with_coeffs( 5, 5, vec![ - (0usize, 0usize, G::Scalar::from(1u64)), - (0, 1, G::Scalar::from(2u64)), - (0, 2, G::Scalar::from(3u64)), - (1, 0, G::Scalar::from(4u64)), - (1, 1, G::Scalar::from(5u64)), - (1, 2, G::Scalar::from(6u64)), - (3, 4, G::Scalar::from(1u64)), + (0usize, 0usize, F::from(1u64)), + (0, 1, F::from(2u64)), + (0, 2, F::from(3u64)), + (1, 0, F::from(4u64)), + (1, 1, F::from(5u64)), + (1, 2, F::from(6u64)), + (3, 4, F::from(1u64)), ], ); @@ -380,42 +378,42 @@ mod tests { // hardcoded testvector to ensure that in the future the SparseMatrix.to_mle method holds let expected = vec![ - G::Scalar::from(1u64), - G::Scalar::from(2u64), - G::Scalar::from(3u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(4u64), - G::Scalar::from(5u64), - G::Scalar::from(6u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(0u64), - G::Scalar::from(1u64), + F::from(1u64), + F::from(2u64), + F::from(3u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(4u64), + F::from(5u64), + F::from(6u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(0u64), + F::from(1u64), // the rest are zeroes ]; assert_eq!(A_mle.Z[..29], expected); - assert_eq!(A_mle.Z[29..], vec![G::Scalar::ZERO; 64 - 29]); + assert_eq!(A_mle.Z[29..], vec![F::ZERO; 64 - 29]); // check that the A_mle evaluated over the boolean hypercube equals the matrix A_i_j values - let bhc = BooleanHypercube::::new(A_mle.get_num_vars()); + let bhc = BooleanHypercube::::new(A_mle.get_num_vars()); let mut A_padded = A.clone(); A_padded.pad(); for term in A_padded.coeffs.iter() { @@ -447,16 +445,16 @@ mod tests { #[test] fn test_matrix_vector_product_sparse() { - test_matrix_vector_product_sparse_with::(); + test_matrix_vector_product_sparse_with::(); } #[test] fn test_sparse_matrix_n_cols_rows() { - test_sparse_matrix_n_cols_rows_with::(); + test_sparse_matrix_n_cols_rows_with::(); } #[test] fn test_matrix_to_mle() { - test_matrix_to_mle_with::(); + test_matrix_to_mle_with::(); } } From 3437fae6932cfb9d327886fb14783da102f686dc Mon Sep 17 00:00:00 2001 From: CPerezz Date: Fri, 14 Jul 2023 12:20:33 +0200 Subject: [PATCH 092/100] fix: Address all Clippy suggestions/lints As seen in previous CI runs: - 4ae0cf70141a9036c719e356a69fd36ab9295b02 - cde23f39cb143cf2a327cf0fd0e78bb16b01cc07 - ca55e738fdac5367f906889bae3e2b9d7126ad00 The clippy job is causing the CI to fail due to missing fixes on the code. This gets up to speed the codebase with Clippy recommendations and leaves the Uppercase warning disabled. 
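Most of the diff below replaces manual index loops, redundant `into_iter()` calls and needless clones/borrows with iterator combinators. A minimal before/after sketch of the recurring pattern, on plain integers unrelated to the CCS code (illustrative only):

  // Before: index loop with a manual accumulator (the shape Clippy warns about).
  fn weighted_sum_loop(c: &[u64], v: &[u64]) -> u64 {
    let mut acc = 0;
    for i in 0..c.len() {
      acc += c[i] * v[i];
    }
    acc
  }

  // After: the zip/fold style this patch moves the CCS code towards.
  fn weighted_sum_fold(c: &[u64], v: &[u64]) -> u64 {
    c.iter().zip(v).fold(0, |acc, (c_i, v_i)| acc + c_i * v_i)
  }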
--- src/ccs/cccs.rs | 18 +++++++---------- src/ccs/lcccs.rs | 10 ++++------ src/ccs/mod.rs | 41 ++++++++++++++++++--------------------- src/ccs/multifolding.rs | 18 ++++++++--------- src/ccs/util/mod.rs | 6 +++--- src/lib.rs | 1 + src/spartan/polynomial.rs | 6 +++--- src/utils.rs | 24 +++++++++++------------ 8 files changed, 57 insertions(+), 67 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index 052508cf6..c73710b38 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -64,9 +64,8 @@ impl CCCSShape { // Using `fold` requires to not have results inside. So we unwrap for now but // a better approach is needed (we ca just keep the for loop otherwise.) - Ok((0..self.ccs.q).into_iter().fold( - VirtualPolynomial::::new(self.ccs.s), - |q, idx| { + Ok( + (0..self.ccs.q).fold(VirtualPolynomial::::new(self.ccs.s), |q, idx| { let mut prod = VirtualPolynomial::::new(self.ccs.s); for &j in &self.ccs.S[idx] { @@ -88,8 +87,8 @@ impl CCCSShape { prod.scalar_mul(&self.ccs.c[idx]); // Add it to the running sum q.add(&prod) - }, - )) + }), + ) } /// Computes Q(x) = eq(beta, x) * q(x) @@ -179,9 +178,9 @@ mod tests { let q = cccs_shape.compute_q(&z).unwrap(); // Evaluate inside the hypercube - for x in BooleanHypercube::new(ccs_shape.s).into_iter() { + BooleanHypercube::new(ccs_shape.s).for_each(|x| { assert_eq!(Fq::zero(), q.evaluate(&x).unwrap()); - } + }); // Evaluate outside the hypercube let beta: Vec = (0..ccs_shape.s).map(|_| Fq::random(&mut rng)).collect(); @@ -223,7 +222,6 @@ mod tests { // Now sum Q(x) evaluations in the hypercube and expect it to be 0 let r = BooleanHypercube::new(ccs_shape.s) - .into_iter() .map(|x| Q.evaluate(&x).unwrap()) .fold(Fq::zero(), |acc, result| acc + result); assert_eq!(r, Fq::zero()); @@ -233,7 +231,7 @@ mod tests { /// Summing Q(x) over the hypercube is equivalent to evaluating G(x) at some point. 
/// This test makes sure that G(x) agrees with q(x) inside the hypercube, but not outside #[test] - fn test_Q_against_q() -> () { + fn test_Q_against_q() { let mut rng = OsRng; let z = CCSShape::::get_test_z(3); @@ -259,7 +257,6 @@ mod tests { // Get G(d) by summing over Q_d(x) over the hypercube let G_at_d = BooleanHypercube::new(ccs_shape.s) - .into_iter() .map(|x| Q_at_d.evaluate(&x).unwrap()) .fold(Fq::zero(), |acc, result| acc + result); assert_eq!(G_at_d, q.evaluate(&d).unwrap()); @@ -273,7 +270,6 @@ mod tests { // Get G(d) by summing over Q_d(x) over the hypercube let G_at_r = BooleanHypercube::new(ccs_shape.s) - .into_iter() .map(|x| Q_at_r.evaluate(&x).unwrap()) .fold(Fq::zero(), |acc, result| acc + result); assert_ne!(G_at_r, q.evaluate(&r).unwrap()); diff --git a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs index fc7304592..8b3720e4b 100644 --- a/src/ccs/lcccs.rs +++ b/src/ccs/lcccs.rs @@ -86,7 +86,7 @@ impl LCCCS { // Sanity check assert_eq!(z_mle.get_num_vars(), self.ccs.s_prime); - let sum_Mz = compute_sum_Mz::(&M_j, &z_mle); + let sum_Mz = compute_sum_Mz::(M_j, &z_mle); let sum_Mz_virtual = VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz), G::Scalar::ONE); let L_j_x = sum_Mz_virtual.build_f_hat(&self.r_x).unwrap(); vec_L_j_x.push(L_j_x); @@ -127,7 +127,7 @@ mod tests { #[test] /// Test linearized CCCS v_j against the L_j(x) - fn test_lcccs_v_j() -> () { + fn test_lcccs_v_j() { let mut rng = OsRng; // Gen test vectors & artifacts @@ -143,7 +143,6 @@ mod tests { for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { let sum_L_j_x = BooleanHypercube::new(ccs.s) - .into_iter() .map(|y| L_j_x.evaluate(&y).unwrap()) .fold(Fq::ZERO, |acc, result| acc + result); assert_eq!(v_i, sum_L_j_x); @@ -152,7 +151,7 @@ mod tests { /// Given a bad z, check that the v_j should not match with the L_j(x) #[test] - fn test_bad_v_j() -> () { + fn test_bad_v_j() { let mut rng = OsRng; // Gen test vectors & artifacts @@ -180,7 +179,6 @@ mod tests { let mut satisfied = true; for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { let sum_L_j_x = BooleanHypercube::new(ccs.s) - .into_iter() .map(|y| L_j_x.evaluate(&y).unwrap()) .fold(Fq::ZERO, |acc, result| acc + result); if v_i != sum_L_j_x { @@ -188,6 +186,6 @@ mod tests { } } - assert_eq!(satisfied, false); + assert!(!satisfied); } } diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 78b609962..3249fc4ce 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -100,8 +100,8 @@ impl CCSShape { // We require the number of public inputs/outputs to be even assert_ne!(l % 2, 0, " number of public i/o has to be even"); - let s = m.log_2() as usize; - let s_prime = n.log_2() as usize; + let s = m.log_2(); + let s_prime = n.log_2(); CCSShape { M: M.to_vec(), @@ -144,7 +144,7 @@ impl CCSShape { C, x: z[1..(1 + self.l)].to_vec(), }, - CCSWitness { w: w }, + CCSWitness { w }, self.to_cccs_shape(), ) } @@ -226,22 +226,19 @@ impl CCSShape { let z = concat(vec![vec![G::Scalar::ONE], U.x.clone(), W.w.clone()]); - let r = (0..self.q) - .into_iter() - .fold(vec![G::Scalar::ZERO; self.m], |r, idx| { - let hadamard_output = self.S[idx] - .iter() - .fold(vec![G::Scalar::ZERO; self.m], |acc, j| { - let mvp = matrix_vector_product_sparse(&self.M[*j], &z); - hadamard_product(&acc, &mvp) - }); - - // Multiply by the coefficient of this step - let c_M_j_z: Vec<::Scalar> = - vector_elem_product(&hadamard_output, &self.c[idx]); - - vector_add(&r, &c_M_j_z) - }); + let r = (0..self.q).fold(vec![G::Scalar::ZERO; self.m], |r, idx| { + let hadamard_output = self.S[idx] + .iter() + 
.fold(vec![G::Scalar::ZERO; self.m], |acc, j| { + let mvp = matrix_vector_product_sparse(&self.M[*j], &z); + hadamard_product(&acc, &mvp) + }); + + // Multiply by the coefficient of this step + let c_M_j_z: Vec<::Scalar> = vector_elem_product(&hadamard_output, self.c[idx]); + + vector_add(&r, &c_M_j_z) + }); // verify if comm_W is a commitment to W let res_comm: bool = U.comm_w == CE::::commit(ck, &W.w); @@ -282,8 +279,8 @@ impl CCSShape { c: vec![G::Scalar::ONE, -G::Scalar::ONE], m: r1cs.num_cons, n: r1cs.num_vars, - s: r1cs.num_cons.log_2() as usize, - s_prime: r1cs.num_vars.log_2() as usize, + s: r1cs.num_cons.log_2(), + s_prime: r1cs.num_vars.log_2(), } } @@ -305,7 +302,7 @@ impl CCSShape { } #[cfg(test)] - pub(crate) fn gen_test_ccs(z: &Vec) -> (CCSShape, CCSWitness, CCSInstance) { + pub(crate) fn gen_test_ccs(z: &[G::Scalar]) -> (CCSShape, CCSWitness, CCSInstance) { let one = G::Scalar::ONE; let A = vec![ (0, 1, one), diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index 9e19e93c2..b080ac8ac 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -242,17 +242,17 @@ mod tests { // evaluate g(x) over x \in {0,1}^s let mut g_on_bhc = Fq::zero(); - for x in BooleanHypercube::new(ccs.s).into_iter() { + for x in BooleanHypercube::new(ccs.s) { g_on_bhc += g.evaluate(&x).unwrap(); } // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s let mut sum_Lj_on_bhc = Fq::zero(); let vec_L = lcccs_instance.compute_Ls(&z1); - for x in BooleanHypercube::new(ccs.s).into_iter() { - for j in 0..vec_L.len() { + for x in BooleanHypercube::new(ccs.s) { + for (j, coeff) in vec_L.iter().enumerate() { let gamma_j = gamma.pow([j as u64]); - sum_Lj_on_bhc += vec_L[j].evaluate(&x).unwrap() * gamma_j; + sum_Lj_on_bhc += coeff.evaluate(&x).unwrap() * gamma_j; } } @@ -269,7 +269,7 @@ mod tests { } #[test] - fn test_compute_sigmas_and_thetas() -> () { + fn test_compute_sigmas_and_thetas() { let z1 = CCSShape::::get_test_z(3); let z2 = CCSShape::::get_test_z(4); @@ -300,16 +300,16 @@ mod tests { { // evaluate g(x) over x \in {0,1}^s let mut g_on_bhc = Fq::zero(); - for x in BooleanHypercube::new(ccs.s).into_iter() { + for x in BooleanHypercube::new(ccs.s) { g_on_bhc += g.evaluate(&x).unwrap(); } // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s let mut sum_Lj_on_bhc = Fq::zero(); let vec_L = lcccs_instance.compute_Ls(&z1); - for x in BooleanHypercube::new(ccs.s).into_iter() { - for j in 0..vec_L.len() { + for x in BooleanHypercube::new(ccs.s) { + for (j, coeff) in vec_L.iter().enumerate() { let gamma_j = gamma.pow([j as u64]); - sum_Lj_on_bhc += vec_L[j].evaluate(&x).unwrap() * gamma_j; + sum_Lj_on_bhc += coeff.evaluate(&x).unwrap() * gamma_j; } } diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index e1168d358..d9ebfdfac 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -43,7 +43,7 @@ pub fn compute_sum_Mz( let bhc = BooleanHypercube::::new(z.get_num_vars()); for y in bhc.into_iter() { - let M_y = fix_variables(&M_mle, &y); + let M_y = fix_variables(M_mle, &y); // reverse y to match spartan/polynomial evaluate let y_rev: Vec = y.into_iter().rev().collect(); @@ -175,7 +175,7 @@ mod tests { let mut last_vars_fixed = A_mle.clone(); // this is equivalent to Espresso/hyperplonk's 'fix_last_variables' mehthod for bit in y.clone().iter().rev() { - last_vars_fixed.bound_poly_var_top(&bit) + last_vars_fixed.bound_poly_var_top(bit) } assert_eq!(last_vars_fixed.Z, row_i); @@ -183,7 +183,7 @@ mod tests { } #[test] - fn 
test_compute_sum_Mz_over_boolean_hypercube() -> () { + fn test_compute_sum_Mz_over_boolean_hypercube() { let z = CCSShape::::get_test_z(3); let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); diff --git a/src/lib.rs b/src/lib.rs index b24072643..8a5040128 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -9,6 +9,7 @@ )] #![allow(non_snake_case)] #![allow(clippy::type_complexity)] +#![allow(clippy::upper_case_acronyms)] #![forbid(unsafe_code)] // private modules diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 8c6f8dd9b..55d4e43ca 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -270,11 +270,11 @@ mod tests { assert_eq!(y, Fp::one()); let eval_list = eq_poly.evals(); - for i in 0..(2_usize).pow(3) { + for (i, &coeff) in eval_list.iter().enumerate().take((2_usize).pow(3)) { if i == 5 { - assert_eq!(eval_list[i], Fp::one()); + assert_eq!(coeff, Fp::one()); } else { - assert_eq!(eval_list[i], Fp::zero()); + assert_eq!(coeff, Fp::zero()); } } } diff --git a/src/utils.rs b/src/utils.rs index 478ad961b..969ccef80 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -141,10 +141,10 @@ pub fn vector_add(a: &Vec, b: &Vec) -> Vec { res } -pub fn vector_elem_product(a: &Vec, e: &F) -> Vec { +pub fn vector_elem_product(a: &Vec, e: F) -> Vec { let mut res = Vec::with_capacity(a.len()); - for i in 0..a.len() { - res.push(a[i] * e); + for &item in a { + res.push(item * e); } res @@ -161,10 +161,10 @@ pub fn matrix_vector_product(matrix: &Vec>, vector: &Vec(matrix: &Vec>, vector: &Vec( matrix: &SparseMatrix, - vector: &Vec, + vector: &[F], ) -> Vec { let mut res = vec![F::ZERO; matrix.n_rows()]; for &(row, col, value) in matrix.coeffs.iter() { @@ -258,12 +258,10 @@ pub fn random_zero_mle_list( } } - let list = multiplicands + multiplicands .into_iter() .map(|x| Arc::new(MultilinearPolynomial::new(x))) - .collect(); - - list + .collect() } pub(crate) fn log2(x: usize) -> u32 { @@ -301,7 +299,7 @@ mod tests { fn test_vector_elem_product_with() { let a = to_F_vec::(vec![1, 2, 3]); let e = F::from(2); - let res = vector_elem_product(&a, &e); + let res = vector_elem_product(&a, e); assert_eq!(res, to_F_vec::(vec![2, 4, 6])); } @@ -414,7 +412,7 @@ mod tests { // check that the A_mle evaluated over the boolean hypercube equals the matrix A_i_j values let bhc = BooleanHypercube::::new(A_mle.get_num_vars()); - let mut A_padded = A.clone(); + let mut A_padded = A; A_padded.pad(); for term in A_padded.coeffs.iter() { let (i, j, coeff) = term; From 6fa448fda4036f003f8eec2d38aaa639f34de33e Mon Sep 17 00:00:00 2001 From: arnaucube Date: Sun, 16 Jul 2023 17:24:35 +0200 Subject: [PATCH 093/100] Update bit_decompose def: - Move bit_decompose into hypercube.rs which is closer conceptually and only used there (except for a VirtualPolynomial test) - Remove unused bit_decompose imports from other files --- src/ccs/cccs.rs | 1 - src/ccs/lcccs.rs | 1 - src/ccs/mod.rs | 1 - src/ccs/multifolding.rs | 1 - src/ccs/util/mod.rs | 1 - src/ccs/util/virtual_poly.rs | 2 +- src/hypercube.rs | 12 +++++++++++- src/utils.rs | 11 ----------- 8 files changed, 12 insertions(+), 18 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index c73710b38..ba801cf33 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -1,7 +1,6 @@ use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; use crate::spartan::polynomial::MultilinearPolynomial; -use crate::utils::bit_decompose; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, diff --git 
a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs index 8b3720e4b..91abecdfa 100644 --- a/src/ccs/lcccs.rs +++ b/src/ccs/lcccs.rs @@ -4,7 +4,6 @@ use crate::ccs::util::compute_all_sum_Mz_evals; use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; use crate::spartan::polynomial::MultilinearPolynomial; -use crate::utils::bit_decompose; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 3249fc4ce..edb292d5c 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -7,7 +7,6 @@ use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; use crate::spartan::polynomial::MultilinearPolynomial; -use crate::utils::bit_decompose; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index b080ac8ac..5296f5296 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -6,7 +6,6 @@ use crate::ccs::util::compute_all_sum_Mz_evals; use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; use crate::spartan::polynomial::MultilinearPolynomial; -use crate::utils::bit_decompose; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index d9ebfdfac..9cc10533c 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -1,7 +1,6 @@ use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; use crate::spartan::polynomial::MultilinearPolynomial; -use crate::utils::bit_decompose; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, diff --git a/src/ccs/util/virtual_poly.rs b/src/ccs/util/virtual_poly.rs index 9a2873e0f..b632f8d9f 100644 --- a/src/ccs/util/virtual_poly.rs +++ b/src/ccs/util/virtual_poly.rs @@ -1,7 +1,6 @@ use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; use crate::spartan::polynomial::MultilinearPolynomial; -use crate::utils::bit_decompose; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, @@ -395,6 +394,7 @@ fn build_eq_x_r_helper(r: &[F], buf: &mut Vec) -> Result<(), N #[cfg(test)] mod test { use super::*; + use crate::hypercube::bit_decompose; use pasta_curves::Fp; use rand_core::OsRng; diff --git a/src/hypercube.rs b/src/hypercube.rs index bc13cde93..12a376bc3 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -2,7 +2,6 @@ #![allow(unused)] use std::marker::PhantomData; -use crate::utils::*; /// There's some overlap with polynomial.rs. use ff::PrimeField; use itertools::Itertools; @@ -48,6 +47,17 @@ impl Iterator for BooleanHypercube { } } +/// Decompose an integer into a binary vector in little endian. +pub fn bit_decompose(input: u64, num_var: usize) -> Vec { + let mut res = Vec::with_capacity(num_var); + let mut i = input; + for _ in 0..num_var { + res.push(i & 1 == 1); + i >>= 1; + } + res +} + #[cfg(test)] mod tests { use super::*; diff --git a/src/utils.rs b/src/utils.rs index 969ccef80..c1309492f 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -196,17 +196,6 @@ pub fn hadamard_product(a: &Vec, b: &Vec) -> Vec { res } -/// Decompose an integer into a binary vector in little endian. 
-pub fn bit_decompose(input: u64, num_var: usize) -> Vec { - let mut res = Vec::with_capacity(num_var); - let mut i = input; - for _ in 0..num_var { - res.push(i & 1 == 1); - i >>= 1; - } - res -} - /// Sample a random list of multilinear polynomials. /// Returns /// - the list of polynomials, From a23c6ab7db8d0af664204d75adf310b583bc9e80 Mon Sep 17 00:00:00 2001 From: oskarth Date: Thu, 20 Jul 2023 15:56:16 +0800 Subject: [PATCH 094/100] refactor(virtual_poly): Remove unnecessary default from PhantomData (#39) Fixes additional clippy errors I see when I upgraded my Rust toolchain (had some issues with clippy and Homebrew) ``` error: use of `default` to create a unit struct --> src/ccs/util/virtual_poly.rs:120:29 | 120 | phantom: PhantomData::default(), | ^^^^^^^^^^^ help: remove this call to `default` | = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#default_constructed_unit_structs note: the lint level is defined here ``` --- src/ccs/util/virtual_poly.rs | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/ccs/util/virtual_poly.rs b/src/ccs/util/virtual_poly.rs index b632f8d9f..5ecf94854 100644 --- a/src/ccs/util/virtual_poly.rs +++ b/src/ccs/util/virtual_poly.rs @@ -116,7 +116,7 @@ impl VirtualPolynomial { aux_info: VPAuxInfo { max_degree: 0, num_variables, - phantom: PhantomData::default(), + phantom: PhantomData, }, products: Vec::new(), flattened_ml_extensions: Vec::new(), @@ -135,7 +135,7 @@ impl VirtualPolynomial { // The max degree is the max degree of any individual variable max_degree: 1, num_variables: mle.get_num_vars(), - phantom: PhantomData::default(), + phantom: PhantomData, }, // here `0` points to the first polynomial of `flattened_ml_extensions` products: vec![(coefficient, vec![0])], From 9a508b2b742cf567cbf6b8daaf11c4e7c032e5be Mon Sep 17 00:00:00 2001 From: oskarth Date: Thu, 20 Jul 2023 16:56:41 +0800 Subject: [PATCH 095/100] refactor: Make tests generic w.r.t. curve/group (#40) Addresses https://github.com/privacy-scaling-explorations/Nova/issues/34 Should cover all files --- src/ccs/cccs.rs | 76 +++++++++++++++---------- src/ccs/lcccs.rs | 58 +++++++++++-------- src/ccs/mod.rs | 26 +++++---- src/ccs/multifolding.rs | 81 +++++++++++++++------------ src/ccs/util/mod.rs | 109 ++++++++++++++++++++++-------------- src/hypercube.rs | 13 +++-- src/spartan/polynomial.rs | 114 +++++++++++++++++++++++--------------- 7 files changed, 287 insertions(+), 190 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index ba801cf33..68ffb520d 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -157,17 +157,14 @@ mod tests { vecs.iter().map(Vec::as_slice).collect() } - /// Do some sanity checks on q(x). It's a multivariable polynomial and it should evaluate to zero inside the - /// hypercube, but to not-zero outside the hypercube. 
- #[test] - fn test_compute_q() { + fn test_compute_q_with() { let mut rng = OsRng; - let z = CCSShape::::get_test_z(3); - let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); + let z = CCSShape::::get_test_z(3); + let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); // generate ck - let ck = CCSShape::::commitment_key(&ccs_shape); + let ck = CCSShape::::commitment_key(&ccs_shape); // ensure CCS is satisfied ccs_shape.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); @@ -178,30 +175,33 @@ mod tests { // Evaluate inside the hypercube BooleanHypercube::new(ccs_shape.s).for_each(|x| { - assert_eq!(Fq::zero(), q.evaluate(&x).unwrap()); + assert_eq!(G::Scalar::ZERO, q.evaluate(&x).unwrap()); }); // Evaluate outside the hypercube - let beta: Vec = (0..ccs_shape.s).map(|_| Fq::random(&mut rng)).collect(); - assert_ne!(Fq::zero(), q.evaluate(&beta).unwrap()); + let beta: Vec = (0..ccs_shape.s) + .map(|_| G::Scalar::random(&mut rng)) + .collect(); + assert_ne!(G::Scalar::ZERO, q.evaluate(&beta).unwrap()); } - #[test] - fn test_compute_Q() { + fn test_compute_Q_with() { let mut rng = OsRng; - let z = CCSShape::::get_test_z(3); - let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); + let z = CCSShape::::get_test_z(3); + let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); // generate ck - let ck = CCSShape::::commitment_key(&ccs_shape); + let ck = CCSShape::::commitment_key(&ccs_shape); // ensure CCS is satisfied ccs_shape.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); // Generate CCCS artifacts let cccs_shape = ccs_shape.to_cccs_shape(); - let beta: Vec = (0..ccs_shape.s).map(|_| Fq::random(&mut rng)).collect(); + let beta: Vec = (0..ccs_shape.s) + .map(|_| G::Scalar::random(&mut rng)) + .collect(); // Compute Q(x) = eq(beta, x) * q(x). let Q = cccs_shape @@ -222,22 +222,18 @@ mod tests { // Now sum Q(x) evaluations in the hypercube and expect it to be 0 let r = BooleanHypercube::new(ccs_shape.s) .map(|x| Q.evaluate(&x).unwrap()) - .fold(Fq::zero(), |acc, result| acc + result); - assert_eq!(r, Fq::zero()); + .fold(G::Scalar::ZERO, |acc, result| acc + result); + assert_eq!(r, G::Scalar::ZERO); } - /// The polynomial G(x) (see above) interpolates q(x) inside the hypercube. - /// Summing Q(x) over the hypercube is equivalent to evaluating G(x) at some point. 
- /// This test makes sure that G(x) agrees with q(x) inside the hypercube, but not outside - #[test] - fn test_Q_against_q() { + fn test_Q_against_q_with() { let mut rng = OsRng; - let z = CCSShape::::get_test_z(3); - let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); + let z = CCSShape::::get_test_z(3); + let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); // generate ck - let ck = CCSShape::::commitment_key(&ccs_shape); + let ck = CCSShape::::commitment_key(&ccs_shape); // ensure CCS is satisfied ccs_shape.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); @@ -257,12 +253,14 @@ mod tests { // Get G(d) by summing over Q_d(x) over the hypercube let G_at_d = BooleanHypercube::new(ccs_shape.s) .map(|x| Q_at_d.evaluate(&x).unwrap()) - .fold(Fq::zero(), |acc, result| acc + result); + .fold(G::Scalar::ZERO, |acc, result| acc + result); assert_eq!(G_at_d, q.evaluate(&d).unwrap()); } // Now test that they should disagree outside of the hypercube - let r: Vec = (0..ccs_shape.s).map(|_| Fq::random(&mut rng)).collect(); + let r: Vec = (0..ccs_shape.s) + .map(|_| G::Scalar::random(&mut rng)) + .collect(); let Q_at_r = cccs_shape .compute_Q(&z, &r) .expect("Computing Q_at_r shouldn't fail"); @@ -270,7 +268,27 @@ mod tests { // Get G(d) by summing over Q_d(x) over the hypercube let G_at_r = BooleanHypercube::new(ccs_shape.s) .map(|x| Q_at_r.evaluate(&x).unwrap()) - .fold(Fq::zero(), |acc, result| acc + result); + .fold(G::Scalar::ZERO, |acc, result| acc + result); assert_ne!(G_at_r, q.evaluate(&r).unwrap()); } + + /// Do some sanity checks on q(x). It's a multivariable polynomial and it should evaluate to zero inside the + /// hypercube, but to not-zero outside the hypercube. + #[test] + fn test_compute_q() { + test_compute_q_with::(); + } + + #[test] + fn test_compute_Q() { + test_compute_Q_with::(); + } + + /// The polynomial G(x) (see above) interpolates q(x) inside the hypercube. + /// Summing Q(x) over the hypercube is equivalent to evaluating G(x) at some point. + /// This test makes sure that G(x) agrees with q(x) inside the hypercube, but not outside + #[test] + fn test_Q_against_q() { + test_Q_against_q_with::(); + } } diff --git a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs index 91abecdfa..995f3c984 100644 --- a/src/ccs/lcccs.rs +++ b/src/ccs/lcccs.rs @@ -102,36 +102,33 @@ mod tests { use super::*; - #[test] - fn satisfied_ccs_is_satisfied_lcccs() { + fn satisfied_ccs_is_satisfied_lcccs_with() { // Gen test vectors & artifacts - let z = CCSShape::::get_test_z(3); - let (ccs, witness, instance) = CCSShape::::gen_test_ccs(&z); + let z = CCSShape::::get_test_z(3); + let (ccs, witness, instance) = CCSShape::::gen_test_ccs(&z); let ck = ccs.commitment_key(); assert!(ccs.is_sat(&ck, &instance, &witness).is_ok()); - // Wrong z so that the relation does not hold - let mut bad_z = z.clone(); - bad_z[3] = Fq::ZERO; - // LCCCS with the correct z should pass let (lcccs, _) = ccs.to_lcccs(&mut OsRng, &ck, &z); assert!(lcccs.is_sat(&ck, &witness).is_ok()); + // Wrong z so that the relation does not hold + let mut bad_z = z; + bad_z[3] = G::Scalar::ZERO; + // LCCCS with the wrong z should not pass `is_sat`. 
// LCCCS with the correct z should pass let (lcccs, _) = ccs.to_lcccs(&mut OsRng, &ck, &bad_z); assert!(lcccs.is_sat(&ck, &witness).is_err()); } - #[test] - /// Test linearized CCCS v_j against the L_j(x) - fn test_lcccs_v_j() { + fn test_lcccs_v_j_with() { let mut rng = OsRng; // Gen test vectors & artifacts - let z = CCSShape::::get_test_z(3); - let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); + let z = CCSShape::::get_test_z(3); + let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); let ck = ccs.commitment_key(); // Get LCCCS @@ -143,29 +140,25 @@ mod tests { for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { let sum_L_j_x = BooleanHypercube::new(ccs.s) .map(|y| L_j_x.evaluate(&y).unwrap()) - .fold(Fq::ZERO, |acc, result| acc + result); + .fold(G::Scalar::ZERO, |acc, result| acc + result); assert_eq!(v_i, sum_L_j_x); } } - /// Given a bad z, check that the v_j should not match with the L_j(x) - #[test] - fn test_bad_v_j() { + fn test_bad_v_j_with() { let mut rng = OsRng; // Gen test vectors & artifacts - let z = CCSShape::::get_test_z(3); - let (ccs, witness, instance) = CCSShape::::gen_test_ccs(&z); + let z = CCSShape::::get_test_z(3); + let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); let ck = ccs.commitment_key(); // Mutate z so that the relation does not hold let mut bad_z = z.clone(); - bad_z[3] = Fq::ZERO; + bad_z[3] = G::Scalar::ZERO; - // Compute v_j with the right z + // Get LCCCS let (lcccs, _) = ccs.to_lcccs(&mut rng, &ck, &z); - // Assert LCCCS is satisfied with the original Z - assert!(lcccs.is_sat(&ck, &witness).is_ok()); // Bad compute L_j(x) with the bad z let vec_L_j_x = lcccs.compute_Ls(&bad_z); @@ -179,7 +172,7 @@ mod tests { for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { let sum_L_j_x = BooleanHypercube::new(ccs.s) .map(|y| L_j_x.evaluate(&y).unwrap()) - .fold(Fq::ZERO, |acc, result| acc + result); + .fold(G::Scalar::ZERO, |acc, result| acc + result); if v_i != sum_L_j_x { satisfied = false; } @@ -187,4 +180,21 @@ mod tests { assert!(!satisfied); } + + #[test] + fn satisfied_ccs_is_satisfied_lcccs() { + satisfied_ccs_is_satisfied_lcccs_with::(); + } + + #[test] + /// Test linearized CCCS v_j against the L_j(x) + fn test_lcccs_v_j() { + test_lcccs_v_j_with::(); + } + + /// Given a bad z, check that the v_j should not match with the L_j(x) + #[test] + fn test_bad_v_j() { + test_bad_v_j_with::(); + } } diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index edb292d5c..5b638aa9b 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -399,16 +399,14 @@ pub mod test { use ff::{Field, PrimeField}; use rand::rngs::OsRng; - type S = pasta_curves::pallas::Scalar; - type G = pasta_curves::pallas::Point; + use pasta_curves::Ep; - #[test] - fn test_tiny_ccs() { + fn test_tiny_ccs_with() { // 1. Generate valid R1CS Shape // 2. Convert to CCS // 3. 
Test that it is satisfiable - let one = S::one(); + let one = G::Scalar::ONE; let (num_cons, num_vars, num_io, A, B, C) = { let num_cons = 4; let num_vars = 4; @@ -425,9 +423,9 @@ pub mod test { // constraint and a column for every entry in z = (vars, u, inputs) // An R1CS instance is satisfiable iff: // Az \circ Bz = u \cdot Cz + E, where z = (vars, 1, inputs) - let mut A: Vec<(usize, usize, S)> = Vec::new(); - let mut B: Vec<(usize, usize, S)> = Vec::new(); - let mut C: Vec<(usize, usize, S)> = Vec::new(); + let mut A: Vec<(usize, usize, G::Scalar)> = Vec::new(); + let mut B: Vec<(usize, usize, G::Scalar)> = Vec::new(); + let mut C: Vec<(usize, usize, G::Scalar)> = Vec::new(); // constraint 0 entries in (A,B,C) // `I0 * I0 - Z0 = 0` @@ -470,12 +468,11 @@ pub mod test { // generate generators and ro constants let _ck = S.commitment_key(); - let _ro_consts = - <::RO as ROTrait<::Base, ::Scalar>>::Constants::new(); + let _ro_consts = >::Constants::new(); // 3. Test that CCS is satisfiable let _rand_inst_witness_generator = - |ck: &CommitmentKey, I: &S| -> (S, CCSInstance, CCSWitness) { + |ck: &CommitmentKey, I: &G::Scalar| -> (G::Scalar, CCSInstance, CCSWitness) { let i0 = *I; // compute a satisfying (vars, X) tuple @@ -486,7 +483,7 @@ pub mod test { let i1 = z2 + one + one + one + one + one; // constraint 3 // store the witness and IO for the instance - let W = vec![z0, z1, z2, S::zero()]; + let W = vec![z0, z1, z2, G::Scalar::ZERO]; let X = vec![i0, i1]; (i1, W, X) }; @@ -506,4 +503,9 @@ pub mod test { (O, U, ccs_w) }; } + + #[test] + fn test_tiny_ccs() { + test_tiny_ccs_with::(); + } } diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index 5296f5296..ed6c39d3b 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -205,48 +205,48 @@ pub fn eq_eval(x: &[F], y: &[F]) -> F { #[cfg(test)] mod tests { use super::*; - use crate::ccs::util::virtual_poly::build_eq_x_r; + use crate::ccs::{test, util::virtual_poly::build_eq_x_r}; use pasta_curves::{Ep, Fq}; use rand_core::OsRng; // NIMFS: Non Interactive Multifolding Scheme - type NIMFS = Multifolding; + type NIMFS = Multifolding; - #[test] - fn test_compute_g() { - let z1 = CCSShape::::get_test_z(3); - let z2 = CCSShape::::get_test_z(4); + fn test_compute_g_with() { + let z1 = CCSShape::::get_test_z(3); + let z2 = CCSShape::::get_test_z(4); - let (_, ccs_witness_1, ccs_instance_1) = CCSShape::gen_test_ccs(&z2); - let (ccs, ccs_witness_2, ccs_instance_2) = CCSShape::gen_test_ccs(&z1); + let (_, ccs_witness_1, ccs_instance_1) = CCSShape::::gen_test_ccs(&z2); + let (ccs, ccs_witness_2, ccs_instance_2) = CCSShape::::gen_test_ccs(&z1); let ck = ccs.commitment_key(); assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); let mut rng = OsRng; - let gamma: Fq = Fq::random(&mut rng); - let beta: Vec = (0..ccs.s).map(|_| Fq::random(&mut rng)).collect(); + let gamma: G::Scalar = G::Scalar::random(&mut rng); + let beta: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &ck, &z1); let cccs_instance = ccs.to_cccs_shape(); - let mut sum_v_j_gamma = Fq::zero(); + let mut sum_v_j_gamma = G::Scalar::ZERO; for j in 0..lcccs_instance.v.len() { let gamma_j = gamma.pow([j as u64]); sum_v_j_gamma += lcccs_instance.v[j] * gamma_j; } // Compute g(x) with that r_x + let g = NIMFS::compute_g(&lcccs_instance, &cccs_instance, &z1, &z2, gamma, &beta); // evaluate g(x) over x \in {0,1}^s - let mut g_on_bhc = 
Fq::zero(); + let mut g_on_bhc = G::Scalar::ZERO; for x in BooleanHypercube::new(ccs.s) { g_on_bhc += g.evaluate(&x).unwrap(); } // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s - let mut sum_Lj_on_bhc = Fq::zero(); + let mut sum_Lj_on_bhc = G::Scalar::ZERO; let vec_L = lcccs_instance.compute_Ls(&z1); for x in BooleanHypercube::new(ccs.s) { for (j, coeff) in vec_L.iter().enumerate() { @@ -256,7 +256,7 @@ mod tests { } // Q(x) over bhc is assumed to be zero, as checked in the test 'test_compute_Q' - assert_ne!(g_on_bhc, Fq::zero()); + assert_ne!(g_on_bhc, G::Scalar::ZERO); // evaluating g(x) over the boolean hypercube should give the same result as evaluating the // sum of gamma^j * Lj(x) over the boolean hypercube @@ -267,22 +267,21 @@ mod tests { assert_eq!(g_on_bhc, sum_v_j_gamma); } - #[test] - fn test_compute_sigmas_and_thetas() { - let z1 = CCSShape::::get_test_z(3); - let z2 = CCSShape::::get_test_z(4); + fn test_compute_sigmas_and_thetas_with() { + let z1 = CCSShape::::get_test_z(3); + let z2 = CCSShape::::get_test_z(4); - let (_, ccs_witness_1, ccs_instance_1) = CCSShape::gen_test_ccs(&z2); - let (ccs, ccs_witness_2, ccs_instance_2) = CCSShape::gen_test_ccs(&z1); - let ck = ccs.commitment_key(); + let (_, ccs_witness_1, ccs_instance_1) = CCSShape::::gen_test_ccs(&z2); + let (ccs, ccs_witness_2, ccs_instance_2) = CCSShape::::gen_test_ccs(&z1); + let ck: CommitmentKey = ccs.commitment_key(); assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); let mut rng = OsRng; - let gamma: Fq = Fq::random(&mut rng); - let beta: Vec = (0..ccs.s).map(|_| Fq::random(&mut rng)).collect(); - let r_x_prime: Vec = (0..ccs.s).map(|_| Fq::random(&mut rng)).collect(); + let gamma: G::Scalar = G::Scalar::random(&mut rng); + let beta: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); + let r_x_prime: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); // Initialize a multifolding object let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &ck, &z1); @@ -298,12 +297,12 @@ mod tests { // Assert `g` is correctly computed here. 
{ // evaluate g(x) over x \in {0,1}^s - let mut g_on_bhc = Fq::zero(); + let mut g_on_bhc = G::Scalar::ZERO; for x in BooleanHypercube::new(ccs.s) { g_on_bhc += g.evaluate(&x).unwrap(); } // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s - let mut sum_Lj_on_bhc = Fq::zero(); + let mut sum_Lj_on_bhc = G::Scalar::ZERO; let vec_L = lcccs_instance.compute_Ls(&z1); for x in BooleanHypercube::new(ccs.s) { for (j, coeff) in vec_L.iter().enumerate() { @@ -339,20 +338,24 @@ mod tests { } #[test] - fn test_lcccs_fold() { - let z1 = CCSShape::::get_test_z(3); - let z2 = CCSShape::::get_test_z(4); + fn test_compute_g() { + test_compute_g_with::(); + } + + fn test_lccs_fold_with() { + let z1 = CCSShape::::get_test_z(3); + let z2 = CCSShape::::get_test_z(4); // ccs stays the same regardless of z1 or z2 - let (ccs, ccs_witness_1, ccs_instance_1) = CCSShape::gen_test_ccs(&z1); - let (_, ccs_witness_2, ccs_instance_2) = CCSShape::gen_test_ccs(&z2); - let ck = ccs.commitment_key(); + let (ccs, ccs_witness_1, ccs_instance_1) = CCSShape::::gen_test_ccs(&z1); + let (_, ccs_witness_2, ccs_instance_2) = CCSShape::::gen_test_ccs(&z2); + let ck: CommitmentKey = ccs.commitment_key(); assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); let mut rng = OsRng; - let r_x_prime: Vec = (0..ccs.s).map(|_| Fq::random(&mut rng)).collect(); + let r_x_prime: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); // Generate a new multifolding instance let mut nimfs = NIMFS::new(ccs.clone()); @@ -370,7 +373,7 @@ mod tests { .is_sat(&ck, &ccs_witness_2, &cccs_instance) .is_ok()); - let rho = Fq::random(&mut rng); + let rho = G::Scalar::random(&mut rng); let folded = nimfs.fold( &lcccs_instance, @@ -386,4 +389,14 @@ mod tests { // check lcccs relation assert!(folded.is_sat(&ck, &w_folded).is_ok()); } + + #[test] + fn test_compute_sigmas_and_thetas() { + test_compute_sigmas_and_thetas_with::() + } + + #[test] + fn test_lcccs_fold() { + test_lccs_fold_with::() + } } diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index 9cc10533c..cf8831b41 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -117,40 +117,39 @@ mod tests { use pasta_curves::{Ep, Fq}; use rand_core::OsRng; - #[test] - fn test_fix_variables() { - let A = SparseMatrix::::with_coeffs( + fn test_fix_variables_with() { + let A = SparseMatrix::::with_coeffs( 4, 4, vec![ - (0, 0, Fq::from(2u64)), - (0, 1, Fq::from(3u64)), - (0, 2, Fq::from(4u64)), - (0, 3, Fq::from(4u64)), - (1, 0, Fq::from(4u64)), - (1, 1, Fq::from(11u64)), - (1, 2, Fq::from(14u64)), - (1, 3, Fq::from(14u64)), - (2, 0, Fq::from(2u64)), - (2, 1, Fq::from(8u64)), - (2, 2, Fq::from(17u64)), - (2, 3, Fq::from(17u64)), - (3, 0, Fq::from(420u64)), - (3, 1, Fq::from(4u64)), - (3, 2, Fq::from(2u64)), - (3, 3, Fq::ZERO), + (0, 0, F::from(2u64)), + (0, 1, F::from(3u64)), + (0, 2, F::from(4u64)), + (0, 3, F::from(4u64)), + (1, 0, F::from(4u64)), + (1, 1, F::from(11u64)), + (1, 2, F::from(14u64)), + (1, 3, F::from(14u64)), + (2, 0, F::from(2u64)), + (2, 1, F::from(8u64)), + (2, 2, F::from(17u64)), + (2, 3, F::from(17u64)), + (3, 0, F::from(420u64)), + (3, 1, F::from(4u64)), + (3, 2, F::from(2u64)), + (3, 3, F::ZERO), ], ); let A_mle = A.to_mle(); - let bhc = BooleanHypercube::::new(2); + let bhc = BooleanHypercube::::new(2); for (i, y) in bhc.enumerate() { let A_mle_op = fix_variables(&A_mle, &y); // Check that fixing first variables pins down a column // i.e. 
fixing x to 0 will return the first column // fixing x to 1 will return the second column etc. - let column_i: Vec = A + let column_i: Vec = A .clone() .coeffs() .iter() @@ -163,7 +162,7 @@ mod tests { // // Now check that fixing last variables pins down a row // // i.e. fixing y to 0 will return the first row // // fixing y to 1 will return the second row etc. - let row_i: Vec = A + let row_i: Vec = A .clone() .coeffs() .iter() @@ -181,50 +180,78 @@ mod tests { } } - #[test] - fn test_compute_sum_Mz_over_boolean_hypercube() { - let z = CCSShape::::get_test_z(3); - let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); + fn test_compute_sum_Mz_over_boolean_hypercube_with() { + let z = CCSShape::::get_test_z(3); + let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); // Generate other artifacts - let ck = CCSShape::::commitment_key(&ccs); + let ck = CCSShape::::commitment_key(&ccs); let (_, _, cccs) = ccs.to_cccs(&mut OsRng, &ck, &z); let z_mle = dense_vec_to_mle(ccs.s_prime, &z); // check that evaluating over all the values x over the boolean hypercube, the result of // the next for loop is equal to 0 - let mut r = Fq::zero(); + let mut r = G::Scalar::ZERO; let bch = BooleanHypercube::new(ccs.s); for x in bch.into_iter() { for i in 0..ccs.q { - let mut Sj_prod = Fq::one(); + let mut Sj_prod = G::Scalar::ONE; for j in ccs.S[i].clone() { - let sum_Mz: MultilinearPolynomial = compute_sum_Mz::(&cccs.M_MLE[j], &z_mle); + let sum_Mz: MultilinearPolynomial = + compute_sum_Mz::(&cccs.M_MLE[j], &z_mle); let sum_Mz_x = sum_Mz.evaluate(&x); Sj_prod *= sum_Mz_x; } r += Sj_prod * ccs.c[i]; } - assert_eq!(r, Fq::ZERO); + assert_eq!(r, G::Scalar::ZERO); } } - #[test] - fn test_compute_all_sum_Mz_evals() { - let z = CCSShape::::get_test_z(3); - let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); + fn test_compute_all_sum_Mz_evals_with() { + let z = CCSShape::::get_test_z(3); + let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); // Generate other artifacts - let ck = CCSShape::::commitment_key(&ccs); + let ck = CCSShape::::commitment_key(&ccs); let (_, _, cccs) = ccs.to_cccs(&mut OsRng, &ck, &z); - let mut r = vec![Fq::ONE, Fq::ZERO]; - let res = compute_all_sum_Mz_evals::(cccs.M_MLE.as_slice(), &z, &r, ccs.s_prime); - assert_eq!(res, vec![Fq::from(9u64), Fq::from(3u64), Fq::from(27u64)]); + let mut r = vec![G::Scalar::ONE, G::Scalar::ZERO]; + let res = compute_all_sum_Mz_evals::(cccs.M_MLE.as_slice(), &z, &r, ccs.s_prime); + assert_eq!( + res, + vec![ + G::Scalar::from(9u64), + G::Scalar::from(3u64), + G::Scalar::from(27u64) + ] + ); r.reverse(); - let res = compute_all_sum_Mz_evals::(cccs.M_MLE.as_slice(), &z, &r, ccs.s_prime); - assert_eq!(res, vec![Fq::from(30u64), Fq::from(1u64), Fq::from(30u64)]) + let res = compute_all_sum_Mz_evals::(cccs.M_MLE.as_slice(), &z, &r, ccs.s_prime); + assert_eq!( + res, + vec![ + G::Scalar::from(30u64), + G::Scalar::from(1u64), + G::Scalar::from(30u64) + ] + ) + } + + #[test] + fn test_fix_variables() { + test_fix_variables_with::(); + } + + #[test] + fn test_compute_sum_Mz_over_boolean_hypercube() { + test_compute_sum_Mz_over_boolean_hypercube_with::(); + } + + #[test] + fn test_compute_all_sum_Mz_evals() { + test_compute_all_sum_Mz_evals_with::(); } } diff --git a/src/hypercube.rs b/src/hypercube.rs index 12a376bc3..66a925e1c 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -64,13 +64,16 @@ mod tests { use ff::Field; use pasta_curves::Fq; - #[test] - fn test_evaluate() { - // Declare the coefficients in the order 1, x, y, xy, z, xz, yz, xyz. 
- let poly = BooleanHypercube::::new(3); + fn test_evaluate_with() { + let poly = BooleanHypercube::::new(3); let point = 7usize; // So, f(1, 1, 1) = 5. - assert_eq!(poly.evaluate_at(point), vec![Fq::ONE, Fq::ONE, Fq::ONE]); + assert_eq!(poly.evaluate_at(point), vec![F::ONE, F::ONE, F::ONE]); + } + + #[test] + fn test_evaluate() { + test_evaluate_with::(); } } diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 55d4e43ca..8dd1dde5d 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -253,101 +253,125 @@ mod tests { use super::*; use pasta_curves::Fp; - fn make_mlp(len: usize, value: Fp) -> MultilinearPolynomial { + fn make_mlp(len: usize, value: F) -> MultilinearPolynomial { MultilinearPolynomial { num_vars: len.count_ones() as usize, Z: vec![value; len], } } - #[test] - fn test_eq_polynomial() { - let eq_poly = EqPolynomial::::new(vec![Fp::one(), Fp::zero(), Fp::one()]); - let y = eq_poly.evaluate(vec![Fp::one(), Fp::one(), Fp::one()].as_slice()); - assert_eq!(y, Fp::zero()); + fn test_eq_polynomial_with() { + let eq_poly = EqPolynomial::::new(vec![F::ONE, F::ZERO, F::ONE]); + let y = eq_poly.evaluate(vec![F::ONE, F::ONE, F::ONE].as_slice()); + assert_eq!(y, F::ZERO); - let y = eq_poly.evaluate(vec![Fp::one(), Fp::zero(), Fp::one()].as_slice()); - assert_eq!(y, Fp::one()); + let y = eq_poly.evaluate(vec![F::ONE, F::ZERO, F::ONE].as_slice()); + assert_eq!(y, F::ONE); let eval_list = eq_poly.evals(); for (i, &coeff) in eval_list.iter().enumerate().take((2_usize).pow(3)) { if i == 5 { - assert_eq!(coeff, Fp::one()); + assert_eq!(coeff, F::ONE); } else { - assert_eq!(coeff, Fp::zero()); + assert_eq!(coeff, F::ZERO); } } } - #[test] - fn test_multilinear_polynomial() { + fn test_multilinear_polynomial_with() { // Let the polynomial has 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * x_3 // Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, 0, 1, 0, 2]. - let TWO = Fp::from(2); + let TWO = F::from(2); let Z = vec![ - Fp::zero(), - Fp::zero(), - Fp::zero(), - Fp::one(), - Fp::zero(), - Fp::one(), - Fp::zero(), + F::ZERO, + F::ZERO, + F::ZERO, + F::ONE, + F::ZERO, + F::ONE, + F::ZERO, TWO, ]; - let m_poly = MultilinearPolynomial::::new(Z.clone()); + let m_poly = MultilinearPolynomial::::new(Z.clone()); assert_eq!(m_poly.get_num_vars(), 3); - let x = vec![Fp::one(), Fp::one(), Fp::one()]; + let x = vec![F::ONE, F::ONE, F::ONE]; assert_eq!(m_poly.evaluate(x.as_slice()), TWO); - let y = MultilinearPolynomial::::evaluate_with(Z.as_slice(), x.as_slice()); + let y = MultilinearPolynomial::::evaluate_with(Z.as_slice(), x.as_slice()); assert_eq!(y, TWO); } - #[test] - fn test_sparse_polynomial() { + fn test_sparse_polynomial_with() { // Let the polynomial has 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * x_3 // Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, 0, 1, 0, 2]. 
- let TWO = Fp::from(2); - let Z = vec![(3, Fp::one()), (5, Fp::one()), (7, TWO)]; - let m_poly = SparsePolynomial::::new(3, Z); + let TWO = F::from(2); + let Z = vec![(3, F::ONE), (5, F::ONE), (7, TWO)]; + let m_poly = SparsePolynomial::::new(3, Z); - let x = vec![Fp::one(), Fp::one(), Fp::one()]; + let x = vec![F::ONE, F::ONE, F::ONE]; assert_eq!(m_poly.evaluate(x.as_slice()), TWO); - let x = vec![Fp::one(), Fp::zero(), Fp::one()]; - assert_eq!(m_poly.evaluate(x.as_slice()), Fp::one()); + let x = vec![F::ONE, F::ZERO, F::ONE]; + assert_eq!(m_poly.evaluate(x.as_slice()), F::ONE); } #[test] - fn test_mlp_add() { - let mlp1 = make_mlp(4, Fp::from(3)); - let mlp2 = make_mlp(4, Fp::from(7)); + fn test_eq_polynomial() { + test_eq_polynomial_with::(); + } + + #[test] + fn test_multilinear_polynomial() { + test_multilinear_polynomial_with::(); + } + + #[test] + fn test_sparse_polynomial() { + test_sparse_polynomial_with::(); + } + + fn test_mlp_add_with() { + let mlp1 = make_mlp(4, F::from(3)); + let mlp2 = make_mlp(4, F::from(7)); let mlp3 = mlp1.add(mlp2).unwrap(); - assert_eq!(mlp3.Z, vec![Fp::from(10); 4]); + assert_eq!(mlp3.Z, vec![F::from(10); 4]); } - #[test] - fn test_mlp_scalar_mul() { - let mlp = make_mlp(4, Fp::from(3)); + fn test_mlp_scalar_mul_with() { + let mlp = make_mlp(4, F::from(3)); - let mlp2 = mlp.scalar_mul(&Fp::from(2)); + let mlp2 = mlp.scalar_mul(&F::from(2)); - assert_eq!(mlp2.Z, vec![Fp::from(6); 4]); + assert_eq!(mlp2.Z, vec![F::from(6); 4]); } - #[test] - fn test_mlp_mul() { - let mlp1 = make_mlp(4, Fp::from(3)); - let mlp2 = make_mlp(4, Fp::from(7)); + fn test_mlp_mul_with() { + let mlp1 = make_mlp(4, F::from(3)); + let mlp2 = make_mlp(4, F::from(7)); let mlp3 = mlp1.mul(mlp2).unwrap(); - assert_eq!(mlp3.Z, vec![Fp::from(21); 4]); + assert_eq!(mlp3.Z, vec![F::from(21); 4]); + } + + #[test] + fn test_mlp_add() { + test_mlp_add_with::(); + } + + #[test] + fn test_mlp_scalar_mul() { + test_mlp_scalar_mul_with::(); + } + + #[test] + fn test_mlp_mul() { + test_mlp_mul_with::(); } } From 1119702976e1476b06e69cd52fa25adbb7ea594a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20P=C3=A9rez?= <37264926+CPerezz@users.noreply.github.com> Date: Mon, 31 Jul 2023 18:44:17 +0200 Subject: [PATCH 096/100] NIMFS-centric API refactor for multifolding impl (#41) This is a proposal for the new API of the Hypernova/multifolding current impl. The idea is that the API is [NIMFS](https://github.com/privacy-scaling-explorations/Nova/blob/change%2FNIMFS_centric_API/src/ccs/multifolding.rs#L39-L44)-centric. That means, the end user only needs to deal for now with `CCS`(this can be prevented) and the `NIMFS` object. The rest of interactions with `CCCS` & `LCCCS` have been "masked" or at the very least, is not necessary to import these structs to have full functionallity. The current workflow that this API brings can be clearly seen here: ```rust let ccs = CCS::::from_r1cs(r1cs_shape); let mles = ccs.matrix_mles(); let mut nimfs = NIMFS::init( &mut rng, ccs, mles, // Note we constructed z on the fly with the previously-used witness. 
vec![ G::Scalar::ONE, G::Scalar::from(3u64), G::Scalar::from(35u64), ], ); // Now, the NIMFS should satisfy correctly as we have inputed valid starting inpuits for the first LCCCS contained instance: assert!(nimfs.is_sat().is_ok()); // Now let's create a valid CCCS instance and fold it: let valid_cccs = nimfs.gen_cccs(vec![ G::Scalar::ONE, G::Scalar::from(2u64), G::Scalar::from(15u64), ]); nimfs.fold(&mut rng, valid_cccs); // Since the instance was correct, the NIMFS should still be satisfied. assert!(nimfs.is_sat().is_ok()); // Now let's create am invalid CCCS instance and fold it: let invalid_cccs = nimfs.gen_cccs(vec![ G::Scalar::ONE, G::Scalar::from(5u64), G::Scalar::from(55u64), ]); nimfs.fold(&mut rng, invalid_cccs); // Since the instance was wrong, the NIMFS should not be satisfied correctly. assert!(nimfs.is_sat().is_err()); ``` This is part of a test, located in https://github.com/privacy-scaling-explorations/Nova/blob/change%2FNIMFS_centric_API/tests/nimfs.rs The idea is that the user needs to type as less as possible in order to get a fold done. --- src/ccs/cccs.rs | 153 ++++++++++--------- src/ccs/lcccs.rs | 115 ++++++++------- src/ccs/mod.rs | 116 ++++----------- src/ccs/multifolding.rs | 302 +++++++++++++++++++------------------- src/ccs/util/mod.rs | 31 ++-- src/lib.rs | 9 +- src/spartan/polynomial.rs | 1 - src/utils.rs | 8 +- tests/nimfs.rs | 106 +++++++++++++ 9 files changed, 450 insertions(+), 391 deletions(-) create mode 100644 tests/nimfs.rs diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index 68ffb520d..c9e0bbeeb 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -28,35 +28,43 @@ use std::sync::Arc; use super::util::compute_sum_Mz; use super::util::virtual_poly::VirtualPolynomial; -use super::{CCSShape, CCSWitness}; +use super::CCS; /// A type that holds the shape of a Committed CCS (CCCS) instance #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] -pub struct CCCSShape { - // Sequence of sparse MLE polynomials in s+s' variables M_MLE1, ..., M_MLEt - pub(crate) M_MLE: Vec>, - - pub(crate) ccs: CCSShape, +pub struct CCCS { + // The `z` vector used as input for this instance. + pub(crate) z: Vec, + // Commitment to the witness of `z`. + pub(crate) w_comm: Commitment, } -/// A type that holds a CCCS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] -#[serde(bound = "")] -pub struct CCCSInstance { - // Commitment to a multilinear polynomial in s' - 1 variables - pub(crate) C: Commitment, +impl CCCS { + /// Generates a new CCCS given a reference to it's original CCS repr and it's public and private inputs. 
+ pub(crate) fn new( + ccs: &CCS, + ccs_matrix_mle: &[MultilinearPolynomial], + z: Vec, + ck: &CommitmentKey, + ) -> Self { + let w_comm = CE::::commit(ck, &z[(1 + ccs.l)..]); - // $x in F^l$ - pub(crate) x: Vec, -} + Self { + z: z.to_vec(), + w_comm, + } + } -impl CCCSShape { - // Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) - // polynomial over x - pub fn compute_q(&self, z: &Vec) -> Result, NovaError> { - let z_mle = dense_vec_to_mle::(self.ccs.s_prime, z); - if z_mle.get_num_vars() != self.ccs.s_prime { + /// Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) + /// polynomial over x + pub(crate) fn compute_q( + &self, + ccs: &CCS, + ccs_mles: &[MultilinearPolynomial], + ) -> Result, NovaError> { + let z_mle = dense_vec_to_mle::(ccs.s_prime, &self.z); + if z_mle.get_num_vars() != ccs.s_prime { // this check if redundant if dense_vec_to_mle is correct return Err(NovaError::VpArith); } @@ -64,11 +72,11 @@ impl CCCSShape { // Using `fold` requires to not have results inside. So we unwrap for now but // a better approach is needed (we ca just keep the for loop otherwise.) Ok( - (0..self.ccs.q).fold(VirtualPolynomial::::new(self.ccs.s), |q, idx| { - let mut prod = VirtualPolynomial::::new(self.ccs.s); + (0..ccs.q).fold(VirtualPolynomial::::new(ccs.s), |q, idx| { + let mut prod = VirtualPolynomial::::new(ccs.s); - for &j in &self.ccs.S[idx] { - let sum_Mz = compute_sum_Mz::(&self.M_MLE[j], &z_mle); + for &j in &ccs.S[idx] { + let sum_Mz = compute_sum_Mz::(&ccs_mles[j], &z_mle); // Fold this sum into the running product if prod.products.is_empty() { @@ -83,7 +91,7 @@ impl CCCSShape { } } // Multiply by the product by the coefficient c_i - prod.scalar_mul(&self.ccs.c[idx]); + prod.scalar_mul(&ccs.c[idx]); // Add it to the running sum q.add(&prod) }), @@ -95,30 +103,28 @@ impl CCCSShape { /// polynomial over x pub fn compute_Q( &self, - z: &Vec, + ccs: &CCS, + ccs_mles: &[MultilinearPolynomial], beta: &[G::Scalar], ) -> Result, NovaError> { - let q = self.compute_q(z)?; + let q = self.compute_q(ccs, ccs_mles)?; q.build_f_hat(beta) } /// Perform the check of the CCCS instance described at section 4.1 pub fn is_sat( &self, + ccs: &CCS, + ccs_mles: &[MultilinearPolynomial], ck: &CommitmentKey, - w: &CCSWitness, - x: &CCCSInstance, ) -> Result<(), NovaError> { // check that C is the commitment of w. Notice that this is not verifying a Pedersen // opening, but checking that the Commmitment comes from committing to the witness. 
- assert_eq!(x.C, CE::::commit(ck, &w.w)); - - // check CCCS relation - let z: Vec = [vec![G::Scalar::ONE], x.x.clone(), w.w.to_vec()].concat(); + assert_eq!(self.w_comm, CE::::commit(ck, &self.z[(1 + ccs.l)..])); // A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in the hypercube - let q_x = self.compute_q(&z).unwrap(); - for x in BooleanHypercube::new(self.ccs.s) { + let q_x = self.compute_q(ccs, ccs_mles).unwrap(); + for x in BooleanHypercube::new(ccs.s) { if !q_x.evaluate(&x).unwrap().is_zero().unwrap_u8() == 0 { return Err(NovaError::UnSat); } @@ -160,52 +166,45 @@ mod tests { fn test_compute_q_with() { let mut rng = OsRng; - let z = CCSShape::::get_test_z(3); - let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); + let z = CCS::::get_test_z(3); + let (ccs, ccs_witness, ccs_instance, mles) = CCS::::gen_test_ccs(&z); // generate ck - let ck = CCSShape::::commitment_key(&ccs_shape); + let ck = CCS::::commitment_key(&ccs); // ensure CCS is satisfied - ccs_shape.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); + ccs.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); // Generate CCCS artifacts - let cccs_shape = ccs_shape.to_cccs_shape(); - - let q = cccs_shape.compute_q(&z).unwrap(); + let cccs = CCCS::new(&ccs, &mles, z, &ck); + let q = cccs.compute_q(&ccs, &mles).unwrap(); // Evaluate inside the hypercube - BooleanHypercube::new(ccs_shape.s).for_each(|x| { + BooleanHypercube::new(ccs.s).for_each(|x| { assert_eq!(G::Scalar::ZERO, q.evaluate(&x).unwrap()); }); // Evaluate outside the hypercube - let beta: Vec = (0..ccs_shape.s) - .map(|_| G::Scalar::random(&mut rng)) - .collect(); + let beta: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); assert_ne!(G::Scalar::ZERO, q.evaluate(&beta).unwrap()); } fn test_compute_Q_with() { let mut rng = OsRng; - let z = CCSShape::::get_test_z(3); - let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); + let z = CCS::::get_test_z(3); + let (ccs, ccs_witness, ccs_instance, mles) = CCS::::gen_test_ccs(&z); // generate ck - let ck = CCSShape::::commitment_key(&ccs_shape); + let ck = CCS::::commitment_key(&ccs); // ensure CCS is satisfied - ccs_shape.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); + ccs.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); // Generate CCCS artifacts - let cccs_shape = ccs_shape.to_cccs_shape(); - - let beta: Vec = (0..ccs_shape.s) - .map(|_| G::Scalar::random(&mut rng)) - .collect(); - + let cccs = CCCS::new(&ccs, &mles, z, &ck); + let beta: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); // Compute Q(x) = eq(beta, x) * q(x). - let Q = cccs_shape - .compute_Q(&z, &beta) + let Q = cccs + .compute_Q(&ccs, &mles, &beta) .expect("Computation of Q should not fail"); // Let's consider the multilinear polynomial G(x) = \sum_{y \in {0, 1}^s} eq(x, y) q(y) @@ -220,7 +219,7 @@ mod tests { // Hence, evaluating G(x) at a random beta should give zero. 
// Now sum Q(x) evaluations in the hypercube and expect it to be 0 - let r = BooleanHypercube::new(ccs_shape.s) + let r = BooleanHypercube::new(ccs.s) .map(|x| Q.evaluate(&x).unwrap()) .fold(G::Scalar::ZERO, |acc, result| acc + result); assert_eq!(r, G::Scalar::ZERO); @@ -229,44 +228,42 @@ mod tests { fn test_Q_against_q_with() { let mut rng = OsRng; - let z = CCSShape::::get_test_z(3); - let (ccs_shape, ccs_witness, ccs_instance) = CCSShape::::gen_test_ccs(&z); + let z = CCS::::get_test_z(3); + let (ccs, ccs_witness, ccs_instance, mles) = CCS::::gen_test_ccs(&z); // generate ck - let ck = CCSShape::::commitment_key(&ccs_shape); + let ck = CCS::::commitment_key(&ccs); // ensure CCS is satisfied - ccs_shape.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); + ccs.is_sat(&ck, &ccs_instance, &ccs_witness).unwrap(); // Generate CCCS artifacts - let cccs_shape = ccs_shape.to_cccs_shape(); - + let cccs = CCCS::new(&ccs, &mles, z, &ck); // Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which // should be equal to q(d), since G(x) interpolates q(x) inside the hypercube - let q = cccs_shape - .compute_q(&z) + let q = cccs + .compute_q(&ccs, &mles) .expect("Computing q shoud not fail"); - for d in BooleanHypercube::new(ccs_shape.s) { - let Q_at_d = cccs_shape - .compute_Q(&z, &d) + + for d in BooleanHypercube::new(ccs.s) { + let Q_at_d = cccs + .compute_Q(&ccs, &mles, &d) .expect("Computing Q_at_d shouldn't fail"); // Get G(d) by summing over Q_d(x) over the hypercube - let G_at_d = BooleanHypercube::new(ccs_shape.s) + let G_at_d = BooleanHypercube::new(ccs.s) .map(|x| Q_at_d.evaluate(&x).unwrap()) .fold(G::Scalar::ZERO, |acc, result| acc + result); assert_eq!(G_at_d, q.evaluate(&d).unwrap()); } // Now test that they should disagree outside of the hypercube - let r: Vec = (0..ccs_shape.s) - .map(|_| G::Scalar::random(&mut rng)) - .collect(); - let Q_at_r = cccs_shape - .compute_Q(&z, &r) + let r: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); + let Q_at_r = cccs + .compute_Q(&ccs, &mles, &r) .expect("Computing Q_at_r shouldn't fail"); // Get G(d) by summing over Q_d(x) over the hypercube - let G_at_r = BooleanHypercube::new(ccs_shape.s) + let G_at_r = BooleanHypercube::new(ccs.s) .map(|x| Q_at_r.evaluate(&x).unwrap()) .fold(G::Scalar::ZERO, |acc, result| acc + result); assert_ne!(G_at_r, q.evaluate(&r).unwrap()); diff --git a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs index 995f3c984..d38e486aa 100644 --- a/src/ccs/lcccs.rs +++ b/src/ccs/lcccs.rs @@ -1,5 +1,5 @@ use super::util::{compute_sum_Mz, VirtualPolynomial}; -use super::{CCSShape, CCSWitness}; +use super::{CCSWitness, CCCS, CCS}; use crate::ccs::util::compute_all_sum_Mz_evals; use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; @@ -23,6 +23,7 @@ use core::{cmp::max, marker::PhantomData}; use ff::{Field, PrimeField}; use flate2::{write::ZlibEncoder, Compression}; use itertools::concat; +use rand_core::RngCore; use rayon::prelude::*; use serde::{Deserialize, Serialize}; use sha3::{Digest, Sha3_256}; @@ -30,44 +31,55 @@ use std::ops::{Add, Mul}; use std::sync::Arc; /// A type that holds a LCCCS instance -#[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "")] pub struct LCCCS { - pub(crate) C: Commitment, - pub(crate) x: Vec, - pub(crate) u: G::Scalar, + pub(crate) w_comm: Commitment, pub(crate) v: Vec, // Random evaluation point for the v_i - pub r_x: Vec, - // This should not need 
to be here. Should be a reference only. - pub(crate) matrix_mles: Vec>, - pub(crate) ccs: CCSShape, + pub(crate) r_x: Vec, + pub(crate) z: Vec, } impl LCCCS { - // XXX: Double check that this is indeed correct. - /// Samples public parameters for the specified number of constraints and variables in an CCS - pub fn commitment_key(&self) -> CommitmentKey { - let total_nz = self.ccs.M.iter().fold(0, |acc, m| acc + m.coeffs().len()); - - G::CE::setup(b"ck", max(max(self.ccs.m, self.ccs.t), total_nz)) + /// Generates a new LCCCS instance from a given randomness, CommitmentKey & witness input vector. + /// This should only be used to probably test or setup the initial NIMFS instance. + pub(crate) fn new( + ccs: &CCS, + ccs_m_mle: &[MultilinearPolynomial], + ck: &CommitmentKey, + z: Vec, + mut rng: &mut R, + ) -> Self { + // XXX: API doesn't offer a way to handle this?? + let _r_w = G::Scalar::random(&mut rng); + let w_comm = <::CE as CommitmentEngineTrait>::commit(ck, &z[(1 + ccs.l)..]); + + // Evaluation points for `v` + let r_x: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); + let v = ccs.compute_v_j(&z, &r_x, ccs_m_mle); + + Self { w_comm, v, r_x, z } } /// Checks if the CCS instance is satisfiable given a witness and its shape - pub fn is_sat(&self, ck: &CommitmentKey, W: &CCSWitness) -> Result<(), NovaError> { + pub fn is_sat( + &self, + ccs: &CCS, + ccs_m_mle: &[MultilinearPolynomial], + ck: &CommitmentKey, + ) -> Result<(), NovaError> { + dbg!(self.z.clone()); + let w = &self.z[(1 + ccs.l)..]; // check that C is the commitment of w. Notice that this is not verifying a Pedersen // opening, but checking that the Commmitment comes from committing to the witness. - let comm_eq = self.C == CE::::commit(ck, &W.w); + let comm_eq = self.w_comm == CE::::commit(ck, w); - // check CCS relation - let z: Vec = [vec![self.u], self.x.clone(), W.w.to_vec()].concat(); - let computed_v = - compute_all_sum_Mz_evals::(&self.matrix_mles, &z, &self.r_x, self.ccs.s_prime); + let computed_v = compute_all_sum_Mz_evals::(ccs_m_mle, &self.z, &self.r_x, ccs.s_prime); + dbg!(self.v.clone()); + dbg!(computed_v.clone()); let vs_eq = computed_v == self.v; - dbg!(vs_eq); - dbg!(comm_eq); - if vs_eq && comm_eq { Ok(()) } else { @@ -75,15 +87,18 @@ impl LCCCS { } } - /// Compute all L_j(x) polynomials - // Can we recieve the MLE of z directy? - pub fn compute_Ls(&self, z: &Vec) -> Vec> { - let z_mle = dense_vec_to_mle(self.ccs.s_prime, z); + /// Compute all L_j(x) polynomials. 
+ pub fn compute_Ls( + &self, + ccs: &CCS, + ccs_m_mle: &[MultilinearPolynomial], + ) -> Vec> { + let z_mle = dense_vec_to_mle(ccs.s_prime, self.z.as_slice()); - let mut vec_L_j_x = Vec::with_capacity(self.ccs.t); - for M_j in self.matrix_mles.iter() { + let mut vec_L_j_x = Vec::with_capacity(ccs.t); + for M_j in ccs_m_mle.iter() { // Sanity check - assert_eq!(z_mle.get_num_vars(), self.ccs.s_prime); + assert_eq!(z_mle.get_num_vars(), ccs.s_prime); let sum_Mz = compute_sum_Mz::(M_j, &z_mle); let sum_Mz_virtual = VirtualPolynomial::new_from_mle(&Arc::new(sum_Mz), G::Scalar::ONE); @@ -104,37 +119,36 @@ mod tests { fn satisfied_ccs_is_satisfied_lcccs_with() { // Gen test vectors & artifacts - let z = CCSShape::::get_test_z(3); - let (ccs, witness, instance) = CCSShape::::gen_test_ccs(&z); + let z = CCS::::get_test_z(3); + let (ccs, witness, instance, mles) = CCS::::gen_test_ccs(&z); let ck = ccs.commitment_key(); assert!(ccs.is_sat(&ck, &instance, &witness).is_ok()); // LCCCS with the correct z should pass - let (lcccs, _) = ccs.to_lcccs(&mut OsRng, &ck, &z); - assert!(lcccs.is_sat(&ck, &witness).is_ok()); + let mut lcccs = LCCCS::new(&ccs, &mles, &ck, z.clone(), &mut OsRng); + assert!(lcccs.is_sat(&ccs, &mles, &ck).is_ok()); // Wrong z so that the relation does not hold let mut bad_z = z; bad_z[3] = G::Scalar::ZERO; // LCCCS with the wrong z should not pass `is_sat`. - // LCCCS with the correct z should pass - let (lcccs, _) = ccs.to_lcccs(&mut OsRng, &ck, &bad_z); - assert!(lcccs.is_sat(&ck, &witness).is_err()); + lcccs.z = bad_z; + assert!(lcccs.is_sat(&ccs, &mles, &ck).is_err()); } fn test_lcccs_v_j_with() { let mut rng = OsRng; // Gen test vectors & artifacts - let z = CCSShape::::get_test_z(3); - let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); + let z = CCS::::get_test_z(3); + let (ccs, _, _, mles) = CCS::::gen_test_ccs(&z); let ck = ccs.commitment_key(); // Get LCCCS - let (lcccs, _) = ccs.to_lcccs(&mut rng, &ck, &z); + let lcccs = LCCCS::new(&ccs, &mles, &ck, z, &mut OsRng); - let vec_L_j_x = lcccs.compute_Ls(&z); + let vec_L_j_x = lcccs.compute_Ls(&ccs, &mles); assert_eq!(vec_L_j_x.len(), lcccs.v.len()); for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { @@ -149,22 +163,25 @@ mod tests { let mut rng = OsRng; // Gen test vectors & artifacts - let z = CCSShape::::get_test_z(3); - let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); + let z = CCS::::get_test_z(3); + let (ccs, witness, instance, mles) = CCS::::gen_test_ccs(&z); let ck = ccs.commitment_key(); // Mutate z so that the relation does not hold let mut bad_z = z.clone(); bad_z[3] = G::Scalar::ZERO; - // Get LCCCS - let (lcccs, _) = ccs.to_lcccs(&mut rng, &ck, &z); + // Compute v_j with the right z + let mut lcccs = LCCCS::new(&ccs, &mles, &ck, z, &mut OsRng); + // Assert LCCCS is satisfied with the original Z + assert!(lcccs.is_sat(&ccs, &mles, &ck).is_ok()); - // Bad compute L_j(x) with the bad z - let vec_L_j_x = lcccs.compute_Ls(&bad_z); + // Compute L_j(x) with the bad z + lcccs.z = bad_z; + let vec_L_j_x = lcccs.compute_Ls(&ccs, &mles); assert_eq!(vec_L_j_x.len(), lcccs.v.len()); // Assert LCCCS is not satisfied with the bad Z - assert!(lcccs.is_sat(&ck, &CCSWitness { w: bad_z }).is_err()); + assert!(lcccs.is_sat(&ccs, &mles, &ck).is_err()); // Make sure that the LCCCS is not satisfied given these L_j(x) // i.e. 
summing L_j(x) over the hypercube should not give v_j for all j diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 5b638aa9b..1b3ba9316 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -32,9 +32,10 @@ use serde::{Deserialize, Serialize}; use sha3::{Digest, Sha3_256}; use std::ops::{Add, Mul}; -use self::cccs::{CCCSInstance, CCCSShape}; -use self::lcccs::LCCCS; -use self::util::compute_all_sum_Mz_evals; +pub use cccs::CCCS; +pub use lcccs::LCCCS; +pub use multifolding::NIMFS; +use util::compute_all_sum_Mz_evals; mod cccs; mod lcccs; @@ -47,7 +48,7 @@ mod util; /// As well as m, n, s, s_prime #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] -pub struct CCSShape { +pub struct CCS { pub(crate) M: Vec>, // Num vars pub(crate) t: usize, @@ -70,8 +71,8 @@ pub struct CCSShape { pub(crate) s_prime: usize, } -impl CCSShape { - /// Create an object of type `CCSShape` from the explicitly specified CCS matrices +impl CCS { + /// Create an object of type `CCS` from the explicitly specified CCS matrices pub fn new( M: &[SparseMatrix], t: usize, @@ -80,7 +81,7 @@ impl CCSShape { d: usize, S: Vec>, c: Vec, - ) -> CCSShape { + ) -> CCS { // Can probably be made more efficient by keeping track fo n_rows/n_cols at creation/insert time let m = M .iter() @@ -102,7 +103,7 @@ impl CCSShape { let s = m.log_2(); let s_prime = n.log_2(); - CCSShape { + CCS { M: M.to_vec(), t, l, @@ -117,77 +118,15 @@ impl CCSShape { } } - pub(crate) fn to_cccs_shape(&self) -> CCCSShape { - let M_mle = self.M.iter().map(|matrix| matrix.to_mle()).collect(); - CCCSShape { - M_MLE: M_mle, - ccs: self.clone(), - } - } - - // Transform the CCS instance into a CCCS instance by providing a commitment key. - pub fn to_cccs( - &self, - rng: &mut R, - ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, - z: &[G::Scalar], - ) -> (CCCSInstance, CCSWitness, CCCSShape) { - let w: Vec = z[(1 + self.l)..].to_vec(); - // XXX: API doesn't offer a way to handle this apparently? - // Need to investigate - let _r_w = G::Scalar::random(rng); - let C = <::CE as CommitmentEngineTrait>::commit(ck, &w); - - ( - CCCSInstance { - C, - x: z[1..(1 + self.l)].to_vec(), - }, - CCSWitness { w }, - self.to_cccs_shape(), - ) - } - - /// Transform the CCS instance into an LCCCS instance by providing a commitment key. - pub fn to_lcccs( - &self, - mut rng: &mut R, - ck: &<::CE as CommitmentEngineTrait>::CommitmentKey, - z: &[G::Scalar], - ) -> (LCCCS, CCSWitness) { - let w: Vec = z[(1 + self.l)..].to_vec(); - let r_w = G::Scalar::random(&mut rng); - let C = <::CE as CommitmentEngineTrait>::commit(ck, &w); - - // XXX: API doesn't offer a way to handle this?? - let _r_x: Vec = (0..self.s).map(|_| G::Scalar::random(&mut rng)).collect(); - - let v = self.compute_v_j(z, &_r_x); - // XXX: Is absurd to compute these again here. We should take care of this. - let matrix_mles: Vec> = - self.M.iter().map(|matrix| matrix.to_mle()).collect(); - - ( - LCCCS:: { - ccs: self.clone(), - C, - u: G::Scalar::ONE, - x: z[1..(1 + self.l)].to_vec(), - r_x: _r_x, - v, - matrix_mles, - }, - CCSWitness:: { w }, - ) - } - /// Compute v_j values of the linearized committed CCS form /// Given `r`, compute: \sum_{y \in {0,1}^s'} M_j(r, y) * z(y) - fn compute_v_j(&self, z: &[G::Scalar], r: &[G::Scalar]) -> Vec { - // XXX: Should these be MLE already? 
- let M_mle: Vec> = - self.M.iter().map(|matrix| matrix.to_mle()).collect(); - compute_all_sum_Mz_evals::(&M_mle, &z.to_vec(), r, self.s_prime) + fn compute_v_j( + &self, + z: &[G::Scalar], + r: &[G::Scalar], + ccs_matrix_mles: &[MultilinearPolynomial], + ) -> Vec { + compute_all_sum_Mz_evals::(ccs_matrix_mles, z, r, self.s_prime) } // XXX: Update commitment_key variables here? This is currently based on R1CS with M length @@ -249,6 +188,7 @@ impl CCSShape { } } + /// Generate a CCS instance from an [`R1CSShape`] instance. pub fn from_r1cs(r1cs: R1CSShape) -> Self { // These contants are used for R1CS-to-CCS, see the paper for more details const T: usize = 3; @@ -283,7 +223,7 @@ impl CCSShape { } } - /// Pads the CCSShape so that the number of variables is a power of two + /// Pads the CCS so that the number of variables is a power of two /// Renumbers variables to accomodate padded variables pub fn pad(&mut self) { let padded_n = self.n.next_power_of_two(); @@ -301,7 +241,14 @@ impl CCSShape { } #[cfg(test)] - pub(crate) fn gen_test_ccs(z: &[G::Scalar]) -> (CCSShape, CCSWitness, CCSInstance) { + pub(crate) fn gen_test_ccs( + z: &[G::Scalar], + ) -> ( + CCS, + CCSWitness, + CCSInstance, + Vec>, + ) { let one = G::Scalar::ONE; let A = vec![ (0, 1, one), @@ -318,16 +265,17 @@ impl CCSShape { // 2. Take R1CS and convert to CCS // TODO: The third argument should be 2 or similar, need to adjust test case // See https://github.com/privacy-scaling-explorations/Nova/issues/30 - let ccs = CCSShape::from_r1cs(R1CSShape::new(4, 6, 1, &A, &B, &C).unwrap()); + let ccs = CCS::from_r1cs(R1CSShape::new(4, 6, 1, &A, &B, &C).unwrap()); // Generate other artifacts - let ck = CCSShape::::commitment_key(&ccs); + let ck = CCS::::commitment_key(&ccs); let ccs_w = CCSWitness::new(z[2..].to_vec()); let ccs_instance = CCSInstance::new(&ccs, &ccs_w.commit(&ck), vec![z[1]]).unwrap(); + let ccs_mles = ccs.M.iter().map(|m| m.to_mle()).collect(); ccs .is_sat(&ck, &ccs_instance, &ccs_w) .expect("This does not fail"); - (ccs, ccs_w, ccs_instance) + (ccs, ccs_w, ccs_instance, ccs_mles) } #[cfg(test)] @@ -378,7 +326,7 @@ pub struct CCSInstance { impl CCSInstance { /// A method to create an instance object using consitituent elements pub fn new( - s: &CCSShape, + s: &CCS, w_comm: &Commitment, x: Vec, ) -> Result, NovaError> { @@ -464,7 +412,7 @@ pub mod test { }; // 2. Take R1CS and convert to CCS - let S = CCSShape::from_r1cs(S); + let S = CCS::from_r1cs(S); // generate generators and ro constants let _ck = S.commitment_key(); diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index ed6c39d3b..3788c5b40 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -1,7 +1,7 @@ -use super::cccs::{CCCSInstance, CCCSShape}; +use super::cccs::{self, CCCS}; use super::lcccs::LCCCS; use super::util::{compute_sum_Mz, VirtualPolynomial}; -use super::{CCSShape, CCSWitness}; +use super::{CCSWitness, CCS}; use crate::ccs::util::compute_all_sum_Mz_evals; use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; @@ -25,58 +25,106 @@ use core::{cmp::max, marker::PhantomData}; use ff::{Field, PrimeField}; use flate2::{write::ZlibEncoder, Compression}; use itertools::concat; +use rand_core::RngCore; use rayon::prelude::*; use serde::{Deserialize, Serialize}; use sha3::{Digest, Sha3_256}; use std::ops::{Add, Mul}; use std::sync::Arc; -// XXX: THe idea is to have Multifolding as IVC instance in the future, holding the main CCS -// instances. Then the rest of CCS, CCCS, LCCCS hold references to it. 
-// Is our single source of data. +/// The NIMFS (Non-Interactive MultiFolding Scheme) structure is the center of operations of the folding scheme. +/// Once generated, it allows us to fold any upcomming CCCS instances within it without needing to do much. +// XXX: Pending to add doc examples. #[derive(Debug)] -pub struct Multifolding { - ccs: CCSShape, +pub struct NIMFS { + ccs: CCS, ccs_mle: Vec>, + ck: CommitmentKey, + lcccs: LCCCS, } -impl Multifolding { - /// Generates a new Multifolding instance based on the given CCS. - pub fn new(ccs: CCSShape) -> Self { - let ccs_mle = ccs.M.iter().map(|matrix| matrix.to_mle()).collect(); - Self { ccs, ccs_mle } +impl NIMFS { + /// Generates a new NIMFS instance based on the given CCS instance, it's matrix mle's, an existing LCCCS instance and a commitment key to the CCS. + pub fn new( + ccs: CCS, + ccs_mle: Vec>, + lcccs: LCCCS, + ck: CommitmentKey, + ) -> Self { + Self { + ccs, + ccs_mle, + ck, + lcccs, + } + } + + /// Initializes a NIMFS instance given the CCS of it and a first witness vector that satifies it. + // XXX: This should probably return an error as we should check whether is satisfied or not. + pub fn init(mut rng: &mut R, ccs: CCS, z: Vec) -> Self { + let ccs_mle: Vec> = + ccs.M.iter().map(|matrix| matrix.to_mle()).collect(); + let w: Vec = z[(1 + ccs.l)..].to_vec(); + let ck = ccs.commitment_key(); + let r_w = G::Scalar::random(&mut rng); + let w_comm = ::CE::commit(&ck, &w); + + let r_x: Vec = vec![G::Scalar::random(&mut rng); ccs.s]; + let v = ccs.compute_v_j(&z, &r_x, &ccs_mle); + + let lcccs: LCCCS = LCCCS::new(&ccs, &ccs_mle, &ck, z, &mut rng); + + Self { + ccs, + ccs_mle, + lcccs, + ck, + } + } + + /// Generates a new [`CCCS`] instance ready to be folded. + pub fn new_cccs(&self, z: Vec) -> CCCS { + CCCS::new(&self.ccs, &self.ccs_mle, z, &self.ck) + } + + /// This function checks whether the current IVC after the last fold performed is satisfied and returns an error if it isn't. + pub fn is_sat(&self) -> Result<(), NovaError> { + self.lcccs.is_sat(&self.ccs, &self.ccs_mle, &self.ck) } -} -impl Multifolding { - /// Compute sigma_i and theta_i from step 4 + /// Compute sigma_i and theta_i from step 4. pub fn compute_sigmas_and_thetas( &self, - z1: &Vec, - z2: &Vec, + // Note `z2` represents the input of the incoming CCCS instance. + // As the current IVC accumulated input is holded inside of the NIMFS(`self`). + z: &[G::Scalar], r_x_prime: &[G::Scalar], ) -> (Vec, Vec) { ( // sigmas - compute_all_sum_Mz_evals::(&self.ccs_mle, z1, r_x_prime, self.ccs.s_prime), + compute_all_sum_Mz_evals::( + &self.ccs_mle, + self.lcccs.z.as_slice(), + r_x_prime, + self.ccs.s_prime, + ), // thetas - compute_all_sum_Mz_evals::(&self.ccs_mle, z2, r_x_prime, self.ccs.s_prime), + compute_all_sum_Mz_evals::(&self.ccs_mle, z, r_x_prime, self.ccs.s_prime), ) } - /// Compute the right-hand-side of step 5 of the multifolding scheme + /// Compute the right-hand-side of step 5 of the NIMFS scheme pub fn compute_c_from_sigmas_and_thetas( &self, sigmas: &[G::Scalar], thetas: &[G::Scalar], gamma: G::Scalar, beta: &[G::Scalar], - r_x: &[G::Scalar], r_x_prime: &[G::Scalar], ) -> G::Scalar { let mut c = G::Scalar::ZERO; - let e1 = eq_eval(r_x, r_x_prime); + let e1 = eq_eval(&self.lcccs.r_x, r_x_prime); let e2 = eq_eval(beta, r_x_prime); // (sum gamma^j * e1 * sigma_j) @@ -101,55 +149,42 @@ impl Multifolding { /// Compute g(x) polynomial for the given inputs. 
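  /// Informally, the polynomial assembled below is (up to indexing)
  ///   g(x) = \sum_j gamma^j * L_j(x) + gamma^{t+1} * Q(x),
  /// with the L_j(x) taken from the running LCCCS instance and Q(x) from the incoming CCCS instance.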
pub fn compute_g( - running_instance: &LCCCS, - cccs_instance: &CCCSShape, - z1: &Vec, - z2: &Vec, + &self, + cccs: &CCCS, gamma: G::Scalar, beta: &[G::Scalar], ) -> VirtualPolynomial { - let mut vec_L = running_instance.compute_Ls(z1); - let mut Q = cccs_instance - .compute_Q(z2, beta) - .expect("TQ comp should not fail"); + let mut vec_L = self.lcccs.compute_Ls(&self.ccs, &self.ccs_mle); + + let mut Q = cccs + .compute_Q(&self.ccs, &self.ccs_mle, beta) + .expect("Q comp should not fail"); + let mut g = vec_L[0].clone(); + for (j, L_j) in vec_L.iter_mut().enumerate().skip(1) { let gamma_j = gamma.pow([j as u64]); L_j.scalar_mul(&gamma_j); g = g.add(L_j); } - let gamma_t1 = gamma.pow([(cccs_instance.ccs.t + 1) as u64]); + + let gamma_t1 = gamma.pow([(self.ccs.t + 1) as u64]); Q.scalar_mul(&gamma_t1); g = g.add(&Q); g } - // XXX: This might need to be mutable if we want to hold an LCCCS instance as the IVC inside the - // NIMFS object. - pub fn fold( - &self, - lcccs1: &LCCCS, - cccs2: &CCCSInstance, - sigmas: &[G::Scalar], - thetas: &[G::Scalar], - r_x_prime: Vec, - rho: G::Scalar, - ) -> LCCCS { - let C = lcccs1.C + cccs2.C.mul(rho); - let u = lcccs1.u + rho; - let x: Vec = lcccs1 - .x - .iter() - .zip( - cccs2 - .x - .iter() - .map(|x_i| *x_i * rho) - .collect::>(), - ) - .map(|(a_i, b_i)| *a_i + b_i) - .collect(); - let v: Vec = sigmas + /// This folds an upcoming CCCS instance into the running LCCCS instance contained within the NIMFS object. + pub fn fold(&mut self, mut rng: &mut R, cccs: CCCS) { + // Compute r_x_prime and rho from a given randomnes. + let r_x_prime = vec![G::Scalar::random(&mut rng); self.ccs.s]; + let rho = G::Scalar::random(&mut rng); + + // Compute sigmas an thetas to fold `v`s. + let (sigmas, thetas) = self.compute_sigmas_and_thetas(&cccs.z, &r_x_prime); + + // Compute new v from sigmas and thetas. + let folded_v: Vec = sigmas .iter() .zip( thetas @@ -160,33 +195,24 @@ impl Multifolding { .map(|(a_i, b_i)| *a_i + b_i) .collect(); - LCCCS { - matrix_mles: lcccs1.matrix_mles.clone(), - C, - ccs: lcccs1.ccs.clone(), - u, - x, - r_x: r_x_prime, - v, - } + // Here we perform steps 7 & 8 of the section 5 of the paper. Were we actually fold LCCCS & CCCS instances. + self.lcccs.w_comm += cccs.w_comm.mul(rho); + self.lcccs.v = folded_v; + self.lcccs.r_x = r_x_prime; + self.fold_z(cccs, rho); } - pub fn fold_witness(w1: &CCSWitness, w2: &CCSWitness, rho: G::Scalar) -> CCSWitness { - let w = w1 - .w - .iter() - .zip( - w2.w - .iter() - .map(|x_i| *x_i * rho) - .collect::>(), - ) - .map(|(a_i, b_i)| *a_i + b_i) - .collect(); + /// Folds the current `z` vector of the upcomming CCCS instance together with the LCCCS instance that is contained inside of the NIMFS object. + fn fold_z(&mut self, cccs: CCCS, rho: G::Scalar) { + // Update u first. + self.lcccs.z[0] += rho; + self.lcccs.z[1..] + .iter_mut() + .zip(cccs.z[1..].iter().map(|x_i| *x_i * rho)) + .for_each(|(a_i, b_i)| *a_i += b_i); // XXX: There's no handling of r_w atm. 
So we will ingore until all folding is implemented, // let r_w = w1.r_w + rho * w2.r_w; - CCSWitness { w } } } @@ -208,15 +234,13 @@ mod tests { use crate::ccs::{test, util::virtual_poly::build_eq_x_r}; use pasta_curves::{Ep, Fq}; use rand_core::OsRng; - // NIMFS: Non Interactive Multifolding Scheme - type NIMFS = Multifolding; fn test_compute_g_with() { - let z1 = CCSShape::::get_test_z(3); - let z2 = CCSShape::::get_test_z(4); + let z1 = CCS::::get_test_z(3); + let z2 = CCS::::get_test_z(4); - let (_, ccs_witness_1, ccs_instance_1) = CCSShape::::gen_test_ccs(&z2); - let (ccs, ccs_witness_2, ccs_instance_2) = CCSShape::::gen_test_ccs(&z1); + let (_, ccs_witness_1, ccs_instance_1, mles) = CCS::::gen_test_ccs(&z2); + let (ccs, ccs_witness_2, ccs_instance_2, _) = CCS::::gen_test_ccs(&z1); let ck = ccs.commitment_key(); assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); @@ -226,18 +250,19 @@ mod tests { let gamma: G::Scalar = G::Scalar::random(&mut rng); let beta: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); - let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &ck, &z1); - let cccs_instance = ccs.to_cccs_shape(); + let lcccs = LCCCS::new(&ccs, &mles, &ck, z1, &mut OsRng); + let cccs = CCCS::new(&ccs, &mles, z2, &ck); let mut sum_v_j_gamma = G::Scalar::ZERO; - for j in 0..lcccs_instance.v.len() { + for j in 0..lcccs.v.len() { let gamma_j = gamma.pow([j as u64]); - sum_v_j_gamma += lcccs_instance.v[j] * gamma_j; + sum_v_j_gamma += lcccs.v[j] * gamma_j; } - // Compute g(x) with that r_x + let nimfs = NIMFS::::new(ccs.clone(), mles.clone(), lcccs.clone(), ck.clone()); - let g = NIMFS::compute_g(&lcccs_instance, &cccs_instance, &z1, &z2, gamma, &beta); + // Compute g(x) with that r_x + let g = nimfs.compute_g(&cccs, gamma, &beta); // evaluate g(x) over x \in {0,1}^s let mut g_on_bhc = G::Scalar::ZERO; @@ -247,7 +272,7 @@ mod tests { // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s let mut sum_Lj_on_bhc = G::Scalar::ZERO; - let vec_L = lcccs_instance.compute_Ls(&z1); + let vec_L = lcccs.compute_Ls(&ccs, &mles); for x in BooleanHypercube::new(ccs.s) { for (j, coeff) in vec_L.iter().enumerate() { let gamma_j = gamma.pow([j as u64]); @@ -268,11 +293,11 @@ mod tests { } fn test_compute_sigmas_and_thetas_with() { - let z1 = CCSShape::::get_test_z(3); - let z2 = CCSShape::::get_test_z(4); + let z1 = CCS::::get_test_z(3); + let z2 = CCS::::get_test_z(4); - let (_, ccs_witness_1, ccs_instance_1) = CCSShape::::gen_test_ccs(&z2); - let (ccs, ccs_witness_2, ccs_instance_2) = CCSShape::::gen_test_ccs(&z1); + let (_, ccs_witness_1, ccs_instance_1, mles) = CCS::::gen_test_ccs(&z2); + let (ccs, ccs_witness_2, ccs_instance_2, _) = CCS::::gen_test_ccs(&z1); let ck: CommitmentKey = ccs.commitment_key(); assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); @@ -283,17 +308,15 @@ mod tests { let beta: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); let r_x_prime: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); - // Initialize a multifolding object - let (lcccs_instance, _) = ccs.to_lcccs(&mut rng, &ck, &z1); - let (cccs_instance) = ccs.to_cccs_shape(); - - // Generate a new multifolding instance - let nimfs = NIMFS::new(ccs.clone()); + let lcccs = LCCCS::new(&ccs, &mles, &ck, z1, &mut OsRng); + let cccs = CCCS::new(&ccs, &mles, z2, &ck); - let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&z1, &z2, &r_x_prime); + // Generate a new NIMFS instance + let nimfs = NIMFS::::new(ccs.clone(), mles.clone(), lcccs, ck.clone()); 
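+    // Sketch of what `compute_sigmas_and_thetas` computes just below: for each CCS matrix M_j,
+    //   sigma_j = \sum_{y \in {0,1}^{s'}} M_j(r_x_prime, y) * z(y) over the z of the running LCCCS held by `nimfs`,
+    //   theta_j = \sum_{y \in {0,1}^{s'}} M_j(r_x_prime, y) * z(y) over the incoming `cccs.z`.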
- let g = NIMFS::compute_g(&lcccs_instance, &cccs_instance, &z1, &z2, gamma, &beta); + let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&cccs.z, &r_x_prime); + let g = nimfs.compute_g(&cccs, gamma, &beta); // Assert `g` is correctly computed here. { // evaluate g(x) over x \in {0,1}^s @@ -303,7 +326,7 @@ mod tests { } // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s let mut sum_Lj_on_bhc = G::Scalar::ZERO; - let vec_L = lcccs_instance.compute_Ls(&z1); + let vec_L = nimfs.lcccs.compute_Ls(&ccs, &mles); for x in BooleanHypercube::new(ccs.s) { for (j, coeff) in vec_L.iter().enumerate() { let gamma_j = gamma.pow([j as u64]); @@ -326,68 +349,43 @@ mod tests { // from `compute_c_from_sigmas_and_thetas` let expected_c = g.evaluate(&revsersed).unwrap(); - let c = nimfs.compute_c_from_sigmas_and_thetas( - &sigmas, - &thetas, - gamma, - &beta, - &lcccs_instance.r_x, - &r_x_prime, - ); + let c = nimfs.compute_c_from_sigmas_and_thetas(&sigmas, &thetas, gamma, &beta, &r_x_prime); assert_eq!(c, expected_c); } - #[test] fn test_compute_g() { test_compute_g_with::(); } fn test_lccs_fold_with() { - let z1 = CCSShape::::get_test_z(3); - let z2 = CCSShape::::get_test_z(4); + let mut rng = OsRng; + + let z1 = CCS::::get_test_z(3); + let z2 = CCS::::get_test_z(4); // ccs stays the same regardless of z1 or z2 - let (ccs, ccs_witness_1, ccs_instance_1) = CCSShape::::gen_test_ccs(&z1); - let (_, ccs_witness_2, ccs_instance_2) = CCSShape::::gen_test_ccs(&z2); + let (ccs, ccs_witness_1, ccs_instance_1, mles) = CCS::::gen_test_ccs(&z1); + let (_, ccs_witness_2, ccs_instance_2, _) = CCS::gen_test_ccs(&z2); let ck: CommitmentKey = ccs.commitment_key(); assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); - let mut rng = OsRng; - let r_x_prime: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); - - // Generate a new multifolding instance - let mut nimfs = NIMFS::new(ccs.clone()); - - let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&z1, &z2, &r_x_prime); - - // Initialize a multifolding object - let (lcccs_instance, lcccs_witness) = ccs.to_lcccs(&mut rng, &ck, &z1); - - let (cccs_instance, cccs_witness, cccs_shape) = ccs.to_cccs(&mut rng, &ck, &z2); - - assert!(lcccs_instance.is_sat(&ck, &lcccs_witness).is_ok()); - - assert!(cccs_shape - .is_sat(&ck, &ccs_witness_2, &cccs_instance) - .is_ok()); - - let rho = G::Scalar::random(&mut rng); - - let folded = nimfs.fold( - &lcccs_instance, - &cccs_instance, - &sigmas, - &thetas, - r_x_prime, - rho, - ); - - let w_folded = NIMFS::fold_witness(&lcccs_witness, &cccs_witness, rho); - - // check lcccs relation - assert!(folded.is_sat(&ck, &w_folded).is_ok()); + // Generate a new NIMFS instance + let mut nimfs = NIMFS::init(&mut rng, ccs.clone(), z1); + assert!(nimfs.is_sat().is_ok()); + + // check folding correct stuff still alows the NIMFS to be satisfied correctly. + let cccs = nimfs.new_cccs(z2); + assert!(cccs.is_sat(&ccs, &mles, &ck).is_ok()); + nimfs.fold(&mut rng, cccs); + assert!(nimfs.is_sat().is_ok()); + + // // Folding garbage should cause a failure + // let cccs = nimfs.new_cccs(vec![Fq::ONE, Fq::ONE, Fq::ONE]); + // nimfs.fold(&mut rng, cccs); + // assert!(nimfs.is_sat().is_err()); + // XXX: Should this indeed pass as it does now? 
} #[test] diff --git a/src/ccs/util/mod.rs b/src/ccs/util/mod.rs index cf8831b41..23616b354 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -26,7 +26,7 @@ use sha3::{Digest, Sha3_256}; use std::ops::{Add, Mul}; use std::sync::Arc; -use super::CCSShape; +use super::CCS; pub(crate) mod virtual_poly; pub(crate) use virtual_poly::VirtualPolynomial; @@ -88,8 +88,7 @@ fn fix_one_variable_helper(data: &[F], nv: usize, point: &F) -> V /// for all j values in 0..self.t pub fn compute_all_sum_Mz_evals( M_x_y_mle: &[MultilinearPolynomial], - // XXX: Can we just get the MLE? - z: &Vec, + z: &[G::Scalar], r: &[G::Scalar], s_prime: usize, ) -> Vec { @@ -113,6 +112,8 @@ pub fn compute_all_sum_Mz_evals( #[cfg(test)] mod tests { + use crate::ccs::cccs::CCCS; + use super::*; use pasta_curves::{Ep, Fq}; use rand_core::OsRng; @@ -181,14 +182,13 @@ mod tests { } fn test_compute_sum_Mz_over_boolean_hypercube_with() { - let z = CCSShape::::get_test_z(3); - let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); + let z = CCS::::get_test_z(3); + let (ccs, _, _, mles) = CCS::::gen_test_ccs(&z); // Generate other artifacts - let ck = CCSShape::::commitment_key(&ccs); - let (_, _, cccs) = ccs.to_cccs(&mut OsRng, &ck, &z); - + let ck = CCS::::commitment_key(&ccs); let z_mle = dense_vec_to_mle(ccs.s_prime, &z); + let cccs = CCCS::new(&ccs, &mles, z, &ck); // check that evaluating over all the values x over the boolean hypercube, the result of // the next for loop is equal to 0 @@ -198,8 +198,7 @@ mod tests { for i in 0..ccs.q { let mut Sj_prod = G::Scalar::ONE; for j in ccs.S[i].clone() { - let sum_Mz: MultilinearPolynomial = - compute_sum_Mz::(&cccs.M_MLE[j], &z_mle); + let sum_Mz: MultilinearPolynomial = compute_sum_Mz::(&mles[j], &z_mle); let sum_Mz_x = sum_Mz.evaluate(&x); Sj_prod *= sum_Mz_x; } @@ -210,15 +209,11 @@ mod tests { } fn test_compute_all_sum_Mz_evals_with() { - let z = CCSShape::::get_test_z(3); - let (ccs, _, _) = CCSShape::::gen_test_ccs(&z); - - // Generate other artifacts - let ck = CCSShape::::commitment_key(&ccs); - let (_, _, cccs) = ccs.to_cccs(&mut OsRng, &ck, &z); + let z = CCS::::get_test_z(3); + let (ccs, _, _, mles) = CCS::::gen_test_ccs(&z); let mut r = vec![G::Scalar::ONE, G::Scalar::ZERO]; - let res = compute_all_sum_Mz_evals::(cccs.M_MLE.as_slice(), &z, &r, ccs.s_prime); + let res = compute_all_sum_Mz_evals::(&mles, z.as_slice(), &r, ccs.s_prime); assert_eq!( res, vec![ @@ -229,7 +224,7 @@ mod tests { ); r.reverse(); - let res = compute_all_sum_Mz_evals::(cccs.M_MLE.as_slice(), &z, &r, ccs.s_prime); + let res = compute_all_sum_Mz_evals::(&mles, z.as_slice(), &r, ccs.s_prime); assert_eq!( res, vec![ diff --git a/src/lib.rs b/src/lib.rs index 8a5040128..d2df81a8d 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -27,17 +27,14 @@ pub mod traits; // experimental modules #[cfg(feature = "hypernova")] -mod ccs; +pub mod ccs; #[cfg(feature = "hypernova")] mod hypercube; #[cfg(feature = "hypernova")] mod utils; -use crate::bellperson::{ - r1cs::{NovaShape, NovaWitness}, - shape_cs::ShapeCS, - solver::SatisfyingAssignment, -}; +pub use crate::bellperson::{r1cs::NovaShape, shape_cs::ShapeCS}; +use crate::bellperson::{r1cs::NovaWitness, solver::SatisfyingAssignment}; use ::bellperson::{Circuit, ConstraintSystem}; use circuit::{NovaAugmentedCircuit, NovaAugmentedCircuitInputs, NovaAugmentedCircuitParams}; use constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_WITHOUT_IO_FOR_CRHF, NUM_HASH_BITS}; diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 8dd1dde5d..b56969e3a 100644 --- 
a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -105,7 +105,6 @@ impl MultilinearPolynomial { let n = self.len() / 2; let (left, right) = self.Z.split_at_mut(n); - // XXX: This literally does nothing at all.. What is this? let (right, _) = right.split_at(n); left diff --git a/src/utils.rs b/src/utils.rs index c1309492f..51e02ccd5 100644 --- a/src/utils.rs +++ b/src/utils.rs @@ -6,6 +6,7 @@ use crate::errors::NovaError; use crate::spartan::polynomial::MultilinearPolynomial; use crate::traits::Group; use ff::{Field, PrimeField}; +use itertools::Itertools; use rand_core::RngCore; use rayon::prelude::{IntoParallelRefMutIterator, ParallelIterator}; use serde::{Deserialize, Serialize}; @@ -119,13 +120,14 @@ pub fn sparse_vec_to_mle( dense_vec_to_mle(n_vars, &padded_vec) } -pub fn dense_vec_to_mle(n_vars: usize, v: &Vec) -> MultilinearPolynomial { +pub fn dense_vec_to_mle(n_vars: usize, v: &[F]) -> MultilinearPolynomial { // Pad to 2^n_vars let v_padded: Vec = [ - v.clone(), + v, std::iter::repeat(F::ZERO) .take((1 << n_vars) - v.len()) - .collect(), + .collect_vec() + .as_slice(), ] .concat(); MultilinearPolynomial::new(v_padded) diff --git a/tests/nimfs.rs b/tests/nimfs.rs new file mode 100644 index 000000000..b5c5e0094 --- /dev/null +++ b/tests/nimfs.rs @@ -0,0 +1,106 @@ +#![cfg(feature = "hypernova")] +use std::marker::PhantomData; + +use bellperson::{gadgets::num::AllocatedNum, ConstraintSystem, SynthesisError}; +use ff::{Field, PrimeField}; +use nova_snark::{ + ccs::{CCS, NIMFS}, + traits::{circuit::StepCircuit, Group}, + NovaShape, ShapeCS, +}; +use pasta_curves::Ep; +use rand_core::OsRng; + +#[derive(Clone, Debug, Default)] +struct CubicCircuit { + _p: PhantomData, +} + +impl StepCircuit for CubicCircuit +where + F: PrimeField, +{ + fn arity(&self) -> usize { + 1 + } + + fn synthesize>( + &self, + cs: &mut CS, + z: &[AllocatedNum], + ) -> Result>, SynthesisError> { + // Consider a cubic equation: `x^3 + x + 5 = y`, where `x` and `y` are respectively the input and output. + let x = &z[0]; + let x_sq = x.square(cs.namespace(|| "x_sq"))?; + let x_cu = x_sq.mul(cs.namespace(|| "x_cu"), x)?; + let y = &z[1]; + + cs.enforce( + || "y = x^3 + x + 5", + |lc| { + lc + x_cu.get_variable() + + x.get_variable() + + CS::one() + + CS::one() + + CS::one() + + CS::one() + + CS::one() + }, + |lc| lc + CS::one(), + |lc| lc + y.get_variable(), + ); + + Ok(vec![y.clone()]) + } + + fn output(&self, z: &[F]) -> Vec { + vec![z[0] * z[0] * z[0] + z[0] + F::from(5u64)] + } +} + +#[test] +fn integration_folding() { + integration_folding_test::() +} + +fn integration_folding_test() { + // Generate some randomness. + let mut rng = OsRng; + + let circuit = CubicCircuit::::default(); + let mut cs: ShapeCS = ShapeCS::new(); + // Generate the inputs: + // Here we need both the R1CSShape so that we can generate the CCS -> NIMFS and also the witness values. + let three = AllocatedNum::alloc(&mut cs, || Ok(G::Scalar::from(3u64))).unwrap(); + let thirty_five = AllocatedNum::alloc(&mut cs, || Ok(G::Scalar::from(35u64))).unwrap(); + let _ = circuit.synthesize(&mut cs, &[three, thirty_five]); + let (r1cs_shape, _) = cs.r1cs_shape(); + + let ccs = CCS::::from_r1cs(r1cs_shape); + + // Generate NIMFS object. + let mut nimfs = NIMFS::init( + &mut rng, + ccs, + // Note we constructed z on the fly with the previously-used witness. 
+ vec![ + G::Scalar::ONE, + G::Scalar::from(3u64), + G::Scalar::from(35u64), + ], + ); + + // Now, the NIMFS should satisfy correctly as we have inputed valid starting inpuits for the first LCCCS contained instance: + assert!(nimfs.is_sat().is_ok()); + + // Now let's create a valid CCCS instance and fold it: + let valid_cccs = nimfs.new_cccs(vec![ + G::Scalar::ONE, + G::Scalar::from(2u64), + G::Scalar::from(15u64), + ]); + nimfs.fold(&mut rng, valid_cccs); + + // Since the instance was correct, the NIMFS should still be satisfied. + assert!(nimfs.is_sat().is_ok()); +} From bb7a651faade52153738920a8bf5c09cefab4d52 Mon Sep 17 00:00:00 2001 From: oskarth Date: Wed, 2 Aug 2023 08:57:38 +0200 Subject: [PATCH 097/100] Use polynomial.rs instead of virtual_poly for equality polynomial (#42) - Remove custom eq_poly and use EqPolynomial instead - Update polynomial docs - Ensures order of evaluation is consistent between for VirtualPoly and EqPolynomial - Remove old build_eq_x_r_vec and build_eq_x_r_helper - Tests around boolean hypercube and other introduced functions to make endianness explicit etc Addresses https://github.com/privacy-scaling-explorations/Nova/issues/19 --- src/ccs/multifolding.rs | 20 +---- src/ccs/util/virtual_poly.rs | 145 +++++++++++++++++++---------------- src/hypercube.rs | 94 ++++++++++++++++++++++- src/spartan/mod.rs | 2 +- src/spartan/polynomial.rs | 63 +++++++++++---- 5 files changed, 224 insertions(+), 100 deletions(-) diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index 3788c5b40..546a15ebc 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -5,7 +5,7 @@ use super::{CCSWitness, CCS}; use crate::ccs::util::compute_all_sum_Mz_evals; use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; -use crate::spartan::polynomial::MultilinearPolynomial; +use crate::spartan::polynomial::{EqPolynomial, MultilinearPolynomial}; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, @@ -124,8 +124,8 @@ impl NIMFS { ) -> G::Scalar { let mut c = G::Scalar::ZERO; - let e1 = eq_eval(&self.lcccs.r_x, r_x_prime); - let e2 = eq_eval(beta, r_x_prime); + let e1 = EqPolynomial::new(self.lcccs.r_x.to_vec()).evaluate(r_x_prime); + let e2 = EqPolynomial::new(beta.to_vec()).evaluate(r_x_prime); // (sum gamma^j * e1 * sigma_j) for (j, sigma_j) in sigmas.iter().enumerate() { @@ -216,22 +216,10 @@ impl NIMFS { } } -/// Evaluate eq polynomial. -pub fn eq_eval(x: &[F], y: &[F]) -> F { - assert_eq!(x.len(), y.len()); - - let mut res = F::ONE; - for (&xi, &yi) in x.iter().zip(y.iter()) { - let xi_yi = xi * yi; - res *= xi_yi + xi_yi - xi - yi + F::ONE; - } - res -} - #[cfg(test)] mod tests { use super::*; - use crate::ccs::{test, util::virtual_poly::build_eq_x_r}; + use crate::ccs::test; use pasta_curves::{Ep, Fq}; use rand_core::OsRng; diff --git a/src/ccs/util/virtual_poly.rs b/src/ccs/util/virtual_poly.rs index 5ecf94854..f1bcdf2a8 100644 --- a/src/ccs/util/virtual_poly.rs +++ b/src/ccs/util/virtual_poly.rs @@ -1,6 +1,6 @@ use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; -use crate::spartan::polynomial::MultilinearPolynomial; +use crate::spartan::polynomial::{EqPolynomial, MultilinearPolynomial}; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, @@ -29,12 +29,11 @@ use std::collections::HashMap; use std::ops::{Add, Mul}; use std::sync::Arc; -// A bit of collage-programming here. 
-// As a tmp way to have multilinear polynomial product+addition. +// NOTE: This is a temporary solution to have multilinear polynomial product+addition. // The idea is to re-evaluate once everything works and decide if we replace this code // by something else. // -// THIS CODE HAS BEEN TAKEN FpOM THE ESPRESSO SYSTEMS LIB: +// THIS CODE HAS BEEN TAKEN FROM THE ESPRESSO SYSTEMS LIB AND ADAPTED TO OUR NEEDS.: // // #[rustfmt::skip] @@ -305,6 +304,7 @@ impl VirtualPolynomial { } let eq_x_r = build_eq_x_r(r)?; + let mut res = self.clone(); res.mul_by_mle(eq_x_r, F::ONE)?; @@ -319,76 +319,43 @@ impl VirtualPolynomial { /// over r, which is /// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) pub fn build_eq_x_r(r: &[F]) -> Result>, NovaError> { - let evals = build_eq_x_r_vec(r)?; - let mle = MultilinearPolynomial::new(evals); + let eq_polynomial = EqPolynomial::new(r.to_vec()); + let mut evaluations = eq_polynomial.evals(); + + // Re-orders the evaluations to match endianness of VirtualPoly + // + // NOTE: We probably want to benchmark this, + // but given that numbers of evaluations is small it might not be that bad + let permutation = generate_permutation(evaluations.len()); + reorder_vector(&mut evaluations, &permutation); + + let mle = MultilinearPolynomial::new(evaluations); Ok(Arc::new(mle)) } -/// This function build the eq(x, r) polynomial for any given r, and output the -/// evaluation of eq(x, r) in its vector form. -/// -/// Evaluate -/// eq(x,y) = \prod_i=1^num_var (x_i * y_i + (1-x_i)*(1-y_i)) -/// over r, which is -/// eq(x,y) = \prod_i=1^num_var (x_i * r_i + (1-x_i)*(1-r_i)) -pub fn build_eq_x_r_vec(r: &[F]) -> Result, NovaError> { - // we build eq(x,r) Fpom its evaluations - // we want to evaluate eq(x,r) over x \in {0, 1}^num_vars - // for example, with num_vars = 4, x is a binary vector of 4, then - // 0 0 0 0 -> (1-r0) * (1-r1) * (1-r2) * (1-r3) - // 1 0 0 0 -> r0 * (1-r1) * (1-r2) * (1-r3) - // 0 1 0 0 -> (1-r0) * r1 * (1-r2) * (1-r3) - // 1 1 0 0 -> r0 * r1 * (1-r2) * (1-r3) - // .... - // 1 1 1 1 -> r0 * r1 * r2 * r3 - // we will need 2^num_var evaluations - - let mut eval = Vec::new(); - build_eq_x_r_helper(r, &mut eval)?; - - Ok(eval) +/// Generates a permutation vector for a size `n` by reversing binary indices. +fn generate_permutation(n: usize) -> Vec { + (0..n) + .map(|i| { + let log_n = (n as f64).log2() as usize; // number of bits needed for the index + let mut res = 0; + for j in 0..log_n { + let bit = (i >> j) & 1; + res |= bit << (log_n - 1 - j); + } + res + }) + .collect() } -/// A helper function to build eq(x, r) recursively. -/// This function takes `r.len()` steps, and for each step it requires a maximum -/// `r.len()-1` multiplications. 
-fn build_eq_x_r_helper(r: &[F], buf: &mut Vec) -> Result<(), NovaError> { - if r.is_empty() { - return Err(NovaError::VpArith); - } else if r.len() == 1 { - // initializing the buffer with [1-r_0, r_0] - buf.push(F::ONE - r[0]); - buf.push(r[0]); - } else { - build_eq_x_r_helper(&r[1..], buf)?; - - // suppose at the previous step we received [b_1, ..., b_k] - // for the current step we will need - // if x_0 = 0: (1-r0) * [b_1, ..., b_k] - // if x_0 = 1: r0 * [b_1, ..., b_k] - // let mut res = vec![]; - // for &b_i in buf.iter() { - // let tmp = r[0] * b_i; - // res.push(b_i - tmp); - // res.push(tmp); - // } - // *buf = res; - - let mut res = vec![F::ZERO; buf.len() << 1]; - res.par_iter_mut().enumerate().for_each(|(i, val)| { - let bi = buf[i >> 1]; - let tmp = r[0] * bi; - if i & 1 == 0 { - *val = bi - tmp; - } else { - *val = tmp; - } - }); - *buf = res; +/// Reorders a vector based on a given permutation vector. +fn reorder_vector(vec: &mut Vec, permutation: &[usize]) { + let mut temp = vec.clone(); + for (i, &index) in permutation.iter().enumerate() { + temp[i] = vec[index].clone(); } - - Ok(()) + *vec = temp; } #[cfg(test)] @@ -495,4 +462,48 @@ mod test { Arc::new(mle) } + + #[cfg(test)] + mod tests { + use super::*; + use pasta_curves::Fq; + use rand_core::OsRng; + + #[test] + fn test_generate_permutation() { + assert_eq!(generate_permutation(2), vec![0, 1]); + assert_eq!(generate_permutation(4), vec![0, 2, 1, 3]); + assert_eq!(generate_permutation(8), vec![0, 4, 2, 6, 1, 5, 3, 7]); + } + + #[test] + fn test_reorder_vector() { + let mut vec = vec![10, 20, 30, 40, 50, 60, 70, 80]; + let permutation = vec![0, 4, 2, 6, 1, 5, 3, 7]; + reorder_vector(&mut vec, &permutation); + assert_eq!(vec, vec![10, 50, 30, 70, 20, 60, 40, 80]); + } + + #[test] + fn test_build_f_hat() { + let mut rng = OsRng; + let num_vars = 3; // You can change this value according to your requirement + + // Create a VirtualPolynomial + let poly = VirtualPolynomial::::new(num_vars); + let r: Vec = (0..num_vars).map(|_| Fq::random(&mut rng)).collect(); + + // Test with correct input length + let result = poly.build_f_hat(&r); + assert!(result.is_ok(), "Failed with correct input length"); + + // Test with incorrect input length + let bad_r: Vec = (0..num_vars + 1).map(|_| Fq::random(&mut rng)).collect(); + let result = poly.build_f_hat(&bad_r); + assert!( + matches!(result, Err(NovaError::VpArith)), + "Did not fail with incorrect input length" + ); + } + } } diff --git a/src/hypercube.rs b/src/hypercube.rs index 66a925e1c..373290233 100644 --- a/src/hypercube.rs +++ b/src/hypercube.rs @@ -25,11 +25,23 @@ impl BooleanHypercube { } /// returns the entry at given i (which is the big-endian bit representation of i) - pub(crate) fn evaluate_at(&self, i: usize) -> Vec { + pub(crate) fn evaluate_at_big(&self, i: usize) -> Vec { + assert!(i < self.max as usize); + let bits = bit_decompose((i) as u64, self.n_vars); + bits.iter().map(|&x| F::from(x as u64)).collect() + } + + /// returns the entry at given i (which is the little-endian bit representation of i) + pub(crate) fn evaluate_at_little(&self, i: usize) -> Vec { assert!(i < self.max as usize); let bits = bit_decompose((i) as u64, self.n_vars); bits.iter().map(|&x| F::from(x as u64)).rev().collect() } + + pub(crate) fn evaluate_at(&self, i: usize) -> Vec { + // This is what we are currently using + self.evaluate_at_little(i) + } } impl Iterator for BooleanHypercube { @@ -72,8 +84,88 @@ mod tests { assert_eq!(poly.evaluate_at(point), vec![F::ONE, F::ONE, F::ONE]); } + fn 
test_big_endian_eval_with() { + let mut hypercube = BooleanHypercube::::new(3); + + let expected_outputs = vec![ + vec![F::ZERO, F::ZERO, F::ZERO], + vec![F::ONE, F::ZERO, F::ZERO], + vec![F::ZERO, F::ONE, F::ZERO], + vec![F::ONE, F::ONE, F::ZERO], + vec![F::ZERO, F::ZERO, F::ONE], + vec![F::ONE, F::ZERO, F::ONE], + vec![F::ZERO, F::ONE, F::ONE], + vec![F::ONE, F::ONE, F::ONE], + ]; + + for (i, _) in expected_outputs + .iter() + .enumerate() + .take(hypercube.max as usize) + { + assert_eq!(hypercube.evaluate_at_big(i), expected_outputs[i]); + } + } + + fn test_big_endian_next_with() { + let mut hypercube = BooleanHypercube::::new(3); + + let expected_outputs = vec![ + vec![F::ZERO, F::ZERO, F::ZERO], + vec![F::ONE, F::ZERO, F::ZERO], + vec![F::ZERO, F::ONE, F::ZERO], + vec![F::ONE, F::ONE, F::ZERO], + vec![F::ZERO, F::ZERO, F::ONE], + vec![F::ONE, F::ZERO, F::ONE], + vec![F::ZERO, F::ONE, F::ONE], + vec![F::ONE, F::ONE, F::ONE], + ]; + + for expected_output in expected_outputs { + let actual_output = hypercube.next().unwrap(); + assert_eq!(actual_output, expected_output); + } + } + + fn test_little_endian_eval_with() { + let mut hypercube = BooleanHypercube::::new(3); + + let expected_outputs = vec![ + vec![F::ZERO, F::ZERO, F::ZERO], + vec![F::ZERO, F::ZERO, F::ONE], + vec![F::ZERO, F::ONE, F::ZERO], + vec![F::ZERO, F::ONE, F::ONE], + vec![F::ONE, F::ZERO, F::ZERO], + vec![F::ONE, F::ZERO, F::ONE], + vec![F::ONE, F::ONE, F::ZERO], + vec![F::ONE, F::ONE, F::ONE], + ]; + + for (i, _) in expected_outputs + .iter() + .enumerate() + .take(hypercube.max as usize) + { + assert_eq!(hypercube.evaluate_at_little(i), expected_outputs[i]); + } + } + #[test] fn test_evaluate() { test_evaluate_with::(); } + #[test] + fn test_big_endian_eval() { + test_big_endian_eval_with::(); + } + + #[test] + fn test_big_endian_next() { + test_big_endian_next_with::(); + } + + #[test] + fn test_little_endian_eval() { + test_little_endian_eval_with::(); + } } diff --git a/src/spartan/mod.rs b/src/spartan/mod.rs index cfb3222bd..23a35f710 100644 --- a/src/spartan/mod.rs +++ b/src/spartan/mod.rs @@ -5,7 +5,7 @@ //! We also provide direct.rs that allows proving a step circuit directly with either of the two SNARKs. pub mod direct; pub(crate) mod math; -pub(crate) mod polynomial; +pub mod polynomial; pub mod ppsnark; pub mod snark; mod sumcheck; diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index b56969e3a..97255d652 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -1,4 +1,9 @@ -//! This module defines basic types related to polynomials +//! This module provides foundational types and functions for manipulating multilinear polynomials in the context of cryptographic computations. +//! +//! Main components: +//! - `EqPolynomial`: Represents multilinear extension of equality polynomials, evaluated based on binary input values. +//! - `MultilinearPolynomial`: Dense representation of multilinear polynomials, represented by evaluations over all possible binary inputs. +//! - `SparsePolynomial`: Efficient representation of sparse multilinear polynomials, storing only non-zero evaluations. use core::ops::Index; use ff::PrimeField; use rayon::prelude::*; @@ -7,28 +12,35 @@ use std::ops::{Add, Mul}; use crate::spartan::math::Math; -/// The multilinear extension polynomial, denoted as $\tilde{eq}$, is defined as follows: +/// Represents the multilinear extension polynomial (MLE) of the equality polynomial $eq(x,e)$, denoted as $\tilde{eq}(x, e)$. 
/// +/// The polynomial is defined by the formula: /// $$ /// \tilde{eq}(x, e) = \prod_{i=0}^m(e_i * x_i + (1 - e_i) * (1 - x_i)) /// $$ /// -/// This polynomial evaluates to 1 only when each component $x_i$ is equal to its corresponding component $e_i$. -/// Otherwise, it evaluates to 0. +/// Each element in the vector `r` corresponds to a component $e_i$, representing a bit from the binary representation of an input value $e$. +/// This polynomial evaluates to 1 if every component $x_i$ equals its corresponding $e_i$, and 0 otherwise. /// -/// The vector r contains all the values of e_i, where e_i represents the individual bits of a binary representation of e. -/// For example, let's consider e = 6, which in binary is 0b110. In this case, the vector r would be [1, 1, 0]. -pub(crate) struct EqPolynomial { +/// For instance, for e = 6 (with a binary representation of 0b110), the vector r would be [1, 1, 0]. +pub struct EqPolynomial { r: Vec, } impl EqPolynomial { - /// Creates a new polynomial from its succinct specification + /// Creates a new `EqPolynomial` from a vector of Scalars `r`. + /// + /// Each Scalar in `r` corresponds to a bit from the binary representation of an input value `e`. pub fn new(r: Vec) -> Self { EqPolynomial { r } } - /// Evaluates the polynomial at the specified point + /// Evaluates the `EqPolynomial` at a given point `rx`. + /// + /// This function computes the value of the polynomial at the point specified by `rx`. + /// It expects `rx` to have the same length as the internal vector `r`. + /// + /// Panics if `rx` and `r` have different lengths. pub fn evaluate(&self, rx: &[Scalar]) -> Scalar { assert_eq!(self.r.len(), rx.len()); (0..rx.len()) @@ -36,7 +48,9 @@ impl EqPolynomial { .fold(Scalar::ONE, |acc, item| acc * item) } - /// Evaluates the polynomial at all the `2^|r|` points, ranging from 0 to `2^|r| - 1`. + /// Evaluates the `EqPolynomial` at all the `2^|r|` points in its domain. + /// + /// Returns a vector of Scalars, each corresponding to the polynomial evaluation at a specific point. pub fn evals(&self) -> Vec { let ell = self.r.len(); let mut evals: Vec = vec![Scalar::ZERO; (2_usize).pow(ell as u32)]; @@ -57,11 +71,12 @@ impl EqPolynomial { size *= 2; } + evals } } -/// A multilinear extension of a polynomial $Z(\cdot)$, donate it as $\tilde{Z}(x_1, ..., x_m)$ +/// A multilinear extension of a polynomial $Z(\cdot)$, denote it as $\tilde{Z}(x_1, ..., x_m)$ /// where the degree of each variable is at most one. /// /// This is the dense representation of a multilinear poynomial. @@ -84,6 +99,9 @@ pub struct MultilinearPolynomial { } impl MultilinearPolynomial { + /// Creates a new MultilinearPolynomial from the given evaluations. + /// + /// The number of evaluations must be a power of two. pub fn new(Z: Vec) -> Self { assert_eq!(Z.len(), (2_usize).pow((Z.len() as f64).log2() as u32)); MultilinearPolynomial { @@ -92,15 +110,26 @@ impl MultilinearPolynomial { } } + /// Returns the number of variables in the multilinear polynomial pub fn get_num_vars(&self) -> usize { self.num_vars } + /// Returns the total number of evaluations. pub fn len(&self) -> usize { self.Z.len() } - // NOTE: this is equivalent to Espresso/hyperplonk's 'fix_last_variables' mehthod + /// Checks if the multilinear polynomial is empty. + /// + /// This method returns true if the polynomial has no evaluations, and false otherwise. + pub fn is_empty(&self) -> bool { + self.Z.is_empty() + } + + /// Bounds the polynomial's top variable using the given scalar. 
+ /// + /// This operation modifies the polynomial in-place. pub fn bound_poly_var_top(&mut self, r: &Scalar) { let n = self.len() / 2; @@ -118,7 +147,10 @@ impl MultilinearPolynomial { self.num_vars -= 1; } - // returns Z(r) in O(n) time + /// Evaluates the polynomial at the given point. + /// Returns Z(r) in O(n) time. + /// + /// The point must have a value for each variable. pub fn evaluate(&self, r: &[Scalar]) -> Scalar { // r must have a value for each variable assert_eq!(r.len(), self.get_num_vars()); @@ -131,6 +163,7 @@ impl MultilinearPolynomial { .reduce(|| Scalar::ZERO, |x, y| x + y) } + /// Evaluates the polynomial with the given evaluations and point. pub fn evaluate_with(Z: &[Scalar], r: &[Scalar]) -> Scalar { EqPolynomial::new(r.to_vec()) .evals() @@ -140,7 +173,7 @@ impl MultilinearPolynomial { .reduce(|| Scalar::ZERO, |x, y| x + y) } - // Multiplies `self` by a scalar. + /// Multiplies the polynomial by a scalar. #[allow(unused)] pub fn scalar_mul(&self, scalar: &Scalar) -> Self { let mut new_poly = self.clone(); @@ -304,7 +337,7 @@ mod tests { } fn test_sparse_polynomial_with() { - // Let the polynomial has 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * x_3 + // Let the polynomial have 3 variables, p(x_1, x_2, x_3) = (x_1 + x_2) * x_3 // Evaluations of the polynomial at boolean cube are [0, 0, 0, 1, 0, 1, 0, 2]. let TWO = F::from(2); From 0c8915b4ee92058fd073c10838498c8199a4bfcd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20P=C3=A9rez?= <37264926+CPerezz@users.noreply.github.com> Date: Wed, 2 Aug 2023 12:14:18 +0200 Subject: [PATCH 098/100] Transcript usage inclusion into the codebase (#43) The PR introduces usage from the associated trait/type of `Group` called `TranscriptEngineTrait` to perform the Fiat-Shamir inside the multifolding for the Sigmas and Thetas computations as well as gammas. This should be rebased on the top of `main` once #41 is merged and then reviewed. As there could be places where FS should be applied but it isn't. --- src/ccs/lcccs.rs | 17 ++++++----- src/ccs/multifolding.rs | 65 ++++++++++++++++++++++++++++------------- src/provider/pasta.rs | 9 ++++++ tests/nimfs.rs | 8 ++--- 4 files changed, 64 insertions(+), 35 deletions(-) diff --git a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs index d38e486aa..494c6e5cb 100644 --- a/src/ccs/lcccs.rs +++ b/src/ccs/lcccs.rs @@ -44,19 +44,16 @@ pub struct LCCCS { impl LCCCS { /// Generates a new LCCCS instance from a given randomness, CommitmentKey & witness input vector. /// This should only be used to probably test or setup the initial NIMFS instance. - pub(crate) fn new( + pub(crate) fn new( ccs: &CCS, ccs_m_mle: &[MultilinearPolynomial], ck: &CommitmentKey, z: Vec, - mut rng: &mut R, + r_x: Vec, ) -> Self { - // XXX: API doesn't offer a way to handle this?? 
- let _r_w = G::Scalar::random(&mut rng); let w_comm = <::CE as CommitmentEngineTrait>::commit(ck, &z[(1 + ccs.l)..]); // Evaluation points for `v` - let r_x: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); let v = ccs.compute_v_j(&z, &r_x, ccs_m_mle); Self { w_comm, v, r_x, z } @@ -125,7 +122,8 @@ mod tests { assert!(ccs.is_sat(&ck, &instance, &witness).is_ok()); // LCCCS with the correct z should pass - let mut lcccs = LCCCS::new(&ccs, &mles, &ck, z.clone(), &mut OsRng); + let r_x: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut OsRng)).collect(); + let mut lcccs = LCCCS::new(&ccs, &mles, &ck, z.clone(), r_x); assert!(lcccs.is_sat(&ccs, &mles, &ck).is_ok()); // Wrong z so that the relation does not hold @@ -145,8 +143,10 @@ mod tests { let (ccs, _, _, mles) = CCS::::gen_test_ccs(&z); let ck = ccs.commitment_key(); + let r_x: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); + // Get LCCCS - let lcccs = LCCCS::new(&ccs, &mles, &ck, z, &mut OsRng); + let lcccs = LCCCS::new(&ccs, &mles, &ck, z, r_x); let vec_L_j_x = lcccs.compute_Ls(&ccs, &mles); assert_eq!(vec_L_j_x.len(), lcccs.v.len()); @@ -172,7 +172,8 @@ mod tests { bad_z[3] = G::Scalar::ZERO; // Compute v_j with the right z - let mut lcccs = LCCCS::new(&ccs, &mles, &ck, z, &mut OsRng); + let r_x: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); + let mut lcccs = LCCCS::new(&ccs, &mles, &ck, z, r_x); // Assert LCCCS is satisfied with the original Z assert!(lcccs.is_sat(&ccs, &mles, &ck).is_ok()); diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index 546a15ebc..6867ef3f1 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -6,6 +6,7 @@ use crate::ccs::util::compute_all_sum_Mz_evals; use crate::hypercube::BooleanHypercube; use crate::spartan::math::Math; use crate::spartan::polynomial::{EqPolynomial, MultilinearPolynomial}; +use crate::traits::{TranscriptEngineTrait, TranscriptReprTrait}; use crate::{ constants::{BN_LIMB_WIDTH, BN_N_LIMBS, NUM_FE_FOR_RO, NUM_HASH_BITS}, errors::NovaError, @@ -41,6 +42,7 @@ pub struct NIMFS { ccs_mle: Vec>, ck: CommitmentKey, lcccs: LCCCS, + transcript: G::TE, } impl NIMFS { @@ -56,29 +58,40 @@ impl NIMFS { ccs_mle, ck, lcccs, + transcript: TranscriptEngineTrait::new(b"NIMFS"), } } /// Initializes a NIMFS instance given the CCS of it and a first witness vector that satifies it. // XXX: This should probably return an error as we should check whether is satisfied or not. - pub fn init(mut rng: &mut R, ccs: CCS, z: Vec) -> Self { + pub fn init(ccs: CCS, z: Vec, label: &'static [u8]) -> Self { + let mut transcript: G::TE = TranscriptEngineTrait::new(label); let ccs_mle: Vec> = ccs.M.iter().map(|matrix| matrix.to_mle()).collect(); + + // Add the first round of witness to the transcript. let w: Vec = z[(1 + ccs.l)..].to_vec(); + TranscriptEngineTrait::::absorb(&mut transcript, b"og_w", &w); + let ck = ccs.commitment_key(); - let r_w = G::Scalar::random(&mut rng); let w_comm = ::CE::commit(&ck, &w); - let r_x: Vec = vec![G::Scalar::random(&mut rng); ccs.s]; - let v = ccs.compute_v_j(&z, &r_x, &ccs_mle); + // Query challenge to get initial `r_x`. + let r_x: Vec = vec![ + TranscriptEngineTrait::::squeeze(&mut transcript, b"r_x") + .expect("This should never fail"); + ccs.s + ]; - let lcccs: LCCCS = LCCCS::new(&ccs, &ccs_mle, &ck, z, &mut rng); + // Gen LCCCS initial instance. 
+ let lcccs: LCCCS = LCCCS::new(&ccs, &ccs_mle, &ck, z, r_x); Self { ccs, ccs_mle, lcccs, ck, + transcript, } } @@ -87,6 +100,15 @@ impl NIMFS { CCCS::new(&self.ccs, &self.ccs_mle, z, &self.ck) } + /// Generates a new `r_x` vector using the NIMFS challenge query method. + pub(crate) fn gen_r_x(&mut self) -> Vec { + vec![ + TranscriptEngineTrait::::squeeze(&mut self.transcript, b"r_x") + .expect("This should never fail"); + self.ccs.s + ] + } + /// This function checks whether the current IVC after the last fold performed is satisfied and returns an error if it isn't. pub fn is_sat(&self) -> Result<(), NovaError> { self.lcccs.is_sat(&self.ccs, &self.ccs_mle, &self.ck) @@ -175,10 +197,12 @@ impl NIMFS { } /// This folds an upcoming CCCS instance into the running LCCCS instance contained within the NIMFS object. - pub fn fold(&mut self, mut rng: &mut R, cccs: CCCS) { - // Compute r_x_prime and rho from a given randomnes. - let r_x_prime = vec![G::Scalar::random(&mut rng); self.ccs.s]; - let rho = G::Scalar::random(&mut rng); + pub fn fold(&mut self, cccs: CCCS) { + // Compute r_x_prime and rho from challenging the transcript. + let r_x_prime = self.gen_r_x(); + // Challenge the transcript once more to obtain `rho` + let rho = TranscriptEngineTrait::::squeeze(&mut self.transcript, b"rho") + .expect("This should not fail"); // Compute sigmas an thetas to fold `v`s. let (sigmas, thetas) = self.compute_sigmas_and_thetas(&cccs.z, &r_x_prime); @@ -237,8 +261,9 @@ mod tests { let mut rng = OsRng; let gamma: G::Scalar = G::Scalar::random(&mut rng); let beta: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); + let r_x: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut OsRng)).collect(); - let lcccs = LCCCS::new(&ccs, &mles, &ck, z1, &mut OsRng); + let lcccs = LCCCS::new(&ccs, &mles, &ck, z1, r_x); let cccs = CCCS::new(&ccs, &mles, z2, &ck); let mut sum_v_j_gamma = G::Scalar::ZERO; @@ -294,15 +319,15 @@ mod tests { let mut rng = OsRng; let gamma: G::Scalar = G::Scalar::random(&mut rng); let beta: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); - let r_x_prime: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); + let r_x: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut OsRng)).collect(); - let lcccs = LCCCS::new(&ccs, &mles, &ck, z1, &mut OsRng); + let lcccs = LCCCS::new(&ccs, &mles, &ck, z1, r_x.clone()); let cccs = CCCS::new(&ccs, &mles, z2, &ck); // Generate a new NIMFS instance let nimfs = NIMFS::::new(ccs.clone(), mles.clone(), lcccs, ck.clone()); - let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&cccs.z, &r_x_prime); + let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&cccs.z, r_x.as_slice()); let g = nimfs.compute_g(&cccs, gamma, &beta); // Assert `g` is correctly computed here. @@ -329,15 +354,15 @@ mod tests { // XXX: We need a better way to do this. Sum_Mz has also the same issue. // reverse the `r` given to evaluate to match Spartan/Nova endianness. 
- let mut revsersed = r_x_prime.clone(); - revsersed.reverse(); + let mut reversed = r_x.clone(); + reversed.reverse(); // we expect g(r_x_prime) to be equal to: // c = (sum gamma^j * e1 * sigma_j) + gamma^{t+1} * e2 * sum c_i * prod theta_j // from `compute_c_from_sigmas_and_thetas` - let expected_c = g.evaluate(&revsersed).unwrap(); + let expected_c = g.evaluate(&reversed).unwrap(); - let c = nimfs.compute_c_from_sigmas_and_thetas(&sigmas, &thetas, gamma, &beta, &r_x_prime); + let c = nimfs.compute_c_from_sigmas_and_thetas(&sigmas, &thetas, gamma, &beta, &r_x); assert_eq!(c, expected_c); } @@ -346,8 +371,6 @@ mod tests { } fn test_lccs_fold_with() { - let mut rng = OsRng; - let z1 = CCS::::get_test_z(3); let z2 = CCS::::get_test_z(4); @@ -360,13 +383,13 @@ mod tests { assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); // Generate a new NIMFS instance - let mut nimfs = NIMFS::init(&mut rng, ccs.clone(), z1); + let mut nimfs = NIMFS::init(ccs.clone(), z1, b"test_NIMFS"); assert!(nimfs.is_sat().is_ok()); // check folding correct stuff still alows the NIMFS to be satisfied correctly. let cccs = nimfs.new_cccs(z2); assert!(cccs.is_sat(&ccs, &mles, &ck).is_ok()); - nimfs.fold(&mut rng, cccs); + nimfs.fold(cccs); assert!(nimfs.is_sat().is_ok()); // // Folding garbage should cause a failure diff --git a/src/provider/pasta.rs b/src/provider/pasta.rs index de3c31be1..1f5284ebc 100644 --- a/src/provider/pasta.rs +++ b/src/provider/pasta.rs @@ -207,6 +207,15 @@ impl TranscriptReprTrait for pallas::Scalar { } } +impl TranscriptReprTrait for Vec { + fn to_transcript_bytes(&self) -> Vec { + self + .iter() + .flat_map(|scalar| scalar.to_transcript_bytes()) + .collect() + } +} + impl_traits!( pallas, PallasCompressedElementWrapper, diff --git a/tests/nimfs.rs b/tests/nimfs.rs index b5c5e0094..82ab2c2ae 100644 --- a/tests/nimfs.rs +++ b/tests/nimfs.rs @@ -9,7 +9,6 @@ use nova_snark::{ NovaShape, ShapeCS, }; use pasta_curves::Ep; -use rand_core::OsRng; #[derive(Clone, Debug, Default)] struct CubicCircuit { @@ -64,9 +63,6 @@ fn integration_folding() { } fn integration_folding_test() { - // Generate some randomness. - let mut rng = OsRng; - let circuit = CubicCircuit::::default(); let mut cs: ShapeCS = ShapeCS::new(); // Generate the inputs: @@ -80,7 +76,6 @@ fn integration_folding_test() { // Generate NIMFS object. let mut nimfs = NIMFS::init( - &mut rng, ccs, // Note we constructed z on the fly with the previously-used witness. vec![ @@ -88,6 +83,7 @@ fn integration_folding_test() { G::Scalar::from(3u64), G::Scalar::from(35u64), ], + b"test_nimfs", ); // Now, the NIMFS should satisfy correctly as we have inputed valid starting inpuits for the first LCCCS contained instance: @@ -99,7 +95,7 @@ fn integration_folding_test() { G::Scalar::from(2u64), G::Scalar::from(15u64), ]); - nimfs.fold(&mut rng, valid_cccs); + nimfs.fold(valid_cccs); // Since the instance was correct, the NIMFS should still be satisfied. assert!(nimfs.is_sat().is_ok()); From 5f2444663a1e4cafbf19b16df74271e51ae3000f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Carlos=20P=C3=A9rez?= <37264926+CPerezz@users.noreply.github.com> Date: Fri, 4 Aug 2023 11:08:59 +0200 Subject: [PATCH 099/100] Remove witness from CCCS & LCCCS instances (#48) Since the Verifier in the protocol does not have access to the witnesses, we should not have `z`s inside of the LCCCS and CCCS instances and instead, work with the witness sepparatedly. This PR implements this sort of behaviour. 
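As a rough sketch of the resulting call shape (borrowing the names used in the updated tests, e.g. `ccs`, `mles`, `ck`, `ccs_witness`; illustrative only, not a complete example):

    // Instances are built from z but only retain the public input x and the witness commitment.
    let cccs = CCCS::new(&ccs, &mles, z, &ck);
    // Satisfiability checks now take the witness explicitly; internally the full z is rebuilt
    // via construct_z(witness) as [u, x, w] (with u = 1 for a plain CCCS instance).
    assert!(cccs.is_sat(&ccs, &mles, &ck, &ccs_witness.w).is_ok());
    assert!(lcccs.is_sat(&ccs, &mles, &ck, &ccs_witness.w).is_ok());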
Resolves: #46 --- src/ccs/cccs.rs | 44 +++++++++----- src/ccs/lcccs.rs | 78 ++++++++++++++++-------- src/ccs/multifolding.rs | 131 +++++++++++++++++++++++++++------------- tests/nimfs.rs | 14 ++++- 4 files changed, 183 insertions(+), 84 deletions(-) diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index c9e0bbeeb..8eb01ed8d 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -34,8 +34,8 @@ use super::CCS; #[derive(Clone, Debug, PartialEq, Eq, Serialize, Deserialize)] #[serde(bound = "")] pub struct CCCS { - // The `z` vector used as input for this instance. - pub(crate) z: Vec, + // The `x` vector represents public IO. + pub(crate) x: Option>, // Commitment to the witness of `z`. pub(crate) w_comm: Commitment, } @@ -48,22 +48,34 @@ impl CCCS { z: Vec, ck: &CommitmentKey, ) -> Self { - let w_comm = CE::::commit(ck, &z[(1 + ccs.l)..]); - Self { - z: z.to_vec(), - w_comm, + x: if ccs.l == 0 { + None + } else { + Some(z[(1..ccs.l + 1)].to_vec()) + }, + w_comm: CE::::commit(ck, &z[(1 + ccs.l)..]), } } + pub(crate) fn construct_z(&self, witness: &[G::Scalar]) -> Vec { + concat(vec![ + vec![G::Scalar::ONE], + self.x.clone().unwrap_or(vec![]), + witness.to_vec(), + ]) + } + /// Computes q(x) = \sum^q c_i * \prod_{j \in S_i} ( \sum_{y \in {0,1}^s'} M_j(x, y) * z(y) ) /// polynomial over x pub(crate) fn compute_q( &self, ccs: &CCS, ccs_mles: &[MultilinearPolynomial], + witness: &[G::Scalar], ) -> Result, NovaError> { - let z_mle = dense_vec_to_mle::(ccs.s_prime, &self.z); + let tmp_z = self.construct_z(witness); + let z_mle = dense_vec_to_mle::(ccs.s_prime, &tmp_z); if z_mle.get_num_vars() != ccs.s_prime { // this check if redundant if dense_vec_to_mle is correct return Err(NovaError::VpArith); @@ -106,8 +118,9 @@ impl CCCS { ccs: &CCS, ccs_mles: &[MultilinearPolynomial], beta: &[G::Scalar], + witness: &[G::Scalar], ) -> Result, NovaError> { - let q = self.compute_q(ccs, ccs_mles)?; + let q = self.compute_q(ccs, ccs_mles, witness)?; q.build_f_hat(beta) } @@ -117,13 +130,14 @@ impl CCCS { ccs: &CCS, ccs_mles: &[MultilinearPolynomial], ck: &CommitmentKey, + witness: &[G::Scalar], ) -> Result<(), NovaError> { // check that C is the commitment of w. Notice that this is not verifying a Pedersen // opening, but checking that the Commmitment comes from committing to the witness. - assert_eq!(self.w_comm, CE::::commit(ck, &self.z[(1 + ccs.l)..])); + assert_eq!(self.w_comm, CE::::commit(ck, witness)); // A CCCS relation is satisfied if the q(x) multivariate polynomial evaluates to zero in the hypercube - let q_x = self.compute_q(ccs, ccs_mles).unwrap(); + let q_x = self.compute_q(ccs, ccs_mles, witness).unwrap(); for x in BooleanHypercube::new(ccs.s) { if !q_x.evaluate(&x).unwrap().is_zero().unwrap_u8() == 0 { return Err(NovaError::UnSat); @@ -176,7 +190,7 @@ mod tests { // Generate CCCS artifacts let cccs = CCCS::new(&ccs, &mles, z, &ck); - let q = cccs.compute_q(&ccs, &mles).unwrap(); + let q = cccs.compute_q(&ccs, &mles, &ccs_witness.w).unwrap(); // Evaluate inside the hypercube BooleanHypercube::new(ccs.s).for_each(|x| { @@ -204,7 +218,7 @@ mod tests { let beta: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); // Compute Q(x) = eq(beta, x) * q(x). 
let Q = cccs - .compute_Q(&ccs, &mles, &beta) + .compute_Q(&ccs, &mles, &beta, &ccs_witness.w) .expect("Computation of Q should not fail"); // Let's consider the multilinear polynomial G(x) = \sum_{y \in {0, 1}^s} eq(x, y) q(y) @@ -241,12 +255,12 @@ mod tests { // Now test that if we create Q(x) with eq(d,y) where d is inside the hypercube, \sum Q(x) should be G(d) which // should be equal to q(d), since G(x) interpolates q(x) inside the hypercube let q = cccs - .compute_q(&ccs, &mles) + .compute_q(&ccs, &mles, &ccs_witness.w) .expect("Computing q shoud not fail"); for d in BooleanHypercube::new(ccs.s) { let Q_at_d = cccs - .compute_Q(&ccs, &mles, &d) + .compute_Q(&ccs, &mles, &d, &ccs_witness.w) .expect("Computing Q_at_d shouldn't fail"); // Get G(d) by summing over Q_d(x) over the hypercube @@ -259,7 +273,7 @@ mod tests { // Now test that they should disagree outside of the hypercube let r: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); let Q_at_r = cccs - .compute_Q(&ccs, &mles, &r) + .compute_Q(&ccs, &mles, &r, &ccs_witness.w) .expect("Computing Q_at_r shouldn't fail"); // Get G(d) by summing over Q_d(x) over the hypercube diff --git a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs index 494c6e5cb..22fb51770 100644 --- a/src/ccs/lcccs.rs +++ b/src/ccs/lcccs.rs @@ -34,11 +34,16 @@ use std::sync::Arc; #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "")] pub struct LCCCS { + /// Commitment to witness pub(crate) w_comm: Commitment, + /// Vector of v_i (result of folding thetas and sigmas). pub(crate) v: Vec, - // Random evaluation point for the v_i + /// Random evaluation point for the v_i pub(crate) r_x: Vec, - pub(crate) z: Vec, + /// Public input/output + pub(crate) x: Option>, + /// Relaxation factor of z for folded LCCCS + pub(crate) u: G::Scalar, } impl LCCCS { @@ -56,7 +61,28 @@ impl LCCCS { // Evaluation points for `v` let v = ccs.compute_v_j(&z, &r_x, ccs_m_mle); - Self { w_comm, v, r_x, z } + // Circuit might not have public IO. Hence, if so, we default it to zero. + let x = if ccs.l == 0 { + None + } else { + Some(z[1..ccs.l + 1].to_vec()) + }; + + Self { + w_comm, + v, + r_x, + u: G::Scalar::ONE, + x, + } + } + + pub(crate) fn construct_z(&self, witness: &[G::Scalar]) -> Vec { + concat(vec![ + vec![self.u], + self.x.clone().unwrap_or(vec![]), + witness.to_vec(), + ]) } /// Checks if the CCS instance is satisfiable given a witness and its shape @@ -65,16 +91,19 @@ impl LCCCS { ccs: &CCS, ccs_m_mle: &[MultilinearPolynomial], ck: &CommitmentKey, + witness: &[G::Scalar], ) -> Result<(), NovaError> { - dbg!(self.z.clone()); - let w = &self.z[(1 + ccs.l)..]; // check that C is the commitment of w. Notice that this is not verifying a Pedersen // opening, but checking that the Commmitment comes from committing to the witness. 
- let comm_eq = self.w_comm == CE::::commit(ck, w); + let comm_eq = self.w_comm == CE::::commit(ck, witness); + + let computed_v = compute_all_sum_Mz_evals::( + ccs_m_mle, + &self.construct_z(witness), + &self.r_x, + ccs.s_prime, + ); - let computed_v = compute_all_sum_Mz_evals::(ccs_m_mle, &self.z, &self.r_x, ccs.s_prime); - dbg!(self.v.clone()); - dbg!(computed_v.clone()); let vs_eq = computed_v == self.v; if vs_eq && comm_eq { @@ -89,8 +118,9 @@ impl LCCCS { &self, ccs: &CCS, ccs_m_mle: &[MultilinearPolynomial], + lcccs_witness: &[G::Scalar], ) -> Vec> { - let z_mle = dense_vec_to_mle(ccs.s_prime, self.z.as_slice()); + let z_mle = dense_vec_to_mle(ccs.s_prime, self.construct_z(lcccs_witness).as_slice()); let mut vec_L_j_x = Vec::with_capacity(ccs.t); for M_j in ccs_m_mle.iter() { @@ -124,15 +154,14 @@ mod tests { // LCCCS with the correct z should pass let r_x: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut OsRng)).collect(); let mut lcccs = LCCCS::new(&ccs, &mles, &ck, z.clone(), r_x); - assert!(lcccs.is_sat(&ccs, &mles, &ck).is_ok()); + assert!(lcccs.is_sat(&ccs, &mles, &ck, &witness.w).is_ok()); - // Wrong z so that the relation does not hold - let mut bad_z = z; - bad_z[3] = G::Scalar::ZERO; + // Wrong witness so that the relation does not hold + let mut bad_witness = witness.w.clone(); + bad_witness[2] = G::Scalar::ZERO; // LCCCS with the wrong z should not pass `is_sat`. - lcccs.z = bad_z; - assert!(lcccs.is_sat(&ccs, &mles, &ck).is_err()); + assert!(lcccs.is_sat(&ccs, &mles, &ck, &bad_witness).is_err()); } fn test_lcccs_v_j_with() { @@ -140,7 +169,7 @@ mod tests { // Gen test vectors & artifacts let z = CCS::::get_test_z(3); - let (ccs, _, _, mles) = CCS::::gen_test_ccs(&z); + let (ccs, witness, _, mles) = CCS::::gen_test_ccs(&z); let ck = ccs.commitment_key(); let r_x: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); @@ -148,7 +177,7 @@ mod tests { // Get LCCCS let lcccs = LCCCS::new(&ccs, &mles, &ck, z, r_x); - let vec_L_j_x = lcccs.compute_Ls(&ccs, &mles); + let vec_L_j_x = lcccs.compute_Ls(&ccs, &mles, &witness.w); assert_eq!(vec_L_j_x.len(), lcccs.v.len()); for (v_i, L_j_x) in lcccs.v.into_iter().zip(vec_L_j_x) { @@ -167,22 +196,21 @@ mod tests { let (ccs, witness, instance, mles) = CCS::::gen_test_ccs(&z); let ck = ccs.commitment_key(); - // Mutate z so that the relation does not hold - let mut bad_z = z.clone(); - bad_z[3] = G::Scalar::ZERO; + // Mutate witness so that the relation does not hold + let mut bad_witness = witness.w.clone(); + bad_witness[2] = G::Scalar::ZERO; // Compute v_j with the right z let r_x: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); let mut lcccs = LCCCS::new(&ccs, &mles, &ck, z, r_x); // Assert LCCCS is satisfied with the original Z - assert!(lcccs.is_sat(&ccs, &mles, &ck).is_ok()); + assert!(lcccs.is_sat(&ccs, &mles, &ck, &witness.w).is_ok()); // Compute L_j(x) with the bad z - lcccs.z = bad_z; - let vec_L_j_x = lcccs.compute_Ls(&ccs, &mles); + let vec_L_j_x = lcccs.compute_Ls(&ccs, &mles, &bad_witness); assert_eq!(vec_L_j_x.len(), lcccs.v.len()); // Assert LCCCS is not satisfied with the bad Z - assert!(lcccs.is_sat(&ccs, &mles, &ck).is_err()); + assert!(lcccs.is_sat(&ccs, &mles, &ck, &bad_witness).is_err()); // Make sure that the LCCCS is not satisfied given these L_j(x) // i.e. 
summing L_j(x) over the hypercube should not give v_j for all j diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index 6867ef3f1..8aade3407 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -110,28 +110,35 @@ impl NIMFS { } /// This function checks whether the current IVC after the last fold performed is satisfied and returns an error if it isn't. - pub fn is_sat(&self) -> Result<(), NovaError> { - self.lcccs.is_sat(&self.ccs, &self.ccs_mle, &self.ck) + pub fn is_sat(&self, witness: &[G::Scalar]) -> Result<(), NovaError> { + self + .lcccs + .is_sat(&self.ccs, &self.ccs_mle, &self.ck, witness) } /// Compute sigma_i and theta_i from step 4. pub fn compute_sigmas_and_thetas( &self, - // Note `z2` represents the input of the incoming CCCS instance. - // As the current IVC accumulated input is holded inside of the NIMFS(`self`). - z: &[G::Scalar], + cccs_witness: &[G::Scalar], + cccs: &CCCS, + lcccs_witness: &[G::Scalar], r_x_prime: &[G::Scalar], ) -> (Vec, Vec) { ( // sigmas compute_all_sum_Mz_evals::( &self.ccs_mle, - self.lcccs.z.as_slice(), + self.lcccs.construct_z(lcccs_witness).as_slice(), r_x_prime, self.ccs.s_prime, ), // thetas - compute_all_sum_Mz_evals::(&self.ccs_mle, z, r_x_prime, self.ccs.s_prime), + compute_all_sum_Mz_evals::( + &self.ccs_mle, + cccs.construct_z(cccs_witness).as_slice(), + r_x_prime, + self.ccs.s_prime, + ), ) } @@ -170,16 +177,20 @@ impl NIMFS { } /// Compute g(x) polynomial for the given inputs. - pub fn compute_g( + pub(crate) fn compute_g( &self, + lcccs_witness: &[G::Scalar], cccs: &CCCS, + cccs_witness: &[G::Scalar], gamma: G::Scalar, beta: &[G::Scalar], ) -> VirtualPolynomial { - let mut vec_L = self.lcccs.compute_Ls(&self.ccs, &self.ccs_mle); + let mut vec_L = self + .lcccs + .compute_Ls(&self.ccs, &self.ccs_mle, lcccs_witness); let mut Q = cccs - .compute_Q(&self.ccs, &self.ccs_mle, beta) + .compute_Q(&self.ccs, &self.ccs_mle, beta, cccs_witness) .expect("Q comp should not fail"); let mut g = vec_L[0].clone(); @@ -196,17 +207,25 @@ impl NIMFS { g } - /// This folds an upcoming CCCS instance into the running LCCCS instance contained within the NIMFS object. - pub fn fold(&mut self, cccs: CCCS) { + /// Generates the required elements to be able to fold. + pub fn prepare_folding(&mut self) -> (Vec, G::Scalar) { // Compute r_x_prime and rho from challenging the transcript. let r_x_prime = self.gen_r_x(); // Challenge the transcript once more to obtain `rho` let rho = TranscriptEngineTrait::::squeeze(&mut self.transcript, b"rho") .expect("This should not fail"); + (r_x_prime, rho) + } - // Compute sigmas an thetas to fold `v`s. - let (sigmas, thetas) = self.compute_sigmas_and_thetas(&cccs.z, &r_x_prime); - + /// This folds an upcoming CCCS instance into the running LCCCS instance contained within the NIMFS object. + pub fn fold( + &mut self, + cccs: &CCCS, + sigmas: Vec, + thetas: Vec, + r_x_prime: Vec, + rho: G::Scalar, + ) { // Compute new v from sigmas and thetas. let folded_v: Vec = sigmas .iter() @@ -219,20 +238,38 @@ impl NIMFS { .map(|(a_i, b_i)| *a_i + b_i) .collect(); + // Fold x's + + if self.lcccs.x.is_some() && cccs.x.is_some() { + // Use unsafe and unwrap_unchecked?? + self + .lcccs + .x + .as_mut() + .unwrap() + .iter_mut() + .zip(cccs.x.as_ref().unwrap().iter().map(|x| *x * rho)) + .for_each(|(x_lcccs, x_cccs)| *x_lcccs += x_cccs); + }; + // Here we perform steps 7 & 8 of the section 5 of the paper. Were we actually fold LCCCS & CCCS instances. 
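For reference, the combination computed in the lines that follow is, writing subscript 1 for the running LCCCS, subscript 2 for the incoming CCCS, and rho for the folding challenge squeezed above:

    C'   = C_1 + rho * C_2             (witness commitments)
    u'   = u_1 + rho                   (the incoming CCCS implicitly has u = 1)
    x'   = x_1 + rho * x_2             (public IO, when present)
    v_j' = sigma_j + rho * theta_j     (evaluation claims, now at the new point r_x')
    w'   = w_1 + rho * w_2             (witnesses, folded separately in fold_witness)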
self.lcccs.w_comm += cccs.w_comm.mul(rho); self.lcccs.v = folded_v; self.lcccs.r_x = r_x_prime; - self.fold_z(cccs, rho); + self.lcccs.u += rho; } /// Folds the current `z` vector of the upcomming CCCS instance together with the LCCCS instance that is contained inside of the NIMFS object. - fn fold_z(&mut self, cccs: CCCS, rho: G::Scalar) { - // Update u first. - self.lcccs.z[0] += rho; - self.lcccs.z[1..] + pub fn fold_witness( + &mut self, + cccs: &CCCS, + cccs_witness: &[G::Scalar], + lcccs_witness: &mut [G::Scalar], + rho: G::Scalar, + ) { + lcccs_witness .iter_mut() - .zip(cccs.z[1..].iter().map(|x_i| *x_i * rho)) + .zip(cccs_witness.iter().map(|cccs_w| *cccs_w * rho)) .for_each(|(a_i, b_i)| *a_i += b_i); // XXX: There's no handling of r_w atm. So we will ingore until all folding is implemented, @@ -251,20 +288,23 @@ mod tests { let z1 = CCS::::get_test_z(3); let z2 = CCS::::get_test_z(4); - let (_, ccs_witness_1, ccs_instance_1, mles) = CCS::::gen_test_ccs(&z2); - let (ccs, ccs_witness_2, ccs_instance_2, _) = CCS::::gen_test_ccs(&z1); + let (_, ccs_witness_1, ccs_instance_1, mles) = CCS::::gen_test_ccs(&z1); + let (ccs, ccs_witness_2, ccs_instance_2, _) = CCS::::gen_test_ccs(&z2); + let ck = ccs.commitment_key(); assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); let mut rng = OsRng; - let gamma: G::Scalar = G::Scalar::random(&mut rng); - let beta: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut rng)).collect(); + let gamma: G::Scalar = G::Scalar::random(&mut OsRng); + let beta: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut OsRng)).collect(); let r_x: Vec = (0..ccs.s).map(|_| G::Scalar::random(&mut OsRng)).collect(); let lcccs = LCCCS::new(&ccs, &mles, &ck, z1, r_x); + assert!(lcccs.is_sat(&ccs, &mles, &ck, &ccs_witness_1.w).is_ok()); let cccs = CCCS::new(&ccs, &mles, z2, &ck); + assert!(cccs.is_sat(&ccs, &mles, &ck, &ccs_witness_2.w).is_ok()); let mut sum_v_j_gamma = G::Scalar::ZERO; for j in 0..lcccs.v.len() { @@ -275,7 +315,7 @@ mod tests { let nimfs = NIMFS::::new(ccs.clone(), mles.clone(), lcccs.clone(), ck.clone()); // Compute g(x) with that r_x - let g = nimfs.compute_g(&cccs, gamma, &beta); + let g = nimfs.compute_g(&ccs_witness_1.w, &cccs, &ccs_witness_2.w, gamma, &beta); // evaluate g(x) over x \in {0,1}^s let mut g_on_bhc = G::Scalar::ZERO; @@ -285,7 +325,7 @@ mod tests { // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s let mut sum_Lj_on_bhc = G::Scalar::ZERO; - let vec_L = lcccs.compute_Ls(&ccs, &mles); + let vec_L = lcccs.compute_Ls(&ccs, &mles, &ccs_witness_1.w); for x in BooleanHypercube::new(ccs.s) { for (j, coeff) in vec_L.iter().enumerate() { let gamma_j = gamma.pow([j as u64]); @@ -309,8 +349,8 @@ mod tests { let z1 = CCS::::get_test_z(3); let z2 = CCS::::get_test_z(4); - let (_, ccs_witness_1, ccs_instance_1, mles) = CCS::::gen_test_ccs(&z2); - let (ccs, ccs_witness_2, ccs_instance_2, _) = CCS::::gen_test_ccs(&z1); + let (_, ccs_witness_1, ccs_instance_1, mles) = CCS::::gen_test_ccs(&z1); + let (ccs, ccs_witness_2, ccs_instance_2, _) = CCS::::gen_test_ccs(&z2); let ck: CommitmentKey = ccs.commitment_key(); assert!(ccs.is_sat(&ck, &ccs_instance_1, &ccs_witness_1).is_ok()); @@ -326,10 +366,12 @@ mod tests { // Generate a new NIMFS instance let nimfs = NIMFS::::new(ccs.clone(), mles.clone(), lcccs, ck.clone()); + let nimfs_witness = ccs_witness_1.w.clone(); - let (sigmas, thetas) = nimfs.compute_sigmas_and_thetas(&cccs.z, r_x.as_slice()); + let (sigmas, thetas) = + 
nimfs.compute_sigmas_and_thetas(&ccs_witness_2.w, &cccs, &nimfs_witness, &r_x); - let g = nimfs.compute_g(&cccs, gamma, &beta); + let g = nimfs.compute_g(&nimfs_witness, &cccs, &ccs_witness_2.w, gamma, &beta); // Assert `g` is correctly computed here. { // evaluate g(x) over x \in {0,1}^s @@ -339,7 +381,7 @@ mod tests { } // evaluate sum_{j \in [t]} (gamma^j * Lj(x)) over x \in {0,1}^s let mut sum_Lj_on_bhc = G::Scalar::ZERO; - let vec_L = nimfs.lcccs.compute_Ls(&ccs, &mles); + let vec_L = nimfs.lcccs.compute_Ls(&ccs, &mles, &nimfs_witness); for x in BooleanHypercube::new(ccs.s) { for (j, coeff) in vec_L.iter().enumerate() { let gamma_j = gamma.pow([j as u64]); @@ -366,10 +408,6 @@ mod tests { assert_eq!(c, expected_c); } - fn test_compute_g() { - test_compute_g_with::(); - } - fn test_lccs_fold_with() { let z1 = CCS::::get_test_z(3); let z2 = CCS::::get_test_z(4); @@ -383,14 +421,20 @@ mod tests { assert!(ccs.is_sat(&ck, &ccs_instance_2, &ccs_witness_2).is_ok()); // Generate a new NIMFS instance - let mut nimfs = NIMFS::init(ccs.clone(), z1, b"test_NIMFS"); - assert!(nimfs.is_sat().is_ok()); + let mut nimfs = NIMFS::init(ccs.clone(), z1, b"Test NIMFS"); + let mut nimfs_witness = ccs_witness_1.w.clone(); + assert!(nimfs.is_sat(&nimfs_witness).is_ok()); // check folding correct stuff still alows the NIMFS to be satisfied correctly. let cccs = nimfs.new_cccs(z2); - assert!(cccs.is_sat(&ccs, &mles, &ck).is_ok()); - nimfs.fold(cccs); - assert!(nimfs.is_sat().is_ok()); + assert!(cccs.is_sat(&ccs, &mles, &ck, &ccs_witness_2.w).is_ok()); + + let (r_x_prime, rho) = nimfs.prepare_folding(); + let (sigmas, thetas) = + nimfs.compute_sigmas_and_thetas(&ccs_witness_2.w, &cccs, &nimfs_witness, &r_x_prime); + nimfs.fold(&cccs, sigmas, thetas, r_x_prime, rho); + nimfs.fold_witness(&cccs, &ccs_witness_2.w, &mut nimfs_witness, rho); + assert!(nimfs.is_sat(&nimfs_witness).is_ok()); // // Folding garbage should cause a failure // let cccs = nimfs.new_cccs(vec![Fq::ONE, Fq::ONE, Fq::ONE]); @@ -399,6 +443,11 @@ mod tests { // XXX: Should this indeed pass as it does now? } + #[test] + fn test_compute_g() { + test_compute_g_with::(); + } + #[test] fn test_compute_sigmas_and_thetas() { test_compute_sigmas_and_thetas_with::() diff --git a/tests/nimfs.rs b/tests/nimfs.rs index 82ab2c2ae..d3deaa4ef 100644 --- a/tests/nimfs.rs +++ b/tests/nimfs.rs @@ -86,8 +86,10 @@ fn integration_folding_test() { b"test_nimfs", ); + let mut nimfs_witness = vec![G::Scalar::from(3u64), G::Scalar::from(35u64)]; + // Now, the NIMFS should satisfy correctly as we have inputed valid starting inpuits for the first LCCCS contained instance: - assert!(nimfs.is_sat().is_ok()); + assert!(nimfs.is_sat(&nimfs_witness).is_ok()); // Now let's create a valid CCCS instance and fold it: let valid_cccs = nimfs.new_cccs(vec![ @@ -95,8 +97,14 @@ fn integration_folding_test() { G::Scalar::from(2u64), G::Scalar::from(15u64), ]); - nimfs.fold(valid_cccs); + let cccs_witness = vec![G::Scalar::from(2u64), G::Scalar::from(15u64)]; + + let (r_x_prime, rho) = nimfs.prepare_folding(); + let (sigmas, thetas) = + nimfs.compute_sigmas_and_thetas(&cccs_witness, &valid_cccs, &nimfs_witness, &r_x_prime); + nimfs.fold(&valid_cccs, sigmas, thetas, r_x_prime, rho); + nimfs.fold_witness(&valid_cccs, &cccs_witness, &mut nimfs_witness, rho); // Since the instance was correct, the NIMFS should still be satisfied. 
- assert!(nimfs.is_sat().is_ok()); + assert!(nimfs.is_sat(&nimfs_witness).is_ok()); } From a18afe034422e5c6bebce29df3c34f169239dd32 Mon Sep 17 00:00:00 2001 From: oskarth Date: Mon, 7 Aug 2023 14:38:56 +0100 Subject: [PATCH 100/100] Chore/sync upstream (#51) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Sync upstream --------- Co-authored-by: François Garillot <4142+huitseeker@users.noreply.github.com> Co-authored-by: Srinath Setty --- Cargo.toml | 5 +- benches/compressed-snark.rs | 97 ++++++++++++++- benches/compute-digest.rs | 2 +- benches/recursive-snark.rs | 2 +- benches/sha256.rs | 4 +- examples/minroot.rs | 2 +- src/bellperson/r1cs.rs | 10 +- src/bellperson/shape_cs.rs | 20 +--- src/bellperson/solver.rs | 17 +-- src/ccs/cccs.rs | 1 - src/ccs/lcccs.rs | 1 - src/ccs/mod.rs | 1 - src/ccs/multifolding.rs | 1 - src/ccs/util/mod.rs | 1 - src/ccs/util/virtual_poly.rs | 1 - src/circuit.rs | 58 ++++----- src/gadgets/ecc.rs | 6 +- src/gadgets/nonnative/bignat.rs | 79 +++++++++++- src/gadgets/nonnative/util.rs | 2 +- src/lib.rs | 52 ++++---- src/provider/ipa_pc.rs | 2 +- src/provider/pasta.rs | 4 +- src/provider/pedersen.rs | 4 +- src/r1cs.rs | 10 ++ src/spartan/polynomial.rs | 6 +- src/spartan/ppsnark.rs | 189 +++++++++-------------------- src/spartan/snark.rs | 17 +-- src/spartan/sumcheck.rs | 206 +++++++++++++++++--------------- src/traits/commitment.rs | 10 +- src/traits/mod.rs | 11 +- 30 files changed, 434 insertions(+), 387 deletions(-) diff --git a/Cargo.toml b/Cargo.toml index eb1909655..26dc0b98d 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -1,6 +1,6 @@ [package] name = "nova-snark" -version = "0.22.0" +version = "0.23.0" authors = ["Srinath Setty "] edition = "2021" description = "Recursive zkSNARKs without trusted setup" @@ -28,7 +28,6 @@ num-traits = "0.2" num-integer = "0.1" serde = { version = "1.0", features = ["derive"] } bincode = "1.3" -flate2 = "1.0" bitvec = "1.0" byteorder = "1.4.3" thiserror = "1.0" @@ -45,10 +44,12 @@ getrandom = { version = "0.2.0", default-features = false, features = ["js"] } [dev-dependencies] criterion = { version = "0.4", features = ["html_reports"] } rand = "0.8.4" +flate2 = "1.0" hex = "0.4.3" pprof = { version = "0.11" } cfg-if = "1.0.0" sha2 = "0.10.7" +proptest = "1.2.0" [[bench]] name = "recursive-snark" diff --git a/benches/compressed-snark.rs b/benches/compressed-snark.rs index 4effe2620..2402217e2 100644 --- a/benches/compressed-snark.rs +++ b/benches/compressed-snark.rs @@ -17,8 +17,12 @@ type G1 = pasta_curves::pallas::Point; type G2 = pasta_curves::vesta::Point; type EE1 = nova_snark::provider::ipa_pc::EvaluationEngine; type EE2 = nova_snark::provider::ipa_pc::EvaluationEngine; +// SNARKs without computational commitments type S1 = nova_snark::spartan::snark::RelaxedR1CSSNARK; type S2 = nova_snark::spartan::snark::RelaxedR1CSSNARK; +// SNARKs with computational commitments +type SS1 = nova_snark::spartan::ppsnark::RelaxedR1CSSNARK; +type SS2 = nova_snark::spartan::ppsnark::RelaxedR1CSSNARK; type C1 = NonTrivialTestCircuit<::Scalar>; type C2 = TrivialTestCircuit<::Scalar>; @@ -31,13 +35,13 @@ cfg_if::cfg_if! { criterion_group! { name = compressed_snark; config = Criterion::default().warm_up_time(Duration::from_millis(3000)).with_profiler(pprof::criterion::PProfProfiler::new(100, pprof::criterion::Output::Flamegraph(None))); - targets = bench_compressed_snark + targets = bench_compressed_snark, bench_compressed_snark_with_computational_commitments } } else { criterion_group! 
{ name = compressed_snark; config = Criterion::default().warm_up_time(Duration::from_millis(3000)); - targets = bench_compressed_snark + targets = bench_compressed_snark, bench_compressed_snark_with_computational_commitments } } } @@ -61,7 +65,7 @@ fn bench_compressed_snark(c: &mut Criterion) { let c_secondary = TrivialTestCircuit::default(); // Produce public parameters - let pp = PublicParams::::setup(c_primary.clone(), c_secondary.clone()); + let pp = PublicParams::::setup(&c_primary, &c_secondary); // Produce prover and verifier keys for CompressedSNARK let (pk, vk) = CompressedSNARK::<_, _, _, _, S1, S2>::setup(&pp).unwrap(); @@ -129,6 +133,93 @@ fn bench_compressed_snark(c: &mut Criterion) { } } +fn bench_compressed_snark_with_computational_commitments(c: &mut Criterion) { + let num_samples = 10; + let num_cons_verifier_circuit_primary = 9819; + // we vary the number of constraints in the step circuit + for &num_cons_in_augmented_circuit in [9819, 16384, 32768, 65536, 131072, 262144].iter() { + // number of constraints in the step circuit + let num_cons = num_cons_in_augmented_circuit - num_cons_verifier_circuit_primary; + + let mut group = c.benchmark_group(format!( + "CompressedSNARK-Commitments-StepCircuitSize-{num_cons}" + )); + group + .sampling_mode(SamplingMode::Flat) + .sample_size(num_samples); + + let c_primary = NonTrivialTestCircuit::new(num_cons); + let c_secondary = TrivialTestCircuit::default(); + + // Produce public parameters + let pp = PublicParams::::setup(&c_primary, &c_secondary); + + // Produce prover and verifier keys for CompressedSNARK + let (pk, vk) = CompressedSNARK::<_, _, _, _, SS1, SS2>::setup(&pp).unwrap(); + + // produce a recursive SNARK + let num_steps = 3; + let mut recursive_snark: RecursiveSNARK = RecursiveSNARK::new( + &pp, + &c_primary, + &c_secondary, + vec![::Scalar::from(2u64)], + vec![::Scalar::from(2u64)], + ); + + for i in 0..num_steps { + let res = recursive_snark.prove_step( + &pp, + &c_primary, + &c_secondary, + vec![::Scalar::from(2u64)], + vec![::Scalar::from(2u64)], + ); + assert!(res.is_ok()); + + // verify the recursive snark at each step of recursion + let res = recursive_snark.verify( + &pp, + i + 1, + &[::Scalar::from(2u64)], + &[::Scalar::from(2u64)], + ); + assert!(res.is_ok()); + } + + // Bench time to produce a compressed SNARK + group.bench_function("Prove", |b| { + b.iter(|| { + assert!(CompressedSNARK::<_, _, _, _, SS1, SS2>::prove( + black_box(&pp), + black_box(&pk), + black_box(&recursive_snark) + ) + .is_ok()); + }) + }); + let res = CompressedSNARK::<_, _, _, _, SS1, SS2>::prove(&pp, &pk, &recursive_snark); + assert!(res.is_ok()); + let compressed_snark = res.unwrap(); + + // Benchmark the verification time + group.bench_function("Verify", |b| { + b.iter(|| { + assert!(black_box(&compressed_snark) + .verify( + black_box(&vk), + black_box(num_steps), + black_box(vec![::Scalar::from(2u64)]), + black_box(vec![::Scalar::from(2u64)]), + ) + .is_ok()); + }) + }); + + group.finish(); + } +} + #[derive(Clone, Debug, Default)] struct NonTrivialTestCircuit { num_cons: usize, diff --git a/benches/compute-digest.rs b/benches/compute-digest.rs index 47bdda1dc..501055656 100644 --- a/benches/compute-digest.rs +++ b/benches/compute-digest.rs @@ -27,7 +27,7 @@ criterion_main!(compute_digest); fn bench_compute_digest(c: &mut Criterion) { c.bench_function("compute_digest", |b| { b.iter(|| { - PublicParams::::setup(black_box(C1::new(10)), black_box(C2::default())) + PublicParams::::setup(black_box(&C1::new(10)), 
black_box(&C2::default())) }) }); } diff --git a/benches/recursive-snark.rs b/benches/recursive-snark.rs index eed8d48fa..5af803f83 100644 --- a/benches/recursive-snark.rs +++ b/benches/recursive-snark.rs @@ -56,7 +56,7 @@ fn bench_recursive_snark(c: &mut Criterion) { let c_secondary = TrivialTestCircuit::default(); // Produce public parameters - let pp = PublicParams::::setup(c_primary.clone(), c_secondary.clone()); + let pp = PublicParams::::setup(&c_primary, &c_secondary); // Bench time to produce a recursive SNARK; // we execute a certain number of warm-up steps since executing diff --git a/benches/sha256.rs b/benches/sha256.rs index f35500f86..642c69912 100644 --- a/benches/sha256.rs +++ b/benches/sha256.rs @@ -200,8 +200,8 @@ fn bench_recursive_snark(c: &mut Criterion) { group.sample_size(10); // Produce public parameters - let pp = - PublicParams::::setup(circuit_primary.clone(), TrivialTestCircuit::default()); + let ttc = TrivialTestCircuit::default(); + let pp = PublicParams::::setup(&circuit_primary, &ttc); let circuit_secondary = TrivialTestCircuit::default(); let z0_primary = vec![::Scalar::from(2u64)]; diff --git a/examples/minroot.rs b/examples/minroot.rs index 75c2d41df..dd5c8d60c 100644 --- a/examples/minroot.rs +++ b/examples/minroot.rs @@ -172,7 +172,7 @@ fn main() { G2, MinRootCircuit<::Scalar>, TrivialTestCircuit<::Scalar>, - >::setup(circuit_primary.clone(), circuit_secondary.clone()); + >::setup(&circuit_primary, &circuit_secondary); println!("PublicParams::setup, took {:?} ", start.elapsed()); println!( diff --git a/src/bellperson/r1cs.rs b/src/bellperson/r1cs.rs index 56710f76e..ae3388dfb 100644 --- a/src/bellperson/r1cs.rs +++ b/src/bellperson/r1cs.rs @@ -28,10 +28,7 @@ pub trait NovaShape { fn r1cs_shape(&self) -> (R1CSShape, CommitmentKey); } -impl NovaWitness for SatisfyingAssignment -where - G::Scalar: PrimeField, -{ +impl NovaWitness for SatisfyingAssignment { fn r1cs_instance_and_witness( &self, shape: &R1CSShape, @@ -48,10 +45,7 @@ where } } -impl NovaShape for ShapeCS -where - G::Scalar: PrimeField, -{ +impl NovaShape for ShapeCS { fn r1cs_shape(&self) -> (R1CSShape, CommitmentKey) { let mut A: Vec<(usize, usize, G::Scalar)> = Vec::new(); let mut B: Vec<(usize, usize, G::Scalar)> = Vec::new(); diff --git a/src/bellperson/shape_cs.rs b/src/bellperson/shape_cs.rs index bb964636d..a80be8c55 100644 --- a/src/bellperson/shape_cs.rs +++ b/src/bellperson/shape_cs.rs @@ -48,10 +48,7 @@ impl Ord for OrderedVariable { #[allow(clippy::upper_case_acronyms)] /// `ShapeCS` is a `ConstraintSystem` for creating `R1CSShape`s for a circuit. 
-pub struct ShapeCS -where - G::Scalar: PrimeField + Field, -{ +pub struct ShapeCS { named_objects: HashMap, current_namespace: Vec, #[allow(clippy::type_complexity)] @@ -92,10 +89,7 @@ fn proc_lc( map } -impl ShapeCS -where - G::Scalar: PrimeField, -{ +impl ShapeCS { /// Create a new, default `ShapeCS`, pub fn new() -> Self { ShapeCS::default() @@ -216,10 +210,7 @@ where } } -impl Default for ShapeCS -where - G::Scalar: PrimeField, -{ +impl Default for ShapeCS { fn default() -> Self { let mut map = HashMap::new(); map.insert("ONE".into(), NamedObject::Var(ShapeCS::::one())); @@ -233,10 +224,7 @@ where } } -impl ConstraintSystem for ShapeCS -where - G::Scalar: PrimeField, -{ +impl ConstraintSystem for ShapeCS { type Root = Self; fn alloc(&mut self, annotation: A, _f: F) -> Result diff --git a/src/bellperson/solver.rs b/src/bellperson/solver.rs index 0eaf088ce..2357724a9 100644 --- a/src/bellperson/solver.rs +++ b/src/bellperson/solver.rs @@ -1,7 +1,7 @@ //! Support for generating R1CS witness using bellperson. use crate::traits::Group; -use ff::{Field, PrimeField}; +use ff::Field; use bellperson::{ multiexp::DensityTracker, ConstraintSystem, Index, LinearCombination, SynthesisError, Variable, @@ -9,10 +9,7 @@ use bellperson::{ /// A `ConstraintSystem` which calculates witness values for a concrete instance of an R1CS circuit. #[derive(PartialEq)] -pub struct SatisfyingAssignment -where - G::Scalar: PrimeField, -{ +pub struct SatisfyingAssignment { // Density of queries a_aux_density: DensityTracker, b_input_density: DensityTracker, @@ -29,10 +26,7 @@ where } use std::fmt; -impl fmt::Debug for SatisfyingAssignment -where - G::Scalar: PrimeField, -{ +impl fmt::Debug for SatisfyingAssignment { fn fmt(&self, fmt: &mut fmt::Formatter<'_>) -> fmt::Result { fmt .debug_struct("SatisfyingAssignment") @@ -69,10 +63,7 @@ where } } -impl ConstraintSystem for SatisfyingAssignment -where - G::Scalar: PrimeField, -{ +impl ConstraintSystem for SatisfyingAssignment { type Root = Self; fn new() -> Self { diff --git a/src/ccs/cccs.rs b/src/ccs/cccs.rs index 8eb01ed8d..6c67f6214 100644 --- a/src/ccs/cccs.rs +++ b/src/ccs/cccs.rs @@ -18,7 +18,6 @@ use crate::{ use bitvec::vec; use core::{cmp::max, marker::PhantomData}; use ff::{Field, PrimeField}; -use flate2::{write::ZlibEncoder, Compression}; use itertools::concat; use rayon::prelude::*; use serde::{Deserialize, Serialize}; diff --git a/src/ccs/lcccs.rs b/src/ccs/lcccs.rs index 22fb51770..96c6f01a3 100644 --- a/src/ccs/lcccs.rs +++ b/src/ccs/lcccs.rs @@ -21,7 +21,6 @@ use crate::{ use bitvec::vec; use core::{cmp::max, marker::PhantomData}; use ff::{Field, PrimeField}; -use flate2::{write::ZlibEncoder, Compression}; use itertools::concat; use rand_core::RngCore; use rayon::prelude::*; diff --git a/src/ccs/mod.rs b/src/ccs/mod.rs index 1b3ba9316..d56a6af77 100644 --- a/src/ccs/mod.rs +++ b/src/ccs/mod.rs @@ -24,7 +24,6 @@ use crate::{ use bitvec::vec; use core::{cmp::max, marker::PhantomData}; use ff::Field; -use flate2::{write::ZlibEncoder, Compression}; use itertools::concat; use rand_core::RngCore; use rayon::prelude::*; diff --git a/src/ccs/multifolding.rs b/src/ccs/multifolding.rs index 8aade3407..188c5e07b 100644 --- a/src/ccs/multifolding.rs +++ b/src/ccs/multifolding.rs @@ -24,7 +24,6 @@ use crate::{ use bitvec::vec; use core::{cmp::max, marker::PhantomData}; use ff::{Field, PrimeField}; -use flate2::{write::ZlibEncoder, Compression}; use itertools::concat; use rand_core::RngCore; use rayon::prelude::*; diff --git a/src/ccs/util/mod.rs 
b/src/ccs/util/mod.rs index 23616b354..02fd1af27 100644 --- a/src/ccs/util/mod.rs +++ b/src/ccs/util/mod.rs @@ -18,7 +18,6 @@ use crate::{ use bitvec::vec; use core::{cmp::max, marker::PhantomData}; use ff::{Field, PrimeField}; -use flate2::{write::ZlibEncoder, Compression}; use itertools::concat; use rayon::prelude::*; use serde::{Deserialize, Serialize}; diff --git a/src/ccs/util/virtual_poly.rs b/src/ccs/util/virtual_poly.rs index f1bcdf2a8..c48226469 100644 --- a/src/ccs/util/virtual_poly.rs +++ b/src/ccs/util/virtual_poly.rs @@ -18,7 +18,6 @@ use crate::{ use bitvec::vec; use core::{cmp::max, marker::PhantomData}; use ff::{Field, PrimeField}; -use flate2::{write::ZlibEncoder, Compression}; use itertools::concat; use rand::Rng; use rand_core::RngCore; diff --git a/src/circuit.rs b/src/circuit.rs index 60744f0e2..426ce18a5 100644 --- a/src/circuit.rs +++ b/src/circuit.rs @@ -40,7 +40,7 @@ pub struct NovaAugmentedCircuitParams { } impl NovaAugmentedCircuitParams { - pub fn new(limb_width: usize, n_limbs: usize, is_primary_circuit: bool) -> Self { + pub const fn new(limb_width: usize, n_limbs: usize, is_primary_circuit: bool) -> Self { Self { limb_width, n_limbs, @@ -87,19 +87,19 @@ impl NovaAugmentedCircuitInputs { /// The augmented circuit F' in Nova that includes a step circuit F /// and the circuit for the verifier in Nova's non-interactive folding scheme -pub struct NovaAugmentedCircuit> { - params: NovaAugmentedCircuitParams, +pub struct NovaAugmentedCircuit<'a, G: Group, SC: StepCircuit> { + params: &'a NovaAugmentedCircuitParams, ro_consts: ROConstantsCircuit, inputs: Option>, - step_circuit: SC, // The function that is applied for each step + step_circuit: &'a SC, // The function that is applied for each step } -impl> NovaAugmentedCircuit { +impl<'a, G: Group, SC: StepCircuit> NovaAugmentedCircuit<'a, G, SC> { /// Create a new verification circuit for the input relaxed r1cs instances - pub fn new( - params: NovaAugmentedCircuitParams, + pub const fn new( + params: &'a NovaAugmentedCircuitParams, inputs: Option>, - step_circuit: SC, + step_circuit: &'a SC, ro_consts: ROConstantsCircuit, ) -> Self { Self { @@ -262,8 +262,8 @@ impl> NovaAugmentedCircuit { } } -impl> Circuit<::Base> - for NovaAugmentedCircuit +impl<'a, G: Group, SC: StepCircuit> Circuit<::Base> + for NovaAugmentedCircuit<'a, G, SC> { fn synthesize::Base>>( self, @@ -396,27 +396,19 @@ mod tests { G1: Group::Scalar>, G2: Group::Scalar>, { + let ttc1 = TrivialTestCircuit::default(); // Initialize the shape and ck for the primary - let circuit1: NovaAugmentedCircuit::Base>> = - NovaAugmentedCircuit::new( - primary_params.clone(), - None, - TrivialTestCircuit::default(), - ro_consts1.clone(), - ); + let circuit1: NovaAugmentedCircuit<'_, G2, TrivialTestCircuit<::Base>> = + NovaAugmentedCircuit::new(&primary_params, None, &ttc1, ro_consts1.clone()); let mut cs: ShapeCS = ShapeCS::new(); let _ = circuit1.synthesize(&mut cs); let (shape1, ck1) = cs.r1cs_shape(); assert_eq!(cs.num_constraints(), num_constraints_primary); + let ttc2 = TrivialTestCircuit::default(); // Initialize the shape and ck for the secondary - let circuit2: NovaAugmentedCircuit::Base>> = - NovaAugmentedCircuit::new( - secondary_params.clone(), - None, - TrivialTestCircuit::default(), - ro_consts2.clone(), - ); + let circuit2: NovaAugmentedCircuit<'_, G1, TrivialTestCircuit<::Base>> = + NovaAugmentedCircuit::new(&secondary_params, None, &ttc2, ro_consts2.clone()); let mut cs: ShapeCS = ShapeCS::new(); let _ = circuit2.synthesize(&mut cs); let (shape2, ck2) 
= cs.r1cs_shape(); @@ -434,13 +426,8 @@ mod tests { None, None, ); - let circuit1: NovaAugmentedCircuit::Base>> = - NovaAugmentedCircuit::new( - primary_params, - Some(inputs1), - TrivialTestCircuit::default(), - ro_consts1, - ); + let circuit1: NovaAugmentedCircuit<'_, G2, TrivialTestCircuit<::Base>> = + NovaAugmentedCircuit::new(&primary_params, Some(inputs1), &ttc1, ro_consts1); let _ = circuit1.synthesize(&mut cs1); let (inst1, witness1) = cs1.r1cs_instance_and_witness(&shape1, &ck1).unwrap(); // Make sure that this is satisfiable @@ -458,13 +445,8 @@ mod tests { Some(inst1), None, ); - let circuit2: NovaAugmentedCircuit::Base>> = - NovaAugmentedCircuit::new( - secondary_params, - Some(inputs2), - TrivialTestCircuit::default(), - ro_consts2, - ); + let circuit2: NovaAugmentedCircuit<'_, G1, TrivialTestCircuit<::Base>> = + NovaAugmentedCircuit::new(&secondary_params, Some(inputs2), &ttc2, ro_consts2); let _ = circuit2.synthesize(&mut cs2); let (inst2, witness2) = cs2.r1cs_instance_and_witness(&shape2, &ck2).unwrap(); // Make sure that it is satisfiable diff --git a/src/gadgets/ecc.rs b/src/gadgets/ecc.rs index e3ed1c090..f6c2d1dde 100644 --- a/src/gadgets/ecc.rs +++ b/src/gadgets/ecc.rs @@ -81,7 +81,7 @@ where } /// Returns coordinates associated with the point. - pub fn get_coordinates( + pub const fn get_coordinates( &self, ) -> ( &AllocatedNum, @@ -570,7 +570,7 @@ where G: Group, { /// Creates a new AllocatedPointNonInfinity from the specified coordinates - pub fn new(x: AllocatedNum, y: AllocatedNum) -> Self { + pub const fn new(x: AllocatedNum, y: AllocatedNum) -> Self { Self { x, y } } @@ -610,7 +610,7 @@ where } /// Returns coordinates associated with the point. - pub fn get_coordinates(&self) -> (&AllocatedNum, &AllocatedNum) { + pub const fn get_coordinates(&self) -> (&AllocatedNum, &AllocatedNum) { (&self.x, &self.y) } diff --git a/src/gadgets/nonnative/bignat.rs b/src/gadgets/nonnative/bignat.rs index 9db484802..eb3144ef8 100644 --- a/src/gadgets/nonnative/bignat.rs +++ b/src/gadgets/nonnative/bignat.rs @@ -783,7 +783,9 @@ impl Polynomial { #[cfg(test)] mod tests { use super::*; - use bellperson::Circuit; + use bellperson::{gadgets::test::TestConstraintSystem, Circuit}; + use pasta_curves::pallas::Scalar; + use proptest::prelude::*; pub struct PolynomialMultiplier { pub a: Vec, @@ -818,4 +820,79 @@ mod tests { Ok(()) } } + + #[test] + fn test_polynomial_multiplier_circuit() { + let mut cs = TestConstraintSystem::::new(); + + let circuit = PolynomialMultiplier { + a: [1, 1, 1].iter().map(|i| Scalar::from_u128(*i)).collect(), + b: [1, 1].iter().map(|i| Scalar::from_u128(*i)).collect(), + }; + + circuit.synthesize(&mut cs).expect("synthesis failed"); + + if let Some(token) = cs.which_is_unsatisfied() { + eprintln!("Error: {} is unsatisfied", token); + } + } + + #[derive(Debug)] + pub struct BigNatBitDecompInputs { + pub n: BigInt, + } + + pub struct BigNatBitDecompParams { + pub limb_width: usize, + pub n_limbs: usize, + } + + pub struct BigNatBitDecomp { + inputs: Option, + params: BigNatBitDecompParams, + } + + impl Circuit for BigNatBitDecomp { + fn synthesize>(self, cs: &mut CS) -> Result<(), SynthesisError> { + let n = BigNat::alloc_from_nat( + cs.namespace(|| "n"), + || Ok(self.inputs.grab()?.n.clone()), + self.params.limb_width, + self.params.n_limbs, + )?; + n.decompose(cs.namespace(|| "decomp"))?; + Ok(()) + } + } + + proptest! { + + #![proptest_config(ProptestConfig { + cases: 10, // this test is costlier as max n gets larger + .. 
ProptestConfig::default() + })] + #[test] + fn test_big_nat_can_decompose(n in any::(), limb_width in 40u8..200) { + let n = n as usize; + + let n_limbs = if n == 0 { + 1 + } else { + (n - 1) / limb_width as usize + 1 + }; + + let circuit = BigNatBitDecomp { + inputs: Some(BigNatBitDecompInputs { + n: BigInt::from(n), + }), + params: BigNatBitDecompParams { + limb_width: limb_width as usize, + n_limbs, + }, + }; + let mut cs = TestConstraintSystem::::new(); + circuit.synthesize(&mut cs).expect("synthesis failed"); + prop_assert!(cs.is_satisfied()); + } + } } diff --git a/src/gadgets/nonnative/util.rs b/src/gadgets/nonnative/util.rs index e0cceee58..486270d25 100644 --- a/src/gadgets/nonnative/util.rs +++ b/src/gadgets/nonnative/util.rs @@ -69,7 +69,7 @@ pub struct Num { } impl Num { - pub fn new(value: Option, num: LinearCombination) -> Self { + pub const fn new(value: Option, num: LinearCombination) -> Self { Self { value, num } } pub fn alloc(mut cs: CS, value: F) -> Result diff --git a/src/lib.rs b/src/lib.rs index d2df81a8d..78a0e1389 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -88,7 +88,7 @@ where C2: StepCircuit, { /// Create a new `PublicParams` - pub fn setup(c_primary: C1, c_secondary: C2) -> Self { + pub fn setup(c_primary: &C1, c_secondary: &C2) -> Self { let augmented_circuit_params_primary = NovaAugmentedCircuitParams::new(BN_LIMB_WIDTH, BN_N_LIMBS, true); let augmented_circuit_params_secondary = @@ -105,8 +105,8 @@ where let ro_consts_circuit_secondary: ROConstantsCircuit = ROConstantsCircuit::::new(); // Initialize ck for the primary - let circuit_primary: NovaAugmentedCircuit = NovaAugmentedCircuit::new( - augmented_circuit_params_primary.clone(), + let circuit_primary: NovaAugmentedCircuit<'_, G2, C1> = NovaAugmentedCircuit::new( + &augmented_circuit_params_primary, None, c_primary, ro_consts_circuit_primary.clone(), @@ -116,8 +116,8 @@ where let (r1cs_shape_primary, ck_primary) = cs.r1cs_shape(); // Initialize ck for the secondary - let circuit_secondary: NovaAugmentedCircuit = NovaAugmentedCircuit::new( - augmented_circuit_params_secondary.clone(), + let circuit_secondary: NovaAugmentedCircuit<'_, G1, C2> = NovaAugmentedCircuit::new( + &augmented_circuit_params_secondary, None, c_secondary, ro_consts_circuit_secondary.clone(), @@ -151,7 +151,7 @@ where } /// Returns the number of constraints in the primary and secondary circuits - pub fn num_constraints(&self) -> (usize, usize) { + pub const fn num_constraints(&self) -> (usize, usize) { ( self.r1cs_shape_primary.num_cons, self.r1cs_shape_secondary.num_cons, @@ -159,7 +159,7 @@ where } /// Returns the number of variables in the primary and secondary circuits - pub fn num_variables(&self) -> (usize, usize) { + pub const fn num_variables(&self) -> (usize, usize) { ( self.r1cs_shape_primary.num_vars, self.r1cs_shape_secondary.num_vars, @@ -221,10 +221,10 @@ where None, ); - let circuit_primary: NovaAugmentedCircuit = NovaAugmentedCircuit::new( - pp.augmented_circuit_params_primary.clone(), + let circuit_primary: NovaAugmentedCircuit<'_, G2, C1> = NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_primary, Some(inputs_primary), - c_primary.clone(), + c_primary, pp.ro_consts_circuit_primary.clone(), ); let _ = circuit_primary.synthesize(&mut cs_primary); @@ -244,10 +244,10 @@ where Some(u_primary.clone()), None, ); - let circuit_secondary: NovaAugmentedCircuit = NovaAugmentedCircuit::new( - pp.augmented_circuit_params_secondary.clone(), + let circuit_secondary: NovaAugmentedCircuit<'_, G1, C2> = 
NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_secondary, Some(inputs_secondary), - c_secondary.clone(), + c_secondary, pp.ro_consts_circuit_secondary.clone(), ); let _ = circuit_secondary.synthesize(&mut cs_secondary); @@ -333,10 +333,10 @@ where Some(Commitment::::decompress(&nifs_secondary.comm_T)?), ); - let circuit_primary: NovaAugmentedCircuit = NovaAugmentedCircuit::new( - pp.augmented_circuit_params_primary.clone(), + let circuit_primary: NovaAugmentedCircuit<'_, G2, C1> = NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_primary, Some(inputs_primary), - c_primary.clone(), + c_primary, pp.ro_consts_circuit_primary.clone(), ); let _ = circuit_primary.synthesize(&mut cs_primary); @@ -370,10 +370,10 @@ where Some(Commitment::::decompress(&nifs_primary.comm_T)?), ); - let circuit_secondary: NovaAugmentedCircuit = NovaAugmentedCircuit::new( - pp.augmented_circuit_params_secondary.clone(), + let circuit_secondary: NovaAugmentedCircuit<'_, G1, C2> = NovaAugmentedCircuit::new( + &pp.augmented_circuit_params_secondary, Some(inputs_secondary), - c_secondary.clone(), + c_secondary, pp.ro_consts_circuit_secondary.clone(), ); let _ = circuit_secondary.synthesize(&mut cs_secondary); @@ -870,7 +870,7 @@ mod tests { T1: StepCircuit, T2: StepCircuit, { - let pp = PublicParams::::setup(circuit1, circuit2); + let pp = PublicParams::::setup(&circuit1, &circuit2); let digest_str = pp .digest @@ -934,7 +934,7 @@ mod tests { G2, TrivialTestCircuit<::Scalar>, TrivialTestCircuit<::Scalar>, - >::setup(test_circuit1.clone(), test_circuit2.clone()); + >::setup(&test_circuit1, &test_circuit2); let num_steps = 1; @@ -990,7 +990,7 @@ mod tests { G2, TrivialTestCircuit<::Scalar>, CubicCircuit<::Scalar>, - >::setup(circuit_primary.clone(), circuit_secondary.clone()); + >::setup(&circuit_primary, &circuit_secondary); let num_steps = 3; @@ -1077,7 +1077,7 @@ mod tests { G2, TrivialTestCircuit<::Scalar>, CubicCircuit<::Scalar>, - >::setup(circuit_primary.clone(), circuit_secondary.clone()); + >::setup(&circuit_primary, &circuit_secondary); let num_steps = 3; @@ -1172,7 +1172,7 @@ mod tests { G2, TrivialTestCircuit<::Scalar>, CubicCircuit<::Scalar>, - >::setup(circuit_primary.clone(), circuit_secondary.clone()); + >::setup(&circuit_primary, &circuit_secondary); let num_steps = 3; @@ -1344,7 +1344,7 @@ mod tests { G2, FifthRootCheckingCircuit<::Scalar>, TrivialTestCircuit<::Scalar>, - >::setup(circuit_primary, circuit_secondary.clone()); + >::setup(&circuit_primary, &circuit_secondary); let num_steps = 3; @@ -1422,7 +1422,7 @@ mod tests { G2, TrivialTestCircuit<::Scalar>, CubicCircuit<::Scalar>, - >::setup(test_circuit1.clone(), test_circuit2.clone()); + >::setup(&test_circuit1, &test_circuit2); let num_steps = 1; diff --git a/src/provider/ipa_pc.rs b/src/provider/ipa_pc.rs index fa8068bbd..0ae536abc 100644 --- a/src/provider/ipa_pc.rs +++ b/src/provider/ipa_pc.rs @@ -177,7 +177,7 @@ where G: Group, CommitmentKey: CommitmentKeyExtTrait, { - fn protocol_name() -> &'static [u8] { + const fn protocol_name() -> &'static [u8] { b"IPA" } diff --git a/src/provider/pasta.rs b/src/provider/pasta.rs index 1f5284ebc..471ee3280 100644 --- a/src/provider/pasta.rs +++ b/src/provider/pasta.rs @@ -31,7 +31,7 @@ pub struct PallasCompressedElementWrapper { impl PallasCompressedElementWrapper { /// Wraps repr into the wrapper - pub fn new(repr: [u8; 32]) -> Self { + pub const fn new(repr: [u8; 32]) -> Self { Self { repr } } } @@ -44,7 +44,7 @@ pub struct VestaCompressedElementWrapper { impl 
VestaCompressedElementWrapper { /// Wraps repr into the wrapper - pub fn new(repr: [u8; 32]) -> Self { + pub const fn new(repr: [u8; 32]) -> Self { Self { repr } } } diff --git a/src/provider/pedersen.rs b/src/provider/pedersen.rs index bad1247e9..fe00c52fd 100644 --- a/src/provider/pedersen.rs +++ b/src/provider/pedersen.rs @@ -203,7 +203,9 @@ impl CommitmentEngineTrait for CommitmentEngine { } } -pub(crate) trait CommitmentKeyExtTrait { +/// A trait listing properties of a commitment key that can be managed in a divide-and-conquer fashion +pub trait CommitmentKeyExtTrait { + /// Holds the type of the commitment engine type CE: CommitmentEngineTrait; /// Splits the commitment key into two pieces at a specified point diff --git a/src/r1cs.rs b/src/r1cs.rs index 76d93ed46..105101ec7 100644 --- a/src/r1cs.rs +++ b/src/r1cs.rs @@ -139,6 +139,16 @@ impl R1CSShape { }) } + // Checks regularity conditions on the R1CSShape, required in Spartan-class SNARKs + // Panics if num_cons, num_vars, or num_io are not powers of two, or if num_io > num_vars + #[inline] + pub(crate) fn check_regular_shape(&self) { + assert_eq!(self.num_cons.next_power_of_two(), self.num_cons); + assert_eq!(self.num_vars.next_power_of_two(), self.num_vars); + assert_eq!(self.num_io.next_power_of_two(), self.num_io); + assert!(self.num_io < self.num_vars); + } + pub fn multiply_vec( &self, z: &[G::Scalar], diff --git a/src/spartan/polynomial.rs b/src/spartan/polynomial.rs index 97255d652..ee8ecba54 100644 --- a/src/spartan/polynomial.rs +++ b/src/spartan/polynomial.rs @@ -31,7 +31,7 @@ impl EqPolynomial { /// Creates a new `EqPolynomial` from a vector of Scalars `r`. /// /// Each Scalar in `r` corresponds to a bit from the binary representation of an input value `e`. - pub fn new(r: Vec) -> Self { + pub const fn new(r: Vec) -> Self { EqPolynomial { r } } @@ -111,7 +111,7 @@ impl MultilinearPolynomial { } /// Returns the number of variables in the multilinear polynomial - pub fn get_num_vars(&self) -> usize { + pub const fn get_num_vars(&self) -> usize { self.num_vars } @@ -160,7 +160,7 @@ impl MultilinearPolynomial { (0..chis.len()) .into_par_iter() .map(|i| chis[i] * self.Z[i]) - .reduce(|| Scalar::ZERO, |x, y| x + y) + .sum() } /// Evaluates the polynomial with the given evaluations and point. 
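The `evaluate` method above reduces multilinear evaluation to an inner product between the stored table Z and the eq table at the query point. A standalone sketch over plain integers that checks this identity for two variables; field elements and Nova's own index-ordering conventions are abstracted away, so treat this as an illustration rather than the crate's code:

    // Build eq(r, y) for all y in {0,1}^n; the first entry of r drives the
    // most significant index bit in this toy ordering.
    fn eq_evals(r: &[i64]) -> Vec<i64> {
        let mut evals = vec![1i64];
        for &rj in r {
            let mut next = Vec::with_capacity(evals.len() * 2);
            for &e in &evals {
                next.push(e * (1 - rj)); // next bit = 0
                next.push(e * rj);       // next bit = 1
            }
            evals = next;
        }
        evals
    }

    fn main() {
        let z = [2i64, 5, 7, 11]; // table values on (0,0), (0,1), (1,0), (1,1)
        let r = [3i64, -2];       // query point; any ring elements work
        let chis = eq_evals(&r);
        let inner: i64 = chis.iter().zip(z.iter()).map(|(c, zi)| c * zi).sum();
        // Direct bilinear interpolation of the same table, for comparison.
        let direct = (1 - r[0]) * (1 - r[1]) * z[0]
            + (1 - r[0]) * r[1] * z[1]
            + r[0] * (1 - r[1]) * z[2]
            + r[0] * r[1] * z[3];
        assert_eq!(inner, direct);
    }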
diff --git a/src/spartan/ppsnark.rs b/src/spartan/ppsnark.rs index 0011f463b..57bec8fe7 100644 --- a/src/spartan/ppsnark.rs +++ b/src/spartan/ppsnark.rs @@ -119,51 +119,34 @@ impl R1CSShapeSparkRepr { max(total_nz, max(2 * S.num_vars, S.num_cons)).next_power_of_two() }; - let row = { - let mut r = S - .A - .iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(r, _, _)| *r) - .collect::>(); - r.resize(N, 0usize); - r - }; + let (mut row, mut col) = (vec![0usize; N], vec![0usize; N]); - let col = { - let mut c = S - .A - .iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(_, c, _)| *c) - .collect::>(); - c.resize(N, 0usize); - c - }; + for (i, (r, c, _)) in S.A.iter().chain(S.B.iter()).chain(S.C.iter()).enumerate() { + row[i] = *r; + col[i] = *c; + } let val_A = { - let mut val = S.A.iter().map(|(_, _, v)| *v).collect::>(); - val.resize(N, G::Scalar::ZERO); + let mut val = vec![G::Scalar::ZERO; N]; + for (i, (_, _, v)) in S.A.iter().enumerate() { + val[i] = *v; + } val }; let val_B = { - // prepend zeros - let mut val = vec![G::Scalar::ZERO; S.A.len()]; - val.extend(S.B.iter().map(|(_, _, v)| *v).collect::>()); - // append zeros - val.resize(N, G::Scalar::ZERO); + let mut val = vec![G::Scalar::ZERO; N]; + for (i, (_, _, v)) in S.B.iter().enumerate() { + val[S.A.len() + i] = *v; + } val }; let val_C = { - // prepend zeros - let mut val = vec![G::Scalar::ZERO; S.A.len() + S.B.len()]; - val.extend(S.C.iter().map(|(_, _, v)| *v).collect::>()); - // append zeros - val.resize(N, G::Scalar::ZERO); + let mut val = vec![G::Scalar::ZERO; N]; + for (i, (_, _, v)) in S.C.iter().enumerate() { + val[S.A.len() + S.B.len() + i] = *v; + } val }; @@ -265,29 +248,30 @@ impl R1CSShapeSparkRepr { let mem_row = EqPolynomial::new(r_x_padded).evals(); let mem_col = { - let mut z = z.to_vec(); - z.resize(self.N, G::Scalar::ZERO); - z + let mut val = vec![G::Scalar::ZERO; self.N]; + for (i, v) in z.iter().enumerate() { + val[i] = *v; + } + val }; - let mut E_row = S - .A - .iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(r, _, _)| mem_row[*r]) - .collect::>(); - - let mut E_col = S - .A - .iter() - .chain(S.B.iter()) - .chain(S.C.iter()) - .map(|(_, c, _)| mem_col[*c]) - .collect::>(); + let (E_row, E_col) = { + let mut E_row = vec![mem_row[0]; self.N]; // we place mem_row[0] since resized row is appended with 0s + let mut E_col = vec![mem_col[0]; self.N]; - E_row.resize(self.N, mem_row[0]); // we place mem_row[0] since resized row is appended with 0s - E_col.resize(self.N, mem_col[0]); + for (i, (val_r, val_c)) in S + .A + .iter() + .chain(S.B.iter()) + .chain(S.C.iter()) + .map(|(r, c, _)| (mem_row[*r], mem_col[*c])) + .enumerate() + { + E_row[i] = val_r; + E_col[i] = val_c; + } + (E_row, E_col) + }; (mem_row, mem_col, E_row, E_col) } @@ -411,12 +395,10 @@ impl ProductSumcheckInstance { let poly_A = MultilinearPolynomial::new(EqPolynomial::new(rand_eq).evals()); let poly_B_vec = left_vec - .clone() .into_par_iter() .map(MultilinearPolynomial::new) .collect::>(); let poly_C_vec = right_vec - .clone() .into_par_iter() .map(MultilinearPolynomial::new) .collect::>(); @@ -477,43 +459,10 @@ impl SumcheckEngine for ProductSumcheckInstance { .zip(self.poly_C_vec.iter()) .zip(self.poly_D_vec.iter()) .map(|((poly_B, poly_C), poly_D)| { - let len = poly_B.len() / 2; // Make an iterator returning the contributions to the evaluations - let (eval_point_0, eval_point_2, eval_point_3) = (0..len) - .into_par_iter() - .map(|i| { - // eval 0: bound_func is A(low) - let eval_point_0 = comb_func(&poly_A[i], 
&poly_B[i], &poly_C[i], &poly_D[i]); - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; - let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i]; - let eval_point_2 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - &poly_D_bound_point, - ); - - // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) - let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; - let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i]; - let eval_point_3 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - &poly_D_bound_point, - ); - (eval_point_0, eval_point_2, eval_point_3) - }) - .reduce( - || (G::Scalar::ZERO, G::Scalar::ZERO, G::Scalar::ZERO), - |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), - ); + let (eval_point_0, eval_point_2, eval_point_3) = + SumcheckProof::::compute_eval_points_cubic(poly_A, poly_B, poly_C, poly_D, &comb_func); + vec![eval_point_0, eval_point_2, eval_point_3] }) .collect::>>() @@ -584,44 +533,10 @@ impl SumcheckEngine for OuterSumcheckInstance { poly_C_comp: &G::Scalar, poly_D_comp: &G::Scalar| -> G::Scalar { *poly_A_comp * (*poly_B_comp * *poly_C_comp - *poly_D_comp) }; - let len = poly_A.len() / 2; // Make an iterator returning the contributions to the evaluations - let (eval_point_0, eval_point_2, eval_point_3) = (0..len) - .into_par_iter() - .map(|i| { - // eval 0: bound_func is A(low) - let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]); - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i]; - let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i]; - let eval_point_2 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - &poly_D_bound_point, - ); - - // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2) - let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i]; - let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i]; - let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i]; - let eval_point_3 = comb_func( - &poly_A_bound_point, - &poly_B_bound_point, - &poly_C_bound_point, - &poly_D_bound_point, - ); - (eval_point_0, eval_point_2, eval_point_3) - }) - .reduce( - || (G::Scalar::ZERO, G::Scalar::ZERO, G::Scalar::ZERO), - |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2), - ); + let (eval_point_0, eval_point_2, eval_point_3) = + SumcheckProof::::compute_eval_points_cubic(poly_A, poly_B, poly_C, poly_D, &comb_func); vec![vec![eval_point_0, eval_point_2, eval_point_3]] } @@ -673,6 +588,8 @@ impl SumcheckEngine for InnerSumcheckInstance { -> G::Scalar { *poly_A_comp * *poly_B_comp * *poly_C_comp }; let len = poly_A.len() / 2; + // TODO: make this call a function in sumcheck.rs by writing an n-ary variant 
of crate::spartan::sumcheck::SumcheckProof::::compute_eval_points_cubic + // once #[feature(array_methods)] stabilizes (this n-ary variant would need array::each_ref) // Make an iterator returning the contributions to the evaluations let (eval_point_0, eval_point_2, eval_point_3) = (0..len) .into_par_iter() @@ -862,7 +779,7 @@ impl> RelaxedR1CSSNARK let mut e = claim; let mut r: Vec = Vec::new(); - let mut cubic_polys: Vec> = Vec::new(); + let mut cubic_polys: Vec> = Vec::new(); let num_rounds = mem.size().log_2(); for _i in 0..num_rounds { let mut evals: Vec> = Vec::new(); @@ -967,10 +884,7 @@ impl> RelaxedR1CSSNARKTrait> RelaxedR1CSSNARKTrait> RelaxedR1CSSNARKTrait> RelaxedR1CSSNARKTrait G::Scalar { (0..M.len()) - .collect::>() - .par_iter() - .map(|&i| { + .into_par_iter() + .map(|i| { let (row, col, val) = M[i]; T_x[row] * T_y[col] * val }) - .reduce(|| G::Scalar::ZERO, |acc, x| acc + x) + .sum() }; let (T_x, T_y) = rayon::join( @@ -436,9 +432,8 @@ impl> RelaxedR1CSSNARKTrait>() - .par_iter() - .map(|&i| evaluate_with_table(M_vec[i], &T_x, &T_y)) + .into_par_iter() + .map(|i| evaluate_with_table(M_vec[i], &T_x, &T_y)) .collect() }; diff --git a/src/spartan/sumcheck.rs b/src/spartan/sumcheck.rs index 01d99c5f2..fc47a56bb 100644 --- a/src/spartan/sumcheck.rs +++ b/src/spartan/sumcheck.rs @@ -3,19 +3,18 @@ use super::polynomial::MultilinearPolynomial; use crate::errors::NovaError; use crate::traits::{Group, TranscriptEngineTrait, TranscriptReprTrait}; -use core::marker::PhantomData; -use ff::Field; +use ff::{Field, PrimeField}; use rayon::prelude::*; use serde::{Deserialize, Serialize}; #[derive(Clone, Debug, Serialize, Deserialize)] #[serde(bound = "")] pub(crate) struct SumcheckProof { - compressed_polys: Vec>, + compressed_polys: Vec>, } impl SumcheckProof { - pub fn new(compressed_polys: Vec>) -> Self { + pub fn new(compressed_polys: Vec>) -> Self { Self { compressed_polys } } @@ -61,6 +60,34 @@ impl SumcheckProof { Ok((e, r)) } + #[inline] + pub(in crate::spartan) fn compute_eval_points_quadratic( + poly_A: &MultilinearPolynomial, + poly_B: &MultilinearPolynomial, + comb_func: &F, + ) -> (G::Scalar, G::Scalar) + where + F: Fn(&G::Scalar, &G::Scalar) -> G::Scalar + Sync, + { + let len = poly_A.len() / 2; + (0..len) + .into_par_iter() + .map(|i| { + // eval 0: bound_func is A(low) + let eval_point_0 = comb_func(&poly_A[i], &poly_B[i]); + + // eval 2: bound_func is -A(low) + 2*A(high) + let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; + let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i]; + let eval_point_2 = comb_func(&poly_A_bound_point, &poly_B_bound_point); + (eval_point_0, eval_point_2) + }) + .reduce( + || (G::Scalar::ZERO, G::Scalar::ZERO), + |a, b| (a.0 + b.0, a.1 + b.1), + ) + } + pub fn prove_quad( claim: &G::Scalar, num_rounds: usize, @@ -73,29 +100,12 @@ impl SumcheckProof { F: Fn(&G::Scalar, &G::Scalar) -> G::Scalar + Sync, { let mut r: Vec = Vec::new(); - let mut polys: Vec> = Vec::new(); + let mut polys: Vec> = Vec::new(); let mut claim_per_round = *claim; for _ in 0..num_rounds { let poly = { - let len = poly_A.len() / 2; - - // Make an iterator returning the contributions to the evaluations - let (eval_point_0, eval_point_2) = (0..len) - .into_par_iter() - .map(|i| { - // eval 0: bound_func is A(low) - let eval_point_0 = comb_func(&poly_A[i], &poly_B[i]); - - // eval 2: bound_func is -A(low) + 2*A(high) - let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i]; - let poly_B_bound_point = poly_B[len + i] + 
   pub fn prove_quad<F>(
     claim: &G::Scalar,
     num_rounds: usize,
@@ -73,29 +100,12 @@ impl<G: Group> SumcheckProof<G> {
     F: Fn(&G::Scalar, &G::Scalar) -> G::Scalar + Sync,
   {
     let mut r: Vec<G::Scalar> = Vec::new();
-    let mut polys: Vec<CompressedUniPoly<G>> = Vec::new();
+    let mut polys: Vec<CompressedUniPoly<G::Scalar>> = Vec::new();
     let mut claim_per_round = *claim;

     for _ in 0..num_rounds {
       let poly = {
-        let len = poly_A.len() / 2;
-
-        // Make an iterator returning the contributions to the evaluations
-        let (eval_point_0, eval_point_2) = (0..len)
-          .into_par_iter()
-          .map(|i| {
-            // eval 0: bound_func is A(low)
-            let eval_point_0 = comb_func(&poly_A[i], &poly_B[i]);
-
-            // eval 2: bound_func is -A(low) + 2*A(high)
-            let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
-            let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
-            let eval_point_2 = comb_func(&poly_A_bound_point, &poly_B_bound_point);
-            (eval_point_0, eval_point_2)
-          })
-          .reduce(
-            || (G::Scalar::ZERO, G::Scalar::ZERO),
-            |a, b| (a.0 + b.0, a.1 + b.1),
-          );
+        let (eval_point_0, eval_point_2) =
+          Self::compute_eval_points_quadratic(poly_A, poly_B, &comb_func);

         let evals = vec![eval_point_0, claim_per_round - eval_point_0, eval_point_2];
         UniPoly::from_evals(&evals)
@@ -136,30 +146,18 @@ impl<G: Group> SumcheckProof<G> {
     transcript: &mut G::TE,
   ) -> Result<(Self, Vec<G::Scalar>, (Vec<G::Scalar>, Vec<G::Scalar>)), NovaError>
   where
-    F: Fn(&G::Scalar, &G::Scalar) -> G::Scalar,
+    F: Fn(&G::Scalar, &G::Scalar) -> G::Scalar + Sync,
   {
     let mut e = *claim;
     let mut r: Vec<G::Scalar> = Vec::new();
-    let mut quad_polys: Vec<CompressedUniPoly<G>> = Vec::new();
+    let mut quad_polys: Vec<CompressedUniPoly<G::Scalar>> = Vec::new();

     for _j in 0..num_rounds {
       let mut evals: Vec<(G::Scalar, G::Scalar)> = Vec::new();
       for (poly_A, poly_B) in poly_A_vec.iter().zip(poly_B_vec.iter()) {
-        let mut eval_point_0 = G::Scalar::ZERO;
-        let mut eval_point_2 = G::Scalar::ZERO;
-
-        let len = poly_A.len() / 2;
-        for i in 0..len {
-          // eval 0: bound_func is A(low)
-          eval_point_0 += comb_func(&poly_A[i], &poly_B[i]);
-
-          // eval 2: bound_func is -A(low) + 2*A(high)
-          let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
-          let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
-          eval_point_2 += comb_func(&poly_A_bound_point, &poly_B_bound_point);
-        }
-
+        let (eval_point_0, eval_point_2) =
+          Self::compute_eval_points_quadratic(poly_A, poly_B, &comb_func);
         evals.push((eval_point_0, eval_point_2));
       }

@@ -193,6 +191,55 @@ impl<G: Group> SumcheckProof<G> {
     Ok((SumcheckProof::new(quad_polys), r, claims_prod))
   }

+  #[inline]
+  pub(in crate::spartan) fn compute_eval_points_cubic<F>(
+    poly_A: &MultilinearPolynomial<G::Scalar>,
+    poly_B: &MultilinearPolynomial<G::Scalar>,
+    poly_C: &MultilinearPolynomial<G::Scalar>,
+    poly_D: &MultilinearPolynomial<G::Scalar>,
+    comb_func: &F,
+  ) -> (G::Scalar, G::Scalar, G::Scalar)
+  where
+    F: Fn(&G::Scalar, &G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar + Sync,
+  {
+    let len = poly_A.len() / 2;
+    (0..len)
+      .into_par_iter()
+      .map(|i| {
+        // eval 0: bound_func is A(low)
+        let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]);
+
+        // eval 2: bound_func is -A(low) + 2*A(high)
+        let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
+        let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
+        let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
+        let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i];
+        let eval_point_2 = comb_func(
+          &poly_A_bound_point,
+          &poly_B_bound_point,
+          &poly_C_bound_point,
+          &poly_D_bound_point,
+        );
+
+        // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
+        let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
+        let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
+        let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
+        let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i];
+        let eval_point_3 = comb_func(
+          &poly_A_bound_point,
+          &poly_B_bound_point,
+          &poly_C_bound_point,
+          &poly_D_bound_point,
+        );
+        (eval_point_0, eval_point_2, eval_point_3)
+      })
+      .reduce(
+        || (G::Scalar::ZERO, G::Scalar::ZERO, G::Scalar::ZERO),
+        |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2),
+      )
+  }
+
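The eval_point_3 branch above reuses the t = 2 bound points: each bound point is linear in the bound variable, so adding (high - low) once more moves it from t = 2 to t = 3 before comb_func is re-applied. A short standalone check, again with i64 stand-ins for field elements (illustrative only, not part of the patch):

// Illustrative sketch: the incremental step from t = 2 to t = 3.
fn bound_at(low: i64, high: i64, t: i64) -> i64 {
  (1 - t) * low + t * high
}

fn main() {
  let (a_low, a_high) = (3i64, 7);
  let (b_low, b_high) = (2i64, -1);
  let comb = |a: i64, b: i64| a * b; // toy comb_func

  // bound points at t = 2, as in the code above
  let a2 = a_high + a_high - a_low;
  let b2 = b_high + b_high - b_low;
  // incremental update to t = 3
  let a3 = a2 + (a_high - a_low);
  let b3 = b2 + (b_high - b_low);

  assert_eq!(comb(a3, b3), comb(bound_at(a_low, a_high, 3), bound_at(b_low, b_high, 3)));
}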
   pub fn prove_cubic_with_additive_term<F>(
     claim: &G::Scalar,
     num_rounds: usize,
@@ -207,49 +254,14 @@ impl<G: Group> SumcheckProof<G> {
     F: Fn(&G::Scalar, &G::Scalar, &G::Scalar, &G::Scalar) -> G::Scalar + Sync,
   {
     let mut r: Vec<G::Scalar> = Vec::new();
-    let mut polys: Vec<CompressedUniPoly<G>> = Vec::new();
+    let mut polys: Vec<CompressedUniPoly<G::Scalar>> = Vec::new();
     let mut claim_per_round = *claim;

     for _ in 0..num_rounds {
       let poly = {
-        let len = poly_A.len() / 2;
-
-        // Make an iterator returning the contributions to the evaluations
-        let (eval_point_0, eval_point_2, eval_point_3) = (0..len)
-          .into_par_iter()
-          .map(|i| {
-            // eval 0: bound_func is A(low)
-            let eval_point_0 = comb_func(&poly_A[i], &poly_B[i], &poly_C[i], &poly_D[i]);
-
-            // eval 2: bound_func is -A(low) + 2*A(high)
-            let poly_A_bound_point = poly_A[len + i] + poly_A[len + i] - poly_A[i];
-            let poly_B_bound_point = poly_B[len + i] + poly_B[len + i] - poly_B[i];
-            let poly_C_bound_point = poly_C[len + i] + poly_C[len + i] - poly_C[i];
-            let poly_D_bound_point = poly_D[len + i] + poly_D[len + i] - poly_D[i];
-            let eval_point_2 = comb_func(
-              &poly_A_bound_point,
-              &poly_B_bound_point,
-              &poly_C_bound_point,
-              &poly_D_bound_point,
-            );
-
-            // eval 3: bound_func is -2A(low) + 3A(high); computed incrementally with bound_func applied to eval(2)
-            let poly_A_bound_point = poly_A_bound_point + poly_A[len + i] - poly_A[i];
-            let poly_B_bound_point = poly_B_bound_point + poly_B[len + i] - poly_B[i];
-            let poly_C_bound_point = poly_C_bound_point + poly_C[len + i] - poly_C[i];
-            let poly_D_bound_point = poly_D_bound_point + poly_D[len + i] - poly_D[i];
-            let eval_point_3 = comb_func(
-              &poly_A_bound_point,
-              &poly_B_bound_point,
-              &poly_C_bound_point,
-              &poly_D_bound_point,
-            );
-            (eval_point_0, eval_point_2, eval_point_3)
-          })
-          .reduce(
-            || (G::Scalar::ZERO, G::Scalar::ZERO, G::Scalar::ZERO),
-            |a, b| (a.0 + b.0, a.1 + b.1, a.2 + b.2),
-          );
+        let (eval_point_0, eval_point_2, eval_point_3) =
+          Self::compute_eval_points_cubic(poly_A, poly_B, poly_C, poly_D, &comb_func);

         let evals = vec![
           eval_point_0,
@@ -291,25 +303,24 @@ impl<G: Group> SumcheckProof<G> {
 // ax^2 + bx + c stored as vec![a,b,c]
 // ax^3 + bx^2 + cx + d stored as vec![a,b,c,d]
 #[derive(Debug)]
-pub struct UniPoly<G: Group> {
-  coeffs: Vec<G::Scalar>,
+pub struct UniPoly<Scalar: PrimeField> {
+  coeffs: Vec<Scalar>,
 }

 // ax^2 + bx + c stored as vec![a,c]
 // ax^3 + bx^2 + cx + d stored as vec![a,c,d]
 #[derive(Clone, Debug, Serialize, Deserialize)]
-pub struct CompressedUniPoly<G: Group> {
-  coeffs_except_linear_term: Vec<G::Scalar>,
-  _p: PhantomData<G>,
+pub struct CompressedUniPoly<Scalar: PrimeField> {
+  coeffs_except_linear_term: Vec<Scalar>,
 }

-impl<G: Group> UniPoly<G> {
-  pub fn from_evals(evals: &[G::Scalar]) -> Self {
+impl<Scalar: PrimeField> UniPoly<Scalar> {
+  pub fn from_evals(evals: &[Scalar]) -> Self {
     // we only support degree-2 or degree-3 univariate polynomials
     assert!(evals.len() == 3 || evals.len() == 4);
     let coeffs = if evals.len() == 3 {
       // ax^2 + bx + c
-      let two_inv = G::Scalar::from(2).invert().unwrap();
+      let two_inv = Scalar::from(2).invert().unwrap();

       let c = evals[0];
       let a = two_inv * (evals[2] - evals[1] - evals[1] + c);
@@ -317,8 +328,8 @@ impl<G: Group> UniPoly<G> {
       vec![c, b, a]
     } else {
       // ax^3 + bx^2 + cx + d
-      let two_inv = G::Scalar::from(2).invert().unwrap();
-      let six_inv = G::Scalar::from(6).invert().unwrap();
+      let two_inv = Scalar::from(2).invert().unwrap();
+      let six_inv = Scalar::from(6).invert().unwrap();

       let d = evals[0];
       let a = six_inv
@@ -341,18 +352,18 @@ impl<G: Group> UniPoly<G> {
     self.coeffs.len() - 1
   }

-  pub fn eval_at_zero(&self) -> G::Scalar {
+  pub fn eval_at_zero(&self) -> Scalar {
     self.coeffs[0]
   }

-  pub fn eval_at_one(&self) -> G::Scalar {
+  pub fn eval_at_one(&self) -> Scalar {
     (0..self.coeffs.len())
       .into_par_iter()
       .map(|i| self.coeffs[i])
-      .reduce(|| G::Scalar::ZERO, |a, b| a + b)
+      .sum()
   }

-  pub fn evaluate(&self, r: &G::Scalar) -> G::Scalar {
+  pub fn evaluate(&self, r: &Scalar) -> Scalar {
     let mut eval = self.coeffs[0];
     let mut power = *r;
     for coeff in self.coeffs.iter().skip(1) {
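from_evals above interpolates coefficients from the evaluations at 0, 1, 2 (and 3 in the cubic branch): for the quadratic case c = f(0), a = (f(2) - 2*f(1) + f(0)) / 2 and b = f(1) - c - a, with the division by 2 realized as multiplication by two_inv in the field. A quick integer check of those formulas (illustrative only, not part of the patch):

// Illustrative sketch: UniPoly::from_evals' quadratic interpolation,
// checked over the integers for f(x) = 3x^2 + 2x + 5.
fn main() {
  let f = |x: i64| 3 * x * x + 2 * x + 5;
  let evals = [f(0), f(1), f(2)];

  let c = evals[0];
  let a = (evals[2] - evals[1] - evals[1] + c) / 2; // second finite difference / 2
  let b = evals[1] - c - a;

  assert_eq!((a, b, c), (3, 2, 5));
}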
@@ -362,27 +373,26 @@ impl<G: Group> UniPoly<G> {
     eval
   }

-  pub fn compress(&self) -> CompressedUniPoly<G> {
+  pub fn compress(&self) -> CompressedUniPoly<Scalar> {
     let coeffs_except_linear_term = [&self.coeffs[0..1], &self.coeffs[2..]].concat();
     assert_eq!(coeffs_except_linear_term.len() + 1, self.coeffs.len());
     CompressedUniPoly {
       coeffs_except_linear_term,
-      _p: Default::default(),
     }
   }
 }

-impl<G: Group> CompressedUniPoly<G> {
+impl<Scalar: PrimeField> CompressedUniPoly<Scalar> {
   // we require eval(0) + eval(1) = hint, so we can solve for the linear term as:
   // linear_term = hint - 2 * constant_term - deg2 term - deg3 term
-  pub fn decompress(&self, hint: &G::Scalar) -> UniPoly<G> {
+  pub fn decompress(&self, hint: &Scalar) -> UniPoly<Scalar> {
     let mut linear_term =
       *hint - self.coeffs_except_linear_term[0] - self.coeffs_except_linear_term[0];
     for i in 1..self.coeffs_except_linear_term.len() {
       linear_term -= self.coeffs_except_linear_term[i];
     }

-    let mut coeffs: Vec<G::Scalar> = Vec::new();
+    let mut coeffs: Vec<Scalar> = Vec::new();
     coeffs.push(self.coeffs_except_linear_term[0]);
     coeffs.push(linear_term);
     coeffs.extend(&self.coeffs_except_linear_term[1..]);
@@ -391,7 +401,7 @@ impl<G: Group> CompressedUniPoly<G> {
   }
 }

-impl<G: Group> TranscriptReprTrait<G> for UniPoly<G> {
+impl<G: Group> TranscriptReprTrait<G> for UniPoly<G::Scalar> {
   fn to_transcript_bytes(&self) -> Vec<u8> {
     let coeffs = self.compress().coeffs_except_linear_term;
     coeffs.as_slice().to_transcript_bytes()
diff --git a/src/traits/commitment.rs b/src/traits/commitment.rs
index 9b4725fc3..4ac8349ce 100644
--- a/src/traits/commitment.rs
+++ b/src/traits/commitment.rs
@@ -6,10 +6,12 @@ use crate::{
 };
 use core::{
   fmt::Debug,
-  ops::{Add, AddAssign, Mul, MulAssign},
+  ops::{Add, AddAssign},
 };
 use serde::{Deserialize, Serialize};

+use super::ScalarMul;
+
 /// Defines basic operations on commitments
 pub trait CommitmentOps<Rhs = Self, Output = Self>:
   Add<Rhs, Output = Output> + AddAssign<Rhs>
@@ -31,12 +33,6 @@ impl<T, Rhs, Output> CommitmentOpsOwned<Rhs, Output> for T where
 {
 }

-/// A helper trait for types implementing a multiplication of a commitment with a scalar
-pub trait ScalarMul<Rhs, Output = Self>: Mul<Rhs, Output = Output> + MulAssign<Rhs> {}
-
-impl<T, Rhs, Output> ScalarMul<Rhs, Output> for T where T: Mul<Rhs, Output = Output> + MulAssign<Rhs>
-{}
-
 /// This trait defines the behavior of the commitment
 pub trait CommitmentTrait<G: Group>:
   Clone
diff --git a/src/traits/mod.rs b/src/traits/mod.rs
index 5138cea8d..91d8d320e 100644
--- a/src/traits/mod.rs
+++ b/src/traits/mod.rs
@@ -41,8 +41,7 @@ pub trait Group:
     + for<'de> Deserialize<'de>;

   /// A type representing an element of the scalar field of the group
-  type Scalar: PrimeField
-    + PrimeFieldBits
+  type Scalar: PrimeFieldBits
     + PrimeFieldExt
     + Send
     + Sync
@@ -236,11 +235,9 @@ pub trait PrimeFieldExt: PrimeField {
 impl<G: Group, T: TranscriptReprTrait<G>> TranscriptReprTrait<G> for &[T] {
   fn to_transcript_bytes(&self) -> Vec<u8> {
-    (0..self.len())
-      .map(|i| self[i].to_transcript_bytes())
-      .collect::<Vec<_>>()
-      .into_iter()
-      .flatten()
+    self
+      .iter()
+      .flat_map(|t| t.to_transcript_bytes())
       .collect::<Vec<u8>>()
   }
 }
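Referring back to CompressedUniPoly::decompress in src/spartan/sumcheck.rs above: the linear coefficient is dropped from the serialized round polynomial and reconstructed from the verifier's hint via eval(0) + eval(1) = hint, i.e. b = hint - 2c - a for a quadratic a*x^2 + b*x + c (minus the degree-3 term in the cubic case). A standalone integer check of that recovery (illustrative only, not part of the patch):

// Illustrative sketch: recovering the omitted linear term from hint = s(0) + s(1),
// as CompressedUniPoly::decompress does.
fn main() {
  let (a, b, c) = (3i64, 2, 5);
  let s = |x: i64| a * x * x + b * x + c;

  // the proof ships only [c, a]; the verifier supplies hint = s(0) + s(1)
  let hint = s(0) + s(1);
  let recovered_b = hint - 2 * c - a;

  assert_eq!(recovered_b, b);
}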