diff --git a/vid/src/advz.rs b/vid/src/advz.rs index 49c89b2ac..7dbfae68b 100644 --- a/vid/src/advz.rs +++ b/vid/src/advz.rs @@ -295,10 +295,7 @@ where // TODO further aggregate into a single KZG proof. aggregate_proofs: Vec>, - // eval_proofs.len() equals multiplicity - // TODO put all evals into a single merkle proof - // https://github.com/EspressoSystems/jellyfish/issues/671 - eval_proofs: Vec>, + evals_proof: KzgEvalsMerkleTreeProof, } impl Share @@ -309,21 +306,14 @@ where /// Return a [`Vec`] of payload data for this share. /// /// These data are extracted from [`MerkleProof`] structs. The returned - /// [`Vec`] has length `multiplicity * num_polys`s + /// [`Vec`] has length `multiplicity * num_polys` /// /// TODO store these data in a new field `Share::evals` after fixing /// https://github.com/EspressoSystems/jellyfish/issues/658 - fn evals(&self) -> VidResult>> { - self.eval_proofs - .iter() - .map(|eval_proof| { - eval_proof - .elem() - .cloned() - .ok_or_else(|| VidError::Internal(anyhow::anyhow!("empty merkle proof"))) - }) - .flatten_ok() - .collect() + fn evals(&self) -> VidResult<&Vec>> { + self.evals_proof + .elem() + .ok_or_else(|| VidError::Internal(anyhow::anyhow!("empty merkle proof"))) } } @@ -456,6 +446,7 @@ where } let multiplicity = self.min_multiplicity(common.payload_byte_len.try_into().map_err(vid)?)?; + let multiplicity_usize = usize::try_from(multiplicity).map_err(vid)?; if common.multiplicity != multiplicity { return Err(VidError::Argument(format!( "common multiplicity {} differs from derived min {}", @@ -470,40 +461,34 @@ where common.poly_commits.len() ))); } - if share.eval_proofs.len() != multiplicity as usize { - return Err(VidError::Argument(format!( - "number of eval_proofs {} differs from common multiplicity {}", - share.eval_proofs.len(), - multiplicity, - ))); - } - Self::is_consistent(commit, common)?; - if share.index >= self.num_storage_nodes { return Ok(Err(())); // not an arg error } - // verify eval proofs - for i in 
0..multiplicity { - if KzgEvalsMerkleTree::::verify( - common.all_evals_digest, - &KzgEvalsMerkleTreeIndex::::from((share.index * multiplicity) + i), - &share.eval_proofs[i as usize], - ) - .map_err(vid)? - .is_err() - { - return Ok(Err(())); - } + // verify eval proof + if KzgEvalsMerkleTree::::verify( + common.all_evals_digest, + &KzgEvalsMerkleTreeIndex::::from(share.index), + &share.evals_proof, + ) + .map_err(vid)? + .is_err() + { + return Ok(Err(())); } + + // the magic pseudorandom scalar used for aggregation let pseudorandom_scalar = Self::pseudorandom_scalar(common, commit)?; - // Compute aggregate polynomial [commitment|evaluation] - // as a pseudorandom linear combo of [commitments|evaluations] - // via evaluation of the polynomial whose coefficients are - // [commitments|evaluations] and whose input point is the pseudorandom - // scalar. + // Compute an aggregate polynomial [commitment|evaluation] as a + // pseudorandom linear combo of [commitments|evaluations]. + // To this end use function `polynomial_eval` where each "coefficient" + // is actually a [commitment|evaluation] and the input point is + // `pseudorandom_scalar`. + // + // - Here: aggregate polynomial commitment in `aggregate_poly_commit`. + // - Below: aggregate polynomial evals in `aggregate_eval`. let aggregate_poly_commit = KzgCommit::::from( polynomial_eval( common @@ -515,40 +500,31 @@ where .into(), ); - // verify aggregate proof - // - // some boilerplate needed to accommodate builds without `parallel` - // feature. 
- let multiplicities = Vec::from_iter((0..multiplicity as usize)); + // convenience quantities + let multiplicities = Vec::from_iter((0..multiplicity_usize)); let polys_len = common.poly_commits.len(); let multi_open_domain = self.multi_open_domain(multiplicity)?; - let verification_iter = parallelizable_slice_iter(&multiplicities).map(|i| { - let range = i * polys_len..(i + 1) * polys_len; - let aggregate_eval = polynomial_eval( - evals - .get(range.clone()) - .ok_or_else(|| { - VidError::Internal(anyhow::anyhow!( - "share evals range {:?} out of bounds for length {}", - range, - evals.len() - )) - })? - .iter() - .map(FieldMultiplier), - pseudorandom_scalar, - ); + + // verify aggregate proofs + let verification_iter = parallelizable_slice_iter(&multiplicities).map(|m| { + let evals_iter = evals.iter().skip(*m).step_by(multiplicity_usize); + let aggregate_eval = + polynomial_eval(evals_iter.map(FieldMultiplier), pseudorandom_scalar); + let domain_index = usize::try_from(share.index).map_err(vid)? + + m * usize::try_from(self.num_storage_nodes).map_err(vid)?; Ok(UnivariateKzgPCS::verify( &self.vk, &aggregate_poly_commit, - &multi_open_domain.element((share.index * multiplicity) as usize + i), + &multi_open_domain.element(domain_index), &aggregate_eval, - &share.aggregate_proofs[*i], + &share.aggregate_proofs[*m], ) .map_err(vid)? .then_some(()) .ok_or(())) }); + + // Boilerplate needed to accommodate builds without `parallel` feature. let abort = |result: &VidResult>| match result { Ok(success) => success.is_err(), Err(_) => true, }; @@ -580,17 +556,17 @@ where ))); } - let shares_evals = shares + let all_shares_evals = shares .iter() .map(|share| share.evals()) .collect::>>()?; // check args: all shares must have equal evals len - let num_evals = shares_evals + let num_evals = all_shares_evals .first() .ok_or_else(|| VidError::Argument("shares is empty".into()))? 
.len(); - if let Some((index, share_evals)) = shares_evals + if let Some((index, share_evals)) = all_shares_evals .iter() .enumerate() .find(|(_, evals)| evals.len() != num_evals) @@ -612,8 +588,9 @@ where } // convenience quantities - let chunk_size = - usize::try_from(common.multiplicity * self.recovery_threshold).map_err(vid)?; + let multiplicity = usize::try_from(common.multiplicity).map_err(vid)?; + let num_storage_nodes = usize::try_from(self.num_storage_nodes).map_err(vid)?; + let chunk_size = multiplicity * usize::try_from(self.recovery_threshold).map_err(vid)?; let num_polys = common.poly_commits.len(); let elems_capacity = num_polys * chunk_size; let fft_domain = Self::eval_domain(chunk_size)?; @@ -621,12 +598,12 @@ where let mut elems = Vec::with_capacity(elems_capacity); let mut evals = Vec::with_capacity(num_evals); for p in 0..num_polys { - for (share, share_evals) in shares.iter().zip(shares_evals.iter()) { + for (share, share_evals) in shares.iter().zip(all_shares_evals.iter()) { // extract all evaluations for polynomial p from the share for m in 0..common.multiplicity as usize { evals.push(( - (share.index * common.multiplicity) as usize + m, - share_evals[(m * num_polys) + p], + usize::try_from(share.index).map_err(vid)? 
+ m * num_storage_nodes, + share_evals[p * multiplicity + m], )) } } @@ -698,7 +675,9 @@ where payload_byte_len, self.num_storage_nodes )); let multiplicity = self.min_multiplicity(payload.len())?; - let code_word_size = usize::try_from(multiplicity * self.num_storage_nodes).map_err(vid)?; + let multiplicity_usize = usize::try_from(multiplicity).map_err(vid)?; + let num_storage_nodes_usize = usize::try_from(self.num_storage_nodes).map_err(vid)?; + let code_word_size = num_storage_nodes_usize * multiplicity_usize; let multi_open_domain = self.multi_open_domain(multiplicity)?; // evaluate polynomials @@ -715,27 +694,41 @@ where code_word_size, &multi_open_domain, ) + .map(|poly_evals| { + assert_eq!(poly_evals.len(), code_word_size); // sanity + poly_evals + }) .map_err(vid) }) .collect::, VidError>>()?; + assert_eq!(all_poly_evals.len(), polys.len()); // sanity - // distribute evals from each poly among the storage nodes + // Populate a Vec of polynomial evaluations for all storage nodes: + // + // The `i`th item is a Vec of `polys.len() * multiplicity` + // polynomial evaluations. // - // perf warning: runtime is quadratic in payload_size - let mut all_storage_node_evals = vec![Vec::with_capacity(polys.len()); code_word_size]; + // Evaluations for storage node `i` are ordered as follows. Define + // - `n`: the number of storage nodes: `self.num_storage_nodes` + // - `k`: the number of polynomials minus 1: `polys.len() - 1` + // - `m`: `multiplicity - 1` + // - `p[j](x)`: the value of the `j`th polynomial evaluated at `x`. 
+ // + // p[0](i), p[0](i+n), ..., p[0](i+m*n), + // ..., + // p[k](i), p[k](i+n), ..., p[k](i+m*n), + let mut all_storage_node_evals = + vec![Vec::with_capacity(polys.len() * multiplicity_usize); num_storage_nodes_usize]; for poly_evals in all_poly_evals { - for (storage_node_evals, poly_eval) in all_storage_node_evals - .iter_mut() - .zip(poly_evals.into_iter()) - { - storage_node_evals.push(poly_eval); + for (i, poly_eval) in poly_evals.into_iter().enumerate() { + all_storage_node_evals[i % num_storage_nodes_usize].push(poly_eval); } } // sanity checks - assert_eq!(all_storage_node_evals.len(), code_word_size); + assert_eq!(all_storage_node_evals.len(), num_storage_nodes_usize); for storage_node_evals in all_storage_node_evals.iter() { - assert_eq!(storage_node_evals.len(), polys.len()); + assert_eq!(storage_node_evals.len(), polys.len() * multiplicity_usize); } all_storage_node_evals @@ -746,7 +739,7 @@ where let all_evals_commit_timer = start_timer!(|| "compute merkle root of all storage node evals"); let all_evals_commit = - KzgEvalsMerkleTree::::from_elems(None, &all_storage_node_evals).map_err(vid)?; + KzgEvalsMerkleTree::::from_elems(None, all_storage_node_evals).map_err(vid)?; end_timer!(all_evals_commit_timer); let common = Common { @@ -764,9 +757,10 @@ where )?; let pseudorandom_scalar = Self::pseudorandom_scalar(&common, &commit)?; - // Compute aggregate polynomial as a pseudorandom linear combo of polynomial via - // evaluation of the polynomial whose coefficients are polynomials and whose - // input point is the pseudorandom scalar. + // Compute the aggregate polynomial as a pseudorandom linear combo of + // `polys`. To this end use function `polynomial_eval` where each + // "coefficient" is actually a polynomial from `polys` and the input + // point is `pseudorandom_scalar`. 
let aggregate_poly = polynomial_eval(polys.iter().map(PolynomialMultiplier), pseudorandom_scalar); @@ -784,38 +778,48 @@ where end_timer!(agg_proofs_timer); let assemblage_timer = start_timer!(|| "assemble shares for dispersal"); - let shares: Vec<_> = { - // compute share data - let share_data = all_storage_node_evals - .into_iter() - .zip(aggregate_proofs) - .enumerate() - .map(|(i, (eval, proof))| { - let eval_proof = all_evals_commit + + // Populate a Vec of aggregate proofs for all storage nodes: + // + // The `i`th item is a Vec of `multiplicity` KZG proofs. + // + // Proofs for storage node `i` are ordered as follows. Define + // - `n`: the number of storage nodes: `self.num_storage_nodes` + // - `m`: `multiplicity - 1` + // - `p(x)`: the value of the aggregate polynomial `p` evaluated at `x`. + // + // p(i), p(i+n), ..., p(i+m*n) + let mut all_storage_node_aggregate_proofs = + vec![Vec::with_capacity(multiplicity_usize); num_storage_nodes_usize]; + for (i, aggregate_proof) in aggregate_proofs.into_iter().enumerate() { + all_storage_node_aggregate_proofs[i % num_storage_nodes_usize].push(aggregate_proof); + } + + // sanity checks + assert_eq!( + all_storage_node_aggregate_proofs.len(), + num_storage_nodes_usize + ); + for storage_node_aggregate_proof in all_storage_node_aggregate_proofs.iter() { + assert_eq!(storage_node_aggregate_proof.len(), multiplicity_usize); + } + + let shares = all_storage_node_aggregate_proofs + .into_iter() + .enumerate() + .map(|(i, aggregate_proofs)| { + Ok(Share { + index: u32::try_from(i).map_err(vid)?, + aggregate_proofs, + evals_proof: all_evals_commit .lookup(KzgEvalsMerkleTreeIndex::::from(i as u64)) .expect_ok() .map_err(vid)? 
- .1; - Ok((eval, proof, eval_proof)) + .1, }) - .collect::, VidError>>()?; + }) + .collect::>>()?; - // split share data into chunks of size multiplicity - share_data - .into_iter() - .chunks(multiplicity as usize) - .into_iter() - .enumerate() - .map(|(index, chunk)| { - let (evals, proofs, eval_proofs): (Vec<_>, _, _) = chunk.multiunzip(); - Share { - index: index as u32, - aggregate_proofs: proofs, - eval_proofs, - } - }) - .collect() - }; end_timer!(assemblage_timer); end_timer!(disperse_time); diff --git a/vid/src/advz/test.rs b/vid/src/advz/test.rs index e1530f528..49246dff8 100644 --- a/vid/src/advz/test.rs +++ b/vid/src/advz/test.rs @@ -4,6 +4,7 @@ use ark_std::{ rand::{CryptoRng, RngCore}, vec, }; +use core::ops::AddAssign; use jf_pcs::{ checked_fft_size, prelude::{Commitment, UnivariateUniversalParams}, @@ -57,24 +58,52 @@ fn sad_path_verify_share_corrupt_share() { let (shares, common, commit) = (disperse.shares, disperse.common, disperse.commit); for (i, share) in shares.iter().enumerate() { + // happy path: uncorrupted share + assert!(common.multiplicity > 1, "multiplicity should be nontrivial"); + advz.verify_share(&share, &common, &commit) + .unwrap() + .unwrap(); + // missing share eval { - let share_missing_eval = Share { - eval_proofs: share.eval_proofs[1..].to_vec(), - ..share.clone() - }; + let mut share_missing_eval = share.clone(); + Share::::extract_leaf_mut(&mut share_missing_eval.evals_proof) + .unwrap() + .pop(); assert_arg_err( advz.verify_share(&share_missing_eval, &common, &commit), "1 missing share should be arg error", ); } - // corrupted share eval + // corrupted share eval: domain index < num_storage_nodes + // + // The first eval (`first_mut` below) always has domain index `i` for `i + // < num_storage_nodes`. 
{ let mut share_bad_eval = share.clone(); - Share::::extract_leaf_mut(&mut share_bad_eval.eval_proofs[0]).unwrap() - [0] - .double_in_place(); + Share::::extract_leaf_mut(&mut share_bad_eval.evals_proof) + .unwrap() + .first_mut() + .unwrap() + .add_assign(::ScalarField::ONE); + advz.verify_share(&share_bad_eval, &common, &commit) + .unwrap() + .expect_err("bad share value should fail verification"); + } + + // corrupted share eval: domain index > num_storage_nodes + // + // The last eval (`last_mut` below) always has domain index `i + + // num_storage_nodes * (multiplicity - 1)`, which exceeds + // `num_storage_nodes` whenever multiplicity > 1. + { + let mut share_bad_eval = share.clone(); + Share::::extract_leaf_mut(&mut share_bad_eval.evals_proof) + .unwrap() + .last_mut() + .unwrap() + .add_assign(::ScalarField::ONE); advz.verify_share(&share_bad_eval, &common, &commit) .unwrap() .expect_err("bad share value should fail verification"); @@ -108,7 +137,7 @@ fn sad_path_verify_share_corrupt_share() { // (without also causing a deserialization failure). // So we use another share's proof instead. 
let share_bad_evals_proof = Share { - eval_proofs: shares[(i + 1) % shares.len()].eval_proofs.clone(), + evals_proof: shares[(i + 1) % shares.len()].evals_proof.clone(), ..share.clone() }; advz.verify_share(&share_bad_evals_proof, &common, &commit) @@ -170,7 +199,9 @@ fn sad_path_verify_share_corrupt_share_and_commit() { let (mut shares, mut common, commit) = (disperse.shares, disperse.common, disperse.commit); common.poly_commits.pop(); - shares[0].eval_proofs.pop(); + Share::::extract_leaf_mut(&mut shares[0].evals_proof) + .unwrap() + .pop(); // equal nonzero lengths for common, share assert_arg_err( @@ -179,7 +210,9 @@ fn sad_path_verify_share_corrupt_share_and_commit() { ); common.poly_commits.clear(); - shares[0].eval_proofs.clear(); + Share::::extract_leaf_mut(&mut shares[0].evals_proof) + .unwrap() + .clear(); // zero length for common, share assert_arg_err( @@ -198,7 +231,9 @@ fn sad_path_recover_payload_corrupt_shares() { // unequal share eval lengths let mut shares_missing_evals = shares.clone(); for i in 0..shares_missing_evals.len() - 1 { - shares_missing_evals[i].eval_proofs.pop(); + Share::::extract_leaf_mut(&mut shares_missing_evals[i].evals_proof) + .unwrap() + .pop(); assert_arg_err( advz.recover_payload(&shares_missing_evals, &common), format!("{} shares missing 1 eval should be arg error", i + 1).as_str(), @@ -206,7 +241,11 @@ fn sad_path_recover_payload_corrupt_shares() { } // 1 eval missing from all shares - shares_missing_evals.last_mut().unwrap().eval_proofs.pop(); + Share::::extract_leaf_mut( + &mut shares_missing_evals.last_mut().unwrap().evals_proof, + ) + .unwrap() + .pop(); assert_arg_err( advz.recover_payload(&shares_missing_evals, &common), format!("all shares missing 1 eval should be arg error").as_str(), @@ -240,68 +279,6 @@ fn sad_path_recover_payload_corrupt_shares() { } } -#[test] -fn verify_share_with_multiplicity() { - let advz_params = AdvzParams { - recovery_threshold: 16, - num_storage_nodes: 20, - max_multiplicity: 4, - 
payload_len: 4000, - }; - let (mut advz, payload) = advz_init_with::(advz_params); - - let disperse = advz.disperse(payload).unwrap(); - let (shares, common, commit) = (disperse.shares, disperse.common, disperse.commit); - - for share in shares { - assert!(advz.verify_share(&share, &common, &commit).unwrap().is_ok()) - } -} - -#[test] -fn sad_path_verify_share_with_multiplicity() { - // regression test for https://github.com/EspressoSystems/jellyfish/issues/654 - let advz_params = AdvzParams { - recovery_threshold: 16, - num_storage_nodes: 20, - max_multiplicity: 32, // payload fitting into a single polynomial - payload_len: 8200, - }; - let (mut advz, payload) = advz_init_with::(advz_params); - - let disperse = advz.disperse(payload).unwrap(); - let (shares, common, commit) = (disperse.shares, disperse.common, disperse.commit); - for (i, share) in shares.iter().enumerate() { - // corrupt the last evaluation of the share - { - let mut share_bad_eval = share.clone(); - Share::::extract_leaf_mut( - &mut share_bad_eval.eval_proofs[common.multiplicity as usize - 1], - ) - .unwrap()[common.poly_commits.len() - 1] - .double_in_place(); - advz.verify_share(&share_bad_eval, &common, &commit) - .unwrap() - .expect_err("bad share value should fail verification"); - } - - // check that verification fails if any of the eval_proofs are - // inconsistent with the merkle root. - // corrupt the last eval proof of this share by assigning it to the value of - // last eval proof of the next share. 
- { - let mut share_bad_eval_proofs = share.clone(); - let next_eval_proof = shares[(i + 1) % shares.len()].eval_proofs - [common.multiplicity as usize - 1] - .clone(); - share_bad_eval_proofs.eval_proofs[common.multiplicity as usize - 1] = next_eval_proof; - advz.verify_share(&share_bad_eval_proofs, &common, &commit) - .unwrap() - .expect_err("bad share evals proof should fail verification"); - } - } -} - #[test] fn verify_share_with_different_multiplicity() { // leader_multiplicity < everyone else's multiplicity @@ -492,9 +469,24 @@ pub(super) fn advz_init() -> (Advz, Vec) { let advz_params = AdvzParams { recovery_threshold: 16, num_storage_nodes: 20, - max_multiplicity: 1, + max_multiplicity: 4, payload_len: 4000, }; + + assert!( + advz_params.max_multiplicity > 1, + "multiplicity should be nontrivial" + ); + { + let elem_byte_len = bytes_to_field::elem_byte_capacity::<::ScalarField>(); + let num_payload_elems = + u32::try_from(advz_params.payload_len.div_ceil(elem_byte_len)).unwrap(); + assert!( + num_payload_elems > advz_params.max_multiplicity * advz_params.recovery_threshold, + "payload size should span multiple polynomials" + ); + } + advz_init_with(advz_params) }