Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Block processing eip4844 #3673

Merged
merged 7 commits into from
Nov 1, 2022
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
4 changes: 4 additions & 0 deletions consensus/state_processing/src/per_block_processing.rs
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ pub use self::verify_attester_slashing::{
pub use self::verify_proposer_slashing::verify_proposer_slashing;
pub use altair::sync_committee::process_sync_aggregate;
pub use block_signature_verifier::{BlockSignatureVerifier, ParallelSignatureSets};
pub use eip4844::eip4844::process_blob_kzg_commitments;
pub use is_valid_indexed_attestation::is_valid_indexed_attestation;
pub use process_operations::process_operations;
pub use verify_attestation::{
Expand All @@ -24,6 +25,7 @@ pub use verify_exit::verify_exit;

pub mod altair;
pub mod block_signature_verifier;
pub mod eip4844;
pub mod errors;
mod is_valid_indexed_attestation;
pub mod process_operations;
Expand Down Expand Up @@ -171,6 +173,8 @@ pub fn per_block_processing<T: EthSpec, Payload: AbstractExecPayload<T>>(
)?;
}

process_blob_kzg_commitments(block.body())?;

Ok(())
}

Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1 @@
pub mod eip4844;
122 changes: 122 additions & 0 deletions consensus/state_processing/src/per_block_processing/eip4844/eip4844.rs
Original file line number Diff line number Diff line change
@@ -0,0 +1,122 @@
use crate::BlockProcessingError;
use eth2_hashing::hash_fixed;
use itertools::{EitherOrBoth, Itertools};
use safe_arith::SafeArith;
use ssz::Decode;
use ssz_types::VariableList;
use types::consts::eip4844::{BLOB_TX_TYPE, VERSIONED_HASH_VERSION_KZG};
use types::{
AbstractExecPayload, BeaconBlockBodyRef, EthSpec, ExecPayload, FullPayload, FullPayloadRef,
KzgCommitment, Transaction, Transactions, VersionedHash,
};

pub fn process_blob_kzg_commitments<T: EthSpec, Payload: AbstractExecPayload<T>>(
block_body: BeaconBlockBodyRef<T, Payload>,
) -> Result<(), BlockProcessingError> {
if let (Ok(payload), Ok(kzg_commitments)) = (
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

We can probably ? these away right? Once 4844 is activated we can expect both payload + blob_kzg_commitments can't we?

Copy link
Member Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

There's no check elsewhere in block processing before this that we're in 4844 so if we error here we couldn't process pre 4844 blocks - but this raises the question of how we should be handling fork specific logic in the per_block_processing method generally cause it isn't obvious that it's shared and what logic exactly is shared across what fork. Maybe the current set up of "process this field if it exists" is the best solution but I'm not sure

block_body.execution_payload(),
block_body.blob_kzg_commitments(),
) {
if let Some(transactions) = payload.transactions() {
if !verify_kzg_commitments_against_transactions::<T>(transactions, kzg_commitments)? {
return Err(BlockProcessingError::BlobVersionHashMismatch);
}
}
}

Ok(())
}

/// Verify that the versioned hashes embedded in the payload's blob transactions match the
/// block's KZG commitments, pairwise and in order.
///
/// Returns `Ok(true)` when every versioned hash equals the versioned hash of the
/// corresponding commitment, `Ok(false)` on a value mismatch, and an error when the two
/// lists differ in length or a blob transaction cannot be parsed.
pub fn verify_kzg_commitments_against_transactions<T: EthSpec>(
    transactions: &Transactions<T>,
    kzg_commitments: &VariableList<KzgCommitment, T::MaxBlobsPerBlock>,
) -> Result<bool, BlockProcessingError> {
    // For each blob-type transaction (first byte == BLOB_TX_TYPE), peek its list of
    // versioned hashes without fully decoding the transaction. Each element of this
    // iterator is a `Result` wrapping an iterator of `Result<VersionedHash, _>`.
    let nested_iter = transactions
        .into_iter()
        .filter(|tx| {
            tx.get(0)
                .map(|tx_type| *tx_type == BLOB_TX_TYPE)
                .unwrap_or(false)
        })
        .map(|tx| tx_peek_blob_versioned_hashes::<T>(tx));

    // Outer `process_results` short-circuits on any transaction-parsing error.
    itertools::process_results(nested_iter, |iter| {
        let zipped_iter = iter
            .flatten()
            // Need to use `itertools::zip_longest` here because just zipping hides if one iter is shorter
            // and `itertools::zip_eq` panics.
            .zip_longest(kzg_commitments.into_iter())
            .enumerate()
            .map(|(index, next)| match next {
                // Both sides present: propagate any per-hash decode error via `hash?`.
                EitherOrBoth::Both(hash, commitment) => Ok((hash?, commitment)),
                // The number of versioned hashes from the blob transactions exceeds the number of
                // commitments in the block.
                EitherOrBoth::Left(_) => Err(BlockProcessingError::BlobNumCommitmentsMismatch {
                    commitments_processed_in_block: index,
                    commitments_processed_in_transactions: index.safe_add(1)?,
                }),
                // The number of commitments in the block exceeds the number of versioned hashes
                // in the blob transactions.
                EitherOrBoth::Right(_) => Err(BlockProcessingError::BlobNumCommitmentsMismatch {
                    commitments_processed_in_block: index.safe_add(1)?,
                    commitments_processed_in_transactions: index,
                }),
            });

        // Inner `process_results` short-circuits on any length-mismatch/decode error;
        // `all` then checks each versioned hash against its paired commitment's hash.
        itertools::process_results(zipped_iter, |mut iter| {
            iter.all(|(tx_versioned_hash, commitment)| {
                tx_versioned_hash == kzg_commitment_to_versioned_hash(commitment)
            })
        })
    })?
}

/// Only transactions of type `BLOB_TX_TYPE` should be passed into this function.
/// Peek the `blob_versioned_hashes` list out of an opaque SSZ-encoded blob transaction
/// without fully decoding it.
///
/// Only transactions of type `BLOB_TX_TYPE` should be passed into this function.
///
/// Returns an iterator over the 32-byte versioned hashes at the tail of the transaction,
/// or `BlobVersionHashIndexOutOfBounds` if the transaction is too short to contain the
/// expected offsets.
fn tx_peek_blob_versioned_hashes<T: EthSpec>(
    opaque_tx: &Transaction<T::MaxBytesPerTransaction>,
) -> Result<
    impl IntoIterator<Item = Result<VersionedHash, BlockProcessingError>> + '_,
    BlockProcessingError,
> {
    let tx_len = opaque_tx.len();
    // Byte 0 is the transaction type; bytes 1..5 hold the SSZ offset of the `message`
    // container, relative to byte 1 (hence the leading `1 +`).
    let message_offset = 1.safe_add(u32::from_ssz_bytes(opaque_tx.get(1..5).ok_or(
        BlockProcessingError::BlobVersionHashIndexOutOfBounds {
            length: tx_len,
            index: 5,
        },
    )?)?)?;

    let message_offset_usize = message_offset as usize;

    // Offset of the `blob_versioned_hashes` field descriptor within `message`:
    // field offset: 32 + 8 + 32 + 32 + 8 + 4 + 32 + 4 + 4 + 32 = 188
    let blob_versioned_hashes_offset = message_offset.safe_add(u32::from_ssz_bytes(
        opaque_tx
            .get(message_offset_usize.safe_add(188)?..message_offset_usize.safe_add(192)?)
            .ok_or(BlockProcessingError::BlobVersionHashIndexOutOfBounds {
                length: tx_len,
                index: message_offset_usize.safe_add(192)?,
            })?,
    )?)?;

    // The versioned hashes occupy the remainder of the transaction, 32 bytes each.
    let num_hashes = tx_len
        .safe_sub(blob_versioned_hashes_offset as usize)?
        .safe_div(32)?;

    // `0..num_hashes` is already an iterator — no `.into_iter()` needed.
    Ok((0..num_hashes).map(move |i| {
        let next_version_hash_index =
            (blob_versioned_hashes_offset as usize).safe_add(i.safe_mul(32)?)?;
        let bytes = opaque_tx
            .get(next_version_hash_index..next_version_hash_index.safe_add(32)?)
            .ok_or(BlockProcessingError::BlobVersionHashIndexOutOfBounds {
                length: tx_len,
                // `next_version_hash_index` is already `usize`; no cast required.
                index: next_version_hash_index.safe_add(32)?,
            })?;
        Ok(VersionedHash::from_slice(bytes))
    }))
}

/// Compute the versioned hash of a KZG commitment: the SHA-256 digest of the commitment
/// bytes with the first byte replaced by `VERSIONED_HASH_VERSION_KZG`.
fn kzg_commitment_to_versioned_hash(kzg_commitment: &KzgCommitment) -> VersionedHash {
    let mut digest = hash_fixed(&kzg_commitment.0);
    digest[0] = VERSIONED_HASH_VERSION_KZG;
    VersionedHash::from(digest)
}
20 changes: 20 additions & 0 deletions consensus/state_processing/src/per_block_processing/errors.rs
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
use super::signature_sets::Error as SignatureSetError;
use merkle_proof::MerkleTreeError;
use safe_arith::ArithError;
use ssz::DecodeError;
use types::*;

/// The error returned from the `per_block_processing` function. Indicates that a block is either
Expand Down Expand Up @@ -53,6 +54,7 @@ pub enum BlockProcessingError {
BeaconStateError(BeaconStateError),
SignatureSetError(SignatureSetError),
SszTypesError(ssz_types::Error),
SszDecodeError(DecodeError),
MerkleTreeError(MerkleTreeError),
ArithError(ArithError),
InconsistentBlockFork(InconsistentFork),
Expand All @@ -70,6 +72,18 @@ pub enum BlockProcessingError {
found: u64,
},
ExecutionInvalid,
BlobVersionHashMismatch,
/// The number of commitments in blob transactions in the payload does not match the number
/// of commitments in the block.
BlobNumCommitmentsMismatch {
commitments_processed_in_block: usize,
/// The number of commitments processed from the blob transactions — TODO: doc comment appears truncated; confirm intended wording.
commitments_processed_in_transactions: usize,
},
BlobVersionHashIndexOutOfBounds {
index: usize,
length: usize,
},
}

impl From<BeaconStateError> for BlockProcessingError {
Expand All @@ -90,6 +104,12 @@ impl From<ssz_types::Error> for BlockProcessingError {
}
}

impl From<DecodeError> for BlockProcessingError {
fn from(error: DecodeError) -> Self {
BlockProcessingError::SszDecodeError(error)
}
}

impl From<ArithError> for BlockProcessingError {
fn from(e: ArithError) -> Self {
BlockProcessingError::ArithError(e)
Expand Down
1 change: 1 addition & 0 deletions consensus/types/src/consts.rs
Original file line number Diff line number Diff line change
Expand Up @@ -34,4 +34,5 @@ pub mod eip4844 {
.expect("should initialize BLS_MODULUS");
}
pub const BLOB_TX_TYPE: u8 = 5;
pub const VERSIONED_HASH_VERSION_KZG: u8 = 1;
}
2 changes: 1 addition & 1 deletion consensus/types/src/kzg_commitment.rs
Original file line number Diff line number Diff line change
Expand Up @@ -9,7 +9,7 @@ use tree_hash::{PackedEncoding, TreeHash};

#[derive(Derivative, Debug, Clone, Serialize, Deserialize)]
#[derivative(PartialEq, Eq, Hash)]
pub struct KzgCommitment(#[serde(with = "BigArray")] [u8; 48]);
pub struct KzgCommitment(#[serde(with = "BigArray")] pub [u8; 48]);

impl Display for KzgCommitment {
fn fmt(&self, f: &mut Formatter<'_>) -> fmt::Result {
Expand Down
2 changes: 1 addition & 1 deletion consensus/types/src/kzg_proof.rs
Original file line number Diff line number Diff line change
@@ -1,9 +1,9 @@
use crate::test_utils::{RngCore, TestRandom};
use serde::{Deserialize, Serialize};
use serde_big_array::BigArray;
use ssz::{Decode, DecodeError, Encode};
use std::fmt;
use tree_hash::{PackedEncoding, TreeHash};
use serde_big_array::BigArray;

const KZG_PROOF_BYTES_LEN: usize = 48;

Expand Down
1 change: 1 addition & 0 deletions consensus/types/src/lib.rs
Original file line number Diff line number Diff line change
Expand Up @@ -195,6 +195,7 @@ pub type Address = H160;
pub type ForkVersion = [u8; 4];
pub type BLSFieldElement = Uint256;
pub type Blob<T> = FixedVector<BLSFieldElement, <T as EthSpec>::FieldElementsPerBlob>;
pub type VersionedHash = Hash256;

pub use bls::{
AggregatePublicKey, AggregateSignature, Keypair, PublicKey, PublicKeyBytes, SecretKey,
Expand Down
49 changes: 49 additions & 0 deletions consensus/types/src/payload.rs
Original file line number Diff line number Diff line change
Expand Up @@ -35,6 +35,9 @@ pub trait ExecPayload<T: EthSpec>: Debug + Clone + PartialEq + Hash + TreeHash +
fn fee_recipient(&self) -> Address;
fn gas_limit(&self) -> u64;

/// This will return `None` on blinded blocks or pre-merge blocks.
fn transactions(&self) -> Option<&Transactions<T>>;

// Is this a default payload? (pre-merge)
fn is_default(&self) -> bool;
}
Expand Down Expand Up @@ -191,6 +194,10 @@ impl<T: EthSpec> ExecPayload<T> for FullPayloadMerge<T> {
self.execution_payload.gas_limit
}

/// Full payloads always carry their transaction list.
fn transactions(&self) -> Option<&Transactions<T>> {
    Some(&self.execution_payload.transactions)
}

// TODO: can this function be optimized?
fn is_default(&self) -> bool {
self.execution_payload == ExecutionPayloadMerge::default()
Expand Down Expand Up @@ -235,6 +242,10 @@ impl<T: EthSpec> ExecPayload<T> for FullPayloadCapella<T> {
self.execution_payload.gas_limit
}

/// Full payloads always carry their transaction list.
fn transactions(&self) -> Option<&Transactions<T>> {
    Some(&self.execution_payload.transactions)
}

// TODO: can this function be optimized?
fn is_default(&self) -> bool {
self.execution_payload == ExecutionPayloadCapella::default()
Expand Down Expand Up @@ -279,6 +290,10 @@ impl<T: EthSpec> ExecPayload<T> for FullPayloadEip4844<T> {
self.execution_payload.gas_limit
}

/// Full payloads always carry their transaction list.
fn transactions(&self) -> Option<&Transactions<T>> {
    Some(&self.execution_payload.transactions)
}

// TODO: can this function be optimized?
fn is_default(&self) -> bool {
self.execution_payload == ExecutionPayloadEip4844::default()
Expand Down Expand Up @@ -347,6 +362,13 @@ impl<T: EthSpec> ExecPayload<T> for FullPayload<T> {
})
}

/// Dispatch to the fork-specific variant; every full-payload variant has transactions.
fn transactions<'a>(&'a self) -> Option<&'a Transactions<T>> {
    map_full_payload_ref!(&'a _, self.to_ref(), move |payload, cons| {
        cons(payload);
        Some(&payload.execution_payload.transactions)
    })
}

fn is_default(&self) -> bool {
match self {
Self::Merge(payload) => payload.is_default(),
Expand Down Expand Up @@ -428,6 +450,13 @@ impl<'b, T: EthSpec> ExecPayload<T> for FullPayloadRef<'b, T> {
})
}

/// Dispatch to the fork-specific variant; every full-payload variant has transactions.
fn transactions<'a>(&'a self) -> Option<&'a Transactions<T>> {
    map_full_payload_ref!(&'a _, self, move |payload, cons| {
        cons(payload);
        Some(&payload.execution_payload.transactions)
    })
}

// TODO: can this function be optimized?
fn is_default<'a>(&'a self) -> bool {
match self {
Expand Down Expand Up @@ -687,6 +716,10 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayload<T> {
}
}

/// Blinded payloads carry only the payload header, so no transactions are available.
fn transactions(&self) -> Option<&Transactions<T>> {
    None
}

// TODO: can this function be optimized?
fn is_default(&self) -> bool {
match self {
Expand Down Expand Up @@ -773,6 +806,10 @@ impl<'b, T: EthSpec> ExecPayload<T> for BlindedPayloadRef<'b, T> {
}
}

/// Blinded payloads carry only the payload header, so no transactions are available.
fn transactions(&self) -> Option<&Transactions<T>> {
    None
}

// TODO: can this function be optimized?
fn is_default<'a>(&'a self) -> bool {
match self {
Expand Down Expand Up @@ -828,6 +865,10 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayloadMerge<T> {
self.execution_payload_header.gas_limit
}

/// Blinded payloads carry only the payload header, so no transactions are available.
fn transactions(&self) -> Option<&Transactions<T>> {
    None
}

// A payload equal to the default header is treated as "no real payload" (pre-merge).
fn is_default(&self) -> bool {
    self.execution_payload_header == ExecutionPayloadHeaderMerge::default()
}
Expand Down Expand Up @@ -871,6 +912,10 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayloadCapella<T> {
self.execution_payload_header.gas_limit
}

/// Blinded payloads carry only the payload header, so no transactions are available.
fn transactions(&self) -> Option<&Transactions<T>> {
    None
}

// A payload equal to the default header is treated as "no real payload" (pre-merge).
fn is_default(&self) -> bool {
    self.execution_payload_header == ExecutionPayloadHeaderCapella::default()
}
Expand Down Expand Up @@ -914,6 +959,10 @@ impl<T: EthSpec> ExecPayload<T> for BlindedPayloadEip4844<T> {
self.execution_payload_header.gas_limit
}

/// Blinded payloads carry only the payload header, so no transactions are available.
fn transactions(&self) -> Option<&Transactions<T>> {
    None
}

// A payload equal to the default header is treated as "no real payload" (pre-merge).
fn is_default(&self) -> bool {
    self.execution_payload_header == ExecutionPayloadHeaderEip4844::default()
}
Expand Down