From c53ab45e19ce99b5c207cf9f4f2ef0c0c0c72bdb Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 24 Nov 2022 17:29:09 +0600 Subject: [PATCH 001/210] Add in-protocol deposit processing --- specs/deposits/beacon-chain.md | 416 +++++++++++++++++++++++++++++++++ 1 file changed, 416 insertions(+) create mode 100644 specs/deposits/beacon-chain.md diff --git a/specs/deposits/beacon-chain.md b/specs/deposits/beacon-chain.md new file mode 100644 index 0000000000..bb6ebd2e8c --- /dev/null +++ b/specs/deposits/beacon-chain.md @@ -0,0 +1,416 @@ +# DepositEIP -- The Beacon Chain + +## Table of contents + + + + + +- [Introduction](#introduction) +- [Preset](#preset) + - [State list lengths](#state-list-lengths) + - [Execution](#execution) +- [Containers](#containers) + - [New containers](#new-containers) + - [`DepositReceipt`](#depositreceipt) + - [`IndexedDepositData`](#indexeddepositdata) + - [Extended Containers](#extended-containers) + - [`ExecutionPayload`](#executionpayload) + - [`ExecutionPayloadHeader`](#executionpayloadheader) + - [`BeaconState`](#beaconstate) +- [Beacon chain state transition function](#beacon-chain-state-transition-function) + - [Epoch processing](#epoch-processing) + - [Helper functions](#helper-functions) + - [New `get_validator_from_indexed_deposit_data`](#new-get_validator_from_indexed_deposit_data) + - [New `apply_pending_deposit`](#new-apply_pending_deposit) + - [New `process_pending_deposits`](#new-process_pending_deposits) + - [Block processing](#block-processing) + - [New `process_deposit_receipts`](#new-process_deposit_receipts) + - [Modified `process_execution_payload`](#modified-process_execution_payload) + - [Modified `process_operations`](#modified-process_operations) +- [Testing](#testing) + + + + +## Introduction + +This is the beacon chain specification of in-protocol deposits processing mechanism. +This mechanism relies on the changes proposed by the corresponding EIP. 
+ +*Note:* This specification is under development and should be used with care. + +## Preset + +### State list lengths + +| Name | Value | +| - | - | +| `PENDING_DEPOSITS_LIMIT` | `2**32` (= 4,294,967,296) | + +### Execution + +| Name | Value | Description | +| - | - | - | +| `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` | `uint64(2**10)` (= 1,024) | Maximum number of deposit receipts allowed in each payload | + +## Containers + +### New containers + +#### `DepositReceipt` + +```python +class DepositReceipt(Container): + pubkey: BLSPubkey + withdrawal_credentials: Bytes32 + amount: Gwei + signature: BLSSignature + index: uint64 +``` + +#### `IndexedDepositData` + +```python +class IndexedDepositData(Container): + pubkey: BLSPubkey + withdrawal_credentials: Bytes32 + amount: Gwei + index: uint64 + epoch: Epoch +``` + +### Extended Containers + +#### `ExecutionPayload` + +```python +class ExecutionPayload(Container): + # Execution block header fields + parent_hash: Hash32 + fee_recipient: ExecutionAddress + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: uint256 + # Extra payload fields + block_hash: Hash32 + transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] + withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] + deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in DepositEIP] +``` + +#### `ExecutionPayloadHeader` + +```python +class ExecutionPayloadHeader(Container): + # Execution block header fields + parent_hash: Hash32 + fee_recipient: ExecutionAddress + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: 
uint256 + # Extra payload fields + block_hash: Hash32 + transactions_root: Root + withdrawals_root: Root + deposit_receipts_root: Root # [New in DepositEIP] +``` + +#### `BeaconState` + +```python +class BeaconState(Container): + # Versioning + genesis_time: uint64 + genesis_validators_root: Root + slot: Slot + fork: Fork + # History + latest_block_header: BeaconBlockHeader + block_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + state_roots: Vector[Root, SLOTS_PER_HISTORICAL_ROOT] + historical_roots: List[Root, HISTORICAL_ROOTS_LIMIT] + # Eth1 + eth1_data: Eth1Data + eth1_data_votes: List[Eth1Data, EPOCHS_PER_ETH1_VOTING_PERIOD * SLOTS_PER_EPOCH] + eth1_deposit_index: uint64 + # Registry + validators: List[Validator, VALIDATOR_REGISTRY_LIMIT] + balances: List[Gwei, VALIDATOR_REGISTRY_LIMIT] + # Randomness + randao_mixes: Vector[Bytes32, EPOCHS_PER_HISTORICAL_VECTOR] + # Slashings + slashings: Vector[Gwei, EPOCHS_PER_SLASHINGS_VECTOR] # Per-epoch sums of slashed effective balances + # Participation + previous_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + current_epoch_participation: List[ParticipationFlags, VALIDATOR_REGISTRY_LIMIT] + # Finality + justification_bits: Bitvector[JUSTIFICATION_BITS_LENGTH] # Bit set for every recent justified epoch + previous_justified_checkpoint: Checkpoint + current_justified_checkpoint: Checkpoint + finalized_checkpoint: Checkpoint + # Inactivity + inactivity_scores: List[uint64, VALIDATOR_REGISTRY_LIMIT] + # Sync + current_sync_committee: SyncCommittee + next_sync_committee: SyncCommittee + # Execution + latest_execution_payload_header: ExecutionPayloadHeader + # Withdrawals + next_withdrawal_index: WithdrawalIndex + next_withdrawal_validator_index: ValidatorIndex + # DepositsEIP + pending_deposits: List[IndexedDepositData, PENDING_DEPOSITS_LIMIT] +``` + +## Beacon chain state transition function + +### Epoch processing + +```python +def process_epoch(state: BeaconState) -> None: + 
process_justification_and_finalization(state) + process_inactivity_updates(state) + process_rewards_and_penalties(state) + # Run before registry and after finality updates + process_pending_deposits(state) # [New in DepositsEIP] + process_registry_updates(state) + process_slashings(state) + process_eth1_data_reset(state) + process_effective_balance_updates(state) + process_slashings_reset(state) + process_randao_mixes_reset(state) + process_historical_roots_update(state) + process_participation_flag_updates(state) + process_sync_committee_updates(state) +``` + +#### Helper functions + +##### New `get_validator_from_indexed_deposit_data` + +```python +def get_validator_from_indexed_deposit_data(indexed_deposit_data: IndexedDepositData) -> Validator: + amount = indexed_deposit_data.amount + effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) + + return Validator( + pubkey=indexed_deposit_data.pubkey, + withdrawal_credentials=indexed_deposit_data.withdrawal_credentials, + activation_eligibility_epoch=FAR_FUTURE_EPOCH, + activation_epoch=FAR_FUTURE_EPOCH, + exit_epoch=FAR_FUTURE_EPOCH, + withdrawable_epoch=FAR_FUTURE_EPOCH, + effective_balance=effective_balance, + ) +``` + +##### New `apply_pending_deposit` + +```python +def apply_pending_deposit(state: BeaconState, indexed_deposit_data: IndexedDepositData) -> None: + pubkey = indexed_deposit_data.pubkey + amount = indexed_deposit_data.amount + validator_pubkeys = [v.pubkey for v in state.validators] + if pubkey not in validator_pubkeys: + # Add validator and balance entries + state.validators.append(get_validator_from_indexed_deposit_data(indexed_deposit_data)) + state.balances.append(amount) + else: + # Increase balance by deposit amount + index = ValidatorIndex(validator_pubkeys.index(pubkey)) + increase_balance(state, index, amount) +``` + +#### New `process_pending_deposits` + +```python +def process_pending_deposits(state: BeaconState) -> None: + finalized_epoch = 
state.finalized_checkpoint.epoch + + next_pending_deposit_index = 0 + for pending_deposit in state.pending_deposits: + # Apply only finalized deposits + if pending_deposit.epoch > finalized_epoch + break + + # Skip already applied deposits + if pending_deposit.index >= state.eth1_deposit_index: + apply_pending_deposit(state, pending_deposit) + state.eth1_deposit_index += 1 + + next_pending_deposit_index += 1 + + state.pending_deposit = state.pending_deposit[next_pending_deposit_index:] +``` + +### Block processing + +```python +def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + if is_execution_enabled(state, block.body): + process_withdrawals(state, block.body.execution_payload) + process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in DepositsEIP] + process_deposit_receipts(state, block.body.execution_payload) # [New in DepositsEIP] + process_randao(state, block.body) + process_eth1_data(state, block.body) + process_operations(state, block.body) # [Modified in DepositsEIP] + process_sync_aggregate(state, block.body.sync_aggregate) +``` + +#### New `process_deposit_receipts` + +```python +def process_deposit_receipts(state: BeaconState, payload: ExecutionPayload) -> None: + current_epoch = get_current_epoch(state) + + for deposit_receipt in payload.deposit_receipts: + if pubkey not in validator_pubkeys: + # Verify the deposit signature (proof of possession) which is not checked by the deposit contract + deposit_message = DepositMessage( + pubkey=deposit_receipt.pubkey, + withdrawal_credentials=deposit_receipt.withdrawal_credentials, + amount=deposit_receipt.amount, + ) + domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks + signing_root = compute_signing_root(deposit_message, domain) + if not bls.Verify(pubkey, signing_root, deposit.data.signature): + continue + + pending_deposit = IndexedDepositData( + 
pubkey=deposit_receipt.pubkey, + withdrawal_credentials=deposit_receipt.withdrawal_credentials, + amount=deposit_receipt.amount, + index=deposit_receipt.index, + epoch=current_epoch, + ) + state.pending_deposits.append(pending_deposit) +``` + +#### Modified `process_execution_payload` + +*Note*: The function `process_execution_payload` is modified to use the new `ExecutionPayloadHeader` type. + +```python +def process_execution_payload(state: BeaconState, payload: ExecutionPayload, execution_engine: ExecutionEngine) -> None: + # Verify consistency of the parent hash with respect to the previous execution payload header + if is_merge_transition_complete(state): + assert payload.parent_hash == state.latest_execution_payload_header.block_hash + # Verify prev_randao + assert payload.prev_randao == get_randao_mix(state, get_current_epoch(state)) + # Verify timestamp + assert payload.timestamp == compute_timestamp_at_slot(state, state.slot) + # Verify the execution payload is valid + assert execution_engine.notify_new_payload(payload) + # Cache execution payload header + state.latest_execution_payload_header = ExecutionPayloadHeader( + parent_hash=payload.parent_hash, + fee_recipient=payload.fee_recipient, + state_root=payload.state_root, + receipts_root=payload.receipts_root, + logs_bloom=payload.logs_bloom, + prev_randao=payload.prev_randao, + block_number=payload.block_number, + gas_limit=payload.gas_limit, + gas_used=payload.gas_used, + timestamp=payload.timestamp, + extra_data=payload.extra_data, + base_fee_per_gas=payload.base_fee_per_gas, + block_hash=payload.block_hash, + transactions_root=hash_tree_root(payload.transactions), + withdrawals_root=hash_tree_root(payload.withdrawals), + deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in DepositsEIP] + ) +``` + +#### Modified `process_operations` + +*Note*: The function `process_operations` is modified to process `BLSToExecutionChange` operations included in the block. 
+ +```python +def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: + # Verify that outstanding deposits are processed up to the maximum number of deposits + unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in DepositsEIP] + assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in DepositsEIP] + + def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: + for operation in operations: + fn(state, operation) + + for_ops(body.proposer_slashings, process_proposer_slashing) + for_ops(body.attester_slashings, process_attester_slashing) + for_ops(body.attestations, process_attestation) + for_ops(body.deposits, process_deposit) + for_ops(body.voluntary_exits, process_voluntary_exit) + for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) # [New in Capella] +``` + +## Testing + +*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure DepositsEIP testing only. +Modifications include: +1. Use `DEPOSITS_EIP_FORK_VERSION` as the previous and current fork version. +2. Utilize the DepositsEIP `BeaconBlockBody` when constructing the initial `latest_block_header`. 
+ +```python +def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, + eth1_timestamp: uint64, + deposits: Sequence[Deposit], + execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader() + ) -> BeaconState: + fork = Fork( + previous_version=CAPELLA_FORK_VERSION, # [Modified in Capella] for testing only + current_version=CAPELLA_FORK_VERSION, # [Modified in Capella] + epoch=GENESIS_EPOCH, + ) + state = BeaconState( + genesis_time=eth1_timestamp + GENESIS_DELAY, + fork=fork, + eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), + latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), + randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy + ) + + # Process deposits + leaves = list(map(lambda deposit: deposit.data, deposits)) + for index, deposit in enumerate(deposits): + deposit_data_list = List[DepositData, 2**DEPOSIT_CONTRACT_TREE_DEPTH](*leaves[:index + 1]) + state.eth1_data.deposit_root = hash_tree_root(deposit_data_list) + process_deposit(state, deposit) + + # Process activations + for index, validator in enumerate(state.validators): + balance = state.balances[index] + validator.effective_balance = min(balance - balance % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) + if validator.effective_balance == MAX_EFFECTIVE_BALANCE: + validator.activation_eligibility_epoch = GENESIS_EPOCH + validator.activation_epoch = GENESIS_EPOCH + + # Set genesis validators root for domain separation and chain versioning + state.genesis_validators_root = hash_tree_root(state.validators) + + # Fill in sync committees + # Note: A duplicate committee is assigned for the current and next committee at genesis + state.current_sync_committee = get_next_sync_committee(state) + state.next_sync_committee = get_next_sync_committee(state) + + # Initialize the execution payload header + state.latest_execution_payload_header = execution_payload_header + + return 
state +``` From b3c771c46db891812422e113e227b39c6757620c Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 7 Dec 2022 12:05:24 +0600 Subject: [PATCH 002/210] Preserve deposits per epoch boundary --- specs/deposits/beacon-chain.md | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/specs/deposits/beacon-chain.md b/specs/deposits/beacon-chain.md index bb6ebd2e8c..7e8b5bf82d 100644 --- a/specs/deposits/beacon-chain.md +++ b/specs/deposits/beacon-chain.md @@ -244,10 +244,14 @@ def process_pending_deposits(state: BeaconState) -> None: next_pending_deposit_index = 0 for pending_deposit in state.pending_deposits: + # Preserve deposits per epoch boundary + if next_pending_deposit_index >= MAX_DEPOSITS * SLOTS_PER_EPOCH: + break + # Apply only finalized deposits - if pending_deposit.epoch > finalized_epoch + if pending_deposit.epoch > finalized_epoch: break - + # Skip already applied deposits if pending_deposit.index >= state.eth1_deposit_index: apply_pending_deposit(state, pending_deposit) From 5ea983ac33e16963b845318fbfd1f178a516a8cb Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 7 Dec 2022 22:25:22 +0600 Subject: [PATCH 003/210] Fix toc, add more comments --- specs/deposits/beacon-chain.md | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/specs/deposits/beacon-chain.md b/specs/deposits/beacon-chain.md index 7e8b5bf82d..40d9864c2d 100644 --- a/specs/deposits/beacon-chain.md +++ b/specs/deposits/beacon-chain.md @@ -22,7 +22,7 @@ - [Epoch processing](#epoch-processing) - [Helper functions](#helper-functions) - [New `get_validator_from_indexed_deposit_data`](#new-get_validator_from_indexed_deposit_data) - - [New `apply_pending_deposit`](#new-apply_pending_deposit) + - [New `apply_indexed_deposit_data`](#new-apply_indexed_deposit_data) - [New `process_pending_deposits`](#new-process_pending_deposits) - [Block processing](#block-processing) - [New `process_deposit_receipts`](#new-process_deposit_receipts) @@ 
-219,10 +219,10 @@ def get_validator_from_indexed_deposit_data(indexed_deposit_data: IndexedDeposit ) ``` -##### New `apply_pending_deposit` +##### New `apply_indexed_deposit_data` ```python -def apply_pending_deposit(state: BeaconState, indexed_deposit_data: IndexedDepositData) -> None: +def apply_indexed_deposit_data(state: BeaconState, indexed_deposit_data: IndexedDepositData) -> None: pubkey = indexed_deposit_data.pubkey amount = indexed_deposit_data.amount validator_pubkeys = [v.pubkey for v in state.validators] @@ -254,7 +254,7 @@ def process_pending_deposits(state: BeaconState) -> None: # Skip already applied deposits if pending_deposit.index >= state.eth1_deposit_index: - apply_pending_deposit(state, pending_deposit) + apply_indexed_deposit_data(state, pending_deposit) state.eth1_deposit_index += 1 next_pending_deposit_index += 1 @@ -348,8 +348,9 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: - # Verify that outstanding deposits are processed up to the maximum number of deposits + # Prevent potential underflow that is introduced by the mix of two deposit processing flows unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in DepositsEIP] + # Verify that outstanding deposits are processed up to the maximum number of deposits assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in DepositsEIP] def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: From 8cc293c8697376bb594cca9f753f0b93c2aef634 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 7 Dec 2022 22:29:21 +0600 Subject: [PATCH 004/210] Fix wording --- specs/deposits/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deposits/beacon-chain.md b/specs/deposits/beacon-chain.md index 40d9864c2d..76423d656d 100644 --- 
a/specs/deposits/beacon-chain.md +++ b/specs/deposits/beacon-chain.md @@ -348,7 +348,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: - # Prevent potential underflow that is introduced by the mix of two deposit processing flows + # Prevent potential underflow introduced by mixing two deposit processing flows unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in DepositsEIP] # Verify that outstanding deposits are processed up to the maximum number of deposits assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in DepositsEIP] From 9d2a8f7d63f8a720c7723f04190972fc6e542e96 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 19 Dec 2022 17:47:56 +0600 Subject: [PATCH 005/210] Give deposits EIP a name --- specs/{deposits => eip6110}/beacon-chain.md | 28 ++++++++++----------- 1 file changed, 14 insertions(+), 14 deletions(-) rename specs/{deposits => eip6110}/beacon-chain.md (95%) diff --git a/specs/deposits/beacon-chain.md b/specs/eip6110/beacon-chain.md similarity index 95% rename from specs/deposits/beacon-chain.md rename to specs/eip6110/beacon-chain.md index 76423d656d..d1b64f5a68 100644 --- a/specs/deposits/beacon-chain.md +++ b/specs/eip6110/beacon-chain.md @@ -1,4 +1,4 @@ -# DepositEIP -- The Beacon Chain +# EIP-6110 -- The Beacon Chain ## Table of contents @@ -36,7 +36,7 @@ ## Introduction This is the beacon chain specification of in-protocol deposits processing mechanism. -This mechanism relies on the changes proposed by the corresponding EIP. +This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum.org/EIPS/eip-6110). *Note:* This specification is under development and should be used with care. 
@@ -103,7 +103,7 @@ class ExecutionPayload(Container): block_hash: Hash32 transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] - deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in DepositEIP] + deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in EIP-6110] ``` #### `ExecutionPayloadHeader` @@ -127,7 +127,7 @@ class ExecutionPayloadHeader(Container): block_hash: Hash32 transactions_root: Root withdrawals_root: Root - deposit_receipts_root: Root # [New in DepositEIP] + deposit_receipts_root: Root # [New in EIP-6110] ``` #### `BeaconState` @@ -173,7 +173,7 @@ class BeaconState(Container): # Withdrawals next_withdrawal_index: WithdrawalIndex next_withdrawal_validator_index: ValidatorIndex - # DepositsEIP + # EIP-6110 pending_deposits: List[IndexedDepositData, PENDING_DEPOSITS_LIMIT] ``` @@ -187,7 +187,7 @@ def process_epoch(state: BeaconState) -> None: process_inactivity_updates(state) process_rewards_and_penalties(state) # Run before registry and after finality updates - process_pending_deposits(state) # [New in DepositsEIP] + process_pending_deposits(state) # [New in EIP-6110] process_registry_updates(state) process_slashings(state) process_eth1_data_reset(state) @@ -269,11 +269,11 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) if is_execution_enabled(state, block.body): process_withdrawals(state, block.body.execution_payload) - process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in DepositsEIP] - process_deposit_receipts(state, block.body.execution_payload) # [New in DepositsEIP] + process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP-6110] + process_deposit_receipts(state, block.body.execution_payload) # [New in EIP-6110] process_randao(state, block.body) process_eth1_data(state, block.body) - 
process_operations(state, block.body) # [Modified in DepositsEIP] + process_operations(state, block.body) # [Modified in EIP-6110] process_sync_aggregate(state, block.body.sync_aggregate) ``` @@ -338,7 +338,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), withdrawals_root=hash_tree_root(payload.withdrawals), - deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in DepositsEIP] + deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in EIP-6110] ) ``` @@ -349,9 +349,9 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # Prevent potential underflow introduced by mixing two deposit processing flows - unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in DepositsEIP] + unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in EIP-6110] # Verify that outstanding deposits are processed up to the maximum number of deposits - assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in DepositsEIP] + assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in EIP-6110] def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: for operation in operations: @@ -367,10 +367,10 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: ## Testing -*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure DepositsEIP testing only. +*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-6110 testing only. Modifications include: 1. Use `DEPOSITS_EIP_FORK_VERSION` as the previous and current fork version. -2. 
Utilize the DepositsEIP `BeaconBlockBody` when constructing the initial `latest_block_header`. +2. Utilize the EIP-6110 `BeaconBlockBody` when constructing the initial `latest_block_header`. ```python def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, From 48f120c90e77dadcbce51d8ac08df0e23b5e6db1 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 21 Dec 2022 19:19:13 +0600 Subject: [PATCH 006/210] Set a higher limit to deposit receipts list --- specs/eip6110/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/eip6110/beacon-chain.md b/specs/eip6110/beacon-chain.md index d1b64f5a68..cf9c046af3 100644 --- a/specs/eip6110/beacon-chain.md +++ b/specs/eip6110/beacon-chain.md @@ -52,7 +52,7 @@ This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum | Name | Value | Description | | - | - | - | -| `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` | `uint64(2**10)` (= 1,024) | Maximum number of deposit receipts allowed in each payload | +| `MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD` | `uint64(2**13)` (= 8,192) | Maximum number of deposit receipts allowed in each payload | ## Containers From 6eb118d34afad8f8e01d2164e5d8d06ea0cae663 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 22 Dec 2022 16:58:34 +0600 Subject: [PATCH 007/210] Fix finality check in deposit processing --- specs/eip6110/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/eip6110/beacon-chain.md b/specs/eip6110/beacon-chain.md index cf9c046af3..4a95fa95f7 100644 --- a/specs/eip6110/beacon-chain.md +++ b/specs/eip6110/beacon-chain.md @@ -249,7 +249,7 @@ def process_pending_deposits(state: BeaconState) -> None: break # Apply only finalized deposits - if pending_deposit.epoch > finalized_epoch: + if pending_deposit.epoch >= finalized_epoch: break # Skip already applied deposits From 03f4b8fa4dbb0f5de4541cf435fee8b5a5badc0c Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Fri, 27 Jan 2023 14:23:38 +0000 
Subject: [PATCH 008/210] Add KZG multi verify function --- specs/eip4844/polynomial-commitments.md | 98 +++++++++++++++++++++++-- 1 file changed, 93 insertions(+), 5 deletions(-) diff --git a/specs/eip4844/polynomial-commitments.md b/specs/eip4844/polynomial-commitments.md index 9a0500d96d..8ef1fc419a 100644 --- a/specs/eip4844/polynomial-commitments.md +++ b/specs/eip4844/polynomial-commitments.md @@ -83,6 +83,7 @@ Public functions MUST accept raw bytes as input and perform the required cryptog | - | - | | `FIELD_ELEMENTS_PER_BLOB` | `uint64(4096)` | | `FIAT_SHAMIR_PROTOCOL_DOMAIN` | `b'FSBLOBVERIFY_V1_'` | +| `RANDOM_CHALLENGE_KZG_MULTI_DOMAIN` | `b'RCKZGMULTI___V1_'` | ### Crypto @@ -414,6 +415,51 @@ def verify_kzg_proof_impl(commitment: KZGCommitment, ]) ``` +#### `verify_kzg_proof_multi` + +```python +def verify_kzg_proof_multi(commitments: Sequence[KZGCommitment], + zs: Sequence[BLSFieldElement], + ys: Sequence[BLSFieldElement], + proofs: Sequence[KZGProof]) -> bool: + """ + Verify multiple KZG proofs efficiently. + """ + + assert len(commitments) == len(zs) == len(ys) == len(proofs) + + # Compute a random challenge. Note that it does not have to be computed from a hash, + # r just has to be random. 
+ degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, ENDIANNESS) + num_commitments = int.to_bytes(len(commitments), 8, ENDIANNESS) + data = RANDOM_CHALLENGE_KZG_MULTI_DOMAIN + degree_poly + num_commitments + + # Append each polynomial which is composed by field elements + for commitment, z, y, proof in zip(commitments, zs, ys, proofs): + data += commitment \ + + int.to_bytes(z, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) \ + + int.to_bytes(y, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) \ + + proof + + hashed_data = hash(data) + r = hash_to_bls_field(hashed_data + b'\x00') + r_powers = compute_powers(r, len(commitments)) + + # Verify: e(sum r^i proof_i, [s]) == + # e(sum r^i (commitment_i - [y_i]) + sum r^i z_i proof_i, [1]) + proof_lincomb = g1_lincomb(proofs, r_powers) + proof_z_lincomb = g1_lincomb(proofs, [z * r_power for z, r_power in zip(zs, r_powers)]) + C_minus_ys = [bls.G1_to_bytes48(bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y))) + for commitment, y in zip(commitments, ys)] + C_minus_y_as_KZGCommitments = [KZGCommitment(x) for x in C_minus_ys] + C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers) + + return bls.pairing_check([ + [proof_lincomb, bls.neg(KZG_SETUP_G2[1])], + [bls.add(C_minus_y_lincomb, proof_z_lincomb), bls.G2] + ]) +``` + #### `compute_kzg_proof` ```python @@ -491,12 +537,12 @@ def compute_aggregate_kzg_proof(blobs: Sequence[Blob]) -> KZGProof: return compute_kzg_proof_impl(aggregated_poly, evaluation_challenge) ``` -#### `verify_aggregate_kzg_proof` +#### `verify_aggregate_kzg_proof_aggregation` ```python -def verify_aggregate_kzg_proof(blobs: Sequence[Blob], - commitments_bytes: Sequence[Bytes48], - aggregated_proof_bytes: Bytes48) -> bool: +def verify_aggregate_kzg_proof_aggregation(blobs: Sequence[Blob], + commitments_bytes: Sequence[Bytes48]) \ + -> Tuple[KZGCommitment, BLSFieldElement, BLSFieldElement]: """ Given a list of blobs and an aggregated KZG proof, verify that they correspond to the 
provided commitments. @@ -512,7 +558,49 @@ def verify_aggregate_kzg_proof(blobs: Sequence[Blob], # Evaluate aggregated polynomial at `evaluation_challenge` (evaluation function checks for div-by-zero) y = evaluate_polynomial_in_evaluation_form(aggregated_poly, evaluation_challenge) - # Verify aggregated proof + return (aggregated_poly_commitment, evaluation_challenge, y) +``` + +#### `verify_aggregate_kzg_proof` + +```python +def verify_aggregate_kzg_proof(blobs: Sequence[Blob], + commitments_bytes: Sequence[Bytes48], + aggregated_proof_bytes: Bytes48) -> bool: + """ + Given a list of blobs and an aggregated KZG proof, verify that they correspond to the provided commitments. + + Public method. + """ + aggregated_poly_commitment, evaluation_challenge, y = \ + verify_aggregate_kzg_proof_aggregation(blobs, commitments_bytes) + aggregated_proof = bytes_to_kzg_proof(aggregated_proof_bytes) + return verify_kzg_proof_impl(aggregated_poly_commitment, evaluation_challenge, y, aggregated_proof) ``` + +#### `verify_aggregate_kzg_proof_multi` + +```python +def verify_aggregate_kzg_proof_multi(list_blobs: Sequence[Sequence[Blob]], + list_commitments_bytes: Sequence[Sequence[Bytes48]], + list_aggregated_proof_bytes: Sequence[Bytes48]) -> bool: + """ + Given a list of blobs and an aggregated KZG proof, verify that they correspond to the provided commitments. + + Public method. 
+ """ + + aggregated_poly_commitments, evaluation_challenges, ys = [], [], [] + for blobs, commitments_bytes in zip(list_blobs, list_commitments_bytes): + aggregated_poly_commitment, evaluation_challenge, y = \ + verify_aggregate_kzg_proof_aggregation(blobs, commitments_bytes) + aggregated_poly_commitments.append(aggregated_poly_commitment) + evaluation_challenges.append(evaluation_challenge) + ys.append(y) + + list_aggregated_proof = [bytes_to_kzg_proof(proof) for proof in list_aggregated_proof_bytes] + + return verify_kzg_proof_multi(aggregated_poly_commitments, evaluation_challenges, ys, list_aggregated_proof) +``` From d89e57908973bb9b2916a22fdf453d3cb3ecea14 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Fri, 27 Jan 2023 14:33:43 +0000 Subject: [PATCH 009/210] Updater toc --- specs/eip4844/polynomial-commitments.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/specs/eip4844/polynomial-commitments.md b/specs/eip4844/polynomial-commitments.md index 8ef1fc419a..c6e15f65e9 100644 --- a/specs/eip4844/polynomial-commitments.md +++ b/specs/eip4844/polynomial-commitments.md @@ -37,11 +37,14 @@ - [`blob_to_kzg_commitment`](#blob_to_kzg_commitment) - [`verify_kzg_proof`](#verify_kzg_proof) - [`verify_kzg_proof_impl`](#verify_kzg_proof_impl) + - [`verify_kzg_proof_multi`](#verify_kzg_proof_multi) - [`compute_kzg_proof`](#compute_kzg_proof) - [`compute_kzg_proof_impl`](#compute_kzg_proof_impl) - [`compute_aggregated_poly_and_commitment`](#compute_aggregated_poly_and_commitment) - [`compute_aggregate_kzg_proof`](#compute_aggregate_kzg_proof) + - [`verify_aggregate_kzg_proof_aggregation`](#verify_aggregate_kzg_proof_aggregation) - [`verify_aggregate_kzg_proof`](#verify_aggregate_kzg_proof) + - [`verify_aggregate_kzg_proof_multi`](#verify_aggregate_kzg_proof_multi) From 659c7f513f74494371b8b478fe2d930536306e5d Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sun, 29 Jan 2023 13:05:02 +0000 Subject: [PATCH 010/210] Change blob verification fiat-shamir to single 
blob --- specs/eip4844/polynomial-commitments.md | 177 +++++++----------------- 1 file changed, 49 insertions(+), 128 deletions(-) diff --git a/specs/eip4844/polynomial-commitments.md b/specs/eip4844/polynomial-commitments.md index c6e15f65e9..63217c0bce 100644 --- a/specs/eip4844/polynomial-commitments.md +++ b/specs/eip4844/polynomial-commitments.md @@ -25,11 +25,10 @@ - [`bytes_to_kzg_commitment`](#bytes_to_kzg_commitment) - [`bytes_to_kzg_proof`](#bytes_to_kzg_proof) - [`blob_to_polynomial`](#blob_to_polynomial) - - [`compute_challenges`](#compute_challenges) + - [`compute_challenge`](#compute_challenge) - [`bls_modular_inverse`](#bls_modular_inverse) - [`div`](#div) - [`g1_lincomb`](#g1_lincomb) - - [`poly_lincomb`](#poly_lincomb) - [`compute_powers`](#compute_powers) - [Polynomials](#polynomials) - [`evaluate_polynomial_in_evaluation_form`](#evaluate_polynomial_in_evaluation_form) @@ -40,11 +39,9 @@ - [`verify_kzg_proof_multi`](#verify_kzg_proof_multi) - [`compute_kzg_proof`](#compute_kzg_proof) - [`compute_kzg_proof_impl`](#compute_kzg_proof_impl) - - [`compute_aggregated_poly_and_commitment`](#compute_aggregated_poly_and_commitment) - - [`compute_aggregate_kzg_proof`](#compute_aggregate_kzg_proof) - - [`verify_aggregate_kzg_proof_aggregation`](#verify_aggregate_kzg_proof_aggregation) - - [`verify_aggregate_kzg_proof`](#verify_aggregate_kzg_proof) - - [`verify_aggregate_kzg_proof_multi`](#verify_aggregate_kzg_proof_multi) + - [`compute_blob_kzg_proof`](#compute_blob_kzg_proof) + - [`verify_blob_kzg_proof`](#verify_blob_kzg_proof) + - [`verify_blob_kzg_proof_multi`](#verify_blob_kzg_proof_multi) @@ -226,44 +223,34 @@ def blob_to_polynomial(blob: Blob) -> Polynomial: return polynomial ``` -#### `compute_challenges` +#### `compute_challenge` ```python -def compute_challenges(polynomials: Sequence[Polynomial], - commitments: Sequence[KZGCommitment]) -> Tuple[Sequence[BLSFieldElement], BLSFieldElement]: +def compute_challenge(polynomial: Polynomial, + commitment: 
KZGCommitment) -> BLSFieldElement: """ Return the Fiat-Shamir challenges required by the rest of the protocol. The Fiat-Shamir logic works as per the following pseudocode: - hashed_data = hash(DOMAIN_SEPARATOR, polynomials, commitments) - r = hash(hashed_data, 0) - r_powers = [1, r, r**2, r**3, ...] - eval_challenge = hash(hashed_data, 1) - - Then return `r_powers` and `eval_challenge` after converting them to BLS field elements. - The resulting field elements are not uniform over the BLS field. + hashed_data = hash(DOMAIN_SEPARATOR, polynomial, commitment) + eval_challenge = hash(hashed_data, 0) """ + # Append the number of polynomials and the degree of each polynomial as a domain separator - num_polynomials = int.to_bytes(len(polynomials), 8, ENDIANNESS) + num_polynomials = int.to_bytes(1, 8, ENDIANNESS) degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, ENDIANNESS) data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly + num_polynomials # Append each polynomial which is composed by field elements - for poly in polynomials: - for field_element in poly: - data += int.to_bytes(field_element, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) + for field_element in polynomial: + data += int.to_bytes(field_element, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) # Append serialized G1 points - for commitment in commitments: - data += commitment + data += commitment # Transcript has been prepared: time to create the challenges hashed_data = hash(data) - r = hash_to_bls_field(hashed_data + b'\x00') - r_powers = compute_powers(r, len(commitments)) - eval_challenge = hash_to_bls_field(hashed_data + b'\x01') - - return r_powers, eval_challenge + return hash_to_bls_field(hashed_data + b'\x00') ``` #### `bls_modular_inverse` @@ -301,23 +288,6 @@ def g1_lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElemen return KZGCommitment(bls.G1_to_bytes48(result)) ``` -#### `poly_lincomb` - -```python -def poly_lincomb(polys: Sequence[Polynomial], - scalars: Sequence[BLSFieldElement]) -> 
Polynomial: - """ - Given a list of ``polynomials``, interpret it as a 2D matrix and compute the linear combination - of each column with `scalars`: return the resulting polynomials. - """ - assert len(polys) == len(scalars) - result = [0] * FIELD_ELEMENTS_PER_BLOB - for v, s in zip(polys, scalars): - for i, x in enumerate(v): - result[i] = (result[i] + int(s) * int(x)) % BLS_MODULUS - return Polynomial([BLSFieldElement(x) for x in result]) -``` - #### `compute_powers` ```python @@ -496,114 +466,65 @@ def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGPro return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial)) ``` -#### `compute_aggregated_poly_and_commitment` +#### `compute_blob_kzg_proof` ```python -def compute_aggregated_poly_and_commitment( - blobs: Sequence[Blob], - kzg_commitments: Sequence[KZGCommitment]) -> Tuple[Polynomial, KZGCommitment, BLSFieldElement]: +def compute_blob_kzg_proof(blob: Blob) -> KZGProof: """ - Return (1) the aggregated polynomial, (2) the aggregated KZG commitment, - and (3) the polynomial evaluation random challenge. 
- This function should also work with blobs == [] and kzg_commitments == [] - """ - assert len(blobs) == len(kzg_commitments) - - # Convert blobs to polynomials - polynomials = [blob_to_polynomial(blob) for blob in blobs] - - # Generate random linear combination and evaluation challenges - r_powers, evaluation_challenge = compute_challenges(polynomials, kzg_commitments) - - # Create aggregated polynomial in evaluation form - aggregated_poly = poly_lincomb(polynomials, r_powers) - - # Compute commitment to aggregated polynomial - aggregated_poly_commitment = KZGCommitment(g1_lincomb(kzg_commitments, r_powers)) - - return aggregated_poly, aggregated_poly_commitment, evaluation_challenge -``` - -#### `compute_aggregate_kzg_proof` - -```python -def compute_aggregate_kzg_proof(blobs: Sequence[Blob]) -> KZGProof: - """ - Given a list of blobs, return the aggregated KZG proof that is used to verify them against their commitments. + Given a blob, return the KZG proof that is used to verify it against the commitment. Public method. 
""" - commitments = [blob_to_kzg_commitment(blob) for blob in blobs] - aggregated_poly, aggregated_poly_commitment, evaluation_challenge = compute_aggregated_poly_and_commitment( - blobs, - commitments - ) - return compute_kzg_proof_impl(aggregated_poly, evaluation_challenge) + commitment = blob_to_kzg_commitment(blob) + evaluation_challenge = compute_challenge(blob, commitment) + polynomial = blob_to_polynomial(blob) + return compute_kzg_proof_impl(polynomial, evaluation_challenge) ``` -#### `verify_aggregate_kzg_proof_aggregation` +#### `verify_blob_kzg_proof` ```python -def verify_aggregate_kzg_proof_aggregation(blobs: Sequence[Blob], - commitments_bytes: Sequence[Bytes48]) \ - -> Tuple[KZGCommitment, BLSFieldElement, BLSFieldElement]: +def verify_blob_kzg_proof(blob: Blob, + commitment_bytes: Bytes48, + proof_bytes: Bytes48) -> bool: """ - Given a list of blobs and an aggregated KZG proof, verify that they correspond to the provided commitments. + Given a blob and a KZG proof, verify that the blob data corresponds to the provided commitment. Public method. """ - commitments = [bytes_to_kzg_commitment(c) for c in commitments_bytes] - - aggregated_poly, aggregated_poly_commitment, evaluation_challenge = compute_aggregated_poly_and_commitment( - blobs, - commitments - ) - - # Evaluate aggregated polynomial at `evaluation_challenge` (evaluation function checks for div-by-zero) - y = evaluate_polynomial_in_evaluation_form(aggregated_poly, evaluation_challenge) - - return (aggregated_poly_commitment, evaluation_challenge, y) -``` - -#### `verify_aggregate_kzg_proof` - -```python -def verify_aggregate_kzg_proof(blobs: Sequence[Blob], - commitments_bytes: Sequence[Bytes48], - aggregated_proof_bytes: Bytes48) -> bool: - """ - Given a list of blobs and an aggregated KZG proof, verify that they correspond to the provided commitments. + commitment = bytes_to_kzg_commitment(commitment_bytes) - Public method. 
- """ - aggregated_poly_commitment, evaluation_challenge, y = \ - verify_aggregate_kzg_proof_aggregation(blobs, commitments_bytes) + evaluation_challenge = compute_challenge(blob, commitment) + polynomial = blob_to_polynomial(blob) - aggregated_proof = bytes_to_kzg_proof(aggregated_proof_bytes) + # Evaluate polynomial at `evaluation_challenge` (evaluation function checks for div-by-zero) + y = evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge) - return verify_kzg_proof_impl(aggregated_poly_commitment, evaluation_challenge, y, aggregated_proof) + # Verify proof + proof = bytes_to_kzg_proof(proof_bytes) + return verify_kzg_proof_impl(commitment, evaluation_challenge, y, proof) ``` -#### `verify_aggregate_kzg_proof_multi` +#### `verify_blob_kzg_proof_multi` ```python -def verify_aggregate_kzg_proof_multi(list_blobs: Sequence[Sequence[Blob]], - list_commitments_bytes: Sequence[Sequence[Bytes48]], - list_aggregated_proof_bytes: Sequence[Bytes48]) -> bool: +def verify_blob_kzg_proof_multi(blobs: Sequence[Blob], + commitments_bytes: Sequence[Bytes48], + proofs_bytes: Sequence[Bytes48]) -> bool: """ - Given a list of blobs and an aggregated KZG proof, verify that they correspond to the provided commitments. + Given a list of blobs and blob KZG proofs, verify that they correspond to the provided commitments. Public method. 
""" - aggregated_poly_commitments, evaluation_challenges, ys = [], [], [] - for blobs, commitments_bytes in zip(list_blobs, list_commitments_bytes): - aggregated_poly_commitment, evaluation_challenge, y = \ - verify_aggregate_kzg_proof_aggregation(blobs, commitments_bytes) - aggregated_poly_commitments.append(aggregated_poly_commitment) + commitments, evaluation_challenges, ys, proofs = [], [], [], [] + for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes): + commitment = bytes_to_kzg_commitment(commitment_bytes) + commitments.append(commitment) + evaluation_challenge = compute_challenge(blob, commitment) evaluation_challenges.append(evaluation_challenge) - ys.append(y) - - list_aggregated_proof = [bytes_to_kzg_proof(proof) for proof in list_aggregated_proof_bytes] + polynomial = blob_to_polynomial(blob) + ys.append(evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge)) + proofs.append(bytes_to_kzg_proof(proof_bytes)) - return verify_kzg_proof_multi(aggregated_poly_commitments, evaluation_challenges, ys, list_aggregated_proof) + return verify_kzg_proof_multi(commitments, evaluation_challenges, ys, proofs) ``` From 65d3c6aeb68ecd5b168929ccedc877bad197988e Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 7 Feb 2023 10:55:51 +0100 Subject: [PATCH 011/210] Free the blobs This PR reintroduces and further decouples blocks and blobs in EIP-4844, so as to improve network and processing performance. 
Block and blob processing, for the purpose of gossip validation, are independent: they can both be propagated and gossip-validated in parallel - the decoupled design allows 4 important optimizations (or, if you are so inclined, removes 4 unnecessary pessimizations): * Blocks and blobs travel on independent meshes allowing for better parallelization and utilization of high-bandwidth peers * Re-broadcasting after validation can start earlier allowing more efficient use of upload bandwidth - blocks for example can be rebroadcast to peers while blobs are still being downloaded * bandwidth-reduction techniques such as per-peer deduplication are more efficient because of the smaller message size * gossip verification happens independently for blocks and blobs, allowing better sharing / use of CPU and I/O resources in clients With growing block sizes and additional blob data to stream, the network streaming time becomes a dominant factor in propagation times - on a 100mbit line, streaming 1mb to 8 peers takes ~1s - this process is repeated for each hop in both incoming and outgoing directions. This design in particular sends each blob on a separate subnet, thus maximising the potential for parallelisation and providing a natural path for growing the number of blobs per block should the network be judged to be able to handle it. Changes compared to the current design include: * `BlobsSidecar` is split into individual `BlobSidecar` containers - each container is signed individually by the proposer * the signature is used during gossip validation but later dropped. 
* KZG commitment verification is moved out of the gossip pipeline and instead done before fork choice addition, when both block and sidecars have arrived * clients may verify individual blob commitments earlier * more generally and similar to block verification, gossip propagation is performed solely based on trivial consistency checks and proposer signature verification * by-root blob requests are done per-blob, so as to retain the ability to fill in blobs one-by-one assuming clients generally receive blobs from gossip * by-range blob requests are done per-block, so as to simplify historical sync * range and root requests are limited to `128` entries for both blocks and blobs - practically, the current higher limit of `1024` for blocks does not get used and keeping the limits consistent simplifies implementation - with the merge, block sizes have grown significantly and clients generally fetch smaller chunks. --- specs/eip4844/beacon-chain.md | 5 +- specs/eip4844/fork-choice.md | 39 +++------ specs/eip4844/p2p-interface.md | 147 ++++++++++++++++++++------------- specs/eip4844/validator.md | 33 ++++---- 4 files changed, 124 insertions(+), 100 deletions(-) diff --git a/specs/eip4844/beacon-chain.md b/specs/eip4844/beacon-chain.md index f681ab951e..145710a890 100644 --- a/specs/eip4844/beacon-chain.md +++ b/specs/eip4844/beacon-chain.md @@ -44,6 +44,7 @@ This upgrade adds blobs to the beacon chain as part of EIP-4844. This is an exte | Name | SSZ equivalent | Description | | - | - | - | | `VersionedHash` | `Bytes32` | | +| `BlobIndex` | `uint64` | | ## Constants @@ -52,7 +53,7 @@ This upgrade adds blobs to the beacon chain as part of EIP-4844. 
This is an exte | Name | Value | | - | - | | `BLOB_TX_TYPE` | `uint8(0x05)` | -| `VERSIONED_HASH_VERSION_KZG` | `Bytes1('0x01')` | +| `VERSIONED_HASH_VERSION_KZG` | `Bytes1('0x01')` | ## Preset @@ -249,7 +250,7 @@ def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody) -> N *Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-4844 testing only. -The `BeaconState` initialization is unchanged, except for the use of the updated `eip4844.BeaconBlockBody` type +The `BeaconState` initialization is unchanged, except for the use of the updated `eip4844.BeaconBlockBody` type when initializing the first body-root. ```python diff --git a/specs/eip4844/fork-choice.md b/specs/eip4844/fork-choice.md index 8dea28dedc..3e909423e9 100644 --- a/specs/eip4844/fork-choice.md +++ b/specs/eip4844/fork-choice.md @@ -7,9 +7,8 @@ - [Introduction](#introduction) - [Containers](#containers) - - [`BlobsSidecar`](#blobssidecar) - [Helpers](#helpers) - - [`validate_blobs_sidecar`](#validate_blobs_sidecar) + - [`validate_blob_sidecars`](#validate_blob_sidecars) - [`is_data_available`](#is_data_available) - [Updated fork-choice handlers](#updated-fork-choice-handlers) - [`on_block`](#on_block) @@ -23,54 +22,42 @@ This is the modification of the fork choice accompanying the EIP-4844 upgrade. 
## Containers -### `BlobsSidecar` - -```python -class BlobsSidecar(Container): - beacon_block_root: Root - beacon_block_slot: Slot - blobs: List[Blob, MAX_BLOBS_PER_BLOCK] - kzg_aggregated_proof: KZGProof -``` - ## Helpers -#### `validate_blobs_sidecar` +#### `validate_blob_sidecars` ```python -def validate_blobs_sidecar(slot: Slot, +def validate_blob_sidecars(slot: Slot, beacon_block_root: Root, expected_kzg_commitments: Sequence[KZGCommitment], - blobs_sidecar: BlobsSidecar) -> None: + blob_sidecars: Sequence[BlobSidecar]) -> None: assert slot == blobs_sidecar.beacon_block_slot assert beacon_block_root == blobs_sidecar.beacon_block_root - blobs = blobs_sidecar.blobs - kzg_aggregated_proof = blobs_sidecar.kzg_aggregated_proof - assert len(expected_kzg_commitments) == len(blobs) - - assert verify_aggregate_kzg_proof(blobs, expected_kzg_commitments, kzg_aggregated_proof) + assert len(expected_kzg_commitments) == len(blob_sidecars) + # TODO validate commitments individually or aggregate first? + # assert verify_aggregate_kzg_proof(blobs, expected_kzg_commitments, kzg_aggregated_proof) ``` #### `is_data_available` The implementation of `is_data_available` will become more sophisticated during later scaling upgrades. -Initially, verification requires every verifying actor to retrieve the matching `BlobsSidecar`, -and validate the sidecar with `validate_blobs_sidecar`. +Initially, verification requires every verifying actor to retrieve all matching `BlobSidecar`s, +and validate the sidecar with `validate_blob_sidecars`. -The block MUST NOT be considered valid until a valid `BlobsSidecar` has been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `BlobsSidecar` has subsequently been pruned. +The block MUST NOT be considered valid until all valid `BlobSidecar`s have been downloaded. 
Blocks that have been previously validated as available SHOULD be considered available even if the associated `BlobSidecar`s has subsequently been pruned. ```python def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool: # `retrieve_blobs_sidecar` is implementation and context dependent, raises an exception if not available. # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` - sidecar = retrieve_blobs_sidecar(slot, beacon_block_root) + sidecars = retrieve_blob_sidecars(slot, beacon_block_root) # For testing, `retrieve_blobs_sidecar` returns "TEST". # TODO: Remove it once we have a way to inject `BlobsSidecar` into tests. if isinstance(sidecar, str): return True - validate_blobs_sidecar(slot, beacon_block_root, blob_kzg_commitments, sidecar) + validate_blob_sidecars(slot, beacon_block_root, blob_kzg_commitments, sidecars) return True ``` @@ -102,7 +89,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # [New in EIP-4844] # Check if blob data is available # If not, this block MAY be queued and subsequently considered when blob data becomes available - assert is_data_available(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments) + assert is_data_available(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments) # Check the block is valid and compute the post-state state = pre_state.copy() diff --git a/specs/eip4844/p2p-interface.md b/specs/eip4844/p2p-interface.md index ae9380f7a8..94821a82cb 100644 --- a/specs/eip4844/p2p-interface.md +++ b/specs/eip4844/p2p-interface.md @@ -10,23 +10,22 @@ The specification of these changes continues in the same format as the network s - - [Configuration](#configuration) - - [Containers](#containers) - - [`SignedBeaconBlockAndBlobsSidecar`](#signedbeaconblockandblobssidecar) - - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - - [Topics and 
messages](#topics-and-messages) - - [Global topics](#global-topics) - - [`beacon_block`](#beacon_block) - - [`beacon_block_and_blobs_sidecar`](#beacon_block_and_blobs_sidecar) - - [Transitioning the gossip](#transitioning-the-gossip) - - [The Req/Resp domain](#the-reqresp-domain) - - [Messages](#messages) - - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) - - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2) - - [BeaconBlockAndBlobsSidecarByRoot v1](#beaconblockandblobssidecarbyroot-v1) - - [BlobsSidecarsByRange v1](#blobssidecarsbyrange-v1) -- [Design decision rationale](#design-decision-rationale) - - [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks) +- [Configuration](#configuration) +- [Containers](#containers) + - [`BlobSidecar`](#blobsidecar) + - [`SignedBlobSidecar`](#signedblobsidecar) +- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) + - [Topics and messages](#topics-and-messages) + - [Global topics](#global-topics) + - [`beacon_block`](#beacon_block) + - [`blob_sidecar_{index}`](#blob_sidecar_index) + - [Transitioning the gossip](#transitioning-the-gossip) +- [The Req/Resp domain](#the-reqresp-domain) + - [Messages](#messages) + - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) + - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2) + - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) + - [BlobsSidecarsByRange v1](#blobssidecarsbyrange-v1) @@ -35,17 +34,31 @@ The specification of these changes continues in the same format as the network s | Name | Value | Description | |------------------------------------------|-----------------------------------|---------------------------------------------------------------------| -| `MAX_REQUEST_BLOBS_SIDECARS` | `2**7` (= 128) | Maximum number of blobs sidecars in a single request | +| `MAX_REQUEST_BLOCKS_EIP4844` | `2**7` (= 128) | Maximum number of blocks in a single request | | `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` | 
`2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blobs sidecars | ## Containers -### `SignedBeaconBlockAndBlobsSidecar` +### `BlobSidecar` ```python -class SignedBeaconBlockAndBlobsSidecar(Container): - beacon_block: SignedBeaconBlock - blobs_sidecar: BlobsSidecar +class BlobSidecar(Container): + block_root: Root + index: BlobIndex # Index of blob in block + slot: Slot + block_parent_root: Root # Proposer shuffling determinant + proposer_index: ValidatorIndex + blob: Blob + kzg_commitment: KZGCommitment + kzg_proof: KZGProof # Allows for quick verification of kzg_commitment +``` + +### `SignedBlobSidecar` + +```python +class SignedBlobSidecar(Container): + message: BlobSidecar + signature: Signature ``` ## The gossip domain: gossipsub @@ -65,34 +78,35 @@ The new topics along with the type of the `data` field of a gossipsub message ar | Name | Message Type | | - | - | -| `beacon_block_and_blobs_sidecar` | `SignedBeaconBlockAndBlobsSidecar` (new) | +| `blob_sidecar_{index}` | `SignedBlobSidecar` (new) | #### Global topics -EIP-4844 introduces a new global topic for beacon block and blobs-sidecars. +EIP-4844 introduces new global topics for blob sidecars. ##### `beacon_block` -This topic is deprecated and clients **MUST NOT** expose in their topic set to any peer. Implementers do not need to do -anything beyond simply skip implementation, and it is explicitly called out as it is a departure from previous versioning -of this topic. +The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in EIP4844. -Refer to [the section below](#transitioning-the-gossip) for details on how to transition the gossip. +##### `blob_sidecar_{index}` -##### `beacon_block_and_blobs_sidecar` +This topic is used to propagate signed blob sidecars, one for each sidecar index. -This topic is used to propagate new signed and coupled beacon blocks and blobs sidecars to all nodes on the networks. 
+The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`: -In addition to the gossip validations for the `beacon_block` topic from prior specifications, the following validations MUST pass before forwarding the `signed_beacon_block_and_blobs_sidecar` on the network. -Alias `signed_beacon_block = signed_beacon_block_and_blobs_sidecar.beacon_block`, `block = signed_beacon_block.message`, `execution_payload = block.body.execution_payload`. -- _[REJECT]_ The KZG commitments correspond to the versioned hashes in the transactions list - -- i.e. `verify_kzg_commitments_against_transactions(block.body.execution_payload.transactions, block.body.blob_kzg_commitments)` - -Alias `sidecar = signed_beacon_block_and_blobs_sidecar.blobs_sidecar`. -- _[IGNORE]_ the `sidecar.beacon_block_slot` is for the current slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) - -- i.e. `sidecar.beacon_block_slot == block.slot`. -- _[REJECT]_ The KZG commitments in the block are valid against the provided blobs sidecar - -- i.e. `validate_blobs_sidecar(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments, sidecar)` +- _[REJECT]_ The sidecar is for the correct topic -- + i.e. `sidecar.index` matches the topic `{index}`. +- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) +- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- + i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` +- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. +- _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.slot, sidecar.proposer_index, sidecar.index)`. + -- Clients MUST discard blocks where multiple sidecars for the same proposer and index have been observed. 
+- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot + in the context of the current shuffling (defined by `parent_root`/`slot`). + If the `proposer_index` cannot immediately be verified against the expected shuffling, + the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- + in such a case _do not_ `REJECT`, instead `IGNORE` this message. ### Transitioning the gossip @@ -121,6 +135,8 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: | `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` | | `EIP4844_FORK_VERSION` | `eip4844.SignedBeaconBlock` | +No more than `MAX_REQUEST_BLOCKS_EIP4844` may be requested at a time. + #### BeaconBlocksByRoot v2 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/` @@ -139,15 +155,23 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: | `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` | | `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` | -#### BeaconBlockAndBlobsSidecarByRoot v1 +No more than `MAX_REQUEST_BLOCKS_EIP4844` may be requested at a time. -**Protocol ID:** `/eth2/beacon_chain/req/beacon_block_and_blobs_sidecar_by_root/1/` +#### BlobSidecarsByRoot v1 + +**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/` Request Content: +```python +class BlobIdentifier(Container): + block_root: Root + index: uint64 +``` + ``` ( - List[Root, MAX_REQUEST_BLOCKS] + List[BlobIdentifier, MAX_REQUEST_BLOCKS_EIP4844] ) ``` @@ -155,29 +179,32 @@ Response Content: ``` ( - List[SignedBeaconBlockAndBlobsSidecar, MAX_REQUEST_BLOCKS] + List[BlobSidecar, MAX_REQUEST_BLOCKS_EIP4844] ) ``` -Requests blocks by block root (= `hash_tree_root(SignedBeaconBlockAndBlobsSidecar.beacon_block.message)`). -The response is a list of `SignedBeaconBlockAndBlobsSidecar` whose length is less than or equal to the number of requests. +Requests sidecars by block root and index. 
+The response is a list of `BlobSidecar` whose length is less than or equal to the number of requests. It may be less in the case that the responding peer is missing blocks and sidecars. -No more than `MAX_REQUEST_BLOCKS` may be requested at a time. +The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer +may not be available beyond the initial distribution via gossip. + +No more than `MAX_REQUEST_BLOCKS_EIP4844` may be requested at a time. -`BeaconBlockAndBlobsSidecarByRoot` is primarily used to recover recent blocks and sidecars (e.g. when receiving a block or attestation whose parent is unknown). +`BlobSidecarsByRoot` is primarily used to recover recent blocks and sidecars (e.g. when receiving a block or attestation whose parent is unknown). The response MUST consist of zero or more `response_chunk`. -Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlockAndBlobsSidecar` payload. +Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload. -Clients MUST support requesting blocks and sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, EIP4844_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers SHOULD respond with error code `3: ResourceUnavailable`. +Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, EIP4844_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob in the response. -Clients MUST respond with at least one block and sidecar, if they have it. +Clients MUST respond with at least one sidecar, if they have it. 
Clients MAY limit the number of blocks and sidecars in the response. #### BlobsSidecarsByRange v1 -**Protocol ID:** `/eth2/beacon_chain/req/blobs_sidecars_by_range/1/` +**Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/` Request Content: ``` @@ -188,16 +215,22 @@ Request Content: ``` Response Content: + +```python +class BlobSidecars(Container): + block_root: Root + List[BlobSidecar, MAX_BLOBS_PER_BLOCK] + ``` ( - List[BlobsSidecar, MAX_REQUEST_BLOBS_SIDECARS] + List[BlobSidecars, MAX_REQUEST_BLOCKS_EIP4844] ) ``` -Requests blobs sidecars in the slot range `[start_slot, start_slot + count)`, +Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, leading up to the current head block as selected by fork choice. -The response is unsigned, i.e. `BlobsSidecarsByRange`, as the signature of the beacon block proposer +The response is unsigned, i.e. `BlobSidecarsByRange`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip. Before consuming the next response chunk, the response reader SHOULD verify the blobs sidecar is well-formatted and @@ -215,7 +248,7 @@ Clients MUST keep a record of signed blobs sidecars seen on the epoch range where `current_epoch` is defined by the current wall-clock time, and clients MUST support serving requests of blobs on this range. -Peers that are unable to reply to blobs sidecars requests within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` +Peers that are unable to reply to blob sidecar requests within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` epoch range SHOULD respond with error code `3: ResourceUnavailable`. Such peers that are unable to successfully reply to this range of requests MAY get descored or disconnected at any time. @@ -229,7 +262,7 @@ participating in the networking immediately, other peers MAY disconnect and/or temporarily ban such an un-synced or semi-synced client. 
Clients MUST respond with at least the first blobs sidecar that exists in the range, if they have it, -and no more than `MAX_REQUEST_BLOBS_SIDECARS` sidecars. +and no more than `MAX_REQUEST_BLOCKS_EIP4844` sidecars. The following blobs sidecars, where they exist, MUST be sent in consecutive order. diff --git a/specs/eip4844/validator.md b/specs/eip4844/validator.md index bfdd69370a..619444351d 100644 --- a/specs/eip4844/validator.md +++ b/specs/eip4844/validator.md @@ -79,27 +79,30 @@ def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload, 3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`. -#### Constructing the `SignedBeaconBlockAndBlobsSidecar` -To construct a `SignedBeaconBlockAndBlobsSidecar`, a `signed_beacon_block_and_blobs_sidecar` is defined with the necessary context for block and sidecar proposal. - -##### Block -Set `signed_beacon_block_and_blobs_sidecar.beacon_block = block` where `block` is obtained above. +#### Constructing the `SignedBlobSidecar` +To construct a `SignedBlobSidecar`, a `signed_blob_sidecar` is defined with the necessary context for block and sidecar proposal. ##### Sidecar -Coupled with block, the corresponding blobs are packaged into a sidecar object for distribution to the network. -Set `signed_beacon_block_and_blobs_sidecar.blobs_sidecar = sidecar` where `sidecar` is obtained from: +Coupled with block, the corresponding blobs are packaged into sidecar objects for distribution to the network. 
+ +Each `sidecar` is obtained from: ```python -def get_blobs_sidecar(block: BeaconBlock, blobs: Sequence[Blob]) -> BlobsSidecar: - return BlobsSidecar( - beacon_block_root=hash_tree_root(block), - beacon_block_slot=block.slot, - blobs=blobs, - kzg_aggregated_proof=compute_aggregate_kzg_proof(blobs), - ) +def get_blob_sidecar(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[BlobsSidecar]: + return [ + BlobsSidecar( + block_root=hash_tree_root(block), + index=idx + slot=block.slot, + block_parent_root=block.parent_root, + blob=blob, + kzg_commitment=block.body.blob_kzg_commitments[idx], + kzg_aggregated_proof=compute_kzg_proof(blob),) + for idx, blob in enumerate(blobs) + ] ``` -This `signed_beacon_block_and_blobs_sidecar` is then published to the global `beacon_block_and_blobs_sidecar` topic. +Each `sidecar` is then published to the global `blob_sidecar_{index}` topics according to its index. After publishing the peers on the network may request the sidecar through sync-requests, or a local user may be interested. 
The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` epochs and serve when capable, From 1e07685f74bf9d508d2acbb8c6b82aace53142d7 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 7 Feb 2023 11:14:59 +0100 Subject: [PATCH 012/210] doctoc --- specs/eip4844/validator.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/specs/eip4844/validator.md b/specs/eip4844/validator.md index 619444351d..4ea7051e93 100644 --- a/specs/eip4844/validator.md +++ b/specs/eip4844/validator.md @@ -16,8 +16,7 @@ - [Block and sidecar proposal](#block-and-sidecar-proposal) - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) - [Blob KZG commitments](#blob-kzg-commitments) - - [Constructing the `SignedBeaconBlockAndBlobsSidecar`](#constructing-the-signedbeaconblockandblobssidecar) - - [Block](#block) + - [Constructing the `SignedBlobSidecar`](#constructing-the-signedblobsidecar) - [Sidecar](#sidecar) From deb82e2f26e9a7cf3062bcce544e842fcfdb72b5 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 7 Feb 2023 11:23:18 +0100 Subject: [PATCH 013/210] fix member --- specs/eip4844/validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/eip4844/validator.md b/specs/eip4844/validator.md index 4ea7051e93..a4ff8b8f20 100644 --- a/specs/eip4844/validator.md +++ b/specs/eip4844/validator.md @@ -96,7 +96,7 @@ def get_blob_sidecar(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blob block_parent_root=block.parent_root, blob=blob, kzg_commitment=block.body.blob_kzg_commitments[idx], - kzg_aggregated_proof=compute_kzg_proof(blob),) + kzg_proof=compute_kzg_proof(blob),) for idx, blob in enumerate(blobs) ] ``` From f6b8827eca8b0b7a1ced3bddd6a721f8f31cfca1 Mon Sep 17 00:00:00 2001 From: protolambda Date: Mon, 23 Jan 2023 22:51:55 +0100 Subject: [PATCH 014/210] eip4844: move excess data gas field to end of execution payload for merkle proof path compat --- specs/deneb/beacon-chain.md | 6 +++--- 1 
file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md index e82fdfdcb9..aba3b3df48 100644 --- a/specs/deneb/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -108,11 +108,11 @@ class ExecutionPayload(Container): timestamp: uint64 extra_data: ByteList[MAX_EXTRA_DATA_BYTES] base_fee_per_gas: uint256 - excess_data_gas: uint256 # [New in Deneb] # Extra payload fields block_hash: Hash32 # Hash of execution block transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] + excess_data_gas: uint256 # [New in Deneb] ``` #### `ExecutionPayloadHeader` @@ -132,11 +132,11 @@ class ExecutionPayloadHeader(Container): timestamp: uint64 extra_data: ByteList[MAX_EXTRA_DATA_BYTES] base_fee_per_gas: uint256 - excess_data_gas: uint256 # [New in Deneb] # Extra payload fields block_hash: Hash32 # Hash of execution block transactions_root: Root withdrawals_root: Root + excess_data_gas: uint256 # [New in Deneb] ``` ## Helper functions @@ -230,10 +230,10 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe timestamp=payload.timestamp, extra_data=payload.extra_data, base_fee_per_gas=payload.base_fee_per_gas, - excess_data_gas=payload.excess_data_gas, # [New in Deneb] block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), withdrawals_root=hash_tree_root(payload.withdrawals), + excess_data_gas=payload.excess_data_gas, # [New in Deneb] ) ``` From 832e96412ce8bc0d3d6f93e8048825db576bf562 Mon Sep 17 00:00:00 2001 From: protolambda Date: Tue, 24 Jan 2023 15:30:33 +0100 Subject: [PATCH 015/210] fix container fork check with remerkleable v0.1.26 --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index f87ed5a6cf..45dd635d31 100644 --- a/setup.py +++ b/setup.py @@ -1169,7 +1169,7 @@ def run(self): "pycryptodome==3.15.0", "py_ecc==6.0.0", 
"milagro_bls_binding==1.9.0", - "remerkleable==0.1.25", + "remerkleable==0.1.26", "trie==2.0.2", RUAMEL_YAML_VERSION, "lru-dict==1.1.8", From 902a9c996784334a02891936417db88400c19c9b Mon Sep 17 00:00:00 2001 From: protolambda Date: Thu, 26 Jan 2023 11:57:47 +0100 Subject: [PATCH 016/210] remerkleable: fix container dict key hashing --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 45dd635d31..34a1083d69 100644 --- a/setup.py +++ b/setup.py @@ -1169,7 +1169,7 @@ def run(self): "pycryptodome==3.15.0", "py_ecc==6.0.0", "milagro_bls_binding==1.9.0", - "remerkleable==0.1.26", + "remerkleable==0.1.27", "trie==2.0.2", RUAMEL_YAML_VERSION, "lru-dict==1.1.8", From 7b5acbfd21d0b0fd7a46034f43362124e2f88899 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 27 Jan 2023 11:00:37 +0100 Subject: [PATCH 017/210] Fix Capella fork test assertions --- specs/capella/beacon-chain.md | 2 +- tests/core/pyspec/eth2spec/test/helpers/capella/fork.py | 4 +--- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/specs/capella/beacon-chain.md b/specs/capella/beacon-chain.md index 1be41e7eb1..1df617daaf 100644 --- a/specs/capella/beacon-chain.md +++ b/specs/capella/beacon-chain.md @@ -242,7 +242,7 @@ class BeaconState(Container): current_sync_committee: SyncCommittee next_sync_committee: SyncCommittee # Execution - latest_execution_payload_header: ExecutionPayloadHeader + latest_execution_payload_header: ExecutionPayloadHeader # [Modified in Capella] # Withdrawals next_withdrawal_index: WithdrawalIndex # [New in Capella] next_withdrawal_validator_index: ValidatorIndex # [New in Capella] diff --git a/tests/core/pyspec/eth2spec/test/helpers/capella/fork.py b/tests/core/pyspec/eth2spec/test/helpers/capella/fork.py index 8e0aec9c6e..bca8ddb8d1 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/capella/fork.py +++ b/tests/core/pyspec/eth2spec/test/helpers/capella/fork.py @@ -29,14 +29,12 @@ def run_fork_test(post_spec, 
pre_state): 'inactivity_scores', # Sync 'current_sync_committee', 'next_sync_committee', - # Execution - 'latest_execution_payload_header', ] for field in stable_fields: assert getattr(pre_state, field) == getattr(post_state, field) # Modified fields - modified_fields = ['fork'] + modified_fields = ['fork', 'latest_execution_payload_header'] for field in modified_fields: assert getattr(pre_state, field) != getattr(post_state, field) From 368e70d9bef4f29a16f1c2f7018fe68eaeeb7b06 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 2 Feb 2023 14:47:28 +1100 Subject: [PATCH 018/210] Remove sending empty blobs sidecar responses --- specs/deneb/p2p-interface.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index b1ff8b9226..f6b1050314 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -235,8 +235,9 @@ The following blobs sidecars, where they exist, MUST be sent in consecutive orde Clients MAY limit the number of blobs sidecars in the response. -An empty `BlobSidecar` is one that does not contain any blobs, but contains non-zero `beacon_block_root`, `beacon_block_slot` and a valid `kzg_aggregated_proof`. -Clients MAY NOT want to consider empty `BlobSidecar`s in rate limiting logic. +Slots that do not contain known blobs MUST be skipped, mimicking the behaviour +of the `BlocksByRange` request. Only response chunks with known blobs should +therefore be sent. The response MUST contain no more than `count` blobs sidecars. 
From ffc78e99282e65cc4da92c4dba0743be434d367b Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 10 Feb 2023 10:40:43 +0100 Subject: [PATCH 019/210] fixes * separate constant for blob requests * pedantry --- specs/eip4844/p2p-interface.md | 62 +++++++++++++++++----------------- specs/eip4844/validator.md | 32 ++++++++++-------- 2 files changed, 48 insertions(+), 46 deletions(-) diff --git a/specs/eip4844/p2p-interface.md b/specs/eip4844/p2p-interface.md index 94821a82cb..98365f8238 100644 --- a/specs/eip4844/p2p-interface.md +++ b/specs/eip4844/p2p-interface.md @@ -10,22 +10,24 @@ The specification of these changes continues in the same format as the network s -- [Configuration](#configuration) -- [Containers](#containers) - - [`BlobSidecar`](#blobsidecar) - - [`SignedBlobSidecar`](#signedblobsidecar) -- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - - [Topics and messages](#topics-and-messages) - - [Global topics](#global-topics) - - [`beacon_block`](#beacon_block) - - [`blob_sidecar_{index}`](#blob_sidecar_index) - - [Transitioning the gossip](#transitioning-the-gossip) -- [The Req/Resp domain](#the-reqresp-domain) - - [Messages](#messages) - - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) - - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2) - - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) - - [BlobsSidecarsByRange v1](#blobssidecarsbyrange-v1) + - [Configuration](#configuration) + - [Containers](#containers) + - [`BlobSidecar`](#blobsidecar) + - [`SignedBlobSidecar`](#signedblobsidecar) + - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) + - [Topics and messages](#topics-and-messages) + - [Global topics](#global-topics) + - [`beacon_block`](#beacon_block) + - [`blob_sidecar_{index}`](#blob_sidecar_index) + - [Transitioning the gossip](#transitioning-the-gossip) + - [The Req/Resp domain](#the-reqresp-domain) + - [Messages](#messages) + - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) + - [BeaconBlocksByRoot 
v2](#beaconblocksbyroot-v2) + - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) + - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1) +- [Design decision rationale](#design-decision-rationale) + - [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks) @@ -35,6 +37,7 @@ The specification of these changes continues in the same format as the network s | Name | Value | Description | |------------------------------------------|-----------------------------------|---------------------------------------------------------------------| | `MAX_REQUEST_BLOCKS_EIP4844` | `2**7` (= 128) | Maximum number of blocks in a single request | +| `MAX_REQUEST_BLOB_SIDECARS` | `2**7` (= 128) | Maximum number of blob sidecars in a single request | | `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blobs sidecars | ## Containers @@ -68,7 +71,8 @@ Some gossip meshes are upgraded in the fork of EIP-4844 to support upgraded type ### Topics and messages Topics follow the same specification as in prior upgrades. -The `beacon_block` topic is deprecated and replaced by the `beacon_block_and_blobs_sidecar` topic. All other topics remain stable. + +The `beacon_block` topic is modified to also support EIP4844 blocks and new topics are added per table below. All other topics remain stable. The specification around the creation, validation, and dissemination of messages has not changed from the Capella document unless explicitly noted here. @@ -94,19 +98,14 @@ This topic is used to propagate signed blob sidecars, one for each sidecar index The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`: -- _[REJECT]_ The sidecar is for the correct topic -- - i.e. `sidecar.index` matches the topic `{index}`. +- _[REJECT]_ The sidecar is for the correct topic -- i.e. 
`sidecar.index` matches the topic `{index}`. - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- - i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` +- _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` - _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.slot, sidecar.proposer_index, sidecar.index)`. -- Clients MUST discard blocks where multiple sidecars for the same proposer and index have been observed. -- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot - in the context of the current shuffling (defined by `parent_root`/`slot`). - If the `proposer_index` cannot immediately be verified against the expected shuffling, - the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- - in such a case _do not_ `REJECT`, instead `IGNORE` this message. +- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `parent_root`/`slot`). + If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. 
### Transitioning the gossip @@ -171,7 +170,7 @@ class BlobIdentifier(Container): ``` ( - List[BlobIdentifier, MAX_REQUEST_BLOCKS_EIP4844] + List[BlobIdentifier, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK] ) ``` @@ -179,7 +178,7 @@ Response Content: ``` ( - List[BlobSidecar, MAX_REQUEST_BLOCKS_EIP4844] + List[BlobSidecar, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK] ) ``` @@ -190,7 +189,7 @@ It may be less in the case that the responding peer is missing blocks and sideca The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip. -No more than `MAX_REQUEST_BLOCKS_EIP4844` may be requested at a time. +No more than `MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK` may be requested at a time. `BlobSidecarsByRoot` is primarily used to recover recent blocks and sidecars (e.g. when receiving a block or attestation whose parent is unknown). @@ -202,7 +201,7 @@ Clients MUST support requesting sidecars since `minimum_request_epoch`, where `m Clients MUST respond with at least one sidecar, if they have it. Clients MAY limit the number of blocks and sidecars in the response. 
-#### BlobsSidecarsByRange v1 +#### BlobSidecarsByRange v1 **Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/` @@ -220,10 +219,11 @@ Response Content: class BlobSidecars(Container): block_root: Root List[BlobSidecar, MAX_BLOBS_PER_BLOCK] +``` ``` ( - List[BlobSidecars, MAX_REQUEST_BLOCKS_EIP4844] + List[BlobSidecars, MAX_REQUEST_BLOB_SIDECARS] ) ``` diff --git a/specs/eip4844/validator.md b/specs/eip4844/validator.md index a4ff8b8f20..8c3fc6faf3 100644 --- a/specs/eip4844/validator.md +++ b/specs/eip4844/validator.md @@ -16,7 +16,7 @@ - [Block and sidecar proposal](#block-and-sidecar-proposal) - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) - [Blob KZG commitments](#blob-kzg-commitments) - - [Constructing the `SignedBlobSidecar`](#constructing-the-signedblobsidecar) + - [Constructing the `SignedBlobSidecar`s](#constructing-the-signedblobsidecars) - [Sidecar](#sidecar) @@ -78,27 +78,29 @@ def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload, 3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`. -#### Constructing the `SignedBlobSidecar` +#### Constructing the `SignedBlobSidecar`s + To construct a `SignedBlobSidecar`, a `signed_blob_sidecar` is defined with the necessary context for block and sidecar proposal. ##### Sidecar -Coupled with block, the corresponding blobs are packaged into sidecar objects for distribution to the network. +Blobs associated with a block are packaged into sidecar objects for distribution to the network. 
Each `sidecar` is obtained from: ```python -def get_blob_sidecar(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[BlobsSidecar]: - return [ - BlobsSidecar( - block_root=hash_tree_root(block), - index=idx - slot=block.slot, - block_parent_root=block.parent_root, - blob=blob, - kzg_commitment=block.body.blob_kzg_commitments[idx], - kzg_proof=compute_kzg_proof(blob),) - for idx, blob in enumerate(blobs) - ] +def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[BlobsSidecar]: + return [ + BlobsSidecar( + block_root=hash_tree_root(block), + index=index, + slot=block.slot, + block_parent_root=block.parent_root, + blob=blob, + kzg_commitment=block.body.blob_kzg_commitments[idx], + kzg_proof=compute_kzg_proof(blob), + ) + for index, blob in enumerate(blobs) + ] ``` Each `sidecar` is then published to the global `blob_sidecar_{index}` topics according to its index. From 8bc19d99aea32897be7eace506b1b344b33b0de1 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Fri, 10 Feb 2023 11:16:51 +0100 Subject: [PATCH 020/210] fixes * expand sidecar gossip conditions * editing * add spec text for `BlobSidecar` signatures --- specs/deneb/fork-choice.md | 2 +- specs/deneb/p2p-interface.md | 49 +++++++++++++++++++----------------- specs/deneb/validator.md | 19 +++++++++++--- 3 files changed, 43 insertions(+), 27 deletions(-) diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 89eac22020..ea235c0553 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -53,7 +53,7 @@ def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments: sidecars = retrieve_blob_sidecars(slot, beacon_block_root) # For testing, `retrieve_blobs_sidecar` returns "TEST". - # TODO: Remove it once we have a way to inject `BlobsSidecar` into tests. + # TODO: Remove it once we have a way to inject `BlobSidecar` into tests. 
if isinstance(sidecar, str): return True diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index f023657952..41af2f9f14 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -10,22 +10,22 @@ The specification of these changes continues in the same format as the network s - - [Configuration](#configuration) - - [Containers](#containers) - - [`BlobSidecar`](#blobsidecar) - - [`SignedBlobSidecar`](#signedblobsidecar) - - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - - [Topics and messages](#topics-and-messages) - - [Global topics](#global-topics) - - [`beacon_block`](#beacon_block) - - [`blob_sidecar_{index}`](#blob_sidecar_index) - - [Transitioning the gossip](#transitioning-the-gossip) - - [The Req/Resp domain](#the-reqresp-domain) - - [Messages](#messages) - - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) - - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2) - - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) - - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1) +- [Configuration](#configuration) +- [Containers](#containers) + - [`BlobSidecar`](#blobsidecar) + - [`SignedBlobSidecar`](#signedblobsidecar) +- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) + - [Topics and messages](#topics-and-messages) + - [Global topics](#global-topics) + - [`beacon_block`](#beacon_block) + - [`blob_sidecar_{index}`](#blob_sidecar_index) + - [Transitioning the gossip](#transitioning-the-gossip) +- [The Req/Resp domain](#the-reqresp-domain) + - [Messages](#messages) + - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) + - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2) + - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) + - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1) - [Design decision rationale](#design-decision-rationale) - [Why are blobs relayed as a sidecar, separate from beacon blocks?](#why-are-blobs-relayed-as-a-sidecar-separate-from-beacon-blocks) @@ -99,8 +99,9 @@ This topic is used to 
propagate signed blob sidecars, one for each sidecar index The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`: - _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`. -- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) +- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future blocks for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` +- _[IGNORE]_ The blob's block's parent defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). - _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.slot, sidecar.proposer_index, sidecar.index)`. -- Clients MUST discard blocks where multiple sidecars for the same proposer and index have been observed. @@ -140,9 +141,6 @@ No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time. **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/` -After `DENEB_FORK_EPOCH`, `BeaconBlocksByRootV2` is replaced by `BeaconBlockAndBlobsSidecarByRootV1`. -Clients MUST support requesting blocks by root for pre-fork-epoch blocks. 
- Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: [1]: # (eth2spec: skip) @@ -153,6 +151,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: | `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` | | `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` | | `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` | +| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` | No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time. @@ -160,6 +159,8 @@ No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time. **Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/` +New in deneb. + Request Content: ```python @@ -191,7 +192,7 @@ may not be available beyond the initial distribution via gossip. No more than `MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK` may be requested at a time. -`BlobSidecarsByRoot` is primarily used to recover recent blocks and sidecars (e.g. when receiving a block or attestation whose parent is unknown). +`BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing). The response MUST consist of zero or more `response_chunk`. Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload. @@ -205,6 +206,8 @@ Clients MAY limit the number of blocks and sidecars in the response. **Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/` +New in deneb. + Request Content: ``` ( @@ -282,9 +285,9 @@ Clients MUST respond with blobs sidecars that are consistent from a single chain After the initial blobs sidecar, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request. -# Design decision rationale +## Design decision rationale -## Why are blobs relayed as a sidecar, separate from beacon blocks? +### Why are blobs relayed as a sidecar, separate from beacon blocks? 
This "sidecar" design provides forward compatibility for further data increases by black-boxing `is_data_available()`: with full sharding `is_data_available()` can be replaced by data-availability-sampling (DAS) diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 1b6995ad80..bd05c31d2d 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -88,9 +88,9 @@ Blobs associated with a block are packaged into sidecar objects for distribution Each `sidecar` is obtained from: ```python -def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[BlobsSidecar]: +def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[BlobSidecar]: return [ - BlobsSidecar( + BlobSidecar( block_root=hash_tree_root(block), index=index, slot=block.slot, @@ -101,11 +101,24 @@ def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blo ) for index, blob in enumerate(blobs) ] + ``` -Each `sidecar` is then published to the global `blob_sidecar_{index}` topics according to its index. +Then `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and to the global `blob_sidecar_{index}` topics according to its index. + +`signature` is obtained from: + +```python +def get_blob_sidecar_signature(state: BeaconState, + sidecar: BlobSidecar, + privkey: int) -> BLSSignature: + domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(sidecar.slot)) + signing_root = compute_signing_root(sidecar, domain) + return bls.Sign(privkey, signing_root) +``` After publishing the peers on the network may request the sidecar through sync-requests, or a local user may be interested. + The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` epochs and serve when capable, to ensure the data-availability of these blobs throughout the network. 
From 86962b94377f549989ff7273fec8dc7faee70627 Mon Sep 17 00:00:00 2001 From: Potuz Date: Fri, 10 Feb 2023 11:43:38 -0300 Subject: [PATCH 021/210] Simplify commitee weight computation --- specs/phase0/fork-choice.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index e535184af7..f2ccc24b9d 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -194,10 +194,7 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei: proposer_score = Gwei(0) # Boost is applied if ``root`` is an ancestor of ``proposer_boost_root`` if get_ancestor(store, store.proposer_boost_root, store.blocks[root].slot) == root: - num_validators = len(get_active_validator_indices(state, get_current_epoch(state))) - avg_balance = get_total_active_balance(state) // num_validators - committee_size = num_validators // SLOTS_PER_EPOCH - committee_weight = committee_size * avg_balance + committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH proposer_score = (committee_weight * PROPOSER_SCORE_BOOST) // 100 return attestation_score + proposer_score From fc10714f42888a26a52f17a0ba7dabfdc83811cd Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 13 Feb 2023 14:32:50 +0000 Subject: [PATCH 022/210] Call compute_challenge with polynomial as argument --- specs/eip4844/polynomial-commitments.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/eip4844/polynomial-commitments.md b/specs/eip4844/polynomial-commitments.md index 63217c0bce..0e2c671dad 100644 --- a/specs/eip4844/polynomial-commitments.md +++ b/specs/eip4844/polynomial-commitments.md @@ -475,8 +475,8 @@ def compute_blob_kzg_proof(blob: Blob) -> KZGProof: Public method. 
""" commitment = blob_to_kzg_commitment(blob) - evaluation_challenge = compute_challenge(blob, commitment) polynomial = blob_to_polynomial(blob) + evaluation_challenge = compute_challenge(polynomial, commitment) return compute_kzg_proof_impl(polynomial, evaluation_challenge) ``` @@ -493,8 +493,8 @@ def verify_blob_kzg_proof(blob: Blob, """ commitment = bytes_to_kzg_commitment(commitment_bytes) - evaluation_challenge = compute_challenge(blob, commitment) polynomial = blob_to_polynomial(blob) + evaluation_challenge = compute_challenge(polynomial, commitment) # Evaluate polynomial at `evaluation_challenge` (evaluation function checks for div-by-zero) y = evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge) @@ -520,9 +520,9 @@ def verify_blob_kzg_proof_multi(blobs: Sequence[Blob], for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes): commitment = bytes_to_kzg_commitment(commitment_bytes) commitments.append(commitment) - evaluation_challenge = compute_challenge(blob, commitment) - evaluation_challenges.append(evaluation_challenge) polynomial = blob_to_polynomial(blob) + evaluation_challenge = compute_challenge(polynomial, commitment) + evaluation_challenges.append(evaluation_challenge) ys.append(evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge)) proofs.append(bytes_to_kzg_proof(proof_bytes)) From 7b642a2884189e821254b596db29ab0cc4c892ec Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 13 Feb 2023 14:57:04 +0000 Subject: [PATCH 023/210] compute_challenge takes blobs + linter --- specs/eip4844/fork-choice.md | 5 +++-- specs/eip4844/polynomial-commitments.md | 11 +++++------ specs/eip4844/validator.md | 3 ++- 3 files changed, 10 insertions(+), 9 deletions(-) diff --git a/specs/eip4844/fork-choice.md b/specs/eip4844/fork-choice.md index 8dea28dedc..9629879077 100644 --- a/specs/eip4844/fork-choice.md +++ b/specs/eip4844/fork-choice.md @@ -45,10 +45,11 @@ def validate_blobs_sidecar(slot: 
Slot, assert slot == blobs_sidecar.beacon_block_slot assert beacon_block_root == blobs_sidecar.beacon_block_root blobs = blobs_sidecar.blobs - kzg_aggregated_proof = blobs_sidecar.kzg_aggregated_proof + # kzg_aggregated_proof = blobs_sidecar.kzg_aggregated_proof assert len(expected_kzg_commitments) == len(blobs) - assert verify_aggregate_kzg_proof(blobs, expected_kzg_commitments, kzg_aggregated_proof) + # Disabled because not available before switch to single blob sidecars + # assert verify_aggregate_kzg_proof(blobs, expected_kzg_commitments, kzg_aggregated_proof) ``` #### `is_data_available` diff --git a/specs/eip4844/polynomial-commitments.md b/specs/eip4844/polynomial-commitments.md index 0e2c671dad..ac99313ce9 100644 --- a/specs/eip4844/polynomial-commitments.md +++ b/specs/eip4844/polynomial-commitments.md @@ -226,7 +226,7 @@ def blob_to_polynomial(blob: Blob) -> Polynomial: #### `compute_challenge` ```python -def compute_challenge(polynomial: Polynomial, +def compute_challenge(blob: Blob, commitment: KZGCommitment) -> BLSFieldElement: """ Return the Fiat-Shamir challenges required by the rest of the protocol. 
@@ -242,8 +242,7 @@ def compute_challenge(polynomial: Polynomial, data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly + num_polynomials # Append each polynomial which is composed by field elements - for field_element in polynomial: - data += int.to_bytes(field_element, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) + data += blob # Append serialized G1 points data += commitment @@ -476,7 +475,7 @@ def compute_blob_kzg_proof(blob: Blob) -> KZGProof: """ commitment = blob_to_kzg_commitment(blob) polynomial = blob_to_polynomial(blob) - evaluation_challenge = compute_challenge(polynomial, commitment) + evaluation_challenge = compute_challenge(blob, commitment) return compute_kzg_proof_impl(polynomial, evaluation_challenge) ``` @@ -494,7 +493,7 @@ def verify_blob_kzg_proof(blob: Blob, commitment = bytes_to_kzg_commitment(commitment_bytes) polynomial = blob_to_polynomial(blob) - evaluation_challenge = compute_challenge(polynomial, commitment) + evaluation_challenge = compute_challenge(blob, commitment) # Evaluate polynomial at `evaluation_challenge` (evaluation function checks for div-by-zero) y = evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge) @@ -521,7 +520,7 @@ def verify_blob_kzg_proof_multi(blobs: Sequence[Blob], commitment = bytes_to_kzg_commitment(commitment_bytes) commitments.append(commitment) polynomial = blob_to_polynomial(blob) - evaluation_challenge = compute_challenge(polynomial, commitment) + evaluation_challenge = compute_challenge(blob, commitment) evaluation_challenges.append(evaluation_challenge) ys.append(evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge)) proofs.append(bytes_to_kzg_proof(proof_bytes)) diff --git a/specs/eip4844/validator.md b/specs/eip4844/validator.md index bfdd69370a..413e315fc6 100644 --- a/specs/eip4844/validator.md +++ b/specs/eip4844/validator.md @@ -95,7 +95,8 @@ def get_blobs_sidecar(block: BeaconBlock, blobs: Sequence[Blob]) -> BlobsSidecar beacon_block_root=hash_tree_root(block), 
beacon_block_slot=block.slot, blobs=blobs, - kzg_aggregated_proof=compute_aggregate_kzg_proof(blobs), + # Disabled because not available before switch to single blob sidecars + kzg_aggregated_proof=KZGProof(), # compute_aggregate_kzg_proof(blobs), ) ``` From 901303f14fbb2153393fd2b5d7b88f160ba569ac Mon Sep 17 00:00:00 2001 From: Paul Harris Date: Mon, 23 Jan 2023 15:08:34 +0100 Subject: [PATCH 024/210] Replaced EIP4844 references with Deneb Fixes #3207 --- .circleci/config.yml | 6 +- .github/workflows/run-tests.yml | 2 +- .gitignore | 2 +- Makefile | 12 +- README.md | 2 +- configs/mainnet.yaml | 6 +- configs/minimal.yaml | 6 +- presets/mainnet/{eip4844.yaml => deneb.yaml} | 0 presets/minimal/{eip4844.yaml => deneb.yaml} | 0 setup.py | 40 +++--- specs/{eip4844 => deneb}/beacon-chain.md | 6 +- specs/{eip4844 => deneb}/fork-choice.md | 0 specs/{eip4844 => deneb}/fork.md | 20 +-- specs/{eip4844 => deneb}/light-client/fork.md | 38 ++--- .../light-client/full-node.md | 8 +- .../light-client/p2p-interface.md | 60 ++++---- .../light-client/sync-protocol.md | 14 +- specs/{eip4844 => deneb}/p2p-interface.md | 8 +- .../polynomial-commitments.md | 0 specs/{eip4844 => deneb}/validator.md | 0 .../test/altair/light_client/test_sync.py | 52 +++---- tests/core/pyspec/eth2spec/test/context.py | 10 +- .../test/{eip4844 => deneb}/__init__.py | 0 .../test/{eip4844 => deneb}/fork/__init__.py | 0 .../fork/test_deneb_fork_basic.py} | 48 +++---- .../test/deneb/fork/test_deneb_fork_random.py | 84 +++++++++++ .../{eip4844 => deneb}/random/__init__.py | 0 .../{eip4844 => deneb}/random/test_random.py | 130 +++++++++--------- .../{eip4844 => deneb}/sanity/__init__.py | 0 .../{eip4844 => deneb}/sanity/test_blocks.py | 6 +- .../{eip4844 => deneb}/unittests/__init__.py | 0 .../unittests/fork_choice/__init__.py | 0 .../test_validate_blobs_sidecar.py | 10 +- .../polynomial_commitments/__init__.py | 0 .../test_polynomial_commitments.py | 8 +- .../{eip4844 => deneb}/unittests/test_kzg.py | 4 +- 
.../unittests/test_offset.py | 4 +- .../eip4844/fork/test_eip4844_fork_random.py | 84 ----------- .../pyspec/eth2spec/test/helpers/constants.py | 10 +- .../helpers/{eip4844 => deneb}/__init__.py | 0 .../test/helpers/{eip4844 => deneb}/fork.py | 10 +- .../test/helpers/execution_payload.py | 6 +- .../eth2spec/test/helpers/fork_transition.py | 10 +- .../pyspec/eth2spec/test/helpers/forks.py | 10 +- .../pyspec/eth2spec/test/helpers/genesis.py | 6 +- .../test/utils/randomized_block_tests.py | 4 +- tests/generators/epoch_processing/main.py | 6 +- tests/generators/finality/main.py | 6 +- tests/generators/fork_choice/main.py | 6 +- tests/generators/forks/main.py | 8 +- tests/generators/genesis/main.py | 6 +- tests/generators/light_client/main.py | 6 +- tests/generators/operations/main.py | 6 +- tests/generators/random/Makefile | 4 +- tests/generators/random/generate.py | 14 +- tests/generators/random/main.py | 6 +- tests/generators/rewards/main.py | 6 +- tests/generators/sanity/main.py | 8 +- tests/generators/sync/main.py | 6 +- 59 files changed, 407 insertions(+), 407 deletions(-) rename presets/mainnet/{eip4844.yaml => deneb.yaml} (100%) rename presets/minimal/{eip4844.yaml => deneb.yaml} (100%) rename specs/{eip4844 => deneb}/beacon-chain.md (98%) rename specs/{eip4844 => deneb}/fork-choice.md (100%) rename specs/{eip4844 => deneb}/fork.md (86%) rename specs/{eip4844 => deneb}/light-client/fork.md (62%) rename specs/{eip4844 => deneb}/light-client/full-node.md (94%) rename specs/{eip4844 => deneb}/light-client/p2p-interface.md (65%) rename specs/{eip4844 => deneb}/light-client/sync-protocol.md (84%) rename specs/{eip4844 => deneb}/p2p-interface.md (95%) rename specs/{eip4844 => deneb}/polynomial-commitments.md (100%) rename specs/{eip4844 => deneb}/validator.md (100%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/fork/__init__.py (100%) rename 
tests/core/pyspec/eth2spec/test/{eip4844/fork/test_eip4844_fork_basic.py => deneb/fork/test_deneb_fork_basic.py} (55%) create mode 100644 tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_random.py rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/random/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/random/test_random.py (63%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/sanity/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/sanity/test_blocks.py (95%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/unittests/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/unittests/fork_choice/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/unittests/fork_choice/test_validate_blobs_sidecar.py (93%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/unittests/polynomial_commitments/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/unittests/polynomial_commitments/test_polynomial_commitments.py (96%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/unittests/test_kzg.py (91%) rename tests/core/pyspec/eth2spec/test/{eip4844 => deneb}/unittests/test_offset.py (94%) delete mode 100644 tests/core/pyspec/eth2spec/test/eip4844/fork/test_eip4844_fork_random.py rename tests/core/pyspec/eth2spec/test/helpers/{eip4844 => deneb}/__init__.py (100%) rename tests/core/pyspec/eth2spec/test/helpers/{eip4844 => deneb}/fork.py (90%) diff --git a/.circleci/config.yml b/.circleci/config.yml index 94065d0bb7..665207bdd0 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -142,7 +142,7 @@ jobs: command: make citest fork=capella - store_test_results: path: tests/core/pyspec/test-reports - test-eip4844: + test-deneb: docker: - image: circleci/python:3.8 working_directory: ~/specs-repo @@ -152,7 +152,7 @@ jobs: - restore_pyspec_cached_venv - run: name: Run py-tests - command: make citest 
fork=eip4844 + command: make citest fork=deneb - store_test_results: path: tests/core/pyspec/test-reports table_of_contents: @@ -272,7 +272,7 @@ workflows: - test-capella: requires: - install_pyspec_test - - test-eip4844: + - test-deneb: requires: - install_pyspec_test - table_of_contents diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 2c7b9d883b..926c3fbbf9 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -83,7 +83,7 @@ jobs: needs: [preclear,lint,codespell,table_of_contents] strategy: matrix: - version: ["phase0", "altair", "bellatrix", "capella", "eip4844"] + version: ["phase0", "altair", "bellatrix", "capella", "deneb"] steps: - name: Checkout this repo uses: actions/checkout@v3.2.0 diff --git a/.gitignore b/.gitignore index 2192515998..c49e6c006c 100644 --- a/.gitignore +++ b/.gitignore @@ -19,7 +19,7 @@ tests/core/pyspec/eth2spec/phase0/ tests/core/pyspec/eth2spec/altair/ tests/core/pyspec/eth2spec/bellatrix/ tests/core/pyspec/eth2spec/capella/ -tests/core/pyspec/eth2spec/eip4844/ +tests/core/pyspec/eth2spec/deneb/ # coverage reports .htmlcov diff --git a/Makefile b/Makefile index 8604fac27a..854f42ce38 100644 --- a/Makefile +++ b/Makefile @@ -30,7 +30,7 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) \ $(wildcard $(SPEC_DIR)/custody/*.md) \ $(wildcard $(SPEC_DIR)/das/*.md) \ $(wildcard $(SPEC_DIR)/sharding/*.md) \ - $(wildcard $(SPEC_DIR)/eip4844/*.md) $(wildcard $(SPEC_DIR)/eip4844/**/*.md) \ + $(wildcard $(SPEC_DIR)/deneb/*.md) $(wildcard $(SPEC_DIR)/deneb/**/*.md) \ $(wildcard $(SSZ_DIR)/*.md) COV_HTML_OUT=.htmlcov @@ -67,7 +67,7 @@ partial_clean: rm -rf $(ETH2SPEC_MODULE_DIR)/altair rm -rf $(ETH2SPEC_MODULE_DIR)/bellatrix rm -rf $(ETH2SPEC_MODULE_DIR)/capella - rm -rf $(ETH2SPEC_MODULE_DIR)/eip4844 + rm -rf $(ETH2SPEC_MODULE_DIR)/deneb rm -rf $(COV_HTML_OUT_DIR) rm -rf $(TEST_REPORT_DIR) rm -rf eth2spec.egg-info dist build @@ -105,12 +105,12 @@ install_test: # Testing 
against `minimal` or `mainnet` config by default test: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.eip4844.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec # Testing against `minimal` or `mainnet` config by default find_test: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.eip4844.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec citest: pyspec mkdir -p $(TEST_REPORT_DIR); @@ -142,8 +142,8 @@ codespell: lint: pyspec . 
venv/bin/activate; cd $(PY_SPEC_DIR); \ flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \ - && pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/eip4844 \ - && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.eip4844 + && pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/deneb \ + && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.deneb lint_generators: pyspec . venv/bin/activate; cd $(TEST_GENERATORS_DIR); \ diff --git a/README.md b/README.md index ed8771cb00..466c151937 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ Features are researched and developed in parallel, and then consolidated into se | Code Name or Topic | Specs | Notes | | - | - | - | | Capella (tentative) |
  • Core
    • [Beacon chain changes](specs/capella/beacon-chain.md)
    • [Capella fork](specs/capella/fork.md)
  • Additions
    • [Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))
    • [Validator additions](specs/capella/validator.md)
    • [P2P networking](specs/capella/p2p-interface.md)
| -| EIP4844 (tentative) |
  • Core
    • [Beacon Chain changes](specs/eip4844/beacon-chain.md)
    • [EIP-4844 fork](specs/eip4844/fork.md)
    • [Polynomial commitments](specs/eip4844/polynomial-commitments.md)
    • [Fork choice changes](specs/eip4844/fork-choice.md)
  • Additions
    • [Light client sync protocol changes](specs/eip4844/light-client/sync-protocol.md) ([fork](specs/eip4844/light-client/fork.md), [full node](specs/eip4844/light-client/full-node.md), [networking](specs/eip4844/light-client/p2p-interface.md))
    • [Honest validator guide changes](specs/eip4844/validator.md)
    • [P2P networking](specs/eip4844/p2p-interface.md)
| +| Deneb (tentative) |
  • Core
    • [Beacon Chain changes](specs/deneb/beacon-chain.md)
    • [EIP-4844 fork](specs/deneb/fork.md)
    • [Polynomial commitments](specs/deneb/polynomial-commitments.md)
    • [Fork choice changes](specs/deneb/fork-choice.md)
  • Additions
    • [Light client sync protocol changes](specs/deneb/light-client/sync-protocol.md) ([fork](specs/deneb/light-client/fork.md), [full node](specs/deneb/light-client/full-node.md), [networking](specs/deneb/light-client/p2p-interface.md))
    • [Honest validator guide changes](specs/deneb/validator.md)
    • [P2P networking](specs/deneb/p2p-interface.md)
| | Sharding (outdated) |
  • Core
    • [Beacon Chain changes](specs/sharding/beacon-chain.md)
  • Additions
    • [P2P networking](specs/sharding/p2p-interface.md)
| | Custody Game (outdated) |
  • Core
    • [Beacon Chain changes](specs/custody_game/beacon-chain.md)
  • Additions
    • [Honest validator guide changes](specs/custody_game/validator.md)
| Dependent on sharding | | Data Availability Sampling (outdated) |
  • Core
    • [Core types and functions](specs/das/das-core.md)
    • [Fork choice changes](specs/das/fork-choice.md)
  • Additions
    • [P2P Networking](specs/das/p2p-interface.md)
    • [Sampling process](specs/das/sampling.md)
|
  • Dependent on sharding
  • [Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)
| diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 929d39f8a2..f7e53d7e18 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -47,9 +47,9 @@ BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC # Capella CAPELLA_FORK_VERSION: 0x03000000 CAPELLA_FORK_EPOCH: 18446744073709551615 -# EIP4844 -EIP4844_FORK_VERSION: 0x04000000 -EIP4844_FORK_EPOCH: 18446744073709551615 +# Deneb +DENEB_FORK_VERSION: 0x04000000 +DENEB_FORK_EPOCH: 18446744073709551615 diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 5dde4b7494..abecb18813 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -46,9 +46,9 @@ BELLATRIX_FORK_EPOCH: 18446744073709551615 # Capella CAPELLA_FORK_VERSION: 0x03000001 CAPELLA_FORK_EPOCH: 18446744073709551615 -# EIP4844 -EIP4844_FORK_VERSION: 0x04000001 -EIP4844_FORK_EPOCH: 18446744073709551615 +# DENEB +DENEB_FORK_VERSION: 0x04000001 +DENEB_FORK_EPOCH: 18446744073709551615 # Time parameters diff --git a/presets/mainnet/eip4844.yaml b/presets/mainnet/deneb.yaml similarity index 100% rename from presets/mainnet/eip4844.yaml rename to presets/mainnet/deneb.yaml diff --git a/presets/minimal/eip4844.yaml b/presets/minimal/deneb.yaml similarity index 100% rename from presets/minimal/eip4844.yaml rename to presets/minimal/deneb.yaml diff --git a/setup.py b/setup.py index 9102f819bb..f87ed5a6cf 100644 --- a/setup.py +++ b/setup.py @@ -46,7 +46,7 @@ def installPackage(package: str): ALTAIR = 'altair' BELLATRIX = 'bellatrix' CAPELLA = 'capella' -EIP4844 = 'eip4844' +DENEB = 'deneb' # The helper functions that are used when defining constants @@ -632,10 +632,10 @@ def hardcoded_ssz_dep_constants(cls) -> Dict[str, str]: return {**super().hardcoded_ssz_dep_constants(), **constants} # -# EIP4844SpecBuilder +# DenebSpecBuilder # -class EIP4844SpecBuilder(CapellaSpecBuilder): - fork: str = EIP4844 +class DenebSpecBuilder(CapellaSpecBuilder): + fork: str = DENEB @classmethod def imports(cls, preset_name: str): @@ -669,7 +669,7 @@ 
def hardcoded_custom_type_dep_constants(cls, spec_object) -> str: spec_builders = { builder.fork: builder - for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, EIP4844SpecBuilder) + for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder) } @@ -968,14 +968,14 @@ def finalize_options(self): if len(self.md_doc_paths) == 0: print("no paths were specified, using default markdown file paths for pyspec" " build (spec fork: %s)" % self.spec_fork) - if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844): + if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB): self.md_doc_paths = """ specs/phase0/beacon-chain.md specs/phase0/fork-choice.md specs/phase0/validator.md specs/phase0/weak-subjectivity.md """ - if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, EIP4844): + if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB): self.md_doc_paths += """ specs/altair/light-client/full-node.md specs/altair/light-client/light-client.md @@ -987,7 +987,7 @@ def finalize_options(self): specs/altair/validator.md specs/altair/p2p-interface.md """ - if self.spec_fork in (BELLATRIX, CAPELLA, EIP4844): + if self.spec_fork in (BELLATRIX, CAPELLA, DENEB): self.md_doc_paths += """ specs/bellatrix/beacon-chain.md specs/bellatrix/fork.md @@ -996,7 +996,7 @@ def finalize_options(self): specs/bellatrix/p2p-interface.md sync/optimistic.md """ - if self.spec_fork in (CAPELLA, EIP4844): + if self.spec_fork in (CAPELLA, DENEB): self.md_doc_paths += """ specs/capella/light-client/fork.md specs/capella/light-client/full-node.md @@ -1008,18 +1008,18 @@ def finalize_options(self): specs/capella/validator.md specs/capella/p2p-interface.md """ - if self.spec_fork == EIP4844: + if self.spec_fork == DENEB: self.md_doc_paths += """ - specs/eip4844/light-client/fork.md - specs/eip4844/light-client/full-node.md - specs/eip4844/light-client/p2p-interface.md - 
specs/eip4844/light-client/sync-protocol.md - specs/eip4844/beacon-chain.md - specs/eip4844/fork.md - specs/eip4844/fork-choice.md - specs/eip4844/polynomial-commitments.md - specs/eip4844/p2p-interface.md - specs/eip4844/validator.md + specs/deneb/light-client/fork.md + specs/deneb/light-client/full-node.md + specs/deneb/light-client/p2p-interface.md + specs/deneb/light-client/sync-protocol.md + specs/deneb/beacon-chain.md + specs/deneb/fork.md + specs/deneb/fork-choice.md + specs/deneb/polynomial-commitments.md + specs/deneb/p2p-interface.md + specs/deneb/validator.md """ if len(self.md_doc_paths) == 0: raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork) diff --git a/specs/eip4844/beacon-chain.md b/specs/deneb/beacon-chain.md similarity index 98% rename from specs/eip4844/beacon-chain.md rename to specs/deneb/beacon-chain.md index f681ab951e..87ebf7a9e9 100644 --- a/specs/eip4844/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -249,7 +249,7 @@ def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody) -> N *Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-4844 testing only. -The `BeaconState` initialization is unchanged, except for the use of the updated `eip4844.BeaconBlockBody` type +The `BeaconState` initialization is unchanged, except for the use of the updated `deneb.BeaconBlockBody` type when initializing the first body-root. 
```python @@ -259,8 +259,8 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader() ) -> BeaconState: fork = Fork( - previous_version=EIP4844_FORK_VERSION, # [Modified in EIP-4844] for testing only - current_version=EIP4844_FORK_VERSION, # [Modified in EIP-4844] + previous_version=DENEB_FORK_VERSION, # [Modified in Deneb] for testing only + current_version=DENEB_FORK_VERSION, # [Modified in Deneb] epoch=GENESIS_EPOCH, ) state = BeaconState( diff --git a/specs/eip4844/fork-choice.md b/specs/deneb/fork-choice.md similarity index 100% rename from specs/eip4844/fork-choice.md rename to specs/deneb/fork-choice.md diff --git a/specs/eip4844/fork.md b/specs/deneb/fork.md similarity index 86% rename from specs/eip4844/fork.md rename to specs/deneb/fork.md index 39521879ac..864e28888d 100644 --- a/specs/eip4844/fork.md +++ b/specs/deneb/fork.md @@ -28,8 +28,8 @@ Warning: this configuration is not definitive. | Name | Value | | - | - | -| `EIP4844_FORK_VERSION` | `Version('0x04000000')` | -| `EIP4844_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | +| `DENEB_FORK_VERSION` | `Version('0x04000000')` | +| `DENEB_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | ## Helper functions @@ -42,8 +42,8 @@ def compute_fork_version(epoch: Epoch) -> Version: """ Return the fork version at the given ``epoch``. """ - if epoch >= EIP4844_FORK_EPOCH: - return EIP4844_FORK_VERSION + if epoch >= DENEB_FORK_EPOCH: + return DENEB_FORK_VERSION if epoch >= CAPELLA_FORK_EPOCH: return CAPELLA_FORK_VERSION if epoch >= BELLATRIX_FORK_EPOCH: @@ -58,16 +58,16 @@ def compute_fork_version(epoch: Epoch) -> Version: ### Fork trigger TBD. This fork is defined for testing purposes, the EIP may be combined with other consensus-layer upgrade. -For now, we assume the condition will be triggered at epoch `EIP4844_FORK_EPOCH`. +For now, we assume the condition will be triggered at epoch `DENEB_FORK_EPOCH`. 
-Note that for the pure EIP-4844 networks, we don't apply `upgrade_to_eip4844` since it starts with EIP-4844 version logic. +Note that for the pure Deneb networks, we don't apply `upgrade_to_deneb` since it starts with Deneb version logic. ### Upgrading the state -Since the `eip4844.BeaconState` format is equal to the `capella.BeaconState` format, we only have to update `BeaconState.fork`. +Since the `deneb.BeaconState` format is equal to the `capella.BeaconState` format, we only have to update `BeaconState.fork`. ```python -def upgrade_to_eip4844(pre: capella.BeaconState) -> BeaconState: +def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState: epoch = capella.get_current_epoch(pre) latest_execution_payload_header = ExecutionPayloadHeader( parent_hash=pre.latest_execution_payload_header.parent_hash, @@ -94,7 +94,7 @@ def upgrade_to_eip4844(pre: capella.BeaconState) -> BeaconState: slot=pre.slot, fork=Fork( previous_version=pre.fork.current_version, - current_version=EIP4844_FORK_VERSION, # [Modified in EIP4844] + current_version=DENEB_FORK_VERSION, # [Modified in Deneb] epoch=epoch, ), # History @@ -127,7 +127,7 @@ def upgrade_to_eip4844(pre: capella.BeaconState) -> BeaconState: current_sync_committee=pre.current_sync_committee, next_sync_committee=pre.next_sync_committee, # Execution-layer - latest_execution_payload_header=latest_execution_payload_header, # [Modified in EIP4844] + latest_execution_payload_header=latest_execution_payload_header, # [Modified in Deneb] # Withdrawals next_withdrawal_index=pre.next_withdrawal_index, next_withdrawal_validator_index=pre.next_withdrawal_validator_index, diff --git a/specs/eip4844/light-client/fork.md b/specs/deneb/light-client/fork.md similarity index 62% rename from specs/eip4844/light-client/fork.md rename to specs/deneb/light-client/fork.md index 2d5f74f467..8c552937a5 100644 --- a/specs/eip4844/light-client/fork.md +++ b/specs/deneb/light-client/fork.md @@ -1,4 +1,4 @@ -# EIP4844 Light Client -- Fork Logic +# 
Deneb Light Client -- Fork Logic ## Table of contents @@ -15,14 +15,14 @@ ## Introduction -This document describes how to upgrade existing light client objects based on the [Capella specification](../../capella/light-client/sync-protocol.md) to EIP4844. This is necessary when processing pre-EIP4844 data with a post-EIP4844 `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format. +This document describes how to upgrade existing light client objects based on the [Capella specification](../../capella/light-client/sync-protocol.md) to Deneb. This is necessary when processing pre-Deneb data with a post-Deneb `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format. ### Upgrading light client data -A EIP4844 `LightClientStore` can still process earlier light client data. In order to do so, that pre-EIP4844 data needs to be locally upgraded to EIP4844 before processing. +A Deneb `LightClientStore` can still process earlier light client data. In order to do so, that pre-Deneb data needs to be locally upgraded to Deneb before processing. 
```python -def upgrade_lc_header_to_eip4844(pre: capella.LightClientHeader) -> LightClientHeader: +def upgrade_lc_header_to_deneb(pre: capella.LightClientHeader) -> LightClientHeader: return LightClientHeader( beacon=pre.beacon, execution=ExecutionPayloadHeader( @@ -47,21 +47,21 @@ def upgrade_lc_header_to_eip4844(pre: capella.LightClientHeader) -> LightClientH ``` ```python -def upgrade_lc_bootstrap_to_eip4844(pre: capella.LightClientBootstrap) -> LightClientBootstrap: +def upgrade_lc_bootstrap_to_deneb(pre: capella.LightClientBootstrap) -> LightClientBootstrap: return LightClientBootstrap( - header=upgrade_lc_header_to_eip4844(pre.header), + header=upgrade_lc_header_to_deneb(pre.header), current_sync_committee=pre.current_sync_committee, current_sync_committee_branch=pre.current_sync_committee_branch, ) ``` ```python -def upgrade_lc_update_to_eip4844(pre: capella.LightClientUpdate) -> LightClientUpdate: +def upgrade_lc_update_to_deneb(pre: capella.LightClientUpdate) -> LightClientUpdate: return LightClientUpdate( - attested_header=upgrade_lc_header_to_eip4844(pre.attested_header), + attested_header=upgrade_lc_header_to_deneb(pre.attested_header), next_sync_committee=pre.next_sync_committee, next_sync_committee_branch=pre.next_sync_committee_branch, - finalized_header=upgrade_lc_header_to_eip4844(pre.finalized_header), + finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header), finality_branch=pre.finality_branch, sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, @@ -69,10 +69,10 @@ def upgrade_lc_update_to_eip4844(pre: capella.LightClientUpdate) -> LightClientU ``` ```python -def upgrade_lc_finality_update_to_eip4844(pre: capella.LightClientFinalityUpdate) -> LightClientFinalityUpdate: +def upgrade_lc_finality_update_to_deneb(pre: capella.LightClientFinalityUpdate) -> LightClientFinalityUpdate: return LightClientFinalityUpdate( - attested_header=upgrade_lc_header_to_eip4844(pre.attested_header), - 
finalized_header=upgrade_lc_header_to_eip4844(pre.finalized_header), + attested_header=upgrade_lc_header_to_deneb(pre.attested_header), + finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header), finality_branch=pre.finality_branch, sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, @@ -80,9 +80,9 @@ def upgrade_lc_finality_update_to_eip4844(pre: capella.LightClientFinalityUpdate ``` ```python -def upgrade_lc_optimistic_update_to_eip4844(pre: capella.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate: +def upgrade_lc_optimistic_update_to_deneb(pre: capella.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate: return LightClientOptimisticUpdate( - attested_header=upgrade_lc_header_to_eip4844(pre.attested_header), + attested_header=upgrade_lc_header_to_deneb(pre.attested_header), sync_aggregate=pre.sync_aggregate, signature_slot=pre.signature_slot, ) @@ -90,20 +90,20 @@ def upgrade_lc_optimistic_update_to_eip4844(pre: capella.LightClientOptimisticUp ### Upgrading the store -Existing `LightClientStore` objects based on Capella MUST be upgraded to EIP4844 before EIP4844 based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `EIP4844_FORK_EPOCH`. +Existing `LightClientStore` objects based on Capella MUST be upgraded to Deneb before Deneb based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `DENEB_FORK_EPOCH`. 
```python -def upgrade_lc_store_to_eip4844(pre: capella.LightClientStore) -> LightClientStore: +def upgrade_lc_store_to_deneb(pre: capella.LightClientStore) -> LightClientStore: if pre.best_valid_update is None: best_valid_update = None else: - best_valid_update = upgrade_lc_update_to_eip4844(pre.best_valid_update) + best_valid_update = upgrade_lc_update_to_deneb(pre.best_valid_update) return LightClientStore( - finalized_header=upgrade_lc_header_to_eip4844(pre.finalized_header), + finalized_header=upgrade_lc_header_to_deneb(pre.finalized_header), current_sync_committee=pre.current_sync_committee, next_sync_committee=pre.next_sync_committee, best_valid_update=best_valid_update, - optimistic_header=upgrade_lc_header_to_eip4844(pre.optimistic_header), + optimistic_header=upgrade_lc_header_to_deneb(pre.optimistic_header), previous_max_active_participants=pre.previous_max_active_participants, current_max_active_participants=pre.current_max_active_participants, ) diff --git a/specs/eip4844/light-client/full-node.md b/specs/deneb/light-client/full-node.md similarity index 94% rename from specs/eip4844/light-client/full-node.md rename to specs/deneb/light-client/full-node.md index 70983e1b39..2751940366 100644 --- a/specs/eip4844/light-client/full-node.md +++ b/specs/deneb/light-client/full-node.md @@ -1,4 +1,4 @@ -# EIP4844 Light Client -- Full Node +# Deneb Light Client -- Full Node **Notice**: This document is a work-in-progress for researchers and implementers. @@ -17,7 +17,7 @@ ## Introduction -This upgrade adds information about the execution payload to light client data as part of the EIP4844 upgrade. +This upgrade adds information about the execution payload to light client data as part of the Deneb upgrade. 
## Helper functions @@ -47,8 +47,8 @@ def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader: withdrawals_root=hash_tree_root(payload.withdrawals), ) - # [New in EIP4844] - if epoch >= EIP4844_FORK_EPOCH: + # [New in Deneb] + if epoch >= DENEB_FORK_EPOCH: execution_header.excess_data_gas = payload.excess_data_gas execution_branch = compute_merkle_proof_for_block_body(block.message.body, EXECUTION_PAYLOAD_INDEX) diff --git a/specs/eip4844/light-client/p2p-interface.md b/specs/deneb/light-client/p2p-interface.md similarity index 65% rename from specs/eip4844/light-client/p2p-interface.md rename to specs/deneb/light-client/p2p-interface.md index f3d89c130d..0ca53056a9 100644 --- a/specs/eip4844/light-client/p2p-interface.md +++ b/specs/deneb/light-client/p2p-interface.md @@ -1,4 +1,4 @@ -# EIP4844 Light Client -- Networking +# Deneb Light Client -- Networking **Notice**: This document is a work-in-progress for researchers and implementers. @@ -26,7 +26,7 @@ ## Networking -The [Capella light client networking specification](../../capella/light-client/p2p-interface.md) is extended to exchange [EIP4844 light client data](./sync-protocol.md). +The [Capella light client networking specification](../../capella/light-client/p2p-interface.md) is extended to exchange [Deneb light client data](./sync-protocol.md). 
### The gossip domain: gossipsub @@ -38,23 +38,23 @@ The [Capella light client networking specification](../../capella/light-client/p [0]: # (eth2spec: skip) -| `fork_version` | Message SSZ type | -| ------------------------------------------------------ | ------------------------------------- | -| `GENESIS_FORK_VERSION` | n/a | -| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` | -| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` | -| `EIP4844_FORK_VERSION` and later | `eip4844.LightClientFinalityUpdate` | +| `fork_version` | Message SSZ type | +|--------------------------------------------------------|-------------------------------------| +| `GENESIS_FORK_VERSION` | n/a | +| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` | +| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` | +| `DENEB_FORK_VERSION` and later | `deneb.LightClientFinalityUpdate` | ###### `light_client_optimistic_update` [0]: # (eth2spec: skip) | `fork_version` | Message SSZ type | -| ------------------------------------------------------ | ------------------------------------- | +|--------------------------------------------------------|---------------------------------------| | `GENESIS_FORK_VERSION` | n/a | | `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` | | `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` | -| `EIP4844_FORK_VERSION` and later | `eip4844.LightClientOptimisticUpdate` | +| `DENEB_FORK_VERSION` and later | `deneb.LightClientOptimisticUpdate` | ### The Req/Resp domain @@ -64,42 +64,42 @@ The [Capella light client networking specification](../../capella/light-client/p [0]: # (eth2spec: skip) -| `fork_version` | Response SSZ type | -| ------------------------------------------------------ | ------------------------------------- | -| `GENESIS_FORK_VERSION` | n/a | -| `ALTAIR_FORK_VERSION` through 
`BELLATRIX_FORK_VERSION` | `altair.LightClientBootstrap` | -| `CAPELLA_FORK_VERSION` | `capella.LightClientBootstrap` | -| `EIP4844_FORK_VERSION` and later | `eip4844.LightClientBootstrap` | +| `fork_version` | Response SSZ type | +|--------------------------------------------------------|------------------------------------| +| `GENESIS_FORK_VERSION` | n/a | +| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientBootstrap` | +| `CAPELLA_FORK_VERSION` | `capella.LightClientBootstrap` | +| `DENEB_FORK_VERSION` and later | `deneb.LightClientBootstrap` | ##### LightClientUpdatesByRange [0]: # (eth2spec: skip) -| `fork_version` | Response chunk SSZ type | -| ------------------------------------------------------ | ------------------------------------- | -| `GENESIS_FORK_VERSION` | n/a | -| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientUpdate` | -| `CAPELLA_FORK_VERSION` | `capella.LightClientUpdate` | -| `EIP4844_FORK_VERSION` and later | `eip4844.LightClientUpdate` | +| `fork_version` | Response chunk SSZ type | +|--------------------------------------------------------|----------------------------------| +| `GENESIS_FORK_VERSION` | n/a | +| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientUpdate` | +| `CAPELLA_FORK_VERSION` | `capella.LightClientUpdate` | +| `DENEB_FORK_VERSION` and later | `deneb.LightClientUpdate` | ##### GetLightClientFinalityUpdate [0]: # (eth2spec: skip) -| `fork_version` | Response SSZ type | -| ------------------------------------------------------ | ------------------------------------- | -| `GENESIS_FORK_VERSION` | n/a | -| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` | -| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` | -| `EIP4844_FORK_VERSION` and later | `eip4844.LightClientFinalityUpdate` | +| `fork_version` | Response SSZ type | 
+|--------------------------------------------------------|-------------------------------------| +| `GENESIS_FORK_VERSION` | n/a | +| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` | +| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` | +| `DENEB_FORK_VERSION` and later | `deneb.LightClientFinalityUpdate` | ##### GetLightClientOptimisticUpdate [0]: # (eth2spec: skip) | `fork_version` | Response SSZ type | -| ------------------------------------------------------ | ------------------------------------- | +|--------------------------------------------------------|---------------------------------------| | `GENESIS_FORK_VERSION` | n/a | | `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` | | `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` | -| `EIP4844_FORK_VERSION` and later | `eip4844.LightClientOptimisticUpdate` | +| `DENEB_FORK_VERSION` and later | `deneb.LightClientOptimisticUpdate` | diff --git a/specs/eip4844/light-client/sync-protocol.md b/specs/deneb/light-client/sync-protocol.md similarity index 84% rename from specs/eip4844/light-client/sync-protocol.md rename to specs/deneb/light-client/sync-protocol.md index 181ca14eb4..6f948257bb 100644 --- a/specs/eip4844/light-client/sync-protocol.md +++ b/specs/deneb/light-client/sync-protocol.md @@ -1,4 +1,4 @@ -# EIP4844 Light Client -- Sync Protocol +# Deneb Light Client -- Sync Protocol **Notice**: This document is a work-in-progress for researchers and implementers. @@ -18,7 +18,7 @@ ## Introduction -This upgrade updates light client data to include the EIP4844 changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to EIP4844. 
+This upgrade updates light client data to include the Deneb changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to Deneb. Additional documents describes the impact of the upgrade on certain roles: - [Full node](./full-node.md) @@ -32,11 +32,11 @@ Additional documents describes the impact of the upgrade on certain roles: def get_lc_execution_root(header: LightClientHeader) -> Root: epoch = compute_epoch_at_slot(header.beacon.slot) - # [New in EIP4844] - if epoch >= EIP4844_FORK_EPOCH: + # [New in Deneb] + if epoch >= DENEB_FORK_EPOCH: return hash_tree_root(header.execution) - # [Modified in EIP4844] + # [Modified in Deneb] if epoch >= CAPELLA_FORK_EPOCH: execution_header = capella.ExecutionPayloadHeader( parent_hash=header.execution.parent_hash, @@ -66,8 +66,8 @@ def get_lc_execution_root(header: LightClientHeader) -> Root: def is_valid_light_client_header(header: LightClientHeader) -> bool: epoch = compute_epoch_at_slot(header.beacon.slot) - # [New in EIP4844] - if epoch < EIP4844_FORK_EPOCH: + # [New in Deneb] + if epoch < DENEB_FORK_EPOCH: if header.execution.excess_data_gas != uint256(0): return False diff --git a/specs/eip4844/p2p-interface.md b/specs/deneb/p2p-interface.md similarity index 95% rename from specs/eip4844/p2p-interface.md rename to specs/deneb/p2p-interface.md index ae9380f7a8..852597b099 100644 --- a/specs/eip4844/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -119,13 +119,13 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: | `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` | | `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` | | `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` | -| `EIP4844_FORK_VERSION` | `eip4844.SignedBeaconBlock` | +| `DENEB_FORK_VERSION` | `deneb.SignedBeaconBlock` | 
#### BeaconBlocksByRoot v2 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/` -After `EIP4844_FORK_EPOCH`, `BeaconBlocksByRootV2` is replaced by `BeaconBlockAndBlobsSidecarByRootV1`. +After `DENEB_FORK_EPOCH`, `BeaconBlocksByRootV2` is replaced by `BeaconBlockAndBlobsSidecarByRootV1`. Clients MUST support requesting blocks by root for pre-fork-epoch blocks. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: @@ -170,7 +170,7 @@ No more than `MAX_REQUEST_BLOCKS` may be requested at a time. The response MUST consist of zero or more `response_chunk`. Each _successful_ `response_chunk` MUST contain a single `SignedBeaconBlockAndBlobsSidecar` payload. -Clients MUST support requesting blocks and sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, EIP4844_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers SHOULD respond with error code `3: ResourceUnavailable`. +Clients MUST support requesting blocks and sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers SHOULD respond with error code `3: ResourceUnavailable`. Clients MUST respond with at least one block and sidecar, if they have it. Clients MAY limit the number of blocks and sidecars in the response. @@ -211,7 +211,7 @@ The response MUST consist of zero or more `response_chunk`. Each _successful_ `response_chunk` MUST contain a single `BlobsSidecar` payload. 
Clients MUST keep a record of signed blobs sidecars seen on the epoch range -`[max(current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, EIP4844_FORK_EPOCH), current_epoch]` +`[max(current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]` where `current_epoch` is defined by the current wall-clock time, and clients MUST support serving requests of blobs on this range. diff --git a/specs/eip4844/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md similarity index 100% rename from specs/eip4844/polynomial-commitments.md rename to specs/deneb/polynomial-commitments.md diff --git a/specs/eip4844/validator.md b/specs/deneb/validator.md similarity index 100% rename from specs/eip4844/validator.md rename to specs/deneb/validator.md diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index 664b4fb44f..63bec26b09 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -16,7 +16,7 @@ state_transition_with_full_block, ) from eth2spec.test.helpers.constants import ( - PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844, + PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, MINIMAL, ALL_PHASES, ) @@ -24,7 +24,7 @@ do_fork, ) from eth2spec.test.helpers.forks import ( - is_post_capella, is_post_eip4844, + is_post_capella, is_post_deneb, is_post_fork, ) from eth2spec.test.helpers.light_client import ( @@ -53,8 +53,8 @@ def needs_upgrade_to_capella(d_spec, s_spec): return is_post_capella(s_spec) and not is_post_capella(d_spec) -def needs_upgrade_to_eip4844(d_spec, s_spec): - return is_post_eip4844(s_spec) and not is_post_eip4844(d_spec) +def needs_upgrade_to_deneb(d_spec, s_spec): + return is_post_deneb(s_spec) and not is_post_deneb(d_spec) def check_lc_header_equal(d_spec, s_spec, data, upgraded): @@ -80,8 +80,8 @@ def upgrade_lc_bootstrap_to_store(d_spec, s_spec, 
data): upgraded = s_spec.upgrade_lc_bootstrap_to_capella(upgraded) check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded) - if needs_upgrade_to_eip4844(d_spec, s_spec): - upgraded = s_spec.upgrade_lc_bootstrap_to_eip4844(upgraded) + if needs_upgrade_to_deneb(d_spec, s_spec): + upgraded = s_spec.upgrade_lc_bootstrap_to_deneb(upgraded) check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded) return upgraded @@ -103,8 +103,8 @@ def upgrade_lc_update_to_store(d_spec, s_spec, data): upgraded = s_spec.upgrade_lc_update_to_capella(upgraded) check_lc_update_equal(d_spec, s_spec, data, upgraded) - if needs_upgrade_to_eip4844(d_spec, s_spec): - upgraded = s_spec.upgrade_lc_update_to_eip4844(upgraded) + if needs_upgrade_to_deneb(d_spec, s_spec): + upgraded = s_spec.upgrade_lc_update_to_deneb(upgraded) check_lc_update_equal(d_spec, s_spec, data, upgraded) return upgraded @@ -130,8 +130,8 @@ def upgrade_lc_store_to_new_spec(d_spec, s_spec, data): upgraded = s_spec.upgrade_lc_store_to_capella(upgraded) check_lc_store_equal(d_spec, s_spec, data, upgraded) - if needs_upgrade_to_eip4844(d_spec, s_spec): - upgraded = s_spec.upgrade_lc_store_to_eip4844(upgraded) + if needs_upgrade_to_deneb(d_spec, s_spec): + upgraded = s_spec.upgrade_lc_store_to_deneb(upgraded) check_lc_store_equal(d_spec, s_spec, data, upgraded) return upgraded @@ -145,8 +145,8 @@ class LightClientSyncTest(object): def get_store_fork_version(s_spec): - if is_post_eip4844(s_spec): - return s_spec.config.EIP4844_FORK_VERSION + if is_post_deneb(s_spec): + return s_spec.config.DENEB_FORK_VERSION if is_post_capella(s_spec): return s_spec.config.CAPELLA_FORK_VERSION return s_spec.config.ALTAIR_FORK_VERSION @@ -731,16 +731,16 @@ def test_capella_fork(spec, phases, state): yield from run_test_single_fork(spec, phases, state, CAPELLA) -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) @spec_test @with_config_overrides({ - 'EIP4844_FORK_EPOCH': 3, # `setup_test` 
advances to epoch 2 + 'DENEB_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 }, emit=False) @with_state -@with_matching_spec_config(emitted_fork=EIP4844) +@with_matching_spec_config(emitted_fork=DENEB) @with_presets([MINIMAL], reason="too slow") -def test_eip4844_fork(spec, phases, state): - yield from run_test_single_fork(spec, phases, state, EIP4844) +def test_deneb_fork(spec, phases, state): + yield from run_test_single_fork(spec, phases, state, DENEB) def run_test_multi_fork(spec, phases, state, fork_1, fork_2): @@ -779,17 +779,17 @@ def run_test_multi_fork(spec, phases, state, fork_1, fork_2): yield from finish_test(test) -@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, EIP4844]) +@with_phases(phases=[BELLATRIX], other_phases=[CAPELLA, DENEB]) @spec_test @with_config_overrides({ 'CAPELLA_FORK_EPOCH': 3, # `setup_test` advances to epoch 2 - 'EIP4844_FORK_EPOCH': 4, + 'DENEB_FORK_EPOCH': 4, }, emit=False) @with_state -@with_matching_spec_config(emitted_fork=EIP4844) +@with_matching_spec_config(emitted_fork=DENEB) @with_presets([MINIMAL], reason="too slow") -def test_capella_eip4844_fork(spec, phases, state): - yield from run_test_multi_fork(spec, phases, state, CAPELLA, EIP4844) +def test_capella_deneb_fork(spec, phases, state): + yield from run_test_multi_fork(spec, phases, state, CAPELLA, DENEB) def run_test_upgraded_store_with_legacy_data(spec, phases, state, fork): @@ -823,10 +823,10 @@ def test_capella_store_with_legacy_data(spec, phases, state): yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, CAPELLA) -@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[EIP4844]) +@with_phases(phases=[ALTAIR, BELLATRIX, CAPELLA], other_phases=[DENEB]) @spec_test @with_state -@with_matching_spec_config(emitted_fork=EIP4844) +@with_matching_spec_config(emitted_fork=DENEB) @with_presets([MINIMAL], reason="too slow") -def test_eip4844_store_with_legacy_data(spec, phases, state): - yield from 
run_test_upgraded_store_with_legacy_data(spec, phases, state, EIP4844) +def test_deneb_store_with_legacy_data(spec, phases, state): + yield from run_test_upgraded_store_with_legacy_data(spec, phases, state, DENEB) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 8401b973e6..38e7f0b715 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -7,12 +7,12 @@ from eth2spec.altair import mainnet as spec_altair_mainnet, minimal as spec_altair_minimal from eth2spec.bellatrix import mainnet as spec_bellatrix_mainnet, minimal as spec_bellatrix_minimal from eth2spec.capella import mainnet as spec_capella_mainnet, minimal as spec_capella_minimal -from eth2spec.eip4844 import mainnet as spec_eip4844_mainnet, minimal as spec_eip4844_minimal +from eth2spec.deneb import mainnet as spec_deneb_mainnet, minimal as spec_deneb_minimal from eth2spec.utils import bls from .exceptions import SkippedTest from .helpers.constants import ( - PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844, + PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, MINIMAL, MAINNET, ALL_PHASES, ALL_FORK_UPGRADES, @@ -78,14 +78,14 @@ class ForkMeta: ALTAIR: spec_altair_minimal, BELLATRIX: spec_bellatrix_minimal, CAPELLA: spec_capella_minimal, - EIP4844: spec_eip4844_minimal, + DENEB: spec_deneb_minimal, }, MAINNET: { PHASE0: spec_phase0_mainnet, ALTAIR: spec_altair_mainnet, BELLATRIX: spec_bellatrix_mainnet, CAPELLA: spec_capella_mainnet, - EIP4844: spec_eip4844_mainnet + DENEB: spec_deneb_mainnet }, } @@ -427,7 +427,7 @@ def decorator(fn): with_altair_and_later = with_all_phases_from(ALTAIR) with_bellatrix_and_later = with_all_phases_from(BELLATRIX) with_capella_and_later = with_all_phases_from(CAPELLA) -with_eip4844_and_later = with_all_phases_from(EIP4844) +with_deneb_and_later = with_all_phases_from(DENEB) def _get_preset_targets(kw): diff --git a/tests/core/pyspec/eth2spec/test/eip4844/__init__.py 
b/tests/core/pyspec/eth2spec/test/deneb/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/eip4844/__init__.py rename to tests/core/pyspec/eth2spec/test/deneb/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/eip4844/fork/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/fork/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/eip4844/fork/__init__.py rename to tests/core/pyspec/eth2spec/test/deneb/fork/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/eip4844/fork/test_eip4844_fork_basic.py b/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_basic.py similarity index 55% rename from tests/core/pyspec/eth2spec/test/eip4844/fork/test_eip4844_fork_basic.py rename to tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_basic.py index aca7cb8527..1666fdd71c 100644 --- a/tests/core/pyspec/eth2spec/test/eip4844/fork/test_eip4844_fork_basic.py +++ b/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_basic.py @@ -7,76 +7,76 @@ ) from eth2spec.test.utils import with_meta_tags from eth2spec.test.helpers.constants import ( - CAPELLA, EIP4844, + CAPELLA, DENEB, MINIMAL, ) from eth2spec.test.helpers.state import ( next_epoch, next_epoch_via_block, ) -from eth2spec.test.helpers.eip4844.fork import ( - EIP4844_FORK_TEST_META_TAGS, +from eth2spec.test.helpers.deneb.fork import ( + DENEB_FORK_TEST_META_TAGS, run_fork_test, ) -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) @spec_test @with_state -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) def test_fork_base_state(spec, phases, state): - yield from run_fork_test(phases[EIP4844], state) + yield from run_fork_test(phases[DENEB], state) -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) @spec_test @with_state -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) 
+@with_meta_tags(DENEB_FORK_TEST_META_TAGS) def test_fork_next_epoch(spec, phases, state): next_epoch(spec, state) - yield from run_fork_test(phases[EIP4844], state) + yield from run_fork_test(phases[DENEB], state) -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) @spec_test @with_state -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) def test_fork_next_epoch_with_block(spec, phases, state): next_epoch_via_block(spec, state) - yield from run_fork_test(phases[EIP4844], state) + yield from run_fork_test(phases[DENEB], state) -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) @spec_test @with_state -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) def test_fork_many_next_epoch(spec, phases, state): for _ in range(3): next_epoch(spec, state) - yield from run_fork_test(phases[EIP4844], state) + yield from run_fork_test(phases[DENEB], state) -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) @with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @spec_test -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) def test_fork_random_low_balances(spec, phases, state): - yield from run_fork_test(phases[EIP4844], state) + yield from run_fork_test(phases[DENEB], state) -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) @with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @spec_test -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) def test_fork_random_misc_balances(spec, phases, state): - yield from run_fork_test(phases[EIP4844], state) + yield from run_fork_test(phases[DENEB], state) 
-@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) @with_presets([MINIMAL], reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") @with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) @spec_test -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) def test_fork_random_large_validator_set(spec, phases, state): - yield from run_fork_test(phases[EIP4844], state) + yield from run_fork_test(phases[DENEB], state) diff --git a/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_random.py b/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_random.py new file mode 100644 index 0000000000..e88b636932 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/deneb/fork/test_deneb_fork_random.py @@ -0,0 +1,84 @@ +from random import Random + +from eth2spec.test.context import ( + with_phases, + with_custom_state, + with_presets, + spec_test, with_state, + low_balances, misc_balances, large_validator_set, +) +from eth2spec.test.utils import with_meta_tags +from eth2spec.test.helpers.constants import ( + CAPELLA, DENEB, + MINIMAL, +) +from eth2spec.test.helpers.deneb.fork import ( + DENEB_FORK_TEST_META_TAGS, + run_fork_test, +) +from eth2spec.test.helpers.random import randomize_state + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) +@spec_test +@with_state +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) +def test_deneb_fork_random_0(spec, phases, state): + randomize_state(spec, state, rng=Random(1010)) + yield from run_fork_test(phases[DENEB], state) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) +@spec_test +@with_state +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) +def test_deneb_fork_random_1(spec, phases, state): + randomize_state(spec, state, rng=Random(2020)) + yield from run_fork_test(phases[DENEB], state) + + +@with_phases(phases=[CAPELLA], 
other_phases=[DENEB]) +@spec_test +@with_state +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) +def test_deneb_fork_random_2(spec, phases, state): + randomize_state(spec, state, rng=Random(3030)) + yield from run_fork_test(phases[DENEB], state) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) +@spec_test +@with_state +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) +def test_deneb_fork_random_3(spec, phases, state): + randomize_state(spec, state, rng=Random(4040)) + yield from run_fork_test(phases[DENEB], state) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) +@spec_test +@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) +def test_deneb_fork_random_low_balances(spec, phases, state): + randomize_state(spec, state, rng=Random(5050)) + yield from run_fork_test(phases[DENEB], state) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) +@spec_test +@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) +def test_deneb_fork_random_misc_balances(spec, phases, state): + randomize_state(spec, state, rng=Random(6060)) + yield from run_fork_test(phases[DENEB], state) + + +@with_phases(phases=[CAPELLA], other_phases=[DENEB]) +@with_presets([MINIMAL], + reason="mainnet config leads to larger validator set than limit of public/private keys pre-generated") +@spec_test +@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) +@with_meta_tags(DENEB_FORK_TEST_META_TAGS) +def test_deneb_fork_random_large_validator_set(spec, phases, state): + randomize_state(spec, state, rng=Random(7070)) + yield from run_fork_test(phases[DENEB], state) diff --git a/tests/core/pyspec/eth2spec/test/eip4844/random/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/random/__init__.py similarity index 100% rename from 
tests/core/pyspec/eth2spec/test/eip4844/random/__init__.py rename to tests/core/pyspec/eth2spec/test/deneb/random/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/eip4844/random/test_random.py b/tests/core/pyspec/eth2spec/test/deneb/random/test_random.py similarity index 63% rename from tests/core/pyspec/eth2spec/test/eip4844/random/test_random.py rename to tests/core/pyspec/eth2spec/test/deneb/random/test_random.py index b90b858b21..e8c0a1bb10 100644 --- a/tests/core/pyspec/eth2spec/test/eip4844/random/test_random.py +++ b/tests/core/pyspec/eth2spec/test/deneb/random/test_random.py @@ -4,7 +4,7 @@ See the README for that generator for more information. """ -from eth2spec.test.helpers.constants import EIP4844 +from eth2spec.test.helpers.constants import DENEB from eth2spec.test.context import ( misc_balances_in_default_range_with_many_validators, with_phases, @@ -23,7 +23,7 @@ @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -36,11 +36,11 @@ def test_randomized_0(spec, state): # epochs:0,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:1,slots:0,with-block:no_block # epochs:0,slots:random_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 
'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -49,7 +49,7 @@ def test_randomized_0(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -62,11 +62,11 @@ def test_randomized_1(spec, state): # epochs:0,slots:0,with-block:no_block # epochs:1,slots:0,with-block:no_block # epochs:0,slots:random_slot_in_epoch,with-block:no_block - # 
epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:0,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 
'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -75,7 +75,7 @@ def test_randomized_1(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -88,11 +88,11 @@ def test_randomized_2(spec, state): # epochs:0,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block # epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:0,slots:0,with-block:no_block # epochs:0,slots:last_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, 
{'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -101,7 +101,7 @@ def test_randomized_2(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -114,11 +114,11 @@ def test_randomized_3(spec, state): # epochs:0,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block # epochs:0,slots:last_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:1,slots:0,with-block:no_block # epochs:0,slots:last_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 
'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -127,7 +127,7 @@ def test_randomized_3(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -140,11 +140,11 @@ def test_randomized_4(spec, state): # epochs:0,slots:0,with-block:no_block # epochs:1,slots:0,with-block:no_block # epochs:0,slots:last_slot_in_epoch,with-block:no_block - # 
epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:1,slots:0,with-block:no_block # epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 
'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -153,7 +153,7 @@ def test_randomized_4(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -166,11 +166,11 @@ def test_randomized_5(spec, state): # epochs:0,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block # epochs:0,slots:random_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:0,slots:0,with-block:no_block # epochs:0,slots:random_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 
'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -179,7 +179,7 @@ def test_randomized_5(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -192,11 +192,11 @@ def test_randomized_6(spec, state): # epochs:0,slots:0,with-block:no_block # epochs:1,slots:0,with-block:no_block # epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:0,slots:0,with-block:no_block # epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 
'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -205,7 +205,7 @@ def test_randomized_6(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -218,11 +218,11 @@ def test_randomized_7(spec, state): # epochs:0,slots:0,with-block:no_block # 
epochs:1,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:1,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'validation': 'validate_is_not_leaking', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 
'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -231,7 +231,7 @@ def test_randomized_7(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -244,11 +244,11 @@ def test_randomized_8(spec, state): # epochs:epochs_until_leak,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:1,slots:0,with-block:no_block # epochs:0,slots:random_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 
0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -257,7 +257,7 @@ def test_randomized_8(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -270,11 +270,11 @@ def test_randomized_9(spec, state): # epochs:epochs_until_leak,slots:0,with-block:no_block # epochs:1,slots:0,with-block:no_block # epochs:0,slots:random_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:0,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 
'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -283,7 +283,7 @@ def test_randomized_9(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -296,11 +296,11 @@ def test_randomized_10(spec, state): # epochs:epochs_until_leak,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block # 
epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:0,slots:0,with-block:no_block # epochs:0,slots:last_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 
'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -309,7 +309,7 @@ def test_randomized_10(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -322,11 +322,11 @@ def test_randomized_11(spec, state): # epochs:epochs_until_leak,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block # epochs:0,slots:last_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:1,slots:0,with-block:no_block # epochs:0,slots:last_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # 
epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -335,7 +335,7 @@ def test_randomized_11(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -348,11 +348,11 @@ def test_randomized_12(spec, state): # epochs:epochs_until_leak,slots:0,with-block:no_block # epochs:1,slots:0,with-block:no_block # epochs:0,slots:last_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:1,slots:0,with-block:no_block # epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 
'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'last_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -361,7 +361,7 @@ def test_randomized_12(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( 
balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -374,11 +374,11 @@ def test_randomized_13(spec, state): # epochs:epochs_until_leak,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block # epochs:0,slots:random_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:0,slots:0,with-block:no_block # epochs:0,slots:random_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 
0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'random_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -387,7 +387,7 @@ def test_randomized_13(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -400,11 +400,11 @@ def test_randomized_14(spec, state): # epochs:epochs_until_leak,slots:0,with-block:no_block # epochs:1,slots:0,with-block:no_block # epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:0,slots:0,with-block:no_block # epochs:0,slots:penultimate_slot_in_epoch,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 
'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 0, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 'penultimate_slot_in_epoch', 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, @@ -413,7 +413,7 @@ def test_randomized_14(spec, state): @only_generator("randomized test for broad coverage, not point-to-point CI") -@with_phases([EIP4844]) +@with_phases([DENEB]) @with_custom_state( balances_fn=misc_balances_in_default_range_with_many_validators, threshold_fn=zero_activation_threshold @@ -426,11 +426,11 @@ def test_randomized_15(spec, state): # epochs:epochs_until_leak,slots:0,with-block:no_block # epochs:1,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block - # epochs:0,slots:0,with-block:random_block_eip4844 + # epochs:0,slots:0,with-block:random_block_deneb # epochs:1,slots:0,with-block:no_block # epochs:0,slots:0,with-block:no_block - # 
epochs:0,slots:0,with-block:random_block_eip4844 - scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_eip4844', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_eip4844'} # noqa: E501 + # epochs:0,slots:0,with-block:random_block_deneb + scenario = {'transitions': [{'epochs_to_skip': 'epochs_until_leak', 'validation': 'validate_is_leaking', 'slots_to_skip': 0, 'block_producer': 'no_block'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}, {'epochs_to_skip': 1, 'slots_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'slots_to_skip': 0, 'epochs_to_skip': 0, 'block_producer': 'no_block', 'validation': 'no_op_validation'}, {'block_producer': 'random_block_deneb', 'epochs_to_skip': 0, 'slots_to_skip': 0, 'validation': 'no_op_validation'}], 'state_randomizer': 'randomize_state_deneb'} # noqa: E501 yield from run_generated_randomized_test( spec, state, diff --git a/tests/core/pyspec/eth2spec/test/eip4844/sanity/__init__.py 
b/tests/core/pyspec/eth2spec/test/deneb/sanity/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/eip4844/sanity/__init__.py rename to tests/core/pyspec/eth2spec/test/deneb/sanity/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/eip4844/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py similarity index 95% rename from tests/core/pyspec/eth2spec/test/eip4844/sanity/test_blocks.py rename to tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py index 0aeafe052f..c7fb708b8f 100644 --- a/tests/core/pyspec/eth2spec/test/eip4844/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py @@ -6,7 +6,7 @@ ) from eth2spec.test.context import ( spec_state_test, - with_eip4844_and_later, + with_deneb_and_later, ) from eth2spec.test.helpers.execution_payload import ( compute_el_block_hash, @@ -16,7 +16,7 @@ ) -@with_eip4844_and_later +@with_deneb_and_later @spec_state_test def test_one_blob(spec, state): yield 'pre', state @@ -32,7 +32,7 @@ def test_one_blob(spec, state): yield 'post', state -@with_eip4844_and_later +@with_deneb_and_later @spec_state_test def test_max_blobs(spec, state): yield 'pre', state diff --git a/tests/core/pyspec/eth2spec/test/eip4844/unittests/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/eip4844/unittests/__init__.py rename to tests/core/pyspec/eth2spec/test/deneb/unittests/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/eip4844/unittests/fork_choice/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/eip4844/unittests/fork_choice/__init__.py rename to tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/eip4844/unittests/fork_choice/test_validate_blobs_sidecar.py 
b/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs_sidecar.py similarity index 93% rename from tests/core/pyspec/eth2spec/test/eip4844/unittests/fork_choice/test_validate_blobs_sidecar.py rename to tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs_sidecar.py index dbea9f7841..87ed9ff8ea 100644 --- a/tests/core/pyspec/eth2spec/test/eip4844/unittests/fork_choice/test_validate_blobs_sidecar.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs_sidecar.py @@ -6,7 +6,7 @@ ) from eth2spec.test.context import ( spec_state_test, - with_eip4844_and_later, + with_deneb_and_later, ) from eth2spec.test.helpers.execution_payload import ( compute_el_block_hash, @@ -29,25 +29,25 @@ def _run_validate_blobs_sidecar_test(spec, state, blob_count): spec.validate_blobs_sidecar(block.slot, block.hash_tree_root(), expected_commitments, blobs_sidecar) -@with_eip4844_and_later +@with_deneb_and_later @spec_state_test def test_validate_blobs_sidecar_zero_blobs(spec, state): _run_validate_blobs_sidecar_test(spec, state, blob_count=0) -@with_eip4844_and_later +@with_deneb_and_later @spec_state_test def test_validate_blobs_sidecar_one_blob(spec, state): _run_validate_blobs_sidecar_test(spec, state, blob_count=1) -@with_eip4844_and_later +@with_deneb_and_later @spec_state_test def test_validate_blobs_sidecar_two_blobs(spec, state): _run_validate_blobs_sidecar_test(spec, state, blob_count=2) -@with_eip4844_and_later +@with_deneb_and_later @spec_state_test def test_validate_blobs_sidecar_max_blobs(spec, state): _run_validate_blobs_sidecar_test(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK) diff --git a/tests/core/pyspec/eth2spec/test/eip4844/unittests/polynomial_commitments/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/eip4844/unittests/polynomial_commitments/__init__.py rename to 
tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/eip4844/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py similarity index 96% rename from tests/core/pyspec/eth2spec/test/eip4844/unittests/polynomial_commitments/test_polynomial_commitments.py rename to tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 04f5857f31..4d881e3e36 100644 --- a/tests/core/pyspec/eth2spec/test/eip4844/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -2,7 +2,7 @@ from eth2spec.test.context import ( spec_state_test, - with_eip4844_and_later, + with_deneb_and_later, ) from eth2spec.test.helpers.sharding import ( get_sample_blob, @@ -11,7 +11,7 @@ ) -@with_eip4844_and_later +@with_deneb_and_later @spec_state_test def test_verify_kzg_proof(spec, state): x = 3 @@ -24,7 +24,7 @@ def test_verify_kzg_proof(spec, state): assert spec.verify_kzg_proof_impl(commitment, x, y, proof) -@with_eip4844_and_later +@with_deneb_and_later @spec_state_test def test_barycentric_outside_domain(spec, state): """ @@ -57,7 +57,7 @@ def test_barycentric_outside_domain(spec, state): assert p_z_coeff == p_z_eval -@with_eip4844_and_later +@with_deneb_and_later @spec_state_test def test_barycentric_within_domain(spec, state): """ diff --git a/tests/core/pyspec/eth2spec/test/eip4844/unittests/test_kzg.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py similarity index 91% rename from tests/core/pyspec/eth2spec/test/eip4844/unittests/test_kzg.py rename to tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py index 7474707b97..71bfae8b89 100644 --- a/tests/core/pyspec/eth2spec/test/eip4844/unittests/test_kzg.py +++ 
b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py @@ -1,6 +1,6 @@ from eth2spec.test.helpers.constants import ( - EIP4844, + DENEB, MINIMAL, ) from eth2spec.test.helpers.sharding import ( @@ -13,7 +13,7 @@ ) -@with_phases([EIP4844]) +@with_phases([DENEB]) @spec_state_test @with_presets([MINIMAL]) def test_blob_to_kzg_commitment(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/eip4844/unittests/test_offset.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py similarity index 94% rename from tests/core/pyspec/eth2spec/test/eip4844/unittests/test_offset.py rename to tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py index 1702ea7e00..13150180bc 100644 --- a/tests/core/pyspec/eth2spec/test/eip4844/unittests/test_offset.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py @@ -1,6 +1,6 @@ from eth2spec.test.helpers.constants import ( - EIP4844, + DENEB, MINIMAL, ) from eth2spec.test.helpers.sharding import ( @@ -13,7 +13,7 @@ ) -@with_phases([EIP4844]) +@with_phases([DENEB]) @spec_state_test @with_presets([MINIMAL]) def test_tx_peek_blob_versioned_hashes(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/eip4844/fork/test_eip4844_fork_random.py b/tests/core/pyspec/eth2spec/test/eip4844/fork/test_eip4844_fork_random.py deleted file mode 100644 index a22de4b599..0000000000 --- a/tests/core/pyspec/eth2spec/test/eip4844/fork/test_eip4844_fork_random.py +++ /dev/null @@ -1,84 +0,0 @@ -from random import Random - -from eth2spec.test.context import ( - with_phases, - with_custom_state, - with_presets, - spec_test, with_state, - low_balances, misc_balances, large_validator_set, -) -from eth2spec.test.utils import with_meta_tags -from eth2spec.test.helpers.constants import ( - CAPELLA, EIP4844, - MINIMAL, -) -from eth2spec.test.helpers.eip4844.fork import ( - EIP4844_FORK_TEST_META_TAGS, - run_fork_test, -) -from eth2spec.test.helpers.random import randomize_state - - -@with_phases(phases=[CAPELLA], 
other_phases=[EIP4844]) -@spec_test -@with_state -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) -def test_eip4844_fork_random_0(spec, phases, state): - randomize_state(spec, state, rng=Random(1010)) - yield from run_fork_test(phases[EIP4844], state) - - -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) -@spec_test -@with_state -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) -def test_eip4844_fork_random_1(spec, phases, state): - randomize_state(spec, state, rng=Random(2020)) - yield from run_fork_test(phases[EIP4844], state) - - -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) -@spec_test -@with_state -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) -def test_eip4844_fork_random_2(spec, phases, state): - randomize_state(spec, state, rng=Random(3030)) - yield from run_fork_test(phases[EIP4844], state) - - -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) -@spec_test -@with_state -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) -def test_eip4844_fork_random_3(spec, phases, state): - randomize_state(spec, state, rng=Random(4040)) - yield from run_fork_test(phases[EIP4844], state) - - -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) -@spec_test -@with_custom_state(balances_fn=low_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) -def test_eip4844_fork_random_low_balances(spec, phases, state): - randomize_state(spec, state, rng=Random(5050)) - yield from run_fork_test(phases[EIP4844], state) - - -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) -@spec_test -@with_custom_state(balances_fn=misc_balances, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) -def test_eip4844_fork_random_misc_balances(spec, phases, state): - randomize_state(spec, state, rng=Random(6060)) - yield from run_fork_test(phases[EIP4844], state) - - -@with_phases(phases=[CAPELLA], other_phases=[EIP4844]) -@with_presets([MINIMAL], - reason="mainnet config leads to 
larger validator set than limit of public/private keys pre-generated") -@spec_test -@with_custom_state(balances_fn=large_validator_set, threshold_fn=lambda spec: spec.config.EJECTION_BALANCE) -@with_meta_tags(EIP4844_FORK_TEST_META_TAGS) -def test_eip4844_fork_random_large_validator_set(spec, phases, state): - randomize_state(spec, state, rng=Random(7070)) - yield from run_fork_test(phases[EIP4844], state) diff --git a/tests/core/pyspec/eth2spec/test/helpers/constants.py b/tests/core/pyspec/eth2spec/test/helpers/constants.py index cd103337f5..0d31adb431 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/constants.py +++ b/tests/core/pyspec/eth2spec/test/helpers/constants.py @@ -14,25 +14,25 @@ SHARDING = SpecForkName('sharding') CUSTODY_GAME = SpecForkName('custody_game') DAS = SpecForkName('das') -EIP4844 = SpecForkName('eip4844') +DENEB = SpecForkName('deneb') # The forks that pytest can run with. ALL_PHASES = ( # Formal forks PHASE0, ALTAIR, BELLATRIX, CAPELLA, # Experimental patches - EIP4844, + DENEB, ) # The forks that output to the test vectors. -TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844) +TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB) -# TODO: no EIP4844 fork tests now. Should add when we figure out the content of Capella. +# TODO: no DENEB fork tests now. Should add when we figure out the content of Capella. 
ALL_FORK_UPGRADES = { # pre_fork_name: post_fork_name PHASE0: ALTAIR, ALTAIR: BELLATRIX, BELLATRIX: CAPELLA, - CAPELLA: EIP4844, + CAPELLA: DENEB, } ALL_PRE_POST_FORKS = ALL_FORK_UPGRADES.items() AFTER_BELLATRIX_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items() if key != PHASE0} diff --git a/tests/core/pyspec/eth2spec/test/helpers/eip4844/__init__.py b/tests/core/pyspec/eth2spec/test/helpers/deneb/__init__.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/helpers/eip4844/__init__.py rename to tests/core/pyspec/eth2spec/test/helpers/deneb/__init__.py diff --git a/tests/core/pyspec/eth2spec/test/helpers/eip4844/fork.py b/tests/core/pyspec/eth2spec/test/helpers/deneb/fork.py similarity index 90% rename from tests/core/pyspec/eth2spec/test/helpers/eip4844/fork.py rename to tests/core/pyspec/eth2spec/test/helpers/deneb/fork.py index ed4ae057d5..7fe0535c10 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/eip4844/fork.py +++ b/tests/core/pyspec/eth2spec/test/helpers/deneb/fork.py @@ -1,17 +1,17 @@ from eth2spec.test.helpers.constants import ( - EIP4844, + DENEB, ) -EIP4844_FORK_TEST_META_TAGS = { - 'fork': EIP4844, +DENEB_FORK_TEST_META_TAGS = { + 'fork': DENEB, } def run_fork_test(post_spec, pre_state): yield 'pre', pre_state - post_state = post_spec.upgrade_to_eip4844(pre_state) + post_state = post_spec.upgrade_to_deneb(pre_state) # Stable fields stable_fields = [ @@ -57,7 +57,7 @@ def run_fork_test(post_spec, pre_state): assert getattr(pre_validator, field) == getattr(post_validator, field) assert pre_state.fork.current_version == post_state.fork.previous_version - assert post_state.fork.current_version == post_spec.config.EIP4844_FORK_VERSION + assert post_state.fork.current_version == post_spec.config.DENEB_FORK_VERSION assert post_state.fork.epoch == post_spec.get_current_epoch(post_state) yield 'post', post_state diff --git a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py 
b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py index 5e0c160b31..c0a70aca1d 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py +++ b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py @@ -4,7 +4,7 @@ from rlp.sedes import big_endian_int, Binary, List from eth2spec.debug.random_value import get_random_bytes_list -from eth2spec.test.helpers.forks import is_post_capella, is_post_eip4844 +from eth2spec.test.helpers.forks import is_post_capella, is_post_deneb def get_execution_payload_header(spec, execution_payload): @@ -26,7 +26,7 @@ def get_execution_payload_header(spec, execution_payload): ) if is_post_capella(spec): payload_header.withdrawals_root = spec.hash_tree_root(execution_payload.withdrawals) - if is_post_eip4844(spec): + if is_post_deneb(spec): payload_header.excess_data_gas = execution_payload.excess_data_gas return payload_header @@ -89,7 +89,7 @@ def compute_el_header_block_hash(spec, if is_post_capella(spec): # withdrawals_root execution_payload_header_rlp.append((Binary(32, 32), withdrawals_trie_root)) - if is_post_eip4844(spec): + if is_post_deneb(spec): # excess_data_gas execution_payload_header_rlp.append((big_endian_int, payload_header.excess_data_gas)) diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py index ca961bde42..96d0d20dcd 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py +++ b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py @@ -14,7 +14,7 @@ ALTAIR, BELLATRIX, CAPELLA, - EIP4844, + DENEB, ) from eth2spec.test.helpers.deposits import ( prepare_state_and_deposit, @@ -153,8 +153,8 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict= state = post_spec.upgrade_to_bellatrix(state) elif post_spec.fork == CAPELLA: state = post_spec.upgrade_to_capella(state) - elif post_spec.fork == EIP4844: - state = post_spec.upgrade_to_eip4844(state) + elif 
post_spec.fork == DENEB: + state = post_spec.upgrade_to_deneb(state) assert state.fork.epoch == fork_epoch @@ -167,9 +167,9 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict= elif post_spec.fork == CAPELLA: assert state.fork.previous_version == post_spec.config.BELLATRIX_FORK_VERSION assert state.fork.current_version == post_spec.config.CAPELLA_FORK_VERSION - elif post_spec.fork == EIP4844: + elif post_spec.fork == DENEB: assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION - assert state.fork.current_version == post_spec.config.EIP4844_FORK_VERSION + assert state.fork.current_version == post_spec.config.DENEB_FORK_VERSION if with_block: return state, _state_transition_and_sign_block_at_slot(post_spec, state, operation_dict=operation_dict) diff --git a/tests/core/pyspec/eth2spec/test/helpers/forks.py b/tests/core/pyspec/eth2spec/test/helpers/forks.py index 82ff12ff1d..be3103e67f 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/forks.py +++ b/tests/core/pyspec/eth2spec/test/helpers/forks.py @@ -1,11 +1,11 @@ from .constants import ( - PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844, + PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, ) def is_post_fork(a, b): - if a == EIP4844: - return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844] + if a == DENEB: + return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB] if a == CAPELLA: return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA] if a == BELLATRIX: @@ -29,5 +29,5 @@ def is_post_capella(spec): return is_post_fork(spec.fork, CAPELLA) -def is_post_eip4844(spec): - return is_post_fork(spec.fork, EIP4844) +def is_post_deneb(spec): + return is_post_fork(spec.fork, DENEB) diff --git a/tests/core/pyspec/eth2spec/test/helpers/genesis.py b/tests/core/pyspec/eth2spec/test/helpers/genesis.py index de2dd2647f..0610f11ad8 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/genesis.py +++ b/tests/core/pyspec/eth2spec/test/helpers/genesis.py @@ -1,5 +1,5 @@ from 
eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, EIP4844, + ALTAIR, BELLATRIX, CAPELLA, DENEB, ) from eth2spec.test.helpers.execution_payload import ( compute_el_header_block_hash, @@ -77,9 +77,9 @@ def create_genesis_state(spec, validator_balances, activation_threshold): elif spec.fork == CAPELLA: previous_version = spec.config.BELLATRIX_FORK_VERSION current_version = spec.config.CAPELLA_FORK_VERSION - elif spec.fork == EIP4844: + elif spec.fork == DENEB: previous_version = spec.config.CAPELLA_FORK_VERSION - current_version = spec.config.EIP4844_FORK_VERSION + current_version = spec.config.DENEB_FORK_VERSION state = spec.BeaconState( genesis_time=0, diff --git a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py index 4d2ec124db..35ddbc330a 100644 --- a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py +++ b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py @@ -88,7 +88,7 @@ def randomize_state_capella(spec, state, stats, exit_fraction=0.1, slash_fractio return scenario_state -def randomize_state_eip4844(spec, state, stats, exit_fraction=0.1, slash_fraction=0.1): +def randomize_state_deneb(spec, state, stats, exit_fraction=0.1, slash_fraction=0.1): scenario_state = randomize_state_capella(spec, state, stats, @@ -232,7 +232,7 @@ def random_block_capella(spec, state, signed_blocks, scenario_state, rng=Random( return block -def random_block_eip4844(spec, state, signed_blocks, scenario_state, rng=Random(3456)): +def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(3456)): block = random_block_capella(spec, state, signed_blocks, scenario_state) # TODO: more commitments. 
blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK] opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=1) diff --git a/tests/generators/epoch_processing/main.py b/tests/generators/epoch_processing/main.py index dda4345a8e..a485f646aa 100644 --- a/tests/generators/epoch_processing/main.py +++ b/tests/generators/epoch_processing/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844 +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB if __name__ == "__main__": @@ -32,7 +32,7 @@ ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - eip4844_mods = capella_mods + deneb_mods = capella_mods # TODO Custody Game testgen is disabled for now # custody_game_mods = {**{key: 'eth2spec.test.custody_game.epoch_processing.test_process_' + key for key in [ @@ -46,7 +46,7 @@ ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, - EIP4844: eip4844_mods, + DENEB: deneb_mods, } run_state_test_generators(runner_name="epoch_processing", all_mods=all_mods) diff --git a/tests/generators/finality/main.py b/tests/generators/finality/main.py index de5af9b112..a25f3b8e7a 100644 --- a/tests/generators/finality/main.py +++ b/tests/generators/finality/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844 +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB if __name__ == "__main__": @@ -7,14 +7,14 @@ altair_mods = phase_0_mods # No additional Altair specific finality tests bellatrix_mods = altair_mods # No additional Bellatrix specific finality tests capella_mods = bellatrix_mods # No additional Capella specific finality tests - eip4844_mods = capella_mods # No additional EIP4844 
specific finality tests + deneb_mods = capella_mods # No additional Deneb specific finality tests all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, - EIP4844: eip4844_mods, + DENEB: deneb_mods, } run_state_test_generators(runner_name="finality", all_mods=all_mods) diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index 40e19a8ac9..c106810f8e 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844 +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB if __name__ == "__main__": @@ -18,14 +18,14 @@ ]} bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) capella_mods = bellatrix_mods # No additional Capella specific fork choice tests - eip4844_mods = capella_mods # No additional Capella specific fork choice tests + deneb_mods = capella_mods # No additional Capella specific fork choice tests all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, - EIP4844: eip4844_mods, + DENEB: deneb_mods, } run_state_test_generators(runner_name="fork_choice", all_mods=all_mods) diff --git a/tests/generators/forks/main.py b/tests/generators/forks/main.py index 42f3f3a1f2..7d68a31e7a 100644 --- a/tests/generators/forks/main.py +++ b/tests/generators/forks/main.py @@ -1,14 +1,14 @@ from typing import Iterable from eth2spec.test.helpers.constants import ( - PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844, + PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, MINIMAL, MAINNET, ) from eth2spec.test.helpers.typing import SpecForkName, PresetBaseName from eth2spec.test.altair.fork import test_altair_fork_basic, test_altair_fork_random from eth2spec.test.bellatrix.fork import 
test_bellatrix_fork_basic, test_bellatrix_fork_random from eth2spec.test.capella.fork import test_capella_fork_basic, test_capella_fork_random -from eth2spec.test.eip4844.fork import test_eip4844_fork_basic, test_eip4844_fork_random +from eth2spec.test.deneb.fork import test_deneb_fork_basic, test_deneb_fork_random from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing from eth2spec.gen_helpers.gen_from_tests.gen import generate_from_tests @@ -40,8 +40,8 @@ def _get_fork_tests_providers(): yield create_provider(test_bellatrix_fork_random, preset, ALTAIR, BELLATRIX) yield create_provider(test_capella_fork_basic, preset, BELLATRIX, CAPELLA) yield create_provider(test_capella_fork_random, preset, BELLATRIX, CAPELLA) - yield create_provider(test_eip4844_fork_basic, preset, CAPELLA, EIP4844) - yield create_provider(test_eip4844_fork_random, preset, CAPELLA, EIP4844) + yield create_provider(test_deneb_fork_basic, preset, CAPELLA, DENEB) + yield create_provider(test_deneb_fork_random, preset, CAPELLA, DENEB) if __name__ == "__main__": diff --git a/tests/generators/genesis/main.py b/tests/generators/genesis/main.py index a5c4eba9df..e95afcde19 100644 --- a/tests/generators/genesis/main.py +++ b/tests/generators/genesis/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844 +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB if __name__ == "__main__": @@ -16,13 +16,13 @@ ]} bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) capella_mods = bellatrix_mods # No additional Capella specific genesis tests - eip4844_mods = capella_mods # No additional EIP4844 specific genesis tests + deneb_mods = capella_mods # No additional Deneb specific genesis tests all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, - EIP4844: 
eip4844_mods, + DENEB: deneb_mods, } run_state_test_generators(runner_name="genesis", all_mods=all_mods) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index 54c09fae64..cfe34aee4b 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -1,4 +1,4 @@ -from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, EIP4844 +from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB from eth2spec.gen_helpers.gen_from_tests.gen import combine_mods, run_state_test_generators @@ -14,13 +14,13 @@ 'single_merkle_proof', ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - eip4844_mods = capella_mods + deneb_mods = capella_mods all_mods = { ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, - EIP4844: eip4844_mods, + DENEB: deneb_mods, } run_state_test_generators(runner_name="light_client", all_mods=all_mods) diff --git a/tests/generators/operations/main.py b/tests/generators/operations/main.py index d370a1b850..ed4c6c26c8 100644 --- a/tests/generators/operations/main.py +++ b/tests/generators/operations/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844 +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB if __name__ == "__main__": @@ -36,7 +36,7 @@ ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - eip4844_mods = capella_mods + deneb_mods = capella_mods # TODO Custody Game testgen is disabled for now # _new_custody_game_mods = {key: 'eth2spec.test.custody_game.block_processing.test_process_' + key for key in [ @@ -53,7 +53,7 @@ ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, - EIP4844: eip4844_mods, + DENEB: deneb_mods, } run_state_test_generators(runner_name="operations", all_mods=all_mods) diff --git 
a/tests/generators/random/Makefile b/tests/generators/random/Makefile index f57221ab45..bb557204a2 100644 --- a/tests/generators/random/Makefile +++ b/tests/generators/random/Makefile @@ -6,9 +6,9 @@ all: rm -f ../../core/pyspec/eth2spec/test/altair/random/test_random.py rm -f ../../core/pyspec/eth2spec/test/bellatrix/random/test_random.py rm -f ../../core/pyspec/eth2spec/test/capella/random/test_random.py - rm -f ../../core/pyspec/eth2spec/test/eip4844/random/test_random.py + rm -f ../../core/pyspec/eth2spec/test/deneb/random/test_random.py python3 generate.py phase0 > ../../core/pyspec/eth2spec/test/phase0/random/test_random.py python3 generate.py altair > ../../core/pyspec/eth2spec/test/altair/random/test_random.py python3 generate.py bellatrix > ../../core/pyspec/eth2spec/test/bellatrix/random/test_random.py python3 generate.py capella > ../../core/pyspec/eth2spec/test/capella/random/test_random.py - python3 generate.py eip4844 > ../../core/pyspec/eth2spec/test/eip4844/random/test_random.py + python3 generate.py deneb > ../../core/pyspec/eth2spec/test/deneb/random/test_random.py diff --git a/tests/generators/random/generate.py b/tests/generators/random/generate.py index 129d670fd3..3a1eb9c671 100644 --- a/tests/generators/random/generate.py +++ b/tests/generators/random/generate.py @@ -21,12 +21,12 @@ randomize_state_altair, randomize_state_bellatrix, randomize_state_capella, - randomize_state_eip4844, + randomize_state_deneb, random_block, random_block_altair_with_cycling_sync_committee_participation, random_block_bellatrix, random_block_capella, - random_block_eip4844, + random_block_deneb, last_slot_in_epoch, random_slot_in_epoch, penultimate_slot_in_epoch, @@ -36,7 +36,7 @@ transition_to_leaking, transition_without_leak, ) -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844 +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB # Ensure this many blocks are present in *each* randomized 
scenario @@ -274,12 +274,12 @@ def run_generate_tests_to_std_out(phase, state_randomizer, block_randomizer): state_randomizer=randomize_state_capella, block_randomizer=random_block_capella, ) - if EIP4844 in sys.argv: + if DENEB in sys.argv: did_generate = True run_generate_tests_to_std_out( - EIP4844, - state_randomizer=randomize_state_eip4844, - block_randomizer=random_block_eip4844, + DENEB, + state_randomizer=randomize_state_deneb, + block_randomizer=random_block_deneb, ) if not did_generate: warnings.warn("no phase given for test generation") diff --git a/tests/generators/random/main.py b/tests/generators/random/main.py index e36678771f..c5b991e4a1 100644 --- a/tests/generators/random/main.py +++ b/tests/generators/random/main.py @@ -1,4 +1,4 @@ -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844 +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators @@ -15,7 +15,7 @@ capella_mods = {key: 'eth2spec.test.capella.random.test_' + key for key in [ 'random', ]} - eip4844_mods = {key: 'eth2spec.test.eip4844.random.test_' + key for key in [ + deneb_mods = {key: 'eth2spec.test.deneb.random.test_' + key for key in [ 'random', ]} @@ -24,7 +24,7 @@ ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, - EIP4844: eip4844_mods, + DENEB: deneb_mods, } run_state_test_generators(runner_name="random", all_mods=all_mods) diff --git a/tests/generators/rewards/main.py b/tests/generators/rewards/main.py index 8958074bc2..e6244d1720 100644 --- a/tests/generators/rewards/main.py +++ b/tests/generators/rewards/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844 +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB if __name__ == "__main__": @@ -16,14 
+16,14 @@ # Transaction fees are part of the execution-layer. bellatrix_mods = altair_mods capella_mods = bellatrix_mods - eip4844_mods = capella_mods + deneb_mods = capella_mods all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, - EIP4844: eip4844_mods, + DENEB: deneb_mods, } run_state_test_generators(runner_name="rewards", all_mods=all_mods) diff --git a/tests/generators/sanity/main.py b/tests/generators/sanity/main.py index 9dd6d7ac0e..8a6c7b39cc 100644 --- a/tests/generators/sanity/main.py +++ b/tests/generators/sanity/main.py @@ -1,4 +1,4 @@ -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP4844 +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods @@ -23,17 +23,17 @@ ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) - _new_eip4844_mods = {key: 'eth2spec.test.eip4844.sanity.test_' + key for key in [ + _new_deneb_mods = {key: 'eth2spec.test.deneb.sanity.test_' + key for key in [ 'blocks', ]} - eip4844_mods = combine_mods(_new_eip4844_mods, capella_mods) + deneb_mods = combine_mods(_new_deneb_mods, capella_mods) all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, - EIP4844: eip4844_mods, + DENEB: deneb_mods, } run_state_test_generators(runner_name="sanity", all_mods=all_mods) diff --git a/tests/generators/sync/main.py b/tests/generators/sync/main.py index 8fb3950535..11f05a741f 100644 --- a/tests/generators/sync/main.py +++ b/tests/generators/sync/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators -from eth2spec.test.helpers.constants import BELLATRIX, CAPELLA, EIP4844 +from eth2spec.test.helpers.constants import BELLATRIX, CAPELLA, DENEB if __name__ == "__main__": @@ -7,12 +7,12 @@ 'optimistic', ]} capella_mods = bellatrix_mods - 
eip4844_mods = capella_mods + deneb_mods = capella_mods all_mods = { BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, - EIP4844: eip4844_mods, + DENEB: deneb_mods, } run_state_test_generators(runner_name="sync", all_mods=all_mods) From 470c1b14b35c64151114bca07a9a90154f77fe0e Mon Sep 17 00:00:00 2001 From: Paul Harris Date: Wed, 8 Feb 2023 09:22:28 +1000 Subject: [PATCH 025/210] fix references to eip4844 --- README.md | 2 +- specs/deneb/beacon-chain.md | 22 +++++++++---------- specs/deneb/fork-choice.md | 6 ++--- specs/deneb/fork.md | 10 ++++----- specs/deneb/p2p-interface.md | 10 ++++----- specs/deneb/polynomial-commitments.md | 6 ++--- specs/deneb/validator.md | 6 ++--- .../pyspec/eth2spec/test/helpers/sharding.py | 2 +- tests/generators/transition/main.py | 6 ++--- 9 files changed, 35 insertions(+), 35 deletions(-) diff --git a/README.md b/README.md index 466c151937..da893a53d2 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ Features are researched and developed in parallel, and then consolidated into se | Code Name or Topic | Specs | Notes | | - | - | - | | Capella (tentative) |
  • Core
    • [Beacon chain changes](specs/capella/beacon-chain.md)
    • [Capella fork](specs/capella/fork.md)
  • Additions
    • [Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))
    • [Validator additions](specs/capella/validator.md)
    • [P2P networking](specs/capella/p2p-interface.md)
| -| Deneb (tentative) |
  • Core
    • [Beacon Chain changes](specs/deneb/beacon-chain.md)
    • [EIP-4844 fork](specs/deneb/fork.md)
    • [Polynomial commitments](specs/deneb/polynomial-commitments.md)
    • [Fork choice changes](specs/deneb/fork-choice.md)
  • Additions
    • [Light client sync protocol changes](specs/deneb/light-client/sync-protocol.md) ([fork](specs/deneb/light-client/fork.md), [full node](specs/deneb/light-client/full-node.md), [networking](specs/deneb/light-client/p2p-interface.md))
    • [Honest validator guide changes](specs/deneb/validator.md)
    • [P2P networking](specs/deneb/p2p-interface.md)
| +| Deneb (tentative) |
  • Core
    • [Beacon Chain changes](specs/deneb/beacon-chain.md)
    • [Deneb fork](specs/deneb/fork.md)
    • [Polynomial commitments](specs/deneb/polynomial-commitments.md)
    • [Fork choice changes](specs/deneb/fork-choice.md)
  • Additions
    • [Light client sync protocol changes](specs/deneb/light-client/sync-protocol.md) ([fork](specs/deneb/light-client/fork.md), [full node](specs/deneb/light-client/full-node.md), [networking](specs/deneb/light-client/p2p-interface.md))
    • [Honest validator guide changes](specs/deneb/validator.md)
    • [P2P networking](specs/deneb/p2p-interface.md)
| | Sharding (outdated) |
  • Core
    • [Beacon Chain changes](specs/sharding/beacon-chain.md)
  • Additions
    • [P2P networking](specs/sharding/p2p-interface.md)
| | Custody Game (outdated) |
  • Core
    • [Beacon Chain changes](specs/custody_game/beacon-chain.md)
  • Additions
    • [Honest validator guide changes](specs/custody_game/validator.md)
| Dependent on sharding | | Data Availability Sampling (outdated) |
  • Core
    • [Core types and functions](specs/das/das-core.md)
    • [Fork choice changes](specs/das/fork-choice.md)
  • Additions
    • [P2P Networking](specs/das/p2p-interface.md)
    • [Sampling process](specs/das/sampling.md)
|
  • Dependent on sharding
  • [Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)
| diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md index 87ebf7a9e9..e82fdfdcb9 100644 --- a/specs/deneb/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -1,4 +1,4 @@ -# EIP-4844 -- The Beacon Chain +# Deneb -- The Beacon Chain **Notice**: This document is a work-in-progress for researchers and implementers. @@ -37,7 +37,7 @@ ## Introduction -This upgrade adds blobs to the beacon chain as part of EIP-4844. This is an extension of the Capella upgrade. +This upgrade adds blobs to the beacon chain as part of Deneb. This is an extension of the Capella upgrade. ## Custom types @@ -86,9 +86,9 @@ class BeaconBlockBody(Container): voluntary_exits: List[SignedVoluntaryExit, MAX_VOLUNTARY_EXITS] sync_aggregate: SyncAggregate # Execution - execution_payload: ExecutionPayload # [Modified in EIP-4844] + execution_payload: ExecutionPayload # [Modified in Deneb] bls_to_execution_changes: List[SignedBLSToExecutionChange, MAX_BLS_TO_EXECUTION_CHANGES] - blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK] # [New in EIP-4844] + blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK] # [New in Deneb] ``` #### `ExecutionPayload` @@ -108,7 +108,7 @@ class ExecutionPayload(Container): timestamp: uint64 extra_data: ByteList[MAX_EXTRA_DATA_BYTES] base_fee_per_gas: uint256 - excess_data_gas: uint256 # [New in EIP-4844] + excess_data_gas: uint256 # [New in Deneb] # Extra payload fields block_hash: Hash32 # Hash of execution block transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] @@ -132,7 +132,7 @@ class ExecutionPayloadHeader(Container): timestamp: uint64 extra_data: ByteList[MAX_EXTRA_DATA_BYTES] base_fee_per_gas: uint256 - excess_data_gas: uint256 # [New in EIP-4844] + excess_data_gas: uint256 # [New in Deneb] # Extra payload fields block_hash: Hash32 # Hash of execution block transactions_root: Root @@ -152,7 +152,7 @@ def kzg_commitment_to_versioned_hash(kzg_commitment: KZGCommitment) -> Versioned #### 
`tx_peek_blob_versioned_hashes` -This function retrieves the hashes from the `SignedBlobTransaction` as defined in EIP-4844, using SSZ offsets. +This function retrieves the hashes from the `SignedBlobTransaction` as defined in Deneb, using SSZ offsets. Offsets are little-endian `uint32` values, as defined in the [SSZ specification](../../ssz/simple-serialize.md). See [the full details of `blob_versioned_hashes` offset calculation](https://gist.github.com/protolambda/23bd106b66f6d4bb854ce46044aa3ca3). @@ -192,12 +192,12 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) if is_execution_enabled(state, block.body): process_withdrawals(state, block.body.execution_payload) - process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP-4844] + process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in Deneb] process_randao(state, block.body) process_eth1_data(state, block.body) process_operations(state, block.body) process_sync_aggregate(state, block.body.sync_aggregate) - process_blob_kzg_commitments(state, block.body) # [New in EIP-4844] + process_blob_kzg_commitments(state, block.body) # [New in Deneb] ``` #### Execution payload @@ -230,7 +230,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe timestamp=payload.timestamp, extra_data=payload.extra_data, base_fee_per_gas=payload.base_fee_per_gas, - excess_data_gas=payload.excess_data_gas, # [New in EIP-4844] + excess_data_gas=payload.excess_data_gas, # [New in Deneb] block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), withdrawals_root=hash_tree_root(payload.withdrawals), @@ -247,7 +247,7 @@ def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody) -> N ## Testing -*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-4844 testing only. 
+*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure Deneb testing only. The `BeaconState` initialization is unchanged, except for the use of the updated `deneb.BeaconBlockBody` type when initializing the first body-root. diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 9629879077..d245034cf7 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -1,4 +1,4 @@ -# EIP-4844 -- Fork Choice +# Deneb -- Fork Choice ## Table of contents @@ -19,7 +19,7 @@ ## Introduction -This is the modification of the fork choice accompanying the EIP-4844 upgrade. +This is the modification of the fork choice accompanying the Deneb upgrade. ## Containers @@ -100,7 +100,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Check block is a descendant of the finalized block at the checkpoint finalized slot assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root - # [New in EIP-4844] + # [New in Deneb] # Check if blob data is available # If not, this block MAY be queued and subsequently considered when blob data becomes available assert is_data_available(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments) diff --git a/specs/deneb/fork.md b/specs/deneb/fork.md index 864e28888d..1ace26c7f5 100644 --- a/specs/deneb/fork.md +++ b/specs/deneb/fork.md @@ -1,4 +1,4 @@ -# EIP-4844 -- Fork Logic +# Deneb -- Fork Logic **Notice**: This document is a work-in-progress for researchers and implementers. @@ -12,7 +12,7 @@ - [Helper functions](#helper-functions) - [Misc](#misc) - [Modified `compute_fork_version`](#modified-compute_fork_version) -- [Fork to EIP-4844](#fork-to-eip-4844) +- [Fork to Deneb](#fork-to-deneb) - [Fork trigger](#fork-trigger) - [Upgrading the state](#upgrading-the-state) @@ -20,7 +20,7 @@ ## Introduction -This document describes the process of EIP-4844 upgrade. +This document describes the process of Deneb upgrade. 
## Configuration @@ -53,7 +53,7 @@ def compute_fork_version(epoch: Epoch) -> Version: return GENESIS_FORK_VERSION ``` -## Fork to EIP-4844 +## Fork to Deneb ### Fork trigger @@ -82,7 +82,7 @@ def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState: timestamp=pre.latest_execution_payload_header.timestamp, extra_data=pre.latest_execution_payload_header.extra_data, base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas, - excess_data_gas=uint256(0), # [New in EIP-4844] + excess_data_gas=uint256(0), # [New in Deneb] block_hash=pre.latest_execution_payload_header.block_hash, transactions_root=pre.latest_execution_payload_header.transactions_root, withdrawals_root=pre.latest_execution_payload_header.withdrawals_root, diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 852597b099..b1ff8b9226 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -1,6 +1,6 @@ -# EIP-4844 -- Networking +# Deneb -- Networking -This document contains the consensus-layer networking specification for EIP-4844. +This document contains the consensus-layer networking specification for Deneb. The specification of these changes continues in the same format as the network specifications of previous upgrades, and assumes them as pre-requisite. @@ -50,7 +50,7 @@ class SignedBeaconBlockAndBlobsSidecar(Container): ## The gossip domain: gossipsub -Some gossip meshes are upgraded in the fork of EIP-4844 to support upgraded types. +Some gossip meshes are upgraded in the fork of Deneb to support upgraded types. ### Topics and messages @@ -69,7 +69,7 @@ The new topics along with the type of the `data` field of a gossipsub message ar #### Global topics -EIP-4844 introduces a new global topic for beacon block and blobs-sidecars. +Deneb introduces a new global topic for beacon block and blobs-sidecars. ##### `beacon_block` @@ -107,7 +107,7 @@ details on how to handle transitioning gossip topics for this upgrade. 
**Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/` -The EIP-4844 fork-digest is introduced to the `context` enum to specify EIP-4844 beacon block type. +The Deneb fork-digest is introduced to the `context` enum to specify Deneb beacon block type. Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index ac99313ce9..9590575584 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -1,4 +1,4 @@ -# EIP-4844 -- Polynomial Commitments +# Deneb -- Polynomial Commitments ## Table of contents @@ -48,7 +48,7 @@ ## Introduction -This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the EIP-4844 specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations. +This document specifies basic polynomial operations and KZG polynomial commitment operations as they are needed for the Deneb specification. The implementations are not optimized for performance, but readability. All practical implementations should optimize the polynomial operations. Functions flagged as "Public method" MUST be provided by the underlying KZG library as public functions. All other functions are private functions used internally by the KZG library. @@ -337,7 +337,7 @@ def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial, ### KZG -KZG core functions. These are also defined in EIP-4844 execution specs. +KZG core functions. These are also defined in Deneb execution specs. 
#### `blob_to_kzg_commitment` diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 413e315fc6..92a5e53337 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -1,4 +1,4 @@ -# EIP-4844 -- Honest Validator +# Deneb -- Honest Validator **Notice**: This document is a work-in-progress for researchers and implementers. @@ -25,14 +25,14 @@ ## Introduction -This document represents the changes to be made in the code of an "honest validator" to implement EIP-4844. +This document represents the changes to be made in the code of an "honest validator" to implement Deneb. ## Prerequisites This document is an extension of the [Capella -- Honest Validator](../capella/validator.md) guide. All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. -All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of EIP-4844](./beacon-chain.md) are requisite for this document and used throughout. +All terminology, constants, functions, and protocol mechanics defined in the updated [Beacon Chain doc of Deneb](./beacon-chain.md) are requisite for this document and used throughout. Please see related Beacon Chain doc before continuing and use them as a reference throughout. 
## Helpers diff --git a/tests/core/pyspec/eth2spec/test/helpers/sharding.py b/tests/core/pyspec/eth2spec/test/helpers/sharding.py index 2ea8c94bce..fd60d5d3be 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/sharding.py +++ b/tests/core/pyspec/eth2spec/test/helpers/sharding.py @@ -12,7 +12,7 @@ # -# Containers from EIP-4844 +# Containers from Deneb # MAX_CALLDATA_SIZE = 2**24 MAX_VERSIONED_HASHES_LIST_SIZE = 2**24 diff --git a/tests/generators/transition/main.py b/tests/generators/transition/main.py index a4eba90df4..303f309c26 100644 --- a/tests/generators/transition/main.py +++ b/tests/generators/transition/main.py @@ -16,8 +16,8 @@ test_slashing as test_altair_slashing, test_operations as test_altair_operations, ) -from eth2spec.test.eip4844.transition import ( - test_operations as test_eip4844_operations, +from eth2spec.test.deneb.transition import ( + test_operations as test_deneb_operations, ) @@ -46,7 +46,7 @@ def cases_fn() -> Iterable[gen_typing.TestCase]: test_altair_leaking, test_altair_slashing, test_altair_operations, - test_eip4844_operations, + test_deneb_operations, ) for transition_test_module in all_tests: for pre_fork, post_fork in ALL_PRE_POST_FORKS: From f91b9863cec50895143ec0d60249d0d2cb34eec6 Mon Sep 17 00:00:00 2001 From: Potuz Date: Fri, 10 Feb 2023 11:43:38 -0300 Subject: [PATCH 026/210] Simplify commitee weight computation --- specs/phase0/fork-choice.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index e535184af7..f2ccc24b9d 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -194,10 +194,7 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei: proposer_score = Gwei(0) # Boost is applied if ``root`` is an ancestor of ``proposer_boost_root`` if get_ancestor(store, store.proposer_boost_root, store.blocks[root].slot) == root: - num_validators = len(get_active_validator_indices(state, get_current_epoch(state))) - 
avg_balance = get_total_active_balance(state) // num_validators - committee_size = num_validators // SLOTS_PER_EPOCH - committee_weight = committee_size * avg_balance + committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH proposer_score = (committee_weight * PROPOSER_SCORE_BOOST) // 100 return attestation_score + proposer_score From b76ea49feceb4e6c00c068dd769fdc987a0ed1a2 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Fri, 27 Jan 2023 14:23:38 +0000 Subject: [PATCH 027/210] Add KZG multi verify function --- specs/deneb/polynomial-commitments.md | 25 +++++++++++++++++++++++++ 1 file changed, 25 insertions(+) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 9590575584..a7df3c84d3 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -527,3 +527,28 @@ def verify_blob_kzg_proof_multi(blobs: Sequence[Blob], return verify_kzg_proof_multi(commitments, evaluation_challenges, ys, proofs) ``` + +#### `verify_aggregate_kzg_proof_multi` + +```python +def verify_aggregate_kzg_proof_multi(list_blobs: Sequence[Sequence[Blob]], + list_commitments_bytes: Sequence[Sequence[Bytes48]], + list_aggregated_proof_bytes: Sequence[Bytes48]) -> bool: + """ + Given a list of blobs and an aggregated KZG proof, verify that they correspond to the provided commitments. + + Public method. 
+ """ + + aggregated_poly_commitments, evaluation_challenges, ys = [], [], [] + for blobs, commitments_bytes in zip(list_blobs, list_commitments_bytes): + aggregated_poly_commitment, evaluation_challenge, y = \ + verify_aggregate_kzg_proof_aggregation(blobs, commitments_bytes) + aggregated_poly_commitments.append(aggregated_poly_commitment) + evaluation_challenges.append(evaluation_challenge) + ys.append(y) + + list_aggregated_proof = [bytes_to_kzg_proof(proof) for proof in list_aggregated_proof_bytes] + + return verify_kzg_proof_multi(aggregated_poly_commitments, evaluation_challenges, ys, list_aggregated_proof) +``` From 7f1748b3c876db2ed818a036a16ba117573eafea Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sun, 29 Jan 2023 13:05:02 +0000 Subject: [PATCH 028/210] Change blob verification fiat-shamir to single blob --- specs/deneb/polynomial-commitments.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index a7df3c84d3..40fb83ad49 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -540,15 +540,15 @@ def verify_aggregate_kzg_proof_multi(list_blobs: Sequence[Sequence[Blob]], Public method. 
""" - aggregated_poly_commitments, evaluation_challenges, ys = [], [], [] - for blobs, commitments_bytes in zip(list_blobs, list_commitments_bytes): - aggregated_poly_commitment, evaluation_challenge, y = \ - verify_aggregate_kzg_proof_aggregation(blobs, commitments_bytes) - aggregated_poly_commitments.append(aggregated_poly_commitment) + commitments, evaluation_challenges, ys, proofs = [], [], [], [] + for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes): + commitment = bytes_to_kzg_commitment(commitment_bytes) + commitments.append(commitment) + evaluation_challenge = compute_challenge(blob, commitment) evaluation_challenges.append(evaluation_challenge) - ys.append(y) - - list_aggregated_proof = [bytes_to_kzg_proof(proof) for proof in list_aggregated_proof_bytes] + polynomial = blob_to_polynomial(blob) + ys.append(evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge)) + proofs.append(bytes_to_kzg_proof(proof_bytes)) - return verify_kzg_proof_multi(aggregated_poly_commitments, evaluation_challenges, ys, list_aggregated_proof) + return verify_kzg_proof_multi(commitments, evaluation_challenges, ys, proofs) ``` From 86d955ab7f286a2595ec7f3966785de38f717ccf Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 13 Feb 2023 14:32:50 +0000 Subject: [PATCH 029/210] Call compute_challenge with polynomial as argument --- specs/deneb/polynomial-commitments.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 40fb83ad49..d4e3b26d68 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -547,6 +547,8 @@ def verify_aggregate_kzg_proof_multi(list_blobs: Sequence[Sequence[Blob]], evaluation_challenge = compute_challenge(blob, commitment) evaluation_challenges.append(evaluation_challenge) polynomial = blob_to_polynomial(blob) + evaluation_challenge = compute_challenge(polynomial, commitment) + 
evaluation_challenges.append(evaluation_challenge) ys.append(evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge)) proofs.append(bytes_to_kzg_proof(proof_bytes)) From c8719f85246a64383bd8cd0a977f88d7a3a255bb Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 14 Feb 2023 13:32:18 +0100 Subject: [PATCH 030/210] Apply suggestions from code review Co-authored-by: Danny Ryan --- specs/deneb/p2p-interface.md | 12 ++++++------ specs/deneb/validator.md | 4 ++-- 2 files changed, 8 insertions(+), 8 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 41af2f9f14..d77e4f371d 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -47,13 +47,13 @@ The specification of these changes continues in the same format as the network s ```python class BlobSidecar(Container): block_root: Root - index: BlobIndex # Index of blob in block + index: BlobIndex # Index of blob in block slot: Slot - block_parent_root: Root # Proposer shuffling determinant + block_parent_root: Root # Proposer shuffling determinant proposer_index: ValidatorIndex blob: Blob kzg_commitment: KZGCommitment - kzg_proof: KZGProof # Allows for quick verification of kzg_commitment + kzg_proof: KZGProof # Allows for quick verification of kzg_commitment ``` ### `SignedBlobSidecar` @@ -101,11 +101,11 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`. - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future blocks for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. 
validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` -- _[IGNORE]_ The blob's block's parent defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). +- _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). - _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.slot, sidecar.proposer_index, sidecar.index)`. -- Clients MUST discard blocks where multiple sidecars for the same proposer and index have been observed. -- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `parent_root`/`slot`). +- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. ### Transitioning the gossip @@ -185,7 +185,7 @@ Response Content: Requests sidecars by block root and index. The response is a list of `BlobSidecar` whose length is less than or equal to the number of requests. -It may be less in the case that the responding peer is missing blocks and sidecars. +It may be less in the case that the responding peer is missing blocks or sidecars. The response is unsigned, i.e. 
`BlobSidecar`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip. diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index bd05c31d2d..a415fbf438 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -96,7 +96,7 @@ def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blo slot=block.slot, block_parent_root=block.parent_root, blob=blob, - kzg_commitment=block.body.blob_kzg_commitments[idx], + kzg_commitment=block.body.blob_kzg_commitments[index], kzg_proof=compute_kzg_proof(blob), ) for index, blob in enumerate(blobs) @@ -104,7 +104,7 @@ def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blo ``` -Then `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and to the global `blob_sidecar_{index}` topics according to its index. +Then `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the `blob_sidecar_{index}` topics according to its index. `signature` is obtained from: From e6b8324e25f4e2151346629cd4535b6fde3ca083 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 14 Feb 2023 13:39:59 +0100 Subject: [PATCH 031/210] sidecar domain --- specs/deneb/beacon-chain.md | 6 ++++++ specs/deneb/validator.md | 2 +- 2 files changed, 7 insertions(+), 1 deletion(-) diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md index 97b426f5d6..c06e44f39b 100644 --- a/specs/deneb/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -48,6 +48,12 @@ This upgrade adds blobs to the beacon chain as part of Deneb. 
This is an extensi ## Constants +### Domain types + +| Name | Value | +| - | - | +| `DOMAIN_BLOB_SIDECAR` | `DomainType('0x0B000000')` | + ### Blob | Name | Value | diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index a415fbf438..1d4a7287e9 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -112,7 +112,7 @@ Then `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` def get_blob_sidecar_signature(state: BeaconState, sidecar: BlobSidecar, privkey: int) -> BLSSignature: - domain = get_domain(state, DOMAIN_BEACON_PROPOSER, compute_epoch_at_slot(sidecar.slot)) + domain = get_domain(state, DOMAIN_BLOB_SIDECAR, compute_epoch_at_slot(sidecar.slot)) signing_root = compute_signing_root(sidecar, domain) return bls.Sign(privkey, signing_root) ``` From fc4e1a9acfc6565d42c6756b54b2ec42ec3171a3 Mon Sep 17 00:00:00 2001 From: George Kadianakis Date: Tue, 14 Feb 2023 14:50:44 +0200 Subject: [PATCH 032/210] EIP4844: compute_kzg_proof() can now create proofs within the domain (#3243) This will be used by optimistic rollups to create proofs about past data --- specs/deneb/polynomial-commitments.md | 47 +++++++++++++++++-- .../test_polynomial_commitments.py | 20 ++++++++ 2 files changed, 63 insertions(+), 4 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index facf1dbc22..dde75bdcd8 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -38,6 +38,7 @@ - [`verify_kzg_proof`](#verify_kzg_proof) - [`verify_kzg_proof_impl`](#verify_kzg_proof_impl) - [`compute_kzg_proof`](#compute_kzg_proof) + - [`compute_quotient_eval_within_domain`](#compute_quotient_eval_within_domain) - [`compute_kzg_proof_impl`](#compute_kzg_proof_impl) - [`compute_aggregated_poly_and_commitment`](#compute_aggregated_poly_and_commitment) - [`compute_aggregate_kzg_proof`](#compute_aggregate_kzg_proof) @@ -427,6 +428,34 @@ def compute_kzg_proof(blob: Blob, z: 
Bytes32) -> KZGProof: return compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z)) ``` +#### `compute_quotient_eval_within_domain` + +```python +def compute_quotient_eval_within_domain(z: BLSFieldElement, + polynomial: Polynomial, + y: BLSFieldElement + ) -> BLSFieldElement: + """ + Given `y == p(z)` for a polynomial `p(x)`, compute `q(z)`: the KZG quotient polynomial evaluated at `z` for the + special case where `z` is in `ROOTS_OF_UNITY`. + + For more details, read https://dankradfeist.de/ethereum/2021/06/18/pcs-multiproofs.html section "Dividing + when one of the points is zero". The code below computes q(x_m) for the roots of unity special case. + """ + roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY) + result = 0 + for i, omega_i in enumerate(roots_of_unity_brp): + if omega_i == z: # skip the evaluation point in the sum + continue + + f_i = int(BLS_MODULUS) + int(polynomial[i]) - int(y) % BLS_MODULUS + numerator = f_i * int(omega_i) % BLS_MODULUS + denominator = int(z) * (int(BLS_MODULUS) + int(z) - int(omega_i)) % BLS_MODULUS + result += div(BLSFieldElement(numerator), BLSFieldElement(denominator)) + + return BLSFieldElement(result % BLS_MODULUS) +``` + #### `compute_kzg_proof_impl` ```python @@ -434,16 +463,26 @@ def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGPro """ Helper function for compute_kzg_proof() and compute_aggregate_kzg_proof(). 
""" + roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY) + + # For all x_i, compute p(x_i) - p(z) y = evaluate_polynomial_in_evaluation_form(polynomial, z) polynomial_shifted = [BLSFieldElement((int(p) - int(y)) % BLS_MODULUS) for p in polynomial] - # Make sure we won't divide by zero during division - assert z not in ROOTS_OF_UNITY + # For all x_i, compute (x_i - z) denominator_poly = [BLSFieldElement((int(x) - int(z)) % BLS_MODULUS) for x in bit_reversal_permutation(ROOTS_OF_UNITY)] - # Calculate quotient polynomial by doing point-by-point division - quotient_polynomial = [div(a, b) for a, b in zip(polynomial_shifted, denominator_poly)] + # Compute the quotient polynomial directly in evaluation form + quotient_polynomial = [BLSFieldElement(0)] * FIELD_ELEMENTS_PER_BLOB + for i, (a, b) in enumerate(zip(polynomial_shifted, denominator_poly)): + if b == 0: + # The denominator is zero hence `z` is a root of unity: we must handle it as a special case + quotient_polynomial[i] = compute_quotient_eval_within_domain(roots_of_unity_brp[i], polynomial, y) + else: + # Compute: q(x_i) = (p(x_i) - p(z)) / (x_i - z). 
+ quotient_polynomial[i] = div(a, b) + return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial)) ``` diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 4d881e3e36..67dce5c5b3 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -87,3 +87,23 @@ def test_barycentric_within_domain(spec, state): # The two evaluations should be agree and p(z) should also be the i-th "coefficient" of the polynomial in # evaluation form assert p_z_coeff == p_z_eval == poly_eval[i] + + +@with_deneb_and_later +@spec_state_test +def test_compute_kzg_proof_within_domain(spec, state): + """ + Create and verify KZG proof that p(z) == y + where z is in the domain of our KZG scheme (i.e. a relevant root of unity). 
+ """ + blob = get_sample_blob(spec) + commitment = spec.blob_to_kzg_commitment(blob) + polynomial = spec.blob_to_polynomial(blob) + + roots_of_unity_brp = spec.bit_reversal_permutation(spec.ROOTS_OF_UNITY) + + for i, z in enumerate(roots_of_unity_brp): + proof = spec.compute_kzg_proof_impl(polynomial, z) + + y = spec.evaluate_polynomial_in_evaluation_form(polynomial, z) + assert spec.verify_kzg_proof_impl(commitment, z, y, proof) From 58207c1c05b494b97bf01176547e2ed8749783f5 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Tue, 14 Feb 2023 14:18:29 +0100 Subject: [PATCH 033/210] Upper limit on indices --- specs/deneb/fork-choice.md | 2 +- specs/deneb/p2p-interface.md | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index ea235c0553..29d59048b3 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -44,7 +44,7 @@ The implementation of `is_data_available` will become more sophisticated during Initially, verification requires every verifying actor to retrieve all matching `BlobSidecar`s, and validate the sidecar with `validate_blob_sidecars`. -The block MUST NOT be considered valid until all valid `BlobSidecar`s have been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `BlobSidecar`s has subsequently been pruned. +The block MUST NOT be considered valid until all valid `BlobSidecar`s have been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `BlobSidecar`s have subsequently been pruned. 
```python def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool: diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index d77e4f371d..3cbc000529 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -94,7 +94,7 @@ The *type* of the payload of this topic changes to the (modified) `SignedBeaconB ##### `blob_sidecar_{index}` -This topic is used to propagate signed blob sidecars, one for each sidecar index. +This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`. The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`: @@ -108,6 +108,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. 
+ ### Transitioning the gossip See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for From db5a168f3b073cf525116bca7433fede0172c422 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 14 Feb 2023 23:39:43 +0800 Subject: [PATCH 034/210] Move experimental features to `specs/_features` folder --- specs/{ => _features}/custody_game/beacon-chain.md | 0 specs/{ => _features}/custody_game/validator.md | 4 ++-- specs/{ => _features}/das/das-core.md | 0 specs/{ => _features}/das/fork-choice.md | 0 specs/{ => _features}/das/p2p-interface.md | 4 ++-- specs/{ => _features}/das/sampling.md | 0 specs/{ => _features}/sharding/beacon-chain.md | 0 specs/{ => _features}/sharding/p2p-interface.md | 2 +- specs/{ => _features}/sharding/polynomial-commitments.md | 0 specs/{ => _features}/sharding/validator.md | 2 +- 10 files changed, 6 insertions(+), 6 deletions(-) rename specs/{ => _features}/custody_game/beacon-chain.md (100%) rename specs/{ => _features}/custody_game/validator.md (96%) rename specs/{ => _features}/das/das-core.md (100%) rename specs/{ => _features}/das/fork-choice.md (100%) rename specs/{ => _features}/das/p2p-interface.md (98%) rename specs/{ => _features}/das/sampling.md (100%) rename specs/{ => _features}/sharding/beacon-chain.md (100%) rename specs/{ => _features}/sharding/p2p-interface.md (97%) rename specs/{ => _features}/sharding/polynomial-commitments.md (100%) rename specs/{ => _features}/sharding/validator.md (99%) diff --git a/specs/custody_game/beacon-chain.md b/specs/_features/custody_game/beacon-chain.md similarity index 100% rename from specs/custody_game/beacon-chain.md rename to specs/_features/custody_game/beacon-chain.md diff --git a/specs/custody_game/validator.md b/specs/_features/custody_game/validator.md similarity index 96% rename from specs/custody_game/validator.md rename to specs/_features/custody_game/validator.md index 05ceb854d7..ed47eb0acc 100644 --- 
a/specs/custody_game/validator.md +++ b/specs/_features/custody_game/validator.md @@ -36,11 +36,11 @@ docs are requisite for this document and used throughout. Please see the Custody ## Becoming a validator -Becoming a validator in Custody Game is unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#becoming-a-validator) for details. +Becoming a validator in Custody Game is unchanged from Phase 0. See the [Phase 0 validator guide](../../phase0/validator.md#becoming-a-validator) for details. ## Beacon chain validator assignments -Beacon chain validator assignments to beacon committees and beacon block proposal are unchanged from Phase 0. See the [Phase 0 validator guide](../phase0/validator.md#validator-assignments) for details. +Beacon chain validator assignments to beacon committees and beacon block proposal are unchanged from Phase 0. See the [Phase 0 validator guide](../../phase0/validator.md#validator-assignments) for details. ##### Custody slashings diff --git a/specs/das/das-core.md b/specs/_features/das/das-core.md similarity index 100% rename from specs/das/das-core.md rename to specs/_features/das/das-core.md diff --git a/specs/das/fork-choice.md b/specs/_features/das/fork-choice.md similarity index 100% rename from specs/das/fork-choice.md rename to specs/_features/das/fork-choice.md diff --git a/specs/das/p2p-interface.md b/specs/_features/das/p2p-interface.md similarity index 98% rename from specs/das/p2p-interface.md rename to specs/_features/das/p2p-interface.md index a60bd9c85f..b166c9c3e4 100644 --- a/specs/das/p2p-interface.md +++ b/specs/_features/das/p2p-interface.md @@ -143,7 +143,7 @@ If the node does not already have connected peers on the topic it needs to sampl ### Topics and messages -Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are: +Following the same scheme as the [Phase0 gossip 
topics](../../phase0/p2p-interface.md#topics-and-messages), names and payload types are: | Name | Message Type | |----------------------------------|---------------------------| | `das_sample_{subnet_index}` | `DASSample` | @@ -192,7 +192,7 @@ This is to serve other peers that may have missed it. To pull samples from nodes, in case of network instability when samples are unavailable, a new query method is added to the Req-Resp domain. -This builds on top of the protocol identification and encoding spec which was introduced in [the Phase0 network spec](../phase0/p2p-interface.md). +This builds on top of the protocol identification and encoding spec which was introduced in [the Phase0 network spec](../../phase0/p2p-interface.md). Note that DAS networking uses a different protocol prefix: `/eth2/das/req` diff --git a/specs/das/sampling.md b/specs/_features/das/sampling.md similarity index 100% rename from specs/das/sampling.md rename to specs/_features/das/sampling.md diff --git a/specs/sharding/beacon-chain.md b/specs/_features/sharding/beacon-chain.md similarity index 100% rename from specs/sharding/beacon-chain.md rename to specs/_features/sharding/beacon-chain.md diff --git a/specs/sharding/p2p-interface.md b/specs/_features/sharding/p2p-interface.md similarity index 97% rename from specs/sharding/p2p-interface.md rename to specs/_features/sharding/p2p-interface.md index 3b627a3398..c29146fe9d 100644 --- a/specs/sharding/p2p-interface.md +++ b/specs/_features/sharding/p2p-interface.md @@ -39,7 +39,7 @@ The adjustments and additions for Shards are outlined in this document. 
### Topics and messages -Following the same scheme as the [Phase0 gossip topics](../phase0/p2p-interface.md#topics-and-messages), names and payload types are: +Following the same scheme as the [Phase0 gossip topics](../../phase0/p2p-interface.md#topics-and-messages), names and payload types are: | Name | Message Type | |---------------------------------|--------------------------| diff --git a/specs/sharding/polynomial-commitments.md b/specs/_features/sharding/polynomial-commitments.md similarity index 100% rename from specs/sharding/polynomial-commitments.md rename to specs/_features/sharding/polynomial-commitments.md diff --git a/specs/sharding/validator.md b/specs/_features/sharding/validator.md similarity index 99% rename from specs/sharding/validator.md rename to specs/_features/sharding/validator.md index 38914095f4..466c4df663 100644 --- a/specs/sharding/validator.md +++ b/specs/_features/sharding/validator.md @@ -33,7 +33,7 @@ This document represents the changes to be made in the code of an "honest valida ## Prerequisites -This document is an extension of the [Bellatrix -- Honest Validator](../bellatrix/validator.md) guide. +This document is an extension of the [Bellatrix -- Honest Validator](../../bellatrix/validator.md) guide. All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Sharding](./beacon-chain.md) are requisite for this document and used throughout. 
From 95720872e6097505be325eb5be2e1e348f5cd390 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 14 Feb 2023 23:50:00 +0800 Subject: [PATCH 035/210] Update README.md --- Makefile | 6 +++--- README.md | 6 +++--- 2 files changed, 6 insertions(+), 6 deletions(-) diff --git a/Makefile b/Makefile index 854f42ce38..d4259b2fe9 100644 --- a/Makefile +++ b/Makefile @@ -27,10 +27,10 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) \ $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SPEC_DIR)/altair/**/*.md) \ $(wildcard $(SPEC_DIR)/bellatrix/*.md) \ $(wildcard $(SPEC_DIR)/capella/*.md) $(wildcard $(SPEC_DIR)/capella/**/*.md) \ - $(wildcard $(SPEC_DIR)/custody/*.md) \ - $(wildcard $(SPEC_DIR)/das/*.md) \ - $(wildcard $(SPEC_DIR)/sharding/*.md) \ $(wildcard $(SPEC_DIR)/deneb/*.md) $(wildcard $(SPEC_DIR)/deneb/**/*.md) \ + $(wildcard $(SPEC_DIR)/_features/custody/*.md) \ + $(wildcard $(SPEC_DIR)/_features/das/*.md) \ + $(wildcard $(SPEC_DIR)/_features/sharding/*.md) \ $(wildcard $(SSZ_DIR)/*.md) COV_HTML_OUT=.htmlcov diff --git a/README.md b/README.md index da893a53d2..49e1c3a4d9 100644 --- a/README.md +++ b/README.md @@ -26,9 +26,9 @@ Features are researched and developed in parallel, and then consolidated into se | - | - | - | | Capella (tentative) |
  • Core
    • [Beacon chain changes](specs/capella/beacon-chain.md)
    • [Capella fork](specs/capella/fork.md)
  • Additions
    • [Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))
    • [Validator additions](specs/capella/validator.md)
    • [P2P networking](specs/capella/p2p-interface.md)
| | Deneb (tentative) |
  • Core
    • [Beacon Chain changes](specs/deneb/beacon-chain.md)
    • [Deneb fork](specs/deneb/fork.md)
    • [Polynomial commitments](specs/deneb/polynomial-commitments.md)
    • [Fork choice changes](specs/deneb/fork-choice.md)
  • Additions
    • [Light client sync protocol changes](specs/deneb/light-client/sync-protocol.md) ([fork](specs/deneb/light-client/fork.md), [full node](specs/deneb/light-client/full-node.md), [networking](specs/deneb/light-client/p2p-interface.md))
    • [Honest validator guide changes](specs/deneb/validator.md)
    • [P2P networking](specs/deneb/p2p-interface.md)
| -| Sharding (outdated) |
  • Core
    • [Beacon Chain changes](specs/sharding/beacon-chain.md)
  • Additions
    • [P2P networking](specs/sharding/p2p-interface.md)
| -| Custody Game (outdated) |
  • Core
    • [Beacon Chain changes](specs/custody_game/beacon-chain.md)
  • Additions
    • [Honest validator guide changes](specs/custody_game/validator.md)
| Dependent on sharding | -| Data Availability Sampling (outdated) |
  • Core
    • [Core types and functions](specs/das/das-core.md)
    • [Fork choice changes](specs/das/fork-choice.md)
  • Additions
    • [P2P Networking](specs/das/p2p-interface.md)
    • [Sampling process](specs/das/sampling.md)
|
  • Dependent on sharding
  • [Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)
| +| Sharding (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/sharding/beacon-chain.md)
  • Additions
    • [P2P networking](specs/_features/sharding/p2p-interface.md)
| +| Custody Game (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)
  • Additions
    • [Honest validator guide changes](specs/_features/custody_game/validator.md)
| Dependent on sharding | +| Data Availability Sampling (outdated) |
  • Core
    • [Core types and functions](specs/_features/das/das-core.md)
    • [Fork choice changes](specs/_features/das/fork-choice.md)
  • Additions
    • [P2P Networking](specs/_features/das/p2p-interface.md)
    • [Sampling process](specs/_features/das/sampling.md)
|
  • Dependent on sharding
  • [Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)
| ### Accompanying documents can be found in [specs](specs) and include: From c49a2c2855cb19bcbf350540e130f9478633bdb7 Mon Sep 17 00:00:00 2001 From: dankrad Date: Tue, 14 Feb 2023 20:00:58 +0000 Subject: [PATCH 036/210] Update specs/deneb/polynomial-commitments.md Co-authored-by: George Kadianakis --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index d4e3b26d68..b604e7431f 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -406,7 +406,7 @@ def verify_kzg_proof_multi(commitments: Sequence[KZGCommitment], num_commitments = int.to_bytes(len(commitments), 8, ENDIANNESS) data = RANDOM_CHALLENGE_KZG_MULTI_DOMAIN + degree_poly + num_commitments - # Append each polynomial which is composed by field elements + # Append all inputs to the transcript before we hash for commitment, z, y, proof in zip(commitments, zs, ys, proofs): data += commitment \ + int.to_bytes(z, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) \ From 855cf062f01f6b66915e007d2bdba673c31c51c7 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 14 Feb 2023 20:07:22 +0000 Subject: [PATCH 037/210] Remove additional function --- specs/deneb/polynomial-commitments.md | 27 --------------------------- 1 file changed, 27 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index b604e7431f..c9d7496fa3 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -527,30 +527,3 @@ def verify_blob_kzg_proof_multi(blobs: Sequence[Blob], return verify_kzg_proof_multi(commitments, evaluation_challenges, ys, proofs) ``` - -#### `verify_aggregate_kzg_proof_multi` - -```python -def verify_aggregate_kzg_proof_multi(list_blobs: Sequence[Sequence[Blob]], - list_commitments_bytes: Sequence[Sequence[Bytes48]], - list_aggregated_proof_bytes: Sequence[Bytes48]) -> bool: 
- """ - Given a list of blobs and an aggregated KZG proof, verify that they correspond to the provided commitments. - - Public method. - """ - - commitments, evaluation_challenges, ys, proofs = [], [], [], [] - for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes): - commitment = bytes_to_kzg_commitment(commitment_bytes) - commitments.append(commitment) - evaluation_challenge = compute_challenge(blob, commitment) - evaluation_challenges.append(evaluation_challenge) - polynomial = blob_to_polynomial(blob) - evaluation_challenge = compute_challenge(polynomial, commitment) - evaluation_challenges.append(evaluation_challenge) - ys.append(evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge)) - proofs.append(bytes_to_kzg_proof(proof_bytes)) - - return verify_kzg_proof_multi(commitments, evaluation_challenges, ys, proofs) -``` From 3a6fccd389de63c80b45c08cf62b6d867e36597a Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 14 Feb 2023 20:17:25 +0000 Subject: [PATCH 038/210] Remove double hashing --- specs/deneb/polynomial-commitments.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index c9d7496fa3..e4e899d671 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -248,8 +248,7 @@ def compute_challenge(blob: Blob, data += commitment # Transcript has been prepared: time to create the challenges - hashed_data = hash(data) - return hash_to_bls_field(hashed_data + b'\x00') + return hash_to_bls_field(data) ``` #### `bls_modular_inverse` @@ -413,8 +412,7 @@ def verify_kzg_proof_multi(commitments: Sequence[KZGCommitment], + int.to_bytes(y, BYTES_PER_FIELD_ELEMENT, ENDIANNESS) \ + proof - hashed_data = hash(data) - r = hash_to_bls_field(hashed_data + b'\x00') + r = hash_to_bls_field(data) r_powers = compute_powers(r, len(commitments)) # Verify: e(sum r^i proof_i, [s]) == From 
aafbd45a19d75dc825d94ec513b920e63dae98c5 Mon Sep 17 00:00:00 2001 From: dankrad Date: Tue, 14 Feb 2023 20:59:24 +0000 Subject: [PATCH 039/210] Update specs/deneb/polynomial-commitments.md Co-authored-by: George Kadianakis --- specs/deneb/polynomial-commitments.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index e4e899d671..04f1b97c6f 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -419,9 +419,9 @@ def verify_kzg_proof_multi(commitments: Sequence[KZGCommitment], # e(sum r^i (commitment_i - [y_i]) + sum r^i z_i proof_i, [1]) proof_lincomb = g1_lincomb(proofs, r_powers) proof_z_lincomb = g1_lincomb(proofs, [z * r_power for z, r_power in zip(zs, r_powers)]) - C_minus_ys = [bls.G1_to_bytes48(bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y))) + C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y)) for commitment, y in zip(commitments, ys)] - C_minus_y_as_KZGCommitments = [KZGCommitment(x) for x in C_minus_ys] + C_minus_y_as_KZGCommitments = [KZGCommitment(bls.G1_to_bytes48(x)) for x in C_minus_ys] C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers) return bls.pairing_check([ From d8509e42c6c02f97ec738843ba617d292b5f8554 Mon Sep 17 00:00:00 2001 From: dankrad Date: Tue, 14 Feb 2023 20:59:41 +0000 Subject: [PATCH 040/210] Update specs/deneb/polynomial-commitments.md Co-authored-by: George Kadianakis --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 04f1b97c6f..4d990113df 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -493,7 +493,7 @@ def verify_blob_kzg_proof(blob: Blob, polynomial = blob_to_polynomial(blob) evaluation_challenge = 
compute_challenge(blob, commitment) - # Evaluate polynomial at `evaluation_challenge` (evaluation function checks for div-by-zero) + # Evaluate polynomial at `evaluation_challenge` y = evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge) # Verify proof From c3cb7fa773e10e14695536b6576491636cac58c3 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 14 Feb 2023 21:10:09 +0000 Subject: [PATCH 041/210] Comment on compute_challenge, assert on verify_blob_kzg_proof_multi --- specs/deneb/polynomial-commitments.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 4d990113df..2484e1a2c4 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -229,11 +229,8 @@ def blob_to_polynomial(blob: Blob) -> Polynomial: def compute_challenge(blob: Blob, commitment: KZGCommitment) -> BLSFieldElement: """ - Return the Fiat-Shamir challenges required by the rest of the protocol. + Return the Fiat-Shamir challenge required by the rest of the protocol. The Fiat-Shamir logic works as per the following pseudocode: - - hashed_data = hash(DOMAIN_SEPARATOR, polynomial, commitment) - eval_challenge = hash(hashed_data, 0) """ # Append the number of polynomials and the degree of each polynomial as a domain separator @@ -512,6 +509,8 @@ def verify_blob_kzg_proof_multi(blobs: Sequence[Blob], Public method. 
""" + + assert len(blobs) == len(commitments_bytes) == len(proofs_bytes) commitments, evaluation_challenges, ys, proofs = [], [], [], [] for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes): From 4086a09d0fdc8e21bc96b68160051cb53c3c24ea Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 14 Feb 2023 21:21:46 +0000 Subject: [PATCH 042/210] multi -> batch --- specs/deneb/polynomial-commitments.md | 18 +++++++++--------- 1 file changed, 9 insertions(+), 9 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 2484e1a2c4..39ae3570cc 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -36,12 +36,12 @@ - [`blob_to_kzg_commitment`](#blob_to_kzg_commitment) - [`verify_kzg_proof`](#verify_kzg_proof) - [`verify_kzg_proof_impl`](#verify_kzg_proof_impl) - - [`verify_kzg_proof_multi`](#verify_kzg_proof_multi) + - [`verify_kzg_proof_batch`](#verify_kzg_proof_batch) - [`compute_kzg_proof`](#compute_kzg_proof) - [`compute_kzg_proof_impl`](#compute_kzg_proof_impl) - [`compute_blob_kzg_proof`](#compute_blob_kzg_proof) - [`verify_blob_kzg_proof`](#verify_blob_kzg_proof) - - [`verify_blob_kzg_proof_multi`](#verify_blob_kzg_proof_multi) + - [`verify_blob_kzg_proof_batch`](#verify_blob_kzg_proof_batch) @@ -83,7 +83,7 @@ Public functions MUST accept raw bytes as input and perform the required cryptog | - | - | | `FIELD_ELEMENTS_PER_BLOB` | `uint64(4096)` | | `FIAT_SHAMIR_PROTOCOL_DOMAIN` | `b'FSBLOBVERIFY_V1_'` | -| `RANDOM_CHALLENGE_KZG_MULTI_DOMAIN` | `b'RCKZGMULTI___V1_'` | +| `RANDOM_CHALLENGE_KZG_BATCH_DOMAIN` | `b'RCKZGBATCH___V1_'` | ### Crypto @@ -383,10 +383,10 @@ def verify_kzg_proof_impl(commitment: KZGCommitment, ]) ``` -#### `verify_kzg_proof_multi` +#### `verify_kzg_proof_batch` ```python -def verify_kzg_proof_multi(commitments: Sequence[KZGCommitment], +def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment], zs: 
Sequence[BLSFieldElement], ys: Sequence[BLSFieldElement], proofs: Sequence[KZGProof]) -> bool: @@ -400,7 +400,7 @@ def verify_kzg_proof_multi(commitments: Sequence[KZGCommitment], # r just has to be random. degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, ENDIANNESS) num_commitments = int.to_bytes(len(commitments), 8, ENDIANNESS) - data = RANDOM_CHALLENGE_KZG_MULTI_DOMAIN + degree_poly + num_commitments + data = RANDOM_CHALLENGE_KZG_BATCH_DOMAIN + degree_poly + num_commitments # Append all inputs to the transcript before we hash for commitment, z, y, proof in zip(commitments, zs, ys, proofs): @@ -498,10 +498,10 @@ def verify_blob_kzg_proof(blob: Blob, return verify_kzg_proof_impl(commitment, evaluation_challenge, y, proof) ``` -#### `verify_blob_kzg_proof_multi` +#### `verify_blob_kzg_proof_batch` ```python -def verify_blob_kzg_proof_multi(blobs: Sequence[Blob], +def verify_blob_kzg_proof_batch(blobs: Sequence[Blob], commitments_bytes: Sequence[Bytes48], proofs_bytes: Sequence[Bytes48]) -> bool: """ @@ -522,5 +522,5 @@ def verify_blob_kzg_proof_multi(blobs: Sequence[Blob], ys.append(evaluate_polynomial_in_evaluation_form(polynomial, evaluation_challenge)) proofs.append(bytes_to_kzg_proof(proof_bytes)) - return verify_kzg_proof_multi(commitments, evaluation_challenges, ys, proofs) + return verify_kzg_proof_batch(commitments, evaluation_challenges, ys, proofs) ``` From 3a37c3c4978467f8909f66d8899b80b9e1373e72 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 15 Feb 2023 08:00:39 +0100 Subject: [PATCH 043/210] Allow clients to orphan blocks from spammy proposers Proposers that spam the blob topic with multiple blob versions, some of which are invalid, MAY see their block orphaned. 
--- specs/deneb/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 3cbc000529..6f64e5514e 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -104,7 +104,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). - _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.slot, sidecar.proposer_index, sidecar.index)`. - -- Clients MUST discard blocks where multiple sidecars for the same proposer and index have been observed. + -- If full verification of the blob fails at a later processing stage, clients MUST clear the blob from this "seen" cache so as to allow the valid blob to propagate. Block producers MAY orphan blocks if they have observed multiple blobs signed by the proposer for the same "seen" tuple. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. 
From da34af97d4b6575131e67c2bf22acb6de4f0951d Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 15 Feb 2023 08:51:57 +0100 Subject: [PATCH 044/210] simplify blob verification, range request * validate blobs using raw types * remove `BlobSidecars` and send flattened list of `BlobSidecar` instances instead --- specs/deneb/fork-choice.md | 35 +++++++++++++++--------------- specs/deneb/p2p-interface.md | 41 ++++++++++++------------------------ 2 files changed, 30 insertions(+), 46 deletions(-) diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 29d59048b3..58c281a597 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -27,37 +27,36 @@ This is the modification of the fork choice accompanying the Deneb upgrade. #### `validate_blob_sidecars` ```python -def validate_blob_sidecars(slot: Slot, - beacon_block_root: Root, - expected_kzg_commitments: Sequence[KZGCommitment], - blob_sidecars: Sequence[BlobSidecar]) -> None: - assert slot == blobs_sidecar.beacon_block_slot - assert beacon_block_root == blobs_sidecar.beacon_block_root - assert len(expected_kzg_commitments) == len(blob_sidecars) - # TODO validate commitments individually or aggregate first? - # assert verify_aggregate_kzg_proof(blobs, expected_kzg_commitments, kzg_aggregated_proof) +def validate_blobs(expected_kzg_commitments: Sequence[KZGCommitment], + blobs: Sequence[Blob], + proofs: Sequence[KZGProof]) -> None: + assert len(expected_kzg_commitments) == len(blobs) + assert len(blobs) == len(proofs) + + # Clients MAY use `verify_blob_kzg_proof_multi` for efficiency + for commitment, blob, proof in zip(expected_kzg_commitments, blobs, proofs): + assert verify_blob_kzg_proof(commitment, blob, proof) ``` #### `is_data_available` The implementation of `is_data_available` will become more sophisticated during later scaling upgrades. 
-Initially, verification requires every verifying actor to retrieve all matching `BlobSidecar`s, -and validate the sidecar with `validate_blob_sidecars`. +Initially, verification requires every verifying actor to retrieve all matching `Blob`s and `KZGProof`s, and validate them with `validate_blobs`. -The block MUST NOT be considered valid until all valid `BlobSidecar`s have been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `BlobSidecar`s have subsequently been pruned. +The block MUST NOT be considered valid until all valid `Blob`s have been downloaded. Blocks that have been previously validated as available SHOULD be considered available even if the associated `Blob`s have subsequently been pruned. ```python -def is_data_available(slot: Slot, beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool: - # `retrieve_blobs_sidecar` is implementation and context dependent, raises an exception if not available. +def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool: + # `retrieve_blobs_and_proofs` is implementation and context dependent, raises an exception if not available. It returns all the blobs for the given block root. # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` - sidecars = retrieve_blob_sidecars(slot, beacon_block_root) + blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root) - # For testing, `retrieve_blobs_sidecar` returns "TEST". + # For testing, `retrieve_blobs_and_proofs` returns "TEST". # TODO: Remove it once we have a way to inject `BlobSidecar` into tests. 
if isinstance(sidecar, str): return True - validate_blob_sidecars(slot, beacon_block_root, blob_kzg_commitments, sidecars) + validate_blobs(expected_kzg_commitments, blobs, proofs) return True ``` @@ -89,7 +88,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # [New in Deneb] # Check if blob data is available # If not, this block MAY be queued and subsequently considered when blob data becomes available - assert is_data_available(block.slot, hash_tree_root(block), block.body.blob_kzg_commitments) + assert is_data_available(hash_tree_root(block), block.body.blob_kzg_commitments) # Check the block is valid and compute the post-state state = pre_state.copy() diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 6f64e5514e..63a63feda6 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -218,34 +218,24 @@ Request Content: ``` Response Content: - -```python -class BlobSidecars(Container): - block_root: Root - List[BlobSidecar, MAX_BLOBS_PER_BLOCK] -``` - ``` ( - List[BlobSidecars, MAX_REQUEST_BLOB_SIDECARS] + List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK] ) ``` -Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, -leading up to the current head block as selected by fork choice. +Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, leading up to the current head block as selected by fork choice. -The response is unsigned, i.e. `BlobSidecarsByRange`, as the signature of the beacon block proposer -may not be available beyond the initial distribution via gossip. +The response is unsigned, i.e. `BlobSidecarsByRange`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip. -Before consuming the next response chunk, the response reader SHOULD verify the blobs sidecar is well-formatted and -correct w.r.t. the expected KZG commitments through `validate_blobs_sidecar`. 
+Before consuming the next response chunk, the response reader SHOULD verify the blobs sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `validate_blobs_sidecar`. -`BlobsSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` window. +`BlobSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` window. The request MUST be encoded as an SSZ-container. The response MUST consist of zero or more `response_chunk`. -Each _successful_ `response_chunk` MUST contain a single `BlobsSidecar` payload. +Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload. Clients MUST keep a record of signed blobs sidecars seen on the epoch range `[max(current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]` @@ -265,26 +255,21 @@ to be fully compliant with `BlobsSidecarsByRange` requests. participating in the networking immediately, other peers MAY disconnect and/or temporarily ban such an un-synced or semi-synced client. -Clients MUST respond with at least the first blobs sidecar that exists in the range, if they have it, -and no more than `MAX_REQUEST_BLOCKS_DENEB` sidecars. +Clients MUST respond with at least the first blobs sidecar that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` sidecars. -The following blobs sidecars, where they exist, MUST be sent in consecutive order. +The following blobs sidecars, where they exist, MUST be sent in consecutive `(slot, index)` order. Clients MAY limit the number of blobs sidecars in the response. -An empty `BlobSidecar` is one that does not contain any blobs, but contains non-zero `beacon_block_root`, `beacon_block_slot` and a valid `kzg_aggregated_proof`. 
-Clients MAY NOT want to consider empty `BlobSidecar`s in rate limiting logic. - -The response MUST contain no more than `count` blobs sidecars. +The response MUST contain no more than `count * MAX_BLOBS_PER_BLOCK` blob sidecars. -Clients MUST respond with blobs sidecars from their view of the current fork choice --- that is, blobs sidecars as included by blocks from the single chain defined by the current head. +Clients MUST respond with blob sidecars from their view of the current fork choice +-- that is, blob sidecars as included by blocks from the single chain defined by the current head. Of note, blocks from slots before the finalization MUST lead to the finalized block reported in the `Status` handshake. -Clients MUST respond with blobs sidecars that are consistent from a single chain within the context of the request. +Clients MUST respond with blob sidecars that are consistent from a single chain within the context of the request. -After the initial blobs sidecar, clients MAY stop in the process of responding -if their fork choice changes the view of the chain in the context of the request. +After the initial blob sidecar, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request. 
## Design decision rationale From a5f61fc173cc4c8bf350bc72945f5cf3dc39bae5 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 15 Feb 2023 08:57:23 +0100 Subject: [PATCH 045/210] correct function --- specs/deneb/validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 1d4a7287e9..d867098744 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -97,7 +97,7 @@ def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blo block_parent_root=block.parent_root, blob=blob, kzg_commitment=block.body.blob_kzg_commitments[index], - kzg_proof=compute_kzg_proof(blob), + kzg_proof=compute_blob_kzg_proof(blob), ) for index, blob in enumerate(blobs) ] From f0dc126602041679ed50f3cc8d6f6efccc0fa0ab Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Wed, 15 Feb 2023 09:10:31 +0100 Subject: [PATCH 046/210] doctoc --- specs/deneb/beacon-chain.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md index c06e44f39b..ba0148d47f 100644 --- a/specs/deneb/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -11,6 +11,7 @@ - [Introduction](#introduction) - [Custom types](#custom-types) - [Constants](#constants) + - [Domain types](#domain-types) - [Blob](#blob) - [Preset](#preset) - [Execution](#execution) From 7637158a2fe8c54d36b1f69dd2932c5de9a2794e Mon Sep 17 00:00:00 2001 From: Ben Edgington Date: Wed, 15 Feb 2023 11:39:33 +0000 Subject: [PATCH 047/210] Change get_latest_attesting_balances() to get_weight() --- specs/phase0/fork-choice.md | 9 ++++----- .../core/pyspec/eth2spec/test/helpers/optimistic_sync.py | 2 +- .../eth2spec/test/phase0/fork_choice/test_on_block.py | 8 ++++---- 3 files changed, 9 insertions(+), 10 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index f2ccc24b9d..3176c1cd5d 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -18,7 +18,7 @@ 
- [`get_current_slot`](#get_current_slot) - [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start) - [`get_ancestor`](#get_ancestor) - - [`get_latest_attesting_balance`](#get_latest_attesting_balance) + - [`get_weight`](#get_weight) - [`filter_block_tree`](#filter_block_tree) - [`get_filtered_block_tree`](#get_filtered_block_tree) - [`get_head`](#get_head) @@ -174,10 +174,10 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: return root ``` -#### `get_latest_attesting_balance` +#### `get_weight` ```python -def get_latest_attesting_balance(store: Store, root: Root) -> Gwei: +def get_weight(store: Store, root: Root) -> Gwei: state = store.checkpoint_states[store.justified_checkpoint] active_indices = get_active_validator_indices(state, get_current_epoch(state)) attestation_score = Gwei(sum( @@ -197,7 +197,6 @@ def get_latest_attesting_balance(store: Store, root: Root) -> Gwei: committee_weight = get_total_active_balance(state) // SLOTS_PER_EPOCH proposer_score = (committee_weight * PROPOSER_SCORE_BOOST) // 100 return attestation_score + proposer_score - ``` #### `filter_block_tree` @@ -270,7 +269,7 @@ def get_head(store: Store) -> Root: return head # Sort by latest attesting balance with ties broken lexicographically # Ties broken by favoring block with lexicographically higher root - head = max(children, key=lambda root: (get_latest_attesting_balance(store, root), root)) + head = max(children, key=lambda root: (get_weight(store, root), root)) ``` #### `should_update_justified_checkpoint` diff --git a/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py b/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py index 6f42aa9bad..816c7a10b7 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py +++ b/tests/core/pyspec/eth2spec/test/helpers/optimistic_sync.py @@ -177,7 +177,7 @@ def get_opt_head_block_root(spec, mega_store): return head # Sort by latest attesting balance with ties broken lexicographically # Ties 
broken by favoring block with lexicographically higher root - head = max(children, key=lambda root: (spec.get_latest_attesting_balance(store, root), root)) + head = max(children, key=lambda root: (spec.get_weight(store, root), root)) def is_invalidated(mega_store, block_root): diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index eede246302..23514b325b 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -729,14 +729,14 @@ def test_proposer_boost(spec, state): on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block, test_steps) assert store.proposer_boost_root == spec.hash_tree_root(block) - assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) > 0 + assert spec.get_weight(store, spec.hash_tree_root(block)) > 0 # Ensure that boost is removed after slot is over time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT + spec.config.SECONDS_PER_SLOT) on_tick_and_append_step(spec, store, time, test_steps) assert store.proposer_boost_root == spec.Root() - assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) == 0 + assert spec.get_weight(store, spec.hash_tree_root(block)) == 0 next_slots(spec, state, 3) block = build_empty_block_for_next_slot(spec, state) @@ -747,14 +747,14 @@ def test_proposer_boost(spec, state): on_tick_and_append_step(spec, store, time, test_steps) yield from add_block(spec, store, signed_block, test_steps) assert store.proposer_boost_root == spec.hash_tree_root(block) - assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) > 0 + assert spec.get_weight(store, spec.hash_tree_root(block)) > 0 # Ensure that boost is removed after slot is over time = (store.genesis_time + block.slot * spec.config.SECONDS_PER_SLOT + 
spec.config.SECONDS_PER_SLOT) on_tick_and_append_step(spec, store, time, test_steps) assert store.proposer_boost_root == spec.Root() - assert spec.get_latest_attesting_balance(store, spec.hash_tree_root(block)) == 0 + assert spec.get_weight(store, spec.hash_tree_root(block)) == 0 test_steps.append({ 'checks': { From b26c136b34e7fd4cc35cd6c690bcf3af2389848f Mon Sep 17 00:00:00 2001 From: Stefan Bratanov Date: Wed, 15 Feb 2023 17:25:08 +0000 Subject: [PATCH 048/210] fix Deneb reference in presets --- presets/mainnet/deneb.yaml | 2 +- presets/minimal/deneb.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/presets/mainnet/deneb.yaml b/presets/mainnet/deneb.yaml index 3866e82ff7..ebe33f2d10 100644 --- a/presets/mainnet/deneb.yaml +++ b/presets/mainnet/deneb.yaml @@ -1,4 +1,4 @@ -# Mainnet preset - Phase0 +# Mainnet preset - Deneb # Misc # --------------------------------------------------------------- diff --git a/presets/minimal/deneb.yaml b/presets/minimal/deneb.yaml index dacacf5b84..e51b5587d0 100644 --- a/presets/minimal/deneb.yaml +++ b/presets/minimal/deneb.yaml @@ -1,4 +1,4 @@ -# Minimal preset - Phase0 +# Minimal preset - Deneb # Misc # --------------------------------------------------------------- From 48e7be7dd0e1ce10ce151f797dbfa58a31c76b2d Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Wed, 15 Feb 2023 18:23:04 +0000 Subject: [PATCH 049/210] Fix doctoc --- specs/deneb/polynomial-commitments.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 460fb82977..6b94e1d9ad 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -38,6 +38,7 @@ - [`verify_kzg_proof_impl`](#verify_kzg_proof_impl) - [`verify_kzg_proof_batch`](#verify_kzg_proof_batch) - [`compute_kzg_proof`](#compute_kzg_proof) + - [`compute_quotient_eval_within_domain`](#compute_quotient_eval_within_domain) - 
[`compute_kzg_proof_impl`](#compute_kzg_proof_impl) - [`compute_blob_kzg_proof`](#compute_blob_kzg_proof) - [`verify_blob_kzg_proof`](#verify_blob_kzg_proof) From 078d62e6ffe1f840e2d2ae9bc43d3b2e48c5926a Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Wed, 15 Feb 2023 19:48:58 +0000 Subject: [PATCH 050/210] Simplify compute_challenge --- specs/deneb/polynomial-commitments.md | 13 ++++--------- 1 file changed, 4 insertions(+), 9 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 6b94e1d9ad..afcf934fc7 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -231,21 +231,16 @@ def compute_challenge(blob: Blob, commitment: KZGCommitment) -> BLSFieldElement: """ Return the Fiat-Shamir challenge required by the rest of the protocol. - The Fiat-Shamir logic works as per the following pseudocode: """ - # Append the number of polynomials and the degree of each polynomial as a domain separator - num_polynomials = int.to_bytes(1, 8, ENDIANNESS) - degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 8, ENDIANNESS) - data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly + num_polynomials + # Append the degree of the polynomial as a domain separator + degree_poly = int.to_bytes(FIELD_ELEMENTS_PER_BLOB, 16, ENDIANNESS) + data = FIAT_SHAMIR_PROTOCOL_DOMAIN + degree_poly - # Append each polynomial which is composed by field elements data += blob - - # Append serialized G1 points data += commitment - # Transcript has been prepared: time to create the challenges + # Transcript has been prepared: time to create the challenge return hash_to_bls_field(data) ``` From c39fda19c6bfdb39054c4f3e02d2316886efaa04 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 16 Feb 2023 08:18:52 +0100 Subject: [PATCH 051/210] Apply suggestions from code review Co-authored-by: Danny Ryan Co-authored-by: Jimmy Chen --- specs/deneb/fork-choice.md | 5 +++-- specs/deneb/p2p-interface.md | 8 ++++---- 
specs/deneb/validator.md | 2 +- 3 files changed, 8 insertions(+), 7 deletions(-) diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 58c281a597..46403cbe99 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -24,7 +24,7 @@ This is the modification of the fork choice accompanying the Deneb upgrade. ## Helpers -#### `validate_blob_sidecars` +#### `validate_blobs` ```python def validate_blobs(expected_kzg_commitments: Sequence[KZGCommitment], @@ -47,7 +47,8 @@ The block MUST NOT be considered valid until all valid `Blob`s have been downloa ```python def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool: - # `retrieve_blobs_and_proofs` is implementation and context dependent, raises an exception if not available. It returns all the blobs for the given block root. + # `retrieve_blobs_and_proofs` is implementation and context dependent + # It returns all the blobs for the given block root, and raises an exception if not available # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 63a63feda6..258b9e3f1f 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -228,7 +228,7 @@ Requests blob sidecars in the slot range `[start_slot, start_slot + count)`, lea The response is unsigned, i.e. `BlobSidecarsByRange`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip. -Before consuming the next response chunk, the response reader SHOULD verify the blobs sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `validate_blobs_sidecar`. +Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted and correct w.r.t. 
the expected KZG commitments through `validate_blobs`. `BlobSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` window. @@ -255,11 +255,11 @@ to be fully compliant with `BlobsSidecarsByRange` requests. participating in the networking immediately, other peers MAY disconnect and/or temporarily ban such an un-synced or semi-synced client. -Clients MUST respond with at least the first blobs sidecar that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` sidecars. +Clients MUST respond with at least the first blob sidecar that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` sidecars. -The following blobs sidecars, where they exist, MUST be sent in consecutive `(slot, index)` order. +The following blob sidecars, where they exist, MUST be sent in consecutive `(slot, index)` order. -Clients MAY limit the number of blobs sidecars in the response. +Clients MAY limit the number of blob sidecars in the response. The response MUST contain no more than `count * MAX_BLOBS_PER_BLOCK` blob sidecars. diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index d867098744..45a8e5c834 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -104,7 +104,7 @@ def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blo ``` -Then `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the `blob_sidecar_{index}` topics according to its index. +Then for each sidecar, `signed_sidecar = SignedBlobSidecar(message=sidecar, signature=signature)` is constructed and published to the `blob_sidecar_{index}` topics according to its index. 
`signature` is obtained from: From 639ff9b2b0acfbba0e642e04da6efa76ad1f5292 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 16 Feb 2023 08:30:40 +0100 Subject: [PATCH 052/210] Update specs/deneb/p2p-interface.md Co-authored-by: Jimmy Chen --- specs/deneb/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 258b9e3f1f..2ba8de409f 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -38,7 +38,7 @@ The specification of these changes continues in the same format as the network s |------------------------------------------|-----------------------------------|---------------------------------------------------------------------| | `MAX_REQUEST_BLOCKS_DENEB` | `2**7` (= 128) | Maximum number of blocks in a single request | | `MAX_REQUEST_BLOB_SIDECARS` | `2**7` (= 128) | Maximum number of blob sidecars in a single request | -| `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blobs sidecars | +| `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars | ## Containers From 24a19bb886f7294f8b9c8ff990818936b044f5fa Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 16 Feb 2023 09:12:34 +0100 Subject: [PATCH 053/210] fixes * fight the test suite * clarify who orphans the block * must supply all blobs of a block in range request --- setup.py | 4 ++-- specs/deneb/fork-choice.md | 6 +++--- specs/deneb/p2p-interface.md | 23 ++++++++++++++--------- 3 files changed, 19 insertions(+), 14 deletions(-) diff --git a/setup.py b/setup.py index f87ed5a6cf..7b1c718b8c 100644 --- a/setup.py +++ b/setup.py @@ -653,9 +653,9 @@ def preparations(cls): @classmethod def sundry_functions(cls) -> str: return super().sundry_functions() + '\n\n' + ''' -def retrieve_blobs_sidecar(slot: Slot, 
beacon_block_root: Root) -> PyUnion[BlobsSidecar, str]: +def retrieve_blobs_and_proofs(slot: Slot, beacon_block_root: Root) -> PyUnion[BlobsSidecar, str]: # pylint: disable=unused-argument - return "TEST"''' + return ("TEST", "TEST")''' @classmethod def hardcoded_custom_type_dep_constants(cls, spec_object) -> str: diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 46403cbe99..a2866092d2 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -8,7 +8,7 @@ - [Introduction](#introduction) - [Containers](#containers) - [Helpers](#helpers) - - [`validate_blob_sidecars`](#validate_blob_sidecars) + - [`validate_blobs`](#validate_blobs) - [`is_data_available`](#is_data_available) - [Updated fork-choice handlers](#updated-fork-choice-handlers) - [`on_block`](#on_block) @@ -52,9 +52,9 @@ def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZ # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root) - # For testing, `retrieve_blobs_and_proofs` returns "TEST". + # For testing, `retrieve_blobs_and_proofs` returns ("TEST", "TEST"). # TODO: Remove it once we have a way to inject `BlobSidecar` into tests. 
- if isinstance(sidecar, str): + if isinstance(blobs, str): return True validate_blobs(expected_kzg_commitments, blobs, proofs) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 2ba8de409f..5e0b162070 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -14,6 +14,7 @@ The specification of these changes continues in the same format as the network s - [Containers](#containers) - [`BlobSidecar`](#blobsidecar) - [`SignedBlobSidecar`](#signedblobsidecar) + - [`BlobIdentifier`](#blobidentifier) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [Topics and messages](#topics-and-messages) - [Global topics](#global-topics) @@ -61,7 +62,15 @@ class BlobSidecar(Container): ```python class SignedBlobSidecar(Container): message: BlobSidecar - signature: Signature + signature: BLSSignature +``` + +### `BlobIdentifier` + +```python +class BlobIdentifier(Container): + block_root: Root + index: uint64 ``` ## The gossip domain: gossipsub @@ -104,7 +113,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). - _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.slot, sidecar.proposer_index, sidecar.index)`. - -- If full verification of the blob fails at a later processing stage, clients MUST clear the blob from this "seen" cache so as to allow a the valid blob to propagate. Block producers MAY orphan blocks if they have observed multiple blobs signed by the proposer for the same "seen" tuple. 
+ -- If full verification of the blob fails at a later processing stage, clients MUST clear the blob from this "seen" cache so as to allow the valid blob to propagate. The next block producer MAY orphan the block if they have observed multiple blobs signed by the proposer for the same "seen" tuple.
- _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`).
  If the `proposer_index` cannot immediately be verified against the expected shuffling,
  the sidecar MAY be queued for later processing while proposers for the block's branch are calculated --
  in such a case _do not_ `REJECT`, instead `IGNORE` this message.
@@ -164,12 +173,6 @@ New in deneb.
 
 Request Content:
 
-```python
-class BlobIdentifier(Container):
-    block_root: Root
-    index: uint64
-```
-
 ```
 (
   List[BlobIdentifier, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK]
 )
 ```
@@ -255,7 +258,9 @@ to be fully compliant with `BlobsSidecarsByRange` requests.
 participating in the networking immediately, other peers MAY
 disconnect and/or temporarily ban such an un-synced or semi-synced client.
 
-Clients MUST respond with at least the first blob sidecar that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` sidecars.
+Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` sidecars.
+
+Clients MUST include all blob sidecars of each block from which they include blob sidecars.
 
 The following blob sidecars, where they exist, MUST be sent in consecutive `(slot, index)` order.
From 5fe857b2096c6a903ac0c822cae66e2236f03feb Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Thu, 16 Feb 2023 09:20:40 +0100 Subject: [PATCH 054/210] fixes --- setup.py | 2 +- specs/deneb/p2p-interface.md | 2 +- specs/deneb/validator.md | 1 - 3 files changed, 2 insertions(+), 3 deletions(-) diff --git a/setup.py b/setup.py index 7b1c718b8c..666ba8afc1 100644 --- a/setup.py +++ b/setup.py @@ -653,7 +653,7 @@ def preparations(cls): @classmethod def sundry_functions(cls) -> str: return super().sundry_functions() + '\n\n' + ''' -def retrieve_blobs_and_proofs(slot: Slot, beacon_block_root: Root) -> PyUnion[BlobsSidecar, str]: +def retrieve_blobs_and_proofs(slot: Slot, beacon_block_root: Root) -> PyUnion[(Blob, KZGProof), (str, str)]: # pylint: disable=unused-argument return ("TEST", "TEST")''' diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 5e0b162070..163522ec3a 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -284,5 +284,5 @@ This "sidecar" design provides forward compatibility for further data increases with full sharding `is_data_available()` can be replaced by data-availability-sampling (DAS) thus avoiding all blobs being downloaded by all beacon nodes on the network. -Such sharding design may introduce an updated `BlobsSidecar` to identify the shard, +Such sharding design may introduce an updated `BlobSidecar` to identify the shard, but does not affect the `BeaconBlock` structure. diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 45a8e5c834..b29330ce57 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -52,7 +52,6 @@ def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFi ## Beacon chain responsibilities All validator responsibilities remain unchanged other than those noted below. -Namely, the blob handling and the addition of `SignedBeaconBlockAndBlobsSidecar`. 
### Block and sidecar proposal From f72a26cac6168fa85b3cc5bd2596c2f95ade073d Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Thu, 16 Feb 2023 11:02:58 +0100 Subject: [PATCH 055/210] Fix: typos --- sync/optimistic.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/sync/optimistic.md b/sync/optimistic.md index 79a5271c2e..14eb99fb11 100644 --- a/sync/optimistic.md +++ b/sync/optimistic.md @@ -375,7 +375,7 @@ Given all of this, we can say two things: justify an honest chain. 2. **BNs which are syncing can optimistically import transition blocks.** In this case a justified chain already exists blocks. The poison block would be - quickly reverted and would have no affect on liveness. + quickly reverted and would have no effect on liveness. Astute readers will notice that (2) contains a glaring assumption about network liveness. This is necessary because a node cannot feasibly ascertain that the @@ -408,13 +408,13 @@ Such a scenario requires manual intervention. An alternative to optimistic sync is to run a light client inside/alongside beacon nodes that mitigates the need for optimistic sync by providing -tip-of-chain blocks to the execution engine. However, light clients comes with +tip-of-chain blocks to the execution engine. However, light clients come with their own set of complexities. Relying on light clients may also restrict nodes from syncing from genesis, if they so desire. A notable thing about optimistic sync is that it's *optional*. Should an implementation decide to go the light-client route, then they can just ignore -optimistic sync all together. +optimistic sync altogether. ### What if `TERMINAL_BLOCK_HASH` is used? 
From f23ed0cdbc4d7590212653a460eb709350e7ed37 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 16 Feb 2023 21:11:18 +0800 Subject: [PATCH 056/210] Make linter happy --- setup.py | 2 +- specs/deneb/fork-choice.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setup.py b/setup.py index 031ac7457c..9c5488f126 100644 --- a/setup.py +++ b/setup.py @@ -653,7 +653,7 @@ def preparations(cls): @classmethod def sundry_functions(cls) -> str: return super().sundry_functions() + '\n\n' + ''' -def retrieve_blobs_and_proofs(slot: Slot, beacon_block_root: Root) -> PyUnion[(Blob, KZGProof), (str, str)]: +def retrieve_blobs_and_proofs(beacon_block_root: Root) -> PyUnion[Tuple[Blob, KZGProof], Tuple[str, str]]: # pylint: disable=unused-argument return ("TEST", "TEST")''' diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 2757e591ef..8fa08357a7 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -54,7 +54,7 @@ def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZ # For testing, `retrieve_blobs_and_proofs` returns ("TEST", "TEST"). # TODO: Remove it once we have a way to inject `BlobSidecar` into tests. 
- if isinstance(blobs, str): + if isinstance(blobs, str) or isinstance(proofs, str): return True validate_blobs(blob_kzg_commitments, blobs, proofs) From a7e45db9ac2b60a33e144444969ad3ac0aae3d4c Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 16 Feb 2023 22:09:57 +0800 Subject: [PATCH 057/210] Fix `verify_kzg_proof_batch` and the tests --- specs/deneb/fork-choice.md | 4 +-- specs/deneb/polynomial-commitments.md | 9 ++++--- ...lobs_sidecar.py => test_validate_blobs.py} | 26 ++++++++++--------- 3 files changed, 21 insertions(+), 18 deletions(-) rename tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/{test_validate_blobs_sidecar.py => test_validate_blobs.py} (54%) diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 8fa08357a7..e93eb54faf 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -33,9 +33,7 @@ def validate_blobs(expected_kzg_commitments: Sequence[KZGCommitment], assert len(expected_kzg_commitments) == len(blobs) assert len(blobs) == len(proofs) - # Clients MAY use `verify_blob_kzg_proof_multi` for efficiency - for commitment, blob, proof in zip(expected_kzg_commitments, blobs, proofs): - assert verify_blob_kzg_proof(commitment, blob, proof) + assert verify_blob_kzg_proof_batch(blobs, expected_kzg_commitments, proofs) ``` #### `is_data_available` diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index afcf934fc7..76affe6200 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -411,15 +411,18 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment], # Verify: e(sum r^i proof_i, [s]) == # e(sum r^i (commitment_i - [y_i]) + sum r^i z_i proof_i, [1]) proof_lincomb = g1_lincomb(proofs, r_powers) - proof_z_lincomb = g1_lincomb(proofs, [z * r_power for z, r_power in zip(zs, r_powers)]) + proof_z_lincomb = g1_lincomb( + proofs, + [BLSFieldElement((int(z) * int(r_power)) % BLS_MODULUS) for z, r_power in 
zip(zs, r_powers)], + ) C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y)) for commitment, y in zip(commitments, ys)] C_minus_y_as_KZGCommitments = [KZGCommitment(bls.G1_to_bytes48(x)) for x in C_minus_ys] C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers) return bls.pairing_check([ - [proof_lincomb, bls.neg(KZG_SETUP_G2[1])], - [bls.add(C_minus_y_lincomb, proof_z_lincomb), bls.G2] + [bls.bytes48_to_G1(proof_lincomb), bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2[1]))], + [bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2] ]) ``` diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs_sidecar.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py similarity index 54% rename from tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs_sidecar.py rename to tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py index 87ed9ff8ea..d9934c5ade 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs_sidecar.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py @@ -16,7 +16,7 @@ ) -def _run_validate_blobs_sidecar_test(spec, state, blob_count): +def _run_validate_blobs(spec, state, blob_count): block = build_empty_block_for_next_slot(spec, state) opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments @@ -24,30 +24,32 @@ def _run_validate_blobs_sidecar_test(spec, state, blob_count): block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) state_transition_and_sign_block(spec, state, block) - blobs_sidecar = spec.get_blobs_sidecar(block, blobs) - expected_commitments = [spec.blob_to_kzg_commitment(blobs[i]) for i in range(blob_count)] - 
spec.validate_blobs_sidecar(block.slot, block.hash_tree_root(), expected_commitments, blobs_sidecar) + # Also test the proof generation in `get_blob_sidecars` + blob_sidecars = spec.get_blob_sidecars(block, blobs) + blobs = [sidecar.blob for sidecar in blob_sidecars] + kzg_proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] + spec.validate_blobs(blob_kzg_commitments, blobs, kzg_proofs) @with_deneb_and_later @spec_state_test -def test_validate_blobs_sidecar_zero_blobs(spec, state): - _run_validate_blobs_sidecar_test(spec, state, blob_count=0) +def test_validate_blobs_zero_blobs(spec, state): + _run_validate_blobs(spec, state, blob_count=0) @with_deneb_and_later @spec_state_test -def test_validate_blobs_sidecar_one_blob(spec, state): - _run_validate_blobs_sidecar_test(spec, state, blob_count=1) +def test_validate_blobs_one_blob(spec, state): + _run_validate_blobs(spec, state, blob_count=1) @with_deneb_and_later @spec_state_test -def test_validate_blobs_sidecar_two_blobs(spec, state): - _run_validate_blobs_sidecar_test(spec, state, blob_count=2) +def test_validate_blobs_two_blobs(spec, state): + _run_validate_blobs(spec, state, blob_count=2) @with_deneb_and_later @spec_state_test -def test_validate_blobs_sidecar_max_blobs(spec, state): - _run_validate_blobs_sidecar_test(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK) +def test_validate_blobs_max_blobs(spec, state): + _run_validate_blobs(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK) From a562710fe6f6c15f93f06f6d40ba7599133747b5 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 17 Feb 2023 01:22:11 +0800 Subject: [PATCH 058/210] Fix `compute_quotient_eval_within_domain` overflow --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index afcf934fc7..593c1a4f3e 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -459,7 +459,7 
@@ def compute_quotient_eval_within_domain(z: BLSFieldElement, f_i = int(BLS_MODULUS) + int(polynomial[i]) - int(y) % BLS_MODULUS numerator = f_i * int(omega_i) % BLS_MODULUS denominator = int(z) * (int(BLS_MODULUS) + int(z) - int(omega_i)) % BLS_MODULUS - result += div(BLSFieldElement(numerator), BLSFieldElement(denominator)) + result += int(div(BLSFieldElement(numerator), BLSFieldElement(denominator))) return BLSFieldElement(result % BLS_MODULUS) ``` From 9dd7d2ba2f5b9bb1ce50494d1971d483764afcb6 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Fri, 17 Feb 2023 11:59:56 -0700 Subject: [PATCH 059/210] fix Blob pluralization in a few places --- specs/deneb/p2p-interface.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 163522ec3a..8357776abb 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -201,7 +201,7 @@ No more than `MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK` may be requested The response MUST consist of zero or more `response_chunk`. Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload. -Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob in the response. +Clients MUST support requesting sidecars since `minimum_request_epoch`, where `minimum_request_epoch = max(finalized_epoch, current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH)`. If any root in the request content references a block earlier than `minimum_request_epoch`, peers MAY respond with error code `3: ResourceUnavailable` or not include the blob in the response. 
Clients MUST respond with at least one sidecar, if they have it. Clients MAY limit the number of blocks and sidecars in the response. @@ -233,7 +233,7 @@ The response is unsigned, i.e. `BlobSidecarsByRange`, as the signature of the be Before consuming the next response chunk, the response reader SHOULD verify the blob sidecar is well-formatted and correct w.r.t. the expected KZG commitments through `validate_blobs`. -`BlobSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` window. +`BlobSidecarsByRange` is primarily used to sync blobs that may have been missed on gossip and to sync within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` window. The request MUST be encoded as an SSZ-container. @@ -241,18 +241,18 @@ The response MUST consist of zero or more `response_chunk`. Each _successful_ `response_chunk` MUST contain a single `BlobSidecar` payload. Clients MUST keep a record of signed blobs sidecars seen on the epoch range -`[max(current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]` +`[max(current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS, DENEB_FORK_EPOCH), current_epoch]` where `current_epoch` is defined by the current wall-clock time, and clients MUST support serving requests of blobs on this range. -Peers that are unable to reply to blob sidecar requests within the `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` +Peers that are unable to reply to blob sidecar requests within the `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epoch range SHOULD respond with error code `3: ResourceUnavailable`. Such peers that are unable to successfully reply to this range of requests MAY get descored or disconnected at any time. 
*Note*: The above requirement implies that nodes that start from a recent weak subjectivity checkpoint -MUST backfill the local blobs database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` -to be fully compliant with `BlobsSidecarsByRange` requests. +MUST backfill the local blobs database to at least epoch `current_epoch - MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` +to be fully compliant with `BlobSidecarsByRange` requests. *Note*: Although clients that bootstrap from a weak subjectivity checkpoint can begin participating in the networking immediately, other peers MAY From c1a2962b31ee17c9795308e690a568a26c2e8917 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Sat, 18 Feb 2023 15:09:43 +0000 Subject: [PATCH 060/210] Update polynomial-commitments.md --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 593c1a4f3e..0e076cd4ef 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -469,7 +469,7 @@ def compute_quotient_eval_within_domain(z: BLSFieldElement, ```python def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGProof: """ - Helper function for compute_kzg_proof() and compute_aggregate_kzg_proof(). + Helper function for compute_kzg_proof(). 
""" roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY) From 54d2559eb54db217844d30098662e0be3c8d9e5c Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Sat, 18 Feb 2023 17:45:16 +0100 Subject: [PATCH 061/210] remove producer reorg on multi-blob * also, use root/index for uniqueness --- specs/deneb/p2p-interface.md | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 8357776abb..b11e61ef65 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -112,8 +112,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` - _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). - _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. -- _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.slot, sidecar.proposer_index, sidecar.index)`. - -- If full verification of the blob fails at a later processing stage, clients MUST clear the blob from this "seen" cache so as to allow a the valid blob to propagate. The next block producer MAY orphan the block if they have observed multiple blobs signed by the proposer for the same "seen" tuple. +- _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). 
If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. From 0632a5a32ca0e7915a8d63600d47f950f4c64c31 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Mon, 20 Feb 2023 10:54:16 +0000 Subject: [PATCH 062/210] Update specs/deneb/polynomial-commitments.md Co-authored-by: Hsiao-Wei Wang --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 0e076cd4ef..31138a5efd 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -469,7 +469,7 @@ def compute_quotient_eval_within_domain(z: BLSFieldElement, ```python def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGProof: """ - Helper function for compute_kzg_proof(). + Helper function for `compute_kzg_proof()` and `compute_blob_kzg_proof()`. 
""" roots_of_unity_brp = bit_reversal_permutation(ROOTS_OF_UNITY) From 83cf02f66818b71256d7c2d6cf41f66118cf4951 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Mon, 20 Feb 2023 10:57:39 +0000 Subject: [PATCH 063/210] Remove repeated computation --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 593c1a4f3e..6978317d7d 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -479,7 +479,7 @@ def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGPro # For all x_i, compute (x_i - z) denominator_poly = [BLSFieldElement((int(x) - int(z)) % BLS_MODULUS) - for x in bit_reversal_permutation(ROOTS_OF_UNITY)] + for x in roots_of_unity_brp] # Compute the quotient polynomial directly in evaluation form quotient_polynomial = [BLSFieldElement(0)] * FIELD_ELEMENTS_PER_BLOB From 4e2a9920f1e1f1f7496289161fa7d830bb5b1d0e Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 20 Feb 2023 12:15:53 +0100 Subject: [PATCH 064/210] Update specs/deneb/p2p-interface.md Co-authored-by: g11tech --- specs/deneb/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index b11e61ef65..531161b4e9 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -70,7 +70,7 @@ class SignedBlobSidecar(Container): ```python class BlobIdentifier(Container): block_root: Root - index: uint64 + index: BlobIndex ``` ## The gossip domain: gossipsub From ac0ec660d39e9d2470c0efb6be8ad53815085833 Mon Sep 17 00:00:00 2001 From: Jacek Sieka Date: Mon, 20 Feb 2023 16:35:11 +0100 Subject: [PATCH 065/210] add parent validation requirement sama as block --- specs/deneb/p2p-interface.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 
531161b4e9..040d594dd2 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -111,6 +111,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future blocks for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` - _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). +- _[REJECT]_ The blob's block's parent (defined by `sidecar.block_parent_root`) passes validation. - _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). 
From dff740752b9b0f20792e242c2e789cc88f1fe787 Mon Sep 17 00:00:00 2001 From: djrtwo Date: Mon, 20 Feb 2023 10:07:24 -0700 Subject: [PATCH 066/210] add deposit+bls_change test --- .../test/capella/sanity/test_blocks.py | 43 ++++++++++++++++++- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/capella/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/capella/sanity/test_blocks.py index 079990e3e1..d62e458be6 100644 --- a/tests/core/pyspec/eth2spec/test/capella/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/capella/sanity/test_blocks.py @@ -37,7 +37,7 @@ @with_capella_and_later @spec_state_test -def test_success_bls_change(spec, state): +def test_bls_change(spec, state): index = 0 signed_address_change = get_signed_address_change(spec, state, validator_index=index) pre_credentials = state.validators[index].withdrawal_credentials @@ -60,7 +60,46 @@ def test_success_bls_change(spec, state): @with_capella_and_later @spec_state_test -def test_success_exit_and_bls_change(spec, state): +def test_deposit_and_bls_change(spec, state): + initial_registry_len = len(state.validators) + initial_balances_len = len(state.balances) + + validator_index = len(state.validators) + amount = spec.MAX_EFFECTIVE_BALANCE + deposit = prepare_state_and_deposit(spec, state, validator_index, amount, signed=True) + + signed_address_change = get_signed_address_change( + spec, state, + validator_index=validator_index, + withdrawal_pubkey=deposit.data.pubkey, # Deposit helper defaults to use pubkey as withdrawal credential + ) + + deposit_credentials = deposit.data.withdrawal_credentials + assert deposit_credentials[:1] == spec.BLS_WITHDRAWAL_PREFIX + + yield 'pre', state + + block = build_empty_block_for_next_slot(spec, state) + block.body.deposits.append(deposit) + block.body.bls_to_execution_changes.append(signed_address_change) + + signed_block = state_transition_and_sign_block(spec, state, block) + + yield 'blocks', [signed_block] + 
yield 'post', state + + assert len(state.validators) == initial_registry_len + 1 + assert len(state.balances) == initial_balances_len + 1 + validator_credentials = state.validators[validator_index].withdrawal_credentials + assert deposit_credentials != validator_credentials + assert validator_credentials[:1] == spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX + assert validator_credentials[1:12] == b'\x00' * 11 + assert validator_credentials[12:] == signed_address_change.message.to_execution_address + + +@with_capella_and_later +@spec_state_test +def test_exit_and_bls_change(spec, state): # move state forward SHARD_COMMITTEE_PERIOD epochs to allow for exit state.slot += spec.config.SHARD_COMMITTEE_PERIOD * spec.SLOTS_PER_EPOCH From 95401cf6e46aa9ba47d78e9a50ccc2ca87e4e1ec Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 26 Jan 2023 19:30:49 +1100 Subject: [PATCH 067/210] Clarify context bytes in the RPC methods in 4844 --- specs/deneb/p2p-interface.md | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 040d594dd2..ea29eb7f4b 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -171,6 +171,14 @@ No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time. New in deneb. +The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: + +[1]: # (eth2spec: skip) + +| `fork_version` | Chunk SSZ type | +|--------------------------|-------------------------------| +| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` | + Request Content: ``` @@ -212,6 +220,14 @@ Clients MAY limit the number of blocks and sidecars in the response. New in deneb. 
+The `` field is calculated as `context = compute_fork_digest(fork_version, genesis_validators_root)`: + +[1]: # (eth2spec: skip) + +| `fork_version` | Chunk SSZ type | +|--------------------------|-------------------------------| +| `DENEB_FORK_VERSION` | `deneb.BlobSidecar` | + Request Content: ``` ( From 7ff627e03290e2706d811c3915256f45351f3151 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 21 Feb 2023 01:14:46 +0800 Subject: [PATCH 068/210] bump VERSION.txt to 1.3.0-rc.3 --- tests/core/pyspec/eth2spec/VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index 1d074f43e5..99aab26b29 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.3.0-rc.2 +1.3.0-rc.3 From d5c7474d4d50982f9b6b714b0ba2a03103d0e1be Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 21 Feb 2023 17:31:27 +0600 Subject: [PATCH 069/210] Move EIP6110 to features --- specs/{ => _features}/eip6110/beacon-chain.md | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename specs/{ => _features}/eip6110/beacon-chain.md (100%) diff --git a/specs/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md similarity index 100% rename from specs/eip6110/beacon-chain.md rename to specs/_features/eip6110/beacon-chain.md From 9391f3ccfca279ec6d02e9b689b5b77c949da1e1 Mon Sep 17 00:00:00 2001 From: kasey Date: Tue, 21 Feb 2023 21:48:21 -0600 Subject: [PATCH 070/210] fix MAX_REQUEST_BLOBS_SIDECARS typo --- specs/deneb/p2p-interface.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index ea29eb7f4b..378a65fdc2 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -183,7 +183,7 @@ Request Content: ``` ( - List[BlobIdentifier, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK] + List[BlobIdentifier, 
MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK] ) ``` @@ -191,7 +191,7 @@ Response Content: ``` ( - List[BlobSidecar, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK] + List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK] ) ``` @@ -202,7 +202,7 @@ It may be less in the case that the responding peer is missing blocks or sidecar The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip. -No more than `MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK` may be requested at a time. +No more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` may be requested at a time. `BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing). From 08c7287387d456b96bfe952ab4952c6be1af1612 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 22 Feb 2023 18:33:05 +0600 Subject: [PATCH 071/210] Get rid of pending_deposits queue --- specs/_features/eip6110/beacon-chain.md | 254 ++++++++++++------------ 1 file changed, 130 insertions(+), 124 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 4a95fa95f7..3c81cfb891 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -7,6 +7,8 @@ - [Introduction](#introduction) +- [Constants](#constants) + - [Misc](#misc) - [Preset](#preset) - [State list lengths](#state-list-lengths) - [Execution](#execution) @@ -19,15 +21,12 @@ - [`ExecutionPayloadHeader`](#executionpayloadheader) - [`BeaconState`](#beaconstate) - [Beacon chain state transition function](#beacon-chain-state-transition-function) - - [Epoch processing](#epoch-processing) - - [Helper functions](#helper-functions) - - [New `get_validator_from_indexed_deposit_data`](#new-get_validator_from_indexed_deposit_data) - - [New 
`apply_indexed_deposit_data`](#new-apply_indexed_deposit_data) - - [New `process_pending_deposits`](#new-process_pending_deposits) - [Block processing](#block-processing) - - [New `process_deposit_receipts`](#new-process_deposit_receipts) - - [Modified `process_execution_payload`](#modified-process_execution_payload) - [Modified `process_operations`](#modified-process_operations) + - [New `get_validator_from_deposit_receipt`](#new-get_validator_from_deposit_receipt) + - [New `process_deposit_receipt`](#new-process_deposit_receipt) + - [Modified `process_deposit`](#modified-process_deposit) + - [Modified `process_execution_payload`](#modified-process_execution_payload) - [Testing](#testing) @@ -40,6 +39,16 @@ This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum *Note:* This specification is under development and should be used with care. +## Constants + +The following values are (non-configurable) constants used throughout the specification. + +### Misc + +| Name | Value | +| - | - | +| `NOT_SET_DEPOSIT_RECEIPT_START_INDEX` | `2**64 - 1` | + ## Preset ### State list lengths @@ -174,43 +183,66 @@ class BeaconState(Container): next_withdrawal_index: WithdrawalIndex next_withdrawal_validator_index: ValidatorIndex # EIP-6110 - pending_deposits: List[IndexedDepositData, PENDING_DEPOSITS_LIMIT] + deposit_receipt_start_index: uint64 + deposit_receipt_next_index: uint64 ``` ## Beacon chain state transition function -### Epoch processing +### Block processing ```python -def process_epoch(state: BeaconState) -> None: - process_justification_and_finalization(state) - process_inactivity_updates(state) - process_rewards_and_penalties(state) - # Run before registry and after finality updates - process_pending_deposits(state) # [New in EIP-6110] - process_registry_updates(state) - process_slashings(state) - process_eth1_data_reset(state) - process_effective_balance_updates(state) - process_slashings_reset(state) - process_randao_mixes_reset(state) - 
process_historical_roots_update(state) - process_participation_flag_updates(state) - process_sync_committee_updates(state) +def process_block(state: BeaconState, block: BeaconBlock) -> None: + process_block_header(state, block) + if is_execution_enabled(state, block.body): + process_withdrawals(state, block.body.execution_payload) + process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP-6110] + process_randao(state, block.body) + process_eth1_data(state, block.body) + process_operations(state, block.body) # [Modified in EIP-6110] + process_sync_aggregate(state, block.body.sync_aggregate) ``` -#### Helper functions +#### Modified `process_operations` + +*Note*: The function `process_operations` is modified to process `DepositReceipt` operations included in the payload. + +```python +def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: + # Prevent potential underflow introduced by mixing two deposit processing flows + unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in EIP-6110] + # Verify that outstanding deposits are processed up to the maximum number of deposits + assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in EIP-6110] + + def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: + for operation in operations: + fn(state, operation) + + for_ops(body.proposer_slashings, process_proposer_slashing) + for_ops(body.attester_slashings, process_attester_slashing) + for_ops(body.attestations, process_attestation) + for_ops(body.deposits, process_deposit) # [Modified in EIP-6110] + for_ops(body.voluntary_exits, process_voluntary_exit) + for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) + + # [New in EIP-6110] + if is_execution_enabled(state, body): + for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) + # Signify the end of transition to 
in-protocol deposits logic + if state.eth1_deposit_index >= state.deposit_receipt_start_index: + state.eth1_deposit_index = state.deposit_receipt_next_index ``` -##### New `get_validator_from_indexed_deposit_data` +#### New `get_validator_from_deposit_receipt` ```python -def get_validator_from_indexed_deposit_data(indexed_deposit_data: IndexedDepositData) -> Validator: - amount = indexed_deposit_data.amount +def get_validator_from_deposit_receipt(deposit_receipt: DepositReceipt) -> Validator: + amount = deposit_receipt.amount effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) return Validator( - pubkey=indexed_deposit_data.pubkey, - withdrawal_credentials=indexed_deposit_data.withdrawal_credentials, + pubkey=deposit_receipt.pubkey, + withdrawal_credentials=deposit_receipt.withdrawal_credentials, activation_eligibility_epoch=FAR_FUTURE_EPOCH, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, @@ -219,91 +251,87 @@ def get_validator_from_indexed_deposit_data(indexed_deposit_data: IndexedDeposit ) ``` -##### New `apply_indexed_deposit_data` +#### New `process_deposit_receipt` ```python -def apply_indexed_deposit_data(state: BeaconState, indexed_deposit_data: IndexedDepositData) -> None: - pubkey = indexed_deposit_data.pubkey - amount = indexed_deposit_data.amount - validator_pubkeys = [v.pubkey for v in state.validators] +def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None: + # Set deposit receipt start index + if state.deposit_receipt_start_index == NOT_SET_DEPOSIT_RECEIPT_START_INDEX: + state.deposit_receipt_start_index = deposit_receipt.index + + state.deposit_receipt_next_index = deposit_receipt.index + 1 + + pubkey = deposit_receipt.pubkey + amount = deposit_receipt.amount + validator_pubkeys = [validator.pubkey for validator in state.validators] if pubkey not in validator_pubkeys: - # Add validator and balance entries - 
state.validators.append(get_validator_from_indexed_deposit_data(indexed_deposit_data)) - state.balances.append(amount) + # Verify the deposit signature (proof of possession) which is not checked by the deposit contract + deposit_message = DepositMessage( + pubkey=deposit_receipt.pubkey, + withdrawal_credentials=deposit_receipt.withdrawal_credentials, + amount=deposit_receipt.amount, + ) + domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks + signing_root = compute_signing_root(deposit_message, domain) + # Initialize validator if the deposit signature is valid + if bls.Verify(pubkey, signing_root, deposit_receipt.signature): + state.validators.append(get_validator_from_deposit_receipt(deposit_receipt)) + state.balances.append(amount) + state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) + state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) + state.inactivity_scores.append(uint64(0)) else: # Increase balance by deposit amount index = ValidatorIndex(validator_pubkeys.index(pubkey)) increase_balance(state, index, amount) ``` -#### New `process_pending_deposits` - -```python -def process_pending_deposits(state: BeaconState) -> None: - finalized_epoch = state.finalized_checkpoint.epoch - - next_pending_deposit_index = 0 - for pending_deposit in state.pending_deposits: - # Preserve deposits per epoch boundary - if next_pending_deposit_index >= MAX_DEPOSITS * SLOTS_PER_EPOCH: - break - - # Apply only finalized deposits - if pending_deposit.epoch >= finalized_epoch: - break +#### Modified `process_deposit` - # Skip already applied deposits - if pending_deposit.index >= state.eth1_deposit_index: - apply_indexed_deposit_data(state, pending_deposit) - state.eth1_deposit_index += 1 - - next_pending_deposit_index += 1 - - state.pending_deposit = state.pending_deposit[next_pending_deposit_index:] -``` -### Block processing *Note*: The function `process_deposit` is modified to prevent deposits from being 
processed in the second time (due to `process_deposit_receipt`). ```python -def process_block(state: BeaconState, block: BeaconBlock) -> None: - process_block_header(state, block) - if is_execution_enabled(state, block.body): - process_withdrawals(state, block.body.execution_payload) - process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP-6110] - process_deposit_receipts(state, block.body.execution_payload) # [New in EIP-6110] - process_randao(state, block.body) - process_eth1_data(state, block.body) - process_operations(state, block.body) # [Modified in EIP-6110] - process_sync_aggregate(state, block.body.sync_aggregate) -``` - -#### New `process_deposit_receipts` +def process_deposit(state: BeaconState, deposit: Deposit) -> None: + # Skip already processed deposits + if state.eth1_deposit_index >= state.deposit_receipt_start_index: + return + + # Verify the Merkle branch + assert is_valid_merkle_branch( + leaf=hash_tree_root(deposit.data), + branch=deposit.proof, + depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in + index=state.eth1_deposit_index, + root=state.eth1_data.deposit_root, + ) -```python -def process_deposit_receipts(state: BeaconState, payload: ExecutionPayload) -> None: - current_epoch = get_current_epoch(state) - - for deposit_receipt in payload.deposit_receipts: - if pubkey not in validator_pubkeys: - # Verify the deposit signature (proof of possession) which is not checked by the deposit contract - deposit_message = DepositMessage( - pubkey=deposit_receipt.pubkey, - withdrawal_credentials=deposit_receipt.withdrawal_credentials, - amount=deposit_receipt.amount, - ) - domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks - signing_root = compute_signing_root(deposit_message, domain) - if not bls.Verify(pubkey, signing_root, deposit.data.signature): - continue - - pending_deposit = IndexedDepositData( - pubkey=deposit_receipt.pubkey, - 
withdrawal_credentials=deposit_receipt.withdrawal_credentials, - amount=deposit_receipt.amount, - index=deposit_receipt.index, - epoch=current_epoch, + # Deposits must be processed in order + state.eth1_deposit_index += 1 + + pubkey = deposit.data.pubkey + amount = deposit.data.amount + validator_pubkeys = [validator.pubkey for validator in state.validators] + if pubkey not in validator_pubkeys: + # Verify the deposit signature (proof of possession) which is not checked by the deposit contract + deposit_message = DepositMessage( + pubkey=deposit.data.pubkey, + withdrawal_credentials=deposit.data.withdrawal_credentials, + amount=deposit.data.amount, ) - state.pending_deposits.append(pending_deposit) + domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks + signing_root = compute_signing_root(deposit_message, domain) + # Initialize validator if the deposit signature is valid + if bls.Verify(pubkey, signing_root, deposit.data.signature): + state.validators.append(get_validator_from_deposit(deposit)) + state.balances.append(amount) + # [New in Altair] + state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) + state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) + state.inactivity_scores.append(uint64(0)) + else: + # Increase balance by deposit amount + index = ValidatorIndex(validator_pubkeys.index(pubkey)) + increase_balance(state, index, amount) ``` #### Modified `process_execution_payload` @@ -342,29 +370,6 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe ) ``` -#### Modified `process_operations` - -*Note*: The function `process_operations` is modified to process `BLSToExecutionChange` operations included in the block. 
- -```python -def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: - # Prevent potential underflow introduced by mixing two deposit processing flows - unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in EIP-6110] - # Verify that outstanding deposits are processed up to the maximum number of deposits - assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in EIP-6110] - - def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: - for operation in operations: - fn(state, operation) - - for_ops(body.proposer_slashings, process_proposer_slashing) - for_ops(body.attester_slashings, process_attester_slashing) - for_ops(body.attestations, process_attestation) - for_ops(body.deposits, process_deposit) - for_ops(body.voluntary_exits, process_voluntary_exit) - for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) # [New in Capella] -``` - ## Testing *Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-6110 testing only. 
@@ -389,6 +394,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy + deposit_receipt_start_index = NOT_SET_DEPOSIT_RECEIPT_START_INDEX, ) # Process deposits From 837233a1be3350032a058df6730f0af910e05179 Mon Sep 17 00:00:00 2001 From: Henri DF Date: Wed, 22 Feb 2023 16:50:56 +0100 Subject: [PATCH 072/210] Fix reference to block->sidecar (This was probably a cut-n-paste from block validation) --- specs/deneb/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index ea29eb7f4b..840ea18630 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -108,9 +108,9 @@ This topic is used to propagate signed blob sidecars, one for each sidecar index The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`: - _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`. -- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future blocks for processing at the appropriate slot). +- _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. 
validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` -- _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue blocks for processing once the parent block is retrieved). +- _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The blob's block's parent (defined by `sidecar.block_parent_root`) passes validation. - _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. From d8111d7d3b9a021d3f4c6ea85a81c467669b55f2 Mon Sep 17 00:00:00 2001 From: Henri DF Date: Wed, 22 Feb 2023 16:51:56 +0100 Subject: [PATCH 073/210] Refer to "sidecar" consistently --- specs/deneb/p2p-interface.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 840ea18630..52ad411f89 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -110,8 +110,8 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`. - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. 
validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` -- _[IGNORE]_ The blob's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). -- _[REJECT]_ The blob's block's parent (defined by `sidecar.block_parent_root`) passes validation. +- _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). +- _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation. - _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). From 970da9efd2b5b1bdd46dda6b7f676e7d8cffdc90 Mon Sep 17 00:00:00 2001 From: Henri DF Date: Wed, 22 Feb 2023 17:15:39 +0100 Subject: [PATCH 074/210] Clean up max request blobs constants The spec currently defines `MAX_REQUEST_BLOB_SIDECARS` as the "maximum number of blob sidecars in a single request", but then later in the RPC description defines the max is `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK`. Clean this up by defining `MAX_REQUEST_BLOB_SIDECARS` to be the actual max. 
--- specs/deneb/p2p-interface.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index ea29eb7f4b..22d8cf94c4 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -38,7 +38,7 @@ The specification of these changes continues in the same format as the network s | Name | Value | Description | |------------------------------------------|-----------------------------------|---------------------------------------------------------------------| | `MAX_REQUEST_BLOCKS_DENEB` | `2**7` (= 128) | Maximum number of blocks in a single request | -| `MAX_REQUEST_BLOB_SIDECARS` | `2**7` (= 128) | Maximum number of blob sidecars in a single request | +| `MAX_REQUEST_BLOB_SIDECARS` | `MAX_REQUEST_BLOCKS_DENEB * MAX_BLOBS_PER_BLOCK` | Maximum number of blob sidecars in a single request | | `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` | `2**12` (= 4096 epochs, ~18 days) | The minimum epoch range over which a node must serve blob sidecars | ## Containers @@ -183,7 +183,7 @@ Request Content: ``` ( - List[BlobIdentifier, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK] + List[BlobIdentifier, MAX_REQUEST_BLOBS_SIDECARS] ) ``` @@ -191,7 +191,7 @@ Response Content: ``` ( - List[BlobSidecar, MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK] + List[BlobSidecar, MAX_REQUEST_BLOBS_SIDECARS] ) ``` @@ -202,7 +202,7 @@ It may be less in the case that the responding peer is missing blocks or sidecar The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip. -No more than `MAX_REQUEST_BLOBS_SIDECARS * MAX_BLOBS_PER_BLOCK` may be requested at a time. +No more than `MAX_REQUEST_BLOBS_SIDECARS` may be requested at a time. `BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing). 
@@ -239,7 +239,7 @@ Request Content: Response Content: ``` ( - List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK] + List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS] ) ``` @@ -274,7 +274,7 @@ to be fully compliant with `BlobSidecarsByRange` requests. participating in the networking immediately, other peers MAY disconnect and/or temporarily ban such an un-synced or semi-synced client. -Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS * MAX_BLOBS_PER_BLOCK` sidecars. +Clients MUST respond with at least the blob sidecars of the first blob-carrying block that exists in the range, if they have it, and no more than `MAX_REQUEST_BLOB_SIDECARS` sidecars. Clients MUST include all blob sidecars of each block from which they include blob sidecars. From 23c10cfd7fc7c72127b2a7bc2f7e3561c978b511 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 23 Feb 2023 13:53:15 +0600 Subject: [PATCH 075/210] Remove state.deposit_receipt_next_index variable --- specs/_features/eip6110/beacon-chain.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 3c81cfb891..df92fb103a 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -184,7 +184,6 @@ class BeaconState(Container): next_withdrawal_validator_index: ValidatorIndex # EIP-6110 deposit_receipt_start_index: uint64 - deposit_receipt_next_index: uint64 ``` ## Beacon chain state transition function @@ -228,9 +227,6 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # [New in EIP-6110] if is_execution_enabled(state, body): for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) - # Signify the end of transition to in-protocol deposits logic - if state.eth1_deposit_index >= 
state.deposit_receipt_start_index - state.eth1_deposit_index = state.deposit_receipt_next_index ``` #### New `get_validator_from_deposit_receipt` @@ -259,7 +255,9 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) if state.deposit_receipt_start_index == NOT_SET_DEPOSIT_RECEIPT_START_INDEX: state.deposit_receipt_start_index = deposit_receipt.index - state.deposit_receipt_next_index = deposit_receipt.index + 1 + # Signify the end of transition to in-protocol deposit logic + if state.eth1_deposit_index >= state.deposit_receipt_start_index + state.eth1_deposit_index = deposit_receipt.index + 1 pubkey = deposit_receipt.pubkey amount = deposit_receipt.amount @@ -294,6 +292,7 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Skip already processed deposits if state.eth1_deposit_index >= state.deposit_receipt_start_index: + state.eth1_deposit_index += 1 return # Verify the Merkle branch From b22c89244a88a20853c82fbc2df00af545728e81 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 23 Feb 2023 14:09:01 +0600 Subject: [PATCH 076/210] Cosmetic renaming --- specs/_features/eip6110/beacon-chain.md | 14 +++++++------- 1 file changed, 7 insertions(+), 7 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index df92fb103a..e7e0630b06 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -47,7 +47,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | -| `NOT_SET_DEPOSIT_RECEIPT_START_INDEX` | `2**64 - 1` | +| `NOT_SET_DEPOSIT_RECEIPTS_START_INDEX` | `2**64 - 1` | ## Preset @@ -183,7 +183,7 @@ class BeaconState(Container): next_withdrawal_index: WithdrawalIndex next_withdrawal_validator_index: ValidatorIndex # EIP-6110 - deposit_receipt_start_index: uint64 + 
deposit_receipts_start_index: uint64 ``` ## Beacon chain state transition function @@ -252,11 +252,11 @@ def get_validator_from_deposit_receipt(deposit_receipt: DepositReceipt) -> Valid ```python def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None: # Set deposit receipt start index - if state.deposit_receipt_start_index == NOT_SET_DEPOSIT_RECEIPT_START_INDEX: - state.deposit_receipt_start_index = deposit_receipt.index + if state.deposit_receipts_start_index == NOT_SET_DEPOSIT_RECEIPTS_START_INDEX: + state.deposit_receipts_start_index = deposit_receipt.index # Signify the end of transition to in-protocol deposit logic - if state.eth1_deposit_index >= state.deposit_receipt_start_index + if state.eth1_deposit_index >= state.deposit_receipts_start_index state.eth1_deposit_index = deposit_receipt.index + 1 pubkey = deposit_receipt.pubkey @@ -291,7 +291,7 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) ```python def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Skip already processed deposits - if state.eth1_deposit_index >= state.deposit_receipt_start_index: + if state.eth1_deposit_index >= state.deposit_receipts_start_index: state.eth1_deposit_index += 1 return @@ -393,7 +393,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy - deposit_receipt_start_index = NOT_SET_DEPOSIT_RECEIPT_START_INDEX, + deposit_receipts_start_index = NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, ) # Process deposits From a1daac098ce4458acad75d0385a89e7146d3d20b Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 23 Feb 2023 22:34:32 +0800 Subject: [PATCH 077/210] Make EIP-6110 executable and fix linter errors --- .gitignore | 1 + 
setup.py | 29 ++++- specs/_features/eip6110/beacon-chain.md | 16 +-- specs/_features/eip6110/fork.md | 142 ++++++++++++++++++++++++ 4 files changed, 176 insertions(+), 12 deletions(-) create mode 100644 specs/_features/eip6110/fork.md diff --git a/.gitignore b/.gitignore index c49e6c006c..c56a658ce2 100644 --- a/.gitignore +++ b/.gitignore @@ -20,6 +20,7 @@ tests/core/pyspec/eth2spec/altair/ tests/core/pyspec/eth2spec/bellatrix/ tests/core/pyspec/eth2spec/capella/ tests/core/pyspec/eth2spec/deneb/ +tests/core/pyspec/eth2spec/eip6110/ # coverage reports .htmlcov diff --git a/setup.py b/setup.py index 9c5488f126..b1158e440f 100644 --- a/setup.py +++ b/setup.py @@ -47,6 +47,7 @@ def installPackage(package: str): BELLATRIX = 'bellatrix' CAPELLA = 'capella' DENEB = 'deneb' +EIP6110 = 'eip6110' # The helper functions that are used when defining constants @@ -667,9 +668,22 @@ def hardcoded_custom_type_dep_constants(cls, spec_object) -> str: return {**super().hardcoded_custom_type_dep_constants(spec_object), **constants} +# +# EIP6110SpecBuilder +# +class EIP6110SpecBuilder(CapellaSpecBuilder): + fork: str = EIP6110 + + @classmethod + def imports(cls, preset_name: str): + return super().imports(preset_name) + f''' +from eth2spec.capella import {preset_name} as capella +''' + + spec_builders = { builder.fork: builder - for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder) + for builder in (Phase0SpecBuilder, AltairSpecBuilder, BellatrixSpecBuilder, CapellaSpecBuilder, DenebSpecBuilder, EIP6110SpecBuilder) } @@ -968,14 +982,14 @@ def finalize_options(self): if len(self.md_doc_paths) == 0: print("no paths were specified, using default markdown file paths for pyspec" " build (spec fork: %s)" % self.spec_fork) - if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB): + if self.spec_fork in (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110): self.md_doc_paths = """ specs/phase0/beacon-chain.md 
specs/phase0/fork-choice.md specs/phase0/validator.md specs/phase0/weak-subjectivity.md """ - if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB): + if self.spec_fork in (ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110): self.md_doc_paths += """ specs/altair/light-client/full-node.md specs/altair/light-client/light-client.md @@ -987,7 +1001,7 @@ def finalize_options(self): specs/altair/validator.md specs/altair/p2p-interface.md """ - if self.spec_fork in (BELLATRIX, CAPELLA, DENEB): + if self.spec_fork in (BELLATRIX, CAPELLA, DENEB, EIP6110): self.md_doc_paths += """ specs/bellatrix/beacon-chain.md specs/bellatrix/fork.md @@ -996,7 +1010,7 @@ def finalize_options(self): specs/bellatrix/p2p-interface.md sync/optimistic.md """ - if self.spec_fork in (CAPELLA, DENEB): + if self.spec_fork in (CAPELLA, DENEB, EIP6110): self.md_doc_paths += """ specs/capella/light-client/fork.md specs/capella/light-client/full-node.md @@ -1021,6 +1035,11 @@ def finalize_options(self): specs/deneb/p2p-interface.md specs/deneb/validator.md """ + if self.spec_fork == EIP6110: + self.md_doc_paths += """ + specs/_features/eip6110/beacon-chain.md + specs/_features/eip6110/fork.md + """ if len(self.md_doc_paths) == 0: raise Exception('no markdown files specified, and spec fork "%s" is unknown', self.spec_fork) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index e7e0630b06..1daa44fb14 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -182,6 +182,8 @@ class BeaconState(Container): # Withdrawals next_withdrawal_index: WithdrawalIndex next_withdrawal_validator_index: ValidatorIndex + # Deep history valid from Capella onwards + historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] # EIP-6110 deposit_receipts_start_index: uint64 ``` @@ -256,7 +258,7 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) state.deposit_receipts_start_index = 
deposit_receipt.index # Signify the end of transition to in-protocol deposit logic - if state.eth1_deposit_index >= state.deposit_receipts_start_index + if state.eth1_deposit_index >= state.deposit_receipts_start_index: state.eth1_deposit_index = deposit_receipt.index + 1 pubkey = deposit_receipt.pubkey @@ -265,15 +267,15 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract deposit_message = DepositMessage( - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - amount=deposit.data.amount, + pubkey=deposit_receipt.pubkey, + withdrawal_credentials=deposit_receipt.withdrawal_credentials, + amount=deposit_receipt.amount, ) domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks signing_root = compute_signing_root(deposit_message, domain) # Initialize validator if the deposit signature is valid - if bls.Verify(pubkey, signing_root, deposit.data.signature): - state.validators.append(get_validator_from_deposit_receipt(deposit)) + if bls.Verify(pubkey, signing_root, deposit_receipt.signature): + state.validators.append(get_validator_from_deposit_receipt(deposit_receipt)) state.balances.append(amount) state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) @@ -393,7 +395,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy - deposit_receipts_start_index = NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, + deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, ) # 
Process deposits diff --git a/specs/_features/eip6110/fork.md b/specs/_features/eip6110/fork.md new file mode 100644 index 0000000000..3a8de1b8dc --- /dev/null +++ b/specs/_features/eip6110/fork.md @@ -0,0 +1,142 @@ +# EIP-6110 -- Fork Logic + +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + + + + +- [Introduction](#introduction) +- [Configuration](#configuration) +- [Helper functions](#helper-functions) + - [Misc](#misc) + - [Modified `compute_fork_version`](#modified-compute_fork_version) +- [Fork to Deneb](#fork-to-deneb) + - [Fork trigger](#fork-trigger) + - [Upgrading the state](#upgrading-the-state) + + + +## Introduction + +This document describes the process of Deneb upgrade. + +## Configuration + +Warning: this configuration is not definitive. + +| Name | Value | +| - | - | +| `EIP6110_FORK_VERSION` | `Version('0x05000000')` | +| `EIP6110_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | + +## Helper functions + +### Misc + +#### Modified `compute_fork_version` + +```python +def compute_fork_version(epoch: Epoch) -> Version: + """ + Return the fork version at the given ``epoch``. + """ + if epoch >= EIP6110_FORK_EPOCH: + return EIP6110_FORK_EPOCH + if epoch >= CAPELLA_FORK_EPOCH: + return CAPELLA_FORK_VERSION + if epoch >= BELLATRIX_FORK_EPOCH: + return BELLATRIX_FORK_VERSION + if epoch >= ALTAIR_FORK_EPOCH: + return ALTAIR_FORK_VERSION + return GENESIS_FORK_VERSION +``` + +## Fork to Deneb + +### Fork trigger + +TBD. This fork is defined for testing purposes, the EIP may be combined with other consensus-layer upgrade. +For now, we assume the condition will be triggered at epoch `EIP6110_FORK_EPOCH`. + +Note that for the pure Deneb networks, we don't apply `upgrade_to_eip6110` since it starts with Deneb version logic. 
+ +### Upgrading the state + +If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == EIP6110_FORK_EPOCH`, +an irregular state change is made to upgrade to EIP-6110. + +```python +def upgrade_to_eip6110(pre: capella.BeaconState) -> BeaconState: + epoch = capella.get_current_epoch(pre) + latest_execution_payload_header = ExecutionPayloadHeader( + parent_hash=pre.latest_execution_payload_header.parent_hash, + fee_recipient=pre.latest_execution_payload_header.fee_recipient, + state_root=pre.latest_execution_payload_header.state_root, + receipts_root=pre.latest_execution_payload_header.receipts_root, + logs_bloom=pre.latest_execution_payload_header.logs_bloom, + prev_randao=pre.latest_execution_payload_header.prev_randao, + block_number=pre.latest_execution_payload_header.block_number, + gas_limit=pre.latest_execution_payload_header.gas_limit, + gas_used=pre.latest_execution_payload_header.gas_used, + timestamp=pre.latest_execution_payload_header.timestamp, + extra_data=pre.latest_execution_payload_header.extra_data, + base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas, + block_hash=pre.latest_execution_payload_header.block_hash, + transactions_root=pre.latest_execution_payload_header.transactions_root, + withdrawals_root=pre.latest_execution_payload_header.withdrawals_root, + deposit_receipts_root=Root(), # [New in EIP-6110] + ) + post = BeaconState( + # Versioning + genesis_time=pre.genesis_time, + genesis_validators_root=pre.genesis_validators_root, + slot=pre.slot, + fork=Fork( + previous_version=pre.fork.current_version, + current_version=EIP6110_FORK_VERSION, # [Modified in EIP-6110] + epoch=epoch, + ), + # History + latest_block_header=pre.latest_block_header, + block_roots=pre.block_roots, + state_roots=pre.state_roots, + historical_roots=pre.historical_roots, + # Eth1 + eth1_data=pre.eth1_data, + eth1_data_votes=pre.eth1_data_votes, + eth1_deposit_index=pre.eth1_deposit_index, + # Registry + 
validators=pre.validators, + balances=pre.balances, + # Randomness + randao_mixes=pre.randao_mixes, + # Slashings + slashings=pre.slashings, + # Participation + previous_epoch_participation=pre.previous_epoch_participation, + current_epoch_participation=pre.current_epoch_participation, + # Finality + justification_bits=pre.justification_bits, + previous_justified_checkpoint=pre.previous_justified_checkpoint, + current_justified_checkpoint=pre.current_justified_checkpoint, + finalized_checkpoint=pre.finalized_checkpoint, + # Inactivity + inactivity_scores=pre.inactivity_scores, + # Sync + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + # Execution-layer + latest_execution_payload_header=latest_execution_payload_header, # [Modified in EIP-6110] + # Withdrawals + next_withdrawal_index=pre.next_withdrawal_index, + next_withdrawal_validator_index=pre.next_withdrawal_validator_index, + # Deep history valid from Capella onwards + historical_summaries=pre.historical_summaries, + # EIP-6110 + deposit_receipts_start_index=0, # [New in EIP-6110] + ) + + return post +``` From e7035dacf5f1f20fddfd0a2b45ae97e0bcf7e449 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 23 Feb 2023 22:46:55 +0800 Subject: [PATCH 078/210] Remove the outdated statement --- specs/deneb/fork.md | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/specs/deneb/fork.md b/specs/deneb/fork.md index 1ace26c7f5..23b3f23c7b 100644 --- a/specs/deneb/fork.md +++ b/specs/deneb/fork.md @@ -64,8 +64,6 @@ Note that for the pure Deneb networks, we don't apply `upgrade_to_deneb` since i ### Upgrading the state -Since the `deneb.BeaconState` format is equal to the `capella.BeaconState` format, we only have to update `BeaconState.fork`. 
- ```python def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState: epoch = capella.get_current_epoch(pre) @@ -82,10 +80,10 @@ def upgrade_to_deneb(pre: capella.BeaconState) -> BeaconState: timestamp=pre.latest_execution_payload_header.timestamp, extra_data=pre.latest_execution_payload_header.extra_data, base_fee_per_gas=pre.latest_execution_payload_header.base_fee_per_gas, - excess_data_gas=uint256(0), # [New in Deneb] block_hash=pre.latest_execution_payload_header.block_hash, transactions_root=pre.latest_execution_payload_header.transactions_root, withdrawals_root=pre.latest_execution_payload_header.withdrawals_root, + excess_data_gas=uint256(0), # [New in Deneb] ) post = BeaconState( # Versioning From 7d6831ec8691607b1b4b30d4b1b9cd0af69b16a5 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 23 Feb 2023 21:23:52 +0600 Subject: [PATCH 079/210] Fix initialize_beacon_state_from_eth1 definition --- specs/_features/eip6110/beacon-chain.md | 9 +++++---- specs/_features/eip6110/fork.md | 2 +- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 1daa44fb14..62a8664769 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -375,8 +375,9 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe *Note*: The function `initialize_beacon_state_from_eth1` is modified for pure EIP-6110 testing only. Modifications include: -1. Use `DEPOSITS_EIP_FORK_VERSION` as the previous and current fork version. +1. Use `EIP6110_FORK_VERSION` as the previous and current fork version. 2. Utilize the EIP-6110 `BeaconBlockBody` when constructing the initial `latest_block_header`. +3. Add `deposit_receipts_start_index` variable to the genesis state initialization. 
```python def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, @@ -385,8 +386,8 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader() ) -> BeaconState: fork = Fork( - previous_version=CAPELLA_FORK_VERSION, # [Modified in Capella] for testing only - current_version=CAPELLA_FORK_VERSION, # [Modified in Capella] + previous_version=EIP6110_FORK_VERSION, # [Modified in EIP6110] for testing only + current_version=EIP6110_FORK_VERSION, # [Modified in EIP6110] epoch=GENESIS_EPOCH, ) state = BeaconState( @@ -395,7 +396,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy - deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, + deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110] ) # Process deposits diff --git a/specs/_features/eip6110/fork.md b/specs/_features/eip6110/fork.md index 3a8de1b8dc..6e89788872 100644 --- a/specs/_features/eip6110/fork.md +++ b/specs/_features/eip6110/fork.md @@ -135,7 +135,7 @@ def upgrade_to_eip6110(pre: capella.BeaconState) -> BeaconState: # Deep history valid from Capella onwards historical_summaries=pre.historical_summaries, # EIP-6110 - deposit_receipts_start_index=0, # [New in EIP-6110] + deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP-6110] ) return post From 703fdfc7c7cc5945fa8d451498164b1e641b625a Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 23 Feb 2023 21:31:19 +0600 Subject: [PATCH 080/210] Fix linter --- specs/_features/eip6110/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/_features/eip6110/beacon-chain.md 
b/specs/_features/eip6110/beacon-chain.md index 62a8664769..827637221c 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -396,7 +396,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy - deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110] + deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110] ) # Process deposits From fda0eae70af88658f1bff373e3bb7bb7b2fb8c67 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 23 Feb 2023 23:41:57 +0800 Subject: [PATCH 081/210] Add EIP6110 to pylint and mypy scope --- Makefile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index d4259b2fe9..b17baf7c89 100644 --- a/Makefile +++ b/Makefile @@ -142,8 +142,8 @@ codespell: lint: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \ - && pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/deneb \ - && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.deneb + && pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/deneb ./eth2spec/eip6110 \ + && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.deneb -p eth2spec.eip6110 lint_generators: pyspec . 
venv/bin/activate; cd $(TEST_GENERATORS_DIR); \ From 9d690a4cb298b34da59960cdf24891608b817ad0 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 24 Feb 2023 17:58:10 +0800 Subject: [PATCH 082/210] Fix typo --- specs/_features/eip6110/fork.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/_features/eip6110/fork.md b/specs/_features/eip6110/fork.md index 6e89788872..a932ecb24e 100644 --- a/specs/_features/eip6110/fork.md +++ b/specs/_features/eip6110/fork.md @@ -12,7 +12,7 @@ - [Helper functions](#helper-functions) - [Misc](#misc) - [Modified `compute_fork_version`](#modified-compute_fork_version) -- [Fork to Deneb](#fork-to-deneb) +- [Fork to EIP-6110](#fork-to-eip-6110) - [Fork trigger](#fork-trigger) - [Upgrading the state](#upgrading-the-state) @@ -20,7 +20,7 @@ ## Introduction -This document describes the process of Deneb upgrade. +This document describes the process of EIP-6110 upgrade. ## Configuration @@ -53,14 +53,14 @@ def compute_fork_version(epoch: Epoch) -> Version: return GENESIS_FORK_VERSION ``` -## Fork to Deneb +## Fork to EIP-6110 ### Fork trigger TBD. This fork is defined for testing purposes, the EIP may be combined with other consensus-layer upgrade. For now, we assume the condition will be triggered at epoch `EIP6110_FORK_EPOCH`. -Note that for the pure Deneb networks, we don't apply `upgrade_to_eip6110` since it starts with Deneb version logic. +Note that for the pure EIP-6110 networks, we don't apply `upgrade_to_eip6110` since it starts with EIP-6110 version logic. ### Upgrading the state From 136c78ddc77416ae55ed89f231dbfb4ad58b58d1 Mon Sep 17 00:00:00 2001 From: henridf Date: Fri, 24 Feb 2023 14:07:16 +0100 Subject: [PATCH 083/210] Update fork-choice.md Fix outdated (likely a Bellatrix cut-paste) description of change. 
--- specs/deneb/fork-choice.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index e93eb54faf..91ff3c4b1a 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -63,7 +63,7 @@ def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZ ### `on_block` -*Note*: The only modification is the addition of the verification of transition block conditions. +*Note*: The only modification is the addition of the blob data availability check. ```python def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: From 0879c46a34ee03b23dddbb0d83f9cd179cacac51 Mon Sep 17 00:00:00 2001 From: Enrico Del Fante Date: Mon, 27 Feb 2023 20:12:31 +0100 Subject: [PATCH 084/210] Add `blob_sidecar` gossip rule for parent slot Similarly to the check we do on Block gossip, we should check slot consistency with the parent block, so we can independently reject a wrong block and blob_sidecar when the rule is violated. --- specs/deneb/p2p-interface.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 2e77fa98fa..1898f40267 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -112,6 +112,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` - _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation. +- _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `sidecar.block_parent_root`). 
- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). From de5be633996fca73355a84200f21c58bfaafa656 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 28 Feb 2023 16:14:47 +0600 Subject: [PATCH 085/210] Apply suggestions from code review Co-authored-by: Hsiao-Wei Wang --- specs/_features/eip6110/beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 827637221c..7fe2218ae9 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -47,7 +47,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | -| `NOT_SET_DEPOSIT_RECEIPTS_START_INDEX` | `2**64 - 1` | +| `NOT_SET_DEPOSIT_RECEIPTS_START_INDEX` | `uint64(2**64 - 1)` | ## Preset @@ -292,6 +292,7 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) ```python def process_deposit(state: BeaconState, deposit: Deposit) -> None: + # [New in EIP-6110] # Skip already processed deposits if state.eth1_deposit_index >= state.deposit_receipts_start_index: state.eth1_deposit_index += 1 @@ -325,7 +326,6 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: if bls.Verify(pubkey, signing_root, deposit.data.signature): state.validators.append(get_validator_from_deposit(deposit)) state.balances.append(amount) - # [New in Altair] state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) 
state.inactivity_scores.append(uint64(0)) From fae77eb53dc45be4e2a346cc5a57e7f6a1e007e9 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 28 Feb 2023 16:36:46 +0600 Subject: [PATCH 086/210] Apply @hwwhww suggestions --- specs/_features/eip6110/beacon-chain.md | 122 ++++++++++-------------- 1 file changed, 53 insertions(+), 69 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 7fe2218ae9..160eee6585 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -10,12 +10,10 @@ - [Constants](#constants) - [Misc](#misc) - [Preset](#preset) - - [State list lengths](#state-list-lengths) - [Execution](#execution) - [Containers](#containers) - [New containers](#new-containers) - [`DepositReceipt`](#depositreceipt) - - [`IndexedDepositData`](#indexeddepositdata) - [Extended Containers](#extended-containers) - [`ExecutionPayload`](#executionpayload) - [`ExecutionPayloadHeader`](#executionpayloadheader) @@ -23,7 +21,8 @@ - [Beacon chain state transition function](#beacon-chain-state-transition-function) - [Block processing](#block-processing) - [Modified `process_operations`](#modified-process_operations) - - [New `get_validator_from_deposit_receipt`](#new-get_validator_from_deposit_receipt) + - [New `get_validator_from_deposit_data`](#new-get_validator_from_deposit_data) + - [New `apply_deposit`](#new-apply_deposit) - [New `process_deposit_receipt`](#new-process_deposit_receipt) - [Modified `process_deposit`](#modified-process_deposit) - [Modified `process_execution_payload`](#modified-process_execution_payload) @@ -51,12 +50,6 @@ The following values are (non-configurable) constants used throughout the specif ## Preset -### State list lengths - -| Name | Value | -| - | - | -| `PENDING_DEPOSITS_LIMIT` | `2**32` (= 4,294,967,296) | - ### Execution | Name | Value | Description | @@ -78,17 +71,6 @@ class DepositReceipt(Container): index: uint64 ``` -#### 
`IndexedDepositData` - -```python -class IndexedDepositData(Container): - pubkey: BLSPubkey - withdrawal_credentials: Bytes32 - amount: Gwei - index: uint64 - epoch: Epoch -``` - ### Extended Containers #### `ExecutionPayload` @@ -211,9 +193,11 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: # Prevent potential underflow introduced by mixing two deposit processing flows - unprocessed_deposits_count = max(0, state.eth1_data.deposit_count - state.eth1_deposit_index) # [New in EIP-6110] - # Verify that outstanding deposits are processed up to the maximum number of deposits - assert len(body.deposits) == min(MAX_DEPOSITS, unprocessed_deposits_count) # [Modified in EIP-6110] + # [New in EIP-6110] + if state.eth1_data.deposit_count > state.eth1_deposit_index: + assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index) + else: + assert len(body.deposits) == 0 def for_ops(operations: Sequence[Any], fn: Callable[[BeaconState, Any], None]) -> None: for operation in operations: @@ -231,16 +215,15 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) ``` -#### New `get_validator_from_deposit_receipt` +#### New `get_validator_from_deposit_data` ```python -def get_validator_from_deposit_receipt(deposit_receipt: DepositReceipt) -> Validator: - amount = deposit_receipt.amount +def get_validator_from_deposit_data(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator: effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) return Validator( - pubkey=deposit_receipt.pubkey, - withdrawal_credentials=deposit_receipt.withdrawal_credentials, + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, activation_eligibility_epoch=FAR_FUTURE_EPOCH, 
activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, @@ -249,33 +232,28 @@ def get_validator_from_deposit_receipt(deposit_receipt: DepositReceipt) -> Valid ) ``` -#### New `process_deposit_receipt` +#### New `apply_deposit` ```python -def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None: - # Set deposit receipt start index - if state.deposit_receipts_start_index == NOT_SET_DEPOSIT_RECEIPTS_START_INDEX: - state.deposit_receipts_start_index = deposit_receipt.index - - # Signify the end of transition to in-protocol deposit logic - if state.eth1_deposit_index >= state.deposit_receipts_start_index: - state.eth1_deposit_index = deposit_receipt.index + 1 - - pubkey = deposit_receipt.pubkey - amount = deposit_receipt.amount +def apply_deposit(state: BeaconState, + pubkey: BLSPubkey, + withdrawal_credentials: Bytes32, + amount: uint64, + signature: BLSSignature, + ) -> None: validator_pubkeys = [validator.pubkey for validator in state.validators] if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract deposit_message = DepositMessage( - pubkey=deposit_receipt.pubkey, - withdrawal_credentials=deposit_receipt.withdrawal_credentials, - amount=deposit_receipt.amount, + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + amount=amount, ) domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks signing_root = compute_signing_root(deposit_message, domain) # Initialize validator if the deposit signature is valid - if bls.Verify(pubkey, signing_root, deposit_receipt.signature): - state.validators.append(get_validator_from_deposit_receipt(deposit_receipt)) + if bls.Verify(pubkey, signing_root, signature): + state.validators.append(get_validator_from_deposit_data(pubkey, withdrawal_credentials, amount)) state.balances.append(amount) 
state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) @@ -286,9 +264,31 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) increase_balance(state, index, amount) ``` + +#### New `process_deposit_receipt` + +```python +def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None: + # Set deposit receipt start index + if state.deposit_receipts_start_index == NOT_SET_DEPOSIT_RECEIPTS_START_INDEX: + state.deposit_receipts_start_index = deposit_receipt.index + + # Signify the end of transition to in-protocol deposit logic + if state.eth1_deposit_index >= state.deposit_receipts_start_index: + state.eth1_deposit_index = deposit_receipt.index + 1 + + apply_deposit( + state=state, + pubkey=deposit_receipt.pubkey, + withdrawal_credentials=deposit_receipt.withdrawal_credentials, + amount=deposit_receipt.amount, + signature=deposit_receipt.signature, + ) +``` + #### Modified `process_deposit` -*Note*: The function `process_deposit` is modified to prevent deposits from being processed in the second time (due to `process_deposit_receipt`). +*Note*: The function `process_deposit` is modified to prevent deposits from being processed the second time (due to `process_deposit_receipt`). 
```python def process_deposit(state: BeaconState, deposit: Deposit) -> None: @@ -310,29 +310,13 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: # Deposits must be processed in order state.eth1_deposit_index += 1 - pubkey = deposit.data.pubkey - amount = deposit.data.amount - validator_pubkeys = [validator.pubkey for validator in state.validators] - if pubkey not in validator_pubkeys: - # Verify the deposit signature (proof of possession) which is not checked by the deposit contract - deposit_message = DepositMessage( - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - amount=deposit.data.amount, - ) - domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks - signing_root = compute_signing_root(deposit_message, domain) - # Initialize validator if the deposit signature is valid - if bls.Verify(pubkey, signing_root, deposit.data.signature): - state.validators.append(get_validator_from_deposit(deposit)) - state.balances.append(amount) - state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) - state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) - state.inactivity_scores.append(uint64(0)) - else: - # Increase balance by deposit amount - index = ValidatorIndex(validator_pubkeys.index(pubkey)) - increase_balance(state, index, amount) + apply_deposit( + state=state, + pubkey=deposit.data.pubkey, + withdrawal_credentials=deposit.data.withdrawal_credentials, + amount=deposit.data.amount, + signature=deposit.data.signature, + ) ``` #### Modified `process_execution_payload` From 7bb65f88d91f5f00ec0d19baf665f0f59c9134e0 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 28 Feb 2023 17:18:07 +0600 Subject: [PATCH 087/210] Cosmetic fix --- specs/_features/eip6110/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md 
index 160eee6585..4c7ca790d5 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -192,8 +192,8 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: - # Prevent potential underflow introduced by mixing two deposit processing flows # [New in EIP-6110] + # Prevent potential underflow introduced by mixing two deposit processing flows if state.eth1_data.deposit_count > state.eth1_deposit_index: assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index) else: From 195babdf3d55c6661dad4ce658b4679063cb33e7 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 24 Feb 2023 17:56:38 +0800 Subject: [PATCH 088/210] Refactoring the specs list. Avoid listing specs again and again. --- Makefile | 29 ++++++++++++++++------------- 1 file changed, 16 insertions(+), 13 deletions(-) diff --git a/Makefile b/Makefile index d4259b2fe9..1bb78cb0c7 100644 --- a/Makefile +++ b/Makefile @@ -33,6 +33,12 @@ MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) \ $(wildcard $(SPEC_DIR)/_features/sharding/*.md) \ $(wildcard $(SSZ_DIR)/*.md) +ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb +# The parameters for commands. Use `foreach` to avoid listing specs again. 
+COVERAGE_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), --cov=eth2spec.$S.$(TEST_PRESET_TYPE)) +PYLINT_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), ./eth2spec/$S) +MYPY_SCOPE := $(foreach S,$(ALL_EXECUTABLE_SPECS), -p eth2spec.$S) + COV_HTML_OUT=.htmlcov COV_HTML_OUT_DIR=$(PY_SPEC_DIR)/$(COV_HTML_OUT) COV_INDEX_FILE=$(COV_HTML_OUT_DIR)/index.html @@ -63,15 +69,14 @@ partial_clean: rm -f .coverage rm -rf $(PY_SPEC_DIR)/.pytest_cache rm -rf $(DEPOSIT_CONTRACT_TESTER_DIR)/.pytest_cache - rm -rf $(ETH2SPEC_MODULE_DIR)/phase0 - rm -rf $(ETH2SPEC_MODULE_DIR)/altair - rm -rf $(ETH2SPEC_MODULE_DIR)/bellatrix - rm -rf $(ETH2SPEC_MODULE_DIR)/capella - rm -rf $(ETH2SPEC_MODULE_DIR)/deneb rm -rf $(COV_HTML_OUT_DIR) rm -rf $(TEST_REPORT_DIR) rm -rf eth2spec.egg-info dist build - rm -rf build + rm -rf build; + @for spec_name in $(ALL_EXECUTABLE_SPECS) ; do \ + echo $$spec_name; \ + rm -rf $(ETH2SPEC_MODULE_DIR)/$$spec_name; \ + done clean: partial_clean rm -rf venv @@ -105,12 +110,12 @@ install_test: # Testing against `minimal` or `mainnet` config by default test: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 4 --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -n 4 --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec # Testing against `minimal` or `mainnet` config by default find_test: pyspec . 
venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -k=$(K) --disable-bls --cov=eth2spec.phase0.$(TEST_PRESET_TYPE) --cov=eth2spec.altair.$(TEST_PRESET_TYPE) --cov=eth2spec.bellatrix.$(TEST_PRESET_TYPE) --cov=eth2spec.capella.$(TEST_PRESET_TYPE) --cov=eth2spec.deneb.$(TEST_PRESET_TYPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec + python3 -m pytest -k=$(K) --disable-bls $(COVERAGE_SCOPE) --cov-report="html:$(COV_HTML_OUT)" --cov-branch eth2spec citest: pyspec mkdir -p $(TEST_REPORT_DIR); @@ -119,7 +124,7 @@ ifdef fork python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec else . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec + python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec endif @@ -137,13 +142,11 @@ check_toc: $(MARKDOWN_FILES:=.toc) codespell: codespell . --skip "./.git,./venv,$(PY_SPEC_DIR)/.mypy_cache" -I .codespell-whitelist -# TODO: add future protocol upgrade patch packages to linting. -# NOTE: we use `pylint` just for catching unused arguments in spec code lint: pyspec . venv/bin/activate; cd $(PY_SPEC_DIR); \ flake8 --config $(LINTER_CONFIG_FILE) ./eth2spec \ - && pylint --rcfile $(LINTER_CONFIG_FILE) ./eth2spec/phase0 ./eth2spec/altair ./eth2spec/bellatrix ./eth2spec/capella ./eth2spec/deneb \ - && mypy --config-file $(LINTER_CONFIG_FILE) -p eth2spec.phase0 -p eth2spec.altair -p eth2spec.bellatrix -p eth2spec.capella -p eth2spec.deneb + && pylint --rcfile $(LINTER_CONFIG_FILE) $(PYLINT_SCOPE) \ + && mypy --config-file $(LINTER_CONFIG_FILE) $(MYPY_SCOPE) lint_generators: pyspec . 
venv/bin/activate; cd $(TEST_GENERATORS_DIR); \ From 1f3249407a9521827c24455d8a010b2c27c3aed6 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 28 Feb 2023 23:37:12 +0800 Subject: [PATCH 089/210] Full wildcard search `MARKDOWN_FILES` --- Makefile | 12 ++++-------- 1 file changed, 4 insertions(+), 8 deletions(-) diff --git a/Makefile b/Makefile index 1bb78cb0c7..371a3ecf8a 100644 --- a/Makefile +++ b/Makefile @@ -23,14 +23,10 @@ GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENER # To check generator matching: #$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}]) -MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/phase0/*.md) \ - $(wildcard $(SPEC_DIR)/altair/*.md) $(wildcard $(SPEC_DIR)/altair/**/*.md) \ - $(wildcard $(SPEC_DIR)/bellatrix/*.md) \ - $(wildcard $(SPEC_DIR)/capella/*.md) $(wildcard $(SPEC_DIR)/capella/**/*.md) \ - $(wildcard $(SPEC_DIR)/deneb/*.md) $(wildcard $(SPEC_DIR)/deneb/**/*.md) \ - $(wildcard $(SPEC_DIR)/_features/custody/*.md) \ - $(wildcard $(SPEC_DIR)/_features/das/*.md) \ - $(wildcard $(SPEC_DIR)/_features/sharding/*.md) \ +MARKDOWN_FILES = $(wildcard $(SPEC_DIR)/*/*.md) \ + $(wildcard $(SPEC_DIR)/*/*/*.md) \ + $(wildcard $(SPEC_DIR)/_features/*/*.md) \ + $(wildcard $(SPEC_DIR)/_features/*/*/*.md) \ $(wildcard $(SSZ_DIR)/*.md) ALL_EXECUTABLE_SPECS = phase0 altair bellatrix capella deneb From a236770b077af8107b0afd4054781f6e1856e31f Mon Sep 17 00:00:00 2001 From: terencechain Date: Tue, 28 Feb 2023 15:17:40 -0800 Subject: [PATCH 090/210] EIP4844: Use `MAX_REQUEST_BLOB_SIDECARS` --- specs/deneb/p2p-interface.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 2e77fa98fa..660448b525 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -183,7 +183,7 @@ Request Content: ``` ( - List[BlobIdentifier, MAX_REQUEST_BLOBS_SIDECARS] + List[BlobIdentifier, MAX_REQUEST_BLOB_SIDECARS] ) ``` @@ -191,7 +191,7 @@ 
Response Content: ``` ( - List[BlobSidecar, MAX_REQUEST_BLOBS_SIDECARS] + List[BlobSidecar, MAX_REQUEST_BLOB_SIDECARS] ) ``` @@ -202,7 +202,7 @@ It may be less in the case that the responding peer is missing blocks or sidecar The response is unsigned, i.e. `BlobSidecar`, as the signature of the beacon block proposer may not be available beyond the initial distribution via gossip. -No more than `MAX_REQUEST_BLOBS_SIDECARS` may be requested at a time. +No more than `MAX_REQUEST_BLOB_SIDECARS` may be requested at a time. `BlobSidecarsByRoot` is primarily used to recover recent blobs (e.g. when receiving a block with a transaction whose corresponding blob is missing). From 3259922a9eb59c30c826e0986afbc07bfc296b2c Mon Sep 17 00:00:00 2001 From: Stefan Bratanov Date: Wed, 1 Mar 2023 17:10:58 +0000 Subject: [PATCH 091/210] change usage of MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS --- specs/deneb/fork-choice.md | 2 +- specs/deneb/validator.md | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 91ff3c4b1a..830c487645 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -47,7 +47,7 @@ The block MUST NOT be considered valid until all valid `Blob`s have been downloa def is_data_available(beacon_block_root: Root, blob_kzg_commitments: Sequence[KZGCommitment]) -> bool: # `retrieve_blobs_and_proofs` is implementation and context dependent # It returns all the blobs for the given block root, and raises an exception if not available - # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` + # Note: the p2p network does not guarantee sidecar retrieval outside of `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` blobs, proofs = retrieve_blobs_and_proofs(beacon_block_root) # For testing, `retrieve_blobs_and_proofs` returns ("TEST", "TEST"). 
diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index b29330ce57..77edb957f8 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -118,7 +118,7 @@ def get_blob_sidecar_signature(state: BeaconState, After publishing the peers on the network may request the sidecar through sync-requests, or a local user may be interested. -The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` epochs and serve when capable, +The validator MUST hold on to sidecars for `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` epochs and serve when capable, to ensure the data-availability of these blobs throughout the network. -After `MIN_EPOCHS_FOR_BLOBS_SIDECARS_REQUESTS` nodes MAY prune the sidecars and/or stop serving them. +After `MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS` nodes MAY prune the sidecars and/or stop serving them. From c445fa9b3762f665e08a2d558c80abf78178dc0d Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 2 Mar 2023 15:50:08 +0600 Subject: [PATCH 092/210] Apply suggestions from code review Co-authored-by: Danny Ryan --- specs/_features/eip6110/beacon-chain.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 4c7ca790d5..ddaca21a24 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -46,7 +46,7 @@ The following values are (non-configurable) constants used throughout the specif | Name | Value | | - | - | -| `NOT_SET_DEPOSIT_RECEIPTS_START_INDEX` | `uint64(2**64 - 1)` | +| `UNSET_DEPOSIT_RECEIPTS_START_INDEX` | `uint64(2**64 - 1)` | ## Preset @@ -166,7 +166,7 @@ class BeaconState(Container): next_withdrawal_validator_index: ValidatorIndex # Deep history valid from Capella onwards historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] - # EIP-6110 + # [New in EIP-6110] deposit_receipts_start_index: uint64 ``` @@ -239,8 +239,7 @@ def 
apply_deposit(state: BeaconState, pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64, - signature: BLSSignature, - ) -> None: + signature: BLSSignature) -> None: validator_pubkeys = [validator.pubkey for validator in state.validators] if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract From 13f3654296546f63a1b60c5b9263a6516125f5e5 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 2 Mar 2023 17:29:22 +0600 Subject: [PATCH 093/210] Apply suggestions from @djrtwo --- specs/_features/eip6110/beacon-chain.md | 100 ++---------------------- specs/_features/eip6110/fork.md | 2 +- specs/altair/beacon-chain.md | 36 +++------ specs/phase0/beacon-chain.md | 60 ++++++++------ 4 files changed, 58 insertions(+), 140 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index ddaca21a24..95655d32b3 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -21,10 +21,7 @@ - [Beacon chain state transition function](#beacon-chain-state-transition-function) - [Block processing](#block-processing) - [Modified `process_operations`](#modified-process_operations) - - [New `get_validator_from_deposit_data`](#new-get_validator_from_deposit_data) - - [New `apply_deposit`](#new-apply_deposit) - [New `process_deposit_receipt`](#new-process_deposit_receipt) - - [Modified `process_deposit`](#modified-process_deposit) - [Modified `process_execution_payload`](#modified-process_execution_payload) - [Testing](#testing) @@ -36,7 +33,7 @@ This is the beacon chain specification of in-protocol deposits processing mechanism. This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum.org/EIPS/eip-6110). -*Note:* This specification is under development and should be used with care. 
+*Note:* This specification is built upon [Capella](../../capella/beacon_chain.md) and is under active development. ## Constants @@ -192,10 +189,11 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: - # [New in EIP-6110] - # Prevent potential underflow introduced by mixing two deposit processing flows - if state.eth1_data.deposit_count > state.eth1_deposit_index: - assert len(body.deposits) == min(MAX_DEPOSITS, state.eth1_data.deposit_count - state.eth1_deposit_index) + # [Modified in EIP-6110] + # Disable former deposit mechanism once all prior deposits are processed + eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index) + if state.eth1_deposit_index < eth1_deposit_index_limit: + assert len(body.deposits) == min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index) else: assert len(body.deposits) == 0 @@ -215,61 +213,12 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) ``` -#### New `get_validator_from_deposit_data` - -```python -def get_validator_from_deposit_data(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator: - effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) - - return Validator( - pubkey=pubkey, - withdrawal_credentials=withdrawal_credentials, - activation_eligibility_epoch=FAR_FUTURE_EPOCH, - activation_epoch=FAR_FUTURE_EPOCH, - exit_epoch=FAR_FUTURE_EPOCH, - withdrawable_epoch=FAR_FUTURE_EPOCH, - effective_balance=effective_balance, - ) -``` - -#### New `apply_deposit` - -```python -def apply_deposit(state: BeaconState, - pubkey: BLSPubkey, - withdrawal_credentials: Bytes32, - amount: uint64, - signature: BLSSignature) -> None: - validator_pubkeys = [validator.pubkey for validator in state.validators] - if pubkey not in 
validator_pubkeys: - # Verify the deposit signature (proof of possession) which is not checked by the deposit contract - deposit_message = DepositMessage( - pubkey=pubkey, - withdrawal_credentials=withdrawal_credentials, - amount=amount, - ) - domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks - signing_root = compute_signing_root(deposit_message, domain) - # Initialize validator if the deposit signature is valid - if bls.Verify(pubkey, signing_root, signature): - state.validators.append(get_validator_from_deposit_data(pubkey, withdrawal_credentials, amount)) - state.balances.append(amount) - state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) - state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) - state.inactivity_scores.append(uint64(0)) - else: - # Increase balance by deposit amount - index = ValidatorIndex(validator_pubkeys.index(pubkey)) - increase_balance(state, index, amount) -``` - - #### New `process_deposit_receipt` ```python def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) -> None: # Set deposit receipt start index - if state.deposit_receipts_start_index == NOT_SET_DEPOSIT_RECEIPTS_START_INDEX: + if state.deposit_receipts_start_index == UNSET_DEPOSIT_RECEIPTS_START_INDEX: state.deposit_receipts_start_index = deposit_receipt.index # Signify the end of transition to in-protocol deposit logic @@ -285,39 +234,6 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) ) ``` -#### Modified `process_deposit` - -*Note*: The function `process_deposit` is modified to prevent deposits from being processed the second time (due to `process_deposit_receipt`). 
- -```python -def process_deposit(state: BeaconState, deposit: Deposit) -> None: - # [New in EIP-6110] - # Skip already processed deposits - if state.eth1_deposit_index >= state.deposit_receipts_start_index: - state.eth1_deposit_index += 1 - return - - # Verify the Merkle branch - assert is_valid_merkle_branch( - leaf=hash_tree_root(deposit.data), - branch=deposit.proof, - depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in - index=state.eth1_deposit_index, - root=state.eth1_data.deposit_root, - ) - - # Deposits must be processed in order - state.eth1_deposit_index += 1 - - apply_deposit( - state=state, - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - amount=deposit.data.amount, - signature=deposit.data.signature, - ) -``` - #### Modified `process_execution_payload` *Note*: The function `process_execution_payload` is modified to use the new `ExecutionPayloadHeader` type. @@ -379,7 +295,7 @@ def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, eth1_data=Eth1Data(block_hash=eth1_block_hash, deposit_count=uint64(len(deposits))), latest_block_header=BeaconBlockHeader(body_root=hash_tree_root(BeaconBlockBody())), randao_mixes=[eth1_block_hash] * EPOCHS_PER_HISTORICAL_VECTOR, # Seed RANDAO with Eth1 entropy - deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110] + deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP6110] ) # Process deposits diff --git a/specs/_features/eip6110/fork.md b/specs/_features/eip6110/fork.md index a932ecb24e..b08661e5fa 100644 --- a/specs/_features/eip6110/fork.md +++ b/specs/_features/eip6110/fork.md @@ -135,7 +135,7 @@ def upgrade_to_eip6110(pre: capella.BeaconState) -> BeaconState: # Deep history valid from Capella onwards historical_summaries=pre.historical_summaries, # EIP-6110 - deposit_receipts_start_index=NOT_SET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP-6110] + 
deposit_receipts_start_index=UNSET_DEPOSIT_RECEIPTS_START_INDEX, # [New in EIP-6110] ) return post diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index fe71a5ff83..58dfad608a 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -43,7 +43,7 @@ - [Modified `slash_validator`](#modified-slash_validator) - [Block processing](#block-processing) - [Modified `process_attestation`](#modified-process_attestation) - - [Modified `process_deposit`](#modified-process_deposit) + - [Modified `apply_deposit`](#modified-apply_deposit) - [Sync aggregate processing](#sync-aggregate-processing) - [Epoch processing](#epoch-processing) - [Justification and finalization](#justification-and-finalization) @@ -489,39 +489,29 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: increase_balance(state, get_beacon_proposer_index(state), proposer_reward) ``` -#### Modified `process_deposit` +#### Modified `apply_deposit` -*Note*: The function `process_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`. +*Note*: The function `apply_deposit` is modified to initialize `inactivity_scores`, `previous_epoch_participation`, and `current_epoch_participation`. 
```python -def process_deposit(state: BeaconState, deposit: Deposit) -> None: - # Verify the Merkle branch - assert is_valid_merkle_branch( - leaf=hash_tree_root(deposit.data), - branch=deposit.proof, - depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in - index=state.eth1_deposit_index, - root=state.eth1_data.deposit_root, - ) - - # Deposits must be processed in order - state.eth1_deposit_index += 1 - - pubkey = deposit.data.pubkey - amount = deposit.data.amount +def apply_deposit(state: BeaconState, + pubkey: BLSPubkey, + withdrawal_credentials: Bytes32, + amount: uint64, + signature: BLSSignature) -> None: validator_pubkeys = [validator.pubkey for validator in state.validators] if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract deposit_message = DepositMessage( - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - amount=deposit.data.amount, + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + amount=amount, ) domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks signing_root = compute_signing_root(deposit_message, domain) # Initialize validator if the deposit signature is valid - if bls.Verify(pubkey, signing_root, deposit.data.signature): - state.validators.append(get_validator_from_deposit(deposit)) + if bls.Verify(pubkey, signing_root, signature): + state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount)) state.balances.append(amount) # [New in Altair] state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) diff --git a/specs/phase0/beacon-chain.md b/specs/phase0/beacon-chain.md index 7e14fa951a..3794cd6be3 100644 --- a/specs/phase0/beacon-chain.md +++ b/specs/phase0/beacon-chain.md @@ -1835,13 +1835,12 @@ def process_attestation(state: BeaconState, attestation: Attestation) -> None: ##### Deposits 
```python -def get_validator_from_deposit(deposit: Deposit) -> Validator: - amount = deposit.data.amount +def get_validator_from_deposit(pubkey: BLSPubkey, withdrawal_credentials: Bytes32, amount: uint64) -> Validator: effective_balance = min(amount - amount % EFFECTIVE_BALANCE_INCREMENT, MAX_EFFECTIVE_BALANCE) return Validator( - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, activation_eligibility_epoch=FAR_FUTURE_EPOCH, activation_epoch=FAR_FUTURE_EPOCH, exit_epoch=FAR_FUTURE_EPOCH, @@ -1851,36 +1850,26 @@ def get_validator_from_deposit(deposit: Deposit) -> Validator: ``` ```python -def process_deposit(state: BeaconState, deposit: Deposit) -> None: - # Verify the Merkle branch - assert is_valid_merkle_branch( - leaf=hash_tree_root(deposit.data), - branch=deposit.proof, - depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in - index=state.eth1_deposit_index, - root=state.eth1_data.deposit_root, - ) - - # Deposits must be processed in order - state.eth1_deposit_index += 1 - - pubkey = deposit.data.pubkey - amount = deposit.data.amount +def apply_deposit(state: BeaconState, + pubkey: BLSPubkey, + withdrawal_credentials: Bytes32, + amount: uint64, + signature: BLSSignature) -> None: validator_pubkeys = [v.pubkey for v in state.validators] if pubkey not in validator_pubkeys: # Verify the deposit signature (proof of possession) which is not checked by the deposit contract deposit_message = DepositMessage( - pubkey=deposit.data.pubkey, - withdrawal_credentials=deposit.data.withdrawal_credentials, - amount=deposit.data.amount, + pubkey=pubkey, + withdrawal_credentials=withdrawal_credentials, + amount=amount, ) domain = compute_domain(DOMAIN_DEPOSIT) # Fork-agnostic domain since deposits are valid across forks signing_root = compute_signing_root(deposit_message, domain) - if not bls.Verify(pubkey, signing_root, deposit.data.signature): + if not 
bls.Verify(pubkey, signing_root, signature): return # Add validator and balance entries - state.validators.append(get_validator_from_deposit(deposit)) + state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount)) state.balances.append(amount) else: # Increase balance by deposit amount @@ -1888,6 +1877,29 @@ def process_deposit(state: BeaconState, deposit: Deposit) -> None: increase_balance(state, index, amount) ``` +```python +def process_deposit(state: BeaconState, deposit: Deposit) -> None: + # Verify the Merkle branch + assert is_valid_merkle_branch( + leaf=hash_tree_root(deposit.data), + branch=deposit.proof, + depth=DEPOSIT_CONTRACT_TREE_DEPTH + 1, # Add 1 for the List length mix-in + index=state.eth1_deposit_index, + root=state.eth1_data.deposit_root, + ) + + # Deposits must be processed in order + state.eth1_deposit_index += 1 + + apply_deposit( + state=state, + pubkey=deposit.data.pubkey, + withdrawal_credentials=deposit.data.withdrawal_credentials, + amount=deposit.data.amount, + signature=deposit.data.signature, + ) +``` + ##### Voluntary exits ```python From 00557c56492171dbaa41853e5c658b0789ff3206 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 2 Mar 2023 17:31:12 +0600 Subject: [PATCH 094/210] Remove unnecessary eth1_deposit_index bump --- specs/_features/eip6110/beacon-chain.md | 4 ---- 1 file changed, 4 deletions(-) diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 95655d32b3..70a72a5f45 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -221,10 +221,6 @@ def process_deposit_receipt(state: BeaconState, deposit_receipt: DepositReceipt) if state.deposit_receipts_start_index == UNSET_DEPOSIT_RECEIPTS_START_INDEX: state.deposit_receipts_start_index = deposit_receipt.index - # Signify the end of transition to in-protocol deposit logic - if state.eth1_deposit_index >= state.deposit_receipts_start_index: - 
state.eth1_deposit_index = deposit_receipt.index + 1 - apply_deposit( state=state, pubkey=deposit_receipt.pubkey, From 86fb82b221474cc89387fa6436806507b3849d88 Mon Sep 17 00:00:00 2001 From: dankrad Date: Thu, 2 Mar 2023 20:49:10 +0000 Subject: [PATCH 095/210] Test generators for kzg-4844 libraries (#3274) Arkworks integration and test generators for kzg-4844 libraries --- Makefile | 4 +- setup.py | 1 + specs/deneb/polynomial-commitments.md | 15 +- tests/core/pyspec/eth2spec/test/conftest.py | 11 +- tests/core/pyspec/eth2spec/utils/bls.py | 223 ++++++- tests/formats/kzg/README.md | 15 + tests/formats/kzg/blob_to_kzg_commitment.md | 21 + tests/formats/kzg/compute_blob_kzg_proof.md | 21 + tests/formats/kzg/compute_kzg_proof.md | 23 + tests/formats/kzg/verify_blob_kzg_proof.md | 23 + .../kzg/verify_blob_kzg_proof_batch.md | 23 + tests/formats/kzg/verify_kzg_proof.md | 25 + tests/generators/kzg_4844/README.md | 3 + tests/generators/kzg_4844/main.py | 579 ++++++++++++++++++ tests/generators/kzg_4844/requirements.txt | 2 + 15 files changed, 951 insertions(+), 38 deletions(-) create mode 100644 tests/formats/kzg/README.md create mode 100644 tests/formats/kzg/blob_to_kzg_commitment.md create mode 100644 tests/formats/kzg/compute_blob_kzg_proof.md create mode 100644 tests/formats/kzg/compute_kzg_proof.md create mode 100644 tests/formats/kzg/verify_blob_kzg_proof.md create mode 100644 tests/formats/kzg/verify_blob_kzg_proof_batch.md create mode 100644 tests/formats/kzg/verify_kzg_proof.md create mode 100644 tests/generators/kzg_4844/README.md create mode 100644 tests/generators/kzg_4844/main.py create mode 100644 tests/generators/kzg_4844/requirements.txt diff --git a/Makefile b/Makefile index 371a3ecf8a..cd18256e9d 100644 --- a/Makefile +++ b/Makefile @@ -117,10 +117,10 @@ citest: pyspec mkdir -p $(TEST_REPORT_DIR); ifdef fork . 
venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec + python3 -m pytest -n 16 --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --fork=$(fork) --junitxml=test-reports/test_results.xml eth2spec else . venv/bin/activate; cd $(PY_SPEC_DIR); \ - python3 -m pytest -n 16 --bls-type=milagro --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec + python3 -m pytest -n 16 --bls-type=fastest --preset=$(TEST_PRESET_TYPE) --junitxml=test-reports/test_results.xml eth2spec endif diff --git a/setup.py b/setup.py index 9c5488f126..cf030c5492 100644 --- a/setup.py +++ b/setup.py @@ -1174,5 +1174,6 @@ def run(self): RUAMEL_YAML_VERSION, "lru-dict==1.1.8", MARKO_VERSION, + "py_arkworks_bls12381==0.3.4", ] ) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 61e22e1820..7b65b44b62 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -273,7 +273,7 @@ def g1_lincomb(points: Sequence[KZGCommitment], scalars: Sequence[BLSFieldElemen BLS multiscalar multiplication. This function can be optimized using Pippenger's algorithm and variants. 
""" assert len(points) == len(scalars) - result = bls.Z1 + result = bls.Z1() for x, a in zip(points, scalars): result = bls.add(result, bls.multiply(bls.bytes48_to_G1(x), a)) return KZGCommitment(bls.G1_to_bytes48(result)) @@ -323,7 +323,7 @@ def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial, a = BLSFieldElement(int(polynomial[i]) * int(roots_of_unity_brp[i]) % BLS_MODULUS) b = BLSFieldElement((int(BLS_MODULUS) + int(z) - int(roots_of_unity_brp[i])) % BLS_MODULUS) result += int(div(a, b) % BLS_MODULUS) - result = result * int(pow(z, width, BLS_MODULUS) - 1) * int(inverse_width) + result = result * int(BLS_MODULUS + pow(z, width, BLS_MODULUS) - 1) * int(inverse_width) return BLSFieldElement(result % BLS_MODULUS) ``` @@ -371,10 +371,10 @@ def verify_kzg_proof_impl(commitment: KZGCommitment, Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``. """ # Verify: P - y = Q * (X - z) - X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2, BLS_MODULUS - z)) - P_minus_y = bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y)) + X_minus_z = bls.add(bls.bytes96_to_G2(KZG_SETUP_G2[1]), bls.multiply(bls.G2(), (BLS_MODULUS - z) % BLS_MODULUS)) + P_minus_y = bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), (BLS_MODULUS - y) % BLS_MODULUS)) return bls.pairing_check([ - [P_minus_y, bls.neg(bls.G2)], + [P_minus_y, bls.neg(bls.G2())], [bls.bytes48_to_G1(proof), X_minus_z] ]) ``` @@ -415,14 +415,14 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment], proofs, [BLSFieldElement((int(z) * int(r_power)) % BLS_MODULUS) for z, r_power in zip(zs, r_powers)], ) - C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1, BLS_MODULUS - y)) + C_minus_ys = [bls.add(bls.bytes48_to_G1(commitment), bls.multiply(bls.G1(), (BLS_MODULUS - y) % BLS_MODULUS)) for commitment, y in zip(commitments, ys)] C_minus_y_as_KZGCommitments = 
[KZGCommitment(bls.G1_to_bytes48(x)) for x in C_minus_ys] C_minus_y_lincomb = g1_lincomb(C_minus_y_as_KZGCommitments, r_powers) return bls.pairing_check([ [bls.bytes48_to_G1(proof_lincomb), bls.neg(bls.bytes96_to_G2(KZG_SETUP_G2[1]))], - [bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2] + [bls.add(bls.bytes48_to_G1(C_minus_y_lincomb), bls.bytes48_to_G1(proof_z_lincomb)), bls.G2()] ]) ``` @@ -561,3 +561,4 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob], return verify_kzg_proof_batch(commitments, evaluation_challenges, ys, proofs) ``` + diff --git a/tests/core/pyspec/eth2spec/test/conftest.py b/tests/core/pyspec/eth2spec/test/conftest.py index a5f19e20cb..3026b48eb7 100644 --- a/tests/core/pyspec/eth2spec/test/conftest.py +++ b/tests/core/pyspec/eth2spec/test/conftest.py @@ -44,8 +44,11 @@ def pytest_addoption(parser): help="bls-default: make tests that are not dependent on BLS run without BLS" ) parser.addoption( - "--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro"], - help="bls-type: use 'pyecc' or 'milagro' implementation for BLS" + "--bls-type", action="store", type=str, default="py_ecc", choices=["py_ecc", "milagro", "arkworks", "fastest"], + help=( + "bls-type: use specified BLS implementation;" + "fastest: use milagro for signatures and arkworks for everything else (e.g. 
KZG)" + ) ) @@ -88,5 +91,9 @@ def bls_type(request): bls_utils.use_py_ecc() elif bls_type == "milagro": bls_utils.use_milagro() + elif bls_type == "arkworks": + bls_utils.use_arkworks() + elif bls_type == "fastest": + bls_utils.use_fastest() else: raise Exception(f"unrecognized bls type: {bls_type}") diff --git a/tests/core/pyspec/eth2spec/utils/bls.py b/tests/core/pyspec/eth2spec/utils/bls.py index aa060f4f9a..7ea22be46d 100644 --- a/tests/core/pyspec/eth2spec/utils/bls.py +++ b/tests/core/pyspec/eth2spec/utils/bls.py @@ -1,28 +1,49 @@ from py_ecc.bls import G2ProofOfPossession as py_ecc_bls from py_ecc.bls.g2_primatives import signature_to_G2 as _signature_to_G2 from py_ecc.optimized_bls12_381 import ( # noqa: F401 - G1, - G2, - Z1, - Z2, - FQ, - add, - multiply, - neg, - pairing, - final_exponentiate, - FQ12 + G1 as py_ecc_G1, + G2 as py_ecc_G2, + Z1 as py_ecc_Z1, + add as py_ecc_add, + multiply as py_ecc_mul, + neg as py_ecc_neg, + pairing as py_ecc_pairing, + final_exponentiate as py_ecc_final_exponentiate, + FQ12 as py_ecc_GT, ) from py_ecc.bls.g2_primitives import ( # noqa: F401 - G1_to_pubkey as G1_to_bytes48, - pubkey_to_G1 as bytes48_to_G1, - G2_to_signature as G2_to_bytes96, - signature_to_G2 as bytes96_to_G2, + G1_to_pubkey as py_ecc_G1_to_bytes48, + pubkey_to_G1 as py_ecc_bytes48_to_G1, + G2_to_signature as py_ecc_G2_to_bytes96, + signature_to_G2 as py_ecc_bytes96_to_G2, +) +from py_arkworks_bls12381 import ( + G1Point as arkworks_G1, + G2Point as arkworks_G2, + Scalar as arkworks_Scalar, + GT as arkworks_GT, ) import milagro_bls_binding as milagro_bls # noqa: F401 for BLS switching option +import py_arkworks_bls12381 as arkworks_bls # noqa: F401 for BLS switching option + + +class fastest_bls: + G1 = arkworks_G1 + G2 = arkworks_G2 + Scalar = arkworks_Scalar + GT = arkworks_GT + _AggregatePKs = milagro_bls._AggregatePKs + Sign = milagro_bls.Sign + Verify = milagro_bls.Verify + Aggregate = milagro_bls.Aggregate + AggregateVerify = 
milagro_bls.AggregateVerify + FastAggregateVerify = milagro_bls.FastAggregateVerify + SkToPk = milagro_bls.SkToPk + + # Flag to make BLS active or not. Used for testing, do not ignore BLS in production unless you know what you are doing. bls_active = True @@ -43,6 +64,14 @@ def use_milagro(): bls = milagro_bls +def use_arkworks(): + """ + Shortcut to use Arkworks as BLS library + """ + global bls + bls = arkworks_bls + + def use_py_ecc(): """ Shortcut to use Py-ecc as BLS library @@ -51,6 +80,14 @@ def use_py_ecc(): bls = py_ecc_bls +def use_fastest(): + """ + Shortcut to use Milagro for signatures and Arkworks for other BLS operations + """ + global bls + bls = fastest_bls + + def only_with_bls(alt_return=None): """ Decorator factory to make a function only run when BLS is active. Otherwise return the default. @@ -68,7 +105,10 @@ def entry(*args, **kw): @only_with_bls(alt_return=True) def Verify(PK, message, signature): try: - result = bls.Verify(PK, message, signature) + if bls == arkworks_bls: # no signature API in arkworks + result = py_ecc_bls.Verify(PK, message, signature) + else: + result = bls.Verify(PK, message, signature) except Exception: result = False finally: @@ -78,7 +118,10 @@ def Verify(PK, message, signature): @only_with_bls(alt_return=True) def AggregateVerify(pubkeys, messages, signature): try: - result = bls.AggregateVerify(list(pubkeys), list(messages), signature) + if bls == arkworks_bls: # no signature API in arkworks + result = py_ecc_bls.AggregateVerify(list(pubkeys), list(messages), signature) + else: + result = bls.AggregateVerify(list(pubkeys), list(messages), signature) except Exception: result = False finally: @@ -88,7 +131,10 @@ def AggregateVerify(pubkeys, messages, signature): @only_with_bls(alt_return=True) def FastAggregateVerify(pubkeys, message, signature): try: - result = bls.FastAggregateVerify(list(pubkeys), message, signature) + if bls == arkworks_bls: # no signature API in arkworks + result = 
py_ecc_bls.FastAggregateVerify(list(pubkeys), message, signature) + else: + result = bls.FastAggregateVerify(list(pubkeys), message, signature) except Exception: result = False finally: @@ -97,12 +143,16 @@ def FastAggregateVerify(pubkeys, message, signature): @only_with_bls(alt_return=STUB_SIGNATURE) def Aggregate(signatures): + if bls == arkworks_bls: # no signature API in arkworks + return py_ecc_bls.Aggregate(signatures) return bls.Aggregate(signatures) @only_with_bls(alt_return=STUB_SIGNATURE) def Sign(SK, message): - if bls == py_ecc_bls: + if bls == arkworks_bls: # no signature API in arkworks + return py_ecc_bls.Sign(SK, message) + elif bls == py_ecc_bls: return bls.Sign(SK, message) else: return bls.Sign(SK.to_bytes(32, 'big'), message) @@ -121,24 +171,143 @@ def AggregatePKs(pubkeys): # milagro_bls._AggregatePKs checks KeyValidate internally pass + if bls == arkworks_bls: # no signature API in arkworks + return py_ecc_bls._AggregatePKs(list(pubkeys)) + return bls._AggregatePKs(list(pubkeys)) @only_with_bls(alt_return=STUB_SIGNATURE) def SkToPk(SK): - if bls == py_ecc_bls: - return bls.SkToPk(SK) + if bls == py_ecc_bls or bls == arkworks_bls: # no signature API in arkworks + return py_ecc_bls.SkToPk(SK) else: return bls.SkToPk(SK.to_bytes(32, 'big')) def pairing_check(values): - p_q_1, p_q_2 = values - final_exponentiation = final_exponentiate( - pairing(p_q_1[1], p_q_1[0], final_exponentiate=False) - * pairing(p_q_2[1], p_q_2[0], final_exponentiate=False) - ) - return final_exponentiation == FQ12.one() + if bls == arkworks_bls or bls == fastest_bls: + p_q_1, p_q_2 = values + g1s = [p_q_1[0], p_q_2[0]] + g2s = [p_q_1[1], p_q_2[1]] + return arkworks_GT.multi_pairing(g1s, g2s) == arkworks_GT.one() + else: + p_q_1, p_q_2 = values + final_exponentiation = py_ecc_final_exponentiate( + py_ecc_pairing(p_q_1[1], p_q_1[0], final_exponentiate=False) + * py_ecc_pairing(p_q_2[1], p_q_2[0], final_exponentiate=False) + ) + return final_exponentiation == py_ecc_GT.one() 
+ + +def add(lhs, rhs): + """ + Performs point addition of `lhs` and `rhs`. + The points can either be in G1 or G2. + """ + if bls == arkworks_bls or bls == fastest_bls: + return lhs + rhs + return py_ecc_add(lhs, rhs) + + +def multiply(point, scalar): + """ + Performs Scalar multiplication between + `point` and `scalar`. + `point` can either be in G1 or G2 + """ + if bls == arkworks_bls or bls == fastest_bls: + int_as_bytes = scalar.to_bytes(32, 'little') + scalar = arkworks_Scalar.from_le_bytes(int_as_bytes) + return point * scalar + return py_ecc_mul(point, scalar) + + +def neg(point): + """ + Returns the point negation of `point` + `point` can either be in G1 or G2 + """ + if bls == arkworks_bls or bls == fastest_bls: + return -point + return py_ecc_neg(point) + + +def Z1(): + """ + Returns the identity point in G1 + """ + if bls == arkworks_bls or bls == fastest_bls: + return arkworks_G1.identity() + return py_ecc_Z1 + + +def G1(): + """ + Returns the chosen generator point in G1 + """ + if bls == arkworks_bls or bls == fastest_bls: + return arkworks_G1() + return py_ecc_G1 + + +def G2(): + """ + Returns the chosen generator point in G2 + """ + if bls == arkworks_bls or bls == fastest_bls: + return arkworks_G2() + return py_ecc_G2 + + +def G1_to_bytes48(point): + """ + Serializes a point in G1. + Returns a bytearray of size 48 as + we use the compressed format + """ + if bls == arkworks_bls or bls == fastest_bls: + return bytes(point.to_compressed_bytes()) + return py_ecc_G1_to_bytes48(point) + + +def G2_to_bytes96(point): + """ + Serializes a point in G2. + Returns a bytearray of size 96 as + we use the compressed format + """ + if bls == arkworks_bls or bls == fastest_bls: + return bytes(point.to_compressed_bytes()) + return py_ecc_G2_to_bytes96(point) + + +def bytes48_to_G1(bytes48): + """ + Deserializes a purported compressed serialized + point in G1. 
+ - No subgroup checks are performed + - If the bytearray is not a valid serialization + of a point in G1, then this method will raise + an exception + """ + if bls == arkworks_bls or bls == fastest_bls: + return arkworks_G1.from_compressed_bytes_unchecked(bytes48) + return py_ecc_bytes48_to_G1(bytes48) + + +def bytes96_to_G2(bytes96): + """ + Deserializes a purported compressed serialized + point in G2. + - No subgroup checks are performed + - If the bytearray is not a valid serialization + of a point in G2, then this method will raise + an exception + """ + if bls == arkworks_bls or bls == fastest_bls: + return arkworks_G2.from_compressed_bytes_unchecked(bytes96) + return py_ecc_bytes96_to_G2(bytes96) @only_with_bls(alt_return=True) diff --git a/tests/formats/kzg/README.md b/tests/formats/kzg/README.md new file mode 100644 index 0000000000..b5bd720393 --- /dev/null +++ b/tests/formats/kzg/README.md @@ -0,0 +1,15 @@ +# KZG tests + +A test type for KZG libraries. Tests all the public interfaces that a KZG library required to implement EIP-4844 needs to provide, as defined in `polynomial-commitments.md`. + +We do not recommend rolling your own crypto or using an untested KZG library. + +The KZG test suite runner has the following handlers: + +- [`blob_to_kzg_commitment`](./blob_to_kzg_commitment.md) +- [`compute_kzg_proof`](./compute_kzg_proof.md) +- [`verify_kzg_proof`](./verify_kzg_proof.md) +- [`compute_blob_kzg_proof`](./compute_blob_kzg_proof.md) +- [`verify_blob_kzg_proof`](./verify_blob_kzg_proof.md) +- [`verify_blob_kzg_proof_batch`](./verify_blob_kzg_proof_batch.md) + diff --git a/tests/formats/kzg/blob_to_kzg_commitment.md b/tests/formats/kzg/blob_to_kzg_commitment.md new file mode 100644 index 0000000000..dbb1556a1d --- /dev/null +++ b/tests/formats/kzg/blob_to_kzg_commitment.md @@ -0,0 +1,21 @@ +# Test format: Blob to KZG commitment + +Compute the KZG commitment for a given `blob`. 
+ +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + blob: Blob -- the data blob +output: KZGCommitment -- The KZG commitment +``` + +- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `blob_to_kzg_commitment` handler should compute the KZG commitment for `blob`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) it should error, i.e. the output should be `null`. diff --git a/tests/formats/kzg/compute_blob_kzg_proof.md b/tests/formats/kzg/compute_blob_kzg_proof.md new file mode 100644 index 0000000000..512f60ecb3 --- /dev/null +++ b/tests/formats/kzg/compute_blob_kzg_proof.md @@ -0,0 +1,21 @@ +# Test format: Compute blob KZG proof + +Compute the blob KZG proof for a given `blob`, that helps with quickly verifying that the KZG commitment for the blob is correct. + +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + blob: Blob -- the data blob +output: KZGProof -- The blob KZG proof +``` + +- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `compute_blob_kzg_proof` handler should compute the blob KZG proof for `blob`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) it should error, i.e. the output should be `null`. 
diff --git a/tests/formats/kzg/compute_kzg_proof.md b/tests/formats/kzg/compute_kzg_proof.md new file mode 100644 index 0000000000..bba13638f8 --- /dev/null +++ b/tests/formats/kzg/compute_kzg_proof.md @@ -0,0 +1,23 @@ +# Test format: Compute KZG proof + +Compute the KZG proof for a given `blob` and an evaluation point `z`. + +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + blob: Blob -- the data blob representing a polynomial + z: Bytes32 -- bytes encoding the BLS field element at which the polynomial should be evaluated +output: KZGProof -- The KZG proof +``` + +- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. +- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `compute_kzg_proof` handler should compute the KZG proof for evaluating the polynomial represented by `blob` at `z`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) or `z` is not a valid BLS field element, it should error, i.e. the output should be `null`. 
diff --git a/tests/formats/kzg/verify_blob_kzg_proof.md b/tests/formats/kzg/verify_blob_kzg_proof.md new file mode 100644 index 0000000000..dd0bcda5a9 --- /dev/null +++ b/tests/formats/kzg/verify_blob_kzg_proof.md @@ -0,0 +1,23 @@ +# Test format: Verify blob KZG proof + +Use the blob KZG proof to verify that the KZG commitment for a given `blob` is correct + +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + blob: Blob -- the data blob + commitment: KZGCommitment -- the KZG commitment to the data blob + proof: KZGProof -- The KZG proof +output: bool -- true (valid proof) or false (incorrect proof) +``` + +- `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `verify_blob_kzg_proof` handler should verify that `commitment` is a correct KZG commitment to `blob` by using the blob KZG proof `proof`, and the result should match the expected `output`. If the commitment or proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or `blob` is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), it should error, i.e. the output should be `null`. 
diff --git a/tests/formats/kzg/verify_blob_kzg_proof_batch.md b/tests/formats/kzg/verify_blob_kzg_proof_batch.md new file mode 100644 index 0000000000..3bcc74d6bb --- /dev/null +++ b/tests/formats/kzg/verify_blob_kzg_proof_batch.md @@ -0,0 +1,23 @@ +# Test format: Verify blob KZG proof batch + +Use the blob KZG proofs to verify that the KZG commitments for given `blob`s are correct + +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + blob: List[Blob] -- the data blob + commitment: List[KZGCommitment] -- the KZG commitment to the data blob + proof: List[KZGProof] -- The KZG proof +output: bool -- true (all proofs are valid) or false (some proofs incorrect) +``` + +- `blob`s here are encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `verify_blob_kzg_proof_batch` handler should verify that `commitments` are correct KZG commitments to `blobs` by using the blob KZG proofs `proofs`, and the result should match the expected `output`. If any of the commitments or proofs are invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or any blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element), it should error, i.e. the output should be `null`. diff --git a/tests/formats/kzg/verify_kzg_proof.md b/tests/formats/kzg/verify_kzg_proof.md new file mode 100644 index 0000000000..143466b66f --- /dev/null +++ b/tests/formats/kzg/verify_kzg_proof.md @@ -0,0 +1,25 @@ +# Test format: Verify KZG proof + +Verify the KZG proof for a given `blob` and an evaluation point `z` that claims to result in a value of `y`. 
+ +## Test case format + +The test data is declared in a `data.yaml` file: + +```yaml +input: + commitment: KZGCommitment -- the KZG commitment to the data blob + z: Bytes32 -- bytes encoding the BLS field element at which the polynomial should be evaluated + y: Bytes32 -- the claimed result of the evaluation + proof: KZGProof -- The KZG proof +output: bool -- true (valid proof) or false (incorrect proof) +``` + +- `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`. +- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`. + +All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. + +## Condition + +The `verify_kzg_proof` handler should verify the KZG proof for evaluating the polynomial represented by `blob` at `z` resulting in the value `y`, and the result should match the expected `output`. If the commitment or proof is invalid (e.g. not on the curve or not in the G1 subgroup of the BLS curve) or `z` or `y` are not a valid BLS field element, it should error, i.e. the output should be `null`. 
diff --git a/tests/generators/kzg_4844/README.md b/tests/generators/kzg_4844/README.md new file mode 100644 index 0000000000..ab81a85e86 --- /dev/null +++ b/tests/generators/kzg_4844/README.md @@ -0,0 +1,3 @@ +# KZG 4844 Test Generator + +These tests are specific to the KZG API required for implementing EIP-4844 \ No newline at end of file diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/kzg_4844/main.py new file mode 100644 index 0000000000..616e2cc461 --- /dev/null +++ b/tests/generators/kzg_4844/main.py @@ -0,0 +1,579 @@ +""" +KZG 4844 test vectors generator +""" + +from hashlib import sha256 +from typing import Tuple, Iterable, Any, Callable, Dict + +from eth_utils import ( + encode_hex, + int_to_big_endian, +) + +from eth2spec.utils import bls +from eth2spec.test.helpers.constants import DENEB +from eth2spec.test.helpers.typing import SpecForkName +from eth2spec.gen_helpers.gen_base import gen_runner, gen_typing +from eth2spec.deneb import spec + + +def expect_exception(func, *args): + try: + func(*args) + except Exception: + pass + else: + raise Exception("should have raised exception") + + +def field_element_bytes(x): + return int.to_bytes(x % spec.BLS_MODULUS, 32, "little") + + +def encode_hex_list(a): + return [encode_hex(x) for x in a] + + +def bls_add_one(x): + """ + Adds "one" (actually bls.G1()) to a compressed group element. + Useful to compute definitely incorrect proofs. 
+ """ + return bls.G1_to_bytes48( + bls.add(bls.bytes48_to_G1(x), bls.G1()) + ) + + +def evaluate_blob_at(blob, z): + return field_element_bytes( + spec.evaluate_polynomial_in_evaluation_form(spec.blob_to_polynomial(blob), spec.bytes_to_bls_field(z)) + ) + + +G1 = bls.G1_to_bytes48(bls.G1()) +P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcdef") +P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcde0") +BLS_MODULUS_BYTES = spec.BLS_MODULUS.to_bytes(32, spec.ENDIANNESS) + +BLOB_ALL_ZEROS = spec.Blob() +BLOB_RANDOM_VALID1 = spec.Blob(b''.join([field_element_bytes(pow(2, n + 256, spec.BLS_MODULUS)) for n in range(4096)])) +BLOB_RANDOM_VALID2 = spec.Blob(b''.join([field_element_bytes(pow(3, n + 256, spec.BLS_MODULUS)) for n in range(4096)])) +BLOB_RANDOM_VALID3 = spec.Blob(b''.join([field_element_bytes(pow(5, n + 256, spec.BLS_MODULUS)) for n in range(4096)])) +BLOB_ALL_MODULUS_MINUS_ONE = spec.Blob(b''.join([field_element_bytes(spec.BLS_MODULUS - 1) for n in range(4096)])) +BLOB_ALMOST_ZERO = spec.Blob(b''.join([field_element_bytes(1 if n == 3211 else 0) for n in range(4096)])) +BLOB_INVALID = spec.Blob(b'\xFF' * 4096 * 32) +BLOB_INVALID_CLOSE = spec.Blob(b''.join( + [BLS_MODULUS_BYTES if n == 2111 else field_element_bytes(0) for n in range(4096)] +)) + +VALID_BLOBS = [BLOB_ALL_ZEROS, BLOB_RANDOM_VALID1, BLOB_RANDOM_VALID2, + BLOB_RANDOM_VALID3, BLOB_ALL_MODULUS_MINUS_ONE, BLOB_ALMOST_ZERO] +INVALID_BLOBS = [BLOB_INVALID, BLOB_INVALID_CLOSE] +VALID_ZS = [field_element_bytes(x) for x in [0, 1, 2, pow(5, 1235, spec.BLS_MODULUS), + spec.BLS_MODULUS - 1, spec.ROOTS_OF_UNITY[1]]] +INVALID_ZS = [x.to_bytes(32, spec.ENDIANNESS) for x in [spec.BLS_MODULUS, 2**256 - 1, 2**256 - 2**128]] + + +def hash(x): + return sha256(x).digest() + + +def int_to_hex(n: int, byte_length: int = None) -> str: + byte_value = 
int_to_big_endian(n) + if byte_length: + byte_value = byte_value.rjust(byte_length, b'\x00') + return encode_hex(byte_value) + + +def case01_blob_to_kzg_commitment(): + # Valid cases + for blob in VALID_BLOBS: + commitment = spec.blob_to_kzg_commitment(blob) + identifier = f'{encode_hex(hash(blob))}' + yield f'blob_to_kzg_commitment_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + }, + 'output': encode_hex(commitment) + } + + # Edge case: Invalid blobs + for blob in INVALID_BLOBS: + identifier = f'{encode_hex(hash(blob))}' + expect_exception(spec.blob_to_kzg_commitment, blob) + yield f'blob_to_kzg_commitment_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob) + }, + 'output': None + } + + +def case02_compute_kzg_proof(): + # Valid cases + for blob in VALID_BLOBS: + for z in VALID_ZS: + proof = spec.compute_kzg_proof(blob, z) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'compute_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'z': encode_hex(z), + }, + 'output': encode_hex(proof) + } + + # Edge case: Invalid blobs + for blob in INVALID_BLOBS: + z = VALID_ZS[0] + expect_exception(spec.compute_kzg_proof, blob, z) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'compute_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'z': encode_hex(z), + }, + 'output': None + } + + # Edge case: Invalid z + for z in INVALID_ZS: + blob = VALID_BLOBS[4] + expect_exception(spec.compute_kzg_proof, blob, z) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'compute_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'z': encode_hex(z), + }, + 'output': None + } + + +def case03_verify_kzg_proof(): + # Valid 
cases + for blob in VALID_BLOBS: + for z in VALID_ZS: + proof = spec.compute_kzg_proof(blob, z) + commitment = spec.blob_to_kzg_commitment(blob) + y = evaluate_blob_at(blob, z) + assert spec.verify_kzg_proof(commitment, z, y, proof) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'verify_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': True + } + + # Incorrect proofs + for blob in VALID_BLOBS: + for z in VALID_ZS: + proof = bls_add_one(spec.compute_kzg_proof(blob, z)) + commitment = spec.blob_to_kzg_commitment(blob) + y = evaluate_blob_at(blob, z) + assert not spec.verify_kzg_proof(commitment, z, y, proof) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'verify_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': False + } + + # Edge case: Invalid z + for z in INVALID_ZS: + blob, validz = VALID_BLOBS[4], VALID_ZS[1] + proof = spec.compute_kzg_proof(blob, validz) + commitment = spec.blob_to_kzg_commitment(blob) + y = VALID_ZS[3] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + yield f'verify_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid y + blob, z = VALID_BLOBS[1], VALID_ZS[1] + proof = spec.compute_kzg_proof(blob, z) + commitment = spec.blob_to_kzg_commitment(blob) + y = INVALID_ZS[0] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_invalid_y', { + 
'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, not in G1 + blob, z = VALID_BLOBS[2], VALID_ZS[0] + proof = P1_NOT_IN_G1 + commitment = spec.blob_to_kzg_commitment(blob) + y = VALID_ZS[1] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_proof_not_in_G1', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, not on curve + blob, z = VALID_BLOBS[3], VALID_ZS[1] + proof = P1_NOT_ON_CURVE + commitment = spec.blob_to_kzg_commitment(blob) + y = VALID_ZS[1] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_proof_not_on_curve', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, not in G1 + blob, z = VALID_BLOBS[4], VALID_ZS[3] + proof = spec.compute_kzg_proof(blob, z) + commitment = P1_NOT_IN_G1 + y = VALID_ZS[2] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_commitment_not_in_G1', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, not on curve + blob, z = VALID_BLOBS[1], VALID_ZS[4] + proof = spec.compute_kzg_proof(blob, z) + commitment = P1_NOT_ON_CURVE + y = VALID_ZS[3] + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_commitment_not_on_curve', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + +def case04_compute_blob_kzg_proof(): 
+ # Valid cases + for blob in VALID_BLOBS: + proof = spec.compute_blob_kzg_proof(blob) + identifier = f'{encode_hex(hash(blob))}' + yield f'compute_blob_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + }, + 'output': encode_hex(proof) + } + + # Edge case: Invalid blob + for blob in INVALID_BLOBS: + expect_exception(spec.compute_blob_kzg_proof, blob) + identifier = f'{encode_hex(hash(blob))}' + yield f'compute_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + }, + 'output': None + } + + +def case05_verify_blob_kzg_proof(): + # Valid cases + for blob in VALID_BLOBS: + proof = spec.compute_blob_kzg_proof(blob) + commitment = spec.blob_to_kzg_commitment(blob) + assert spec.verify_blob_kzg_proof(blob, commitment, proof) + identifier = f'{encode_hex(hash(blob))}' + yield f'verify_blob_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': True + } + + # Incorrect proofs + for blob in VALID_BLOBS: + proof = bls_add_one(spec.compute_blob_kzg_proof(blob)) + commitment = spec.blob_to_kzg_commitment(blob) + assert not spec.verify_blob_kzg_proof(blob, commitment, proof) + identifier = f'{encode_hex(hash(blob))}' + yield f'verify_blob_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': False + } + + # Edge case: Invalid proof, not in G1 + blob = VALID_BLOBS[2] + proof = P1_NOT_IN_G1 + commitment = G1 + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_proof_not_in_G1', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': 
encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, not on curve + blob = VALID_BLOBS[1] + proof = P1_NOT_ON_CURVE + commitment = G1 + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_proof_not_on_curve', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, not in G1 + blob = VALID_BLOBS[0] + proof = G1 + commitment = P1_NOT_IN_G1 + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_commitment_not_in_G1', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, not on curve + blob = VALID_BLOBS[2] + proof = G1 + commitment = P1_NOT_ON_CURVE + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_commitment_not_on_curve', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid blob + for blob in INVALID_BLOBS: + proof = G1 + commitment = G1 + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + identifier = f'{encode_hex(hash(blob))}' + yield f'verify_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + +def case06_verify_blob_kzg_proof_batch(): + # Valid cases + proofs = [] + commitments = [] + for blob in VALID_BLOBS: + proofs.append(spec.compute_blob_kzg_proof(blob)) + commitments.append(spec.blob_to_kzg_commitment(blob)) + + for i in range(len(proofs)): + assert spec.verify_blob_kzg_proof_batch(VALID_BLOBS[:i], commitments[:i], proofs[:i]) + 
identifier = f'{encode_hex(hash(b"".join(VALID_BLOBS[:i])))}' + yield f'verify_blob_kzg_proof_batch_case_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS[:i]), + 'commitments': encode_hex_list(commitments[:i]), + 'proofs': encode_hex_list(proofs[:i]), + }, + 'output': True + } + + # Incorrect proof + proofs_incorrect = [bls_add_one(proofs[0])] + proofs[1:] + assert not spec.verify_blob_kzg_proof_batch(VALID_BLOBS, commitments, proofs_incorrect) + yield 'verify_blob_kzg_proof_batch_case_invalid_proof', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs_incorrect), + }, + 'output': False + } + + # Edge case: Invalid proof, not in G1 + proofs_invalid_notG1 = [P1_NOT_IN_G1] + proofs[1:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notG1) + yield 'verify_blob_kzg_proof_batch_case_proof_not_in_G1', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs_invalid_notG1), + }, + 'output': None + } + + # Edge case: Invalid proof, not on curve + proofs_invalid_notCurve = proofs[:1] + [P1_NOT_ON_CURVE] + proofs[2:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notCurve) + yield 'verify_blob_kzg_proof_batch_case_proof_not_on_curve', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs_invalid_notCurve), + }, + 'output': None + } + + # Edge case: Invalid commitment, not in G1 + commitments_invalid_notG1 = commitments[:2] + [P1_NOT_IN_G1] + commitments[3:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_notG1) + yield 'verify_blob_kzg_proof_batch_case_commitment_not_in_G1', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 
'commitments': encode_hex_list(commitments_invalid_notG1), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Invalid commitment, not on curve + commitments_invalid_notCurve = commitments[:3] + [P1_NOT_ON_CURVE] + commitments[4:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_notCurve) + yield 'verify_blob_kzg_proof_batch_case_not_on_curve', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments_invalid_notCurve), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Invalid blob + blobs_invalid = VALID_BLOBS[:4] + [BLOB_INVALID] + VALID_BLOBS[5:] + expect_exception(spec.verify_blob_kzg_proof_batch, blobs_invalid, commitments, proofs) + yield 'verify_blob_kzg_proof_batch_case_invalid_blob', { + 'input': { + 'blobs': encode_hex_list(blobs_invalid), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Blob length different + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS[:-1], commitments, proofs) + yield 'verify_blob_kzg_proof_batch_case_blob_length_different', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS[:-1]), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Commitment length different + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments[:-1], proofs) + yield 'verify_blob_kzg_proof_batch_case_commitment_length_different', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments[:-1]), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Proof length different + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs[:-1]) + yield 'verify_blob_kzg_proof_batch_case_proof_length_different', { + 'input': { + 'blobs': 
encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs[:-1]), + }, + 'output': None + } + + +def create_provider(fork_name: SpecForkName, + handler_name: str, + test_case_fn: Callable[[], Iterable[Tuple[str, Dict[str, Any]]]]) -> gen_typing.TestProvider: + + def prepare_fn() -> None: + # Nothing to load / change in spec. Maybe in future forks. + # Put the tests into the general config category, to not require any particular configuration. + return + + def cases_fn() -> Iterable[gen_typing.TestCase]: + for data in test_case_fn(): + (case_name, case_content) = data + yield gen_typing.TestCase( + fork_name=fork_name, + preset_name='general', + runner_name='kzg', + handler_name=handler_name, + suite_name='small', + case_name=case_name, + case_fn=lambda: [('data', 'data', case_content)] + ) + + return gen_typing.TestProvider(prepare=prepare_fn, make_cases=cases_fn) + + +if __name__ == "__main__": + bls.use_arkworks() + gen_runner.run_generator("kzg", [ + # DENEB + create_provider(DENEB, 'blob_to_kzg_commitment', case01_blob_to_kzg_commitment), + create_provider(DENEB, 'compute_kzg_proof', case02_compute_kzg_proof), + create_provider(DENEB, 'verify_kzg_proof', case03_verify_kzg_proof), + create_provider(DENEB, 'compute_blob_kzg_proof', case04_compute_blob_kzg_proof), + create_provider(DENEB, 'verify_blob_kzg_proof', case05_verify_blob_kzg_proof), + create_provider(DENEB, 'verify_blob_kzg_proof_batch', case06_verify_blob_kzg_proof_batch), + ]) diff --git a/tests/generators/kzg_4844/requirements.txt b/tests/generators/kzg_4844/requirements.txt new file mode 100644 index 0000000000..1822486863 --- /dev/null +++ b/tests/generators/kzg_4844/requirements.txt @@ -0,0 +1,2 @@ +pytest>=4.4 +../../../[generator] From 1b4840c96704a1f1bcaa9d37df4d9c368ad2d047 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Sat, 4 Mar 2023 19:20:01 +0000 Subject: [PATCH 096/210] Fix comment for `evaluate_polynomial_in_evaluation_form` to 
reflect that it can now also be used in the domain --- specs/deneb/polynomial-commitments.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 7b65b44b62..1b9de42a3b 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -303,8 +303,10 @@ def compute_powers(x: BLSFieldElement, n: uint64) -> Sequence[BLSFieldElement]: def evaluate_polynomial_in_evaluation_form(polynomial: Polynomial, z: BLSFieldElement) -> BLSFieldElement: """ - Evaluate a polynomial (in evaluation form) at an arbitrary point ``z`` that is not in the domain. - Uses the barycentric formula: + Evaluate a polynomial (in evaluation form) at an arbitrary point ``z``. + - When ``z`` is in the domain, the evaluation can be found by indexing the polynomial at the + position that ``z`` is in the domain. + - When ``z`` is not in the domain, the barycentric formula is used: f(z) = (z**WIDTH - 1) / WIDTH * sum_(i=0)^WIDTH (f(DOMAIN[i]) * DOMAIN[i]) / (z - DOMAIN[i]) """ width = len(polynomial) From ca8a51fcf9b6b14135126d82e687010b0b4a57d5 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Mon, 6 Mar 2023 22:56:17 +0000 Subject: [PATCH 097/210] More CI tests for polynomial commitments --- .../test_polynomial_commitments.py | 98 +++++++++++++++++++ 1 file changed, 98 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 67dce5c5b3..b4904580ba 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -9,6 +9,24 @@ get_poly_in_both_forms, eval_poly_in_coeff_form, ) +from eth2spec.utils import bls + + +BLS_MODULUS = 
bls.curve_order + + +def bls_add_one(x): + """ + Adds "one" (actually bls.G1()) to a compressed group element. + Useful to compute definitely incorrect proofs. + """ + return bls.G1_to_bytes48( + bls.add(bls.bytes48_to_G1(x), bls.G1()) + ) + + +def field_element_bytes(x): + return int.to_bytes(x % BLS_MODULUS, 32, "little") @with_deneb_and_later @@ -18,12 +36,53 @@ def test_verify_kzg_proof(spec, state): blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) polynomial = spec.blob_to_polynomial(blob) + proof = spec.compute_kzg_proof(blob, field_element_bytes(x)) + + y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x) + assert spec.verify_kzg_proof(commitment, field_element_bytes(x), field_element_bytes(y), proof) + + +@with_deneb_and_later +@spec_state_test +def test_verify_kzg_proof_incorrect_proof(spec, state): + x = 3465 + blob = get_sample_blob(spec) + commitment = spec.blob_to_kzg_commitment(blob) + polynomial = spec.blob_to_polynomial(blob) + proof = spec.compute_kzg_proof(blob, field_element_bytes(x)) + proof = bls_add_one(proof) + + y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x) + assert not spec.verify_kzg_proof(commitment, field_element_bytes(x), field_element_bytes(y), proof) + + +@with_deneb_and_later +@spec_state_test +def test_verify_kzg_proof_impl(spec, state): + x = spec.BLS_MODULUS - 1 + blob = get_sample_blob(spec) + commitment = spec.blob_to_kzg_commitment(blob) + polynomial = spec.blob_to_polynomial(blob) proof = spec.compute_kzg_proof_impl(polynomial, x) y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x) assert spec.verify_kzg_proof_impl(commitment, x, y, proof) +@with_deneb_and_later +@spec_state_test +def test_verify_kzg_proof_impl_incorrect_proof(spec, state): + x = 324561 + blob = get_sample_blob(spec) + commitment = spec.blob_to_kzg_commitment(blob) + polynomial = spec.blob_to_polynomial(blob) + proof = spec.compute_kzg_proof_impl(polynomial, x) + proof = bls_add_one(proof) + + y 
= spec.evaluate_polynomial_in_evaluation_form(polynomial, x) + assert not spec.verify_kzg_proof_impl(commitment, x, y, proof) + + @with_deneb_and_later @spec_state_test def test_barycentric_outside_domain(spec, state): @@ -107,3 +166,42 @@ def test_compute_kzg_proof_within_domain(spec, state): y = spec.evaluate_polynomial_in_evaluation_form(polynomial, z) assert spec.verify_kzg_proof_impl(commitment, z, y, proof) + + +@with_deneb_and_later +@spec_state_test +def test_verify_blob_kzg_proof(spec, state): + """ + Create and verify KZG proof that p(z) == y + where z is in the domain of our KZG scheme (i.e. a relevant root of unity). + """ + blob = get_sample_blob(spec) + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob) + + assert spec.verify_blob_kzg_proof(blob, commitment, proof) + + +@with_deneb_and_later +@spec_state_test +def test_verify_blob_kzg_proof_incorrect_proof(spec, state): + """ + Create and verify KZG proof that p(z) == y + where z is in the domain of our KZG scheme (i.e. a relevant root of unity). 
+ """ + blob = get_sample_blob(spec) + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob) + proof = bls_add_one(proof) + + assert not spec.verify_blob_kzg_proof(blob, commitment, proof) + + +@with_deneb_and_later +@spec_state_test +def test_validate_kzg_g1(spec, state): + """ + Verify that `validate_kzg_g1` allows the neutral element in G1 + """ + + spec.validate_kzg_g1(bls.G1_to_bytes48(bls.Z1())) From 661cca59c065af32cd6ede1ef673be6093ed26e1 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 7 Mar 2023 16:52:09 +0800 Subject: [PATCH 098/210] Import `curve_order as BLS_MODULUS` --- .../polynomial_commitments/test_polynomial_commitments.py | 2 +- tests/core/pyspec/eth2spec/utils/bls.py | 1 + 2 files changed, 2 insertions(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index b4904580ba..9b60f7e7c4 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -12,7 +12,7 @@ from eth2spec.utils import bls -BLS_MODULUS = bls.curve_order +BLS_MODULUS = bls.BLS_MODULUS def bls_add_one(x): diff --git a/tests/core/pyspec/eth2spec/utils/bls.py b/tests/core/pyspec/eth2spec/utils/bls.py index 7ea22be46d..7dd9597ebe 100644 --- a/tests/core/pyspec/eth2spec/utils/bls.py +++ b/tests/core/pyspec/eth2spec/utils/bls.py @@ -12,6 +12,7 @@ FQ12 as py_ecc_GT, ) from py_ecc.bls.g2_primitives import ( # noqa: F401 + curve_order as BLS_MODULUS, G1_to_pubkey as py_ecc_G1_to_bytes48, pubkey_to_G1 as py_ecc_bytes48_to_G1, G2_to_signature as py_ecc_G2_to_bytes96, From 81ab7de44a6815f388f4af0292ce9e9f11544197 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 7 Mar 2023 10:54:17 +0000 Subject: 
[PATCH 099/210] Add unit tests for validate_kzg_g1 and bytes_to_bls_field --- .../test_polynomial_commitments.py | 101 ++++++++++++++++-- 1 file changed, 94 insertions(+), 7 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 9b60f7e7c4..a13ce68518 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -3,6 +3,7 @@ from eth2spec.test.context import ( spec_state_test, with_deneb_and_later, + expect_assertion_error ) from eth2spec.test.helpers.sharding import ( get_sample_blob, @@ -10,9 +11,13 @@ eval_poly_in_coeff_form, ) from eth2spec.utils import bls +from eth2spec.utils.bls import BLS_MODULUS - -BLS_MODULUS = bls.BLS_MODULUS +G1 = bls.G1_to_bytes48(bls.G1()) +P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcdef") +P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcde0") def bls_add_one(x): @@ -32,6 +37,9 @@ def field_element_bytes(x): @with_deneb_and_later @spec_state_test def test_verify_kzg_proof(spec, state): + """ + Test the wrapper functions (taking bytes arguments) for computing and verifying KZG proofs. + """ x = 3 blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -45,6 +53,9 @@ def test_verify_kzg_proof(spec, state): @with_deneb_and_later @spec_state_test def test_verify_kzg_proof_incorrect_proof(spec, state): + """ + Test the wrapper function `verify_kzg_proof` fails on an incorrect proof. 
+ """ x = 3465 blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -59,6 +70,9 @@ def test_verify_kzg_proof_incorrect_proof(spec, state): @with_deneb_and_later @spec_state_test def test_verify_kzg_proof_impl(spec, state): + """ + Test the implementation functions (taking field element arguments) for computing and verifying KZG proofs. + """ x = spec.BLS_MODULUS - 1 blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -72,6 +86,9 @@ def test_verify_kzg_proof_impl(spec, state): @with_deneb_and_later @spec_state_test def test_verify_kzg_proof_impl_incorrect_proof(spec, state): + """ + Test the implementation function `verify_kzg_proof` fails on an incorrect proof. + """ x = 324561 blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -172,8 +189,7 @@ def test_compute_kzg_proof_within_domain(spec, state): @spec_state_test def test_verify_blob_kzg_proof(spec, state): """ - Create and verify KZG proof that p(z) == y - where z is in the domain of our KZG scheme (i.e. a relevant root of unity). + Test the functions to compute and verify a blob KZG proof """ blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -186,8 +202,7 @@ def test_verify_blob_kzg_proof(spec, state): @spec_state_test def test_verify_blob_kzg_proof_incorrect_proof(spec, state): """ - Create and verify KZG proof that p(z) == y - where z is in the domain of our KZG scheme (i.e. a relevant root of unity). 
+ Check that `verify_blob_kzg_proof` fails on an incorrect proof """ blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) @@ -199,9 +214,81 @@ def test_verify_blob_kzg_proof_incorrect_proof(spec, state): @with_deneb_and_later @spec_state_test -def test_validate_kzg_g1(spec, state): +def test_validate_kzg_g1_generator(spec, state): + """ + Verify that `validate_kzg_g1` allows the generator G1 + """ + + spec.validate_kzg_g1(bls.G1_to_bytes48(bls.G1())) + + +@with_deneb_and_later +@spec_state_test +def test_validate_kzg_g1_neutral_element(spec, state): """ Verify that `validate_kzg_g1` allows the neutral element in G1 """ spec.validate_kzg_g1(bls.G1_to_bytes48(bls.Z1())) + + +@with_deneb_and_later +@spec_state_test +def test_validate_kzg_g1_not_in_g1(spec, state): + """ + Verify that `validate_kzg_g1` fails on point not in G1 + """ + + expect_assertion_error(lambda: spec.validate_kzg_g1(P1_NOT_IN_G1)) + + +@with_deneb_and_later +@spec_state_test +def test_validate_kzg_g1_not_on_curve(spec, state): + """ + Verify that `validate_kzg_g1` fails on point not in G1 + """ + + expect_assertion_error(lambda: spec.validate_kzg_g1(P1_NOT_ON_CURVE)) + + +@with_deneb_and_later +@spec_state_test +def test_bytes_to_bls_field_zero(spec, state): + """ + Verify that `bytes_to_bls_field` handles zero + """ + + spec.bytes_to_bls_field(b"\0" * 32) + + +@with_deneb_and_later +@spec_state_test +def test_bytes_to_bls_field_modulus_minus_one(spec, state): + """ + Verify that `bytes_to_bls_field` handles modulus minus one + """ + + spec.bytes_to_bls_field((BLS_MODULUS - 1).to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS)) + + +@with_deneb_and_later +@spec_state_test +def test_bytes_to_bls_field_modulus(spec, state): + """ + Verify that `bytes_to_bls_field` fails on BLS modulus + """ + + expect_assertion_error(lambda: spec.bytes_to_bls_field( + BLS_MODULUS.to_bytes(spec.BYTES_PER_FIELD_ELEMENT, spec.ENDIANNESS) + )) + + +@with_deneb_and_later +@spec_state_test +def 
test_bytes_to_bls_field_max(spec, state): + """ + Verify that `bytes_to_bls_field` fails on 2**256 - 1 + """ + + expect_assertion_error(lambda: spec.bytes_to_bls_field(b"\xFF" * 32)) From cce82b4938c6f05fc137e07739f5fdcca6ce71c7 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 7 Mar 2023 10:56:16 +0000 Subject: [PATCH 100/210] Remove spec. for getting BLS_MODULUS --- .../polynomial_commitments/test_polynomial_commitments.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index a13ce68518..91115a299a 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -73,7 +73,7 @@ def test_verify_kzg_proof_impl(spec, state): """ Test the implementation functions (taking field element arguments) for computing and verifying KZG proofs. 
""" - x = spec.BLS_MODULUS - 1 + x = BLS_MODULUS - 1 blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) polynomial = spec.blob_to_polynomial(blob) @@ -119,9 +119,9 @@ def test_barycentric_outside_domain(spec, state): for _ in range(n_samples): # Get a random evaluation point and make sure it's not a root of unity - z = rng.randint(0, spec.BLS_MODULUS - 1) + z = rng.randint(0, BLS_MODULUS - 1) while z in roots_of_unity_brp: - z = rng.randint(0, spec.BLS_MODULUS - 1) + z = rng.randint(0, BLS_MODULUS - 1) # Get p(z) by evaluating poly in coefficient form p_z_coeff = eval_poly_in_coeff_form(spec, poly_coeff, z) From 15033d28b9f0f3a05a26de6ec093d55a7860a305 Mon Sep 17 00:00:00 2001 From: dankrad Date: Tue, 7 Mar 2023 17:50:56 +0000 Subject: [PATCH 101/210] Modify compute_[blob_]kzg_proof to remove superfluous computations (#3280) Add parameter `commitment` to `compute_blob_kzg_proof` and output `y` to `compute_kzg_proof` --- specs/deneb/polynomial-commitments.md | 17 ++--- specs/deneb/validator.md | 2 +- .../test_polynomial_commitments.py | 6 +- tests/formats/kzg/compute_blob_kzg_proof.md | 2 + tests/formats/kzg/compute_kzg_proof.md | 5 +- tests/generators/kzg_4844/main.py | 62 +++++++++++++------ 6 files changed, 62 insertions(+), 32 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 1b9de42a3b..24ae08822b 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -431,14 +431,15 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment], #### `compute_kzg_proof` ```python -def compute_kzg_proof(blob: Blob, z: Bytes32) -> KZGProof: +def compute_kzg_proof(blob: Blob, z: Bytes32) -> Tuple[KZGProof, Bytes32]: """ Compute KZG proof at point `z` for the polynomial represented by `blob`. Do this by computing the quotient polynomial in evaluation form: q(x) = (p(x) - p(z)) / (x - z). Public method. 
""" polynomial = blob_to_polynomial(blob) - return compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z)) + proof, y = compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z)) + return proof, y.to_bytes(BYTES_PER_FIELD_ELEMENT, ENDIANNESS) ``` #### `compute_quotient_eval_within_domain` @@ -472,7 +473,7 @@ def compute_quotient_eval_within_domain(z: BLSFieldElement, #### `compute_kzg_proof_impl` ```python -def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGProof: +def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> Tuple[KZGProof, BLSFieldElement]: """ Helper function for `compute_kzg_proof()` and `compute_blob_kzg_proof()`. """ @@ -496,21 +497,23 @@ def compute_kzg_proof_impl(polynomial: Polynomial, z: BLSFieldElement) -> KZGPro # Compute: q(x_i) = (p(x_i) - p(z)) / (x_i - z). quotient_polynomial[i] = div(a, b) - return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial)) + return KZGProof(g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), quotient_polynomial)), y ``` #### `compute_blob_kzg_proof` ```python -def compute_blob_kzg_proof(blob: Blob) -> KZGProof: +def compute_blob_kzg_proof(blob: Blob, commitment_bytes: Bytes48) -> KZGProof: """ Given a blob, return the KZG proof that is used to verify it against the commitment. + This method does not verify that the commitment is correct with respect to `blob`. Public method. 
""" - commitment = blob_to_kzg_commitment(blob) + commitment = bytes_to_kzg_commitment(commitment_bytes) polynomial = blob_to_polynomial(blob) evaluation_challenge = compute_challenge(blob, commitment) - return compute_kzg_proof_impl(polynomial, evaluation_challenge) + proof, _ = compute_kzg_proof_impl(polynomial, evaluation_challenge) + return proof ``` #### `verify_blob_kzg_proof` diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 77edb957f8..50ab832e00 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -96,7 +96,7 @@ def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blo block_parent_root=block.parent_root, blob=blob, kzg_commitment=block.body.blob_kzg_commitments[index], - kzg_proof=compute_blob_kzg_proof(blob), + kzg_proof=compute_blob_kzg_proof(blob, block.body.blob_kzg_commitments[index]), ) for index, blob in enumerate(blobs) ] diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 67dce5c5b3..e1e67d639a 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -18,9 +18,8 @@ def test_verify_kzg_proof(spec, state): blob = get_sample_blob(spec) commitment = spec.blob_to_kzg_commitment(blob) polynomial = spec.blob_to_polynomial(blob) - proof = spec.compute_kzg_proof_impl(polynomial, x) + proof, y = spec.compute_kzg_proof_impl(polynomial, x) - y = spec.evaluate_polynomial_in_evaluation_form(polynomial, x) assert spec.verify_kzg_proof_impl(commitment, x, y, proof) @@ -103,7 +102,6 @@ def test_compute_kzg_proof_within_domain(spec, state): roots_of_unity_brp = spec.bit_reversal_permutation(spec.ROOTS_OF_UNITY) for i, z in enumerate(roots_of_unity_brp): - proof = 
spec.compute_kzg_proof_impl(polynomial, z) + proof, y = spec.compute_kzg_proof_impl(polynomial, z) - y = spec.evaluate_polynomial_in_evaluation_form(polynomial, z) assert spec.verify_kzg_proof_impl(commitment, z, y, proof) diff --git a/tests/formats/kzg/compute_blob_kzg_proof.md b/tests/formats/kzg/compute_blob_kzg_proof.md index 512f60ecb3..62fce37231 100644 --- a/tests/formats/kzg/compute_blob_kzg_proof.md +++ b/tests/formats/kzg/compute_blob_kzg_proof.md @@ -9,10 +9,12 @@ The test data is declared in a `data.yaml` file: ```yaml input: blob: Blob -- the data blob + commitment: Bytes48 -- the commitment to the blob output: KZGProof -- The blob KZG proof ``` - `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. +- `commitment` here is encoded as a string: hexadecimal encoding of `48` bytes, prefixed with `0x`. All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. diff --git a/tests/formats/kzg/compute_kzg_proof.md b/tests/formats/kzg/compute_kzg_proof.md index bba13638f8..0713d50d81 100644 --- a/tests/formats/kzg/compute_kzg_proof.md +++ b/tests/formats/kzg/compute_kzg_proof.md @@ -10,14 +10,15 @@ The test data is declared in a `data.yaml` file: input: blob: Blob -- the data blob representing a polynomial z: Bytes32 -- bytes encoding the BLS field element at which the polynomial should be evaluated -output: KZGProof -- The KZG proof +output: Tuple[KZGProof, Bytes32] -- The KZG proof and the value y = f(z) ``` - `blob` here is encoded as a string: hexadecimal encoding of `4096 * 32 = 131072` bytes, prefixed with `0x`. - `z` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`. +- `y` here is encoded as a string: hexadecimal encoding of `32` bytes representing a little endian encoded field element, prefixed with `0x`. All byte(s) fields are encoded as strings, hexadecimal encoding, prefixed with `0x`. 
## Condition -The `compute_kzg_proof` handler should compute the KZG proof for evaluating the polynomial represented by `blob` at `z`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) or `z` is not a valid BLS field element, it should error, i.e. the output should be `null`. +The `compute_kzg_proof` handler should compute the KZG proof as well as the value `y` for evaluating the polynomial represented by `blob` at `z`, and the result should match the expected `output`. If the blob is invalid (e.g. incorrect length or one of the 32-byte blocks does not represent a BLS field element) or `z` is not a valid BLS field element, it should error, i.e. the output should be `null`. diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/kzg_4844/main.py index 616e2cc461..65f6405e52 100644 --- a/tests/generators/kzg_4844/main.py +++ b/tests/generators/kzg_4844/main.py @@ -115,14 +115,14 @@ def case02_compute_kzg_proof(): # Valid cases for blob in VALID_BLOBS: for z in VALID_ZS: - proof = spec.compute_kzg_proof(blob, z) + proof, y = spec.compute_kzg_proof(blob, z) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'compute_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), 'z': encode_hex(z), }, - 'output': encode_hex(proof) + 'output': (encode_hex(proof), encode_hex(y)) } # Edge case: Invalid blobs @@ -156,9 +156,8 @@ def case03_verify_kzg_proof(): # Valid cases for blob in VALID_BLOBS: for z in VALID_ZS: - proof = spec.compute_kzg_proof(blob, z) + proof, y = spec.compute_kzg_proof(blob, z) commitment = spec.blob_to_kzg_commitment(blob) - y = evaluate_blob_at(blob, z) assert spec.verify_kzg_proof(commitment, z, y, proof) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'verify_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ 
-174,9 +173,9 @@ def case03_verify_kzg_proof(): # Incorrect proofs for blob in VALID_BLOBS: for z in VALID_ZS: - proof = bls_add_one(spec.compute_kzg_proof(blob, z)) + proof_orig, y = spec.compute_kzg_proof(blob, z) + proof = bls_add_one(proof_orig) commitment = spec.blob_to_kzg_commitment(blob) - y = evaluate_blob_at(blob, z) assert not spec.verify_kzg_proof(commitment, z, y, proof) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'verify_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -192,9 +191,8 @@ def case03_verify_kzg_proof(): # Edge case: Invalid z for z in INVALID_ZS: blob, validz = VALID_BLOBS[4], VALID_ZS[1] - proof = spec.compute_kzg_proof(blob, validz) + proof, y = spec.compute_kzg_proof(blob, validz) commitment = spec.blob_to_kzg_commitment(blob) - y = VALID_ZS[3] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'verify_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -209,7 +207,7 @@ def case03_verify_kzg_proof(): # Edge case: Invalid y blob, z = VALID_BLOBS[1], VALID_ZS[1] - proof = spec.compute_kzg_proof(blob, z) + proof, _ = spec.compute_kzg_proof(blob, z) commitment = spec.blob_to_kzg_commitment(blob) y = INVALID_ZS[0] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) @@ -257,9 +255,8 @@ def case03_verify_kzg_proof(): # Edge case: Invalid commitment, not in G1 blob, z = VALID_BLOBS[4], VALID_ZS[3] - proof = spec.compute_kzg_proof(blob, z) + proof, y = spec.compute_kzg_proof(blob, z) commitment = P1_NOT_IN_G1 - y = VALID_ZS[2] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) yield 'verify_kzg_proof_case_commitment_not_in_G1', { 'input': { @@ -273,9 +270,8 @@ def case03_verify_kzg_proof(): # Edge case: Invalid commitment, not on curve blob, z = VALID_BLOBS[1], VALID_ZS[4] - proof = spec.compute_kzg_proof(blob, z) + proof, y = spec.compute_kzg_proof(blob, z) 
commitment = P1_NOT_ON_CURVE - y = VALID_ZS[3] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) yield 'verify_kzg_proof_case_commitment_not_on_curve', { 'input': { @@ -291,32 +287,62 @@ def case03_verify_kzg_proof(): def case04_compute_blob_kzg_proof(): # Valid cases for blob in VALID_BLOBS: - proof = spec.compute_blob_kzg_proof(blob) + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob, commitment) identifier = f'{encode_hex(hash(blob))}' yield f'compute_blob_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), }, 'output': encode_hex(proof) } # Edge case: Invalid blob for blob in INVALID_BLOBS: - expect_exception(spec.compute_blob_kzg_proof, blob) + commitment = G1 + expect_exception(spec.compute_blob_kzg_proof, blob, commitment) identifier = f'{encode_hex(hash(blob))}' yield f'compute_blob_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), }, 'output': None } + # Edge case: Invalid commitment, not in G1 + commitment = P1_NOT_IN_G1 + blob = VALID_BLOBS[1] + expect_exception(spec.compute_blob_kzg_proof, blob, commitment) + identifier = f'{encode_hex(hash(blob))}' + yield 'compute_blob_kzg_proof_case_invalid_commitment_not_in_G1', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + }, + 'output': None + } + + # Edge case: Invalid commitment, not on curve + commitment = P1_NOT_ON_CURVE + blob = VALID_BLOBS[1] + expect_exception(spec.compute_blob_kzg_proof, blob, commitment) + identifier = f'{encode_hex(hash(blob))}' + yield 'compute_blob_kzg_proof_case_invalid_commitment_not_on_curve', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + }, + 'output': None + } + def case05_verify_blob_kzg_proof(): # Valid cases for blob in VALID_BLOBS: - proof = 
spec.compute_blob_kzg_proof(blob) commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob, commitment) assert spec.verify_blob_kzg_proof(blob, commitment, proof) identifier = f'{encode_hex(hash(blob))}' yield f'verify_blob_kzg_proof_case_correct_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -330,8 +356,8 @@ def case05_verify_blob_kzg_proof(): # Incorrect proofs for blob in VALID_BLOBS: - proof = bls_add_one(spec.compute_blob_kzg_proof(blob)) commitment = spec.blob_to_kzg_commitment(blob) + proof = bls_add_one(spec.compute_blob_kzg_proof(blob, commitment)) assert not spec.verify_blob_kzg_proof(blob, commitment, proof) identifier = f'{encode_hex(hash(blob))}' yield f'verify_blob_kzg_proof_case_incorrect_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -420,8 +446,8 @@ def case06_verify_blob_kzg_proof_batch(): proofs = [] commitments = [] for blob in VALID_BLOBS: - proofs.append(spec.compute_blob_kzg_proof(blob)) commitments.append(spec.blob_to_kzg_commitment(blob)) + proofs.append(spec.compute_blob_kzg_proof(blob, commitments[-1])) for i in range(len(proofs)): assert spec.verify_blob_kzg_proof_batch(VALID_BLOBS[:i], commitments[:i], proofs[:i]) From ccfe576dcc466a129da19d13f0418700af6515ab Mon Sep 17 00:00:00 2001 From: Justin Traglia <95511699+jtraglia@users.noreply.github.com> Date: Tue, 7 Mar 2023 14:56:55 -0700 Subject: [PATCH 102/210] Add KZG tests for input length inputs (#3282) --- specs/deneb/polynomial-commitments.md | 32 +++- tests/generators/kzg_4844/main.py | 264 ++++++++++++++++++++++---- 2 files changed, 256 insertions(+), 40 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 24ae08822b..edf2062b2a 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -72,7 +72,10 @@ Public functions MUST accept raw bytes as input and perform the required cryptog | Name | Value | Notes | | - | - | - | | 
`BLS_MODULUS` | `52435875175126190479447740508185965837690552500527637822603658699938581184513` | Scalar field modulus of BLS12-381 | +| `BYTES_PER_COMMITMENT` | `uint64(48)` | The number of bytes in a KZG commitment | +| `BYTES_PER_PROOF` | `uint64(48)` | The number of bytes in a KZG proof | | `BYTES_PER_FIELD_ELEMENT` | `uint64(32)` | Bytes used to encode a BLS scalar field element | +| `BYTES_PER_BLOB` | `uint64(BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB)` | The number of bytes in a blob | | `G1_POINT_AT_INFINITY` | `Bytes48(b'\xc0' + b'\x00' * 47)` | Serialized form of the point at infinity on the G1 group | @@ -340,6 +343,7 @@ def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment: """ Public method. """ + assert len(blob) == BYTES_PER_BLOB return g1_lincomb(bit_reversal_permutation(KZG_SETUP_LAGRANGE), blob_to_polynomial(blob)) ``` @@ -347,17 +351,22 @@ def blob_to_kzg_commitment(blob: Blob) -> KZGCommitment: ```python def verify_kzg_proof(commitment_bytes: Bytes48, - z: Bytes32, - y: Bytes32, + z_bytes: Bytes32, + y_bytes: Bytes32, proof_bytes: Bytes48) -> bool: """ Verify KZG proof that ``p(z) == y`` where ``p(z)`` is the polynomial represented by ``polynomial_kzg``. Receives inputs as bytes. Public method. 
""" + assert len(commitment_bytes) == BYTES_PER_COMMITMENT + assert len(z_bytes) == BYTES_PER_FIELD_ELEMENT + assert len(y_bytes) == BYTES_PER_FIELD_ELEMENT + assert len(proof_bytes) == BYTES_PER_PROOF + return verify_kzg_proof_impl(bytes_to_kzg_commitment(commitment_bytes), - bytes_to_bls_field(z), - bytes_to_bls_field(y), + bytes_to_bls_field(z_bytes), + bytes_to_bls_field(y_bytes), bytes_to_kzg_proof(proof_bytes)) ``` @@ -431,14 +440,16 @@ def verify_kzg_proof_batch(commitments: Sequence[KZGCommitment], #### `compute_kzg_proof` ```python -def compute_kzg_proof(blob: Blob, z: Bytes32) -> Tuple[KZGProof, Bytes32]: +def compute_kzg_proof(blob: Blob, z_bytes: Bytes32) -> Tuple[KZGProof, Bytes32]: """ Compute KZG proof at point `z` for the polynomial represented by `blob`. Do this by computing the quotient polynomial in evaluation form: q(x) = (p(x) - p(z)) / (x - z). Public method. """ + assert len(blob) == BYTES_PER_BLOB + assert len(z_bytes) == BYTES_PER_FIELD_ELEMENT polynomial = blob_to_polynomial(blob) - proof, y = compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z)) + proof, y = compute_kzg_proof_impl(polynomial, bytes_to_bls_field(z_bytes)) return proof, y.to_bytes(BYTES_PER_FIELD_ELEMENT, ENDIANNESS) ``` @@ -509,6 +520,8 @@ def compute_blob_kzg_proof(blob: Blob, commitment_bytes: Bytes48) -> KZGProof: This method does not verify that the commitment is correct with respect to `blob`. Public method. """ + assert len(blob) == BYTES_PER_BLOB + assert len(commitment_bytes) == BYTES_PER_COMMITMENT commitment = bytes_to_kzg_commitment(commitment_bytes) polynomial = blob_to_polynomial(blob) evaluation_challenge = compute_challenge(blob, commitment) @@ -527,6 +540,10 @@ def verify_blob_kzg_proof(blob: Blob, Public method. 
""" + assert len(blob) == BYTES_PER_BLOB + assert len(commitment_bytes) == BYTES_PER_COMMITMENT + assert len(proof_bytes) == BYTES_PER_PROOF + commitment = bytes_to_kzg_commitment(commitment_bytes) polynomial = blob_to_polynomial(blob) @@ -556,6 +573,9 @@ def verify_blob_kzg_proof_batch(blobs: Sequence[Blob], commitments, evaluation_challenges, ys, proofs = [], [], [], [] for blob, commitment_bytes, proof_bytes in zip(blobs, commitments_bytes, proofs_bytes): + assert len(blob) == BYTES_PER_BLOB + assert len(commitment_bytes) == BYTES_PER_COMMITMENT + assert len(proof_bytes) == BYTES_PER_PROOF commitment = bytes_to_kzg_commitment(commitment_bytes) commitments.append(commitment) polynomial = blob_to_polynomial(blob) diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/kzg_4844/main.py index 65f6405e52..699d1f369a 100644 --- a/tests/generators/kzg_4844/main.py +++ b/tests/generators/kzg_4844/main.py @@ -27,7 +27,11 @@ def expect_exception(func, *args): def field_element_bytes(x): - return int.to_bytes(x % spec.BLS_MODULUS, 32, "little") + return int.to_bytes(x % spec.BLS_MODULUS, 32, spec.ENDIANNESS) + + +def field_element_bytes_unchecked(x): + return int.to_bytes(x, 32, spec.ENDIANNESS) def encode_hex_list(a): @@ -67,13 +71,30 @@ def evaluate_blob_at(blob, z): BLOB_INVALID_CLOSE = spec.Blob(b''.join( [BLS_MODULUS_BYTES if n == 2111 else field_element_bytes(0) for n in range(4096)] )) +BLOB_INVALID_LENGTH_PLUS_ONE = BLOB_RANDOM_VALID1 + b"\x00" +BLOB_INVALID_LENGTH_MINUS_ONE = BLOB_RANDOM_VALID1[:-1] VALID_BLOBS = [BLOB_ALL_ZEROS, BLOB_RANDOM_VALID1, BLOB_RANDOM_VALID2, BLOB_RANDOM_VALID3, BLOB_ALL_MODULUS_MINUS_ONE, BLOB_ALMOST_ZERO] -INVALID_BLOBS = [BLOB_INVALID, BLOB_INVALID_CLOSE] -VALID_ZS = [field_element_bytes(x) for x in [0, 1, 2, pow(5, 1235, spec.BLS_MODULUS), - spec.BLS_MODULUS - 1, spec.ROOTS_OF_UNITY[1]]] -INVALID_ZS = [x.to_bytes(32, spec.ENDIANNESS) for x in [spec.BLS_MODULUS, 2**256 - 1, 2**256 - 2**128]] +INVALID_BLOBS = [BLOB_INVALID, 
BLOB_INVALID_CLOSE, BLOB_INVALID_LENGTH_PLUS_ONE, BLOB_INVALID_LENGTH_MINUS_ONE] + +FE_VALID1 = field_element_bytes(0) +FE_VALID2 = field_element_bytes(1) +FE_VALID3 = field_element_bytes(2) +FE_VALID4 = field_element_bytes(pow(5, 1235, spec.BLS_MODULUS)) +FE_VALID5 = field_element_bytes(spec.BLS_MODULUS - 1) +FE_VALID6 = field_element_bytes(spec.ROOTS_OF_UNITY[1]) +VALID_FIELD_ELEMENTS = [FE_VALID1, FE_VALID2, FE_VALID3, FE_VALID4, FE_VALID5, FE_VALID6] + +FE_INVALID_EQUAL_TO_MODULUS = field_element_bytes_unchecked(spec.BLS_MODULUS) +FE_INVALID_MODULUS_PLUS_ONE = field_element_bytes_unchecked(spec.BLS_MODULUS + 1) +FE_INVALID_UINT256_MAX = field_element_bytes_unchecked(2**256 - 1) +FE_INVALID_UINT256_MID = field_element_bytes_unchecked(2**256 - 2**128) +FE_INVALID_LENGTH_PLUS_ONE = VALID_FIELD_ELEMENTS[0] + b"\x00" +FE_INVALID_LENGTH_MINUS_ONE = VALID_FIELD_ELEMENTS[0][:-1] +INVALID_FIELD_ELEMENTS = [FE_INVALID_EQUAL_TO_MODULUS, FE_INVALID_MODULUS_PLUS_ONE, + FE_INVALID_UINT256_MAX, FE_INVALID_UINT256_MID, + FE_INVALID_LENGTH_PLUS_ONE, FE_INVALID_LENGTH_MINUS_ONE] def hash(x): @@ -114,7 +135,7 @@ def case01_blob_to_kzg_commitment(): def case02_compute_kzg_proof(): # Valid cases for blob in VALID_BLOBS: - for z in VALID_ZS: + for z in VALID_FIELD_ELEMENTS: proof, y = spec.compute_kzg_proof(blob, z) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'compute_kzg_proof_case_valid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -127,7 +148,7 @@ def case02_compute_kzg_proof(): # Edge case: Invalid blobs for blob in INVALID_BLOBS: - z = VALID_ZS[0] + z = VALID_FIELD_ELEMENTS[0] expect_exception(spec.compute_kzg_proof, blob, z) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' yield f'compute_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { @@ -139,7 +160,7 @@ def case02_compute_kzg_proof(): } # Edge case: Invalid z - for z in INVALID_ZS: + for z in INVALID_FIELD_ELEMENTS: blob = VALID_BLOBS[4] 
expect_exception(spec.compute_kzg_proof, blob, z) identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' @@ -155,7 +176,7 @@ def case02_compute_kzg_proof(): def case03_verify_kzg_proof(): # Valid cases for blob in VALID_BLOBS: - for z in VALID_ZS: + for z in VALID_FIELD_ELEMENTS: proof, y = spec.compute_kzg_proof(blob, z) commitment = spec.blob_to_kzg_commitment(blob) assert spec.verify_kzg_proof(commitment, z, y, proof) @@ -172,7 +193,7 @@ def case03_verify_kzg_proof(): # Incorrect proofs for blob in VALID_BLOBS: - for z in VALID_ZS: + for z in VALID_FIELD_ELEMENTS: proof_orig, y = spec.compute_kzg_proof(blob, z) proof = bls_add_one(proof_orig) commitment = spec.blob_to_kzg_commitment(blob) @@ -189,8 +210,8 @@ def case03_verify_kzg_proof(): } # Edge case: Invalid z - for z in INVALID_ZS: - blob, validz = VALID_BLOBS[4], VALID_ZS[1] + for z in INVALID_FIELD_ELEMENTS: + blob, validz = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1] proof, y = spec.compute_kzg_proof(blob, validz) commitment = spec.blob_to_kzg_commitment(blob) expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) @@ -206,12 +227,29 @@ def case03_verify_kzg_proof(): } # Edge case: Invalid y - blob, z = VALID_BLOBS[1], VALID_ZS[1] - proof, _ = spec.compute_kzg_proof(blob, z) + for y in INVALID_FIELD_ELEMENTS: + blob, z = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1] + proof, _ = spec.compute_kzg_proof(blob, z) + commitment = spec.blob_to_kzg_commitment(blob) + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + identifier = f'{encode_hex(hash(blob))}_{encode_hex(y)}' + yield f'verify_kzg_proof_case_invalid_y_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, not in G1 + blob, z = VALID_BLOBS[2], VALID_FIELD_ELEMENTS[0] + proof = P1_NOT_IN_G1 commitment = spec.blob_to_kzg_commitment(blob) - y = INVALID_ZS[0] 
+ y = VALID_FIELD_ELEMENTS[1] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_invalid_y', { + yield 'verify_kzg_proof_case_proof_not_in_G1', { 'input': { 'commitment': encode_hex(commitment), 'z': encode_hex(z), @@ -221,13 +259,13 @@ def case03_verify_kzg_proof(): 'output': None } - # Edge case: Invalid proof, not in G1 - blob, z = VALID_BLOBS[2], VALID_ZS[0] - proof = P1_NOT_IN_G1 + # Edge case: Invalid proof, not on curve + blob, z = VALID_BLOBS[3], VALID_FIELD_ELEMENTS[1] + proof = P1_NOT_ON_CURVE commitment = spec.blob_to_kzg_commitment(blob) - y = VALID_ZS[1] + y = VALID_FIELD_ELEMENTS[1] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_proof_not_in_G1', { + yield 'verify_kzg_proof_case_proof_not_on_curve', { 'input': { 'commitment': encode_hex(commitment), 'z': encode_hex(z), @@ -237,13 +275,31 @@ def case03_verify_kzg_proof(): 'output': None } - # Edge case: Invalid proof, not on curve - blob, z = VALID_BLOBS[3], VALID_ZS[1] - proof = P1_NOT_ON_CURVE + # Edge case: Invalid proof, too few bytes + blob = VALID_BLOBS[1] commitment = spec.blob_to_kzg_commitment(blob) - y = VALID_ZS[1] + z = VALID_FIELD_ELEMENTS[4] + proof, y = spec.compute_kzg_proof(blob, z) + proof = proof[:-1] expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_proof_not_on_curve', { + yield 'verify_kzg_proof_case_proof_too_few_bytes', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, too many bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + z = VALID_FIELD_ELEMENTS[4] + proof, y = spec.compute_kzg_proof(blob, z) + proof = proof + b"\x00" + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_proof_too_many_bytes', { 'input': { 'commitment': 
encode_hex(commitment), 'z': encode_hex(z), @@ -254,7 +310,7 @@ def case03_verify_kzg_proof(): } # Edge case: Invalid commitment, not in G1 - blob, z = VALID_BLOBS[4], VALID_ZS[3] + blob, z = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[3] proof, y = spec.compute_kzg_proof(blob, z) commitment = P1_NOT_IN_G1 expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) @@ -269,7 +325,7 @@ def case03_verify_kzg_proof(): } # Edge case: Invalid commitment, not on curve - blob, z = VALID_BLOBS[1], VALID_ZS[4] + blob, z = VALID_BLOBS[1], VALID_FIELD_ELEMENTS[4] proof, y = spec.compute_kzg_proof(blob, z) commitment = P1_NOT_ON_CURVE expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) @@ -283,6 +339,38 @@ def case03_verify_kzg_proof(): 'output': None } + # Edge case: Invalid commitment, too few bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob)[:-1] + z = VALID_FIELD_ELEMENTS[4] + proof, y = spec.compute_kzg_proof(blob, z) + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_commitment_too_few_bytes', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, too many bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + b"\x00" + z = VALID_FIELD_ELEMENTS[4] + proof, y = spec.compute_kzg_proof(blob, z) + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + yield 'verify_kzg_proof_case_commitment_too_many_bytes', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + def case04_compute_blob_kzg_proof(): # Valid cases @@ -397,6 +485,34 @@ def case05_verify_blob_kzg_proof(): 'output': None } + # Edge case: Invalid proof, too few bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + proof = 
spec.compute_blob_kzg_proof(blob, commitment)[:-1] + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_proof_too_few_bytes', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof, too many bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob, commitment) + b"\x00" + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_proof_too_many_bytes', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + # Edge case: Invalid commitment, not in G1 blob = VALID_BLOBS[0] proof = G1 @@ -425,6 +541,36 @@ def case05_verify_blob_kzg_proof(): 'output': None } + # Edge case: Invalid commitment, too few bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob, commitment) + commitment = commitment[:-1] + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_commitment_too_few_bytes', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid commitment, too many bytes + blob = VALID_BLOBS[1] + commitment = spec.blob_to_kzg_commitment(blob) + proof = spec.compute_blob_kzg_proof(blob, commitment) + commitment = commitment + b"\x00" + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + yield 'verify_blob_kzg_proof_case_commitment_too_many_bytes', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + # Edge case: Invalid blob for blob in INVALID_BLOBS: proof = G1 @@ -473,6 +619,20 @@ def 
case06_verify_blob_kzg_proof_batch(): 'output': False } + # Edge case: Invalid blobs + for blob in INVALID_BLOBS: + blobs_invalid = VALID_BLOBS[:4] + [blob] + VALID_BLOBS[5:] + expect_exception(spec.verify_blob_kzg_proof_batch, blobs_invalid, commitments, proofs) + identifier = f'{encode_hex(hash(blob))}' + yield f'verify_blob_kzg_proof_batch_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blobs': encode_hex_list(blobs_invalid), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + # Edge case: Invalid proof, not in G1 proofs_invalid_notG1 = [P1_NOT_IN_G1] + proofs[1:] expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notG1) @@ -497,6 +657,30 @@ def case06_verify_blob_kzg_proof_batch(): 'output': None } + # Edge case: Invalid proof, too few bytes + proofs_invalid_tooFewBytes = proofs[:1] + [proofs[1][:-1]] + proofs[2:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_tooFewBytes) + yield 'verify_blob_kzg_proof_batch_case_proof_too_few_bytes', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs_invalid_tooFewBytes), + }, + 'output': None + } + + # Edge case: Invalid proof, too many bytes + proofs_invalid_tooManyBytes = proofs[:1] + [proofs[1] + b"\x00"] + proofs[2:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_tooManyBytes) + yield 'verify_blob_kzg_proof_batch_case_proof_too_many_bytes', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs_invalid_tooManyBytes), + }, + 'output': None + } + # Edge case: Invalid commitment, not in G1 commitments_invalid_notG1 = commitments[:2] + [P1_NOT_IN_G1] + commitments[3:] expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, 
commitments, commitments_invalid_notG1) @@ -521,13 +705,25 @@ def case06_verify_blob_kzg_proof_batch(): 'output': None } - # Edge case: Invalid blob - blobs_invalid = VALID_BLOBS[:4] + [BLOB_INVALID] + VALID_BLOBS[5:] - expect_exception(spec.verify_blob_kzg_proof_batch, blobs_invalid, commitments, proofs) - yield 'verify_blob_kzg_proof_batch_case_invalid_blob', { + # Edge case: Invalid commitment, too few bytes + commitments_invalid_tooFewBytes = commitments[:3] + [commitments[3][:-1]] + commitments[4:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_tooFewBytes) + yield 'verify_blob_kzg_proof_batch_case_too_few_bytes', { 'input': { - 'blobs': encode_hex_list(blobs_invalid), - 'commitments': encode_hex_list(commitments), + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments_invalid_tooFewBytes), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } + + # Edge case: Invalid commitment, too many bytes + commitments_invalid_tooManyBytes = commitments[:3] + [commitments[3] + b"\x00"] + commitments[4:] + expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_tooManyBytes) + yield 'verify_blob_kzg_proof_batch_case_too_many_bytes', { + 'input': { + 'blobs': encode_hex_list(VALID_BLOBS), + 'commitments': encode_hex_list(commitments_invalid_tooManyBytes), 'proofs': encode_hex_list(proofs), }, 'output': None From b4c130a4a25bd50299c3fbb67bc0b8723db26c01 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 7 Mar 2023 21:57:45 +0000 Subject: [PATCH 103/210] Remove state from unit tests --- .../test_polynomial_commitments.py | 88 +++++++++++-------- 1 file changed, 53 insertions(+), 35 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 91115a299a..8ad552056c 
100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -1,7 +1,8 @@ import random from eth2spec.test.context import ( - spec_state_test, + spec_test, + single_phase, with_deneb_and_later, expect_assertion_error ) @@ -35,8 +36,9 @@ def field_element_bytes(x): @with_deneb_and_later -@spec_state_test -def test_verify_kzg_proof(spec, state): +@spec_test +@single_phase +def test_verify_kzg_proof(spec): """ Test the wrapper functions (taking bytes arguments) for computing and verifying KZG proofs. """ @@ -51,8 +53,9 @@ def test_verify_kzg_proof(spec, state): @with_deneb_and_later -@spec_state_test -def test_verify_kzg_proof_incorrect_proof(spec, state): +@spec_test +@single_phase +def test_verify_kzg_proof_incorrect_proof(spec): """ Test the wrapper function `verify_kzg_proof` fails on an incorrect proof. """ @@ -68,8 +71,9 @@ def test_verify_kzg_proof_incorrect_proof(spec, state): @with_deneb_and_later -@spec_state_test -def test_verify_kzg_proof_impl(spec, state): +@spec_test +@single_phase +def test_verify_kzg_proof_impl(spec): """ Test the implementation functions (taking field element arguments) for computing and verifying KZG proofs. """ @@ -84,8 +88,9 @@ def test_verify_kzg_proof_impl(spec, state): @with_deneb_and_later -@spec_state_test -def test_verify_kzg_proof_impl_incorrect_proof(spec, state): +@spec_test +@single_phase +def test_verify_kzg_proof_impl_incorrect_proof(spec): """ Test the implementation function `verify_kzg_proof` fails on an incorrect proof. 
""" @@ -101,8 +106,9 @@ def test_verify_kzg_proof_impl_incorrect_proof(spec, state): @with_deneb_and_later -@spec_state_test -def test_barycentric_outside_domain(spec, state): +@spec_test +@single_phase +def test_barycentric_outside_domain(spec): """ Test barycentric formula correctness by using it to evaluate a polynomial at a bunch of points outside its domain (the roots of unity). @@ -134,8 +140,9 @@ def test_barycentric_outside_domain(spec, state): @with_deneb_and_later -@spec_state_test -def test_barycentric_within_domain(spec, state): +@spec_test +@single_phase +def test_barycentric_within_domain(spec): """ Test barycentric formula correctness by using it to evaluate a polynomial at all the points of its domain (the roots of unity). @@ -166,8 +173,9 @@ def test_barycentric_within_domain(spec, state): @with_deneb_and_later -@spec_state_test -def test_compute_kzg_proof_within_domain(spec, state): +@spec_test +@single_phase +def test_compute_kzg_proof_within_domain(spec): """ Create and verify KZG proof that p(z) == y where z is in the domain of our KZG scheme (i.e. a relevant root of unity). 
@@ -186,8 +194,9 @@ def test_compute_kzg_proof_within_domain(spec, state): @with_deneb_and_later -@spec_state_test -def test_verify_blob_kzg_proof(spec, state): +@spec_test +@single_phase +def test_verify_blob_kzg_proof(spec): """ Test the functions to compute and verify a blob KZG proof """ @@ -199,8 +208,9 @@ def test_verify_blob_kzg_proof(spec, state): @with_deneb_and_later -@spec_state_test -def test_verify_blob_kzg_proof_incorrect_proof(spec, state): +@spec_test +@single_phase +def test_verify_blob_kzg_proof_incorrect_proof(spec): """ Check that `verify_blob_kzg_proof` fails on an incorrect proof """ @@ -213,8 +223,9 @@ def test_verify_blob_kzg_proof_incorrect_proof(spec, state): @with_deneb_and_later -@spec_state_test -def test_validate_kzg_g1_generator(spec, state): +@spec_test +@single_phase +def test_validate_kzg_g1_generator(spec): """ Verify that `validate_kzg_g1` allows the generator G1 """ @@ -223,8 +234,9 @@ def test_validate_kzg_g1_generator(spec, state): @with_deneb_and_later -@spec_state_test -def test_validate_kzg_g1_neutral_element(spec, state): +@spec_test +@single_phase +def test_validate_kzg_g1_neutral_element(spec): """ Verify that `validate_kzg_g1` allows the neutral element in G1 """ @@ -233,8 +245,9 @@ def test_validate_kzg_g1_neutral_element(spec, state): @with_deneb_and_later -@spec_state_test -def test_validate_kzg_g1_not_in_g1(spec, state): +@spec_test +@single_phase +def test_validate_kzg_g1_not_in_g1(spec): """ Verify that `validate_kzg_g1` fails on point not in G1 """ @@ -243,8 +256,9 @@ def test_validate_kzg_g1_not_in_g1(spec, state): @with_deneb_and_later -@spec_state_test -def test_validate_kzg_g1_not_on_curve(spec, state): +@spec_test +@single_phase +def test_validate_kzg_g1_not_on_curve(spec): """ Verify that `validate_kzg_g1` fails on point not in G1 """ @@ -253,8 +267,9 @@ def test_validate_kzg_g1_not_on_curve(spec, state): @with_deneb_and_later -@spec_state_test -def test_bytes_to_bls_field_zero(spec, state): +@spec_test 
+@single_phase +def test_bytes_to_bls_field_zero(spec): """ Verify that `bytes_to_bls_field` handles zero """ @@ -263,8 +278,9 @@ def test_bytes_to_bls_field_zero(spec, state): @with_deneb_and_later -@spec_state_test -def test_bytes_to_bls_field_modulus_minus_one(spec, state): +@spec_test +@single_phase +def test_bytes_to_bls_field_modulus_minus_one(spec): """ Verify that `bytes_to_bls_field` handles modulus minus one """ @@ -273,8 +289,9 @@ def test_bytes_to_bls_field_modulus_minus_one(spec, state): @with_deneb_and_later -@spec_state_test -def test_bytes_to_bls_field_modulus(spec, state): +@spec_test +@single_phase +def test_bytes_to_bls_field_modulus(spec): """ Verify that `bytes_to_bls_field` fails on BLS modulus """ @@ -285,8 +302,9 @@ def test_bytes_to_bls_field_modulus(spec, state): @with_deneb_and_later -@spec_state_test -def test_bytes_to_bls_field_max(spec, state): +@spec_test +@single_phase +def test_bytes_to_bls_field_max(spec): """ Verify that `bytes_to_bls_field` fails on 2**256 - 1 """ From 43e714e60f142d5951bc68eab55e5a6a801f0a39 Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 8 Mar 2023 15:34:56 +0100 Subject: [PATCH 104/210] Check correct fork version in LC sync protocol - Sync committee is determined by signature_slot - Signature fork version is determined by max(signature_slot, 1) - 1 - Attested block fork version can be anything < signature_slot Old logic incorrectly derived signature fork version from signature_slot and did not subtract a slot. Extended tests to check this edge case. 
--- specs/altair/light-client/sync-protocol.md | 3 ++- .../eth2spec/test/altair/light_client/test_sync.py | 13 +++++-------- .../pyspec/eth2spec/test/helpers/fork_transition.py | 12 ++++++++++-- .../pyspec/eth2spec/test/helpers/light_client.py | 2 +- 4 files changed, 18 insertions(+), 12 deletions(-) diff --git a/specs/altair/light-client/sync-protocol.md b/specs/altair/light-client/sync-protocol.md index f1b012e981..8cc048219f 100644 --- a/specs/altair/light-client/sync-protocol.md +++ b/specs/altair/light-client/sync-protocol.md @@ -387,7 +387,8 @@ def validate_light_client_update(store: LightClientStore, pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys) if bit ] - fork_version = compute_fork_version(compute_epoch_at_slot(update.signature_slot)) + fork_version_slot = max(update.signature_slot, 1) - 1 + fork_version = compute_fork_version(compute_epoch_at_slot(fork_version_slot)) domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root) signing_root = compute_signing_root(update.attested_header.beacon, domain) assert bls.FastAggregateVerify(participant_pubkeys, signing_root, sync_aggregate.sync_committee_signature) diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index 63bec26b09..d33e68961d 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -668,10 +668,9 @@ def run_test_single_fork(spec, phases, state, fork): # Upgrade to post-fork spec, attested block is still before the fork attested_block = block.copy() attested_state = state.copy() - state, _ = do_fork(state, spec, phases[fork], fork_epoch, with_block=False) + sync_aggregate, _ = get_sync_aggregate(phases[fork], state) + state, block = do_fork(state, spec, phases[fork], fork_epoch, sync_aggregate=sync_aggregate) spec = phases[fork] - 
sync_aggregate, _ = get_sync_aggregate(spec, state) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) assert test.store.finalized_header.beacon.slot == finalized_state.slot assert test.store.next_sync_committee == finalized_state.next_sync_committee @@ -755,18 +754,16 @@ def run_test_multi_fork(spec, phases, state, fork_1, fork_2): # ..., attested is from `fork_1`, ... fork_1_epoch = getattr(phases[fork_1].config, fork_1.upper() + '_FORK_EPOCH') transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_1_epoch) - 1) - state, _ = do_fork(state, spec, phases[fork_1], fork_1_epoch, with_block=False) + state, attested_block = do_fork(state, spec, phases[fork_1], fork_1_epoch) spec = phases[fork_1] - attested_block = state_transition_with_full_block(spec, state, True, True) attested_state = state.copy() # ..., and signature is from `fork_2` fork_2_epoch = getattr(phases[fork_2].config, fork_2.upper() + '_FORK_EPOCH') transition_to(spec, state, spec.compute_start_slot_at_epoch(fork_2_epoch) - 1) - state, _ = do_fork(state, spec, phases[fork_2], fork_2_epoch, with_block=False) + sync_aggregate, _ = get_sync_aggregate(phases[fork_2], state) + state, block = do_fork(state, spec, phases[fork_2], fork_2_epoch, sync_aggregate=sync_aggregate) spec = phases[fork_2] - sync_aggregate, _ = get_sync_aggregate(spec, state) - block = state_transition_with_full_block(spec, state, True, True, sync_aggregate=sync_aggregate) # Check that update applies yield from emit_update(test, spec, state, block, attested_state, attested_block, finalized_block, phases=phases) diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py index 96d0d20dcd..241c7dc37e 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py +++ 
b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py @@ -47,6 +47,7 @@ def _set_operations_by_dict(block, operation_dict): def _state_transition_and_sign_block_at_slot(spec, state, + sync_aggregate=None, operation_dict=None): """ Cribbed from ``transition_unsigned_block`` helper @@ -61,6 +62,8 @@ def _state_transition_and_sign_block_at_slot(spec, Thus use dict to pass operations. """ block = build_empty_block(spec, state) + if sync_aggregate is not None: + block.body.sync_aggregate = sync_aggregate if operation_dict: _set_operations_by_dict(block, operation_dict) @@ -141,7 +144,7 @@ def state_transition_across_slots_with_ignoring_proposers(spec, next_slot(spec, state) -def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict=None): +def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate=None, operation_dict=None): spec.process_slots(state, state.slot + 1) assert state.slot % spec.SLOTS_PER_EPOCH == 0 @@ -172,7 +175,12 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, operation_dict= assert state.fork.current_version == post_spec.config.DENEB_FORK_VERSION if with_block: - return state, _state_transition_and_sign_block_at_slot(post_spec, state, operation_dict=operation_dict) + return state, _state_transition_and_sign_block_at_slot( + post_spec, + state, + sync_aggregate=sync_aggregate, + operation_dict=operation_dict, + ) else: return state, None diff --git a/tests/core/pyspec/eth2spec/test/helpers/light_client.py b/tests/core/pyspec/eth2spec/test/helpers/light_client.py index 215d174fc8..ceca145e94 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/light_client.py +++ b/tests/core/pyspec/eth2spec/test/helpers/light_client.py @@ -31,7 +31,7 @@ def get_sync_aggregate(spec, state, num_participants=None, signature_slot=None): sync_committee_signature = compute_aggregate_sync_committee_signature( spec, signature_state, - signature_slot, + max(signature_slot, 1) - 1, 
committee_indices[:num_participants], ) sync_aggregate = spec.SyncAggregate( From 54c7df5bbf3bd550d9a4361a081ae78c6e049d3e Mon Sep 17 00:00:00 2001 From: Etan Kissling Date: Wed, 8 Mar 2023 15:42:56 +0100 Subject: [PATCH 105/210] Fix lint --- specs/altair/light-client/sync-protocol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/altair/light-client/sync-protocol.md b/specs/altair/light-client/sync-protocol.md index 8cc048219f..baef684c62 100644 --- a/specs/altair/light-client/sync-protocol.md +++ b/specs/altair/light-client/sync-protocol.md @@ -387,7 +387,7 @@ def validate_light_client_update(store: LightClientStore, pubkey for (bit, pubkey) in zip(sync_aggregate.sync_committee_bits, sync_committee.pubkeys) if bit ] - fork_version_slot = max(update.signature_slot, 1) - 1 + fork_version_slot = max(update.signature_slot, Slot(1)) - Slot(1) fork_version = compute_fork_version(compute_epoch_at_slot(fork_version_slot)) domain = compute_domain(DOMAIN_SYNC_COMMITTEE, fork_version, genesis_validators_root) signing_root = compute_signing_root(update.attested_header.beacon, domain) From 7f74a08a6c4734e2d87c07e65dbc4a2624c16ab6 Mon Sep 17 00:00:00 2001 From: Ben Edgington Date: Thu, 9 Mar 2023 11:07:01 +0000 Subject: [PATCH 106/210] Fix trailing whitespace --- specs/phase0/fork-choice.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 3176c1cd5d..8681975ca9 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -388,7 +388,7 @@ def on_tick(store: Store, time: uint64) -> None: # Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch: - finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) ancestor_at_finalized_slot = 
get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot) if ancestor_at_finalized_slot == store.finalized_checkpoint.root: store.justified_checkpoint = store.best_justified_checkpoint From 0da79bdbfd127be24102b1dc0454f5ba200499d5 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 9 Mar 2023 21:05:07 +0600 Subject: [PATCH 107/210] Provide validator guide for EIP6110 --- specs/_features/eip6110/validator.md | 42 ++++++++++++++++++++++++++++ 1 file changed, 42 insertions(+) create mode 100644 specs/_features/eip6110/validator.md diff --git a/specs/_features/eip6110/validator.md b/specs/_features/eip6110/validator.md new file mode 100644 index 0000000000..dcaaf11041 --- /dev/null +++ b/specs/_features/eip6110/validator.md @@ -0,0 +1,42 @@ +# EIP-6110 -- Honest Validator + +## Table of contents + + + + + +- [Introduction](#introduction) +- [Prerequisites](#prerequisites) +- [Block proposal](#block-proposal) + - [Deposits](#deposits) + + + + +## Introduction + +This document represents the changes to be made in the code of an "honest validator" to implement EIP-6110. + +## Prerequisites + +This document is an extension of the [Capella -- Honest Validator](../../capella/validator.md) guide. +All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. + +All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [EIP-6110](./beacon-chain.md) are requisite for this document and used throughout. +Please see related Beacon Chain doc before continuing and use them as a reference throughout. 
+ +## Block proposal + +### Deposits + +The expected number of deposits MUST be changed from `min(MAX_DEPOSITS, eth1_data.deposit_count - state.eth1_deposit_index)` to the result of the following function: + +```python +def get_eth1_deposit_count(state: BeaconState) -> uint64: + eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index) + if state.eth1_deposit_index < eth1_deposit_index_limit: + return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index) + else: + return 0 +``` From 92f8c5cf6ba4647aa2893bce0f08572666aefbf8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marius=20Kj=C3=A6rstad?= Date: Thu, 9 Mar 2023 23:06:30 +0100 Subject: [PATCH 108/210] Update disclosure page and email for reporting bugs Update disclosure page and email for reporting bugs --- SECURITY.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/SECURITY.md b/SECURITY.md index e46fab4de1..2101ea1554 100644 --- a/SECURITY.md +++ b/SECURITY.md @@ -8,4 +8,4 @@ Please see [Releases](https://github.com/ethereum/consensus-specs/releases/). We **Please do not file a public ticket** mentioning the vulnerability. -To find out how to disclose a vulnerability in the Ethereum Consensus Layer visit [https://eth2bounty.ethereum.org](https://eth2bounty.ethereum.org) or email eth2bounty@ethereum.org. Please read the [disclosure page](https://eth2bounty.ethereum.org) for more information about publicly disclosed security vulnerabilities. +To find out how to disclose a vulnerability in the Ethereum Consensus Layer visit [https://ethereum.org/bug-bounty](https://ethereum.org/bug-bounty) or email bounty@ethereum.org. Please read the [disclosure page](https://ethereum.org/bug-bounty) for more information about publicly disclosed security vulnerabilities. 
From a5333a1d108838b3bb20ec91759616b8bb4ab5be Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Fri, 10 Mar 2023 11:43:05 +0000 Subject: [PATCH 109/210] Remove useless test_kzg.py --- .../eth2spec/test/deneb/unittests/test_kzg.py | 21 ------------------- 1 file changed, 21 deletions(-) delete mode 100644 tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py deleted file mode 100644 index 71bfae8b89..0000000000 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_kzg.py +++ /dev/null @@ -1,21 +0,0 @@ - -from eth2spec.test.helpers.constants import ( - DENEB, - MINIMAL, -) -from eth2spec.test.helpers.sharding import ( - get_sample_blob, -) -from eth2spec.test.context import ( - with_phases, - spec_state_test, - with_presets, -) - - -@with_phases([DENEB]) -@spec_state_test -@with_presets([MINIMAL]) -def test_blob_to_kzg_commitment(spec, state): - blob = get_sample_blob(spec) - spec.blob_to_kzg_commitment(blob) From 6b69450992f5133a0d716930147598fdaf56503f Mon Sep 17 00:00:00 2001 From: kevaundray Date: Sat, 11 Mar 2023 15:44:42 +0000 Subject: [PATCH 110/210] fix typo in type of KZG_SETUP_LAGRANGE --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index edf2062b2a..57496eb020 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -105,7 +105,7 @@ but reusing the `mainnet` settings in public networks is a critical security req | `KZG_SETUP_G2_LENGTH` | `65` | | `KZG_SETUP_G1` | `Vector[G1Point, FIELD_ELEMENTS_PER_BLOB]`, contents TBD | | `KZG_SETUP_G2` | `Vector[G2Point, KZG_SETUP_G2_LENGTH]`, contents TBD | -| `KZG_SETUP_LAGRANGE` | `Vector[KZGCommitment, FIELD_ELEMENTS_PER_BLOB]`, contents TBD | +| `KZG_SETUP_LAGRANGE` | `Vector[G1Point, 
FIELD_ELEMENTS_PER_BLOB]`, contents TBD | ## Helper functions From 5e74c5141138cadf7c339031cac1aefc91f8b8bc Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sat, 11 Mar 2023 22:34:41 +0000 Subject: [PATCH 111/210] Tests for validate_blobs_and_kzg_commitments and fix --- specs/deneb/validator.md | 2 +- .../deneb/unittests/validator/__init__.py | 0 .../unittests/validator/test_validator.py | 113 ++++++++++++++++++ 3 files changed, 114 insertions(+), 1 deletion(-) create mode 100644 tests/core/pyspec/eth2spec/test/deneb/unittests/validator/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 50ab832e00..6519ec6e69 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -72,7 +72,7 @@ def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload, # Optionally sanity-check that the KZG commitments match the blobs (as produced by the execution engine) assert len(blob_kzg_commitments) == len(blobs) - assert [blob_to_kzg_commitment(blob) == commitment for blob, commitment in zip(blobs, blob_kzg_commitments)] + assert all(blob_to_kzg_commitment(blob) == commitment for blob, commitment in zip(blobs, blob_kzg_commitments)) ``` 3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`. 
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/__init__.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py new file mode 100644 index 0000000000..48a01f624d --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py @@ -0,0 +1,113 @@ +import random + +from eth2spec.test.context import ( + spec_state_test, + with_deneb_and_later, + expect_assertion_error +) +from eth2spec.test.helpers.execution_payload import ( + compute_el_block_hash, +) +from eth2spec.test.helpers.sharding import ( + get_sample_opaque_tx, +) +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot +) +from eth2spec.test.helpers.state import ( + state_transition_and_sign_block, +) +from eth2spec.utils import bls +from eth2spec.utils.bls import BLS_MODULUS + +G1 = bls.G1_to_bytes48(bls.G1()) +P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcdef") +P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + + "0123456789abcdef0123456789abcdef0123456789abcde0") + + +def bls_add_one(x): + """ + Adds "one" (actually bls.G1()) to a compressed group element. + Useful to compute definitely incorrect proofs. 
+ """ + return bls.G1_to_bytes48( + bls.add(bls.bytes48_to_G1(x), bls.G1()) + ) + + +def field_element_bytes(x): + return int.to_bytes(x % BLS_MODULUS, 32, "little") + + +@with_deneb_and_later +@spec_state_test +def test_validate_blobs_and_kzg_commitments(spec, state): + """ + Test `validate_blobs_and_kzg_commitments` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + # state_transition_and_sign_block(spec, state, block) + + blob_sidecars = spec.get_blob_sidecars(block, blobs) + blobs = [sidecar.blob for sidecar in blob_sidecars] + + spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, + blobs, + blob_kzg_commitments) + + +@with_deneb_and_later +@spec_state_test +def test_validate_blobs_and_kzg_commitments_missing_blob(spec, state): + """ + Test `validate_blobs_and_kzg_commitments` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + # state_transition_and_sign_block(spec, state, block) + + blob_sidecars = spec.get_blob_sidecars(block, blobs) + blobs = [sidecar.blob for sidecar in blob_sidecars][:-1] + + expect_assertion_error(lambda: + spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, + blobs, + blob_kzg_commitments) + ) + + +@with_deneb_and_later +@spec_state_test +def test_validate_blobs_and_kzg_commitments_incorrect_blob(spec, state): + """ + Test 
`validate_blobs_and_kzg_commitments` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + # state_transition_and_sign_block(spec, state, block) + + blob_sidecars = spec.get_blob_sidecars(block, blobs) + blobs = [sidecar.blob for sidecar in blob_sidecars] + + blobs[1] = spec.Blob(blobs[1][:13] + bytes([(blobs[1][13] + 1) % 256]) + blobs[1][14:]) + + expect_assertion_error(lambda: + spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, + blobs, + blob_kzg_commitments) + ) \ No newline at end of file From 29b5309f7df8166ef25803c6542116c69ffa1203 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sat, 11 Mar 2023 23:02:35 +0000 Subject: [PATCH 112/210] Add proofs to validate_blobs_and_kzg_commitments --- specs/deneb/validator.md | 9 +++-- .../unittests/validator/test_validator.py | 39 +++++++++++++++++-- 2 files changed, 40 insertions(+), 8 deletions(-) diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 6519ec6e69..d2209986e6 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -44,7 +44,7 @@ Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload` Implementers may also retrieve blobs individually per transaction. ```python -def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment]]: +def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment], Sequence[KZGProof]]: # pylint: disable=unused-argument ... 
``` @@ -66,13 +66,14 @@ use the `payload_id` to retrieve `blobs` and `blob_kzg_commitments` via `get_blo ```python def validate_blobs_and_kzg_commitments(execution_payload: ExecutionPayload, blobs: Sequence[Blob], - blob_kzg_commitments: Sequence[KZGCommitment]) -> None: + blob_kzg_commitments: Sequence[KZGCommitment], + blob_kzg_proofs: Sequence[KZGProof]) -> None: # Optionally sanity-check that the KZG commitments match the versioned hashes in the transactions assert verify_kzg_commitments_against_transactions(execution_payload.transactions, blob_kzg_commitments) # Optionally sanity-check that the KZG commitments match the blobs (as produced by the execution engine) - assert len(blob_kzg_commitments) == len(blobs) - assert all(blob_to_kzg_commitment(blob) == commitment for blob, commitment in zip(blobs, blob_kzg_commitments)) + assert len(blob_kzg_commitments) == len(blobs) == len(blob_kzg_proofs) + assert verify_blob_kzg_proof_batch(blobs, blob_kzg_commitments, blob_kzg_proofs) ``` 3. If valid, set `block.body.blob_kzg_commitments = blob_kzg_commitments`. 
diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py index 48a01f624d..33b216e876 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py @@ -53,14 +53,15 @@ def test_validate_blobs_and_kzg_commitments(spec, state): block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - # state_transition_and_sign_block(spec, state, block) blob_sidecars = spec.get_blob_sidecars(block, blobs) blobs = [sidecar.blob for sidecar in blob_sidecars] + proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, blobs, - blob_kzg_commitments) + blob_kzg_commitments, + proofs) @with_deneb_and_later @@ -79,11 +80,39 @@ def test_validate_blobs_and_kzg_commitments_missing_blob(spec, state): blob_sidecars = spec.get_blob_sidecars(block, blobs) blobs = [sidecar.blob for sidecar in blob_sidecars][:-1] + proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] + + expect_assertion_error(lambda: + spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, + blobs, + blob_kzg_commitments, + proofs) + ) + + +@with_deneb_and_later +@spec_state_test +def test_validate_blobs_and_kzg_commitments_missing_proof(spec, state): + """ + Test `validate_blobs_and_kzg_commitments` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, 
block.body.execution_payload) + # state_transition_and_sign_block(spec, state, block) + + blob_sidecars = spec.get_blob_sidecars(block, blobs) + blobs = [sidecar.blob for sidecar in blob_sidecars] + proofs = [sidecar.kzg_proof for sidecar in blob_sidecars][:-1] expect_assertion_error(lambda: spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, blobs, - blob_kzg_commitments) + blob_kzg_commitments, + proofs) ) @@ -103,11 +132,13 @@ def test_validate_blobs_and_kzg_commitments_incorrect_blob(spec, state): blob_sidecars = spec.get_blob_sidecars(block, blobs) blobs = [sidecar.blob for sidecar in blob_sidecars] + proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] blobs[1] = spec.Blob(blobs[1][:13] + bytes([(blobs[1][13] + 1) % 256]) + blobs[1][14:]) expect_assertion_error(lambda: spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, blobs, - blob_kzg_commitments) + blob_kzg_commitments, + proofs) ) \ No newline at end of file From 96ad61bcecb6473283e312094d23dc3378cfb796 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sun, 12 Mar 2023 23:05:01 +0000 Subject: [PATCH 113/210] Add tests for blob sidecar signature --- specs/deneb/p2p-interface.md | 11 ++ specs/deneb/validator.md | 9 +- .../eth2spec/test/deneb/sanity/test_blocks.py | 4 +- .../fork_choice/test_validate_blobs.py | 5 +- .../test/deneb/unittests/test_offset.py | 2 +- .../unittests/validator/test_validator.py | 122 ++++++++++++------ .../pyspec/eth2spec/test/helpers/sharding.py | 5 +- 7 files changed, 105 insertions(+), 53 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 107c90b9a9..9f4c158493 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -73,6 +73,17 @@ class BlobIdentifier(Container): index: BlobIndex ``` +### Helpers + +#### `verify_sidecar_signature` + +```python +def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: SignedBlobSidecar) -> bool: + proposer = 
state.validators[signed_blob_sidecar.message.proposer_index] + signing_root = compute_signing_root(signed_blob_sidecar.message, get_domain(state, DOMAIN_BLOB_SIDECAR)) + return bls.Verify(proposer.pubkey, signing_root, signed_blob_sidecar.signature) +``` + ## The gossip domain: gossipsub Some gossip meshes are upgraded in the fork of Deneb to support upgraded types. diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index d2209986e6..a21fadd085 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -44,7 +44,8 @@ Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload` Implementers may also retrieve blobs individually per transaction. ```python -def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment], Sequence[KZGProof]]: +def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> \ + Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment], Sequence[KZGProof]]: # pylint: disable=unused-argument ... 
``` @@ -88,7 +89,9 @@ Blobs associated with a block are packaged into sidecar objects for distribution Each `sidecar` is obtained from: ```python -def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[BlobSidecar]: +def get_blob_sidecars(block: BeaconBlock, + blobs: Sequence[Blob], + blob_kzg_proofs: Sequence[KZGProof]) -> Sequence[BlobSidecar]: return [ BlobSidecar( block_root=hash_tree_root(block), @@ -97,7 +100,7 @@ def get_blob_sidecars(block: BeaconBlock, blobs: Sequence[Blob]) -> Sequence[Blo block_parent_root=block.parent_root, blob=blob, kzg_commitment=block.body.blob_kzg_commitments[index], - kzg_proof=compute_blob_kzg_proof(blob, block.body.blob_kzg_commitments[index]), + kzg_proof=blob_kzg_proofs[index], ) for index, blob in enumerate(blobs) ] diff --git a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py index c7fb708b8f..5e65dbd4ef 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py @@ -22,7 +22,7 @@ def test_one_blob(spec, state): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) - opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) @@ -38,7 +38,7 @@ def test_max_blobs(spec, state): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) - opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = 
[opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py index d9934c5ade..0d7bd53e52 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/fork_choice/test_validate_blobs.py @@ -18,14 +18,13 @@ def _run_validate_blobs(spec, state, blob_count): block = build_empty_block_for_next_slot(spec, state) - opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + opaque_tx, blobs, blob_kzg_commitments, kzg_proofs = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) state_transition_and_sign_block(spec, state, block) - # Also test the proof generation in `get_blob_sidecars` - blob_sidecars = spec.get_blob_sidecars(block, blobs) + blob_sidecars = spec.get_blob_sidecars(block, blobs, kzg_proofs) blobs = [sidecar.blob for sidecar in blob_sidecars] kzg_proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] spec.validate_blobs(blob_kzg_commitments, blobs, kzg_proofs) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py index 13150180bc..3c3b51ff1a 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/test_offset.py @@ -17,7 +17,7 @@ @spec_state_test @with_presets([MINIMAL]) def test_tx_peek_blob_versioned_hashes(spec, state): - otx, blobs, commitments = get_sample_opaque_tx(spec) + otx, _, commitments, _ = get_sample_opaque_tx(spec) data_hashes = 
spec.tx_peek_blob_versioned_hashes(otx) expected = [spec.kzg_commitment_to_versioned_hash(blob_commitment) for blob_commitment in commitments] assert expected == data_hashes diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py index 33b216e876..7e5cf8bfe9 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py @@ -1,6 +1,5 @@ -import random - from eth2spec.test.context import ( + always_bls, spec_state_test, with_deneb_and_later, expect_assertion_error @@ -14,8 +13,8 @@ from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot ) -from eth2spec.test.helpers.state import ( - state_transition_and_sign_block, +from eth2spec.test.helpers.keys import ( + pubkey_to_privkey ) from eth2spec.utils import bls from eth2spec.utils.bls import BLS_MODULUS @@ -49,15 +48,11 @@ def test_validate_blobs_and_kzg_commitments(spec, state): """ blob_count = 4 block = build_empty_block_for_next_slot(spec, state) - opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - blob_sidecars = spec.get_blob_sidecars(block, blobs) - blobs = [sidecar.blob for sidecar in blob_sidecars] - proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] - spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, blobs, blob_kzg_commitments, @@ -72,21 +67,18 @@ def test_validate_blobs_and_kzg_commitments_missing_blob(spec, state): """ blob_count = 4 block = build_empty_block_for_next_slot(spec, state) - opaque_tx, 
blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - # state_transition_and_sign_block(spec, state, block) - - blob_sidecars = spec.get_blob_sidecars(block, blobs) - blobs = [sidecar.blob for sidecar in blob_sidecars][:-1] - proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] - expect_assertion_error(lambda: - spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, - blobs, - blob_kzg_commitments, - proofs) + expect_assertion_error( + lambda: spec.validate_blobs_and_kzg_commitments( + block.body.execution_payload, + blobs[:-1], + blob_kzg_commitments, + proofs + ) ) @@ -98,21 +90,18 @@ def test_validate_blobs_and_kzg_commitments_missing_proof(spec, state): """ blob_count = 4 block = build_empty_block_for_next_slot(spec, state) - opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - # state_transition_and_sign_block(spec, state, block) - blob_sidecars = spec.get_blob_sidecars(block, blobs) - blobs = [sidecar.blob for sidecar in blob_sidecars] - proofs = [sidecar.kzg_proof for sidecar in blob_sidecars][:-1] - - expect_assertion_error(lambda: - spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, - blobs, - blob_kzg_commitments, - proofs) + expect_assertion_error( + lambda: spec.validate_blobs_and_kzg_commitments( + block.body.execution_payload, + 
blobs, + blob_kzg_commitments, + proofs[:-1] + ) ) @@ -124,21 +113,68 @@ def test_validate_blobs_and_kzg_commitments_incorrect_blob(spec, state): """ blob_count = 4 block = build_empty_block_for_next_slot(spec, state) - opaque_tx, blobs, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=blob_count) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - # state_transition_and_sign_block(spec, state, block) - - blob_sidecars = spec.get_blob_sidecars(block, blobs) - blobs = [sidecar.blob for sidecar in blob_sidecars] - proofs = [sidecar.kzg_proof for sidecar in blob_sidecars] blobs[1] = spec.Blob(blobs[1][:13] + bytes([(blobs[1][13] + 1) % 256]) + blobs[1][14:]) - expect_assertion_error(lambda: - spec.validate_blobs_and_kzg_commitments(block.body.execution_payload, - blobs, - blob_kzg_commitments, - proofs) - ) \ No newline at end of file + expect_assertion_error( + lambda: spec.validate_blobs_and_kzg_commitments( + block.body.execution_payload, + blobs, + blob_kzg_commitments, + proofs + ) + ) + + +@with_deneb_and_later +@spec_state_test +def test_blob_sidecar_signature(spec, state): + """ + Test `get_blob_sidecar_signature` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + + blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs) + proposer = state.validators[blob_sidecars[1].proposer_index] + privkey = pubkey_to_privkey[proposer.pubkey] + 
sidecar_signature = spec.get_blob_sidecar_signature(state, + blob_sidecars[1], + privkey) + + signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature) + + assert spec.verify_blob_sidecar_signature(state, signed_blob_sidecar) + + +@with_deneb_and_later +@spec_state_test +@always_bls +def test_blob_sidecar_signature_incorrect(spec, state): + """ + Test `get_blob_sidecar_signature` + """ + blob_count = 4 + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, blobs, blob_kzg_commitments, proofs = get_sample_opaque_tx(spec, blob_count=blob_count) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + + blob_sidecars = spec.get_blob_sidecars(block, blobs, proofs) + + sidecar_signature = spec.get_blob_sidecar_signature(state, + blob_sidecars[1], + 123) + + signed_blob_sidecar = spec.SignedBlobSidecar(message=blob_sidecars[1], signature=sidecar_signature) + + assert not spec.verify_blob_sidecar_signature(state, signed_blob_sidecar) diff --git a/tests/core/pyspec/eth2spec/test/helpers/sharding.py b/tests/core/pyspec/eth2spec/test/helpers/sharding.py index fd60d5d3be..89f03c3c39 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/sharding.py +++ b/tests/core/pyspec/eth2spec/test/helpers/sharding.py @@ -101,13 +101,16 @@ def get_poly_in_both_forms(spec, rng=None): def get_sample_opaque_tx(spec, blob_count=1, rng=None): blobs = [] blob_kzg_commitments = [] + blob_kzg_proofs = [] blob_versioned_hashes = [] for _ in range(blob_count): blob = get_sample_blob(spec, rng) blob_commitment = spec.KZGCommitment(spec.blob_to_kzg_commitment(blob)) + blob_kzg_proof = spec.compute_blob_kzg_proof(blob, blob_commitment) blob_versioned_hash = spec.kzg_commitment_to_versioned_hash(blob_commitment) blobs.append(blob) blob_kzg_commitments.append(blob_commitment) + 
blob_kzg_proofs.append(blob_kzg_proof) blob_versioned_hashes.append(blob_versioned_hash) signed_blob_tx = SignedBlobTransaction( @@ -117,4 +120,4 @@ def get_sample_opaque_tx(spec, blob_count=1, rng=None): ) serialized_tx = serialize(signed_blob_tx) opaque_tx = spec.uint_to_bytes(spec.BLOB_TX_TYPE) + serialized_tx - return opaque_tx, blobs, blob_kzg_commitments + return opaque_tx, blobs, blob_kzg_commitments, blob_kzg_proofs From 723e8a11fe4ee025f9f4faa3352c5ca27bfb59a5 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sun, 12 Mar 2023 23:07:19 +0000 Subject: [PATCH 114/210] Remove unused imports/functions in test_validator.py --- .../unittests/validator/test_validator.py | 22 ------------------- 1 file changed, 22 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py index 7e5cf8bfe9..07039ccfeb 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/validator/test_validator.py @@ -16,28 +16,6 @@ from eth2spec.test.helpers.keys import ( pubkey_to_privkey ) -from eth2spec.utils import bls -from eth2spec.utils.bls import BLS_MODULUS - -G1 = bls.G1_to_bytes48(bls.G1()) -P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + - "0123456789abcdef0123456789abcdef0123456789abcdef") -P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + - "0123456789abcdef0123456789abcdef0123456789abcde0") - - -def bls_add_one(x): - """ - Adds "one" (actually bls.G1()) to a compressed group element. - Useful to compute definitely incorrect proofs. 
- """ - return bls.G1_to_bytes48( - bls.add(bls.bytes48_to_G1(x), bls.G1()) - ) - - -def field_element_bytes(x): - return int.to_bytes(x % BLS_MODULUS, 32, "little") @with_deneb_and_later From cc284b2b60af6c857210f0e413f2ed423a95e6fa Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Sun, 12 Mar 2023 23:11:40 +0000 Subject: [PATCH 115/210] Toc --- specs/deneb/p2p-interface.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 9f4c158493..6f10f7a345 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -15,6 +15,8 @@ The specification of these changes continues in the same format as the network s - [`BlobSidecar`](#blobsidecar) - [`SignedBlobSidecar`](#signedblobsidecar) - [`BlobIdentifier`](#blobidentifier) + - [Helpers](#helpers) + - [`verify_sidecar_signature`](#verify_sidecar_signature) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [Topics and messages](#topics-and-messages) - [Global topics](#global-topics) From e9551715d5ae2655189f03917d01968d290a4904 Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Thu, 9 Mar 2023 15:17:11 -0800 Subject: [PATCH 116/210] large fork choice upgrade --- presets/mainnet/phase0.yaml | 6 - presets/minimal/phase0.yaml | 6 - specs/bellatrix/fork-choice.md | 21 +- specs/deneb/fork-choice.md | 21 +- specs/phase0/fork-choice.md | 234 +++- .../eth2spec/test/helpers/attestations.py | 10 +- .../eth2spec/test/helpers/fork_choice.py | 93 +- .../test/phase0/fork_choice/test_get_head.py | 439 +++++-- .../test/phase0/fork_choice/test_on_block.py | 1119 ++++++++++++----- .../test/phase0/fork_choice/test_reorg.py | 498 ++++++++ .../phase0/fork_choice/test_withholding.py | 205 +++ .../unittests/fork_choice/test_on_block.py | 87 -- .../unittests/fork_choice/test_on_tick.py | 77 +- tests/formats/fork_choice/README.md | 5 - tests/generators/fork_choice/main.py | 2 + 15 files changed, 2163 insertions(+), 660 deletions(-) create mode 
100644 tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py create mode 100644 tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py delete mode 100644 tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py diff --git a/presets/mainnet/phase0.yaml b/presets/mainnet/phase0.yaml index 89bb97d6a8..02bc96c8cd 100644 --- a/presets/mainnet/phase0.yaml +++ b/presets/mainnet/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**3 (= 8) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 8 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/presets/minimal/phase0.yaml b/presets/minimal/phase0.yaml index 2c6fbb3691..e7028f5a42 100644 --- a/presets/minimal/phase0.yaml +++ b/presets/minimal/phase0.yaml @@ -18,12 +18,6 @@ HYSTERESIS_DOWNWARD_MULTIPLIER: 1 HYSTERESIS_UPWARD_MULTIPLIER: 5 -# Fork Choice -# --------------------------------------------------------------- -# 2**1 (= 1) -SAFE_SLOTS_TO_UPDATE_JUSTIFIED: 2 - - # Gwei values # --------------------------------------------------------------- # 2**0 * 10**9 (= 1,000,000,000) Gwei diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md index 94d0688273..ed7d60a932 100644 --- a/specs/bellatrix/fork-choice.md +++ b/specs/bellatrix/fork-choice.md @@ -174,6 +174,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Check the block is valid and compute the post-state state = pre_state.copy() + block_root = hash_tree_root(block) state_transition(state, signed_block, True) # [New in Bellatrix] @@ -181,9 +182,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: validate_merge_block(block) # Add new block to the store - store.blocks[hash_tree_root(block)] = block + store.blocks[block_root] = block # Add new state for this block 
to the store - store.block_states[hash_tree_root(block)] = state + store.block_states[block_root] = state # Add proposer score boost if the block is timely time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT @@ -191,15 +192,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: if get_current_slot(store) == block.slot and is_before_attesting_interval: store.proposer_boost_root = hash_tree_root(block) - # Update justified checkpoint - if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: - if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch: - store.best_justified_checkpoint = state.current_justified_checkpoint - if should_update_justified_checkpoint(store, state.current_justified_checkpoint): - store.justified_checkpoint = state.current_justified_checkpoint - - # Update finalized checkpoint - if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: - store.finalized_checkpoint = state.finalized_checkpoint - store.justified_checkpoint = state.current_justified_checkpoint + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + # Eagerly compute unrealized justification and finality. 
+ compute_pulled_up_tip(store, block_root) ``` diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 830c487645..61714cf1a8 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -91,6 +91,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Check the block is valid and compute the post-state state = pre_state.copy() + block_root = hash_tree_root(block) state_transition(state, signed_block, True) # Check the merge transition @@ -98,9 +99,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: validate_merge_block(block) # Add new block to the store - store.blocks[hash_tree_root(block)] = block + store.blocks[block_root] = block # Add new state for this block to the store - store.block_states[hash_tree_root(block)] = state + store.block_states[block_root] = state # Add proposer score boost if the block is timely time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT @@ -108,15 +109,9 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: if get_current_slot(store) == block.slot and is_before_attesting_interval: store.proposer_boost_root = hash_tree_root(block) - # Update justified checkpoint - if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: - if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch: - store.best_justified_checkpoint = state.current_justified_checkpoint - if should_update_justified_checkpoint(store, state.current_justified_checkpoint): - store.justified_checkpoint = state.current_justified_checkpoint - - # Update finalized checkpoint - if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: - store.finalized_checkpoint = state.finalized_checkpoint - store.justified_checkpoint = state.current_justified_checkpoint + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + # Eagerly 
compute unrealized justification and finality. + compute_pulled_up_tip(store, block_root) ``` diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 8681975ca9..6cbebe507c 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -8,10 +8,10 @@ - [Introduction](#introduction) - [Fork choice](#fork-choice) - [Constant](#constant) - - [Preset](#preset) - [Configuration](#configuration) - [Helpers](#helpers) - [`LatestMessage`](#latestmessage) + - [`is_previous_epoch_justified`](#is_previous_epoch_justified) - [`Store`](#store) - [`get_forkchoice_store`](#get_forkchoice_store) - [`get_slots_since_genesis`](#get_slots_since_genesis) @@ -19,10 +19,16 @@ - [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start) - [`get_ancestor`](#get_ancestor) - [`get_weight`](#get_weight) + - [`get_voting_source`](#get_voting_source) - [`filter_block_tree`](#filter_block_tree) - [`get_filtered_block_tree`](#get_filtered_block_tree) - [`get_head`](#get_head) - - [`should_update_justified_checkpoint`](#should_update_justified_checkpoint) + - [`update_checkpoints`](#update_checkpoints) + - [`update_unrealized_checkpoints`](#update_unrealized_checkpoints) + - [Pull-up tip helpers](#pull-up-tip-helpers) + - [`compute_pulled_up_tip`](#compute_pulled_up_tip) + - [`on_tick` helpers](#on_tick-helpers) + - [`on_tick_per_slot`](#on_tick_per_slot) - [`on_attestation` helpers](#on_attestation-helpers) - [`validate_target_epoch_against_current_time`](#validate_target_epoch_against_current_time) - [`validate_on_attestation`](#validate_on_attestation) @@ -67,12 +73,6 @@ Any of the above handlers that trigger an unhandled exception (e.g. 
a failed ass | -------------------- | ----------- | | `INTERVALS_PER_SLOT` | `uint64(3)` | -### Preset - -| Name | Value | Unit | Duration | -| -------------------------------- | ------------ | :---: | :--------: | -| `SAFE_SLOTS_TO_UPDATE_JUSTIFIED` | `2**3` (= 8) | slots | 96 seconds | - ### Configuration | Name | Value | @@ -92,8 +92,26 @@ class LatestMessage(object): root: Root ``` + +### `is_previous_epoch_justified` + +```python +def is_previous_epoch_justified(store: Store) -> bool: + current_slot = get_current_slot(store) + current_epoch = compute_epoch_at_slot(current_slot) + return store.justified_checkpoint.epoch + 1 == current_epoch +``` + + #### `Store` +The `Store` is responsible for tracking information required for the fork choice algorithm. The important fields being tracked are described below: + +- `justified_checkpoint`: the justified checkpoint being used as the starting point for the LMD GHOST fork choice algorithm. +- `finalized_checkpoint`: the highest finalized checkpoint that was seen. In general, the fork choice will consider only those blocks that are not conflicting with this checkpoint. +- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization***, i.e., FFG processing of new attestations, has occured. This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries. +- `unrealized_justifications`: stores a map of block root to the unrealized justified checkpoint observed in that block. 
+ ```python @dataclass class Store(object): @@ -101,13 +119,15 @@ class Store(object): genesis_time: uint64 justified_checkpoint: Checkpoint finalized_checkpoint: Checkpoint - best_justified_checkpoint: Checkpoint + unrealized_justified_checkpoint: Checkpoint + unrealized_finalized_checkpoint: Checkpoint proposer_boost_root: Root equivocating_indices: Set[ValidatorIndex] blocks: Dict[Root, BeaconBlock] = field(default_factory=dict) block_states: Dict[Root, BeaconState] = field(default_factory=dict) checkpoint_states: Dict[Checkpoint, BeaconState] = field(default_factory=dict) latest_messages: Dict[ValidatorIndex, LatestMessage] = field(default_factory=dict) + unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict) ``` #### `get_forkchoice_store` @@ -130,12 +150,14 @@ def get_forkchoice_store(anchor_state: BeaconState, anchor_block: BeaconBlock) - genesis_time=anchor_state.genesis_time, justified_checkpoint=justified_checkpoint, finalized_checkpoint=finalized_checkpoint, - best_justified_checkpoint=justified_checkpoint, + unrealized_justified_checkpoint=justified_checkpoint, + unrealized_finalized_checkpoint=finalized_checkpoint, proposer_boost_root=proposer_boost_root, equivocating_indices=set(), blocks={anchor_root: copy(anchor_block)}, block_states={anchor_root: copy(anchor_state)}, checkpoint_states={justified_checkpoint: copy(anchor_state)}, + unrealized_justifications={anchor_root: justified_checkpoint} ) ``` @@ -179,9 +201,12 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: ```python def get_weight(store: Store, root: Root) -> Gwei: state = store.checkpoint_states[store.justified_checkpoint] - active_indices = get_active_validator_indices(state, get_current_epoch(state)) + unslashed_and_active_indices = [ + i for i in get_active_validator_indices(state, get_current_epoch(state)) + if not state.validators[i].slashed + ] attestation_score = Gwei(sum( - state.validators[i].effective_balance for i in active_indices + 
state.validators[i].effective_balance for i in unslashed_and_active_indices if (i in store.latest_messages and i not in store.equivocating_indices and get_ancestor(store, store.latest_messages[i].root, store.blocks[root].slot) == root) @@ -199,8 +224,32 @@ def get_weight(store: Store, root: Root) -> Gwei: return attestation_score + proposer_score ``` +#### `get_voting_source` + +```python +def get_voting_source(store: Store, block_root: Root) -> Checkpoint: + """ + Compute the voting source checkpoint in the case that block with root ``block_root`` + is chosen as the head block + """ + block = store.blocks[block_root] + current_epoch = compute_epoch_at_slot(get_current_slot(store)) + block_epoch = compute_epoch_at_slot(block.slot) + if current_epoch > block_epoch: + # The block is from a prior epoch, the voting source will be pulled-up. + return store.unrealized_justifications[block_root] + else: + # The block is not from a prior epoch, therefore the voting source is + # not pulled up. + head_state = store.block_states[block_root] + return head_state.current_justified_checkpoint + +``` + #### `filter_block_tree` +*Note*: External calls to `filter_block_tree` (i.e., any calls that are not made by the recursive logic in this function) MUST have `block_root` as `store.justified_checkpoint`. + ```python def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool: block = store.blocks[block_root] @@ -218,17 +267,30 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB return True return False - # If leaf block, check finalized/justified checkpoints as matching latest. - head_state = store.block_states[block_root] + current_epoch = compute_epoch_at_slot(get_current_slot(store)) + voting_source = get_voting_source(store, block_root) + # The voting source should be at the same height as the store's justified checkpoint. 
correct_justified = ( store.justified_checkpoint.epoch == GENESIS_EPOCH - or head_state.current_justified_checkpoint == store.justified_checkpoint + or voting_source.epoch == store.justified_checkpoint.epoch ) + + # If the block should be pulled-up due to previous epoch being justified, also check + # that the unrealized justification is higher than the store's justified + # checkpoint, and the voting source is not more than two epochs ago. + if not correct_justified and is_previous_epoch_justified(store): + correct_justified = ( + store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch and + voting_source.epoch + 2 >= current_epoch + ) + + finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) correct_finalized = ( store.finalized_checkpoint.epoch == GENESIS_EPOCH - or head_state.finalized_checkpoint == store.finalized_checkpoint + or store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot) ) + # If expected finalized/justified, add to viable block-tree and signal viability to parent. if correct_justified and correct_finalized: blocks[block_root] = block @@ -272,25 +334,87 @@ def get_head(store: Store) -> Root: head = max(children, key=lambda root: (get_weight(store, root), root)) ``` -#### `should_update_justified_checkpoint` +#### `update_checkpoints` ```python -def should_update_justified_checkpoint(store: Store, new_justified_checkpoint: Checkpoint) -> bool: +def update_checkpoints(store: Store, justified_checkpoint: Checkpoint, finalized_checkpoint: Checkpoint) -> None: """ - To address the bouncing attack, only update conflicting justified - checkpoints in the fork choice if in the early slots of the epoch. - Otherwise, delay incorporation of new justified checkpoint until next epoch boundary. 
+ Update checkpoints in store if necessary + """ + # Update justified checkpoint + if justified_checkpoint.epoch > store.justified_checkpoint.epoch: + store.justified_checkpoint = justified_checkpoint + + # Update finalized checkpoint + if finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: + store.finalized_checkpoint = finalized_checkpoint +``` - See https://ethresear.ch/t/prevention-of-bouncing-attack-on-ffg/6114 for more detailed analysis and discussion. +#### `update_unrealized_checkpoints` + +```python +def update_unrealized_checkpoints(store: Store, unrealized_justified_checkpoint: Checkpoint, + unrealized_finalized_checkpoint: Checkpoint) -> None: """ - if compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED: - return True + Update unrealized checkpoints in store if necessary + """ + # Update unrealized justified checkpoint + if unrealized_justified_checkpoint.epoch > store.unrealized_justified_checkpoint.epoch: + store.unrealized_justified_checkpoint = unrealized_justified_checkpoint - justified_slot = compute_start_slot_at_epoch(store.justified_checkpoint.epoch) - if not get_ancestor(store, new_justified_checkpoint.root, justified_slot) == store.justified_checkpoint.root: - return False + # Update unrealized finalized checkpoint + if unrealized_finalized_checkpoint.epoch > store.unrealized_finalized_checkpoint.epoch: + store.unrealized_finalized_checkpoint = unrealized_finalized_checkpoint +``` + + +#### Pull-up tip helpers + +##### `compute_pulled_up_tip` + +```python +def compute_pulled_up_tip(store: Store, block_root: Root) -> None: + state = store.block_states[block_root].copy() + # Pull up the post-state of the block to the next epoch boundary + process_justification_and_finalization(state) + + # Store the unrealized justification. 
+ store.unrealized_justifications[block_root] = state.current_justified_checkpoint + + # Update unrealized checkpoints in store if necessary + update_unrealized_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) + + # If the block is from a prior epoch, apply the realized values. + block_epoch = compute_epoch_at_slot(store.blocks[block_root].slot) + current_epoch = compute_epoch_at_slot(get_current_slot(store)) + if block_epoch < current_epoch: + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) +``` + + +#### `on_tick` helpers + +##### `on_tick_per_slot` + +```python +def on_tick_per_slot(store: Store, time: uint64) -> None: + previous_slot = get_current_slot(store) + + # update store time + store.time = time + + current_slot = get_current_slot(store) + + # Reset store.proposer_boost_root if this is a new slot + if current_slot > previous_slot: + store.proposer_boost_root = Root() - return True + # Not a new epoch, return + if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0): + return + + # Pull-up justification and finalization from previous epoch + update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint) ``` #### `on_attestation` helpers @@ -323,7 +447,8 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc # Check that the epoch number and slot number are matching assert target.epoch == compute_epoch_at_slot(attestation.data.slot) - # Attestations target be for a known block. If target block is unknown, delay consideration until the block is found + # Attestations target be for a known block. + # If target block is unknown, delay consideration until the block is found. assert target.root in store.blocks # Attestations must be for a known block. 
If block is unknown, delay consideration until the block is found @@ -355,7 +480,9 @@ def store_target_checkpoint_state(store: Store, target: Checkpoint) -> None: ##### `update_latest_messages` ```python -def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None: +def update_latest_messages(store: Store, + attesting_indices: Sequence[ValidatorIndex], + attestation: Attestation) -> None: target = attestation.data.target beacon_block_root = attestation.data.beacon_block_root non_equivocating_attesting_indices = [i for i in attesting_indices if i not in store.equivocating_indices] @@ -371,27 +498,13 @@ def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIn ```python def on_tick(store: Store, time: uint64) -> None: - previous_slot = get_current_slot(store) - - # update store time - store.time = time - - current_slot = get_current_slot(store) - - # Reset store.proposer_boost_root if this is a new slot - if current_slot > previous_slot: - store.proposer_boost_root = Root() - - # Not a new epoch, return - if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0): - return - - # Update store.justified_checkpoint if a better checkpoint on the store.finalized_checkpoint chain - if store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch: - finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - ancestor_at_finalized_slot = get_ancestor(store, store.best_justified_checkpoint.root, finalized_slot) - if ancestor_at_finalized_slot == store.finalized_checkpoint.root: - store.justified_checkpoint = store.best_justified_checkpoint + # If the ``store.time`` falls behind, catch up slot by slot to + # ensure that every previous slot will be processed with ``on_tick_per_slot``. 
+ tick_slot = (time - store.genesis_time) // SECONDS_PER_SLOT + while get_current_slot(store) < tick_slot: + previous_time = store.genesis_time + (get_current_slot(store) + 1) * SECONDS_PER_SLOT + on_tick_per_slot(store, previous_time) + on_tick_per_slot(store, time) ``` #### `on_block` @@ -414,11 +527,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Check the block is valid and compute the post-state state = pre_state.copy() + block_root = hash_tree_root(block) state_transition(state, signed_block, True) # Add new block to the store - store.blocks[hash_tree_root(block)] = block + store.blocks[block_root] = block # Add new state for this block to the store - store.block_states[hash_tree_root(block)] = state + store.block_states[block_root] = state # Add proposer score boost if the block is timely time_into_slot = (store.time - store.genesis_time) % SECONDS_PER_SLOT @@ -426,17 +540,11 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: if get_current_slot(store) == block.slot and is_before_attesting_interval: store.proposer_boost_root = hash_tree_root(block) - # Update justified checkpoint - if state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch: - if state.current_justified_checkpoint.epoch > store.best_justified_checkpoint.epoch: - store.best_justified_checkpoint = state.current_justified_checkpoint - if should_update_justified_checkpoint(store, state.current_justified_checkpoint): - store.justified_checkpoint = state.current_justified_checkpoint + # Update checkpoints in store if necessary + update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) - # Update finalized checkpoint - if state.finalized_checkpoint.epoch > store.finalized_checkpoint.epoch: - store.finalized_checkpoint = state.finalized_checkpoint - store.justified_checkpoint = state.current_justified_checkpoint + # Eagerly compute unrealized justification and finality. 
+ compute_pulled_up_tip(store, block_root) ``` #### `on_attestation` diff --git a/tests/core/pyspec/eth2spec/test/helpers/attestations.py b/tests/core/pyspec/eth2spec/test/helpers/attestations.py index c60d047b92..360e194f59 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/attestations.py +++ b/tests/core/pyspec/eth2spec/test/helpers/attestations.py @@ -187,7 +187,7 @@ def add_attestations_to_state(spec, state, attestations, slot): spec.process_attestation(state, attestation) -def _get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None): +def get_valid_attestation_at_slot(state, spec, slot_to_attest, participation_fn=None): committees_per_slot = spec.get_committee_count_per_slot(state, spec.compute_epoch_at_slot(slot_to_attest)) for index in range(committees_per_slot): def participants_filter(comm): @@ -262,7 +262,7 @@ def state_transition_with_full_block(spec, if fill_cur_epoch and state.slot >= spec.MIN_ATTESTATION_INCLUSION_DELAY: slot_to_attest = state.slot - spec.MIN_ATTESTATION_INCLUSION_DELAY + 1 if slot_to_attest >= spec.compute_start_slot_at_epoch(spec.get_current_epoch(state)): - attestations = _get_valid_attestation_at_slot( + attestations = get_valid_attestation_at_slot( state, spec, slot_to_attest, @@ -272,7 +272,7 @@ def state_transition_with_full_block(spec, block.body.attestations.append(attestation) if fill_prev_epoch: slot_to_attest = state.slot - spec.SLOTS_PER_EPOCH + 1 - attestations = _get_valid_attestation_at_slot( + attestations = get_valid_attestation_at_slot( state, spec, slot_to_attest, @@ -300,7 +300,7 @@ def state_transition_with_full_attestations_block(spec, state, fill_cur_epoch, f slots = state.slot % spec.SLOTS_PER_EPOCH for slot_offset in range(slots): target_slot = state.slot - slot_offset - attestations += _get_valid_attestation_at_slot( + attestations += get_valid_attestation_at_slot( state, spec, target_slot, @@ -311,7 +311,7 @@ def state_transition_with_full_attestations_block(spec, state, 
fill_cur_epoch, f slots = spec.SLOTS_PER_EPOCH - state.slot % spec.SLOTS_PER_EPOCH for slot_offset in range(1, slots): target_slot = state.slot - (state.slot % spec.SLOTS_PER_EPOCH) - slot_offset - attestations += _get_valid_attestation_at_slot( + attestations += get_valid_attestation_at_slot( state, spec, target_slot, diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py index bd8abd95b5..af231d87ff 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py +++ b/tests/core/pyspec/eth2spec/test/helpers/fork_choice.py @@ -3,6 +3,7 @@ from eth2spec.test.helpers.attestations import ( next_epoch_with_attestations, next_slots_with_attestations, + state_transition_with_full_block, ) @@ -16,12 +17,13 @@ def get_anchor_root(spec, state): def tick_and_add_block(spec, store, signed_block, test_steps, valid=True, merge_block=False, block_not_found=False, is_optimistic=False): pre_state = store.block_states[signed_block.message.parent_root] - block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT if merge_block: assert spec.is_merge_transition_block(pre_state, signed_block.message.body) - if store.time < block_time: - on_tick_and_append_step(spec, store, block_time, test_steps) + block_time = pre_state.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT + while store.time < block_time: + time = pre_state.genesis_time + (spec.get_current_slot(store) + 1) * spec.config.SECONDS_PER_SLOT + on_tick_and_append_step(spec, store, time, test_steps) post_state = yield from add_block( spec, store, signed_block, test_steps, @@ -39,6 +41,11 @@ def add_attestation(spec, store, attestation, test_steps, is_from_block=False): test_steps.append({'attestation': get_attestation_file_name(attestation)}) +def add_attestations(spec, store, attestations, test_steps, is_from_block=False): + for attestation in attestations: + yield from add_attestation(spec, 
store, attestation, test_steps, is_from_block=is_from_block) + + def tick_and_run_on_attestation(spec, store, attestation, test_steps, is_from_block=False): parent_block = store.blocks[attestation.data.beacon_block_root] pre_state = store.block_states[spec.hash_tree_root(parent_block)] @@ -90,6 +97,7 @@ def get_attester_slashing_file_name(attester_slashing): def on_tick_and_append_step(spec, store, time, test_steps): spec.on_tick(store, time) test_steps.append({'tick': int(time)}) + output_store_checks(spec, store, test_steps) def run_on_block(spec, store, signed_block, valid=True): @@ -153,25 +161,7 @@ def add_block(spec, assert store.blocks[block_root] == signed_block.message assert store.block_states[block_root].hash_tree_root() == signed_block.message.state_root if not is_optimistic: - test_steps.append({ - 'checks': { - 'time': int(store.time), - 'head': get_formatted_head_output(spec, store), - 'justified_checkpoint': { - 'epoch': int(store.justified_checkpoint.epoch), - 'root': encode_hex(store.justified_checkpoint.root), - }, - 'finalized_checkpoint': { - 'epoch': int(store.finalized_checkpoint.epoch), - 'root': encode_hex(store.finalized_checkpoint.root), - }, - 'best_justified_checkpoint': { - 'epoch': int(store.best_justified_checkpoint.epoch), - 'root': encode_hex(store.best_justified_checkpoint.root), - }, - 'proposer_boost_root': encode_hex(store.proposer_boost_root), - } - }) + output_store_checks(spec, store, test_steps) return store.block_states[signed_block.message.hash_tree_root()] @@ -217,6 +207,32 @@ def get_formatted_head_output(spec, store): } +def output_head_check(spec, store, test_steps): + test_steps.append({ + 'checks': { + 'head': get_formatted_head_output(spec, store), + } + }) + + +def output_store_checks(spec, store, test_steps): + test_steps.append({ + 'checks': { + 'time': int(store.time), + 'head': get_formatted_head_output(spec, store), + 'justified_checkpoint': { + 'epoch': int(store.justified_checkpoint.epoch), + 'root': 
encode_hex(store.justified_checkpoint.root), + }, + 'finalized_checkpoint': { + 'epoch': int(store.finalized_checkpoint.epoch), + 'root': encode_hex(store.finalized_checkpoint.root), + }, + 'proposer_boost_root': encode_hex(store.proposer_boost_root), + } + }) + + def apply_next_epoch_with_attestations(spec, state, store, @@ -263,6 +279,39 @@ def apply_next_slots_with_attestations(spec, return post_state, store, last_signed_block +def is_ready_to_justify(spec, state): + """ + Check if the given ``state`` will trigger justification updates at epoch boundary. + """ + temp_state = state.copy() + spec.process_justification_and_finalization(temp_state) + return temp_state.current_justified_checkpoint.epoch > state.current_justified_checkpoint.epoch + + +def find_next_justifying_slot(spec, + state, + fill_cur_epoch, + fill_prev_epoch, + participation_fn=None): + temp_state = state.copy() + + signed_blocks = [] + justifying_slot = None + while justifying_slot is None: + signed_block = state_transition_with_full_block( + spec, + temp_state, + fill_cur_epoch, + fill_prev_epoch, + participation_fn, + ) + signed_blocks.append(signed_block) + if is_ready_to_justify(spec, temp_state): + justifying_slot = temp_state.slot + + return signed_blocks, justifying_slot + + def get_pow_block_file_name(pow_block): return f"pow_block_{encode_hex(pow_block.block_hash)}" diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py index 990c420313..2107a470a7 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py @@ -1,9 +1,9 @@ import random -from eth_utils import encode_hex from eth2spec.test.context import ( spec_state_test, with_all_phases, + with_altair_and_later, with_presets, ) from eth2spec.test.helpers.attestations import get_valid_attestation, next_epoch_with_attestations @@ -22,6 +22,8 @@ 
add_attestation, tick_and_run_on_attestation, tick_and_add_block, + output_head_check, + apply_next_epoch_with_attestations, ) from eth2spec.test.helpers.forks import ( is_post_altair, @@ -71,11 +73,7 @@ def test_chain_no_attestations(spec, state): anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) # On receiving a block of `GENESIS_SLOT + 1` slot block_1 = build_empty_block_for_next_slot(spec, state) @@ -88,11 +86,7 @@ def test_chain_no_attestations(spec, state): yield from tick_and_add_block(spec, store, signed_block_2, test_steps) assert spec.get_head(store) == spec.hash_tree_root(block_2) - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) yield 'steps', test_steps @@ -109,11 +103,7 @@ def test_split_tie_breaker_no_attestations(spec, state): yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) # Create block at slot 1 block_1_state = genesis_state.copy() @@ -135,11 +125,7 @@ def test_split_tie_breaker_no_attestations(spec, state): highest_root = max(spec.hash_tree_root(block_1), spec.hash_tree_root(block_2)) assert spec.get_head(store) == highest_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) yield 'steps', test_steps @@ -156,11 +142,7 @@ def test_shorter_chain_but_heavier_weight(spec, state): yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - 
}) + output_head_check(spec, store, test_steps) # build longer tree long_state = genesis_state.copy() @@ -183,11 +165,7 @@ def test_shorter_chain_but_heavier_weight(spec, state): yield from tick_and_run_on_attestation(spec, store, short_attestation, test_steps) assert spec.get_head(store) == spec.hash_tree_root(short_block) - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) yield 'steps', test_steps @@ -203,11 +181,7 @@ def test_filtered_block_tree(spec, state): yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) # transition state past initial couple of epochs next_epoch(spec, state) @@ -227,13 +201,7 @@ def test_filtered_block_tree(spec, state): # the last block in the branch should be the head expected_head_root = spec.hash_tree_root(signed_blocks[-1].message) assert spec.get_head(store) == expected_head_root - - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - 'justified_checkpoint_root': encode_hex(store.justified_checkpoint.root), - } - }) + output_head_check(spec, store, test_steps) # # create branch containing the justified block but not containing enough on @@ -274,11 +242,7 @@ def test_filtered_block_tree(spec, state): # ensure that get_head still returns the head from the previous branch assert spec.get_head(store) == expected_head_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store) - } - }) + output_head_check(spec, store, test_steps) yield 'steps', test_steps @@ -295,11 +259,7 @@ def test_proposer_boost_correct_head(spec, state): yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 
'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) # Build block that serves as head ONLY on timely arrival, and ONLY in that slot state_1 = genesis_state.copy() @@ -337,19 +297,14 @@ def test_proposer_boost_correct_head(spec, state): on_tick_and_append_step(spec, store, time, test_steps) assert store.proposer_boost_root == spec.Root() assert spec.get_head(store) == spec.hash_tree_root(block_2) - - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) yield 'steps', test_steps @with_all_phases @spec_state_test -def test_discard_equivocations(spec, state): +def test_discard_equivocations_on_attester_slashing(spec, state): test_steps = [] genesis_state = state.copy() @@ -359,11 +314,7 @@ def test_discard_equivocations(spec, state): yield 'anchor_block', anchor_block anchor_root = get_anchor_root(spec, state) assert spec.get_head(store) == anchor_root - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + output_head_check(spec, store, test_steps) # Build block that serves as head before discarding equivocations state_1 = genesis_state.copy() @@ -418,11 +369,359 @@ def test_discard_equivocations(spec, state): # The head should revert to block_2 yield from add_attester_slashing(spec, store, attester_slashing, test_steps) assert spec.get_head(store) == spec.hash_tree_root(block_2) + output_head_check(spec, store, test_steps) - test_steps.append({ - 'checks': { - 'head': get_formatted_head_output(spec, store), - } - }) + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_discard_equivocations_slashed_validator_censoring(spec, state): + # Check that the store does not count LMD votes from validators that are slashed in the justified state + test_steps = [] + # Initialization + store, anchor_block = 
get_genesis_forkchoice_store_and_block(spec, state) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 0 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 0 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 + + # We will slash all validators voting at the 2nd slot of epoch 0 + current_slot = spec.get_current_slot(store) + eqv_slot = current_slot + 1 + eqv_epoch = spec.compute_epoch_at_slot(eqv_slot) + assert eqv_slot % spec.SLOTS_PER_EPOCH == 1 + assert eqv_epoch == 0 + slashed_validators = [] + comm_count = spec.get_committee_count_per_slot(state, eqv_epoch) + for comm_index in range(comm_count): + comm = spec.get_beacon_committee(state, eqv_slot, comm_index) + slashed_validators += comm + assert len(slashed_validators) > 0 + + # Slash those validators in the state + for val_index in slashed_validators: + state.validators[val_index].slashed = True + + # Store this state as the anchor state + anchor_state = state.copy() + # Generate an anchor block with correct state root + anchor_block = spec.BeaconBlock(state_root=anchor_state.hash_tree_root()) + yield 'anchor_state', anchor_state + yield 'anchor_block', anchor_block + + # Get a new store with the anchor state & anchor block + store = spec.get_forkchoice_store(anchor_state, anchor_block) + + # Now generate the store checks + current_time = anchor_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + # Create two competing blocks at eqv_slot + next_slots(spec, state, eqv_slot - state.slot - 1) + assert state.slot == eqv_slot - 1 + + state_1 = state.copy() + block_1 = build_empty_block_for_next_slot(spec, state_1) + signed_block_1 = state_transition_and_sign_block(spec, state_1, block_1) + + state_2 = state.copy() + block_2 = build_empty_block_for_next_slot(spec, state_2) + block_2.body.graffiti = 
block_2.body.graffiti = b'\x42' * 32 + signed_block_2 = state_transition_and_sign_block(spec, state_2, block_2) + + assert block_1.slot == block_2.slot == eqv_slot + + # Add both blocks to the store + yield from tick_and_add_block(spec, store, signed_block_1, test_steps) + yield from tick_and_add_block(spec, store, signed_block_2, test_steps) + + # Find out which block will win in tie breaking + if spec.hash_tree_root(block_1) < spec.hash_tree_root(block_2): + block_low_root = block_1.hash_tree_root() + block_low_root_post_state = state_1 + block_high_root = block_2.hash_tree_root() + else: + block_low_root = block_2.hash_tree_root() + block_low_root_post_state = state_2 + block_high_root = block_1.hash_tree_root() + assert block_low_root < block_high_root + + # Tick to next slot so proposer boost does not apply + current_time = store.genesis_time + (block_1.slot + 1) * spec.config.SECONDS_PER_SLOT + on_tick_and_append_step(spec, store, current_time, test_steps) + + # Check that block with higher root wins + assert spec.get_head(store) == block_high_root + + # Create attestation for block with lower root + attestation = get_valid_attestation(spec, block_low_root_post_state, slot=eqv_slot, index=0, signed=True) + # Check that all attesting validators were slashed in the anchor state + att_comm = spec.get_beacon_committee(block_low_root_post_state, eqv_slot, 0) + for i in att_comm: + assert anchor_state.validators[i].slashed + # Add attestation to the store + yield from add_attestation(spec, store, attestation, test_steps) + # Check that block with higher root still wins + assert spec.get_head(store) == block_high_root + output_head_check(spec, store, test_steps) + + yield 'steps', test_steps + + +@with_altair_and_later +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_voting_source_within_two_epoch(spec, state): + """ + Check that the store allows for a head block that has: + - store.voting_source[block_root].epoch != 
store.justified_checkpoint.epoch, and + - store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and + - store.voting_source[block_root].epoch + 2 >= current_epoch, and + - store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot) + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Copy the state to use later + fork_state = state.copy() + + # Fill epoch 4 + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + assert store.finalized_checkpoint.epoch == 3 + + # Create a fork from the earlier saved state + next_epoch(spec, fork_state) + assert spec.compute_epoch_at_slot(fork_state.slot) == 5 + _, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True) + # Only keep the blocks from epoch 5, so discard the last generated block + signed_blocks = signed_blocks[:-1] + last_fork_block = signed_blocks[-1].message + assert 
spec.compute_epoch_at_slot(last_fork_block.slot) == 5 + + # Now add the fork to the store + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + assert store.finalized_checkpoint.epoch == 3 + + # Check that the last block from the fork is the head + # LMD votes for the competing branch are overwritten so this fork should win + last_fork_block_root = last_fork_block.hash_tree_root() + # assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch + assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch + # assert store.voting_source[last_fork_block_root].epoch + 2 >= \ + # spec.compute_epoch_at_slot(spec.get_current_slot(store)) + finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert store.finalized_checkpoint.root == spec.get_ancestor(store, last_fork_block_root, finalized_slot) + assert spec.get_head(store) == last_fork_block_root + + yield 'steps', test_steps + + +@with_altair_and_later +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_voting_source_beyond_two_epoch(spec, state): + """ + Check that the store doesn't allow for a head block that has: + - store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and + - store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and + - store.voting_source[block_root].epoch + 2 < current_epoch, and + - store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot) + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * 
spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Copy the state to use later + fork_state = state.copy() + + # Fill epoch 4 and 5 + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert store.finalized_checkpoint.epoch == 4 + + # Create a fork from the earlier saved state + for _ in range(2): + next_epoch(spec, fork_state) + assert spec.compute_epoch_at_slot(fork_state.slot) == 6 + assert fork_state.current_justified_checkpoint.epoch == 3 + _, signed_blocks, fork_state = next_epoch_with_attestations(spec, fork_state, True, True) + # Only keep the blocks from epoch 6, so discard the last generated block + signed_blocks = signed_blocks[:-1] + last_fork_block = signed_blocks[-1].message + assert spec.compute_epoch_at_slot(last_fork_block.slot) == 6 + + # Store the head before adding the fork to the store + correct_head = spec.get_head(store) + + # Now add the fork to the store + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert 
state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert store.finalized_checkpoint.epoch == 4 + + last_fork_block_root = last_fork_block.hash_tree_root() + last_fork_block_state = store.block_states[last_fork_block_root] + assert last_fork_block_state.current_justified_checkpoint.epoch == 3 + + # Check that the head is unchanged + # assert store.voting_source[last_fork_block_root].epoch != store.justified_checkpoint.epoch + assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch + # assert store.voting_source[last_fork_block_root].epoch + 2 < \ + # spec.compute_epoch_at_slot(spec.get_current_slot(store)) + finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert store.finalized_checkpoint.root == spec.get_ancestor(store, last_fork_block_root, finalized_slot) + assert spec.get_head(store) == correct_head + + yield 'steps', test_steps + + +""" +Note: +We are unable to generate test vectors that check failure of the correct_finalized condition. 
+We cannot generate a block that: +- has !correct_finalized, and +- has correct_justified, and +- is a descendant of store.justified_checkpoint.root + +The block being a descendant of store.justified_checkpoint.root is necessary because +filter_block_tree descends the tree starting at store.justified_checkpoint.root + +@with_all_phases +@spec_state_test +def test_incorrect_finalized(spec, state): + # Check that the store doesn't allow for a head block that has: + # - store.voting_source[block_root].epoch == store.justified_checkpoint.epoch, and + # - store.finalized_checkpoint.epoch != GENESIS_EPOCH, and + # - store.finalized_checkpoint.root != get_ancestor(store, block_root, finalized_slot) + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 4 + for _ in range(4): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + assert store.finalized_checkpoint.epoch == 3 + + # Identify the fork block as the last block in epoch 4 + fork_block_root = state.latest_block_header.parent_root + fork_block = store.blocks[fork_block_root] + assert spec.compute_epoch_at_slot(fork_block.slot) == 4 + # Copy the state to use later + fork_state = store.block_states[fork_block_root].copy() + assert spec.compute_epoch_at_slot(fork_state.slot) == 4 + assert 
fork_state.current_justified_checkpoint.epoch == 3 + assert fork_state.finalized_checkpoint.epoch == 2 + + # Create a fork from the earlier saved state + for _ in range(2): + next_epoch(spec, fork_state) + assert spec.compute_epoch_at_slot(fork_state.slot) == 6 + assert fork_state.current_justified_checkpoint.epoch == 4 + assert fork_state.finalized_checkpoint.epoch == 3 + # Fill epoch 6 + signed_blocks = [] + _, signed_blocks_1, fork_state = next_epoch_with_attestations(spec, fork_state, True, False) + signed_blocks += signed_blocks_1 + assert spec.compute_epoch_at_slot(fork_state.slot) == 7 + # Check that epoch 6 is justified in this fork - it will be used as voting source for the tip of this fork + assert fork_state.current_justified_checkpoint.epoch == 6 + assert fork_state.finalized_checkpoint.epoch == 3 + # Create a chain in epoch 7 that has new justification for epoch 7 + _, signed_blocks_2, fork_state = next_epoch_with_attestations(spec, fork_state, True, False) + # Only keep the blocks from epoch 7, so discard the last generated block + signed_blocks_2 = signed_blocks_2[:-1] + signed_blocks += signed_blocks_2 + last_fork_block = signed_blocks[-1].message + assert spec.compute_epoch_at_slot(last_fork_block.slot) == 7 + + # Now add the fork to the store + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 + assert store.justified_checkpoint.epoch == 6 + assert store.finalized_checkpoint.epoch == 3 + + # Fill epoch 5 and 6 in the original chain + for _ in range(2): + state, store, signed_head_block = yield from apply_next_epoch_with_attestations( + spec, state, store, True, False, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 6 + assert store.finalized_checkpoint.epoch == 5 + # Store the expected 
head + head_root = signed_head_block.message.hash_tree_root() + + # Check that the head is unchanged + last_fork_block_root = last_fork_block.hash_tree_root() + assert store.voting_source[last_fork_block_root].epoch == store.justified_checkpoint.epoch + assert store.finalized_checkpoint.epoch != spec.GENESIS_EPOCH + finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) + assert store.finalized_checkpoint.root != spec.get_ancestor(store, last_fork_block_root, finalized_slot) + assert spec.get_head(store) != last_fork_block_root + assert spec.get_head(store) == head_root yield 'steps', test_steps +""" diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index 23514b325b..eaae825ab2 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -2,12 +2,16 @@ from eth_utils import encode_hex from eth2spec.utils.ssz.ssz_impl import hash_tree_root -from eth2spec.test.context import MINIMAL, spec_state_test, with_all_phases, with_presets +from eth2spec.test.context import ( + MINIMAL, + spec_state_test, + with_all_phases, + with_altair_and_later, + with_presets +) from eth2spec.test.helpers.attestations import ( next_epoch_with_attestations, next_slots_with_attestations, - state_transition_with_full_block, - state_transition_with_full_attestations_block, ) from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, @@ -22,6 +26,8 @@ tick_and_add_block, apply_next_epoch_with_attestations, apply_next_slots_with_attestations, + is_ready_to_justify, + find_next_justifying_slot, ) from eth2spec.test.helpers.state import ( next_epoch, @@ -280,301 +286,22 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): yield 'steps', test_steps -@with_all_phases -@spec_state_test -@with_presets([MINIMAL], reason="mainnet config 
requires too many pre-generated public/private keys") -def test_on_block_update_justified_checkpoint_within_safe_slots(spec, state): - """ - Test `should_update_justified_checkpoint`: - compute_slots_since_epoch_start(get_current_slot(store)) < SAFE_SLOTS_TO_UPDATE_JUSTIFIED - """ - test_steps = [] - # Initialization - store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) - yield 'anchor_state', state - yield 'anchor_block', anchor_block - current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time - on_tick_and_append_step(spec, store, current_time, test_steps) - assert store.time == current_time - - # Skip epoch 0 & 1 - for _ in range(2): - next_epoch(spec, state) - # Fill epoch 2 - state, store, _ = yield from apply_next_epoch_with_attestations( - spec, state, store, True, False, test_steps=test_steps) - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 - assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2 - # Skip epoch 3 & 4 - for _ in range(2): - next_epoch(spec, state) - # Epoch 5: Attest current epoch - state, store, _ = yield from apply_next_epoch_with_attestations( - spec, state, store, True, False, participation_fn=_drop_random_one_third, test_steps=test_steps) - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 - assert state.current_justified_checkpoint.epoch == 2 - assert store.justified_checkpoint.epoch == 2 - assert state.current_justified_checkpoint == store.justified_checkpoint - - # Skip epoch 6 - next_epoch(spec, state) - - pre_state = state.copy() - - # Build a block to justify epoch 5 - signed_block = state_transition_with_full_block(spec, state, True, True) - assert state.finalized_checkpoint.epoch == 0 - assert state.current_justified_checkpoint.epoch == 5 - assert state.current_justified_checkpoint.epoch > store.justified_checkpoint.epoch - assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH < 
spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - # Run on_block - yield from tick_and_add_block(spec, store, signed_block, test_steps) - # Ensure justified_checkpoint has been changed but finality is unchanged - assert store.justified_checkpoint.epoch == 5 - assert store.justified_checkpoint == state.current_justified_checkpoint - assert store.finalized_checkpoint.epoch == pre_state.finalized_checkpoint.epoch == 0 - - yield 'steps', test_steps - - -@with_all_phases -@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch") -@spec_state_test -def test_on_block_outside_safe_slots_but_finality(spec, state): - """ - Test `should_update_justified_checkpoint` case - - compute_slots_since_epoch_start(get_current_slot(store)) > SAFE_SLOTS_TO_UPDATE_JUSTIFIED - - new_justified_checkpoint and store.justified_checkpoint.root are NOT conflicting - - Thus should_update_justified_checkpoint returns True. - - Part of this script is similar to `test_new_justified_is_later_than_store_justified`. 
- """ - test_steps = [] - # Initialization - store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) - yield 'anchor_state', state - yield 'anchor_block', anchor_block - current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time - on_tick_and_append_step(spec, store, current_time, test_steps) - assert store.time == current_time - - # Skip epoch 0 - next_epoch(spec, state) - # Fill epoch 1 to 3, attest current epoch - for _ in range(3): - state, store, _ = yield from apply_next_epoch_with_attestations( - spec, state, store, True, False, test_steps=test_steps) - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 - assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 - - # Skip epoch 4-6 - for _ in range(3): - next_epoch(spec, state) - - # epoch 7 - state, store, _ = yield from apply_next_epoch_with_attestations( - spec, state, store, True, True, test_steps=test_steps) - assert state.finalized_checkpoint.epoch == 2 - assert state.current_justified_checkpoint.epoch == 7 - - # epoch 8, attest the first 5 blocks - state, store, _ = yield from apply_next_slots_with_attestations( - spec, state, store, 5, True, True, test_steps) - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 - assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 7 - - # Propose a block at epoch 9, 5th slot - next_epoch(spec, state) - next_slots(spec, state, 4) - signed_block = state_transition_with_full_attestations_block(spec, state, True, True) - yield from tick_and_add_block(spec, store, signed_block, test_steps) - assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 - assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 7 - - # Propose an empty block at epoch 10, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot - # This block would trigger justification and finality updates on 
store - next_epoch(spec, state) - next_slots(spec, state, 4) - block = build_empty_block_for_next_slot(spec, state) - signed_block = state_transition_and_sign_block(spec, state, block) - assert state.finalized_checkpoint.epoch == 7 - assert state.current_justified_checkpoint.epoch == 8 - # Step time past safe slots and run on_block - if store.time < spec.compute_time_at_slot(state, signed_block.message.slot): - time = store.genesis_time + signed_block.message.slot * spec.config.SECONDS_PER_SLOT - on_tick_and_append_step(spec, store, time, test_steps) - assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - yield from add_block(spec, store, signed_block, test_steps) - - # Ensure justified_checkpoint finality has been changed - assert store.finalized_checkpoint.epoch == 7 - assert store.finalized_checkpoint == state.finalized_checkpoint - assert store.justified_checkpoint.epoch == 8 - assert store.justified_checkpoint == state.current_justified_checkpoint - - yield 'steps', test_steps - - -@with_all_phases -@with_presets([MINIMAL], reason="It assumes that `MAX_ATTESTATIONS` >= 2/3 attestations of an epoch") -@spec_state_test -def test_new_justified_is_later_than_store_justified(spec, state): - """ - J: Justified - F: Finalized - fork_1_state (forked from genesis): - epoch - [0] <- [1] <- [2] <- [3] <- [4] - F J - - fork_2_state (forked from fork_1_state's epoch 2): - epoch - └──── [3] <- [4] <- [5] <- [6] - F J - - fork_3_state (forked from genesis): - [0] <- [1] <- [2] <- [3] <- [4] <- [5] - F J - """ - # The 1st fork, from genesis - fork_1_state = state.copy() - # The 3rd fork, from genesis - fork_3_state = state.copy() - - test_steps = [] - # Initialization - store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) - yield 'anchor_state', state - yield 'anchor_block', anchor_block - current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time - on_tick_and_append_step(spec, store, 
current_time, test_steps) - assert store.time == current_time - - # ----- Process fork_1_state - # Skip epoch 0 - next_epoch(spec, fork_1_state) - # Fill epoch 1 with previous epoch attestations - fork_1_state, store, _ = yield from apply_next_epoch_with_attestations( - spec, fork_1_state, store, False, True, test_steps=test_steps) - - # Fork `fork_2_state` at the start of epoch 2 - fork_2_state = fork_1_state.copy() - assert spec.get_current_epoch(fork_2_state) == 2 - - # Skip epoch 2 - next_epoch(spec, fork_1_state) - # # Fill epoch 3 & 4 with previous epoch attestations - for _ in range(2): - fork_1_state, store, _ = yield from apply_next_epoch_with_attestations( - spec, fork_1_state, store, False, True, test_steps=test_steps) - - assert fork_1_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 0 - assert fork_1_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 - assert store.justified_checkpoint == fork_1_state.current_justified_checkpoint - - # ------ fork_2_state: Create a chain to set store.best_justified_checkpoint - # NOTE: The goal is to make `store.best_justified_checkpoint.epoch > store.justified_checkpoint.epoch` - all_blocks = [] - - # Proposed an empty block at epoch 2, 1st slot - block = build_empty_block_for_next_slot(spec, fork_2_state) - signed_block = state_transition_and_sign_block(spec, fork_2_state, block) - yield from tick_and_add_block(spec, store, signed_block, test_steps) - assert fork_2_state.current_justified_checkpoint.epoch == 0 - - # Skip to epoch 4 - for _ in range(2): - next_epoch(spec, fork_2_state) - assert fork_2_state.current_justified_checkpoint.epoch == 0 - - # Propose a block at epoch 4, 5th slot - # Propose a block at epoch 5, 5th slot - for _ in range(2): - next_epoch(spec, fork_2_state) - next_slots(spec, fork_2_state, 4) - signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True) - yield from tick_and_add_block(spec, store, 
signed_block, test_steps) - assert fork_2_state.current_justified_checkpoint.epoch == 0 - - # Propose a block at epoch 6, SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2 slot - next_epoch(spec, fork_2_state) - next_slots(spec, fork_2_state, spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED + 2) - signed_block = state_transition_with_full_attestations_block(spec, fork_2_state, True, True) - assert fork_2_state.finalized_checkpoint.epoch == 0 - assert fork_2_state.current_justified_checkpoint.epoch == 5 - # Check SAFE_SLOTS_TO_UPDATE_JUSTIFIED - time = store.genesis_time + fork_2_state.slot * spec.config.SECONDS_PER_SLOT - on_tick_and_append_step(spec, store, time, test_steps) - assert spec.compute_slots_since_epoch_start(spec.get_current_slot(store)) >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - # Run on_block - yield from add_block(spec, store, signed_block, test_steps) - assert store.finalized_checkpoint.epoch == 0 - assert store.justified_checkpoint.epoch == 3 - assert store.best_justified_checkpoint.epoch == 5 - - # ------ fork_3_state: Create another chain to test the - # "Update justified if new justified is later than store justified" case - all_blocks = [] - for _ in range(3): - next_epoch(spec, fork_3_state) - - # epoch 3 - _, signed_blocks, fork_3_state = next_epoch_with_attestations(spec, fork_3_state, True, True) - all_blocks += signed_blocks - assert fork_3_state.finalized_checkpoint.epoch == 0 - - # epoch 4, attest the first 5 blocks - _, blocks, fork_3_state = next_slots_with_attestations(spec, fork_3_state, 5, True, True) - all_blocks += blocks.copy() - assert fork_3_state.finalized_checkpoint.epoch == 0 - - # Propose a block at epoch 5, 5th slot - next_epoch(spec, fork_3_state) - next_slots(spec, fork_3_state, 4) - signed_block = state_transition_with_full_block(spec, fork_3_state, True, True) - all_blocks.append(signed_block.copy()) - assert fork_3_state.finalized_checkpoint.epoch == 0 - - # Propose a block at epoch 6, 5th slot - next_epoch(spec, fork_3_state) - next_slots(spec, 
fork_3_state, 4) - signed_block = state_transition_with_full_block(spec, fork_3_state, True, True) - all_blocks.append(signed_block.copy()) - assert fork_3_state.finalized_checkpoint.epoch == 3 - assert fork_3_state.current_justified_checkpoint.epoch == 4 - - # Apply blocks of `fork_3_state` to `store` - for block in all_blocks: - if store.time < spec.compute_time_at_slot(fork_2_state, block.message.slot): - time = store.genesis_time + block.message.slot * spec.config.SECONDS_PER_SLOT - on_tick_and_append_step(spec, store, time, test_steps) - yield from add_block(spec, store, block, test_steps) - - assert store.finalized_checkpoint == fork_3_state.finalized_checkpoint - assert store.justified_checkpoint == fork_3_state.current_justified_checkpoint - assert store.justified_checkpoint != store.best_justified_checkpoint - assert store.best_justified_checkpoint == fork_2_state.current_justified_checkpoint - - yield 'steps', test_steps - - +""" @with_all_phases @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): - """ - J: Justified - F: Finalized - state (forked from genesis): - epoch - [0] <- [1] <- [2] <- [3] <- [4] <- [5] - F J + # J: Justified + # F: Finalized + # state (forked from genesis): + # epoch + # [0] <- [1] <- [2] <- [3] <- [4] <- [5] + # F J + + # another_state (forked from epoch 0): + # └──── [1] <- [2] <- [3] <- [4] <- [5] + # F J - another_state (forked from epoch 0): - └──── [1] <- [2] <- [3] <- [4] <- [5] - F J - """ test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) @@ -631,9 +358,15 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): assert ancestor_at_finalized_slot != store.finalized_checkpoint.root assert store.finalized_checkpoint == another_state.finalized_checkpoint - assert store.justified_checkpoint == another_state.current_justified_checkpoint + + # NOTE: inconsistent 
justified/finalized checkpoints in this edge case. + # This can only happen when >1/3 validators are slashable, as this testcase requires that + # store.justified_checkpoint is higher than store.finalized_checkpoint and on a different branch. + # Ignoring this testcase for now. + assert store.justified_checkpoint != another_state.current_justified_checkpoint yield 'steps', test_steps +""" @with_all_phases @@ -701,7 +434,9 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): assert ancestor_at_finalized_slot == store.finalized_checkpoint.root assert store.finalized_checkpoint == another_state.finalized_checkpoint - assert store.justified_checkpoint == another_state.current_justified_checkpoint + + # NOTE: inconsistent justified/finalized checkpoints in this edge case + assert store.justified_checkpoint != another_state.current_justified_checkpoint yield 'steps', test_steps @@ -797,3 +532,797 @@ def test_proposer_boost_root_same_slot_untimely_block(spec, state): }) yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justification_withholding(spec, state): + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + for _ in range(2): + next_epoch(spec, state) + + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.get_current_epoch(state) == 4 + + # ------------ + + # Create attacker's fork that can 
justify epoch 4 + # Do not apply attacker's blocks to store + attacker_state = state.copy() + attacker_signed_blocks = [] + + while not is_ready_to_justify(spec, attacker_state): + attacker_state, signed_blocks, attacker_state = next_slots_with_attestations( + spec, attacker_state, 1, True, False) + attacker_signed_blocks += signed_blocks + + assert attacker_state.finalized_checkpoint.epoch == 2 + assert attacker_state.current_justified_checkpoint.epoch == 3 + assert spec.get_current_epoch(attacker_state) == 4 + + # ------------ + + # The honest fork sees all except the last block from attacker_signed_blocks + # Apply honest fork to store + honest_signed_blocks = attacker_signed_blocks[:-1] + assert len(honest_signed_blocks) > 0 + + for signed_block in honest_signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + + last_honest_block = honest_signed_blocks[-1].message + honest_state = store.block_states[hash_tree_root(last_honest_block)].copy() + + assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.get_current_epoch(honest_state) == 4 + + # Create & apply an honest block in epoch 5 that can justify epoch 4 + next_epoch(spec, honest_state) + assert spec.get_current_epoch(honest_state) == 5 + + honest_block = build_empty_block_for_next_slot(spec, honest_state) + honest_block.body.attestations = attacker_signed_blocks[-1].message.body.attestations + signed_block = state_transition_and_sign_block(spec, honest_state, honest_block) + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.get_head(store) == hash_tree_root(honest_block) + assert is_ready_to_justify(spec, honest_state) + + # 
------------ + + # When the attacker's block is received, the honest block is still the head + # This relies on the honest block's LMD score increasing due to proposer boost + yield from tick_and_add_block(spec, store, attacker_signed_blocks[-1], test_steps) + assert store.finalized_checkpoint.epoch == 3 + assert store.justified_checkpoint.epoch == 4 + assert spec.get_head(store) == hash_tree_root(honest_block) + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justification_withholding_reverse_order(spec, state): + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + for _ in range(2): + next_epoch(spec, state) + + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.get_current_epoch(state) == 4 + + # ------------ + + # Create attacker's fork that can justify epoch 4 + attacker_state = state.copy() + attacker_signed_blocks = [] + + while not is_ready_to_justify(spec, attacker_state): + attacker_state, signed_blocks, attacker_state = next_slots_with_attestations( + spec, attacker_state, 1, True, False) + assert len(signed_blocks) == 1 + attacker_signed_blocks += signed_blocks + yield from tick_and_add_block(spec, store, signed_blocks[0], test_steps) + + assert attacker_state.finalized_checkpoint.epoch == 2 + assert attacker_state.current_justified_checkpoint.epoch == 3 + assert 
spec.get_current_epoch(attacker_state) == 4 + attackers_head = hash_tree_root(attacker_signed_blocks[-1].message) + assert spec.get_head(store) == attackers_head + + # ------------ + + # The honest fork sees all except the last block from attacker_signed_blocks + honest_signed_blocks = attacker_signed_blocks[:-1] + assert len(honest_signed_blocks) > 0 + + last_honest_block = honest_signed_blocks[-1].message + honest_state = store.block_states[hash_tree_root(last_honest_block)].copy() + + assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.get_current_epoch(honest_state) == 4 + + # Create an honest block in epoch 5 that can justify epoch 4 + next_epoch(spec, honest_state) + assert spec.get_current_epoch(honest_state) == 5 + + honest_block = build_empty_block_for_next_slot(spec, honest_state) + honest_block.body.attestations = attacker_signed_blocks[-1].message.body.attestations + signed_block = state_transition_and_sign_block(spec, honest_state, honest_block) + assert honest_state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + assert honest_state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert is_ready_to_justify(spec, honest_state) + + # When the honest block is received, the honest block becomes the head + # This relies on the honest block's LMD score increasing due to proposer boost + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert store.finalized_checkpoint.epoch == 3 + assert store.justified_checkpoint.epoch == 4 + assert spec.get_head(store) == hash_tree_root(honest_block) + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justification_update_beginning_of_epoch(spec, state): + """ + Check that the store's justified checkpoint is updated when a block 
containing better justification is + revealed at the first slot of an epoch + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Create a block that has new justification information contained within it, but don't add to store yet + another_state = state.copy() + _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, False) + assert spec.compute_epoch_at_slot(another_state.slot) == 5 + assert another_state.current_justified_checkpoint.epoch == 4 + + # Tick store to the start of the next epoch + slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + + # Now add the blocks & check that justification update was triggered + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert store.justified_checkpoint.epoch == 4 + + yield 'steps', test_steps + + +@with_all_phases 
+@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justification_update_end_of_epoch(spec, state): + """ + Check that the store's justified checkpoint is updated when a block containing better justification is + revealed at the last slot of an epoch + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Create a block that has new justification information contained within it, but don't add to store yet + another_state = state.copy() + _, signed_blocks, another_state = next_epoch_with_attestations(spec, another_state, True, False) + assert spec.compute_epoch_at_slot(another_state.slot) == 5 + assert another_state.current_justified_checkpoint.epoch == 4 + + # Tick store to the last slot of the next epoch + slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + slot = slot + spec.SLOTS_PER_EPOCH - 1 + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + + # Now add the blocks & check that justification update was triggered + for signed_block in signed_blocks: + yield 
from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert store.justified_checkpoint.epoch == 4 + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_incompatible_justification_update_start_of_epoch(spec, state): + """ + Check that the store's justified checkpoint is updated when a block containing better justification is + revealed at the start slot of an epoch, even when the better justified checkpoint is not a descendant of + the store's justified checkpoint + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + + # Copy the state to create a fork later + another_state = state.copy() + + # Fill epoch 4 and 5 + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert state.finalized_checkpoint.epoch 
== store.finalized_checkpoint.epoch == 4 + + # Create a block that has new justification information contained within it, but don't add to store yet + next_epoch(spec, another_state) + signed_blocks = [] + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 6 + assert another_state.current_justified_checkpoint.epoch == 3 + assert another_state.finalized_checkpoint.epoch == 2 + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 7 + assert another_state.current_justified_checkpoint.epoch == 6 + assert another_state.finalized_checkpoint.epoch == 2 + last_block_root = another_state.latest_block_header.parent_root + + # Tick store to the last slot of the next epoch + slot = another_state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 8 + + # Now add the blocks & check that justification update was triggered + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + finalized_slot = spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) + assert spec.get_ancestor(store, last_block_root, finalized_slot) == state.finalized_checkpoint.root + justified_slot = spec.compute_start_slot_at_epoch(state.current_justified_checkpoint.epoch) + assert spec.get_ancestor(store, last_block_root, justified_slot) != state.current_justified_checkpoint.root + assert store.finalized_checkpoint.epoch == 4 + assert store.justified_checkpoint.epoch == 6 + + yield 'steps', test_steps + + +@with_all_phases 
+@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_incompatible_justification_update_end_of_epoch(spec, state): + """ + Check that the store's justified checkpoint is updated when a block containing better justification is + revealed at the last slot of an epoch, even when the better justified checkpoint is not a descendant of + the store's justified checkpoint + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 2 + + # Copy the state to create a fork later + another_state = state.copy() + + # Fill epoch 4 and 5 + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 + + # Create a block that has new justification information contained within it, but don't add to store yet + next_epoch(spec, another_state) + signed_blocks = [] + _, signed_blocks_temp, 
another_state = next_epoch_with_attestations(spec, another_state, False, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 6 + assert another_state.current_justified_checkpoint.epoch == 3 + assert another_state.finalized_checkpoint.epoch == 2 + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 7 + assert another_state.current_justified_checkpoint.epoch == 6 + assert another_state.finalized_checkpoint.epoch == 2 + last_block_root = another_state.latest_block_header.parent_root + + # Tick store to the last slot of the next epoch + slot = another_state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + slot = slot + spec.SLOTS_PER_EPOCH - 1 + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 8 + + # Now add the blocks & check that justification update was triggered + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + finalized_slot = spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) + assert spec.get_ancestor(store, last_block_root, finalized_slot) == state.finalized_checkpoint.root + justified_slot = spec.compute_start_slot_at_epoch(state.current_justified_checkpoint.epoch) + assert spec.get_ancestor(store, last_block_root, justified_slot) != state.current_justified_checkpoint.root + assert store.finalized_checkpoint.epoch == 4 + assert store.justified_checkpoint.epoch == 6 + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_justified_update_not_realized_finality(spec, state): + """ + Check that the store updates its justified checkpoint if a higher 
justified checkpoint is found that is + a descendant of the finalized checkpoint, but does not know about the finality + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # We'll make the current head block the finalized block + finalized_root = spec.get_head(store) + finalized_block = store.blocks[finalized_root] + assert spec.compute_epoch_at_slot(finalized_block.slot) == 4 + assert spec.get_head(store) == finalized_root + # Copy the post-state to use later + another_state = state.copy() + + # Create a fork that finalizes our block + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 + assert state.finalized_checkpoint.root == store.finalized_checkpoint.root == finalized_root + + # Create a fork for a better justification that is a descendant of the finalized block, + # but does not realize the finality. 
+    # Do not add these blocks to the store yet
+    next_epoch(spec, another_state)
+    signed_blocks = []
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 6
+    assert another_state.current_justified_checkpoint.epoch == 3
+    assert another_state.finalized_checkpoint.epoch == 2
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 7
+    assert another_state.current_justified_checkpoint.epoch == 6
+
+    # Now add the blocks & check that justification update was triggered
+    for signed_block in signed_blocks:
+        yield from tick_and_add_block(spec, store, signed_block, test_steps)
+    assert store.justified_checkpoint.epoch == 6
+    assert store.finalized_checkpoint.epoch == 4
+    last_block = signed_blocks[-1]
+    last_block_root = last_block.message.hash_tree_root()
+    ancestor_at_finalized_slot = spec.get_ancestor(store, last_block_root, finalized_block.slot)
+    assert ancestor_at_finalized_slot == store.finalized_checkpoint.root
+
+    yield 'steps', test_steps
+
+
+@with_all_phases
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_justified_update_monotonic(spec, state):
+    """
+    Check that the store does not update its justified checkpoint with lower justified checkpoints.
+ This testcase checks that the store's justified checkpoint remains the same even when we input a block that has: + - a higher finalized checkpoint than the store's finalized checkpoint, and + - a lower justified checkpoint than the store's justified checkpoint + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # We'll eventually make the current head block the finalized block + finalized_root = spec.get_head(store) + finalized_block = store.blocks[finalized_root] + assert spec.compute_epoch_at_slot(finalized_block.slot) == 4 + assert spec.get_head(store) == finalized_root + # Copy into another variable so we can use `state` later + another_state = state.copy() + + # Create a fork with justification that is a descendant of the finalized block + # Do not add these blocks to the store yet + next_epoch(spec, another_state) + signed_blocks = [] + _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False) + signed_blocks += signed_blocks_temp + assert spec.compute_epoch_at_slot(another_state.slot) == 6 + assert another_state.current_justified_checkpoint.epoch == 3 + assert 
another_state.finalized_checkpoint.epoch == 2
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 7
+    assert another_state.current_justified_checkpoint.epoch == 6
+    assert another_state.finalized_checkpoint.epoch == 2
+
+    # Now add the blocks & check that justification update was triggered
+    for signed_block in signed_blocks:
+        yield from tick_and_add_block(spec, store, signed_block, test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
+    assert store.justified_checkpoint.epoch == 6
+    assert store.finalized_checkpoint.epoch == 2
+    last_block = signed_blocks[-1]
+    last_block_root = last_block.message.hash_tree_root()
+    ancestor_at_finalized_slot = spec.get_ancestor(store, last_block_root, finalized_block.slot)
+    assert ancestor_at_finalized_slot == finalized_root
+
+    # Create a fork with lower justification that also finalizes our chosen block
+    for _ in range(2):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
+    assert state.current_justified_checkpoint.epoch == 5
+    # Check that store's finalized checkpoint is updated
+    assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4
+    # Check that store's justified checkpoint is not updated
+    assert store.justified_checkpoint.epoch == 6
+
+    yield 'steps', test_steps
+
+
+@with_all_phases
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_justified_update_always_if_better(spec, state):
+    """
+    Check that the store updates its justified checkpoint with any higher justified checkpoint.
+ This testcase checks that the store's justified checkpoint is updated when we input a block that has: + - a lower finalized checkpoint than the store's finalized checkpoint, and + - a higher justified checkpoint than the store's justified checkpoint + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # We'll eventually make the current head block the finalized block + finalized_root = spec.get_head(store) + finalized_block = store.blocks[finalized_root] + assert spec.compute_epoch_at_slot(finalized_block.slot) == 4 + assert spec.get_head(store) == finalized_root + # Copy into another variable to use later + another_state = state.copy() + + # Create a fork with lower justification that also finalizes our chosen block + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 5 + assert state.finalized_checkpoint.epoch == store.finalized_checkpoint.epoch == 4 + + # Create a fork with higher 
justification that is a descendant of the finalized block
+    # Do not add these blocks to the store yet
+    next_epoch(spec, another_state)
+    signed_blocks = []
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, False, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 6
+    assert another_state.current_justified_checkpoint.epoch == 3
+    assert another_state.finalized_checkpoint.epoch == 2
+    _, signed_blocks_temp, another_state = next_epoch_with_attestations(spec, another_state, True, False)
+    signed_blocks += signed_blocks_temp
+    assert spec.compute_epoch_at_slot(another_state.slot) == 7
+    assert another_state.current_justified_checkpoint.epoch == 6
+    assert another_state.finalized_checkpoint.epoch == 2
+
+    # Now add the blocks & check that justification update was triggered
+    for signed_block in signed_blocks:
+        yield from tick_and_add_block(spec, store, signed_block, test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7
+    assert store.justified_checkpoint.epoch == 6
+    assert store.finalized_checkpoint.epoch == 4
+
+    yield 'steps', test_steps
+
+
+@with_altair_and_later
+@spec_state_test
+@with_presets([MINIMAL], reason="too slow")
+def test_pull_up_past_epoch_block(spec, state):
+    """
+    Check that the store pulls up a block from the past epoch to realize its justification & finalization information
+    """
+    test_steps = []
+    # Initialization
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+    yield 'anchor_state', state
+    yield 'anchor_block', anchor_block
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert store.time == current_time
+
+    next_epoch(spec, state)
+    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+    # Fill epoch 1 to 3
+    for _ in range(3):
+
state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Create a chain within epoch 4 that contains a justification for epoch 4 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 4 + + # Tick store to the next epoch + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Add the previously created chain to the store and check for updates + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert store.justified_checkpoint.epoch == 4 + assert store.finalized_checkpoint.epoch == 3 + + yield 'steps', test_steps + + +@with_altair_and_later +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_not_pull_up_current_epoch_block(spec, state): + """ + Check that the store does not pull-up a block from the current epoch if the previous epoch is not justified + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + 
on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Skip to the next epoch + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(state.slot) == 5 + + # Create a chain within epoch 5 that contains a justification for epoch 5 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 5 + + # Add the previously created chain to the store and check that store does not apply pull-up updates + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + yield 'steps', test_steps + + +@with_altair_and_later +@spec_state_test +@with_presets([MINIMAL], reason="too slow") +def test_pull_up_on_tick(spec, state): + """ + Check that the store pulls-up current epoch tips on the on_tick transition to the next epoch + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 
'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Skip to the next epoch + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(state.slot) == 5 + + # Create a chain within epoch 5 that contains a justification for epoch 5 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) == 5 + + # Add the previously created chain to the store and check that store does not apply pull-up updates, + # since the previous epoch was not justified + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert store.justified_checkpoint.epoch == 3 + assert store.finalized_checkpoint.epoch == 2 + + # Now tick the store to the next epoch and check that pull-up tip updates were applied + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) 
+ assert spec.compute_epoch_at_slot(state.slot) == 6 + assert store.justified_checkpoint.epoch == 5 + # There's no new finality, so no finality updates expected + assert store.finalized_checkpoint.epoch == 3 + + yield 'steps', test_steps diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py new file mode 100644 index 0000000000..30f1b06c7b --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py @@ -0,0 +1,498 @@ +from eth2spec.test.context import ( + spec_state_test, + with_all_phases, + with_presets, +) +from eth2spec.test.helpers.constants import ( + MINIMAL, +) +from eth2spec.test.helpers.attestations import ( + state_transition_with_full_block, + get_valid_attestation, + get_valid_attestation_at_slot, +) +from eth2spec.test.helpers.block import ( + build_empty_block, + build_empty_block_for_next_slot, +) +from eth2spec.test.helpers.fork_choice import ( + get_genesis_forkchoice_store_and_block, + on_tick_and_append_step, + add_attestations, + tick_and_add_block, + apply_next_epoch_with_attestations, + find_next_justifying_slot, + is_ready_to_justify, +) +from eth2spec.test.helpers.state import ( + state_transition_and_sign_block, + next_epoch, + next_slot, + transition_to, +) + + +TESTING_PRESETS = [MINIMAL] + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_simple_attempted_reorg_without_enough_ffg_votes(spec, state): + """ + [Case 1] + + { epoch 4 }{ epoch 5 } + [c4]<--[a]<--[-]<--[y] + ↑____[-]<--[z] + + At c4, c3 is the latest justified checkpoint (or something earlier) + + The block y doesn't have enough votes to justify c4. + The block z also doesn't have enough votes to justify c4. 
+ """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # create block_a, it needs 2 more full blocks to justify epoch 4 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) + for signed_block in signed_blocks[:-2]: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + state = store.block_states[spec.get_head(store)].copy() + assert state.current_justified_checkpoint.epoch == 3 + next_slot(spec, state) + state_a = state.copy() + + # to test the "no withholding" situation, temporarily store the blocks in lists + signed_blocks_of_y = [] + signed_blocks_of_z = [] + + # add an empty block on chain y + block_y = build_empty_block_for_next_slot(spec, state) + signed_block_y = state_transition_and_sign_block(spec, state, block_y) + signed_blocks_of_y.append(signed_block_y) + + # chain y has some on-chain attestations, but not enough to justify c4 + signed_block_y = state_transition_with_full_block(spec, state, True, True) + assert not is_ready_to_justify(spec, state) + signed_blocks_of_y.append(signed_block_y) + assert store.justified_checkpoint.epoch == 3 + + 
state = state_a.copy() + signed_block_z = None + # add one block on chain z, which is not enough to justify c4 + attestation = get_valid_attestation(spec, state, slot=state.slot, signed=True) + block_z = build_empty_block_for_next_slot(spec, state) + block_z.body.attestations = [attestation] + signed_block_z = state_transition_and_sign_block(spec, state, block_z) + signed_blocks_of_z.append(signed_block_z) + + # add an empty block on chain z + block_z = build_empty_block_for_next_slot(spec, state) + signed_block_z = state_transition_and_sign_block(spec, state, block_z) + signed_blocks_of_z.append(signed_block_z) + + # ensure z couldn't justify c4 + assert not is_ready_to_justify(spec, state) + + # apply blocks to store + # (i) slot block_a.slot + 1 + signed_block_y = signed_blocks_of_y.pop(0) + yield from tick_and_add_block(spec, store, signed_block_y, test_steps) + # apply block of chain `z` + signed_block_z = signed_blocks_of_z.pop(0) + yield from tick_and_add_block(spec, store, signed_block_z, test_steps) + + # (ii) slot block_a.slot + 2 + # apply block of chain `z` + signed_block_z = signed_blocks_of_z.pop(0) + yield from tick_and_add_block(spec, store, signed_block_z, test_steps) + # apply block of chain `y` + signed_block_y = signed_blocks_of_y.pop(0) + yield from tick_and_add_block(spec, store, signed_block_y, test_steps) + # chain `y` remains the winner since it arrives earlier than `z` + assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + assert len(signed_blocks_of_y) == len(signed_blocks_of_z) == 0 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + + # tick to the prior of the epoch boundary + slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1 + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert 
state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4
+    # chain `y` remains the winner
+    assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
+
+    # to next block
+    next_epoch(spec, state)
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5
+    assert spec.get_head(store) == signed_block_y.message.hash_tree_root()
+    assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+    yield 'steps', test_steps
+
+
+def _run_delayed_justification(spec, state, attemped_reorg, is_justifying_previous_epoch):
+    """
+    Delay justification with a justifying chain `y`, optionally attempt a reorg via chain `z`,
+    and check that chain `y` stays the head and the delayed justification is applied.
+    """
+    test_steps = []
+    # Initialization
+    store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state)
+    yield 'anchor_state', state
+    yield 'anchor_block', anchor_block
+    current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time
+    on_tick_and_append_step(spec, store, current_time, test_steps)
+    assert store.time == current_time
+
+    next_epoch(spec, state)
+    on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps)
+
+    # Fill epoch 1 to 2
+    for _ in range(2):
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+
+    if is_justifying_previous_epoch:
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, False, False, test_steps=test_steps)
+        assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2
+    else:
+        state, store, _ = yield from apply_next_epoch_with_attestations(
+            spec, state, store, True, True, test_steps=test_steps)
+        assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3
+
+    if
is_justifying_previous_epoch: + # try to find the block that can justify epoch 3 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, False, True) + else: + # try to find the block that can justify epoch 4 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, True) + + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) + for signed_block in signed_blocks: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + spec.get_head(store) == signed_block.message.hash_tree_root() + state = store.block_states[spec.get_head(store)].copy() + if is_justifying_previous_epoch: + assert state.current_justified_checkpoint.epoch == 2 + else: + assert state.current_justified_checkpoint.epoch == 3 + + assert is_ready_to_justify(spec, state) + state_b = state.copy() + + # add chain y + if is_justifying_previous_epoch: + signed_block_y = state_transition_with_full_block(spec, state, False, True) + else: + signed_block_y = state_transition_with_full_block(spec, state, True, True) + yield from tick_and_add_block(spec, store, signed_block_y, test_steps) + assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + if is_justifying_previous_epoch: + assert store.justified_checkpoint.epoch == 2 + else: + assert store.justified_checkpoint.epoch == 3 + + # add attestations of y + temp_state = state.copy() + next_slot(spec, temp_state) + attestations_for_y = list(get_valid_attestation_at_slot(temp_state, spec, signed_block_y.message.slot)) + current_time = temp_state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + yield from add_attestations(spec, store, attestations_for_y, test_steps) + assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + + if attemped_reorg: + # add chain z + state = state_b.copy() + slot = state.slot + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) - 1 + 
transition_to(spec, state, slot) + block_z = build_empty_block_for_next_slot(spec, state) + assert spec.compute_epoch_at_slot(block_z.slot) == 5 + signed_block_z = state_transition_and_sign_block(spec, state, block_z) + yield from tick_and_add_block(spec, store, signed_block_z, test_steps) + else: + # next epoch + state = state_b.copy() + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + + # no reorg + assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + if is_justifying_previous_epoch: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + else: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_simple_attempted_reorg_delayed_justification_current_epoch(spec, state): + """ + [Case 2] + + { epoch 4 }{ epoch 5 } + [c4]<--[b]<--[y] + ↑______________[z] + At c4, c3 is the latest justified checkpoint (or something earlier) + + block_b: the block that can justify c4. + z: the child of block of x at the first slot of epoch 5. + block z can reorg the chain from block y. 
+ """ + yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=False) + + +def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justifying_previous_epoch): + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 2 + for _ in range(2): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + if is_justifying_previous_epoch: + block_a = build_empty_block_for_next_slot(spec, state) + signed_block_a = state_transition_and_sign_block(spec, state, block_a) + yield from tick_and_add_block(spec, store, signed_block_a, test_steps) + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2 + else: + # fill one more epoch + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + signed_block_a = state_transition_with_full_block(spec, state, True, True) + yield from tick_and_add_block(spec, store, signed_block_a, test_steps) + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + spec.get_head(store) == signed_block_a.message.hash_tree_root() + + state = store.block_states[spec.get_head(store)].copy() + if is_justifying_previous_epoch: + assert state.current_justified_checkpoint.epoch == 2 + else: + assert state.current_justified_checkpoint.epoch == 3 + state_a = state.copy() + + if is_justifying_previous_epoch: + # try to find the block that can 
justify epoch 3 + _, justifying_slot = find_next_justifying_slot(spec, state, False, True) + else: + # try to find the block that can justify epoch 4 + _, justifying_slot = find_next_justifying_slot(spec, state, True, True) + + last_slot_of_z = justifying_slot if enough_ffg else justifying_slot - 1 + last_slot_of_y = justifying_slot if is_justifying_previous_epoch else last_slot_of_z - 1 + + # to test the "no withholding" situation, temporarily store the blocks in lists + signed_blocks_of_y = [] + + # build an empty chain to the slot prior epoch boundary + signed_blocks_of_empty_chain = [] + states_of_empty_chain = [] + + for slot in range(state.slot + 1, last_slot_of_y + 1): + block = build_empty_block(spec, state, slot=slot) + signed_block = state_transition_and_sign_block(spec, state, block) + signed_blocks_of_empty_chain.append(signed_block) + states_of_empty_chain.append(state.copy()) + signed_blocks_of_y.append(signed_block) + + signed_block_y = signed_blocks_of_empty_chain[-1] + + # create 2/3 votes for the empty chain + attestations_for_y = [] + # target_is_current = not is_justifying_previous_epoch + attestations = list(get_valid_attestation_at_slot(state, spec, state_a.slot)) + attestations_for_y.append(attestations) + for state in states_of_empty_chain: + attestations = list(get_valid_attestation_at_slot(state, spec, state.slot)) + attestations_for_y.append(attestations) + + state = state_a.copy() + signed_block_z = None + + for slot in range(state_a.slot + 1, last_slot_of_z + 1): + # apply chain y, the empty chain + if slot <= last_slot_of_y and len(signed_blocks_of_y) > 0: + signed_block_y = signed_blocks_of_y.pop(0) + assert signed_block_y.message.slot == slot + yield from tick_and_add_block(spec, store, signed_block_y, test_steps) + + # apply chain z, a fork chain that includes these attestations_for_y + block = build_empty_block(spec, state, slot=slot) + if ( + len(attestations_for_y) > 0 and ( + (not is_justifying_previous_epoch) + or 
(is_justifying_previous_epoch and attestations_for_y[0][0].data.slot == slot - 5) + ) + ): + block.body.attestations = attestations_for_y.pop(0) + signed_block_z = state_transition_and_sign_block(spec, state, block) + if signed_block_y != signed_block_z: + yield from tick_and_add_block(spec, store, signed_block_z, test_steps) + if is_ready_to_justify(spec, state): + break + + assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + + if is_justifying_previous_epoch: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 2 + else: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + if enough_ffg: + assert is_ready_to_justify(spec, state) + else: + assert not is_ready_to_justify(spec, state) + + # to next epoch + next_epoch(spec, state) + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + + if enough_ffg: + # reorg + assert spec.get_head(store) == signed_block_z.message.hash_tree_root() + if is_justifying_previous_epoch: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + else: + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 4 + else: + # no reorg + assert spec.get_head(store) == signed_block_y.message.hash_tree_root() + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + yield 'steps', test_steps + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch(spec, state): + """ + [Case 3] + """ + yield from _run_include_votes_of_another_empty_chain( + spec, state, enough_ffg=True, is_justifying_previous_epoch=False) + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, 
reason="too slow") +def test_include_votes_another_empty_chain_without_enough_ffg_votes_current_epoch(spec, state): + """ + [Case 4] + """ + yield from _run_include_votes_of_another_empty_chain( + spec, state, enough_ffg=False, is_justifying_previous_epoch=False) + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_delayed_justification_current_epoch(spec, state): + """ + [Case 5] + + To compare with ``test_simple_attempted_reorg_delayed_justification_current_epoch``, + this is the basic case if there is no chain z + + { epoch 4 }{ epoch 5 } + [c4]<--[b]<--[y] + + At c4, c3 is the latest justified checkpoint. + + block_b: the block that can justify c4. + """ + yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=False) + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_delayed_justification_previous_epoch(spec, state): + """ + [Case 6] + + Similar to ``test_delayed_justification_current_epoch``, + but includes attestations during epoch N to justify checkpoint N-1. + + { epoch 3 }{ epoch 4 }{ epoch 5 } + [c3]<---------------[c4]---[b]<---------------------------------[y] + + """ + yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=True) + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_simple_attempted_reorg_delayed_justification_previous_epoch(spec, state): + """ + [Case 7] + + Similar to ``test_simple_attempted_reorg_delayed_justification_current_epoch``, + but includes attestations during epoch N to justify checkpoint N-1. + + { epoch 3 }{ epoch 4 }{ epoch 5 } + [c3]<---------------[c4]<--[b]<--[y] + ↑______________[z] + + At c4, c2 is the latest justified checkpoint. + + block_b: the block that can justify c3. + z: the child of block of x at the first slot of epoch 5. + block z can reorg the chain from block y. 
+ """ + yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=True) + + +@with_all_phases +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_include_votes_another_empty_chain_with_enough_ffg_votes_previous_epoch(spec, state): + """ + [Case 8] + + Similar to ``test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch``, + but includes attestations during epoch N to justify checkpoint N-1. + + """ + yield from _run_include_votes_of_another_empty_chain( + spec, state, enough_ffg=True, is_justifying_previous_epoch=True) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py new file mode 100644 index 0000000000..61926875ad --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_withholding.py @@ -0,0 +1,205 @@ +from eth2spec.test.context import ( + spec_state_test, + with_altair_and_later, + with_presets, +) +from eth2spec.test.helpers.constants import ( + MINIMAL, +) +from eth2spec.test.helpers.attestations import ( + state_transition_with_full_block, +) +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot, +) +from eth2spec.test.helpers.fork_choice import ( + get_genesis_forkchoice_store_and_block, + on_tick_and_append_step, + tick_and_add_block, + apply_next_epoch_with_attestations, + find_next_justifying_slot, +) +from eth2spec.test.helpers.state import ( + state_transition_and_sign_block, + next_epoch, +) + + +TESTING_PRESETS = [MINIMAL] + + +@with_altair_and_later +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_withholding_attack(spec, state): + """ + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + 
store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Create the attack block that includes justifying attestations for epoch 4 + # This block is withheld & revealed only in epoch 5 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) + assert len(signed_blocks) > 1 + signed_attack_block = signed_blocks[-1] + for signed_block in signed_blocks[:-1]: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root() + state = store.block_states[spec.get_head(store)].copy() + assert spec.compute_epoch_at_slot(state.slot) == 4 + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Create an honest chain in epoch 5 that includes the justifying attestations from the attack block + next_epoch(spec, state) + assert spec.compute_epoch_at_slot(state.slot) == 5 + assert state.current_justified_checkpoint.epoch == 3 + # Create two block in the honest chain with full attestations, and add to the store + for _ in range(2): + signed_block = state_transition_with_full_block(spec, state, True, False) + yield from tick_and_add_block(spec, store, signed_block, 
test_steps) + # Create final block in the honest chain that includes the justifying attestations from the attack block + honest_block = build_empty_block_for_next_slot(spec, state) + honest_block.body.attestations = signed_attack_block.message.body.attestations + signed_honest_block = state_transition_and_sign_block(spec, state, honest_block) + # Add the honest block to the store + yield from tick_and_add_block(spec, store, signed_honest_block, test_steps) + assert spec.get_head(store) == signed_honest_block.message.hash_tree_root() + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Tick to the next slot so proposer boost is not a factor in choosing the head + current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.get_head(store) == signed_honest_block.message.hash_tree_root() + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Upon revealing the withheld attack block, the honest block should still be the head + yield from tick_and_add_block(spec, store, signed_attack_block, test_steps) + assert spec.get_head(store) == signed_honest_block.message.hash_tree_root() + # As a side effect of the pull-up logic, the attack block is pulled up and store.justified_checkpoint is updated + assert store.justified_checkpoint.epoch == 4 + + # Even after going to the next epoch, the honest block should remain the head + slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert 
spec.get_head(store) == signed_honest_block.message.hash_tree_root() + + yield 'steps', test_steps + + +@with_altair_and_later +@spec_state_test +@with_presets(TESTING_PRESETS, reason="too slow") +def test_withholding_attack_unviable_honest_chain(spec, state): + """ + Checks that the withholding attack succeeds for one epoch if the honest chain has a voting source beyond + two epochs ago. + """ + test_steps = [] + # Initialization + store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) + yield 'anchor_state', state + yield 'anchor_block', anchor_block + current_time = state.slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert store.time == current_time + + next_epoch(spec, state) + on_tick_and_append_step(spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, test_steps) + + # Fill epoch 1 to 3 + for _ in range(3): + state, store, _ = yield from apply_next_epoch_with_attestations( + spec, state, store, True, True, test_steps=test_steps) + + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 4 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + next_epoch(spec, state) + assert spec.compute_epoch_at_slot(state.slot) == 5 + + # Create the attack block that includes justifying attestations for epoch 5 + # This block is withheld & revealed only in epoch 6 + signed_blocks, justifying_slot = find_next_justifying_slot(spec, state, True, False) + assert spec.compute_epoch_at_slot(justifying_slot) == spec.get_current_epoch(state) + assert len(signed_blocks) > 1 + signed_attack_block = signed_blocks[-1] + for signed_block in signed_blocks[:-1]: + yield from tick_and_add_block(spec, store, signed_block, test_steps) + assert spec.get_head(store) == signed_block.message.hash_tree_root() + assert spec.get_head(store) == signed_blocks[-2].message.hash_tree_root() + state = 
store.block_states[spec.get_head(store)].copy() + assert spec.compute_epoch_at_slot(state.slot) == 5 + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 5 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Create an honest chain in epoch 6 that includes the justifying attestations from the attack block + next_epoch(spec, state) + assert spec.compute_epoch_at_slot(state.slot) == 6 + assert state.current_justified_checkpoint.epoch == 3 + # Create two block in the honest chain with full attestations, and add to the store + for _ in range(2): + signed_block = state_transition_with_full_block(spec, state, True, False) + assert state.current_justified_checkpoint.epoch == 3 + yield from tick_and_add_block(spec, store, signed_block, test_steps) + # Create final block in the honest chain that includes the justifying attestations from the attack block + honest_block = build_empty_block_for_next_slot(spec, state) + honest_block.body.attestations = signed_attack_block.message.body.attestations + signed_honest_block = state_transition_and_sign_block(spec, state, honest_block) + honest_block_root = signed_honest_block.message.hash_tree_root() + assert state.current_justified_checkpoint.epoch == 3 + # Add the honest block to the store + yield from tick_and_add_block(spec, store, signed_honest_block, test_steps) + current_epoch = spec.compute_epoch_at_slot(spec.get_current_slot(store)) + assert current_epoch == 6 + # assert store.voting_source[honest_block_root].epoch == 3 + assert spec.get_head(store) == honest_block_root + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Tick to the next slot so proposer boost is not a factor in choosing the head + current_time = (honest_block.slot + 1) * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) 
+ assert spec.get_head(store) == honest_block_root + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 6 + assert state.current_justified_checkpoint.epoch == store.justified_checkpoint.epoch == 3 + + # Upon revealing the withheld attack block, it should become the head + yield from tick_and_add_block(spec, store, signed_attack_block, test_steps) + # The attack block is pulled up and store.justified_checkpoint is updated + assert store.justified_checkpoint.epoch == 5 + attack_block_root = signed_attack_block.message.hash_tree_root() + assert spec.get_head(store) == attack_block_root + + # After going to the next epoch, the honest block should become the head + slot = spec.get_current_slot(store) + spec.SLOTS_PER_EPOCH - (state.slot % spec.SLOTS_PER_EPOCH) + current_time = slot * spec.config.SECONDS_PER_SLOT + store.genesis_time + on_tick_and_append_step(spec, store, current_time, test_steps) + assert spec.compute_epoch_at_slot(spec.get_current_slot(store)) == 7 + # assert store.voting_source[honest_block_root].epoch == 5 + assert spec.get_head(store) == honest_block_root + + yield 'steps', test_steps diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py deleted file mode 100644 index 92382c884b..0000000000 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_block.py +++ /dev/null @@ -1,87 +0,0 @@ -from copy import deepcopy - -from eth2spec.utils.ssz.ssz_impl import hash_tree_root -from eth2spec.test.context import ( - spec_state_test, - with_all_phases, -) -from eth2spec.test.helpers.block import ( - build_empty_block_for_next_slot, -) -from eth2spec.test.helpers.fork_choice import ( - get_genesis_forkchoice_store, - run_on_block, - apply_next_epoch_with_attestations, -) -from eth2spec.test.helpers.state import ( - next_epoch, - state_transition_and_sign_block, -) - - -@with_all_phases -@spec_state_test -def 
test_on_block_outside_safe_slots_and_multiple_better_justified(spec, state): - """ - NOTE: test_new_justified_is_later_than_store_justified also tests best_justified_checkpoint - """ - # Initialization - store = get_genesis_forkchoice_store(spec, state) - - next_epoch(spec, state) - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - state, store, last_signed_block = yield from apply_next_epoch_with_attestations( - spec, state, store, True, False) - last_block_root = hash_tree_root(last_signed_block.message) - - # NOTE: Mock fictitious justified checkpoint in store - store.justified_checkpoint = spec.Checkpoint( - epoch=spec.compute_epoch_at_slot(last_signed_block.message.slot), - root=spec.Root("0x4a55535449464945440000000000000000000000000000000000000000000000") - ) - - next_epoch(spec, state) - spec.on_tick(store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT) - - # Create new higher justified checkpoint not in branch of store's justified checkpoint - just_block = build_empty_block_for_next_slot(spec, state) - store.blocks[just_block.hash_tree_root()] = just_block - - # Step time past safe slots - spec.on_tick(store, store.time + spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED * spec.config.SECONDS_PER_SLOT) - assert spec.get_current_slot(store) % spec.SLOTS_PER_EPOCH >= spec.SAFE_SLOTS_TO_UPDATE_JUSTIFIED - - previously_finalized = store.finalized_checkpoint - previously_justified = store.justified_checkpoint - - # Add a series of new blocks with "better" justifications - best_justified_checkpoint = spec.Checkpoint(epoch=0) - for i in range(3, 0, -1): - # Mutate store - just_state = store.block_states[last_block_root] - new_justified = spec.Checkpoint( - epoch=previously_justified.epoch + i, - root=just_block.hash_tree_root(), - ) - if new_justified.epoch > best_justified_checkpoint.epoch: - best_justified_checkpoint = new_justified - - just_state.current_justified_checkpoint = new_justified - - block = 
build_empty_block_for_next_slot(spec, just_state) - signed_block = state_transition_and_sign_block(spec, deepcopy(just_state), block) - - # NOTE: Mock store so that the modified state could be accessed - parent_block = store.blocks[last_block_root].copy() - parent_block.state_root = just_state.hash_tree_root() - store.blocks[block.parent_root] = parent_block - store.block_states[block.parent_root] = just_state.copy() - assert block.parent_root in store.blocks.keys() - assert block.parent_root in store.block_states.keys() - - run_on_block(spec, store, signed_block) - - assert store.finalized_checkpoint == previously_finalized - assert store.justified_checkpoint == previously_justified - # ensure the best from the series was stored - assert store.best_justified_checkpoint == best_justified_checkpoint diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py index 0d9f6ddf54..33d1bbac44 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/fork_choice/test_on_tick.py @@ -18,7 +18,6 @@ def run_on_tick(spec, store, time, new_justified_checkpoint=False): assert store.time == time if new_justified_checkpoint: - assert store.justified_checkpoint == store.best_justified_checkpoint assert store.justified_checkpoint.epoch > previous_justified_checkpoint.epoch assert store.justified_checkpoint.root != previous_justified_checkpoint.root else: @@ -32,12 +31,12 @@ def test_basic(spec, state): run_on_tick(spec, store, store.time + 1) +""" @with_all_phases @spec_state_test def test_update_justified_single_on_store_finalized_chain(spec, state): store = get_genesis_forkchoice_store(spec, state) - # [Mock store.best_justified_checkpoint] # Create a block at epoch 1 next_epoch(spec, state) block = build_empty_block_for_next_slot(spec, state) @@ -58,8 +57,6 @@ def 
test_update_justified_single_on_store_finalized_chain(spec, state): state_transition_and_sign_block(spec, state, block) store.blocks[block.hash_tree_root()] = block store.block_states[block.hash_tree_root()] = state - # Mock store.best_justified_checkpoint - store.best_justified_checkpoint = state.current_justified_checkpoint.copy() run_on_tick( spec, @@ -67,6 +64,7 @@ def test_update_justified_single_on_store_finalized_chain(spec, state): store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, new_justified_checkpoint=True ) +""" @with_all_phases @@ -89,7 +87,6 @@ def test_update_justified_single_not_on_store_finalized_chain(spec, state): root=block.hash_tree_root(), ) - # [Mock store.best_justified_checkpoint] # Create a block at epoch 1 state = init_state.copy() next_epoch(spec, state) @@ -112,79 +109,9 @@ def test_update_justified_single_not_on_store_finalized_chain(spec, state): state_transition_and_sign_block(spec, state, block) store.blocks[block.hash_tree_root()] = block.copy() store.block_states[block.hash_tree_root()] = state.copy() - # Mock store.best_justified_checkpoint - store.best_justified_checkpoint = state.current_justified_checkpoint.copy() run_on_tick( spec, store, store.genesis_time + state.slot * spec.config.SECONDS_PER_SLOT, ) - - -@with_all_phases -@spec_state_test -def test_no_update_same_slot_at_epoch_boundary(spec, state): - store = get_genesis_forkchoice_store(spec, state) - seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH - - store.best_justified_checkpoint = spec.Checkpoint( - epoch=store.justified_checkpoint.epoch + 1, - root=b'\x55' * 32, - ) - - # set store time to already be at epoch boundary - store.time = seconds_per_epoch - - run_on_tick(spec, store, store.time + 1) - - -@with_all_phases -@spec_state_test -def test_no_update_not_epoch_boundary(spec, state): - store = get_genesis_forkchoice_store(spec, state) - - store.best_justified_checkpoint = spec.Checkpoint( - 
epoch=store.justified_checkpoint.epoch + 1, - root=b'\x55' * 32, - ) - - run_on_tick(spec, store, store.time + spec.config.SECONDS_PER_SLOT) - - -@with_all_phases -@spec_state_test -def test_no_update_new_justified_equal_epoch(spec, state): - store = get_genesis_forkchoice_store(spec, state) - seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH - - store.best_justified_checkpoint = spec.Checkpoint( - epoch=store.justified_checkpoint.epoch + 1, - root=b'\x55' * 32, - ) - - store.justified_checkpoint = spec.Checkpoint( - epoch=store.best_justified_checkpoint.epoch, - root=b'\44' * 32, - ) - - run_on_tick(spec, store, store.time + seconds_per_epoch) - - -@with_all_phases -@spec_state_test -def test_no_update_new_justified_later_epoch(spec, state): - store = get_genesis_forkchoice_store(spec, state) - seconds_per_epoch = spec.config.SECONDS_PER_SLOT * spec.SLOTS_PER_EPOCH - - store.best_justified_checkpoint = spec.Checkpoint( - epoch=store.justified_checkpoint.epoch + 1, - root=b'\x55' * 32, - ) - - store.justified_checkpoint = spec.Checkpoint( - epoch=store.best_justified_checkpoint.epoch + 1, - root=b'\44' * 32, - ) - - run_on_tick(spec, store, store.time + seconds_per_epoch) diff --git a/tests/formats/fork_choice/README.md b/tests/formats/fork_choice/README.md index f79d436eb7..c94b959338 100644 --- a/tests/formats/fork_choice/README.md +++ b/tests/formats/fork_choice/README.md @@ -146,10 +146,6 @@ finalized_checkpoint: { epoch: int, -- Integer value from store.finalized_checkpoint.epoch root: string, -- Encoded 32-byte value from store.finalized_checkpoint.root } -best_justified_checkpoint: { - epoch: int, -- Integer value from store.best_justified_checkpoint.epoch - root: string, -- Encoded 32-byte value from store.best_justified_checkpoint.root -} proposer_boost_root: string -- Encoded 32-byte value from store.proposer_boost_root ``` @@ -160,7 +156,6 @@ For example: head: {slot: 32, root: 
'0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb'} justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'} finalized_checkpoint: {epoch: 2, root: '0x40d32d6283ec11c53317a46808bc88f55657d93b95a1af920403187accf48f4f'} - best_justified_checkpoint: {epoch: 3, root: '0xc25faab4acab38d3560864ca01e4d5cc4dc2cd473da053fbc03c2669143a2de4'} proposer_boost_root: '0xdaa1d49d57594ced0c35688a6da133abb086d191a2ebdfd736fad95299325aeb' ``` diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index c106810f8e..945e687005 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -7,6 +7,8 @@ 'get_head', 'on_block', 'ex_ante', + 'reorg', + 'withholding', ]} # No additional Altair specific finality tests, yet. altair_mods = phase_0_mods From 23c5b7c66e12a1cd8ee183ac4da1017d0f494461 Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Mon, 13 Mar 2023 13:48:34 -0700 Subject: [PATCH 117/210] fix typo --- specs/phase0/fork-choice.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 6cbebe507c..b7125e36aa 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -109,7 +109,7 @@ The `Store` is responsible for tracking information required for the fork choice - `justified_checkpoint`: the justified checkpoint being used as the starting point for the LMD GHOST fork choice algorithm. - `finalized_checkpoint`: the highest finalized checkpoint that was seen. In general, the fork choice will consider only those blocks that are not conflicting with this checkpoint. -- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization***, i.e., FFG processing of new attestations, has occured. 
This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries. +- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization***, i.e., FFG processing of new attestations, has occurred. This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries. - `unrealized_justifications`: stores a map of block root to the unrealized justified checkpoint observed in that block. ```python @@ -231,10 +231,10 @@ def get_voting_source(store: Store, block_root: Root) -> Checkpoint: """ Compute the voting source checkpoint in the case that block with root ``block_root`` is chosen as the head block - """ + """ block = store.blocks[block_root] current_epoch = compute_epoch_at_slot(get_current_slot(store)) - block_epoch = compute_epoch_at_slot(block.slot) + block_epoch = compute_epoch_at_slot(block.slot) if current_epoch > block_epoch: # The block is from a prior epoch, the voting source will be pulled-up. return store.unrealized_justifications[block_root] @@ -242,7 +242,7 @@ def get_voting_source(store: Store, block_root: Root) -> Checkpoint: # The block is not from a prior epoch, therefore the voting source is # not pulled up. 
head_state = store.block_states[block_root] - return head_state.current_justified_checkpoint + return head_state.current_justified_checkpoint ``` From 0ae18d86e3fee6ace2187d92da0afca78e43b75d Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Tue, 14 Mar 2023 11:22:12 -0600 Subject: [PATCH 118/210] Update specs/_features/eip6110/validator.md Co-authored-by: Hsiao-Wei Wang --- specs/_features/eip6110/validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/_features/eip6110/validator.md b/specs/_features/eip6110/validator.md index dcaaf11041..ae9d493a6f 100644 --- a/specs/_features/eip6110/validator.md +++ b/specs/_features/eip6110/validator.md @@ -38,5 +38,5 @@ def get_eth1_deposit_count(state: BeaconState) -> uint64: if state.eth1_deposit_index < eth1_deposit_index_limit: return min(MAX_DEPOSITS, eth1_deposit_index_limit - state.eth1_deposit_index) else: - return 0 + return uint64(0) ``` From 3de96f7a198ebbaa0149d0f8bb0f38d0f4e2f611 Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Tue, 14 Mar 2023 13:54:57 -0700 Subject: [PATCH 119/210] Apply suggestions from code review Co-authored-by: Danny Ryan --- specs/phase0/fork-choice.md | 46 +++++++++++++++---------------------- 1 file changed, 18 insertions(+), 28 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index b7125e36aa..96bb2a87a4 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -107,9 +107,9 @@ def is_previous_epoch_justified(store: Store) -> bool: The `Store` is responsible for tracking information required for the fork choice algorithm. The important fields being tracked are described below: -- `justified_checkpoint`: the justified checkpoint being used as the starting point for the LMD GHOST fork choice algorithm. -- `finalized_checkpoint`: the highest finalized checkpoint that was seen. In general, the fork choice will consider only those blocks that are not conflicting with this checkpoint. 
-- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization***, i.e., FFG processing of new attestations, has occurred. This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries. +- `justified_checkpoint`: the justified checkpoint used as the starting point for the LMD GHOST fork choice algorithm. +- `finalized_checkpoint`: the highest known finalized checkpoint. The fork choice only considers blocks that are not conflicting with this checkpoint. +- `unrealized_justified_checkpoint` & `unrealized_finalized_checkpoint`: these track the highest justified & finalized checkpoints resp., without regard to whether on-chain ***realization*** has occurred, i.e. FFG processing of new attestations within the state transition function. This is an important distinction from `justified_checkpoint` & `finalized_checkpoint`, because they will only track the checkpoints that are realized on-chain. Note that on-chain processing of FFG information only happens at epoch boundaries. - `unrealized_justifications`: stores a map of block root to the unrealized justified checkpoint observed in that block. 
```python @@ -229,18 +229,16 @@ def get_weight(store: Store, root: Root) -> Gwei: ```python def get_voting_source(store: Store, block_root: Root) -> Checkpoint: """ - Compute the voting source checkpoint in the case that block with root ``block_root`` - is chosen as the head block + Compute the voting source checkpoint in event that block with root ``block_root`` is the head block """ block = store.blocks[block_root] current_epoch = compute_epoch_at_slot(get_current_slot(store)) block_epoch = compute_epoch_at_slot(block.slot) if current_epoch > block_epoch: - # The block is from a prior epoch, the voting source will be pulled-up. + # The block is from a prior epoch, the voting source will be pulled-up return store.unrealized_justifications[block_root] else: - # The block is not from a prior epoch, therefore the voting source is - # not pulled up. + # The block is not from a prior epoch, therefore the voting source is not pulled up head_state = store.block_states[block_root] return head_state.current_justified_checkpoint @@ -248,7 +246,7 @@ def get_voting_source(store: Store, block_root: Root) -> Checkpoint: #### `filter_block_tree` -*Note*: External calls to `filter_block_tree` (i.e., any calls that are not made by the recursive logic in this function) MUST have `block_root` as `store.justified_checkpoint`. +*Note*: External calls to `filter_block_tree` (i.e., any calls that are not made by the recursive logic in this function) MUST set `block_root` to `store.justified_checkpoint`. ```python def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconBlock]) -> bool: @@ -270,7 +268,7 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB current_epoch = compute_epoch_at_slot(get_current_slot(store)) voting_source = get_voting_source(store, block_root) - # The voting source should be at the same height as the store's justified checkpoint. 
+ # The voting source should be at the same height as the store's justified checkpoint correct_justified = ( store.justified_checkpoint.epoch == GENESIS_EPOCH or voting_source.epoch == store.justified_checkpoint.epoch @@ -378,20 +376,16 @@ def compute_pulled_up_tip(store: Store, block_root: Root) -> None: # Pull up the post-state of the block to the next epoch boundary process_justification_and_finalization(state) - # Store the unrealized justification. store.unrealized_justifications[block_root] = state.current_justified_checkpoint - - # Update unrealized checkpoints in store if necessary update_unrealized_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) - # If the block is from a prior epoch, apply the realized values. + # If the block is from a prior epoch, apply the realized values block_epoch = compute_epoch_at_slot(store.blocks[block_root].slot) current_epoch = compute_epoch_at_slot(get_current_slot(store)) if block_epoch < current_epoch: update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) ``` - #### `on_tick` helpers ##### `on_tick_per_slot` @@ -400,21 +394,18 @@ def compute_pulled_up_tip(store: Store, block_root: Root) -> None: def on_tick_per_slot(store: Store, time: uint64) -> None: previous_slot = get_current_slot(store) - # update store time + # Update store time store.time = time current_slot = get_current_slot(store) - # Reset store.proposer_boost_root if this is a new slot + # If this is a new slot, reset store.proposer_boost_root if current_slot > previous_slot: store.proposer_boost_root = Root() - # Not a new epoch, return - if not (current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0): - return - - # Pull-up justification and finalization from previous epoch - update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint) + # If a new epoch, pull-up justification and finalization from previous epoch + if 
current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0: + update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint) ``` #### `on_attestation` helpers @@ -447,8 +438,7 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc # Check that the epoch number and slot number are matching assert target.epoch == compute_epoch_at_slot(attestation.data.slot) - # Attestations target be for a known block. - # If target block is unknown, delay consideration until the block is found. + # Attestation target must be for a known block. If target block is unknown, delay consideration until block is found assert target.root in store.blocks # Attestations must be for a known block. If block is unknown, delay consideration until the block is found @@ -498,8 +488,8 @@ def update_latest_messages(store: Store, ```python def on_tick(store: Store, time: uint64) -> None: - # If the ``store.time`` falls behind, catch up slot by slot to - # ensure that every previous slot will be processed with ``on_tick_per_slot``. + # If the ``store.time`` falls behind, while loop catches up slot by slot + # to ensure that every previous slot is processed with ``on_tick_per_slot`` tick_slot = (time - store.genesis_time) // SECONDS_PER_SLOT while get_current_slot(store) < tick_slot: previous_time = store.genesis_time + (get_current_slot(store) + 1) * SECONDS_PER_SLOT @@ -543,7 +533,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: # Update checkpoints in store if necessary update_checkpoints(store, state.current_justified_checkpoint, state.finalized_checkpoint) - # Eagerly compute unrealized justification and finality. 
+ # Eagerly compute unrealized justification and finality compute_pulled_up_tip(store, block_root) ``` From 637ef341331bdae3773cbfa8ae1c08a4b1d99ec1 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Tue, 14 Mar 2023 21:00:20 +0000 Subject: [PATCH 120/210] fix : change description for blob --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index 57496eb020..bde9627565 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -65,7 +65,7 @@ Public functions MUST accept raw bytes as input and perform the required cryptog | `KZGCommitment` | `Bytes48` | Validation: Perform [BLS standard's](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-2.5) "KeyValidate" check but do allow the identity point | | `KZGProof` | `Bytes48` | Same as for `KZGCommitment` | | `Polynomial` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | A polynomial in evaluation form | -| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A basic blob data | +| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A fixed-length sequence of bytes that may uniquely encode the evaluations of a polynomial | ## Constants From 1a2967a5424a53c98c423f8e44fb54086ec68312 Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Tue, 14 Mar 2023 14:49:41 -0700 Subject: [PATCH 121/210] Apply suggestions from @djrtwo --- specs/phase0/fork-choice.md | 17 +++++------------ 1 file changed, 5 insertions(+), 12 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 96bb2a87a4..6e281d5c3d 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -189,11 +189,7 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: block = store.blocks[root] if block.slot > slot: return get_ancestor(store, block.parent_root, slot) 
- elif block.slot == slot: - return root - else: - # root is older than queried slot, thus a skip slot. Return most recent root prior to slot - return root + return root ``` #### `get_weight` @@ -274,9 +270,8 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB or voting_source.epoch == store.justified_checkpoint.epoch ) - # If the block should be pulled-up due to previous epoch being justified, also check - # that the unrealized justification is higher than the store's justified - # checkpoint, and the voting source is not more than two epochs ago. + # If the previous epoch is justified, the block should be pulled-up. In this case, check that unrealized + # justification is higher than the store and that the voting source is not more than two epochs ago if not correct_justified and is_previous_epoch_justified(store): correct_justified = ( store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch and @@ -404,7 +399,7 @@ def on_tick_per_slot(store: Store, time: uint64) -> None: store.proposer_boost_root = Root() # If a new epoch, pull-up justification and finalization from previous epoch - if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0: + if current_slot > previous_slot and compute_slots_since_epoch_start(current_slot) == 0: update_checkpoints(store, store.unrealized_justified_checkpoint, store.unrealized_finalized_checkpoint) ``` @@ -470,9 +465,7 @@ def store_target_checkpoint_state(store: Store, target: Checkpoint) -> None: ##### `update_latest_messages` ```python -def update_latest_messages(store: Store, - attesting_indices: Sequence[ValidatorIndex], - attestation: Attestation) -> None: +def update_latest_messages(store: Store, attesting_indices: Sequence[ValidatorIndex], attestation: Attestation) -> None: target = attestation.data.target beacon_block_root = attestation.data.beacon_block_root non_equivocating_attesting_indices = [i for i in attesting_indices if i 
not in store.equivocating_indices] From ff7a6c5d0e8ea7af80836fb808bb22e391390f6d Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 14 Mar 2023 22:14:31 +0000 Subject: [PATCH 122/210] Add description when to use verify_sidecar_signature --- specs/deneb/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 6f10f7a345..b6960db3c2 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -125,7 +125,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[IGNORE]_ The sidecar is from a slot greater than the latest finalized slot -- i.e. validate that `sidecar.slot > compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)` - _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation. -- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid with respect to the `sidecar.proposer_index` pubkey. +- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_sidecar_signature` - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. 
From 2d4bfabceba821cd1ce3fba67322a14294c7343c Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 14 Mar 2023 22:16:41 +0000 Subject: [PATCH 123/210] Correct signature of get_blobs_and_kzg_commitments --- specs/deneb/validator.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index a21fadd085..04d8e2a409 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -45,7 +45,7 @@ Implementers may also retrieve blobs individually per transaction. ```python def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> \ - Tuple[Sequence[BLSFieldElement], Sequence[KZGCommitment], Sequence[KZGProof]]: + Tuple[Sequence[Blob], Sequence[KZGCommitment], Sequence[KZGProof]]: # pylint: disable=unused-argument ... ``` From 3e281e745770dc8282b3f9fc7cf944f39bf3f393 Mon Sep 17 00:00:00 2001 From: Dankrad Feist Date: Tue, 14 Mar 2023 22:22:23 +0000 Subject: [PATCH 124/210] Alternative for linter --- specs/deneb/validator.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index 04d8e2a409..b627de023e 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -44,8 +44,9 @@ Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload` Implementers may also retrieve blobs individually per transaction. ```python -def get_blobs_and_kzg_commitments(payload_id: PayloadId) -> \ - Tuple[Sequence[Blob], Sequence[KZGCommitment], Sequence[KZGProof]]: +def get_blobs_and_kzg_commitments( + payload_id: PayloadId +) -> Tuple[Sequence[Blob], Sequence[KZGCommitment], Sequence[KZGProof]]: # pylint: disable=unused-argument ... 
``` From 8fd22ab5044b7e8dbe8b11044ee35adce9f57244 Mon Sep 17 00:00:00 2001 From: kevaundray Date: Wed, 15 Mar 2023 15:25:09 +0000 Subject: [PATCH 125/210] code review --- specs/deneb/polynomial-commitments.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index bde9627565..c48857d9e8 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -65,7 +65,7 @@ Public functions MUST accept raw bytes as input and perform the required cryptog | `KZGCommitment` | `Bytes48` | Validation: Perform [BLS standard's](https://datatracker.ietf.org/doc/html/draft-irtf-cfrg-bls-signature-04#section-2.5) "KeyValidate" check but do allow the identity point | | `KZGProof` | `Bytes48` | Same as for `KZGCommitment` | | `Polynomial` | `Vector[BLSFieldElement, FIELD_ELEMENTS_PER_BLOB]` | A polynomial in evaluation form | -| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A fixed-length sequence of bytes that may uniquely encode the evaluations of a polynomial | +| `Blob` | `ByteVector[BYTES_PER_FIELD_ELEMENT * FIELD_ELEMENTS_PER_BLOB]` | A basic data blob | ## Constants From 5977f36fef8de792c3cf1eea5d5883adabdbe9a9 Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 15 Mar 2023 10:54:49 -0600 Subject: [PATCH 126/210] minor nits from code review --- specs/deneb/p2p-interface.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 6207cc76a8..9be028620d 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -126,7 +126,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). 
- _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation. - _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `sidecar.block_parent_root`). -- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_sidecar_signature` +- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_sidecar_signature`. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. From 67984b5665362e327871007e438d6f67ef2cd8de Mon Sep 17 00:00:00 2001 From: Danny Ryan Date: Wed, 15 Mar 2023 11:30:46 -0600 Subject: [PATCH 127/210] bump version.txt --- tests/core/pyspec/eth2spec/VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index 99aab26b29..9b388ed89d 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.3.0-rc.3 +1.3.0-rc.4 From 5a217607b0ace03fcea5214eed9074ed19fb031e Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Wed, 15 Mar 2023 16:32:57 -0700 Subject: [PATCH 128/210] shift all f.c. 
tests to altair & later --- .../test/phase0/fork_choice/test_ex_ante.py | 12 +++--- .../test/phase0/fork_choice/test_get_head.py | 19 ++++----- .../test/phase0/fork_choice/test_on_block.py | 41 +++++++++---------- .../test/phase0/fork_choice/test_reorg.py | 18 ++++---- 4 files changed, 44 insertions(+), 46 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py index 0a145dfa52..15feffa83d 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_ex_ante.py @@ -1,7 +1,7 @@ from eth2spec.test.context import ( MAINNET, spec_state_test, - with_all_phases, + with_altair_and_later, with_presets, ) from eth2spec.test.helpers.attestations import ( @@ -31,7 +31,7 @@ def _apply_base_block_a(spec, state, store, test_steps): assert spec.get_head(store) == signed_block_a.message.hash_tree_root() -@with_all_phases +@with_altair_and_later @spec_state_test def test_ex_ante_vanilla(spec, state): """ @@ -118,7 +118,7 @@ def _get_greater_than_proposer_boost_score(spec, store, state, proposer_boost_ro return proposer_score // base_effective_balance + 1 -@with_all_phases +@with_altair_and_later @with_presets([MAINNET], reason="to create non-duplicate committee") @spec_state_test def test_ex_ante_attestations_is_greater_than_proposer_boost_with_boost(spec, state): @@ -191,7 +191,7 @@ def _filter_participant_set(participants): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_ex_ante_sandwich_without_attestations(spec, state): """ @@ -254,7 +254,7 @@ def test_ex_ante_sandwich_without_attestations(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_ex_ante_sandwich_with_honest_attestation(spec, state): """ @@ -335,7 +335,7 @@ def _filter_participant_set(participants): yield 'steps', test_steps 
-@with_all_phases +@with_altair_and_later @with_presets([MAINNET], reason="to create non-duplicate committee") @spec_state_test def test_ex_ante_sandwich_with_boost_not_sufficient(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py index 2107a470a7..f5960ff703 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py @@ -2,7 +2,6 @@ from eth2spec.test.context import ( spec_state_test, - with_all_phases, with_altair_and_later, with_presets, ) @@ -38,7 +37,7 @@ rng = random.Random(1001) -@with_all_phases +@with_altair_and_later @spec_state_test def test_genesis(spec, state): test_steps = [] @@ -62,7 +61,7 @@ def test_genesis(spec, state): yield 'description', 'meta', f"Although it's not phase 0, we may use {spec.fork} spec to start testnets." -@with_all_phases +@with_altair_and_later @spec_state_test def test_chain_no_attestations(spec, state): test_steps = [] @@ -91,7 +90,7 @@ def test_chain_no_attestations(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_split_tie_breaker_no_attestations(spec, state): test_steps = [] @@ -130,7 +129,7 @@ def test_split_tie_breaker_no_attestations(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_shorter_chain_but_heavier_weight(spec, state): test_steps = [] @@ -170,7 +169,7 @@ def test_shorter_chain_but_heavier_weight(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_filtered_block_tree(spec, state): @@ -247,7 +246,7 @@ def test_filtered_block_tree(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_proposer_boost_correct_head(spec, state): test_steps = [] 
@@ -302,7 +301,7 @@ def test_proposer_boost_correct_head(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_discard_equivocations_on_attester_slashing(spec, state): test_steps = [] @@ -374,7 +373,7 @@ def test_discard_equivocations_on_attester_slashing(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_discard_equivocations_slashed_validator_censoring(spec, state): @@ -636,7 +635,7 @@ def test_voting_source_beyond_two_epoch(spec, state): The block being a descendant of store.justified_checkpoint.root is necessary because filter_block_tree descends the tree starting at store.justified_checkpoint.root -@with_all_phases +@with_altair_and_later @spec_state_test def test_incorrect_finalized(spec, state): # Check that the store doesn't allow for a head block that has: diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index eaae825ab2..0af7753391 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -5,7 +5,6 @@ from eth2spec.test.context import ( MINIMAL, spec_state_test, - with_all_phases, with_altair_and_later, with_presets ) @@ -47,7 +46,7 @@ def _drop_random_one_third(_slot, _index, indices): return rng.sample(sorted(indices), participant_count) -@with_all_phases +@with_altair_and_later @spec_state_test def test_basic(spec, state): test_steps = [] @@ -77,7 +76,7 @@ def test_basic(spec, state): # TODO: add tests for justified_root and finalized_root -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_checkpoints(spec, state): @@ -114,7 +113,7 @@ def test_on_block_checkpoints(spec, state): yield 'steps', test_steps -@with_all_phases 
+@with_altair_and_later @spec_state_test def test_on_block_future_block(spec, state): test_steps = [] @@ -135,7 +134,7 @@ def test_on_block_future_block(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_on_block_bad_parent_root(spec, state): test_steps = [] @@ -161,7 +160,7 @@ def test_on_block_bad_parent_root(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_before_finalized(spec, state): @@ -193,7 +192,7 @@ def test_on_block_before_finalized(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_finalized_skip_slots(spec, state): @@ -240,7 +239,7 @@ def test_on_block_finalized_skip_slots(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): @@ -287,7 +286,7 @@ def test_on_block_finalized_skip_slots_not_in_skip_chain(spec, state): """ -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): @@ -369,7 +368,7 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): """ -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): @@ -441,7 +440,7 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test def test_proposer_boost(spec, state): test_steps = [] @@ -500,7 +499,7 @@ def test_proposer_boost(spec, state): yield 'steps', test_steps -@with_all_phases 
+@with_altair_and_later @spec_state_test def test_proposer_boost_root_same_slot_untimely_block(spec, state): test_steps = [] @@ -534,7 +533,7 @@ def test_proposer_boost_root_same_slot_untimely_block(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_withholding(spec, state): @@ -616,7 +615,7 @@ def test_justification_withholding(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_withholding_reverse_order(spec, state): @@ -693,7 +692,7 @@ def test_justification_withholding_reverse_order(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_update_beginning_of_epoch(spec, state): @@ -742,7 +741,7 @@ def test_justification_update_beginning_of_epoch(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justification_update_end_of_epoch(spec, state): @@ -792,7 +791,7 @@ def test_justification_update_end_of_epoch(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_incompatible_justification_update_start_of_epoch(spec, state): @@ -868,7 +867,7 @@ def test_incompatible_justification_update_start_of_epoch(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_incompatible_justification_update_end_of_epoch(spec, state): @@ -945,7 +944,7 @@ def test_incompatible_justification_update_end_of_epoch(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def 
test_justified_update_not_realized_finality(spec, state): @@ -1018,7 +1017,7 @@ def test_justified_update_not_realized_finality(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justified_update_monotonic(spec, state): @@ -1097,7 +1096,7 @@ def test_justified_update_monotonic(spec, state): yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets([MINIMAL], reason="too slow") def test_justified_update_always_if_better(spec, state): diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py index 30f1b06c7b..afff8d4f46 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_reorg.py @@ -1,6 +1,6 @@ from eth2spec.test.context import ( spec_state_test, - with_all_phases, + with_altair_and_later, with_presets, ) from eth2spec.test.helpers.constants import ( @@ -35,7 +35,7 @@ TESTING_PRESETS = [MINIMAL] -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_simple_attempted_reorg_without_enough_ffg_votes(spec, state): @@ -250,7 +250,7 @@ def _run_delayed_justification(spec, state, attemped_reorg, is_justifying_previo yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_simple_attempted_reorg_delayed_justification_current_epoch(spec, state): @@ -401,7 +401,7 @@ def _run_include_votes_of_another_empty_chain(spec, state, enough_ffg, is_justif yield 'steps', test_steps -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch(spec, state): @@ -412,7 +412,7 @@ def 
test_include_votes_another_empty_chain_with_enough_ffg_votes_current_epoch(s spec, state, enough_ffg=True, is_justifying_previous_epoch=False) -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_include_votes_another_empty_chain_without_enough_ffg_votes_current_epoch(spec, state): @@ -423,7 +423,7 @@ def test_include_votes_another_empty_chain_without_enough_ffg_votes_current_epoc spec, state, enough_ffg=False, is_justifying_previous_epoch=False) -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_delayed_justification_current_epoch(spec, state): @@ -443,7 +443,7 @@ def test_delayed_justification_current_epoch(spec, state): yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=False) -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_delayed_justification_previous_epoch(spec, state): @@ -460,7 +460,7 @@ def test_delayed_justification_previous_epoch(spec, state): yield from _run_delayed_justification(spec, state, attemped_reorg=False, is_justifying_previous_epoch=True) -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_simple_attempted_reorg_delayed_justification_previous_epoch(spec, state): @@ -483,7 +483,7 @@ def test_simple_attempted_reorg_delayed_justification_previous_epoch(spec, state yield from _run_delayed_justification(spec, state, attemped_reorg=True, is_justifying_previous_epoch=True) -@with_all_phases +@with_altair_and_later @spec_state_test @with_presets(TESTING_PRESETS, reason="too slow") def test_include_votes_another_empty_chain_with_enough_ffg_votes_previous_epoch(spec, state): From c48012f674007d4ea3e99d814eca0770fe0eb155 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 16 Mar 2023 08:34:44 +0800 Subject: [PATCH 129/210] Add EIP-6110 
to README table --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 49e1c3a4d9..61600d3890 100644 --- a/README.md +++ b/README.md @@ -29,6 +29,7 @@ Features are researched and developed in parallel, and then consolidated into se | Sharding (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/sharding/beacon-chain.md)
  • Additions
    • [P2P networking](specs/_features/sharding/p2p-interface.md)
| | Custody Game (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)
  • Additions
    • [Honest validator guide changes](specs/_features/custody_game/validator.md)
| Dependent on sharding | | Data Availability Sampling (outdated) |
  • Core
    • [Core types and functions](specs/_features/das/das-core.md)
    • [Fork choice changes](specs/_features/das/fork-choice.md)
  • Additions
    • [P2P Networking](specs/_features/das/p2p-interface.md)
    • [Sampling process](specs/_features/das/sampling.md)
|
  • Dependent on sharding
  • [Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)
| +| EIP-6110 |
  • Core
    • [Beacon Chain changes](specs/_features/eip6110//beacon-chain.md)
    • [EIP-6110 fork](specs/_features/eip6110/fork.md)
  • Additions
    • [Honest validator guide changes](specs/_features/eip6110/validator.md)
| ### Accompanying documents can be found in [specs](specs) and include: From debf51e87afe299c3c2b0ce2459b5fba859b3b00 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 16 Mar 2023 10:13:09 +0800 Subject: [PATCH 130/210] Fix `get_sample_opaque_tx` call signature --- tests/core/pyspec/eth2spec/test/helpers/sharding.py | 2 +- tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/helpers/sharding.py b/tests/core/pyspec/eth2spec/test/helpers/sharding.py index 89f03c3c39..6b913b90ec 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/sharding.py +++ b/tests/core/pyspec/eth2spec/test/helpers/sharding.py @@ -12,7 +12,7 @@ # -# Containers from Deneb +# Containers from EIP-4844 # MAX_CALLDATA_SIZE = 2**24 MAX_VERSIONED_HASHES_LIST_SIZE = 2**24 diff --git a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py index 35ddbc330a..c164515103 100644 --- a/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py +++ b/tests/core/pyspec/eth2spec/test/utils/randomized_block_tests.py @@ -235,7 +235,7 @@ def random_block_capella(spec, state, signed_blocks, scenario_state, rng=Random( def random_block_deneb(spec, state, signed_blocks, scenario_state, rng=Random(3456)): block = random_block_capella(spec, state, signed_blocks, scenario_state) # TODO: more commitments. 
blob_kzg_commitments: List[KZGCommitment, MAX_BLOBS_PER_BLOCK] - opaque_tx, _, blob_kzg_commitments = get_sample_opaque_tx(spec, blob_count=1) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=1) block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) block.body.blob_kzg_commitments = blob_kzg_commitments From 8d3097be999c1692e7d29238072ec22918fdba31 Mon Sep 17 00:00:00 2001 From: Aditya Asgaonkar Date: Wed, 15 Mar 2023 19:48:16 -0700 Subject: [PATCH 131/210] remove phase 0 from f.c. test generator --- tests/generators/fork_choice/main.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index 945e687005..20aa393923 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -3,15 +3,14 @@ if __name__ == "__main__": - phase_0_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [ + # Note: Fork choice tests start from Altair - there are no fork choice test for phase 0 anymore + altair_mods = {key: 'eth2spec.test.phase0.fork_choice.test_' + key for key in [ 'get_head', 'on_block', 'ex_ante', 'reorg', 'withholding', ]} - # No additional Altair specific finality tests, yet. 
- altair_mods = phase_0_mods # For merge `on_merge_block` test kind added with `pow_block_N.ssz` files with several # PowBlock's which should be resolved by `get_pow_block(hash: Hash32) -> PowBlock` function @@ -23,7 +22,6 @@ deneb_mods = capella_mods # No additional Capella specific fork choice tests all_mods = { - PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, From 74a1c90bbce92228044db3d6c3e0dd7da7f89844 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 16 Mar 2023 11:34:04 +0800 Subject: [PATCH 132/210] fix lint --- tests/generators/fork_choice/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index 20aa393923..4456c2546b 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB +from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB if __name__ == "__main__": From 38f97246ee9c16ac43f45383b4ca47ca44d14abb Mon Sep 17 00:00:00 2001 From: Paul Harris Date: Fri, 17 Mar 2023 10:15:34 +1000 Subject: [PATCH 133/210] Added capella fork epoch to mainnet configuration --- configs/mainnet.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index f7e53d7e18..8b467fce24 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -46,7 +46,7 @@ BELLATRIX_FORK_VERSION: 0x02000000 BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC # Capella CAPELLA_FORK_VERSION: 0x03000000 -CAPELLA_FORK_EPOCH: 18446744073709551615 +CAPELLA_FORK_EPOCH: 6209536 # April 12, 2023, 10:27:35pm UTC # Deneb DENEB_FORK_VERSION: 0x04000000 DENEB_FORK_EPOCH: 18446744073709551615 From 01ec71a3edd62ec2b77c68af322931690e69a590 Mon Sep 
17 00:00:00 2001 From: Paul Harris Date: Fri, 17 Mar 2023 10:20:58 +1000 Subject: [PATCH 134/210] computed epoch, rather than slot number --- configs/mainnet.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 8b467fce24..f204d1746e 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -46,7 +46,7 @@ BELLATRIX_FORK_VERSION: 0x02000000 BELLATRIX_FORK_EPOCH: 144896 # Sept 6, 2022, 11:34:47am UTC # Capella CAPELLA_FORK_VERSION: 0x03000000 -CAPELLA_FORK_EPOCH: 6209536 # April 12, 2023, 10:27:35pm UTC +CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC # Deneb DENEB_FORK_VERSION: 0x04000000 DENEB_FORK_EPOCH: 18446744073709551615 From 0e0403d0e2edac7c4e992f7979d5296a9b8788c4 Mon Sep 17 00:00:00 2001 From: George Kadianakis Date: Thu, 16 Mar 2023 15:49:40 +0200 Subject: [PATCH 135/210] Reject zero inputs in bls_modular_inverse() --- specs/deneb/polynomial-commitments.md | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/specs/deneb/polynomial-commitments.md b/specs/deneb/polynomial-commitments.md index c48857d9e8..e23c31fab8 100644 --- a/specs/deneb/polynomial-commitments.md +++ b/specs/deneb/polynomial-commitments.md @@ -252,10 +252,11 @@ def compute_challenge(blob: Blob, ```python def bls_modular_inverse(x: BLSFieldElement) -> BLSFieldElement: """ - Compute the modular inverse of x - i.e. return y such that x * y % BLS_MODULUS == 1 and return 0 for x == 0 + Compute the modular inverse of x (for x != 0) + i.e. 
return y such that x * y % BLS_MODULUS == 1 """ - return BLSFieldElement(pow(x, -1, BLS_MODULUS)) if x != 0 else BLSFieldElement(0) + assert (int(x) % BLS_MODULUS) != 0 + return BLSFieldElement(pow(x, -1, BLS_MODULUS)) ``` #### `div` From 1219beae265f355e7f98d8b31a7afa7611b113ec Mon Sep 17 00:00:00 2001 From: George Kadianakis Date: Fri, 17 Mar 2023 11:50:19 +0200 Subject: [PATCH 136/210] Add some basic bls_modular_inverse() tests --- .../test_polynomial_commitments.py | 23 +++++++++++++++++++ 1 file changed, 23 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index f42f88393d..7d89a9788e 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -215,6 +215,29 @@ def test_verify_blob_kzg_proof_incorrect_proof(spec): assert not spec.verify_blob_kzg_proof(blob, commitment, proof) +@with_deneb_and_later +@spec_test +@single_phase +def test_bls_modular_inverse(spec): + """ + Verify computation of multiplicative inverse + """ + rng = random.Random(5566) + + # Should fail for x == 0 + expect_assertion_error(lambda: spec.bls_modular_inverse(0)) + expect_assertion_error(lambda: spec.bls_modular_inverse(spec.BLS_MODULUS)) + expect_assertion_error(lambda: spec.bls_modular_inverse(2 * spec.BLS_MODULUS)) + + # Test a trivial inversion + assert 1 == int(spec.bls_modular_inverse(1)) + + # Test a random inversion + r = rng.randint(0, spec.BLS_MODULUS - 1) + r_inv = int(spec.bls_modular_inverse(r)) + assert r * r_inv % BLS_MODULUS == 1 + + @with_deneb_and_later @spec_test @single_phase From 18eb1fa33462d74719f621f17fca5a16d0598f8f Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Mon, 20 Mar 2023 13:48:49 +0800 Subject: [PATCH 
137/210] Set `CAPELLA_FORK_EPOCH` to `194048` --- README.md | 2 +- specs/capella/fork.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 61600d3890..9f2528263e 100644 --- a/README.md +++ b/README.md @@ -20,11 +20,11 @@ Features are researched and developed in parallel, and then consolidated into se | 0 | **Phase0** |`0` |
  • Core
    • [The beacon chain](specs/phase0/beacon-chain.md)
    • [Deposit contract](specs/phase0/deposit-contract.md)
    • [Beacon chain fork choice](specs/phase0/fork-choice.md)
  • Additions
    • [Honest validator guide](specs/phase0/validator.md)
    • [P2P networking](specs/phase0/p2p-interface.md)
    • [Weak subjectivity](specs/phase0/weak-subjectivity.md)
| | 1 | **Altair** | `74240` |
  • Core
    • [Beacon chain changes](specs/altair/beacon-chain.md)
    • [Altair fork](specs/altair/fork.md)
  • Additions
    • [Light client sync protocol](specs/altair/light-client/sync-protocol.md) ([full node](specs/altair/light-client/full-node.md), [light client](specs/altair/light-client/light-client.md), [networking](specs/altair/light-client/p2p-interface.md))
    • [Honest validator guide changes](specs/altair/validator.md)
    • [P2P networking](specs/altair/p2p-interface.md)
| | 2 | **Bellatrix**
(["The Merge"](https://ethereum.org/en/upgrades/merge/)) | `144896` |
  • Core
    • [Beacon Chain changes](specs/bellatrix/beacon-chain.md)
    • [Bellatrix fork](specs/bellatrix/fork.md)
    • [Fork choice changes](specs/bellatrix/fork-choice.md)
  • Additions
    • [Honest validator guide changes](specs/bellatrix/validator.md)
    • [P2P networking](specs/bellatrix/p2p-interface.md)
| +| 3 | **Capella** | `194048` |
  • Core
    • [Beacon chain changes](specs/capella/beacon-chain.md)
    • [Capella fork](specs/capella/fork.md)
  • Additions
    • [Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))
    • [Validator additions](specs/capella/validator.md)
    • [P2P networking](specs/capella/p2p-interface.md)
| ### In-development Specifications | Code Name or Topic | Specs | Notes | | - | - | - | -| Capella (tentative) |
  • Core
    • [Beacon chain changes](specs/capella/beacon-chain.md)
    • [Capella fork](specs/capella/fork.md)
  • Additions
    • [Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))
    • [Validator additions](specs/capella/validator.md)
    • [P2P networking](specs/capella/p2p-interface.md)
| | Deneb (tentative) |
  • Core
    • [Beacon Chain changes](specs/deneb/beacon-chain.md)
    • [Deneb fork](specs/deneb/fork.md)
    • [Polynomial commitments](specs/deneb/polynomial-commitments.md)
    • [Fork choice changes](specs/deneb/fork-choice.md)
  • Additions
    • [Light client sync protocol changes](specs/deneb/light-client/sync-protocol.md) ([fork](specs/deneb/light-client/fork.md), [full node](specs/deneb/light-client/full-node.md), [networking](specs/deneb/light-client/p2p-interface.md))
    • [Honest validator guide changes](specs/deneb/validator.md)
    • [P2P networking](specs/deneb/p2p-interface.md)
| | Sharding (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/sharding/beacon-chain.md)
  • Additions
    • [P2P networking](specs/_features/sharding/p2p-interface.md)
| | Custody Game (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)
  • Additions
    • [Honest validator guide changes](specs/_features/custody_game/validator.md)
| Dependent on sharding | diff --git a/specs/capella/fork.md b/specs/capella/fork.md index 49bf8744f3..95bdf79aee 100644 --- a/specs/capella/fork.md +++ b/specs/capella/fork.md @@ -27,7 +27,7 @@ Warning: this configuration is not definitive. | Name | Value | | - | - | | `CAPELLA_FORK_VERSION` | `Version('0x03000000')` | -| `CAPELLA_FORK_EPOCH` | `Epoch(18446744073709551615)` **TBD** | +| `CAPELLA_FORK_EPOCH` | `Epoch(194048)` (April 12, 2023, 10:27:35pm UTC) | ## Helper functions From 024cec5fc95f95f4dcad69f15430fdc0065789cb Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Mon, 20 Mar 2023 14:39:24 +0800 Subject: [PATCH 138/210] bump version.txt to 1.3.0-rc.5 --- tests/core/pyspec/eth2spec/VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index 9b388ed89d..d4f06976f0 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.3.0-rc.4 +1.3.0-rc.5 From a9abfeb217969d27e3940cc91b5f2f7bd17670ff Mon Sep 17 00:00:00 2001 From: terence tsao Date: Thu, 23 Mar 2023 17:21:41 -0700 Subject: [PATCH 139/210] Denbeb -> Deneb --- specs/deneb/light-client/sync-protocol.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/light-client/sync-protocol.md b/specs/deneb/light-client/sync-protocol.md index 6f948257bb..c691a113da 100644 --- a/specs/deneb/light-client/sync-protocol.md +++ b/specs/deneb/light-client/sync-protocol.md @@ -18,7 +18,7 @@ ## Introduction -This upgrade updates light client data to include the Denbeb changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to Deneb. 
+This upgrade updates light client data to include the Deneb changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to Deneb. Additional documents describes the impact of the upgrade on certain roles: - [Full node](./full-node.md) From b3db3ec83ad16e50f1d97c370a7fa354b44c391a Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Sat, 25 Mar 2023 12:23:03 +0100 Subject: [PATCH 140/210] Fix: typo --- fork_choice/safe-block.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/fork_choice/safe-block.md b/fork_choice/safe-block.md index 490d245381..b76285b3a8 100644 --- a/fork_choice/safe-block.md +++ b/fork_choice/safe-block.md @@ -15,7 +15,7 @@ ## Introduction Under honest majority and certain network synchronicity assumptions -there exist a block that is safe from re-orgs. Normally this block is +there exists a block that is safe from re-orgs. Normally this block is pretty close to the head of canonical chain which makes it valuable to expose a safe block to users. 
From 3115d1140b23dd4c9c23fbd9e2428186cf816bde Mon Sep 17 00:00:00 2001 From: omahs <73983677+omahs@users.noreply.github.com> Date: Sat, 25 Mar 2023 13:14:57 +0100 Subject: [PATCH 141/210] Fix: typos --- specs/phase0/beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/phase0/beacon-chain.md b/specs/phase0/beacon-chain.md index 3794cd6be3..b77e017ab7 100644 --- a/specs/phase0/beacon-chain.md +++ b/specs/phase0/beacon-chain.md @@ -269,7 +269,7 @@ Additional preset configurations can be found in the [`configs`](../../configs) - The `INACTIVITY_PENALTY_QUOTIENT` equals `INVERSE_SQRT_E_DROP_TIME**2` where `INVERSE_SQRT_E_DROP_TIME := 2**13` epochs (about 36 days) is the time it takes the inactivity penalty to reduce the balance of non-participating validators to about `1/sqrt(e) ~= 60.6%`. Indeed, the balance retained by offline validators after `n` epochs is about `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(n**2/2)`; so after `INVERSE_SQRT_E_DROP_TIME` epochs, it is roughly `(1 - 1/INACTIVITY_PENALTY_QUOTIENT)**(INACTIVITY_PENALTY_QUOTIENT/2) ~= 1/sqrt(e)`. Note this value will be upgraded to `2**24` after Phase 0 mainnet stabilizes to provide a faster recovery in the event of an inactivity leak. -- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stablizes, this value will be upgraded to `3` to provide the maximal minimum accountable safety margin. +- The `PROPORTIONAL_SLASHING_MULTIPLIER` is set to `1` at initial mainnet launch, resulting in one-third of the minimum accountable safety margin in the event of a finality attack. After Phase 0 mainnet stabilizes, this value will be upgraded to `3` to provide the maximal minimum accountable safety margin. 
### Max operations per block @@ -1036,7 +1036,7 @@ def get_total_balance(state: BeaconState, indices: Set[ValidatorIndex]) -> Gwei: """ Return the combined effective balance of the ``indices``. ``EFFECTIVE_BALANCE_INCREMENT`` Gwei minimum to avoid divisions by zero. - Math safe up to ~10B ETH, afterwhich this overflows uint64. + Math safe up to ~10B ETH, after which this overflows uint64. """ return Gwei(max(EFFECTIVE_BALANCE_INCREMENT, sum([state.validators[index].effective_balance for index in indices]))) ``` From d0839dfec4e9753183e65d3754d891b015221b70 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Mon, 27 Mar 2023 20:58:43 +0800 Subject: [PATCH 142/210] Add EIP-6110 to the pytest scope --- configs/mainnet.yaml | 5 +++-- configs/minimal.yaml | 3 +++ presets/mainnet/eip6110.yaml | 6 ++++++ presets/minimal/eip6110.yaml | 6 ++++++ tests/core/pyspec/eth2spec/test/context.py | 6 +++++- .../core/pyspec/eth2spec/test/helpers/constants.py | 13 +++++++------ tests/core/pyspec/eth2spec/test/helpers/forks.py | 7 +++++++ 7 files changed, 37 insertions(+), 9 deletions(-) create mode 100644 presets/mainnet/eip6110.yaml create mode 100644 presets/minimal/eip6110.yaml diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index f204d1746e..e62108b0aa 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -50,8 +50,9 @@ CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC # Deneb DENEB_FORK_VERSION: 0x04000000 DENEB_FORK_EPOCH: 18446744073709551615 - - +# EIP6110 +EIP6110_FORK_VERSION: 0x04000001 +EIP6110_FORK_EPOCH: 18446744073709551615 # Time parameters diff --git a/configs/minimal.yaml b/configs/minimal.yaml index abecb18813..271a0c14ae 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -49,6 +49,9 @@ CAPELLA_FORK_EPOCH: 18446744073709551615 # DENEB DENEB_FORK_VERSION: 0x04000001 DENEB_FORK_EPOCH: 18446744073709551615 +# EIP6110 +EIP6110_FORK_VERSION: 0x04000001 +EIP6110_FORK_EPOCH: 18446744073709551615 # Time parameters diff --git 
a/presets/mainnet/eip6110.yaml b/presets/mainnet/eip6110.yaml new file mode 100644 index 0000000000..16bf787d0c --- /dev/null +++ b/presets/mainnet/eip6110.yaml @@ -0,0 +1,6 @@ +# Mainnet preset - EIP6110 + +# Execution +# --------------------------------------------------------------- +# 2**13 (= 8192) receipts +MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 8192 diff --git a/presets/minimal/eip6110.yaml b/presets/minimal/eip6110.yaml new file mode 100644 index 0000000000..7486aa16e7 --- /dev/null +++ b/presets/minimal/eip6110.yaml @@ -0,0 +1,6 @@ +# Minimal preset - EIP6110 + +# Execution +# --------------------------------------------------------------- +# [customized] +MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD: 4 diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index 38e7f0b715..b859bf27c6 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -8,11 +8,13 @@ from eth2spec.bellatrix import mainnet as spec_bellatrix_mainnet, minimal as spec_bellatrix_minimal from eth2spec.capella import mainnet as spec_capella_mainnet, minimal as spec_capella_minimal from eth2spec.deneb import mainnet as spec_deneb_mainnet, minimal as spec_deneb_minimal +from eth2spec.eip6110 import mainnet as spec_eip6110_mainnet, minimal as spec_eip6110_minimal from eth2spec.utils import bls from .exceptions import SkippedTest from .helpers.constants import ( PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, + EIP6110, MINIMAL, MAINNET, ALL_PHASES, ALL_FORK_UPGRADES, @@ -79,13 +81,15 @@ class ForkMeta: BELLATRIX: spec_bellatrix_minimal, CAPELLA: spec_capella_minimal, DENEB: spec_deneb_minimal, + EIP6110: spec_eip6110_minimal, }, MAINNET: { PHASE0: spec_phase0_mainnet, ALTAIR: spec_altair_mainnet, BELLATRIX: spec_bellatrix_mainnet, CAPELLA: spec_capella_mainnet, - DENEB: spec_deneb_mainnet + DENEB: spec_deneb_mainnet, + EIP6110: spec_eip6110_mainnet, }, } diff --git a/tests/core/pyspec/eth2spec/test/helpers/constants.py 
b/tests/core/pyspec/eth2spec/test/helpers/constants.py index 0d31adb431..83e7e40dbb 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/constants.py +++ b/tests/core/pyspec/eth2spec/test/helpers/constants.py @@ -9,24 +9,25 @@ ALTAIR = SpecForkName('altair') BELLATRIX = SpecForkName('bellatrix') CAPELLA = SpecForkName('capella') +DENEB = SpecForkName('deneb') # Experimental phases (not included in default "ALL_PHASES"): SHARDING = SpecForkName('sharding') CUSTODY_GAME = SpecForkName('custody_game') DAS = SpecForkName('das') -DENEB = SpecForkName('deneb') +EIP6110 = SpecForkName('eip6110') # The forks that pytest can run with. ALL_PHASES = ( # Formal forks - PHASE0, ALTAIR, BELLATRIX, CAPELLA, + PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, # Experimental patches - DENEB, + EIP6110, ) # The forks that output to the test vectors. -TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB) +TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110) -# TODO: no DENEB fork tests now. Should add when we figure out the content of Capella. +# TODO: no EIP6110 fork tests now. 
ALL_FORK_UPGRADES = { # pre_fork_name: post_fork_name PHASE0: ALTAIR, @@ -41,7 +42,7 @@ if key not in [PHASE0, ALTAIR]} AFTER_CAPELLA_PRE_POST_FORKS = AFTER_CAPELLA_UPGRADES.items() AFTER_DENEB_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items() - if key not in [PHASE0, ALTAIR, BELLATRIX]} + if key not in [PHASE0, ALTAIR, BELLATRIX, EIP6110]} AFTER_DENEB_PRE_POST_FORKS = AFTER_DENEB_UPGRADES.items() # diff --git a/tests/core/pyspec/eth2spec/test/helpers/forks.py b/tests/core/pyspec/eth2spec/test/helpers/forks.py index be3103e67f..e6320cc9b3 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/forks.py +++ b/tests/core/pyspec/eth2spec/test/helpers/forks.py @@ -1,9 +1,12 @@ from .constants import ( PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, + EIP6110, ) def is_post_fork(a, b): + if a == EIP6110: + return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, EIP6110] if a == DENEB: return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB] if a == CAPELLA: @@ -31,3 +34,7 @@ def is_post_capella(spec): def is_post_deneb(spec): return is_post_fork(spec.fork, DENEB) + + +def is_post_eip6110(spec): + return is_post_fork(spec.fork, EIP6110) From 890f574c126f79315e6745eb9bbe72ecd3a9f3a4 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Mon, 27 Mar 2023 21:00:40 +0800 Subject: [PATCH 143/210] Add EIP-6110 to CI scope --- .circleci/config.yml | 16 ++++++++++++++++ .github/workflows/run-tests.yml | 2 +- 2 files changed, 17 insertions(+), 1 deletion(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 665207bdd0..1d5b098111 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -155,6 +155,19 @@ jobs: command: make citest fork=deneb - store_test_results: path: tests/core/pyspec/test-reports + test-eip6110: + docker: + - image: circleci/python:3.8 + working_directory: ~/specs-repo + steps: + - restore_cache: + key: v3-specs-repo-{{ .Branch }}-{{ .Revision }} + - restore_pyspec_cached_venv + - run: + name: Run py-tests + command: make citest fork=eip6110 + 
- store_test_results: + path: tests/core/pyspec/test-reports table_of_contents: docker: - image: circleci/node:10.16.3 @@ -275,6 +288,9 @@ workflows: - test-deneb: requires: - install_pyspec_test + - test-eip6110: + requires: + - install_pyspec_test - table_of_contents - codespell - lint: diff --git a/.github/workflows/run-tests.yml b/.github/workflows/run-tests.yml index 926c3fbbf9..41a80ab925 100644 --- a/.github/workflows/run-tests.yml +++ b/.github/workflows/run-tests.yml @@ -83,7 +83,7 @@ jobs: needs: [preclear,lint,codespell,table_of_contents] strategy: matrix: - version: ["phase0", "altair", "bellatrix", "capella", "deneb"] + version: ["phase0", "altair", "bellatrix", "capella", "deneb", "eip6110"] steps: - name: Checkout this repo uses: actions/checkout@v3.2.0 From f9b359be09df44e12732c90e87d9c281542dc859 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 28 Mar 2023 12:03:14 +0900 Subject: [PATCH 144/210] Reuse indexes with full sweep --- specs/_features/reuse_indexes/beacon-chain.md | 45 +++++++++++++++++++ specs/altair/beacon-chain.md | 22 ++++++--- 2 files changed, 62 insertions(+), 5 deletions(-) create mode 100644 specs/_features/reuse_indexes/beacon-chain.md diff --git a/specs/_features/reuse_indexes/beacon-chain.md b/specs/_features/reuse_indexes/beacon-chain.md new file mode 100644 index 0000000000..fcf1de8968 --- /dev/null +++ b/specs/_features/reuse_indexes/beacon-chain.md @@ -0,0 +1,45 @@ +# Reuse indexes -- The Beacon Chain + +## Table of contents + + + + + +- [Introduction](#introduction) +- [Preset](#preset) + - [Time parameters](#time-parameters) +- [Beacon chain state transition function](#beacon-chain-state-transition-function) + - [Block processing](#block-processing) + - [Modified `assign_index_to_deposit`](#modified-assign_index_to_deposit) + + + + +## Introduction + +This is the beacon chain specification to assign new deposits to existing validator records that have withdrawn long ago. 
+ +*Note:* This specification is built upon [Capella](../../capella/beacon_chain.md) and is under active development. + +## Preset + +### Time parameters + +| Name | Value | Unit | Duration | +| - | - | - | - | +| `REUSE_VALIDATOR_INDEX_DELAY` | `uint64(2**16)` (= 65,536) | epochs | ~1 year | + +## Beacon chain state transition function + +### Block processing + +#### Modified `assign_index_to_deposit` + +```python +def assign_index_to_deposit(state: BeaconState) -> int: + for index, validator in enumerate(state.validators): + if validator.withdrawable_epoch < get_current_epoch(state) - REUSE_VALIDATOR_INDEX_DELAY: + return index + return len(state.validators) +``` diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 58dfad608a..d5b89a387b 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -511,16 +511,28 @@ def apply_deposit(state: BeaconState, signing_root = compute_signing_root(deposit_message, domain) # Initialize validator if the deposit signature is valid if bls.Verify(pubkey, signing_root, signature): - state.validators.append(get_validator_from_deposit(pubkey, withdrawal_credentials, amount)) - state.balances.append(amount) + index = assign_index_to_deposit(state) + update_list(state.validators, index, get_validator_from_deposit(pubkey, withdrawal_credentials, amount)) + update_list(state.balances, index, amount) # [New in Altair] - state.previous_epoch_participation.append(ParticipationFlags(0b0000_0000)) - state.current_epoch_participation.append(ParticipationFlags(0b0000_0000)) - state.inactivity_scores.append(uint64(0)) + update_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000)) + update_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000)) + update_list(state.inactivity_scores, index, uint64(0)) else: # Increase balance by deposit amount index = ValidatorIndex(validator_pubkeys.index(pubkey)) increase_balance(state, index, amount) + + +def 
assign_index_to_deposit(state: BeaconState) -> int: + return len(state.validators) + + +def update_list(list: List, index: int, value: Any) -> None: + if index == len(list): + list.append(value) + else: + list[index] = value ``` #### Sync aggregate processing From 201f113b5096978c2f2afca3f293870b2398f81a Mon Sep 17 00:00:00 2001 From: Roberto Saltini Date: Tue, 28 Mar 2023 15:27:13 +1100 Subject: [PATCH 145/210] Introduce get_epoch_boundary_block --- specs/phase0/fork-choice.md | 19 ++++++++++++++----- 1 file changed, 14 insertions(+), 5 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 6e281d5c3d..044d85053f 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -192,6 +192,17 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: return root ``` +#### `get_epoch_boundary_block` + +```python +def get_epoch_boundary_block(store: Store, root: Root, epoch: Epoch) -> Root: + """ + Compute the epoch boundary block for epoch ``epoch`` in the chain of block ``root`` + """ + epoch_first_slot = compute_start_slot_at_epoch(epoch) + return get_ancestor(store, root, epoch_first_slot) +``` + #### `get_weight` ```python @@ -278,10 +289,9 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB voting_source.epoch + 2 >= current_epoch ) - finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) correct_finalized = ( store.finalized_checkpoint.epoch == GENESIS_EPOCH - or store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot) + or store.finalized_checkpoint.root == get_epoch_boundary_block(store, block_root, store.finalized_checkpoint.epoch) ) # If expected finalized/justified, add to viable block-tree and signal viability to parent. 
@@ -442,8 +452,7 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot # LMD vote must be consistent with FFG vote target - target_slot = compute_start_slot_at_epoch(target.epoch) - assert target.root == get_ancestor(store, attestation.data.beacon_block_root, target_slot) + assert target.root == get_epoch_boundary_block(store, attestation.data.beacon_block_root, target.epoch) # Attestations can only affect the fork choice of subsequent slots. # Delay consideration in the fork choice until their slot is in the past. @@ -506,7 +515,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root + assert get_epoch_boundary_block(store, block.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root # Check the block is valid and compute the post-state state = pre_state.copy() From ddbd82e1be7fc38bcdcf672865bb036ab80837a7 Mon Sep 17 00:00:00 2001 From: Roberto Saltini Date: Tue, 28 Mar 2023 15:51:34 +1100 Subject: [PATCH 146/210] Add toc --- specs/phase0/fork-choice.md | 1 + 1 file changed, 1 insertion(+) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 044d85053f..058089a62c 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -18,6 +18,7 @@ - [`get_current_slot`](#get_current_slot) - [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start) - [`get_ancestor`](#get_ancestor) + - [`get_epoch_boundary_block`](#get_epoch_boundary_block) - [`get_weight`](#get_weight) - [`get_voting_source`](#get_voting_source) - [`filter_block_tree`](#filter_block_tree) From 
8acc31adceeae3f23def8c1291c0b3bacc6d586c Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 28 Mar 2023 14:06:10 +0800 Subject: [PATCH 147/210] Fix the default testing genesis by setting `deposit_receipts_start_index` and fork versions --- configs/mainnet.yaml | 2 +- specs/_features/eip6110/beacon-chain.md | 2 +- tests/core/pyspec/eth2spec/test/context.py | 1 + tests/core/pyspec/eth2spec/test/helpers/genesis.py | 10 ++++++++-- 4 files changed, 11 insertions(+), 4 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index e62108b0aa..8703af1a4f 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -51,7 +51,7 @@ CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC DENEB_FORK_VERSION: 0x04000000 DENEB_FORK_EPOCH: 18446744073709551615 # EIP6110 -EIP6110_FORK_VERSION: 0x04000001 +EIP6110_FORK_VERSION: 0x04000001 # temporary stub EIP6110_FORK_EPOCH: 18446744073709551615 diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 70a72a5f45..9e27b393cf 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -204,7 +204,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.proposer_slashings, process_proposer_slashing) for_ops(body.attester_slashings, process_attester_slashing) for_ops(body.attestations, process_attestation) - for_ops(body.deposits, process_deposit) # [Modified in EIP-6110] + for_ops(body.deposits, process_deposit) for_ops(body.voluntary_exits, process_voluntary_exit) for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) diff --git a/tests/core/pyspec/eth2spec/test/context.py b/tests/core/pyspec/eth2spec/test/context.py index b859bf27c6..901fd273a8 100644 --- a/tests/core/pyspec/eth2spec/test/context.py +++ b/tests/core/pyspec/eth2spec/test/context.py @@ -432,6 +432,7 @@ def decorator(fn): with_bellatrix_and_later = with_all_phases_from(BELLATRIX) with_capella_and_later = 
with_all_phases_from(CAPELLA) with_deneb_and_later = with_all_phases_from(DENEB) +with_eip6110_and_later = with_all_phases_from(EIP6110) def _get_preset_targets(kw): diff --git a/tests/core/pyspec/eth2spec/test/helpers/genesis.py b/tests/core/pyspec/eth2spec/test/helpers/genesis.py index 0610f11ad8..db4f922515 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/genesis.py +++ b/tests/core/pyspec/eth2spec/test/helpers/genesis.py @@ -1,11 +1,11 @@ from eth2spec.test.helpers.constants import ( - ALTAIR, BELLATRIX, CAPELLA, DENEB, + ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110, ) from eth2spec.test.helpers.execution_payload import ( compute_el_header_block_hash, ) from eth2spec.test.helpers.forks import ( - is_post_altair, is_post_bellatrix, is_post_capella, + is_post_altair, is_post_bellatrix, is_post_capella, is_post_eip6110, ) from eth2spec.test.helpers.keys import pubkeys @@ -80,6 +80,9 @@ def create_genesis_state(spec, validator_balances, activation_threshold): elif spec.fork == DENEB: previous_version = spec.config.CAPELLA_FORK_VERSION current_version = spec.config.DENEB_FORK_VERSION + elif spec.fork == EIP6110: + previous_version = spec.config.CAPELLA_FORK_VERSION + current_version = spec.config.EIP6110_FORK_VERSION state = spec.BeaconState( genesis_time=0, @@ -129,4 +132,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold): eth1_block_hash=eth1_block_hash, ) + if is_post_eip6110(spec): + state.deposit_receipts_start_index = spec.UNSET_DEPOSIT_RECEIPTS_START_INDEX + return state From ee3e1ac63e6e675e58d22cf055431b70a6f39696 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 28 Mar 2023 15:34:07 +0900 Subject: [PATCH 148/210] Apply review comments --- specs/_features/reuse_indexes/beacon-chain.md | 28 ++++++++++++++++--- specs/altair/beacon-chain.md | 16 +++++------ 2 files changed, 32 insertions(+), 12 deletions(-) diff --git a/specs/_features/reuse_indexes/beacon-chain.md 
b/specs/_features/reuse_indexes/beacon-chain.md index fcf1de8968..8677631e56 100644 --- a/specs/_features/reuse_indexes/beacon-chain.md +++ b/specs/_features/reuse_indexes/beacon-chain.md @@ -9,9 +9,12 @@ - [Introduction](#introduction) - [Preset](#preset) - [Time parameters](#time-parameters) +- [Helpers](#helpers) + - [Predicates](#predicates) + - [`is_reusable_validator`](#is_reusable_validator) - [Beacon chain state transition function](#beacon-chain-state-transition-function) - [Block processing](#block-processing) - - [Modified `assign_index_to_deposit`](#modified-assign_index_to_deposit) + - [Modified `get_index_for_new_validator`](#modified-get_index_for_new_validator) @@ -30,16 +33,33 @@ This is the beacon chain specification to assign new deposits to existing valida | - | - | - | | `REUSE_VALIDATOR_INDEX_DELAY` | `uint64(2**16)` (= 65,536) | epochs | ~1 year | +## Helper functions + +### Predicates + +#### `is_reusable_validator` + +```python +def is_reusable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> bool: + """ + Check if ``validator`` index can be re-assigned to a new deposit. 
+ """ + return ( + validator.withdrawable_epoch < epoch - REUSE_VALIDATOR_INDEX_DELAY + and balance == 0 + ) +``` + ## Beacon chain state transition function ### Block processing -#### Modified `assign_index_to_deposit` +#### Modified `get_index_for_new_validator` ```python -def assign_index_to_deposit(state: BeaconState) -> int: +def get_index_for_new_validator(state: BeaconState) -> int: for index, validator in enumerate(state.validators): - if validator.withdrawable_epoch < get_current_epoch(state) - REUSE_VALIDATOR_INDEX_DELAY: + if is_reusable_validator(validator, state.balances[index], get_current_epoch(state)): return index return len(state.validators) ``` diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index d5b89a387b..5e24df83e0 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -511,24 +511,24 @@ def apply_deposit(state: BeaconState, signing_root = compute_signing_root(deposit_message, domain) # Initialize validator if the deposit signature is valid if bls.Verify(pubkey, signing_root, signature): - index = assign_index_to_deposit(state) - update_list(state.validators, index, get_validator_from_deposit(pubkey, withdrawal_credentials, amount)) - update_list(state.balances, index, amount) + index = get_index_for_new_validator(state) + update_or_append_to_list(state.validators, index, get_validator_from_deposit(pubkey, withdrawal_credentials, amount)) + update_or_append_to_list(state.balances, index, amount) # [New in Altair] - update_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000)) - update_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000)) - update_list(state.inactivity_scores, index, uint64(0)) + update_or_append_to_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000)) + update_or_append_to_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000)) + 
update_or_append_to_list(state.inactivity_scores, index, uint64(0)) else: # Increase balance by deposit amount index = ValidatorIndex(validator_pubkeys.index(pubkey)) increase_balance(state, index, amount) -def assign_index_to_deposit(state: BeaconState) -> int: +def get_index_for_new_validator(state: BeaconState) -> int: return len(state.validators) -def update_list(list: List, index: int, value: Any) -> None: +def update_or_append_to_list(list: List, index: int, value: Any) -> None: if index == len(list): list.append(value) else: From c1273bbfa405f7c335d36561c92d169552ef7432 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 28 Mar 2023 15:46:01 +0900 Subject: [PATCH 149/210] Add epoch > REUSE_VALIDATOR_INDEX_DELAY condition --- specs/_features/reuse_indexes/beacon-chain.md | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/specs/_features/reuse_indexes/beacon-chain.md b/specs/_features/reuse_indexes/beacon-chain.md index 8677631e56..3efa0c8698 100644 --- a/specs/_features/reuse_indexes/beacon-chain.md +++ b/specs/_features/reuse_indexes/beacon-chain.md @@ -45,8 +45,9 @@ def is_reusable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> Check if ``validator`` index can be re-assigned to a new deposit. 
""" return ( - validator.withdrawable_epoch < epoch - REUSE_VALIDATOR_INDEX_DELAY - and balance == 0 + epoch > REUSE_VALIDATOR_INDEX_DELAY + and validator.withdrawable_epoch < epoch - REUSE_VALIDATOR_INDEX_DELAY + and balance == 0 ) ``` From c1b16a2333b58c98a2e9dd2d47a84e65c0e37207 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 28 Mar 2023 14:49:13 +0800 Subject: [PATCH 150/210] Fix EIP-6110 configs --- configs/mainnet.yaml | 2 +- configs/minimal.yaml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/configs/mainnet.yaml b/configs/mainnet.yaml index 8703af1a4f..5ad394c082 100644 --- a/configs/mainnet.yaml +++ b/configs/mainnet.yaml @@ -51,7 +51,7 @@ CAPELLA_FORK_EPOCH: 194048 # April 12, 2023, 10:27:35pm UTC DENEB_FORK_VERSION: 0x04000000 DENEB_FORK_EPOCH: 18446744073709551615 # EIP6110 -EIP6110_FORK_VERSION: 0x04000001 # temporary stub +EIP6110_FORK_VERSION: 0x05000000 # temporary stub EIP6110_FORK_EPOCH: 18446744073709551615 diff --git a/configs/minimal.yaml b/configs/minimal.yaml index 271a0c14ae..5895cfc707 100644 --- a/configs/minimal.yaml +++ b/configs/minimal.yaml @@ -50,7 +50,7 @@ CAPELLA_FORK_EPOCH: 18446744073709551615 DENEB_FORK_VERSION: 0x04000001 DENEB_FORK_EPOCH: 18446744073709551615 # EIP6110 -EIP6110_FORK_VERSION: 0x04000001 +EIP6110_FORK_VERSION: 0x05000001 EIP6110_FORK_EPOCH: 18446744073709551615 From cd7783e59d38a0cc9ec5ba02c81a38ff1bbeb22c Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 28 Mar 2023 15:08:03 +0800 Subject: [PATCH 151/210] EIP-6110: Fix `compute_fork_version` and add light client specs --- setup.py | 4 + specs/_features/eip6110/beacon-chain.md | 2 +- specs/_features/eip6110/fork.md | 2 +- specs/_features/eip6110/light-client/fork.md | 111 ++++++++++++++++++ .../eip6110/light-client/full-node.md | 74 ++++++++++++ .../eip6110/light-client/p2p-interface.md | 105 +++++++++++++++++ .../eip6110/light-client/sync-protocol.md | 87 ++++++++++++++ .../test/altair/light_client/test_sync.py | 11 ++ 
.../eth2spec/test/helpers/fork_transition.py | 4 + 9 files changed, 398 insertions(+), 2 deletions(-) create mode 100644 specs/_features/eip6110/light-client/fork.md create mode 100644 specs/_features/eip6110/light-client/full-node.md create mode 100644 specs/_features/eip6110/light-client/p2p-interface.md create mode 100644 specs/_features/eip6110/light-client/sync-protocol.md diff --git a/setup.py b/setup.py index 52bad2b71b..2f8c4b12ec 100644 --- a/setup.py +++ b/setup.py @@ -1037,6 +1037,10 @@ def finalize_options(self): """ if self.spec_fork == EIP6110: self.md_doc_paths += """ + specs/_features/eip6110/light-client/fork.md + specs/_features/eip6110/light-client/full-node.md + specs/deneb/light-client/p2p-interface.md + specs/_features/eip6110/light-client/sync-protocol.md specs/_features/eip6110/beacon-chain.md specs/_features/eip6110/fork.md """ diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 9e27b393cf..3ed77bafbb 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -157,7 +157,7 @@ class BeaconState(Container): current_sync_committee: SyncCommittee next_sync_committee: SyncCommittee # Execution - latest_execution_payload_header: ExecutionPayloadHeader + latest_execution_payload_header: ExecutionPayloadHeader # [Modified in EIP-6110] # Withdrawals next_withdrawal_index: WithdrawalIndex next_withdrawal_validator_index: ValidatorIndex diff --git a/specs/_features/eip6110/fork.md b/specs/_features/eip6110/fork.md index b08661e5fa..df98c4c69e 100644 --- a/specs/_features/eip6110/fork.md +++ b/specs/_features/eip6110/fork.md @@ -43,7 +43,7 @@ def compute_fork_version(epoch: Epoch) -> Version: Return the fork version at the given ``epoch``. 
""" if epoch >= EIP6110_FORK_EPOCH: - return EIP6110_FORK_EPOCH + return EIP6110_FORK_VERSION if epoch >= CAPELLA_FORK_EPOCH: return CAPELLA_FORK_VERSION if epoch >= BELLATRIX_FORK_EPOCH: diff --git a/specs/_features/eip6110/light-client/fork.md b/specs/_features/eip6110/light-client/fork.md new file mode 100644 index 0000000000..6ffa3d8697 --- /dev/null +++ b/specs/_features/eip6110/light-client/fork.md @@ -0,0 +1,111 @@ +# eip6110 Light Client -- Fork Logic + +## Table of contents + + + + + +- [Introduction](#introduction) + - [Upgrading light client data](#upgrading-light-client-data) + - [Upgrading the store](#upgrading-the-store) + + + + +## Introduction + +This document describes how to upgrade existing light client objects based on the [Capella specification](../../capella/light-client/sync-protocol.md) to eip6110. This is necessary when processing pre-eip6110 data with a post-eip6110 `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format. + +### Upgrading light client data + +A eip6110 `LightClientStore` can still process earlier light client data. In order to do so, that pre-eip6110 data needs to be locally upgraded to eip6110 before processing. 
+ +```python +def upgrade_lc_header_to_eip6110(pre: capella.LightClientHeader) -> LightClientHeader: + return LightClientHeader( + beacon=pre.beacon, + execution=ExecutionPayloadHeader( + parent_hash=pre.execution.parent_hash, + fee_recipient=pre.execution.fee_recipient, + state_root=pre.execution.state_root, + receipts_root=pre.execution.receipts_root, + logs_bloom=pre.execution.logs_bloom, + prev_randao=pre.execution.prev_randao, + block_number=pre.execution.block_number, + gas_limit=pre.execution.gas_limit, + gas_used=pre.execution.gas_used, + timestamp=pre.execution.timestamp, + extra_data=pre.execution.extra_data, + base_fee_per_gas=pre.execution.base_fee_per_gas, + block_hash=pre.execution.block_hash, + transactions_root=pre.execution.transactions_root, + withdrawals_root=pre.execution.withdrawals_root, + deposit_receipts_root=Root(), + ), + execution_branch=pre.execution_branch, + ) +``` + +```python +def upgrade_lc_bootstrap_to_eip6110(pre: capella.LightClientBootstrap) -> LightClientBootstrap: + return LightClientBootstrap( + header=upgrade_lc_header_to_eip6110(pre.header), + current_sync_committee=pre.current_sync_committee, + current_sync_committee_branch=pre.current_sync_committee_branch, + ) +``` + +```python +def upgrade_lc_update_to_eip6110(pre: capella.LightClientUpdate) -> LightClientUpdate: + return LightClientUpdate( + attested_header=upgrade_lc_header_to_eip6110(pre.attested_header), + next_sync_committee=pre.next_sync_committee, + next_sync_committee_branch=pre.next_sync_committee_branch, + finalized_header=upgrade_lc_header_to_eip6110(pre.finalized_header), + finality_branch=pre.finality_branch, + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) +``` + +```python +def upgrade_lc_finality_update_to_eip6110(pre: capella.LightClientFinalityUpdate) -> LightClientFinalityUpdate: + return LightClientFinalityUpdate( + attested_header=upgrade_lc_header_to_eip6110(pre.attested_header), + 
finalized_header=upgrade_lc_header_to_eip6110(pre.finalized_header), + finality_branch=pre.finality_branch, + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) +``` + +```python +def upgrade_lc_optimistic_update_to_eip6110(pre: capella.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate: + return LightClientOptimisticUpdate( + attested_header=upgrade_lc_header_to_eip6110(pre.attested_header), + sync_aggregate=pre.sync_aggregate, + signature_slot=pre.signature_slot, + ) +``` + +### Upgrading the store + +Existing `LightClientStore` objects based on Capella MUST be upgraded to eip6110 before eip6110 based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `eip6110_FORK_EPOCH`. + +```python +def upgrade_lc_store_to_eip6110(pre: capella.LightClientStore) -> LightClientStore: + if pre.best_valid_update is None: + best_valid_update = None + else: + best_valid_update = upgrade_lc_update_to_eip6110(pre.best_valid_update) + return LightClientStore( + finalized_header=upgrade_lc_header_to_eip6110(pre.finalized_header), + current_sync_committee=pre.current_sync_committee, + next_sync_committee=pre.next_sync_committee, + best_valid_update=best_valid_update, + optimistic_header=upgrade_lc_header_to_eip6110(pre.optimistic_header), + previous_max_active_participants=pre.previous_max_active_participants, + current_max_active_participants=pre.current_max_active_participants, + ) +``` diff --git a/specs/_features/eip6110/light-client/full-node.md b/specs/_features/eip6110/light-client/full-node.md new file mode 100644 index 0000000000..0e400dd161 --- /dev/null +++ b/specs/_features/eip6110/light-client/full-node.md @@ -0,0 +1,74 @@ +# Deneb Light Client -- Full Node + +**Notice**: This document is a work-in-progress for researchers and implementers. 
+ +## Table of contents + + + + + +- [Introduction](#introduction) +- [Helper functions](#helper-functions) + - [Modified `block_to_light_client_header`](#modified-block_to_light_client_header) + + + + +## Introduction + +This upgrade adds information about the execution payload to light client data as part of the Deneb upgrade. + +## Helper functions + +### Modified `block_to_light_client_header` + +```python +def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader: + epoch = compute_epoch_at_slot(block.message.slot) + + if epoch >= CAPELLA_FORK_EPOCH: + payload = block.message.body.execution_payload + execution_header = ExecutionPayloadHeader( + parent_hash=payload.parent_hash, + fee_recipient=payload.fee_recipient, + state_root=payload.state_root, + receipts_root=payload.receipts_root, + logs_bloom=payload.logs_bloom, + prev_randao=payload.prev_randao, + block_number=payload.block_number, + gas_limit=payload.gas_limit, + gas_used=payload.gas_used, + timestamp=payload.timestamp, + extra_data=payload.extra_data, + base_fee_per_gas=payload.base_fee_per_gas, + block_hash=payload.block_hash, + transactions_root=hash_tree_root(payload.transactions), + withdrawals_root=hash_tree_root(payload.withdrawals), + ) + + # [New in Deneb] + if epoch >= EIP6110_FORK_EPOCH: + execution_header.deposit_receipts_root = hash_tree_root(payload.deposit_receipts) + + execution_branch = compute_merkle_proof_for_block_body(block.message.body, EXECUTION_PAYLOAD_INDEX) + else: + # Note that during fork transitions, `finalized_header` may still point to earlier forks. + # While Bellatrix blocks also contain an `ExecutionPayload` (minus `withdrawals_root`), + # it was not included in the corresponding light client data. To ensure compatibility + # with legacy data going through `upgrade_lc_header_to_capella`, leave out execution data. 
+ execution_header = ExecutionPayloadHeader() + execution_branch = [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))] + + return LightClientHeader( + beacon=BeaconBlockHeader( + slot=block.message.slot, + proposer_index=block.message.proposer_index, + parent_root=block.message.parent_root, + state_root=block.message.state_root, + body_root=hash_tree_root(block.message.body), + ), + execution=execution_header, + execution_branch=execution_branch, + ) +``` diff --git a/specs/_features/eip6110/light-client/p2p-interface.md b/specs/_features/eip6110/light-client/p2p-interface.md new file mode 100644 index 0000000000..9b33d59ffd --- /dev/null +++ b/specs/_features/eip6110/light-client/p2p-interface.md @@ -0,0 +1,105 @@ +# EIP-6110 Light Client -- Networking + +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + + + + + +- [Networking](#networking) + - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) + - [Topics and messages](#topics-and-messages) + - [Global topics](#global-topics) + - [`light_client_finality_update`](#light_client_finality_update) + - [`light_client_optimistic_update`](#light_client_optimistic_update) + - [The Req/Resp domain](#the-reqresp-domain) + - [Messages](#messages) + - [GetLightClientBootstrap](#getlightclientbootstrap) + - [LightClientUpdatesByRange](#lightclientupdatesbyrange) + - [GetLightClientFinalityUpdate](#getlightclientfinalityupdate) + - [GetLightClientOptimisticUpdate](#getlightclientoptimisticupdate) + + + + +## Networking + +The [Capella light client networking specification](../../capella/light-client/p2p-interface.md) is extended to exchange [EIP-6110 light client data](./sync-protocol.md). 
+ +### The gossip domain: gossipsub + +#### Topics and messages + +##### Global topics + +###### `light_client_finality_update` + +[0]: # (eth2spec: skip) + +| `fork_version` | Message SSZ type | +|--------------------------------------------------------|-------------------------------------| +| `GENESIS_FORK_VERSION` | n/a | +| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` | +| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` | +| `EIP6110_FORK_VERSION` and later | `eip6110.LightClientFinalityUpdate` | + +###### `light_client_optimistic_update` + +[0]: # (eth2spec: skip) + +| `fork_version` | Message SSZ type | +|--------------------------------------------------------|---------------------------------------| +| `GENESIS_FORK_VERSION` | n/a | +| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` | +| `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` | +| `EIP6110_FORK_VERSION` and later | `eip6110.LightClientOptimisticUpdate` | + +### The Req/Resp domain + +#### Messages + +##### GetLightClientBootstrap + +[0]: # (eth2spec: skip) + +| `fork_version` | Response SSZ type | +|--------------------------------------------------------|------------------------------------| +| `GENESIS_FORK_VERSION` | n/a | +| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientBootstrap` | +| `CAPELLA_FORK_VERSION` | `capella.LightClientBootstrap` | +| `EIP6110_FORK_VERSION` and later | `eip6110.LightClientBootstrap` | + +##### LightClientUpdatesByRange + +[0]: # (eth2spec: skip) + +| `fork_version` | Response chunk SSZ type | +|--------------------------------------------------------|----------------------------------| +| `GENESIS_FORK_VERSION` | n/a | +| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientUpdate` | +| `CAPELLA_FORK_VERSION` | `capella.LightClientUpdate` | +| `EIP6110_FORK_VERSION` and later | 
`eip6110.LightClientUpdate` | + +##### GetLightClientFinalityUpdate + +[0]: # (eth2spec: skip) + +| `fork_version` | Response SSZ type | +|--------------------------------------------------------|-------------------------------------| +| `GENESIS_FORK_VERSION` | n/a | +| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` | +| `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` | +| `EIP6110_FORK_VERSION` and later | `eip6110.LightClientFinalityUpdate` | + +##### GetLightClientOptimisticUpdate + +[0]: # (eth2spec: skip) + +| `fork_version` | Response SSZ type | +|--------------------------------------------------------|---------------------------------------| +| `GENESIS_FORK_VERSION` | n/a | +| `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` | +| `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` | +| `EIP6110_FORK_VERSION` and later | `eip6110.LightClientOptimisticUpdate` | diff --git a/specs/_features/eip6110/light-client/sync-protocol.md b/specs/_features/eip6110/light-client/sync-protocol.md new file mode 100644 index 0000000000..867aa2730d --- /dev/null +++ b/specs/_features/eip6110/light-client/sync-protocol.md @@ -0,0 +1,87 @@ +# EIP-6110 Light Client -- Sync Protocol + +**Notice**: This document is a work-in-progress for researchers and implementers. + +## Table of contents + + + + + +- [Introduction](#introduction) +- [Helper functions](#helper-functions) + - [Modified `get_lc_execution_root`](#modified-get_lc_execution_root) + - [Modified `is_valid_light_client_header`](#modified-is_valid_light_client_header) + + + + +## Introduction + +This upgrade updates light client data to include the EIP-6110 changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). 
The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to EIP-6110. + +Additional documents describe the impact of the upgrade on certain roles: +- [Full node](./full-node.md) +- [Networking](./p2p-interface.md) + +## Helper functions + +### Modified `get_lc_execution_root` + +```python +def get_lc_execution_root(header: LightClientHeader) -> Root: + epoch = compute_epoch_at_slot(header.beacon.slot) + + # [New in EIP-6110] + if epoch >= EIP6110_FORK_EPOCH: + return hash_tree_root(header.execution) + + # [Modified in EIP-6110] + if epoch >= CAPELLA_FORK_EPOCH: + execution_header = capella.ExecutionPayloadHeader( + parent_hash=header.execution.parent_hash, + fee_recipient=header.execution.fee_recipient, + state_root=header.execution.state_root, + receipts_root=header.execution.receipts_root, + logs_bloom=header.execution.logs_bloom, + prev_randao=header.execution.prev_randao, + block_number=header.execution.block_number, + gas_limit=header.execution.gas_limit, + gas_used=header.execution.gas_used, + timestamp=header.execution.timestamp, + extra_data=header.execution.extra_data, + base_fee_per_gas=header.execution.base_fee_per_gas, + block_hash=header.execution.block_hash, + transactions_root=header.execution.transactions_root, + withdrawals_root=header.execution.withdrawals_root, + ) + return hash_tree_root(execution_header) + + return Root() +``` + +### Modified `is_valid_light_client_header` + +```python +def is_valid_light_client_header(header: LightClientHeader) -> bool: + epoch = compute_epoch_at_slot(header.beacon.slot) + + # [New in EIP-6110] + if epoch < EIP6110_FORK_EPOCH: + if header.execution.withdrawals_root != Root(): + return False + + if epoch < CAPELLA_FORK_EPOCH: + return ( + header.execution == ExecutionPayloadHeader() + and header.execution_branch == [Bytes32() for _ in range(floorlog2(EXECUTION_PAYLOAD_INDEX))] + ) + + return is_valid_merkle_branch( + leaf=get_lc_execution_root(header), + 
branch=header.execution_branch, + depth=floorlog2(EXECUTION_PAYLOAD_INDEX), + index=get_subtree_index(EXECUTION_PAYLOAD_INDEX), + root=header.beacon.body_root, + ) +``` diff --git a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py index d33e68961d..1a527a767a 100644 --- a/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py +++ b/tests/core/pyspec/eth2spec/test/altair/light_client/test_sync.py @@ -26,6 +26,7 @@ from eth2spec.test.helpers.forks import ( is_post_capella, is_post_deneb, is_post_fork, + is_post_eip6110, ) from eth2spec.test.helpers.light_client import ( get_sync_aggregate, @@ -57,6 +58,10 @@ def needs_upgrade_to_deneb(d_spec, s_spec): return is_post_deneb(s_spec) and not is_post_deneb(d_spec) +def needs_upgrade_to_eip6110(d_spec, s_spec): + return is_post_eip6110(s_spec) and not is_post_eip6110(d_spec) + + def check_lc_header_equal(d_spec, s_spec, data, upgraded): assert upgraded.beacon.slot == data.beacon.slot assert upgraded.beacon.hash_tree_root() == data.beacon.hash_tree_root() @@ -84,6 +89,10 @@ def upgrade_lc_bootstrap_to_store(d_spec, s_spec, data): upgraded = s_spec.upgrade_lc_bootstrap_to_deneb(upgraded) check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded) + if needs_upgrade_to_eip6110(d_spec, s_spec): + upgraded = s_spec.upgrade_lc_bootstrap_to_eip6110(upgraded) + check_lc_bootstrap_equal(d_spec, s_spec, data, upgraded) + return upgraded @@ -145,6 +154,8 @@ class LightClientSyncTest(object): def get_store_fork_version(s_spec): + if is_post_eip6110(s_spec): + return s_spec.config.EIP6110_FORK_VERSION if is_post_deneb(s_spec): return s_spec.config.DENEB_FORK_VERSION if is_post_capella(s_spec): diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py index 241c7dc37e..20c20b938c 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py +++ 
b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py @@ -15,6 +15,7 @@ BELLATRIX, CAPELLA, DENEB, + EIP6110, ) from eth2spec.test.helpers.deposits import ( prepare_state_and_deposit, @@ -173,6 +174,9 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate= elif post_spec.fork == DENEB: assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION assert state.fork.current_version == post_spec.config.DENEB_FORK_VERSION + elif post_spec.fork == EIP6110: + assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION + assert state.fork.current_version == post_spec.config.EIP6110_FORK_VERSION if with_block: return state, _state_transition_and_sign_block_at_slot( From 9dfee5ef48301de5e9bcf6a3610f689dd350aaf1 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 28 Mar 2023 15:21:09 +0800 Subject: [PATCH 152/210] Update execution_payload helpers for new EIP-6110 field --- .../eth2spec/test/helpers/execution_payload.py | 18 ++++++++++++++++-- 1 file changed, 16 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py index c0a70aca1d..7e2c7c976b 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py +++ b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py @@ -4,7 +4,11 @@ from rlp.sedes import big_endian_int, Binary, List from eth2spec.debug.random_value import get_random_bytes_list -from eth2spec.test.helpers.forks import is_post_capella, is_post_deneb +from eth2spec.test.helpers.forks import ( + is_post_capella, + is_post_deneb, + is_post_eip6110, +) def get_execution_payload_header(spec, execution_payload): @@ -28,6 +32,8 @@ def get_execution_payload_header(spec, execution_payload): payload_header.withdrawals_root = spec.hash_tree_root(execution_payload.withdrawals) if is_post_deneb(spec): payload_header.excess_data_gas = execution_payload.excess_data_gas + if 
is_post_eip6110(spec): + payload_header.deposit_receipts_root = spec.hash_tree_root(execution_payload.deposit_receipts) return payload_header @@ -48,7 +54,8 @@ def compute_trie_root_from_indexed_data(data): def compute_el_header_block_hash(spec, payload_header, transactions_trie_root, - withdrawals_trie_root=None): + withdrawals_trie_root=None, + deposit_receipts_trie_root=None): """ Computes the RLP execution block hash described by an `ExecutionPayloadHeader`. """ @@ -92,6 +99,10 @@ def compute_el_header_block_hash(spec, if is_post_deneb(spec): # excess_data_gas execution_payload_header_rlp.append((big_endian_int, payload_header.excess_data_gas)) + if is_post_eip6110(spec): + # TODO: RLP or SSZ for `deposit_receipts_root` + # FIXME: if using RLP, we need to implement `get_deposit_receipt_rlp` helper + ... sedes = List([schema for schema, _ in execution_payload_header_rlp]) values = [value for _, value in execution_payload_header_rlp] @@ -165,6 +176,9 @@ def build_empty_execution_payload(spec, state, randao_mix=None): ) if is_post_capella(spec): payload.withdrawals = spec.get_expected_withdrawals(state) + if is_post_eip6110(spec): + # just to be clear + payload.deposit_receipts = [] payload.block_hash = compute_el_block_hash(spec, payload) From 7476c1e0c9a67b39e4fa7e6ba8412ef1b5da7254 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 28 Mar 2023 15:34:07 +0800 Subject: [PATCH 153/210] Fix wrong doc path --- setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.py b/setup.py index 2f8c4b12ec..7b4ab6ac2e 100644 --- a/setup.py +++ b/setup.py @@ -1039,7 +1039,7 @@ def finalize_options(self): self.md_doc_paths += """ specs/_features/eip6110/light-client/fork.md specs/_features/eip6110/light-client/full-node.md - specs/deneb/light-client/p2p-interface.md + specs/_features/eip6110/light-client/p2p-interface.md specs/_features/eip6110/light-client/sync-protocol.md specs/_features/eip6110/beacon-chain.md specs/_features/eip6110/fork.md 
From 314b040fff2d5a5aa2ab8e7524c76200f3a64d79 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 28 Mar 2023 17:45:52 +0900 Subject: [PATCH 154/210] Reduce line len --- specs/altair/beacon-chain.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 5e24df83e0..9d7c8180e3 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -512,7 +512,8 @@ def apply_deposit(state: BeaconState, # Initialize validator if the deposit signature is valid if bls.Verify(pubkey, signing_root, signature): index = get_index_for_new_validator(state) - update_or_append_to_list(state.validators, index, get_validator_from_deposit(pubkey, withdrawal_credentials, amount)) + validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount) + update_or_append_to_list(state.validators, index, validator) update_or_append_to_list(state.balances, index, amount) # [New in Altair] update_or_append_to_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000)) From c7029ce19e3f25d2d0e3ea301650561d1ac67d12 Mon Sep 17 00:00:00 2001 From: Roberto Saltini Date: Wed, 29 Mar 2023 12:40:58 +1100 Subject: [PATCH 155/210] Rename get_epoch_boundary_block to get_ancestor_at_epoch_boundary --- specs/phase0/fork-choice.md | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 058089a62c..2ee6ecb926 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -18,7 +18,7 @@ - [`get_current_slot`](#get_current_slot) - [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start) - [`get_ancestor`](#get_ancestor) - - [`get_epoch_boundary_block`](#get_epoch_boundary_block) + - [`get_ancestor_at_epoch_boundary`](#get_ancestor_at_epoch_boundary) - [`get_weight`](#get_weight) - [`get_voting_source`](#get_voting_source) - 
[`filter_block_tree`](#filter_block_tree) @@ -193,10 +193,10 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: return root ``` -#### `get_epoch_boundary_block` +#### `get_ancestor_at_epoch_boundary` ```python -def get_epoch_boundary_block(store: Store, root: Root, epoch: Epoch) -> Root: +def get_ancestor_at_epoch_boundary(store: Store, root: Root, epoch: Epoch) -> Root: """ Compute the epoch boundary block for epoch ``epoch`` in the chain of block ``root`` """ @@ -292,7 +292,7 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB correct_finalized = ( store.finalized_checkpoint.epoch == GENESIS_EPOCH - or store.finalized_checkpoint.root == get_epoch_boundary_block(store, block_root, store.finalized_checkpoint.epoch) + or store.finalized_checkpoint.root == get_ancestor_at_epoch_boundary(store, block_root, store.finalized_checkpoint.epoch) ) # If expected finalized/justified, add to viable block-tree and signal viability to parent. @@ -453,7 +453,7 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot # LMD vote must be consistent with FFG vote target - assert target.root == get_epoch_boundary_block(store, attestation.data.beacon_block_root, target.epoch) + assert target.root == get_ancestor_at_epoch_boundary(store, attestation.data.beacon_block_root, target.epoch) # Attestations can only affect the fork choice of subsequent slots. # Delay consideration in the fork choice until their slot is in the past. 
@@ -516,7 +516,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert get_epoch_boundary_block(store, block.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root + assert get_ancestor_at_epoch_boundary(store, block.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root # Check the block is valid and compute the post-state state = pre_state.copy() From f696b30608a1b6402d32c3c87ab01be99e2096bd Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 30 Mar 2023 09:11:36 +0900 Subject: [PATCH 156/210] Address PR review --- specs/_features/reuse_indexes/beacon-chain.md | 13 ++++++------- specs/altair/beacon-chain.md | 6 +++--- 2 files changed, 9 insertions(+), 10 deletions(-) diff --git a/specs/_features/reuse_indexes/beacon-chain.md b/specs/_features/reuse_indexes/beacon-chain.md index 3efa0c8698..5ec08ed199 100644 --- a/specs/_features/reuse_indexes/beacon-chain.md +++ b/specs/_features/reuse_indexes/beacon-chain.md @@ -1,4 +1,4 @@ -# Reuse indexes -- The Beacon Chain +# Reuse indices -- The Beacon Chain ## Table of contents @@ -31,7 +31,7 @@ This is the beacon chain specification to assign new deposits to existing valida | Name | Value | Unit | Duration | | - | - | - | -| `REUSE_VALIDATOR_INDEX_DELAY` | `uint64(2**16)` (= 65,536) | epochs | ~1 year | +| `REUSE_VALIDATOR_INDEX_DELAY` | `uint64(2**16)` (= 65,536) | epochs | ~0.8 year | ## Helper functions @@ -45,8 +45,7 @@ def is_reusable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> Check if ``validator`` index can be re-assigned to a new deposit. 
""" return ( - epoch > REUSE_VALIDATOR_INDEX_DELAY - and validator.withdrawable_epoch < epoch - REUSE_VALIDATOR_INDEX_DELAY + epoch > validator.withdrawable_epoch + REUSE_VALIDATOR_INDEX_DELAY and balance == 0 ) ``` @@ -58,9 +57,9 @@ def is_reusable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> #### Modified `get_index_for_new_validator` ```python -def get_index_for_new_validator(state: BeaconState) -> int: +def get_index_for_new_validator(state: BeaconState) -> ValidatorIndex: for index, validator in enumerate(state.validators): if is_reusable_validator(validator, state.balances[index], get_current_epoch(state)): - return index - return len(state.validators) + return ValidatorIndex(index) + return ValidatorIndex(len(state.validators)) ``` diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 9d7c8180e3..df65d984c5 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -525,11 +525,11 @@ def apply_deposit(state: BeaconState, increase_balance(state, index, amount) -def get_index_for_new_validator(state: BeaconState) -> int: - return len(state.validators) +def get_index_for_new_validator(state: BeaconState) -> ValidatorIndex: + return ValidatorIndex(len(state.validators)) -def update_or_append_to_list(list: List, index: int, value: Any) -> None: +def update_or_append_to_list(list: List, index: ValidatorIndex, value: Any) -> None: if index == len(list): list.append(value) else: From 3d9c87b27ccc9e8b96c9749abd7c727452bbb44a Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 30 Mar 2023 09:44:34 +0900 Subject: [PATCH 157/210] Move to misc helpers --- specs/altair/beacon-chain.md | 40 +++++++++++++++++++++--------------- 1 file changed, 24 insertions(+), 16 deletions(-) diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index df65d984c5..4c7bb3f9a1 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -30,6 +30,8 
@@ - [Misc](#misc-1) - [`add_flag`](#add_flag) - [`has_flag`](#has_flag) + - [`get_index_for_new_validator`](#get_index_for_new_validator) + - [`set_or_append_list`](#set_or_append_list) - [Beacon state accessors](#beacon-state-accessors) - [`get_next_sync_committee_indices`](#get_next_sync_committee_indices) - [`get_next_sync_committee`](#get_next_sync_committee) @@ -248,6 +250,23 @@ def has_flag(flags: ParticipationFlags, flag_index: int) -> bool: return flags & flag == flag ``` +#### `get_index_for_new_validator` + +```python +def get_index_for_new_validator(state: BeaconState) -> ValidatorIndex: + return ValidatorIndex(len(state.validators)) +``` + +#### `set_or_append_list` + +```python +def set_or_append_list(list: List[Any], index: ValidatorIndex, value: Any) -> None: + if index == len(list): + list.append(value) + else: + list[index] = value +``` + ### Beacon state accessors #### `get_next_sync_committee_indices` @@ -513,27 +532,16 @@ def apply_deposit(state: BeaconState, if bls.Verify(pubkey, signing_root, signature): index = get_index_for_new_validator(state) validator = get_validator_from_deposit(pubkey, withdrawal_credentials, amount) - update_or_append_to_list(state.validators, index, validator) - update_or_append_to_list(state.balances, index, amount) + set_or_append_list(state.validators, index, validator) + set_or_append_list(state.balances, index, amount) # [New in Altair] - update_or_append_to_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000)) - update_or_append_to_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000)) - update_or_append_to_list(state.inactivity_scores, index, uint64(0)) + set_or_append_list(state.previous_epoch_participation, index, ParticipationFlags(0b0000_0000)) + set_or_append_list(state.current_epoch_participation, index, ParticipationFlags(0b0000_0000)) + set_or_append_list(state.inactivity_scores, index, uint64(0)) else: # Increase balance by deposit amount index = 
ValidatorIndex(validator_pubkeys.index(pubkey)) increase_balance(state, index, amount) - - -def get_index_for_new_validator(state: BeaconState) -> ValidatorIndex: - return ValidatorIndex(len(state.validators)) - - -def update_or_append_to_list(list: List, index: ValidatorIndex, value: Any) -> None: - if index == len(list): - list.append(value) - else: - list[index] = value ``` #### Sync aggregate processing From 7cb163090246e152927bbc5857b4fd8c9ad53bf9 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 30 Mar 2023 14:09:16 +1100 Subject: [PATCH 158/210] Attnet revamp draft --- specs/phase0/validator.md | 33 ++++++++++++++++++++++++--------- 1 file changed, 24 insertions(+), 9 deletions(-) diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md index 54b344791e..2ed047f0ff 100644 --- a/specs/phase0/validator.md +++ b/specs/phase0/validator.md @@ -88,10 +88,11 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph | Name | Value | Unit | Duration | | - | - | :-: | :-: | -| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | | -| `RANDOM_SUBNETS_PER_VALIDATOR` | `2**0` (= 1) | subnets | | -| `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours | +| `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | +| `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours | | `ATTESTATION_SUBNET_COUNT` | `64` | The number of attestation subnets used in the gossipsub protocol. | +| `ATTESTATION_SUBNET_EXTRA_BITS` | 0 | The number of extra bits of a NodeId to use when mapping to a subscribed subnet | +| `SUBNETS_PER_NODE` | 2 | The number of long-lived subnets a beacon node should be subscribed to. 
| ## Containers @@ -606,15 +607,29 @@ def get_aggregate_and_proof_signature(state: BeaconState, ## Phase 0 attestation subnet stability -Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each validator must: +Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each beacon node should: -* Randomly select and remain subscribed to `RANDOM_SUBNETS_PER_VALIDATOR` attestation subnets -* Maintain advertisement of the randomly selected subnets in their node's ENR `attnets` entry by setting the randomly selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets -* Set the lifetime of each random subscription to a random number of epochs between `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` and `2 * EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION]`. At the end of life for a subscription, select a new random subnet, update subnet subscriptions, and publish an updated ENR +* Remain subscribed to `SUBNETS_PER_NODE` for `SUBNET_DURATION_IN_EPOCHS` epochs. +* Maintain advertisement of the selected subnets in their node's ENR `attnets` entry by setting the selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets. +* Select these subnets based on their node-id as specified by the following + `compute_subnets(node_id,epoch)` function. -*Note*: Short lived beacon committee assignments should not be added in into the ENR `attnets` entry. 
+```python +ATTESTATION_SUBNET_PREFIX_BITS = ceil(log2(ATTESTATION_SUBNET_COUNT)) + ATTESTATION_SUBNET_EXTRA_BITS + +def compute_subnet(node_id, epoch, index): + node_id_prefix = node_id >> (256 - ATTESTATION_SUBNET_PREFIX_BITS) + permutation_seed = hash(uint_to_bytes(epoch // SUBNET_DURATION_IN_EPOCHS)) + permutated_prefix = compute_shuffled_index(node_id_prefix, 1 << ATTESTATION_SUBNET_PREFIX_BITS, permutation_seed) + return (permutated_prefix + index) % ATTESTATION_SUBNET_COUNT + +def compute_subnets(node_id, epoch): + return [compute_subnet(node_id, epoch, idx) for idx in range(SUBNETS_PER_NODE)] +``` + +*Note*: Nodes should subscribe to new subnets and remain subscribed to old subnets for at least one epoch. Nodes should pick a random duration to unsubscribe from old subnets to smooth the transition on the exact epoch boundary of which the shuffling changes. -*Note*: When preparing for a hard fork, a validator must select and subscribe to random subnets of the future fork versioning at least `EPOCHS_PER_RANDOM_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements. +*Note*: When preparing for a hard fork, a validator must select and subscribe to subnets of the future fork versioning at least `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements. ## How to avoid slashing From 0dd8db76cd7d21a2853f0aad5995d027daf8c0e3 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 30 Mar 2023 14:51:41 +0800 Subject: [PATCH 159/210] Make linter happy. Add `SUBNET_DURATION_IN_EPOCHS` definition. 
--- specs/phase0/validator.md | 27 ++++++++++--------- .../unittests/test_config_invariants.py | 2 +- 2 files changed, 15 insertions(+), 14 deletions(-) diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md index 2ed047f0ff..4df4437d04 100644 --- a/specs/phase0/validator.md +++ b/specs/phase0/validator.md @@ -91,8 +91,10 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph | `TARGET_AGGREGATORS_PER_COMMITTEE` | `2**4` (= 16) | validators | | `EPOCHS_PER_SUBNET_SUBSCRIPTION` | `2**8` (= 256) | epochs | ~27 hours | | `ATTESTATION_SUBNET_COUNT` | `64` | The number of attestation subnets used in the gossipsub protocol. | -| `ATTESTATION_SUBNET_EXTRA_BITS` | 0 | The number of extra bits of a NodeId to use when mapping to a subscribed subnet | -| `SUBNETS_PER_NODE` | 2 | The number of long-lived subnets a beacon node should be subscribed to. | +| `ATTESTATION_SUBNET_EXTRA_BITS` | `0` | The number of extra bits of a NodeId to use when mapping to a subscribed subnet | +| `SUBNETS_PER_NODE` | `2` | The number of long-lived subnets a beacon node should be subscribed to. | +| `ATTESTATION_SUBNET_PREFIX_BITS` | `(ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS)` | | +| `SUBNET_DURATION_IN_EPOCHS` | `2` | | ## Containers @@ -611,20 +613,19 @@ Because Phase 0 does not have shards and thus does not have Shard Committees, th * Remain subscribed to `SUBNETS_PER_NODE` for `SUBNET_DURATION_IN_EPOCHS` epochs. * Maintain advertisement of the selected subnets in their node's ENR `attnets` entry by setting the selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets. -* Select these subnets based on their node-id as specified by the following - `compute_subnets(node_id,epoch)` function. +* Select these subnets based on their node-id as specified by the following `compute_subnets(node_id,epoch)` function. 
```python -ATTESTATION_SUBNET_PREFIX_BITS = ceil(log2(ATTESTATION_SUBNET_COUNT)) + ATTESTATION_SUBNET_EXTRA_BITS - -def compute_subnet(node_id, epoch, index): - node_id_prefix = node_id >> (256 - ATTESTATION_SUBNET_PREFIX_BITS) - permutation_seed = hash(uint_to_bytes(epoch // SUBNET_DURATION_IN_EPOCHS)) - permutated_prefix = compute_shuffled_index(node_id_prefix, 1 << ATTESTATION_SUBNET_PREFIX_BITS, permutation_seed) - return (permutated_prefix + index) % ATTESTATION_SUBNET_COUNT +def compute_subnet(node_id: int, epoch: Epoch, index: int) -> int: + node_id_prefix = node_id >> (256 - ATTESTATION_SUBNET_PREFIX_BITS) + permutation_seed = hash(uint_to_bytes(epoch // SUBNET_DURATION_IN_EPOCHS)) + permutated_prefix = compute_shuffled_index(node_id_prefix, 1 << ATTESTATION_SUBNET_PREFIX_BITS, permutation_seed) + return (permutated_prefix + index) % ATTESTATION_SUBNET_COUNT +``` -def compute_subnets(node_id, epoch): - return [compute_subnet(node_id, epoch, idx) for idx in range(SUBNETS_PER_NODE)] +```python +def compute_subnets(node_id: int, epoch: Epoch) -> Sequence[int]: + return [compute_subnet(node_id, epoch, idx) for idx in range(SUBNETS_PER_NODE)] ``` *Note*: Nodes should subscribe to new subnets and remain subscribed to old subnets for at least one epoch. Nodes should pick a random duration to unsubscribe from old subnets to smooth the transition on the exact epoch boundary of which the shuffling changes. 
diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py index 9b27d1deb9..69aa3eb2a5 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py @@ -75,7 +75,7 @@ def test_time(spec, state): @with_all_phases @spec_state_test def test_networking(spec, state): - assert spec.RANDOM_SUBNETS_PER_VALIDATOR <= spec.ATTESTATION_SUBNET_COUNT + assert spec.SUBNETS_PER_NODE <= spec.ATTESTATION_SUBNET_COUNT @with_all_phases From e255d09840855e16e62f0e531f2ca50fa45e2724 Mon Sep 17 00:00:00 2001 From: Roberto Saltini Date: Fri, 31 Mar 2023 10:52:52 +1100 Subject: [PATCH 160/210] Apply changes to Bellatrix and Daneb --- specs/bellatrix/fork-choice.md | 2 +- specs/deneb/fork-choice.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md index ed7d60a932..aac26f3dd9 100644 --- a/specs/bellatrix/fork-choice.md +++ b/specs/bellatrix/fork-choice.md @@ -170,7 +170,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root + assert get_ancestor_at_epoch_boundary(store, block.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root # Check the block is valid and compute the post-state state = pre_state.copy() diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 61714cf1a8..83cdb9972f 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -82,7 +82,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: 
finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert get_ancestor(store, block.parent_root, finalized_slot) == store.finalized_checkpoint.root + assert get_ancestor_at_epoch_boundary(store, block.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root # [New in Deneb] # Check if blob data is available From 4c401d6575e06902e82cd427e8c99c7011c652c2 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 3 Apr 2023 18:17:03 +0600 Subject: [PATCH 161/210] Port process_deposit tests --- .../test_process_deposit_receipt.py | 233 ++++++++++++++++++ .../pyspec/eth2spec/test/helpers/deposits.py | 135 ++++++++++ 2 files changed, 368 insertions(+) create mode 100644 tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_process_deposit_receipt.py diff --git a/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_process_deposit_receipt.py b/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_process_deposit_receipt.py new file mode 100644 index 0000000000..411a9f6af8 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_process_deposit_receipt.py @@ -0,0 +1,233 @@ +from eth2spec.test.context import spec_state_test, always_bls, with_eip6110_and_later +from eth2spec.test.helpers.deposits import ( + prepare_deposit_receipt, + run_deposit_receipt_processing, + run_deposit_receipt_processing_with_specific_fork_version +) + + +@with_eip6110_and_later +@spec_state_test +def test_new_deposit_under_max(spec, state): + # fresh deposit = next validator index = validator appended to registry + validator_index = len(state.validators) + # effective balance will be 1 EFFECTIVE_BALANCE_INCREMENT smaller because of this small decrement. 
+ amount = spec.MAX_EFFECTIVE_BALANCE - 1 + deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True) + + yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index) + + +@with_eip6110_and_later +@spec_state_test +def test_new_deposit_max(spec, state): + # fresh deposit = next validator index = validator appended to registry + validator_index = len(state.validators) + # effective balance will be exactly the same as balance. + amount = spec.MAX_EFFECTIVE_BALANCE + deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True) + + yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index) + + +@with_eip6110_and_later +@spec_state_test +def test_new_deposit_over_max(spec, state): + # fresh deposit = next validator index = validator appended to registry + validator_index = len(state.validators) + # just 1 over the limit, effective balance should be set MAX_EFFECTIVE_BALANCE during processing + amount = spec.MAX_EFFECTIVE_BALANCE + 1 + deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True) + + yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index) + + +@with_eip6110_and_later +@spec_state_test +def test_new_deposit_eth1_withdrawal_credentials(spec, state): + # fresh deposit = next validator index = validator appended to registry + validator_index = len(state.validators) + withdrawal_credentials = ( + spec.ETH1_ADDRESS_WITHDRAWAL_PREFIX + + b'\x00' * 11 # specified 0s + + b'\x59' * 20 # a 20-byte eth1 address + ) + amount = spec.MAX_EFFECTIVE_BALANCE + deposit_receipt = prepare_deposit_receipt( + spec, + validator_index, + amount, + withdrawal_credentials=withdrawal_credentials, + signed=True, + ) + + yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index) + + +@with_eip6110_and_later +@spec_state_test +def test_new_deposit_non_versioned_withdrawal_credentials(spec, 
state):
+    # fresh deposit = next validator index = validator appended to registry
+    validator_index = len(state.validators)
+    withdrawal_credentials = (
+        b'\xFF'  # Non specified withdrawal credentials version
+        + b'\x02' * 31  # Garbage bytes
+    )
+    amount = spec.MAX_EFFECTIVE_BALANCE
+    deposit_receipt = prepare_deposit_receipt(
+        spec,
+        validator_index,
+        amount,
+        withdrawal_credentials=withdrawal_credentials,
+        signed=True,
+    )
+
+    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
+
+
+@with_eip6110_and_later
+@spec_state_test
+@always_bls
+def test_correct_sig_but_forked_state(spec, state):
+    validator_index = len(state.validators)
+    amount = spec.MAX_EFFECTIVE_BALANCE
+    # deposits will always be valid, regardless of the current fork
+    state.fork.current_version = spec.Version('0x1234abcd')
+    deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
+    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
+
+
+@with_eip6110_and_later
+@spec_state_test
+@always_bls
+def test_incorrect_sig_new_deposit(spec, state):
+    # fresh deposit = next validator index = validator appended to registry
+    validator_index = len(state.validators)
+    amount = spec.MAX_EFFECTIVE_BALANCE
+    deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount)
+    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index, effective=False)
+
+
+@with_eip6110_and_later
+@spec_state_test
+def test_top_up__max_effective_balance(spec, state):
+    validator_index = 0
+    amount = spec.MAX_EFFECTIVE_BALANCE // 4
+    deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True)
+
+    state.balances[validator_index] = spec.MAX_EFFECTIVE_BALANCE
+    state.validators[validator_index].effective_balance = spec.MAX_EFFECTIVE_BALANCE
+
+    yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index)
+
+    assert
state.balances[validator_index] == spec.MAX_EFFECTIVE_BALANCE + amount + assert state.validators[validator_index].effective_balance == spec.MAX_EFFECTIVE_BALANCE + + +@with_eip6110_and_later +@spec_state_test +def test_top_up__less_effective_balance(spec, state): + validator_index = 0 + amount = spec.MAX_EFFECTIVE_BALANCE // 4 + deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True) + + initial_balance = spec.MAX_EFFECTIVE_BALANCE - 1000 + initial_effective_balance = spec.MAX_EFFECTIVE_BALANCE - spec.EFFECTIVE_BALANCE_INCREMENT + state.balances[validator_index] = initial_balance + state.validators[validator_index].effective_balance = initial_effective_balance + + yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index) + + assert state.balances[validator_index] == initial_balance + amount + # unchanged effective balance + assert state.validators[validator_index].effective_balance == initial_effective_balance + + +@with_eip6110_and_later +@spec_state_test +def test_top_up__zero_balance(spec, state): + validator_index = 0 + amount = spec.MAX_EFFECTIVE_BALANCE // 4 + deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, signed=True) + + initial_balance = 0 + initial_effective_balance = 0 + state.balances[validator_index] = initial_balance + state.validators[validator_index].effective_balance = initial_effective_balance + + yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index) + + assert state.balances[validator_index] == initial_balance + amount + # unchanged effective balance + assert state.validators[validator_index].effective_balance == initial_effective_balance + + +@with_eip6110_and_later +@spec_state_test +@always_bls +def test_incorrect_sig_top_up(spec, state): + validator_index = 0 + amount = spec.MAX_EFFECTIVE_BALANCE // 4 + deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount) + + # invalid signatures, in top-ups, are 
allowed! + yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index) + + +@with_eip6110_and_later +@spec_state_test +def test_incorrect_withdrawal_credentials_top_up(spec, state): + validator_index = 0 + amount = spec.MAX_EFFECTIVE_BALANCE // 4 + withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(b"junk")[1:] + deposit_receipt = prepare_deposit_receipt( + spec, + validator_index, + amount, + withdrawal_credentials=withdrawal_credentials + ) + + # inconsistent withdrawal credentials, in top-ups, are allowed! + yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index) + + +@with_eip6110_and_later +@spec_state_test +def test_key_validate_invalid_subgroup(spec, state): + validator_index = len(state.validators) + amount = spec.MAX_EFFECTIVE_BALANCE + + # All-zero pubkey would not pass `bls.KeyValidate`, but `process_deposit` would not throw exception. + pubkey = b'\x00' * 48 + + deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, pubkey=pubkey, signed=True) + + yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index) + + +@with_eip6110_and_later +@spec_state_test +def test_key_validate_invalid_decompression(spec, state): + validator_index = len(state.validators) + amount = spec.MAX_EFFECTIVE_BALANCE + + # `deserialization_fails_infinity_with_true_b_flag` BLS G1 deserialization test case. + # This pubkey would not pass `bls.KeyValidate`, but `process_deposit` would not throw exception. 
+ pubkey_hex = 'c01000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000' + pubkey = bytes.fromhex(pubkey_hex) + + deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, pubkey=pubkey, signed=True) + + yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index) + + +@with_eip6110_and_later +@spec_state_test +@always_bls +def test_ineffective_deposit_with_bad_fork_version(spec, state): + yield from run_deposit_receipt_processing_with_specific_fork_version( + spec, + state, + fork_version=spec.Version('0xAaBbCcDd'), + effective=False, + ) diff --git a/tests/core/pyspec/eth2spec/test/helpers/deposits.py b/tests/core/pyspec/eth2spec/test/helpers/deposits.py index cfff9c5ef9..789b27c8fd 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/deposits.py +++ b/tests/core/pyspec/eth2spec/test/helpers/deposits.py @@ -171,6 +171,54 @@ def prepare_state_and_deposit(spec, state, validator_index, amount, return deposit +def build_deposit_receipt(spec, + index, + pubkey, + privkey, + amount, + withdrawal_credentials, + signed): + deposit_data = build_deposit_data(spec, pubkey, privkey, amount, withdrawal_credentials, signed=signed) + return spec.DepositReceipt( + pubkey=deposit_data.pubkey, + withdrawal_credentials=deposit_data.withdrawal_credentials, + amount=deposit_data.amount, + signature=deposit_data.signature, + index=index) + + +def prepare_deposit_receipt(spec, validator_index, amount, + index=None, + pubkey=None, + privkey=None, + withdrawal_credentials=None, + signed=False): + """ + Create a deposit receipt for the given validator, depositing the given amount. 
+ """ + if index is None: + index = validator_index + + if pubkey is None: + pubkey = pubkeys[validator_index] + + if privkey is None: + privkey = privkeys[validator_index] + + # insecurely use pubkey as withdrawal key if no credentials provided + if withdrawal_credentials is None: + withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:] + + return build_deposit_receipt( + spec, + index, + pubkey, + privkey, + amount, + withdrawal_credentials, + signed, + ) + # # Run processing # @@ -255,3 +303,90 @@ def run_deposit_processing_with_specific_fork_version( state.eth1_data.deposit_count = 1 yield from run_deposit_processing(spec, state, deposit, validator_index, valid=valid, effective=effective) + + +def run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index, valid=True, effective=True): + """ + Run ``process_deposit_receipt``, yielding: + - pre-state ('pre') + - deposit_receipt ('deposit_receipt') + - post-state ('post'). + If ``valid == False``, run expecting ``AssertionError`` + """ + pre_validator_count = len(state.validators) + pre_balance = 0 + is_top_up = False + # is a top-up + if validator_index < pre_validator_count: + is_top_up = True + pre_balance = get_balance(state, validator_index) + pre_effective_balance = state.validators[validator_index].effective_balance + + yield 'pre', state + yield 'deposit_receipt', deposit_receipt + + if not valid: + expect_assertion_error(lambda: spec.process_deposit_receipt(state, deposit_receipt)) + yield 'post', None + return + + spec.process_deposit_receipt(state, deposit_receipt) + + yield 'post', state + + if not effective or not bls.KeyValidate(deposit_receipt.pubkey): + assert len(state.validators) == pre_validator_count + assert len(state.balances) == pre_validator_count + if is_top_up: + assert get_balance(state, validator_index) == pre_balance + else: + if is_top_up: + # Top-ups do not change effective balance + assert state.validators[validator_index].effective_balance == 
pre_effective_balance + assert len(state.validators) == pre_validator_count + assert len(state.balances) == pre_validator_count + else: + # new validator + assert len(state.validators) == pre_validator_count + 1 + assert len(state.balances) == pre_validator_count + 1 + effective_balance = min(spec.MAX_EFFECTIVE_BALANCE, deposit_receipt.amount) + effective_balance -= effective_balance % spec.EFFECTIVE_BALANCE_INCREMENT + assert state.validators[validator_index].effective_balance == effective_balance + + assert get_balance(state, validator_index) == pre_balance + deposit_receipt.amount + + +def run_deposit_receipt_processing_with_specific_fork_version( + spec, + state, + fork_version, + valid=True, + effective=True): + validator_index = len(state.validators) + amount = spec.MAX_EFFECTIVE_BALANCE + + pubkey = pubkeys[validator_index] + privkey = privkeys[validator_index] + withdrawal_credentials = spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkey)[1:] + + deposit_message = spec.DepositMessage(pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount) + domain = spec.compute_domain(domain_type=spec.DOMAIN_DEPOSIT, fork_version=fork_version) + deposit_data = spec.DepositData( + pubkey=pubkey, withdrawal_credentials=withdrawal_credentials, amount=amount, + signature=bls.Sign(privkey, spec.compute_signing_root(deposit_message, domain)) + ) + deposit_receipt = spec.DepositReceipt( + pubkey=deposit_data.pubkey, + withdrawal_credentials=deposit_data.withdrawal_credentials, + amount=deposit_data.amount, + signature=deposit_data.signature, + index=validator_index) + + yield from run_deposit_receipt_processing( + spec, + state, + deposit_receipt, + validator_index, + valid=valid, + effective=effective + ) From 25ea243859329cb7d7131a03051ed78e0abd03c3 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 4 Apr 2023 11:45:08 +0900 Subject: [PATCH 162/210] PR comments --- .../_features/{reuse_indexes => 
reuse_indices}/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) rename specs/_features/{reuse_indexes => reuse_indices}/beacon-chain.md (99%) diff --git a/specs/_features/reuse_indexes/beacon-chain.md b/specs/_features/reuse_indices/beacon-chain.md similarity index 99% rename from specs/_features/reuse_indexes/beacon-chain.md rename to specs/_features/reuse_indices/beacon-chain.md index 5ec08ed199..ecac7fb308 100644 --- a/specs/_features/reuse_indexes/beacon-chain.md +++ b/specs/_features/reuse_indices/beacon-chain.md @@ -30,7 +30,7 @@ This is the beacon chain specification to assign new deposits to existing valida ### Time parameters | Name | Value | Unit | Duration | -| - | - | - | +| - | - | - | - | | `REUSE_VALIDATOR_INDEX_DELAY` | `uint64(2**16)` (= 65,536) | epochs | ~0.8 year | ## Helper functions From 622c9b97bac1a586a8b6692fe1998d3e621a8c30 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Tue, 4 Apr 2023 12:00:09 +0900 Subject: [PATCH 163/210] Fix CI --- specs/_features/reuse_indices/beacon-chain.md | 2 +- specs/altair/beacon-chain.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/_features/reuse_indices/beacon-chain.md b/specs/_features/reuse_indices/beacon-chain.md index ecac7fb308..6dd71e36fe 100644 --- a/specs/_features/reuse_indices/beacon-chain.md +++ b/specs/_features/reuse_indices/beacon-chain.md @@ -9,7 +9,7 @@ - [Introduction](#introduction) - [Preset](#preset) - [Time parameters](#time-parameters) -- [Helpers](#helpers) +- [Helper functions](#helper-functions) - [Predicates](#predicates) - [`is_reusable_validator`](#is_reusable_validator) - [Beacon chain state transition function](#beacon-chain-state-transition-function) diff --git a/specs/altair/beacon-chain.md b/specs/altair/beacon-chain.md index 4c7bb3f9a1..8c3a8877e8 100644 --- a/specs/altair/beacon-chain.md +++ b/specs/altair/beacon-chain.md @@ -260,7 +260,7 @@ def get_index_for_new_validator(state: 
BeaconState) -> ValidatorIndex: #### `set_or_append_list` ```python -def set_or_append_list(list: List[Any], index: ValidatorIndex, value: Any) -> None: +def set_or_append_list(list: List, index: ValidatorIndex, value: Any) -> None: if index == len(list): list.append(value) else: From 912c9b37a13f42de334e676f1de68352706b8f18 Mon Sep 17 00:00:00 2001 From: Suphanat Chunhapanya Date: Fri, 31 Mar 2023 21:06:17 +0700 Subject: [PATCH 164/210] Fix typos --- specs/deneb/p2p-interface.md | 8 ++++---- specs/phase0/validator.md | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 9be028620d..0b6381e20f 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -16,7 +16,7 @@ The specification of these changes continues in the same format as the network s - [`SignedBlobSidecar`](#signedblobsidecar) - [`BlobIdentifier`](#blobidentifier) - [Helpers](#helpers) - - [`verify_sidecar_signature`](#verify_sidecar_signature) + - [`verify_blob_sidecar_signature`](#verify_blob_sidecar_signature) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - [Topics and messages](#topics-and-messages) - [Global topics](#global-topics) @@ -77,7 +77,7 @@ class BlobIdentifier(Container): ### Helpers -#### `verify_sidecar_signature` +#### `verify_blob_sidecar_signature` ```python def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: SignedBlobSidecar) -> bool: @@ -118,7 +118,7 @@ The *type* of the payload of this topic changes to the (modified) `SignedBeaconB This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`. 
-The following validations MUST pass before forwarding the `sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`: +The following validations MUST pass before forwarding the `signed_blob_sidecar` on the network, assuming the alias `sidecar = signed_blob_sidecar.message`: - _[REJECT]_ The sidecar is for the correct topic -- i.e. `sidecar.index` matches the topic `{index}`. - _[IGNORE]_ The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance) -- i.e. validate that `sidecar.slot <= current_slot` (a client MAY queue future sidecars for processing at the appropriate slot). @@ -126,7 +126,7 @@ The following validations MUST pass before forwarding the `sidecar` on the netwo - _[IGNORE]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) has been seen (via both gossip and non-gossip sources) (a client MAY queue sidecars for processing once the parent block is retrieved). - _[REJECT]_ The sidecar's block's parent (defined by `sidecar.block_parent_root`) passes validation. - _[REJECT]_ The sidecar is from a higher slot than the sidecar's block's parent (defined by `sidecar.block_parent_root`). -- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_sidecar_signature`. +- _[REJECT]_ The proposer signature, `signed_blob_sidecar.signature`, is valid as verified by `verify_blob_sidecar_signature`. - _[IGNORE]_ The sidecar is the only sidecar with valid signature received for the tuple `(sidecar.block_root, sidecar.index)`. - _[REJECT]_ The sidecar is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `block_parent_root`/`slot`). If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. 
diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md index 54b344791e..2a4d5b920e 100644 --- a/specs/phase0/validator.md +++ b/specs/phase0/validator.md @@ -162,7 +162,7 @@ The `withdrawal_credentials` field must be such that: * `withdrawal_credentials[1:12] == b'\x00' * 11` * `withdrawal_credentials[12:] == eth1_withdrawal_address` -After the merge of the current Ethereum application layer into the Beacon Chain, +After the merge of the current Ethereum execution layer into the Beacon Chain, withdrawals to `eth1_withdrawal_address` will simply be increases to the account's ETH balance that do **NOT** trigger any EVM execution. ### Submit deposit From 502745e012214098115b506065d78f629dccc7be Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 4 Apr 2023 16:30:44 +0600 Subject: [PATCH 165/210] Port tests from bellatrix and capella --- .../test_process_deposit_receipt.py | 53 ++++++++++++++++++- 1 file changed, 51 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_process_deposit_receipt.py b/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_process_deposit_receipt.py index 411a9f6af8..d78c18ecb7 100644 --- a/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_process_deposit_receipt.py +++ b/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_process_deposit_receipt.py @@ -4,6 +4,8 @@ run_deposit_receipt_processing, run_deposit_receipt_processing_with_specific_fork_version ) +from eth2spec.test.helpers.state import next_epoch_via_block +from eth2spec.test.helpers.withdrawals import set_validator_fully_withdrawable @with_eip6110_and_later @@ -224,10 +226,57 @@ def test_key_validate_invalid_decompression(spec, state): @with_eip6110_and_later @spec_state_test @always_bls -def test_ineffective_deposit_with_bad_fork_version(spec, state): +def test_ineffective_deposit_with_previous_fork_version(spec, state): + # Since deposits are valid across forks, the domain is 
always set with `GENESIS_FORK_VERSION`. + # It's an ineffective deposit because it fails at BLS sig verification. + # NOTE: it was effective in Altair. + assert state.fork.previous_version != state.fork.current_version + yield from run_deposit_receipt_processing_with_specific_fork_version( spec, state, - fork_version=spec.Version('0xAaBbCcDd'), + fork_version=state.fork.previous_version, effective=False, ) + + +@with_eip6110_and_later +@spec_state_test +@always_bls +def test_effective_deposit_with_genesis_fork_version(spec, state): + assert spec.config.GENESIS_FORK_VERSION not in (state.fork.previous_version, state.fork.current_version) + + yield from run_deposit_receipt_processing_with_specific_fork_version( + spec, + state, + fork_version=spec.config.GENESIS_FORK_VERSION, + ) + + +@with_eip6110_and_later +@spec_state_test +def test_success_top_up_to_withdrawn_validator(spec, state): + validator_index = 0 + + # Fully withdraw validator + set_validator_fully_withdrawable(spec, state, validator_index) + assert state.balances[validator_index] > 0 + next_epoch_via_block(spec, state) + assert state.balances[validator_index] == 0 + assert state.validators[validator_index].effective_balance > 0 + next_epoch_via_block(spec, state) + assert state.validators[validator_index].effective_balance == 0 + + # Make a top-up balance to validator + amount = spec.MAX_EFFECTIVE_BALANCE // 4 + deposit_receipt = prepare_deposit_receipt(spec, validator_index, amount, len(state.validators), signed=True) + + yield from run_deposit_receipt_processing(spec, state, deposit_receipt, validator_index) + + assert state.balances[validator_index] == amount + assert state.validators[validator_index].effective_balance == 0 + + validator = state.validators[validator_index] + balance = state.balances[validator_index] + current_epoch = spec.get_current_epoch(state) + assert spec.is_fully_withdrawable_validator(validator, balance, current_epoch) From 68e7766b085b33618880d6529cce577097e333c9 Mon Sep 17 
00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 4 Apr 2023 22:04:48 +0800 Subject: [PATCH 166/210] Remove gitter link --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 9f2528263e..d0d6b222d0 100644 --- a/README.md +++ b/README.md @@ -1,6 +1,6 @@ # Ethereum Proof-of-Stake Consensus Specifications -[![Join the chat at https://discord.gg/qGpsxSA](https://img.shields.io/badge/chat-on%20discord-blue.svg)](https://discord.gg/qGpsxSA) [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Join the chat at https://discord.gg/qGpsxSA](https://img.shields.io/badge/chat-on%20discord-blue.svg)](https://discord.gg/qGpsxSA) To learn more about proof-of-stake and sharding, see the [PoS documentation](https://ethereum.org/en/developers/docs/consensus-mechanisms/pos/), [sharding documentation](https://ethereum.org/en/upgrades/sharding/) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm). 
From 4cac76181827562a010c9c45dba1bb9f80f4f6cd Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 5 Apr 2023 11:38:20 +0800 Subject: [PATCH 167/210] make linter happy --- specs/bellatrix/fork-choice.md | 6 +++++- specs/deneb/fork-choice.md | 6 +++++- specs/phase0/fork-choice.md | 12 ++++++++++-- 3 files changed, 20 insertions(+), 4 deletions(-) diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md index aac26f3dd9..d22436c9d8 100644 --- a/specs/bellatrix/fork-choice.md +++ b/specs/bellatrix/fork-choice.md @@ -170,7 +170,11 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert get_ancestor_at_epoch_boundary(store, block.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root + assert store.finalized_checkpoint.root == get_ancestor_at_epoch_boundary( + store, + block.parent_root, + store.finalized_checkpoint.epoch, + ) # Check the block is valid and compute the post-state state = pre_state.copy() diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 83cdb9972f..e76e159c4f 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -82,7 +82,11 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert get_ancestor_at_epoch_boundary(store, block.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root + assert store.finalized_checkpoint.root == get_ancestor_at_epoch_boundary( + store, + block.parent_root, + store.finalized_checkpoint.epoch, + ) # [New in Deneb] # Check if blob data is available diff --git 
a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 2ee6ecb926..478dd21427 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -292,7 +292,11 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB correct_finalized = ( store.finalized_checkpoint.epoch == GENESIS_EPOCH - or store.finalized_checkpoint.root == get_ancestor_at_epoch_boundary(store, block_root, store.finalized_checkpoint.epoch) + or store.finalized_checkpoint.root == get_ancestor_at_epoch_boundary( + store, + block_root, + store.finalized_checkpoint.epoch, + ) ) # If expected finalized/justified, add to viable block-tree and signal viability to parent. @@ -516,7 +520,11 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert get_ancestor_at_epoch_boundary(store, block.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root + assert store.finalized_checkpoint.root == get_ancestor_at_epoch_boundary( + store, + block.parent_root, + store.finalized_checkpoint.epoch, + ) # Check the block is valid and compute the post-state state = pre_state.copy() From 80e6b0d6657598f069b11096025bbc6a30e3d648 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Wed, 5 Apr 2023 19:46:28 +0600 Subject: [PATCH 168/210] Add deposits transition tests --- .../test_deposit_transition.py | 238 ++++++++++++++++++ .../test/helpers/execution_payload.py | 34 ++- .../pyspec/eth2spec/test/helpers/genesis.py | 7 +- 3 files changed, 272 insertions(+), 7 deletions(-) create mode 100644 tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_deposit_transition.py diff --git a/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_deposit_transition.py 
b/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_deposit_transition.py new file mode 100644 index 0000000000..92c3a96b55 --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_deposit_transition.py @@ -0,0 +1,238 @@ +from eth2spec.test.helpers.block import ( + build_empty_block_for_next_slot, +) +from eth2spec.test.context import ( + spec_state_test, + with_phases, + EIP6110, + expect_assertion_error, +) +from eth2spec.test.helpers.deposits import ( + build_deposit_data, + deposit_from_context, + prepare_deposit_receipt, +) +from eth2spec.test.helpers.execution_payload import ( + compute_el_block_hash, +) +from eth2spec.test.helpers.keys import privkeys, pubkeys + + +def run_deposit_transition_block(spec, state, block, top_up_keys=[], valid=True): + """ + Run ``process_block``, yielding: + - pre-state ('pre') + - block ('block') + - post-state ('post'). + If ``valid == False``, run expecting ``AssertionError`` + """ + yield 'pre', state + yield 'block', block + + if not valid: + expect_assertion_error(lambda: spec.process_block(state, block)) + yield 'post', None + return + + spec.process_block(state, block) + yield 'post', state + + # Check that deposits are applied + expected_pubkeys = [d.data.pubkey for d in block.body.deposits] + deposit_receipts = block.body.execution_payload.deposit_receipts + expected_pubkeys = expected_pubkeys + [d.pubkey for d in deposit_receipts if (d.pubkey not in top_up_keys)] + actual_pubkeys = [v.pubkey for v in state.validators[len(state.validators) - len(expected_pubkeys):]] + + assert actual_pubkeys == expected_pubkeys + + +def prepare_state_and_block(spec, + state, + deposit_cnt, + deposit_receipt_cnt, + first_deposit_receipt_index=0, + deposit_receipts_start_index=None): + deposits = [] + deposit_receipts = [] + keypair_index = len(state.validators) + + # Prepare deposits + deposit_data_list = [] + for index in range(deposit_cnt): + deposit_data = build_deposit_data(spec, + 
pubkeys[keypair_index], + privkeys[keypair_index], + # use max effective balance + spec.MAX_EFFECTIVE_BALANCE, + # insecurely use pubkey as withdrawal key + spec.BLS_WITHDRAWAL_PREFIX + spec.hash(pubkeys[keypair_index])[1:], + signed=True) + deposit_data_list.append(deposit_data) + keypair_index += 1 + + deposit_root = None + for index in range(deposit_cnt): + deposit, deposit_root, _ = deposit_from_context(spec, deposit_data_list, index) + deposits.append(deposit) + + if deposit_root: + state.eth1_deposit_index = 0 + state.eth1_data = spec.Eth1Data(deposit_root=deposit_root, + deposit_count=deposit_cnt, + block_hash=state.eth1_data.block_hash) + + # Prepare deposit receipts + for offset in range(deposit_receipt_cnt): + deposit_receipt = prepare_deposit_receipt(spec, + keypair_index, + # use max effective balance + spec.MAX_EFFECTIVE_BALANCE, + first_deposit_receipt_index + offset, + signed=True) + deposit_receipts.append(deposit_receipt) + keypair_index += 1 + + # Set start index if defined + if deposit_receipts_start_index: + state.deposit_receipts_start_index = deposit_receipts_start_index + + block = build_empty_block_for_next_slot(spec, state) + + # Assign deposits and deposit receipts + block.body.deposits = deposits + block.body.execution_payload.deposit_receipts = deposit_receipts + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + + # Advance a slot + spec.process_slots(state, block.slot) + + return state, block + + +@with_phases([EIP6110]) +@spec_state_test +def test_deposit_transition__start_index_is_set(spec, state): + # 0 deposits, 2 deposit receipts, unset deposit_receipts_start_index + state, block = prepare_state_and_block(spec, state, + deposit_cnt=0, + deposit_receipt_cnt=2, + first_deposit_receipt_index=state.eth1_data.deposit_count + 11) + + yield from run_deposit_transition_block(spec, state, block) + + # deposit_receipts_start_index must be set to the index of the first receipt + assert 
state.deposit_receipts_start_index == block.body.execution_payload.deposit_receipts[0].index + + +@with_phases([EIP6110]) +@spec_state_test +def test_deposit_transition__process_eth1_deposits(spec, state): + # 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count < state.deposit_receipts_start_index + state, block = prepare_state_and_block(spec, state, + deposit_cnt=3, + deposit_receipt_cnt=1, + first_deposit_receipt_index=11, + deposit_receipts_start_index=7) + + yield from run_deposit_transition_block(spec, state, block) + + +@with_phases([EIP6110]) +@spec_state_test +def test_deposit_transition__process_max_eth1_deposits(spec, state): + # spec.MAX_DEPOSITS deposits, 1 deposit receipt, state.eth1_data.deposit_count > state.deposit_receipts_start_index + # state.deposit_receipts_start_index == spec.MAX_DEPOSITS + state, block = prepare_state_and_block(spec, state, + deposit_cnt=spec.MAX_DEPOSITS, + deposit_receipt_cnt=1, + first_deposit_receipt_index=spec.MAX_DEPOSITS + 1, + deposit_receipts_start_index=spec.MAX_DEPOSITS) + state.eth1_data = spec.Eth1Data(deposit_root=state.eth1_data.deposit_root, + deposit_count=23, + block_hash=state.eth1_data.block_hash) + + yield from run_deposit_transition_block(spec, state, block) + + +@with_phases([EIP6110]) +@spec_state_test +def test_deposit_transition__process_eth1_deposits_up_to_start_index(spec, state): + # 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count == state.deposit_receipts_start_index + state, block = prepare_state_and_block(spec, state, + deposit_cnt=3, + deposit_receipt_cnt=1, + first_deposit_receipt_index=7, + deposit_receipts_start_index=3) + + yield from run_deposit_transition_block(spec, state, block) + + +@with_phases([EIP6110]) +@spec_state_test +def test_deposit_transition__invalid_not_enough_eth1_deposits(spec, state): + # 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count < state.deposit_receipts_start_index + state, block = prepare_state_and_block(spec, state, + 
deposit_cnt=3, + deposit_receipt_cnt=1, + first_deposit_receipt_index=29, + deposit_receipts_start_index=23) + state.eth1_data = spec.Eth1Data(deposit_root=state.eth1_data.deposit_root, + deposit_count=17, + block_hash=state.eth1_data.block_hash) + + yield from run_deposit_transition_block(spec, state, block, valid=False) + + +@with_phases([EIP6110]) +@spec_state_test +def test_deposit_transition__invalid_too_many_eth1_deposits(spec, state): + # 3 deposits, 1 deposit receipt, state.eth1_data.deposit_count < state.eth1_data_index + state, block = prepare_state_and_block(spec, state, + deposit_cnt=3, + deposit_receipt_cnt=1, + first_deposit_receipt_index=11, + deposit_receipts_start_index=7) + state.eth1_data = spec.Eth1Data(deposit_root=state.eth1_data.deposit_root, + deposit_count=2, + block_hash=state.eth1_data.block_hash) + + yield from run_deposit_transition_block(spec, state, block, valid=False) + + +@with_phases([EIP6110]) +@spec_state_test +def test_deposit_transition__invalid_eth1_deposits_overlap_in_protocol_deposits(spec, state): + # spec.MAX_DEPOSITS deposits, 1 deposit receipt, state.eth1_data.deposit_count > state.deposit_receipts_start_index + # state.deposit_receipts_start_index == spec.MAX_DEPOSITS - 1 + state, block = prepare_state_and_block(spec, state, + deposit_cnt=spec.MAX_DEPOSITS, + deposit_receipt_cnt=1, + first_deposit_receipt_index=spec.MAX_DEPOSITS, + deposit_receipts_start_index=spec.MAX_DEPOSITS - 1) + state.eth1_data = spec.Eth1Data(deposit_root=state.eth1_data.deposit_root, + deposit_count=23, + block_hash=state.eth1_data.block_hash) + + yield from run_deposit_transition_block(spec, state, block, valid=False) + + +@with_phases([EIP6110]) +@spec_state_test +def test_deposit_transition__deposit_and_top_up_same_block(spec, state): + # 1 deposit, 1 deposit receipt that top ups deposited validator + state, block = prepare_state_and_block(spec, state, + deposit_cnt=1, + deposit_receipt_cnt=1, + first_deposit_receipt_index=11, + 
deposit_receipts_start_index=7) + + # Artificially assign deposit's pubkey to a deposit receipt of the same block + top_up_keys = [block.body.deposits[0].data.pubkey] + block.body.execution_payload.deposit_receipts[0].pubkey = top_up_keys[0] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + + yield from run_deposit_transition_block(spec, state, block, top_up_keys=top_up_keys) + + # Check the top up + expected_balance = block.body.deposits[0].data.amount + block.body.execution_payload.deposit_receipts[0].amount + assert state.balances[len(state.balances) - 1] == expected_balance diff --git a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py index 7e2c7c976b..d9980810c3 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py +++ b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py @@ -100,9 +100,9 @@ def compute_el_header_block_hash(spec, # excess_data_gas execution_payload_header_rlp.append((big_endian_int, payload_header.excess_data_gas)) if is_post_eip6110(spec): - # TODO: RLP or SSZ for `deposit_receipts_root` - # FIXME: if using RLP, we need to implement `get_deposit_receipt_rlp` helper - ... 
+        # deposit_receipts_root
+        assert deposit_receipts_trie_root is not None
+        execution_payload_header_rlp.append((Binary(32, 32), deposit_receipts_trie_root))
 
     sedes = List([schema for schema, _ in execution_payload_header_rlp])
     values = [value for _, value in execution_payload_header_rlp]
@@ -129,14 +129,37 @@ def get_withdrawal_rlp(spec, withdrawal):
     return encode(values, sedes)
 
 
+def get_deposit_receipt_rlp(spec, deposit_receipt):
+    deposit_receipt_rlp = [
+        # pubkey
+        (Binary(48, 48), deposit_receipt.pubkey),
+        # withdrawal_credentials
+        (Binary(32, 32), deposit_receipt.withdrawal_credentials),
+        # amount
+        (big_endian_int, deposit_receipt.amount),
+        # signature
+        (Binary(96, 96), deposit_receipt.signature),
+        # index
+        (big_endian_int, deposit_receipt.index),
+    ]
+
+    sedes = List([schema for schema, _ in deposit_receipt_rlp])
+    values = [value for _, value in deposit_receipt_rlp]
+    return encode(values, sedes)
+
+
 def compute_el_block_hash(spec, payload):
     transactions_trie_root = compute_trie_root_from_indexed_data(payload.transactions)
+    withdrawals_trie_root = None
+    deposit_receipts_trie_root = None
+
     if is_post_capella(spec):
         withdrawals_encoded = [get_withdrawal_rlp(spec, withdrawal) for withdrawal in payload.withdrawals]
         withdrawals_trie_root = compute_trie_root_from_indexed_data(withdrawals_encoded)
-    else:
-        withdrawals_trie_root = None
+    if is_post_eip6110(spec):
+        deposit_receipts_encoded = [get_deposit_receipt_rlp(spec, receipt) for receipt in payload.deposit_receipts]
+        deposit_receipts_trie_root = compute_trie_root_from_indexed_data(deposit_receipts_encoded)
 
     payload_header = get_execution_payload_header(spec, payload)
 
@@ -145,6 +168,7 @@ def compute_el_block_hash(spec, payload):
         payload_header,
         transactions_trie_root,
         withdrawals_trie_root,
+        deposit_receipts_trie_root,
     )
 
 
diff --git a/tests/core/pyspec/eth2spec/test/helpers/genesis.py b/tests/core/pyspec/eth2spec/test/helpers/genesis.py
index db4f922515..32ce8974d9 100644
--- 
a/tests/core/pyspec/eth2spec/test/helpers/genesis.py +++ b/tests/core/pyspec/eth2spec/test/helpers/genesis.py @@ -47,17 +47,20 @@ def get_sample_genesis_execution_payload_header(spec, ) transactions_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") + withdrawals_trie_root = None + deposit_receipts_trie_root = None if is_post_capella(spec): withdrawals_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") - else: - withdrawals_trie_root = None + if is_post_eip6110(spec): + deposit_receipts_trie_root = bytes.fromhex("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421") payload_header.block_hash = compute_el_header_block_hash( spec, payload_header, transactions_trie_root, withdrawals_trie_root, + deposit_receipts_trie_root, ) return payload_header From a0d03378fabf76cb91897ffe17310050f3996ee2 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Thu, 6 Apr 2023 12:40:55 +1000 Subject: [PATCH 169/210] Correct subnet subscription duration variable --- specs/phase0/validator.md | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md index 4df4437d04..1b06aecfbb 100644 --- a/specs/phase0/validator.md +++ b/specs/phase0/validator.md @@ -94,7 +94,6 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph | `ATTESTATION_SUBNET_EXTRA_BITS` | `0` | The number of extra bits of a NodeId to use when mapping to a subscribed subnet | | `SUBNETS_PER_NODE` | `2` | The number of long-lived subnets a beacon node should be subscribed to. 
| | `ATTESTATION_SUBNET_PREFIX_BITS` | `(ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS)` | | -| `SUBNET_DURATION_IN_EPOCHS` | `2` | | ## Containers @@ -611,14 +610,14 @@ def get_aggregate_and_proof_signature(state: BeaconState, Because Phase 0 does not have shards and thus does not have Shard Committees, there is no stable backbone to the attestation subnets (`beacon_attestation_{subnet_id}`). To provide this stability, each beacon node should: -* Remain subscribed to `SUBNETS_PER_NODE` for `SUBNET_DURATION_IN_EPOCHS` epochs. +* Remain subscribed to `SUBNETS_PER_NODE` for `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs. * Maintain advertisement of the selected subnets in their node's ENR `attnets` entry by setting the selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets. * Select these subnets based on their node-id as specified by the following `compute_subnets(node_id,epoch)` function. ```python def compute_subnet(node_id: int, epoch: Epoch, index: int) -> int: node_id_prefix = node_id >> (256 - ATTESTATION_SUBNET_PREFIX_BITS) - permutation_seed = hash(uint_to_bytes(epoch // SUBNET_DURATION_IN_EPOCHS)) + permutation_seed = hash(uint_to_bytes(epoch // EPOCHS_PER_SUBNET_SUBSCRIPTION)) permutated_prefix = compute_shuffled_index(node_id_prefix, 1 << ATTESTATION_SUBNET_PREFIX_BITS, permutation_seed) return (permutated_prefix + index) % ATTESTATION_SUBNET_COUNT ``` From 8fafc6c695253f6c64bb8e17fdfa2d825ca23ede Mon Sep 17 00:00:00 2001 From: Marius van der Wijden Date: Thu, 6 Apr 2023 11:20:07 +0200 Subject: [PATCH 170/210] deneb: switch blob tx type to 0x03 --- specs/deneb/beacon-chain.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md index a0ac783b7f..df1da8e2a5 100644 --- a/specs/deneb/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -59,7 +59,7 @@ This upgrade adds blobs to the beacon chain as part of Deneb. 
This is an extensi | Name | Value | | - | - | -| `BLOB_TX_TYPE` | `uint8(0x05)` | +| `BLOB_TX_TYPE` | `uint8(0x03)` | | `VERSIONED_HASH_VERSION_KZG` | `Bytes1('0x01')` | ## Preset From 108f1eed860ff143d9b9481342d6788e96b78e5f Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 6 Apr 2023 16:53:31 +0600 Subject: [PATCH 171/210] Rebase EIP6110 to Deneb --- setup.py | 6 ++--- specs/_features/eip6110/beacon-chain.md | 24 +++++++++++-------- specs/_features/eip6110/fork.md | 7 ++++-- specs/_features/eip6110/light-client/fork.md | 19 ++++++++------- .../eip6110/light-client/full-node.md | 5 +++- .../eip6110/light-client/p2p-interface.md | 8 ++++++- .../eip6110/light-client/sync-protocol.md | 12 ++++++---- specs/_features/eip6110/validator.md | 2 +- specs/deneb/light-client/fork.md | 1 + .../test_polynomial_commitments.py | 5 +++- .../pyspec/eth2spec/test/helpers/forks.py | 2 +- 11 files changed, 57 insertions(+), 34 deletions(-) diff --git a/setup.py b/setup.py index 7b4ab6ac2e..a17655dc7a 100644 --- a/setup.py +++ b/setup.py @@ -671,13 +671,13 @@ def hardcoded_custom_type_dep_constants(cls, spec_object) -> str: # # EIP6110SpecBuilder # -class EIP6110SpecBuilder(CapellaSpecBuilder): +class EIP6110SpecBuilder(DenebSpecBuilder): fork: str = EIP6110 @classmethod def imports(cls, preset_name: str): return super().imports(preset_name) + f''' -from eth2spec.capella import {preset_name} as capella +from eth2spec.deneb import {preset_name} as deneb ''' @@ -1022,7 +1022,7 @@ def finalize_options(self): specs/capella/validator.md specs/capella/p2p-interface.md """ - if self.spec_fork == DENEB: + if self.spec_fork in (DENEB, EIP6110): self.md_doc_paths += """ specs/deneb/light-client/fork.md specs/deneb/light-client/full-node.md diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 3ed77bafbb..9693aa92ec 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -33,7 +33,7 @@ This is 
the beacon chain specification of in-protocol deposits processing mechanism. This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum.org/EIPS/eip-6110). -*Note:* This specification is built upon [Capella](../../capella/beacon_chain.md) and is under active development. +*Note:* This specification is built upon [Deneb](../../deneb/beacon_chain.md) and is under active development. ## Constants @@ -91,7 +91,8 @@ class ExecutionPayload(Container): block_hash: Hash32 transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] - deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in EIP-6110] + excess_data_gas: uint256 + deposit_receipts: List[DepositReceipt, MAX_DEPOSIT_RECEIPTS_PER_PAYLOAD] # [New in EIP6110] ``` #### `ExecutionPayloadHeader` @@ -115,7 +116,8 @@ class ExecutionPayloadHeader(Container): block_hash: Hash32 transactions_root: Root withdrawals_root: Root - deposit_receipts_root: Root # [New in EIP-6110] + excess_data_gas: uint256 + deposit_receipts_root: Root # [New in EIP6110] ``` #### `BeaconState` @@ -157,13 +159,13 @@ class BeaconState(Container): current_sync_committee: SyncCommittee next_sync_committee: SyncCommittee # Execution - latest_execution_payload_header: ExecutionPayloadHeader # [Modified in EIP-6110] + latest_execution_payload_header: ExecutionPayloadHeader # [Modified in EIP6110] # Withdrawals next_withdrawal_index: WithdrawalIndex next_withdrawal_validator_index: ValidatorIndex # Deep history valid from Capella onwards historical_summaries: List[HistoricalSummary, HISTORICAL_ROOTS_LIMIT] - # [New in EIP-6110] + # [New in EIP6110] deposit_receipts_start_index: uint64 ``` @@ -176,11 +178,12 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: process_block_header(state, block) if is_execution_enabled(state, block.body): process_withdrawals(state, block.body.execution_payload) - process_execution_payload(state, 
block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP-6110] + process_execution_payload(state, block.body.execution_payload, EXECUTION_ENGINE) # [Modified in EIP6110] process_randao(state, block.body) process_eth1_data(state, block.body) - process_operations(state, block.body) # [Modified in EIP-6110] + process_operations(state, block.body) # [Modified in EIP6110] process_sync_aggregate(state, block.body.sync_aggregate) + process_blob_kzg_commitments(state, block.body) ``` #### Modified `process_operations` @@ -189,7 +192,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: ```python def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: - # [Modified in EIP-6110] + # [Modified in EIP6110] # Disable former deposit mechanism once all prior deposits are processed eth1_deposit_index_limit = min(state.eth1_data.deposit_count, state.deposit_receipts_start_index) if state.eth1_deposit_index < eth1_deposit_index_limit: @@ -208,7 +211,7 @@ def process_operations(state: BeaconState, body: BeaconBlockBody) -> None: for_ops(body.voluntary_exits, process_voluntary_exit) for_ops(body.bls_to_execution_changes, process_bls_to_execution_change) - # [New in EIP-6110] + # [New in EIP6110] if is_execution_enabled(state, body): for_ops(body.execution_payload.deposit_receipts, process_deposit_receipt) ``` @@ -262,7 +265,8 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe block_hash=payload.block_hash, transactions_root=hash_tree_root(payload.transactions), withdrawals_root=hash_tree_root(payload.withdrawals), - deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in EIP-6110] + excess_data_gas=payload.excess_data_gas, + deposit_receipts_root=hash_tree_root(payload.deposit_receipts), # [New in EIP6110] ) ``` diff --git a/specs/_features/eip6110/fork.md b/specs/_features/eip6110/fork.md index df98c4c69e..2145a9d1a3 100644 --- a/specs/_features/eip6110/fork.md +++ 
b/specs/_features/eip6110/fork.md @@ -44,6 +44,8 @@ def compute_fork_version(epoch: Epoch) -> Version: """ if epoch >= EIP6110_FORK_EPOCH: return EIP6110_FORK_VERSION + if epoch >= DENEB_FORK_EPOCH: + return DENEB_FORK_VERSION if epoch >= CAPELLA_FORK_EPOCH: return CAPELLA_FORK_VERSION if epoch >= BELLATRIX_FORK_EPOCH: @@ -68,8 +70,8 @@ If `state.slot % SLOTS_PER_EPOCH == 0` and `compute_epoch_at_slot(state.slot) == an irregular state change is made to upgrade to EIP-6110. ```python -def upgrade_to_eip6110(pre: capella.BeaconState) -> BeaconState: - epoch = capella.get_current_epoch(pre) +def upgrade_to_eip6110(pre: deneb.BeaconState) -> BeaconState: + epoch = deneb.get_current_epoch(pre) latest_execution_payload_header = ExecutionPayloadHeader( parent_hash=pre.latest_execution_payload_header.parent_hash, fee_recipient=pre.latest_execution_payload_header.fee_recipient, @@ -86,6 +88,7 @@ def upgrade_to_eip6110(pre: capella.BeaconState) -> BeaconState: block_hash=pre.latest_execution_payload_header.block_hash, transactions_root=pre.latest_execution_payload_header.transactions_root, withdrawals_root=pre.latest_execution_payload_header.withdrawals_root, + excess_data_gas=uint256(0), deposit_receipts_root=Root(), # [New in EIP-6110] ) post = BeaconState( diff --git a/specs/_features/eip6110/light-client/fork.md b/specs/_features/eip6110/light-client/fork.md index 6ffa3d8697..2aaae3d948 100644 --- a/specs/_features/eip6110/light-client/fork.md +++ b/specs/_features/eip6110/light-client/fork.md @@ -15,14 +15,14 @@ ## Introduction -This document describes how to upgrade existing light client objects based on the [Capella specification](../../capella/light-client/sync-protocol.md) to eip6110. This is necessary when processing pre-eip6110 data with a post-eip6110 `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format. 
+This document describes how to upgrade existing light client objects based on the [Deneb specification](../../deneb/light-client/sync-protocol.md) to eip6110. This is necessary when processing pre-eip6110 data with a post-eip6110 `LightClientStore`. Note that the data being exchanged over the network protocols uses the original format. ### Upgrading light client data A eip6110 `LightClientStore` can still process earlier light client data. In order to do so, that pre-eip6110 data needs to be locally upgraded to eip6110 before processing. ```python -def upgrade_lc_header_to_eip6110(pre: capella.LightClientHeader) -> LightClientHeader: +def upgrade_lc_header_to_eip6110(pre: deneb.LightClientHeader) -> LightClientHeader: return LightClientHeader( beacon=pre.beacon, execution=ExecutionPayloadHeader( @@ -41,14 +41,15 @@ def upgrade_lc_header_to_eip6110(pre: capella.LightClientHeader) -> LightClientH block_hash=pre.execution.block_hash, transactions_root=pre.execution.transactions_root, withdrawals_root=pre.execution.withdrawals_root, - deposit_receipts_root=Root(), + excess_data_gas=pre.execution.excess_data_gas, + deposit_receipts_root=Root(), # [New in EIP6110] ), execution_branch=pre.execution_branch, ) ``` ```python -def upgrade_lc_bootstrap_to_eip6110(pre: capella.LightClientBootstrap) -> LightClientBootstrap: +def upgrade_lc_bootstrap_to_eip6110(pre: deneb.LightClientBootstrap) -> LightClientBootstrap: return LightClientBootstrap( header=upgrade_lc_header_to_eip6110(pre.header), current_sync_committee=pre.current_sync_committee, @@ -57,7 +58,7 @@ def upgrade_lc_bootstrap_to_eip6110(pre: capella.LightClientBootstrap) -> LightC ``` ```python -def upgrade_lc_update_to_eip6110(pre: capella.LightClientUpdate) -> LightClientUpdate: +def upgrade_lc_update_to_eip6110(pre: deneb.LightClientUpdate) -> LightClientUpdate: return LightClientUpdate( attested_header=upgrade_lc_header_to_eip6110(pre.attested_header), next_sync_committee=pre.next_sync_committee, @@ -70,7 +71,7 @@ 
def upgrade_lc_update_to_eip6110(pre: capella.LightClientUpdate) -> LightClientU ``` ```python -def upgrade_lc_finality_update_to_eip6110(pre: capella.LightClientFinalityUpdate) -> LightClientFinalityUpdate: +def upgrade_lc_finality_update_to_eip6110(pre: deneb.LightClientFinalityUpdate) -> LightClientFinalityUpdate: return LightClientFinalityUpdate( attested_header=upgrade_lc_header_to_eip6110(pre.attested_header), finalized_header=upgrade_lc_header_to_eip6110(pre.finalized_header), @@ -81,7 +82,7 @@ def upgrade_lc_finality_update_to_eip6110(pre: capella.LightClientFinalityUpdate ``` ```python -def upgrade_lc_optimistic_update_to_eip6110(pre: capella.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate: +def upgrade_lc_optimistic_update_to_eip6110(pre: deneb.LightClientOptimisticUpdate) -> LightClientOptimisticUpdate: return LightClientOptimisticUpdate( attested_header=upgrade_lc_header_to_eip6110(pre.attested_header), sync_aggregate=pre.sync_aggregate, @@ -91,10 +92,10 @@ def upgrade_lc_optimistic_update_to_eip6110(pre: capella.LightClientOptimisticUp ### Upgrading the store -Existing `LightClientStore` objects based on Capella MUST be upgraded to eip6110 before eip6110 based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `eip6110_FORK_EPOCH`. +Existing `LightClientStore` objects based on Deneb MUST be upgraded to eip6110 before eip6110 based light client data can be processed. The `LightClientStore` upgrade MAY be performed before `EIP6110_FORK_EPOCH`. 
```python -def upgrade_lc_store_to_eip6110(pre: capella.LightClientStore) -> LightClientStore: +def upgrade_lc_store_to_eip6110(pre: deneb.LightClientStore) -> LightClientStore: if pre.best_valid_update is None: best_valid_update = None else: diff --git a/specs/_features/eip6110/light-client/full-node.md b/specs/_features/eip6110/light-client/full-node.md index 0e400dd161..27b7b30637 100644 --- a/specs/_features/eip6110/light-client/full-node.md +++ b/specs/_features/eip6110/light-client/full-node.md @@ -47,7 +47,10 @@ def block_to_light_client_header(block: SignedBeaconBlock) -> LightClientHeader: withdrawals_root=hash_tree_root(payload.withdrawals), ) - # [New in Deneb] + if epoch >= DENEB_FORK_EPOCH: + execution_header.excess_data_gas = payload.excess_data_gas + + # [New in EIP6110] if epoch >= EIP6110_FORK_EPOCH: execution_header.deposit_receipts_root = hash_tree_root(payload.deposit_receipts) diff --git a/specs/_features/eip6110/light-client/p2p-interface.md b/specs/_features/eip6110/light-client/p2p-interface.md index 9b33d59ffd..f55fb2f77e 100644 --- a/specs/_features/eip6110/light-client/p2p-interface.md +++ b/specs/_features/eip6110/light-client/p2p-interface.md @@ -26,7 +26,7 @@ ## Networking -The [Capella light client networking specification](../../capella/light-client/p2p-interface.md) is extended to exchange [EIP-6110 light client data](./sync-protocol.md). +The [Deneb light client networking specification](../../deneb/light-client/p2p-interface.md) is extended to exchange [EIP-6110 light client data](./sync-protocol.md). 
### The gossip domain: gossipsub @@ -43,6 +43,7 @@ The [Capella light client networking specification](../../capella/light-client/p | `GENESIS_FORK_VERSION` | n/a | | `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` | | `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` | +| `DENEB_FORK_VERSION` | `deneb.LightClientFinalityUpdate` | | `EIP6110_FORK_VERSION` and later | `eip6110.LightClientFinalityUpdate` | ###### `light_client_optimistic_update` @@ -54,6 +55,7 @@ The [Capella light client networking specification](../../capella/light-client/p | `GENESIS_FORK_VERSION` | n/a | | `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` | | `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` | +| `DENEB_FORK_VERSION` | `deneb.LightClientOptimisticUpdate` | | `EIP6110_FORK_VERSION` and later | `eip6110.LightClientOptimisticUpdate` | ### The Req/Resp domain @@ -69,6 +71,7 @@ The [Capella light client networking specification](../../capella/light-client/p | `GENESIS_FORK_VERSION` | n/a | | `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientBootstrap` | | `CAPELLA_FORK_VERSION` | `capella.LightClientBootstrap` | +| `DENEB_FORK_VERSION` | `deneb.LightClientBootstrap` | | `EIP6110_FORK_VERSION` and later | `eip6110.LightClientBootstrap` | ##### LightClientUpdatesByRange @@ -80,6 +83,7 @@ The [Capella light client networking specification](../../capella/light-client/p | `GENESIS_FORK_VERSION` | n/a | | `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientUpdate` | | `CAPELLA_FORK_VERSION` | `capella.LightClientUpdate` | +| `DENEB_FORK_VERSION` | `deneb.LightClientUpdate` | | `EIP6110_FORK_VERSION` and later | `eip6110.LightClientUpdate` | ##### GetLightClientFinalityUpdate @@ -91,6 +95,7 @@ The [Capella light client networking specification](../../capella/light-client/p | `GENESIS_FORK_VERSION` | n/a | | `ALTAIR_FORK_VERSION` 
through `BELLATRIX_FORK_VERSION` | `altair.LightClientFinalityUpdate` | | `CAPELLA_FORK_VERSION` | `capella.LightClientFinalityUpdate` | +| `DENEB_FORK_VERSION` | `deneb.LightClientFinalityUpdate` | | `EIP6110_FORK_VERSION` and later | `eip6110.LightClientFinalityUpdate` | ##### GetLightClientOptimisticUpdate @@ -102,4 +107,5 @@ The [Capella light client networking specification](../../capella/light-client/p | `GENESIS_FORK_VERSION` | n/a | | `ALTAIR_FORK_VERSION` through `BELLATRIX_FORK_VERSION` | `altair.LightClientOptimisticUpdate` | | `CAPELLA_FORK_VERSION` | `capella.LightClientOptimisticUpdate` | +| `DENEB_FORK_VERSION` | `deneb.LightClientOptimisticUpdate` | | `EIP6110_FORK_VERSION` and later | `eip6110.LightClientOptimisticUpdate` | diff --git a/specs/_features/eip6110/light-client/sync-protocol.md b/specs/_features/eip6110/light-client/sync-protocol.md index 867aa2730d..406a4b9347 100644 --- a/specs/_features/eip6110/light-client/sync-protocol.md +++ b/specs/_features/eip6110/light-client/sync-protocol.md @@ -18,7 +18,7 @@ ## Introduction -This upgrade updates light client data to include the EIP-6110 changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Capella Light Client specifications](../../capella/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Capella based deployments to EIP-6110. +This upgrade updates light client data to include the EIP-6110 changes to the [`ExecutionPayload`](../beacon-chain.md) structure. It extends the [Deneb Light Client specifications](../../deneb/light-client/sync-protocol.md). The [fork document](./fork.md) explains how to upgrade existing Deneb based deployments to EIP-6110. 
Additional documents describes the impact of the upgrade on certain roles: - [Full node](./full-node.md) @@ -32,11 +32,9 @@ Additional documents describes the impact of the upgrade on certain roles: def get_lc_execution_root(header: LightClientHeader) -> Root: epoch = compute_epoch_at_slot(header.beacon.slot) - # [New in EIP-6110] - if epoch >= EIP6110_FORK_EPOCH: + if epoch >= DENEB_FORK_EPOCH: return hash_tree_root(header.execution) - # [Modified in EIP-6110] if epoch >= CAPELLA_FORK_EPOCH: execution_header = capella.ExecutionPayloadHeader( parent_hash=header.execution.parent_hash, @@ -68,7 +66,11 @@ def is_valid_light_client_header(header: LightClientHeader) -> bool: # [New in EIP-6110] if epoch < EIP6110_FORK_EPOCH: - if header.execution.withdrawals_root != Root(): + if header.execution.deposit_receipts_root != Root(): + return False + + if epoch < DENEB_FORK_EPOCH: + if header.execution.excess_data_gas != uint256(0): return False if epoch < CAPELLA_FORK_EPOCH: diff --git a/specs/_features/eip6110/validator.md b/specs/_features/eip6110/validator.md index ae9d493a6f..6770ef56af 100644 --- a/specs/_features/eip6110/validator.md +++ b/specs/_features/eip6110/validator.md @@ -20,7 +20,7 @@ This document represents the changes to be made in the code of an "honest valida ## Prerequisites -This document is an extension of the [Capella -- Honest Validator](../../capella/validator.md) guide. +This document is an extension of the [Deneb -- Honest Validator](../../deneb/validator.md) guide. All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [EIP-6110](./beacon-chain.md) are requisite for this document and used throughout. 
diff --git a/specs/deneb/light-client/fork.md b/specs/deneb/light-client/fork.md index 8c552937a5..46a0930283 100644 --- a/specs/deneb/light-client/fork.md +++ b/specs/deneb/light-client/fork.md @@ -41,6 +41,7 @@ def upgrade_lc_header_to_deneb(pre: capella.LightClientHeader) -> LightClientHea block_hash=pre.execution.block_hash, transactions_root=pre.execution.transactions_root, withdrawals_root=pre.execution.withdrawals_root, + excess_data_gas=uint256(0), # [New in Deneb] ), execution_branch=pre.execution_branch, ) diff --git a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py index 7d89a9788e..6d3f377a33 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py +++ b/tests/core/pyspec/eth2spec/test/deneb/unittests/polynomial_commitments/test_polynomial_commitments.py @@ -4,7 +4,8 @@ spec_test, single_phase, with_deneb_and_later, - expect_assertion_error + expect_assertion_error, + always_bls ) from eth2spec.test.helpers.sharding import ( get_sample_blob, @@ -263,6 +264,7 @@ def test_validate_kzg_g1_neutral_element(spec): @with_deneb_and_later @spec_test @single_phase +@always_bls def test_validate_kzg_g1_not_in_g1(spec): """ Verify that `validate_kzg_g1` fails on point not in G1 @@ -274,6 +276,7 @@ def test_validate_kzg_g1_not_in_g1(spec): @with_deneb_and_later @spec_test @single_phase +@always_bls def test_validate_kzg_g1_not_on_curve(spec): """ Verify that `validate_kzg_g1` fails on point not in G1 diff --git a/tests/core/pyspec/eth2spec/test/helpers/forks.py b/tests/core/pyspec/eth2spec/test/helpers/forks.py index e6320cc9b3..5e97522dbb 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/forks.py +++ b/tests/core/pyspec/eth2spec/test/helpers/forks.py @@ -6,7 +6,7 @@ def is_post_fork(a, b): if a == EIP6110: - return b in [PHASE0, ALTAIR, BELLATRIX, 
CAPELLA, EIP6110] + return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110] if a == DENEB: return b in [PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB] if a == CAPELLA: From 3e7e780b7722ce7a33bc47961acd31e2927998d8 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 6 Apr 2023 17:04:49 +0600 Subject: [PATCH 172/210] Apply suggestions from code review Co-authored-by: Hsiao-Wei Wang --- specs/_features/eip6110/light-client/fork.md | 2 +- specs/_features/eip6110/light-client/full-node.md | 4 ++-- specs/_features/eip6110/light-client/sync-protocol.md | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/_features/eip6110/light-client/fork.md b/specs/_features/eip6110/light-client/fork.md index 2aaae3d948..34f0fef8ce 100644 --- a/specs/_features/eip6110/light-client/fork.md +++ b/specs/_features/eip6110/light-client/fork.md @@ -1,4 +1,4 @@ -# eip6110 Light Client -- Fork Logic +# EIP-6110 Light Client -- Fork Logic ## Table of contents diff --git a/specs/_features/eip6110/light-client/full-node.md b/specs/_features/eip6110/light-client/full-node.md index 27b7b30637..03c0f17bd8 100644 --- a/specs/_features/eip6110/light-client/full-node.md +++ b/specs/_features/eip6110/light-client/full-node.md @@ -1,4 +1,4 @@ -# Deneb Light Client -- Full Node +# EIP-6110 Light Client -- Full Node **Notice**: This document is a work-in-progress for researchers and implementers. @@ -17,7 +17,7 @@ ## Introduction -This upgrade adds information about the execution payload to light client data as part of the Deneb upgrade. +This upgrade adds information about the execution payload to light client data as part of the EIP-6110 upgrade. 
## Helper functions diff --git a/specs/_features/eip6110/light-client/sync-protocol.md b/specs/_features/eip6110/light-client/sync-protocol.md index 406a4b9347..bcb9d50e43 100644 --- a/specs/_features/eip6110/light-client/sync-protocol.md +++ b/specs/_features/eip6110/light-client/sync-protocol.md @@ -1,4 +1,4 @@ -# Deneb Light Client -- Sync Protocol +# EIP-6110 Light Client -- Sync Protocol **Notice**: This document is a work-in-progress for researchers and implementers. From 389b79408b1ce9a6a075f7338dabc72c8d2a377a Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Thu, 6 Apr 2023 17:39:05 +0600 Subject: [PATCH 173/210] Add EIP6110 operations gen, and to fork upgrades list --- tests/core/pyspec/eth2spec/test/helpers/constants.py | 4 ++-- tests/formats/operations/README.md | 1 + tests/generators/operations/main.py | 6 ++++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/helpers/constants.py b/tests/core/pyspec/eth2spec/test/helpers/constants.py index 83e7e40dbb..2140c96e45 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/constants.py +++ b/tests/core/pyspec/eth2spec/test/helpers/constants.py @@ -27,13 +27,13 @@ # The forks that output to the test vectors. TESTGEN_FORKS = (PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110) -# TODO: no EIP6110 fork tests now. 
ALL_FORK_UPGRADES = { # pre_fork_name: post_fork_name PHASE0: ALTAIR, ALTAIR: BELLATRIX, BELLATRIX: CAPELLA, CAPELLA: DENEB, + DENEB: EIP6110, } ALL_PRE_POST_FORKS = ALL_FORK_UPGRADES.items() AFTER_BELLATRIX_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items() if key != PHASE0} @@ -42,7 +42,7 @@ if key not in [PHASE0, ALTAIR]} AFTER_CAPELLA_PRE_POST_FORKS = AFTER_CAPELLA_UPGRADES.items() AFTER_DENEB_UPGRADES = {key: value for key, value in ALL_FORK_UPGRADES.items() - if key not in [PHASE0, ALTAIR, BELLATRIX, EIP6110]} + if key not in [PHASE0, ALTAIR, BELLATRIX]} AFTER_DENEB_PRE_POST_FORKS = AFTER_DENEB_UPGRADES.items() # diff --git a/tests/formats/operations/README.md b/tests/formats/operations/README.md index 810d62578e..245ce85653 100644 --- a/tests/formats/operations/README.md +++ b/tests/formats/operations/README.md @@ -45,6 +45,7 @@ Operations: | `execution_payload` | `ExecutionPayload` | `execution_payload` | `process_execution_payload(state, execution_payload)` (new in Bellatrix) | | `withdrawals` | `ExecutionPayload` | `execution_payload` | `process_withdrawals(state, execution_payload)` (new in Capella) | | `bls_to_execution_change` | `SignedBLSToExecutionChange` | `address_change` | `process_bls_to_execution_change(state, address_change)` (new in Capella) | +| `deposit_receipt` | `DepositReceipt` | `deposit_receipt` | `process_deposit_receipt(state, deposit_receipt)` (new in EIP6110) | Note that `block_header` is not strictly an operation (and is a full `Block`), but processed in the same manner, and hence included here. 
diff --git a/tests/generators/operations/main.py b/tests/generators/operations/main.py index ed4c6c26c8..7b382c838b 100644 --- a/tests/generators/operations/main.py +++ b/tests/generators/operations/main.py @@ -38,6 +38,11 @@ deneb_mods = capella_mods + _new_eip6110_mods = {key: 'eth2spec.test.eip6110.block_processing.test_process_' + key for key in [ + 'deposit_receipt', + ]} + eip6110_mods = combine_mods(_new_eip6110_mods, deneb_mods) + # TODO Custody Game testgen is disabled for now # _new_custody_game_mods = {key: 'eth2spec.test.custody_game.block_processing.test_process_' + key for key in [ # 'attestation', @@ -54,6 +59,7 @@ BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, + EIP6110: eip6110_mods, } run_state_test_generators(runner_name="operations", all_mods=all_mods) From 11842c9e2a1d18227d4c12c6ea5aa0d19fa49656 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 6 Apr 2023 20:04:28 +0800 Subject: [PATCH 174/210] Update test generators. Move `test_deposit_transition` to `sanity/blocks/` --- tests/core/pyspec/eth2spec/test/eip6110/__init__.py | 0 .../eth2spec/test/eip6110/block_processing/__init__.py | 0 .../core/pyspec/eth2spec/test/eip6110/sanity/__init__.py | 0 .../eth2spec/test/eip6110/sanity/blocks/__init__.py | 1 + .../blocks}/test_deposit_transition.py | 0 tests/generators/operations/main.py | 2 +- tests/generators/sanity/main.py | 8 +++++++- 7 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 tests/core/pyspec/eth2spec/test/eip6110/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/eip6110/block_processing/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/eip6110/sanity/__init__.py create mode 100644 tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/__init__.py rename tests/core/pyspec/eth2spec/test/eip6110/{block_processing => sanity/blocks}/test_deposit_transition.py (100%) diff --git a/tests/core/pyspec/eth2spec/test/eip6110/__init__.py 
b/tests/core/pyspec/eth2spec/test/eip6110/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/eip6110/block_processing/__init__.py b/tests/core/pyspec/eth2spec/test/eip6110/block_processing/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/eip6110/sanity/__init__.py b/tests/core/pyspec/eth2spec/test/eip6110/sanity/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/__init__.py b/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/__init__.py new file mode 100644 index 0000000000..3c0e060f3d --- /dev/null +++ b/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/__init__.py @@ -0,0 +1 @@ +from .test_deposit_transition import * # noqa: F401 F403 diff --git a/tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_deposit_transition.py b/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py similarity index 100% rename from tests/core/pyspec/eth2spec/test/eip6110/block_processing/test_deposit_transition.py rename to tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py diff --git a/tests/generators/operations/main.py b/tests/generators/operations/main.py index 7b382c838b..fc22179176 100644 --- a/tests/generators/operations/main.py +++ b/tests/generators/operations/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 if __name__ == "__main__": diff --git a/tests/generators/sanity/main.py b/tests/generators/sanity/main.py index 8a6c7b39cc..b9f6d7fbb1 100644 --- a/tests/generators/sanity/main.py +++ b/tests/generators/sanity/main.py @@ -1,4 +1,4 @@ -from 
eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods @@ -28,12 +28,18 @@ ]} deneb_mods = combine_mods(_new_deneb_mods, capella_mods) + _new_eip6110_mods = {key: 'eth2spec.test.eip6110.sanity.' + key for key in [ + 'blocks', + ]} + eip6110_mods = combine_mods(_new_eip6110_mods, deneb_mods) + all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, + EIP6110: eip6110_mods, } run_state_test_generators(runner_name="sanity", all_mods=all_mods) From d78c7ada03bc497ed76ae55fa212a1c9d73726dc Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 6 Apr 2023 20:08:23 +0800 Subject: [PATCH 175/210] Fix previous fork version --- tests/core/pyspec/eth2spec/test/helpers/fork_transition.py | 4 +++- tests/core/pyspec/eth2spec/test/helpers/genesis.py | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py index 20c20b938c..68444c4726 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py +++ b/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py @@ -159,6 +159,8 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate= state = post_spec.upgrade_to_capella(state) elif post_spec.fork == DENEB: state = post_spec.upgrade_to_deneb(state) + elif post_spec.fork == EIP6110: + state = post_spec.upgrade_to_eip6110(state) assert state.fork.epoch == fork_epoch @@ -175,7 +177,7 @@ def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate= assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION assert state.fork.current_version == post_spec.config.DENEB_FORK_VERSION elif post_spec.fork == 
EIP6110: - assert state.fork.previous_version == post_spec.config.CAPELLA_FORK_VERSION + assert state.fork.previous_version == post_spec.config.DENEB_FORK_VERSION assert state.fork.current_version == post_spec.config.EIP6110_FORK_VERSION if with_block: diff --git a/tests/core/pyspec/eth2spec/test/helpers/genesis.py b/tests/core/pyspec/eth2spec/test/helpers/genesis.py index 32ce8974d9..fea259013b 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/genesis.py +++ b/tests/core/pyspec/eth2spec/test/helpers/genesis.py @@ -84,7 +84,7 @@ def create_genesis_state(spec, validator_balances, activation_threshold): previous_version = spec.config.CAPELLA_FORK_VERSION current_version = spec.config.DENEB_FORK_VERSION elif spec.fork == EIP6110: - previous_version = spec.config.CAPELLA_FORK_VERSION + previous_version = spec.config.DENEB_FORK_VERSION current_version = spec.config.EIP6110_FORK_VERSION state = spec.BeaconState( From c2473e7b8a3ca5b346abf7ea93b298345c557e07 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Fri, 7 Apr 2023 14:20:28 +0600 Subject: [PATCH 176/210] Make transition tests comply to sanity format --- .../test/eip6110/sanity/blocks/test_deposit_transition.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py b/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py index 92c3a96b55..2e03f1b0d7 100644 --- a/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py +++ b/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py @@ -1,5 +1,6 @@ from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, + sign_block, ) from eth2spec.test.context import ( spec_state_test, @@ -27,7 +28,8 @@ def run_deposit_transition_block(spec, state, block, top_up_keys=[], valid=True) If ``valid == False``, run expecting ``AssertionError`` """ yield 'pre', state - yield 'block', block + 
signed_block = sign_block(spec, state, block, proposer_index=block.proposer_index) + yield 'blocks', [signed_block] if not valid: expect_assertion_error(lambda: spec.process_block(state, block)) From 41386092b761d61792c2ad0d2c07a76c4511a823 Mon Sep 17 00:00:00 2001 From: Roberto Saltini Date: Sat, 8 Apr 2023 14:00:01 +1000 Subject: [PATCH 177/210] Apply changes to p2p-interface.md --- specs/phase0/p2p-interface.md | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index f527529316..56c1b8cfb1 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -317,7 +317,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block` - _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation. - _[REJECT]_ The block is from a higher slot than its parent. - _[REJECT]_ The current `finalized_checkpoint` is an ancestor of `block` -- i.e. - `get_ancestor(store, block.parent_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) + `get_ancestor_at_epoch_boundary(store, block.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root` - _[REJECT]_ The block is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `parent_root`/`slot`). @@ -356,7 +356,7 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_ (a client MAY queue aggregates for processing once block is retrieved). - _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) passes validation. - _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `aggregate.data.beacon_block_root` -- i.e. 
- `get_ancestor(store, aggregate.data.beacon_block_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) + `get_ancestor_at_epoch_boundary(store, aggregate.data.beacon_block_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root` @@ -425,9 +425,9 @@ The following validations MUST pass before forwarding the `attestation` on the s (a client MAY queue attestations for processing once block is retrieved). - _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation. - _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e. - `get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(attestation.data.target.epoch)) == attestation.data.target.root` + `get_ancestor_at_epoch_boundary(store, attestation.data.beacon_block_root, attestation.data.target.epoch) == attestation.data.target.root` - _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `attestation.data.beacon_block_root` -- i.e.
- `get_ancestor(store, attestation.data.beacon_block_root, compute_start_slot_at_epoch(store.finalized_checkpoint.epoch)) + `get_ancestor_at_epoch_boundary(store, attestation.data.beacon_block_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root` From e49a30f85ba7eef5ec22f6c17a260c6731f5c839 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 10 Apr 2023 13:47:21 +0600 Subject: [PATCH 178/210] Fix deposit transition tests --- .../sanity/blocks/test_deposit_transition.py | 59 ++++++++----------- 1 file changed, 25 insertions(+), 34 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py b/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py index 2e03f1b0d7..1477a04fb6 100644 --- a/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py +++ b/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py @@ -17,6 +17,9 @@ compute_el_block_hash, ) from eth2spec.test.helpers.keys import privkeys, pubkeys +from eth2spec.test.helpers.state import ( + state_transition_and_sign_block +) def run_deposit_transition_block(spec, state, block, top_up_keys=[], valid=True): @@ -28,24 +31,20 @@ def run_deposit_transition_block(spec, state, block, top_up_keys=[], valid=True) If ``valid == False``, run expecting ``AssertionError`` """ yield 'pre', state - signed_block = sign_block(spec, state, block, proposer_index=block.proposer_index) - yield 'blocks', [signed_block] - if not valid: - expect_assertion_error(lambda: spec.process_block(state, block)) - yield 'post', None - return + signed_block = state_transition_and_sign_block(spec, state, block, not valid) - spec.process_block(state, block) - yield 'post', state + yield 'blocks', [signed_block] + yield 'post', state if valid else None # Check that deposits are applied - expected_pubkeys = [d.data.pubkey for d in block.body.deposits] - deposit_receipts = 
block.body.execution_payload.deposit_receipts - expected_pubkeys = expected_pubkeys + [d.pubkey for d in deposit_receipts if (d.pubkey not in top_up_keys)] - actual_pubkeys = [v.pubkey for v in state.validators[len(state.validators) - len(expected_pubkeys):]] + if valid: + expected_pubkeys = [d.data.pubkey for d in block.body.deposits] + deposit_receipts = block.body.execution_payload.deposit_receipts + expected_pubkeys = expected_pubkeys + [d.pubkey for d in deposit_receipts if (d.pubkey not in top_up_keys)] + actual_pubkeys = [v.pubkey for v in state.validators[len(state.validators) - len(expected_pubkeys):]] - assert actual_pubkeys == expected_pubkeys + assert actual_pubkeys == expected_pubkeys def prepare_state_and_block(spec, @@ -53,7 +52,8 @@ def prepare_state_and_block(spec, deposit_cnt, deposit_receipt_cnt, first_deposit_receipt_index=0, - deposit_receipts_start_index=None): + deposit_receipts_start_index=None, + eth1_data_deposit_count=None): deposits = [] deposit_receipts = [] keypair_index = len(state.validators) @@ -79,8 +79,10 @@ def prepare_state_and_block(spec, if deposit_root: state.eth1_deposit_index = 0 + if not eth1_data_deposit_count: + eth1_data_deposit_count = deposit_cnt state.eth1_data = spec.Eth1Data(deposit_root=deposit_root, - deposit_count=deposit_cnt, + deposit_count=eth1_data_deposit_count, block_hash=state.eth1_data.block_hash) # Prepare deposit receipts @@ -105,9 +107,6 @@ def prepare_state_and_block(spec, block.body.execution_payload.deposit_receipts = deposit_receipts block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - # Advance a slot - spec.process_slots(state, block.slot) - return state, block @@ -148,10 +147,8 @@ def test_deposit_transition__process_max_eth1_deposits(spec, state): deposit_cnt=spec.MAX_DEPOSITS, deposit_receipt_cnt=1, first_deposit_receipt_index=spec.MAX_DEPOSITS + 1, - deposit_receipts_start_index=spec.MAX_DEPOSITS) - state.eth1_data = 
spec.Eth1Data(deposit_root=state.eth1_data.deposit_root, - deposit_count=23, - block_hash=state.eth1_data.block_hash) + deposit_receipts_start_index=spec.MAX_DEPOSITS, + eth1_data_deposit_count=23) yield from run_deposit_transition_block(spec, state, block) @@ -177,10 +174,8 @@ def test_deposit_transition__invalid_not_enough_eth1_deposits(spec, state): deposit_cnt=3, deposit_receipt_cnt=1, first_deposit_receipt_index=29, - deposit_receipts_start_index=23) - state.eth1_data = spec.Eth1Data(deposit_root=state.eth1_data.deposit_root, - deposit_count=17, - block_hash=state.eth1_data.block_hash) + deposit_receipts_start_index=23, + eth1_data_deposit_count=17) yield from run_deposit_transition_block(spec, state, block, valid=False) @@ -193,10 +188,8 @@ def test_deposit_transition__invalid_too_many_eth1_deposits(spec, state): deposit_cnt=3, deposit_receipt_cnt=1, first_deposit_receipt_index=11, - deposit_receipts_start_index=7) - state.eth1_data = spec.Eth1Data(deposit_root=state.eth1_data.deposit_root, - deposit_count=2, - block_hash=state.eth1_data.block_hash) + deposit_receipts_start_index=7, + eth1_data_deposit_count=2) yield from run_deposit_transition_block(spec, state, block, valid=False) @@ -210,10 +203,8 @@ def test_deposit_transition__invalid_eth1_deposits_overlap_in_protocol_deposits( deposit_cnt=spec.MAX_DEPOSITS, deposit_receipt_cnt=1, first_deposit_receipt_index=spec.MAX_DEPOSITS, - deposit_receipts_start_index=spec.MAX_DEPOSITS - 1) - state.eth1_data = spec.Eth1Data(deposit_root=state.eth1_data.deposit_root, - deposit_count=23, - block_hash=state.eth1_data.block_hash) + deposit_receipts_start_index=spec.MAX_DEPOSITS - 1, + eth1_data_deposit_count=23) yield from run_deposit_transition_block(spec, state, block, valid=False) From 1505a04a94791cffdcc25e3171ac2ee306ae2ff9 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Mon, 10 Apr 2023 16:08:13 +0600 Subject: [PATCH 179/210] Make linter happy --- .../test/eip6110/sanity/blocks/test_deposit_transition.py | 2 
-- 1 file changed, 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py b/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py index 1477a04fb6..51ef109605 100644 --- a/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py +++ b/tests/core/pyspec/eth2spec/test/eip6110/sanity/blocks/test_deposit_transition.py @@ -1,12 +1,10 @@ from eth2spec.test.helpers.block import ( build_empty_block_for_next_slot, - sign_block, ) from eth2spec.test.context import ( spec_state_test, with_phases, EIP6110, - expect_assertion_error, ) from eth2spec.test.helpers.deposits import ( build_deposit_data, From 1c5b9fddaf5069e642a16180aac73364e289754a Mon Sep 17 00:00:00 2001 From: Alex Stokes Date: Tue, 11 Apr 2023 13:34:42 +0900 Subject: [PATCH 180/210] add `EIP-4788` feature --- specs/_features/eip4788/beacon-chain.md | 72 +++++++++++++++++ specs/_features/eip4788/validator.md | 103 ++++++++++++++++++++++++ 2 files changed, 175 insertions(+) create mode 100644 specs/_features/eip4788/beacon-chain.md create mode 100644 specs/_features/eip4788/validator.md diff --git a/specs/_features/eip4788/beacon-chain.md b/specs/_features/eip4788/beacon-chain.md new file mode 100644 index 0000000000..6cd876de99 --- /dev/null +++ b/specs/_features/eip4788/beacon-chain.md @@ -0,0 +1,72 @@ +# EIP-4788 -- The Beacon Chain + +## Table of contents + + + + + +- [Introduction](#introduction) +- [Containers](#containers) + - [Extended Containers](#extended-containers) + - [`ExecutionPayload`](#executionpayload) + - [`ExecutionPayloadHeader`](#executionpayloadheader) + + + + +## Introduction + +TODO + +## Containers + +### Extended Containers + +#### `ExecutionPayload` + +```python +class ExecutionPayload(Container): + # Execution block header fields + parent_hash: Hash32 + fee_recipient: ExecutionAddress # 'beneficiary' in the yellow paper + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: 
ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 # 'difficulty' in the yellow paper + block_number: uint64 # 'number' in the yellow paper + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: uint256 + # Extra payload fields + block_hash: Hash32 # Hash of execution block + transactions: List[Transaction, MAX_TRANSACTIONS_PER_PAYLOAD] + withdrawals: List[Withdrawal, MAX_WITHDRAWALS_PER_PAYLOAD] + parent_beacon_block_root: Root # [New in EIP-4788] +``` + +#### `ExecutionPayloadHeader` + +```python +class ExecutionPayloadHeader(Container): + # Execution block header fields + parent_hash: Hash32 + fee_recipient: ExecutionAddress + state_root: Bytes32 + receipts_root: Bytes32 + logs_bloom: ByteVector[BYTES_PER_LOGS_BLOOM] + prev_randao: Bytes32 + block_number: uint64 + gas_limit: uint64 + gas_used: uint64 + timestamp: uint64 + extra_data: ByteList[MAX_EXTRA_DATA_BYTES] + base_fee_per_gas: uint256 + # Extra payload fields + block_hash: Hash32 # Hash of execution block + transactions_root: Root + withdrawals_root: Root + parent_beacon_block_root: Root # [New in EIP-4788] +``` diff --git a/specs/_features/eip4788/validator.md b/specs/_features/eip4788/validator.md new file mode 100644 index 0000000000..421e297ce9 --- /dev/null +++ b/specs/_features/eip4788/validator.md @@ -0,0 +1,103 @@ +# EIP-4788 -- Honest Validator + +**Notice**: This document is a work-in-progress for researchers and implementers. 
+ +## Table of contents + + + + + +- [Introduction](#introduction) +- [Prerequisites](#prerequisites) +- [Helpers](#helpers) +- [Protocols](#protocols) + - [`ExecutionEngine`](#executionengine) + - [`get_payload`](#get_payload) +- [Beacon chain responsibilities](#beacon-chain-responsibilities) + - [Block proposal](#block-proposal) + - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) + - [ExecutionPayload](#executionpayload) + + + + +## Introduction + +This document represents the changes to be made in the code of an "honest validator" to implement the EIP-4788 feature. + +## Prerequisites + +This document is an extension of the [Capella -- Honest Validator](../capella/validator.md) guide. +All behaviors and definitions defined in this document, and documents it extends, carry over unless explicitly noted or overridden. + +All terminology, constants, functions, and protocol mechanics defined in the updated Beacon Chain doc of [Capella](../capella/beacon-chain.md) are requisite for this document and used throughout. +Please see related Beacon Chain doc before continuing and use them as a reference throughout. + +## Helpers + +## Protocols + +### `ExecutionEngine` + +#### `get_payload` + +`get_payload` returns the upgraded EIP-4788 `ExecutionPayload` type. + +## Beacon chain responsibilities + +All validator responsibilities remain unchanged other than those noted below. + +### Block proposal + +#### Constructing the `BeaconBlockBody` + +##### ExecutionPayload + +`ExecutionPayload`s are constructed as they were in Capella, except that the parent beacon block root is also supplied. + +*Note*: In this section, `state` is the state of the slot for the block proposal _without_ the block yet applied. +That is, `state` is the `previous_state` processed through any empty slots up to the assigned slot using `process_slots(previous_state, slot)`. 
+ +*Note*: The only change made to `prepare_execution_payload` is to add the parent beacon block root as an additional +parameter to the `PayloadAttributes`. + +```python +def prepare_execution_payload(state: BeaconState, + pow_chain: Dict[Hash32, PowBlock], + safe_block_hash: Hash32, + finalized_block_hash: Hash32, + suggested_fee_recipient: ExecutionAddress, + execution_engine: ExecutionEngine) -> Optional[PayloadId]: + if not is_merge_transition_complete(state): + is_terminal_block_hash_set = TERMINAL_BLOCK_HASH != Hash32() + is_activation_epoch_reached = get_current_epoch(state) >= TERMINAL_BLOCK_HASH_ACTIVATION_EPOCH + if is_terminal_block_hash_set and not is_activation_epoch_reached: + # Terminal block hash is set but activation epoch is not yet reached, no prepare payload call is needed + return None + + terminal_pow_block = get_terminal_pow_block(pow_chain) + if terminal_pow_block is None: + # Pre-merge, no prepare payload call is needed + return None + # Signify merge via producing on top of the terminal PoW block + parent_hash = terminal_pow_block.block_hash + else: + # Post-merge, normal payload + parent_hash = state.latest_execution_payload_header.block_hash + + # Set the forkchoice head and initiate the payload build process + payload_attributes = PayloadAttributes( + timestamp=compute_timestamp_at_slot(state, state.slot), + prev_randao=get_randao_mix(state, get_current_epoch(state)), + suggested_fee_recipient=suggested_fee_recipient, + withdrawals=get_expected_withdrawals(state), + parent_beacon_block_root=hash_tree_root(state.latest_block_header), # [New in EIP-4788] + ) + return execution_engine.notify_forkchoice_updated( + head_block_hash=parent_hash, + safe_block_hash=safe_block_hash, + finalized_block_hash=finalized_block_hash, + payload_attributes=payload_attributes, + ) +``` From 09e5fc7ebe29608fedad2828dd57cb82d89f1a41 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 11 Apr 2023 13:42:16 +0600 Subject: [PATCH 181/210] Add eip6110 to 
generators --- tests/generators/epoch_processing/main.py | 3 +++ tests/generators/finality/main.py | 2 ++ tests/generators/fork_choice/main.py | 4 +++- tests/generators/genesis/main.py | 2 ++ tests/generators/light_client/main.py | 2 ++ tests/generators/rewards/main.py | 2 ++ tests/generators/sync/main.py | 2 ++ 7 files changed, 16 insertions(+), 1 deletion(-) diff --git a/tests/generators/epoch_processing/main.py b/tests/generators/epoch_processing/main.py index a485f646aa..26fe6d72d4 100644 --- a/tests/generators/epoch_processing/main.py +++ b/tests/generators/epoch_processing/main.py @@ -34,6 +34,8 @@ deneb_mods = capella_mods + eip6110_mods = deneb_mods + # TODO Custody Game testgen is disabled for now # custody_game_mods = {**{key: 'eth2spec.test.custody_game.epoch_processing.test_process_' + key for key in [ # 'reveal_deadlines', @@ -47,6 +49,7 @@ BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, + EIP6110: eip6110_mods, } run_state_test_generators(runner_name="epoch_processing", all_mods=all_mods) diff --git a/tests/generators/finality/main.py b/tests/generators/finality/main.py index a25f3b8e7a..601afdd053 100644 --- a/tests/generators/finality/main.py +++ b/tests/generators/finality/main.py @@ -8,6 +8,7 @@ bellatrix_mods = altair_mods # No additional Bellatrix specific finality tests capella_mods = bellatrix_mods # No additional Capella specific finality tests deneb_mods = capella_mods # No additional Deneb specific finality tests + eip6110_mods = deneb_mods # No additional EIP6110 specific finality tests all_mods = { PHASE0: phase_0_mods, @@ -15,6 +16,7 @@ BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, + EIP6110: eip6110_mods, } run_state_test_generators(runner_name="finality", all_mods=all_mods) diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index 4456c2546b..49b4f83cef 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -19,13 
+19,15 @@ ]} bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) capella_mods = bellatrix_mods # No additional Capella specific fork choice tests - deneb_mods = capella_mods # No additional Capella specific fork choice tests + deneb_mods = capella_mods # No additional Deneb specific fork choice tests + eip6110_mods = deneb_mods # No additional EIP6110 specific fork choice tests all_mods = { ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, + EIP6110: eip6110_mods, } run_state_test_generators(runner_name="fork_choice", all_mods=all_mods) diff --git a/tests/generators/genesis/main.py b/tests/generators/genesis/main.py index e95afcde19..6db5ccf722 100644 --- a/tests/generators/genesis/main.py +++ b/tests/generators/genesis/main.py @@ -17,12 +17,14 @@ bellatrix_mods = combine_mods(_new_bellatrix_mods, altair_mods) capella_mods = bellatrix_mods # No additional Capella specific genesis tests deneb_mods = capella_mods # No additional Deneb specific genesis tests + eip6110_mods = deneb_mods # No additional EIP6110 specific genesis tests all_mods = { PHASE0: phase_0_mods, ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, + EIP6110: eip6110_mods, } run_state_test_generators(runner_name="genesis", all_mods=all_mods) diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index cfe34aee4b..2f9451d1ed 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -15,12 +15,14 @@ ]} capella_mods = combine_mods(_new_capella_mods, bellatrix_mods) deneb_mods = capella_mods + eip6110_mods = deneb_mods all_mods = { ALTAIR: altair_mods, BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, + EIP6110: eip6110_mods, } run_state_test_generators(runner_name="light_client", all_mods=all_mods) diff --git a/tests/generators/rewards/main.py b/tests/generators/rewards/main.py index e6244d1720..5c84d9da03 100644 --- 
a/tests/generators/rewards/main.py +++ b/tests/generators/rewards/main.py @@ -17,6 +17,7 @@ bellatrix_mods = altair_mods capella_mods = bellatrix_mods deneb_mods = capella_mods + eip6110_mods = deneb_mods all_mods = { PHASE0: phase_0_mods, @@ -24,6 +25,7 @@ BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, + EIP6110: eip6110_mods, } run_state_test_generators(runner_name="rewards", all_mods=all_mods) diff --git a/tests/generators/sync/main.py b/tests/generators/sync/main.py index 11f05a741f..68e38cebe6 100644 --- a/tests/generators/sync/main.py +++ b/tests/generators/sync/main.py @@ -8,11 +8,13 @@ ]} capella_mods = bellatrix_mods deneb_mods = capella_mods + eip6110_mods = deneb_mods all_mods = { BELLATRIX: bellatrix_mods, CAPELLA: capella_mods, DENEB: deneb_mods, + EIP6110: eip6110_mods, } run_state_test_generators(runner_name="sync", all_mods=all_mods) From 0230c643b0ee5f7eb37cc974d419a66df98b85e8 Mon Sep 17 00:00:00 2001 From: Mikhail Kalinin Date: Tue, 11 Apr 2023 21:29:30 +0600 Subject: [PATCH 182/210] Fix EIP6110 import in generators --- tests/generators/epoch_processing/main.py | 2 +- tests/generators/finality/main.py | 2 +- tests/generators/fork_choice/main.py | 2 +- tests/generators/genesis/main.py | 2 +- tests/generators/light_client/main.py | 2 +- tests/generators/rewards/main.py | 2 +- 6 files changed, 6 insertions(+), 6 deletions(-) diff --git a/tests/generators/epoch_processing/main.py b/tests/generators/epoch_processing/main.py index 26fe6d72d4..645c84cb6b 100644 --- a/tests/generators/epoch_processing/main.py +++ b/tests/generators/epoch_processing/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 if __name__ == "__main__": diff --git a/tests/generators/finality/main.py 
b/tests/generators/finality/main.py index 601afdd053..15c6cad8dd 100644 --- a/tests/generators/finality/main.py +++ b/tests/generators/finality/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 if __name__ == "__main__": diff --git a/tests/generators/fork_choice/main.py b/tests/generators/fork_choice/main.py index 49b4f83cef..b0c9a9bb9d 100644 --- a/tests/generators/fork_choice/main.py +++ b/tests/generators/fork_choice/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods -from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB +from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 if __name__ == "__main__": diff --git a/tests/generators/genesis/main.py b/tests/generators/genesis/main.py index 6db5ccf722..feffde8e38 100644 --- a/tests/generators/genesis/main.py +++ b/tests/generators/genesis/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators, combine_mods -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 if __name__ == "__main__": diff --git a/tests/generators/light_client/main.py b/tests/generators/light_client/main.py index 2f9451d1ed..c6b0e01b9b 100644 --- a/tests/generators/light_client/main.py +++ b/tests/generators/light_client/main.py @@ -1,4 +1,4 @@ -from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB +from eth2spec.test.helpers.constants import ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 from eth2spec.gen_helpers.gen_from_tests.gen import combine_mods, run_state_test_generators diff --git 
a/tests/generators/rewards/main.py b/tests/generators/rewards/main.py index 5c84d9da03..d01d4a424e 100644 --- a/tests/generators/rewards/main.py +++ b/tests/generators/rewards/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators -from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB +from eth2spec.test.helpers.constants import PHASE0, ALTAIR, BELLATRIX, CAPELLA, DENEB, EIP6110 if __name__ == "__main__": From 6e423f6c4275608515158b5c483c351f0eb61b19 Mon Sep 17 00:00:00 2001 From: Age Manning Date: Wed, 12 Apr 2023 11:29:48 +1000 Subject: [PATCH 183/210] Stagger node rotations --- specs/phase0/validator.md | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md index 1b06aecfbb..56ca50732c 100644 --- a/specs/phase0/validator.md +++ b/specs/phase0/validator.md @@ -612,23 +612,22 @@ Because Phase 0 does not have shards and thus does not have Shard Committees, th * Remain subscribed to `SUBNETS_PER_NODE` for `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs. * Maintain advertisement of the selected subnets in their node's ENR `attnets` entry by setting the selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets. -* Select these subnets based on their node-id as specified by the following `compute_subnets(node_id,epoch)` function. +* Select these subnets based on their node-id as specified by the following `compute_subscribed_subnets(node_id,epoch)` function. 
```python -def compute_subnet(node_id: int, epoch: Epoch, index: int) -> int: +def compute_subscribed_subnet(node_id: int, epoch: Epoch, index: int) -> int: node_id_prefix = node_id >> (256 - ATTESTATION_SUBNET_PREFIX_BITS) - permutation_seed = hash(uint_to_bytes(epoch // EPOCHS_PER_SUBNET_SUBSCRIPTION)) + node_offset = node_id % EPOCHS_PER_SUBNET_SUBSCRIPTION + permutation_seed = hash(uint_to_bytes((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION)) permutated_prefix = compute_shuffled_index(node_id_prefix, 1 << ATTESTATION_SUBNET_PREFIX_BITS, permutation_seed) return (permutated_prefix + index) % ATTESTATION_SUBNET_COUNT ``` ```python -def compute_subnets(node_id: int, epoch: Epoch) -> Sequence[int]: - return [compute_subnet(node_id, epoch, idx) for idx in range(SUBNETS_PER_NODE)] +def compute_subscribed_subnets(node_id: int, epoch: Epoch) -> Sequence[int]: + return [compute_subscribed_subnet(node_id, epoch, idx) for idx in range(SUBNETS_PER_NODE)] ``` -*Note*: Nodes should subscribe to new subnets and remain subscribed to old subnets for at least one epoch. Nodes should pick a random duration to unsubscribe from old subnets to smooth the transition on the exact epoch boundary of which the shuffling changes. - *Note*: When preparing for a hard fork, a validator must select and subscribe to subnets of the future fork versioning at least `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements. 
## How to avoid slashing From 334114d9d373d3a71ab49720ba4831d0b1fce6dd Mon Sep 17 00:00:00 2001 From: Roberto Saltini Date: Tue, 18 Apr 2023 13:14:53 +1000 Subject: [PATCH 184/210] Rename get_ancestor_at_epoch_boundary to get_checkpoint_block --- specs/bellatrix/fork-choice.md | 2 +- specs/deneb/fork-choice.md | 2 +- specs/phase0/fork-choice.md | 12 ++++++------ specs/phase0/p2p-interface.md | 8 ++++---- 4 files changed, 12 insertions(+), 12 deletions(-) diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md index d22436c9d8..6c7a31508b 100644 --- a/specs/bellatrix/fork-choice.md +++ b/specs/bellatrix/fork-choice.md @@ -170,7 +170,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert store.finalized_checkpoint.root == get_ancestor_at_epoch_boundary( + assert store.finalized_checkpoint.root == get_checkpoint_block( store, block.parent_root, store.finalized_checkpoint.epoch, diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index e76e159c4f..8a33fecc56 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -82,7 +82,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert store.finalized_checkpoint.root == get_ancestor_at_epoch_boundary( + assert store.finalized_checkpoint.root == get_checkpoint_block( store, block.parent_root, store.finalized_checkpoint.epoch, diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 478dd21427..a2bbb8f629 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -18,7 +18,7 @@ - 
[`get_current_slot`](#get_current_slot) - [`compute_slots_since_epoch_start`](#compute_slots_since_epoch_start) - [`get_ancestor`](#get_ancestor) - - [`get_ancestor_at_epoch_boundary`](#get_ancestor_at_epoch_boundary) + - [`get_checkpoint_block`](#get_checkpoint_block) - [`get_weight`](#get_weight) - [`get_voting_source`](#get_voting_source) - [`filter_block_tree`](#filter_block_tree) @@ -193,10 +193,10 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: return root ``` -#### `get_ancestor_at_epoch_boundary` +#### `get_checkpoint_block` ```python -def get_ancestor_at_epoch_boundary(store: Store, root: Root, epoch: Epoch) -> Root: +def get_checkpoint_block(store: Store, root: Root, epoch: Epoch) -> Root: """ Compute the epoch boundary block for epoch ``epoch`` in the chain of block ``root`` """ @@ -292,7 +292,7 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB correct_finalized = ( store.finalized_checkpoint.epoch == GENESIS_EPOCH - or store.finalized_checkpoint.root == get_ancestor_at_epoch_boundary( + or store.finalized_checkpoint.root == get_checkpoint_block( store, block_root, store.finalized_checkpoint.epoch, @@ -457,7 +457,7 @@ def validate_on_attestation(store: Store, attestation: Attestation, is_from_bloc assert store.blocks[attestation.data.beacon_block_root].slot <= attestation.data.slot # LMD vote must be consistent with FFG vote target - assert target.root == get_ancestor_at_epoch_boundary(store, attestation.data.beacon_block_root, target.epoch) + assert target.root == get_checkpoint_block(store, attestation.data.beacon_block_root, target.epoch) # Attestations can only affect the fork choice of subsequent slots. # Delay consideration in the fork choice until their slot is in the past. 
@@ -520,7 +520,7 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert store.finalized_checkpoint.root == get_ancestor_at_epoch_boundary( + assert store.finalized_checkpoint.root == get_checkpoint_block( store, block.parent_root, store.finalized_checkpoint.epoch, diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index 56c1b8cfb1..5401a15da5 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -317,7 +317,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block` - _[REJECT]_ The block's parent (defined by `block.parent_root`) passes validation. - _[REJECT]_ The block is from a higher slot than its parent. - _[REJECT]_ The current `finalized_checkpoint` is an ancestor of `block` -- i.e. - `get_ancestor_at_epoch_boundary(store, block.parent_root, store.finalized_checkpoint.epoch) + `get_checkpoint_block(store, block.parent_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root` - _[REJECT]_ The block is proposed by the expected `proposer_index` for the block's slot in the context of the current shuffling (defined by `parent_root`/`slot`). @@ -356,7 +356,7 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_ (a client MAY queue aggregates for processing once block is retrieved). - _[REJECT]_ The block being voted for (`aggregate.data.beacon_block_root`) passes validation. - _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `aggregate.data.beacon_block_root` -- i.e. 
- `get_ancestor_at_epoch_boundary(store, aggregate.data.beacon_block_root, finalized_checkpoint.epoch) + `get_checkpoint_block(store, aggregate.data.beacon_block_root, finalized_checkpoint.epoch) == store.finalized_checkpoint.root` @@ -425,9 +425,9 @@ The following validations MUST pass before forwarding the `attestation` on the s (a client MAY queue attestations for processing once block is retrieved). - _[REJECT]_ The block being voted for (`attestation.data.beacon_block_root`) passes validation. - _[REJECT]_ The attestation's target block is an ancestor of the block named in the LMD vote -- i.e. - `get_ancestor_at_epoch_boundary(store, attestation.data.beacon_block_root, attestation.data.target.epoch) == attestation.data.target.root` + `get_checkpoint_block(store, attestation.data.beacon_block_root, attestation.data.target.epoch) == attestation.data.target.root` - _[IGNORE]_ The current `finalized_checkpoint` is an ancestor of the `block` defined by `attestation.data.beacon_block_root` -- i.e. 
- `get_ancestor_at_epoch_boundary(store, attestation.data.beacon_block_root, store.finalized_checkpoint.epoch) + `get_checkpoint_block(store, attestation.data.beacon_block_root, store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root` From 36fcb81b88c87f279cd4b46d094eb58514e9be8b Mon Sep 17 00:00:00 2001 From: Roberto Saltini Date: Tue, 18 Apr 2023 13:26:16 +1000 Subject: [PATCH 185/210] Break long statement into two statements --- specs/bellatrix/fork-choice.md | 3 ++- specs/deneb/fork-choice.md | 3 ++- specs/phase0/fork-choice.md | 15 +++++++++------ 3 files changed, 13 insertions(+), 8 deletions(-) diff --git a/specs/bellatrix/fork-choice.md b/specs/bellatrix/fork-choice.md index 6c7a31508b..68519ff908 100644 --- a/specs/bellatrix/fork-choice.md +++ b/specs/bellatrix/fork-choice.md @@ -170,11 +170,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert store.finalized_checkpoint.root == get_checkpoint_block( + finalized_checkpoint_block = get_checkpoint_block( store, block.parent_root, store.finalized_checkpoint.epoch, ) + assert store.finalized_checkpoint.root == finalized_checkpoint_block # Check the block is valid and compute the post-state state = pre_state.copy() diff --git a/specs/deneb/fork-choice.md b/specs/deneb/fork-choice.md index 8a33fecc56..9faa11077f 100644 --- a/specs/deneb/fork-choice.md +++ b/specs/deneb/fork-choice.md @@ -82,11 +82,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert store.finalized_checkpoint.root == get_checkpoint_block( + finalized_checkpoint_block = 
get_checkpoint_block( store, block.parent_root, store.finalized_checkpoint.epoch, ) + assert store.finalized_checkpoint.root == finalized_checkpoint_block # [New in Deneb] # Check if blob data is available diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index a2bbb8f629..8582547fdb 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -290,13 +290,15 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB voting_source.epoch + 2 >= current_epoch ) + finalized_checkpoint_block = get_checkpoint_block( + store, + block.parent_root, + store.finalized_checkpoint.epoch, + ) + correct_finalized = ( store.finalized_checkpoint.epoch == GENESIS_EPOCH - or store.finalized_checkpoint.root == get_checkpoint_block( - store, - block_root, - store.finalized_checkpoint.epoch, - ) + or store.finalized_checkpoint.root == finalized_checkpoint_block ) # If expected finalized/justified, add to viable block-tree and signal viability to parent. 
@@ -520,11 +522,12 @@ def on_block(store: Store, signed_block: SignedBeaconBlock) -> None: finalized_slot = compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) assert block.slot > finalized_slot # Check block is a descendant of the finalized block at the checkpoint finalized slot - assert store.finalized_checkpoint.root == get_checkpoint_block( + finalized_checkpoint_block = get_checkpoint_block( store, block.parent_root, store.finalized_checkpoint.epoch, ) + assert store.finalized_checkpoint.root == finalized_checkpoint_block # Check the block is valid and compute the post-state state = pre_state.copy() From c98560597351529f6f782fd434a2937d0f6c296e Mon Sep 17 00:00:00 2001 From: Roberto Saltini Date: Tue, 18 Apr 2023 13:49:08 +1000 Subject: [PATCH 186/210] Fix copy and paste error --- specs/phase0/fork-choice.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 8582547fdb..0d5bfb4d79 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -292,7 +292,7 @@ def filter_block_tree(store: Store, block_root: Root, blocks: Dict[Root, BeaconB finalized_checkpoint_block = get_checkpoint_block( store, - block.parent_root, + block_root, store.finalized_checkpoint.epoch, ) From b5bd90dd5f6028d59de1fb5c97c5da95ac53b3aa Mon Sep 17 00:00:00 2001 From: Roberto Saltini Date: Tue, 18 Apr 2023 13:51:13 +1000 Subject: [PATCH 187/210] Applied changes to tests --- .../test/phase0/fork_choice/test_get_head.py | 22 +++++++---- .../test/phase0/fork_choice/test_on_block.py | 38 +++++++++++++------ 2 files changed, 40 insertions(+), 20 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py index f5960ff703..f5c3aae15d 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py @@ 
-479,7 +479,7 @@ def test_voting_source_within_two_epoch(spec, state): - store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and - store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and - store.voting_source[block_root].epoch + 2 >= current_epoch, and - - store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot) + - store.finalized_checkpoint.root == get_checkpoint_block(store, block_root, store.finalized_checkpoint.epoch) """ test_steps = [] # Initialization @@ -536,8 +536,11 @@ def test_voting_source_within_two_epoch(spec, state): assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch # assert store.voting_source[last_fork_block_root].epoch + 2 >= \ # spec.compute_epoch_at_slot(spec.get_current_slot(store)) - finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - assert store.finalized_checkpoint.root == spec.get_ancestor(store, last_fork_block_root, finalized_slot) + assert store.finalized_checkpoint.root == spec.get_checkpoint_block( + store, + last_fork_block_root, + store.finalized_checkpoint.epoch + ) assert spec.get_head(store) == last_fork_block_root yield 'steps', test_steps @@ -552,7 +555,7 @@ def test_voting_source_beyond_two_epoch(spec, state): - store.voting_source[block_root].epoch != store.justified_checkpoint.epoch, and - store.unrealized_justifications[block_root].epoch >= store.justified_checkpoint.epoch, and - store.voting_source[block_root].epoch + 2 < current_epoch, and - - store.finalized_checkpoint.root == get_ancestor(store, block_root, finalized_slot) + - store.finalized_checkpoint.root == get_checkpoint_block(store, block_root, store.finalized_checkpoint.epoch) """ test_steps = [] # Initialization @@ -617,8 +620,11 @@ def test_voting_source_beyond_two_epoch(spec, state): assert store.unrealized_justifications[last_fork_block_root].epoch >= store.justified_checkpoint.epoch # 
assert store.voting_source[last_fork_block_root].epoch + 2 < \ # spec.compute_epoch_at_slot(spec.get_current_slot(store)) - finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - assert store.finalized_checkpoint.root == spec.get_ancestor(store, last_fork_block_root, finalized_slot) + assert store.finalized_checkpoint.root == spec.get_checkpoint_block( + store, + last_fork_block_root, + store.finalized_checkpoint.epoch + ) assert spec.get_head(store) == correct_head yield 'steps', test_steps @@ -641,7 +647,7 @@ def test_incorrect_finalized(spec, state): # Check that the store doesn't allow for a head block that has: # - store.voting_source[block_root].epoch == store.justified_checkpoint.epoch, and # - store.finalized_checkpoint.epoch != GENESIS_EPOCH, and - # - store.finalized_checkpoint.root != get_ancestor(store, block_root, finalized_slot) + # - store.finalized_checkpoint.root != get_checkpoint_block(store, block_root, store.finalized_checkpoint.epoch) test_steps = [] # Initialization store, anchor_block = get_genesis_forkchoice_store_and_block(spec, state) @@ -718,7 +724,7 @@ def test_incorrect_finalized(spec, state): assert store.voting_source[last_fork_block_root].epoch == store.justified_checkpoint.epoch assert store.finalized_checkpoint.epoch != spec.GENESIS_EPOCH finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - assert store.finalized_checkpoint.root != spec.get_ancestor(store, last_fork_block_root, finalized_slot) + assert store.finalized_checkpoint.root != spec.get_checkpoint_block(store, block_root, store.finalized_checkpoint.epoch) assert spec.get_head(store) != last_fork_block_root assert spec.get_head(store) == head_root diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index 0af7753391..a3f09c7c96 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ 
b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -352,8 +352,7 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): # NOTE: Do not call `on_tick` here yield from add_block(spec, store, block, test_steps) - finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot) + ancestor_at_finalized_slot = spec.get_checkpoint_block(store, pre_store_justified_checkpoint_root, store.finalized_checkpoint.epoch) assert ancestor_at_finalized_slot != store.finalized_checkpoint.root assert store.finalized_checkpoint == another_state.finalized_checkpoint @@ -428,8 +427,7 @@ def test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): for block in all_blocks: yield from tick_and_add_block(spec, store, block, test_steps) - finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - ancestor_at_finalized_slot = spec.get_ancestor(store, pre_store_justified_checkpoint_root, finalized_slot) + ancestor_at_finalized_slot = spec.get_checkpoint_block(store, pre_store_justified_checkpoint_root, store.finalized_checkpoint.epoch) assert ancestor_at_finalized_slot == store.finalized_checkpoint.root assert store.finalized_checkpoint == another_state.finalized_checkpoint @@ -857,10 +855,18 @@ def test_incompatible_justification_update_start_of_epoch(spec, state): # Now add the blocks & check that justification update was triggered for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) - finalized_slot = spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) - assert spec.get_ancestor(store, last_block_root, finalized_slot) == state.finalized_checkpoint.root - justified_slot = spec.compute_start_slot_at_epoch(state.current_justified_checkpoint.epoch) - assert spec.get_ancestor(store, last_block_root, justified_slot) != 
state.current_justified_checkpoint.root + finalized_checkpoint_block = spec.get_checkpoint_block( + store, + last_block_root, + state.finalized_checkpoint.epoch, + ) + assert finalized_checkpoint_block == state.finalized_checkpoint.root + justified_checkpoint_block = spec.get_checkpoint_block( + store, + last_block_root, + state.current_justified_checkpoint.epoch, + ) + assert justified_checkpoint_block != state.current_justified_checkpoint.root assert store.finalized_checkpoint.epoch == 4 assert store.justified_checkpoint.epoch == 6 @@ -934,10 +940,18 @@ def test_incompatible_justification_update_end_of_epoch(spec, state): # Now add the blocks & check that justification update was triggered for signed_block in signed_blocks: yield from tick_and_add_block(spec, store, signed_block, test_steps) - finalized_slot = spec.compute_start_slot_at_epoch(state.finalized_checkpoint.epoch) - assert spec.get_ancestor(store, last_block_root, finalized_slot) == state.finalized_checkpoint.root - justified_slot = spec.compute_start_slot_at_epoch(state.current_justified_checkpoint.epoch) - assert spec.get_ancestor(store, last_block_root, justified_slot) != state.current_justified_checkpoint.root + finalized_checkpoint_block = spec.get_checkpoint_block( + store, + last_block_root, + state.finalized_checkpoint.epoch, + ) + assert finalized_checkpoint_block == state.finalized_checkpoint.root + justified_checkpoint_block = spec.get_checkpoint_block( + store, + last_block_root, + state.current_justified_checkpoint.epoch, + ) + assert justified_checkpoint_block != state.current_justified_checkpoint.root assert store.finalized_checkpoint.epoch == 4 assert store.justified_checkpoint.epoch == 6 From 313439a04b121a1c53585396dbea6df02026404f Mon Sep 17 00:00:00 2001 From: Roberto Saltini Date: Tue, 18 Apr 2023 13:54:31 +1000 Subject: [PATCH 188/210] Fix lint errors --- .../test/phase0/fork_choice/test_get_head.py | 6 +++++- .../test/phase0/fork_choice/test_on_block.py | 12 ++++++++++-- 2 files 
changed, 15 insertions(+), 3 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py index f5c3aae15d..30f94b854c 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_get_head.py @@ -724,7 +724,11 @@ def test_incorrect_finalized(spec, state): assert store.voting_source[last_fork_block_root].epoch == store.justified_checkpoint.epoch assert store.finalized_checkpoint.epoch != spec.GENESIS_EPOCH finalized_slot = spec.compute_start_slot_at_epoch(store.finalized_checkpoint.epoch) - assert store.finalized_checkpoint.root != spec.get_checkpoint_block(store, block_root, store.finalized_checkpoint.epoch) + assert store.finalized_checkpoint.root != spec.get_checkpoint_block( + store, + block_root, + store.finalized_checkpoint.epoch + ) assert spec.get_head(store) != last_fork_block_root assert spec.get_head(store) == head_root diff --git a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py index a3f09c7c96..840413a364 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py +++ b/tests/core/pyspec/eth2spec/test/phase0/fork_choice/test_on_block.py @@ -352,7 +352,11 @@ def test_new_finalized_slot_is_not_justified_checkpoint_ancestor(spec, state): # NOTE: Do not call `on_tick` here yield from add_block(spec, store, block, test_steps) - ancestor_at_finalized_slot = spec.get_checkpoint_block(store, pre_store_justified_checkpoint_root, store.finalized_checkpoint.epoch) + ancestor_at_finalized_slot = spec.get_checkpoint_block( + store, + pre_store_justified_checkpoint_root, + store.finalized_checkpoint.epoch + ) assert ancestor_at_finalized_slot != store.finalized_checkpoint.root assert store.finalized_checkpoint == another_state.finalized_checkpoint @@ -427,7 +431,11 @@ def 
test_new_finalized_slot_is_justified_checkpoint_ancestor(spec, state): for block in all_blocks: yield from tick_and_add_block(spec, store, block, test_steps) - ancestor_at_finalized_slot = spec.get_checkpoint_block(store, pre_store_justified_checkpoint_root, store.finalized_checkpoint.epoch) + ancestor_at_finalized_slot = spec.get_checkpoint_block( + store, + pre_store_justified_checkpoint_root, + store.finalized_checkpoint.epoch + ) assert ancestor_at_finalized_slot == store.finalized_checkpoint.root assert store.finalized_checkpoint == another_state.finalized_checkpoint From ffb84598cf5f7d29bd6220e977f76d47c599bb4f Mon Sep 17 00:00:00 2001 From: Roberto Saltini Date: Tue, 18 Apr 2023 16:03:10 +1000 Subject: [PATCH 189/210] Fixed doc in get_checkpoint_block --- specs/phase0/fork-choice.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 0d5bfb4d79..e25ae6e901 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -198,7 +198,7 @@ def get_ancestor(store: Store, root: Root, slot: Slot) -> Root: ```python def get_checkpoint_block(store: Store, root: Root, epoch: Epoch) -> Root: """ - Compute the epoch boundary block for epoch ``epoch`` in the chain of block ``root`` + Compute the checkpoint block for epoch ``epoch`` in the chain of block ``root`` """ epoch_first_slot = compute_start_slot_at_epoch(epoch) return get_ancestor(store, root, epoch_first_slot) From 21d4370fd5a36112f035880d528e59a3a2c523ff Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 18 Apr 2023 18:31:34 +0800 Subject: [PATCH 190/210] Add docs about how to add a new feature proposal in consensus-specs and online viewer support (#3239) * Add docs * update link to template * Add more info * Try mkdocs * Create docs.yml * Update docs.yml * update * update * update * Try mkdocs * Add "B: Make it executable for pytest and test generator" section * Use mkdocs-material * Use 
`mkdocs-awesome-pages-plugin` to create custom specs order * Add toc permalink * Update site_url * Add .pages files for navigations. Update highlight style * Dark theme * Fix list indent --- .github/workflows/docs.yml | 24 +++ .gitignore | 8 + Makefile | 25 +++ README.md | 4 + docs/.pages | 5 + docs/README.md | 70 ++++++++ docs/docs/new-feature.md | 163 +++++++++++++++++++ docs/docs/templates/beacon-chain-template.md | 84 ++++++++++ docs/light-client/.pages | 5 + docs/light-client/index.md | 1 + docs/stylesheets/extra.css | 34 ++++ fork_choice/.pages | 7 + mkdocs.yml | 40 +++++ setup.py | 1 + specs/.pages | 4 + specs/_features/eip6110/beacon-chain.md | 2 +- specs/_features/sharding/p2p-interface.md | 2 +- 17 files changed, 477 insertions(+), 2 deletions(-) create mode 100644 .github/workflows/docs.yml create mode 100644 docs/.pages create mode 100644 docs/README.md create mode 100644 docs/docs/new-feature.md create mode 100644 docs/docs/templates/beacon-chain-template.md create mode 100644 docs/light-client/.pages create mode 100644 docs/light-client/index.md create mode 100644 docs/stylesheets/extra.css create mode 100644 fork_choice/.pages create mode 100644 mkdocs.yml create mode 100644 specs/.pages diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml new file mode 100644 index 0000000000..eab3bba173 --- /dev/null +++ b/.github/workflows/docs.yml @@ -0,0 +1,24 @@ + +name: Publish docs +on: + push: + branches: + - master +permissions: + contents: write +jobs: + deploy: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Build docs + run: make copy_docs + - uses: actions/setup-python@v4 + with: + python-version: 3.x + - uses: actions/cache@v2 + with: + key: ${{ github.ref }} + path: .cache + - run: pip install -e .[docs] + - run: mkdocs gh-deploy --force diff --git a/.gitignore b/.gitignore index c56a658ce2..82026c27bd 100644 --- a/.gitignore +++ b/.gitignore @@ -35,3 +35,11 @@ tests/core/pyspec/eth2spec/test_results.xml # TOC 
tool outputs temporary files *.tmp + +# docs reader build +docs/specs +docs/sync +docs/ssz +docs/fork_choice +docs/README.md +site diff --git a/Makefile b/Makefile index 1ec399e3a8..ab5521663a 100644 --- a/Makefile +++ b/Makefile @@ -19,6 +19,11 @@ GENERATORS = $(sort $(dir $(wildcard $(GENERATOR_DIR)/*/.))) # Map this list of generator paths to "gen_{generator name}" entries GENERATOR_TARGETS = $(patsubst $(GENERATOR_DIR)/%/, gen_%, $(GENERATORS)) GENERATOR_VENVS = $(patsubst $(GENERATOR_DIR)/%, $(GENERATOR_DIR)/%venv, $(GENERATORS)) +# Documents +DOCS_DIR = ./docs +SSZ_DIR = ./ssz +SYNC_DIR = ./sync +FORK_CHOICE_DIR = ./fork_choice # To check generator matching: #$(info $$GENERATOR_TARGETS is [${GENERATOR_TARGETS}]) @@ -214,3 +219,23 @@ detect_generator_incomplete: $(TEST_VECTOR_DIR) detect_generator_error_log: $(TEST_VECTOR_DIR) [ -f $(GENERATOR_ERROR_LOG_FILE) ] && echo "[ERROR] $(GENERATOR_ERROR_LOG_FILE) file exists" || echo "[PASSED] error log file does not exist" + + +# For docs reader +install_docs: + python3 -m venv venv; . venv/bin/activate; python3 -m pip install -e .[docs]; + +copy_docs: + cp -r $(SPEC_DIR) $(DOCS_DIR); + cp -r $(SYNC_DIR) $(DOCS_DIR); + cp -r $(SSZ_DIR) $(DOCS_DIR); + cp -r $(FORK_CHOICE_DIR) $(DOCS_DIR); + cp $(CURRENT_DIR)/README.md $(DOCS_DIR)/README.md + +build_docs: copy_docs + . venv/bin/activate; + mkdocs build + +serve_docs: + . 
venv/bin/activate; + mkdocs serve diff --git a/README.md b/README.md index d0d6b222d0..28d61ad404 100644 --- a/README.md +++ b/README.md @@ -65,6 +65,10 @@ Documentation on the different components used during spec writing can be found * [YAML Test Generators](tests/generators/README.md) * [Executable Python Spec, with Py-tests](tests/core/pyspec/README.md) +## Online viewer of the latest release (latest `master` branch) + +[Ethereum Consensus Specs](https://ethereum.github.io/consensus-specs/) + ## Consensus spec tests Conformance tests built from the executable python spec are available in the [Ethereum Proof-of-Stake Consensus Spec Tests](https://github.com/ethereum/consensus-spec-tests) repo. Compressed tarballs are available in [releases](https://github.com/ethereum/consensus-spec-tests/releases). diff --git a/docs/.pages b/docs/.pages new file mode 100644 index 0000000000..d9e382ede5 --- /dev/null +++ b/docs/.pages @@ -0,0 +1,5 @@ +nav: + - Home: + - README.md + - specs + - ... diff --git a/docs/README.md b/docs/README.md new file mode 100644 index 0000000000..9f2528263e --- /dev/null +++ b/docs/README.md @@ -0,0 +1,70 @@ +# Ethereum Proof-of-Stake Consensus Specifications + +[![Join the chat at https://discord.gg/qGpsxSA](https://img.shields.io/badge/chat-on%20discord-blue.svg)](https://discord.gg/qGpsxSA) [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) + +To learn more about proof-of-stake and sharding, see the [PoS documentation](https://ethereum.org/en/developers/docs/consensus-mechanisms/pos/), [sharding documentation](https://ethereum.org/en/upgrades/sharding/) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm). + +This repository hosts the current Ethereum proof-of-stake specifications. 
Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed-upon changes to the spec can be made through pull requests. + +## Specs + +[![GitHub release](https://img.shields.io/github/v/release/ethereum/eth2.0-specs)](https://github.com/ethereum/eth2.0-specs/releases/) [![PyPI version](https://badge.fury.io/py/eth2spec.svg)](https://badge.fury.io/py/eth2spec) + +Core specifications for Ethereum proof-of-stake clients can be found in [specs](specs/). These are divided into features. +Features are researched and developed in parallel, and then consolidated into sequential upgrades when ready. + +### Stable Specifications + +| Seq. | Code Name | Fork Epoch | Specs | +| - | - | - | - | +| 0 | **Phase0** |`0` |
  • Core
    • [The beacon chain](specs/phase0/beacon-chain.md)
    • [Deposit contract](specs/phase0/deposit-contract.md)
    • [Beacon chain fork choice](specs/phase0/fork-choice.md)
  • Additions
    • [Honest validator guide](specs/phase0/validator.md)
    • [P2P networking](specs/phase0/p2p-interface.md)
    • [Weak subjectivity](specs/phase0/weak-subjectivity.md)
| +| 1 | **Altair** | `74240` |
  • Core
    • [Beacon chain changes](specs/altair/beacon-chain.md)
    • [Altair fork](specs/altair/fork.md)
  • Additions
    • [Light client sync protocol](specs/altair/light-client/sync-protocol.md) ([full node](specs/altair/light-client/full-node.md), [light client](specs/altair/light-client/light-client.md), [networking](specs/altair/light-client/p2p-interface.md))
    • [Honest validator guide changes](specs/altair/validator.md)
    • [P2P networking](specs/altair/p2p-interface.md)
| +| 2 | **Bellatrix**
(["The Merge"](https://ethereum.org/en/upgrades/merge/)) | `144896` |
  • Core
    • [Beacon Chain changes](specs/bellatrix/beacon-chain.md)
    • [Bellatrix fork](specs/bellatrix/fork.md)
    • [Fork choice changes](specs/bellatrix/fork-choice.md)
  • Additions
    • [Honest validator guide changes](specs/bellatrix/validator.md)
    • [P2P networking](specs/bellatrix/p2p-interface.md)
| +| 3 | **Capella** | `194048` |
  • Core
    • [Beacon chain changes](specs/capella/beacon-chain.md)
    • [Capella fork](specs/capella/fork.md)
  • Additions
    • [Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))
    • [Validator additions](specs/capella/validator.md)
    • [P2P networking](specs/capella/p2p-interface.md)
| + +### In-development Specifications +| Code Name or Topic | Specs | Notes | +| - | - | - | +| Deneb (tentative) |
  • Core
    • [Beacon Chain changes](specs/deneb/beacon-chain.md)
    • [Deneb fork](specs/deneb/fork.md)
    • [Polynomial commitments](specs/deneb/polynomial-commitments.md)
    • [Fork choice changes](specs/deneb/fork-choice.md)
  • Additions
    • [Light client sync protocol changes](specs/deneb/light-client/sync-protocol.md) ([fork](specs/deneb/light-client/fork.md), [full node](specs/deneb/light-client/full-node.md), [networking](specs/deneb/light-client/p2p-interface.md))
    • [Honest validator guide changes](specs/deneb/validator.md)
    • [P2P networking](specs/deneb/p2p-interface.md)
| +| Sharding (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/sharding/beacon-chain.md)
  • Additions
    • [P2P networking](specs/_features/sharding/p2p-interface.md)
| +| Custody Game (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)
  • Additions
    • [Honest validator guide changes](specs/_features/custody_game/validator.md)
| Dependent on sharding | +| Data Availability Sampling (outdated) |
  • Core
    • [Core types and functions](specs/_features/das/das-core.md)
    • [Fork choice changes](specs/_features/das/fork-choice.md)
  • Additions
    • [P2P Networking](specs/_features/das/p2p-interface.md)
    • [Sampling process](specs/_features/das/sampling.md)
|
  • Dependent on sharding
  • [Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)
| +| EIP-6110 |
  • Core
    • [Beacon Chain changes](specs/_features/eip6110/beacon-chain.md)
    • [EIP-6110 fork](specs/_features/eip6110/fork.md)
  • Additions
    • [Honest validator guide changes](specs/_features/eip6110/validator.md)
| + +### Accompanying documents can be found in [specs](specs) and include: + +* [SimpleSerialize (SSZ) spec](ssz/simple-serialize.md) +* [Merkle proof formats](ssz/merkle-proofs.md) +* [General test format](tests/formats/README.md) + +## Additional specifications for client implementers + +Additional specifications and standards outside of requisite client functionality can be found in the following repos: + +* [Beacon APIs](https://github.com/ethereum/beacon-apis) +* [Beacon Metrics](https://github.com/ethereum/beacon-metrics/) + +## Design goals + +The following are the broad design goals for the Ethereum proof-of-stake consensus specifications: +* to minimize complexity, even at the cost of some losses in efficiency +* to remain live through major network partitions and when very large portions of nodes go offline +* to select all components such that they are either quantum secure or can be easily swapped out for quantum secure counterparts when available +* to utilize crypto and design techniques that allow for a large participation of validators in total and per unit time +* to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain) + +## Useful external resources + +* [Design Rationale](https://notes.ethereum.org/s/rkhCgQteN#) +* [Phase 0 Onboarding Document](https://notes.ethereum.org/s/Bkn3zpwxB) +* [Combining GHOST and Casper paper](https://arxiv.org/abs/2003.03052) + +## For spec contributors + +Documentation on the different components used during spec writing can be found here: +* [YAML Test Generators](tests/generators/README.md) +* [Executable Python Spec, with Py-tests](tests/core/pyspec/README.md) + +## Consensus spec tests + +Conformance tests built from the executable python spec are available in the [Ethereum Proof-of-Stake Consensus Spec Tests](https://github.com/ethereum/consensus-spec-tests) repo. 
Compressed tarballs are available in [releases](https://github.com/ethereum/consensus-spec-tests/releases). diff --git a/docs/docs/new-feature.md b/docs/docs/new-feature.md new file mode 100644 index 0000000000..5e6180329f --- /dev/null +++ b/docs/docs/new-feature.md @@ -0,0 +1,163 @@ +# How to add a new feature proposal in consensus-specs + + + +## Table of Contents + +- [A. Make it executable for linter checks](#a-make-it-executable-for-linter-checks) + - [1. Create a folder under `./specs/_features`](#1-create-a-folder-under-specs_features) + - [2. Choose the "previous fork" to extend: usually, use the scheduled or the latest mainnet fork version.](#2-choose-the-previous-fork-to-extend-usually-use-the-scheduled-or-the-latest-mainnet-fork-version) + - [3. Write down your proposed `beacon-chain.md` change](#3-write-down-your-proposed-beacon-chainmd-change) + - [4. Add `fork.md`](#4-add-forkmd) + - [5. Make it executable](#5-make-it-executable) +- [B: Make it executable for pytest and test generator](#b-make-it-executable-for-pytest-and-test-generator) + - [1. Add `light-client/*` docs if you updated the content of `BeaconBlock`](#1-add-light-client-docs-if-you-updated-the-content-of-beaconblock) + - [2. Add the mainnet and minimal presets and update the configs](#2-add-the-mainnet-and-minimal-presets-and-update-the-configs) + - [3. Update `context.py`](#3-update-contextpy) + - [4. Update `constants.py`](#4-update-constantspy) + - [5. Update `genesis.py`:](#5-update-genesispy) + - [6. To add fork transition tests, update fork_transition.py](#6-to-add-fork-transition-tests-update-fork_transitionpy) + - [7. Update CI configurations](#7-update-ci-configurations) +- [Others](#others) + - [Bonus](#bonus) + - [Need help?](#need-help) + + + + +## A. Make it executable for linter checks + +### 1. Create a folder under `./specs/_features` + +For example, if it's an `EIP-9999` CL spec, you can create a `./specs/_features/eip9999` folder. + +### 2. 
Choose the "previous fork" to extend: usually, use the scheduled or the latest mainnet fork version. + +For example, if the latest fork is Capella, use `./specs/capella` content as your "previous fork". + +### 3. Write down your proposed `beacon-chain.md` change +- You can either use [Beacon Chain Spec Template](./templates/beacon-chain-template.md), or make a copy of the latest fork content and then edit it. +- Tips: + - We use [`doctoc`](https://www.npmjs.com/package/doctoc) tool to generate the table of content. + ``` + cd consensus-specs + doctoc specs + ``` + - The differences between "Constants", "Configurations", and "Presets": + - Constants: The constant that should never be changed. + - Configurations: The settings that we may change for different networks. + - Presets: The settings that we may change for testing. + - Readability and simplicity are more important than efficiency and optimization. + - Use simple Python rather than the fancy Python dark magic. + +### 4. Add `fork.md` +You can refer to the previous fork's `fork.md` file. +### 5. Make it executable +- Update [`constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/constants.py) with the new feature name. +- Update [`setup.py`](https://github.com/ethereum/consensus-specs/blob/dev/setup.py): + - Add a new `SpecBuilder` with the new feature name constant. e.g., `EIP9999SpecBuilder` + - Add the new `SpecBuilder` to `spec_builders` list. + - Add the path of the new markdown files in `finalize_options` function. + +## B: Make it executable for pytest and test generator + +### 1. Add `light-client/*` docs if you updated the content of `BeaconBlock` +- You can refer to the previous fork's `light-client/*` file. +- Add the path of the new markdown files in `setup.py`'s `finalize_options` function. + +### 2. 
Add the mainnet and minimal presets and update the configs +- Add presets: `presets/mainnet/.yaml` and `presets/minimal/.yaml` +- Update configs: `configs/mainnet.yaml` and `configs/minimal.yaml` + +### 3. Update [`context.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/context.py) +- Update `spec_targets` by adding `` + +```python +from eth2spec.eip9999 import mainnet as spec_eip9999_mainnet, minimal as spec_eip9999_minimal + +... + +spec_targets: Dict[PresetBaseName, Dict[SpecForkName, Spec]] = { + MINIMAL: { + ... + EIP9999: spec_eip9999_minimal, + }, + MAINNET: { + ... + EIP9999: spec_eip9999_mainnet + }, +} +``` + +### 4. Update [`constants.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/constants.py) +- Add `` to `ALL_PHASES` and `TESTGEN_FORKS` + +### 5. Update [`genesis.py`](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/genesis.py): + +We use `create_genesis_state` to create the default `state` in tests. + +- Update `create_genesis_state` by adding `fork_version` setting: + +```python +def create_genesis_state(spec, validator_balances, activation_threshold): + ... + if spec.fork == ALTAIR: + current_version = spec.config.ALTAIR_FORK_VERSION + ... + elif spec.fork == EIP9999: + # Add the previous fork version of given fork + previous_version = spec.config. + current_version = spec.config.EIP9999_FORK_VERSION +``` + +- If the given feature changes `BeaconState` fields, you have to set the initial values by adding: + +```python +def create_genesis_state(spec, validator_balances, activation_threshold): + ... + if is_post_eip9999(spec): + state. = + + return state +``` + +- If the given feature changes `ExecutionPayload` fields, you have to set the initial values by updating `get_sample_genesis_execution_payload_header` helper. + +### 6. 
To add fork transition tests, update [fork_transition.py](https://github.com/ethereum/consensus-specs/blob/dev/tests/core/pyspec/eth2spec/test/helpers/fork_transition.py) + +```python +def do_fork(state, spec, post_spec, fork_epoch, with_block=True, sync_aggregate=None, operation_dict=None): + ... + + if post_spec.fork == ALTAIR: + state = post_spec.upgrade_to_altair(state) + ... + elif post_spec.fork == EIP9999: + state = post_spec.upgrade_to_eip9999(state) + + ... + + if post_spec.fork == ALTAIR: + assert state.fork.previous_version == post_spec.config.GENESIS_FORK_VERSION + assert state.fork.current_version == post_spec.config.ALTAIR_FORK_VERSION + ... + elif post_spec.fork == EIP9999: + assert state.fork.previous_version == post_spec.config. + assert state.fork.current_version == post_spec.config.EIP9999_FORK_VERSION + + ... +``` + +### 7. Update CI configurations +- Update [GitHub Actions config](https://github.com/ethereum/consensus-specs/blob/dev/.github/workflows/run-tests.yml) + - Update `pyspec-tests.strategy.matrix.version` list by adding new feature to it +- Update [CircleCI config](https://github.com/ethereum/consensus-specs/blob/dev/.circleci/config.yml) + - Add new job to the `workflows.test_spec.jobs` + +## Others + +### Bonus +- Add `validator.md` if honest validator behavior changes with the new feature. + +### Need help? +You can tag spec elves for cleaning up your PR. 
🧚 diff --git a/docs/docs/templates/beacon-chain-template.md b/docs/docs/templates/beacon-chain-template.md new file mode 100644 index 0000000000..4d22d3908e --- /dev/null +++ b/docs/docs/templates/beacon-chain-template.md @@ -0,0 +1,84 @@ +# `beacon-chain.md` Template + +# -- The Beacon Chain + +## Table of contents + + + + + + + + + +## Introduction + +## Notation + +## Custom types + +## Constants + +### [CATEGORY OF CONSTANTS] + +| Name | Value | +| - | - | +| `` | ``` | + +## Preset + + +### [CATEGORY OF PRESETS] + +| Name | Value | +| - | - | +| `` | `` | + +## Configuration + +### [CATEGORY OF CONFIGURATIONS] + +| Name | Value | +| - | - | +| `` | `` | + +## Containers + +### [CATEGORY OF CONTAINERS] + +#### `CONTAINER_NAME` + +```python +class CONTAINER_NAME(Container): + FILED_NAME: SSZ_TYPE +``` + +## Helper functions + +### [CATEGORY OF HELPERS] + +```python + +``` + +### Epoch processing + + +### Block processing + + + + +## Testing + +*Note*: The function `initialize_beacon_state_from_eth1` is modified for pure testing only. + +```python +def initialize_beacon_state_from_eth1(eth1_block_hash: Hash32, + eth1_timestamp: uint64, + deposits: Sequence[Deposit], + execution_payload_header: ExecutionPayloadHeader=ExecutionPayloadHeader() + ) -> BeaconState: + ... 
+``` diff --git a/docs/light-client/.pages b/docs/light-client/.pages new file mode 100644 index 0000000000..a372a5d2e5 --- /dev/null +++ b/docs/light-client/.pages @@ -0,0 +1,5 @@ +nav: + - 'Index': index.md + - 'Altair': specs/altair/light-client/sync-protocol + - 'Capella': specs/capella/light-client/sync-protocol + - 'Deneb': specs/deneb/light-client/sync-protocol diff --git a/docs/light-client/index.md b/docs/light-client/index.md new file mode 100644 index 0000000000..32155b1852 --- /dev/null +++ b/docs/light-client/index.md @@ -0,0 +1 @@ +# Light client specifications diff --git a/docs/stylesheets/extra.css b/docs/stylesheets/extra.css new file mode 100644 index 0000000000..3849762488 --- /dev/null +++ b/docs/stylesheets/extra.css @@ -0,0 +1,34 @@ +/* Reference: https://zenn.dev/mebiusbox/articles/81d977a72cee01 */ + +[data-md-color-scheme=default] { + --md-default-fg-color--light: #222 !important; +} +[data-md-color-scheme=slate] { + --md-default-fg-color--light: #fefefe !important; + --md-typeset-a-color: #fc0 !important; +} + +.md-typeset pre { + color: #f8f8f2; +} +.md-typeset .highlighttable { + margin-left:-20px; + margin-right: -20px; + border-radius: 0; +} +.md-typeset .highlighttable > * { + --md-code-bg-color: #222 !important; + --md-code-fg-color: #fefefe !important; +} +.md-typeset .highlighttable .linenos .linenodiv pre span { + background-color: #222 !important; + color: #fefefe !important; +} +.md-typeset .highlighttable .md-clipboard:before, +.md-typeset .highlighttable .md-clipboard:after { + color: rgba(240,240,240,.8); +} +.md-typeset .highlighttable .md-clipboard:hover:before, +.md-typeset .highlighttable .md-clipboard:hover:after { + color: rgba(102,217,224,1); +} diff --git a/fork_choice/.pages b/fork_choice/.pages new file mode 100644 index 0000000000..a5e6ccc904 --- /dev/null +++ b/fork_choice/.pages @@ -0,0 +1,7 @@ +nav: + - ... 
+ - Fork Choice -- Core: + - phase0: specs/phase0/fork-choice + - bellatrix: specs/bellatrix/fork-choice + - capella: specs/capella/fork-choice + - deneb: specs/deneb/fork-choice diff --git a/mkdocs.yml b/mkdocs.yml new file mode 100644 index 0000000000..dc6b352baa --- /dev/null +++ b/mkdocs.yml @@ -0,0 +1,40 @@ +site_name: Ethereum Consensus Specs +site_url: https://ethereum.github.io/consensus-specs/ +repo_name: ethereum/consensus-specs +theme: + name: material + palette: + - scheme: default + primary: black + toggle: + icon: material/brightness-7 + name: Switch to dark mode + - scheme: slate + primary: black + toggle: + icon: material/brightness-4 + name: Switch to light mode + features: + - navigation.tabs + - search +markdown_extensions: + - toc: + permalink: true + - pymdownx.superfences + - pymdownx.highlight: + use_pygments: true + noclasses: true + pygments_style: monokai + linenums: true + anchor_linenums: true + - mdx_truly_sane_lists: + nested_indent: 4 +plugins: + - search + - awesome-pages +extra_css: + - stylesheets/extra.css +extra: + social: + - icon: fontawesome/brands/github + link: https://github.com/ethereum/consensus-specs diff --git a/setup.py b/setup.py index 52bad2b71b..fc3acb8062 100644 --- a/setup.py +++ b/setup.py @@ -1181,6 +1181,7 @@ def run(self): "test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"], "lint": ["flake8==5.0.4", "mypy==0.981", "pylint==2.15.3"], "generator": ["python-snappy==0.6.1", "filelock"], + "docs": ["mkdocs==1.4.2", "mkdocs-material==9.1.5", "mdx-truly-sane-lists==1.3", "mkdocs-awesome-pages-plugin==2.8.0"] }, install_requires=[ "eth-utils>=2.0.0,<3", diff --git a/specs/.pages b/specs/.pages new file mode 100644 index 0000000000..7e47dc5f75 --- /dev/null +++ b/specs/.pages @@ -0,0 +1,4 @@ +nav: + - phase0 + - ... 
+ - _features diff --git a/specs/_features/eip6110/beacon-chain.md b/specs/_features/eip6110/beacon-chain.md index 70a72a5f45..01999a929e 100644 --- a/specs/_features/eip6110/beacon-chain.md +++ b/specs/_features/eip6110/beacon-chain.md @@ -33,7 +33,7 @@ This is the beacon chain specification of in-protocol deposits processing mechanism. This mechanism relies on the changes proposed by [EIP-6110](http://eips.ethereum.org/EIPS/eip-6110). -*Note:* This specification is built upon [Capella](../../capella/beacon_chain.md) and is under active development. +*Note:* This specification is built upon [Capella](../../capella/beacon-chain.md) and is under active development. ## Constants diff --git a/specs/_features/sharding/p2p-interface.md b/specs/_features/sharding/p2p-interface.md index c29146fe9d..553444eff1 100644 --- a/specs/_features/sharding/p2p-interface.md +++ b/specs/_features/sharding/p2p-interface.md @@ -47,7 +47,7 @@ Following the same scheme as the [Phase0 gossip topics](../../phase0/p2p-interfa | `shard_column_{subnet_id}` | `SignedShardSample` | | `builder_block_bid` | `BuilderBlockBid` | -The [DAS network specification](./das-p2p.md) defines additional topics. +The [DAS network specification](../das/das-core.md) defines additional topics. 
#### Builder block bid From 85c8daf08530545c50fd2f5b6998342bcac85977 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 18 Apr 2023 18:34:12 +0800 Subject: [PATCH 191/210] bump version.txt to 1.3.0 --- tests/core/pyspec/eth2spec/VERSION.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/core/pyspec/eth2spec/VERSION.txt b/tests/core/pyspec/eth2spec/VERSION.txt index d4f06976f0..f0bb29e763 100644 --- a/tests/core/pyspec/eth2spec/VERSION.txt +++ b/tests/core/pyspec/eth2spec/VERSION.txt @@ -1 +1 @@ -1.3.0-rc.5 +1.3.0 From 87d42919b9f80e18ff2eba5354642d6b6c4829c6 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 18 Apr 2023 14:00:11 -0500 Subject: [PATCH 192/210] Add "commitment" to test names for consistency --- tests/generators/kzg_4844/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/kzg_4844/main.py index 699d1f369a..b1391f6787 100644 --- a/tests/generators/kzg_4844/main.py +++ b/tests/generators/kzg_4844/main.py @@ -708,7 +708,7 @@ def case06_verify_blob_kzg_proof_batch(): # Edge case: Invalid commitment, too few bytes commitments_invalid_tooFewBytes = commitments[:3] + [commitments[3][:-1]] + commitments[4:] expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_tooFewBytes) - yield 'verify_blob_kzg_proof_batch_case_too_few_bytes', { + yield 'verify_blob_kzg_proof_batch_case_commitment_too_few_bytes', { 'input': { 'blobs': encode_hex_list(VALID_BLOBS), 'commitments': encode_hex_list(commitments_invalid_tooFewBytes), @@ -720,7 +720,7 @@ def case06_verify_blob_kzg_proof_batch(): # Edge case: Invalid commitment, too many bytes commitments_invalid_tooManyBytes = commitments[:3] + [commitments[3] + b"\x00"] + commitments[4:] expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_tooManyBytes) - yield 'verify_blob_kzg_proof_batch_case_too_many_bytes', { + yield 
'verify_blob_kzg_proof_batch_case_commitment_too_many_bytes', { 'input': { 'blobs': encode_hex_list(VALID_BLOBS), 'commitments': encode_hex_list(commitments_invalid_tooManyBytes), From 03a3e4082a4cfcc984bf4b46d1c17cc08e61b576 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 19 Apr 2023 19:10:46 +0800 Subject: [PATCH 193/210] Fix p2p-interface.md ToC --- docs/README.md | 70 ------------ specs/altair/p2p-interface.md | 42 +++---- specs/bellatrix/p2p-interface.md | 64 +++++------ specs/capella/p2p-interface.md | 24 ++-- specs/deneb/p2p-interface.md | 79 ++++++------- specs/phase0/p2p-interface.md | 186 +++++++++++++++---------------- 6 files changed, 199 insertions(+), 266 deletions(-) delete mode 100644 docs/README.md diff --git a/docs/README.md b/docs/README.md deleted file mode 100644 index 9f2528263e..0000000000 --- a/docs/README.md +++ /dev/null @@ -1,70 +0,0 @@ -# Ethereum Proof-of-Stake Consensus Specifications - -[![Join the chat at https://discord.gg/qGpsxSA](https://img.shields.io/badge/chat-on%20discord-blue.svg)](https://discord.gg/qGpsxSA) [![Join the chat at https://gitter.im/ethereum/sharding](https://badges.gitter.im/ethereum/sharding.svg)](https://gitter.im/ethereum/sharding?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) - -To learn more about proof-of-stake and sharding, see the [PoS documentation](https://ethereum.org/en/developers/docs/consensus-mechanisms/pos/), [sharding documentation](https://ethereum.org/en/upgrades/sharding/) and the [research compendium](https://notes.ethereum.org/s/H1PGqDhpm). - -This repository hosts the current Ethereum proof-of-stake specifications. Discussions about design rationale and proposed changes can be brought up and discussed as issues. Solidified, agreed-upon changes to the spec can be made through pull requests. 
- -## Specs - -[![GitHub release](https://img.shields.io/github/v/release/ethereum/eth2.0-specs)](https://github.com/ethereum/eth2.0-specs/releases/) [![PyPI version](https://badge.fury.io/py/eth2spec.svg)](https://badge.fury.io/py/eth2spec) - -Core specifications for Ethereum proof-of-stake clients can be found in [specs](specs/). These are divided into features. -Features are researched and developed in parallel, and then consolidated into sequential upgrades when ready. - -### Stable Specifications - -| Seq. | Code Name | Fork Epoch | Specs | -| - | - | - | - | -| 0 | **Phase0** |`0` |
  • Core
    • [The beacon chain](specs/phase0/beacon-chain.md)
    • [Deposit contract](specs/phase0/deposit-contract.md)
    • [Beacon chain fork choice](specs/phase0/fork-choice.md)
  • Additions
    • [Honest validator guide](specs/phase0/validator.md)
    • [P2P networking](specs/phase0/p2p-interface.md)
    • [Weak subjectivity](specs/phase0/weak-subjectivity.md)
| -| 1 | **Altair** | `74240` |
  • Core
    • [Beacon chain changes](specs/altair/beacon-chain.md)
    • [Altair fork](specs/altair/fork.md)
  • Additions
    • [Light client sync protocol](specs/altair/light-client/sync-protocol.md) ([full node](specs/altair/light-client/full-node.md), [light client](specs/altair/light-client/light-client.md), [networking](specs/altair/light-client/p2p-interface.md))
    • [Honest validator guide changes](specs/altair/validator.md)
    • [P2P networking](specs/altair/p2p-interface.md)
| -| 2 | **Bellatrix**
(["The Merge"](https://ethereum.org/en/upgrades/merge/)) | `144896` |
  • Core
    • [Beacon Chain changes](specs/bellatrix/beacon-chain.md)
    • [Bellatrix fork](specs/bellatrix/fork.md)
    • [Fork choice changes](specs/bellatrix/fork-choice.md)
  • Additions
    • [Honest validator guide changes](specs/bellatrix/validator.md)
    • [P2P networking](specs/bellatrix/p2p-interface.md)
| -| 3 | **Capella** | `194048` |
  • Core
    • [Beacon chain changes](specs/capella/beacon-chain.md)
    • [Capella fork](specs/capella/fork.md)
  • Additions
    • [Light client sync protocol changes](specs/capella/light-client/sync-protocol.md) ([fork](specs/capella/light-client/fork.md), [full node](specs/capella/light-client/full-node.md), [networking](specs/capella/light-client/p2p-interface.md))
    • [Validator additions](specs/capella/validator.md)
    • [P2P networking](specs/capella/p2p-interface.md)
| - -### In-development Specifications -| Code Name or Topic | Specs | Notes | -| - | - | - | -| Deneb (tentative) |
  • Core
    • [Beacon Chain changes](specs/deneb/beacon-chain.md)
    • [Deneb fork](specs/deneb/fork.md)
    • [Polynomial commitments](specs/deneb/polynomial-commitments.md)
    • [Fork choice changes](specs/deneb/fork-choice.md)
  • Additions
    • [Light client sync protocol changes](specs/deneb/light-client/sync-protocol.md) ([fork](specs/deneb/light-client/fork.md), [full node](specs/deneb/light-client/full-node.md), [networking](specs/deneb/light-client/p2p-interface.md))
    • [Honest validator guide changes](specs/deneb/validator.md)
    • [P2P networking](specs/deneb/p2p-interface.md)
| -| Sharding (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/sharding/beacon-chain.md)
  • Additions
    • [P2P networking](specs/_features/sharding/p2p-interface.md)
| -| Custody Game (outdated) |
  • Core
    • [Beacon Chain changes](specs/_features/custody_game/beacon-chain.md)
  • Additions
    • [Honest validator guide changes](specs/_features/custody_game/validator.md)
| Dependent on sharding | -| Data Availability Sampling (outdated) |
  • Core
    • [Core types and functions](specs/_features/das/das-core.md)
    • [Fork choice changes](specs/_features/das/fork-choice.md)
  • Additions
    • [P2P Networking](specs/_features/das/p2p-interface.md)
    • [Sampling process](specs/_features/das/sampling.md)
|
  • Dependent on sharding
  • [Technical explainer](https://hackmd.io/@HWeNw8hNRimMm2m2GH56Cw/B1YJPGkpD)
| -| EIP-6110 |
  • Core
    • [Beacon Chain changes](specs/_features/eip6110/beacon-chain.md)
    • [EIP-6110 fork](specs/_features/eip6110/fork.md)
  • Additions
    • [Honest validator guide changes](specs/_features/eip6110/validator.md)
| - -### Accompanying documents can be found in [specs](specs) and include: - -* [SimpleSerialize (SSZ) spec](ssz/simple-serialize.md) -* [Merkle proof formats](ssz/merkle-proofs.md) -* [General test format](tests/formats/README.md) - -## Additional specifications for client implementers - -Additional specifications and standards outside of requisite client functionality can be found in the following repos: - -* [Beacon APIs](https://github.com/ethereum/beacon-apis) -* [Beacon Metrics](https://github.com/ethereum/beacon-metrics/) - -## Design goals - -The following are the broad design goals for the Ethereum proof-of-stake consensus specifications: -* to minimize complexity, even at the cost of some losses in efficiency -* to remain live through major network partitions and when very large portions of nodes go offline -* to select all components such that they are either quantum secure or can be easily swapped out for quantum secure counterparts when available -* to utilize crypto and design techniques that allow for a large participation of validators in total and per unit time -* to allow for a typical consumer laptop with `O(C)` resources to process/validate `O(1)` shards (including any system level validation such as the beacon chain) - -## Useful external resources - -* [Design Rationale](https://notes.ethereum.org/s/rkhCgQteN#) -* [Phase 0 Onboarding Document](https://notes.ethereum.org/s/Bkn3zpwxB) -* [Combining GHOST and Casper paper](https://arxiv.org/abs/2003.03052) - -## For spec contributors - -Documentation on the different components used during spec writing can be found here: -* [YAML Test Generators](tests/generators/README.md) -* [Executable Python Spec, with Py-tests](tests/core/pyspec/README.md) - -## Consensus spec tests - -Conformance tests built from the executable python spec are available in the [Ethereum Proof-of-Stake Consensus Spec Tests](https://github.com/ethereum/consensus-spec-tests) repo. 
Compressed tarballs are available in [releases](https://github.com/ethereum/consensus-spec-tests/releases). diff --git a/specs/altair/p2p-interface.md b/specs/altair/p2p-interface.md index 8d6b1c433a..0f278b08c5 100644 --- a/specs/altair/p2p-interface.md +++ b/specs/altair/p2p-interface.md @@ -13,7 +13,7 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery - - [Warning](#warning) +- [Warning](#warning) - [Modifications in Altair](#modifications-in-altair) - [MetaData](#metadata) - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) @@ -43,9 +43,9 @@ Altair adds new messages, topics and data to the Req-Resp, Gossip and Discovery This document is currently illustrative for early Altair testnets and some parts are subject to change. Refer to the note in the [validator guide](./validator.md) for further details. -# Modifications in Altair +## Modifications in Altair -## MetaData +### MetaData The `MetaData` stored locally by clients is updated with an additional field to communicate the sync committee subnet subscriptions: @@ -62,12 +62,12 @@ Where - `seq_number` and `attnets` have the same meaning defined in the Phase 0 document. - `syncnets` is a `Bitvector` representing the node's sync committee subnet subscriptions. This field should mirror the data in the node's ENR as outlined in the [validator guide](./validator.md#sync-committee-subnet-stability). -## The gossip domain: gossipsub +### The gossip domain: gossipsub Gossip meshes are added in Altair to support the consensus activities of the sync committees. Validators use an aggregation scheme to balance the processing and networking load across all of the relevant actors. -### Topics and messages +#### Topics and messages Topics follow the same specification as in the Phase 0 document. New topics are added in Altair to support the sync committees and the beacon block topic is updated with the modified type. 
@@ -103,11 +103,11 @@ Definitions of these new types can be found in the [Altair validator guide](./va Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics. -#### Global topics +##### Global topics Altair changes the type of the global beacon block topic and adds one global topic to propagate partially aggregated sync committee messages to all potential proposers of beacon blocks. -##### `beacon_block` +###### `beacon_block` The existing specification for this topic does not change from the Phase 0 document, but the type of the payload does change to the (modified) `SignedBeaconBlock`. @@ -115,7 +115,7 @@ This type changes due to the inclusion of the inner `BeaconBlockBody` that is mo See the [state transition document](./beacon-chain.md#beaconblockbody) for Altair for further details. -##### `sync_committee_contribution_and_proof` +###### `sync_committee_contribution_and_proof` This topic is used to propagate partially aggregated sync committee messages to be included in future blocks. @@ -152,11 +152,11 @@ def get_sync_subcommittee_pubkeys(state: BeaconState, subcommittee_index: uint64 - _[REJECT]_ The aggregator signature, `signed_contribution_and_proof.signature`, is valid. - _[REJECT]_ The aggregate signature is valid for the message `beacon_block_root` and aggregate pubkey derived from the participation info in `aggregation_bits` for the subcommittee specified by the `contribution.subcommittee_index`. -#### Sync committee subnets +##### Sync committee subnets Sync committee subnets are used to propagate unaggregated sync committee messages to subsections of the network. -##### `sync_committee_{subnet_id}` +###### `sync_committee_{subnet_id}` The `sync_committee_{subnet_id}` topics are used to propagate unaggregated sync committee messages to the subnet `subnet_id` to be aggregated before being gossiped to the global `sync_committee_contribution_and_proof` topic. 
E.g. an attestation on both the old and new topic is ignored like any duplicate.
For this common case, we define the `ForkDigest`-context: @@ -229,9 +229,9 @@ For this common case, we define the `ForkDigest`-context: A fixed-width 4 byte ``, set to the `ForkDigest` matching the chunk: `compute_fork_digest(fork_version, genesis_validators_root)`. -### Messages +#### Messages -#### BeaconBlocksByRange v2 +##### BeaconBlocksByRange v2 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/` @@ -246,7 +246,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: | `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` | | `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` | -#### BeaconBlocksByRoot v2 +##### BeaconBlocksByRoot v2 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/` @@ -261,7 +261,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: | `GENESIS_FORK_VERSION` | `phase0.SignedBeaconBlock` | | `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` | -#### GetMetaData v2 +##### GetMetaData v2 **Protocol ID:** `/eth2/beacon_chain/req/metadata/2/` @@ -279,7 +279,7 @@ Requests the MetaData of a peer, using the new `MetaData` definition given above that is extended from phase 0 in Altair. Other conditions for the `GetMetaData` protocol are unchanged from the phase 0 p2p networking document. -### Transitioning from v1 to v2 +#### Transitioning from v1 to v2 In advance of the fork, implementations can opt in to both run the v1 and v2 for a smooth transition. This is non-breaking, and is recommended as soon as the fork specification is stable. @@ -291,7 +291,7 @@ The v1 method MAY be unregistered at the fork boundary. In the event of a request on v1 for an Altair specific payload, the responder MUST return the **InvalidRequest** response code. -## The discovery domain: discv5 +### The discovery domain: discv5 The `attnets` key of the ENR is used as defined in the Phase 0 document. 
diff --git a/specs/bellatrix/p2p-interface.md b/specs/bellatrix/p2p-interface.md index 4d4044689b..b8b3a11d6b 100644 --- a/specs/bellatrix/p2p-interface.md +++ b/specs/bellatrix/p2p-interface.md @@ -13,23 +13,23 @@ Readers should understand the Phase 0 and Altair documents and use them as a bas - [Warning](#warning) -- [Modifications in Bellatrix](#modifications-in-bellatrix) - - [Configuration](#configuration) - - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - - [Topics and messages](#topics-and-messages) - - [Global topics](#global-topics) - - [`beacon_block`](#beacon_block) - - [Transitioning the gossip](#transitioning-the-gossip) - - [The Req/Resp domain](#the-reqresp-domain) - - [Messages](#messages) - - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) - - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2) + - [Modifications in Bellatrix](#modifications-in-bellatrix) + - [Configuration](#configuration) + - [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) + - [Topics and messages](#topics-and-messages) + - [Global topics](#global-topics) + - [`beacon_block`](#beacon_block) + - [Transitioning the gossip](#transitioning-the-gossip) + - [The Req/Resp domain](#the-reqresp-domain) + - [Messages](#messages) + - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) + - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2) - [Design decision rationale](#design-decision-rationale) - - [Gossipsub](#gossipsub) - - [Why was the max gossip message size increased at Bellatrix?](#why-was-the-max-gossip-message-size-increased-at-bellatrix) - - [Req/Resp](#reqresp) - - [Why was the max chunk response size increased at Bellatrix?](#why-was-the-max-chunk-response-size-increased-at-bellatrix) - - [Why allow invalid payloads on the P2P network?](#why-allow-invalid-payloads-on-the-p2p-network) + - [Gossipsub](#gossipsub) + - [Why was the max gossip message size increased at Bellatrix?](#why-was-the-max-gossip-message-size-increased-at-bellatrix) + - 
This section outlines modifications to constants that are used in this spec.
Specifically, this type changes with the addition of `execution_payload` to the inner `BeaconBlockBody`. @@ -107,12 +107,12 @@ Alias `block = signed_beacon_block.message`, `execution_payload = block.body.exe The following gossip validation from prior specifications MUST NOT be applied if the execution is enabled for the block -- i.e. `is_execution_enabled(state, block.body)`: - [REJECT] The block's parent (defined by `block.parent_root`) passes validation. -### Transitioning the gossip +#### Transitioning the gossip See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for details on how to handle transitioning gossip topics. -## The Req/Resp domain +### The Req/Resp domain Non-faulty, [optimistic](/sync/optimistic.md) nodes may send blocks which result in an INVALID response from an execution engine. To prevent network @@ -122,9 +122,9 @@ down-scored or disconnected. Transmission of a block which is invalid due to any consensus layer rules (i.e., *not* execution layer rules) MAY result in down-scoring or disconnection. -### Messages +#### Messages -#### BeaconBlocksByRange v2 +##### BeaconBlocksByRange v2 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/` @@ -146,7 +146,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: | `ALTAIR_FORK_VERSION` | `altair.SignedBeaconBlock` | | `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` | -#### BeaconBlocksByRoot v2 +##### BeaconBlocksByRoot v2 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/` @@ -165,9 +165,9 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: # Design decision rationale -## Gossipsub +### Gossipsub -### Why was the max gossip message size increased at Bellatrix? +#### Why was the max gossip message size increased at Bellatrix? 
As with both gossip and req/rsp maximum values, type-specific limits should always be simultaneously respected.
And an existing topic is upgraded for updated types in Capella. -### Topics and messages +#### Topics and messages Topics follow the same specification as in prior upgrades. All existing topics remain stable except the beacon block topic which is updated with the modified type. @@ -45,17 +45,17 @@ The new topics along with the type of the `data` field of a gossipsub message ar Note that the `ForkDigestValue` path segment of the topic separates the old and the new `beacon_block` topics. -#### Global topics +##### Global topics Capella changes the type of the global beacon block topic and adds one global topic to propagate withdrawal credential change messages to all potential proposers of beacon blocks. -##### `beacon_block` +###### `beacon_block` The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Capella. Specifically, this type changes with the addition of `bls_to_execution_changes` to the inner `BeaconBlockBody`. See Capella [state transition document](./beacon-chain.md#beaconblockbody) for further details. -##### `bls_to_execution_change` +###### `bls_to_execution_change` This topic is used to propagate signed bls to execution change messages to be included in future blocks. @@ -67,16 +67,16 @@ The following validations MUST pass before forwarding the `signed_bls_to_executi for the validator with index `signed_bls_to_execution_change.message.validator_index`. - _[REJECT]_ All of the conditions within `process_bls_to_execution_change` pass validation. -### Transitioning the gossip +#### Transitioning the gossip See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for details on how to handle transitioning gossip topics for Capella. 
-## The Req/Resp domain +### The Req/Resp domain -### Messages +#### Messages -#### BeaconBlocksByRange v2 +##### BeaconBlocksByRange v2 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/` @@ -93,7 +93,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: | `BELLATRIX_FORK_VERSION` | `bellatrix.SignedBeaconBlock` | | `CAPELLA_FORK_VERSION` | `capella.SignedBeaconBlock` | -#### BeaconBlocksByRoot v2 +##### BeaconBlocksByRoot v2 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/` diff --git a/specs/deneb/p2p-interface.md b/specs/deneb/p2p-interface.md index 0b6381e20f..3c6f3c88a5 100644 --- a/specs/deneb/p2p-interface.md +++ b/specs/deneb/p2p-interface.md @@ -10,32 +10,35 @@ The specification of these changes continues in the same format as the network s -- [Configuration](#configuration) -- [Containers](#containers) - - [`BlobSidecar`](#blobsidecar) - - [`SignedBlobSidecar`](#signedblobsidecar) - - [`BlobIdentifier`](#blobidentifier) - - [Helpers](#helpers) - - [`verify_blob_sidecar_signature`](#verify_blob_sidecar_signature) -- [The gossip domain: gossipsub](#the-gossip-domain-gossipsub) - - [Topics and messages](#topics-and-messages) - - [Global topics](#global-topics) - - [`beacon_block`](#beacon_block) - - [`blob_sidecar_{index}`](#blob_sidecar_index) - - [Transitioning the gossip](#transitioning-the-gossip) -- [The Req/Resp domain](#the-reqresp-domain) - - [Messages](#messages) - - [BeaconBlocksByRange v2](#beaconblocksbyrange-v2) - - [BeaconBlocksByRoot v2](#beaconblocksbyroot-v2) - - [BlobSidecarsByRoot v1](#blobsidecarsbyroot-v1) - - [BlobSidecarsByRange v1](#blobsidecarsbyrange-v1) +- [Modifications in Deneb](#modifications-in-deneb) + - [Configuration](#configuration) + - [Containers](#containers) + - [`BlobSidecar`](#blobsidecar) + - [`SignedBlobSidecar`](#signedblobsidecar) + - [`BlobIdentifier`](#blobidentifier) + - [Helpers](#helpers) + - 
The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in Deneb.
BlobIdentifier(Container): index: BlobIndex ``` -### Helpers +#### Helpers -#### `verify_blob_sidecar_signature` +##### `verify_blob_sidecar_signature` ```python def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: SignedBlobSidecar) -> bool: @@ -86,11 +89,11 @@ def verify_blob_sidecar_signature(state: BeaconState, signed_blob_sidecar: Signe return bls.Verify(proposer.pubkey, signing_root, signed_blob_sidecar.signature) ``` -## The gossip domain: gossipsub +### The gossip domain: gossipsub Some gossip meshes are upgraded in the fork of Deneb to support upgraded types. -### Topics and messages +#### Topics and messages Topics follow the same specification as in prior upgrades. @@ -106,15 +109,15 @@ The new topics along with the type of the `data` field of a gossipsub message ar | - | - | | `blob_sidecar_{index}` | `SignedBlobSidecar` (new) | -#### Global topics +##### Global topics Deneb introduces new global topics for blob sidecars. -##### `beacon_block` +###### `beacon_block` The *type* of the payload of this topic changes to the (modified) `SignedBeaconBlock` found in deneb. -##### `blob_sidecar_{index}` +###### `blob_sidecar_{index}` This topic is used to propagate signed blob sidecars, one for each sidecar index. The number of indices is defined by `MAX_BLOBS_PER_BLOCK`. @@ -132,16 +135,16 @@ The following validations MUST pass before forwarding the `signed_blob_sidecar` If the `proposer_index` cannot immediately be verified against the expected shuffling, the sidecar MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. -### Transitioning the gossip +#### Transitioning the gossip See gossip transition details found in the [Altair document](../altair/p2p-interface.md#transitioning-the-gossip) for details on how to handle transitioning gossip topics for this upgrade. 
-## The Req/Resp domain +### The Req/Resp domain -### Messages +#### Messages -#### BeaconBlocksByRange v2 +##### BeaconBlocksByRange v2 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/2/` @@ -161,7 +164,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time. -#### BeaconBlocksByRoot v2 +##### BeaconBlocksByRoot v2 **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/2/` @@ -179,7 +182,7 @@ Per `context = compute_fork_digest(fork_version, genesis_validators_root)`: No more than `MAX_REQUEST_BLOCKS_DENEB` may be requested at a time. -#### BlobSidecarsByRoot v1 +##### BlobSidecarsByRoot v1 **Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_root/1/` @@ -228,7 +231,7 @@ Clients MUST support requesting sidecars since `minimum_request_epoch`, where `m Clients MUST respond with at least one sidecar, if they have it. Clients MAY limit the number of blocks and sidecars in the response. -#### BlobSidecarsByRange v1 +##### BlobSidecarsByRange v1 **Protocol ID:** `/eth2/beacon_chain/req/blob_sidecars_by_range/1/` diff --git a/specs/phase0/p2p-interface.md b/specs/phase0/p2p-interface.md index f527529316..f38601ed86 100644 --- a/specs/phase0/p2p-interface.md +++ b/specs/phase0/p2p-interface.md @@ -111,11 +111,11 @@ It consists of four main sections: -# Network fundamentals +## Network fundamentals This section outlines the specification for the networking stack in Ethereum consensus-layer clients. -## Transport +### Transport Even though libp2p is a multi-transport stack (designed to listen on multiple simultaneous transports and endpoints transparently), we hereby define a profile for basic interoperability. @@ -133,14 +133,14 @@ All listening endpoints must be publicly dialable, and thus not rely on libp2p c Nodes operating behind a NAT, or otherwise undialable by default (e.g. 
container runtime, firewall, etc.), MUST have their infrastructure configured to enable inbound traffic on the announced public listening endpoint. -## Encryption and identification +### Encryption and identification The [Libp2p-noise](https://github.com/libp2p/specs/tree/master/noise) secure channel handshake with `secp256k1` identities will be used for encryption. As specified in the libp2p specification, clients MUST support the `XX` handshake pattern. -## Protocol Negotiation +### Protocol Negotiation Clients MUST use exact equality when negotiating protocol versions to use and MAY use the version to give priority to higher version numbers. @@ -148,7 +148,7 @@ Clients MUST support [multistream-select 1.0](https://github.com/multiformats/mu and MAY support [multiselect 2.0](https://github.com/libp2p/specs/pull/95) when the spec solidifies. Once all clients have implementations for multiselect 2.0, multistream-select 1.0 MAY be phased out. -## Multiplexing +### Multiplexing During connection bootstrapping, libp2p dynamically negotiates a mutually supported multiplexing method to conduct parallel conversations. This applies to transports that are natively incapable of multiplexing (e.g. TCP, WebSockets, WebRTC), @@ -163,9 +163,9 @@ and MAY support [yamux](https://github.com/hashicorp/yamux/blob/master/spec.md). If both are supported by the client, yamux MUST take precedence during negotiation. See the [Rationale](#design-decision-rationale) section below for tradeoffs. -# Consensus-layer network interaction domains +## Consensus-layer network interaction domains -## Configuration +### Configuration This section outlines constants that are used in this spec. @@ -182,7 +182,7 @@ This section outlines constants that are used in this spec. 
| `MESSAGE_DOMAIN_INVALID_SNAPPY` | `0x00000000` | 4-byte domain for gossip message-id isolation of *invalid* snappy messages | | `MESSAGE_DOMAIN_VALID_SNAPPY` | `0x01000000` | 4-byte domain for gossip message-id isolation of *valid* snappy messages | -## MetaData +### MetaData Clients MUST locally store the following `MetaData`: @@ -203,7 +203,7 @@ Where is entirely independent of the ENR sequence number, and will in most cases be out of sync with the ENR sequence number. -## The gossip domain: gossipsub +### The gossip domain: gossipsub Clients MUST support the [gossipsub v1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.0.md) libp2p Protocol including the [gossipsub v1.1](https://github.com/libp2p/specs/blob/master/pubsub/gossipsub/gossipsub-v1.1.md) extension. @@ -229,7 +229,7 @@ The following gossipsub [parameters](https://github.com/libp2p/specs/blob/master for peer scoring and other attack mitigations. These are currently under investigation and will be spec'd and released to mainnet when they are ready. -### Topics and messages +#### Topics and messages Topics are plain UTF-8 strings and are encoded on the wire as determined by protobuf (gossipsub messages are enveloped in protobuf messages). Topic strings have form: `/eth2/ForkDigestValue/Name/Encoding`. @@ -289,7 +289,7 @@ We utilize `ACCEPT`, `REJECT`, and `IGNORE`. For each gossipsub topic, there are If all validations pass, return `ACCEPT`. If one or more validations fail while processing the items in order, return either `REJECT` or `IGNORE` as specified in the prefix of the particular condition. -#### Global topics +##### Global topics There are two primary global topics used to propagate beacon blocks (`beacon_block`) and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the network. 
@@ -297,7 +297,7 @@ and aggregate attestations (`beacon_aggregate_and_proof`) to all nodes on the ne There are three additional global topics that are used to propagate lower frequency validator messages (`voluntary_exit`, `proposer_slashing`, and `attester_slashing`). -##### `beacon_block` +###### `beacon_block` The `beacon_block` topic is used solely for propagating new signed beacon blocks to all nodes on the networks. Signed blocks are sent in their entirety. @@ -325,7 +325,7 @@ The following validations MUST pass before forwarding the `signed_beacon_block` the block MAY be queued for later processing while proposers for the block's branch are calculated -- in such a case _do not_ `REJECT`, instead `IGNORE` this message. -##### `beacon_aggregate_and_proof` +###### `beacon_aggregate_and_proof` The `beacon_aggregate_and_proof` topic is used to propagate aggregated attestations (as `SignedAggregateAndProof`s) to subscribing nodes (typically validators) to be included in future blocks. @@ -360,7 +360,7 @@ The following validations MUST pass before forwarding the `signed_aggregate_and_ == store.finalized_checkpoint.root` -##### `voluntary_exit` +###### `voluntary_exit` The `voluntary_exit` topic is used solely for propagating signed voluntary validator exits to proposers on the network. Signed voluntary exits are sent in their entirety. @@ -370,7 +370,7 @@ The following validations MUST pass before forwarding the `signed_voluntary_exit for the validator with index `signed_voluntary_exit.message.validator_index`. - _[REJECT]_ All of the conditions within `process_voluntary_exit` pass validation. -##### `proposer_slashing` +###### `proposer_slashing` The `proposer_slashing` topic is used solely for propagating proposer slashings to proposers on the network. Proposer slashings are sent in their entirety. 
@@ -380,7 +380,7 @@ The following validations MUST pass before forwarding the `proposer_slashing` on for the proposer with index `proposer_slashing.signed_header_1.message.proposer_index`. - _[REJECT]_ All of the conditions within `process_proposer_slashing` pass validation. -##### `attester_slashing` +###### `attester_slashing` The `attester_slashing` topic is used solely for propagating attester slashings to proposers on the network. Attester slashings are sent in their entirety. @@ -392,11 +392,11 @@ Clients who receive an attester slashing on this topic MUST validate the conditi verify if `any(attester_slashed_indices.difference(prior_seen_attester_slashed_indices))`). - _[REJECT]_ All of the conditions within `process_attester_slashing` pass validation. -#### Attestation subnets +##### Attestation subnets Attestation subnets are used to propagate unaggregated attestations to subsections of the network. -##### `beacon_attestation_{subnet_id}` +###### `beacon_attestation_{subnet_id}` The `beacon_attestation_{subnet_id}` topics are used to propagate unaggregated attestations to the subnet `subnet_id` (typically beacon and persistent committees) to be aggregated before being gossiped to `beacon_aggregate_and_proof`. @@ -432,7 +432,7 @@ The following validations MUST pass before forwarding the `attestation` on the s -#### Attestations and Aggregation +##### Attestations and Aggregation Attestation broadcasting is grouped into subnets defined by a topic. The number of subnets is defined via `ATTESTATION_SUBNET_COUNT`. @@ -445,7 +445,7 @@ Unaggregated attestations are sent as `Attestation`s to the subnet topic, Aggregated attestations are sent to the `beacon_aggregate_and_proof` topic as `AggregateAndProof`s. -### Encodings +#### Encodings Topics are post-fixed with an encoding. Encodings define how the payload of a gossipsub message is encoded. 
@@ -461,9 +461,9 @@ so [basic snappy block compression](https://github.com/google/snappy/blob/master Implementations MUST use a single encoding for gossip. Changing an encoding will require coordination between participating implementations. -## The Req/Resp domain +### The Req/Resp domain -### Protocol identification +#### Protocol identification Each message type is segregated into its own libp2p protocol ID, which is a case-sensitive UTF-8 string of the form: @@ -485,7 +485,7 @@ With: This protocol segregation allows libp2p `multistream-select 1.0` / `multiselect 2.0` to handle the request type, version, and encoding negotiation before establishing the underlying streams. -### Req/Resp interaction +#### Req/Resp interaction We use ONE stream PER request/response interaction. Streams are closed when the interaction finishes, whether in success or in error. @@ -515,7 +515,7 @@ Regardless of these type specific bounds, a global maximum uncompressed byte siz Clients MUST ensure that lengths are within these bounds; if not, they SHOULD reset the stream immediately. Clients tracking peer reputation MAY decrement the score of the misbehaving peer under this circumstance. -#### Requesting side +##### Requesting side Once a new stream with the protocol ID for the request type has been negotiated, the full request message SHOULD be sent immediately. The request MUST be encoded according to the encoding strategy. @@ -537,7 +537,7 @@ A requester SHOULD read from the stream until either: For requests consisting of a single valid `response_chunk`, the requester SHOULD read the chunk fully, as defined by the `encoding-dependent-header`, before closing the stream. -#### Responding side +##### Responding side Once a new stream with the protocol ID for the request type has been negotiated, the responder SHOULD process the incoming request and MUST validate it before processing it. 
@@ -588,7 +588,7 @@ The `ErrorMessage` schema is: *Note*: By convention, the `error_message` is a sequence of bytes that MAY be interpreted as a UTF-8 string (for debugging purposes). Clients MUST treat as valid any byte sequences. -### Encoding strategies +#### Encoding strategies The token of the negotiated protocol ID specifies the type of encoding to be used for the req/resp interaction. Only one value is possible at this time: @@ -599,7 +599,7 @@ Only one value is possible at this time: For example, the `BeaconBlocksByRoot` request is an SSZ-encoded list of `Root`'s. This encoding type MUST be supported by all clients. -#### SSZ-snappy encoding strategy +##### SSZ-snappy encoding strategy The [SimpleSerialize (SSZ) specification](../../ssz/simple-serialize.md) outlines how objects are SSZ-encoded. @@ -646,9 +646,9 @@ constituents individually as `response_chunk`s. For example, the `List[SignedBeaconBlock, ...]` response type sends zero or more `response_chunk`s. Each _successful_ `response_chunk` contains a single `SignedBeaconBlock` payload. -### Messages +#### Messages -#### Status +##### Status **Protocol ID:** ``/eth2/beacon_chain/req/status/1/`` @@ -694,7 +694,7 @@ SHOULD request beacon blocks from its counterparty via the `BeaconBlocksByRange` the client might need to send `Status` request again to learn if the peer has a higher head. Implementers are free to implement such behavior in their own way. -#### Goodbye +##### Goodbye **Protocol ID:** ``/eth2/beacon_chain/req/goodbye/1/`` @@ -718,7 +718,7 @@ The request/response MUST be encoded as a single SSZ-field. The response MUST consist of a single `response_chunk`. 
-#### BeaconBlocksByRange +##### BeaconBlocksByRange **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_range/1/` @@ -795,7 +795,7 @@ In particular when `step == 1`, each `parent_root` MUST match the `hash_tree_roo After the initial block, clients MAY stop in the process of responding if their fork choice changes the view of the chain in the context of the request. -#### BeaconBlocksByRoot +##### BeaconBlocksByRoot **Protocol ID:** `/eth2/beacon_chain/req/beacon_blocks_by_root/1/` @@ -835,7 +835,7 @@ Clients MAY limit the number of blocks in the response. `/eth2/beacon_chain/req/beacon_blocks_by_root/1/` is deprecated. Clients MAY respond with an empty list during the deprecation transition period. -#### Ping +##### Ping **Protocol ID:** `/eth2/beacon_chain/req/ping/1/` @@ -867,7 +867,7 @@ The request MUST be encoded as an SSZ-field. The response MUST consist of a single `response_chunk`. -#### GetMetaData +##### GetMetaData **Protocol ID:** `/eth2/beacon_chain/req/metadata/1/` @@ -890,14 +890,14 @@ The response MUST be encoded as an SSZ-container. The response MUST consist of a single `response_chunk`. -## The discovery domain: discv5 +### The discovery domain: discv5 Discovery Version 5 ([discv5](https://github.com/ethereum/devp2p/blob/master/discv5/discv5.md)) (Protocol version v5.1) is used for peer discovery. `discv5` is a standalone protocol, running on UDP on a dedicated port, meant for peer discovery only. `discv5` supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are (or will be) requirements in this context. 
-### Integration into libp2p stacks +#### Integration into libp2p stacks `discv5` SHOULD be integrated into the client’s libp2p stack by implementing an adaptor to make it conform to the [service discovery](https://github.com/libp2p/go-libp2p-core/blob/master/discovery/discovery.go) @@ -908,7 +908,7 @@ and the outputs will be multiaddrs converted from the ENR records returned by th This integration enables the libp2p stack to subsequently form connections and streams with discovered peers. -### ENR structure +#### ENR structure The Ethereum Node Record (ENR) for an Ethereum consensus client MUST contain the following entries (exclusive of the sequence number and signature, which MUST be present in an ENR): @@ -923,7 +923,7 @@ The ENR MAY contain the following entries: Specifications of these parameters can be found in the [ENR Specification](http://eips.ethereum.org/EIPS/eip-778). -#### Attestation subnet bitfield +##### Attestation subnet bitfield The ENR `attnets` entry signifies the attestation subnet bitfield with the following form to more easily discover peers participating in particular attestation gossip subnets. @@ -936,7 +936,7 @@ If a node's `MetaData.attnets` has any non-zero bit, the ENR MUST include the `a If a node's `MetaData.attnets` is composed of all zeros, the ENR MAY optionally include the `attnets` entry or leave it out entirely. -#### `eth2` field +##### `eth2` field ENRs MUST carry a generic `eth2` key with an 16-byte value of the node's current fork digest, next fork version, and next fork epoch to ensure connections are made with peers on the intended Ethereum network. @@ -979,11 +979,11 @@ Clients MAY connect to peers with the same `fork_digest` but a different `next_f Unless `ENRForkID` is manually updated to matching prior to the earlier `next_fork_epoch` of the two clients, these connecting clients will be unable to successfully interact starting at the earlier `next_fork_epoch`. 
-# Design decision rationale +## Design decision rationale -## Transport +### Transport -### Why are we defining specific transports? +#### Why are we defining specific transports? libp2p peers can listen on multiple transports concurrently, and these can change over time. Multiaddrs encode not only the address but also the transport to be used to dial. @@ -992,7 +992,7 @@ Due to this dynamic nature, agreeing on specific transports like TCP, QUIC, or W However, it is useful to define a minimum baseline for interoperability purposes. -### Can clients support other transports/handshakes than the ones mandated by the spec? +#### Can clients support other transports/handshakes than the ones mandated by the spec? Clients may support other transports such as libp2p QUIC, WebSockets, and WebRTC transports, if available in the language of choice. While interoperability shall not be harmed by lack of such support, the advantages are desirable: @@ -1007,7 +1007,7 @@ and the accompanying [QUIC-TLS document](https://tools.ietf.org/html/draft-ietf- The usage of one handshake procedure or the other shall be transparent to the application layer, once the libp2p Host/Node object has been configured appropriately. -### What are the advantages of using TCP/QUIC/Websockets? +#### What are the advantages of using TCP/QUIC/Websockets? TCP is a reliable, ordered, full-duplex, congestion-controlled network protocol that powers much of the Internet as we know it today. HTTP/1.1 and HTTP/2 run atop TCP. @@ -1027,7 +1027,7 @@ and we may only become subject to standard IP-based firewall filtering—somethi WebSockets and/or WebRTC transports are necessary for interaction with browsers, and will become increasingly important as we incorporate browser-based light clients to the Ethereum network. -### Why do we not just support a single transport? +#### Why do we not just support a single transport? Networks evolve. 
Hardcoding design decisions leads to ossification, preventing the evolution of networks alongside the state of the art. @@ -1039,7 +1039,7 @@ Clients can adopt new transports without breaking old ones, and the multi-transp (e.g. browsers, embedded devices) to interact with the network as first-class citizens via suitable/native transports (e.g. WSS), without the need for proxying or trust delegation to servers. -### Why are we not using QUIC from the start? +#### Why are we not using QUIC from the start? The QUIC standard is still not finalized (at working draft 22 at the time of writing), and not all mainstream runtimes/languages have mature, standard, and/or fully-interoperable [QUIC support](https://github.com/quicwg/base-drafts/wiki/Implementations). @@ -1052,9 +1052,9 @@ On the other hand, TLS 1.3 is the newest, simplified iteration of TLS. Old, insecure, obsolete ciphers and algorithms have been removed, adopting Ed25519 as the sole ECDH key agreement function. Handshakes are faster, 1-RTT data is supported, and session resumption is a reality, amongst other features. -## Multiplexing +### Multiplexing -### Why are we using mplex/yamux? +#### Why are we using mplex/yamux? [Yamux](https://github.com/hashicorp/yamux/blob/master/spec.md) is a multiplexer invented by Hashicorp that supports stream-level congestion control. Implementations exist in a limited set of languages, and it’s not a trivial piece to develop. @@ -1066,9 +1066,9 @@ It does not support stream-level congestion control and is subject to head-of-li Overlay multiplexers are not necessary with QUIC since the protocol provides native multiplexing, but they need to be layered atop TCP, WebSockets, and other transports that lack such support. -## Protocol Negotiation +### Protocol Negotiation -### When is multiselect 2.0 due and why do we plan to migrate to it? +#### When is multiselect 2.0 due and why do we plan to migrate to it? multiselect 2.0 is currently being conceptualized. 
The debate started [on this issue](https://github.com/libp2p/specs/pull/95), @@ -1084,7 +1084,7 @@ We plan to eventually migrate to multiselect 2.0 because it will: 3. Leverage *push data* mechanisms of underlying protocols to expedite negotiation. 4. Provide the building blocks for enhanced censorship resistance. -### What is the difference between connection-level and stream-level protocol negotiation? +#### What is the difference between connection-level and stream-level protocol negotiation? All libp2p connections must be authenticated, encrypted, and multiplexed. Connections using network transports unsupportive of native authentication/encryption and multiplexing (e.g. TCP) need to undergo protocol negotiation to agree on a mutually supported: @@ -1101,9 +1101,9 @@ When opening streams, peers pin a protocol to that stream, by conducting *stream At present, multistream-select 1.0 is used for both types of negotiation, but multiselect 2.0 will use dedicated mechanisms for connection bootstrapping process and stream protocol negotiation. -## Encryption +### Encryption -### Why are we not supporting SecIO? +#### Why are we not supporting SecIO? SecIO has been the default encryption layer for libp2p for years. It is used in IPFS and Filecoin. And although it will be superseded shortly, it is proven to work at scale. @@ -1114,7 +1114,7 @@ a mechanism that multiselect 2.0 will leverage to reduce round trips during conn SecIO is not considered secure for the purposes of this spec. -### Why are we using Noise? +#### Why are we using Noise? Copied from the Noise Protocol Framework [website](http://www.noiseprotocol.org): @@ -1129,7 +1129,7 @@ and are used in major cryptographic-centric projects like WireGuard, I2P, and Li [Various](https://www.wireguard.com/papers/kobeissi-bhargavan-noise-explorer-2018.pdf) [studies](https://eprint.iacr.org/2019/436.pdf) have assessed the stated security goals of several Noise handshakes with positive results. 
-### Why are we using encryption at all? +#### Why are we using encryption at all? Transport level encryption secures message exchange and provides properties that are useful for privacy, safety, and censorship resistance. These properties are derived from the following security guarantees that apply to the entire communication between two peers: @@ -1146,9 +1146,9 @@ Note that transport-level encryption is not exclusive of application-level encry Transport-level encryption secures the communication itself, while application-level cryptography is necessary for the application’s use cases (e.g. signatures, randomness, etc.). -## Gossipsub +### Gossipsub -### Why are we using a pub/sub algorithm for block and attestation propagation? +#### Why are we using a pub/sub algorithm for block and attestation propagation? Pubsub is a technique to broadcast/disseminate data across a network rapidly. Such data is packaged in fire-and-forget messages that do not require a response from every recipient. @@ -1156,18 +1156,18 @@ Peers subscribed to a topic participate in the propagation of messages in that t The alternative is to maintain a fully connected mesh (all peers connected to each other 1:1), which scales poorly (O(n^2)). -### Why are we using topics to segregate encodings, yet only support one encoding? +#### Why are we using topics to segregate encodings, yet only support one encoding? For future extensibility with almost zero overhead now (besides the extra bytes in the topic name). -### How do we upgrade gossip channels (e.g. changes in encoding, compression)? +#### How do we upgrade gossip channels (e.g. changes in encoding, compression)? Changing gossipsub/broadcasts requires a coordinated upgrade where all clients start publishing to the new topic together, during a hard fork. When a node is preparing for upcoming tasks (e.g. 
validator duty lookahead) on a gossipsub topic, the node should join the topic of the future epoch in which the task is to occur in addition to listening to the topics for the current epoch. -### Why must all clients use the same gossip topic instead of one negotiated between each peer pair? +#### Why must all clients use the same gossip topic instead of one negotiated between each peer pair? Supporting multiple topics/encodings would require the presence of relayers to translate between encodings and topics so as to avoid network fragmentation where participants have diverging views on the gossiped state, @@ -1182,7 +1182,7 @@ but the price here is pretty high in terms of overhead -- both computational and It is permitted for clients to publish data on alternative topics as long as they also publish on the network-wide mandatory topic. -### Why are the topics strings and not hashes? +#### Why are the topics strings and not hashes? Topic names have a hierarchical structure. In the future, gossipsub may support wildcard subscriptions @@ -1195,14 +1195,14 @@ since the domain is finite anyway, and calculating a digest's preimage would be Furthermore, the topic names are shorter than their digest equivalents (assuming SHA-256 hash), so hashing topics would bloat messages unnecessarily. -### Why are we using the `StrictNoSign` signature policy? +#### Why are we using the `StrictNoSign` signature policy? The policy omits the `from` (1), `seqno` (3), `signature` (5) and `key` (6) fields. These fields would: - Expose origin of sender (`from`), type of sender (based on `seqno`) - Add extra unused data to the gossip, since message IDs are based on `data`, not on the `from` and `seqno`. - Introduce more message validation than necessary, e.g. no `signature`. -### Why are we overriding the default libp2p pubsub `message-id`? +#### Why are we overriding the default libp2p pubsub `message-id`? 
For our current purposes, there is no need to address messages based on source peer, or track a message `seqno`. By overriding the default `message-id` to use content-addressing we can filter unnecessary duplicates before hitting the application layer. @@ -1214,7 +1214,7 @@ Some examples of where messages could be duplicated: Partial aggregates could be duplicated * Clients re-publishing seen messages -### Why are these specific gossip parameters chosen? +#### Why are these specific gossip parameters chosen? - `D`, `D_low`, `D_high`, `D_lazy`: recommended defaults. - `heartbeat_interval`: 0.7 seconds, recommended for the beacon chain in the [GossipSub evaluation report by Protocol Labs](https://gateway.ipfs.io/ipfs/QmRAFP5DBnvNjdYSbWhEhVRJJDFCLpPyvew5GwCCB4VxM4). @@ -1233,7 +1233,7 @@ Some examples of where messages could be duplicated: Attestation gossip validity is bounded by an epoch, so this is the safe max bound. -### Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets? +#### Why is there `MAXIMUM_GOSSIP_CLOCK_DISPARITY` when validating slot ranges of messages in gossip subnets? For some gossip channels (e.g. those for Attestations and BeaconBlocks), there are designated ranges of slots during which particular messages can be sent, @@ -1247,14 +1247,14 @@ For minimum and maximum allowable slot broadcast times, Although messages can at times be eagerly gossiped to the network, the node's fork choice prevents integration of these messages into the actual consensus until the _actual local start_ of the designated slot. -### Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets? +#### Why are there `ATTESTATION_SUBNET_COUNT` attestation subnets? Depending on the number of validators, it may be more efficient to group shard subnets and might provide better stability for the gossipsub channel. The exact grouping will be dependent on more involved network tests. 
This constant allows for more flexibility in setting up the network topology for attestation aggregation (as aggregation should happen on each subnet). The value is currently set to be equal to `MAX_COMMITTEES_PER_SLOT` if/until network tests indicate otherwise. -### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots? +#### Why are attestations limited to be broadcast on gossip channels within `SLOTS_PER_EPOCH` slots? Attestations can only be included on chain within an epoch's worth of slots so this is the natural cutoff. There is no utility to the chain to broadcast attestations older than one epoch, @@ -1265,7 +1265,7 @@ In addition to this, relaying attestations requires validating the attestation i Thus, validating arbitrarily old attestations would put additional requirements on which states need to be readily available to the node. This would result in a higher resource burden and could serve as a DoS vector. -### Why are aggregate attestations broadcast to the global topic as `AggregateAndProof`s rather than just as `Attestation`s? +#### Why are aggregate attestations broadcast to the global topic as `AggregateAndProof`s rather than just as `Attestation`s? The dominant strategy for an individual validator is to always broadcast an aggregate containing their own attestation to the global channel to ensure that proposers see their attestation for inclusion. @@ -1275,19 +1275,19 @@ the gossiped aggregate ensures that this dominant strategy will not flood the gl Also, an attacker can create any number of honest-looking aggregates and broadcast them to the global pubsub channel. Thus without some sort of proof of selection as an aggregator, the global channel can trivially be spammed. -### Why are we sending entire objects in the pubsub and not just hashes? +#### Why are we sending entire objects in the pubsub and not just hashes? Entire objects should be sent to get the greatest propagation speeds. 
If only hashes are sent, then block and attestation propagation is dependent on recursive requests from each peer. In a hash-only scenario, peers could receive hashes without knowing who to download the actual contents from. Sending entire objects ensures that they get propagated through the entire network. -### Should clients gossip blocks if they *cannot* validate the proposer signature due to not yet being synced, not knowing the head block, etc? +#### Should clients gossip blocks if they *cannot* validate the proposer signature due to not yet being synced, not knowing the head block, etc? The prohibition of unverified-block-gossiping extends to nodes that cannot verify a signature due to not being fully synced to ensure that such (amplified) DOS attacks are not possible. -### How are we going to discover peers in a gossipsub topic? +#### How are we going to discover peers in a gossipsub topic? In Phase 0, peers for attestation subnets will be found using the `attnets` entry in the ENR. @@ -1295,7 +1295,7 @@ Although this method will be sufficient for early upgrade of the beacon chain, w ENRs should ultimately not be used for this purpose. They are best suited to store identity, location, and capability information, rather than more volatile advertisements. -### How should fork version be used in practice? +#### How should fork version be used in practice? Fork versions are to be manually updated (likely via incrementing) at each hard fork. This is to provide native domain separation for signatures as well as to aid in usefulness for identitying peers (via ENRs) @@ -1308,9 +1308,9 @@ In these cases, extra care should be taken to isolate fork versions (e.g. flip a A node locally stores all previous and future planned fork versions along with the each fork epoch. This allows for handling sync and processing messages starting from past forks/epochs. -## Req/Resp +### Req/Resp -### Why segregate requests into dedicated protocol IDs? 
+#### Why segregate requests into dedicated protocol IDs? Requests are segregated by protocol ID to: @@ -1343,7 +1343,7 @@ Multiselect 2.0 will eventually remove this overhead by memoizing previously sel Fortunately, this req/resp protocol is not the expected network bottleneck in the protocol so the additional overhead is not expected to significantly hinder this domain. -### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding? +#### Why are messages length-prefixed with a protobuf varint in the SSZ-encoding? We are using single-use streams where each stream is closed at the end of the message. Thus, libp2p transparently handles message delimiting in the underlying stream. @@ -1361,7 +1361,7 @@ Nevertheless, in the case of `ssz_snappy`, messages are still length-prefixed wi [Protobuf varint](https://developers.google.com/protocol-buffers/docs/encoding#varints) is an efficient technique to encode variable-length (unsigned here) ints. Instead of reserving a fixed-size field of as many bytes as necessary to convey the maximum possible value, this field is elastic in exchange for 1-bit overhead per byte. -### Why do we version protocol strings with ordinals instead of semver? +#### Why do we version protocol strings with ordinals instead of semver? Using semver for network protocols is confusing. It is never clear what a change in a field, even if backwards compatible on deserialization, actually implies. @@ -1382,11 +1382,11 @@ because it's unclear if "backwards compatibility" and "breaking change" apply on For this reason, we remove and replace semver with ordinals that require explicit agreement and do not mandate a specific policy for changes. -### Why is it called Req/Resp and not RPC? +#### Why is it called Req/Resp and not RPC? Req/Resp is used to avoid confusion with JSON-RPC and similar user-client interaction mechanisms. -### Why do we allow empty responses in block requests? +#### Why do we allow empty responses in block requests? 
When requesting blocks by range or root, it may happen that there are no blocks in the selected range or the responding node does not have the requested blocks. @@ -1413,7 +1413,7 @@ Failing to provide blocks that nodes "should" have is reason to trust a peer les -- for example, if a particular peer gossips a block, it should have access to its parent. If a request for the parent fails, it's indicative of poor peer quality since peers should validate blocks before gossiping them. -### Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from? +#### Why does `BeaconBlocksByRange` let the server choose which branch to send blocks from? When connecting, the `Status` message gives an idea about the sync status of a particular peer, but this changes over time. By the time a subsequent `BeaconBlockByRange` request is processed, the information may be stale, @@ -1423,7 +1423,7 @@ To avoid this race condition, we allow the responding side to choose which branc The requesting client then goes on to validate the blocks and incorporate them in their own database -- because they follow the same rules, they should at this point arrive at the same canonical chain. -### Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs? +#### Why are `BlocksByRange` requests only required to be served for the latest `MIN_EPOCHS_FOR_BLOCK_REQUESTS` epochs? Due to economic finality and weak subjectivity requirements of a proof-of-stake blockchain, for a new node to safely join the network the node must provide a recent checkpoint found out-of-band. This checkpoint can be in the form of a `root` & `epoch` or it can be the entire @@ -1447,7 +1447,7 @@ MIN_EPOCHS_FOR_BLOCK_REQUESTS = ( Where `MAX_SAFETY_DECAY = 100` and thus `MIN_EPOCHS_FOR_BLOCK_REQUESTS = 33024` (~5 months). -### Why must the proposer signature be checked when backfilling blocks in the database? 
+#### Why must the proposer signature be checked when backfilling blocks in the database? When backfilling blocks in a database from a know safe block/state (e.g. when starting from a weak subjectivity state), the node not only must ensure the `BeaconBlock`s form a chain to the known safe block, @@ -1462,7 +1462,7 @@ Although in this particular use case this does not represent a decay in safety would represent invalid historic data and could be unwittingly transmitted to additional nodes. -### What's the effect of empty slots on the sync algorithm? +#### What's the effect of empty slots on the sync algorithm? When syncing one can only tell that a slot has been skipped on a particular branch by examining subsequent blocks and analyzing the graph formed by the parent root. @@ -1472,9 +1472,9 @@ For example, if a peer responds with blocks [2, 3] when asked for [2, 3, 4], cli -- it merely means that the responding peer did not send it (they may not have it yet or may maliciously be trying to hide it) and successive blocks will be needed to determine if there exists a block at slot 4 in this particular branch. -## Discovery +### Discovery -### Why are we using discv5 and not libp2p Kademlia DHT? +#### Why are we using discv5 and not libp2p Kademlia DHT? discv5 is a standalone protocol, running on UDP on a dedicated port, meant for peer and service discovery only. discv5 supports self-certified, flexible peer records (ENRs) and topic-based advertisement, both of which are, or will be, requirements in this context. @@ -1490,7 +1490,7 @@ It should also help light clients of both networks find nodes with specific capa discv5 is in the process of being audited. -### What is the difference between an ENR and a multiaddr, and why are we using ENRs? +#### What is the difference between an ENR and a multiaddr, and why are we using ENRs? Ethereum Node Records are self-certified node records. 
Nodes craft and disseminate ENRs for themselves, proving authorship via a cryptographic signature. @@ -1510,7 +1510,7 @@ discv5 uses ENRs and we will presumably need to: 2. Define a bi-directional conversion function between multiaddrs and the corresponding denormalized fields in an ENR (ip, ip6, tcp, tcp6, etc.), for compatibility with nodes that do not support multiaddr natively (e.g. Ethereum execution-layer nodes). -### Why do we not form ENRs and find peers until genesis block/state is known? +#### Why do we not form ENRs and find peers until genesis block/state is known? Although client software might very well be running locally prior to the solidification of the beacon chain genesis state and block, clients cannot form valid ENRs prior to this point. @@ -1521,9 +1521,9 @@ Once genesis data is known, we can then form ENRs and safely find peers. When using a proof-of-work deposit contract for deposits, `fork_digest` will be known `GENESIS_DELAY` (7 days in mainnet configuration) before `genesis_time`, providing ample time to find peers and form initial connections and gossip subnets prior to genesis. -## Compression/Encoding +### Compression/Encoding -### Why are we using SSZ for encoding? +#### Why are we using SSZ for encoding? SSZ is used at the consensus layer, and all implementations should have support for SSZ-encoding/decoding, requiring no further dependencies to be added to client implementations. @@ -1533,7 +1533,7 @@ The actual data in most protocols will be further compressed for efficiency. SSZ has well-defined schemas for consensus objects (typically sent across the wire) reducing any serialization schema data that needs to be sent. It also has defined all required types that are required for this network specification. -### Why are we compressing, and at which layers? +#### Why are we compressing, and at which layers? 
We compress on the wire to achieve smaller payloads per-message, which, in aggregate, result in higher efficiency, better utilization of available bandwidth, and overall reduction in network-wide traffic overhead. @@ -1563,13 +1563,13 @@ This looks different depending on the interaction layer: implementers are encouraged to encapsulate the encoding and compression logic behind MessageReader and MessageWriter components/strategies that can be layered on top of the raw byte streams. -### Why are we using Snappy for compression? +#### Why are we using Snappy for compression? Snappy is used in Ethereum 1.0. It is well maintained by Google, has good benchmarks, and can calculate the size of the uncompressed object without inflating it in memory. This prevents DOS vectors where large uncompressed data is sent. -### Can I get access to unencrypted bytes on the wire for debugging purposes? +#### Can I get access to unencrypted bytes on the wire for debugging purposes? Yes, you can add loggers in your libp2p protocol handlers to log incoming and outgoing messages. It is recommended to use programming design patterns to encapsulate the logging logic cleanly. @@ -1580,7 +1580,7 @@ you can use logging facilities in those frameworks/runtimes to enable message tr For specific ad-hoc testing scenarios, you can use the [plaintext/2.0.0 secure channel](https://github.com/libp2p/specs/blob/master/plaintext/README.md) (which is essentially no-op encryption or message authentication), in combination with tcpdump or Wireshark to inspect the wire. -### What are SSZ type size bounds? +#### What are SSZ type size bounds? The SSZ encoding outputs of each type have size bounds: each dynamic type, such as a list, has a "limit", which can be used to compute the maximum valid output size. Note that for some more complex dynamic-length objects, element offsets (4 bytes each) may need to be included. 
@@ -1589,7 +1589,7 @@ Other types are static, they have a fixed size: no dynamic-length content is inv For reference, the type bounds can be computed ahead of time, [as per this example](https://gist.github.com/protolambda/db75c7faa1e94f2464787a480e5d613e). It is advisable to derive these lengths from the SSZ type definitions in use, to ensure that version changes do not cause out-of-sync type bounds. -# libp2p implementations matrix +## libp2p implementations matrix This section will soon contain a matrix showing the maturity/state of the libp2p features required by this spec across the languages in which clients are being developed. From accf99fba3b05e6a7c505e4f5bb07168e0921cbb Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 20 Apr 2023 08:47:49 +0900 Subject: [PATCH 194/210] rename to eip6914 --- specs/_features/{reuse_indices => eip6914}/beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) rename specs/_features/{reuse_indices => eip6914}/beacon-chain.md (93%) diff --git a/specs/_features/reuse_indices/beacon-chain.md b/specs/_features/eip6914/beacon-chain.md similarity index 93% rename from specs/_features/reuse_indices/beacon-chain.md rename to specs/_features/eip6914/beacon-chain.md index 6dd71e36fe..5f75895941 100644 --- a/specs/_features/reuse_indices/beacon-chain.md +++ b/specs/_features/eip6914/beacon-chain.md @@ -1,4 +1,4 @@ -# Reuse indices -- The Beacon Chain +EIP-6914 -- The Beacon Chain ## Table of contents @@ -21,7 +21,7 @@ ## Introduction -This is the beacon chain specification to assign new deposits to existing validator records that have withdrawn long ago. +This is the beacon chain specification to assign new deposits to existing validator records. Refers to [EIP-6914](https://github.com/ethereum/EIPs/pull/6914). *Note:* This specification is built upon [Capella](../../capella/beacon_chain.md) and is under active development. 
From 498fbd04a23de5a21e4544e203cb7157679c5282 Mon Sep 17 00:00:00 2001 From: dapplion <35266934+dapplion@users.noreply.github.com> Date: Thu, 20 Apr 2023 08:52:25 +0900 Subject: [PATCH 195/210] rename SAFE_EPOCHS_TO_REUSE_INDEX --- specs/_features/eip6914/beacon-chain.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/_features/eip6914/beacon-chain.md b/specs/_features/eip6914/beacon-chain.md index 5f75895941..2c60c9bdb4 100644 --- a/specs/_features/eip6914/beacon-chain.md +++ b/specs/_features/eip6914/beacon-chain.md @@ -31,7 +31,7 @@ This is the beacon chain specification to assign new deposits to existing valida | Name | Value | Unit | Duration | | - | - | - | - | -| `REUSE_VALIDATOR_INDEX_DELAY` | `uint64(2**16)` (= 65,536) | epochs | ~0.8 year | +| `SAFE_EPOCHS_TO_REUSE_INDEX` | `uint64(2**16)` (= 65,536) | epochs | ~0.8 year | ## Helper functions @@ -45,7 +45,7 @@ def is_reusable_validator(validator: Validator, balance: Gwei, epoch: Epoch) -> Check if ``validator`` index can be re-assigned to a new deposit. 
""" return ( - epoch > validator.withdrawable_epoch + REUSE_VALIDATOR_INDEX_DELAY + epoch > validator.withdrawable_epoch + SAFE_EPOCHS_TO_REUSE_INDEX and balance == 0 ) ``` From b7f3d37a528b8b8e3c2ce250eded3855d0b04552 Mon Sep 17 00:00:00 2001 From: Ben Edgington Date: Fri, 21 Apr 2023 09:16:51 +0100 Subject: [PATCH 196/210] Move is_previous_epoch_justified --- specs/phase0/fork-choice.md | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/specs/phase0/fork-choice.md b/specs/phase0/fork-choice.md index 6e281d5c3d..f39a9234c0 100644 --- a/specs/phase0/fork-choice.md +++ b/specs/phase0/fork-choice.md @@ -11,8 +11,8 @@ - [Configuration](#configuration) - [Helpers](#helpers) - [`LatestMessage`](#latestmessage) - - [`is_previous_epoch_justified`](#is_previous_epoch_justified) - [`Store`](#store) + - [`is_previous_epoch_justified`](#is_previous_epoch_justified) - [`get_forkchoice_store`](#get_forkchoice_store) - [`get_slots_since_genesis`](#get_slots_since_genesis) - [`get_current_slot`](#get_current_slot) @@ -92,17 +92,6 @@ class LatestMessage(object): root: Root ``` - -### `is_previous_epoch_justified` - -```python -def is_previous_epoch_justified(store: Store) -> bool: - current_slot = get_current_slot(store) - current_epoch = compute_epoch_at_slot(current_slot) - return store.justified_checkpoint.epoch + 1 == current_epoch -``` - - #### `Store` The `Store` is responsible for tracking information required for the fork choice algorithm. 
The important fields being tracked are described below: @@ -130,6 +119,15 @@ class Store(object): unrealized_justifications: Dict[Root, Checkpoint] = field(default_factory=dict) ``` +#### `is_previous_epoch_justified` + +```python +def is_previous_epoch_justified(store: Store) -> bool: + current_slot = get_current_slot(store) + current_epoch = compute_epoch_at_slot(current_slot) + return store.justified_checkpoint.epoch + 1 == current_epoch +``` + #### `get_forkchoice_store` The provided anchor-state will be regarded as a trusted state, to not roll back beyond. From 745d529598632029fee9820590b033b7e0e23935 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 25 Apr 2023 12:57:42 +0800 Subject: [PATCH 197/210] Add `compute_subscribed_subnets` unittests and fix typing errors --- specs/phase0/validator.md | 10 +++-- .../validator/test_validator_unittest.py | 39 ++++++++++++++++++- 2 files changed, 45 insertions(+), 4 deletions(-) diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md index 56ca50732c..5266fec7ae 100644 --- a/specs/phase0/validator.md +++ b/specs/phase0/validator.md @@ -616,10 +616,14 @@ Because Phase 0 does not have shards and thus does not have Shard Committees, th ```python def compute_subscribed_subnet(node_id: int, epoch: Epoch, index: int) -> int: - node_id_prefix = node_id >> (256 - ATTESTATION_SUBNET_PREFIX_BITS) + node_id_prefix = node_id >> (256 - int(ATTESTATION_SUBNET_PREFIX_BITS)) node_offset = node_id % EPOCHS_PER_SUBNET_SUBSCRIPTION - permutation_seed = hash(uint_to_bytes((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION)) - permutated_prefix = compute_shuffled_index(node_id_prefix, 1 << ATTESTATION_SUBNET_PREFIX_BITS, permutation_seed) + permutation_seed = hash(uint_to_bytes(uint64((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION))) + permutated_prefix = compute_shuffled_index( + node_id_prefix, + 1 << int(ATTESTATION_SUBNET_PREFIX_BITS), + permutation_seed, + ) return (permutated_prefix + index) % 
ATTESTATION_SUBNET_COUNT ``` diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py index cf7ef392f1..177748eacd 100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/validator/test_validator_unittest.py @@ -1,6 +1,12 @@ +import random + from eth2spec.test.context import ( + single_phase, spec_state_test, - always_bls, with_phases, with_all_phases, + spec_test, + always_bls, + with_phases, + with_all_phases, ) from eth2spec.test.helpers.constants import PHASE0 from eth2spec.test.helpers.attestations import build_attestation_data, get_valid_attestation @@ -476,3 +482,34 @@ def test_get_aggregate_and_proof_signature(spec, state): privkey=privkey, pubkey=pubkey, ) + + +def run_compute_subscribed_subnets_arguments(spec, rng=random.Random(1111)): + node_id = rng.randint(0, 2**40 - 1) # try VALIDATOR_REGISTRY_LIMIT + epoch = rng.randint(0, 2**64 - 1) + subnets = spec.compute_subscribed_subnets(node_id, epoch) + assert len(subnets) == spec.SUBNETS_PER_NODE + + +@with_all_phases +@spec_test +@single_phase +def test_compute_subscribed_subnets_random_1(spec): + rng = random.Random(1111) + run_compute_subscribed_subnets_arguments(spec, rng) + + +@with_all_phases +@spec_test +@single_phase +def test_compute_subscribed_subnets_random_2(spec): + rng = random.Random(2222) + run_compute_subscribed_subnets_arguments(spec, rng) + + +@with_all_phases +@spec_test +@single_phase +def test_compute_subscribed_subnets_random_3(spec): + rng = random.Random(3333) + run_compute_subscribed_subnets_arguments(spec, rng) From 655094ee4a7026185f1962fc245dfa763735828a Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 28 Apr 2023 17:28:11 +0800 Subject: [PATCH 198/210] Add `test_zero_blob` test case --- .../eth2spec/test/deneb/sanity/test_blocks.py | 28 
+++++++++---------- 1 file changed, 14 insertions(+), 14 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py index 5e65dbd4ef..9a6c6a45bc 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py @@ -16,13 +16,11 @@ ) -@with_deneb_and_later -@spec_state_test -def test_one_blob(spec, state): +def run_block_with_blobs(spec, state, blob_count): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) - opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) @@ -34,15 +32,17 @@ def test_one_blob(spec, state): @with_deneb_and_later @spec_state_test -def test_max_blobs(spec, state): - yield 'pre', state +def test_zero_blob(spec, state): + yield from run_block_with_blobs(spec, state, blob_count=0) - block = build_empty_block_for_next_slot(spec, state) - opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=spec.MAX_BLOBS_PER_BLOCK) - block.body.blob_kzg_commitments = blob_kzg_commitments - block.body.execution_payload.transactions = [opaque_tx] - block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) - signed_block = state_transition_and_sign_block(spec, state, block) - yield 'blocks', [signed_block] - yield 'post', state +@with_deneb_and_later +@spec_state_test +def test_one_blob(spec, state): + yield from run_block_with_blobs(spec, state, blob_count=1) + + +@with_deneb_and_later +@spec_state_test +def test_max_blobs(spec, state): + yield from run_block_with_blobs(spec, state, 
blob_count=spec.MAX_BLOBS_PER_BLOCK) From 327ef095d0fe8f309dc42126a9b3abf64f31175d Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 28 Apr 2023 17:37:50 +0800 Subject: [PATCH 199/210] Add `test_incorrect_blob_tx_type` --- .../eth2spec/test/deneb/sanity/test_blocks.py | 17 +++++++++++++++++ 1 file changed, 17 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py index 9a6c6a45bc..50b975d3bc 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py @@ -46,3 +46,20 @@ def test_one_blob(spec, state): @spec_state_test def test_max_blobs(spec, state): yield from run_block_with_blobs(spec, state, blob_count=spec.MAX_BLOBS_PER_BLOCK) + + +@with_deneb_and_later +@spec_state_test +def test_incorrect_blob_tx_type(spec, state): + yield 'pre', state + + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec) + block.body.blob_kzg_commitments = blob_kzg_commitments + opaque_tx[0] == spec.uint8(0x04) # incorrect tx type + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + signed_block = state_transition_and_sign_block(spec, state, block) + + yield 'blocks', [signed_block] + yield 'post', state From 22da92e5979b72d6b2c37719c84ee66b9d909cc8 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 28 Apr 2023 18:26:42 +0800 Subject: [PATCH 200/210] Refactor the spec and add more test cases --- specs/deneb/beacon-chain.md | 7 +- .../eth2spec/test/deneb/sanity/test_blocks.py | 90 ++++++++++++++++++- 2 files changed, 92 insertions(+), 5 deletions(-) diff --git a/specs/deneb/beacon-chain.md b/specs/deneb/beacon-chain.md index df1da8e2a5..359c7fc95b 100644 --- a/specs/deneb/beacon-chain.md +++ b/specs/deneb/beacon-chain.md @@ -173,6 
+173,8 @@ def tx_peek_blob_versioned_hashes(opaque_tx: Transaction) -> Sequence[VersionedH message_offset + uint32.decode_bytes(opaque_tx[(message_offset + 188):(message_offset + 192)]) ) + # `VersionedHash` is a 32-byte object + assert (len(opaque_tx) - blob_versioned_hashes_offset) % 32 == 0 return [ VersionedHash(opaque_tx[x:(x + 32)]) for x in range(blob_versioned_hashes_offset, len(opaque_tx), 32) @@ -205,7 +207,7 @@ def process_block(state: BeaconState, block: BeaconBlock) -> None: process_eth1_data(state, block.body) process_operations(state, block.body) process_sync_aggregate(state, block.body.sync_aggregate) - process_blob_kzg_commitments(state, block.body) # [New in Deneb] + process_blob_kzg_commitments(block.body) # [New in Deneb] ``` #### Execution payload @@ -248,8 +250,7 @@ def process_execution_payload(state: BeaconState, payload: ExecutionPayload, exe #### Blob KZG commitments ```python -def process_blob_kzg_commitments(state: BeaconState, body: BeaconBlockBody) -> None: - # pylint: disable=unused-argument +def process_blob_kzg_commitments(body: BeaconBlockBody) -> None: assert verify_kzg_commitments_against_transactions(body.execution_payload.transactions, body.blob_kzg_commitments) ``` diff --git a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py index 50b975d3bc..0f8f8b3a4d 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py @@ -1,3 +1,5 @@ +import random + from eth2spec.test.helpers.state import ( state_transition_and_sign_block ) @@ -50,15 +52,99 @@ def test_max_blobs(spec, state): @with_deneb_and_later @spec_state_test -def test_incorrect_blob_tx_type(spec, state): +def test_invalid_incorrect_blob_tx_type(spec, state): + yield 'pre', state + + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec) + 
block.body.blob_kzg_commitments = blob_kzg_commitments + opaque_tx = b'\x04' + opaque_tx[1:] # incorrect tx type + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True) + + yield 'blocks', [signed_block] + yield 'post', None + + +@with_deneb_and_later +@spec_state_test +def test_invalid_incorrect_transaction_length_1_byte(spec, state): + yield 'pre', state + + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec) + block.body.blob_kzg_commitments = blob_kzg_commitments + opaque_tx = opaque_tx + b'\x12' # incorrect tx length + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True) + + yield 'blocks', [signed_block] + yield 'post', None + + +@with_deneb_and_later +@spec_state_test +def test_invalid_incorrect_transaction_length_32_bytes(spec, state): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec) block.body.blob_kzg_commitments = blob_kzg_commitments - opaque_tx[0] == spec.uint8(0x04) # incorrect tx type + opaque_tx = opaque_tx + b'\x12' * 32 # incorrect tx length block.body.execution_payload.transactions = [opaque_tx] block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True) + + yield 'blocks', [signed_block] + yield 'post', None + + +@with_deneb_and_later +@spec_state_test +def test_invalid_incorrect_commitment(spec, state): + yield 'pre', state + + block = build_empty_block_for_next_slot(spec, state) + 
opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec) + blob_kzg_commitments[0] = b'\x12' * 48 # incorrect commitment + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True) + + yield 'blocks', [signed_block] + yield 'post', None + + +@with_deneb_and_later +@spec_state_test +def test_invalid_incorrect_commitments_order(spec, state): + yield 'pre', state + + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=2, rng=random.Random(1111)) + block.body.blob_kzg_commitments = [blob_kzg_commitments[1], blob_kzg_commitments[0]] # incorrect order + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) + signed_block = state_transition_and_sign_block(spec, state, block, expect_fail=True) + + yield 'blocks', [signed_block] + yield 'post', None + + +@with_deneb_and_later +@spec_state_test +def test_incorrect_block_hash(spec, state): + yield 'pre', state + + block = build_empty_block_for_next_slot(spec, state) + opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec) + block.body.blob_kzg_commitments = blob_kzg_commitments + block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.block_hash = b'\x12' * 32 # incorrect block hash + # CL itself doesn't verify EL block hash signed_block = state_transition_and_sign_block(spec, state, block) yield 'blocks', [signed_block] From 2a1d998594ba795c22ffd657292e2b0b87fe8850 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 28 Apr 2023 19:01:19 +0800 Subject: [PATCH 201/210] Test non zero `excess_data_gas` --- 
tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py | 1 + tests/core/pyspec/eth2spec/test/helpers/execution_payload.py | 2 ++ 2 files changed, 3 insertions(+) diff --git a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py index 0f8f8b3a4d..2b99d6a725 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py @@ -25,6 +25,7 @@ def run_block_with_blobs(spec, state, blob_count): opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] + block.body.execution_payload.excess_data_gas = 2 block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) signed_block = state_transition_and_sign_block(spec, state, block) diff --git a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py index c0a70aca1d..c8ef1cbf00 100644 --- a/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py +++ b/tests/core/pyspec/eth2spec/test/helpers/execution_payload.py @@ -165,6 +165,8 @@ def build_empty_execution_payload(spec, state, randao_mix=None): ) if is_post_capella(spec): payload.withdrawals = spec.get_expected_withdrawals(state) + if is_post_deneb(spec): + payload.excess_data_gas = 0 payload.block_hash = compute_el_block_hash(spec, payload) From e31fcbd6a9f795100ec6f1de434ffd4555a0f0e2 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Fri, 28 Apr 2023 23:09:10 +0800 Subject: [PATCH 202/210] Add `GetPayloadResponse` for `get_payload` API --- setup.py | 2 +- specs/_features/eip4788/validator.md | 4 +-- specs/bellatrix/validator.md | 19 +++++++++--- specs/capella/validator.md | 14 +++++++-- specs/deneb/validator.md | 46 ++++++++++++++++++++++------ 
tests/formats/fork_choice/README.md | 4 +-- 6 files changed, 68 insertions(+), 21 deletions(-) diff --git a/setup.py b/setup.py index fc3acb8062..a3e94642e0 100644 --- a/setup.py +++ b/setup.py @@ -588,7 +588,7 @@ def notify_forkchoice_updated(self: ExecutionEngine, payload_attributes: Optional[PayloadAttributes]) -> Optional[PayloadId]: pass - def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload: + def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse: # pylint: disable=unused-argument raise NotImplementedError("no default block production") diff --git a/specs/_features/eip4788/validator.md b/specs/_features/eip4788/validator.md index 421e297ce9..3140cdb213 100644 --- a/specs/_features/eip4788/validator.md +++ b/specs/_features/eip4788/validator.md @@ -13,7 +13,7 @@ - [Helpers](#helpers) - [Protocols](#protocols) - [`ExecutionEngine`](#executionengine) - - [`get_payload`](#get_payload) + - [Modified `get_payload`](#modified-get_payload) - [Beacon chain responsibilities](#beacon-chain-responsibilities) - [Block proposal](#block-proposal) - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) @@ -40,7 +40,7 @@ Please see related Beacon Chain doc before continuing and use them as a referenc ### `ExecutionEngine` -#### `get_payload` +#### Modified `get_payload` `get_payload` returns the upgraded EIP-4788 `ExecutionPayload` type. 
diff --git a/specs/bellatrix/validator.md b/specs/bellatrix/validator.md index a176d7534e..dea763cde8 100644 --- a/specs/bellatrix/validator.md +++ b/specs/bellatrix/validator.md @@ -9,6 +9,7 @@ - [Introduction](#introduction) - [Prerequisites](#prerequisites) - [Helpers](#helpers) + - [`GetPayloadResponse`](#getpayloadresponse) - [`get_pow_block_at_terminal_total_difficulty`](#get_pow_block_at_terminal_total_difficulty) - [`get_terminal_pow_block`](#get_terminal_pow_block) - [Protocols](#protocols) @@ -36,6 +37,14 @@ Please see related Beacon Chain doc before continuing and use them as a referenc ## Helpers +### `GetPayloadResponse` + +```python +@dataclass +class GetPayloadResponse(object): + execution_payload: ExecutionPayload +``` + ### `get_pow_block_at_terminal_total_difficulty` ```python @@ -83,13 +92,13 @@ The Engine API may be used to implement it with an external execution engine. #### `get_payload` -Given the `payload_id`, `get_payload` returns the most recent version of the execution payload that -has been built since the corresponding call to `notify_forkchoice_updated` method. +Given the `payload_id`, `get_payload` returns `GetPayloadResponse` with the most recent version of +the execution payload that has been built since the corresponding call to `notify_forkchoice_updated` method. ```python -def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> ExecutionPayload: +def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse: """ - Return ``execution_payload`` object. + Return ``GetPayloadResponse`` object. """ ... 
``` @@ -162,7 +171,7 @@ def get_execution_payload(payload_id: Optional[PayloadId], execution_engine: Exe # Pre-merge, empty payload return ExecutionPayload() else: - return execution_engine.get_payload(payload_id) + return execution_engine.get_payload(payload_id).execution_payload ``` *Note*: It is recommended for a validator to call `prepare_execution_payload` as soon as input parameters become known, diff --git a/specs/capella/validator.md b/specs/capella/validator.md index 644ee476f9..29cff8c611 100644 --- a/specs/capella/validator.md +++ b/specs/capella/validator.md @@ -11,9 +11,10 @@ - [Introduction](#introduction) - [Prerequisites](#prerequisites) - [Helpers](#helpers) + - [Modified `GetPayloadResponse`](#modified-getpayloadresponse) - [Protocols](#protocols) - [`ExecutionEngine`](#executionengine) - - [`get_payload`](#get_payload) + - [Modified `get_payload`](#modified-get_payload) - [Beacon chain responsibilities](#beacon-chain-responsibilities) - [Block proposal](#block-proposal) - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) @@ -39,11 +40,20 @@ Please see related Beacon Chain doc before continuing and use them as a referenc ## Helpers +### Modified `GetPayloadResponse` + +```python +@dataclass +class GetPayloadResponse(object): + execution_payload: ExecutionPayload + block_value: uint256 +``` + ## Protocols ### `ExecutionEngine` -#### `get_payload` +#### Modified `get_payload` `get_payload` returns the upgraded Capella `ExecutionPayload` type. 
diff --git a/specs/deneb/validator.md b/specs/deneb/validator.md index b627de023e..6562c91ddd 100644 --- a/specs/deneb/validator.md +++ b/specs/deneb/validator.md @@ -11,7 +11,11 @@ - [Introduction](#introduction) - [Prerequisites](#prerequisites) - [Helpers](#helpers) - - [`get_blobs_and_kzg_commitments`](#get_blobs_and_kzg_commitments) + - [`BlobsBundle`](#blobsbundle) + - [Modified `GetPayloadResponse`](#modified-getpayloadresponse) +- [Protocol](#protocol) + - [`ExecutionEngine`](#executionengine) + - [Modified `get_payload`](#modified-get_payload) - [Beacon chain responsibilities](#beacon-chain-responsibilities) - [Block and sidecar proposal](#block-and-sidecar-proposal) - [Constructing the `BeaconBlockBody`](#constructing-the-beaconblockbody) @@ -36,17 +40,40 @@ Please see related Beacon Chain doc before continuing and use them as a referenc ## Helpers -### `get_blobs_and_kzg_commitments` +### `BlobsBundle` -The interface to retrieve blobs and corresponding kzg commitments. +```python +@dataclass +class BlobsBundle(object): + commitments: Sequence[KZGCommitment] + proofs: Sequence[KZGProof] + blobs: Sequence[Blob] +``` + +### Modified `GetPayloadResponse` + +```python +@dataclass +class GetPayloadResponse(object): + execution_payload: ExecutionPayload + block_value: uint256 + blobs_bundle: BlobsBundle +``` + +## Protocol + +### `ExecutionEngine` + +#### Modified `get_payload` -Note: This API is *unstable*. `get_blobs_and_kzg_commitments` and `get_payload` may be unified. -Implementers may also retrieve blobs individually per transaction. +Given the `payload_id`, `get_payload` returns the most recent version of the execution payload that +has been built since the corresponding call to `notify_forkchoice_updated` method. 
```python -def get_blobs_and_kzg_commitments( - payload_id: PayloadId -) -> Tuple[Sequence[Blob], Sequence[KZGCommitment], Sequence[KZGProof]]: +def get_payload(self: ExecutionEngine, payload_id: PayloadId) -> GetPayloadResponse: + """ + Return ExecutionPayload, uint256, BlobsBundle objects. + """ # pylint: disable=unused-argument ... ``` @@ -62,7 +89,8 @@ All validator responsibilities remain unchanged other than those noted below. ##### Blob KZG commitments 1. After retrieving the execution payload from the execution engine as specified in Capella, -use the `payload_id` to retrieve `blobs` and `blob_kzg_commitments` via `get_blobs_and_kzg_commitments(payload_id)`. +use the `payload_id` to retrieve `blobs`, `blob_kzg_commitments`, and `blob_kzg_proofs` +via `get_payload(payload_id).blobs_bundle`. 2. Validate `blobs` and `blob_kzg_commitments`: ```python diff --git a/tests/formats/fork_choice/README.md b/tests/formats/fork_choice/README.md index c94b959338..3b28837de7 100644 --- a/tests/formats/fork_choice/README.md +++ b/tests/formats/fork_choice/README.md @@ -114,8 +114,8 @@ Optional step for optimistic sync tests. 
This step sets the [`payloadStatus`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#PayloadStatusV1) value that Execution Layer client mock returns in responses to the following Engine API calls: -* [`engine_newPayloadV1(payload)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_newpayloadv1) if `payload.blockHash == payload_info.block_hash` -* [`engine_forkchoiceUpdatedV1(forkchoiceState, ...)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/specification.md#engine_forkchoiceupdatedv1) if `forkchoiceState.headBlockHash == payload_info.block_hash` +* [`engine_newPayloadV1(payload)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_newpayloadv1) if `payload.blockHash == payload_info.block_hash` +* [`engine_forkchoiceUpdatedV1(forkchoiceState, ...)`](https://github.com/ethereum/execution-apis/blob/main/src/engine/paris.md#engine_forkchoiceupdatedv1) if `forkchoiceState.headBlockHash == payload_info.block_hash` *Note:* Status of a payload must be *initialized* via `on_payload_info` before the corresponding `on_block` execution step. 
From 7570445e983d9e63e204be9182c9e8682be58391 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Tue, 2 May 2023 23:45:21 +0800 Subject: [PATCH 203/210] Fix sync testgen --- tests/generators/sync/main.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/generators/sync/main.py b/tests/generators/sync/main.py index 68e38cebe6..5563e6f8c3 100644 --- a/tests/generators/sync/main.py +++ b/tests/generators/sync/main.py @@ -1,5 +1,5 @@ from eth2spec.gen_helpers.gen_from_tests.gen import run_state_test_generators -from eth2spec.test.helpers.constants import BELLATRIX, CAPELLA, DENEB +from eth2spec.test.helpers.constants import BELLATRIX, CAPELLA, DENEB, EIP6110 if __name__ == "__main__": From ddf7e8013bd717fd01faa484f63a24f362063f05 Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 2 May 2023 10:57:49 -0500 Subject: [PATCH 204/210] Add group of invalid G1 points --- tests/generators/kzg_4844/main.py | 481 +++++++----------------------- 1 file changed, 114 insertions(+), 367 deletions(-) diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/kzg_4844/main.py index b1391f6787..7ddc3471cd 100644 --- a/tests/generators/kzg_4844/main.py +++ b/tests/generators/kzg_4844/main.py @@ -54,12 +54,17 @@ def evaluate_blob_at(blob, z): ) +BLS_MODULUS_BYTES = spec.BLS_MODULUS.to_bytes(32, spec.ENDIANNESS) + G1 = bls.G1_to_bytes48(bls.G1()) -P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + +G1_INVALID_TOO_FEW_BYTES = G1[:-1] +G1_INVALID_TOO_MANY_BYTES = G1 + b"\x00" +G1_INVALID_P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + "0123456789abcdef0123456789abcdef0123456789abcdef") -P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + +G1_INVALID_P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + "0123456789abcdef0123456789abcdef0123456789abcde0") -BLS_MODULUS_BYTES = spec.BLS_MODULUS.to_bytes(32, spec.ENDIANNESS) 
+INVALID_G1_POINTS = [G1_INVALID_TOO_FEW_BYTES, G1_INVALID_TOO_MANY_BYTES, + G1_INVALID_P1_NOT_IN_G1, G1_INVALID_P1_NOT_ON_CURVE] BLOB_ALL_ZEROS = spec.Blob() BLOB_RANDOM_VALID1 = spec.Blob(b''.join([field_element_bytes(pow(2, n + 256, spec.BLS_MODULUS)) for n in range(4096)])) @@ -150,7 +155,7 @@ def case02_compute_kzg_proof(): for blob in INVALID_BLOBS: z = VALID_FIELD_ELEMENTS[0] expect_exception(spec.compute_kzg_proof, blob, z) - identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + identifier = f'{encode_hex(hash(blob))}' yield f'compute_kzg_proof_case_invalid_blob_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), @@ -163,7 +168,7 @@ def case02_compute_kzg_proof(): for z in INVALID_FIELD_ELEMENTS: blob = VALID_BLOBS[4] expect_exception(spec.compute_kzg_proof, blob, z) - identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + identifier = f'{encode_hex(hash(z))}' yield f'compute_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'blob': encode_hex(blob), @@ -209,13 +214,29 @@ def case03_verify_kzg_proof(): 'output': False } + # Edge case: Invalid commitment + for commitment in INVALID_G1_POINTS: + blob, z = VALID_BLOBS[2], VALID_FIELD_ELEMENTS[1] + proof, y = spec.compute_kzg_proof(blob, z) + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + identifier = f'{encode_hex(commitment)}' + yield f'verify_kzg_proof_case_invalid_commitment_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } + # Edge case: Invalid z for z in INVALID_FIELD_ELEMENTS: blob, validz = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[1] proof, y = spec.compute_kzg_proof(blob, validz) commitment = spec.blob_to_kzg_commitment(blob) expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - identifier = f'{encode_hex(hash(blob))}_{encode_hex(z)}' + 
identifier = f'{encode_hex(z)}' yield f'verify_kzg_proof_case_invalid_z_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'commitment': encode_hex(commitment), @@ -232,7 +253,7 @@ def case03_verify_kzg_proof(): proof, _ = spec.compute_kzg_proof(blob, z) commitment = spec.blob_to_kzg_commitment(blob) expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - identifier = f'{encode_hex(hash(blob))}_{encode_hex(y)}' + identifier = f'{encode_hex(y)}' yield f'verify_kzg_proof_case_invalid_y_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { 'input': { 'commitment': encode_hex(commitment), @@ -243,133 +264,22 @@ def case03_verify_kzg_proof(): 'output': None } - # Edge case: Invalid proof, not in G1 - blob, z = VALID_BLOBS[2], VALID_FIELD_ELEMENTS[0] - proof = P1_NOT_IN_G1 - commitment = spec.blob_to_kzg_commitment(blob) - y = VALID_FIELD_ELEMENTS[1] - expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_proof_not_in_G1', { - 'input': { - 'commitment': encode_hex(commitment), - 'z': encode_hex(z), - 'y': encode_hex(y), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid proof, not on curve - blob, z = VALID_BLOBS[3], VALID_FIELD_ELEMENTS[1] - proof = P1_NOT_ON_CURVE - commitment = spec.blob_to_kzg_commitment(blob) - y = VALID_FIELD_ELEMENTS[1] - expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_proof_not_on_curve', { - 'input': { - 'commitment': encode_hex(commitment), - 'z': encode_hex(z), - 'y': encode_hex(y), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid proof, too few bytes - blob = VALID_BLOBS[1] - commitment = spec.blob_to_kzg_commitment(blob) - z = VALID_FIELD_ELEMENTS[4] - proof, y = spec.compute_kzg_proof(blob, z) - proof = proof[:-1] - expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_proof_too_few_bytes', { - 'input': { - 'commitment': 
encode_hex(commitment), - 'z': encode_hex(z), - 'y': encode_hex(y), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid proof, too many bytes - blob = VALID_BLOBS[1] - commitment = spec.blob_to_kzg_commitment(blob) - z = VALID_FIELD_ELEMENTS[4] - proof, y = spec.compute_kzg_proof(blob, z) - proof = proof + b"\x00" - expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_proof_too_many_bytes', { - 'input': { - 'commitment': encode_hex(commitment), - 'z': encode_hex(z), - 'y': encode_hex(y), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid commitment, not in G1 - blob, z = VALID_BLOBS[4], VALID_FIELD_ELEMENTS[3] - proof, y = spec.compute_kzg_proof(blob, z) - commitment = P1_NOT_IN_G1 - expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_commitment_not_in_G1', { - 'input': { - 'commitment': encode_hex(commitment), - 'z': encode_hex(z), - 'y': encode_hex(y), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid commitment, not on curve - blob, z = VALID_BLOBS[1], VALID_FIELD_ELEMENTS[4] - proof, y = spec.compute_kzg_proof(blob, z) - commitment = P1_NOT_ON_CURVE - expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_commitment_not_on_curve', { - 'input': { - 'commitment': encode_hex(commitment), - 'z': encode_hex(z), - 'y': encode_hex(y), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid commitment, too few bytes - blob = VALID_BLOBS[1] - commitment = spec.blob_to_kzg_commitment(blob)[:-1] - z = VALID_FIELD_ELEMENTS[4] - proof, y = spec.compute_kzg_proof(blob, z) - expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_commitment_too_few_bytes', { - 'input': { - 'commitment': encode_hex(commitment), - 'z': encode_hex(z), - 'y': encode_hex(y), - 'proof': encode_hex(proof), - }, - 
'output': None - } - - # Edge case: Invalid commitment, too many bytes - blob = VALID_BLOBS[1] - commitment = spec.blob_to_kzg_commitment(blob) + b"\x00" - z = VALID_FIELD_ELEMENTS[4] - proof, y = spec.compute_kzg_proof(blob, z) - expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) - yield 'verify_kzg_proof_case_commitment_too_many_bytes', { - 'input': { - 'commitment': encode_hex(commitment), - 'z': encode_hex(z), - 'y': encode_hex(y), - 'proof': encode_hex(proof), - }, - 'output': None - } + # Edge case: Invalid proof + for proof in INVALID_G1_POINTS: + blob, z = VALID_BLOBS[2], VALID_FIELD_ELEMENTS[1] + _, y = spec.compute_kzg_proof(blob, z) + commitment = spec.blob_to_kzg_commitment(blob) + expect_exception(spec.verify_kzg_proof, commitment, z, y, proof) + identifier = f'{encode_hex(proof)}' + yield f'verify_kzg_proof_case_invalid_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'commitment': encode_hex(commitment), + 'z': encode_hex(z), + 'y': encode_hex(y), + 'proof': encode_hex(proof), + }, + 'output': None + } def case04_compute_blob_kzg_proof(): @@ -399,31 +309,18 @@ def case04_compute_blob_kzg_proof(): 'output': None } - # Edge case: Invalid commitment, not in G1 - commitment = P1_NOT_IN_G1 - blob = VALID_BLOBS[1] - expect_exception(spec.compute_blob_kzg_proof, blob, commitment) - identifier = f'{encode_hex(hash(blob))}' - yield 'compute_blob_kzg_proof_case_invalid_commitment_not_in_G1', { - 'input': { - 'blob': encode_hex(blob), - 'commitment': encode_hex(commitment), - }, - 'output': None - } - - # Edge case: Invalid commitment, not on curve - commitment = P1_NOT_ON_CURVE - blob = VALID_BLOBS[1] - expect_exception(spec.compute_blob_kzg_proof, blob, commitment) - identifier = f'{encode_hex(hash(blob))}' - yield 'compute_blob_kzg_proof_case_invalid_commitment_not_on_curve', { - 'input': { - 'blob': encode_hex(blob), - 'commitment': encode_hex(commitment), - }, - 'output': None - } + # Edge case: Invalid commitment + for 
commitment in INVALID_G1_POINTS: + blob = VALID_BLOBS[1] + expect_exception(spec.compute_blob_kzg_proof, blob, commitment) + identifier = f'{encode_hex(hash(commitment))}' + yield f'compute_blob_kzg_proof_case_invalid_commitment_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + }, + 'output': None + } def case05_verify_blob_kzg_proof(): @@ -457,120 +354,6 @@ def case05_verify_blob_kzg_proof(): 'output': False } - # Edge case: Invalid proof, not in G1 - blob = VALID_BLOBS[2] - proof = P1_NOT_IN_G1 - commitment = G1 - expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) - yield 'verify_blob_kzg_proof_case_proof_not_in_G1', { - 'input': { - 'blob': encode_hex(blob), - 'commitment': encode_hex(commitment), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid proof, not on curve - blob = VALID_BLOBS[1] - proof = P1_NOT_ON_CURVE - commitment = G1 - expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) - yield 'verify_blob_kzg_proof_case_proof_not_on_curve', { - 'input': { - 'blob': encode_hex(blob), - 'commitment': encode_hex(commitment), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid proof, too few bytes - blob = VALID_BLOBS[1] - commitment = spec.blob_to_kzg_commitment(blob) - proof = spec.compute_blob_kzg_proof(blob, commitment)[:-1] - expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) - yield 'verify_blob_kzg_proof_case_proof_too_few_bytes', { - 'input': { - 'blob': encode_hex(blob), - 'commitment': encode_hex(commitment), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid proof, too many bytes - blob = VALID_BLOBS[1] - commitment = spec.blob_to_kzg_commitment(blob) - proof = spec.compute_blob_kzg_proof(blob, commitment) + b"\x00" - expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) - yield 
'verify_blob_kzg_proof_case_proof_too_many_bytes', { - 'input': { - 'blob': encode_hex(blob), - 'commitment': encode_hex(commitment), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid commitment, not in G1 - blob = VALID_BLOBS[0] - proof = G1 - commitment = P1_NOT_IN_G1 - expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) - yield 'verify_blob_kzg_proof_case_commitment_not_in_G1', { - 'input': { - 'blob': encode_hex(blob), - 'commitment': encode_hex(commitment), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid commitment, not on curve - blob = VALID_BLOBS[2] - proof = G1 - commitment = P1_NOT_ON_CURVE - expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) - yield 'verify_blob_kzg_proof_case_commitment_not_on_curve', { - 'input': { - 'blob': encode_hex(blob), - 'commitment': encode_hex(commitment), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid commitment, too few bytes - blob = VALID_BLOBS[1] - commitment = spec.blob_to_kzg_commitment(blob) - proof = spec.compute_blob_kzg_proof(blob, commitment) - commitment = commitment[:-1] - expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) - yield 'verify_blob_kzg_proof_case_commitment_too_few_bytes', { - 'input': { - 'blob': encode_hex(blob), - 'commitment': encode_hex(commitment), - 'proof': encode_hex(proof), - }, - 'output': None - } - - # Edge case: Invalid commitment, too many bytes - blob = VALID_BLOBS[1] - commitment = spec.blob_to_kzg_commitment(blob) - proof = spec.compute_blob_kzg_proof(blob, commitment) - commitment = commitment + b"\x00" - expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) - yield 'verify_blob_kzg_proof_case_commitment_too_many_bytes', { - 'input': { - 'blob': encode_hex(blob), - 'commitment': encode_hex(commitment), - 'proof': encode_hex(proof), - }, - 'output': None - } - # Edge case: Invalid blob for blob in INVALID_BLOBS: 
proof = G1 @@ -586,6 +369,36 @@ def case05_verify_blob_kzg_proof(): 'output': None } + # Edge case: Invalid commitment + for commitment in INVALID_G1_POINTS: + blob = VALID_BLOBS[1] + proof = G1 + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + identifier = f'{encode_hex(hash(commitment))}' + yield f'verify_blob_kzg_proof_case_invalid_commitment_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + + # Edge case: Invalid proof + for proof in INVALID_G1_POINTS: + blob = VALID_BLOBS[1] + commitment = G1 + expect_exception(spec.verify_blob_kzg_proof, blob, commitment, proof) + identifier = f'{encode_hex(hash(proof))}' + yield f'verify_blob_kzg_proof_case_invalid_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blob': encode_hex(blob), + 'commitment': encode_hex(commitment), + 'proof': encode_hex(proof), + }, + 'output': None + } + def case06_verify_blob_kzg_proof_batch(): # Valid cases @@ -633,101 +446,35 @@ def case06_verify_blob_kzg_proof_batch(): 'output': None } - # Edge case: Invalid proof, not in G1 - proofs_invalid_notG1 = [P1_NOT_IN_G1] + proofs[1:] - expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notG1) - yield 'verify_blob_kzg_proof_batch_case_proof_not_in_G1', { - 'input': { - 'blobs': encode_hex_list(VALID_BLOBS), - 'commitments': encode_hex_list(commitments), - 'proofs': encode_hex_list(proofs_invalid_notG1), - }, - 'output': None - } - - # Edge case: Invalid proof, not on curve - proofs_invalid_notCurve = proofs[:1] + [P1_NOT_ON_CURVE] + proofs[2:] - expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_notCurve) - yield 'verify_blob_kzg_proof_batch_case_proof_not_on_curve', { - 'input': { - 'blobs': encode_hex_list(VALID_BLOBS), - 'commitments': encode_hex_list(commitments), - 'proofs': 
encode_hex_list(proofs_invalid_notCurve), - }, - 'output': None - } - - # Edge case: Invalid proof, too few bytes - proofs_invalid_tooFewBytes = proofs[:1] + [proofs[1][:-1]] + proofs[2:] - expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_tooFewBytes) - yield 'verify_blob_kzg_proof_batch_case_proof_too_few_bytes', { - 'input': { - 'blobs': encode_hex_list(VALID_BLOBS), - 'commitments': encode_hex_list(commitments), - 'proofs': encode_hex_list(proofs_invalid_tooFewBytes), - }, - 'output': None - } - - # Edge case: Invalid proof, too many bytes - proofs_invalid_tooManyBytes = proofs[:1] + [proofs[1] + b"\x00"] + proofs[2:] - expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, proofs_invalid_tooManyBytes) - yield 'verify_blob_kzg_proof_batch_case_proof_too_many_bytes', { - 'input': { - 'blobs': encode_hex_list(VALID_BLOBS), - 'commitments': encode_hex_list(commitments), - 'proofs': encode_hex_list(proofs_invalid_tooManyBytes), - }, - 'output': None - } - - # Edge case: Invalid commitment, not in G1 - commitments_invalid_notG1 = commitments[:2] + [P1_NOT_IN_G1] + commitments[3:] - expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_notG1) - yield 'verify_blob_kzg_proof_batch_case_commitment_not_in_G1', { - 'input': { - 'blobs': encode_hex_list(VALID_BLOBS), - 'commitments': encode_hex_list(commitments_invalid_notG1), - 'proofs': encode_hex_list(proofs), - }, - 'output': None - } - - # Edge case: Invalid commitment, not on curve - commitments_invalid_notCurve = commitments[:3] + [P1_NOT_ON_CURVE] + commitments[4:] - expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_notCurve) - yield 'verify_blob_kzg_proof_batch_case_not_on_curve', { - 'input': { - 'blobs': encode_hex_list(VALID_BLOBS), - 'commitments': encode_hex_list(commitments_invalid_notCurve), - 'proofs': encode_hex_list(proofs), - }, - 'output': None - } - 
- # Edge case: Invalid commitment, too few bytes - commitments_invalid_tooFewBytes = commitments[:3] + [commitments[3][:-1]] + commitments[4:] - expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_tooFewBytes) - yield 'verify_blob_kzg_proof_batch_case_commitment_too_few_bytes', { - 'input': { - 'blobs': encode_hex_list(VALID_BLOBS), - 'commitments': encode_hex_list(commitments_invalid_tooFewBytes), - 'proofs': encode_hex_list(proofs), - }, - 'output': None - } + # Edge case: Invalid commitment + for commitment in INVALID_G1_POINTS: + blobs = VALID_BLOBS + commitments_invalid = [commitment] + commitments[1:] + expect_exception(spec.verify_blob_kzg_proof_batch, blobs, commitments_invalid, proofs) + identifier = f'{encode_hex(hash(commitment))}' + yield f'verify_blob_kzg_proof_batch_case_invalid_commitment_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blobs': encode_hex_list(blobs), + 'commitments': encode_hex_list(commitments_invalid), + 'proofs': encode_hex_list(proofs), + }, + 'output': None + } - # Edge case: Invalid commitment, too many bytes - commitments_invalid_tooManyBytes = commitments[:3] + [commitments[3] + b"\x00"] + commitments[4:] - expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS, commitments, commitments_invalid_tooManyBytes) - yield 'verify_blob_kzg_proof_batch_case_commitment_too_many_bytes', { - 'input': { - 'blobs': encode_hex_list(VALID_BLOBS), - 'commitments': encode_hex_list(commitments_invalid_tooManyBytes), - 'proofs': encode_hex_list(proofs), - }, - 'output': None - } + # Edge case: Invalid proof + for proof in INVALID_G1_POINTS: + blobs = VALID_BLOBS + proofs_invalid = [proof] + proofs[1:] + expect_exception(spec.verify_blob_kzg_proof_batch, blobs, commitments, proofs_invalid) + identifier = f'{encode_hex(hash(proof))}' + yield f'verify_blob_kzg_proof_batch_case_invalid_proof_{(hash(bytes(identifier, "utf-8"))[:8]).hex()}', { + 'input': { + 'blobs': 
encode_hex_list(blobs), + 'commitments': encode_hex_list(commitments), + 'proofs': encode_hex_list(proofs_invalid), + }, + 'output': None + } # Edge case: Blob length different expect_exception(spec.verify_blob_kzg_proof_batch, VALID_BLOBS[:-1], commitments, proofs) From dfdbe15e42ece956e6525b4cb4c26227083227db Mon Sep 17 00:00:00 2001 From: Justin Traglia Date: Tue, 2 May 2023 13:05:41 -0500 Subject: [PATCH 205/210] Fix linter issues --- tests/generators/kzg_4844/main.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/generators/kzg_4844/main.py b/tests/generators/kzg_4844/main.py index 7ddc3471cd..2f3efeb21a 100644 --- a/tests/generators/kzg_4844/main.py +++ b/tests/generators/kzg_4844/main.py @@ -60,9 +60,9 @@ def evaluate_blob_at(blob, z): G1_INVALID_TOO_FEW_BYTES = G1[:-1] G1_INVALID_TOO_MANY_BYTES = G1 + b"\x00" G1_INVALID_P1_NOT_IN_G1 = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + - "0123456789abcdef0123456789abcdef0123456789abcdef") + "0123456789abcdef0123456789abcdef0123456789abcdef") G1_INVALID_P1_NOT_ON_CURVE = bytes.fromhex("8123456789abcdef0123456789abcdef0123456789abcdef" + - "0123456789abcdef0123456789abcdef0123456789abcde0") + "0123456789abcdef0123456789abcdef0123456789abcde0") INVALID_G1_POINTS = [G1_INVALID_TOO_FEW_BYTES, G1_INVALID_TOO_MANY_BYTES, G1_INVALID_P1_NOT_IN_G1, G1_INVALID_P1_NOT_ON_CURVE] From 057517526e7f24a11483e5028128614ad055f822 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Wed, 3 May 2023 17:25:04 +0800 Subject: [PATCH 206/210] Set python_requires=">=3.9" (#2964) --- .circleci/config.yml | 22 +++++++++++----------- setup.py | 2 +- 2 files changed, 12 insertions(+), 12 deletions(-) diff --git a/.circleci/config.yml b/.circleci/config.yml index 1d5b098111..5958a2fc69 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -60,7 +60,7 @@ commands: jobs: checkout_specs: docker: - - image: circleci/python:3.8 + - image: circleci/python:3.9 working_directory: ~/specs-repo 
steps: # Restore git repo at point close to target branch/revision, to speed up checkout @@ -80,7 +80,7 @@ jobs: - ~/specs-repo install_pyspec_test: docker: - - image: circleci/python:3.8 + - image: circleci/python:3.9 working_directory: ~/specs-repo steps: - restore_cache: @@ -92,7 +92,7 @@ jobs: - save_pyspec_cached_venv test-phase0: docker: - - image: circleci/python:3.8 + - image: circleci/python:3.9 working_directory: ~/specs-repo steps: - restore_cache: @@ -105,7 +105,7 @@ jobs: path: tests/core/pyspec/test-reports test-altair: docker: - - image: circleci/python:3.8 + - image: circleci/python:3.9 working_directory: ~/specs-repo steps: - restore_cache: @@ -118,7 +118,7 @@ jobs: path: tests/core/pyspec/test-reports test-bellatrix: docker: - - image: circleci/python:3.8 + - image: circleci/python:3.9 working_directory: ~/specs-repo steps: - restore_cache: @@ -131,7 +131,7 @@ jobs: path: tests/core/pyspec/test-reports test-capella: docker: - - image: circleci/python:3.8 + - image: circleci/python:3.9 working_directory: ~/specs-repo steps: - restore_cache: @@ -144,7 +144,7 @@ jobs: path: tests/core/pyspec/test-reports test-deneb: docker: - - image: circleci/python:3.8 + - image: circleci/python:3.9 working_directory: ~/specs-repo steps: - restore_cache: @@ -179,7 +179,7 @@ jobs: command: sudo npm install -g doctoc@2 && make check_toc codespell: docker: - - image: circleci/python:3.8 + - image: circleci/python:3.9 working_directory: ~/specs-repo steps: - checkout @@ -188,7 +188,7 @@ jobs: command: pip install 'codespell<3.0.0,>=2.0.0' --user && make codespell lint: docker: - - image: circleci/python:3.8 + - image: circleci/python:3.9 working_directory: ~/specs-repo steps: - restore_cache: @@ -244,7 +244,7 @@ jobs: - /nix install_deposit_contract_web3_tester: docker: - - image: circleci/python:3.8 + - image: circleci/python:3.9 working_directory: ~/specs-repo steps: - restore_cache: @@ -256,7 +256,7 @@ jobs: - save_deposit_contract_tester_cached_venv 
test_deposit_contract_web3_tests: docker: - - image: circleci/python:3.8 + - image: circleci/python:3.9 working_directory: ~/specs-repo steps: - restore_cache: diff --git a/setup.py b/setup.py index a7fe7d9e02..5d27369794 100644 --- a/setup.py +++ b/setup.py @@ -1180,7 +1180,7 @@ def run(self): packages=find_packages(where='tests/core/pyspec') + ['configs', 'specs'], py_modules=["eth2spec"], cmdclass=commands, - python_requires=">=3.8, <4", + python_requires=">=3.9, <4", extras_require={ "test": ["pytest>=4.4", "pytest-cov", "pytest-xdist"], "lint": ["flake8==5.0.4", "mypy==0.981", "pylint==2.15.3"], From 4244dc8e139f299b0b0a2cdaf1f045b69a9b0917 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 4 May 2023 17:55:14 +0800 Subject: [PATCH 207/210] Make `excess_data_gas` a param of `run_block_with_blobs` --- tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py index 2b99d6a725..111565cce2 100644 --- a/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py +++ b/tests/core/pyspec/eth2spec/test/deneb/sanity/test_blocks.py @@ -18,14 +18,14 @@ ) -def run_block_with_blobs(spec, state, blob_count): +def run_block_with_blobs(spec, state, blob_count, excess_data_gas=1): yield 'pre', state block = build_empty_block_for_next_slot(spec, state) opaque_tx, _, blob_kzg_commitments, _ = get_sample_opaque_tx(spec, blob_count=blob_count) block.body.blob_kzg_commitments = blob_kzg_commitments block.body.execution_payload.transactions = [opaque_tx] - block.body.execution_payload.excess_data_gas = 2 + block.body.execution_payload.excess_data_gas = excess_data_gas block.body.execution_payload.block_hash = compute_el_block_hash(spec, block.body.execution_payload) signed_block = state_transition_and_sign_block(spec, state, block) From 79b8a9abecded921179d2b2854d8dc7b8c570d5d Mon Sep 
17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 4 May 2023 18:09:01 +0800 Subject: [PATCH 208/210] Apply suggestions from code review --- specs/phase0/validator.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md index 5266fec7ae..92eadde5f1 100644 --- a/specs/phase0/validator.md +++ b/specs/phase0/validator.md @@ -612,7 +612,7 @@ Because Phase 0 does not have shards and thus does not have Shard Committees, th * Remain subscribed to `SUBNETS_PER_NODE` for `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs. * Maintain advertisement of the selected subnets in their node's ENR `attnets` entry by setting the selected `subnet_id` bits to `True` (e.g. `ENR["attnets"][subnet_id] = True`) for all persistent attestation subnets. -* Select these subnets based on their node-id as specified by the following `compute_subscribed_subnets(node_id,epoch)` function. +* Select these subnets based on their node-id as specified by the following `compute_subscribed_subnets(node_id, epoch)` function. ```python def compute_subscribed_subnet(node_id: int, epoch: Epoch, index: int) -> int: @@ -629,7 +629,7 @@ def compute_subscribed_subnet(node_id: int, epoch: Epoch, index: int) -> int: ```python def compute_subscribed_subnets(node_id: int, epoch: Epoch) -> Sequence[int]: - return [compute_subscribed_subnet(node_id, epoch, idx) for idx in range(SUBNETS_PER_NODE)] + return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)] ``` *Note*: When preparing for a hard fork, a validator must select and subscribe to subnets of the future fork versioning at least `EPOCHS_PER_SUBNET_SUBSCRIPTION` epochs in advance of the fork. These new subnets for the fork are maintained in addition to those for the current fork until the fork occurs. After the fork occurs, let the subnets from the previous fork reach the end of life with no replacements. 
From 5cb2733ed5271c582fb2235367558ff8950dd7a2 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Thu, 4 May 2023 18:50:13 +0800 Subject: [PATCH 209/210] Add custom types `NodeID` and `SubnetID` and constant `NODE_ID_BITS` --- setup.py | 4 +-- specs/phase0/validator.md | 25 ++++++++++++++----- .../unittests/test_config_invariants.py | 2 ++ 3 files changed, 23 insertions(+), 8 deletions(-) diff --git a/setup.py b/setup.py index 52bad2b71b..f1130eb586 100644 --- a/setup.py +++ b/setup.py @@ -383,7 +383,7 @@ def imports(cls, preset_name: str) -> str: from eth2spec.utils.ssz.ssz_impl import hash_tree_root, copy, uint_to_bytes from eth2spec.utils.ssz.ssz_typing import ( - View, boolean, Container, List, Vector, uint8, uint32, uint64, + View, boolean, Container, List, Vector, uint8, uint32, uint64, uint256, Bytes1, Bytes4, Bytes32, Bytes48, Bytes96, Bitlist) from eth2spec.utils.ssz.ssz_typing import Bitvector # noqa: F401 from eth2spec.utils import bls @@ -551,7 +551,7 @@ def imports(cls, preset_name: str): return super().imports(preset_name) + f''' from typing import Protocol from eth2spec.altair import {preset_name} as altair -from eth2spec.utils.ssz.ssz_typing import Bytes8, Bytes20, ByteList, ByteVector, uint256 +from eth2spec.utils.ssz.ssz_typing import Bytes8, Bytes20, ByteList, ByteVector ''' @classmethod diff --git a/specs/phase0/validator.md b/specs/phase0/validator.md index 92eadde5f1..604350ed80 100644 --- a/specs/phase0/validator.md +++ b/specs/phase0/validator.md @@ -10,6 +10,7 @@ This is an accompanying document to [Phase 0 -- The Beacon Chain](./beacon-chain - [Introduction](#introduction) - [Prerequisites](#prerequisites) +- [Custom types](#custom-types) - [Constants](#constants) - [Misc](#misc) - [Containers](#containers) @@ -82,6 +83,15 @@ A validator is an entity that participates in the consensus of the Ethereum proo All terminology, constants, functions, and protocol mechanics defined in the [Phase 0 -- The Beacon Chain](./beacon-chain.md) and [Phase 
0 -- Deposit Contract](./deposit-contract.md) doc are requisite for this document and used throughout. Please see the Phase 0 doc before continuing and use as a reference throughout. +## Custom types + +We define the following Python custom types for type hinting and readability: + +| Name | SSZ equivalent | Description | +| - | - | - | +| `NodeID` | `uint256` | node identifier | +| `SubnetID` | `uint64` | subnet identifier | + ## Constants ### Misc @@ -94,6 +104,7 @@ All terminology, constants, functions, and protocol mechanics defined in the [Ph | `ATTESTATION_SUBNET_EXTRA_BITS` | `0` | The number of extra bits of a NodeId to use when mapping to a subscribed subnet | | `SUBNETS_PER_NODE` | `2` | The number of long-lived subnets a beacon node should be subscribed to. | | `ATTESTATION_SUBNET_PREFIX_BITS` | `(ceillog2(ATTESTATION_SUBNET_COUNT) + ATTESTATION_SUBNET_EXTRA_BITS)` | | +| `NODE_ID_BITS` | `256` | The bit length of uint256 is 256 | ## Containers @@ -515,7 +526,9 @@ The `subnet_id` for the `attestation` is calculated with: - Let `subnet_id = compute_subnet_for_attestation(committees_per_slot, attestation.data.slot, attestation.data.index)`. ```python -def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, committee_index: CommitteeIndex) -> uint64: +def compute_subnet_for_attestation(committees_per_slot: uint64, + slot: Slot, + committee_index: CommitteeIndex) -> SubnetID: """ Compute the correct subnet for an attestation for Phase 0. Note, this mimics expected future behavior where attestations will be mapped to their shard subnet. 
@@ -523,7 +536,7 @@ def compute_subnet_for_attestation(committees_per_slot: uint64, slot: Slot, comm slots_since_epoch_start = uint64(slot % SLOTS_PER_EPOCH) committees_since_epoch_start = committees_per_slot * slots_since_epoch_start - return uint64((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT) + return SubnetID((committees_since_epoch_start + committee_index) % ATTESTATION_SUBNET_COUNT) ``` ### Attestation aggregation @@ -615,8 +628,8 @@ Because Phase 0 does not have shards and thus does not have Shard Committees, th * Select these subnets based on their node-id as specified by the following `compute_subscribed_subnets(node_id, epoch)` function. ```python -def compute_subscribed_subnet(node_id: int, epoch: Epoch, index: int) -> int: - node_id_prefix = node_id >> (256 - int(ATTESTATION_SUBNET_PREFIX_BITS)) +def compute_subscribed_subnet(node_id: NodeID, epoch: Epoch, index: int) -> SubnetID: + node_id_prefix = node_id >> (NODE_ID_BITS - int(ATTESTATION_SUBNET_PREFIX_BITS)) node_offset = node_id % EPOCHS_PER_SUBNET_SUBSCRIPTION permutation_seed = hash(uint_to_bytes(uint64((epoch + node_offset) // EPOCHS_PER_SUBNET_SUBSCRIPTION))) permutated_prefix = compute_shuffled_index( @@ -624,11 +637,11 @@ def compute_subscribed_subnet(node_id: int, epoch: Epoch, index: int) -> int: 1 << int(ATTESTATION_SUBNET_PREFIX_BITS), permutation_seed, ) - return (permutated_prefix + index) % ATTESTATION_SUBNET_COUNT + return SubnetID((permutated_prefix + index) % ATTESTATION_SUBNET_COUNT) ``` ```python -def compute_subscribed_subnets(node_id: int, epoch: Epoch) -> Sequence[int]: +def compute_subscribed_subnets(node_id: NodeID, epoch: Epoch) -> Sequence[SubnetID]: return [compute_subscribed_subnet(node_id, epoch, index) for index in range(SUBNETS_PER_NODE)] ``` diff --git a/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py index 69aa3eb2a5..b0fd06374d 
100644 --- a/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py +++ b/tests/core/pyspec/eth2spec/test/phase0/unittests/test_config_invariants.py @@ -76,6 +76,8 @@ def test_time(spec, state): @spec_state_test def test_networking(spec, state): assert spec.SUBNETS_PER_NODE <= spec.ATTESTATION_SUBNET_COUNT + node_id_length = spec.NodeID(1).type_byte_length() # in bytes + assert node_id_length * 8 == spec.NODE_ID_BITS # in bits @with_all_phases From 5e2a18a319bfe0be233444d202b6306c81dc0f64 Mon Sep 17 00:00:00 2001 From: Hsiao-Wei Wang Date: Sat, 6 May 2023 17:45:22 +0800 Subject: [PATCH 210/210] Fix `test_randomized_state` and `test_randomized_state_leaking` --- .../test_process_inactivity_updates.py | 14 +++++++++++--- 1 file changed, 11 insertions(+), 3 deletions(-) diff --git a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py index 0816dfad62..57fe8b9ca3 100644 --- a/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py +++ b/tests/core/pyspec/eth2spec/test/altair/epoch_processing/test_process_inactivity_updates.py @@ -54,7 +54,15 @@ def test_genesis_random_scores(spec, state): # def run_inactivity_scores_test(spec, state, participation_fn=None, inactivity_scores_fn=None, rng=Random(10101)): - next_epoch_via_block(spec, state) + while True: + try: + next_epoch_via_block(spec, state) + except AssertionError: + # If the proposer is slashed, we skip this epoch and try to propose block at the next epoch + next_epoch(spec, state) + else: + break + if participation_fn is not None: participation_fn(spec, state, rng=rng) if inactivity_scores_fn is not None: @@ -363,7 +371,7 @@ def test_randomized_state(spec, state): their inactivity score does not change. 
""" rng = Random(10011001) - _run_randomized_state_test_for_inactivity_updates(spec, state, rng=rng) + yield from _run_randomized_state_test_for_inactivity_updates(spec, state, rng=rng) @with_altair_and_later @@ -377,6 +385,6 @@ def test_randomized_state_leaking(spec, state): (refer ``get_eligible_validator_indices`). """ rng = Random(10011002) - _run_randomized_state_test_for_inactivity_updates(spec, state, rng=rng) + yield from _run_randomized_state_test_for_inactivity_updates(spec, state, rng=rng) # Check still in leak assert spec.is_in_inactivity_leak(state)