PeerDAS testnet branch #6286

Closed · wants to merge 5 commits
2 changes: 1 addition & 1 deletion .gitmodules
@@ -214,7 +214,7 @@
path = vendor/nim-kzg4844
url = https://github.com/status-im/nim-kzg4844.git
ignore = untracked
branch = master
branch = peerdas
[submodule "vendor/nim-results"]
path = vendor/nim-results
url = https://github.com/arnetheduck/nim-results.git
55 changes: 47 additions & 8 deletions beacon_chain/gossip_processing/eth2_processor.nim
@@ -12,7 +12,7 @@ import
stew/results,
chronicles, chronos, metrics, taskpools,
../spec/[helpers, forks],
../spec/datatypes/[altair, phase0, deneb],
../spec/datatypes/[altair, phase0, deneb, eip7594],
../consensus_object_pools/[
blob_quarantine, block_clearance, block_quarantine, blockchain_dag,
attestation_pool, light_client_pool, sync_committee_msg_pool,
@@ -47,6 +47,10 @@ declareCounter blob_sidecars_received,
"Number of valid blobs processed by this node"
declareCounter blob_sidecars_dropped,
"Number of invalid blobs dropped by this node", labels = ["reason"]
declareCounter data_column_sidecars_received,
"Number of valid data column sidecars processed by this node"
declareCounter data_column_sidecars_dropped,
"Number of invalid data column sidecars dropped by this node", labels = ["reason"]
declareCounter beacon_attester_slashings_received,
"Number of valid attester slashings processed by this node"
declareCounter beacon_attester_slashings_dropped,
@@ -94,6 +98,9 @@ declareHistogram beacon_block_delay,
declareHistogram blob_sidecar_delay,
"Time(s) between slot start and blob sidecar reception", buckets = delayBuckets

declareHistogram data_column_sidecar_delay,
"Time(s) between slot start and data column sidecar reception", buckets = delayBuckets

type
DoppelgangerProtection = object
broadcastStartEpoch*: Epoch ##\
@@ -320,6 +327,41 @@ proc processBlobSidecar*(

v

proc processDataColumnSidecar*(
self: var Eth2Processor, src: MsgSource,
dataColumnSidecar: DataColumnSidecar, subnet_id: uint64): ValidationRes =
template block_header: untyped = dataColumnSidecar.signed_block_header.message

let
wallTime = self.getCurrentBeaconTime()
(_, wallSlot) = wallTime.toSlot()

logScope:
dcs = shortLog(dataColumnSidecar)
wallSlot

# Potential under/overflows are fine; would just create odd metrics and logs
let delay = wallTime - block_header.slot.start_beacon_time
debug "Data column received", delay

let v =
self.dag.validateDataColumnSidecar(self.quarantine, self.blobQuarantine,
dataColumnSidecar, wallTime, subnet_id)

if v.isErr():
debug "Dropping data column", error = v.error()
data_column_sidecars_dropped.inc(1, [$v.error[0]])
return v

debug "Data column validated"

# TODO do something with it!

data_column_sidecars_received.inc()
data_column_sidecar_delay.observe(delay.toFloatSeconds())

v

proc setupDoppelgangerDetection*(self: var Eth2Processor, slot: Slot) =
# When another client's already running, this is very likely to detect
# potential duplicate validators, which can trigger slashing.
@@ -342,8 +384,7 @@ proc clearDoppelgangerProtection*(self: var Eth2Processor) =
self.doppelgangerDetection.broadcastStartEpoch = FAR_FUTURE_EPOCH

proc checkForPotentialDoppelganger(
self: var Eth2Processor,
attestation: phase0.Attestation | electra.Attestation,
self: var Eth2Processor, attestation: phase0.Attestation | electra.Attestation,
attesterIndices: openArray[ValidatorIndex]) =
# Only check for attestations after node launch. There might be one slot of
# overlap in quick intra-slot restarts so trade off a few true negatives in
@@ -413,10 +454,8 @@ proc processAttestation*(

proc processSignedAggregateAndProof*(
self: ref Eth2Processor, src: MsgSource,
signedAggregateAndProof:
phase0.SignedAggregateAndProof | electra.SignedAggregateAndProof,
checkSignature = true, checkCover = true): Future[ValidationRes]
{.async: (raises: [CancelledError]).} =
signedAggregateAndProof: phase0.SignedAggregateAndProof | electra.SignedAggregateAndProof,
checkSignature = true, checkCover = true): Future[ValidationRes] {.async: (raises: [CancelledError]).} =
var wallTime = self.getCurrentBeaconTime()
let (afterGenesis, wallSlot) = wallTime.toSlot()

@@ -685,4 +724,4 @@ proc processLightClientOptimisticUpdate*(
beacon_light_client_optimistic_update_received.inc()
else:
beacon_light_client_optimistic_update_dropped.inc(1, [$v.error[0]])
v
v
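Note on the logging in processDataColumnSidecar above: `logScope` calls `shortLog(dataColumnSidecar)`, which assumes a `shortLog` overload for `DataColumnSidecar`. A minimal sketch of such a helper, modelled on the existing blob sidecar one and assuming the EIP-7594 field names `index`, `signed_block_header` and `kzg_commitments` (illustrative only, not part of this diff):

func shortLog*(v: DataColumnSidecar): auto =
  # Compact tuple for structured logging, mirroring the blob sidecar helper.
  (
    index: v.index,
    block_header: shortLog(v.signed_block_header.message),
    commitments: v.kzg_commitments.len,
  )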
114 changes: 114 additions & 0 deletions beacon_chain/gossip_processing/gossip_validation.nim
@@ -471,6 +471,120 @@ proc validateBlobSidecar*(

ok()

# https://github.com/ethereum/consensus-specs/blob/5f48840f4d768bf0e0a8156a3ed06ec333589007/specs/_features/eip7594/p2p-interface.md#the-gossip-domain-gossipsub
proc validateDataColumnSidecar*(
dag: ChainDAGRef, quarantine: ref Quarantine,
blobQuarantine: ref BlobQuarantine, data_column_sidecar: DataColumnSidecar,
wallTime: BeaconTime, subnet_id: uint64): Result[void, ValidationError] =

template block_header: untyped = data_column_sidecar.signed_block_header.message

# [REJECT] The sidecar's index is consistent with `NUMBER_OF_COLUMNS`
# -- i.e. `data_column_sidecar.index < NUMBER_OF_COLUMNS`
if not (data_column_sidecar.index < NUMBER_OF_COLUMNS):
return dag.checkedReject("DataColumnSidecar: The sidecar's index should be consistent with NUMBER_OF_COLUMNS")

# [REJECT] The sidecar is for the correct subnet
# -- i.e. `compute_subnet_for_data_column_sidecar(data_column_sidecar.index) == subnet_id`.
if not (compute_subnet_for_data_column_sidecar(data_column_sidecar.index) == subnet_id):
return dag.checkedReject("DataColumnSidecar: The sidecar is not for the correct subnet")

# [IGNORE] The sidecar is not from a future slot (with a `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance)
# -- i.e. validate that `block_header.slot <= current_slot` (a client MAY queue future sidecars for
# processing at the appropriate slot).
if not (block_header.slot <=
(wallTime + MAXIMUM_GOSSIP_CLOCK_DISPARITY).slotOrZero):
return errIgnore("DataColumnSidecar: slot too high")

# [IGNORE] The sidecar is from a slot greater than the latest
# finalized slot -- i.e. validate that `block_header.slot >
# compute_start_slot_at_epoch(state.finalized_checkpoint.epoch)`
if not (block_header.slot > dag.finalizedHead.slot):
return errIgnore("DataColumnSidecar: slot already finalized")

# TODO: [REJECT] The sidecar's `kzg_commitments` inclusion proof is valid as verified by
# `verify_data_column_sidecar_inclusion_proof(sidecar)`.

# TODO: [REJECT] The sidecar's column data is valid as
# verified by `verify_data_column_kzg_proofs(sidecar)`

# [IGNORE] The sidecar is the first sidecar for the tuple
# (block_header.slot, block_header.proposer_index, data_column_sidecar.index)
# with valid header signature, sidecar inclusion proof, and kzg proof.
let block_root = hash_tree_root(block_header)
if dag.getBlockRef(block_root).isSome():
return errIgnore("DataColumnSidecar: already have block")
if blobQuarantine[].hasBlob(
block_header.slot, block_header.proposer_index, data_column_sidecar.index):
return errIgnore("DataColumnSidecar: already have valid blob from same proposer")

# [IGNORE] The sidecar's block's parent (defined by
# `block_header.parent_root`) has been seen (via both gossip and
# non-gossip sources) (a client MAY queue sidecars for processing
# once the parent block is retrieved).
#
# [REJECT] The sidecar's block's parent (defined by
# `block_header.parent_root`) passes validation.
# let parent = dag.getBlockRef(block_header.parent_root).valueOr:
# if block_header.parent_root in quarantine[].unviable:
# quarantine[].addUnviable(block_root)
# return dag.checkedReject("DataColumnSidecar: parent not validated")
# else:
# quarantine[].addMissing(block_header.parent_root)
# return errIgnore("DataColumnSidecar: parent not found")

# # [REJECT] The sidecar is proposed by the expected `proposer_index`
# # for the block's slot in the context of the current shuffling
# # (defined by `block_header.parent_root`/`block_header.slot`).
# # If the proposer_index cannot immediately be verified against the expected
# # shuffling, the sidecar MAY be queued for later processing while proposers
# # for the block's branch are calculated -- in such a case do not
# # REJECT, instead IGNORE this message.
# let proposer = getProposer(dag, parent, block_header.slot).valueOr:
# warn "cannot compute proposer for blob"
# return errIgnore("DataColumnSidecar: Cannot compute proposer") # internal issue

# if uint64(proposer) != block_header.proposer_index:
# return dag.checkedReject("BlobSidecar: Unexpected proposer")

# # [REJECT] The proposer signature of `blob_sidecar.signed_block_header`,
# # is valid with respect to the `block_header.proposer_index` pubkey.
# if not verify_block_signature(
# dag.forkAtEpoch(block_header.slot.epoch),
# getStateField(dag.headState, genesis_validators_root),
# block_header.slot,
# block_root,
# dag.validatorKey(proposer).get(),
# data_column_sidecar.signed_block_header.signature):
# return dag.checkedReject("DataColumnSidecar: Invalid proposer signature")

# # [REJECT] The sidecar is from a higher slot than the sidecar's
# # block's parent (defined by `block_header.parent_root`).
# if not (block_header.slot > parent.bid.slot):
# return dag.checkedReject("DataColumnSidecar: slot lower than parents'")

# # [REJECT] The current finalized_checkpoint is an ancestor of the sidecar's
# # block -- i.e. `get_checkpoint_block(store, block_header.parent_root,
# # store.finalized_checkpoint.epoch) == store.finalized_checkpoint.root`.
# let
# finalized_checkpoint = getStateField(dag.headState, finalized_checkpoint)
# ancestor = get_ancestor(parent, finalized_checkpoint.epoch.start_slot)

# if ancestor.isNil:
# # This shouldn't happen: we should always be able to trace the parent back
# # to the finalized checkpoint (else it wouldn't be in the DAG)
# return errIgnore("DataColumnSidecar: Can't find ancestor")

# if not (
# finalized_checkpoint.root == ancestor.root or
# finalized_checkpoint.root.isZero):
# quarantine[].addUnviable(block_root)
# return dag.checkedReject(
# "DataColumnSidecar: Finalized checkpoint not an ancestor")

ok()


# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/phase0/p2p-interface.md#beacon_block
# https://github.com/ethereum/consensus-specs/blob/v1.3.0/specs/bellatrix/p2p-interface.md#beacon_block
proc validateBeaconBlock*(
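The subnet check in `validateDataColumnSidecar` relies on `compute_subnet_for_data_column_sidecar`. A minimal Nim sketch of that mapping, following the EIP-7594 p2p spec linked above and assuming `DATA_COLUMN_SIDECAR_SUBNET_COUNT` is in scope (the real helper is expected to live with the other eip7594 spec code):

func compute_subnet_for_data_column_sidecar*(column_index: uint64): uint64 =
  # Each column index maps onto one of the fixed data column sidecar gossip subnets.
  column_index mod DATA_COLUMN_SIDECAR_SUBNET_COUNT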
2 changes: 1 addition & 1 deletion beacon_chain/networking/eth2_network.nim
@@ -831,7 +831,7 @@ template gossipMaxSize(T: untyped): uint32 =
when isFixedSize(T):
fixedPortionSize(T).uint32
elif T is bellatrix.SignedBeaconBlock or T is capella.SignedBeaconBlock or
T is deneb.SignedBeaconBlock or T is electra.SignedBeaconBlock:
T is deneb.SignedBeaconBlock or T is electra.SignedBeaconBlock or T is DataColumnSidecar:
GOSSIP_MAX_SIZE
# TODO https://github.com/status-im/nim-ssz-serialization/issues/20 for
# Attestation, AttesterSlashing, and SignedAggregateAndProof, which all
24 changes: 17 additions & 7 deletions beacon_chain/nimbus_beacon_node.nim
@@ -1107,7 +1107,7 @@ proc addCapellaMessageHandlers(
proc addDenebMessageHandlers(
node: BeaconNode, forkDigest: ForkDigest, slot: Slot) =
node.addCapellaMessageHandlers(forkDigest, slot)
for topic in blobSidecarTopics(forkDigest):
for topic in dataColumnSidecarTopics(forkDigest):
node.network.subscribe(topic, basicParams)

proc addElectraMessageHandlers(
@@ -1131,7 +1131,7 @@ proc removeCapellaMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =

proc removeDenebMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
node.removeCapellaMessageHandlers(forkDigest)
for topic in blobSidecarTopics(forkDigest):
for topic in dataColumnSidecarTopics(forkDigest):
node.network.unsubscribe(topic)

proc removeElectraMessageHandlers(node: BeaconNode, forkDigest: ForkDigest) =
@@ -1866,16 +1866,26 @@ proc installMessageValidators(node: BeaconNode) =
when consensusFork >= ConsensusFork.Deneb:
# blob_sidecar_{subnet_id}
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/p2p-interface.md#blob_sidecar_subnet_id
for it in BlobId:
# for it in BlobId:
# closureScope: # Needed for inner `proc`; don't lift it out of loop.
# let subnet_id = it
# node.network.addValidator(
# getBlobSidecarTopic(digest, subnet_id), proc (
# blobSidecar: deneb.BlobSidecar
# ): ValidationResult =
# toValidationResult(
# node.processor[].processBlobSidecar(
# MsgSource.gossip, blobSidecar, subnet_id)))
for it in 0'u64..<DATA_COLUMN_SIDECAR_SUBNET_COUNT:
closureScope: # Needed for inner `proc`; don't lift it out of loop.
let subnet_id = it
node.network.addValidator(
getBlobSidecarTopic(digest, subnet_id), proc (
blobSidecar: deneb.BlobSidecar
getDataColumnSidecarTopic(digest, subnet_id), proc (
dataColumnSidecar: DataColumnSidecar
): ValidationResult =
toValidationResult(
node.processor[].processBlobSidecar(
MsgSource.gossip, blobSidecar, subnet_id)))
node.processor[].processDataColumnSidecar(
MsgSource.gossip, dataColumnSidecar, subnet_id)))

node.installLightClientMessageValidators()

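The handler changes above assume topic helpers analogous to the blob sidecar ones. A hedged sketch of `getDataColumnSidecarTopic` and `dataColumnSidecarTopics`, using the `data_column_sidecar_{subnet_id}` topic name from the EIP-7594 p2p spec; `eth2Prefix` and `DATA_COLUMN_SIDECAR_SUBNET_COUNT` are assumed to exist just as they do for the blob topics:

func getDataColumnSidecarTopic*(forkDigest: ForkDigest, subnet_id: uint64): string =
  # One gossip topic per data column subnet, mirroring getBlobSidecarTopic.
  eth2Prefix(forkDigest) & "data_column_sidecar_" & $subnet_id & "/ssz_snappy"

iterator dataColumnSidecarTopics*(forkDigest: ForkDigest): string =
  # All data column sidecar topics for the given fork digest.
  for subnet_id in 0'u64 ..< DATA_COLUMN_SIDECAR_SUBNET_COUNT:
    yield getDataColumnSidecarTopic(forkDigest, subnet_id)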
12 changes: 10 additions & 2 deletions beacon_chain/spec/crypto.nim
@@ -46,6 +46,7 @@ export results, blscurve, rand, json_serialization
const
RawSigSize* = 96
RawPubKeySize* = 48
RawPointSize* = 48
UncompressedPubKeySize* = 96
# RawPrivKeySize* = 32 for BLST

@@ -94,6 +95,13 @@ type
key*: ValidatorPrivKey
id*: uint32

Point* = object
blob*: array[RawPointSize, byte]

BLSCommitment* = object



export
AggregateSignature

@@ -390,7 +398,7 @@ func toRaw*(x: ValidatorPrivKey): array[32, byte] =
# TODO: distinct type - see https://github.com/status-im/nim-blscurve/pull/67
static: doAssert BLS_BACKEND == BLST
result = SecretKey(x).exportRaw()

template toRaw*(x: ValidatorPubKey | ValidatorSig): auto =
x.blob

@@ -608,4 +616,4 @@ proc confirmShares*(pubKey: ValidatorPubKey,
let signature = share.key.blsSign(confirmationData).toSignatureShare(share.id);
signs.add(signature)
let recovered = signs.recoverSignature()
return pubKey.blsVerify(confirmationData, recovered)
return pubKey.blsVerify(confirmationData, recovered)
2 changes: 1 addition & 1 deletion beacon_chain/spec/datatypes/deneb.nim
@@ -32,7 +32,7 @@ export json_serialization, base, kzg4844

const
# https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/deneb/polynomial-commitments.md#constants
BYTES_PER_FIELD_ELEMENT = 32
BYTES_PER_FIELD_ELEMENT* = 32
BLS_MODULUS* = "52435875175126190479447740508185965837690552500527637822603658699938581184513".u256

type