Merge branch 'zkevm' into fix/bump-zkevm-data-streamer-version
Stefan-Ethernal committed Aug 12, 2024
2 parents 55a9388 + dba73ec commit d9a37a7
Showing 17 changed files with 827 additions and 267 deletions.
4 changes: 4 additions & 0 deletions chain/chain_config.go
@@ -146,6 +146,10 @@ func (c *Config) SetForkIdBlock(forkIdNumber constants.ForkId, blockNum uint64)
c.ForkID88ElderberryBlock = new(big.Int).SetUint64(blockNum)
case constants.ForkID9Elderberry2:
c.ForkID9Elderberry2Block = new(big.Int).SetUint64(blockNum)
case constants.ForkID10:
c.ForkID10 = new(big.Int).SetUint64(blockNum)
case constants.ForkID11:
c.ForkID11 = new(big.Int).SetUint64(blockNum)
case constants.ForkID12Banana:
c.ForkID12BananaBlock = new(big.Int).SetUint64(blockNum)
default:
9 changes: 9 additions & 0 deletions core/state/intra_block_state_zkevm.go
@@ -10,6 +10,7 @@ import (
"github.com/ledgerwatch/erigon/common"
"github.com/ledgerwatch/erigon/core/types"
dstypes "github.com/ledgerwatch/erigon/zk/datastream/types"
zktypes "github.com/ledgerwatch/erigon/zk/types"
)

var (
@@ -33,6 +34,14 @@ type ReadOnlyHermezDb interface {
GetGerForL1BlockHash(l1BlockHash libcommon.Hash) (libcommon.Hash, error)
GetIntermediateTxStateRoot(blockNum uint64, txhash libcommon.Hash) (libcommon.Hash, error)
GetReusedL1InfoTreeIndex(blockNum uint64) (bool, error)
GetSequenceByBatchNo(batchNo uint64) (*zktypes.L1BatchInfo, error)
GetHighestBlockInBatch(batchNo uint64) (uint64, error)
GetLowestBlockInBatch(batchNo uint64) (uint64, bool, error)
GetL2BlockNosByBatch(batchNo uint64) ([]uint64, error)
GetBatchGlobalExitRoot(batchNum uint64) (*dstypes.GerUpdate, error)
GetVerificationByBatchNo(batchNo uint64) (*zktypes.L1BatchInfo, error)
GetL1BatchData(batchNumber uint64) ([]byte, error)
GetL1InfoTreeUpdateByGer(ger libcommon.Hash) (*zktypes.L1InfoTreeUpdate, error)
}

func (sdb *IntraBlockState) GetTxCount() (uint64, error) {
9 changes: 9 additions & 0 deletions core/types/receipt.go
@@ -495,3 +495,12 @@ func (r Receipts) DeriveFields(hash libcommon.Hash, number uint64, txs Transacti
}
return nil
}

// ToSlice converts Receipts into slice of value type Receipt
func (r Receipts) ToSlice() []Receipt {
result := make([]Receipt, 0, r.Len())
for _, receipt := range r {
result = append(result, *receipt)
}
return result
}
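
For orientation, a minimal usage sketch of the new `ToSlice` helper; the surrounding `main` wiring is hypothetical and not part of this commit:

```go
package main

import (
	"fmt"

	"github.com/ledgerwatch/erigon/core/types"
)

func main() {
	// Receipts is a slice of *Receipt; ToSlice copies the pointed-to values
	// into a flat []Receipt of the same length.
	receipts := types.Receipts{new(types.Receipt), new(types.Receipt)}
	flat := receipts.ToSlice()
	fmt.Println(len(flat)) // 2
}
```
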
7 changes: 5 additions & 2 deletions eth/backend.go
@@ -785,8 +785,11 @@ func New(stack *node.Node, config *ethconfig.Config) (*Ethereum, error) {
isSequencer := sequencer.IsSequencer()

// if the L1 block sync is set we're in recovery so can't run as a sequencer
if cfg.L1SyncStartBlock > 0 && !isSequencer {
panic("you cannot launch in l1 sync mode as an RPC node")
if cfg.L1SyncStartBlock > 0 {
if !isSequencer {
panic("you cannot launch in l1 sync mode as an RPC node")
}
log.Info("Starting sequencer in L1 recovery mode", "startBlock", cfg.L1SyncStartBlock)
}

seqAndVerifTopics := [][]libcommon.Hash{{
130 changes: 39 additions & 91 deletions zk/datastream/client/stream_client.go
@@ -45,11 +45,7 @@ type StreamClient struct {
progress atomic.Uint64

// Channels
batchStartChan chan types.BatchStart
batchEndChan chan types.BatchEnd
l2BlockChan chan types.FullL2Block
l2TxChan chan types.L2TransactionProto
gerUpdatesChan chan types.GerUpdate // NB: unused from etrog onwards (forkid 7)
entryChan chan interface{}

// keeps track of the latest fork from the stream to assign to l2 blocks
currentFork uint64
@@ -70,17 +66,14 @@ const (
// server must be in format "url:port"
func NewClient(ctx context.Context, server string, version int, checkTimeout time.Duration, latestDownloadedForkId uint16) *StreamClient {
c := &StreamClient{
ctx: ctx,
checkTimeout: checkTimeout,
server: server,
version: version,
streamType: StSequencer,
id: "",
batchStartChan: make(chan types.BatchStart, 100),
batchEndChan: make(chan types.BatchEnd, 100),
l2BlockChan: make(chan types.FullL2Block, 100000),
gerUpdatesChan: make(chan types.GerUpdate, 1000),
currentFork: uint64(latestDownloadedForkId),
ctx: ctx,
checkTimeout: checkTimeout,
server: server,
version: version,
streamType: StSequencer,
id: "",
entryChan: make(chan interface{}, 100000),
currentFork: uint64(latestDownloadedForkId),
}

return c
@@ -90,20 +83,8 @@ func (c *StreamClient) IsVersion3() bool {
return c.version >= versionAddedBlockEnd
}

func (c *StreamClient) GetBatchStartChan() chan types.BatchStart {
return c.batchStartChan
}
func (c *StreamClient) GetBatchEndChan() chan types.BatchEnd {
return c.batchEndChan
}
func (c *StreamClient) GetL2BlockChan() chan types.FullL2Block {
return c.l2BlockChan
}
func (c *StreamClient) GetL2TxChan() chan types.L2TransactionProto {
return c.l2TxChan
}
func (c *StreamClient) GetGerUpdatesChan() chan types.GerUpdate {
return c.gerUpdatesChan
func (c *StreamClient) GetEntryChan() chan interface{} {
return c.entryChan
}
func (c *StreamClient) GetLastWrittenTimeAtomic() *atomic.Int64 {
return &c.lastWrittenTime
@@ -135,8 +116,7 @@ func (c *StreamClient) Stop() {
}
c.conn.Close()

close(c.l2BlockChan)
close(c.gerUpdatesChan)
close(c.entryChan)
}

// Command header: Get status
@@ -324,38 +304,30 @@ LOOP:
c.conn.SetReadDeadline(time.Now().Add(c.checkTimeout))
}

fullBlock, batchStart, batchEnd, gerUpdate, batchBookmark, blockBookmark, localErr := c.readFullBlockProto()
parsedProto, localErr := c.readParsedProto()
if localErr != nil {
err = localErr
break
}
c.lastWrittenTime.Store(time.Now().UnixNano())

// skip over bookmarks (but only when fullblock is nil or will miss l2 blocks)
if batchBookmark != nil || blockBookmark != nil {
switch parsedProto := parsedProto.(type) {
case *types.BookmarkProto:
continue
}

// write batch starts to channel
if batchStart != nil {
c.currentFork = (*batchStart).ForkId
c.batchStartChan <- *batchStart
}

if gerUpdate != nil {
c.gerUpdatesChan <- *gerUpdate
}

if batchEnd != nil {
// this check was inside c.readFullBlockProto() but it is better to move it here
c.batchEndChan <- *batchEnd
}

// ensure the block is assigned the currently known fork
if fullBlock != nil {
fullBlock.ForkId = c.currentFork
log.Trace("writing block to channel", "blockNumber", fullBlock.L2BlockNumber, "batchNumber", fullBlock.BatchNumber)
c.l2BlockChan <- *fullBlock
case *types.BatchStart:
c.currentFork = parsedProto.ForkId
c.entryChan <- parsedProto
case *types.GerUpdateProto:
c.entryChan <- parsedProto
case *types.BatchEnd:
c.entryChan <- parsedProto
case *types.FullL2Block:
parsedProto.ForkId = c.currentFork
log.Trace("writing block to channel", "blockNumber", parsedProto.L2BlockNumber, "batchNumber", parsedProto.BatchNumber)
c.entryChan <- parsedProto
default:
err = fmt.Errorf("unexpected entry type: %v", parsedProto)
break LOOP
}
}

@@ -381,13 +353,8 @@ func (c *StreamClient) tryReConnect() error {
return err
}

func (c *StreamClient) readFullBlockProto() (
l2Block *types.FullL2Block,
batchStart *types.BatchStart,
batchEnd *types.BatchEnd,
gerUpdate *types.GerUpdate,
batchBookmark *types.BookmarkProto,
blockBookmark *types.BookmarkProto,
func (c *StreamClient) readParsedProto() (
parsedEntry interface{},
err error,
) {
file, err := c.readFileEntry()
@@ -398,34 +365,15 @@ func (c *StreamClient) readFullBlockProto() (

switch file.EntryType {
case types.BookmarkEntryType:
var bookmark *types.BookmarkProto
if bookmark, err = types.UnmarshalBookmark(file.Data); err != nil {
return
}
if bookmark.BookmarkType() == datastream.BookmarkType_BOOKMARK_TYPE_BATCH {
batchBookmark = bookmark
return
} else {
blockBookmark = bookmark
return
}
parsedEntry, err = types.UnmarshalBookmark(file.Data)
case types.EntryTypeGerUpdate:
if gerUpdate, err = types.DecodeGerUpdateProto(file.Data); err != nil {
return
}
log.Trace("ger update", "ger", gerUpdate)
return
parsedEntry, err = types.DecodeGerUpdateProto(file.Data)
case types.EntryTypeBatchStart:
if batchStart, err = types.UnmarshalBatchStart(file.Data); err != nil {
return
}
return
parsedEntry, err = types.UnmarshalBatchStart(file.Data)
case types.EntryTypeBatchEnd:
if batchEnd, err = types.UnmarshalBatchEnd(file.Data); err != nil {
return
}
return
parsedEntry, err = types.UnmarshalBatchEnd(file.Data)
case types.EntryTypeL2Block:
var l2Block *types.FullL2Block
if l2Block, err = types.UnmarshalL2Block(file.Data); err != nil {
return
}
@@ -467,7 +415,7 @@ func (c *StreamClient) readFullBlockProto() (
return
}
} else if innerFile.IsBatchEnd() {
if batchEnd, err = types.UnmarshalBatchEnd(file.Data); err != nil {
if _, err = types.UnmarshalBatchEnd(file.Data); err != nil {
return
}
break LOOP
Expand All @@ -478,14 +426,14 @@ func (c *StreamClient) readFullBlockProto() (
}

l2Block.L2Txs = txs
parsedEntry = l2Block
return
case types.EntryTypeL2Tx:
err = fmt.Errorf("unexpected l2Tx out of block")
return
default:
err = fmt.Errorf("unexpected entry type: %d", file.EntryType)
return
}
return
}

// reads file bytes from socket and tries to parse them
@@ -8,6 +8,7 @@ import (
"reflect"

"github.com/ledgerwatch/erigon/zk/datastream/client"
"github.com/ledgerwatch/erigon/zk/datastream/types"
"github.com/nsf/jsondiff"
)

@@ -80,13 +81,14 @@ func readFromClient(client *client.StreamClient, total int) ([]interface{}, erro

LOOP:
for {
select {
case d := <-client.GetL2BlockChan():
data = append(data, d)
count++
case d := <-client.GetGerUpdatesChan():
data = append(data, d)
entry := <-client.GetEntryChan()

switch entry.(type) {
case types.FullL2Block:
case types.GerUpdate:
data = append(data, entry)
count++
default:
}

if count == total {
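
To illustrate the consumer side of the unified entry channel introduced in this commit, here is a minimal sketch; `consumeEntries` and its wiring are hypothetical, while the client and types APIs are the ones shown in the diff above:

```go
package main

import (
	"fmt"

	"github.com/ledgerwatch/erigon/zk/datastream/client"
	"github.com/ledgerwatch/erigon/zk/datastream/types"
)

// consumeEntries drains the single entry channel and type-switches on each
// parsed entry, mirroring the pattern used by the updated stream client.
func consumeEntries(c *client.StreamClient) {
	for entry := range c.GetEntryChan() {
		switch e := entry.(type) {
		case *types.BatchStart:
			fmt.Println("batch start, fork", e.ForkId)
		case *types.FullL2Block:
			fmt.Println("block", e.L2BlockNumber, "in batch", e.BatchNumber)
		case *types.BatchEnd:
			fmt.Println("batch end")
		default:
			// bookmarks are filtered out by the client; anything else is unexpected here
		}
	}
}

func main() {
	// Constructing a real client (see NewClient in the diff) and starting the
	// stream are omitted; this only shows the channel consumption pattern.
}
```
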
77 changes: 77 additions & 0 deletions zk/debug_tools/mdbx-data-browser/README.md
@@ -0,0 +1,77 @@
# MDBX data browser

The MDBX data browser is a CLI tool for querying the MDBX database used by CDK Erigon.
It offers two CLI commands:
- `output-blocks` - receives block numbers as a parameter and outputs information about those blocks
- `output-batches` - receives batch numbers as a parameter and outputs information about the retrieved batches

## CLI Commands Documentation
This section documents the CLI commands provided by the MDBX data browser.

### Global Flags

#### `verbose`
- **Name**: `verbose`
- **Usage**: If verbose output is enabled, it prints full details for the blocks and transactions in the batches; otherwise only their hashes.
- **Destination**: `&verboseOutput`
- **Default Value**: `false`

#### `file-output`
- **Name**: `file-output`
- **Usage**: If file output is enabled, all results are persisted to a file.
- **Destination**: `&fileOutput`
- **Default Value**: `false`

### Commands

#### `output-batches`
Outputs batches by their numbers from the Erigon database.
If the `verbose` flag is provided, it collects full data for the blocks and transactions in each batch (otherwise only their hashes).
If the `file-output` flag is provided, the results are written to a JSON file (otherwise printed to standard output).

- **Name**: `output-batches`
- **Usage**: Outputs batches by numbers.
- **Action**: `dumpBatchesByNumbers`
- **Flags**:
- `data-dir`: Specifies the data directory to use.
- `bn`: Batch numbers.
- **Name**: `bn`
- **Usage**: Batch numbers.
- **Destination**: `batchOrBlockNumbers`
- `verbose`: See [verbose](#verbose) flag.
- `file-output`: See [file-output](#file-output) flag.

#### `output-blocks`
Outputs blocks by their numbers from the Erigon database.
If the `verbose` flag is provided, it collects full data for the blocks and their transactions (otherwise only their hashes).
If the `file-output` flag is provided, the results are written to a JSON file (otherwise printed to standard output).

- **Name**: `output-blocks`
- **Usage**: Outputs blocks by numbers.
- **Action**: `dumpBlocksByNumbers`
- **Flags**:
- `data-dir`: Specifies the data directory to use.
- `bn`: Block numbers.
- **Name**: `bn`
- **Usage**: Block numbers.
- **Destination**: `batchOrBlockNumbers`
- `verbose`: See [verbose](#verbose) flag.
- `file-output`: See [file-output](#file-output) flag.

**Note:** If `output-blocks` is run with the `verbose` flag, the proper chain id must be set in `params/chainspecs/mainnet.json`. This is because CDK Erigon (for now) uses hardcoded data to recover transaction senders, and the chain id is read from that file.

### Example Usage

**Pre-requisite:** Navigate to the `zk/debug_tools/mdbx-data-browser` folder and run `go build -o mdbx-data-browser`.

#### `output-batches` Command

```sh
./mdbx-data-browser output-batches --datadir chaindata/ --bn 1,2,3 [--verbose] [--file-output]
```

#### `output-blocks` Command

```sh
./mdbx-data-browser output-blocks --datadir chaindata/ --bn 100,101,102 [--verbose] [--file-output]
```