draft release v1.3.1 #1941

Closed · wants to merge 14 commits

Changes from all commits
3 changes: 0 additions & 3 deletions .golangci.yml
@@ -51,9 +51,6 @@ issues:
- path: core/state/metrics.go
linters:
- unused
- path: core/state/statedb_fuzz_test.go
linters:
- unused
- path: core/txpool/legacypool/list.go
linters:
- staticcheck
9 changes: 9 additions & 0 deletions CHANGELOG.md
@@ -1,4 +1,13 @@
# Changelog
## v1.3.1
FEATURE
* [\#1881](https://github.com/bnb-chain/bsc/pull/1881) feat: active pbss
* [\#1882](https://github.com/bnb-chain/bsc/pull/1882) cmd/geth: add hbss to pbss convert tool
* [\#1916](https://github.com/bnb-chain/bsc/pull/1916) feat: cherry-pick pbss patch commits from eth repo in v1.13.2

BUGFIX
* [\#1923](https://github.com/bnb-chain/bsc/pull/1923) consensus/parlia: fix nextForkHash in Extra field of block header

## v1.3.0
#### RPC
* [internal/ethapi: add debug_getRawReceipts RPC method (#24773)](https://github.com/bnb-chain/bsc/pull/1840/commits/ae7d834bc752a2d94fef9d354ee78fcb9425f3d1)
4 changes: 2 additions & 2 deletions consensus/parlia/parlia.go
@@ -950,7 +950,7 @@ func (p *Parlia) Prepare(chain consensus.ChainHeaderReader, header *types.Header
}

header.Extra = header.Extra[:extraVanity-nextForkHashSize]
nextForkHash := forkid.NewID(p.chainConfig, p.genesisHash, number, header.Time).Hash
nextForkHash := forkid.NextForkHash(p.chainConfig, p.genesisHash, number, header.Time)
header.Extra = append(header.Extra, nextForkHash[:]...)

if err := p.prepareValidators(header); err != nil {
@@ -1084,7 +1084,7 @@ func (p *Parlia) Finalize(chain consensus.ChainHeaderReader, header *types.Header
if err != nil {
return err
}
nextForkHash := forkid.NewID(p.chainConfig, p.genesisHash, number, header.Time).Hash
nextForkHash := forkid.NextForkHash(p.chainConfig, p.genesisHash, number, header.Time)
if !snap.isMajorityFork(hex.EncodeToString(nextForkHash[:])) {
log.Debug("there is a possible fork, and your client is not the majority. Please check...", "nextForkHash", hex.EncodeToString(nextForkHash[:]))
}
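The parlia.go hunks above swap forkid.NewID(...).Hash for forkid.NextForkHash(...): NewID's hash only folds in forks that have already activated at the given head, while NextForkHash (added in core/forkid/forkid.go below) also folds in the next scheduled fork, so the four bytes written into header.Extra advertise the upcoming fork rather than the current one. The sketch below shows how those four bytes end up in the vanity portion of the extra-data; it assumes extraVanity = 32 and nextForkHashSize = 4 (the values used by Parlia) and is illustrative rather than the PR's code.

```go
// Illustrative sketch: placing a 4-byte fork hash at the end of the
// extra-data vanity, mirroring Parlia.Prepare in the diff above.
package main

import "fmt"

const (
	extraVanity      = 32 // assumed: bytes reserved before the validator data
	nextForkHashSize = 4  // assumed: trailing vanity bytes holding the fork hash
)

func main() {
	extra := make([]byte, extraVanity)              // vanity prefix of header.Extra
	nextForkHash := [4]byte{0x01, 0x02, 0x03, 0x04} // stand-in for forkid.NextForkHash(...)

	// Drop the old fork-hash bytes, then append the freshly computed ones.
	extra = extra[:extraVanity-nextForkHashSize]
	extra = append(extra, nextForkHash[:]...)
	fmt.Printf("vanity with fork hash: %x\n", extra)
}
```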
7 changes: 3 additions & 4 deletions core/blockchain.go
@@ -1180,8 +1180,7 @@ func (bc *BlockChain) Stop() {
// - HEAD-127: So we have a hard limit on the number of blocks reexecuted
if !bc.cacheConfig.TrieDirtyDisabled {
triedb := bc.triedb
var once sync.Once

var once sync.Once
for _, offset := range []uint64{0, 1, TriesInMemory - 1} {
if number := bc.CurrentBlock().Number.Uint64(); number > offset {
recent := bc.GetBlockByNumber(number - offset)
@@ -1191,9 +1190,9 @@ func (bc *BlockChain) Stop() {
} else {
rawdb.WriteSafePointBlockNumber(bc.db, recent.NumberU64())
once.Do(func() {
rawdb.WriteHeadBlockHash(bc.db, recent.Hash())
rawdb.WriteHeadBlockHash(bc.db, recent.Hash())
})
}
}
}
}
if snapBase != (common.Hash{}) {
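The blockchain.go hunk is mostly an indentation cleanup, but the pattern it preserves is worth spelling out: Stop() walks several recent blocks (offsets 0, 1 and TriesInMemory-1), records a safe point for each one whose state cannot be committed, and uses sync.Once so the head block marker is rewound only for the first, most recent candidate. A minimal sketch of that pattern with hypothetical block numbers, not the PR's code:

```go
// Minimal sketch of the sync.Once pattern from BlockChain.Stop: several
// fallback candidates are processed, but the head marker is written only
// for the first (most recent) one.
package main

import (
	"fmt"
	"sync"
)

func main() {
	var once sync.Once
	head := uint64(1_000_000)                    // hypothetical current block number
	for _, offset := range []uint64{0, 1, 127} { // mirrors {0, 1, TriesInMemory - 1}
		recent := head - offset
		fmt.Println("recording safe point at block", recent)
		once.Do(func() {
			// Runs only for the first candidate, i.e. the most recent block.
			fmt.Println("rewinding head block marker to", recent)
		})
	}
}
```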
26 changes: 26 additions & 0 deletions core/forkid/forkid.go
@@ -98,6 +98,32 @@ func NewID(config *params.ChainConfig, genesis common.Hash, head, time uint64) ID
return ID{Hash: checksumToBytes(hash), Next: 0}
}

// NextForkHash calculates the fork hash of all forks from genesis up to and including the next scheduled fork (by block number or time) after head
func NextForkHash(config *params.ChainConfig, genesis common.Hash, head uint64, time uint64) [4]byte {
// Calculate the starting checksum from the genesis hash
hash := crc32.ChecksumIEEE(genesis[:])

// Calculate the next fork checksum
forksByBlock, forksByTime := gatherForks(config)
for _, fork := range forksByBlock {
if fork > head {
// Checksum the previous hash and nextFork number and return
return checksumToBytes(checksumUpdate(hash, fork))
}
// Fork already passed, checksum the previous hash and the fork number
hash = checksumUpdate(hash, fork)
}
for _, fork := range forksByTime {
if fork > time {
// Checksum the previous hash and nextFork time and return
return checksumToBytes(checksumUpdate(hash, fork))
}
// Fork already passed, checksum the previous hash and the fork time
hash = checksumUpdate(hash, fork)
}
return checksumToBytes(hash)
}

// NewIDWithChain calculates the Ethereum fork ID from an existing chain instance.
func NewIDWithChain(chain Blockchain) ID {
head := chain.CurrentHeader()
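NextForkHash starts from the CRC32 of the genesis hash, folds in every fork that has already passed, and then folds in the next upcoming fork (by block number or time) before returning, which is what distinguishes it from NewID(...).Hash. The standalone sketch below reproduces that folding; checksumUpdate and checksumToBytes are unexported helpers in core/forkid, and the versions here are assumed equivalents (big-endian CRC32-IEEE updates), used with hypothetical fork numbers:

```go
// Self-contained sketch of the fork-hash folding used by NextForkHash.
// The two helpers below are assumed equivalents of the unexported
// checksumUpdate/checksumToBytes in core/forkid.
package main

import (
	"encoding/binary"
	"fmt"
	"hash/crc32"
)

func checksumUpdate(hash uint32, fork uint64) uint32 {
	var blob [8]byte
	binary.BigEndian.PutUint64(blob[:], fork)
	return crc32.Update(hash, crc32.IEEETable, blob[:])
}

func checksumToBytes(hash uint32) [4]byte {
	var out [4]byte
	binary.BigEndian.PutUint32(out[:], hash)
	return out
}

func main() {
	genesis := []byte("genesis-hash-placeholder")  // stand-in for the 32-byte genesis hash
	forksByBlock := []uint64{1_000, 2_000, 3_000}  // hypothetical fork block numbers
	head := uint64(1_500)

	hash := crc32.ChecksumIEEE(genesis)
	for _, fork := range forksByBlock {
		if fork > head {
			// Fold in the *next* fork as well, then stop: this is what makes
			// NextForkHash differ from NewID(...).Hash at the same head.
			fmt.Printf("next fork hash: %x\n", checksumToBytes(checksumUpdate(hash, fork)))
			return
		}
		hash = checksumUpdate(hash, fork) // fork already passed: fold and continue
	}
	fmt.Printf("no upcoming fork, hash: %x\n", checksumToBytes(hash))
}
```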
20 changes: 20 additions & 0 deletions core/rawdb/accessors_trie.go
@@ -89,6 +89,16 @@ func HasAccountTrieNode(db ethdb.KeyValueReader, path []byte, hash common.Hash)
return h.hash(data) == hash
}

// ExistsAccountTrieNode checks the presence of the account trie node with the
// specified node path, regardless of the node hash.
func ExistsAccountTrieNode(db ethdb.KeyValueReader, path []byte) bool {
has, err := db.Has(accountTrieNodeKey(path))
if err != nil {
return false
}
return has
}

// WriteAccountTrieNode writes the provided account trie node into database.
func WriteAccountTrieNode(db ethdb.KeyValueWriter, path []byte, node []byte) {
if err := db.Put(accountTrieNodeKey(path), node); err != nil {
@@ -127,6 +137,16 @@ func HasStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte, hash common.Hash)
return h.hash(data) == hash
}

// ExistsStorageTrieNode checks the presence of the storage trie node with the
// specified account hash and node path, regardless of the node hash.
func ExistsStorageTrieNode(db ethdb.KeyValueReader, accountHash common.Hash, path []byte) bool {
has, err := db.Has(storageTrieNodeKey(accountHash, path))
if err != nil {
return false
}
return has
}

// WriteStorageTrieNode writes the provided storage trie node into database.
func WriteStorageTrieNode(db ethdb.KeyValueWriter, accountHash common.Hash, path []byte, node []byte) {
if err := db.Put(storageTrieNodeKey(accountHash, path), node); err != nil {
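The new Exists* helpers answer a weaker question than the existing Has* helpers: Exists only checks that some node is stored at the path, while Has also verifies that the stored blob hashes to the expected value. A usage sketch of the difference, assuming the bsc module path github.com/ethereum/go-ethereum and that this PR's ExistsAccountTrieNode is available:

```go
// Usage sketch (not code from the PR) contrasting the presence-only check
// with the hash-verifying one.
package example

import (
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
)

// nodeChecks reports whether any account trie node is stored at path, and
// whether the stored node also hashes to the expected value. The second
// check can be false for a stale node that nevertheless exists.
func nodeChecks(db ethdb.KeyValueReader, path []byte, want common.Hash) (present, matches bool) {
	present = rawdb.ExistsAccountTrieNode(db, path)     // any node at this path?
	matches = rawdb.HasAccountTrieNode(db, path, want)  // node exists *and* hashes to want
	return present, matches
}
```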
6 changes: 6 additions & 0 deletions core/rawdb/freezer_table.go
@@ -220,6 +220,9 @@ func (t *freezerTable) repair() error {
}
// Ensure the index is a multiple of indexEntrySize bytes
if overflow := stat.Size() % indexEntrySize; overflow != 0 {
if t.readonly {
return fmt.Errorf("index file(path: %s, name: %s) size is not a multiple of %d", t.path, t.name, indexEntrySize)
}
truncateFreezerFile(t.index, stat.Size()-overflow) // New file can't trigger this path
}
// Retrieve the file sizes and prepare for truncation
@@ -278,6 +281,9 @@ func (t *freezerTable) repair() error {
// Keep truncating both files until they come in sync
contentExp = int64(lastIndex.offset)
for contentExp != contentSize {
if t.readonly {
return fmt.Errorf("freezer table(path: %s, name: %s, num: %d) is corrupted", t.path, t.name, lastIndex.filenum)
}
verbose = true
// Truncate the head file to the last offset pointer
if contentExp < contentSize {
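Both freezer_table.go hunks gate the table's self-repair on the read-only flag: a truncated index file or an index/head size mismatch is now reported as an error instead of being silently fixed by truncating files that were opened read-only. A minimal sketch of that guard, using hypothetical types and fields rather than the real freezerTable:

```go
// Minimal sketch of the read-only repair guard: destructive fixes are only
// applied when the table is writable. All names here are illustrative.
package main

import (
	"errors"
	"fmt"
)

type table struct {
	readonly bool
	size     int64
	expected int64
}

func (t *table) repair() error {
	if t.size != t.expected {
		if t.readonly {
			// Read-only open: report corruption instead of repairing it.
			return errors.New("freezer table is corrupted")
		}
		t.size = t.expected // writable open: truncate back into sync
	}
	return nil
}

func main() {
	fmt.Println((&table{readonly: true, size: 10, expected: 8}).repair())
	fmt.Println((&table{readonly: false, size: 10, expected: 8}).repair())
}
```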
12 changes: 10 additions & 2 deletions core/rawdb/schema.go
@@ -215,7 +215,11 @@ func accountSnapshotKey(hash common.Hash) []byte {

// storageSnapshotKey = SnapshotStoragePrefix + account hash + storage hash
func storageSnapshotKey(accountHash, storageHash common.Hash) []byte {
return append(append(SnapshotStoragePrefix, accountHash.Bytes()...), storageHash.Bytes()...)
buf := make([]byte, len(SnapshotStoragePrefix)+common.HashLength+common.HashLength)
n := copy(buf, SnapshotStoragePrefix)
n += copy(buf[n:], accountHash.Bytes())
copy(buf[n:], storageHash.Bytes())
return buf
}

// storageSnapshotsKey = SnapshotStoragePrefix + account hash + storage hash
@@ -274,7 +278,11 @@ func accountTrieNodeKey(path []byte) []byte {

// storageTrieNodeKey = trieNodeStoragePrefix + accountHash + nodePath.
func storageTrieNodeKey(accountHash common.Hash, path []byte) []byte {
return append(append(trieNodeStoragePrefix, accountHash.Bytes()...), path...)
buf := make([]byte, len(trieNodeStoragePrefix)+common.HashLength+len(path))
n := copy(buf, trieNodeStoragePrefix)
n += copy(buf[n:], accountHash.Bytes())
copy(buf[n:], path)
return buf
}

// IsLegacyTrieNode reports whether a provided database entry is a legacy trie
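The schema.go change replaces chained append calls with an exact-size buffer. Chaining append onto a package-level prefix slice can write into that slice's spare capacity, so two keys built from the same prefix may end up sharing (and clobbering) one backing array; copying into a freshly allocated buffer removes the aliasing. A small demonstration with hypothetical prefix and suffix values:

```go
// Demonstrates why chaining append onto a shared prefix slice is risky: if
// the prefix has spare capacity, the "new" key aliases it. The safe version
// mirrors the preallocated form of storageSnapshotKey in the diff above.
package main

import "fmt"

var prefix = make([]byte, 1, 64) // shared prefix with spare capacity

func riskyKey(suffix []byte) []byte {
	return append(prefix, suffix...) // may write into prefix's backing array
}

func safeKey(suffix []byte) []byte {
	buf := make([]byte, len(prefix)+len(suffix)) // exact-size, private buffer
	n := copy(buf, prefix)
	copy(buf[n:], suffix)
	return buf
}

func main() {
	prefix[0] = 'p'
	a := riskyKey([]byte("AAAA"))
	b := riskyKey([]byte("BBBB"))      // reuses the same backing array...
	fmt.Printf("risky: %s %s\n", a, b) // ...so a now also reads "pBBBB"

	c := safeKey([]byte("AAAA"))
	d := safeKey([]byte("BBBB"))
	fmt.Printf("safe:  %s %s\n", c, d) // independent buffers: "pAAAA" "pBBBB"
}
```

The preallocated form also does a single allocation per key instead of letting append potentially grow the slice twice, though the main motivation here appears to be removing the aliasing hazard.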
134 changes: 105 additions & 29 deletions core/state/statedb.go
@@ -42,7 +42,12 @@ import (
"github.com/ethereum/go-ethereum/trie/triestate"
)

const defaultNumOfSlots = 100
const (
defaultNumOfSlots = 100
// storageDeleteLimit denotes the highest permissible memory allocation
// employed for contract storage deletion.
storageDeleteLimit = 512 * 1024 * 1024
)

type revision struct {
id int
@@ -1342,63 +1347,134 @@ func (s *StateDB) clearJournalAndRefund() {
s.validRevisions = s.validRevisions[:0] // Snapshots can be created without journal entries
}

// deleteStorage iterates the storage trie belongs to the account and mark all
// slots inside as deleted.
func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) {
start := time.Now()
// fastDeleteStorage is the function that efficiently deletes the storage trie
// of a specific account. It leverages the associated state snapshot for fast
// storage iteration and constructs trie node deletion markers by creating
// stack trie with iterated slots.
func (s *StateDB) fastDeleteStorage(addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
iter, err := s.snaps.StorageIterator(s.originalRoot, addrHash, common.Hash{})
if err != nil {
return false, 0, nil, nil, err
}
defer iter.Release()

var (
size common.StorageSize
nodes = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte)
)
stack := trie.NewStackTrie(func(owner common.Hash, path []byte, hash common.Hash, blob []byte) {
nodes.AddNode(path, trienode.NewDeleted())
size += common.StorageSize(len(path))
})
for iter.Next() {
if size > storageDeleteLimit {
return true, size, nil, nil, nil
}
slot := common.CopyBytes(iter.Slot())
if err := iter.Error(); err != nil { // error might occur after Slot function
return false, 0, nil, nil, err
}
size += common.StorageSize(common.HashLength + len(slot))
slots[iter.Hash()] = slot

if err := stack.Update(iter.Hash().Bytes(), slot); err != nil {
return false, 0, nil, nil, err
}
}
if err := iter.Error(); err != nil { // error might occur during iteration
return false, 0, nil, nil, err
}
if stack.Hash() != root {
return false, 0, nil, nil, fmt.Errorf("snapshot is not matched, exp %x, got %x", root, stack.Hash())
}
return false, size, slots, nodes, nil
}

// slowDeleteStorage serves as a less-efficient alternative to "fastDeleteStorage,"
// employed when the associated state snapshot is not available. It iterates the
// storage slots along with all internal trie nodes via trie directly.
func (s *StateDB) slowDeleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, common.StorageSize, map[common.Hash][]byte, *trienode.NodeSet, error) {
tr, err := s.db.OpenStorageTrie(s.originalRoot, addr, root)
if err != nil {
return false, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
return false, 0, nil, nil, fmt.Errorf("failed to open storage trie, err: %w", err)
}
// skip deleting storages for EmptyTrie
if _, ok := tr.(*trie.EmptyTrie); ok {
return false, nil, nil, nil
return false, 0, nil, nil, nil
}
it, err := tr.NodeIterator(nil)
if err != nil {
return false, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
return false, 0, nil, nil, fmt.Errorf("failed to open storage iterator, err: %w", err)
}
var (
set = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte)
stateSize common.StorageSize
nodeSize common.StorageSize
size common.StorageSize
nodes = trienode.NewNodeSet(addrHash)
slots = make(map[common.Hash][]byte)
)
for it.Next(true) {
// arbitrary stateSize limit, make it configurable
if stateSize+nodeSize > 512*1024*1024 {
log.Info("Skip large storage deletion", "address", addr.Hex(), "states", stateSize, "nodes", nodeSize)
if metrics.EnabledExpensive {
slotDeletionSkip.Inc(1)
}
return true, nil, nil, nil
if size > storageDeleteLimit {
return true, size, nil, nil, nil
}
if it.Leaf() {
slots[common.BytesToHash(it.LeafKey())] = common.CopyBytes(it.LeafBlob())
stateSize += common.StorageSize(common.HashLength + len(it.LeafBlob()))
size += common.StorageSize(common.HashLength + len(it.LeafBlob()))
continue
}
if it.Hash() == (common.Hash{}) {
continue
}
nodeSize += common.StorageSize(len(it.Path()))
set.AddNode(it.Path(), trienode.NewDeleted())
size += common.StorageSize(len(it.Path()))
nodes.AddNode(it.Path(), trienode.NewDeleted())
}
if err := it.Error(); err != nil {
return false, 0, nil, nil, err
}
return false, size, slots, nodes, nil
}

// deleteStorage is designed to delete the storage trie of a designated account.
// It could potentially be terminated if the storage size is excessively large,
// potentially leading to an out-of-memory panic. The function will make an attempt
// to utilize an efficient strategy if the associated state snapshot is reachable;
// otherwise, it will resort to a less-efficient approach.
func (s *StateDB) deleteStorage(addr common.Address, addrHash common.Hash, root common.Hash) (bool, map[common.Hash][]byte, *trienode.NodeSet, error) {
var (
start = time.Now()
err error
aborted bool
size common.StorageSize
slots map[common.Hash][]byte
nodes *trienode.NodeSet
)
// The fast approach can fail if the snapshot is not fully
// generated, or it's internally corrupted. Fall back to the slow
// one just in case.
if s.snap != nil {
aborted, size, slots, nodes, err = s.fastDeleteStorage(addrHash, root)
}
if s.snap == nil || err != nil {
aborted, size, slots, nodes, err = s.slowDeleteStorage(addr, addrHash, root)
}
if err != nil {
return false, nil, nil, err
}
if metrics.EnabledExpensive {
if int64(len(slots)) > slotDeletionMaxCount.Value() {
slotDeletionMaxCount.Update(int64(len(slots)))
if aborted {
slotDeletionSkip.Inc(1)
}
n := int64(len(slots))
if n > slotDeletionMaxCount.Value() {
slotDeletionMaxCount.Update(n)
}
if int64(stateSize+nodeSize) > slotDeletionMaxSize.Value() {
slotDeletionMaxSize.Update(int64(stateSize + nodeSize))
if int64(size) > slotDeletionMaxSize.Value() {
slotDeletionMaxSize.Update(int64(size))
}
slotDeletionTimer.UpdateSince(start)
slotDeletionCount.Mark(int64(len(slots)))
slotDeletionSize.Mark(int64(stateSize + nodeSize))
slotDeletionCount.Mark(n)
slotDeletionSize.Mark(int64(size))
}
return false, slots, set, nil
return aborted, slots, nodes, nil
}

// handleDestruction processes all destruction markers and deletes the account
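The reworked deleteStorage tries the snapshot-backed fast path first and falls back to direct trie iteration when no snapshot is available or the fast path errors, and both paths abort once their bookkeeping exceeds storageDeleteLimit (512 MiB) instead of risking an out-of-memory. A compact, self-contained sketch of that fast-path/slow-path shape with a size budget; all names and numbers below are illustrative, not the PR's code:

```go
// Compact sketch of a fast-path/slow-path fallback with a size budget,
// mirroring the structure of StateDB.deleteStorage above.
package main

import (
	"errors"
	"fmt"
)

const deleteLimit = 512 * 1024 * 1024 // bytes of bookkeeping we are willing to hold

type result struct {
	aborted bool
	size    int
	slots   map[string][]byte
}

func fastDelete(haveSnapshot bool) (result, error) {
	if !haveSnapshot {
		return result{}, errors.New("snapshot unavailable") // forces the fallback
	}
	// Pretend snapshot iteration stayed under the budget.
	return result{size: 4096, slots: map[string][]byte{"slot": {0x01}}}, nil
}

func slowDelete() (result, error) {
	// Pretend trie iteration blew past the budget, so the caller must abort.
	return result{aborted: true, size: deleteLimit + 1}, nil
}

func deleteStorage(haveSnapshot bool) (result, error) {
	res, err := fastDelete(haveSnapshot)
	if err != nil {
		// Fast path failed (missing or corrupted snapshot): fall back.
		res, err = slowDelete()
	}
	return res, err
}

func main() {
	fmt.Println(deleteStorage(true))
	fmt.Println(deleteStorage(false))
}
```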