eth/protocols/snap: fix the flaws in the snap sync #22553

Merged (10 commits, Mar 24, 2021)
9 changes: 7 additions & 2 deletions eth/protocols/snap/handler.go
@@ -256,8 +256,13 @@ func handleMessage(backend Backend, peer *Peer) error {
 		var (
 			storage []*StorageData
 			last    common.Hash
+			abort   bool
 		)
-		for it.Next() && size < hardLimit {
+		for it.Next() {
+			if size >= hardLimit {
+				abort = true
+				break
+			}
 			hash, slot := it.Hash(), common.CopyBytes(it.Slot())

 			// Track the returned interval for the Merkle proofs
@@ -280,7 +285,7 @@ func handleMessage(backend Backend, peer *Peer) error {
 		// Generate the Merkle proofs for the first and last storage slot, but
 		// only if the response was capped. If the entire storage trie is included
 		// in the response, no need for any proofs.
-		if origin != (common.Hash{}) || size >= hardLimit {
+		if origin != (common.Hash{}) || abort {
 			// Request started at a non-zero hash or was capped prematurely, add
 			// the endpoint Merkle proofs
 			accTrie, err := trie.New(req.Root, backend.Chain().StateCache().TrieDB())
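For reviewers, a minimal standalone sketch of why the explicit abort flag matters (drain and the slot sizes are hypothetical stand-ins for the real iterator, not geth APIs): after the old loop, size >= hardLimit was also true when the final slot happened to land exactly on the cap with the iterator already exhausted, so a complete response could be misclassified as capped; abort is only set when a slot was actually left unserved.

package main

import "fmt"

// drain simulates the handler loop over storage slots. sizes models the
// per-slot byte sizes pulled from the iterator; hardLimit mirrors the
// response size cap in handler.go.
func drain(sizes []int, hardLimit int) (served int, abort bool) {
	size := 0
	for _, s := range sizes {
		// Check the cap only when another slot actually exists, and
		// record the early exit explicitly. The old form, `for it.Next()
		// && size < hardLimit`, left `size >= hardLimit` true even when
		// the trie was fully drained at the cap.
		if size >= hardLimit {
			abort = true
			break
		}
		size += s
		served++
	}
	return served, abort
}

func main() {
	// The final slot reaches the limit but nothing is left: the response
	// is complete, abort stays false, and no boundary proofs are needed.
	fmt.Println(drain([]int{40, 40, 40}, 120)) // 3 false
	// A fourth slot remains when the cap is hit: abort is true, so the
	// handler must attach endpoint Merkle proofs.
	fmt.Println(drain([]int{40, 40, 40, 40}, 120)) // 3 true
}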
24 changes: 21 additions & 3 deletions eth/protocols/snap/sync.go
@@ -1553,7 +1553,14 @@ func (s *Syncer) processAccountResponse(res *accountResponse) {
 	// Ensure that the response doesn't overflow into the subsequent task
 	last := res.task.Last.Big()
 	for i, hash := range res.hashes {
-		if hash.Big().Cmp(last) > 0 {
+		// Mark the range complete if the last is already included.
+		// Keep iterating to delete the extra states if they exist.
+		cmp := hash.Big().Cmp(last)
+		if cmp == 0 {
+			res.cont = false
+			continue
+		}
+		if cmp > 0 {
 			// Chunk overflown, cut off excess, but also update the boundary nodes
 			for j := i; j < len(res.hashes); j++ {
 				if err := res.trie.Prove(res.hashes[j][:], 0, res.overflow); err != nil {
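As an aside, a hedged sketch of the new three-way boundary check (splitRange is a made-up helper and plain *big.Int values stand in for common.Hash): equality with the task's Last means the range is already complete, so cont is cleared, but the scan continues so that anything past the boundary is still trimmed. The same pattern is applied to storage ranges in the hunk below.

package main

import (
	"fmt"
	"math/big"
)

// splitRange models the boundary check in processAccountResponse. last is
// the upper bound of the current sync task; cont reports whether more of
// the range remains to be fetched.
func splitRange(hashes []*big.Int, last *big.Int) (kept []*big.Int, cont bool) {
	cont = true // assume the range continues until the boundary is seen
	for _, h := range hashes {
		cmp := h.Cmp(last)
		if cmp == 0 {
			// The task's last hash itself is present: the range is
			// complete, but keep scanning so any overflow past it is
			// still cut off rather than silently retained.
			cont = false
			kept = append(kept, h)
			continue
		}
		if cmp > 0 {
			// Overflow into the next task's range: trim from here on
			// (the real code also records boundary proofs for the tail).
			break
		}
		kept = append(kept, h)
	}
	return kept, cont
}

func main() {
	last := big.NewInt(100)
	in := []*big.Int{big.NewInt(10), big.NewInt(100), big.NewInt(150)}
	kept, cont := splitRange(in, last)
	fmt.Println(len(kept), cont) // 2 false: range complete, overflow trimmed
}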
@@ -1761,7 +1768,14 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 			// Ensure the response doesn't overflow into the subsequent task
 			last := res.subTask.Last.Big()
 			for k, hash := range res.hashes[i] {
-				if hash.Big().Cmp(last) > 0 {
+				// Mark the range complete if the last is already included.
+				// Keep iterating to delete the extra states if they exist.
+				cmp := hash.Big().Cmp(last)
+				if cmp == 0 {
+					res.cont = false
+					continue
+				}
+				if cmp > 0 {
 					// Chunk overflown, cut off excess, but also update the boundary
 					for l := k; l < len(res.hashes[i]); l++ {
 						if err := res.tries[i].Prove(res.hashes[i][l][:], 0, res.overflow); err != nil {
@@ -1788,11 +1802,15 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 		it := res.nodes[i].NewIterator(nil, nil)
 		for it.Next() {
 			// Boundary nodes are not written for the last result, since they are incomplete
-			if i == len(res.hashes)-1 {
+			if i == len(res.hashes)-1 && res.subTask != nil {
 				if _, ok := res.bounds[common.BytesToHash(it.Key())]; ok {
 					skipped++
 					continue
 				}
+				if _, err := res.overflow.Get(it.Key()); err == nil {
+					skipped++
+					continue
+				}
 			}
 			// Node is not a boundary, persist to disk
 			batch.Put(it.Key(), it.Value())
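Lastly, a simplified model of the amended persistence loop (commit is a made-up helper; string-keyed maps stand in for the real node batch, res.bounds, and the res.overflow proof database): boundary nodes of the last, still-incomplete result are withheld, and the fix widens that skip to nodes recorded while proving trimmed overflow, applying it only to chunked big tries (res.subTask != nil in the real code).

package main

import "fmt"

// commit models the node-persistence loop in processStorageResponse.
// bounds and overflow are plain sets standing in for res.bounds (keyed by
// node hash) and res.overflow (queried via Get in the real code).
func commit(nodes, batch map[string][]byte, bounds, overflow map[string]bool,
	lastResult, chunked bool) (skipped int) {
	for key, val := range nodes {
		// Withhold boundary nodes of the last, incomplete result: they
		// cover a partially-filled range and must not reach disk yet.
		if lastResult && chunked && (bounds[key] || overflow[key]) {
			skipped++
			continue
		}
		batch[key] = val // interior node, safe to persist
	}
	return skipped
}

func main() {
	nodes := map[string][]byte{"a": {1}, "b": {2}, "c": {3}}
	batch := map[string][]byte{}
	n := commit(nodes, batch, map[string]bool{"a": true},
		map[string]bool{"b": true}, true, true)
	fmt.Println(n, len(batch)) // 2 1: two boundary nodes withheld
}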