diff --git a/appveyor.yml b/appveyor.yml
index 65b5f96841..d477e6db9f 100644
--- a/appveyor.yml
+++ b/appveyor.yml
@@ -13,7 +13,7 @@ environment:
       GETH_MINGW: 'C:\msys64\mingw32'
 
 install:
-  - git submodule update --init --depth 1
+  - git submodule update --init --depth 1 --recursive
   - go version
 
 for:
diff --git a/build/ci.go b/build/ci.go
index 808daf686b..d11f87c889 100644
--- a/build/ci.go
+++ b/build/ci.go
@@ -342,12 +342,17 @@ func downloadLinter(cachedir string) string {
 
 	csdb := build.MustLoadChecksums("build/checksums.txt")
 	arch := runtime.GOARCH
-	if arch == "arm" {
+	ext := ".tar.gz"
+
+	if runtime.GOOS == "windows" {
+		ext = ".zip"
+	}
+	if arch == "arm" {
 		arch += "v" + os.Getenv("GOARM")
 	}
 	base := fmt.Sprintf("golangci-lint-%s-%s-%s", version, runtime.GOOS, arch)
-	url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s.tar.gz", version, base)
-	archivePath := filepath.Join(cachedir, base+".tar.gz")
+	url := fmt.Sprintf("https://github.com/golangci/golangci-lint/releases/download/v%s/%s%s", version, base, ext)
+	archivePath := filepath.Join(cachedir, base+ext)
 	if err := csdb.DownloadFile(url, archivePath); err != nil {
 		log.Fatal(err)
 	}
diff --git a/cmd/geth/snapshot.go b/cmd/geth/snapshot.go
index dd5d6e34fc..20d4f75fbd 100644
--- a/cmd/geth/snapshot.go
+++ b/cmd/geth/snapshot.go
@@ -601,8 +601,7 @@ func traverseState(ctx *cli.Context) error {
 			}
 		}
 		if !bytes.Equal(acc.CodeHash, emptyCode) {
-			code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
-			if len(code) == 0 {
+			if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
 				log.Error("Code is missing", "hash", common.BytesToHash(acc.CodeHash))
 				return errors.New("missing code")
 			}
@@ -673,11 +672,10 @@ func traverseRawState(ctx *cli.Context) error {
 		nodes += 1
 		node := accIter.Hash()
 
+		// Check the presence of a non-empty hash node (embedded nodes don't
+		// have their own hash).
 		if node != (common.Hash{}) {
-			// Check the present for non-empty hash node(embedded node doesn't
-			// have their own hash).
-			blob := rawdb.ReadTrieNode(chaindb, node)
-			if len(blob) == 0 {
+			if !rawdb.HasTrieNode(chaindb, node) {
 				log.Error("Missing trie node(account)", "hash", node)
 				return errors.New("missing account")
 			}
@@ -721,8 +719,7 @@ func traverseRawState(ctx *cli.Context) error {
 				}
 			}
 			if !bytes.Equal(acc.CodeHash, emptyCode) {
-				code := rawdb.ReadCode(chaindb, common.BytesToHash(acc.CodeHash))
-				if len(code) == 0 {
+				if !rawdb.HasCode(chaindb, common.BytesToHash(acc.CodeHash)) {
 					log.Error("Code is missing", "account", common.BytesToHash(accIter.LeafKey()))
 					return errors.New("missing code")
 				}
diff --git a/core/blockchain.go b/core/blockchain.go
index 60bee85762..8a56f2721a 100644
--- a/core/blockchain.go
+++ b/core/blockchain.go
@@ -2919,6 +2919,9 @@ Error: %v
 // of the header retrieval mechanisms already need to verify nonces, as well as
 // because nonces can be verified sparsely, not needing to check each.
 func (bc *BlockChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+	if len(chain) == 0 {
+		return 0, nil
+	}
 	start := time.Now()
 	if i, err := bc.hc.ValidateHeaderChain(chain, checkFreq); err != nil {
 		return i, err
diff --git a/core/rawdb/accessors_state.go b/core/rawdb/accessors_state.go
index a239d07667..41e21b6ca4 100644
--- a/core/rawdb/accessors_state.go
+++ b/core/rawdb/accessors_state.go
@@ -28,17 +28,6 @@ func ReadPreimage(db ethdb.KeyValueReader, hash common.Hash) []byte {
 	return data
 }
 
-// WritePreimages writes the provided set of preimages to the database.
-func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
-	for hash, preimage := range preimages {
-		if err := db.Put(preimageKey(hash), preimage); err != nil {
-			log.Crit("Failed to store trie preimage", "err", err)
-		}
-	}
-	preimageCounter.Inc(int64(len(preimages)))
-	preimageHitCounter.Inc(int64(len(preimages)))
-}
-
 // ReadCode retrieves the contract code of the provided code hash.
 func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
 	// Try with the prefixed code scheme first, if not then try with legacy
@@ -47,7 +36,7 @@ func ReadCode(db ethdb.KeyValueReader, hash common.Hash) []byte {
 	if len(data) != 0 {
 		return data
 	}
-	data, _ = db.Get(hash[:])
+	data, _ = db.Get(hash.Bytes())
 	return data
 }
 
@@ -59,6 +48,24 @@ func ReadCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) []byte {
 	return data
 }
 
+// ReadTrieNode retrieves the trie node of the provided hash.
+func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
+	data, _ := db.Get(hash.Bytes())
+	return data
+}
+
+// HasCode checks if the contract code corresponding to the
+// provided code hash is present in the db.
+func HasCode(db ethdb.KeyValueReader, hash common.Hash) bool {
+	// Try with the prefixed code scheme first, if not then try with legacy
+	// scheme.
+	if ok := HasCodeWithPrefix(db, hash); ok {
+		return true
+	}
+	ok, _ := db.Has(hash.Bytes())
+	return ok
+}
+
 // HasCodeWithPrefix checks if the contract code corresponding to the
 // provided code hash is present in the db. This function will only check
 // presence using the prefix-scheme.
@@ -67,30 +74,28 @@ func HasCodeWithPrefix(db ethdb.KeyValueReader, hash common.Hash) bool {
 	return ok
 }
 
-// WriteCode writes the provided contract code database.
-func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
-	if err := db.Put(codeKey(hash), code); err != nil {
-		log.Crit("Failed to store contract code", "err", err)
-	}
+// HasTrieNode checks if the trie node with the provided hash is present in db.
+func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
+	ok, _ := db.Has(hash.Bytes())
+	return ok
 }
 
-// DeleteCode deletes the specified contract code from the database.
-func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
-	if err := db.Delete(codeKey(hash)); err != nil {
-		log.Crit("Failed to delete contract code", "err", err)
+// WritePreimages writes the provided set of preimages to the database.
+func WritePreimages(db ethdb.KeyValueWriter, preimages map[common.Hash][]byte) {
+	for hash, preimage := range preimages {
+		if err := db.Put(preimageKey(hash), preimage); err != nil {
+			log.Crit("Failed to store trie preimage", "err", err)
+		}
 	}
+	preimageCounter.Inc(int64(len(preimages)))
+	preimageHitCounter.Inc(int64(len(preimages)))
 }
 
-// ReadTrieNode retrieves the trie node of the provided hash.
-func ReadTrieNode(db ethdb.KeyValueReader, hash common.Hash) []byte {
-	data, _ := db.Get(hash.Bytes())
-	return data
-}
-
-// HasTrieNode checks if the trie node with the provided hash is present in db.
-func HasTrieNode(db ethdb.KeyValueReader, hash common.Hash) bool {
-	ok, _ := db.Has(hash.Bytes())
-	return ok
+// WriteCode writes the provided contract code to the database.
+func WriteCode(db ethdb.KeyValueWriter, hash common.Hash, code []byte) {
+	if err := db.Put(codeKey(hash), code); err != nil {
+		log.Crit("Failed to store contract code", "err", err)
+	}
 }
 
 // WriteTrieNode writes the provided trie node database.
@@ -100,6 +105,13 @@ func WriteTrieNode(db ethdb.KeyValueWriter, hash common.Hash, node []byte) {
 	}
 }
 
+// DeleteCode deletes the specified contract code from the database.
+func DeleteCode(db ethdb.KeyValueWriter, hash common.Hash) {
+	if err := db.Delete(codeKey(hash)); err != nil {
+		log.Crit("Failed to delete contract code", "err", err)
+	}
+}
+
 // DeleteTrieNode deletes the specified trie node from the database.
 func DeleteTrieNode(db ethdb.KeyValueWriter, hash common.Hash) {
 	if err := db.Delete(hash.Bytes()); err != nil {
diff --git a/core/state/pruner/pruner.go b/core/state/pruner/pruner.go
index 6f52079dd4..703893574a 100644
--- a/core/state/pruner/pruner.go
+++ b/core/state/pruner/pruner.go
@@ -584,7 +584,7 @@ func (p *Pruner) Prune(root common.Hash) error {
 		// Ensure the root is really present. The weak assumption
 		// is the presence of root can indicate the presence of the
 		// entire trie.
-		if blob := rawdb.ReadTrieNode(p.db, root); len(blob) == 0 {
+		if !rawdb.HasTrieNode(p.db, root) {
 			// The special case is for clique based networks(rinkeby, goerli
 			// and some other private networks), it's possible that two
 			// consecutive blocks will have same root. In this case snapshot
@@ -598,7 +598,7 @@ func (p *Pruner) Prune(root common.Hash) error {
 			// as the pruning target.
 			var found bool
 			for i := len(layers) - 2; i >= 1; i-- {
-				if blob := rawdb.ReadTrieNode(p.db, layers[i].Root()); len(blob) != 0 {
+				if rawdb.HasTrieNode(p.db, layers[i].Root()) {
 					root = layers[i].Root()
 					found = true
 					log.Info("Selecting middle-layer as the pruning target", "root", root, "depth", i)
@@ -815,7 +815,7 @@ const warningLog = `
 
 WARNING!
 
-The clean trie cache is not found. Please delete it by yourself after the 
+The clean trie cache is not found. Please delete it by yourself after the
 pruning. Remember don't start the Geth without deleting the clean trie cache
 otherwise the entire database may be damaged!
 
diff --git a/core/state/state_object.go b/core/state/state_object.go
index 6d7ce60b37..fc89a4f904 100644
--- a/core/state/state_object.go
+++ b/core/state/state_object.go
@@ -238,25 +238,10 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has
 	}
 	// If no live objects are available, attempt to use snapshots
 	var (
-		enc   []byte
-		err   error
-		meter *time.Duration
+		enc []byte
+		err error
 	)
-	readStart := time.Now()
-	if metrics.EnabledExpensive {
-		// If the snap is 'under construction', the first lookup may fail. If that
-		// happens, we don't want to double-count the time elapsed. Thus this
-		// dance with the metering.
-		defer func() {
-			if meter != nil {
-				*meter += time.Since(readStart)
-			}
-		}()
-	}
 	if s.db.snap != nil {
-		if metrics.EnabledExpensive {
-			meter = &s.db.SnapshotStorageReads
-		}
 		// If the object was destructed in *this* block (and potentially resurrected),
 		// the storage has been cleared out, and we should *not* consult the previous
 		// snapshot about any storage values. The only possible alternatives are:
@@ -266,21 +251,24 @@ func (s *StateObject) GetCommittedState(db Database, key common.Hash) common.Has
 		if _, destructed := s.db.snapDestructs[s.address]; destructed {
 			return common.Hash{}
 		}
+		start := time.Now()
 		enc, err = s.db.snap.Storage(s.addrHash, crypto.Keccak256Hash(key.Bytes()))
+		if metrics.EnabledExpensive {
+			s.db.SnapshotStorageReads += time.Since(start)
+		}
 	}
 	// If snapshot unavailable or reading from it failed, load from the database
 	if s.db.snap == nil || err != nil {
-		if meter != nil {
-			// If we already spent time checking the snapshot, account for it
-			// and reset the readStart
-			*meter += time.Since(readStart)
-			readStart = time.Now()
-		}
+		start := time.Now()
+		// if metrics.EnabledExpensive {
+		// 	meter = &s.db.StorageReads
+		// }
+
+		enc, err = s.getTrie(db).TryGet(key.Bytes())
 		if metrics.EnabledExpensive {
-			meter = &s.db.StorageReads
+			s.db.StorageReads += time.Since(start)
 		}
-		if enc, err = s.getTrie(db).TryGet(key.Bytes()); err != nil {
+		if err != nil {
 			s.setError(err)
 			return common.Hash{}
 		}
diff --git a/core/state/statedb.go b/core/state/statedb.go
index cb36d34d54..92f74212f2 100644
--- a/core/state/statedb.go
+++ b/core/state/statedb.go
@@ -657,16 +657,14 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
 		return obj
 	}
 	// If no live objects are available, attempt to use snapshots
-	var (
-		data *types.StateAccount
-		err  error
-	)
+	var data *types.StateAccount
 	if s.snap != nil {
+		start := time.Now()
+		acc, err := s.snap.Account(crypto.HashData(s.hasher, addr.Bytes()))
 		if metrics.EnabledExpensive {
-			defer func(start time.Time) { s.SnapshotAccountReads += time.Since(start) }(time.Now())
+			s.SnapshotAccountReads += time.Since(start)
 		}
-		var acc *snapshot.Account
-		if acc, err = s.snap.Account(crypto.HashData(s.hasher, addr.Bytes())); err == nil {
+		if err == nil {
 			if acc == nil {
 				return nil
 			}
@@ -686,7 +684,7 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
 	}
 
 	// If snapshot unavailable or reading from it failed, load from the database
-	if s.snap == nil || err != nil {
+	if data == nil {
 		if s.trie == nil {
 			tr, err := s.db.OpenTrie(s.originalRoot)
 			if err != nil {
@@ -695,10 +693,11 @@ func (s *StateDB) getDeletedStateObject(addr common.Address) *StateObject {
 			}
 			s.trie = tr
 		}
+		start := time.Now()
+		enc, err := s.trie.TryGet(addr.Bytes())
 		if metrics.EnabledExpensive {
-			defer func(start time.Time) { s.AccountReads += time.Since(start) }(time.Now())
+			s.AccountReads += time.Since(start)
 		}
-		enc, err := s.trie.TryGet(addr.Bytes())
 		if err != nil {
 			s.setError(fmt.Errorf("getDeleteStateObject (%x) error: %v", addr.Bytes(), err))
 			return nil
diff --git a/core/vm/operations_acl.go b/core/vm/operations_acl.go
index 483226eefa..551e1f5f11 100644
--- a/core/vm/operations_acl.go
+++ b/core/vm/operations_acl.go
@@ -214,7 +214,7 @@ var (
 	// see gasSStoreEIP2200(...) in core/vm/gas_table.go for more info about how EIP 2200 is specified
 	gasSStoreEIP2929 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP2200)
 
-	// gasSStoreEIP2539 implements gas cost for SSTORE according to EPI-2539
+	// gasSStoreEIP3529 implements gas cost for SSTORE according to EIP-3529
 	// Replace `SSTORE_CLEARS_SCHEDULE` with `SSTORE_RESET_GAS + ACCESS_LIST_STORAGE_KEY_COST` (4,800)
 	gasSStoreEIP3529 = makeGasSStoreFunc(params.SstoreClearsScheduleRefundEIP3529)
 )
diff --git a/eth/downloader/downloader.go b/eth/downloader/downloader.go
index 207ed39915..9ac5a7fd43 100644
--- a/eth/downloader/downloader.go
+++ b/eth/downloader/downloader.go
@@ -1340,23 +1340,25 @@ func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
 				if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
 					frequency = 1
 				}
-				if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
-					rollbackErr = err
+				if len(chunkHeaders) > 0 {
+					if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
+						rollbackErr = err
 
-					// If some headers were inserted, track them as uncertain
-					if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
-						rollback = chunkHeaders[0].Number.Uint64()
+						// If some headers were inserted, track them as uncertain
+						if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
+							rollback = chunkHeaders[0].Number.Uint64()
+						}
+						log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
+						return fmt.Errorf("%w: %v", errInvalidChain, err)
 					}
-					log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
-					return fmt.Errorf("%w: %v", errInvalidChain, err)
-				}
-				// All verifications passed, track all headers within the alloted limits
-				if mode == SnapSync {
-					head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
-					if head-rollback > uint64(fsHeaderSafetyNet) {
-						rollback = head - uint64(fsHeaderSafetyNet)
-					} else {
-						rollback = 1
+					// All verifications passed, track all headers within the allotted limits
+					if mode == SnapSync {
+						head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
+						if head-rollback > uint64(fsHeaderSafetyNet) {
+							rollback = head - uint64(fsHeaderSafetyNet)
+						} else {
+							rollback = 1
+						}
 					}
 				}
 			}
diff --git a/eth/protocols/snap/sync.go b/eth/protocols/snap/sync.go
index 51ac8bd420..9a1af86cd4 100644
--- a/eth/protocols/snap/sync.go
+++ b/eth/protocols/snap/sync.go
@@ -2830,7 +2830,10 @@ func (s *Syncer) reportSyncProgress(force bool) {
 		new(big.Int).Mul(new(big.Int).SetUint64(uint64(synced)), hashSpace),
 		accountFills,
 	).Uint64())
-
+	// Don't report anything until we have a meaningful progress
+	if estBytes < 1.0 {
+		return
+	}
 	elapsed := time.Since(s.startTime)
 	estTime := elapsed / time.Duration(synced) * time.Duration(estBytes)
 
diff --git a/eth/tracers/api.go b/eth/tracers/api.go
index 693c5435da..9c1fdfbd59 100644
--- a/eth/tracers/api.go
+++ b/eth/tracers/api.go
@@ -454,7 +454,7 @@ func (api *API) TraceBlockByHash(ctx context.Context, hash common.Hash, config *
 
 // TraceBlock returns the structured logs created during the execution of EVM
 // and returns them as a JSON object.
-func (api *API) TraceBlock(ctx context.Context, blob []byte, config *TraceConfig) ([]*txTraceResult, error) {
+func (api *API) TraceBlock(ctx context.Context, blob hexutil.Bytes, config *TraceConfig) ([]*txTraceResult, error) {
 	block := new(types.Block)
 	if err := rlp.Decode(bytes.NewReader(blob), block); err != nil {
 		return nil, fmt.Errorf("could not decode block: %v", err)
diff --git a/graphql/graphql.go b/graphql/graphql.go
index 9f1e251970..68ac63b5dc 100644
--- a/graphql/graphql.go
+++ b/graphql/graphql.go
@@ -100,6 +100,14 @@ func (a *Account) Balance(ctx context.Context) (hexutil.Big, error) {
 }
 
 func (a *Account) TransactionCount(ctx context.Context) (hexutil.Uint64, error) {
+	// Ask transaction pool for the nonce which includes pending transactions
+	if blockNr, ok := a.blockNrOrHash.Number(); ok && blockNr == rpc.PendingBlockNumber {
+		nonce, err := a.backend.GetPoolNonce(ctx, a.address)
+		if err != nil {
+			return 0, err
+		}
+		return hexutil.Uint64(nonce), nil
+	}
 	state, err := a.getState(ctx)
 	if err != nil {
 		return 0, err
diff --git a/internal/build/download.go b/internal/build/download.go
index 0ed0b5e130..efb223b327 100644
--- a/internal/build/download.go
+++ b/internal/build/download.go
@@ -58,7 +58,7 @@ func (db *ChecksumDB) Verify(path string) error {
 	}
 	fileHash := hex.EncodeToString(h.Sum(nil))
 	if !db.findHash(filepath.Base(path), fileHash) {
-		return fmt.Errorf("invalid file hash %s", fileHash)
+		return fmt.Errorf("invalid file hash %s for %s", fileHash, filepath.Base(path))
 	}
 	return nil
 }
diff --git a/internal/ethapi/api.go b/internal/ethapi/api.go
index 58fa326344..cf2108fcd9 100644
--- a/internal/ethapi/api.go
+++ b/internal/ethapi/api.go
@@ -293,7 +293,7 @@ func NewPrivateAccountAPI(b Backend, nonceLock *AddrLocker) *PrivateAccountAPI {
 	}
 }
 
-// listAccounts will return a list of addresses for accounts this node manages.
+// ListAccounts will return a list of addresses for accounts this node manages.
 func (s *PrivateAccountAPI) ListAccounts() []common.Address {
 	return s.am.Accounts()
 }
@@ -1967,7 +1967,7 @@ func (s *PublicTransactionPoolAPI) GetTransactionDataAndReceipt(ctx context.Cont
 		fields["status"] = hexutil.Uint(receipt.Status)
 	}
 	if receipt.Logs == nil {
-		fields["logs"] = [][]*types.Log{}
+		fields["logs"] = []*types.Log{}
 	}
 	// If the ContractAddress is 20 0x0 bytes, assume it is not a contract creation
 	if receipt.ContractAddress != (common.Address{}) {
diff --git a/les/vflux/client/fillset_test.go b/les/vflux/client/fillset_test.go
index 58240682c6..ca5af8f07e 100644
--- a/les/vflux/client/fillset_test.go
+++ b/les/vflux/client/fillset_test.go
@@ -34,16 +34,20 @@ type testIter struct {
 }
 
 func (i *testIter) Next() bool {
-	i.waitCh <- struct{}{}
+	if _, ok := <-i.waitCh; !ok {
+		return false
+	}
 	i.node = <-i.nodeCh
-	return i.node != nil
+	return true
 }
 
 func (i *testIter) Node() *enode.Node {
 	return i.node
 }
 
-func (i *testIter) Close() {}
+func (i *testIter) Close() {
+	close(i.waitCh)
+}
 
 func (i *testIter) push() {
 	var id enode.ID
@@ -53,7 +57,7 @@ func (i *testIter) push() {
 
 func (i *testIter) waiting(timeout time.Duration) bool {
 	select {
-	case <-i.waitCh:
+	case i.waitCh <- struct{}{}:
 		return true
 	case <-time.After(timeout):
 		return false
diff --git a/les/vflux/server/clientpool.go b/les/vflux/server/clientpool.go
index 87d783ebab..805de2d41b 100644
--- a/les/vflux/server/clientpool.go
+++ b/les/vflux/server/clientpool.go
@@ -34,7 +34,7 @@ import (
 var (
 	ErrNotConnected    = errors.New("client not connected")
 	ErrNoPriority      = errors.New("priority too low to raise capacity")
-	ErrCantFindMaximum = errors.New("Unable to find maximum allowed capacity")
+	ErrCantFindMaximum = errors.New("unable to find maximum allowed capacity")
 )
 
 // ClientPool implements a client database that assigns a priority to each client
@@ -177,7 +177,7 @@ func (cp *ClientPool) Unregister(peer clientPeer) {
 	cp.ns.SetField(peer.Node(), cp.setup.clientField, nil)
 }
 
-// setConnectedBias sets the connection bias, which is applied to already connected clients
+// SetConnectedBias sets the connection bias, which is applied to already connected clients
 // So that already connected client won't be kicked out very soon and we can ensure all
 // connected clients can have enough time to request or sync some data.
 func (cp *ClientPool) SetConnectedBias(bias time.Duration) {
diff --git a/light/lightchain.go b/light/lightchain.go
index 78a436085e..7ce0b2698d 100644
--- a/light/lightchain.go
+++ b/light/lightchain.go
@@ -424,6 +424,9 @@ func (lc *LightChain) SetChainHead(header *types.Header) error {
 // In the case of a light chain, InsertHeaderChain also creates and posts light
 // chain events when necessary.
 func (lc *LightChain) InsertHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
+	if len(chain) == 0 {
+		return 0, nil
+	}
 	if atomic.LoadInt32(&lc.disableCheckFreq) == 1 {
 		checkFreq = 0
 	}
diff --git a/mobile/geth.go b/mobile/geth.go
index bad9e0589f..709b68cbde 100644
--- a/mobile/geth.go
+++ b/mobile/geth.go
@@ -220,14 +220,6 @@ func (n *Node) Start() error {
 	return n.node.Start()
 }
 
-// Stop terminates a running node along with all its services. If the node was not started,
-// an error is returned. It is not possible to restart a stopped node.
-//
-// Deprecated: use Close()
-func (n *Node) Stop() error {
-	return n.node.Close()
-}
-
 // GetEthereumClient retrieves a client to access the Ethereum subsystem.
 func (n *Node) GetEthereumClient() (client *EthereumClient, _ error) {
 	rpc, err := n.node.Attach()
diff --git a/p2p/server.go b/p2p/server.go
index 6d2b6b1c15..38c2d73a0e 100644
--- a/p2p/server.go
+++ b/p2p/server.go
@@ -976,9 +976,8 @@ func (srv *Server) setupConn(c *conn, flags connFlag, dialDest *enode.Node) erro
 	}
 
 	// If dialing, figure out the remote public key.
-	var dialPubkey *ecdsa.PublicKey
 	if dialDest != nil {
-		dialPubkey = new(ecdsa.PublicKey)
+		dialPubkey := new(ecdsa.PublicKey)
 		if err := dialDest.Load((*enode.Secp256k1)(dialPubkey)); err != nil {
 			err = errors.New("dial destination doesn't have a secp256k1 public key")
 			srv.log.Trace("Setting up connection failed", "addr", c.fd.RemoteAddr(), "conn", c.flags, "err", err)
diff --git a/rpc/server_test.go b/rpc/server_test.go
index c692a071cf..e67893710d 100644
--- a/rpc/server_test.go
+++ b/rpc/server_test.go
@@ -134,7 +134,7 @@ func TestServerShortLivedConn(t *testing.T) {
 		if err != nil {
 			t.Fatal("can't dial:", err)
 		}
-		defer conn.Close()
+
 		conn.SetDeadline(deadline)
 		// Write the request, then half-close the connection so the server stops reading.
 		conn.Write([]byte(request))
@@ -142,6 +142,8 @@ func TestServerShortLivedConn(t *testing.T) {
 		// Now try to get the response.
 		buf := make([]byte, 2000)
 		n, err := conn.Read(buf)
+		conn.Close()
+
 		if err != nil {
 			t.Fatal("read error:", err)
 		}
diff --git a/tests/fuzzers/vflux/clientpool-fuzzer.go b/tests/fuzzers/vflux/clientpool-fuzzer.go
index 0414c001ec..b3b523cc82 100644
--- a/tests/fuzzers/vflux/clientpool-fuzzer.go
+++ b/tests/fuzzers/vflux/clientpool-fuzzer.go
@@ -267,9 +267,7 @@ func FuzzClientPool(input []byte) int {
 				bias      = f.randomDelay()
 				requested = f.randomBool()
 			)
-			if _, err := pool.SetCapacity(f.peers[index].node, reqCap, bias, requested); err == vfs.ErrCantFindMaximum {
-				panic(nil)
-			}
+			pool.SetCapacity(f.peers[index].node, reqCap, bias, requested)
 			doLog("Set capacity", "id", f.peers[index].node.ID(), "reqcap", reqCap, "bias", bias, "requested", requested)
 		case 7:
 			index := f.randomByte()
diff --git a/trie/iterator.go b/trie/iterator.go
index 9f6dc3af7f..61a91c8dde 100644
--- a/trie/iterator.go
+++ b/trie/iterator.go
@@ -151,8 +151,11 @@ func (e seekError) Error() string {
 }
 
 func newNodeIterator(trie *Trie, start []byte) NodeIterator {
-	if trie.Hash() == emptyState {
-		return new(nodeIterator)
+	if trie.Hash() == emptyRoot {
+		return &nodeIterator{
+			trie: trie,
+			err:  errIteratorEnd,
+		}
 	}
 	it := &nodeIterator{trie: trie}
 	it.err = it.seek(start)
@@ -402,7 +405,7 @@ func findChild(n *fullNode, index int, path []byte, ancestor common.Hash) (node,
 func (it *nodeIterator) nextChild(parent *nodeIteratorState, ancestor common.Hash) (*nodeIteratorState, []byte, bool) {
 	switch node := parent.node.(type) {
 	case *fullNode:
-		//Full node, move to the first non-nil child.
+		// Full node, move to the first non-nil child.
 		if child, state, path, index := findChild(node, parent.index+1, it.path, ancestor); child != nil {
 			parent.index = index - 1
 			return state, path, true
@@ -480,8 +483,9 @@ func (it *nodeIterator) push(state *nodeIteratorState, parentIndex *int, path []
 }
 
 func (it *nodeIterator) pop() {
-	parent := it.stack[len(it.stack)-1]
-	it.path = it.path[:parent.pathlen]
+	last := it.stack[len(it.stack)-1]
+	it.path = it.path[:last.pathlen]
+	it.stack[len(it.stack)-1] = nil
 	it.stack = it.stack[:len(it.stack)-1]
 }
 
diff --git a/trie/iterator_test.go b/trie/iterator_test.go
index 95cafdd3bd..1ebd6a1131 100644
--- a/trie/iterator_test.go
+++ b/trie/iterator_test.go
@@ -29,6 +29,19 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb/memorydb"
 )
 
+func TestEmptyIterator(t *testing.T) {
+	trie := newEmpty()
+	iter := trie.NodeIterator(nil)
+
+	seen := make(map[string]struct{})
+	for iter.Next(true) {
+		seen[string(iter.Path())] = struct{}{}
+	}
+	if len(seen) != 0 {
+		t.Fatal("Unexpected trie node iterated")
+	}
+}
+
 func TestIterator(t *testing.T) {
 	trie := newEmpty()
 	vals := []struct{ k, v string }{