
all: fix mismatched names in comments (#29348)
* all: fix mismatched names in comments

* metrics: fix mismatched name in UpdateIfGt
AaronChen0 authored Mar 26, 2024
1 parent 58a3e2f commit 723b1e3
Showing 53 changed files with 61 additions and 61 deletions.
2 changes: 1 addition & 1 deletion accounts/abi/argument.go
@@ -127,7 +127,7 @@ func (arguments Arguments) Copy(v interface{}, values []interface{}) error {
return arguments.copyAtomic(v, values[0])
}

-// unpackAtomic unpacks ( hexdata -> go ) a single value
+// copyAtomic copies ( hexdata -> go ) a single value
func (arguments Arguments) copyAtomic(v interface{}, marshalledValues interface{}) error {
dst := reflect.ValueOf(v).Elem()
src := reflect.ValueOf(marshalledValues)
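
As an illustrative aside (not part of this commit): the corrected comment above describes copyAtomic as copying one already-decoded value into the destination pointer via reflection. A minimal, standalone sketch of that pattern — copyInto is a hypothetical stand-in for the real method, which also handles struct destinations and ABI type conversions:

package main

import (
	"fmt"
	"reflect"
)

// copyInto writes an already-decoded value into the location pointed to by v.
func copyInto(v interface{}, value interface{}) error {
	dst := reflect.ValueOf(v).Elem()
	src := reflect.ValueOf(value)
	if !src.Type().AssignableTo(dst.Type()) {
		return fmt.Errorf("abi: cannot assign %s to %s", src.Type(), dst.Type())
	}
	dst.Set(src)
	return nil
}

func main() {
	var out uint64
	if err := copyInto(&out, uint64(42)); err != nil {
		panic(err)
	}
	fmt.Println(out) // 42
}
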
2 changes: 1 addition & 1 deletion accounts/keystore/account_cache_test.go
@@ -51,7 +51,7 @@ var (
}
)

-// waitWatcherStarts waits up to 1s for the keystore watcher to start.
+// waitWatcherStart waits up to 1s for the keystore watcher to start.
func waitWatcherStart(ks *KeyStore) bool {
// On systems where file watch is not supported, just return "ok".
if !ks.cache.watcher.enabled() {
2 changes: 1 addition & 1 deletion beacon/light/api/light_api.go
@@ -289,7 +289,7 @@ func decodeFinalityUpdate(enc []byte) (types.FinalityUpdate, error) {
}, nil
}

-// GetHead fetches and validates the beacon header with the given blockRoot.
+// GetHeader fetches and validates the beacon header with the given blockRoot.
// If blockRoot is null hash then the latest head header is fetched.
func (api *BeaconLightApi) GetHeader(blockRoot common.Hash) (types.Header, error) {
var blockId string
4 changes: 2 additions & 2 deletions beacon/light/head_tracker.go
@@ -56,15 +56,15 @@ func (h *HeadTracker) ValidatedHead() (types.SignedHeader, bool) {
return h.signedHead, h.hasSignedHead
}

-// ValidatedHead returns the latest validated head.
+// ValidatedFinality returns the latest validated finality.
func (h *HeadTracker) ValidatedFinality() (types.FinalityUpdate, bool) {
h.lock.RLock()
defer h.lock.RUnlock()

return h.finalityUpdate, h.hasFinalityUpdate
}

-// Validate validates the given signed head. If the head is successfully validated
+// ValidateHead validates the given signed head. If the head is successfully validated
// and it is better than the old validated head (higher slot or same slot and more
// signers) then ValidatedHead is updated. The boolean return flag signals if
// ValidatedHead has been changed.
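
Illustrative aside (not part of this commit): the corrected ValidateHead comment describes replacing the validated head only when the new one is better — a higher slot, or the same slot backed by more signers. A rough sketch of that comparison; signedHead and SignerCount are simplified stand-ins for types.SignedHeader and its sync-committee signature count:

package main

import "fmt"

// signedHead models only the two fields the comparison needs.
type signedHead struct {
	Slot        uint64
	SignerCount int
}

// betterThan reports whether candidate should replace current: it wins on a
// higher slot, or on the same slot with strictly more signers.
func betterThan(candidate, current signedHead) bool {
	if candidate.Slot != current.Slot {
		return candidate.Slot > current.Slot
	}
	return candidate.SignerCount > current.SignerCount
}

func main() {
	cur := signedHead{Slot: 100, SignerCount: 300}
	fmt.Println(betterThan(signedHead{Slot: 101, SignerCount: 10}, cur))  // true: higher slot
	fmt.Println(betterThan(signedHead{Slot: 100, SignerCount: 350}, cur)) // true: more signers
	fmt.Println(betterThan(signedHead{Slot: 100, SignerCount: 300}, cur)) // false: not better
}
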
4 changes: 2 additions & 2 deletions beacon/light/request/server.go
@@ -212,7 +212,7 @@ func (s *serverWithTimeout) startTimeout(reqData RequestResponse) {
})
}

-// stop stops all goroutines associated with the server.
+// unsubscribe stops all goroutines associated with the server.
func (s *serverWithTimeout) unsubscribe() {
s.lock.Lock()
defer s.lock.Unlock()
@@ -337,7 +337,7 @@ func (s *serverWithLimits) sendRequest(request Request) (reqId ID) {
return s.serverWithTimeout.sendRequest(request)
}

-// stop stops all goroutines associated with the server.
+// unsubscribe stops all goroutines associated with the server.
func (s *serverWithLimits) unsubscribe() {
s.lock.Lock()
defer s.lock.Unlock()
4 changes: 2 additions & 2 deletions beacon/light/sync/head_sync.go
@@ -101,7 +101,7 @@ func (s *HeadSync) newSignedHead(server request.Server, signedHead types.SignedH
s.headTracker.ValidateHead(signedHead)
}

-// newSignedHead handles received signed head; either validates it if the chain
+// newFinalityUpdate handles received finality update; either validates it if the chain
// is properly synced or stores it for further validation.
func (s *HeadSync) newFinalityUpdate(server request.Server, finalityUpdate types.FinalityUpdate) {
if !s.chainInit || types.SyncPeriod(finalityUpdate.SignatureSlot) > s.nextSyncPeriod {
@@ -111,7 +111,7 @@ func (s *HeadSync) newFinalityUpdate(server request.Server, finalityUpdate types
s.headTracker.ValidateFinality(finalityUpdate)
}

-// processUnvalidatedHeads iterates the list of unvalidated heads and validates
+// processUnvalidated iterates the list of unvalidated heads and validates
// those which can be validated.
func (s *HeadSync) processUnvalidated() {
if !s.chainInit {
2 changes: 1 addition & 1 deletion beacon/types/exec_header.go
@@ -36,7 +36,7 @@ type ExecutionHeader struct {
obj headerObject
}

-// HeaderFromJSON decodes an execution header from JSON data provided by
+// ExecutionHeaderFromJSON decodes an execution header from JSON data provided by
// the beacon chain API.
func ExecutionHeaderFromJSON(forkName string, data []byte) (*ExecutionHeader, error) {
var obj headerObject
2 changes: 1 addition & 1 deletion cmd/geth/attach_test.go
@@ -48,7 +48,7 @@ func TestAttachWithHeaders(t *testing.T) {
// This is fixed in a follow-up PR.
}

-// TestAttachWithHeaders tests that 'geth db --remotedb' with custom headers works, i.e
+// TestRemoteDbWithHeaders tests that 'geth db --remotedb' with custom headers works, i.e
// that custom headers are forwarded to the target.
func TestRemoteDbWithHeaders(t *testing.T) {
t.Parallel()
2 changes: 1 addition & 1 deletion cmd/utils/export_test.go
@@ -97,7 +97,7 @@ func testExport(t *testing.T, f string) {
}
}

-// testDeletion tests if the deletion markers can be exported/imported correctly
+// TestDeletionExport tests if the deletion markers can be exported/imported correctly
func TestDeletionExport(t *testing.T) {
f := fmt.Sprintf("%v/tempdump", os.TempDir())
defer func() {
2 changes: 1 addition & 1 deletion common/lru/basiclru.go
@@ -174,7 +174,7 @@ func (l *list[T]) init() {
l.root.prev = &l.root
}

-// push adds an element to the front of the list.
+// pushElem adds an element to the front of the list.
func (l *list[T]) pushElem(e *listElem[T]) {
e.prev = &l.root
e.next = l.root.next
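
Illustrative aside (not part of this commit): pushElem splices an element in right behind the list's sentinel root, which makes it the new front of the circular doubly-linked list. A self-contained sketch of the same intrusive-list pattern, with the types simplified from common/lru:

package main

import "fmt"

// listElem / list mirror the shape of the intrusive list above.
type listElem[T any] struct {
	next, prev *listElem[T]
	v          T
}

type list[T any] struct {
	root listElem[T] // sentinel; root.next is the front, root.prev the back
}

func (l *list[T]) init() {
	l.root.next = &l.root
	l.root.prev = &l.root
}

// pushElem splices e in directly after the sentinel, making it the new front.
func (l *list[T]) pushElem(e *listElem[T]) {
	e.prev = &l.root
	e.next = l.root.next
	l.root.next.prev = e
	l.root.next = e
}

func main() {
	var l list[int]
	l.init()
	l.pushElem(&listElem[int]{v: 1})
	l.pushElem(&listElem[int]{v: 2})
	// Walk from front to back: prints 2, then 1.
	for e := l.root.next; e != &l.root; e = e.next {
		fmt.Println(e.v)
	}
}
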
2 changes: 1 addition & 1 deletion consensus/ethash/consensus.go
@@ -568,7 +568,7 @@ var (
u256_32 = uint256.NewInt(32)
)

-// AccumulateRewards credits the coinbase of the given block with the mining
+// accumulateRewards credits the coinbase of the given block with the mining
// reward. The total reward consists of the static block reward and rewards for
// included uncles. The coinbase of each uncle block is also rewarded.
func accumulateRewards(config *params.ChainConfig, stateDB *state.StateDB, header *types.Header, uncles []*types.Header) {
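
Illustrative aside (not part of this commit): accumulateRewards adds the static block reward plus uncle-related extras. A rough sketch of the usual ethash arithmetic — an uncle's coinbase receives (uncleNumber + 8 - blockNumber) / 8 of the block reward, and the miner gets an extra 1/32 per included uncle; the 2 ETH constant and the absence of fork handling are simplifying assumptions, not the consensus code:

package main

import (
	"fmt"
	"math/big"
)

// blockReward is an illustrative 2 ETH static reward, in wei.
var blockReward = big.NewInt(2e18)

// uncleReward computes the reward paid to an uncle's coinbase:
// (uncleNumber + 8 - blockNumber) * blockReward / 8.
func uncleReward(blockNumber, uncleNumber uint64) *big.Int {
	r := new(big.Int).SetUint64(uncleNumber + 8)
	r.Sub(r, new(big.Int).SetUint64(blockNumber))
	r.Mul(r, blockReward)
	return r.Div(r, big.NewInt(8))
}

// minerReward computes the block proposer's total: the static reward plus
// blockReward/32 for every included uncle.
func minerReward(uncles int) *big.Int {
	r := new(big.Int).Set(blockReward)
	perUncle := new(big.Int).Div(blockReward, big.NewInt(32))
	for i := 0; i < uncles; i++ {
		r.Add(r, perUncle)
	}
	return r
}

func main() {
	fmt.Println("uncle one block back:", uncleReward(100, 99)) // 7/8 of the block reward
	fmt.Println("miner with 2 uncles: ", minerReward(2))
}
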
2 changes: 1 addition & 1 deletion core/asm/lexer.go
@@ -127,7 +127,7 @@ func (l *lexer) ignore() {
l.start = l.pos
}

-// Accepts checks whether the given input matches the next rune
+// accept checks whether the given input matches the next rune
func (l *lexer) accept(valid string) bool {
if strings.ContainsRune(valid, l.next()) {
return true
2 changes: 1 addition & 1 deletion core/blockchain.go
@@ -639,7 +639,7 @@ func (bc *BlockChain) SetSafe(header *types.Header) {
}
}

-// rewindPathHead implements the logic of rewindHead in the context of hash scheme.
+// rewindHashHead implements the logic of rewindHead in the context of hash scheme.
func (bc *BlockChain) rewindHashHead(head *types.Header, root common.Hash) (*types.Header, uint64) {
var (
limit uint64 // The oldest block that will be searched for this rewinding
2 changes: 1 addition & 1 deletion core/chain_makers.go
@@ -482,7 +482,7 @@ func makeBlockChain(chainConfig *params.ChainConfig, parent *types.Block, n int,
return blocks
}

-// makeBlockChain creates a deterministic chain of blocks from genesis
+// makeBlockChainWithGenesis creates a deterministic chain of blocks from genesis
func makeBlockChainWithGenesis(genesis *Genesis, n int, engine consensus.Engine, seed int) (ethdb.Database, []*types.Block) {
db, blocks, _ := GenerateChainWithGenesis(genesis, engine, n, func(i int, b *BlockGen) {
b.SetCoinbase(common.Address{0: byte(seed), 19: byte(i)})
2 changes: 1 addition & 1 deletion core/rawdb/accessors_chain.go
@@ -695,7 +695,7 @@ func (r *receiptLogs) DecodeRLP(s *rlp.Stream) error {
return nil
}

-// DeriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
+// deriveLogFields fills the logs in receiptLogs with information such as block number, txhash, etc.
func deriveLogFields(receipts []*receiptLogs, hash common.Hash, number uint64, txs types.Transactions) error {
logIndex := uint(0)
if len(txs) != len(receipts) {
2 changes: 1 addition & 1 deletion core/txpool/blobpool/blobpool.go
@@ -1226,7 +1226,7 @@ func (p *BlobPool) Add(txs []*types.Transaction, local bool, sync bool) []error
return errs
}

-// Add inserts a new blob transaction into the pool if it passes validation (both
+// add inserts a new blob transaction into the pool if it passes validation (both
// consensus validity and pool restrictions).
func (p *BlobPool) add(tx *types.Transaction) (err error) {
// The blob pool blocks on adding a transaction. This is because blob txs are
4 changes: 2 additions & 2 deletions core/vm/contracts.go
@@ -182,7 +182,7 @@ func RunPrecompiledContract(p PrecompiledContract, input []byte, suppliedGas uin
return output, suppliedGas, err
}

-// ECRECOVER implemented as a native contract.
+// ecrecover implemented as a native contract.
type ecrecover struct{}

func (c *ecrecover) RequiredGas(input []byte) uint64 {
@@ -457,7 +457,7 @@ func runBn256Add(input []byte) ([]byte, error) {
return res.Marshal(), nil
}

-// bn256Add implements a native elliptic curve point addition conforming to
+// bn256AddIstanbul implements a native elliptic curve point addition conforming to
// Istanbul consensus rules.
type bn256AddIstanbul struct{}

2 changes: 1 addition & 1 deletion core/vm/interpreter.go
@@ -50,7 +50,7 @@ func (ctx *ScopeContext) MemoryData() []byte {
return ctx.Memory.Data()
}

-// MemoryData returns the stack data. Callers must not modify the contents
+// StackData returns the stack data. Callers must not modify the contents
// of the returned data.
func (ctx *ScopeContext) StackData() []uint256.Int {
if ctx.Stack == nil {
2 changes: 1 addition & 1 deletion crypto/signature_nocgo.go
@@ -167,7 +167,7 @@ type btCurve struct {
*btcec.KoblitzCurve
}

-// Marshall converts a point given as (x, y) into a byte slice.
+// Marshal converts a point given as (x, y) into a byte slice.
func (curve btCurve) Marshal(x, y *big.Int) []byte {
byteLen := (curve.Params().BitSize + 7) / 8

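
Illustrative aside (not part of this commit): Marshal produces the standard uncompressed point encoding — a 0x04 prefix followed by X and Y, each left-padded to the curve's byte length. A standalone sketch using only the standard library (marshalUncompressed is a hypothetical helper, not the btcec wrapper itself):

package main

import (
	"crypto/elliptic"
	"fmt"
	"math/big"
)

// marshalUncompressed encodes (x, y) as 0x04 || X || Y, padding each
// coordinate to the curve's full byte length.
func marshalUncompressed(curve elliptic.Curve, x, y *big.Int) []byte {
	byteLen := (curve.Params().BitSize + 7) / 8
	out := make([]byte, 1+2*byteLen)
	out[0] = 0x04 // uncompressed point marker
	x.FillBytes(out[1 : 1+byteLen])
	y.FillBytes(out[1+byteLen:])
	return out
}

func main() {
	curve := elliptic.P256()
	enc := marshalUncompressed(curve, curve.Params().Gx, curve.Params().Gy)
	fmt.Printf("%d bytes: %x\n", len(enc), enc) // 65 bytes for a 256-bit curve
}
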
2 changes: 1 addition & 1 deletion eth/downloader/downloader_test.go
@@ -57,7 +57,7 @@ func newTester(t *testing.T) *downloadTester {
return newTesterWithNotification(t, nil)
}

-// newTester creates a new downloader test mocker.
+// newTesterWithNotification creates a new downloader test mocker.
func newTesterWithNotification(t *testing.T, success func()) *downloadTester {
freezer := t.TempDir()
db, err := rawdb.NewDatabaseWithFreezer(rawdb.NewMemoryDatabase(), freezer, "", false)
2 changes: 1 addition & 1 deletion eth/downloader/skeleton_test.go
@@ -94,7 +94,7 @@ func newSkeletonTestPeer(id string, headers []*types.Header) *skeletonTestPeer {
}
}

-// newSkeletonTestPeer creates a new mock peer to test the skeleton sync with,
+// newSkeletonTestPeerWithHook creates a new mock peer to test the skeleton sync with,
// and sets an optional serve hook that can return headers for delivery instead
// of the predefined chain. Useful for emulating malicious behavior that would
// otherwise require dedicated peer types.
2 changes: 1 addition & 1 deletion eth/filters/filter_system_test.go
@@ -442,7 +442,7 @@ func TestInvalidLogFilterCreation(t *testing.T) {
}
}

-// TestLogFilterUninstall tests invalid getLogs requests
+// TestInvalidGetLogsRequest tests invalid getLogs requests
func TestInvalidGetLogsRequest(t *testing.T) {
t.Parallel()

2 changes: 1 addition & 1 deletion eth/protocols/eth/handler_test.go
@@ -63,7 +63,7 @@ func newTestBackend(blocks int) *testBackend {
return newTestBackendWithGenerator(blocks, false, nil)
}

-// newTestBackend creates a chain with a number of explicitly defined blocks and
+// newTestBackendWithGenerator creates a chain with a number of explicitly defined blocks and
// wraps it into a mock backend.
func newTestBackendWithGenerator(blocks int, shanghai bool, generator func(int, *core.BlockGen)) *testBackend {
var (
4 changes: 2 additions & 2 deletions eth/protocols/snap/sync_test.go
@@ -839,7 +839,7 @@ func testMultiSyncManyUseless(t *testing.T, scheme string) {
verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}

-// TestMultiSyncManyUseless contains one good peer, and many which doesn't return anything valuable at all
+// TestMultiSyncManyUselessWithLowTimeout contains one good peer, and many which doesn't return anything valuable at all
func TestMultiSyncManyUselessWithLowTimeout(t *testing.T) {
t.Parallel()

@@ -1378,7 +1378,7 @@ func testSyncWithStorageAndNonProvingPeer(t *testing.T, scheme string) {
verifyTrie(scheme, syncer.db, sourceAccountTrie.Hash(), t)
}

-// TestSyncWithStorage tests basic sync using accounts + storage + code, against
+// TestSyncWithStorageMisbehavingProve tests basic sync using accounts + storage + code, against
// a peer who insists on delivering full storage sets _and_ proofs. This triggered
// an error, where the recipient erroneously clipped the boundary nodes, but
// did not mark the account for healing.
2 changes: 1 addition & 1 deletion eth/tracers/api_test.go
@@ -61,7 +61,7 @@ type testBackend struct {
relHook func() // Hook is invoked when the requested state is released
}

-// testBackend creates a new test backend. OBS: After test is done, teardown must be
+// newTestBackend creates a new test backend. OBS: After test is done, teardown must be
// invoked in order to release associated resources.
func newTestBackend(t *testing.T, n int, gspec *core.Genesis, generator func(i int, b *core.BlockGen)) *testBackend {
backend := &testBackend{
2 changes: 1 addition & 1 deletion eth/tracers/internal/tracetest/flat_calltrace_test.go
@@ -171,7 +171,7 @@ func testFlatCallTracer(tracerName string, dirPath string, t *testing.T) {
}
}

-// jsonEqual is similar to reflect.DeepEqual, but does a 'bounce' via json prior to
+// jsonEqualFlat is similar to reflect.DeepEqual, but does a 'bounce' via json prior to
// comparison
func jsonEqualFlat(x, y interface{}) bool {
xTrace := new([]flatCallTrace)
2 changes: 1 addition & 1 deletion eth/tracers/internal/util.go
@@ -75,7 +75,7 @@ func MemoryPtr(m []byte, offset, size int64) []byte {
return nil
}

-// Back returns the n'th item in stack
+// StackBack returns the n'th item in stack
func StackBack(st []uint256.Int, n int) *uint256.Int {
return &st[len(st)-n-1]
}
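
Illustrative aside (not part of this commit): StackBack counts from the top of the EVM stack, so n == 0 is the most recently pushed item. A tiny generic sketch of that indexing; the real helper operates on []uint256.Int rather than an arbitrary element type:

package main

import "fmt"

// stackBack returns the n'th item counted from the top of the stack,
// mirroring the tracer helper above: n == 0 is the most recently pushed item.
func stackBack[T any](st []T, n int) *T {
	return &st[len(st)-n-1]
}

func main() {
	st := []uint64{1, 2, 3}        // 3 was pushed last
	fmt.Println(*stackBack(st, 0)) // 3 (top of stack)
	fmt.Println(*stackBack(st, 2)) // 1 (bottom)
}
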
2 changes: 1 addition & 1 deletion eth/tracers/js/tracer_test.go
@@ -198,7 +198,7 @@ func TestHaltBetweenSteps(t *testing.T) {
}
}

-// testNoStepExec tests a regular value transfer (no exec), and accessing the statedb
+// TestNoStepExec tests a regular value transfer (no exec), and accessing the statedb
// in 'result'
func TestNoStepExec(t *testing.T) {
execTracer := func(code string) []byte {
2 changes: 1 addition & 1 deletion eth/tracers/logger/access_list_tracer.go
@@ -86,7 +86,7 @@ func (al accessList) equal(other accessList) bool {
return true
}

-// accesslist converts the accesslist to a types.AccessList.
+// accessList converts the accesslist to a types.AccessList.
func (al accessList) accessList() types.AccessList {
acl := make(types.AccessList, 0, len(al))
for addr, slots := range al {
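
Illustrative aside (not part of this commit): accessList() flattens the tracer's internal map of address → touched storage slots into the slice form used by types.AccessList. A sketch of that conversion with simplified stand-in types (the real ones are common.Address, common.Hash and types.AccessTuple):

package main

import "fmt"

// Simplified stand-ins for common.Address and common.Hash.
type (
	address string
	hash    string
)

type accessTuple struct {
	Address     address
	StorageKeys []hash
}

// toAccessList converts a map-based access list into the flat slice form,
// one tuple per address with its touched storage slots.
func toAccessList(al map[address]map[hash]struct{}) []accessTuple {
	acl := make([]accessTuple, 0, len(al))
	for addr, slots := range al {
		tuple := accessTuple{Address: addr, StorageKeys: make([]hash, 0, len(slots))}
		for slot := range slots {
			tuple.StorageKeys = append(tuple.StorageKeys, slot)
		}
		acl = append(acl, tuple)
	}
	return acl
}

func main() {
	al := map[address]map[hash]struct{}{
		"0xaa": {"0x01": {}, "0x02": {}},
		"0xbb": {},
	}
	for _, t := range toAccessList(al) {
		fmt.Println(t.Address, len(t.StorageKeys))
	}
}
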
2 changes: 1 addition & 1 deletion ethdb/dbtest/testsuite.go
@@ -514,7 +514,7 @@ func iterateKeys(it ethdb.Iterator) []string {
return keys
}

-// randomHash generates a random blob of data and returns it as a hash.
+// randBytes generates a random blob of data.
func randBytes(len int) []byte {
buf := make([]byte, len)
if n, err := rand.Read(buf); n != len || err != nil {
2 changes: 1 addition & 1 deletion internal/era/era.go
@@ -239,7 +239,7 @@ func (e *Era) readOffset(n uint64) (int64, error) {
return blockIndexRecordOffset + int64(binary.LittleEndian.Uint64(e.buf[:])), nil
}

-// newReader returns a snappy.Reader for the e2store entry value at off.
+// newSnappyReader returns a snappy.Reader for the e2store entry value at off.
func newSnappyReader(e *e2store.Reader, expectedType uint16, off int64) (io.Reader, int64, error) {
r, n, err := e.ReaderAt(expectedType, off)
if err != nil {
2 changes: 1 addition & 1 deletion internal/version/version.go
@@ -65,7 +65,7 @@ func ClientName(clientIdentifier string) string {
)
}

-// runtimeInfo returns build and platform information about the current binary.
+// Info returns build and platform information about the current binary.
//
// If the package that is currently executing is a prefixed by our go-ethereum
// module path, it will print out commit and date VCS information. Otherwise,
2 changes: 1 addition & 1 deletion log/logger.go
@@ -156,7 +156,7 @@ func (l *logger) Handler() slog.Handler {
return l.inner.Handler()
}

-// write logs a message at the specified level:
+// Write logs a message at the specified level:
func (l *logger) Write(level slog.Level, msg string, attrs ...any) {
if !l.inner.Enabled(context.Background(), level) {
return
2 changes: 1 addition & 1 deletion metrics/gauge.go
@@ -74,7 +74,7 @@ func (g *StandardGauge) Update(v int64) {
g.value.Store(v)
}

-// Update updates the gauge's value if v is larger then the current value.
+// UpdateIfGt updates the gauge's value if v is larger then the current value.
func (g *StandardGauge) UpdateIfGt(v int64) {
for {
exist := g.value.Load()
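
Illustrative aside (not part of this commit): UpdateIfGt only ever moves the gauge upward, using a load / compare-and-swap loop so a slower writer cannot clobber a larger value. The same pattern on a bare atomic.Int64, outside the metrics package:

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

// updateIfGt stores v into g only if v is larger than the current value,
// retrying the compare-and-swap until it either wins or sees a larger value.
func updateIfGt(g *atomic.Int64, v int64) {
	for {
		exist := g.Load()
		if exist >= v {
			return // current value is already at least v
		}
		if g.CompareAndSwap(exist, v) {
			return // we raised the gauge
		}
		// Another writer changed the value in between; retry.
	}
}

func main() {
	var gauge atomic.Int64
	var wg sync.WaitGroup
	for i := int64(1); i <= 100; i++ {
		wg.Add(1)
		go func(v int64) {
			defer wg.Done()
			updateIfGt(&gauge, v)
		}(i)
	}
	wg.Wait()
	fmt.Println(gauge.Load()) // always 100, regardless of goroutine ordering
}
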
2 changes: 1 addition & 1 deletion metrics/meter.go
@@ -173,7 +173,7 @@ type meterArbiter struct {

var arbiter = meterArbiter{ticker: time.NewTicker(5 * time.Second), meters: make(map[*StandardMeter]struct{})}

-// Ticks meters on the scheduled interval
+// tick meters on the scheduled interval
func (ma *meterArbiter) tick() {
for range ma.ticker.C {
ma.tickMeters()
2 changes: 1 addition & 1 deletion metrics/metrics.go
@@ -30,7 +30,7 @@ var enablerFlags = []string{"metrics"}
// enablerEnvVars is the env var names to use to enable metrics collections.
var enablerEnvVars = []string{"GETH_METRICS"}

-// Init enables or disables the metrics system. Since we need this to run before
+// init enables or disables the metrics system. Since we need this to run before
// any other code gets to create meters and timers, we'll actually do an ugly hack
// and peek into the command line args for the metrics flag.
func init() {
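
Illustrative aside (not part of this commit): because init() runs before flag parsing, the metrics package peeks at os.Args and the environment directly. A simplified sketch of that kind of early check — the flag and env-var names come from the snippet above, but the matching logic here is deliberately cruder than the real parser:

package main

import (
	"fmt"
	"os"
	"strings"
)

// metricsRequested reports whether the metrics flag or env var is present,
// without going through the flag package, so it could run from init().
func metricsRequested(args []string) bool {
	if os.Getenv("GETH_METRICS") != "" {
		return true
	}
	for _, arg := range args {
		flag := strings.TrimLeft(arg, "-")
		if flag == "metrics" || strings.HasPrefix(flag, "metrics=") {
			return true
		}
	}
	return false
}

func main() {
	fmt.Println(metricsRequested([]string{"geth", "--metrics"}))       // true
	fmt.Println(metricsRequested([]string{"geth", "--metrics=true"}))  // true
	fmt.Println(metricsRequested([]string{"geth", "--syncmode=snap"})) // false (unless GETH_METRICS is set)
}
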
2 changes: 1 addition & 1 deletion node/rpcstack.go
@@ -597,7 +597,7 @@ func newIPCServer(log log.Logger, endpoint string) *ipcServer {
return &ipcServer{log: log, endpoint: endpoint}
}

-// Start starts the httpServer's http.Server
+// start starts the httpServer's http.Server
func (is *ipcServer) start(apis []rpc.API) error {
is.mu.Lock()
defer is.mu.Unlock()
2 changes: 1 addition & 1 deletion p2p/discover/metrics.go
@@ -44,7 +44,7 @@ func init() {
}
}

-// meteredConn is a wrapper around a net.UDPConn that meters both the
+// meteredUdpConn is a wrapper around a net.UDPConn that meters both the
// inbound and outbound network traffic.
type meteredUdpConn struct {
UDPConn
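
Illustrative aside (not part of this commit): a metered connection wrapper simply forwards reads and writes while counting the bytes that pass through. This sketch wraps a generic net.Conn (rather than net.UDPConn) so it can run over an in-memory net.Pipe; a real version would publish the counters as metrics meters:

package main

import (
	"fmt"
	"io"
	"net"
	"sync/atomic"
)

// meteredConn wraps a net.Conn and counts the bytes flowing through it.
type meteredConn struct {
	net.Conn
	ingress, egress atomic.Int64
}

func (c *meteredConn) Read(b []byte) (int, error) {
	n, err := c.Conn.Read(b)
	c.ingress.Add(int64(n))
	return n, err
}

func (c *meteredConn) Write(b []byte) (int, error) {
	n, err := c.Conn.Write(b)
	c.egress.Add(int64(n))
	return n, err
}

func main() {
	client, server := net.Pipe()
	mc := &meteredConn{Conn: client}

	go func() {
		server.Write([]byte("hello")) // 5 bytes toward the metered side
		io.Copy(io.Discard, server)   // drain whatever the metered side sends back
	}()

	buf := make([]byte, 5)
	mc.Read(buf)
	mc.Write([]byte("ok"))
	mc.Close()
	fmt.Println("in:", mc.ingress.Load(), "out:", mc.egress.Load()) // in: 5 out: 2
}
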