diff --git a/.avalanche-golangci.yml b/.avalanche-golangci.yml
index ff566408d6..fe265fcc40 100644
--- a/.avalanche-golangci.yml
+++ b/.avalanche-golangci.yml
@@ -55,7 +55,7 @@ linters:
     - bodyclose
     - copyloopvar
     # - depguard
-    # - errcheck
+    - errcheck
     - errorlint
     - forbidigo
     - goconst
diff --git a/core/fifo_cache.go b/core/fifo_cache.go
index 9f1eb8cf1e..06e9de1cbe 100644
--- a/core/fifo_cache.go
+++ b/core/fifo_cache.go
@@ -44,7 +44,9 @@ func (f *BufferFIFOCache[K, V]) Put(key K, val V) {
 	f.l.Lock()
 	defer f.l.Unlock()
 
-	f.buffer.Insert(key) // Insert will remove the oldest [K] if we are at the [limit]
+	// Insert will remove the oldest [K] if we are at the [limit] -
+	// remove always returns nil, so it is safe to ignore this error.
+	_ = f.buffer.Insert(key)
 	f.m[key] = val
 }
diff --git a/core/state_manager_test.go b/core/state_manager_test.go
index 3e50c7226b..d9127ed9b0 100644
--- a/core/state_manager_test.go
+++ b/core/state_manager_test.go
@@ -56,8 +56,8 @@ func TestCappedMemoryTrieWriter(t *testing.T) {
 		require.NoError(w.InsertTrie(block))
 		require.Zero(m.LastDereference, "should not have dereferenced block on insert")
 		require.Zero(m.LastCommit, "should not have committed block on insert")
+		require.NoError(w.AcceptTrie(block))
 
-		w.AcceptTrie(block)
 		if i <= tipBufferSize {
 			require.Zero(m.LastDereference, "should not have dereferenced block on accept")
 		} else {
@@ -71,7 +71,7 @@ func TestCappedMemoryTrieWriter(t *testing.T) {
 			m.LastCommit = common.Hash{}
 		}
 
-		w.RejectTrie(block)
+		require.NoError(w.RejectTrie(block))
 		require.Equal(block.Root(), m.LastDereference, "should have dereferenced block on reject")
 		require.Zero(m.LastCommit, "should not have committed block on reject")
 		m.LastDereference = common.Hash{}
@@ -96,12 +96,12 @@ func TestNoPruningTrieWriter(t *testing.T) {
 		require.Zero(m.LastDereference, "should not have dereferenced block on insert")
 		require.Zero(m.LastCommit, "should not have committed block on insert")
 
-		w.AcceptTrie(block)
+		require.NoError(w.AcceptTrie(block))
 		require.Zero(m.LastDereference, "should not have dereferenced block on accept")
 		require.Equal(block.Root(), m.LastCommit, "should have committed block on accept")
 		m.LastCommit = common.Hash{}
 
-		w.RejectTrie(block)
+		require.NoError(w.RejectTrie(block))
 		require.Equal(block.Root(), m.LastDereference, "should have dereferenced block on reject")
 		require.Zero(m.LastCommit, "should not have committed block on reject")
 		m.LastDereference = common.Hash{}
diff --git a/plugin/evm/atomic/state/atomic_trie.go b/plugin/evm/atomic/state/atomic_trie.go
index d94ce44974..53732ae1b9 100644
--- a/plugin/evm/atomic/state/atomic_trie.go
+++ b/plugin/evm/atomic/state/atomic_trie.go
@@ -244,7 +244,9 @@ func (a *AtomicTrie) InsertTrie(nodes *trienode.NodeSet, root common.Hash) error
 			return err
 		}
 	}
-	a.trieDB.Reference(root, common.Hash{})
+	if err := a.trieDB.Reference(root, common.Hash{}); err != nil {
+		return err
+	}
 
 	// The use of [Cap] in [insertTrie] prevents exceeding the configured memory
 	// limit (and OOM) in case there is a large backlog of processing (unaccepted) blocks.
@@ -285,12 +287,13 @@ func (a *AtomicTrie) AcceptTrie(height uint64, root common.Hash) (bool, error) {
 	// - not committted, in which case the current root we are inserting contains
 	//   references to all the relevant data from the previous root, so the previous
 	//   root can be dereferenced.
-	a.trieDB.Dereference(a.lastAcceptedRoot)
+	if err := a.trieDB.Dereference(a.lastAcceptedRoot); err != nil {
+		return false, err
+	}
 	a.lastAcceptedRoot = root
 	return hasCommitted, nil
 }
 
 func (a *AtomicTrie) RejectTrie(root common.Hash) error {
-	a.trieDB.Dereference(root)
-	return nil
+	return a.trieDB.Dereference(root)
 }
diff --git a/plugin/evm/atomic/state/atomic_trie_test.go b/plugin/evm/atomic/state/atomic_trie_test.go
index 626eac9db0..55fd58487e 100644
--- a/plugin/evm/atomic/state/atomic_trie_test.go
+++ b/plugin/evm/atomic/state/atomic_trie_test.go
@@ -604,7 +604,7 @@ func TestAtomicTrie_AcceptTrie(t *testing.T) {
 				_, storageSize, _ := atomicTrie.trieDB.Size()
 				require.NotZero(t, storageSize, "there should be a dirty node taking up storage space")
 			}
-			atomicTrie.updateLastCommitted(testCase.lastCommittedRoot, testCase.lastCommittedHeight)
+			require.NoError(t, atomicTrie.updateLastCommitted(testCase.lastCommittedRoot, testCase.lastCommittedHeight))
 
 			hasCommitted, err := atomicTrie.AcceptTrie(testCase.height, testCase.root)
 			require.NoError(t, err)
diff --git a/plugin/evm/atomic/vm/block_extension.go b/plugin/evm/atomic/vm/block_extension.go
index 6171ef5b86..08e675433f 100644
--- a/plugin/evm/atomic/vm/block_extension.go
+++ b/plugin/evm/atomic/vm/block_extension.go
@@ -229,9 +229,13 @@ func (be *blockExtension) Reject() error {
 // CleanupVerified is called when the block is cleaned up after a failed insertion.
 func (be *blockExtension) CleanupVerified() {
 	vm := be.blockExtender.vm
-	if atomicState, err := vm.AtomicBackend.GetVerifiedAtomicState(be.block.GetEthBlock().Hash()); err == nil {
-		atomicState.Reject()
+	// If the state isn't found, no need to reject (it was never verified).
+	atomicState, err := vm.AtomicBackend.GetVerifiedAtomicState(be.block.GetEthBlock().Hash())
+	if err != nil {
+		return
 	}
+	// atomicState.Reject() never returns an error in practice.
+	atomicState.Reject() //nolint:errcheck // Reject never returns an error
 }
 
 // AtomicTxs returns the atomic transactions in this block.
diff --git a/plugin/evm/atomic/vm/vm_test.go b/plugin/evm/atomic/vm/vm_test.go
index 78b9243332..4ff3a87987 100644
--- a/plugin/evm/atomic/vm/vm_test.go
+++ b/plugin/evm/atomic/vm/vm_test.go
@@ -197,14 +197,13 @@ func testIssueAtomicTxs(t *testing.T, scheme string) {
 	utxos := map[ids.ShortID]uint64{
 		vmtest.TestShortIDAddrs[0]: importAmount,
 	}
-	addUTXOs(tvm.AtomicMemory, vm.Ctx, utxos)
+	require.NoError(addUTXOs(tvm.AtomicMemory, vm.Ctx, utxos))
 	defer func() {
 		require.NoError(vm.Shutdown(t.Context()))
 	}()
 
 	importTx, err := vm.newImportTx(vm.Ctx.XChainID, vmtest.TestEthAddrs[0], vmtest.InitialBaseFee, vmtest.TestKeys[0:1])
 	require.NoError(err)
-
 	require.NoError(vm.AtomicMempool.AddLocalTx(importTx))
 
 	msg, err := vm.WaitForEvent(t.Context())
@@ -213,11 +212,8 @@ func testIssueAtomicTxs(t *testing.T, scheme string) {
 
 	blk, err := vm.BuildBlock(t.Context())
 	require.NoError(err)
-
 	require.NoError(blk.Verify(t.Context()))
-
 	require.NoError(vm.SetPreference(t.Context(), blk.ID()))
-
 	require.NoError(blk.Accept(t.Context()))
 
 	lastAcceptedID, err := vm.LastAccepted(t.Context())
@@ -231,7 +227,6 @@ func testIssueAtomicTxs(t *testing.T, scheme string) {
 	wrappedState := extstate.New(state)
 	exportTx, err := atomic.NewExportTx(vm.Ctx, vm.CurrentRules(), wrappedState, vm.Ctx.AVAXAssetID, importAmount-(2*ap0.AtomicTxFee), vm.Ctx.XChainID, vmtest.TestShortIDAddrs[0], vmtest.InitialBaseFee, vmtest.TestKeys[0:1])
 	require.NoError(err)
-
 	require.NoError(vm.AtomicMempool.AddLocalTx(exportTx))
 
 	msg, err = vm.WaitForEvent(t.Context())
@@ -240,9 +235,7 @@ func testIssueAtomicTxs(t *testing.T, scheme string) {
 
 	blk2, err := vm.BuildBlock(t.Context())
 	require.NoError(err)
-
 	require.NoError(blk2.Verify(t.Context()))
-
 	require.NoError(blk2.Accept(t.Context()))
 
 	lastAcceptedID, err = vm.LastAccepted(t.Context())
@@ -842,9 +835,7 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) {
 	blk, err := vm.BuildBlock(t.Context())
 	require.NoError(err)
 	require.NoError(blk.Verify(t.Context()))
-
 	require.NoError(vm.SetPreference(t.Context(), blk.ID()))
-
 	require.NoError(blk.Accept(t.Context()))
 
 	newHead := <-newTxPoolHeadChan
@@ -852,8 +843,9 @@ func TestConsecutiveAtomicTransactionsRevertSnapshot(t *testing.T) {
 
 	// Add the two conflicting transactions directly to the mempool, so that two consecutive transactions
 	// will fail verification when build block is called.
-	vm.AtomicMempool.AddRemoteTx(importTxs[1])
-	vm.AtomicMempool.AddRemoteTx(importTxs[2])
+	require.NoError(vm.AtomicMempool.ForceAddTx(importTxs[1]))
+	require.NoError(vm.AtomicMempool.ForceAddTx(importTxs[2]))
+	require.Equal(2, vm.AtomicMempool.Txs.PendingLen())
 
 	_, err = vm.BuildBlock(t.Context())
 	require.ErrorIs(err, ErrEmptyBlock)
@@ -890,7 +882,7 @@ func TestAtomicTxBuildBlockDropsConflicts(t *testing.T) {
 		err = vm.AtomicMempool.AddLocalTx(conflictTx)
 		require.ErrorIs(err, txpool.ErrConflict)
 		// force add the tx
-		vm.AtomicMempool.ForceAddTx(conflictTx)
+		require.NoError(vm.AtomicMempool.ForceAddTx(conflictTx))
 		conflictSets[index].Add(conflictTx.ID())
 	}
 	msg, err := vm.WaitForEvent(t.Context())
@@ -1637,7 +1629,7 @@ func TestWaitForEvent(t *testing.T) {
 				},
 			}}}}))
 			testCase.testCase(t, vm, address, key)
-			vm.Shutdown(t.Context())
+			require.NoError(t, vm.Shutdown(t.Context()))
 		})
 	}
 }
diff --git a/plugin/evm/extension/config.go b/plugin/evm/extension/config.go
index 93e633ac55..3fe676866f 100644
--- a/plugin/evm/extension/config.go
+++ b/plugin/evm/extension/config.go
@@ -101,8 +101,7 @@ type BlockExtension interface {
 	// it can be implemented to extend inner block verification
 	SemanticVerify() error
 	// CleanupVerified is called when a block has passed SemanticVerify and SynctacticVerify,
-	// and should be cleaned up due to error or verification runs under non-write mode. This
-	// does not return an error because the block has already been verified.
+	// and should be cleaned up due to error or verification runs under non-write mode.
 	CleanupVerified()
 	// Accept is called when a block is accepted by the block manager. Accept takes a
 	// database.Batch that contains the changes that were made to the database as a result
diff --git a/plugin/evm/message/codec.go b/plugin/evm/message/codec.go
index c3f1a6795f..aee9a70775 100644
--- a/plugin/evm/message/codec.go
+++ b/plugin/evm/message/codec.go
@@ -38,7 +38,7 @@ func init() {
 	// See https://github.com/ava-labs/coreth/pull/999
 	c.SkipRegistrations(3)
 
-	Codec.RegisterCodec(Version, c)
+	errs.Add(Codec.RegisterCodec(Version, c))
 
 	if errs.Errored() {
 		panic(errs.Err)
diff --git a/plugin/evm/vm.go b/plugin/evm/vm.go
index 548072c786..862551d3ae 100644
--- a/plugin/evm/vm.go
+++ b/plugin/evm/vm.go
@@ -467,7 +467,9 @@ func (vm *VM) Initialize(
 
 	// Add p2p warp message warpHandler
 	warpHandler := acp118.NewCachedHandler(meteredCache, vm.warpBackend, vm.ctx.WarpSigner)
-	vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler)
+	if err = vm.Network.AddHandler(p2p.SignatureRequestHandlerID, warpHandler); err != nil {
+		return err
+	}
 
 	vm.stateSyncDone = make(chan struct{})
 
@@ -896,7 +898,9 @@ func (vm *VM) Shutdown(context.Context) error {
 	for _, handler := range vm.rpcHandlers {
 		handler.Stop()
 	}
-	vm.eth.Stop()
+	if err := vm.eth.Stop(); err != nil {
+		log.Error("error stopping eth", "err", err)
+	}
 	vm.shutdownWg.Wait()
 	return nil
 }
diff --git a/plugin/evm/vm_test.go b/plugin/evm/vm_test.go
index 5973f0de35..980e94d1f2 100644
--- a/plugin/evm/vm_test.go
+++ b/plugin/evm/vm_test.go
@@ -1669,7 +1669,7 @@ func TestWaitForEvent(t *testing.T) {
 				Fork: &fork,
 			})
 			testCase.testCase(t, vm)
-			vm.Shutdown(t.Context())
+			require.NoError(t, vm.Shutdown(t.Context()))
 		})
 	}
 }
@@ -1872,7 +1872,9 @@ func TestDelegatePrecompile_BehaviorAcrossUpgrades(t *testing.T) {
 			vmtest.SetupTestVM(t, vm, vmtest.TestVMConfig{
 				Fork: &tt.fork,
 			})
-			defer vm.Shutdown(ctx)
+			defer func() {
+				require.NoError(t, vm.Shutdown(ctx))
+			}()
 
 			if tt.preDeployTime != 0 {
 				vm.clock.Set(time.Unix(tt.preDeployTime, 0))
diff --git a/plugin/evm/vmtest/genesis.go b/plugin/evm/vmtest/genesis.go
index 6853793414..c8ef0321c1 100644
--- a/plugin/evm/vmtest/genesis.go
+++ b/plugin/evm/vmtest/genesis.go
@@ -65,7 +65,9 @@ func NewTestGenesis(cfg *params.ChainConfig) *core.Genesis {
 	g.Config = &cpy
 
 	allocStr := `{"0100000000000000000000000000000000000000":{"code":"0x7300000000000000000000000000000000000000003014608060405260043610603d5760003560e01c80631e010439146042578063b6510bb314606e575b600080fd5b605c60048036036020811015605657600080fd5b503560b1565b60408051918252519081900360200190f35b818015607957600080fd5b5060af60048036036080811015608e57600080fd5b506001600160a01b03813516906020810135906040810135906060013560b6565b005b30cd90565b836001600160a01b031681836108fc8690811502906040516000604051808303818888878c8acf9550505050505015801560f4573d6000803e3d6000fd5b505050505056fea26469706673582212201eebce970fe3f5cb96bf8ac6ba5f5c133fc2908ae3dcd51082cfee8f583429d064736f6c634300060a0033","balance":"0x0"}}`
-	json.Unmarshal([]byte(allocStr), &g.Alloc)
+	if err := json.Unmarshal([]byte(allocStr), &g.Alloc); err != nil {
+		panic(err)
+	}
 	// After Durango, an additional account is funded in tests to use
 	// with warp messages.
 	if params.GetExtra(cfg).IsDurango(0) {
diff --git a/plugin/evm/vmtest/test_syncervm.go b/plugin/evm/vmtest/test_syncervm.go
index 717cdc3e72..78801772e8 100644
--- a/plugin/evm/vmtest/test_syncervm.go
+++ b/plugin/evm/vmtest/test_syncervm.go
@@ -18,6 +18,7 @@ import (
 	"github.com/ava-labs/avalanchego/snow/engine/enginetest"
 	"github.com/ava-labs/avalanchego/snow/engine/snowman/block"
 	"github.com/ava-labs/avalanchego/upgrade/upgradetest"
+	"github.com/ava-labs/avalanchego/utils"
 	"github.com/ava-labs/avalanchego/utils/set"
 	"github.com/ava-labs/avalanchego/vms/components/chain"
 	"github.com/ava-labs/avalanchego/vms/evm/database"
@@ -153,12 +154,18 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup
 	syncDisabledVM, _ := testSetup.NewVM()
 	appSender := &enginetest.Sender{T: t}
 	appSender.SendAppGossipF = func(context.Context, commonEng.SendConfig, []byte) error { return nil }
+	var atomicErr utils.Atomic[error]
 	appSender.SendAppRequestF = func(ctx context.Context, nodeSet set.Set[ids.NodeID], requestID uint32, request []byte) error {
 		nodeID, hasItem := nodeSet.Pop()
 		require.True(hasItem, "expected nodeSet to contain at least 1 nodeID")
-		go testSyncVMSetup.serverVM.VM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request)
+		go func() {
+			if err := testSyncVMSetup.serverVM.VM.AppRequest(ctx, nodeID, requestID, time.Now().Add(1*time.Second), request); err != nil {
+				atomicErr.Set(fmt.Errorf("appRequest: %w", err))
+			}
+		}()
 		return nil
 	}
+
 	ResetMetrics(testSyncVMSetup.syncerVM.SnowCtx)
 	stateSyncDisabledConfigJSON := `{"state-sync-enabled":false}`
 	genesisJSON := []byte(GenesisJSON(paramstest.ForkToChainConfig[upgradetest.Latest]))
@@ -221,11 +228,14 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup
 	// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM]
 	testSyncVMSetup.serverVM.AppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error {
 		if test.responseIntercept == nil {
-			go syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response)
+			go func() {
+				if err := syncReEnabledVM.AppResponse(ctx, nodeID, requestID, response); err != nil {
+					atomicErr.Set(fmt.Errorf("appResponse: %w", err))
+				}
+			}()
 		} else {
 			go test.responseIntercept(syncReEnabledVM, nodeID, requestID, response)
 		}
-
 		return nil
 	}
 
@@ -242,6 +252,8 @@ func StateSyncToggleEnabledToDisabledTest(t *testing.T, testSetup *SyncTestSetup
 
 	testSyncVMSetup.syncerVM.VM = syncReEnabledVM
 	testSyncerVM(t, testSyncVMSetup, test, testSetup.ExtraSyncerVMTest)
+
+	require.NoError(atomicErr.Get())
 }
 
 func VMShutdownWhileSyncingTest(t *testing.T, testSetup *SyncTestSetup) {
@@ -361,15 +373,22 @@ func initSyncServerAndClientVMs(t *testing.T, test SyncTestParams, numBlocks int
 	require.True(enabled)
 
 	// override [serverVM]'s SendAppResponse function to trigger AppResponse on [syncerVM]
+	var atomicErr utils.Atomic[error]
 	serverTest.AppSender.SendAppResponseF = func(ctx context.Context, nodeID ids.NodeID, requestID uint32, response []byte) error {
 		if test.responseIntercept == nil {
-			go syncerVM.AppResponse(ctx, nodeID, requestID, response)
+			go func() {
+				if err := syncerVM.AppResponse(ctx, nodeID, requestID, response); err != nil {
+					atomicErr.Set(fmt.Errorf("unintercepted appResponse: %w", err))
+				}
+			}()
 		} else {
 			go test.responseIntercept(syncerVM, nodeID, requestID, response)
 		}
-
 		return nil
 	}
+	t.Cleanup(func() {
+		require.NoError(atomicErr.Get())
+	})
 
 	// connect peer to [syncerVM]
 	require.NoError(
diff --git a/plugin/main.go b/plugin/main.go
index 9132d6d1d4..6804079836 100644
--- a/plugin/main.go
+++ b/plugin/main.go
@@ -32,5 +32,9 @@ func main() {
 		fmt.Printf("failed to set fd limit correctly due to: %s\n", err)
 		os.Exit(1)
 	}
-	rpcchainvm.Serve(context.Background(), factory.NewPluginVM())
+
+	if err := rpcchainvm.Serve(context.Background(), factory.NewPluginVM()); err != nil {
+		fmt.Printf("failed to serve rpc chain vm: %s\n", err)
+		os.Exit(1)
+	}
 }
diff --git a/sync/statesync/code_queue_test.go b/sync/statesync/code_queue_test.go
index 2caf67976f..f714401876 100644
--- a/sync/statesync/code_queue_test.go
+++ b/sync/statesync/code_queue_test.go
@@ -236,7 +236,9 @@ func TestQuitAndAddCodeRace(t *testing.T) {
 		in := []common.Hash{{}}
 		ready.Done()
 		<-start
-		q.AddCode(in)
+		// Due to the race condition, AddCode may either succeed or fail
+		// depending on whether the quit channel is closed first
+		_ = q.AddCode(in)
 	}()
 
 	ready.Wait()
diff --git a/sync/statesync/trie_sync_tasks.go b/sync/statesync/trie_sync_tasks.go
index 1133e5654f..d06931046e 100644
--- a/sync/statesync/trie_sync_tasks.go
+++ b/sync/statesync/trie_sync_tasks.go
@@ -75,7 +75,9 @@ func (m *mainTrieTask) OnLeafs(db ethdb.KeyValueWriter, keys, vals [][]byte) err
 
 		// check if this account has storage root that we need to fetch
 		if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
-			m.sync.trieQueue.RegisterStorageTrie(acc.Root, accountHash)
+			if err := m.sync.trieQueue.RegisterStorageTrie(acc.Root, accountHash); err != nil {
+				return err
+			}
 		}
 
 		// check if this account has code and add it to codeHashes to fetch
diff --git a/tests/warp/warp_test.go b/tests/warp/warp_test.go
index 7b6c9728b5..a5aa134753 100644
--- a/tests/warp/warp_test.go
+++ b/tests/warp/warp_test.go
@@ -365,7 +365,7 @@ func (w *warpTest) aggregateSignaturesViaAPI() {
 	numSigners, err := parsedWarpMessage.Signature.NumSigners()
 	require.NoError(err)
 	require.Len(warpValidators.Validators, numSigners)
-	parsedWarpMessage.Signature.Verify(&parsedWarpMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
+	err = parsedWarpMessage.Signature.Verify(&parsedWarpMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
 	require.NoError(err)
 	w.addressedCallSignedMessage = parsedWarpMessage
 
@@ -377,7 +377,7 @@ func (w *warpTest) aggregateSignaturesViaAPI() {
 	numSigners, err = parsedWarpBlockMessage.Signature.NumSigners()
 	require.NoError(err)
 	require.Len(warpValidators.Validators, numSigners)
-	parsedWarpBlockMessage.Signature.Verify(&parsedWarpBlockMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
+	err = parsedWarpBlockMessage.Signature.Verify(&parsedWarpBlockMessage.UnsignedMessage, w.networkID, warpValidators, warp.WarpQuorumDenominator, warp.WarpQuorumDenominator)
 	require.NoError(err)
 	w.blockPayloadSignedMessage = parsedWarpBlockMessage
 }
diff --git a/triedb/firewood/database.go b/triedb/firewood/database.go
index 3ffa60e5ea..ef246c622e 100644
--- a/triedb/firewood/database.go
+++ b/triedb/firewood/database.go
@@ -553,7 +553,11 @@ func (db *Database) getProposalHash(parentRoot common.Hash, keys, values [][]byt
 	ffiHashTimer.Inc(time.Since(start).Milliseconds())
 
 	// We succesffuly created a proposal, so we must drop it after use.
-	defer p.Drop()
+	defer func() {
+		if err := p.Drop(); err != nil {
+			log.Error("firewood: error dropping proposal after hash computation", "parentRoot", parentRoot.Hex(), "error", err)
+		}
+	}()
 
 	rootBytes, err := p.Root()
 	if err != nil {
diff --git a/warp/verifier_backend_test.go b/warp/verifier_backend_test.go
index 09807f3ae7..19995f45a2 100644
--- a/warp/verifier_backend_test.go
+++ b/warp/verifier_backend_test.go
@@ -51,8 +51,7 @@ func TestAddressedCallSignatures(t *testing.T) {
 				require.NoError(t, err)
 				signature, err := snowCtx.WarpSigner.Sign(msg)
 				require.NoError(t, err)
-
-				backend.AddMessage(msg)
+				require.NoError(t, backend.AddMessage(msg))
 				return msg.Bytes(), signature
 			},
 			verifyStats: func(t *testing.T, stats *verifierStats) {