diff --git a/.golangci.yml b/.golangci.yml index 6c02a92c56e4..447226d997f7 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -60,6 +60,7 @@ linters: - tagalign - testifylint - unconvert + - usetesting - unparam - unused - usestdlibvars @@ -199,6 +200,14 @@ linters: disable: - go-require - float-compare + usetesting: + os-create-temp: true # Disallow `os.CreateTemp("", ...)` + os-mkdir-temp: true # Disallow `os.MkdirTemp()` + os-setenv: true # Disallow `os.Setenv()` + os-temp-dir: true # Disallow `os.TempDir()` + os-chdir: true # Disallow `os.Chdir()` + context-background: true # Disallow `context.Background()` + context-todo: true # Disallow `context.TODO()` unused: # Mark all struct fields that have been written to as used. # Default: true @@ -216,6 +225,15 @@ linters: - common-false-positives - legacy - std-error-handling + rules: + # Exclude some linters from running on test files. + # 1. Exclude the top level tests/ directory. + # 2. Exclude any file prefixed with test_ in any directory. + # 3. Exclude any directory suffixed with test. + # 4. Exclude any file suffixed with _test.go. + - path: "(^tests/)|(^(.*/)*test_[^/]*\\.go$)|(.*test/.*)|(.*_test\\.go$)" + linters: + - prealloc formatters: enable: - gci diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 9f6c9d1762db..2750f40afd46 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -110,7 +110,7 @@ Mocks are auto-generated using [mockgen](https://pkg.go.dev/go.uber.org/mock/moc - Run the linter ```sh -./scipts/run_task.sh lint +./scripts/run_task.sh lint ``` ### Continuous Integration (CI) diff --git a/Taskfile.yml b/Taskfile.yml index 4b347993f6a3..e47de02e0369 100644 --- a/Taskfile.yml +++ b/Taskfile.yml @@ -170,6 +170,10 @@ tasks: desc: Runs static analysis tests of golang code cmd: ./scripts/lint.sh + lint-fix: + desc: Runs automated fixing for failing static analysis of golang code + cmd: ./scripts/run_tool.sh golangci-lint run --config .golangci.yml --fix + lint-action: desc: Runs actionlint to check sanity of github action configuration cmd: ./scripts/actionlint.sh diff --git a/api/admin/client_test.go b/api/admin/client_test.go index fb4fab77aad6..b1f2592175b2 100644 --- a/api/admin/client_test.go +++ b/api/admin/client_test.go @@ -78,7 +78,7 @@ func TestStartCPUProfiler(t *testing.T) { for _, test := range SuccessResponseTests { t.Run(test.name, func(t *testing.T) { mockClient := Client{Requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} - err := mockClient.StartCPUProfiler(context.Background()) + err := mockClient.StartCPUProfiler(t.Context()) require.ErrorIs(t, err, test.expectedErr) }) } @@ -88,7 +88,7 @@ func TestStopCPUProfiler(t *testing.T) { for _, test := range SuccessResponseTests { t.Run(test.name, func(t *testing.T) { mockClient := Client{Requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} - err := mockClient.StopCPUProfiler(context.Background()) + err := mockClient.StopCPUProfiler(t.Context()) require.ErrorIs(t, err, test.expectedErr) }) } @@ -98,7 +98,7 @@ func TestMemoryProfile(t *testing.T) { for _, test := range SuccessResponseTests { t.Run(test.name, func(t *testing.T) { mockClient := Client{Requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} - err := mockClient.MemoryProfile(context.Background()) + err := mockClient.MemoryProfile(t.Context()) require.ErrorIs(t, err, test.expectedErr) }) } @@ -108,7 +108,7 @@ func TestLockProfile(t *testing.T) { for _, test := range SuccessResponseTests { t.Run(test.name, func(t *testing.T) { mockClient := Client{Requester: 
NewMockClient(&api.EmptyReply{}, test.expectedErr)} - err := mockClient.LockProfile(context.Background()) + err := mockClient.LockProfile(t.Context()) require.ErrorIs(t, err, test.expectedErr) }) } @@ -118,7 +118,7 @@ func TestAlias(t *testing.T) { for _, test := range SuccessResponseTests { t.Run(test.name, func(t *testing.T) { mockClient := Client{Requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} - err := mockClient.Alias(context.Background(), "alias", "alias2") + err := mockClient.Alias(t.Context(), "alias", "alias2") require.ErrorIs(t, err, test.expectedErr) }) } @@ -128,7 +128,7 @@ func TestAliasChain(t *testing.T) { for _, test := range SuccessResponseTests { t.Run(test.name, func(t *testing.T) { mockClient := Client{Requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} - err := mockClient.AliasChain(context.Background(), "chain", "chain-alias") + err := mockClient.AliasChain(t.Context(), "chain", "chain-alias") require.ErrorIs(t, err, test.expectedErr) }) } @@ -143,14 +143,14 @@ func TestGetChainAliases(t *testing.T) { Aliases: expectedReply, }, nil)} - reply, err := mockClient.GetChainAliases(context.Background(), "chain") + reply, err := mockClient.GetChainAliases(t.Context(), "chain") require.NoError(err) require.Equal(expectedReply, reply) }) t.Run("failure", func(t *testing.T) { mockClient := Client{Requester: NewMockClient(&GetChainAliasesReply{}, errTest)} - _, err := mockClient.GetChainAliases(context.Background(), "chain") + _, err := mockClient.GetChainAliases(t.Context(), "chain") require.ErrorIs(t, err, errTest) }) } @@ -159,7 +159,7 @@ func TestStacktrace(t *testing.T) { for _, test := range SuccessResponseTests { t.Run(test.name, func(t *testing.T) { mockClient := Client{Requester: NewMockClient(&api.EmptyReply{}, test.expectedErr)} - err := mockClient.Stacktrace(context.Background()) + err := mockClient.Stacktrace(t.Context()) require.ErrorIs(t, err, test.expectedErr) }) } @@ -182,7 +182,7 @@ func TestReloadInstalledVMs(t *testing.T) { FailedVMs: expectedFailedVMs, }, nil)} - loadedVMs, failedVMs, err := mockClient.LoadVMs(context.Background()) + loadedVMs, failedVMs, err := mockClient.LoadVMs(t.Context()) require.NoError(err) require.Equal(expectedNewVMs, loadedVMs) require.Equal(expectedFailedVMs, failedVMs) @@ -190,7 +190,7 @@ func TestReloadInstalledVMs(t *testing.T) { t.Run("failure", func(t *testing.T) { mockClient := Client{Requester: NewMockClient(&LoadVMsReply{}, errTest)} - _, _, err := mockClient.LoadVMs(context.Background()) + _, _, err := mockClient.LoadVMs(t.Context()) require.ErrorIs(t, err, errTest) }) } @@ -253,7 +253,7 @@ func TestSetLoggerLevel(t *testing.T) { ), } res, err := c.SetLoggerLevel( - context.Background(), + t.Context(), "", tt.logLevel, tt.displayLevel, @@ -306,7 +306,7 @@ func TestGetLoggerLevel(t *testing.T) { ), } res, err := c.GetLoggerLevel( - context.Background(), + t.Context(), tt.loggerName, ) require.ErrorIs(err, tt.clientErr) @@ -347,7 +347,7 @@ func TestGetConfig(t *testing.T) { c := Client{ Requester: NewMockClient(tt.expectedResponse, tt.serviceErr), } - res, err := c.GetConfig(context.Background()) + res, err := c.GetConfig(t.Context()) require.ErrorIs(err, tt.clientErr) if tt.clientErr != nil { return diff --git a/api/health/client_test.go b/api/health/client_test.go index e4a1342589e3..84f5c799d124 100644 --- a/api/health/client_test.go +++ b/api/health/client_test.go @@ -48,25 +48,25 @@ func TestClient(t *testing.T) { } { - readiness, err := c.Readiness(context.Background(), nil) + readiness, err 
:= c.Readiness(t.Context(), nil) require.NoError(err) require.True(readiness.Healthy) } { - health, err := c.Health(context.Background(), nil) + health, err := c.Health(t.Context(), nil) require.NoError(err) require.True(health.Healthy) } { - liveness, err := c.Liveness(context.Background(), nil) + liveness, err := c.Liveness(t.Context(), nil) require.NoError(err) require.True(liveness.Healthy) } { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second) healthy, err := AwaitHealthy(ctx, c, time.Second, nil) cancel() require.NoError(err) @@ -74,7 +74,7 @@ func TestClient(t *testing.T) { } { - ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second) + ctx, cancel := context.WithTimeout(t.Context(), 3*time.Second) healthy, err := AwaitReady(ctx, c, time.Second, nil) cancel() require.NoError(err) @@ -84,7 +84,7 @@ func TestClient(t *testing.T) { mc.reply.Healthy = false { - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Microsecond) + ctx, cancel := context.WithTimeout(t.Context(), 20*time.Microsecond) healthy, err := AwaitHealthy(ctx, c, time.Microsecond, nil) cancel() require.ErrorIs(err, context.DeadlineExceeded) @@ -92,7 +92,7 @@ func TestClient(t *testing.T) { } { - ctx, cancel := context.WithTimeout(context.Background(), 20*time.Microsecond) + ctx, cancel := context.WithTimeout(t.Context(), 20*time.Microsecond) healthy, err := AwaitReady(ctx, c, time.Microsecond, nil) cancel() require.ErrorIs(err, context.DeadlineExceeded) @@ -104,14 +104,14 @@ func TestClient(t *testing.T) { } { - healthy, err := AwaitHealthy(context.Background(), c, time.Microsecond, nil) + healthy, err := AwaitHealthy(t.Context(), c, time.Microsecond, nil) require.NoError(err) require.True(healthy) } mc.reply.Healthy = false { - healthy, err := AwaitReady(context.Background(), c, time.Microsecond, nil) + healthy, err := AwaitReady(t.Context(), c, time.Microsecond, nil) require.NoError(err) require.True(healthy) } diff --git a/api/health/health_test.go b/api/health/health_test.go index cc9a1c49e158..7b8b379252b4 100644 --- a/api/health/health_test.go +++ b/api/health/health_test.go @@ -125,7 +125,7 @@ func TestPassingChecks(t *testing.T) { require.NoError(h.RegisterHealthCheck("check", check)) require.NoError(h.RegisterLivenessCheck("check", check)) - h.Start(context.Background(), checkFreq) + h.Start(t.Context(), checkFreq) defer h.Stop() { @@ -189,7 +189,7 @@ func TestPassingThenFailingChecks(t *testing.T) { require.NoError(h.RegisterHealthCheck("check", check)) require.NoError(h.RegisterLivenessCheck("check", check)) - h.Start(context.Background(), checkFreq) + h.Start(t.Context(), checkFreq) defer h.Stop() awaitReadiness(t, h, true) @@ -240,7 +240,7 @@ func TestDeadlockRegression(t *testing.T) { return "", nil }) - h.Start(context.Background(), time.Nanosecond) + h.Start(t.Context(), time.Nanosecond) defer h.Stop() for i := 0; i < 100; i++ { @@ -307,7 +307,7 @@ func TestTags(t *testing.T) { require.False(health) } - h.Start(context.Background(), checkFreq) + h.Start(t.Context(), checkFreq) awaitHealthy(t, h, true) diff --git a/api/health/service_test.go b/api/health/service_test.go index 97f4b222dcc3..38ba5fababbe 100644 --- a/api/health/service_test.go +++ b/api/health/service_test.go @@ -64,7 +64,7 @@ func TestServiceResponses(t *testing.T) { require.False(reply.Healthy) } - h.Start(context.Background(), checkFreq) + h.Start(t.Context(), checkFreq) defer h.Stop() awaitReadiness(t, h, true) @@ -190,7 
+190,7 @@ func TestServiceTagResponse(t *testing.T) { require.False(reply.Healthy) } - h.Start(context.Background(), checkFreq) + h.Start(t.Context(), checkFreq) test.await(t, h, true) diff --git a/api/info/client_test.go b/api/info/client_test.go index 609034f72ee6..6b85d53b69db 100644 --- a/api/info/client_test.go +++ b/api/info/client_test.go @@ -46,7 +46,7 @@ func TestClient(t *testing.T) { } { - bootstrapped, err := c.IsBootstrapped(context.Background(), "X") + bootstrapped, err := c.IsBootstrapped(t.Context(), "X") require.NoError(err) require.True(bootstrapped) } @@ -54,7 +54,7 @@ func TestClient(t *testing.T) { mc.reply.IsBootstrapped = false { - bootstrapped, err := c.IsBootstrapped(context.Background(), "X") + bootstrapped, err := c.IsBootstrapped(t.Context(), "X") require.NoError(err) require.False(bootstrapped) } @@ -64,7 +64,7 @@ func TestClient(t *testing.T) { } { - bootstrapped, err := AwaitBootstrapped(context.Background(), c, "X", time.Microsecond) + bootstrapped, err := AwaitBootstrapped(t.Context(), c, "X", time.Microsecond) require.NoError(err) require.True(bootstrapped) } diff --git a/cache/lru/cache_test.go b/cache/lru/cache_test.go index e75ae5d593fe..6548627b3349 100644 --- a/cache/lru/cache_test.go +++ b/cache/lru/cache_test.go @@ -88,8 +88,10 @@ func TestCacheOnEvict(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - evictedKeys := make([]int, 0) - evictedValues := make([]int, 0) + var ( + evictedKeys []int + evictedValues []int + ) c := NewCacheWithOnEvict(tt.cacheSize, func(key, value int) { evictedKeys = append(evictedKeys, key) evictedValues = append(evictedValues, value) diff --git a/codec/codectest/codectest.go b/codec/codectest/codectest.go index b36f249f33d6..5f657bdc056e 100644 --- a/codec/codectest/codectest.go +++ b/codec/codectest/codectest.go @@ -770,7 +770,7 @@ func TestSliceWithEmptySerialization(t testing.TB, codec codecpkg.GeneralCodec) require.NoError(manager.RegisterCodec(0, codec)) val := &nestedSliceStruct{ - Arr: make([]emptyStruct, 0), + Arr: []emptyStruct{}, } expected := []byte{0x00, 0x00, 0x00, 0x00, 0x00, 0x00} // codec version (0x00, 0x00) then (0x00, 0x00, 0x00, 0x00) for numElts result, err := manager.Marshal(0, val) diff --git a/database/corruptabledb/db_test.go b/database/corruptabledb/db_test.go index d85d7c26655a..ee0eebec1c51 100644 --- a/database/corruptabledb/db_test.go +++ b/database/corruptabledb/db_test.go @@ -4,7 +4,6 @@ package corruptabledb import ( - "context" "errors" "testing" @@ -73,7 +72,7 @@ func TestCorruption(t *testing.T) { return corruptableBatch.Write() }, "corrupted healthcheck": func(db database.Database) error { - _, err := db.HealthCheck(context.Background()) + _, err := db.HealthCheck(t.Context()) return err }, } diff --git a/database/heightindexdb/meterdb/db.go b/database/heightindexdb/meterdb/db.go new file mode 100644 index 000000000000..c25e10b7a432 --- /dev/null +++ b/database/heightindexdb/meterdb/db.go @@ -0,0 +1,124 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+
+package meterdb
+
+import (
+	"errors"
+	"time"
+
+	"github.com/prometheus/client_golang/prometheus"
+
+	"github.com/ava-labs/avalanchego/database"
+)
+
+const methodLabel = "method"
+
+var (
+	_ database.HeightIndex = (*Database)(nil)
+
+	methodLabels = []string{methodLabel}
+	putLabel     = prometheus.Labels{
+		methodLabel: "put",
+	}
+	getLabel = prometheus.Labels{
+		methodLabel: "get",
+	}
+	hasLabel = prometheus.Labels{
+		methodLabel: "has",
+	}
+	closeLabel = prometheus.Labels{
+		methodLabel: "close",
+	}
+)
+
+// Database tracks the amount of time each operation takes and how many bytes
+// are read/written to the underlying height index database.
+type Database struct {
+	heightDB database.HeightIndex
+
+	calls    *prometheus.CounterVec
+	duration *prometheus.GaugeVec
+	size     *prometheus.CounterVec
+}
+
+func New(
+	reg prometheus.Registerer,
+	namespace string,
+	db database.HeightIndex,
+) (*Database, error) {
+	meterDB := &Database{
+		heightDB: db,
+		calls: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "calls",
+				Help:      "number of calls to the database",
+			},
+			methodLabels,
+		),
+		duration: prometheus.NewGaugeVec(
+			prometheus.GaugeOpts{
+				Namespace: namespace,
+				Name:      "duration",
+				Help:      "time spent in database calls (ns)",
+			},
+			methodLabels,
+		),
+		size: prometheus.NewCounterVec(
+			prometheus.CounterOpts{
+				Namespace: namespace,
+				Name:      "size",
+				Help:      "size of data passed in database calls",
+			},
+			methodLabels,
+		),
+	}
+	return meterDB, errors.Join(
+		reg.Register(meterDB.calls),
+		reg.Register(meterDB.duration),
+		reg.Register(meterDB.size),
+	)
+}
+
+func (db *Database) Put(height uint64, block []byte) error {
+	start := time.Now()
+	err := db.heightDB.Put(height, block)
+	duration := time.Since(start)
+
+	db.calls.With(putLabel).Inc()
+	db.duration.With(putLabel).Add(float64(duration.Nanoseconds()))
+	db.size.With(putLabel).Add(float64(len(block)))
+	return err
+}
+
+func (db *Database) Get(height uint64) ([]byte, error) {
+	start := time.Now()
+	block, err := db.heightDB.Get(height)
+	duration := time.Since(start)
+
+	db.calls.With(getLabel).Inc()
+	db.duration.With(getLabel).Add(float64(duration.Nanoseconds()))
+	db.size.With(getLabel).Add(float64(len(block)))
+	return block, err
+}
+
+func (db *Database) Has(height uint64) (bool, error) {
+	start := time.Now()
+	has, err := db.heightDB.Has(height)
+	duration := time.Since(start)
+
+	db.calls.With(hasLabel).Inc()
+	db.duration.With(hasLabel).Add(float64(duration.Nanoseconds()))
+	return has, err
+}
+
+func (db *Database) Close() error {
+	start := time.Now()
+	err := db.heightDB.Close()
+	duration := time.Since(start)
+
+	db.calls.With(closeLabel).Inc()
+	db.duration.With(closeLabel).Add(float64(duration.Nanoseconds()))
+	return err
+}
diff --git a/database/heightindexdb/meterdb/db_test.go b/database/heightindexdb/meterdb/db_test.go
new file mode 100644
index 000000000000..6f858da6344b
--- /dev/null
+++ b/database/heightindexdb/meterdb/db_test.go
@@ -0,0 +1,148 @@
+// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package meterdb
+
+import (
+	"errors"
+	"fmt"
+	"testing"
+
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/stretchr/testify/require"
+
+	"github.com/ava-labs/avalanchego/database"
+	"github.com/ava-labs/avalanchego/database/heightindexdb/memdb"
+
+	dto "github.com/prometheus/client_model/go"
+)
+
+const metricsNamespace = "meterdb"
+
+func setup(t *testing.T) (*prometheus.Registry, *Database) {
+	t.Helper()
+
+	reg := prometheus.NewRegistry()
+	memDB := &memdb.Database{}
+
+	db, err := New(reg, metricsNamespace, memDB)
+	require.NoError(t, err)
+	require.NotNil(t, db)
+
+	return reg, db
+}
+
+func writeBlocks(t *testing.T, db *Database, blockCount int, blockSize int) [][]byte {
+	t.Helper()
+
+	blocks := make([][]byte, blockCount)
+	for i := range blockCount {
+		blockData := make([]byte, blockSize)
+		prefix := fmt.Sprintf("block-%d", i)
+		copy(blockData, prefix)
+		blocks[i] = blockData
+	}
+	for i := range blockCount {
+		require.NoError(t, db.Put(uint64(i), blocks[i]))
+	}
+
+	return blocks
+}
+
+func TestPutGet(t *testing.T) {
+	reg, db := setup(t)
+
+	// Create 100 fixed-size blocks (1KB each)
+	const blockCount = 100
+	const blockSize = 1024
+	writeBlocks(t, db, blockCount, blockSize)
+
+	// Read from blocks 0 to 119 (including non-existent ones)
+	const blocksToRead = 120
+	for height := range blocksToRead {
+		_, err := db.Get(uint64(height))
+		if errors.Is(err, database.ErrNotFound) {
+			continue
+		}
+		require.NoError(t, err, "error getting block %d", height)
+	}
+
+	calls, duration, size := gatherMetrics(t, reg)
+
+	// Verify put metrics
+	require.InEpsilon(t, float64(blockCount), calls["put"], 0.01)
+	require.InEpsilon(t, float64(blockCount*blockSize), size["put"], 0.01)
+	require.Greater(t, duration["put"], float64(0))
+
+	// Verify get metrics
+	require.InEpsilon(t, float64(blocksToRead), calls["get"], 0.01)
+	require.InEpsilon(t, float64(blockCount*blockSize), size["get"], 0.01)
+	require.Greater(t, duration["get"], float64(0))
+}
+
+func TestHas(t *testing.T) {
+	reg, db := setup(t)
+
+	const blocksToRead = 120
+	for height := range blocksToRead {
+		_, err := db.Has(uint64(height))
+		require.NoError(t, err)
+	}
+
+	calls, duration, size := gatherMetrics(t, reg)
+	require.InEpsilon(t, float64(blocksToRead), calls["has"], 0.01)
+	require.Zero(t, size["has"])
+	require.Greater(t, duration["has"], float64(0))
+}
+
+func TestClose(t *testing.T) {
+	reg, db := setup(t)
+	require.NoError(t, db.Close())
+
+	calls, duration, size := gatherMetrics(t, reg)
+	require.InEpsilon(t, float64(1), calls["close"], 0.01)
+	require.Zero(t, size["close"])
+	require.Greater(t, duration["close"], float64(0))
+}
+
+func gatherMetrics(t *testing.T, reg *prometheus.Registry) (map[string]float64, map[string]float64, map[string]float64) {
+	t.Helper()
+
+	metrics, err := reg.Gather()
+	require.NoError(t, err)
+	require.NotEmpty(t, metrics)
+
+	calls := extractMetricValues(metrics, "calls")
+	duration := extractMetricValues(metrics, "duration")
+	size := extractMetricValues(metrics, "size")
+	return calls, duration, size
+}
+
+func extractMetricValues(metrics []*dto.MetricFamily, metricName string) map[string]float64 {
+	result := make(map[string]float64)
+	namespacedMetricName := fmt.Sprintf("%s_%s", metricsNamespace, metricName)
+
+	for _, metric := range metrics {
+		if *metric.Name == namespacedMetricName {
+			for _, m := range metric.Metric {
+				method := ""
+				for _, label := range m.Label {
+					if *label.Name == "method" {
+						method = *label.Value
+						break
+					}
+				}
+				switch metricName {
case "calls": + result[method] = *m.Counter.Value + case "duration": + result[method] = *m.Gauge.Value + case "size": + result[method] = *m.Counter.Value + } + } + break + } + } + return result +} diff --git a/database/rpcdb/db_test.go b/database/rpcdb/db_test.go index 00e031212b75..9e1317f8e1d5 100644 --- a/database/rpcdb/db_test.go +++ b/database/rpcdb/db_test.go @@ -4,7 +4,6 @@ package rpcdb import ( - "context" "fmt" "testing" @@ -126,7 +125,7 @@ func TestHealthCheck(t *testing.T) { require.NoError(scenario.testFn(db)) // check db HealthCheck - _, err := db.HealthCheck(context.Background()) + _, err := db.HealthCheck(t.Context()) if scenario.wantErr { require.Error(err) //nolint:forbidigo require.Contains(err.Error(), scenario.wantErrMsg) @@ -135,7 +134,7 @@ func TestHealthCheck(t *testing.T) { require.NoError(err) // check rpc HealthCheck - _, err = baseDB.client.HealthCheck(context.Background()) + _, err = baseDB.client.HealthCheck(t.Context()) require.NoError(err) }) } diff --git a/genesis/genesis_test.go b/genesis/genesis_test.go index 9b3e10e2710c..c1a18458ef13 100644 --- a/genesis/genesis_test.go +++ b/genesis/genesis_test.go @@ -330,8 +330,6 @@ func TestGenesisFromFlag(t *testing.T) { case constants.LocalID: genBytes, err = json.Marshal(&LocalConfig) require.NoError(err) - default: - genBytes = make([]byte, 0) } } else { genBytes = test.customConfig diff --git a/indexer/client_test.go b/indexer/client_test.go index 1d9b74d2c4d9..8bed4b38bab1 100644 --- a/indexer/client_test.go +++ b/indexer/client_test.go @@ -40,7 +40,7 @@ func TestIndexClient(t *testing.T) { return nil }, } - index, err := client.GetIndex(context.Background(), ids.Empty) + index, err := client.GetIndex(t.Context(), ids.Empty) require.NoError(err) require.Equal(uint64(5), index) } @@ -62,7 +62,7 @@ func TestIndexClient(t *testing.T) { return nil }, } - container, index, err := client.GetLastAccepted(context.Background()) + container, index, err := client.GetLastAccepted(t.Context()) require.NoError(err) require.Equal(id, container.ID) require.Equal(bytes, container.Bytes) @@ -85,7 +85,7 @@ func TestIndexClient(t *testing.T) { return nil }, } - containers, err := client.GetContainerRange(context.Background(), 1, 10) + containers, err := client.GetContainerRange(t.Context(), 1, 10) require.NoError(err) require.Len(containers, 1) require.Equal(id, containers[0].ID) @@ -101,7 +101,7 @@ func TestIndexClient(t *testing.T) { return nil }, } - isAccepted, err := client.IsAccepted(context.Background(), ids.Empty) + isAccepted, err := client.IsAccepted(t.Context(), ids.Empty) require.NoError(err) require.True(isAccepted) } @@ -123,7 +123,7 @@ func TestIndexClient(t *testing.T) { return nil }, } - container, index, err := client.GetContainerByID(context.Background(), id) + container, index, err := client.GetContainerByID(t.Context(), id) require.NoError(err) require.Equal(id, container.ID) require.Equal(bytes, container.Bytes) diff --git a/network/dialer/dialer_test.go b/network/dialer/dialer_test.go index f707c0885358..ba3c4356dbc7 100644 --- a/network/dialer/dialer_test.go +++ b/network/dialer/dialer_test.go @@ -24,7 +24,7 @@ func TestDialerDialCanceledContext(t *testing.T) { listenAddrPort := netip.AddrPortFrom(netip.IPv4Unspecified(), 0) dialer := NewDialer("tcp", Config{}, logging.NoLog{}) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() _, err := dialer.Dial(ctx, listenAddrPort) require.ErrorIs(err, context.Canceled) @@ -50,7 +50,7 @@ func 
TestDialerDial(t *testing.T) { eg := errgroup.Group{} eg.Go(func() error { - _, err := dialer.Dial(context.Background(), listenedAddrPort) + _, err := dialer.Dial(t.Context(), listenedAddrPort) return err }) diff --git a/network/network_test.go b/network/network_test.go index 2d8e076d37b6..be00a77958e1 100644 --- a/network/network_test.go +++ b/network/network_test.go @@ -337,7 +337,7 @@ func TestIngressConnCount(t *testing.T) { for _, net := range networks { connCount := net.IngressConnCount() ingressConnCount.Add(connCount) - _, err := net.HealthCheck(context.Background()) + _, err := net.HealthCheck(t.Context()) if connCount == 0 { require.ErrorContains(err, ErrNoIngressConnections.Error()) //nolint } else { diff --git a/network/p2p/acp118/aggregator_test.go b/network/p2p/acp118/aggregator_test.go index c17cc086eb81..f397022a00b6 100644 --- a/network/p2p/acp118/aggregator_test.go +++ b/network/p2p/acp118/aggregator_test.go @@ -62,7 +62,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { peers: map[ids.NodeID]p2p.Handler{ nodeID0: NewHandler(&testVerifier{Errs: []*common.AppError{common.ErrUndefined}}, signer0), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -92,7 +92,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { peers: map[ids.NodeID]p2p.Handler{ nodeID0: NewHandler(&testVerifier{}, signer0), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -123,7 +123,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { peers: map[ids.NodeID]p2p.Handler{ nodeID0: NewHandler(&testVerifier{}, signer0), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -156,7 +156,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { nodeID1: NewHandler(&testVerifier{Errs: []*common.AppError{common.ErrUndefined}}, signer1), nodeID2: NewHandler(&testVerifier{Errs: []*common.AppError{common.ErrUndefined}}, signer2), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -199,7 +199,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { nodeID1: NewHandler(&testVerifier{}, signer1), nodeID2: NewHandler(&testVerifier{Errs: []*common.AppError{common.ErrUndefined}}, signer2), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -242,7 +242,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { nodeID1: NewHandler(&testVerifier{}, signer1), nodeID2: NewHandler(&testVerifier{}, signer2), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -285,7 +285,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { nodeID1: NewHandler(&testVerifier{Errs: []*common.AppError{common.ErrUndefined}}, signer1), nodeID2: NewHandler(&testVerifier{Errs: []*common.AppError{common.ErrUndefined}}, signer2), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -328,7 +328,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { nodeID1: NewHandler(&testVerifier{}, signer1), nodeID2: NewHandler(&testVerifier{Errs: 
[]*common.AppError{common.ErrUndefined}}, signer2), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -371,7 +371,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { nodeID1: NewHandler(&testVerifier{}, signer1), nodeID2: NewHandler(&testVerifier{Errs: []*common.AppError{common.ErrUndefined}}, signer2), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -414,7 +414,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { nodeID1: NewHandler(&testVerifier{}, signer1), nodeID2: NewHandler(&testVerifier{}, signer1), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -447,7 +447,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { nodeID1: NewHandler(&testVerifier{}, signer1), nodeID2: NewHandler(&testVerifier{}, signer1), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -484,7 +484,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { nodeID0: p2p.NoOpHandler{}, }, ctx: func() context.Context { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() return ctx @@ -521,7 +521,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { nodeID2: p2p.NoOpHandler{}, }, ctx: func() context.Context { - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() return ctx @@ -567,7 +567,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { nodeID1: NewHandler(&testVerifier{}, signer1), nodeID2: NewHandler(&testVerifier{}, signer2), }, - ctx: context.Background(), + ctx: t.Context(), msg: func() *warp.Message { unsignedMsg, err := warp.NewUnsignedMessage( networkID, @@ -620,7 +620,7 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { client := p2ptest.NewClientWithPeers( t, - context.Background(), + t.Context(), ids.EmptyNodeID, p2p.NoOpHandler{}, tt.peers, @@ -645,8 +645,10 @@ func TestSignatureAggregator_AggregateSignatures(t *testing.T) { bitSet := set.BitsFromBytes(gotSignature.Signers) require.Equal(tt.wantSigners, bitSet.Len()) - pks := make([]*bls.PublicKey, 0) - wantAggregatedStake := uint64(0) + var ( + pks []*bls.PublicKey + wantAggregatedStake uint64 + ) for i := 0; i < bitSet.BitLen(); i++ { if !bitSet.Contains(i) { continue diff --git a/network/p2p/acp118/handler_test.go b/network/p2p/acp118/handler_test.go index 1129fcb15379..670c6cb2f636 100644 --- a/network/p2p/acp118/handler_test.go +++ b/network/p2p/acp118/handler_test.go @@ -72,7 +72,7 @@ func TestHandler(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() sk, err := localsigner.New() require.NoError(err) pk := sk.PublicKey() diff --git a/network/p2p/gossip/gossip_test.go b/network/p2p/gossip/gossip_test.go index 6795447323a2..ad4cb824f06e 100644 --- a/network/p2p/gossip/gossip_test.go +++ b/network/p2p/gossip/gossip_test.go @@ -26,7 +26,7 @@ import ( "github.com/ava-labs/avalanchego/utils/units" ) -func TestGossiperShutdown(*testing.T) { +func TestGossiperShutdown(t *testing.T) { gossiper := NewPullGossiper[*testTx]( logging.NoLog{}, nil, @@ -35,7 +35,7 @@ func 
TestGossiperShutdown(*testing.T) { Metrics{}, 0, ) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) wg := &sync.WaitGroup{} wg.Add(1) @@ -108,7 +108,7 @@ func TestGossiperGossip(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() responseSender := &enginetest.SenderStub{ SentAppResponse: make(chan []byte, 1), @@ -163,7 +163,7 @@ func TestGossiperGossip(t *testing.T) { peers, ) require.NoError(err) - require.NoError(requestNetwork.Connected(context.Background(), ids.EmptyNodeID, nil)) + require.NoError(requestNetwork.Connected(t.Context(), ids.EmptyNodeID, nil)) bloom, err := NewBloomFilter(prometheus.NewRegistry(), "", 1000, 0.01, 0.05) require.NoError(err) @@ -213,8 +213,8 @@ func TestGossiperGossip(t *testing.T) { } } -func TestEvery(*testing.T) { - ctx, cancel := context.WithCancel(context.Background()) +func TestEvery(t *testing.T) { + ctx, cancel := context.WithCancel(t.Context()) calls := 0 gossiper := &TestGossiper{ GossipF: func(context.Context) error { @@ -254,12 +254,12 @@ func TestValidatorGossiper(t *testing.T) { } // we are a validator, so we should request gossip - require.NoError(gossiper.Gossip(context.Background())) + require.NoError(gossiper.Gossip(t.Context())) require.Equal(1, calls) // we are not a validator, so we should not request gossip validators.validators = set.Set[ids.NodeID]{} - require.NoError(gossiper.Gossip(context.Background())) + require.NoError(gossiper.Gossip(t.Context())) require.Equal(1, calls) } @@ -536,7 +536,7 @@ func TestPushGossiper(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() sender := &enginetest.SenderStub{ SentAppGossip: make(chan []byte, 2), diff --git a/network/p2p/handler_test.go b/network/p2p/handler_test.go index a47b244c3f1b..db584a6290ee 100644 --- a/network/p2p/handler_test.go +++ b/network/p2p/handler_test.go @@ -73,7 +73,7 @@ func TestValidatorHandlerAppGossip(t *testing.T) { logging.NoLog{}, ) - handler.AppGossip(context.Background(), tt.nodeID, []byte("foobar")) + handler.AppGossip(t.Context(), tt.nodeID, []byte("foobar")) require.Equal(tt.expected, called) }) } @@ -114,7 +114,7 @@ func TestValidatorHandlerAppRequest(t *testing.T) { logging.NoLog{}, ) - _, err := handler.AppRequest(context.Background(), tt.nodeID, time.Time{}, []byte("foobar")) + _, err := handler.AppRequest(t.Context(), tt.nodeID, time.Time{}, []byte("foobar")) require.ErrorIs(err, tt.expected) }) } @@ -304,7 +304,7 @@ func TestNewDynamicThrottlerHandler_AppRequest(t *testing.T) { for _, r := range tt.requests[i] { _, err := handler.AppRequest( - context.Background(), + t.Context(), r.nodeID, time.Time{}, []byte("foobar"), diff --git a/network/p2p/network.go b/network/p2p/network.go index c07ade37fdcc..6aea64ab600d 100644 --- a/network/p2p/network.go +++ b/network/p2p/network.go @@ -160,6 +160,13 @@ func (p *Peers) Disconnected(nodeID ids.NodeID) { p.set.Remove(nodeID) } +func (p *Peers) Has(nodeID ids.NodeID) bool { + p.lock.RLock() + defer p.lock.RUnlock() + + return p.set.Contains(nodeID) +} + // Sample returns a pseudo-random sample of up to limit Peers func (p *Peers) Sample(limit int) []ids.NodeID { p.lock.RLock() diff --git a/network/p2p/network_test.go b/network/p2p/network_test.go index fe7f68eb2665..2ccc138ec77c 100644 --- a/network/p2p/network_test.go +++ b/network/p2p/network_test.go 
@@ -33,7 +33,7 @@ var errFoo = &common.AppError{ func TestMessageRouting(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() wantNodeID := ids.GenerateTestNodeID() wantMsg := []byte("message") @@ -85,7 +85,7 @@ func TestMessageRouting(t *testing.T) { // Tests that the Client prefixes messages with the handler prefix func TestClientPrefixesMessages(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() sender := enginetest.SenderStub{ SentAppRequest: make(chan []byte, 1), @@ -140,7 +140,7 @@ func TestClientPrefixesMessages(t *testing.T) { // Tests that the Client callback is called on a successful response func TestAppRequestResponse(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() sender := enginetest.SenderStub{ SentAppRequest: make(chan []byte, 1), @@ -179,7 +179,7 @@ func TestAppRequestResponse(t *testing.T) { // Tests that the Client does not provide a cancelled context to the AppSender. func TestAppRequestCancelledContext(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() sentMessages := make(chan []byte, 1) sender := &enginetest.Sender{ @@ -226,7 +226,7 @@ func TestAppRequestCancelledContext(t *testing.T) { // Tests that the Client callback is given an error if the request fails func TestAppRequestFailed(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() sender := enginetest.SenderStub{ SentAppRequest: make(chan []byte, 1), @@ -281,7 +281,7 @@ func TestAppGossipMessageForUnregisteredHandler(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() handler := &TestHandler{ AppGossipF: func(context.Context, ids.NodeID, []byte) { require.Fail("should not be called") @@ -324,7 +324,7 @@ func TestAppRequestMessageForUnregisteredHandler(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() handler := &TestHandler{ AppRequestF: func(context.Context, ids.NodeID, time.Time, []byte) ([]byte, *common.AppError) { require.Fail("should not be called") @@ -365,7 +365,7 @@ func TestAppRequestMessageForUnregisteredHandler(t *testing.T) { // A handler that errors should send an AppError to the requesting peer func TestAppError(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() appError := &common.AppError{ Code: 123, Message: "foo", @@ -428,7 +428,7 @@ func TestResponseForUnrequestedRequest(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() handler := &TestHandler{ AppGossipF: func(context.Context, ids.NodeID, []byte) { require.Fail("should not be called") @@ -460,7 +460,7 @@ func TestResponseForUnrequestedRequest(t *testing.T) { // not attempt to issue another request until the previous one has cleared. 
func TestAppRequestDuplicateRequestIDs(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() sender := &enginetest.SenderStub{ SentAppRequest: make(chan []byte, 1), @@ -483,7 +483,7 @@ func TestAppRequestDuplicateRequestIDs(t *testing.T) { // force the network to use the same requestID network.router.requestID = 1 - err = client.AppRequest(context.Background(), set.Of(ids.EmptyNodeID), []byte{}, noOpCallback) + err = client.AppRequest(t.Context(), set.Of(ids.EmptyNodeID), []byte{}, noOpCallback) require.ErrorIs(err, ErrRequestPending) } @@ -564,11 +564,11 @@ func TestPeersSample(t *testing.T) { require.NoError(err) for connected := range tt.connected { - require.NoError(network.Connected(context.Background(), connected, nil)) + require.NoError(network.Connected(t.Context(), connected, nil)) } for disconnected := range tt.disconnected { - require.NoError(network.Disconnected(context.Background(), disconnected)) + require.NoError(network.Disconnected(t.Context(), disconnected)) } sampleable := set.Set[ids.NodeID]{} @@ -620,12 +620,12 @@ func TestAppRequestAnyWithPeerSampling(t *testing.T) { ) require.NoError(err) for _, peer := range tt.peers { - require.NoError(n.Connected(context.Background(), peer, &version.Application{})) + require.NoError(n.Connected(t.Context(), peer, &version.Application{})) } client := n.NewClient(1, PeerSampler{Peers: peers}) - err = client.AppRequestAny(context.Background(), []byte("foobar"), nil) + err = client.AppRequestAny(t.Context(), []byte("foobar"), nil) require.ErrorIs(err, tt.expected) require.Subset(tt.peers, sent.List()) }) @@ -700,7 +700,7 @@ func TestAppRequestAnyWithValidatorSampling(t *testing.T) { validators, ) require.NoError(err) - ctx := context.Background() + ctx := t.Context() for _, peer := range tt.peers { require.NoError(network.Connected(ctx, peer, nil)) } @@ -952,34 +952,51 @@ func TestNetworkValidators_ConnectAndDisconnect(t *testing.T) { } for _, nodeID := range tt.connectedPeers[i] { - require.NoError(n.Connected(context.Background(), nodeID, nil)) + require.NoError(n.Connected(t.Context(), nodeID, nil)) } for _, nodeID := range tt.disconnectedPeers[i] { - require.NoError(n.Disconnected(context.Background(), nodeID)) + require.NoError(n.Disconnected(t.Context(), nodeID)) } require.Equal( len(tt.wantConnectedValidators[i]), - validatorSet.Len(context.Background()), + validatorSet.Len(t.Context()), ) for _, nodeID := range tt.wantConnectedValidators[i] { - require.True(validatorSet.Has(context.Background(), nodeID)) + require.True(validatorSet.Has(t.Context(), nodeID)) } wantDisconnectedValidators := set.Of(nodeIDs...) 
wantDisconnectedValidators.Difference(set.Of(tt.wantConnectedValidators[i]...)) for nodeID := range wantDisconnectedValidators { - require.False(validatorSet.Has(context.Background(), nodeID)) + require.False(validatorSet.Has(t.Context(), nodeID)) } require.Equal( len(tt.wantConnectedValidators[i]), - validatorSet.Len(context.Background()), + validatorSet.Len(t.Context()), ) } }) } } + +func TestPeers_Has(t *testing.T) { + require := require.New(t) + + peers := &Peers{} + network, err := NewNetwork( + logging.NoLog{}, + &enginetest.Sender{}, + prometheus.NewRegistry(), + "", + peers, + ) + require.NoError(err) + require.NoError(network.Connected(t.Context(), ids.EmptyNodeID, nil)) + + require.True(peers.Has(ids.EmptyNodeID)) +} diff --git a/network/p2p/p2ptest/client_test.go b/network/p2p/p2ptest/client_test.go index f4ead2e6a8da..9223c35817b5 100644 --- a/network/p2p/p2ptest/client_test.go +++ b/network/p2p/p2ptest/client_test.go @@ -18,7 +18,7 @@ import ( func TestClient_AppGossip(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() appGossipChan := make(chan struct{}) testHandler := p2p.TestHandler{ @@ -84,7 +84,7 @@ func TestClient_AppRequest(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() appRequestChan := make(chan struct{}) testHandler := p2p.TestHandler{ diff --git a/network/p2p/throttler_handler_test.go b/network/p2p/throttler_handler_test.go index 53a05870a254..df500018a0e1 100644 --- a/network/p2p/throttler_handler_test.go +++ b/network/p2p/throttler_handler_test.go @@ -48,7 +48,7 @@ func TestThrottlerHandlerAppGossip(t *testing.T) { logging.NoLog{}, ) - handler.AppGossip(context.Background(), ids.GenerateTestNodeID(), []byte("foobar")) + handler.AppGossip(t.Context(), ids.GenerateTestNodeID(), []byte("foobar")) require.Equal(tt.expected, called) }) } @@ -79,7 +79,7 @@ func TestThrottlerHandlerAppRequest(t *testing.T) { tt.Throttler, logging.NoLog{}, ) - _, err := handler.AppRequest(context.Background(), ids.GenerateTestNodeID(), time.Time{}, []byte("foobar")) + _, err := handler.AppRequest(t.Context(), ids.GenerateTestNodeID(), time.Time{}, []byte("foobar")) require.ErrorIs(err, tt.expectedErr) }) } diff --git a/network/p2p/validators_test.go b/network/p2p/validators_test.go index 4e52b42a23c1..1a51f91af2cb 100644 --- a/network/p2p/validators_test.go +++ b/network/p2p/validators_test.go @@ -172,16 +172,18 @@ func TestValidatorsSample(t *testing.T) { ctrl := gomock.NewController(t) mockValidators := validatorsmock.NewState(ctrl) - calls := make([]any, 0) + calls := make([]any, 0, 2*len(tt.calls)) for _, call := range tt.calls { - calls = append(calls, mockValidators.EXPECT(). - GetCurrentHeight(gomock.Any()).Return(call.height, call.getCurrentHeightErr)) + calls = append( + calls, + mockValidators.EXPECT().GetCurrentHeight(gomock.Any()).Return(call.height, call.getCurrentHeightErr), + ) if call.getCurrentHeightErr != nil { continue } - validatorSet := make(map[ids.NodeID]*validators.GetValidatorOutput, 0) + validatorSet := make(map[ids.NodeID]*validators.GetValidatorOutput, len(call.validators)) for _, validator := range call.validators { validatorSet[validator] = &validators.GetValidatorOutput{ NodeID: validator, @@ -189,10 +191,10 @@ func TestValidatorsSample(t *testing.T) { } } - calls = append(calls, - mockValidators.EXPECT(). - GetValidatorSet(gomock.Any(), gomock.Any(), subnetID). 
- Return(validatorSet, call.getValidatorSetErr)) + calls = append( + calls, + mockValidators.EXPECT().GetValidatorSet(gomock.Any(), gomock.Any(), subnetID).Return(validatorSet, call.getValidatorSetErr), + ) } gomock.InOrder(calls...) @@ -204,7 +206,7 @@ func TestValidatorsSample(t *testing.T) { ) require.NoError(err) - ctx := context.Background() + ctx := t.Context() require.NoError(network.Connected(ctx, nodeID1, nil)) require.NoError(network.Connected(ctx, nodeID2, nil)) @@ -337,7 +339,7 @@ func TestValidatorsTop(t *testing.T) { require := require.New(t) ctrl := gomock.NewController(t) - validatorSet := make(map[ids.NodeID]*validators.GetValidatorOutput, 0) + validatorSet := make(map[ids.NodeID]*validators.GetValidatorOutput, len(test.validators)) for _, validator := range test.validators { validatorSet[validator.nodeID] = &validators.GetValidatorOutput{ NodeID: validator.nodeID, @@ -359,7 +361,7 @@ func TestValidatorsTop(t *testing.T) { ) require.NoError(err) - ctx := context.Background() + ctx := t.Context() require.NoError(network.Connected(ctx, nodeID1, nil)) require.NoError(network.Connected(ctx, nodeID2, nil)) @@ -389,5 +391,5 @@ func TestValidatorsLock(t *testing.T) { mockValidators.EXPECT().GetValidatorSet(gomock.Any(), uint64(1), subnetID).Return(nil, nil) v = NewValidators(logging.NoLog{}, subnetID, mockValidators, time.Second) - _ = v.Len(context.Background()) + _ = v.Len(t.Context()) } diff --git a/network/peer/message_queue_test.go b/network/peer/message_queue_test.go index 1eaa69685d24..1d31fa7df279 100644 --- a/network/peer/message_queue_test.go +++ b/network/peer/message_queue_test.go @@ -38,7 +38,7 @@ func TestMessageQueue(t *testing.T) { go func() { for i := 0; i < numToSend; i++ { - q.Push(context.Background(), msgs[i]) + q.Push(t.Context(), msgs[i]) } }() @@ -53,7 +53,7 @@ func TestMessageQueue(t *testing.T) { require.False(ok) // Assert that Push returns false when the context is canceled - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() expectFail = true gotOk := make(chan bool) @@ -64,7 +64,7 @@ func TestMessageQueue(t *testing.T) { // Assert that Push returns false when the queue is closed go func() { - gotOk <- q.Push(context.Background(), msgs[0]) + gotOk <- q.Push(t.Context(), msgs[0]) }() q.Close() require.False(<-gotOk) diff --git a/network/peer/peer_test.go b/network/peer/peer_test.go index 8fe67906e713..e78ffe741ae7 100644 --- a/network/peer/peer_test.go +++ b/network/peer/peer_test.go @@ -159,7 +159,7 @@ func awaitReady(t *testing.T, peers ...Peer) { require := require.New(t) for _, peer := range peers { - require.NoError(peer.AwaitReady(context.Background())) + require.NoError(peer.AwaitReady(t.Context())) require.True(peer.Ready()) } } @@ -189,8 +189,8 @@ func TestReady(t *testing.T) { awaitReady(t, peer0, peer1) peer0.StartClose() - require.NoError(peer0.AwaitClosed(context.Background())) - require.NoError(peer1.AwaitClosed(context.Background())) + require.NoError(peer0.AwaitClosed(t.Context())) + require.NoError(peer1.AwaitClosed(t.Context())) } func TestSend(t *testing.T) { @@ -208,14 +208,14 @@ func TestSend(t *testing.T) { outboundGetMsg, err := config0.MessageCreator.Get(ids.Empty, 1, time.Second, ids.Empty) require.NoError(err) - require.True(peer0.Send(context.Background(), outboundGetMsg)) + require.True(peer0.Send(t.Context(), outboundGetMsg)) inboundGetMsg := <-peer1.inboundMsgChan require.Equal(message.GetOp, inboundGetMsg.Op()) peer1.StartClose() - 
require.NoError(peer0.AwaitClosed(context.Background())) - require.NoError(peer1.AwaitClosed(context.Background())) + require.NoError(peer0.AwaitClosed(t.Context())) + require.NoError(peer1.AwaitClosed(t.Context())) } func TestPingUptimes(t *testing.T) { @@ -234,12 +234,12 @@ func TestPingUptimes(t *testing.T) { defer func() { peer1.StartClose() peer0.StartClose() - require.NoError(peer0.AwaitClosed(context.Background())) - require.NoError(peer1.AwaitClosed(context.Background())) + require.NoError(peer0.AwaitClosed(t.Context())) + require.NoError(peer1.AwaitClosed(t.Context())) }() pingMsg, err := config0.MessageCreator.Ping(1) require.NoError(err) - require.True(peer0.Send(context.Background(), pingMsg)) + require.True(peer0.Send(t.Context(), pingMsg)) // we send Get message after ping to ensure Ping is handled by the // time Get is handled. This is because Get is routed to the handler @@ -297,16 +297,16 @@ func TestTrackedSubnets(t *testing.T) { rawPeer0.config.MySubnets = set.Of(test.trackedSubnets...) peer0, peer1 := startTestPeers(rawPeer0, rawPeer1) if test.shouldDisconnect { - require.NoError(peer0.AwaitClosed(context.Background())) - require.NoError(peer1.AwaitClosed(context.Background())) + require.NoError(peer0.AwaitClosed(t.Context())) + require.NoError(peer1.AwaitClosed(t.Context())) return } defer func() { peer1.StartClose() peer0.StartClose() - require.NoError(peer0.AwaitClosed(context.Background())) - require.NoError(peer1.AwaitClosed(context.Background())) + require.NoError(peer0.AwaitClosed(t.Context())) + require.NoError(peer1.AwaitClosed(t.Context())) }() awaitReady(t, peer0, peer1) @@ -352,8 +352,8 @@ func TestInvalidBLSKeyDisconnects(t *testing.T) { // Because peer1 thinks that peer0 is using the wrong BLS key, they should // disconnect from each other. - require.NoError(peer0.AwaitClosed(context.Background())) - require.NoError(peer1.AwaitClosed(context.Background())) + require.NoError(peer0.AwaitClosed(t.Context())) + require.NoError(peer1.AwaitClosed(t.Context())) } func TestShouldDisconnect(t *testing.T) { @@ -665,7 +665,7 @@ func sendAndFlush(t *testing.T, sender *testPeer, receiver *testPeer) { mc := newMessageCreator(t) outboundGetMsg, err := mc.Get(ids.Empty, 1, time.Second, ids.Empty) require.NoError(t, err) - require.True(t, sender.Send(context.Background(), outboundGetMsg)) + require.True(t, sender.Send(t.Context(), outboundGetMsg)) inboundGetMsg := <-receiver.inboundMsgChan require.Equal(t, message.GetOp, inboundGetMsg.Op()) } diff --git a/network/throttling/bandwidth_throttler_test.go b/network/throttling/bandwidth_throttler_test.go index 5763ca7822e1..422db9373731 100644 --- a/network/throttling/bandwidth_throttler_test.go +++ b/network/throttling/bandwidth_throttler_test.go @@ -4,7 +4,6 @@ package throttling import ( - "context" "sync" "testing" @@ -46,14 +45,14 @@ func TestBandwidthThrottler(t *testing.T) { require.Len(throttler.limiters, 1) // Should be able to acquire 8 - throttler.Acquire(context.Background(), 8, nodeID1) + throttler.Acquire(t.Context(), 8, nodeID1) // Make several goroutines that acquire bytes. 
wg := sync.WaitGroup{} wg.Add(int(config.MaxBurstSize) + 5) for i := uint64(0); i < config.MaxBurstSize+5; i++ { go func() { - throttler.Acquire(context.Background(), 1, nodeID1) + throttler.Acquire(t.Context(), 1, nodeID1) wg.Done() }() } diff --git a/network/throttling/inbound_msg_buffer_throttler_test.go b/network/throttling/inbound_msg_buffer_throttler_test.go index f1d5acf49415..1f6deb590559 100644 --- a/network/throttling/inbound_msg_buffer_throttler_test.go +++ b/network/throttling/inbound_msg_buffer_throttler_test.go @@ -22,16 +22,16 @@ func TestMsgBufferThrottler(t *testing.T) { nodeID1, nodeID2 := ids.GenerateTestNodeID(), ids.GenerateTestNodeID() // Acquire shouldn't block for first 3 - throttler.Acquire(context.Background(), nodeID1) - throttler.Acquire(context.Background(), nodeID1) - throttler.Acquire(context.Background(), nodeID1) + throttler.Acquire(t.Context(), nodeID1) + throttler.Acquire(t.Context(), nodeID1) + throttler.Acquire(t.Context(), nodeID1) require.Len(throttler.nodeToNumProcessingMsgs, 1) require.Equal(uint64(3), throttler.nodeToNumProcessingMsgs[nodeID1]) // Acquire shouldn't block for other node - throttler.Acquire(context.Background(), nodeID2) - throttler.Acquire(context.Background(), nodeID2) - throttler.Acquire(context.Background(), nodeID2) + throttler.Acquire(t.Context(), nodeID2) + throttler.Acquire(t.Context(), nodeID2) + throttler.Acquire(t.Context(), nodeID2) require.Len(throttler.nodeToNumProcessingMsgs, 2) require.Equal(uint64(3), throttler.nodeToNumProcessingMsgs[nodeID1]) require.Equal(uint64(3), throttler.nodeToNumProcessingMsgs[nodeID2]) @@ -39,7 +39,7 @@ func TestMsgBufferThrottler(t *testing.T) { // Acquire should block for 4th acquire done := make(chan struct{}) go func() { - throttler.Acquire(context.Background(), nodeID1) + throttler.Acquire(t.Context(), nodeID1) done <- struct{}{} }() select { @@ -72,7 +72,7 @@ func TestMsgBufferThrottlerContextCancelled(t *testing.T) { throttler, err := newInboundMsgBufferThrottler(prometheus.NewRegistry(), 3) require.NoError(err) - vdr1Context, vdr1ContextCancelFunc := context.WithCancel(context.Background()) + vdr1Context, vdr1ContextCancelFunc := context.WithCancel(t.Context()) nodeID1 := ids.GenerateTestNodeID() // Acquire shouldn't block for first 3 throttler.Acquire(vdr1Context, nodeID1) diff --git a/network/throttling/inbound_msg_byte_throttler_test.go b/network/throttling/inbound_msg_byte_throttler_test.go index 2207866aac08..32ff0e9b12a3 100644 --- a/network/throttling/inbound_msg_byte_throttler_test.go +++ b/network/throttling/inbound_msg_byte_throttler_test.go @@ -36,7 +36,7 @@ func TestInboundMsgByteThrottlerCancelContextDeadlock(t *testing.T) { ) require.NoError(err) - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() nodeID := ids.GenerateTestNodeID() @@ -65,11 +65,11 @@ func TestInboundMsgByteThrottlerCancelContext(t *testing.T) { ) require.NoError(err) - throttler.Acquire(context.Background(), config.VdrAllocSize, vdr1ID) + throttler.Acquire(t.Context(), config.VdrAllocSize, vdr1ID) // Trying to take more bytes for node should block vdr2Done := make(chan struct{}) - vdr2Context, vdr2ContextCancelFunction := context.WithCancel(context.Background()) + vdr2Context, vdr2ContextCancelFunction := context.WithCancel(t.Context()) go func() { throttler.Acquire(vdr2Context, config.VdrAllocSize, vdr2ID) vdr2Done <- struct{}{} @@ -133,7 +133,7 @@ func TestInboundMsgByteThrottler(t *testing.T) { // Take from at-large allocation. 
// Should return immediately. - throttler.Acquire(context.Background(), 1, vdr1ID) + throttler.Acquire(t.Context(), 1, vdr1ID) require.Equal(config.AtLargeAllocSize-1, throttler.remainingAtLargeBytes) require.Equal(config.VdrAllocSize, throttler.remainingVdrBytes) require.Empty(throttler.nodeToVdrBytesUsed) @@ -149,7 +149,7 @@ func TestInboundMsgByteThrottler(t *testing.T) { // Use all the at-large allocation bytes and 1 of the validator allocation bytes // Should return immediately. - throttler.Acquire(context.Background(), config.AtLargeAllocSize+1, vdr1ID) + throttler.Acquire(t.Context(), config.AtLargeAllocSize+1, vdr1ID) // vdr1 at-large bytes used: 1024. Validator bytes used: 1 require.Zero(throttler.remainingAtLargeBytes) require.Equal(config.VdrAllocSize-1, throttler.remainingVdrBytes) @@ -160,7 +160,7 @@ func TestInboundMsgByteThrottler(t *testing.T) { // The other validator should be able to acquire half the validator allocation. // Should return immediately. - throttler.Acquire(context.Background(), config.AtLargeAllocSize/2, vdr2ID) + throttler.Acquire(t.Context(), config.AtLargeAllocSize/2, vdr2ID) // vdr2 at-large bytes used: 0. Validator bytes used: 512 require.Equal(config.VdrAllocSize/2-1, throttler.remainingVdrBytes) require.Equal(uint64(1), throttler.nodeToVdrBytesUsed[vdr1ID]) @@ -172,7 +172,7 @@ func TestInboundMsgByteThrottler(t *testing.T) { // vdr1 should be able to acquire the rest of the validator allocation // Should return immediately. - throttler.Acquire(context.Background(), config.VdrAllocSize/2-1, vdr1ID) + throttler.Acquire(t.Context(), config.VdrAllocSize/2-1, vdr1ID) // vdr1 at-large bytes used: 1024. Validator bytes used: 512 require.Equal(config.VdrAllocSize/2, throttler.nodeToVdrBytesUsed[vdr1ID]) require.Len(throttler.nodeToAtLargeBytesUsed, 1) @@ -181,7 +181,7 @@ func TestInboundMsgByteThrottler(t *testing.T) { // Trying to take more bytes for either node should block vdr1Done := make(chan struct{}) go func() { - throttler.Acquire(context.Background(), 1, vdr1ID) + throttler.Acquire(t.Context(), 1, vdr1ID) vdr1Done <- struct{}{} }() select { @@ -199,7 +199,7 @@ func TestInboundMsgByteThrottler(t *testing.T) { vdr2Done := make(chan struct{}) go func() { - throttler.Acquire(context.Background(), 1, vdr2ID) + throttler.Acquire(t.Context(), 1, vdr2ID) vdr2Done <- struct{}{} }() select { @@ -219,7 +219,7 @@ func TestInboundMsgByteThrottler(t *testing.T) { nonVdrID := ids.GenerateTestNodeID() nonVdrDone := make(chan struct{}) go func() { - throttler.Acquire(context.Background(), 1, nonVdrID) + throttler.Acquire(t.Context(), 1, nonVdrID) nonVdrDone <- struct{}{} }() select { @@ -257,7 +257,7 @@ func TestInboundMsgByteThrottler(t *testing.T) { require.Zero(throttler.waitingToAcquire.Len()) // Non-validator should be able to take the rest of the at-large bytes - throttler.Acquire(context.Background(), config.AtLargeAllocSize/2-2, nonVdrID) + throttler.Acquire(t.Context(), config.AtLargeAllocSize/2-2, nonVdrID) require.Zero(throttler.remainingAtLargeBytes) require.Equal(config.AtLargeAllocSize/2-1, throttler.nodeToAtLargeBytesUsed[nonVdrID]) require.Empty(throttler.nodeToWaitingMsgID) @@ -265,7 +265,7 @@ func TestInboundMsgByteThrottler(t *testing.T) { // But should block on subsequent Acquires go func() { - throttler.Acquire(context.Background(), 1, nonVdrID) + throttler.Acquire(t.Context(), 1, nonVdrID) nonVdrDone <- struct{}{} }() select { @@ -336,12 +336,12 @@ func TestSybilMsgThrottlerMaxNonVdr(t *testing.T) { ) require.NoError(err) nonVdrNodeID1 := 
ids.GenerateTestNodeID() - throttler.Acquire(context.Background(), config.NodeMaxAtLargeBytes, nonVdrNodeID1) + throttler.Acquire(t.Context(), config.NodeMaxAtLargeBytes, nonVdrNodeID1) // Acquiring more should block nonVdrDone := make(chan struct{}) go func() { - throttler.Acquire(context.Background(), 1, nonVdrNodeID1) + throttler.Acquire(t.Context(), 1, nonVdrNodeID1) nonVdrDone <- struct{}{} }() select { @@ -352,10 +352,10 @@ func TestSybilMsgThrottlerMaxNonVdr(t *testing.T) { // A different non-validator should be able to acquire nonVdrNodeID2 := ids.GenerateTestNodeID() - throttler.Acquire(context.Background(), config.NodeMaxAtLargeBytes, nonVdrNodeID2) + throttler.Acquire(t.Context(), config.NodeMaxAtLargeBytes, nonVdrNodeID2) // Validator should only be able to take [MaxAtLargeBytes] - throttler.Acquire(context.Background(), config.NodeMaxAtLargeBytes+1, vdr1ID) + throttler.Acquire(t.Context(), config.NodeMaxAtLargeBytes+1, vdr1ID) require.Equal(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[vdr1ID]) require.Equal(uint64(1), throttler.nodeToVdrBytesUsed[vdr1ID]) require.Equal(config.NodeMaxAtLargeBytes, throttler.nodeToAtLargeBytesUsed[nonVdrNodeID1]) @@ -387,14 +387,14 @@ func TestMsgThrottlerNextMsg(t *testing.T) { require.NoError(err) // validator uses up all but 1 byte - throttler.Acquire(context.Background(), maxBytes-1, vdr1ID) + throttler.Acquire(t.Context(), maxBytes-1, vdr1ID) // validator uses the last byte - throttler.Acquire(context.Background(), 1, vdr1ID) + throttler.Acquire(t.Context(), 1, vdr1ID) // validator wants to acquire a lot of bytes doneVdr := make(chan struct{}) go func() { - throttler.Acquire(context.Background(), maxBytes-1, vdr1ID) + throttler.Acquire(t.Context(), maxBytes-1, vdr1ID) doneVdr <- struct{}{} }() select { @@ -406,7 +406,7 @@ func TestMsgThrottlerNextMsg(t *testing.T) { // nonvalidator tries to acquire more bytes done := make(chan struct{}) go func() { - throttler.Acquire(context.Background(), 1, nonVdrNodeID) + throttler.Acquire(t.Context(), 1, nonVdrNodeID) done <- struct{}{} }() select { diff --git a/network/throttling/inbound_resource_throttler_test.go b/network/throttling/inbound_resource_throttler_test.go index 5d9aea188d78..f36bc32af885 100644 --- a/network/throttling/inbound_resource_throttler_test.go +++ b/network/throttling/inbound_resource_throttler_test.go @@ -65,12 +65,12 @@ func TestSystemThrottler(t *testing.T) { targeter.EXPECT().TargetUsage(vdrID).Return(1.0).Times(1) mockTracker.EXPECT().Usage(vdrID, gomock.Any()).Return(0.9).Times(1) - throttler.Acquire(context.Background(), vdrID) + throttler.Acquire(t.Context(), vdrID) targeter.EXPECT().TargetUsage(nonVdrID).Return(1.0).Times(1) mockTracker.EXPECT().Usage(nonVdrID, gomock.Any()).Return(0.9).Times(1) - throttler.Acquire(context.Background(), nonVdrID) + throttler.Acquire(t.Context(), nonVdrID) // Case: Actual usage > target usage; we should wait. 
// In the first loop iteration inside acquire, @@ -90,7 +90,7 @@ func TestSystemThrottler(t *testing.T) { // Check for validator go func() { - throttler.Acquire(context.Background(), vdrID) + throttler.Acquire(t.Context(), vdrID) onAcquire <- struct{}{} }() // Make sure the min re-check frequency is honored @@ -113,7 +113,7 @@ func TestSystemThrottler(t *testing.T) { // Check for non-validator go func() { - throttler.Acquire(context.Background(), nonVdrID) + throttler.Acquire(t.Context(), nonVdrID) onAcquire <- struct{}{} }() // Make sure the min re-check frequency is honored @@ -151,7 +151,7 @@ func TestSystemThrottlerContextCancel(t *testing.T) { mockTracker.EXPECT().TimeUntilUsage(vdrID, gomock.Any(), gomock.Any()).Return(maxRecheckDelay).Times(1) onAcquire := make(chan struct{}) // Pass a canceled context into Acquire so that it returns immediately. - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() go func() { throttler.Acquire(ctx, vdrID) diff --git a/simplex/block_builder_test.go b/simplex/block_builder_test.go index 752d13ffbb57..090bb0f522e3 100644 --- a/simplex/block_builder_test.go +++ b/simplex/block_builder_test.go @@ -18,7 +18,7 @@ import ( ) func TestBlockBuilder(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) child := newTestBlock(t, newBlockConfig{ prev: genesis, @@ -93,7 +93,7 @@ func TestBlockBuilder(t *testing.T) { } func TestBlockBuilderCancelContext(t *testing.T) { - ctx := context.Background() + ctx := t.Context() vm := newTestVM() genesis := newTestBlock(t, newBlockConfig{}) child := newTestBlock(t, newBlockConfig{ @@ -118,7 +118,7 @@ func TestBlockBuilderCancelContext(t *testing.T) { } func TestWaitForPendingBlock(t *testing.T) { - ctx := context.Background() + ctx := t.Context() vm := newTestVM() genesis := newTestBlock(t, newBlockConfig{}) count := 0 @@ -141,7 +141,7 @@ func TestWaitForPendingBlock(t *testing.T) { } func TestBlockBuildingExponentialBackoff(t *testing.T) { - ctx := context.Background() + ctx := t.Context() vm := newTestVM() genesis := newTestBlock(t, newBlockConfig{}) child := newTestBlock(t, newBlockConfig{ @@ -185,7 +185,7 @@ func TestBlockBuildingExponentialBackoff(t *testing.T) { } func TestWaitForPendingBlockBackoff(t *testing.T) { - ctx := context.Background() + ctx := t.Context() vm := newTestVM() const ( failedAttempts = 7 diff --git a/simplex/block_test.go b/simplex/block_test.go index 9ac7a0113f96..bcd9fa47f4cc 100644 --- a/simplex/block_test.go +++ b/simplex/block_test.go @@ -23,7 +23,7 @@ import ( func TestBlockSerialization(t *testing.T) { unexpectedBlockBytes := errors.New("unexpected block bytes") - ctx := context.Background() + ctx := t.Context() genesisBlock := newTestBlock(t, newBlockConfig{}) testBlock := snowmantest.BuildChild(snowmantest.Genesis) @@ -106,7 +106,7 @@ func TestBlockSerialization(t *testing.T) { // TestVerifyPrevNotFound attempts to verify a block with a prev digest that is not valid. func TestVerifyPrevNotFound(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) b := newTestBlock(t, newBlockConfig{ @@ -121,7 +121,7 @@ func TestVerifyPrevNotFound(t *testing.T) { // TestVerifyTwice tests that a block the same vmBlock will only // have its Verify method called once, even if Verify is called multiple times. 
func TestVerifyTwice(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) b := newTestBlock(t, newBlockConfig{ @@ -140,7 +140,7 @@ func TestVerifyTwice(t *testing.T) { // TestVerifyGenesis tests that a block with a sequence number of 0 cannot be verified. func TestVerifyGenesis(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) _, err := genesis.Verify(ctx) @@ -148,7 +148,7 @@ func TestVerifyGenesis(t *testing.T) { } func TestVerify(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) b := newTestBlock(t, newBlockConfig{ @@ -170,7 +170,7 @@ func TestVerify(t *testing.T) { // TestVerifyParentAccepted tests that a block, whose parent has been verified // and indexed, can also be verified and indexed successfully. func TestVerifyParentAccepted(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) seq1Block := newTestBlock(t, newBlockConfig{ @@ -196,7 +196,7 @@ func TestVerifyParentAccepted(t *testing.T) { } func TestVerifyBlockRejectsSiblings(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) // genesisChild0 and genesisChild1 are siblings, both children of genesis. @@ -227,7 +227,7 @@ func TestVerifyBlockRejectsSiblings(t *testing.T) { } func TestVerifyInnerBlockBreaksHashChain(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) b := newTestBlock(t, newBlockConfig{ @@ -243,7 +243,7 @@ func TestVerifyInnerBlockBreaksHashChain(t *testing.T) { } func TestIndexBlockDigestNotFound(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) diff --git a/simplex/storage_test.go b/simplex/storage_test.go index 070210500bf5..74e6d7a4a8bb 100644 --- a/simplex/storage_test.go +++ b/simplex/storage_test.go @@ -4,7 +4,6 @@ package simplex import ( - "context" "testing" "github.com/ava-labs/simplex" @@ -18,7 +17,7 @@ import ( ) func TestStorageNew(t *testing.T) { - ctx := context.Background() + ctx := t.Context() child := snowmantest.BuildChild(snowmantest.Genesis) tests := []struct { name string @@ -77,7 +76,7 @@ func TestStorageRetrieve(t *testing.T) { require.NoError(t, err) vm := newTestVM() - ctx := context.Background() + ctx := t.Context() config := newEngineConfig(t, 4) config.VM = vm _, verifier := NewBLSAuth(config) @@ -130,7 +129,7 @@ func TestStorageRetrieve(t *testing.T) { } func TestStorageIndexFails(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) child1 := newTestBlock(t, newBlockConfig{prev: genesis}) child2 := newTestBlock(t, newBlockConfig{prev: child1}) @@ -215,7 +214,7 @@ func TestStorageIndexFails(t *testing.T) { // TestIndexMismatchedChild tests that the previously indexed digest matches the // previous digest of the block being indexed. func TestIndexMismatchedChild(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) child1 := newTestBlock(t, newBlockConfig{prev: genesis}) child1Sibling := newTestBlock(t, newBlockConfig{prev: genesis}) @@ -250,7 +249,7 @@ func TestIndexMismatchedChild(t *testing.T) { // TestStorageIndexSuccess indexes 10 blocks and verifies that they can be retrieved. 
func TestStorageIndexSuccess(t *testing.T) { - ctx := context.Background() + ctx := t.Context() genesis := newTestBlock(t, newBlockConfig{}) configs := newNetworkConfigs(t, 4) diff --git a/snow/consensus/snowman/bootstrapper/majority_test.go b/snow/consensus/snowman/bootstrapper/majority_test.go index d34e03f602c9..7e155034f0b9 100644 --- a/snow/consensus/snowman/bootstrapper/majority_test.go +++ b/snow/consensus/snowman/bootstrapper/majority_test.go @@ -4,7 +4,6 @@ package bootstrapper import ( - "context" "math" "testing" @@ -140,7 +139,7 @@ func TestMajorityGetPeers(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - peers := test.majority.GetPeers(context.Background()) + peers := test.majority.GetPeers(t.Context()) require.Equal(test.expectedState, test.majority) require.Equal(test.expectedPeers, peers) }) @@ -332,7 +331,7 @@ func TestMajorityRecordOpinion(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - err := test.majority.RecordOpinion(context.Background(), test.nodeID, test.blkIDs) + err := test.majority.RecordOpinion(t.Context(), test.nodeID, test.blkIDs) require.Equal(test.expectedState, test.majority) require.ErrorIs(err, test.expectedErr) }) @@ -388,7 +387,7 @@ func TestMajorityResult(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - accepted, finalized := test.majority.Result(context.Background()) + accepted, finalized := test.majority.Result(t.Context()) require.Equal(test.expectedAccepted, accepted) require.Equal(test.expectedFinalized, finalized) }) diff --git a/snow/consensus/snowman/bootstrapper/minority_test.go b/snow/consensus/snowman/bootstrapper/minority_test.go index 662dd859bcca..b0205e246129 100644 --- a/snow/consensus/snowman/bootstrapper/minority_test.go +++ b/snow/consensus/snowman/bootstrapper/minority_test.go @@ -4,7 +4,6 @@ package bootstrapper import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -101,7 +100,7 @@ func TestMinorityGetPeers(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - peers := test.minority.GetPeers(context.Background()) + peers := test.minority.GetPeers(t.Context()) require.Equal(test.expectedState, test.minority) require.Equal(test.expectedPeers, peers) }) @@ -189,7 +188,7 @@ func TestMinorityRecordOpinion(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - err := test.minority.RecordOpinion(context.Background(), test.nodeID, test.blkIDs) + err := test.minority.RecordOpinion(t.Context(), test.nodeID, test.blkIDs) require.Equal(test.expectedState, test.minority) require.ErrorIs(err, test.expectedErr) }) @@ -234,7 +233,7 @@ func TestMinorityResult(t *testing.T) { t.Run(test.name, func(t *testing.T) { require := require.New(t) - accepted, finalized := test.minority.Result(context.Background()) + accepted, finalized := test.minority.Result(t.Context()) require.Equal(test.expectedAccepted, accepted) require.Equal(test.expectedFinalized, finalized) }) diff --git a/snow/consensus/snowman/bootstrapper/noop_test.go b/snow/consensus/snowman/bootstrapper/noop_test.go index 7c2d52f71778..f79de80232bc 100644 --- a/snow/consensus/snowman/bootstrapper/noop_test.go +++ b/snow/consensus/snowman/bootstrapper/noop_test.go @@ -4,7 +4,6 @@ package bootstrapper import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -13,11 +12,11 @@ import ( func TestNoop(t *testing.T) { require := require.New(t) - require.Empty(Noop.GetPeers(context.Background())) + 
require.Empty(Noop.GetPeers(t.Context())) - require.NoError(Noop.RecordOpinion(context.Background(), nodeID0, nil)) + require.NoError(Noop.RecordOpinion(t.Context(), nodeID0, nil)) - blkIDs, finalized := Noop.Result(context.Background()) + blkIDs, finalized := Noop.Result(t.Context()) require.Empty(blkIDs) require.False(finalized) } diff --git a/snow/consensus/snowman/consensus_test.go b/snow/consensus/snowman/consensus_test.go index 4f659beb1389..a4493f5265bb 100644 --- a/snow/consensus/snowman/consensus_test.go +++ b/snow/consensus/snowman/consensus_test.go @@ -4,7 +4,6 @@ package snowman import ( - "context" "errors" "path" "reflect" @@ -141,7 +140,7 @@ func NumProcessingTest(t *testing.T, factory Factory) { require.Equal(1, sm.NumProcessing()) votes := bag.Of(block.ID()) - require.NoError(sm.RecordPoll(context.Background(), votes)) + require.NoError(sm.RecordPoll(t.Context(), votes)) require.Zero(sm.NumProcessing()) } @@ -323,7 +322,7 @@ func StatusOrProcessingPreviouslyRejectedTest(t *testing.T, factory Factory) { )) block := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(block.Reject(context.Background())) + require.NoError(block.Reject(t.Context())) require.Equal(snowtest.Rejected, block.Status) require.False(sm.Processing(block.ID())) @@ -435,12 +434,12 @@ func RecordPollAcceptSingleBlockTest(t *testing.T, factory Factory) { require.NoError(sm.Add(block)) votes := bag.Of(block.ID()) - require.NoError(sm.RecordPoll(context.Background(), votes)) + require.NoError(sm.RecordPoll(t.Context(), votes)) require.Equal(block.ID(), sm.Preference()) require.Equal(1, sm.NumProcessing()) require.Equal(snowtest.Undecided, block.Status) - require.NoError(sm.RecordPoll(context.Background(), votes)) + require.NoError(sm.RecordPoll(t.Context(), votes)) require.Equal(block.ID(), sm.Preference()) require.Zero(sm.NumProcessing()) require.Equal(snowtest.Accepted, block.Status) @@ -479,13 +478,13 @@ func RecordPollAcceptAndRejectTest(t *testing.T, factory Factory) { votes := bag.Of(firstBlock.ID()) - require.NoError(sm.RecordPoll(context.Background(), votes)) + require.NoError(sm.RecordPoll(t.Context(), votes)) require.Equal(firstBlock.ID(), sm.Preference()) require.Equal(2, sm.NumProcessing()) require.Equal(snowtest.Undecided, firstBlock.Status) require.Equal(snowtest.Undecided, secondBlock.Status) - require.NoError(sm.RecordPoll(context.Background(), votes)) + require.NoError(sm.RecordPoll(t.Context(), votes)) require.Equal(firstBlock.ID(), sm.Preference()) require.Zero(sm.NumProcessing()) require.Equal(snowtest.Accepted, firstBlock.Status) @@ -532,7 +531,7 @@ func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { votes := bag.Of(firstBlock.ID(), secondBlock.ID()) // The first poll will accept shared bits - require.NoError(sm.RecordPoll(context.Background(), votes)) + require.NoError(sm.RecordPoll(t.Context(), votes)) require.Equal(firstBlock.ID(), sm.Preference()) require.Equal(2, sm.NumProcessing()) @@ -541,7 +540,7 @@ func RecordPollSplitVoteNoChangeTest(t *testing.T, factory Factory) { require.Equal(float64(1), metrics["polls_successful"]) // The second poll will do nothing - require.NoError(sm.RecordPoll(context.Background(), votes)) + require.NoError(sm.RecordPoll(t.Context(), votes)) require.Equal(firstBlock.ID(), sm.Preference()) require.Equal(2, sm.NumProcessing()) @@ -576,7 +575,7 @@ func RecordPollWhenFinalizedTest(t *testing.T, factory Factory) { )) votes := bag.Of(snowmantest.GenesisID) - require.NoError(sm.RecordPoll(context.Background(), votes)) + 
require.NoError(sm.RecordPoll(t.Context(), votes)) require.Zero(sm.NumProcessing()) require.Equal(snowmantest.GenesisID, sm.Preference()) } @@ -623,7 +622,7 @@ func RecordPollRejectTransitivelyTest(t *testing.T, factory Factory) { // Tail = 0 votes := bag.Of(block0.ID()) - require.NoError(sm.RecordPoll(context.Background(), votes)) + require.NoError(sm.RecordPoll(t.Context(), votes)) // Current graph structure: // 0 @@ -679,25 +678,25 @@ func RecordPollTransitivelyResetConfidenceTest(t *testing.T, factory Factory) { // 2 3 votesFor2 := bag.Of(block2.ID()) - require.NoError(sm.RecordPoll(context.Background(), votesFor2)) + require.NoError(sm.RecordPoll(t.Context(), votesFor2)) require.Equal(4, sm.NumProcessing()) require.Equal(block2.ID(), sm.Preference()) emptyVotes := bag.Bag[ids.ID]{} - require.NoError(sm.RecordPoll(context.Background(), emptyVotes)) + require.NoError(sm.RecordPoll(t.Context(), emptyVotes)) require.Equal(4, sm.NumProcessing()) require.Equal(block2.ID(), sm.Preference()) - require.NoError(sm.RecordPoll(context.Background(), votesFor2)) + require.NoError(sm.RecordPoll(t.Context(), votesFor2)) require.Equal(4, sm.NumProcessing()) require.Equal(block2.ID(), sm.Preference()) votesFor3 := bag.Of(block3.ID()) - require.NoError(sm.RecordPoll(context.Background(), votesFor3)) + require.NoError(sm.RecordPoll(t.Context(), votesFor3)) require.Equal(2, sm.NumProcessing()) require.Equal(block3.ID(), sm.Preference()) - require.NoError(sm.RecordPoll(context.Background(), votesFor3)) + require.NoError(sm.RecordPoll(t.Context(), votesFor3)) require.Zero(sm.NumProcessing()) require.Equal(block3.ID(), sm.Preference()) require.Equal(snowtest.Rejected, block0.Status) @@ -737,11 +736,11 @@ func RecordPollInvalidVoteTest(t *testing.T, factory Factory) { require.NoError(sm.Add(block)) validVotes := bag.Of(block.ID()) - require.NoError(sm.RecordPoll(context.Background(), validVotes)) + require.NoError(sm.RecordPoll(t.Context(), validVotes)) invalidVotes := bag.Of(unknownBlockID) - require.NoError(sm.RecordPoll(context.Background(), invalidVotes)) - require.NoError(sm.RecordPoll(context.Background(), validVotes)) + require.NoError(sm.RecordPoll(t.Context(), invalidVotes)) + require.NoError(sm.RecordPoll(t.Context(), validVotes)) require.Equal(1, sm.NumProcessing()) require.Equal(block.ID(), sm.Preference()) } @@ -794,7 +793,7 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { // Tail = 2 votes0_2_4 := bag.Of(block0.ID(), block2.ID(), block4.ID()) - require.NoError(sm.RecordPoll(context.Background(), votes0_2_4)) + require.NoError(sm.RecordPoll(t.Context(), votes0_2_4)) // Current graph structure: // 0 @@ -813,7 +812,7 @@ func RecordPollTransitiveVotingTest(t *testing.T, factory Factory) { require.Equal(snowtest.Undecided, block4.Status) dep2_2_2 := bag.Of(block2.ID(), block2.ID(), block2.ID()) - require.NoError(sm.RecordPoll(context.Background(), dep2_2_2)) + require.NoError(sm.RecordPoll(t.Context(), dep2_2_2)) // Current graph structure: // 2 @@ -886,7 +885,7 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact // the following bits have been decided to follow the 254 remaining bits of // [block0]. votes0 := bag.Of(block0.ID()) - require.NoError(sm.RecordPoll(context.Background(), votes0)) + require.NoError(sm.RecordPoll(t.Context(), votes0)) // Although we are adding in [block2] here - the underlying snowball // instance has already decided it is rejected. 
Snowman doesn't actually @@ -921,7 +920,7 @@ func RecordPollDivergedVotingWithNoConflictingBitTest(t *testing.T, factory Fact // only be marked as accepted after [block2] is marked as accepted; which // will never happen. votes3 := bag.Of(block3.ID()) - require.NoError(sm.RecordPoll(context.Background(), votes3)) + require.NoError(sm.RecordPoll(t.Context(), votes3)) require.Equal(4, sm.NumProcessing()) require.Equal(snowtest.Undecided, block0.Status) @@ -981,7 +980,7 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { require.Equal(a2Block.ID(), pref) b2Votes := bag.Of(b2Block.ID()) - require.NoError(sm.RecordPoll(context.Background(), b2Votes)) + require.NoError(sm.RecordPoll(t.Context(), b2Votes)) require.Equal(b2Block.ID(), sm.Preference()) require.False(sm.IsPreferred(a1Block.ID())) @@ -998,8 +997,8 @@ func RecordPollChangePreferredChainTest(t *testing.T, factory Factory) { require.Equal(b2Block.ID(), pref) a1Votes := bag.Of(a1Block.ID()) - require.NoError(sm.RecordPoll(context.Background(), a1Votes)) - require.NoError(sm.RecordPoll(context.Background(), a1Votes)) + require.NoError(sm.RecordPoll(t.Context(), a1Votes)) + require.NoError(sm.RecordPoll(t.Context(), a1Votes)) require.Equal(a2Block.ID(), sm.Preference()) require.True(sm.IsPreferred(a1Block.ID())) @@ -1058,31 +1057,31 @@ func LastAcceptedTest(t *testing.T, factory Factory) { require.Equal(snowmantest.GenesisID, lastAcceptedID) require.Equal(snowmantest.GenesisHeight, lastAcceptedHeight) - require.NoError(sm.RecordPoll(context.Background(), bag.Of(block0.IDV))) + require.NoError(sm.RecordPoll(t.Context(), bag.Of(block0.IDV))) lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() require.Equal(snowmantest.GenesisID, lastAcceptedID) require.Equal(snowmantest.GenesisHeight, lastAcceptedHeight) - require.NoError(sm.RecordPoll(context.Background(), bag.Of(block1.IDV))) + require.NoError(sm.RecordPoll(t.Context(), bag.Of(block1.IDV))) lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() require.Equal(block0.IDV, lastAcceptedID) require.Equal(block0.HeightV, lastAcceptedHeight) - require.NoError(sm.RecordPoll(context.Background(), bag.Of(block1.IDV))) + require.NoError(sm.RecordPoll(t.Context(), bag.Of(block1.IDV))) lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() require.Equal(block1.IDV, lastAcceptedID) require.Equal(block1.HeightV, lastAcceptedHeight) - require.NoError(sm.RecordPoll(context.Background(), bag.Of(block2.IDV))) + require.NoError(sm.RecordPoll(t.Context(), bag.Of(block2.IDV))) lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() require.Equal(block1.IDV, lastAcceptedID) require.Equal(block1.HeightV, lastAcceptedHeight) - require.NoError(sm.RecordPoll(context.Background(), bag.Of(block2.IDV))) + require.NoError(sm.RecordPoll(t.Context(), bag.Of(block2.IDV))) lastAcceptedID, lastAcceptedHeight = sm.LastAccepted() require.Equal(block2.IDV, lastAcceptedID) @@ -1223,7 +1222,7 @@ func ErrorOnAcceptTest(t *testing.T, factory Factory) { require.NoError(sm.Add(block)) votes := bag.Of(block.ID()) - err := sm.RecordPoll(context.Background(), votes) + err := sm.RecordPoll(t.Context(), votes) require.ErrorIs(err, errTest) } @@ -1261,7 +1260,7 @@ func ErrorOnRejectSiblingTest(t *testing.T, factory Factory) { require.NoError(sm.Add(block1)) votes := bag.Of(block0.ID()) - err := sm.RecordPoll(context.Background(), votes) + err := sm.RecordPoll(t.Context(), votes) require.ErrorIs(err, errTest) } @@ -1301,7 +1300,7 @@ func ErrorOnTransitiveRejectionTest(t *testing.T, factory Factory) { 
require.NoError(sm.Add(block2)) votes := bag.Of(block0.ID()) - err := sm.RecordPoll(context.Background(), votes) + err := sm.RecordPoll(t.Context(), votes) require.ErrorIs(err, errTest) } @@ -1420,7 +1419,7 @@ func RecordPollWithDefaultParameters(t *testing.T, factory Factory) { for i := 0; i < params.Beta; i++ { // should not finalize with less than beta rounds require.Equal(2, sm.NumProcessing()) - require.NoError(sm.RecordPoll(context.Background(), votes)) + require.NoError(sm.RecordPoll(t.Context(), votes)) } require.Zero(sm.NumProcessing()) } @@ -1464,7 +1463,7 @@ func RecordPollRegressionCalculateInDegreeIndegreeCalculation(t *testing.T, fact votes := bag.Bag[ids.ID]{} votes.AddCount(blk2.ID(), 1) votes.AddCount(blk3.ID(), 2) - require.NoError(sm.RecordPoll(context.Background(), votes)) + require.NoError(sm.RecordPoll(t.Context(), votes)) require.Equal(snowtest.Accepted, blk1.Status) require.Equal(snowtest.Accepted, blk2.Status) require.Equal(snowtest.Accepted, blk3.Status) diff --git a/snow/engine/avalanche/bootstrap/bootstrapper_test.go b/snow/engine/avalanche/bootstrap/bootstrapper_test.go index aebd22b21e42..f169bffd0bd9 100644 --- a/snow/engine/avalanche/bootstrap/bootstrapper_test.go +++ b/snow/engine/avalanche/bootstrap/bootstrapper_test.go @@ -221,7 +221,7 @@ func TestBootstrapperSingleFrontier(t *testing.T) { } vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) require.Equal(choices.Accepted, vtx0.Status()) require.Equal(choices.Accepted, vtx1.Status()) @@ -325,12 +325,12 @@ func TestBootstrapperByzantineResponses(t *testing.T) { } vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) // should request vtx0 + require.NoError(bs.Start(t.Context(), 0)) // should request vtx0 require.Equal(vtxID0, reqVtxID) oldReqID := *requestID - require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{vtxBytes2})) // send unexpected vertex - require.NotEqual(oldReqID, *requestID) // should have sent a new request + require.NoError(bs.Ancestors(t.Context(), peerID, *requestID, [][]byte{vtxBytes2})) // send unexpected vertex + require.NotEqual(oldReqID, *requestID) // should have sent a new request oldReqID = *requestID manager.GetVtxF = func(_ context.Context, vtxID ids.ID) (avalanche.Vertex, error) { @@ -359,8 +359,8 @@ func TestBootstrapperByzantineResponses(t *testing.T) { return nil } - require.NoError(bs.Ancestors(context.Background(), peerID, *requestID, [][]byte{vtxBytes0, vtxBytes2})) // send expected vertex and vertex that should not be accepted - require.Equal(oldReqID, *requestID) // shouldn't have sent a new request + require.NoError(bs.Ancestors(t.Context(), peerID, *requestID, [][]byte{vtxBytes0, vtxBytes2})) // send expected vertex and vertex that should not be accepted + require.Equal(oldReqID, *requestID) // shouldn't have sent a new request require.Equal(snow.NormalOp, config.Ctx.State.Get().State) require.Equal(choices.Accepted, vtx0.Status()) require.Equal(choices.Accepted, vtx1.Status()) @@ -484,7 +484,7 @@ func TestBootstrapperTxDependencies(t *testing.T) { } vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) manager.ParseVtxF = func(_ context.Context, vtxBytes []byte) (avalanche.Vertex, error) { switch { @@ -513,7 +513,7 @@ func TestBootstrapperTxDependencies(t *testing.T) { return nil } - 
require.NoError(bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes0})) + require.NoError(bs.Ancestors(t.Context(), peerID, *reqIDPtr, [][]byte{vtxBytes0})) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) require.Equal(choices.Accepted, tx0.Status()) require.Equal(choices.Accepted, tx1.Status()) @@ -615,10 +615,10 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { } vm.CantSetState = false - require.NoError(bs.Start(context.Background(), 0)) // should request vtx1 + require.NoError(bs.Start(t.Context(), 0)) // should request vtx1 require.Equal(vtxID1, requested) - require.NoError(bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes1})) // Provide vtx1; should request vtx0 + require.NoError(bs.Ancestors(t.Context(), peerID, *reqIDPtr, [][]byte{vtxBytes1})) // Provide vtx1; should request vtx0 require.Equal(snow.Bootstrapping, bs.Context().State.Get().State) require.Equal(vtxID0, requested) @@ -636,7 +636,7 @@ func TestBootstrapperIncompleteAncestors(t *testing.T) { return nil } - require.NoError(bs.Ancestors(context.Background(), peerID, *reqIDPtr, [][]byte{vtxBytes0})) // Provide vtx0; can finish now + require.NoError(bs.Ancestors(t.Context(), peerID, *reqIDPtr, [][]byte{vtxBytes0})) // Provide vtx0; can finish now require.Equal(snow.NormalOp, bs.Context().State.Get().State) require.Equal(choices.Accepted, vtx0.Status()) require.Equal(choices.Accepted, vtx1.Status()) diff --git a/snow/engine/avalanche/bootstrap/queue/jobs_test.go b/snow/engine/avalanche/bootstrap/queue/jobs_test.go index e73922de3698..d84fb38af1e8 100644 --- a/snow/engine/avalanche/bootstrap/queue/jobs_test.go +++ b/snow/engine/avalanche/bootstrap/queue/jobs_test.go @@ -88,7 +88,7 @@ func TestPushAndExecute(t *testing.T) { require.NoError(err) require.False(has) - pushed, err := jobs.Push(context.Background(), job) + pushed, err := jobs.Push(t.Context(), job) require.True(pushed) require.NoError(err) @@ -116,7 +116,7 @@ func TestPushAndExecute(t *testing.T) { } snowCtx := snowtest.Context(t, snowtest.CChainID) - count, err := jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) + count, err := jobs.ExecuteAll(t.Context(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) require.NoError(err) require.Equal(1, count) @@ -154,7 +154,7 @@ func TestRemoveDependency(t *testing.T) { return []byte{1} } - pushed, err := jobs.Push(context.Background(), job1) + pushed, err := jobs.Push(t.Context(), job1) require.True(pushed) require.NoError(err) @@ -162,7 +162,7 @@ func TestRemoveDependency(t *testing.T) { require.NoError(err) require.False(hasNext) - pushed, err = jobs.Push(context.Background(), job0) + pushed, err = jobs.Push(t.Context(), job0) require.True(pushed) require.NoError(err) @@ -183,7 +183,7 @@ func TestRemoveDependency(t *testing.T) { } snowCtx := snowtest.Context(t, snowtest.CChainID) - count, err := jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) + count, err := jobs.ExecuteAll(t.Context(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) require.NoError(err) require.Equal(2, count) require.True(executed0) @@ -210,11 +210,11 @@ func TestDuplicatedExecutablePush(t *testing.T) { jobID := ids.GenerateTestID() job := testJob(t, jobID, nil, ids.Empty, nil) - pushed, err := jobs.Push(context.Background(), job) + pushed, err := jobs.Push(t.Context(), job) require.True(pushed) require.NoError(err) - pushed, err = jobs.Push(context.Background(), job) 
+ pushed, err = jobs.Push(t.Context(), job) require.False(pushed) require.NoError(err) @@ -223,7 +223,7 @@ func TestDuplicatedExecutablePush(t *testing.T) { jobs, err = New(db, "", prometheus.NewRegistry()) require.NoError(err) - pushed, err = jobs.Push(context.Background(), job) + pushed, err = jobs.Push(t.Context(), job) require.False(pushed) require.NoError(err) } @@ -241,11 +241,11 @@ func TestDuplicatedNotExecutablePush(t *testing.T) { job1ID := ids.GenerateTestID() job1 := testJob(t, job1ID, nil, job0ID, &executed0) - pushed, err := jobs.Push(context.Background(), job1) + pushed, err := jobs.Push(t.Context(), job1) require.True(pushed) require.NoError(err) - pushed, err = jobs.Push(context.Background(), job1) + pushed, err = jobs.Push(t.Context(), job1) require.False(pushed) require.NoError(err) @@ -254,7 +254,7 @@ func TestDuplicatedNotExecutablePush(t *testing.T) { jobs, err = New(db, "", prometheus.NewRegistry()) require.NoError(err) - pushed, err = jobs.Push(context.Background(), job1) + pushed, err = jobs.Push(t.Context(), job1) require.False(pushed) require.NoError(err) } @@ -267,7 +267,7 @@ func TestMissingJobs(t *testing.T) { jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - require.NoError(jobs.SetParser(context.Background(), parser)) + require.NoError(jobs.SetParser(t.Context(), parser)) job0ID := ids.GenerateTestID() job1ID := ids.GenerateTestID() @@ -294,7 +294,7 @@ func TestMissingJobs(t *testing.T) { jobs, err = NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - require.NoError(jobs.SetParser(context.Background(), parser)) + require.NoError(jobs.SetParser(t.Context(), parser)) missingIDSet = set.Of(jobs.MissingIDs()...) @@ -313,7 +313,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - require.NoError(jobs.SetParser(context.Background(), parser)) + require.NoError(jobs.SetParser(t.Context(), parser)) job0ID, executed0 := ids.GenerateTestID(), false job1ID, executed1 := ids.GenerateTestID(), false @@ -328,7 +328,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { return []byte{1} } - pushed, err := jobs.Push(context.Background(), job1) + pushed, err := jobs.Push(t.Context(), job1) require.True(pushed) require.NoError(err) @@ -336,7 +336,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { require.NoError(err) require.False(hasNext) - pushed, err = jobs.Push(context.Background(), job0) + pushed, err = jobs.Push(t.Context(), job0) require.True(pushed) require.NoError(err) @@ -357,7 +357,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { } snowCtx := snowtest.Context(t, snowtest.CChainID) - _, err = jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) + _, err = jobs.ExecuteAll(t.Context(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) // Assert that the database closed error on job1 causes ExecuteAll // to fail in the middle of execution. require.ErrorIs(err, database.ErrClosed) @@ -374,14 +374,14 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { // recovers correctly. 
jobs, err = NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - require.NoError(jobs.SetParser(context.Background(), parser)) + require.NoError(jobs.SetParser(t.Context(), parser)) missingIDs := jobs.MissingIDs() require.Len(missingIDs, 1) require.Equal(missingIDs[0], job0.ID()) - pushed, err = jobs.Push(context.Background(), job0) + pushed, err = jobs.Push(t.Context(), job0) require.NoError(err) require.True(pushed) @@ -389,7 +389,7 @@ func TestHandleJobWithMissingDependencyOnRunnableStack(t *testing.T) { require.NoError(err) require.True(hasNext) - count, err := jobs.ExecuteAll(context.Background(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) + count, err := jobs.ExecuteAll(t.Context(), snowtest.ConsensusContext(snowCtx), &common.Halter{}, false) require.NoError(err) require.Equal(2, count) require.True(executed1) @@ -403,7 +403,7 @@ func TestInitializeNumJobs(t *testing.T) { jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - require.NoError(jobs.SetParser(context.Background(), parser)) + require.NoError(jobs.SetParser(t.Context(), parser)) job0ID := ids.GenerateTestID() job1ID := ids.GenerateTestID() @@ -441,12 +441,12 @@ func TestInitializeNumJobs(t *testing.T) { }, } - pushed, err := jobs.Push(context.Background(), job0) + pushed, err := jobs.Push(t.Context(), job0) require.True(pushed) require.NoError(err) require.Equal(uint64(1), jobs.state.numJobs) - pushed, err = jobs.Push(context.Background(), job1) + pushed, err = jobs.Push(t.Context(), job1) require.True(pushed) require.NoError(err) require.Equal(uint64(2), jobs.state.numJobs) @@ -468,7 +468,7 @@ func TestClearAll(t *testing.T) { jobs, err := NewWithMissing(db, "", prometheus.NewRegistry()) require.NoError(err) - require.NoError(jobs.SetParser(context.Background(), parser)) + require.NoError(jobs.SetParser(t.Context(), parser)) job0ID, executed0 := ids.GenerateTestID(), false job1ID, executed1 := ids.GenerateTestID(), false job0 := testJob(t, job0ID, &executed0, ids.Empty, nil) @@ -477,11 +477,11 @@ func TestClearAll(t *testing.T) { return []byte{1} } - pushed, err := jobs.Push(context.Background(), job0) + pushed, err := jobs.Push(t.Context(), job0) require.NoError(err) require.True(pushed) - pushed, err = jobs.Push(context.Background(), job1) + pushed, err = jobs.Push(t.Context(), job1) require.True(pushed) require.NoError(err) diff --git a/snow/engine/avalanche/state/unique_vertex_test.go b/snow/engine/avalanche/state/unique_vertex_test.go index d6df15d95c59..ec9294283f7c 100644 --- a/snow/engine/avalanche/state/unique_vertex_test.go +++ b/snow/engine/avalanche/state/unique_vertex_test.go @@ -60,7 +60,7 @@ func TestUnknownUniqueVertexErrors(t *testing.T) { _, err = uVtx.Height() require.ErrorIs(err, errGetHeight) - _, err = uVtx.Txs(context.Background()) + _, err = uVtx.Txs(t.Context()) require.ErrorIs(err, errGetTxs) } @@ -92,7 +92,7 @@ func TestUniqueVertexCacheHit(t *testing.T) { id: id, serializer: s, } - require.NoError(uVtx.setVertex(context.Background(), vtx)) + require.NoError(uVtx.setVertex(t.Context(), vtx)) newUVtx := &uniqueVertex{ id: id, @@ -108,7 +108,7 @@ func TestUniqueVertexCacheHit(t *testing.T) { require.NoError(err) require.Equal(height, newHeight) - txs, err := newUVtx.Txs(context.Background()) + txs, err := newUVtx.Txs(t.Context()) require.NoError(err) require.Len(txs, 1) require.Equal(testTx, txs[0]) @@ -149,7 +149,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { s := newTestSerializer(t, parseTx) uvtxParent := 
newTestUniqueVertex(t, s, nil, [][]byte{txBytesParent}, false) - require.NoError(uvtxParent.Accept(context.Background())) + require.NoError(uvtxParent.Accept(t.Context())) parentID := uvtxParent.ID() parentIDs := []ids.ID{parentID} @@ -174,7 +174,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { require.Equal(choices.Unknown, uVtx.Status()) // Register cache hit - vtx, err := newUniqueVertex(context.Background(), s, vtxBytes) + vtx, err := newUniqueVertex(t.Context(), s, vtxBytes) require.NoError(err) require.Equal(choices.Processing, vtx.Status()) @@ -196,7 +196,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { require.NoError(err) require.Equal(height, vtxHeight) - vtxTxs, err := vtx.Txs(context.Background()) + vtxTxs, err := vtx.Txs(t.Context()) require.NoError(err) require.Len(vtxTxs, 1) require.Equal(txBytes, vtxTxs[0].Bytes()) @@ -212,7 +212,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { validateVertex(vtx, choices.Processing) // Check that a newly parsed vertex refreshed from the cache is valid - vtx, err = newUniqueVertex(context.Background(), s, vtxBytes) + vtx, err = newUniqueVertex(t.Context(), s, vtxBytes) require.NoError(err) validateVertex(vtx, choices.Processing) @@ -227,7 +227,7 @@ func TestUniqueVertexCacheMiss(t *testing.T) { validateVertex(vtx, choices.Processing) s.state.uniqueVtx.Flush() - vtx, err = newUniqueVertex(context.Background(), s, vtxBytes) + vtx, err = newUniqueVertex(t.Context(), s, vtxBytes) require.NoError(err) validateVertex(vtx, choices.Processing) } @@ -251,7 +251,7 @@ func TestParseVertexWithIncorrectChainID(t *testing.T) { return nil, errUnknownTx }) - _, err = s.ParseVtx(context.Background(), vtxBytes) + _, err = s.ParseVtx(t.Context(), vtxBytes) require.ErrorIs(err, errWrongChainID) } @@ -276,14 +276,14 @@ func TestParseVertexWithInvalidTxs(t *testing.T) { require.NoError(err) vtxBytes := statelessVertex.Bytes() - _, err = s.ParseVtx(context.Background(), vtxBytes) + _, err = s.ParseVtx(t.Context(), vtxBytes) require.ErrorIs(err, errUnknownTx) - _, err = s.ParseVtx(context.Background(), vtxBytes) + _, err = s.ParseVtx(t.Context(), vtxBytes) require.ErrorIs(err, errUnknownTx) id := hashing.ComputeHash256Array(vtxBytes) - _, err = s.GetVtx(context.Background(), id) + _, err = s.GetVtx(t.Context(), id) require.ErrorIs(err, errUnknownVertex) childStatelessVertex, err := vertex.Build( // regular, non-stop vertex @@ -295,7 +295,7 @@ func TestParseVertexWithInvalidTxs(t *testing.T) { require.NoError(err) childVtxBytes := childStatelessVertex.Bytes() - childVtx, err := s.ParseVtx(context.Background(), childVtxBytes) + childVtx, err := s.ParseVtx(t.Context(), childVtxBytes) require.NoError(err) parents, err := childVtx.Parents() @@ -334,7 +334,7 @@ func newTestUniqueVertex( ) } require.NoError(err) - uvtx, err := newUniqueVertex(context.Background(), s, vtx.Bytes()) + uvtx, err := newUniqueVertex(t.Context(), s, vtx.Bytes()) require.NoError(err) return uvtx } diff --git a/snow/engine/common/tracker/peers_test.go b/snow/engine/common/tracker/peers_test.go index 05d8b79ab06d..a8a6ce46d202 100644 --- a/snow/engine/common/tracker/peers_test.go +++ b/snow/engine/common/tracker/peers_test.go @@ -4,7 +4,6 @@ package tracker import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -25,7 +24,7 @@ func TestPeers(t *testing.T) { p.OnValidatorAdded(nodeID, nil, ids.Empty, 5) require.Zero(p.ConnectedWeight()) - require.NoError(p.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(p.Connected(t.Context(), nodeID, 
version.CurrentApp)) require.Equal(uint64(5), p.ConnectedWeight()) p.OnValidatorWeightChanged(nodeID, 5, 10) @@ -37,6 +36,6 @@ func TestPeers(t *testing.T) { p.OnValidatorAdded(nodeID, nil, ids.Empty, 5) require.Equal(uint64(5), p.ConnectedWeight()) - require.NoError(p.Disconnected(context.Background(), nodeID)) + require.NoError(p.Disconnected(t.Context(), nodeID)) require.Zero(p.ConnectedWeight()) } diff --git a/snow/engine/snowman/block/batched_vm_test.go b/snow/engine/snowman/block/batched_vm_test.go index 3484ee4a514a..9e72000791ce 100644 --- a/snow/engine/snowman/block/batched_vm_test.go +++ b/snow/engine/snowman/block/batched_vm_test.go @@ -31,7 +31,7 @@ func TestGetAncestorsDatabaseNotFound(t *testing.T) { require.Equal(someID, id) return nil, database.ErrNotFound } - containers, err := GetAncestors(context.Background(), logging.NoLog{}, vm, someID, 10, 10, 1*time.Second) + containers, err := GetAncestors(t.Context(), logging.NoLog{}, vm, someID, 10, 10, 1*time.Second) require.NoError(err) require.Empty(containers) } @@ -47,7 +47,7 @@ func TestGetAncestorsPropagatesErrors(t *testing.T) { require.Equal(someID, id) return nil, errTest } - containers, err := GetAncestors(context.Background(), logging.NoLog{}, vm, someID, 10, 10, 1*time.Second) + containers, err := GetAncestors(t.Context(), logging.NoLog{}, vm, someID, 10, 10, 1*time.Second) require.Nil(containers) require.ErrorIs(err, errTest) } diff --git a/snow/engine/snowman/block/notifier_test.go b/snow/engine/snowman/block/notifier_test.go index b37e1cff70ba..93fe3d687876 100644 --- a/snow/engine/snowman/block/notifier_test.go +++ b/snow/engine/snowman/block/notifier_test.go @@ -4,7 +4,6 @@ package block_test import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -39,7 +38,7 @@ func TestChangeNotifierStateSyncableVM(t *testing.T) { { name: "StateSyncEnabled", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.StateSyncEnabled(context.Background()) + _, err := n.StateSyncEnabled(t.Context()) require.NoError(t, err) }, vm: fullVM, @@ -47,7 +46,7 @@ func TestChangeNotifierStateSyncableVM(t *testing.T) { { name: "GetOngoingSyncStateSummary", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.GetOngoingSyncStateSummary(context.Background()) + _, err := n.GetOngoingSyncStateSummary(t.Context()) require.NoError(t, err) }, vm: fullVM, @@ -55,7 +54,7 @@ func TestChangeNotifierStateSyncableVM(t *testing.T) { { name: "GetLastStateSummary", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.GetLastStateSummary(context.Background()) + _, err := n.GetLastStateSummary(t.Context()) require.NoError(t, err) }, vm: fullVM, @@ -63,7 +62,7 @@ func TestChangeNotifierStateSyncableVM(t *testing.T) { { name: "ParseStateSummary", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.ParseStateSummary(context.Background(), []byte{}) + _, err := n.ParseStateSummary(t.Context(), []byte{}) require.NoError(t, err) }, vm: fullVM, @@ -71,7 +70,7 @@ func TestChangeNotifierStateSyncableVM(t *testing.T) { { name: "GetStateSummary", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.GetStateSummary(context.Background(), 0) + _, err := n.GetStateSummary(t.Context(), 0) require.NoError(t, err) }, vm: fullVM, @@ -79,7 +78,7 @@ func TestChangeNotifierStateSyncableVM(t *testing.T) { { name: "StateSyncEnabled-not-implemented", f: func(t *testing.T, n *block.ChangeNotifier) { - ok, err := n.StateSyncEnabled(context.Background()) + ok, err := n.StateSyncEnabled(t.Context()) require.NoError(t, err) 
require.False(t, ok, "expected StateSyncEnabled to return false") }, @@ -88,7 +87,7 @@ func TestChangeNotifierStateSyncableVM(t *testing.T) { { name: "GetOngoingSyncStateSummary-not-implemented", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.GetOngoingSyncStateSummary(context.Background()) + _, err := n.GetOngoingSyncStateSummary(t.Context()) require.ErrorIs(t, err, block.ErrStateSyncableVMNotImplemented) }, vm: vm, @@ -96,21 +95,21 @@ func TestChangeNotifierStateSyncableVM(t *testing.T) { { name: "GetLastStateSummary-not-implemented", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.GetLastStateSummary(context.Background()) + _, err := n.GetLastStateSummary(t.Context()) require.ErrorIs(t, err, block.ErrStateSyncableVMNotImplemented) }, }, { name: "ParseStateSummary-not-implemented", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.ParseStateSummary(context.Background(), []byte{}) + _, err := n.ParseStateSummary(t.Context(), []byte{}) require.ErrorIs(t, err, block.ErrStateSyncableVMNotImplemented) }, }, { name: "GetStateSummary-not-implemented", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.GetStateSummary(context.Background(), 0) + _, err := n.GetStateSummary(t.Context(), 0) require.ErrorIs(t, err, block.ErrStateSyncableVMNotImplemented) }, }, @@ -140,7 +139,7 @@ func TestChangeNotifierBatchedChainVM(t *testing.T) { { name: "BatchedParseBlock", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.BatchedParseBlock(context.Background(), [][]byte{}) + _, err := n.BatchedParseBlock(t.Context(), [][]byte{}) require.NoError(t, err) }, vm: fullVM, @@ -148,7 +147,7 @@ func TestChangeNotifierBatchedChainVM(t *testing.T) { { name: "GetAncestors", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.GetAncestors(context.Background(), ids.Empty, 0, 0, 0) + _, err := n.GetAncestors(t.Context(), ids.Empty, 0, 0, 0) require.NoError(t, err) }, vm: fullVM, @@ -156,7 +155,7 @@ func TestChangeNotifierBatchedChainVM(t *testing.T) { { name: "BatchedParseBlock-not-implemented", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.BatchedParseBlock(context.Background(), [][]byte{}) + _, err := n.BatchedParseBlock(t.Context(), [][]byte{}) require.ErrorIs(t, err, block.ErrRemoteVMNotImplemented) }, vm: vm, @@ -164,7 +163,7 @@ func TestChangeNotifierBatchedChainVM(t *testing.T) { { name: "GetAncestors-not-implemented", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.GetAncestors(context.Background(), ids.Empty, 0, 0, 0) + _, err := n.GetAncestors(t.Context(), ids.Empty, 0, 0, 0) require.ErrorIs(t, err, block.ErrRemoteVMNotImplemented) }, vm: vm, @@ -194,19 +193,19 @@ func TestChangeNotifierNormal(t *testing.T) { { name: "SetPreference", f: func(t *testing.T, n *block.ChangeNotifier) { - require.NoError(t, n.SetPreference(context.Background(), ids.Empty)) + require.NoError(t, n.SetPreference(t.Context(), ids.Empty)) }, }, { name: "SetState", f: func(t *testing.T, n *block.ChangeNotifier) { - require.NoError(t, n.SetState(context.Background(), snow.NormalOp)) + require.NoError(t, n.SetState(t.Context(), snow.NormalOp)) }, }, { name: "BuildBlock", f: func(t *testing.T, n *block.ChangeNotifier) { - _, err := n.BuildBlock(context.Background()) + _, err := n.BuildBlock(t.Context()) require.NoError(t, err) }, }, @@ -240,22 +239,22 @@ func TestChangeNotifierSetPreference(t *testing.T) { } // First time SetPreference is called, it should invoke OnChange - require.NoError(t, nf.SetPreference(context.Background(), 
ids.Empty), "expected SetPreference to succeed") + require.NoError(t, nf.SetPreference(t.Context(), ids.Empty), "expected SetPreference to succeed") require.True(t, invoked, "expected to have been invoked on first SetPreference call") invoked = false // Second time SetPreference is called with the same block ID, it should not invoke OnChange - require.NoError(t, nf.SetPreference(context.Background(), ids.Empty), "expected SetPreference to succeed on second call with same block ID") + require.NoError(t, nf.SetPreference(t.Context(), ids.Empty), "expected SetPreference to succeed on second call with same block ID") require.False(t, invoked, "expected not to have been invoked on second SetPreference call with same block ID") invoked = false // Third time SetPreference is called with a different block ID, it should invoke OnChange again testID := ids.GenerateTestID() - require.NoError(t, nf.SetPreference(context.Background(), testID), "expected SetPreference to succeed on third call with different block ID") + require.NoError(t, nf.SetPreference(t.Context(), testID), "expected SetPreference to succeed on third call with different block ID") require.True(t, invoked, "expected to have been invoked on third SetPreference call with different block ID") invoked = false // Fourth time SetPreference is called with the same block ID, it should not invoke OnChange - require.NoError(t, nf.SetPreference(context.Background(), testID), "expected SetPreference to succeed on fourth call with same block ID") + require.NoError(t, nf.SetPreference(t.Context(), testID), "expected SetPreference to succeed on fourth call with same block ID") require.False(t, invoked, "expected not to have been invoked on fourth SetPreference call with same block ID") } diff --git a/snow/engine/snowman/bootstrap/bootstrapper_test.go b/snow/engine/snowman/bootstrap/bootstrapper_test.go index 41750b37fd96..b3428fb3f6fa 100644 --- a/snow/engine/snowman/bootstrap/bootstrapper_test.go +++ b/snow/engine/snowman/bootstrap/bootstrapper_test.go @@ -74,7 +74,7 @@ func newConfig(t *testing.T) (Config, ids.NodeID, *enginetest.Sender, *blocktest startupTracker := tracker.NewStartup(tracker.NewPeers(), totalWeight/2+1) vdrs.RegisterSetCallbackListener(ctx.SubnetID, startupTracker) - require.NoError(startupTracker.Connected(context.Background(), peer, version.CurrentApp)) + require.NoError(startupTracker.Connected(t.Context(), peer, version.CurrentApp)) snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) require.NoError(err) @@ -191,7 +191,7 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { } // attempt starting bootstrapper with no stake connected. Bootstrapper should stall. - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) require.False(frontierRequested) // attempt starting bootstrapper with not enough stake connected. Bootstrapper should stall. @@ -199,9 +199,9 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { require.NoError(peers.AddStaker(ctx.SubnetID, vdr0, nil, ids.Empty, startupAlpha/2)) peerTracker.Connected(vdr0, version.CurrentApp) - require.NoError(bs.Connected(context.Background(), vdr0, version.CurrentApp)) + require.NoError(bs.Connected(t.Context(), vdr0, version.CurrentApp)) - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) require.False(frontierRequested) // finally attempt starting bootstrapper with enough stake connected. 
Frontiers should be requested. @@ -209,7 +209,7 @@ func TestBootstrapperStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { require.NoError(peers.AddStaker(ctx.SubnetID, vdr, nil, ids.Empty, startupAlpha)) peerTracker.Connected(vdr, version.CurrentApp) - require.NoError(bs.Connected(context.Background(), vdr, version.CurrentApp)) + require.NoError(bs.Connected(t.Context(), vdr, version.CurrentApp)) require.True(frontierRequested) } @@ -235,9 +235,9 @@ func TestBootstrapperSingleFrontier(t *testing.T) { require.NoError(err) bs.TimeoutRegistrar = &enginetest.Timer{} - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[0:1]))) + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[0:1]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } @@ -264,7 +264,7 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { require.NoError(err) bs.TimeoutRegistrar = &enginetest.Timer{} - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) var requestID uint32 sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { @@ -273,18 +273,18 @@ func TestBootstrapperUnknownByzantineResponse(t *testing.T) { requestID = reqID } - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:2]))) // should request blk1 + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[1:2]))) // should request blk1 oldReqID := requestID - require.NoError(bs.Ancestors(context.Background(), peerID, requestID, blocksToBytes(blks[0:1]))) // respond with wrong block + require.NoError(bs.Ancestors(t.Context(), peerID, requestID, blocksToBytes(blks[0:1]))) // respond with wrong block require.NotEqual(oldReqID, requestID) - require.NoError(bs.Ancestors(context.Background(), peerID, requestID, blocksToBytes(blks[1:2]))) + require.NoError(bs.Ancestors(t.Context(), peerID, requestID, blocksToBytes(blks[1:2]))) require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) snowmantest.RequireStatusIs(require, snowtest.Accepted, blks...) 
- require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:2]))) + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[1:2]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } @@ -310,7 +310,7 @@ func TestBootstrapperPartialFetch(t *testing.T) { require.NoError(err) bs.TimeoutRegistrar = &enginetest.Timer{} - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) var ( requestID uint32 @@ -323,18 +323,18 @@ func TestBootstrapperPartialFetch(t *testing.T) { requested = blkID } - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[3:4]))) // should request blk3 + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[3:4]))) // should request blk3 require.Equal(blks[3].ID(), requested) - require.NoError(bs.Ancestors(context.Background(), peerID, requestID, blocksToBytes(blks[2:4]))) // respond with blk3 and blk2 + require.NoError(bs.Ancestors(t.Context(), peerID, requestID, blocksToBytes(blks[2:4]))) // respond with blk3 and blk2 require.Equal(blks[1].ID(), requested) - require.NoError(bs.Ancestors(context.Background(), peerID, requestID, blocksToBytes(blks[1:2]))) // respond with blk1 + require.NoError(bs.Ancestors(t.Context(), peerID, requestID, blocksToBytes(blks[1:2]))) // respond with blk1 require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) snowmantest.RequireStatusIs(require, snowtest.Accepted, blks...) - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[3:4]))) + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[3:4]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } @@ -361,7 +361,7 @@ func TestBootstrapperEmptyResponse(t *testing.T) { require.NoError(err) bs.TimeoutRegistrar = &enginetest.Timer{} - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) var ( requestedNodeID ids.NodeID @@ -373,17 +373,17 @@ func TestBootstrapperEmptyResponse(t *testing.T) { requestID = reqID } - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:2]))) + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[1:2]))) require.Equal(requestedNodeID, peerID) // Add another peer to allow a new node to be selected. A new node should be // sampled if the prior response was empty. bs.PeerTracker.Connected(ids.GenerateTestNodeID(), version.CurrentApp) - require.NoError(bs.Ancestors(context.Background(), requestedNodeID, requestID, nil)) // respond with empty + require.NoError(bs.Ancestors(t.Context(), requestedNodeID, requestID, nil)) // respond with empty require.NotEqual(requestedNodeID, peerID) - require.NoError(bs.Ancestors(context.Background(), requestedNodeID, requestID, blocksToBytes(blks[1:2]))) + require.NoError(bs.Ancestors(t.Context(), requestedNodeID, requestID, blocksToBytes(blks[1:2]))) require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) snowmantest.RequireStatusIs(require, snowtest.Accepted, blks...) 
} @@ -410,7 +410,7 @@ func TestBootstrapperAncestors(t *testing.T) { require.NoError(err) bs.TimeoutRegistrar = &enginetest.Timer{} - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) var ( requestID uint32 @@ -423,15 +423,15 @@ func TestBootstrapperAncestors(t *testing.T) { requested = blkID } - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[3:4]))) // should request blk3 + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[3:4]))) // should request blk3 require.Equal(blks[3].ID(), requested) - require.NoError(bs.Ancestors(context.Background(), peerID, requestID, blocksToBytes(blks))) // respond with all the blocks + require.NoError(bs.Ancestors(t.Context(), peerID, requestID, blocksToBytes(blks))) // respond with all the blocks require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) snowmantest.RequireStatusIs(require, snowtest.Accepted, blks...) - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[3:4]))) + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[3:4]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } @@ -456,7 +456,7 @@ func TestBootstrapperFinalized(t *testing.T) { require.NoError(err) bs.TimeoutRegistrar = &enginetest.Timer{} - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) requestIDs := map[ids.ID]uint32{} sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { @@ -464,17 +464,17 @@ func TestBootstrapperFinalized(t *testing.T) { requestIDs[blkID] = reqID } - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:3]))) // should request blk1 and blk2 + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[1:3]))) // should request blk1 and blk2 reqIDBlk2, ok := requestIDs[blks[2].ID()] require.True(ok) - require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, blocksToBytes(blks[1:3]))) + require.NoError(bs.Ancestors(t.Context(), peerID, reqIDBlk2, blocksToBytes(blks[1:3]))) require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) snowmantest.RequireStatusIs(require, snowtest.Accepted, blks...) 
- require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[2:3]))) + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[2:3]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } @@ -499,7 +499,7 @@ func TestRestartBootstrapping(t *testing.T) { require.NoError(err) bs.TimeoutRegistrar = &enginetest.Timer{} - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) requestIDs := map[ids.ID]uint32{} sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { @@ -507,12 +507,12 @@ func TestRestartBootstrapping(t *testing.T) { requestIDs[blkID] = reqID } - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[3:4]))) // should request blk3 + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[3:4]))) // should request blk3 reqID, ok := requestIDs[blks[3].ID()] require.True(ok) - require.NoError(bs.Ancestors(context.Background(), peerID, reqID, blocksToBytes(blks[2:4]))) + require.NoError(bs.Ancestors(t.Context(), peerID, reqID, blocksToBytes(blks[2:4]))) require.Contains(requestIDs, blks[1].ID()) // Remove request, so we can restart bootstrapping via startSyncing @@ -520,23 +520,23 @@ func TestRestartBootstrapping(t *testing.T) { require.True(removed) clear(requestIDs) - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[4:5]))) + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[4:5]))) blk1RequestID, ok := requestIDs[blks[1].ID()] require.True(ok) blk4RequestID, ok := requestIDs[blks[4].ID()] require.True(ok) - require.NoError(bs.Ancestors(context.Background(), peerID, blk1RequestID, blocksToBytes(blks[1:2]))) + require.NoError(bs.Ancestors(t.Context(), peerID, blk1RequestID, blocksToBytes(blks[1:2]))) require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) require.Equal(snowtest.Accepted, blks[0].Status) snowmantest.RequireStatusIs(require, snowtest.Undecided, blks[1:]...) - require.NoError(bs.Ancestors(context.Background(), peerID, blk4RequestID, blocksToBytes(blks[4:5]))) + require.NoError(bs.Ancestors(t.Context(), peerID, blk4RequestID, blocksToBytes(blks[4:5]))) require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) snowmantest.RequireStatusIs(require, snowtest.Accepted, blks...) 
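One property worth keeping in mind while reviewing these hunks: per the Go documentation, the context returned by `t.Context()` is canceled just before the test's `Cleanup` functions run. A minimal sketch, not taken from this diff, that makes the ordering explicit:

```go
package example

import "testing"

func TestContextCanceledBeforeCleanup(t *testing.T) {
	ctx := t.Context()

	t.Cleanup(func() {
		// By the time cleanup functions run, the test context has already
		// been canceled, so it must not be handed to new work here.
		if ctx.Err() == nil {
			t.Error("expected t.Context() to be canceled before cleanup")
		}
	})

	// During the test body itself the context is live, which is why the
	// synchronous bootstrapper calls in these hunks can use it directly.
	if err := ctx.Err(); err != nil {
		t.Fatalf("context unexpectedly canceled during the test: %v", err)
	}
}
```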
- require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[4:5]))) + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[4:5]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) } @@ -549,7 +549,7 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { initializeVMWithBlockchain(vm, blks) blks[0].Status = snowtest.Undecided - require.NoError(blks[1].Accept(context.Background())) + require.NoError(blks[1].Accept(t.Context())) bs, err := New( config, @@ -564,7 +564,7 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { require.NoError(err) bs.TimeoutRegistrar = &enginetest.Timer{} - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) requestIDs := map[ids.ID]uint32{} sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { @@ -573,12 +573,12 @@ func TestBootstrapOldBlockAfterStateSync(t *testing.T) { } // Force Accept, the already transitively accepted, blk0 - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[0:1]))) // should request blk0 + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[0:1]))) // should request blk0 reqID, ok := requestIDs[blks[0].ID()] require.True(ok) - require.NoError(bs.Ancestors(context.Background(), peerID, reqID, blocksToBytes(blks[0:1]))) + require.NoError(bs.Ancestors(t.Context(), peerID, reqID, blocksToBytes(blks[0:1]))) require.Equal(snow.NormalOp, config.Ctx.State.Get().State) require.Equal(snowtest.Undecided, blks[0].Status) require.Equal(snowtest.Accepted, blks[1].Status) @@ -611,9 +611,9 @@ func TestBootstrapContinueAfterHalt(t *testing.T) { return getBlockF(ctx, blkID) } - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:2]))) + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[1:2]))) require.Equal(1, bs.missingBlockIDs.Len()) } @@ -653,7 +653,7 @@ func TestBootstrapNoParseOnNew(t *testing.T) { require.NoError(err) startupTracker := tracker.NewStartup(tracker.NewPeers(), totalWeight/2+1) peers.RegisterSetCallbackListener(ctx.SubnetID, startupTracker) - require.NoError(startupTracker.Connected(context.Background(), peer, version.CurrentApp)) + require.NoError(startupTracker.Connected(t.Context(), peer, version.CurrentApp)) snowGetHandler, err := getter.New(vm, sender, ctx.Log, time.Second, 2000, ctx.Registerer) require.NoError(err) @@ -733,7 +733,7 @@ func TestBootstrapperReceiveStaleAncestorsMessage(t *testing.T) { require.NoError(err) bs.TimeoutRegistrar = &enginetest.Timer{} - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) requestIDs := map[ids.ID]uint32{} sender.SendGetAncestorsF = func(_ context.Context, nodeID ids.NodeID, reqID uint32, blkID ids.ID) { @@ -741,18 +741,18 @@ func TestBootstrapperReceiveStaleAncestorsMessage(t *testing.T) { requestIDs[blkID] = reqID } - require.NoError(bs.startSyncing(context.Background(), blocksToIDs(blks[1:3]))) // should request blk1 and blk2 + require.NoError(bs.startSyncing(t.Context(), blocksToIDs(blks[1:3]))) // should request blk1 and blk2 reqIDBlk1, ok := requestIDs[blks[1].ID()] require.True(ok) reqIDBlk2, ok := requestIDs[blks[2].ID()] require.True(ok) - require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk2, blocksToBytes(blks[1:3]))) + require.NoError(bs.Ancestors(t.Context(), peerID, reqIDBlk2, blocksToBytes(blks[1:3]))) 
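`t.Context()` is an ordinary `context.Context`, so a test that needs a tighter deadline for a single call can still derive one from it instead of from `context.Background()`. A small illustrative sketch; the two-second timeout is arbitrary:

```go
package example

import (
	"context"
	"testing"
	"time"
)

func TestWithDerivedDeadline(t *testing.T) {
	// Wrap the test context when one call needs its own, shorter deadline.
	ctx, cancel := context.WithTimeout(t.Context(), 2*time.Second)
	defer cancel()

	select {
	case <-ctx.Done():
		t.Fatalf("deadline expired before the work finished: %v", ctx.Err())
	default:
		// Work that is expected to finish within the deadline would go here.
	}
}
```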
require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) snowmantest.RequireStatusIs(require, snowtest.Accepted, blks...) - require.NoError(bs.Ancestors(context.Background(), peerID, reqIDBlk1, blocksToBytes(blks[1:2]))) + require.NoError(bs.Ancestors(t.Context(), peerID, reqIDBlk1, blocksToBytes(blks[1:2]))) require.Equal(snow.Bootstrapping, config.Ctx.State.Get().State) } @@ -784,7 +784,7 @@ func TestBootstrapperRollbackOnSetState(t *testing.T) { return nil } - require.NoError(bs.Start(context.Background(), 0)) + require.NoError(bs.Start(t.Context(), 0)) require.Equal(blks[0].HeightV, bs.startingHeight) } diff --git a/snow/engine/snowman/bootstrap/storage_test.go b/snow/engine/snowman/bootstrap/storage_test.go index 8c95360706f1..6974d75f6f22 100644 --- a/snow/engine/snowman/bootstrap/storage_test.go +++ b/snow/engine/snowman/bootstrap/storage_test.go @@ -91,7 +91,7 @@ func TestGetMissingBlockIDs(t *testing.T) { } missingBlockIDs, err := getMissingBlockIDs( - context.Background(), + t.Context(), db, parser, tree, @@ -268,7 +268,7 @@ func TestExecute(t *testing.T) { } require.NoError(execute( - context.Background(), + t.Context(), test.haltable.Halted, logging.NoLog{}.Info, db, diff --git a/snow/engine/snowman/engine_test.go b/snow/engine/snowman/engine_test.go index c013d5bc355c..f448cb9b16ff 100644 --- a/snow/engine/snowman/engine_test.go +++ b/snow/engine/snowman/engine_test.go @@ -74,7 +74,7 @@ func setup(t *testing.T, config Config) (ids.NodeID, validators.Manager, *engine vdr := ids.GenerateTestNodeID() require.NoError(config.Validators.AddStaker(config.Ctx.SubnetID, vdr, nil, ids.Empty, 1)) - require.NoError(config.ConnectedValidators.Connected(context.Background(), vdr, version.CurrentApp)) + require.NoError(config.ConnectedValidators.Connected(t.Context(), vdr, version.CurrentApp)) config.Validators.RegisterSetCallbackListener(config.Ctx.SubnetID, config.ConnectedValidators) sender := &enginetest.Sender{T: t} @@ -115,7 +115,7 @@ func setup(t *testing.T, config Config) (ids.NodeID, validators.Manager, *engine te, err := New(config) require.NoError(err) - require.NoError(te.Start(context.Background(), 0)) + require.NoError(te.Start(t.Context(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil @@ -155,7 +155,7 @@ func TestEngineDropsAttemptToIssueBlockAfterFailedRequest(t *testing.T) { // Attempting to add [child] will cause [parent] to be requested. While the // request for [parent] is outstanding, [child] will be registered into a // job blocked on [parent]'s issuance. - require.NoError(engine.Put(context.Background(), peerID, 0, child.Bytes())) + require.NoError(engine.Put(t.Context(), peerID, 0, child.Bytes())) require.NotNil(request) require.Equal(1, engine.blocked.NumDependencies()) @@ -165,7 +165,7 @@ func TestEngineDropsAttemptToIssueBlockAfterFailedRequest(t *testing.T) { // Because this request doesn't provide [parent], the [child] job should be // cancelled. - require.NoError(engine.Put(context.Background(), request.NodeID, request.RequestID, nil)) + require.NoError(engine.Put(t.Context(), request.NodeID, request.RequestID, nil)) require.Zero(engine.blocked.NumDependencies()) } @@ -216,7 +216,7 @@ func TestEngineQuery(t *testing.T) { // Handling a pull query for [parent] should result in immediately // responding with chits for [Genesis] along with a request for [parent]. 
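Many of the engine hunks that follow assign closures to function-valued fields such as `sender.SendGetAncestorsF` or `sender.SendPullQueryF` to capture the outgoing request ID, then replay it when simulating the peer's response. The sketch below reproduces that test-double pattern with simplified, illustrative types (`fakeSender`, plain strings instead of `ids.NodeID`/`ids.ID`); it is not the real enginetest API.

```go
package example

import (
	"context"
	"testing"
)

// fakeSender mirrors the shape of the sender test double used in these tests:
// optional function fields that a test assigns to observe outgoing requests.
type fakeSender struct {
	SendGetF func(ctx context.Context, nodeID string, requestID uint32, blkID string)
}

func (s *fakeSender) SendGet(ctx context.Context, nodeID string, requestID uint32, blkID string) {
	if s.SendGetF != nil {
		s.SendGetF(ctx, nodeID, requestID, blkID)
	}
}

func TestCapturesOutgoingRequestID(t *testing.T) {
	sender := &fakeSender{}

	var capturedReqID uint32
	sender.SendGetF = func(_ context.Context, _ string, reqID uint32, _ string) {
		capturedReqID = reqID
	}

	// The code under test would issue the request; the test later replays
	// capturedReqID when simulating the peer's Put/Ancestors response.
	sender.SendGet(t.Context(), "peer", 7, "blk")

	if capturedReqID != 7 {
		t.Fatalf("expected request ID 7, got %d", capturedReqID)
	}
}
```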
- require.NoError(engine.PullQuery(context.Background(), peerID, 15, parent.ID(), 1)) + require.NoError(engine.PullQuery(t.Context(), peerID, 15, parent.ID(), 1)) require.True(sendChitsCalled) require.True(getBlockCalled) require.NotNil(getRequest) @@ -240,7 +240,7 @@ func TestEngineQuery(t *testing.T) { // After receiving [parent], the engine will parse it, issue it, and then // send a pull query. - require.NoError(engine.Put(context.Background(), getRequest.NodeID, getRequest.RequestID, parent.Bytes())) + require.NoError(engine.Put(t.Context(), getRequest.NodeID, getRequest.RequestID, parent.Bytes())) require.NotNil(queryRequest) vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -266,7 +266,7 @@ func TestEngineQuery(t *testing.T) { // Handling chits for [child] register a voter job blocking on [child]'s // issuance and send a request for [child]. - require.NoError(engine.Chits(context.Background(), queryRequest.NodeID, queryRequest.RequestID, child.ID(), child.ID(), child.ID(), child.Height())) + require.NoError(engine.Chits(t.Context(), queryRequest.NodeID, queryRequest.RequestID, child.ID(), child.ID(), child.ID(), child.Height())) queryRequest = nil sender.SendPullQueryF = func(_ context.Context, nodeIDs set.Set[ids.NodeID], requestID uint32, blockID ids.ID, requestedHeight uint64) { @@ -300,7 +300,7 @@ func TestEngineQuery(t *testing.T) { // After receiving [child], the engine will parse it, issue it, and then // apply the votes received during the poll for [parent]. Applying the votes // should cause both [parent] and [child] to be accepted. - require.NoError(engine.Put(context.Background(), getRequest.NodeID, getRequest.RequestID, child.Bytes())) + require.NoError(engine.Put(t.Context(), getRequest.NodeID, getRequest.RequestID, child.Bytes())) require.Equal(snowtest.Accepted, parent.Status) require.Equal(snowtest.Accepted, child.Status) require.Zero(engine.blocked.NumDependencies()) @@ -355,7 +355,7 @@ func TestEngineMultipleQuery(t *testing.T) { te, err := New(engCfg) require.NoError(err) - require.NoError(te.Start(context.Background(), 0)) + require.NoError(te.Start(t.Context(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil @@ -385,7 +385,7 @@ func TestEngineMultipleQuery(t *testing.T) { } require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, blk0, false, @@ -414,8 +414,8 @@ func TestEngineMultipleQuery(t *testing.T) { require.Equal(vdr0, inVdr) require.Equal(blk1.ID(), blkID) } - require.NoError(te.Chits(context.Background(), vdr0, *queryRequestID, blk1.ID(), blk1.ID(), blk1.ID(), blk1.Height())) - require.NoError(te.Chits(context.Background(), vdr1, *queryRequestID, blk1.ID(), blk1.ID(), blk1.ID(), blk1.Height())) + require.NoError(te.Chits(t.Context(), vdr0, *queryRequestID, blk1.ID(), blk1.ID(), blk1.ID(), blk1.Height())) + require.NoError(te.Chits(t.Context(), vdr1, *queryRequestID, blk1.ID(), blk1.ID(), blk1.ID(), blk1.Height())) vm.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { vm.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -443,10 +443,10 @@ func TestEngineMultipleQuery(t *testing.T) { require.Equal(blk1.ID(), blkID) require.Equal(uint64(1), requestedHeight) } - require.NoError(te.Put(context.Background(), vdr0, *getRequestID, blk1.Bytes())) + require.NoError(te.Put(t.Context(), vdr0, *getRequestID, blk1.Bytes())) // Should be dropped because the query was already filled - require.NoError(te.Chits(context.Background(), vdr2, *queryRequestID, blk0.ID(), 
blk0.ID(), blk0.ID(), blk0.Height())) + require.NoError(te.Chits(t.Context(), vdr2, *queryRequestID, blk0.ID(), blk0.ID(), blk0.ID(), blk0.Height())) require.Equal(snowtest.Accepted, blk1.Status) require.Zero(te.blocked.NumDependencies()) @@ -475,7 +475,7 @@ func TestEngineBlockedIssue(t *testing.T) { } require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, blk1, false, @@ -483,7 +483,7 @@ func TestEngineBlockedIssue(t *testing.T) { )) require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, blk0, false, @@ -515,7 +515,7 @@ func TestEngineRespondsToGetRequest(t *testing.T) { require.Equal(snowmantest.GenesisBytes, blk) } - require.NoError(te.Get(context.Background(), vdr, 123, snowmantest.GenesisID)) + require.NoError(te.Get(t.Context(), vdr, 123, snowmantest.GenesisID)) require.True(sentPut) } @@ -568,7 +568,7 @@ func TestEnginePushQuery(t *testing.T) { require.Equal(uint64(1), requestedHeight) } - require.NoError(te.PushQuery(context.Background(), vdr, 20, blk.Bytes(), 1)) + require.NoError(te.PushQuery(t.Context(), vdr, 20, blk.Bytes(), 1)) require.True(*chitted) require.True(*queried) @@ -607,7 +607,7 @@ func TestEngineBuildBlock(t *testing.T) { vm.BuildBlockF = func(context.Context) (snowman.Block, error) { return blk, nil } - require.NoError(te.Notify(context.Background(), common.PendingTxs)) + require.NoError(te.Notify(t.Context(), common.PendingTxs)) require.True(*pushSent) } @@ -626,7 +626,7 @@ func TestEngineRepoll(t *testing.T) { require.Equal(vdrSet, inVdrs) } - te.repoll(context.Background()) + te.repoll(t.Context()) require.True(*queried) } @@ -680,7 +680,7 @@ func TestVoteCanceling(t *testing.T) { te, err := New(engCfg) require.NoError(err) - require.NoError(te.Start(context.Background(), 0)) + require.NoError(te.Start(t.Context(), 0)) vm.LastAcceptedF = nil @@ -699,7 +699,7 @@ func TestVoteCanceling(t *testing.T) { } require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, blk, true, @@ -708,7 +708,7 @@ func TestVoteCanceling(t *testing.T) { require.Equal(1, te.polls.Len()) - require.NoError(te.QueryFailed(context.Background(), vdr0, *queryRequestID)) + require.NoError(te.QueryFailed(t.Context(), vdr0, *queryRequestID)) require.Equal(1, te.polls.Len()) @@ -716,7 +716,7 @@ func TestVoteCanceling(t *testing.T) { sender.SendPullQueryF = func(context.Context, set.Set[ids.NodeID], uint32, ids.ID, uint64) { *repolled = true } - require.NoError(te.QueryFailed(context.Background(), vdr1, *queryRequestID)) + require.NoError(te.QueryFailed(t.Context(), vdr1, *queryRequestID)) require.True(*repolled) } @@ -748,12 +748,12 @@ func TestEngineNoQuery(t *testing.T) { te, err := New(engCfg) require.NoError(err) - require.NoError(te.Start(context.Background(), 0)) + require.NoError(te.Start(t.Context(), 0)) blk := snowmantest.BuildChild(snowmantest.Genesis) require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, blk, false, @@ -788,9 +788,9 @@ func TestEngineNoRepollQuery(t *testing.T) { te, err := New(engCfg) require.NoError(err) - require.NoError(te.Start(context.Background(), 0)) + require.NoError(te.Start(t.Context(), 0)) - te.repoll(context.Background()) + te.repoll(t.Context()) } func TestEngineAbandonQuery(t *testing.T) { @@ -814,11 +814,11 @@ func TestEngineAbandonQuery(t *testing.T) { sender.CantSendChits = false - require.NoError(te.PullQuery(context.Background(), vdr, 0, blkID, 0)) + require.NoError(te.PullQuery(t.Context(), vdr, 0, blkID, 0)) require.Equal(1, te.blkReqs.Len()) - 
require.NoError(te.GetFailed(context.Background(), vdr, *reqID)) + require.NoError(te.GetFailed(t.Context(), vdr, *reqID)) require.Zero(te.blkReqs.Len()) } @@ -849,7 +849,7 @@ func TestEngineAbandonChit(t *testing.T) { } require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, blk, false, @@ -867,12 +867,12 @@ func TestEngineAbandonChit(t *testing.T) { } // Register a voter dependency on an unknown block. - require.NoError(te.Chits(context.Background(), vdr, reqID, fakeBlkID, fakeBlkID, fakeBlkID, blk.Height())) + require.NoError(te.Chits(t.Context(), vdr, reqID, fakeBlkID, fakeBlkID, fakeBlkID, blk.Height())) require.Equal(1, te.blocked.NumDependencies()) sender.CantSendPullQuery = false - require.NoError(te.GetFailed(context.Background(), vdr, reqID)) + require.NoError(te.GetFailed(t.Context(), vdr, reqID)) require.Zero(te.blocked.NumDependencies()) } @@ -902,7 +902,7 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { } require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, blk, true, @@ -920,7 +920,7 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { } // Register a voter dependency on an unknown block. - require.NoError(te.Chits(context.Background(), vdr, reqID, fakeBlkID, fakeBlkID, fakeBlkID, blk.Height())) + require.NoError(te.Chits(t.Context(), vdr, reqID, fakeBlkID, fakeBlkID, fakeBlkID, blk.Height())) require.Equal(1, te.blocked.NumDependencies()) sender.CantSendPullQuery = false @@ -932,7 +932,7 @@ func TestEngineAbandonChitWithUnexpectedPutBlock(t *testing.T) { // Respond with an unexpected block and verify that the request is correctly // cleared. - require.NoError(te.Put(context.Background(), vdr, reqID, snowmantest.GenesisBytes)) + require.NoError(te.Put(t.Context(), vdr, reqID, snowmantest.GenesisBytes)) require.Zero(te.blocked.NumDependencies()) } @@ -966,7 +966,7 @@ func TestEngineBlockingChitRequest(t *testing.T) { } require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, parentBlk, false, @@ -975,14 +975,14 @@ func TestEngineBlockingChitRequest(t *testing.T) { sender.CantSendChits = false - require.NoError(te.PushQuery(context.Background(), vdr, 0, blockingBlk.Bytes(), 0)) + require.NoError(te.PushQuery(t.Context(), vdr, 0, blockingBlk.Bytes(), 0)) require.Equal(2, te.blocked.NumDependencies()) sender.CantSendPullQuery = false require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, missingBlk, false, @@ -1046,7 +1046,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { // Issuing [blockingBlk] will register an issuer job for [blockingBlk] // awaiting on [missingBlk]. It will also send a request for [missingBlk]. require.NoError(te.Put( - context.Background(), + t.Context(), peerID, 0, blockingBlk.Bytes(), @@ -1067,7 +1067,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { // Issuing [issuedBlk] will immediately adds [issuedBlk] to consensus, sets // it as the preferred block, and sends a query for [issuedBlk]. require.NoError(te.Put( - context.Background(), + t.Context(), peerID, 0, issuedBlk.Bytes(), @@ -1080,7 +1080,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { // [issuedBlk] is [missingBlk]. This registers a voter job dependent on // [blockingBlk] and [missingBlk]. 
require.NoError(te.Chits( - context.Background(), + t.Context(), queryRequest.NodeID, queryRequest.RequestID, blockingBlk.ID(), @@ -1120,7 +1120,7 @@ func TestEngineBlockingChitResponse(t *testing.T) { // Issuing [missingBlk] will add the block into consensus. However, it will // not send a query for it as it is not the preferred block. require.NoError(te.Put( - context.Background(), + t.Context(), getRequest.NodeID, getRequest.RequestID, missingBlk.Bytes(), @@ -1147,12 +1147,12 @@ func TestEngineRetryFetch(t *testing.T) { } sender.CantSendChits = false - require.NoError(te.PullQuery(context.Background(), vdr, 0, missingBlk.ID(), 0)) + require.NoError(te.PullQuery(t.Context(), vdr, 0, missingBlk.ID(), 0)) vm.CantGetBlock = true sender.SendGetF = nil - require.NoError(te.GetFailed(context.Background(), vdr, *reqID)) + require.NoError(te.GetFailed(t.Context(), vdr, *reqID)) vm.CantGetBlock = false @@ -1161,7 +1161,7 @@ func TestEngineRetryFetch(t *testing.T) { *called = true } - require.NoError(te.PullQuery(context.Background(), vdr, 0, missingBlk.ID(), 0)) + require.NoError(te.PullQuery(t.Context(), vdr, 0, missingBlk.ID(), 0)) vm.CantGetBlock = true sender.SendGetF = nil @@ -1200,7 +1200,7 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { } } require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, validBlk, false, @@ -1208,13 +1208,13 @@ func TestEngineUndeclaredDependencyDeadlock(t *testing.T) { )) sender.SendPushQueryF = nil require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, invalidBlk, false, te.metrics.issued.WithLabelValues(unknownSource), )) - require.NoError(te.Chits(context.Background(), vdr, *reqID, invalidBlkID, invalidBlkID, invalidBlkID, invalidBlk.Height())) + require.NoError(te.Chits(t.Context(), vdr, *reqID, invalidBlkID, invalidBlkID, invalidBlkID, invalidBlk.Height())) require.Equal(snowtest.Accepted, validBlk.Status) } @@ -1238,7 +1238,7 @@ func TestEngineGossip(t *testing.T) { require.Equal(set.Of(nodeID), nodeIDs) } - require.NoError(te.Gossip(context.Background())) + require.NoError(te.Gossip(t.Context())) require.True(calledSendPullQuery) } @@ -1287,9 +1287,9 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { } sender.CantSendChits = false - require.NoError(te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes(), 0)) + require.NoError(te.PushQuery(t.Context(), vdr, 0, pendingBlk.Bytes(), 0)) - require.NoError(te.Put(context.Background(), secondVdr, *reqID, []byte{3})) + require.NoError(te.Put(t.Context(), secondVdr, *reqID, []byte{3})) *parsed = false vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -1314,7 +1314,7 @@ func TestEngineInvalidBlockIgnoredFromUnexpectedPeer(t *testing.T) { } sender.CantSendPullQuery = false - require.NoError(te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes())) + require.NoError(te.Put(t.Context(), vdr, *reqID, missingBlk.Bytes())) require.Equal(pendingBlk.ID(), te.Consensus.Preference()) } @@ -1360,12 +1360,12 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } sender.CantSendChits = false - require.NoError(te.PushQuery(context.Background(), vdr, 0, pendingBlk.Bytes(), 0)) + require.NoError(te.PushQuery(t.Context(), vdr, 0, pendingBlk.Bytes(), 0)) sender.SendGetF = nil sender.CantSendGet = false - require.NoError(te.PushQuery(context.Background(), vdr, *reqID, []byte{3}, 0)) + require.NoError(te.PushQuery(t.Context(), vdr, *reqID, []byte{3}, 0)) *parsed = false vm.ParseBlockF = func(_ 
context.Context, b []byte) (snowman.Block, error) { @@ -1390,7 +1390,7 @@ func TestEnginePushQueryRequestIDConflict(t *testing.T) { } sender.CantSendPullQuery = false - require.NoError(te.Put(context.Background(), vdr, *reqID, missingBlk.Bytes())) + require.NoError(te.Put(t.Context(), vdr, *reqID, missingBlk.Bytes())) require.Equal(pendingBlk.ID(), te.Consensus.Preference()) } @@ -1431,7 +1431,7 @@ func TestEngineAggressivePolling(t *testing.T) { te, err := New(engCfg) require.NoError(err) - require.NoError(te.Start(context.Background(), 0)) + require.NoError(te.Start(t.Context(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil @@ -1466,7 +1466,7 @@ func TestEngineAggressivePolling(t *testing.T) { *numPulled++ } - require.NoError(te.Put(context.Background(), vdr, 0, pendingBlk.Bytes())) + require.NoError(te.Put(t.Context(), vdr, 0, pendingBlk.Bytes())) require.Equal(2, *numPulled) } @@ -1519,7 +1519,7 @@ func TestEngineDoubleChit(t *testing.T) { te, err := New(engCfg) require.NoError(err) - require.NoError(te.Start(context.Background(), 0)) + require.NoError(te.Start(t.Context(), 0)) vm.LastAcceptedF = nil @@ -1537,7 +1537,7 @@ func TestEngineDoubleChit(t *testing.T) { require.Equal(uint64(1), requestedHeight) } require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, blk, false, @@ -1557,13 +1557,13 @@ func TestEngineDoubleChit(t *testing.T) { require.Equal(snowtest.Undecided, blk.Status) - require.NoError(te.Chits(context.Background(), vdr0, *queryRequestID, blk.ID(), blk.ID(), blk.ID(), blk.Height())) + require.NoError(te.Chits(t.Context(), vdr0, *queryRequestID, blk.ID(), blk.ID(), blk.ID(), blk.Height())) require.Equal(snowtest.Undecided, blk.Status) - require.NoError(te.Chits(context.Background(), vdr0, *queryRequestID, blk.ID(), blk.ID(), blk.ID(), blk.Height())) + require.NoError(te.Chits(t.Context(), vdr0, *queryRequestID, blk.ID(), blk.ID(), blk.ID(), blk.Height())) require.Equal(snowtest.Undecided, blk.Status) - require.NoError(te.Chits(context.Background(), vdr1, *queryRequestID, blk.ID(), blk.ID(), blk.ID(), blk.Height())) + require.NoError(te.Chits(t.Context(), vdr1, *queryRequestID, blk.ID(), blk.ID(), blk.ID(), blk.Height())) require.Equal(snowtest.Accepted, blk.Status) } @@ -1605,7 +1605,7 @@ func TestEngineBuildBlockLimit(t *testing.T) { te, err := New(engCfg) require.NoError(err) - require.NoError(te.Start(context.Background(), 0)) + require.NoError(te.Start(t.Context(), 0)) vm.GetBlockF = nil vm.LastAcceptedF = nil @@ -1641,12 +1641,12 @@ func TestEngineBuildBlockLimit(t *testing.T) { blkToReturn++ return blk, nil } - require.NoError(te.Notify(context.Background(), common.PendingTxs)) + require.NoError(te.Notify(t.Context(), common.PendingTxs)) require.True(queried) queried = false - require.NoError(te.Notify(context.Background(), common.PendingTxs)) + require.NoError(te.Notify(t.Context(), common.PendingTxs)) require.False(queried) @@ -1661,7 +1661,7 @@ func TestEngineBuildBlockLimit(t *testing.T) { } } - require.NoError(te.Chits(context.Background(), vdr, reqID, blk0.ID(), blk0.ID(), blk0.ID(), blk0.Height())) + require.NoError(te.Chits(t.Context(), vdr, reqID, blk0.ID(), blk0.ID(), blk0.ID(), blk0.Height())) require.True(queried) } @@ -1696,17 +1696,17 @@ func TestEngineDropRejectedBlockOnReceipt(t *testing.T) { } // Issue [acceptedBlk] to the engine. 
This - require.NoError(te.PushQuery(context.Background(), nodeID, 0, acceptedBlk.Bytes(), acceptedBlk.Height())) + require.NoError(te.PushQuery(t.Context(), nodeID, 0, acceptedBlk.Bytes(), acceptedBlk.Height())) require.Len(queryRequestIDs, 1) // Vote for [acceptedBlk] and cause it to be accepted. - require.NoError(te.Chits(context.Background(), nodeID, queryRequestIDs[0], acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.Height())) + require.NoError(te.Chits(t.Context(), nodeID, queryRequestIDs[0], acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.ID(), acceptedBlk.Height())) require.Len(queryRequestIDs, 1) // Shouldn't have caused another query require.Equal(snowtest.Accepted, acceptedBlk.Status) // Attempt to issue rejectedChain[1] to the engine. This should be dropped // because the engine knows it has rejected it's parent rejectedChain[0]. - require.NoError(te.PushQuery(context.Background(), nodeID, 0, rejectedChain[1].Bytes(), acceptedBlk.Height())) + require.NoError(te.PushQuery(t.Context(), nodeID, 0, rejectedChain[1].Bytes(), acceptedBlk.Height())) require.Len(queryRequestIDs, 1) // Shouldn't have caused another query require.Zero(te.blkReqs.Len()) } @@ -1750,9 +1750,9 @@ func TestEngineNonPreferredAmplification(t *testing.T) { require.Equal(uint64(1), requestedHeight) } - require.NoError(te.Put(context.Background(), vdr, 0, preferredBlk.Bytes())) + require.NoError(te.Put(t.Context(), vdr, 0, preferredBlk.Bytes())) - require.NoError(te.Put(context.Background(), vdr, 0, nonPreferredBlk.Bytes())) + require.NoError(te.Put(t.Context(), vdr, 0, nonPreferredBlk.Bytes())) } // Test that in the following scenario, if block B fails verification, votes @@ -1816,7 +1816,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // This engine receives a Gossip message for [blk2] which was "unknown" in this engine. // The engine thus learns about its ancestor [blk1] and should send a Get request for it. // (see above for expected "Get" request) - require.NoError(te.PushQuery(context.Background(), vdr, 0, blk2.Bytes(), 0)) + require.NoError(te.PushQuery(t.Context(), vdr, 0, blk2.Bytes(), 0)) require.True(*asked) // Prepare to PullQuery [blk1] after our Get request is fulfilled. We should not PullQuery @@ -1837,7 +1837,7 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // which will result in attempting to issue [blk2]. However, [blk2] should fail verification and be dropped. // By issuing [blk1], this node should fire a "PullQuery" request for [blk1]. // (see above for expected "PullQuery" request) - require.NoError(te.Put(context.Background(), vdr, *reqID, blk1.Bytes())) + require.NoError(te.Put(t.Context(), vdr, *reqID, blk1.Bytes())) require.True(*asked) require.True(*queried, "Didn't query the newly issued blk1") @@ -1866,11 +1866,11 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { // Now we are expecting a Chits message, and we receive it for [blk2] // instead of [blk1]. This will cause the node to again request [blk2]. - require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blk2.ID(), blk1.ID(), blk2.ID(), blk2.Height())) + require.NoError(te.Chits(t.Context(), vdr, *queryRequestID, blk2.ID(), blk1.ID(), blk2.ID(), blk2.Height())) // The votes should be bubbled through [blk2] despite the fact that it is // failing verification. 
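One practical benefit of the switch is that `t.Context()` gives test-spawned goroutines a natural shutdown signal when the test finishes. None of the tests in these hunks start such a goroutine, so the following is purely an illustrative sketch of that pattern:

```go
package example

import "testing"

func TestGoroutineObservesTestEnd(t *testing.T) {
	ctx := t.Context()
	done := make(chan struct{})

	go func() {
		defer close(done)
		// Unblocks once the test is finishing, because the test context is
		// canceled just before cleanup functions run.
		<-ctx.Done()
	}()

	t.Cleanup(func() {
		// Wait for the goroutine so it does not outlive the test.
		<-done
	})
}
```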
- require.NoError(te.Put(context.Background(), *reqVdr, *sendReqID, blk2.Bytes())) + require.NoError(te.Put(t.Context(), *reqVdr, *sendReqID, blk2.Bytes())) // The vote should be bubbled through [blk2], such that [blk1] gets marked as Accepted. require.Equal(snowtest.Accepted, blk1.Status) @@ -1902,11 +1902,11 @@ func TestEngineBubbleVotesThroughInvalidBlock(t *testing.T) { require.Equal(uint64(2), requestedHeight) } // Expect that the Engine will send a PullQuery after receiving this Gossip message for [blk2]. - require.NoError(te.PushQuery(context.Background(), vdr, 0, blk2.Bytes(), 0)) + require.NoError(te.PushQuery(t.Context(), vdr, 0, blk2.Bytes(), 0)) require.True(*queried) // After a single vote for [blk2], it should be marked as accepted. - require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blk2.ID(), blk1.ID(), blk2.ID(), blk2.Height())) + require.NoError(te.Chits(t.Context(), vdr, *queryRequestID, blk2.ID(), blk1.ID(), blk2.ID(), blk2.Height())) require.Equal(snowtest.Accepted, blk2.Status) } @@ -1975,7 +1975,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { // Receive Gossip message for [blk3] first and expect the sender to issue a // Get request for its ancestor: [blk2]. - require.NoError(te.PushQuery(context.Background(), vdr, 0, blk3.Bytes(), 0)) + require.NoError(te.PushQuery(t.Context(), vdr, 0, blk3.Bytes(), 0)) require.True(*asked) // Prepare to PullQuery [blk1] after our request for [blk2] is fulfilled. @@ -1993,7 +1993,7 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { } // Answer the request, this should result in [blk1] being issued as well. - require.NoError(te.Put(context.Background(), vdr, *reqID, blk2.Bytes())) + require.NoError(te.Put(t.Context(), vdr, *reqID, blk2.Bytes())) require.True(*queried) sendReqID := new(uint32) @@ -2021,12 +2021,12 @@ func TestEngineBubbleVotesThroughInvalidChain(t *testing.T) { // Now we are expecting a Chits message and we receive it for [blk3]. // This will cause the node to again request [blk3]. - require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blk3.ID(), blk1.ID(), blk3.ID(), blk3.Height())) + require.NoError(te.Chits(t.Context(), vdr, *queryRequestID, blk3.ID(), blk1.ID(), blk3.ID(), blk3.Height())) // Drop the re-request for [blk3] to cause the poll to terminate. The votes // should be bubbled through [blk3] despite the fact that it hasn't been // issued. - require.NoError(te.GetFailed(context.Background(), *reqVdr, *sendReqID)) + require.NoError(te.GetFailed(t.Context(), *reqVdr, *sendReqID)) // The vote should be bubbled through [blk3] and [blk2] such that [blk1] // gets marked as Accepted. @@ -2077,7 +2077,7 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { } // Give the engine the grandparent - require.NoError(te.Put(context.Background(), vdr, 0, grandParentBlk.BytesV)) + require.NoError(te.Put(t.Context(), vdr, 0, grandParentBlk.BytesV)) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(parentBlkA.BytesV, b) @@ -2087,7 +2087,7 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { // Give the node [parentBlkA]/[parentBlkB]. // When it's parsed we get [parentBlkA] (not [parentBlkB]). // [parentBlkA] fails verification and gets put into [te.nonVerifiedCache]. 
- require.NoError(te.Put(context.Background(), vdr, 0, parentBlkA.BytesV)) + require.NoError(te.Put(t.Context(), vdr, 0, parentBlkA.BytesV)) vm.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { require.Equal(parentBlkB.BytesV, b) @@ -2120,11 +2120,11 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { // When we fetch it using [GetBlockF] we get [parentBlkB]. // Note that [parentBlkB] doesn't fail verification and is issued into consensus. // This evicts [parentBlkA] from [te.nonVerifiedCache]. - require.NoError(te.Put(context.Background(), vdr, 0, parentBlkA.BytesV)) + require.NoError(te.Put(t.Context(), vdr, 0, parentBlkA.BytesV)) // Give 2 chits for [parentBlkA]/[parentBlkB] - require.NoError(te.Chits(context.Background(), vdr, *queryRequestAID, parentBlkB.IDV, grandParentBlk.IDV, parentBlkB.IDV, parentBlkB.Height())) - require.NoError(te.Chits(context.Background(), vdr, *queryRequestGPID, parentBlkB.IDV, grandParentBlk.IDV, parentBlkB.IDV, parentBlkB.Height())) + require.NoError(te.Chits(t.Context(), vdr, *queryRequestAID, parentBlkB.IDV, grandParentBlk.IDV, parentBlkB.IDV, parentBlkB.Height())) + require.NoError(te.Chits(t.Context(), vdr, *queryRequestGPID, parentBlkB.IDV, grandParentBlk.IDV, parentBlkB.IDV, parentBlkB.Height())) // Assert that the blocks' statuses are correct. // The evicted [parentBlkA] shouldn't be changed. @@ -2141,7 +2141,7 @@ func TestEngineBuildBlockWithCachedNonVerifiedParent(t *testing.T) { } // Should issue a new block and send a query for it. - require.NoError(te.Notify(context.Background(), common.PendingTxs)) + require.NoError(te.Notify(t.Context(), common.PendingTxs)) require.True(*sentQuery) } @@ -2189,7 +2189,7 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { te, err := New(engCfg) require.NoError(err) - require.NoError(te.Start(context.Background(), 0)) + require.NoError(te.Start(t.Context(), 0)) vm.LastAcceptedF = nil @@ -2204,7 +2204,7 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { } require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, blk, true, @@ -2231,11 +2231,11 @@ func TestEngineApplyAcceptedFrontierInQueryFailed(t *testing.T) { require.Equal(uint64(1), requestedHeight) } - require.NoError(te.Chits(context.Background(), vdr, *queryRequestID, blk.ID(), blk.ID(), blk.ID(), blk.Height())) + require.NoError(te.Chits(t.Context(), vdr, *queryRequestID, blk.ID(), blk.ID(), blk.ID(), blk.Height())) require.Equal(snowtest.Undecided, blk.Status) - require.NoError(te.QueryFailed(context.Background(), vdr, *queryRequestID)) + require.NoError(te.QueryFailed(t.Context(), vdr, *queryRequestID)) require.Equal(snowtest.Accepted, blk.Status) } @@ -2283,7 +2283,7 @@ func TestEngineRepollsMisconfiguredSubnet(t *testing.T) { te, err := New(engCfg) require.NoError(err) - require.NoError(te.Start(context.Background(), 0)) + require.NoError(te.Start(t.Context(), 0)) vm.LastAcceptedF = nil @@ -2292,7 +2292,7 @@ func TestEngineRepollsMisconfiguredSubnet(t *testing.T) { // Issue the block. This shouldn't call the sender, because creating the // poll should fail. require.NoError(te.issue( - context.Background(), + t.Context(), te.Ctx.NodeID, blk, true, @@ -2320,7 +2320,7 @@ func TestEngineRepollsMisconfiguredSubnet(t *testing.T) { // Because there is now a validator that can be queried, gossip should // trigger creation of the poll. 
- require.NoError(te.Gossip(context.Background())) + require.NoError(te.Gossip(t.Context())) require.True(queried) vm.GetBlockF = func(_ context.Context, id ids.ID) (snowman.Block, error) { @@ -2336,7 +2336,7 @@ func TestEngineRepollsMisconfiguredSubnet(t *testing.T) { // Voting for the block that was issued during the period when the validator // set was misconfigured should result in it being accepted successfully. - require.NoError(te.Chits(context.Background(), vdr, queryRequestID, blk.ID(), blk.ID(), blk.ID(), blk.Height())) + require.NoError(te.Chits(t.Context(), vdr, queryRequestID, blk.ID(), blk.ID(), blk.ID(), blk.Height())) require.Equal(snowtest.Accepted, blk.Status) } @@ -2450,7 +2450,7 @@ func TestEngineVoteStallRegression(t *testing.T) { engine, err := New(config) require.NoError(err) - require.NoError(engine.Start(context.Background(), 0)) + require.NoError(engine.Start(t.Context(), 0)) var pollRequestIDs []uint32 sender.SendPullQueryF = func(_ context.Context, polledNodeIDs set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { @@ -2460,7 +2460,7 @@ func TestEngineVoteStallRegression(t *testing.T) { // Issue block 0. require.NoError(engine.PushQuery( - context.Background(), + t.Context(), nodeID0, 0, acceptedChain[0].Bytes(), @@ -2470,7 +2470,7 @@ func TestEngineVoteStallRegression(t *testing.T) { // Issue block 1. require.NoError(engine.PushQuery( - context.Background(), + t.Context(), nodeID0, 0, acceptedChain[1].Bytes(), @@ -2480,7 +2480,7 @@ func TestEngineVoteStallRegression(t *testing.T) { // Issue block 2. require.NoError(engine.PushQuery( - context.Background(), + t.Context(), nodeID0, 0, acceptedChain[2].Bytes(), @@ -2490,7 +2490,7 @@ func TestEngineVoteStallRegression(t *testing.T) { // Apply votes in poll 0 to the blocks that will be accepted. require.NoError(engine.Chits( - context.Background(), + t.Context(), nodeID0, pollRequestIDs[0], acceptedChain[1].ID(), @@ -2499,7 +2499,7 @@ func TestEngineVoteStallRegression(t *testing.T) { acceptedChain[1].Height(), )) require.NoError(engine.Chits( - context.Background(), + t.Context(), nodeID1, pollRequestIDs[0], acceptedChain[2].ID(), @@ -2522,7 +2522,7 @@ func TestEngineVoteStallRegression(t *testing.T) { } require.NoError(engine.Chits( - context.Background(), + t.Context(), nodeID2, pollRequestIDs[0], rejectedChain[0].ID(), @@ -2535,7 +2535,7 @@ func TestEngineVoteStallRegression(t *testing.T) { // Attempt to issue block 4. This will register a dependency on block 3 for // the issuance of block 4. require.NoError(engine.PushQuery( - context.Background(), + t.Context(), nodeID0, 0, rejectedChain[1].Bytes(), @@ -2546,7 +2546,7 @@ func TestEngineVoteStallRegression(t *testing.T) { // Apply votes in poll 1 that will cause blocks 3 and 4 to be rejected once // poll 0 finishes. 
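A caveat that does not arise in these hunks but matters when applying the same substitution elsewhere: code running inside `t.Cleanup` sees an already-canceled `t.Context()`, so teardown work needs its own context. A speculative sketch using `context.WithoutCancel` (Go 1.21+); the `release` helper is hypothetical:

```go
package example

import (
	"context"
	"testing"
)

// release is a hypothetical teardown helper (deleting a temporary resource,
// say) that needs a live context even though it runs at cleanup time.
func release(ctx context.Context) error {
	return ctx.Err()
}

func TestCleanupUsesItsOwnContext(t *testing.T) {
	ctx := t.Context()

	t.Cleanup(func() {
		// ctx is already canceled here; derive a context that drops that
		// cancellation so the teardown call is not aborted.
		if err := release(context.WithoutCancel(ctx)); err != nil {
			t.Errorf("teardown failed: %v", err)
		}
	})
}
```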
require.NoError(engine.Chits( - context.Background(), + t.Context(), nodeID0, pollRequestIDs[1], acceptedChain[1].ID(), @@ -2555,7 +2555,7 @@ func TestEngineVoteStallRegression(t *testing.T) { acceptedChain[1].Height(), )) require.NoError(engine.Chits( - context.Background(), + t.Context(), nodeID1, pollRequestIDs[1], acceptedChain[2].ID(), @@ -2564,7 +2564,7 @@ func TestEngineVoteStallRegression(t *testing.T) { acceptedChain[2].Height(), )) require.NoError(engine.Chits( - context.Background(), + t.Context(), nodeID2, pollRequestIDs[1], rejectedChain[1].ID(), @@ -2585,7 +2585,7 @@ func TestEngineVoteStallRegression(t *testing.T) { ) require.NoError(engine.Put( - context.Background(), + t.Context(), getBlock3Request.NodeID, getBlock3Request.RequestID, rejectedChain[0].Bytes(), @@ -2599,7 +2599,7 @@ func TestEngineVoteStallRegression(t *testing.T) { for i := 2; i < len(pollRequestIDs); i++ { for _, nodeID := range nodeIDs { require.NoError(engine.Chits( - context.Background(), + t.Context(), nodeID, pollRequestIDs[i], acceptedChain[2].ID(), @@ -2672,7 +2672,7 @@ func TestEngineEarlyTerminateVoterRegression(t *testing.T) { engine, err := New(config) require.NoError(err) - require.NoError(engine.Start(context.Background(), 0)) + require.NoError(engine.Start(t.Context(), 0)) var pollRequestIDs []uint32 sender.SendPullQueryF = func(_ context.Context, polledNodeIDs set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { @@ -2688,7 +2688,7 @@ func TestEngineEarlyTerminateVoterRegression(t *testing.T) { // Issue block 0 to trigger poll 0. require.NoError(engine.PushQuery( - context.Background(), + t.Context(), nodeID, 0, chain[0].Bytes(), @@ -2707,7 +2707,7 @@ func TestEngineEarlyTerminateVoterRegression(t *testing.T) { // Vote for block 2 or block 1 in poll 0. This should trigger Get requests // for both block 2 and block 1. require.NoError(engine.Chits( - context.Background(), + t.Context(), nodeID, pollRequestIDs[0], chain[2].ID(), @@ -2722,7 +2722,7 @@ func TestEngineEarlyTerminateVoterRegression(t *testing.T) { // Mark the request for block 2 as failed. This should not cause the poll to // be applied as there is still an outstanding request for block 1. require.NoError(engine.GetFailed( - context.Background(), + t.Context(), nodeID, getRequestIDs[chain[2].ID()], )) @@ -2731,7 +2731,7 @@ func TestEngineEarlyTerminateVoterRegression(t *testing.T) { // Issue block 1. This should cause the poll to be applied to both block 0 // and block 1. require.NoError(engine.Put( - context.Background(), + t.Context(), nodeID, getRequestIDs[chain[1].ID()], chain[1].Bytes(), @@ -2824,7 +2824,7 @@ func TestEngineRegistersInvalidVoterDependencyRegression(t *testing.T) { engine, err := New(config) require.NoError(err) - require.NoError(engine.Start(context.Background(), 0)) + require.NoError(engine.Start(t.Context(), 0)) var pollRequestIDs []uint32 sender.SendPullQueryF = func(_ context.Context, polledNodeIDs set.Set[ids.NodeID], requestID uint32, _ ids.ID, _ uint64) { @@ -2834,7 +2834,7 @@ func TestEngineRegistersInvalidVoterDependencyRegression(t *testing.T) { // Issue rejectedChain[0] to consensus. require.NoError(engine.PushQuery( - context.Background(), + t.Context(), nodeID, 0, rejectedChain[0].Bytes(), @@ -2852,7 +2852,7 @@ func TestEngineRegistersInvalidVoterDependencyRegression(t *testing.T) { // Attempt to issue rejectedChain[1] which should add it to the invalid // block cache. 
require.NoError(engine.PushQuery( - context.Background(), + t.Context(), nodeID, 0, rejectedChain[1].Bytes(), @@ -2865,7 +2865,7 @@ func TestEngineRegistersInvalidVoterDependencyRegression(t *testing.T) { // Issue acceptedChain[0] to consensus. require.NoError(engine.PushQuery( - context.Background(), + t.Context(), nodeID, 0, acceptedChain[0].Bytes(), @@ -2885,7 +2885,7 @@ func TestEngineRegistersInvalidVoterDependencyRegression(t *testing.T) { // Accept acceptedChain[0] and reject rejectedChain[0]. require.NoError(engine.Chits( - context.Background(), + t.Context(), nodeID, pollRequestIDs[0], acceptedChain[0].ID(), @@ -2900,7 +2900,7 @@ func TestEngineRegistersInvalidVoterDependencyRegression(t *testing.T) { // Issue acceptedChain[1] to consensus. require.NoError(engine.PushQuery( - context.Background(), + t.Context(), nodeID, 0, acceptedChain[1].Bytes(), @@ -2911,7 +2911,7 @@ func TestEngineRegistersInvalidVoterDependencyRegression(t *testing.T) { // Vote for the transitively rejected rejectedChain[1]. This should cause a // repoll. require.NoError(engine.Chits( - context.Background(), + t.Context(), nodeID, pollRequestIDs[1], rejectedChain[1].ID(), @@ -2931,7 +2931,7 @@ func TestEngineRegistersInvalidVoterDependencyRegression(t *testing.T) { // Accept acceptedChain[1]. require.NoError(engine.Chits( - context.Background(), + t.Context(), nodeID, pollRequestIDs[2], acceptedChain[1].ID(), @@ -3096,7 +3096,7 @@ func TestShouldIssueBlock(t *testing.T) { blocks = slices.Concat(chain0Through3, chain4Through6, chain7Through10, chain11Through11) ) - require.NoError(t, blocks[0].Accept(context.Background())) + require.NoError(t, blocks[0].Accept(t.Context())) c := &snowman.Topological{Factory: snowball.SnowflakeFactory} require.NoError(t, c.Initialize( @@ -3229,7 +3229,7 @@ func TestEngineAbortQueryWhenInPartition(t *testing.T) { _, _, _, _, engine := setup(t, conf) // Gossip will cause a pull query if enough stake is connected - engine.sendQuery(context.Background(), ids.ID{}, nil, false) + engine.sendQuery(t.Context(), ids.ID{}, nil, false) require.Contains(buff.String(), errInsufficientStake) } @@ -3287,8 +3287,8 @@ func TestEngineAcceptedHeight(t *testing.T) { require.NoError(engCfg.Consensus.Initialize(ctx, params, blk1.ID(), blk1.Height(), time.Now())) - require.NoError(te.Chits(context.Background(), vdr0, 1, blk1.ID(), blk1.ID(), blk1.ID(), blk1.Height())) - require.NoError(te.Chits(context.Background(), vdr1, 2, blk2.ID(), blk2.ID(), blk2.ID(), blk2.Height())) + require.NoError(te.Chits(t.Context(), vdr0, 1, blk1.ID(), blk1.ID(), blk1.ID(), blk1.Height())) + require.NoError(te.Chits(t.Context(), vdr1, 2, blk2.ID(), blk2.ID(), blk2.ID(), blk2.Height())) eBlk1, h1, ok := te.acceptedFrontiers.LastAccepted(vdr0) require.True(ok) diff --git a/snow/engine/snowman/getter/getter_test.go b/snow/engine/snowman/getter/getter_test.go index 8855f1810693..c9c4c2d1fff7 100644 --- a/snow/engine/snowman/getter/getter_test.go +++ b/snow/engine/snowman/getter/getter_test.go @@ -71,7 +71,7 @@ func TestAcceptedFrontier(t *testing.T) { accepted = containerID } - require.NoError(bs.GetAcceptedFrontier(context.Background(), ids.EmptyNodeID, 0)) + require.NoError(bs.GetAcceptedFrontier(t.Context(), ids.EmptyNodeID, 0)) require.Equal(blkID, accepted) } @@ -80,7 +80,7 @@ func TestFilterAccepted(t *testing.T) { bs, vm, sender := newTest(t) acceptedBlk := snowmantest.BuildChild(snowmantest.Genesis) - require.NoError(acceptedBlk.Accept(context.Background())) + require.NoError(acceptedBlk.Accept(t.Context())) var ( 
allBlocks = []*snowmantest.Block{ @@ -109,7 +109,7 @@ func TestFilterAccepted(t *testing.T) { } blkIDs := set.Of(snowmantest.GenesisID, acceptedBlk.ID(), unknownBlkID) - require.NoError(bs.GetAccepted(context.Background(), ids.EmptyNodeID, 0, blkIDs)) + require.NoError(bs.GetAccepted(t.Context(), ids.EmptyNodeID, 0, blkIDs)) require.Len(accepted, 2) require.Contains(accepted, snowmantest.GenesisID) diff --git a/snow/engine/snowman/job/scheduler_test.go b/snow/engine/snowman/job/scheduler_test.go index 6d6e6848cc09..1d798a8589d6 100644 --- a/snow/engine/snowman/job/scheduler_test.go +++ b/snow/engine/snowman/job/scheduler_test.go @@ -48,12 +48,12 @@ func newSchedulerWithJob[T comparable]( abandoned []T, ) *Scheduler[T] { s := NewScheduler[T]() - require.NoError(t, s.Schedule(context.Background(), job, dependencies...)) + require.NoError(t, s.Schedule(t.Context(), job, dependencies...)) for _, d := range fulfilled { - require.NoError(t, s.Fulfill(context.Background(), d)) + require.NoError(t, s.Fulfill(t.Context(), d)) } for _, d := range abandoned { - require.NoError(t, s.Abandon(context.Background(), d)) + require.NoError(t, s.Abandon(t.Context(), d)) } return s } @@ -155,7 +155,7 @@ func TestScheduler_Schedule(t *testing.T) { // Reset the variable between tests userJob.reset() - require.NoError(test.scheduler.Schedule(context.Background(), userJob, test.dependencies...)) + require.NoError(test.scheduler.Schedule(t.Context(), userJob, test.dependencies...)) require.Equal(test.expectedNumDependencies, test.scheduler.NumDependencies()) require.Equal(test.expectedExecuted, userJob.calledExecute) require.Empty(userJob.fulfilled) @@ -242,7 +242,7 @@ func TestScheduler_Fulfill(t *testing.T) { // Reset the variable between tests userJob.reset() - require.NoError(test.scheduler.Fulfill(context.Background(), depToResolve)) + require.NoError(test.scheduler.Fulfill(t.Context(), depToResolve)) require.Equal(test.expectedExecuted, userJob.calledExecute) require.Equal(test.expectedFulfilled, userJob.fulfilled) require.Equal(test.expectedAbandoned, userJob.abandoned) @@ -328,7 +328,7 @@ func TestScheduler_Abandon(t *testing.T) { // Reset the variable between tests userJob.reset() - require.NoError(test.scheduler.Abandon(context.Background(), depToResolve)) + require.NoError(test.scheduler.Abandon(t.Context(), depToResolve)) require.Equal(test.expectedExecuted, userJob.calledExecute) require.Equal(test.expectedFulfilled, userJob.fulfilled) require.Equal(test.expectedAbandoned, userJob.abandoned) diff --git a/snow/engine/snowman/syncer/state_syncer_test.go b/snow/engine/snowman/syncer/state_syncer_test.go index 7a264e00ea79..0c8c68c2c564 100644 --- a/snow/engine/snowman/syncer/state_syncer_test.go +++ b/snow/engine/snowman/syncer/state_syncer_test.go @@ -62,7 +62,7 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { return nil }) - enabled, err := syncer.IsEnabled(context.Background()) + enabled, err := syncer.IsEnabled(t.Context()) require.NoError(err) require.False(enabled) @@ -94,7 +94,7 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { fullVM.StateSyncEnabledF = func(context.Context) (bool, error) { return false, nil } - enabled, err = syncer.IsEnabled(context.Background()) + enabled, err = syncer.IsEnabled(t.Context()) require.NoError(err) require.False(enabled) @@ -102,7 +102,7 @@ func TestStateSyncerIsEnabledIfVMSupportsStateSyncing(t *testing.T) { fullVM.StateSyncEnabledF = func(context.Context) (bool, error) { return true, nil } - enabled, err = 
syncer.IsEnabled(context.Background()) + enabled, err = syncer.IsEnabled(t.Context()) require.NoError(err) require.True(enabled) } @@ -128,25 +128,25 @@ func TestStateSyncingStartsOnlyIfEnoughStakeIsConnected(t *testing.T) { // attempt starting bootstrapper with no stake connected. Bootstrapper should stall. require.False(startup.ShouldStart()) - require.NoError(syncer.Start(context.Background(), startReqID)) + require.NoError(syncer.Start(t.Context(), startReqID)) require.False(syncer.started) // attempt starting bootstrapper with not enough stake connected. Bootstrapper should stall. vdr0 := ids.GenerateTestNodeID() require.NoError(beacons.AddStaker(ctx.SubnetID, vdr0, nil, ids.Empty, startupAlpha/2)) - require.NoError(syncer.Connected(context.Background(), vdr0, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), vdr0, version.CurrentApp)) require.False(startup.ShouldStart()) - require.NoError(syncer.Start(context.Background(), startReqID)) + require.NoError(syncer.Start(t.Context(), startReqID)) require.False(syncer.started) // finally attempt starting bootstrapper with enough stake connected. Frontiers should be requested. vdr := ids.GenerateTestNodeID() require.NoError(beacons.AddStaker(ctx.SubnetID, vdr, nil, ids.Empty, startupAlpha)) - require.NoError(syncer.Connected(context.Background(), vdr, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), vdr, version.CurrentApp)) require.True(startup.ShouldStart()) - require.NoError(syncer.Start(context.Background(), startReqID)) + require.NoError(syncer.Start(t.Context(), startReqID)) require.True(syncer.started) } @@ -178,7 +178,7 @@ func TestStateSyncLocalSummaryIsIncludedAmongFrontiersIfAvailable(t *testing.T) // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } require.Equal(localSummary, syncer.locallyAvailableSummary) @@ -211,7 +211,7 @@ func TestStateSyncNotFoundOngoingSummaryIsNotIncludedAmongFrontiers(t *testing.T // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } require.Nil(syncer.locallyAvailableSummary) @@ -243,7 +243,7 @@ func TestBeaconsAreReachedForFrontiersUponStartup(t *testing.T) { // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } // check that vdrs are reached out for frontiers @@ -284,7 +284,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) @@ -307,7 +307,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // check a response with wrong request ID is dropped require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), responsiveBeaconID, math.MaxInt32, 
summaryBytes, @@ -318,7 +318,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // check a response from unsolicited node is dropped unsolicitedNodeID := ids.GenerateTestNodeID() require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), unsolicitedNodeID, responsiveBeaconReqID, summaryBytes, @@ -327,7 +327,7 @@ func TestUnRequestedStateSummaryFrontiersAreDropped(t *testing.T) { // check a valid response is duly recorded require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), responsiveBeaconID, responsiveBeaconReqID, summaryBytes, @@ -374,7 +374,7 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) @@ -396,7 +396,7 @@ func TestMalformedStateSummaryFrontiersAreDropped(t *testing.T) { // response is valid, but invalid summary is not recorded require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), responsiveBeaconID, responsiveBeaconReqID, summary, @@ -443,7 +443,7 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } initiallyReachedOutBeaconsSize := len(contactedFrontiersProviders) @@ -462,7 +462,7 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { // assume timeout is reached and vdrs is marked as unresponsive require.NoError(syncer.GetStateSummaryFrontierFailed( - context.Background(), + t.Context(), unresponsiveBeaconID, unresponsiveBeaconReqID, )) @@ -489,7 +489,7 @@ func TestLateResponsesFromUnresponsiveFrontiersAreNotRecorded(t *testing.T) { // check a valid but late response is not recorded require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), unresponsiveBeaconID, unresponsiveBeaconReqID, summaryBytes, @@ -551,7 +551,7 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -565,14 +565,14 @@ func TestStateSyncIsRestartedIfTooManyFrontierSeedersTimeout(t *testing.T) { if maxResponses > 0 { require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), beaconID, reqID, summaryBytes, )) } else { require.NoError(syncer.GetStateSummaryFrontierFailed( - context.Background(), + t.Context(), beaconID, reqID, )) @@ -634,7 +634,7 @@ func TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -645,7 +645,7 @@ func 
TestVoteRequestsAreSentAsAllFrontierBeaconsResponded(t *testing.T) { reqID := contactedFrontiersProviders[beaconID] require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), beaconID, reqID, summaryBytes, @@ -704,7 +704,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -715,7 +715,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { reqID := contactedFrontiersProviders[beaconID] require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), beaconID, reqID, summaryBytes, @@ -737,7 +737,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // check a response with wrong request ID is dropped require.NoError(syncer.AcceptedStateSummary( - context.Background(), + t.Context(), responsiveVoterID, math.MaxInt32, set.Of(summaryID), @@ -750,7 +750,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // check a response from unsolicited node is dropped unsolicitedVoterID := ids.GenerateTestNodeID() require.NoError(syncer.AcceptedStateSummary( - context.Background(), + t.Context(), unsolicitedVoterID, responsiveVoterReqID, set.Of(summaryID), @@ -759,7 +759,7 @@ func TestUnRequestedVotesAreDropped(t *testing.T) { // check a valid response is duly recorded require.NoError(syncer.AcceptedStateSummary( - context.Background(), + t.Context(), responsiveVoterID, responsiveVoterReqID, set.Of(summaryID), @@ -821,7 +821,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -832,7 +832,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { reqID := contactedFrontiersProviders[beaconID] require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), beaconID, reqID, summaryBytes, @@ -854,7 +854,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { // check a response for unRequested summary is dropped require.NoError(syncer.AcceptedStateSummary( - context.Background(), + t.Context(), responsiveVoterID, responsiveVoterReqID, set.Of(unknownSummaryID), @@ -865,7 +865,7 @@ func TestVotesForUnknownSummariesAreDropped(t *testing.T) { // check that responsiveVoter cannot cast another vote require.NotContains(syncer.pendingSeeders, responsiveVoterID) require.NoError(syncer.AcceptedStateSummary( - context.Background(), + t.Context(), responsiveVoterID, responsiveVoterReqID, set.Of(summaryID), @@ -941,7 +941,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -957,14 +957,14 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { if reachedSeeders%2 == 0 { require.NoError(syncer.StateSummaryFrontier( - context.Background(), + 
t.Context(), beaconID, reqID, summaryBytes, )) } else { require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), beaconID, reqID, minoritySummaryBytes, @@ -994,7 +994,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { switch { case cumulatedWeight < alpha/2: require.NoError(syncer.AcceptedStateSummary( - context.Background(), + t.Context(), voterID, reqID, set.Of(summaryID, minoritySummaryID), @@ -1003,7 +1003,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { case cumulatedWeight < alpha: require.NoError(syncer.AcceptedStateSummary( - context.Background(), + t.Context(), voterID, reqID, set.Of(summaryID), @@ -1012,7 +1012,7 @@ func TestStateSummaryIsPassedToVMAsMajorityOfVotesIsCastedForIt(t *testing.T) { default: require.NoError(syncer.GetAcceptedStateSummaryFailed( - context.Background(), + t.Context(), voterID, reqID, )) @@ -1072,7 +1072,7 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -1083,7 +1083,7 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { reqID := contactedFrontiersProviders[beaconID] require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), beaconID, reqID, summaryBytes, @@ -1107,14 +1107,14 @@ func TestVotingIsRestartedIfMajorityIsNotReachedDueToTimeouts(t *testing.T) { // vdr carries the largest weight by far. Make sure it fails if timedOutWeight <= alpha { require.NoError(syncer.GetAcceptedStateSummaryFailed( - context.Background(), + t.Context(), voterID, reqID, )) timedOutWeight += beacons.GetWeight(ctx.SubnetID, voterID) } else { require.NoError(syncer.AcceptedStateSummary( - context.Background(), + t.Context(), voterID, reqID, set.Of(summaryID), @@ -1192,7 +1192,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. // Connect enough stake to start syncer for _, nodeID := range beacons.GetValidatorIDs(ctx.SubnetID) { - require.NoError(syncer.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(syncer.Connected(t.Context(), nodeID, version.CurrentApp)) } require.NotEmpty(syncer.pendingSeeders) @@ -1208,14 +1208,14 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. if reachedSeeders%2 == 0 { require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), beaconID, reqID, summaryBytes, )) } else { require.NoError(syncer.StateSummaryFrontier( - context.Background(), + t.Context(), beaconID, reqID, minoritySummaryBytes, @@ -1252,7 +1252,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. switch { case votingWeightStake < alpha/2: require.NoError(syncer.AcceptedStateSummary( - context.Background(), + t.Context(), voterID, reqID, set.Of(minoritySummary1.ID(), minoritySummary2.ID()), @@ -1261,7 +1261,7 @@ func TestStateSyncIsStoppedIfEnoughVotesAreCastedWithNoClearMajority(t *testing. 
default: require.NoError(syncer.AcceptedStateSummary( - context.Background(), + t.Context(), voterID, reqID, set.Of(ids.ID{'u', 'n', 'k', 'n', 'o', 'w', 'n', 'I', 'D'}), @@ -1299,6 +1299,6 @@ func TestStateSyncIsDoneOnceVMNotifies(t *testing.T) { } // Any Put response before StateSyncDone is received from VM is dropped - require.NoError(syncer.Notify(context.Background(), common.StateSyncDone)) + require.NoError(syncer.Notify(t.Context(), common.StateSyncDone)) require.True(stateSyncFullyDone) } diff --git a/snow/networking/handler/handler_test.go b/snow/networking/handler/handler_test.go index cc45b364df48..d7114f17f80e 100644 --- a/snow/networking/handler/handler_test.go +++ b/snow/networking/handler/handler_test.go @@ -122,7 +122,7 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { InboundMessage: message.InboundGetAcceptedFrontier(chainID, reqID, 0*time.Second, nodeID), EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } - handler.Push(context.Background(), msg) + handler.Push(t.Context(), msg) currentTime := time.Now().Add(time.Second) handler.clock.Set(currentTime) @@ -132,13 +132,13 @@ func TestHandlerDropsTimedOutMessages(t *testing.T) { InboundMessage: message.InboundGetAccepted(chainID, reqID, 1*time.Second, nil, nodeID), EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } - handler.Push(context.Background(), msg) + handler.Push(t.Context(), msg) bootstrapper.StartF = func(context.Context, uint32) error { return nil } - handler.Start(context.Background(), false) + handler.Start(t.Context(), false) ticker := time.NewTicker(time.Second) defer ticker.Stop() @@ -240,7 +240,7 @@ func TestHandlerClosesOnError(t *testing.T) { return nil } - handler.Start(context.Background(), false) + handler.Start(t.Context(), false) nodeID := ids.EmptyNodeID reqID := uint32(1) @@ -249,7 +249,7 @@ func TestHandlerClosesOnError(t *testing.T) { InboundMessage: message.InboundGetAcceptedFrontier(ids.Empty, reqID, deadline, nodeID), EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } - handler.Push(context.Background(), msg) + handler.Push(t.Context(), msg) ticker := time.NewTicker(time.Second) select { @@ -333,7 +333,7 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { return nil } - handler.Start(context.Background(), false) + handler.Start(t.Context(), false) nodeID := ids.EmptyNodeID chainID := ids.Empty @@ -342,7 +342,7 @@ func TestHandlerDropsGossipDuringBootstrapping(t *testing.T) { InboundMessage: message.InternalGetFailed(nodeID, chainID, reqID), EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } - handler.Push(context.Background(), inInboundMessage) + handler.Push(t.Context(), inInboundMessage) ticker := time.NewTicker(time.Second) select { @@ -436,7 +436,7 @@ func TestHandlerDispatchInternal(t *testing.T) { return nil } - handler.Start(context.Background(), false) + handler.Start(t.Context(), false) messages <- common.PendingTxs select { case msg := <-notified: @@ -612,8 +612,8 @@ func TestDynamicEngineTypeDispatch(t *testing.T) { return nil } - handler.Start(context.Background(), false) - handler.Push(context.Background(), Message{ + handler.Start(t.Context(), false) + handler.Push(t.Context(), Message{ InboundMessage: message.InboundChits( ids.Empty, uint32(0), @@ -677,9 +677,9 @@ func TestHandlerStartError(t *testing.T) { Type: p2ppb.EngineType_ENGINE_TYPE_CHAIN, State: snow.Initializing, }) - handler.Start(context.Background(), false) + handler.Start(t.Context(), false) - _, err = handler.AwaitStopped(context.Background()) + _, err = 
handler.AwaitStopped(t.Context()) require.NoError(err) } diff --git a/snow/networking/handler/health_test.go b/snow/networking/handler/health_test.go index 3f77940db856..dc9a38cc3f91 100644 --- a/snow/networking/handler/health_test.go +++ b/snow/networking/handler/health_test.go @@ -130,7 +130,7 @@ func TestHealthCheckSubnet(t *testing.T) { return nil } - handlerIntf.Start(context.Background(), false) + handlerIntf.Start(t.Context(), false) testVdrCount := 4 vdrIDs := set.NewSet[ids.NodeID](testVdrCount) @@ -142,9 +142,9 @@ func TestHealthCheckSubnet(t *testing.T) { } vdrIDsList := vdrIDs.List() for index, nodeID := range vdrIDsList { - require.NoError(peerTracker.Connected(context.Background(), nodeID, nil)) + require.NoError(peerTracker.Connected(t.Context(), nodeID, nil)) - details, err := handlerIntf.HealthCheck(context.Background()) + details, err := handlerIntf.HealthCheck(t.Context()) expectedPercentConnected := float64(index+1) / float64(testVdrCount) conf := sb.Config() minPercentConnected := conf.ConsensusParameters.MinPercentConnectedHealthy() diff --git a/snow/networking/handler/message_queue_test.go b/snow/networking/handler/message_queue_test.go index 2dad2a809368..9b72d21de442 100644 --- a/snow/networking/handler/message_queue_test.go +++ b/snow/networking/handler/message_queue_test.go @@ -4,7 +4,6 @@ package handler import ( - "context" "testing" "time" @@ -57,7 +56,7 @@ func TestQueue(t *testing.T) { // Push then pop should work regardless of usage when there are no other // messages on [u.msgs] cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.1).Times(1) - u.Push(context.Background(), msg1) + u.Push(t.Context(), msg1) require.Equal(1, u.nodeToUnprocessedMsgs[vdr1ID]) require.Equal(1, u.Len()) _, gotMsg1, ok := u.Pop() @@ -67,7 +66,7 @@ func TestQueue(t *testing.T) { require.Equal(msg1, gotMsg1) cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.0).Times(1) - u.Push(context.Background(), msg1) + u.Push(t.Context(), msg1) require.Equal(1, u.nodeToUnprocessedMsgs[vdr1ID]) require.Equal(1, u.Len()) _, gotMsg1, ok = u.Pop() @@ -77,7 +76,7 @@ func TestQueue(t *testing.T) { require.Equal(msg1, gotMsg1) cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(1.0).Times(1) - u.Push(context.Background(), msg1) + u.Push(t.Context(), msg1) require.Equal(1, u.nodeToUnprocessedMsgs[vdr1ID]) require.Equal(1, u.Len()) _, gotMsg1, ok = u.Pop() @@ -87,7 +86,7 @@ func TestQueue(t *testing.T) { require.Equal(msg1, gotMsg1) cpuTracker.EXPECT().Usage(vdr1ID, gomock.Any()).Return(0.0).Times(1) - u.Push(context.Background(), msg1) + u.Push(t.Context(), msg1) require.Equal(1, u.nodeToUnprocessedMsgs[vdr1ID]) require.Equal(1, u.Len()) _, gotMsg1, ok = u.Pop() @@ -97,7 +96,7 @@ func TestQueue(t *testing.T) { require.Equal(msg1, gotMsg1) // Push msg1 from vdr1ID - u.Push(context.Background(), msg1) + u.Push(t.Context(), msg1) require.Equal(1, u.nodeToUnprocessedMsgs[vdr1ID]) require.Equal(1, u.Len()) @@ -114,7 +113,7 @@ func TestQueue(t *testing.T) { } // Push msg2 from vdr2ID - u.Push(context.Background(), msg2) + u.Push(t.Context(), msg2) require.Equal(2, u.Len()) require.Equal(1, u.nodeToUnprocessedMsgs[vdr2ID]) // Set vdr1's usage to 99% and vdr2's to .01 @@ -142,9 +141,9 @@ func TestQueue(t *testing.T) { InboundMessage: message.InboundPushQuery(ids.Empty, 0, 0, nil, 0, nonVdrNodeID2), EngineType: p2p.EngineType_ENGINE_TYPE_UNSPECIFIED, } - u.Push(context.Background(), msg3) - u.Push(context.Background(), msg4) - u.Push(context.Background(), msg1) + u.Push(t.Context(), msg3) + 
u.Push(t.Context(), msg4) + u.Push(t.Context(), msg1) require.Equal(3, u.Len()) // msg1 should get popped first because nonVdrNodeID1 and nonVdrNodeID2 diff --git a/snow/networking/router/chain_router_test.go b/snow/networking/router/chain_router_test.go index 30e57a572338..7cb435789f47 100644 --- a/snow/networking/router/chain_router_test.go +++ b/snow/networking/router/chain_router_test.go @@ -171,16 +171,16 @@ func TestShutdown(t *testing.T) { State: snow.NormalOp, // assumed bootstrapping is done }) - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) bootstrapper.StartF = func(context.Context, uint32) error { return nil } - h.Start(context.Background(), false) + h.Start(t.Context(), false) - chainRouter.Shutdown(context.Background()) + chainRouter.Shutdown(t.Context()) - ctx, cancel := context.WithTimeout(context.Background(), 250*time.Millisecond) + ctx, cancel := context.WithTimeout(t.Context(), 250*time.Millisecond) defer cancel() select { @@ -289,13 +289,13 @@ func TestConnectedAfterShutdownErrorLogRegression(t *testing.T) { State: snow.NormalOp, // assumed bootstrapping is done }) - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) - h.Start(context.Background(), false) + h.Start(t.Context(), false) - chainRouter.Shutdown(context.Background()) + chainRouter.Shutdown(t.Context()) - shutdownDuration, err := h.AwaitStopped(context.Background()) + shutdownDuration, err := h.AwaitStopped(t.Context()) require.NoError(err) require.GreaterOrEqual(shutdownDuration, time.Duration(0)) @@ -431,12 +431,12 @@ func TestShutdownTimesOut(t *testing.T) { State: snow.NormalOp, // assumed bootstrapping is done }) - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) bootstrapper.StartF = func(context.Context, uint32) error { return nil } - h.Start(context.Background(), false) + h.Start(t.Context(), false) shutdownFinished := make(chan struct{}, 1) @@ -446,11 +446,11 @@ func TestShutdownTimesOut(t *testing.T) { InboundMessage: message.InboundPullQuery(chainID, 1, time.Hour, ids.GenerateTestID(), 0, nodeID), EngineType: p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, } - h.Push(context.Background(), msg) + h.Push(t.Context(), msg) time.Sleep(50 * time.Millisecond) // Pause to ensure message gets processed - chainRouter.Shutdown(context.Background()) + chainRouter.Shutdown(t.Context()) shutdownFinished <- struct{}{} }() @@ -498,7 +498,7 @@ func TestRouterTimeout(t *testing.T) { HealthConfig{}, prometheus.NewRegistry(), )) - defer chainRouter.Shutdown(context.Background()) + defer chainRouter.Shutdown(t.Context()) // Create bootstrapper, engine and handler var ( @@ -613,7 +613,7 @@ func TestRouterTimeout(t *testing.T) { State: snow.Bootstrapping, // assumed bootstrapping is ongoing }) - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) bootstrapper.StartF = func(context.Context, uint32) error { return nil @@ -630,14 +630,14 @@ func TestRouterTimeout(t *testing.T) { Consensus: nil, }, }) - h.Start(context.Background(), false) + h.Start(t.Context(), false) nodeID := ids.GenerateTestNodeID() requestID := uint32(0) { wg.Add(1) chainRouter.RegisterRequest( - context.Background(), + t.Context(), nodeID, ctx.ChainID, requestID, @@ -655,7 +655,7 @@ func TestRouterTimeout(t *testing.T) { wg.Add(1) requestID++ chainRouter.RegisterRequest( - context.Background(), + t.Context(), nodeID, ctx.ChainID, requestID, @@ -673,7 +673,7 @@ func TestRouterTimeout(t *testing.T) { wg.Add(1) 
requestID++ chainRouter.RegisterRequest( - context.Background(), + t.Context(), nodeID, ctx.ChainID, requestID, @@ -691,7 +691,7 @@ func TestRouterTimeout(t *testing.T) { wg.Add(1) requestID++ chainRouter.RegisterRequest( - context.Background(), + t.Context(), nodeID, ctx.ChainID, requestID, @@ -709,7 +709,7 @@ func TestRouterTimeout(t *testing.T) { wg.Add(1) requestID++ chainRouter.RegisterRequest( - context.Background(), + t.Context(), nodeID, ctx.ChainID, requestID, @@ -728,7 +728,7 @@ func TestRouterTimeout(t *testing.T) { wg.Add(1) requestID++ chainRouter.RegisterRequest( - context.Background(), + t.Context(), nodeID, ctx.ChainID, requestID, @@ -746,7 +746,7 @@ func TestRouterTimeout(t *testing.T) { wg.Add(1) requestID++ chainRouter.RegisterRequest( - context.Background(), + t.Context(), nodeID, ctx.ChainID, requestID, @@ -764,7 +764,7 @@ func TestRouterTimeout(t *testing.T) { wg.Add(1) requestID++ chainRouter.RegisterRequest( - context.Background(), + t.Context(), nodeID, ctx.ChainID, requestID, @@ -831,7 +831,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { HealthConfig{}, prometheus.NewRegistry(), )) - defer chainRouter.Shutdown(context.Background()) + defer chainRouter.Shutdown(t.Context()) h := handlermock.NewHandler(ctrl) @@ -843,7 +843,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { h.EXPECT().AwaitStopped(gomock.Any()).AnyTimes() h.EXPECT().Push(gomock.Any(), gomock.Any()).Times(1) - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) h.EXPECT().ShouldHandle(gomock.Any()).Return(true).AnyTimes() @@ -851,7 +851,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { requestID := uint32(0) { chainRouter.RegisterRequest( - context.Background(), + t.Context(), nodeID, ctx.ChainID, requestID, @@ -873,13 +873,13 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { h.EXPECT().Push(gomock.Any(), gomock.Any()).Do(func(_ context.Context, msg handler.Message) { require.Equal(p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, msg.EngineType) }) - chainRouter.HandleInbound(context.Background(), msg) + chainRouter.HandleInbound(t.Context(), msg) } { requestID++ chainRouter.RegisterRequest( - context.Background(), + t.Context(), nodeID, ctx.ChainID, requestID, @@ -901,7 +901,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { h.EXPECT().Push(gomock.Any(), gomock.Any()).Do(func(_ context.Context, msg handler.Message) { require.Equal(engineType, msg.EngineType) }) - chainRouter.HandleInbound(context.Background(), msg) + chainRouter.HandleInbound(t.Context(), msg) } { @@ -918,7 +918,7 @@ func TestRouterHonorsRequestedEngine(t *testing.T) { h.EXPECT().Push(gomock.Any(), gomock.Any()).Do(func(_ context.Context, msg handler.Message) { require.Equal(p2ppb.EngineType_ENGINE_TYPE_UNSPECIFIED, msg.EngineType) }) - chainRouter.HandleInbound(context.Background(), msg) + chainRouter.HandleInbound(t.Context(), msg) } chainRouter.lock.Lock() @@ -986,7 +986,7 @@ func TestRouterClearTimeouts(t *testing.T) { chainRouter, _ := newChainRouterTest(t) chainRouter.RegisterRequest( - context.Background(), + t.Context(), ids.EmptyNodeID, ids.Empty, requestID, @@ -995,7 +995,7 @@ func TestRouterClearTimeouts(t *testing.T) { engineType, ) - chainRouter.HandleInbound(context.Background(), tt.responseMsg) + chainRouter.HandleInbound(t.Context(), tt.responseMsg) chainRouter.lock.Lock() require.Zero(chainRouter.timedRequests.Len()) @@ -1040,7 +1040,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { HealthConfig{}, prometheus.NewRegistry(), )) - defer 
chainRouter.Shutdown(context.Background()) + defer chainRouter.Shutdown(t.Context()) // Create bootstrapper, engine and handler calledF := false @@ -1122,12 +1122,12 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { }, }) - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) bootstrapper.StartF = func(context.Context, uint32) error { return nil } - h.Start(context.Background(), false) + h.Start(t.Context(), false) var inMsg message.InboundMessage dummyContainerID := ids.GenerateTestID() @@ -1145,7 +1145,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { 0, nID, ) - chainRouter.HandleInbound(context.Background(), inMsg) + chainRouter.HandleInbound(t.Context(), inMsg) require.False(calledF) // should not be called @@ -1161,7 +1161,7 @@ func TestValidatorOnlyMessageDrops(t *testing.T) { vID, ) wg.Add(1) - chainRouter.HandleInbound(context.Background(), inMsg) + chainRouter.HandleInbound(t.Context(), inMsg) wg.Wait() require.True(calledF) // should be called since this is a validator request @@ -1203,7 +1203,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { HealthConfig{}, prometheus.NewRegistry(), )) - defer chainRouter.Shutdown(context.Background()) + defer chainRouter.Shutdown(t.Context()) // Create bootstrapper, engine and handler calledF := false @@ -1283,12 +1283,12 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { }, }) - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) bootstrapper.StartF = func(context.Context, uint32) error { return nil } - h.Start(context.Background(), false) + h.Start(t.Context(), false) var inMsg message.InboundMessage dummyContainerID := ids.GenerateTestID() @@ -1306,7 +1306,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { 0, nID, ) - chainRouter.HandleInbound(context.Background(), inMsg) + chainRouter.HandleInbound(t.Context(), inMsg) require.False(calledF) // should not be called for unallowed node ID @@ -1322,7 +1322,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { allowedID, ) wg.Add(1) - chainRouter.HandleInbound(context.Background(), inMsg) + chainRouter.HandleInbound(t.Context(), inMsg) wg.Wait() require.True(calledF) // should be called since this is a allowed node request @@ -1339,7 +1339,7 @@ func TestValidatorOnlyAllowedNodeMessageDrops(t *testing.T) { vID, ) wg.Add(1) - chainRouter.HandleInbound(context.Background(), inMsg) + chainRouter.HandleInbound(t.Context(), inMsg) wg.Wait() require.True(calledF) // should be called since this is a validator request @@ -1418,7 +1418,7 @@ func TestAppRequest(t *testing.T) { } } - ctx := context.Background() + ctx := t.Context() chainRouter.RegisterRequest(ctx, ids.EmptyNodeID, ids.Empty, wantRequestID, tt.responseOp, tt.timeoutMsg, engineType) chainRouter.lock.Lock() require.Equal(1, chainRouter.timedRequests.Len()) @@ -1537,17 +1537,17 @@ func newChainRouterTest(t *testing.T) (*ChainRouter, *enginetest.Engine) { State: snow.NormalOp, // assumed bootstrapping is done }) - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) bootstrapper.StartF = func(context.Context, uint32) error { return nil } - h.Start(context.Background(), false) + h.Start(t.Context(), false) t.Cleanup(func() { tm.Stop() - chainRouter.Shutdown(context.Background()) + chainRouter.Shutdown(t.Context()) }) return chainRouter, engine @@ -1571,7 +1571,7 @@ func TestHandleSimplexMessage(t *testing.T) { HealthConfig{}, prometheus.NewRegistry(), )) - defer 
chainRouter.Shutdown(context.Background()) + defer chainRouter.Shutdown(t.Context()) chainRouter.log = log testID := ids.GenerateTestID() @@ -1606,9 +1606,9 @@ func TestHandleSimplexMessage(t *testing.T) { } }).AnyTimes() - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) h.EXPECT().ShouldHandle(gomock.Any()).Return(true).Times(1) - chainRouter.HandleInbound(context.Background(), inboundMsg) + chainRouter.HandleInbound(t.Context(), inboundMsg) require.True(t, receivedMsg) } diff --git a/snow/networking/sender/sender_test.go b/snow/networking/sender/sender_test.go index 88573d1ec890..ac8952ac3b0c 100644 --- a/snow/networking/sender/sender_test.go +++ b/snow/networking/sender/sender_test.go @@ -176,12 +176,12 @@ func TestTimeout(t *testing.T) { State: snow.Bootstrapping, // assumed bootstrap is ongoing }) - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) bootstrapper.StartF = func(context.Context, uint32) error { return nil } - h.Start(context.Background(), false) + h.Start(t.Context(), false) var ( wg = sync.WaitGroup{} @@ -193,7 +193,7 @@ func TestTimeout(t *testing.T) { failedChains = set.Set[ids.ID]{} ) - cancelledCtx, cancel := context.WithCancel(context.Background()) + cancelledCtx, cancel := context.WithCancel(t.Context()) cancel() failed := func(ctx context.Context, nodeID ids.NodeID, _ uint32) error { @@ -445,18 +445,18 @@ func TestReliableMessages(t *testing.T) { State: snow.Bootstrapping, // assumed bootstrap is ongoing }) - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) bootstrapper.StartF = func(context.Context, uint32) error { return nil } - h.Start(context.Background(), false) + h.Start(t.Context(), false) go func() { for i := 0; i < queriesToSend; i++ { vdrIDs := set.Of(ids.BuildTestNodeID([]byte{1})) - sender.SendPullQuery(context.Background(), vdrIDs, uint32(i), ids.Empty, 0) + sender.SendPullQuery(t.Context(), vdrIDs, uint32(i), ids.Empty, 0) time.Sleep(time.Duration(rand.Float64() * float64(time.Microsecond))) // #nosec G404 } }() @@ -606,12 +606,12 @@ func TestReliableMessagesToMyself(t *testing.T) { State: snow.Bootstrapping, // assumed bootstrap is ongoing }) - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) bootstrapper.StartF = func(context.Context, uint32) error { return nil } - h.Start(context.Background(), false) + h.Start(t.Context(), false) go func() { for i := 0; i < queriesToSend; i++ { @@ -619,7 +619,7 @@ func TestReliableMessagesToMyself(t *testing.T) { // because they don't exist. 
This will almost immediately trigger // a query failed message vdrIDs := set.Of(ids.GenerateTestNodeID()) - sender.SendPullQuery(context.Background(), vdrIDs, uint32(i), ids.Empty, 0) + sender.SendPullQuery(t.Context(), vdrIDs, uint32(i), ids.Empty, 0) } }() @@ -690,7 +690,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { }, sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { sender.SendGetStateSummaryFrontier( - context.Background(), + t.Context(), nodeIDs, requestID, ) @@ -734,7 +734,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { ).Return(set.Of(successNodeID)) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { - sender.SendGetAcceptedStateSummary(context.Background(), nodeIDs, requestID, heights) + sender.SendGetAcceptedStateSummary(t.Context(), nodeIDs, requestID, heights) }, }, { @@ -773,7 +773,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { ).Return(set.Of(successNodeID)) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { - sender.SendGetAcceptedFrontier(context.Background(), nodeIDs, requestID) + sender.SendGetAcceptedFrontier(t.Context(), nodeIDs, requestID) }, }, { @@ -813,7 +813,7 @@ func TestSender_Bootstrap_Requests(t *testing.T) { ).Return(set.Of(successNodeID)) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeIDs set.Set[ids.NodeID]) { - sender.SendGetAccepted(context.Background(), nodeIDs, requestID, containerIDs) + sender.SendGetAccepted(t.Context(), nodeIDs, requestID, containerIDs) }, }, } @@ -933,7 +933,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { ).Return(nil) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { - sender.SendStateSummaryFrontier(context.Background(), nodeID, requestID, summary) + sender.SendStateSummaryFrontier(t.Context(), nodeID, requestID, summary) }, }, { @@ -965,7 +965,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { ).Return(nil) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { - sender.SendAcceptedStateSummary(context.Background(), nodeID, requestID, summaryIDs) + sender.SendAcceptedStateSummary(t.Context(), nodeID, requestID, summaryIDs) }, }, { @@ -995,7 +995,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { ).Return(nil) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { - sender.SendAcceptedFrontier(context.Background(), nodeID, requestID, summaryIDs[0]) + sender.SendAcceptedFrontier(t.Context(), nodeID, requestID, summaryIDs[0]) }, }, { @@ -1027,7 +1027,7 @@ func TestSender_Bootstrap_Responses(t *testing.T) { ).Return(nil) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { - sender.SendAccepted(context.Background(), nodeID, requestID, summaryIDs) + sender.SendAccepted(t.Context(), nodeID, requestID, summaryIDs) }, }, } @@ -1146,7 +1146,7 @@ func TestSender_Single_Request(t *testing.T) { ).Return(sentTo) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { - sender.SendGetAncestors(context.Background(), nodeID, requestID, containerID) + sender.SendGetAncestors(t.Context(), nodeID, requestID, containerID) }, expectedEngineType: engineType, }, @@ -1186,7 +1186,7 @@ func TestSender_Single_Request(t *testing.T) { ).Return(sentTo) }, sendF: func(_ *require.Assertions, sender common.Sender, nodeID ids.NodeID) { - sender.SendGet(context.Background(), nodeID, requestID, containerID) + sender.SendGet(t.Context(), nodeID, 
requestID, containerID) }, }, } diff --git a/snow/uptime/manager.go b/snow/uptime/manager.go index 4438c070846e..ef96190352ce 100644 --- a/snow/uptime/manager.go +++ b/snow/uptime/manager.go @@ -30,7 +30,6 @@ type Tracker interface { StartedTracking() bool Connect(nodeID ids.NodeID) error - IsConnected(nodeID ids.NodeID) bool Disconnect(nodeID ids.NodeID) error } diff --git a/snow/uptime/manager_test.go b/snow/uptime/manager_test.go index 5c0d21587762..7933f3c7e15f 100644 --- a/snow/uptime/manager_test.go +++ b/snow/uptime/manager_test.go @@ -230,14 +230,8 @@ func TestConnectAndDisconnect(t *testing.T) { s.AddNode(nodeID0, startTime) - connected := up.IsConnected(nodeID0) - require.False(connected) - require.NoError(up.StartTracking([]ids.NodeID{nodeID0})) - connected = up.IsConnected(nodeID0) - require.False(connected) - duration, lastUpdated, err := up.CalculateUptime(nodeID0) require.NoError(err) require.Equal(time.Duration(0), duration) @@ -245,9 +239,6 @@ func TestConnectAndDisconnect(t *testing.T) { require.NoError(up.Connect(nodeID0)) - connected = up.IsConnected(nodeID0) - require.True(connected) - currentTime = currentTime.Add(time.Second) clk.Set(currentTime) @@ -258,9 +249,6 @@ func TestConnectAndDisconnect(t *testing.T) { require.NoError(up.Disconnect(nodeID0)) - connected = up.IsConnected(nodeID0) - require.False(connected) - currentTime = currentTime.Add(time.Second) clk.Set(currentTime) diff --git a/snow/validators/gvalidators/validator_state_test.go b/snow/validators/gvalidators/validator_state_test.go index a1e5e6821b20..9eb3b3c4f013 100644 --- a/snow/validators/gvalidators/validator_state_test.go +++ b/snow/validators/gvalidators/validator_state_test.go @@ -75,14 +75,14 @@ func TestGetMinimumHeight(t *testing.T) { expectedHeight := uint64(1337) state.server.EXPECT().GetMinimumHeight(gomock.Any()).Return(expectedHeight, nil) - height, err := state.client.GetMinimumHeight(context.Background()) + height, err := state.client.GetMinimumHeight(t.Context()) require.NoError(err) require.Equal(expectedHeight, height) // Error path state.server.EXPECT().GetMinimumHeight(gomock.Any()).Return(expectedHeight, errCustom) - _, err = state.client.GetMinimumHeight(context.Background()) + _, err = state.client.GetMinimumHeight(t.Context()) // TODO: require specific error require.Error(err) //nolint:forbidigo // currently returns grpc error } @@ -97,14 +97,14 @@ func TestGetCurrentHeight(t *testing.T) { expectedHeight := uint64(1337) state.server.EXPECT().GetCurrentHeight(gomock.Any()).Return(expectedHeight, nil) - height, err := state.client.GetCurrentHeight(context.Background()) + height, err := state.client.GetCurrentHeight(t.Context()) require.NoError(err) require.Equal(expectedHeight, height) // Error path state.server.EXPECT().GetCurrentHeight(gomock.Any()).Return(expectedHeight, errCustom) - _, err = state.client.GetCurrentHeight(context.Background()) + _, err = state.client.GetCurrentHeight(t.Context()) // TODO: require specific error require.Error(err) //nolint:forbidigo // currently returns grpc error } @@ -120,14 +120,14 @@ func TestGetSubnetID(t *testing.T) { expectedSubnetID := ids.GenerateTestID() state.server.EXPECT().GetSubnetID(gomock.Any(), chainID).Return(expectedSubnetID, nil) - subnetID, err := state.client.GetSubnetID(context.Background(), chainID) + subnetID, err := state.client.GetSubnetID(t.Context(), chainID) require.NoError(err) require.Equal(expectedSubnetID, subnetID) // Error path state.server.EXPECT().GetSubnetID(gomock.Any(), chainID).Return(expectedSubnetID, 
errCustom) - _, err = state.client.GetSubnetID(context.Background(), chainID) + _, err = state.client.GetSubnetID(t.Context(), chainID) // TODO: require specific error require.Error(err) //nolint:forbidigo // currently returns grpc error } @@ -170,14 +170,14 @@ func TestGetValidatorSet(t *testing.T) { subnetID := ids.GenerateTestID() state.server.EXPECT().GetValidatorSet(gomock.Any(), height, subnetID).Return(expectedVdrs, nil) - vdrs, err := state.client.GetValidatorSet(context.Background(), height, subnetID) + vdrs, err := state.client.GetValidatorSet(t.Context(), height, subnetID) require.NoError(err) require.Equal(expectedVdrs, vdrs) // Error path state.server.EXPECT().GetValidatorSet(gomock.Any(), height, subnetID).Return(expectedVdrs, errCustom) - _, err = state.client.GetValidatorSet(context.Background(), height, subnetID) + _, err = state.client.GetValidatorSet(t.Context(), height, subnetID) // TODO: require specific error require.Error(err) //nolint:forbidigo // currently returns grpc error } @@ -216,7 +216,7 @@ func benchmarkGetValidatorSet(b *testing.B, vs map[ids.NodeID]*validators.GetVal state.server.EXPECT().GetValidatorSet(gomock.Any(), height, subnetID).Return(vs, nil).AnyTimes() b.ResetTimer() for i := 0; i < b.N; i++ { - _, err := state.client.GetValidatorSet(context.Background(), height, subnetID) + _, err := state.client.GetValidatorSet(b.Context(), height, subnetID) require.NoError(err) } b.StopTimer() @@ -248,7 +248,7 @@ func TestGetWarpValidatorSets(t *testing.T) { } c := newClient(t, state) - _, err := c.GetWarpValidatorSets(context.Background(), height) + _, err := c.GetWarpValidatorSets(t.Context(), height) require.Error(t, err) //nolint:forbidigo // returns grpc error }) @@ -268,7 +268,7 @@ func TestGetWarpValidatorSets(t *testing.T) { } c := newClient(t, state) - vdrSets, err := c.GetWarpValidatorSets(context.Background(), height) + vdrSets, err := c.GetWarpValidatorSets(t.Context(), height) require.NoError(err) require.Equal(expectedVdrSets, vdrSets) }) @@ -282,7 +282,7 @@ func TestGetWarpValidatorSet(t *testing.T) { } c := newClient(t, state) - _, err := c.GetWarpValidatorSet(context.Background(), height, ids.GenerateTestID()) + _, err := c.GetWarpValidatorSet(t.Context(), height, ids.GenerateTestID()) require.Error(t, err) //nolint:forbidigo // returns grpc error }) @@ -300,7 +300,7 @@ func TestGetWarpValidatorSet(t *testing.T) { } c := newClient(t, state) - vdrSet, err := c.GetWarpValidatorSet(context.Background(), height, subnetID) + vdrSet, err := c.GetWarpValidatorSet(t.Context(), height, subnetID) require.NoError(err) require.Equal(expectedVdrSet, vdrSet) }) diff --git a/snow/validators/state_test.go b/snow/validators/state_test.go index a09b65c6cb2a..884ce41aca38 100644 --- a/snow/validators/state_test.go +++ b/snow/validators/state_test.go @@ -96,7 +96,7 @@ func TestCachedState_GetWarpValidatorSets(t *testing.T) { uncached.GetWarpValidatorSetsF = nil } - got, err := cached.GetWarpValidatorSets(context.Background(), test.height) + got, err := cached.GetWarpValidatorSets(t.Context(), test.height) require.ErrorIs(err, test.wantErr) require.Equal(test.want, got) require.Equal(test.expectCached, !cacheMiss) @@ -148,7 +148,7 @@ func TestCachedState_GetWarpValidatorSet_Inactive(t *testing.T) { cached = NewCachedState(uncached, upgrade.UnscheduledActivationTime) ) - got, err := cached.GetWarpValidatorSet(context.Background(), height, subnetID) + got, err := cached.GetWarpValidatorSet(t.Context(), height, subnetID) require.ErrorIs(err, test.wantErr) 
require.Equal(test.want, got) }) @@ -239,7 +239,7 @@ func TestCachedState_GetWarpValidatorSet_Active(t *testing.T) { uncached.GetWarpValidatorSetsF = nil } - got, err := cached.GetWarpValidatorSet(context.Background(), test.height, test.subnetID) + got, err := cached.GetWarpValidatorSet(t.Context(), test.height, test.subnetID) require.ErrorIs(err, test.wantErr) require.Equal(test.want, got) require.Equal(expectCached, !cacheMiss) @@ -265,7 +265,7 @@ func BenchmarkCachedState_GetWarpValidatorSet_Active(b *testing.B) { } cached := NewCachedState(uncached, upgrade.InitiallyActiveTime) - ctx := context.Background() + ctx := b.Context() subnetID := ids.GenerateTestID() for b.Loop() { _, err := cached.GetWarpValidatorSet(ctx, 0, subnetID) diff --git a/tests/e2e/x/transfer/virtuous.go b/tests/e2e/x/transfer/virtuous.go index 1b1ee342630c..a37f60876d52 100644 --- a/tests/e2e/x/transfer/virtuous.go +++ b/tests/e2e/x/transfer/virtuous.go @@ -166,13 +166,13 @@ var _ = e2e.DescribeXChainSerial("[Virtuous Transfer Tx AVAX]", func() { } } - testBalances := make([]uint64, 0) + testBalances := make([]uint64, len(wallets)) for i, w := range wallets { balances, err := w.X().Builder().GetFTBalance() require.NoError(err) bal := balances[avaxAssetID] - testBalances = append(testBalances, bal) + testBalances[i] = bal tc.Log().Info("balance in AVAX", zap.Uint64("balance", bal), diff --git a/tests/fixture/tmpnet/network_test.go b/tests/fixture/tmpnet/network_test.go index aad4e83ad5b1..d1ca33830a9f 100644 --- a/tests/fixture/tmpnet/network_test.go +++ b/tests/fixture/tmpnet/network_test.go @@ -4,7 +4,6 @@ package tmpnet import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -17,7 +16,7 @@ func TestNetworkSerialization(t *testing.T) { tmpDir := t.TempDir() - ctx := context.Background() + ctx := t.Context() network := NewDefaultNetwork("testnet") // Runtime configuration is required diff --git a/tests/reexecute/c/vm_reexecute_test.go b/tests/reexecute/c/vm_reexecute_test.go index 44b77458217c..605b26db9b42 100644 --- a/tests/reexecute/c/vm_reexecute_test.go +++ b/tests/reexecute/c/vm_reexecute_test.go @@ -175,7 +175,7 @@ func benchmarkReexecuteRange( metricsCollectorEnabled bool, ) { r := require.New(b) - ctx := context.Background() + ctx := b.Context() // Create the prefix gatherer passed to the VM and register it with the top-level, // labeled gatherer. 
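For reference, a minimal sketch (not part of the patch; all names hypothetical) of the pattern these hunks apply: Go 1.24's t.Context()/b.Context() replaces context.Background() inside test and benchmark bodies, while cleanup callbacks keep context.Background() because the per-test context is already canceled by the time they run, which is what the //nolint:usetesting comments in this diff annotate.

package example

import (
	"context"
	"testing"
	"time"
)

func TestPerTestContext(t *testing.T) {
	// Derive request contexts from t.Context() rather than context.Background();
	// it is canceled automatically when the test finishes.
	ctx, cancel := context.WithTimeout(t.Context(), time.Second)
	defer cancel()
	_ = ctx // pass ctx to the code under test

	t.Cleanup(func() {
		// t.Context() is already canceled here, so cleanup needs a fresh context.
		shutdownCtx, cancel := context.WithTimeout(context.Background(), time.Second) //nolint:usetesting
		defer cancel()
		_ = shutdownCtx // e.g. shut down resources started by the test
	})
}

func BenchmarkPerTestContext(b *testing.B) {
	ctx := b.Context() // benchmarks get the same per-test context via b.Context()
	for b.Loop() {
		_ = ctx
	}
}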
@@ -588,7 +588,7 @@ func startServer( func startCollector(tb testing.TB, log logging.Logger, name string, labels map[string]string, serverAddr string) { r := require.New(tb) - startPromCtx, cancel := context.WithTimeout(context.Background(), tests.DefaultTimeout) + startPromCtx, cancel := context.WithTimeout(tb.Context(), tests.DefaultTimeout) defer cancel() logger := tests.NewDefaultLogger("prometheus") @@ -608,6 +608,7 @@ func startCollector(tb testing.TB, log logging.Logger, name string, labels map[s }(), ) + //nolint:usetesting // t.Context() is already canceled inside the cleanup function checkMetricsCtx, cancel := context.WithTimeout(context.Background(), tests.DefaultTimeout) defer cancel() r.NoError(tmpnet.CheckMetricsExist(checkMetricsCtx, logger, networkUUID)) diff --git a/utils/dynamicip/updater_test.go b/utils/dynamicip/updater_test.go index c3dfc3554b5a..9115da11c00b 100644 --- a/utils/dynamicip/updater_test.go +++ b/utils/dynamicip/updater_test.go @@ -79,7 +79,7 @@ func TestNewUpdater(t *testing.T) { // Make sure stopChan and doneChan are closed when stop is called updater.Stop() - ctx, cancel := context.WithTimeout(context.Background(), stopTimeout) + ctx, cancel := context.WithTimeout(t.Context(), stopTimeout) defer cancel() select { case <-updater.rootCtx.Done(): diff --git a/utils/filesystem/rename_test.go b/utils/filesystem/rename_test.go index b3f816ea553c..7d5fefbf39ec 100644 --- a/utils/filesystem/rename_test.go +++ b/utils/filesystem/rename_test.go @@ -15,7 +15,7 @@ func TestRenameIfExists(t *testing.T) { t.Parallel() - f, err := os.CreateTemp(os.TempDir(), "test-rename") + f, err := os.CreateTemp(t.TempDir(), "test-rename") require.NoError(err) a := f.Name() diff --git a/utils/lock/cond_test.go b/utils/lock/cond_test.go index e904166f19cf..d8b8e6f9e58a 100644 --- a/utils/lock/cond_test.go +++ b/utils/lock/cond_test.go @@ -13,7 +13,7 @@ import ( ) func TestCond(t *testing.T) { - cancelled, cancel := context.WithCancel(context.Background()) + cancelled, cancel := context.WithCancel(t.Context()) cancel() var ( @@ -35,7 +35,7 @@ func TestCond(t *testing.T) { }{ { name: "signal_once", - ctx: context.Background(), + ctx: t.Context(), expectedErrors: make([]error, 1), next: []func(*Cond){ signal, @@ -43,7 +43,7 @@ func TestCond(t *testing.T) { }, { name: "signal_twice", - ctx: context.Background(), + ctx: t.Context(), expectedErrors: make([]error, 1), next: []func(*Cond){ merge( @@ -54,7 +54,7 @@ func TestCond(t *testing.T) { }, { name: "signal_both_once", - ctx: context.Background(), + ctx: t.Context(), expectedErrors: make([]error, 2), next: []func(*Cond){ signal, @@ -63,7 +63,7 @@ func TestCond(t *testing.T) { }, { name: "signal_both_once_atomically", - ctx: context.Background(), + ctx: t.Context(), expectedErrors: make([]error, 2), next: []func(*Cond){ merge( @@ -75,7 +75,7 @@ func TestCond(t *testing.T) { }, { name: "broadcast_once", - ctx: context.Background(), + ctx: t.Context(), expectedErrors: make([]error, 2), next: []func(*Cond){ broadcast, @@ -84,7 +84,7 @@ func TestCond(t *testing.T) { }, { name: "broadcast_twice", - ctx: context.Background(), + ctx: t.Context(), expectedErrors: make([]error, 2), next: []func(*Cond){ broadcast, diff --git a/utils/tree/tree_test.go b/utils/tree/tree_test.go index d0397260fc1c..5dd31aabeb22 100644 --- a/utils/tree/tree_test.go +++ b/utils/tree/tree_test.go @@ -4,7 +4,6 @@ package tree import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -27,7 +26,7 @@ func TestAcceptSingleBlock(t *testing.T) { _, contains = 
tr.Get(block) require.True(contains) - require.NoError(tr.Accept(context.Background(), block)) + require.NoError(tr.Accept(t.Context(), block)) require.Equal(snowtest.Accepted, block.Status) _, contains = tr.Get(block) @@ -52,7 +51,7 @@ func TestAcceptBlockConflict(t *testing.T) { require.True(contains) // accept one of them - require.NoError(tr.Accept(context.Background(), blockToAccept)) + require.NoError(tr.Accept(t.Context(), blockToAccept)) // check their statuses and that they are removed from the tree require.Equal(snowtest.Accepted, blockToAccept.Status) @@ -87,7 +86,7 @@ func TestAcceptChainConflict(t *testing.T) { require.True(contains) // accept one of them - require.NoError(tr.Accept(context.Background(), blockToAccept)) + require.NoError(tr.Accept(t.Context(), blockToAccept)) // check their statuses and whether they are removed from tree require.Equal(snowtest.Accepted, blockToAccept.Status) diff --git a/vms/avm/block/builder/builder_test.go b/vms/avm/block/builder/builder_test.go index 5ba0e7c334b3..3590ebe5f56d 100644 --- a/vms/avm/block/builder/builder_test.go +++ b/vms/avm/block/builder/builder_test.go @@ -4,7 +4,6 @@ package builder import ( - "context" "errors" "testing" "time" @@ -475,7 +474,7 @@ func TestBuilderBuildBlock(t *testing.T) { ctrl := gomock.NewController(t) builder := tt.builderFunc(ctrl) - _, err := builder.BuildBlock(context.Background()) + _, err := builder.BuildBlock(t.Context()) require.ErrorIs(t, err, tt.expectedErr) }) } @@ -539,7 +538,7 @@ func TestBlockBuilderAddLocalTx(t *testing.T) { builder := New(backend, manager, clk, mempool) // show that build block fails if tx is invalid - _, err = builder.BuildBlock(context.Background()) + _, err = builder.BuildBlock(t.Context()) require.ErrorIs(err, ErrNoTransactions) } diff --git a/vms/avm/block/executor/block_test.go b/vms/avm/block/executor/block_test.go index 172e7c0809d2..e757054eb025 100644 --- a/vms/avm/block/executor/block_test.go +++ b/vms/avm/block/executor/block_test.go @@ -4,7 +4,6 @@ package executor import ( - "context" "errors" "testing" "time" @@ -577,7 +576,7 @@ func TestBlockVerify(t *testing.T) { ctrl := gomock.NewController(t) b := tt.blockFunc(ctrl) - err := b.Verify(context.Background()) + err := b.Verify(t.Context()) require.ErrorIs(err, tt.expectedErr) if tt.postVerify != nil { tt.postVerify(require, b) @@ -784,7 +783,7 @@ func TestBlockAccept(t *testing.T) { ctrl := gomock.NewController(t) b := tt.blockFunc(ctrl) - err := b.Accept(context.Background()) + err := b.Accept(t.Context()) require.ErrorIs(err, tt.expectedErr) if err == nil { // Make sure block is removed from cache @@ -930,7 +929,7 @@ func TestBlockReject(t *testing.T) { ctrl := gomock.NewController(t) b := tt.blockFunc(ctrl) - require.NoError(b.Reject(context.Background())) + require.NoError(b.Reject(t.Context())) _, ok := b.manager.blkIDToState[b.ID()] require.False(ok) }) diff --git a/vms/avm/environment_test.go b/vms/avm/environment_test.go index 267482545d6c..39e4db0eb764 100644 --- a/vms/avm/environment_test.go +++ b/vms/avm/environment_test.go @@ -124,7 +124,7 @@ func setup(tb testing.TB, c *envConfig) *environment { require.NoError(err) require.NoError(vm.Initialize( - context.Background(), + tb.Context(), ctx, prefixdb.New([]byte{1}, baseDB), genesisBytes, @@ -156,22 +156,23 @@ func setup(tb testing.TB, c *envConfig) *environment { txBuilder: txstest.New(vm.parser.Codec(), vm.ctx, &vm.Config, vm.feeAssetID, vm.state), } - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + 
require.NoError(vm.SetState(tb.Context(), snow.Bootstrapping)) if c.notLinearized { return env } - require.NoError(vm.Linearize(context.Background(), stopVertexID)) + require.NoError(vm.Linearize(tb.Context(), stopVertexID)) if c.notBootstrapped { return env } - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + require.NoError(vm.SetState(tb.Context(), snow.NormalOp)) tb.Cleanup(func() { env.vm.ctx.Lock.Lock() defer env.vm.ctx.Lock.Unlock() + //nolint:usetesting // t.Context() is already canceled inside the cleanup function require.NoError(env.vm.Shutdown(context.Background())) }) diff --git a/vms/avm/network/network_test.go b/vms/avm/network/network_test.go index 341c54f86343..4f8df6d55037 100644 --- a/vms/avm/network/network_test.go +++ b/vms/avm/network/network_test.go @@ -251,7 +251,7 @@ func TestNetworkIssueTxFromRPC(t *testing.T) { err = n.IssueTxFromRPC(tt.tx) require.ErrorIs(err, tt.expectedErr) - require.NoError(n.txPushGossiper.Gossip(context.Background())) + require.NoError(n.txPushGossiper.Gossip(t.Context())) }) } } @@ -325,7 +325,7 @@ func TestNetworkIssueTxFromRPCWithoutVerification(t *testing.T) { err = n.IssueTxFromRPCWithoutVerification(&txs.Tx{Unsigned: &txs.BaseTx{}}) require.ErrorIs(err, tt.expectedErr) - require.NoError(n.txPushGossiper.Gossip(context.Background())) + require.NoError(n.txPushGossiper.Gossip(t.Context())) }) } } diff --git a/vms/avm/vm_test.go b/vms/avm/vm_test.go index f49ffcf53fdf..bd0600e78a84 100644 --- a/vms/avm/vm_test.go +++ b/vms/avm/vm_test.go @@ -4,7 +4,6 @@ package avm import ( - "context" "math" "testing" @@ -37,7 +36,7 @@ func TestInvalidGenesis(t *testing.T) { defer ctx.Lock.Unlock() err := vm.Initialize( - context.Background(), + t.Context(), ctx, // context memdb.New(), // database nil, // genesisState @@ -56,13 +55,13 @@ func TestInvalidFx(t *testing.T) { ctx := snowtest.Context(t, snowtest.XChainID) ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) ctx.Lock.Unlock() }() genesisBytes := newGenesisBytesTest(t) err := vm.Initialize( - context.Background(), + t.Context(), ctx, // context memdb.New(), // database genesisBytes, // genesisState @@ -83,13 +82,13 @@ func TestFxInitializationFailure(t *testing.T) { ctx := snowtest.Context(t, snowtest.XChainID) ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) ctx.Lock.Unlock() }() genesisBytes := newGenesisBytesTest(t) err := vm.Initialize( - context.Background(), + t.Context(), ctx, // context memdb.New(), // database genesisBytes, // genesisState @@ -425,17 +424,17 @@ func TestTxAcceptAfterParseTx(t *testing.T) { }} require.NoError(secondTx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - parsedFirstTx, err := env.vm.ParseTx(context.Background(), firstTx.Bytes()) + parsedFirstTx, err := env.vm.ParseTx(t.Context(), firstTx.Bytes()) require.NoError(err) - require.NoError(parsedFirstTx.Verify(context.Background())) - require.NoError(parsedFirstTx.Accept(context.Background())) + require.NoError(parsedFirstTx.Verify(t.Context())) + require.NoError(parsedFirstTx.Accept(t.Context())) - parsedSecondTx, err := env.vm.ParseTx(context.Background(), secondTx.Bytes()) + parsedSecondTx, err := env.vm.ParseTx(t.Context(), secondTx.Bytes()) require.NoError(err) - require.NoError(parsedSecondTx.Verify(context.Background())) - require.NoError(parsedSecondTx.Accept(context.Background())) + 
require.NoError(parsedSecondTx.Verify(t.Context())) + require.NoError(parsedSecondTx.Accept(t.Context())) _, err = env.vm.state.GetTx(firstTx.ID()) require.NoError(err) @@ -572,11 +571,11 @@ func TestForceAcceptImportTx(t *testing.T) { }} require.NoError(tx.SignSECP256K1Fx(env.vm.parser.Codec(), [][]*secp256k1.PrivateKey{{key}})) - parsedTx, err := env.vm.ParseTx(context.Background(), tx.Bytes()) + parsedTx, err := env.vm.ParseTx(t.Context(), tx.Bytes()) require.NoError(err) - require.NoError(parsedTx.Verify(context.Background())) - require.NoError(parsedTx.Accept(context.Background())) + require.NoError(parsedTx.Verify(t.Context())) + require.NoError(parsedTx.Accept(t.Context())) id := utxoID.InputID() _, err = env.vm.ctx.SharedMemory.Get(constants.PlatformChainID, [][]byte{id[:]}) diff --git a/vms/components/chain/state_test.go b/vms/components/chain/state_test.go index 8ecb21f61679..9b16121eb0b7 100644 --- a/vms/components/chain/state_test.go +++ b/vms/components/chain/state_test.go @@ -111,13 +111,13 @@ func checkProcessingBlock(t *testing.T, s *State, blk snowman.Block) { require.IsType(&BlockWrapper{}, blk) - parsedBlk, err := s.ParseBlock(context.Background(), blk.Bytes()) + parsedBlk, err := s.ParseBlock(t.Context(), blk.Bytes()) require.NoError(err) require.Equal(blk.ID(), parsedBlk.ID()) require.Equal(blk.Bytes(), parsedBlk.Bytes()) require.Equal(blk, parsedBlk) - getBlk, err := s.GetBlock(context.Background(), blk.ID()) + getBlk, err := s.GetBlock(t.Context(), blk.ID()) require.NoError(err) require.Equal(parsedBlk, getBlk) } @@ -135,7 +135,7 @@ func checkDecidedBlock(t *testing.T, s *State, blk snowman.Block, cached bool) { require.True(ok) } - parsedBlk, err := s.ParseBlock(context.Background(), blk.Bytes()) + parsedBlk, err := s.ParseBlock(t.Context(), blk.Bytes()) require.NoError(err) require.Equal(blk.ID(), parsedBlk.ID()) require.Equal(blk.Bytes(), parsedBlk.Bytes()) @@ -148,7 +148,7 @@ func checkDecidedBlock(t *testing.T, s *State, blk snowman.Block, cached bool) { require.Equal(blk, parsedBlk) } - getBlk, err := s.GetBlock(context.Background(), blk.ID()) + getBlk, err := s.GetBlock(t.Context(), blk.ID()) require.NoError(err) require.Equal(blk.ID(), getBlk.ID()) require.Equal(blk.Bytes(), getBlk.Bytes()) @@ -183,32 +183,32 @@ func TestState(t *testing.T) { BuildBlock: cantBuildBlock, }) - lastAccepted, err := chainState.LastAccepted(context.Background()) + lastAccepted, err := chainState.LastAccepted(t.Context()) require.NoError(err) require.Equal(genesisBlock.ID(), lastAccepted) - wrappedGenesisBlk, err := chainState.GetBlock(context.Background(), genesisBlock.ID()) + wrappedGenesisBlk, err := chainState.GetBlock(t.Context(), genesisBlock.ID()) require.NoError(err) // Check that a cache miss on a block is handled correctly - _, err = chainState.GetBlock(context.Background(), blk1.ID()) + _, err = chainState.GetBlock(t.Context(), blk1.ID()) require.ErrorIs(err, database.ErrNotFound) // Parse and verify blk1 and blk2 - parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) + parsedBlk1, err := chainState.ParseBlock(t.Context(), blk1.Bytes()) require.NoError(err) - require.NoError(parsedBlk1.Verify(context.Background())) + require.NoError(parsedBlk1.Verify(t.Context())) - parsedBlk2, err := chainState.ParseBlock(context.Background(), blk2.Bytes()) + parsedBlk2, err := chainState.ParseBlock(t.Context(), blk2.Bytes()) require.NoError(err) - require.NoError(parsedBlk2.Verify(context.Background())) + require.NoError(parsedBlk2.Verify(t.Context())) // Check 
that the verified blocks have been placed in the processing map require.Len(chainState.verifiedBlocks, 2) - parsedBlk3, err := chainState.ParseBlock(context.Background(), blk3.Bytes()) + parsedBlk3, err := chainState.ParseBlock(t.Context(), blk3.Bytes()) require.NoError(err) - getBlk3, err := chainState.GetBlock(context.Background(), blk3.ID()) + getBlk3, err := chainState.GetBlock(t.Context(), blk3.ID()) require.NoError(err) require.Equal(parsedBlk3.ID(), getBlk3.ID(), "State GetBlock returned the wrong block") @@ -216,19 +216,19 @@ func TestState(t *testing.T) { // not been verified. require.Len(chainState.verifiedBlocks, 2) - require.NoError(parsedBlk3.Verify(context.Background())) + require.NoError(parsedBlk3.Verify(t.Context())) // Check that blk3 has been added to processing blocks. require.Len(chainState.verifiedBlocks, 3) // Decide the blocks and ensure they are removed from the processing blocks map - require.NoError(parsedBlk1.Accept(context.Background())) - require.NoError(parsedBlk2.Accept(context.Background())) - require.NoError(parsedBlk3.Reject(context.Background())) + require.NoError(parsedBlk1.Accept(t.Context())) + require.NoError(parsedBlk2.Accept(t.Context())) + require.NoError(parsedBlk3.Reject(t.Context())) require.Empty(chainState.verifiedBlocks) // Check that the last accepted block was updated correctly - lastAcceptedID, err := chainState.LastAccepted(context.Background()) + lastAcceptedID, err := chainState.LastAccepted(t.Context()) require.NoError(err) require.Equal(blk2.ID(), lastAcceptedID) require.Equal(blk2.ID(), chainState.LastAcceptedBlock().ID()) @@ -267,16 +267,16 @@ func TestBuildBlock(t *testing.T) { BuildBlock: buildBlock, }) - builtBlk, err := chainState.BuildBlock(context.Background()) + builtBlk, err := chainState.BuildBlock(t.Context()) require.NoError(err) require.Empty(chainState.verifiedBlocks) - require.NoError(builtBlk.Verify(context.Background())) + require.NoError(builtBlk.Verify(t.Context())) require.Len(chainState.verifiedBlocks, 1) checkProcessingBlock(t, chainState, builtBlk) - require.NoError(builtBlk.Accept(context.Background())) + require.NoError(builtBlk.Accept(t.Context())) checkDecidedBlock(t, chainState, builtBlk, true) } @@ -306,33 +306,33 @@ func TestStateDecideBlock(t *testing.T) { }) // Parse badVerifyBlk (which should fail verification) - badBlk, err := chainState.ParseBlock(context.Background(), badVerifyBlk.Bytes()) + badBlk, err := chainState.ParseBlock(t.Context(), badVerifyBlk.Bytes()) require.NoError(err) - err = badBlk.Verify(context.Background()) + err = badBlk.Verify(t.Context()) require.ErrorIs(err, errVerify) // Ensure a block that fails verification is not marked as processing require.Empty(chainState.verifiedBlocks) // Ensure that an error during block acceptance is propagated correctly - badBlk, err = chainState.ParseBlock(context.Background(), badAcceptBlk.Bytes()) + badBlk, err = chainState.ParseBlock(t.Context(), badAcceptBlk.Bytes()) require.NoError(err) - require.NoError(badBlk.Verify(context.Background())) + require.NoError(badBlk.Verify(t.Context())) require.Len(chainState.verifiedBlocks, 1) - err = badBlk.Accept(context.Background()) + err = badBlk.Accept(t.Context()) require.ErrorIs(err, errAccept) // Ensure that an error during block reject is propagated correctly - badBlk, err = chainState.ParseBlock(context.Background(), badRejectBlk.Bytes()) + badBlk, err = chainState.ParseBlock(t.Context(), badRejectBlk.Bytes()) require.NoError(err) - require.NoError(badBlk.Verify(context.Background())) + 
require.NoError(badBlk.Verify(t.Context())) // Note: an error during block Accept/Reject is fatal, so it is undefined whether // the block that failed on Accept should be removed from processing or not. We allow // either case here to make this test more flexible. numProcessing := len(chainState.verifiedBlocks) require.Contains([]int{1, 2}, numProcessing) - err = badBlk.Reject(context.Background()) + err = badBlk.Reject(t.Context()) require.ErrorIs(err, errReject) } @@ -357,24 +357,24 @@ func TestStateParent(t *testing.T) { BuildBlock: cantBuildBlock, }) - parsedBlk2, err := chainState.ParseBlock(context.Background(), blk2.Bytes()) + parsedBlk2, err := chainState.ParseBlock(t.Context(), blk2.Bytes()) require.NoError(err) missingBlk1ID := parsedBlk2.Parent() - _, err = chainState.GetBlock(context.Background(), missingBlk1ID) + _, err = chainState.GetBlock(t.Context(), missingBlk1ID) require.ErrorIs(err, database.ErrNotFound) - parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) + parsedBlk1, err := chainState.ParseBlock(t.Context(), blk1.Bytes()) require.NoError(err) genesisBlkParentID := parsedBlk1.Parent() - genesisBlkParent, err := chainState.GetBlock(context.Background(), genesisBlkParentID) + genesisBlkParent, err := chainState.GetBlock(t.Context(), genesisBlkParentID) require.NoError(err) checkDecidedBlock(t, chainState, genesisBlkParent, true) parentBlk1ID := parsedBlk2.Parent() - parentBlk1, err := chainState.GetBlock(context.Background(), parentBlk1ID) + parentBlk1, err := chainState.GetBlock(t.Context(), parentBlk1ID) require.NoError(err) checkProcessingBlock(t, chainState, parentBlk1) } @@ -401,7 +401,7 @@ func TestGetBlockInternal(t *testing.T) { require.IsType(&snowmantest.Block{}, genesisBlockInternal) require.Equal(genesisBlock.ID(), genesisBlockInternal.ID()) - blk, err := chainState.GetBlockInternal(context.Background(), genesisBlock.ID()) + blk, err := chainState.GetBlockInternal(t.Context(), genesisBlock.ID()) require.NoError(err) require.IsType(&snowmantest.Block{}, blk) @@ -435,13 +435,13 @@ func TestGetBlockError(t *testing.T) { BuildBlock: cantBuildBlock, }) - _, err := chainState.GetBlock(context.Background(), blk1.ID()) + _, err := chainState.GetBlock(t.Context(), blk1.ID()) require.ErrorIs(err, database.ErrNotFound) // Update the status to Undecided, so that it will be returned by the // internal get block function. 
blk1.Status = snowtest.Undecided - blk, err := chainState.GetBlock(context.Background(), blk1.ID()) + blk, err := chainState.GetBlock(t.Context(), blk1.ID()) require.NoError(err) require.Equal(blk1.ID(), blk.ID()) checkProcessingBlock(t, chainState, blk) @@ -464,7 +464,7 @@ func TestParseBlockError(t *testing.T) { BuildBlock: cantBuildBlock, }) - _, err := chainState.ParseBlock(context.Background(), []byte{255}) + _, err := chainState.ParseBlock(t.Context(), []byte{255}) require.ErrorIs(t, err, errUnexpectedBlockBytes) } @@ -485,7 +485,7 @@ func TestBuildBlockError(t *testing.T) { BuildBlock: cantBuildBlock, }) - _, err := chainState.BuildBlock(context.Background()) + _, err := chainState.BuildBlock(t.Context()) require.ErrorIs(t, err, errCantBuildBlock) } @@ -543,13 +543,13 @@ func TestStateBytesToIDCache(t *testing.T) { }) // Shouldn't have blk1 ID to start with - _, err := chainState.GetBlock(context.Background(), blk1.ID()) + _, err := chainState.GetBlock(t.Context(), blk1.ID()) require.ErrorIs(err, database.ErrNotFound) _, ok := chainState.bytesToIDCache.Get(string(blk1.Bytes())) require.False(ok) // Parse blk1 from bytes - _, err = chainState.ParseBlock(context.Background(), blk1.Bytes()) + _, err = chainState.ParseBlock(t.Context(), blk1.Bytes()) require.NoError(err) // blk1 should be in cache now @@ -557,7 +557,7 @@ func TestStateBytesToIDCache(t *testing.T) { require.True(ok) // Parse another block - _, err = chainState.ParseBlock(context.Background(), blk2.Bytes()) + _, err = chainState.ParseBlock(t.Context(), blk2.Bytes()) require.NoError(err) // Should have bumped blk1 from cache @@ -594,23 +594,23 @@ func TestSetLastAcceptedBlock(t *testing.T) { UnmarshalBlock: parseBlock, BuildBlock: cantBuildBlock, }) - lastAcceptedID, err := chainState.LastAccepted(context.Background()) + lastAcceptedID, err := chainState.LastAccepted(t.Context()) require.NoError(err) require.Equal(genesisBlock.ID(), lastAcceptedID) // call SetLastAcceptedBlock for postSetBlk1 require.NoError(chainState.SetLastAcceptedBlock(postSetBlk1)) - lastAcceptedID, err = chainState.LastAccepted(context.Background()) + lastAcceptedID, err = chainState.LastAccepted(t.Context()) require.NoError(err) require.Equal(postSetBlk1.ID(), lastAcceptedID) require.Equal(postSetBlk1.ID(), chainState.LastAcceptedBlock().ID()) // ensure further blocks can be accepted - parsedpostSetBlk2, err := chainState.ParseBlock(context.Background(), postSetBlk2.Bytes()) + parsedpostSetBlk2, err := chainState.ParseBlock(t.Context(), postSetBlk2.Bytes()) require.NoError(err) - require.NoError(parsedpostSetBlk2.Verify(context.Background())) - require.NoError(parsedpostSetBlk2.Accept(context.Background())) - lastAcceptedID, err = chainState.LastAccepted(context.Background()) + require.NoError(parsedpostSetBlk2.Verify(t.Context())) + require.NoError(parsedpostSetBlk2.Accept(t.Context())) + lastAcceptedID, err = chainState.LastAccepted(t.Context()) require.NoError(err) require.Equal(postSetBlk2.ID(), lastAcceptedID) require.Equal(postSetBlk2.ID(), chainState.LastAcceptedBlock().ID()) @@ -645,11 +645,11 @@ func TestSetLastAcceptedBlockWithProcessingBlocksErrors(t *testing.T) { BuildBlock: buildBlock, }) - builtBlk, err := chainState.BuildBlock(context.Background()) + builtBlk, err := chainState.BuildBlock(t.Context()) require.NoError(err) require.Empty(chainState.verifiedBlocks) - require.NoError(builtBlk.Verify(context.Background())) + require.NoError(builtBlk.Verify(t.Context())) require.Len(chainState.verifiedBlocks, 1) checkProcessingBlock(t, 
chainState, builtBlk) @@ -680,7 +680,7 @@ func TestStateParseTransitivelyAcceptedBlock(t *testing.T) { BuildBlock: cantBuildBlock, }) - parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) + parsedBlk1, err := chainState.ParseBlock(t.Context(), blk1.Bytes()) require.NoError(err) require.Equal(blk1.Height(), parsedBlk1.Height()) } @@ -706,20 +706,20 @@ func TestIsProcessing(t *testing.T) { }) // Parse blk1 - parsedBlk1, err := chainState.ParseBlock(context.Background(), blk1.Bytes()) + parsedBlk1, err := chainState.ParseBlock(t.Context(), blk1.Bytes()) require.NoError(err) // Check that it is not processing in consensus require.False(chainState.IsProcessing(parsedBlk1.ID())) // Verify blk1 - require.NoError(parsedBlk1.Verify(context.Background())) + require.NoError(parsedBlk1.Verify(t.Context())) // Check that it is processing in consensus require.True(chainState.IsProcessing(parsedBlk1.ID())) // Accept blk1 - require.NoError(parsedBlk1.Accept(context.Background())) + require.NoError(parsedBlk1.Accept(t.Context())) // Check that it is no longer processing in consensus require.False(chainState.IsProcessing(parsedBlk1.ID())) diff --git a/vms/components/verify/subnet_test.go b/vms/components/verify/subnet_test.go index c38736b778a0..6dbe1ee98e67 100644 --- a/vms/components/verify/subnet_test.go +++ b/vms/components/verify/subnet_test.go @@ -4,7 +4,6 @@ package verify import ( - "context" "errors" "testing" @@ -91,7 +90,7 @@ func TestSameSubnet(t *testing.T) { ctrl := gomock.NewController(t) ctx := test.ctxF(ctrl) - result := SameSubnet(context.Background(), ctx, test.chainID) + result := SameSubnet(t.Context(), ctx, test.chainID) require.ErrorIs(t, result, test.result) }) } diff --git a/vms/evm/database/database.go b/vms/evm/database/database.go new file mode 100644 index 000000000000..138a2f6f0d3d --- /dev/null +++ b/vms/evm/database/database.go @@ -0,0 +1,73 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package database + +import ( + "errors" + + "github.com/ava-labs/libevm/ethdb" + + avalanchegodb "github.com/ava-labs/avalanchego/database" +) + +var ( + errSnapshotNotSupported = errors.New("snapshot is not supported") + errStatNotSupported = errors.New("stat is not supported") + + _ ethdb.Batch = (*batch)(nil) + _ ethdb.KeyValueStore = (*database)(nil) +) + +type database struct { + db avalanchegodb.Database +} + +func New(db avalanchegodb.Database) ethdb.KeyValueStore { return database{db} } + +func (database) Stat(string) (string, error) { return "", errStatNotSupported } + +func (db database) NewBatch() ethdb.Batch { return batch{batch: db.db.NewBatch()} } + +func (db database) Has(key []byte) (bool, error) { return db.db.Has(key) } + +func (db database) Get(key []byte) ([]byte, error) { return db.db.Get(key) } + +func (db database) Put(key, value []byte) error { return db.db.Put(key, value) } + +func (db database) Delete(key []byte) error { return db.db.Delete(key) } + +func (db database) Compact(start, limit []byte) error { return db.db.Compact(start, limit) } + +func (db database) Close() error { return db.db.Close() } + +func (db database) NewBatchWithSize(int) ethdb.Batch { return db.NewBatch() } + +func (database) NewSnapshot() (ethdb.Snapshot, error) { + return nil, errSnapshotNotSupported +} + +func (db database) NewIterator(prefix []byte, start []byte) ethdb.Iterator { + newStart := make([]byte, len(prefix)+len(start)) + copy(newStart, prefix) + copy(newStart[len(prefix):], start) + start = newStart + + return db.db.NewIteratorWithStartAndPrefix(start, prefix) +} + +type batch struct { + batch avalanchegodb.Batch +} + +func (b batch) Put(key, value []byte) error { return b.batch.Put(key, value) } + +func (b batch) Delete(key []byte) error { return b.batch.Delete(key) } + +func (b batch) ValueSize() int { return b.batch.Size() } + +func (b batch) Write() error { return b.batch.Write() } + +func (b batch) Reset() { b.batch.Reset() } + +func (b batch) Replay(w ethdb.KeyValueWriter) error { return b.batch.Replay(w) } diff --git a/vms/evm/database/database_test.go b/vms/evm/database/database_test.go new file mode 100644 index 000000000000..619e9597a91e --- /dev/null +++ b/vms/evm/database/database_test.go @@ -0,0 +1,148 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package database + +import ( + "bytes" + "errors" + "slices" + "testing" + + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/ethdb/dbtest" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database/memdb" +) + +// testDatabase wraps the production database with test-only snapshot functionality +type testDatabase struct { + ethdb.KeyValueStore +} + +// Creates a snapshot by iterating over the entire database and copying key-value pairs. +func (db testDatabase) NewSnapshot() (ethdb.Snapshot, error) { + snapshotData := make(map[string][]byte) + + iter := db.NewIterator(nil, nil) + defer iter.Release() + + for iter.Next() { + key := iter.Key() + value := iter.Value() + valueCopy := make([]byte, len(value)) + copy(valueCopy, value) + snapshotData[string(key)] = valueCopy + } + + if err := iter.Error(); err != nil { + return nil, err + } + + return &testSnapshot{data: snapshotData}, nil +} + +// testSnapshot implements [ethdb.Snapshot] by storing a copy of the database state. 
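// Because NewSnapshot above copies every key/value pair into a fresh map, writes made to
// the underlying database afterwards are not visible through the snapshot. A minimal
// sketch of that behaviour (editor's illustration, not part of this change):
//
//	db := testDatabase{KeyValueStore: New(memdb.New())}
//	_ = db.Put([]byte("a"), []byte("1"))
//	snap, _ := db.NewSnapshot()
//	_ = db.Put([]byte("a"), []byte("2"))
//	v, _ := snap.Get([]byte("a")) // v is still "1": the snapshot is a point-in-time copy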
+type testSnapshot struct { + data map[string][]byte +} + +func (t *testSnapshot) Get(key []byte) ([]byte, error) { + value, ok := t.data[string(key)] + if !ok { + return nil, errors.New("not found") + } + return value, nil +} + +func (t *testSnapshot) Has(key []byte) (bool, error) { + _, ok := t.data[string(key)] + return ok, nil +} + +func (*testSnapshot) Release() {} + +func (t *testSnapshot) NewIterator(prefix []byte, start []byte) ethdb.Iterator { + // Create a slice of key-value pairs that match the prefix and start criteria + pairs := make([]kvPair, 0, len(t.data)) + + for keyStr, value := range t.data { + key := []byte(keyStr) + + if prefix != nil && len(key) < len(prefix) { + continue + } + if prefix != nil && !bytes.HasPrefix(key, prefix) { + continue + } + + if start != nil && bytes.Compare(key, start) < 0 { + continue + } + + pairs = append(pairs, kvPair{key: key, value: value}) + } + + // Sort by key for consistent iteration + slices.SortFunc(pairs, func(a, b kvPair) int { + return bytes.Compare(a.key, b.key) + }) + + return &testSnapshotIterator{pairs: pairs, index: -1} +} + +type kvPair struct { + key []byte + value []byte +} + +type testSnapshotIterator struct { + pairs []kvPair + index int +} + +func (it *testSnapshotIterator) Next() bool { + it.index++ + return it.index < len(it.pairs) +} + +func (it *testSnapshotIterator) Key() []byte { + if it.index < 0 || it.index >= len(it.pairs) { + return nil + } + return it.pairs[it.index].key +} + +func (it *testSnapshotIterator) Value() []byte { + if it.index < 0 || it.index >= len(it.pairs) { + return nil + } + return it.pairs[it.index].value +} + +func (*testSnapshotIterator) Release() {} + +func (*testSnapshotIterator) Error() error { + return nil +} + +func TestInterface(t *testing.T) { + dbtest.TestDatabaseSuite(t, func() ethdb.KeyValueStore { + return &testDatabase{KeyValueStore: New(memdb.New())} + }) +} + +func TestUnimplemented(t *testing.T) { + t.Run("NewSnapshot_ReturnsError", func(t *testing.T) { + db := New(memdb.New()) + _, err := db.NewSnapshot() + require.ErrorIs(t, err, errSnapshotNotSupported) + }) + + t.Run("Stat_ReturnsError", func(t *testing.T) { + db := New(memdb.New()) + _, err := db.Stat("test") + require.ErrorIs(t, err, errStatNotSupported) + }) +} diff --git a/vms/evm/sync/customrawdb/db.go b/vms/evm/sync/customrawdb/db.go new file mode 100644 index 000000000000..404f057dacbb --- /dev/null +++ b/vms/evm/sync/customrawdb/db.go @@ -0,0 +1,34 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package customrawdb + +import ( + "errors" + + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" +) + +// FirewoodScheme is the scheme for the Firewood storage scheme. +const FirewoodScheme = "firewood" + +// errStateSchemeConflict indicates the provided state scheme conflicts with +// what is on disk. +var errStateSchemeConflict = errors.New("state scheme conflict") + +// ParseStateScheme parses the state scheme from the provided string. +func ParseStateScheme(provided string, db ethdb.Database) (string, error) { + // Check for custom scheme + if provided == FirewoodScheme { + if diskScheme := rawdb.ReadStateScheme(db); diskScheme != "" { + // Valid scheme on db mismatched + return "", errStateSchemeConflict + } + // If no conflicting scheme is found, is valid. 
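// (Editor's note: reaching this return means rawdb.ReadStateScheme reported no scheme on
// disk, i.e. the database has never recorded an eth state scheme, so Firewood can be
// selected without conflicting with existing state.)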
+ return FirewoodScheme, nil + } + + // Check for valid eth scheme + return rawdb.ParseStateScheme(provided, db) +} diff --git a/vms/evm/sync/customrawdb/db_test.go b/vms/evm/sync/customrawdb/db_test.go new file mode 100644 index 000000000000..3eedd894e782 --- /dev/null +++ b/vms/evm/sync/customrawdb/db_test.go @@ -0,0 +1,32 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package customrawdb + +import ( + "testing" + + "github.com/ava-labs/libevm/core/rawdb" + "github.com/stretchr/testify/require" +) + +func TestParseStateScheme(t *testing.T) { + db := rawdb.NewMemoryDatabase() + + // Provided Firewood on empty disk -> allowed. + scheme, err := ParseStateScheme(FirewoodScheme, db) + require.NoError(t, err) + require.Equal(t, FirewoodScheme, scheme) + + // Simulate disk has non-empty path scheme by writing persistent state id. + rawdb.WritePersistentStateID(db, 1) + scheme, err = ParseStateScheme(FirewoodScheme, db) + require.ErrorIs(t, err, errStateSchemeConflict) + require.Empty(t, scheme) + + // Pass-through to rawdb for non-Firewood using a fresh empty DB. + db2 := rawdb.NewMemoryDatabase() + scheme, err = ParseStateScheme(rawdb.HashScheme, db2) + require.NoError(t, err) + require.Equal(t, rawdb.HashScheme, scheme) +} diff --git a/vms/evm/sync/customrawdb/markers.go b/vms/evm/sync/customrawdb/markers.go new file mode 100644 index 000000000000..b44c29219e2b --- /dev/null +++ b/vms/evm/sync/customrawdb/markers.go @@ -0,0 +1,231 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package customrawdb + +import ( + "encoding/json" + "errors" + "fmt" + "time" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/params" + "github.com/ava-labs/libevm/rlp" + + "github.com/ava-labs/avalanchego/database" +) + +var ( + // errInvalidData indicates the stored value exists but is malformed or undecodable. + errInvalidData = errors.New("invalid data") + errFailedToGetUpgradeConfig = errors.New("failed to get upgrade config") + errFailedToMarshalUpgradeConfig = errors.New("failed to marshal upgrade config") + + upgradeConfigPrefix = []byte("upgrade-config-") + // offlinePruningKey tracks runs of offline pruning. + offlinePruningKey = []byte("OfflinePruning") + // populateMissingTriesKey tracks runs of trie backfills. + populateMissingTriesKey = []byte("PopulateMissingTries") + // pruningDisabledKey tracks whether the node has ever run in archival mode + // to ensure that a user does not accidentally corrupt an archival node. + pruningDisabledKey = []byte("PruningDisabled") + // acceptorTipKey tracks the tip of the last accepted block that has been fully processed. + acceptorTipKey = []byte("AcceptorTipKey") + // snapshotBlockHashKey tracks the block hash of the last snapshot. + snapshotBlockHashKey = []byte("SnapshotBlockHash") +) + +// WriteOfflinePruning writes a time marker of the last attempt to run offline pruning. +// The marker is written when offline pruning completes and is deleted when the node +// is started successfully with offline pruning disabled. This ensures users must +// disable offline pruning and start their node successfully between runs of offline +// pruning. 
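// A typical lifecycle for this marker, sketched by the editor (not part of this change):
//
//	_ = WriteOfflinePruning(db, time.Now()) // after an offline-pruning run completes
//	_ = DeleteOfflinePruning(db)            // on a later successful start with pruning disabled
//	_, err := ReadOfflinePruning(db)        // database.ErrNotFound once the marker is gone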
+func WriteOfflinePruning(db ethdb.KeyValueWriter, ts time.Time) error { + return writeTimeMarker(db, offlinePruningKey, ts) +} + +// ReadOfflinePruning reads the most recent timestamp of an attempt to run offline +// pruning if present. +func ReadOfflinePruning(db ethdb.KeyValueReader) (time.Time, error) { + return readTimeMarker(db, offlinePruningKey) +} + +// DeleteOfflinePruning deletes any marker of the last attempt to run offline pruning. +func DeleteOfflinePruning(db ethdb.KeyValueWriter) error { + return db.Delete(offlinePruningKey) +} + +// WritePopulateMissingTries writes a marker for the current attempt to populate +// missing tries. +func WritePopulateMissingTries(db ethdb.KeyValueWriter, ts time.Time) error { + return writeTimeMarker(db, populateMissingTriesKey, ts) +} + +// ReadPopulateMissingTries reads the most recent timestamp of an attempt to +// re-populate missing trie nodes. +func ReadPopulateMissingTries(db ethdb.KeyValueReader) (time.Time, error) { + return readTimeMarker(db, populateMissingTriesKey) +} + +// DeletePopulateMissingTries deletes any marker of the last attempt to +// re-populate missing trie nodes. +func DeletePopulateMissingTries(db ethdb.KeyValueWriter) error { + return db.Delete(populateMissingTriesKey) +} + +// WritePruningDisabled writes a marker to track whether the node has ever run +// with pruning disabled. +func WritePruningDisabled(db ethdb.KeyValueWriter) error { + return db.Put(pruningDisabledKey, nil) +} + +// HasPruningDisabled returns true if there is a marker present indicating that +// the node has run with pruning disabled at some point. +func HasPruningDisabled(db ethdb.KeyValueReader) (bool, error) { + return db.Has(pruningDisabledKey) +} + +// WriteAcceptorTip writes `hash` as the last accepted block that has been fully processed. +func WriteAcceptorTip(db ethdb.KeyValueWriter, hash common.Hash) error { + return db.Put(acceptorTipKey, hash[:]) +} + +// ReadAcceptorTip reads the hash of the last accepted block that was fully processed. +// If there is no value present (the index is being initialized for the first time), then the +// empty hash is returned. +func ReadAcceptorTip(db ethdb.KeyValueReader) (common.Hash, error) { + ok, err := db.Has(acceptorTipKey) + if err != nil { + return common.Hash{}, err + } + if !ok { + return common.Hash{}, database.ErrNotFound + } + h, err := db.Get(acceptorTipKey) + if err != nil { + return common.Hash{}, err + } + if len(h) != common.HashLength { + return common.Hash{}, fmt.Errorf("%w: length %d", errInvalidData, len(h)) + } + return common.BytesToHash(h), nil +} + +// ReadChainConfig retrieves the consensus settings based on the given genesis hash. +// The provided `upgradeConfig` (any JSON-unmarshalable type) will be populated if present on disk. +func ReadChainConfig[T any](db ethdb.KeyValueReader, hash common.Hash, upgradeConfig *T) (*params.ChainConfig, error) { + config := rawdb.ReadChainConfig(db, hash) + if config == nil { + return nil, database.ErrNotFound + } + + upgrade, err := db.Get(upgradeConfigKey(hash)) + if err != nil { + return nil, fmt.Errorf("%w: %w", errFailedToGetUpgradeConfig, err) + } + + if len(upgrade) == 0 { + return config, nil + } + + if err := json.Unmarshal(upgrade, upgradeConfig); err != nil { + return nil, errInvalidData + } + + return config, nil +} + +// WriteChainConfig writes the chain config settings to the database. +// The provided `upgradeConfig` (any JSON-marshalable type) will be stored alongside the chain config. 
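// A round-trip sketch (editor's illustration; `upgrades` is a hypothetical JSON-tagged
// struct and `genesisHash` an assumed block hash, neither defined in this change):
//
//	type upgrades struct {
//		X int `json:"x"`
//	}
//	cfg := &params.ChainConfig{ChainID: big.NewInt(43114)}
//	_ = WriteChainConfig(db, genesisHash, cfg, upgrades{X: 7})
//	var got upgrades
//	readCfg, err := ReadChainConfig(db, genesisHash, &got) // readCfg.ChainID == 43114, got.X == 7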
+func WriteChainConfig[T any](db ethdb.KeyValueWriter, hash common.Hash, config *params.ChainConfig, upgradeConfig T) error { + rawdb.WriteChainConfig(db, hash, config) + if config == nil { + return nil + } + + data, err := json.Marshal(upgradeConfig) + if err != nil { + return fmt.Errorf("%w: %w", errFailedToMarshalUpgradeConfig, err) + } + return db.Put(upgradeConfigKey(hash), data) +} + +// NewAccountSnapshotsIterator returns an iterator for walking all of the accounts in the snapshot +func NewAccountSnapshotsIterator(db ethdb.Iteratee) ethdb.Iterator { + it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil) + keyLen := len(rawdb.SnapshotAccountPrefix) + common.HashLength + return rawdb.NewKeyLengthIterator(it, keyLen) +} + +// ReadSnapshotBlockHash retrieves the hash of the block whose state is contained in +// the persisted snapshot. +func ReadSnapshotBlockHash(db ethdb.KeyValueReader) (common.Hash, error) { + ok, err := db.Has(snapshotBlockHashKey) + if err != nil { + return common.Hash{}, err + } + if !ok { + return common.Hash{}, database.ErrNotFound + } + + data, err := db.Get(snapshotBlockHashKey) + if err != nil { + return common.Hash{}, err + } + if len(data) != common.HashLength { + return common.Hash{}, fmt.Errorf("%w: length %d", errInvalidData, len(data)) + } + return common.BytesToHash(data), nil +} + +// WriteSnapshotBlockHash stores the root of the block whose state is contained in +// the persisted snapshot. +func WriteSnapshotBlockHash(db ethdb.KeyValueWriter, blockHash common.Hash) error { + return db.Put(snapshotBlockHashKey, blockHash[:]) +} + +// DeleteSnapshotBlockHash deletes the hash of the block whose state is contained in +// the persisted snapshot. Since snapshots are not immutable, this method can +// be used during updates, so a crash or failure will mark the entire snapshot +// invalid. +func DeleteSnapshotBlockHash(db ethdb.KeyValueWriter) error { + return db.Delete(snapshotBlockHashKey) +} + +func writeTimeMarker(db ethdb.KeyValueWriter, key []byte, ts time.Time) error { + data, err := rlp.EncodeToBytes(uint64(ts.Unix())) + if err != nil { + return err + } + return db.Put(key, data) +} + +func readTimeMarker(db ethdb.KeyValueReader, key []byte) (time.Time, error) { + // Check existence first to map missing marker to a stable sentinel error. + ok, err := db.Has(key) + if err != nil { + return time.Time{}, err + } + if !ok { + return time.Time{}, database.ErrNotFound + } + + data, err := db.Get(key) + if err != nil { + return time.Time{}, err + } + + var unix uint64 + if err := rlp.DecodeBytes(data, &unix); err != nil { + return time.Time{}, fmt.Errorf("%w: %w", errInvalidData, err) + } + + return time.Unix(int64(unix), 0), nil +} + +func upgradeConfigKey(hash common.Hash) []byte { + return append(upgradeConfigPrefix, hash.Bytes()...) +} diff --git a/vms/evm/sync/customrawdb/markers_test.go b/vms/evm/sync/customrawdb/markers_test.go new file mode 100644 index 000000000000..a65f3b1d567f --- /dev/null +++ b/vms/evm/sync/customrawdb/markers_test.go @@ -0,0 +1,245 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package customrawdb + +import ( + "math/big" + "slices" + "testing" + "time" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/params" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" +) + +func TestOfflinePruning(t *testing.T) { + db := rawdb.NewMemoryDatabase() + + // Not present initially. + _, err := ReadOfflinePruning(db) + require.ErrorIs(t, err, database.ErrNotFound) + + // Write marker and read back fixed time. + fixed := time.Unix(1_700_000_000, 0) + require.NoError(t, WriteOfflinePruning(db, fixed)) + ts, err := ReadOfflinePruning(db) + require.NoError(t, err) + require.Equal(t, fixed.Unix(), ts.Unix()) + + // Delete marker. + require.NoError(t, DeleteOfflinePruning(db)) + _, err = ReadOfflinePruning(db) + require.ErrorIs(t, err, database.ErrNotFound) +} + +func TestPopulateMissingTries(t *testing.T) { + db := rawdb.NewMemoryDatabase() + + // Not present initially. + _, err := ReadPopulateMissingTries(db) + require.ErrorIs(t, err, database.ErrNotFound) + + // Write marker and read back fixed time. + fixed := time.Unix(1_700_000_000, 0) + require.NoError(t, WritePopulateMissingTries(db, fixed)) + ts, err := ReadPopulateMissingTries(db) + require.NoError(t, err) + require.Equal(t, fixed.Unix(), ts.Unix()) + + // Delete marker. + require.NoError(t, DeletePopulateMissingTries(db)) + _, err = ReadPopulateMissingTries(db) + require.ErrorIs(t, err, database.ErrNotFound) +} + +func TestOfflinePruning_BadEncoding(t *testing.T) { + db := rawdb.NewMemoryDatabase() + // Write invalid RLP bytes (0xB8 indicates a long string length with missing payload). + require.NoError(t, db.Put(offlinePruningKey, []byte{0xB8})) + _, err := ReadOfflinePruning(db) + require.ErrorIs(t, err, errInvalidData) +} + +func TestPopulateMissingTries_BadEncoding(t *testing.T) { + db := rawdb.NewMemoryDatabase() + // Write invalid RLP bytes (0xB8 indicates a long string length with missing payload). + require.NoError(t, db.Put(populateMissingTriesKey, []byte{0xB8})) + _, err := ReadPopulateMissingTries(db) + require.ErrorIs(t, err, errInvalidData) +} + +func TestPruningDisabledFlag(t *testing.T) { + db := rawdb.NewMemoryDatabase() + + ok, err := HasPruningDisabled(db) + require.NoError(t, err) + require.False(t, ok) + + require.NoError(t, WritePruningDisabled(db)) + + ok, err = HasPruningDisabled(db) + require.NoError(t, err) + require.True(t, ok) +} + +func TestReadAcceptorTip_InvalidLength(t *testing.T) { + db := rawdb.NewMemoryDatabase() + // Write an invalid value under acceptor tip key (wrong length). 
+ require.NoError(t, db.Put(acceptorTipKey, []byte("short"))) + _, err := ReadAcceptorTip(db) + require.ErrorIs(t, err, errInvalidData) +} + +func TestWriteAcceptorTip(t *testing.T) { + tests := []struct { + name string + writes []common.Hash + want common.Hash + wantErr error + }{ + { + name: "none", + writes: nil, + want: common.Hash{}, + wantErr: database.ErrNotFound, + }, + { + name: "single_write", + writes: []common.Hash{common.HexToHash("0xabc1")}, + want: common.HexToHash("0xabc1"), + }, + { + name: "overwrite", + writes: []common.Hash{common.HexToHash("0xabc1"), common.HexToHash("0xabc2")}, + want: common.HexToHash("0xabc2"), + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + db := rawdb.NewMemoryDatabase() + for _, h := range tc.writes { + require.NoError(t, WriteAcceptorTip(db, h)) + } + tip, err := ReadAcceptorTip(db) + require.ErrorIs(t, err, tc.wantErr) + require.Equal(t, tc.want, tip) + }) + } +} + +func TestSnapshotBlockHashReadWriteDelete(t *testing.T) { + db := rawdb.NewMemoryDatabase() + + // Initially empty. + got, err := ReadSnapshotBlockHash(db) + require.ErrorIs(t, err, database.ErrNotFound) + require.Equal(t, common.Hash{}, got) + + // Write and read back. + want := common.HexToHash("0xdeadbeef") + require.NoError(t, WriteSnapshotBlockHash(db, want)) + got, err = ReadSnapshotBlockHash(db) + require.NoError(t, err) + require.Equal(t, want, got) + + // Delete and verify empty. + require.NoError(t, DeleteSnapshotBlockHash(db)) + got, err = ReadSnapshotBlockHash(db) + require.ErrorIs(t, err, database.ErrNotFound) + require.Equal(t, common.Hash{}, got) +} + +func TestNewAccountSnapshotsIterator(t *testing.T) { + db := rawdb.NewMemoryDatabase() + + // Keys that match and don't match the iterator length filter. + a1 := common.HexToHash("0x01") + a2 := common.HexToHash("0x02") + key1 := slices.Concat(rawdb.SnapshotAccountPrefix, a1.Bytes()) + key2 := slices.Concat(rawdb.SnapshotAccountPrefix, a2.Bytes()) + // Non-matching: extra byte appended. + bad := slices.Concat(key1, []byte{0x00}) + + require.NoError(t, db.Put(key1, []byte("v1"))) + require.NoError(t, db.Put(key2, []byte("v2"))) + require.NoError(t, db.Put(bad, []byte("nope"))) + + it := NewAccountSnapshotsIterator(db) + defer it.Release() + count := 0 + for it.Next() { + count++ + } + require.NoError(t, it.Error()) + require.Equal(t, 2, count) +} + +func TestSnapshotBlockHash_InvalidLength(t *testing.T) { + db := rawdb.NewMemoryDatabase() + // Write wrong length value and assert invalid encoding. 
+ require.NoError(t, db.Put(snapshotBlockHashKey, []byte("short"))) + _, err := ReadSnapshotBlockHash(db) + require.ErrorIs(t, err, errInvalidData) +} + +func TestChainConfigCases(t *testing.T) { + type upgrade struct { + X int `json:"x"` + } + + tests := []struct { + name string + cfg *params.ChainConfig + inputUpgrade *upgrade // nil => no overwrite + wantErr error + }{ + { + name: "valid_upgrade", + cfg: ¶ms.ChainConfig{ChainID: big.NewInt(1)}, + inputUpgrade: &upgrade{X: 7}, + }, + { + name: "nil_config", + wantErr: database.ErrNotFound, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + db := rawdb.NewMemoryDatabase() + h := common.HexToHash("0x100") + + require.NoError(t, WriteChainConfig(db, h, tc.cfg, upgrade{X: 0})) + if tc.inputUpgrade != nil && tc.cfg != nil { + require.NoError(t, WriteChainConfig(db, h, tc.cfg, *tc.inputUpgrade)) + } + + var out upgrade + _, err := ReadChainConfig(db, h, &out) + require.ErrorIs(t, err, tc.wantErr) + if tc.wantErr == nil { + require.Equal(t, *tc.inputUpgrade, out) + } + }) + } +} + +func TestReadChainConfig_InvalidUpgradeJSONReturnsNil(t *testing.T) { + db := rawdb.NewMemoryDatabase() + hash := common.HexToHash("0xbeef") + // Write a valid base chain config. + rawdb.WriteChainConfig(db, hash, ¶ms.ChainConfig{}) + // Write invalid upgrade JSON. + require.NoError(t, db.Put(upgradeConfigKey(hash), []byte("{"))) + + var out struct{} + got, err := ReadChainConfig(db, hash, &out) + require.ErrorIs(t, err, errInvalidData) + require.Nil(t, got) +} diff --git a/vms/evm/sync/customrawdb/sync_progress.go b/vms/evm/sync/customrawdb/sync_progress.go new file mode 100644 index 000000000000..b845fd26aa6f --- /dev/null +++ b/vms/evm/sync/customrawdb/sync_progress.go @@ -0,0 +1,240 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package customrawdb + +import ( + "encoding/binary" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils/wrappers" +) + +var ( + // syncRootKey indicates the root of the main account trie currently being synced. + syncRootKey = []byte("sync_root") + // syncStorageTriesPrefix + trie root + account hash indicates a storage trie must be fetched for the account. + syncStorageTriesPrefix = []byte("sync_storage") + // syncSegmentsPrefix + trie root + 32-byte start key indicates the trie at root has a segment starting at the specified key. + syncSegmentsPrefix = []byte("sync_segments") + // CodeToFetchPrefix + code hash -> empty value tracks the outstanding code hashes we need to fetch. + CodeToFetchPrefix = []byte("CP") + + // === State sync progress key lengths === + syncStorageTriesKeyLength = len(syncStorageTriesPrefix) + 2*common.HashLength + syncSegmentsKeyLength = len(syncSegmentsPrefix) + 2*common.HashLength + codeToFetchKeyLength = len(CodeToFetchPrefix) + common.HashLength + + // === State sync metadata === + syncPerformedPrefix = []byte("sync_performed") + // syncPerformedKeyLength is the length of the key for the sync performed metadata key, + // and is equal to [syncPerformedPrefix] + block number as uint64. + syncPerformedKeyLength = len(syncPerformedPrefix) + wrappers.LongLen +) + +// ReadSyncRoot reads the root corresponding to the main trie of an in-progress +// sync and returns common.Hash{} if no in-progress sync was found. 
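// (Editor's clarification: the zero hash is returned together with database.ErrNotFound,
// so callers can distinguish "no sync in progress" from an unexpected read failure.)
//
//	root, err := ReadSyncRoot(db)
//	switch {
//	case errors.Is(err, database.ErrNotFound): // no sync in progress
//	case err != nil:                           // unexpected failure
//	default:                                   // resume syncing the trie rooted at root
//	}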
+func ReadSyncRoot(db ethdb.KeyValueReader) (common.Hash, error) { + ok, err := db.Has(syncRootKey) + if err != nil { + return common.Hash{}, err + } + if !ok { + return common.Hash{}, database.ErrNotFound + } + root, err := db.Get(syncRootKey) + if err != nil { + return common.Hash{}, err + } + return common.BytesToHash(root), nil +} + +// WriteSyncRoot writes root as the root of the main trie of the in-progress sync. +func WriteSyncRoot(db ethdb.KeyValueWriter, root common.Hash) error { + return db.Put(syncRootKey, root[:]) +} + +// WriteCodeToFetch adds a marker that we need to fetch the code for `hash`. +func WriteCodeToFetch(db ethdb.KeyValueWriter, codeHash common.Hash) error { + return db.Put(codeToFetchKey(codeHash), nil) +} + +// DeleteCodeToFetch removes the marker that the code corresponding to `hash` needs to be fetched. +func DeleteCodeToFetch(db ethdb.KeyValueWriter, codeHash common.Hash) error { + return db.Delete(codeToFetchKey(codeHash)) +} + +// NewCodeToFetchIterator returns a KeyLength iterator over all code +// hashes that are pending syncing. It is the caller's responsibility to +// parse the key and call Release on the returned iterator. +func NewCodeToFetchIterator(db ethdb.Iteratee) ethdb.Iterator { + return rawdb.NewKeyLengthIterator( + db.NewIterator(CodeToFetchPrefix, nil), + codeToFetchKeyLength, + ) +} + +func codeToFetchKey(codeHash common.Hash) []byte { + codeToFetchKey := make([]byte, codeToFetchKeyLength) + copy(codeToFetchKey, CodeToFetchPrefix) + copy(codeToFetchKey[len(CodeToFetchPrefix):], codeHash[:]) + return codeToFetchKey +} + +// NewSyncSegmentsIterator returns a KeyLength iterator over all trie segments +// added for root. It is the caller's responsibility to parse the key and call +// Release on the returned iterator. +func NewSyncSegmentsIterator(db ethdb.Iteratee, root common.Hash) ethdb.Iterator { + segmentsPrefix := make([]byte, len(syncSegmentsPrefix)+common.HashLength) + copy(segmentsPrefix, syncSegmentsPrefix) + copy(segmentsPrefix[len(syncSegmentsPrefix):], root[:]) + + return rawdb.NewKeyLengthIterator( + db.NewIterator(segmentsPrefix, nil), + syncSegmentsKeyLength, + ) +} + +// WriteSyncSegment adds a trie segment for root at the given start position. +func WriteSyncSegment(db ethdb.KeyValueWriter, root common.Hash, start common.Hash) error { + // packs root and account into a key for storage in db. + bytes := make([]byte, syncSegmentsKeyLength) + copy(bytes, syncSegmentsPrefix) + copy(bytes[len(syncSegmentsPrefix):], root[:]) + copy(bytes[len(syncSegmentsPrefix)+common.HashLength:], start.Bytes()) + return db.Put(bytes, nil) +} + +// ClearSyncSegments removes segment markers for root from db +func ClearSyncSegments(db ethdb.KeyValueStore, root common.Hash) error { + segmentsPrefix := make([]byte, len(syncSegmentsPrefix)+common.HashLength) + copy(segmentsPrefix, syncSegmentsPrefix) + copy(segmentsPrefix[len(syncSegmentsPrefix):], root[:]) + return clearPrefix(db, segmentsPrefix, syncSegmentsKeyLength) +} + +// ClearAllSyncSegments removes all segment markers from db +func ClearAllSyncSegments(db ethdb.KeyValueStore) error { + return clearPrefix(db, syncSegmentsPrefix, syncSegmentsKeyLength) +} + +// ParseSyncSegmentKey returns the root and start position for a trie segment +// key returned from NewSyncSegmentsIterator. 
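// The key layout assumed here is syncSegmentsPrefix | 32-byte root | 32-byte start key,
// matching WriteSyncSegment above. A usage sketch (editor's illustration):
//
//	it := NewSyncSegmentsIterator(db, root)
//	defer it.Release()
//	for it.Next() {
//		_, start := ParseSyncSegmentKey(it.Key())
//		// start is the 32-byte segment start position within the trie at root
//	}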
+func ParseSyncSegmentKey(keyBytes []byte) (common.Hash, []byte) { + keyBytes = keyBytes[len(syncSegmentsPrefix):] // skip prefix + root := common.BytesToHash(keyBytes[:common.HashLength]) + start := keyBytes[common.HashLength:] + return root, start +} + +// NewSyncStorageTriesIterator returns a KeyLength iterator over all storage tries +// added for syncing (beginning at seek). It is the caller's responsibility to parse +// the key and call Release on the returned iterator. +func NewSyncStorageTriesIterator(db ethdb.Iteratee, seek []byte) ethdb.Iterator { + return rawdb.NewKeyLengthIterator(db.NewIterator(syncStorageTriesPrefix, seek), syncStorageTriesKeyLength) +} + +// WriteSyncStorageTrie adds a storage trie for account (with the given root) to be synced. +func WriteSyncStorageTrie(db ethdb.KeyValueWriter, root common.Hash, account common.Hash) error { + bytes := make([]byte, syncStorageTriesKeyLength) + copy(bytes, syncStorageTriesPrefix) + copy(bytes[len(syncStorageTriesPrefix):], root[:]) + copy(bytes[len(syncStorageTriesPrefix)+common.HashLength:], account[:]) + return db.Put(bytes, nil) +} + +// ClearSyncStorageTrie removes all storage trie accounts (with the given root) from db. +// Intended for use when the trie with root has completed syncing. +func ClearSyncStorageTrie(db ethdb.KeyValueStore, root common.Hash) error { + accountsPrefix := make([]byte, len(syncStorageTriesPrefix)+common.HashLength) + copy(accountsPrefix, syncStorageTriesPrefix) + copy(accountsPrefix[len(syncStorageTriesPrefix):], root[:]) + return clearPrefix(db, accountsPrefix, syncStorageTriesKeyLength) +} + +// ClearAllSyncStorageTries removes all storage tries added for syncing from db +func ClearAllSyncStorageTries(db ethdb.KeyValueStore) error { + return clearPrefix(db, syncStorageTriesPrefix, syncStorageTriesKeyLength) +} + +// ParseSyncStorageTrieKey returns the root and account for a storage trie +// key returned from NewSyncStorageTriesIterator. It assumes the key has the +// `syncStorageTriesPrefix` followed by a 32-byte root and 32-byte account hash, +// and panics if the key is shorter than len(syncStorageTriesPrefix)+2*common.HashLength. +func ParseSyncStorageTrieKey(keyBytes []byte) (common.Hash, common.Hash) { + keyBytes = keyBytes[len(syncStorageTriesPrefix):] // skip prefix + root := common.BytesToHash(keyBytes[:common.HashLength]) + account := common.BytesToHash(keyBytes[common.HashLength:]) + return root, account +} + +// WriteSyncPerformed logs an entry in `db` indicating the VM state synced to `blockNumber`. +func WriteSyncPerformed(db ethdb.KeyValueWriter, blockNumber uint64) error { + bytes := make([]byte, syncPerformedKeyLength) + copy(bytes, syncPerformedPrefix) + binary.BigEndian.PutUint64(bytes[len(syncPerformedPrefix):], blockNumber) + return db.Put(bytes, nil) +} + +// NewSyncPerformedIterator returns an iterator over all block numbers the VM +// has state synced to. +func NewSyncPerformedIterator(db ethdb.Iteratee) ethdb.Iterator { + return rawdb.NewKeyLengthIterator(db.NewIterator(syncPerformedPrefix, nil), syncPerformedKeyLength) +} + +// GetLatestSyncPerformed returns the latest block number state synced performed to. 
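// That is, it scans the markers written by WriteSyncPerformed and returns the highest
// recorded block number, or 0 when none exist. For example (editor's sketch):
//
//	_ = WriteSyncPerformed(db, 10)
//	_ = WriteSyncPerformed(db, 20)
//	latest, _ := GetLatestSyncPerformed(db) // latest == 20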
+func GetLatestSyncPerformed(db ethdb.Iteratee) (uint64, error) { + it := NewSyncPerformedIterator(db) + defer it.Release() + + var latestSyncPerformed uint64 + for it.Next() { + syncPerformed := parseSyncPerformedKey(it.Key()) + if syncPerformed > latestSyncPerformed { + latestSyncPerformed = syncPerformed + } + } + return latestSyncPerformed, it.Error() +} + +// clearPrefix removes all keys in db that begin with prefix and match an +// expected key length. `keyLen` must include the length of the prefix. +func clearPrefix(db ethdb.KeyValueStore, prefix []byte, keyLen int) error { + it := db.NewIterator(prefix, nil) + defer it.Release() + + batch := db.NewBatch() + for it.Next() { + key := it.Key() + if len(key) != keyLen { + continue + } + key = common.CopyBytes(key) + + if err := batch.Delete(key); err != nil { + return err + } + if batch.ValueSize() > ethdb.IdealBatchSize { + if err := batch.Write(); err != nil { + return err + } + batch.Reset() + } + } + if err := it.Error(); err != nil { + return err + } + return batch.Write() +} + +// parseSyncPerformedKey returns the block number from keys the iterator returned +// from NewSyncPerformedIterator. It assumes the key has the syncPerformedPrefix +// followed by an 8-byte big-endian block number, and panics if the key is shorter +// than len(syncPerformedPrefix)+wrappers.LongLen. +func parseSyncPerformedKey(key []byte) uint64 { + return binary.BigEndian.Uint64(key[len(syncPerformedPrefix):]) +} diff --git a/vms/evm/sync/customrawdb/sync_progress_test.go b/vms/evm/sync/customrawdb/sync_progress_test.go new file mode 100644 index 000000000000..add01f10b38c --- /dev/null +++ b/vms/evm/sync/customrawdb/sync_progress_test.go @@ -0,0 +1,396 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package customrawdb + +import ( + "math/big" + "slices" + "testing" + + "github.com/ava-labs/libevm/common" + "github.com/ava-labs/libevm/core/rawdb" + "github.com/ava-labs/libevm/ethdb" + "github.com/ava-labs/libevm/params" + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/utils/set" +) + +func TestClearAllSyncSegments(t *testing.T) { + db := rawdb.NewMemoryDatabase() + // add a key that should be cleared + require.NoError(t, WriteSyncSegment(db, common.Hash{1}, common.Hash{})) + + // add a key that should not be cleared + key := slices.Concat(syncSegmentsPrefix, []byte("foo")) + require.NoError(t, db.Put(key, []byte("bar"))) + + require.NoError(t, ClearAllSyncSegments(db)) + + // No well-formed segment keys should remain. + iter := rawdb.NewKeyLengthIterator(db.NewIterator(syncSegmentsPrefix, nil), syncSegmentsKeyLength) + keys := mapIterator(t, iter, common.CopyBytes) + require.Empty(t, keys) + // The malformed key should still be present. 
+ has, err := db.Has(key) + require.NoError(t, err) + require.True(t, has) +} + +func TestWriteReadSyncRoot(t *testing.T) { + db := rawdb.NewMemoryDatabase() + + // No root written yet + root, err := ReadSyncRoot(db) + require.ErrorIs(t, err, database.ErrNotFound) + require.Zero(t, root) + + // Write and read back + want := common.HexToHash("0x01") + require.NoError(t, WriteSyncRoot(db, want)) + got, err := ReadSyncRoot(db) + require.NoError(t, err) + require.Equal(t, want, got) +} + +func TestCodeToFetchIteratorAndDelete(t *testing.T) { + db := rawdb.NewMemoryDatabase() + + h1 := common.HexToHash("0x11") + h2 := common.HexToHash("0x22") + + require.NoError(t, WriteCodeToFetch(db, h1)) + require.NoError(t, WriteCodeToFetch(db, h2)) + + // Insert a malformed key that should be ignored by the iterator (wrong length) + bad := slices.Concat(CodeToFetchPrefix, h1.Bytes(), []byte{0x00}) + require.NoError(t, db.Put(bad, []byte("x"))) + + // Collect hashes from iterator and assert presence. + vals := mapIterator(t, NewCodeToFetchIterator(db), func(key []byte) common.Hash { + return common.BytesToHash(key[len(CodeToFetchPrefix):]) + }) + + seen := set.Of(vals...) + require.Contains(t, seen, h1) + require.Contains(t, seen, h2) + + // Delete one and confirm only one remains + require.NoError(t, DeleteCodeToFetch(db, h1)) + iter := rawdb.NewKeyLengthIterator(db.NewIterator(CodeToFetchPrefix, nil), codeToFetchKeyLength) + keys := mapIterator(t, iter, common.CopyBytes) + require.Len(t, keys, 1) +} + +func TestSyncSegmentsIteratorUnpackAndClear(t *testing.T) { + db := rawdb.NewMemoryDatabase() + rootA := common.HexToHash("0xaaa") + rootB := common.HexToHash("0xbbb") + start1 := common.HexToHash("0x01") + start2 := common.HexToHash("0x02") + start3 := common.HexToHash("0x03") + + require.NoError(t, WriteSyncSegment(db, rootA, start1)) + require.NoError(t, WriteSyncSegment(db, rootA, start2)) + require.NoError(t, WriteSyncSegment(db, rootB, start3)) + + // Iterate only over rootA and assert exact keys. + keys := mapIterator(t, NewSyncSegmentsIterator(db, rootA), common.CopyBytes) + expectedA := [][]byte{buildSegmentKey(rootA, start1), buildSegmentKey(rootA, start2)} + require.Equal(t, expectedA, keys) + + // Clear only rootA. + require.NoError(t, ClearSyncSegments(db, rootA)) + keys = mapIterator(t, NewSyncSegmentsIterator(db, rootA), common.CopyBytes) + require.Empty(t, keys) + + // RootB remains. 
+ keys = mapIterator(t, NewSyncSegmentsIterator(db, rootB), common.CopyBytes) + expectedB := [][]byte{buildSegmentKey(rootB, start3)} + require.Equal(t, expectedB, keys) +} + +func TestStorageTriesIteratorUnpackAndClear(t *testing.T) { + db := rawdb.NewMemoryDatabase() + root := common.HexToHash("0xabc") + acct1 := common.HexToHash("0x01") + acct2 := common.HexToHash("0x02") + + require.NoError(t, WriteSyncStorageTrie(db, root, acct1)) + require.NoError(t, WriteSyncStorageTrie(db, root, acct2)) + + keys := mapIterator(t, NewSyncStorageTriesIterator(db, nil), common.CopyBytes) + expected := [][]byte{buildStorageTrieKey(root, acct1), buildStorageTrieKey(root, acct2)} + require.Equal(t, expected, keys) + + require.NoError(t, ClearSyncStorageTrie(db, root)) + keys = mapIterator(t, NewSyncStorageTriesIterator(db, nil), common.CopyBytes) + require.Empty(t, keys) +} + +func TestClearAllSyncStorageTries(t *testing.T) { + db := rawdb.NewMemoryDatabase() + root := common.HexToHash("0xabc") + // Keys that should be cleared + require.NoError(t, WriteSyncStorageTrie(db, root, common.HexToHash("0x01"))) + require.NoError(t, WriteSyncStorageTrie(db, root, common.HexToHash("0x02"))) + // Key that should not be cleared due to wrong length. + bad := slices.Concat( + syncStorageTriesPrefix, + root.Bytes(), + common.HexToHash("0xff").Bytes(), + []byte{0x00}, + ) + require.NoError(t, db.Put(bad, []byte("x"))) + + require.NoError(t, ClearAllSyncStorageTries(db)) + + // No well-formed storage trie keys should remain. + iter := rawdb.NewKeyLengthIterator(db.NewIterator(syncStorageTriesPrefix, nil), syncStorageTriesKeyLength) + keys := mapIterator(t, iter, common.CopyBytes) + require.Empty(t, keys) + // The malformed key should still be present. + has, err := db.Has(bad) + require.NoError(t, err) + require.True(t, has) +} + +func TestClearSyncSegments_NoKeys(t *testing.T) { + db := rawdb.NewMemoryDatabase() + root := common.HexToHash("0xabc") + + require.NoError(t, ClearSyncSegments(db, root)) + it := NewSyncSegmentsIterator(db, root) + require.Empty(t, mapIterator(t, it, common.CopyBytes)) + require.NoError(t, it.Error()) +} + +func TestClearSyncStorageTrie_NoKeys(t *testing.T) { + db := rawdb.NewMemoryDatabase() + root := common.HexToHash("0xabc") + + require.NoError(t, ClearSyncStorageTrie(db, root)) + it := NewSyncStorageTriesIterator(db, nil) + require.Empty(t, mapIterator(t, it, common.CopyBytes)) + require.NoError(t, it.Error()) +} + +func TestSyncPerformedAndLatest(t *testing.T) { + db := rawdb.NewMemoryDatabase() + + require.NoError(t, WriteSyncPerformed(db, 10)) + require.NoError(t, WriteSyncPerformed(db, 20)) + require.NoError(t, WriteSyncPerformed(db, 15)) + + // Iterator yields all + vals := mapIterator(t, NewSyncPerformedIterator(db), parseSyncPerformedKey) + + require.Equal(t, []uint64{10, 15, 20}, vals) + + // Latest is max + latest, err := GetLatestSyncPerformed(db) + require.NoError(t, err) + require.Equal(t, uint64(20), latest) +} + +func TestGetLatestSyncPerformedEmpty(t *testing.T) { + db := rawdb.NewMemoryDatabase() + latest, err := GetLatestSyncPerformed(db) + require.NoError(t, err) + require.Zero(t, latest) +} + +func TestChainConfigReadWriteWithUpgrade(t *testing.T) { + db := rawdb.NewMemoryDatabase() + type upgradeCfg struct { + X int `json:"x"` + } + + hash := common.HexToHash("0xcafe") + cfg := ¶ms.ChainConfig{ChainID: big.NewInt(123)} + require.NoError(t, WriteChainConfig(db, hash, cfg, upgradeCfg{X: 7})) + + var out upgradeCfg + gotCfg, err := ReadChainConfig(db, hash, &out) + 
require.NoError(t, err) + require.NotNil(t, gotCfg) + require.Equal(t, cfg.ChainID, gotCfg.ChainID) + require.Equal(t, 7, out.X) +} + +func TestChainConfigNilDoesNotWriteUpgrade(t *testing.T) { + db := rawdb.NewMemoryDatabase() + hash := common.HexToHash("0xadd") + // Passing nil config should not write upgrade bytes + require.NoError(t, WriteChainConfig(db, hash, nil, struct{}{})) + + ok, err := db.Has(upgradeConfigKey(hash)) + require.NoError(t, err) + require.False(t, ok) +} + +func TestSyncPerformedLatestCases(t *testing.T) { + tests := []struct { + name string + writes []uint64 + want uint64 + }{ + { + name: "empty", + want: 0, + }, + { + name: "increasing", + writes: []uint64{1, 2, 3}, + want: 3, + }, + { + name: "unsorted", + writes: []uint64{10, 5, 7}, + want: 10, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + db := rawdb.NewMemoryDatabase() + for _, n := range tc.writes { + require.NoError(t, WriteSyncPerformed(db, n)) + } + latest, err := GetLatestSyncPerformed(db) + require.NoError(t, err) + require.Equal(t, tc.want, latest) + }) + } +} + +func TestSyncSegmentsByRootTable(t *testing.T) { + tests := []struct { + name string + root common.Hash + starts []common.Hash + }{ + { + name: "segments_multiple_starts", + root: common.HexToHash("0xaaa"), + starts: []common.Hash{common.HexToHash("0x1"), common.HexToHash("0x2")}, + }, + { + name: "segments_single_start", + root: common.HexToHash("0xbbb"), + starts: []common.Hash{common.HexToHash("0x3")}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + db := rawdb.NewMemoryDatabase() + for _, s := range tc.starts { + require.NoError(t, WriteSyncSegment(db, tc.root, s)) + } + got := mapIterator(t, NewSyncSegmentsIterator(db, tc.root), func(k []byte) common.Hash { + _, start := ParseSyncSegmentKey(k) + return common.BytesToHash(start) + }) + require.Equal(t, tc.starts, got) + }) + } +} + +func TestSyncStorageTriesByRootTable(t *testing.T) { + tests := []struct { + name string + root common.Hash + accounts []common.Hash + }{ + { + name: "storage_multiple_accounts", + root: common.HexToHash("0xabc"), + accounts: []common.Hash{common.HexToHash("0x1"), common.HexToHash("0x2")}, + }, + { + name: "storage_single_account", + root: common.HexToHash("0xdef"), + accounts: []common.Hash{common.HexToHash("0x3")}, + }, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + db := rawdb.NewMemoryDatabase() + for _, a := range tc.accounts { + require.NoError(t, WriteSyncStorageTrie(db, tc.root, a)) + } + got := mapIterator(t, NewSyncStorageTriesIterator(db, nil), func(k []byte) common.Hash { + _, a := ParseSyncStorageTrieKey(k) + return a + }) + require.Equal(t, tc.accounts, got) + }) + } +} + +func TestCodeToFetchCases(t *testing.T) { + h1 := common.HexToHash("0x1") + h2 := common.HexToHash("0x2") + h3 := common.HexToHash("0x3") + + tests := []struct { + name string + hashes []common.Hash + delIdx int // -1 => no delete + want int + }{ + { + name: "none", + delIdx: -1, + want: 0, + }, + { + name: "three_keep", + hashes: []common.Hash{h1, h2, h3}, + delIdx: -1, + want: 3, + }, + { + name: "three_delete_one", + hashes: []common.Hash{h1, h2, h3}, + delIdx: 1, + want: 2, + }, + } + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + db := rawdb.NewMemoryDatabase() + for _, h := range tc.hashes { + require.NoError(t, WriteCodeToFetch(db, h)) + } + if tc.delIdx >= 0 { + require.NoError(t, DeleteCodeToFetch(db, tc.hashes[tc.delIdx])) + } + iter := 
rawdb.NewKeyLengthIterator(db.NewIterator(CodeToFetchPrefix, nil), codeToFetchKeyLength) + keys := mapIterator(t, iter, common.CopyBytes) + require.Len(t, keys, tc.want) + }) + } +} + +func mapIterator[T any](t *testing.T, it ethdb.Iterator, f func([]byte) T) []T { + t.Helper() + defer it.Release() + var out []T + for it.Next() { + out = append(out, f(it.Key())) + } + require.NoError(t, it.Error()) + return out +} + +func buildSegmentKey(root, start common.Hash) []byte { + return slices.Concat(syncSegmentsPrefix, root[:], start[:]) +} + +func buildStorageTrieKey(root, account common.Hash) []byte { + return slices.Concat(syncStorageTriesPrefix, root[:], account[:]) +} diff --git a/vms/evm/sync/types/types.go b/vms/evm/sync/types/types.go new file mode 100644 index 000000000000..fe009f3d9414 --- /dev/null +++ b/vms/evm/sync/types/types.go @@ -0,0 +1,22 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package types + +import "context" + +// Syncer is the common interface for all sync operations. +// This provides a unified interface for atomic state sync and state trie sync. +type Syncer interface { + // Sync completes the full sync operation, returning any errors encountered. + // The sync will respect context cancellation. + Sync(ctx context.Context) error + + // Name returns a human-readable name for this syncer implementation. + Name() string + + // ID returns a stable, machine-oriented identifier (e.g., "state_block_sync", "state_code_sync", + // "state_evm_state_sync", "state_atomic_sync"). Implementations should ensure this is unique and + // stable across renames for logging/metrics/deduplication. + ID() string +} diff --git a/vms/evm/uptimetracker/state.go b/vms/evm/uptimetracker/state.go new file mode 100644 index 000000000000..b0cee20af5d9 --- /dev/null +++ b/vms/evm/uptimetracker/state.go @@ -0,0 +1,236 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
+ +package uptimetracker + +import ( + "fmt" + "math" + "time" + + "golang.org/x/exp/maps" + + "github.com/ava-labs/avalanchego/codec" + "github.com/ava-labs/avalanchego/codec/linearcodec" + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/utils/set" +) + +const codecVersion uint16 = 0 + +var ( + codecManager codec.Manager + _ uptime.State = (*state)(nil) +) + +func init() { + codecManager = codec.NewManager(math.MaxInt32) + c := linearcodec.NewDefault() + + if err := c.RegisterType(validator{}); err != nil { + panic(fmt.Errorf("failed to register type: %w", err)) + } + + if err := codecManager.RegisterCodec(codecVersion, c); err != nil { + panic(fmt.Errorf("failed to register codec: %w", err)) + } +} + +type validator struct { + UpDuration time.Duration `serialize:"true"` + LastUpdated uint64 `serialize:"true"` + NodeID ids.NodeID `serialize:"true"` + Weight uint64 `serialize:"true"` + StartTime uint64 `serialize:"true"` + IsActive bool `serialize:"true"` + IsL1Validator bool `serialize:"true"` + + validationID ids.ID +} + +// state holds the on-disk and cached representation of the validator set +type state struct { + db database.Database + + validators map[ids.ID]*validator + nodeIDsToValidationIDs map[ids.NodeID]ids.ID + updatedValidators set.Set[ids.ID] + deletedValidators set.Set[ids.ID] +} + +func newState(db database.Database) (*state, error) { + s := &state{ + db: db, + validators: make(map[ids.ID]*validator), + nodeIDsToValidationIDs: make(map[ids.NodeID]ids.ID), + } + + it := db.NewIterator() + defer it.Release() + + for it.Next() { + validationID, err := ids.ToID(it.Key()) + if err != nil { + return nil, fmt.Errorf("failed to parse validation ID: %w", err) + } + + vdr := &validator{ + validationID: validationID, + } + + if _, err := codecManager.Unmarshal(it.Value(), vdr); err != nil { + return nil, fmt.Errorf("failed to unmarshal validator: %w", err) + } + + s.addValidatorToMemory(validationID, vdr) + } + + if err := it.Error(); err != nil { + return nil, fmt.Errorf("failed to iterate: %w", err) + } + + return s, nil +} + +func (s *state) GetUptime(nodeID ids.NodeID) (time.Duration, time.Time, error) { + v, ok := s.getValidatorByNodeID(nodeID) + if !ok { + return 0, time.Time{}, database.ErrNotFound + } + + return v.UpDuration, time.Unix(int64(v.LastUpdated), 0), nil +} + +func (s *state) SetUptime( + nodeID ids.NodeID, + upDuration time.Duration, + lastUpdated time.Time, +) error { + v, ok := s.getValidatorByNodeID(nodeID) + if !ok { + return database.ErrNotFound + } + + v.UpDuration = upDuration + v.LastUpdated = uint64(lastUpdated.Unix()) + + s.updatedValidators.Add(v.validationID) + + return nil +} + +func (s *state) GetStartTime(nodeID ids.NodeID) (time.Time, error) { + v, ok := s.getValidatorByNodeID(nodeID) + if !ok { + return time.Time{}, database.ErrNotFound + } + + return time.Unix(int64(v.LastUpdated), 0), nil +} + +// addNewValidator adds a new validator to the state and marks it for +// persistence to the database. This should be used when adding validators +// during runtime operations. +func (s *state) addNewValidator(vdr *validator) { + s.addValidatorToMemory(vdr.validationID, vdr) + s.updatedValidators.Add(vdr.validationID) +} + +// updateValidator sets the isActive state of the validator with the given +// validationID -- this function assumes that a validator with the given +// validationID exists. 
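// (Editor's addition: the boolean result reports whether the stored status actually
// changed; a no-op update returns false and does not mark the validator for persistence.
// `vID` below is a placeholder validation ID.)
//
//	if s.updateValidator(vID, false) {
//		// status flipped from active to inactive; writeModifications will persist it
//	}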
+func (s *state) updateValidator(validationID ids.ID, isActive bool) bool { + v := s.validators[validationID] + + if v.IsActive == isActive { + return false + } + + v.IsActive = isActive + s.updatedValidators.Add(validationID) + return true +} + +// deleteValidator deletes the validator with the given validationID -- this +// function assumes that a validator with the given validationID exists. +func (s *state) deleteValidator(validationID ids.ID) { + v := s.validators[validationID] + + delete(s.validators, v.validationID) + delete(s.nodeIDsToValidationIDs, v.NodeID) + + s.deletedValidators.Add(v.validationID) +} + +func (s *state) writeModifications() error { + batch := s.db.NewBatch() + + for validationID := range s.updatedValidators { + validatorBytes, err := codecManager.Marshal( + codecVersion, + s.validators[validationID], + ) + if err != nil { + return fmt.Errorf("failed to marshal validator: %w", err) + } + + if err := batch.Put(validationID[:], validatorBytes); err != nil { + return fmt.Errorf("failed to put validator: %w", err) + } + } + + for validationID := range s.deletedValidators { + if err := batch.Delete(validationID[:]); err != nil { + return fmt.Errorf("failed to delete validator: %w", err) + } + } + + if err := batch.Write(); err != nil { + return fmt.Errorf("failed to write batch: %w", err) + } + + // We have written all pending updates + clear(s.updatedValidators) + clear(s.deletedValidators) + + return nil +} + +// addValidatorToMemory adds a validator to the in-memory data structures only. +// This is used during initialization when loading from the database and does not +// mark the validator for persistence. +func (s *state) addValidatorToMemory(validationID ids.ID, validator *validator) { + s.validators[validationID] = validator + s.nodeIDsToValidationIDs[validator.NodeID] = validationID +} + +func (s *state) getValidatorByNodeID(nodeID ids.NodeID) (*validator, bool) { + validationID, ok := s.nodeIDsToValidationIDs[nodeID] + if !ok { + return nil, false + } + + // we are guaranteed to have this validator + v := s.validators[validationID] + return v, true +} + +func (s *state) hasValidationID(validationID ids.ID) bool { + _, ok := s.validators[validationID] + return ok +} + +func (s *state) getNodeID(validationID ids.ID) (ids.NodeID, bool) { + v, ok := s.validators[validationID] + if !ok { + return ids.NodeID{}, false + } + + return v.NodeID, true +} + +func (s *state) getNodeIDs() []ids.NodeID { + return maps.Keys(s.nodeIDsToValidationIDs) +} diff --git a/vms/evm/uptimetracker/uptime_tracker.go b/vms/evm/uptimetracker/uptime_tracker.go new file mode 100644 index 000000000000..3470a758bd3b --- /dev/null +++ b/vms/evm/uptimetracker/uptime_tracker.go @@ -0,0 +1,264 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. 
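For reviewers, a hedged sketch of how a VM could wire up the UptimeTracker defined in the file below, using only the exported API introduced by this change (New, Connect, Disconnect, Sync, GetUptime, Shutdown). The stand-in validator state and the main wrapper are illustrative assumptions, not part of the patch.

package main

import (
	"context"
	"fmt"

	"github.com/ava-labs/avalanchego/database/memdb"
	"github.com/ava-labs/avalanchego/ids"
	"github.com/ava-labs/avalanchego/snow/validators"
	"github.com/ava-labs/avalanchego/snow/validators/validatorstest"
	"github.com/ava-labs/avalanchego/utils/timer/mockable"
	"github.com/ava-labs/avalanchego/vms/evm/uptimetracker"
)

func main() {
	validationID := ids.GenerateTestID()
	nodeID := ids.GenerateTestNodeID()

	// Stand-in for the P-Chain validator state; a real VM passes its own
	// validators.State implementation.
	validatorState := &validatorstest.State{
		GetCurrentValidatorSetF: func(context.Context, ids.ID) (map[ids.ID]*validators.GetCurrentValidatorOutput, uint64, error) {
			return map[ids.ID]*validators.GetCurrentValidatorOutput{
				validationID: {
					ValidationID: validationID,
					NodeID:       nodeID,
					IsActive:     true,
				},
			}, 1, nil
		},
	}

	tracker, err := uptimetracker.New(validatorState, ids.GenerateTestID(), memdb.New(), &mockable.Clock{})
	if err != nil {
		panic(err)
	}

	// Typical wiring: Connect/Disconnect from the peer handlers, Sync on a
	// periodic cadence (or per accepted block), Shutdown when the VM stops.
	if err := tracker.Sync(context.Background()); err != nil {
		panic(err)
	}
	if err := tracker.Connect(nodeID); err != nil {
		panic(err)
	}

	uptime, lastUpdated, err := tracker.GetUptime(validationID)
	fmt.Println(uptime, lastUpdated, err)

	if err := tracker.Shutdown(); err != nil {
		panic(err)
	}
}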
+ +package uptimetracker + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "github.com/ava-labs/avalanchego/database" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/uptime" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/utils/set" + "github.com/ava-labs/avalanchego/utils/timer/mockable" +) + +var ErrValidationIDNotFound = errors.New("validationID not found") + +// UptimeTracker tracks uptime information for validators +type UptimeTracker struct { + validatorState validators.State + subnetID ids.ID + manager uptime.Manager + + lock sync.Mutex + height uint64 + state *state + synced bool + connectedValidators set.Set[ids.NodeID] + // Deactivated validators are treated as being offline + deactivatedValidators set.Set[ids.NodeID] +} + +// New returns a new instance of UptimeTracker +func New( + validatorState validators.State, + subnetID ids.ID, + db database.Database, + clock *mockable.Clock, +) (*UptimeTracker, error) { + s, err := newState(db) + if err != nil { + return nil, fmt.Errorf("failed to initialize state: %w", err) + } + + return &UptimeTracker{ + validatorState: validatorState, + subnetID: subnetID, + manager: uptime.NewManager(s, clock), + synced: false, + state: s, + }, nil +} + +// GetUptime returns the uptime of the validator corresponding to validationID +func (u *UptimeTracker) GetUptime(validationID ids.ID) ( + time.Duration, + time.Time, + error, +) { + u.lock.Lock() + defer u.lock.Unlock() + + nodeID, ok := u.state.getNodeID(validationID) + if !ok { + return 0, time.Time{}, fmt.Errorf( + "%w: %s", + ErrValidationIDNotFound, + validationID, + ) + } + + uptime, lastUpdated, err := u.manager.CalculateUptime(nodeID) + if err != nil { + return 0, time.Time{}, fmt.Errorf( + "failed to calculate uptime for validator %s: %w", + validationID, + err, + ) + } + + return uptime, lastUpdated, nil +} + +// Connect starts tracking a node. Nodes that are activated and connected will +// be treated as online. +func (u *UptimeTracker) Connect(nodeID ids.NodeID) error { + u.lock.Lock() + defer u.lock.Unlock() + + u.connectedValidators.Add(nodeID) + if u.deactivatedValidators.Contains(nodeID) { + return nil + } + + return u.manager.Connect(nodeID) +} + +// Disconnect stops tracking a node. Disconnected nodes are treated as being +// offline. +func (u *UptimeTracker) Disconnect(nodeID ids.NodeID) error { + u.lock.Lock() + defer u.lock.Unlock() + + u.connectedValidators.Remove(nodeID) + if u.deactivatedValidators.Contains(nodeID) { + return nil + } + + return u.manager.Disconnect(nodeID) +} + +// Sync updates the validator set and writes our state. Sync starts tracking +// uptimes for all active validators the first time it is called. 
+func (u *UptimeTracker) Sync(ctx context.Context) error { + u.lock.Lock() + defer u.lock.Unlock() + + currentValidatorSet, height, err := u.validatorState.GetCurrentValidatorSet( + ctx, + u.subnetID, + ) + if err != nil { + return fmt.Errorf("failed to get current validator set: %w", err) + } + + // Update validator set if we're behind + if err := u.update(height, currentValidatorSet); err != nil { + return fmt.Errorf("failed to update validator set: %w", err) + } + + // Initialize uptimes if this is the first time Sync has been called + if !u.synced { + if err := u.manager.StartTracking(u.state.getNodeIDs()); err != nil { + return fmt.Errorf("failed to start tracking validators: %w", err) + } + + u.synced = true + } + + if err := u.state.writeModifications(); err != nil { + return fmt.Errorf("failed to write state: %w", err) + } + + return nil +} + +func (u *UptimeTracker) update( + height uint64, + currentValidatorSet map[ids.ID]*validators.GetCurrentValidatorOutput, +) error { + // Nothing to do if our local state is already at or ahead of this height + if u.synced && u.height >= height { + return nil + } + + newValidators := currentValidatorSet + + for validationID, validator := range u.state.validators { + // This validator is still in the latest update + if _, ok := newValidators[validationID]; ok { + continue + } + + u.state.deleteValidator(validationID) + u.deactivatedValidators.Remove(validator.NodeID) + } + + // Add or update validators + for validationID, newValidator := range newValidators { + if ok := u.state.hasValidationID(validationID); ok { + // Check if there was a status change + if !u.state.updateValidator(validationID, newValidator.IsActive) { + continue + } + + // If there was a status change we need to activate/deactivate the + // validator + if newValidator.IsActive { + // This validator is now active and is treated as online + if err := u.activate(newValidator.NodeID); err != nil { + return fmt.Errorf( + "failed to activate validator %s: %w", + newValidator.NodeID, + err, + ) + } + continue + } + + // This validator is no longer active and is treated as offline + if err := u.deactivate(newValidator.NodeID); err != nil { + return fmt.Errorf( + "failed to deactivate validator %s: %w", + newValidator.NodeID, + err, + ) + } + continue + } + + // This is a new validator + u.state.addNewValidator(&validator{ + NodeID: newValidator.NodeID, + validationID: validationID, + IsActive: newValidator.IsActive, + StartTime: newValidator.StartTime, + UpDuration: 0, + LastUpdated: newValidator.StartTime, + IsL1Validator: newValidator.IsL1Validator, + Weight: newValidator.Weight, + }) + + if newValidator.IsActive { + continue + } + + // This validator is not active and is treated as offline + if err := u.deactivate(newValidator.NodeID); err != nil { + return fmt.Errorf( + "failed to deactivate validator %s: %w", + newValidator.NodeID, + err, + ) + } + } + + u.height = height + return nil +} + +// activate treats nodeID as online +func (u *UptimeTracker) activate(nodeID ids.NodeID) error { + u.deactivatedValidators.Remove(nodeID) + + return u.manager.Connect(nodeID) +} + +// deactivate treats nodeID as offline +func (u *UptimeTracker) deactivate(nodeID ids.NodeID) error { + u.deactivatedValidators.Add(nodeID) + + return u.manager.Disconnect(nodeID) +} + +// Shutdown stops tracking uptimes and writes our state.
+func (u *UptimeTracker) Shutdown() error { + u.lock.Lock() + defer u.lock.Unlock() + + if !u.synced { + return nil + } + + if err := u.manager.StopTracking(u.state.getNodeIDs()); err != nil { + return fmt.Errorf("failed to stop uptime tracking: %w", err) + } + + return u.state.writeModifications() +} diff --git a/vms/evm/uptimetracker/uptime_tracker_test.go b/vms/evm/uptimetracker/uptime_tracker_test.go new file mode 100644 index 000000000000..a8741aec757a --- /dev/null +++ b/vms/evm/uptimetracker/uptime_tracker_test.go @@ -0,0 +1,551 @@ +// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved. +// See the file LICENSE for licensing terms. + +package uptimetracker + +import ( + "context" + "testing" + "time" + + "github.com/stretchr/testify/require" + + "github.com/ava-labs/avalanchego/database/memdb" + "github.com/ava-labs/avalanchego/ids" + "github.com/ava-labs/avalanchego/snow/validators" + "github.com/ava-labs/avalanchego/snow/validators/validatorstest" + "github.com/ava-labs/avalanchego/utils/timer/mockable" +) + +type testStep struct { + timestamp time.Time + validators []validators.GetCurrentValidatorOutput + connectedValidators []ids.NodeID + disconnectedValidators []ids.NodeID +} + +func TestUptimeTracker_GetUptime(t *testing.T) { + tests := []struct { + name string + steps []testStep + + validationID ids.ID + + wantUptime time.Duration + wantLastUpdated time.Time + wantErr error + }{ + { + name: "no validators", + wantErr: ErrValidationIDNotFound, + }, + { + name: "one validator", + steps: []testStep{ + { + timestamp: time.Time{}.Add(10 * time.Second), + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{2}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + }, + validationID: ids.ID{1}, + wantUptime: 10 * time.Second, + wantLastUpdated: time.Time{}.Add(10 * time.Second), + }, + { + name: "one validator added and removed", + steps: []testStep{ + { + timestamp: time.Time{}.Add(10 * time.Second), + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{2}, + StartTime: 10, + IsActive: true, + }, + }, + }, + { + timestamp: time.Time{}.Add(20 * time.Second), + }, + }, + validationID: ids.ID{1}, + wantErr: ErrValidationIDNotFound, + }, + { + name: "one validator deactivated", + steps: []testStep{ + { + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + { + timestamp: time.Time{}.Add(10 * time.Second), + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: false, + }, + }, + }, + }, + validationID: ids.ID{1}, + wantUptime: 10 * time.Second, + wantLastUpdated: time.Time{}.Add(10 * time.Second), + }, + { + name: "one validator deactivated and reactivated", + steps: []testStep{ + { + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + { + timestamp: time.Time{}.Add(10 * time.Second), + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + 
StartTime: uint64(time.Time{}.Unix()), + IsActive: false, + }, + }, + }, + { + timestamp: time.Time{}.Add(20 * time.Second), + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + }, + validationID: ids.ID{1}, + wantUptime: 10 * time.Second, + wantLastUpdated: time.Time{}.Add(20 * time.Second), + }, + { + name: "one validator disconnected", + steps: []testStep{ + { + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + { + timestamp: time.Time{}.Add(10 * time.Second), + disconnectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + }, + validationID: ids.ID{1}, + wantUptime: 10 * time.Second, + wantLastUpdated: time.Time{}.Add(10 * time.Second), + }, + { + name: "one validator connected and disconnected", + steps: []testStep{ + { + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + { + timestamp: time.Time{}.Add(10 * time.Second), + disconnectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + { + timestamp: time.Time{}.Add(20 * time.Second), + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + }, + validationID: ids.ID{1}, + wantUptime: 10 * time.Second, + wantLastUpdated: time.Time{}.Add(20 * time.Second), + }, + { + name: "validator never connected", + steps: []testStep{ + { + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + }, + validationID: ids.ID{1}, + }, + { + name: "validator removed", + steps: []testStep{ + { + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + { + timestamp: time.Time{}.Add(10 * time.Second), + validators: []validators.GetCurrentValidatorOutput{ + {}, + }, + }, + }, + validationID: ids.ID{1}, + wantErr: ErrValidationIDNotFound, + }, + { + name: "connected inactive validator becomes active", + steps: []testStep{ + { + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + }, + }, + }, + { + timestamp: time.Time{}.Add(10 * time.Second), + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + }, + validationID: ids.ID{1}, + wantLastUpdated: time.Time{}.Add(10 * time.Second), + }, + { + name: "disconnected inactive validator 
becomes connected + active", + steps: []testStep{ + { + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + }, + }, + }, + { + timestamp: time.Time{}.Add(10 * time.Second), + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + }, + validationID: ids.ID{1}, + wantLastUpdated: time.Time{}.Add(10 * time.Second), + }, + { + name: "deactivated validator leaves validator set", + steps: []testStep{ + { + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + }, + }, + }, + { + timestamp: time.Time{}.Add(10 * time.Second), + }, + }, + validationID: ids.ID{1}, + wantErr: ErrValidationIDNotFound, + }, + { + name: "validator has no updates", + steps: []testStep{ + { + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + { + timestamp: time.Time{}.Add(10 * time.Second), + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + { + timestamp: time.Time{}.Add(20 * time.Second), + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + }, + validationID: ids.ID{1}, + wantUptime: 20 * time.Second, + wantLastUpdated: time.Time{}.Add(20 * time.Second), + }, + { + name: "connected validator rejoins validator set", + steps: []testStep{ + { + connectedValidators: []ids.NodeID{ + {1}, + }, + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Unix()), + IsActive: true, + }, + }, + }, + { + timestamp: time.Time{}.Add(10 * time.Second), + }, + { + timestamp: time.Time{}.Add(20 * time.Second), + validators: []validators.GetCurrentValidatorOutput{ + { + ValidationID: ids.ID{1}, + NodeID: ids.NodeID{1}, + StartTime: uint64(time.Time{}.Add(10 * time.Second).Unix()), + IsActive: true, + }, + }, + }, + }, + validationID: ids.ID{1}, + wantUptime: 10 * time.Second, + wantLastUpdated: time.Time{}.Add(20 * time.Second), + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + require := require.New(t) + + validatorState := &validatorstest.State{} + subnetID := ids.GenerateTestID() + db := memdb.New() + clock := &mockable.Clock{} + + uptimeTracker, err := New(validatorState, subnetID, db, clock) + require.NoError(err) + + pChainHeight := uint64(0) + for _, step := range tt.steps { + clock.Set(step.timestamp) + + for _, v := range step.connectedValidators { + require.NoError(uptimeTracker.Connect(v)) + } + + for _, v := range step.disconnectedValidators { + require.NoError(uptimeTracker.Disconnect(v)) + } + + validatorState.GetCurrentValidatorSetF = func( + context.Context, + ids.ID, + ) ( + map[ids.ID]*validators.GetCurrentValidatorOutput, + uint64, + error, + ) { + validatorsMap := make(map[ids.ID]*validators.GetCurrentValidatorOutput) + + for _, v := range 
step.validators { + validatorsMap[v.ValidationID] = &v + } + + pChainHeight += 1 + return validatorsMap, pChainHeight, nil + } + + require.NoError(uptimeTracker.Sync(t.Context())) + } + + gotUptime, gotLastUpdated, err := uptimeTracker.GetUptime( + tt.validationID, + ) + + require.ErrorIs(err, tt.wantErr) + require.Equal(tt.wantLastUpdated, gotLastUpdated) + require.Equal(tt.wantUptime, gotUptime) + require.NoError(uptimeTracker.Shutdown()) + }) + } +} + +func TestUptimeTracker_Restart(t *testing.T) { + require := require.New(t) + + validationID := ids.GenerateTestID() + nodeID := ids.GenerateTestNodeID() + start := time.Time{} + validatorState := &validatorstest.State{ + GetCurrentValidatorSetF: func(context.Context, ids.ID) (map[ids.ID]*validators.GetCurrentValidatorOutput, uint64, error) { + return map[ids.ID]*validators.GetCurrentValidatorOutput{ + validationID: { + ValidationID: validationID, + StartTime: uint64(start.Unix()), + NodeID: nodeID, + IsActive: true, + }, + }, 0, nil + }, + } + subnetID := ids.GenerateTestID() + db := memdb.New() + clock := &mockable.Clock{} + clock.Set(start) + + uptimeTracker, err := New(validatorState, subnetID, db, clock) + require.NoError(err) + + require.NoError(uptimeTracker.Sync(t.Context())) + require.NoError(uptimeTracker.Connect(nodeID)) + + clock.Set(start.Add(10 * time.Second)) + uptime, lastUpdated, err := uptimeTracker.GetUptime(validationID) + require.NoError(err) + require.Equal(10*time.Second, uptime) + require.Equal(time.Time{}.Add(10*time.Second), lastUpdated) + + require.NoError(uptimeTracker.Shutdown()) + + clock.Set(start.Add(20 * time.Second)) + uptimeTracker, err = New(validatorState, subnetID, db, clock) + require.NoError(err) + require.NoError(uptimeTracker.Sync(t.Context())) + + uptime, lastUpdated, err = uptimeTracker.GetUptime(validationID) + require.NoError(err) + require.Equal(20*time.Second, uptime) + require.Equal(time.Time{}.Add(20*time.Second), lastUpdated) +} diff --git a/vms/platformvm/block/builder/builder_test.go b/vms/platformvm/block/builder/builder_test.go index 74f6b298e7d7..df5d16a2be4a 100644 --- a/vms/platformvm/block/builder/builder_test.go +++ b/vms/platformvm/block/builder/builder_test.go @@ -4,7 +4,6 @@ package builder import ( - "context" "errors" "testing" "time" @@ -63,7 +62,7 @@ func TestBuildBlockBasic(t *testing.T) { require.True(ok) // [BuildBlock] should build a block with the transaction - blkIntf, err := env.Builder.BuildBlock(context.Background()) + blkIntf, err := env.Builder.BuildBlock(t.Context()) require.NoError(err) require.IsType(&blockexecutor.Block{}, blkIntf) @@ -89,7 +88,7 @@ func TestBuildBlockDoesNotBuildWithEmptyMempool(t *testing.T) { require.Nil(tx) // [BuildBlock] should not build an empty block - blk, err := env.Builder.BuildBlock(context.Background()) + blk, err := env.Builder.BuildBlock(t.Context()) require.ErrorIs(err, ErrNoPendingBlocks) require.Nil(blk) } @@ -151,12 +150,12 @@ func TestBuildBlockShouldReward(t *testing.T) { require.True(ok) // Build and accept a block with the tx - blk, err := env.Builder.BuildBlock(context.Background()) + blk, err := env.Builder.BuildBlock(t.Context()) require.NoError(err) require.IsType(&block.BanffStandardBlock{}, blk.(*blockexecutor.Block).Block) require.Equal([]*txs.Tx{tx}, blk.(*blockexecutor.Block).Block.Txs()) - require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Verify(t.Context())) + require.NoError(blk.Accept(t.Context())) 
env.blkManager.SetPreference(blk.ID(), nil) // Validator should now be current @@ -175,9 +174,9 @@ func TestBuildBlockShouldReward(t *testing.T) { iter.Release() // Check that the right block was built - blk, err := env.Builder.BuildBlock(context.Background()) + blk, err := env.Builder.BuildBlock(t.Context()) require.NoError(err) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) require.IsType(&block.BanffProposalBlock{}, blk.(*blockexecutor.Block).Block) expectedTx, err := NewRewardValidatorTx(env.ctx, staker.TxID) @@ -187,15 +186,15 @@ func TestBuildBlockShouldReward(t *testing.T) { // Commit the [ProposalBlock] with a [CommitBlock] proposalBlk, ok := blk.(snowman.OracleBlock) require.True(ok) - options, err := proposalBlk.Options(context.Background()) + options, err := proposalBlk.Options(t.Context()) require.NoError(err) commit := options[0].(*blockexecutor.Block) require.IsType(&block.BanffCommitBlock{}, commit.Block) - require.NoError(blk.Accept(context.Background())) - require.NoError(commit.Verify(context.Background())) - require.NoError(commit.Accept(context.Background())) + require.NoError(blk.Accept(t.Context())) + require.NoError(commit.Verify(t.Context())) + require.NoError(commit.Accept(t.Context())) env.blkManager.SetPreference(commit.ID(), nil) // Stop rewarding once our staker is rewarded @@ -232,7 +231,7 @@ func TestBuildBlockAdvanceTime(t *testing.T) { env.backend.Clk.Set(nextTime) // [BuildBlock] should build a block advancing the time to [NextTime] - blkIntf, err := env.Builder.BuildBlock(context.Background()) + blkIntf, err := env.Builder.BuildBlock(t.Context()) require.NoError(err) require.IsType(&blockexecutor.Block{}, blkIntf) @@ -290,7 +289,7 @@ func TestBuildBlockForceAdvanceTime(t *testing.T) { // [BuildBlock] should build a block advancing the time to [nextTime], // not the current wall clock. - blkIntf, err := env.Builder.BuildBlock(context.Background()) + blkIntf, err := env.Builder.BuildBlock(t.Context()) require.NoError(err) require.IsType(&blockexecutor.Block{}, blkIntf) @@ -386,7 +385,7 @@ func TestBuildBlockInvalidStakingDurations(t *testing.T) { require.True(ok) // Only tx1 should be in a built block since [MaxStakeDuration] is satisfied. 
- blkIntf, err := env.Builder.BuildBlock(context.Background()) + blkIntf, err := env.Builder.BuildBlock(t.Context()) require.NoError(err) require.IsType(&blockexecutor.Block{}, blkIntf) diff --git a/vms/platformvm/block/builder/standard_block_test.go b/vms/platformvm/block/builder/standard_block_test.go index c343ae9d9d6a..52738111f96b 100644 --- a/vms/platformvm/block/builder/standard_block_test.go +++ b/vms/platformvm/block/builder/standard_block_test.go @@ -4,7 +4,6 @@ package builder import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -72,11 +71,11 @@ func TestAtomicTxImports(t *testing.T) { require.NoError(err) require.NoError(env.Builder.Add(tx)) - b, err := env.Builder.BuildBlock(context.Background()) + b, err := env.Builder.BuildBlock(t.Context()) require.NoError(err) // Test multiple verify calls work - require.NoError(b.Verify(context.Background())) - require.NoError(b.Accept(context.Background())) + require.NoError(b.Verify(t.Context())) + require.NoError(b.Accept(t.Context())) _, txStatus, err := env.state.GetTx(tx.ID()) require.NoError(err) // Ensure transaction is in the committed state diff --git a/vms/platformvm/block/executor/block_test.go b/vms/platformvm/block/executor/block_test.go index ba25c7fbde7a..b522b02e807e 100644 --- a/vms/platformvm/block/executor/block_test.go +++ b/vms/platformvm/block/executor/block_test.go @@ -4,7 +4,6 @@ package executor import ( - "context" "testing" "time" @@ -505,7 +504,7 @@ func TestBlockOptions(t *testing.T) { require := require.New(t) blk := tt.blkF(ctrl) - options, err := blk.Options(context.Background()) + options, err := blk.Options(t.Context()) require.NoError(err) require.IsType(tt.expectedPreferenceType, options[0].(*Block).Block) }) diff --git a/vms/platformvm/block/executor/proposal_block_test.go b/vms/platformvm/block/executor/proposal_block_test.go index 0f307ced924e..15936609e31e 100644 --- a/vms/platformvm/block/executor/proposal_block_test.go +++ b/vms/platformvm/block/executor/proposal_block_test.go @@ -4,7 +4,6 @@ package executor import ( - "context" "fmt" "testing" "time" @@ -126,7 +125,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { proposalBlock := env.blkManager.NewBlock(statelessProposalBlock) - err = proposalBlock.Verify(context.Background()) + err = proposalBlock.Verify(t.Context()) require.ErrorIs(err, errIncorrectBlockHeight) // valid @@ -138,7 +137,7 @@ func TestApricotProposalBlockTimeVerification(t *testing.T) { require.NoError(err) proposalBlock = env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(proposalBlock.Verify(context.Background())) + require.NoError(proposalBlock.Verify(t.Context())) } func TestBanffProposalBlockTimeVerification(t *testing.T) { @@ -250,7 +249,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - err = block.Verify(context.Background()) + err = block.Verify(t.Context()) require.ErrorIs(err, errIncorrectBlockHeight) } @@ -264,7 +263,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - err = block.Verify(context.Background()) + err = block.Verify(t.Context()) require.ErrorIs(err, errApricotBlockIssuedAfterFork) } @@ -280,7 +279,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - err = block.Verify(context.Background()) + err = block.Verify(t.Context()) 
require.ErrorIs(err, executor.ErrChildBlockEarlierThanParent) } @@ -298,7 +297,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - err = block.Verify(context.Background()) + err = block.Verify(t.Context()) require.ErrorIs(err, executor.ErrChildBlockBeyondSyncBound) env.clk.Set(initClkTime) } @@ -316,7 +315,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - err = block.Verify(context.Background()) + err = block.Verify(t.Context()) require.ErrorIs(err, executor.ErrChildBlockAfterStakerChangeTime) } @@ -338,7 +337,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - err = block.Verify(context.Background()) + err = block.Verify(t.Context()) require.ErrorIs(err, executor.ErrAdvanceTimeTxIssuedAfterBanff) } @@ -354,7 +353,7 @@ func TestBanffProposalBlockTimeVerification(t *testing.T) { require.NoError(err) block := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(block.Verify(context.Background())) + require.NoError(block.Verify(t.Context())) } } @@ -642,14 +641,14 @@ func TestBanffProposalBlockUpdateStakers(t *testing.T) { // verify and accept the block block := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(block.Verify(context.Background())) - options, err := block.(snowman.OracleBlock).Options(context.Background()) + require.NoError(block.Verify(t.Context())) + options, err := block.(snowman.OracleBlock).Options(t.Context()) require.NoError(err) - require.NoError(options[0].Verify(context.Background())) + require.NoError(options[0].Verify(t.Context())) - require.NoError(block.Accept(context.Background())) - require.NoError(options[0].Accept(context.Background())) + require.NoError(block.Accept(t.Context())) + require.NoError(options[0].Accept(t.Context())) } require.NoError(env.state.Commit()) @@ -811,12 +810,12 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(propBlk.Verify(context.Background())) // verify and update staker set + require.NoError(propBlk.Verify(t.Context())) // verify and update staker set - options, err := propBlk.(snowman.OracleBlock).Options(context.Background()) + options, err := propBlk.(snowman.OracleBlock).Options(t.Context()) require.NoError(err) commitBlk := options[0] - require.NoError(commitBlk.Verify(context.Background())) + require.NoError(commitBlk.Verify(t.Context())) blkStateMap := env.blkManager.(*manager).blkIDToState updatedState := blkStateMap[commitBlk.ID()].onAcceptState @@ -824,8 +823,8 @@ func TestBanffProposalBlockRemoveSubnetValidator(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) // Check VM Validators are removed successfully - require.NoError(propBlk.Accept(context.Background())) - require.NoError(commitBlk.Accept(context.Background())) + require.NoError(propBlk.Accept(t.Context())) + require.NoError(commitBlk.Accept(t.Context())) _, ok := env.config.Validators.GetValidator(subnetID, subnetVdr2NodeID) require.False(ok) _, ok = env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) @@ -933,14 +932,14 @@ func TestBanffProposalBlockTrackedSubnet(t *testing.T) { ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(propBlk.Verify(context.Background())) // 
verify update staker set - options, err := propBlk.(snowman.OracleBlock).Options(context.Background()) + require.NoError(propBlk.Verify(t.Context())) // verify update staker set + options, err := propBlk.(snowman.OracleBlock).Options(t.Context()) require.NoError(err) commitBlk := options[0] - require.NoError(commitBlk.Verify(context.Background())) + require.NoError(commitBlk.Verify(t.Context())) - require.NoError(propBlk.Accept(context.Background())) - require.NoError(commitBlk.Accept(context.Background())) + require.NoError(propBlk.Accept(t.Context())) + require.NoError(commitBlk.Accept(t.Context())) _, ok := env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) require.True(ok) }) @@ -1025,15 +1024,15 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(propBlk.Verify(context.Background())) + require.NoError(propBlk.Verify(t.Context())) - options, err := propBlk.(snowman.OracleBlock).Options(context.Background()) + options, err := propBlk.(snowman.OracleBlock).Options(t.Context()) require.NoError(err) commitBlk := options[0] - require.NoError(commitBlk.Verify(context.Background())) + require.NoError(commitBlk.Verify(t.Context())) - require.NoError(propBlk.Accept(context.Background())) - require.NoError(commitBlk.Accept(context.Background())) + require.NoError(propBlk.Accept(t.Context())) + require.NoError(commitBlk.Accept(t.Context())) // Test validator weight before delegation vdrWeight := env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) @@ -1115,15 +1114,15 @@ func TestBanffProposalBlockDelegatorStakerWeight(t *testing.T) { require.NoError(err) propBlk = env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(propBlk.Verify(context.Background())) + require.NoError(propBlk.Verify(t.Context())) - options, err = propBlk.(snowman.OracleBlock).Options(context.Background()) + options, err = propBlk.(snowman.OracleBlock).Options(t.Context()) require.NoError(err) commitBlk = options[0] - require.NoError(commitBlk.Verify(context.Background())) + require.NoError(commitBlk.Verify(t.Context())) - require.NoError(propBlk.Accept(context.Background())) - require.NoError(commitBlk.Accept(context.Background())) + require.NoError(propBlk.Accept(t.Context())) + require.NoError(commitBlk.Accept(t.Context())) // Test validator weight after delegation vdrWeight = env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) @@ -1210,15 +1209,15 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { ) require.NoError(err) propBlk := env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(propBlk.Verify(context.Background())) + require.NoError(propBlk.Verify(t.Context())) - options, err := propBlk.(snowman.OracleBlock).Options(context.Background()) + options, err := propBlk.(snowman.OracleBlock).Options(t.Context()) require.NoError(err) commitBlk := options[0] - require.NoError(commitBlk.Verify(context.Background())) + require.NoError(commitBlk.Verify(t.Context())) - require.NoError(propBlk.Accept(context.Background())) - require.NoError(commitBlk.Accept(context.Background())) + require.NoError(propBlk.Accept(t.Context())) + require.NoError(commitBlk.Accept(t.Context())) // Test validator weight before delegation vdrWeight := env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) @@ -1299,15 +1298,15 @@ func TestBanffProposalBlockDelegatorStakers(t *testing.T) { ) require.NoError(err) propBlk = 
env.blkManager.NewBlock(statelessProposalBlock) - require.NoError(propBlk.Verify(context.Background())) + require.NoError(propBlk.Verify(t.Context())) - options, err = propBlk.(snowman.OracleBlock).Options(context.Background()) + options, err = propBlk.(snowman.OracleBlock).Options(t.Context()) require.NoError(err) commitBlk = options[0] - require.NoError(commitBlk.Verify(context.Background())) + require.NoError(commitBlk.Verify(t.Context())) - require.NoError(propBlk.Accept(context.Background())) - require.NoError(commitBlk.Accept(context.Background())) + require.NoError(propBlk.Accept(t.Context())) + require.NoError(commitBlk.Accept(t.Context())) // Test validator weight after delegation vdrWeight = env.config.Validators.GetWeight(constants.PrimaryNetworkID, nodeID) @@ -1370,8 +1369,8 @@ func TestAddValidatorProposalBlock(t *testing.T) { ) require.NoError(err) blk := env.blkManager.NewBlock(statelessBlk) - require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Verify(t.Context())) + require.NoError(blk.Accept(t.Context())) env.blkManager.SetPreference(statelessBlk.ID(), nil) // Should be current @@ -1403,8 +1402,8 @@ func TestAddValidatorProposalBlock(t *testing.T) { ) require.NoError(err) blk = env.blkManager.NewBlock(statelessBlk) - require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Verify(t.Context())) + require.NoError(blk.Accept(t.Context())) env.blkManager.SetPreference(statelessBlk.ID(), nil) } @@ -1456,15 +1455,15 @@ func TestAddValidatorProposalBlock(t *testing.T) { ) require.NoError(err) blk = env.blkManager.NewBlock(statelessProposalBlk) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) - options, err := blk.(snowman.OracleBlock).Options(context.Background()) + options, err := blk.(snowman.OracleBlock).Options(t.Context()) require.NoError(err) commitBlk := options[0] - require.NoError(commitBlk.Verify(context.Background())) + require.NoError(commitBlk.Verify(t.Context())) - require.NoError(blk.Accept(context.Background())) - require.NoError(commitBlk.Accept(context.Background())) + require.NoError(blk.Accept(t.Context())) + require.NoError(commitBlk.Accept(t.Context())) // Should be current staker, err = env.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) diff --git a/vms/platformvm/block/executor/standard_block_test.go b/vms/platformvm/block/executor/standard_block_test.go index d84cec1c7fd2..e0df2759f38b 100644 --- a/vms/platformvm/block/executor/standard_block_test.go +++ b/vms/platformvm/block/executor/standard_block_test.go @@ -4,7 +4,6 @@ package executor import ( - "context" "fmt" "testing" "time" @@ -72,7 +71,7 @@ func TestApricotStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) blk := env.blkManager.NewBlock(apricotChildBlk) - err = blk.Verify(context.Background()) + err = blk.Verify(t.Context()) require.ErrorIs(err, errIncorrectBlockHeight) // valid height @@ -83,7 +82,7 @@ func TestApricotStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) blk = env.blkManager.NewBlock(apricotChildBlk) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) } func TestBanffStandardBlockTimeVerification(t *testing.T) { @@ -182,7 +181,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - err = block.Verify(context.Background()) + 
err = block.Verify(t.Context()) require.ErrorIs(err, errApricotBlockIssuedAfterFork) } @@ -197,7 +196,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - err = block.Verify(context.Background()) + err = block.Verify(t.Context()) require.ErrorIs(err, errIncorrectBlockHeight) } @@ -212,7 +211,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - err = block.Verify(context.Background()) + err = block.Verify(t.Context()) require.ErrorIs(err, executor.ErrChildBlockEarlierThanParent) } @@ -228,7 +227,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - err = block.Verify(context.Background()) + err = block.Verify(t.Context()) require.ErrorIs(err, executor.ErrChildBlockBeyondSyncBound) env.clk.Set(initClkTime) } @@ -244,7 +243,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - err = block.Verify(context.Background()) + err = block.Verify(t.Context()) require.ErrorIs(err, executor.ErrChildBlockAfterStakerChangeTime) } @@ -259,7 +258,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - err = block.Verify(context.Background()) + err = block.Verify(t.Context()) require.ErrorIs(err, ErrStandardBlockWithoutChanges) } @@ -274,7 +273,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.NoError(block.Verify(context.Background())) + require.NoError(block.Verify(t.Context())) } { @@ -288,7 +287,7 @@ func TestBanffStandardBlockTimeVerification(t *testing.T) { ) require.NoError(err) block := env.blkManager.NewBlock(banffChildBlk) - require.NoError(block.Verify(context.Background())) + require.NoError(block.Verify(t.Context())) } } @@ -327,7 +326,7 @@ func TestBanffStandardBlockUpdatePrimaryNetworkStakers(t *testing.T) { block := env.blkManager.NewBlock(statelessStandardBlock) // update staker set - require.NoError(block.Verify(context.Background())) + require.NoError(block.Verify(t.Context())) // tests blkStateMap := env.blkManager.(*manager).blkIDToState @@ -341,7 +340,7 @@ func TestBanffStandardBlockUpdatePrimaryNetworkStakers(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) // Test VM validators - require.NoError(block.Accept(context.Background())) + require.NoError(block.Accept(t.Context())) _, ok := env.config.Validators.GetValidator(constants.PrimaryNetworkID, nodeID) require.True(ok) } @@ -561,8 +560,8 @@ func TestBanffStandardBlockUpdateStakers(t *testing.T) { require.NoError(err) // update staker set - require.NoError(block.Verify(context.Background())) - require.NoError(block.Accept(context.Background())) + require.NoError(block.Verify(t.Context())) + require.NoError(block.Accept(t.Context())) } for stakerNodeID, status := range test.expectedStakers { @@ -683,7 +682,7 @@ func TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { block := env.blkManager.NewBlock(statelessStandardBlock) // update staker set - require.NoError(block.Verify(context.Background())) + require.NoError(block.Verify(t.Context())) blkStateMap := env.blkManager.(*manager).blkIDToState updatedState := blkStateMap[block.ID()].onAcceptState @@ -691,7 +690,7 @@ func 
TestBanffStandardBlockRemoveSubnetValidator(t *testing.T) { require.ErrorIs(err, database.ErrNotFound) // Check VM Validators are removed successfully - require.NoError(block.Accept(context.Background())) + require.NoError(block.Accept(t.Context())) _, ok := env.config.Validators.GetValidator(subnetID, subnetVdr2NodeID) require.False(ok) _, ok = env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) @@ -757,8 +756,8 @@ func TestBanffStandardBlockTrackedSubnet(t *testing.T) { block := env.blkManager.NewBlock(statelessStandardBlock) // update staker set - require.NoError(block.Verify(context.Background())) - require.NoError(block.Accept(context.Background())) + require.NoError(block.Verify(t.Context())) + require.NoError(block.Accept(t.Context())) _, ok := env.config.Validators.GetValidator(subnetID, subnetValidatorNodeID) require.True(ok) }) @@ -797,8 +796,8 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { ) require.NoError(err) blk := env.blkManager.NewBlock(statelessStandardBlock) - require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Verify(t.Context())) + require.NoError(blk.Accept(t.Context())) require.NoError(env.state.Commit()) // Test validator weight before delegation @@ -848,8 +847,8 @@ func TestBanffStandardBlockDelegatorStakerWeight(t *testing.T) { ) require.NoError(err) blk = env.blkManager.NewBlock(statelessStandardBlock) - require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Verify(t.Context())) + require.NoError(blk.Accept(t.Context())) require.NoError(env.state.Commit()) // Test validator weight after delegation diff --git a/vms/platformvm/block/executor/verifier_test.go b/vms/platformvm/block/executor/verifier_test.go index f4ac33ffd0d6..e2d7413ebe3b 100644 --- a/vms/platformvm/block/executor/verifier_test.go +++ b/vms/platformvm/block/executor/verifier_test.go @@ -4,7 +4,6 @@ package executor import ( - "context" "testing" "time" @@ -510,7 +509,7 @@ func TestVerifierVisitCommitBlock(t *testing.T) { // Verify the block. blk := manager.NewBlock(apricotBlk) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) // Assert expected state. require.Contains(manager.backend.blkIDToState, apricotBlk.ID()) @@ -519,7 +518,7 @@ func TestVerifierVisitCommitBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) } func TestVerifierVisitAbortBlock(t *testing.T) { @@ -584,7 +583,7 @@ func TestVerifierVisitAbortBlock(t *testing.T) { // Verify the block. blk := manager.NewBlock(apricotBlk) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) // Assert expected state. require.Contains(manager.backend.blkIDToState, apricotBlk.ID()) @@ -593,7 +592,7 @@ func TestVerifierVisitAbortBlock(t *testing.T) { require.Equal(timestamp, gotBlkState.timestamp) // Visiting again should return nil without using dependencies. - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) } // Assert that a block with an unverified parent fails verification. 
diff --git a/vms/platformvm/block/executor/warp_verifier_test.go b/vms/platformvm/block/executor/warp_verifier_test.go index da1315dc7a80..65141d925bfa 100644 --- a/vms/platformvm/block/executor/warp_verifier_test.go +++ b/vms/platformvm/block/executor/warp_verifier_test.go @@ -4,7 +4,6 @@ package executor import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -143,7 +142,7 @@ func TestVerifyWarpMessages(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { err := VerifyWarpMessages( - context.Background(), + t.Context(), constants.UnitTestID, nil, 0, diff --git a/vms/platformvm/network/network.go b/vms/platformvm/network/network.go index 43a9bb915010..97d678398711 100644 --- a/vms/platformvm/network/network.go +++ b/vms/platformvm/network/network.go @@ -37,6 +37,7 @@ type Network struct { txPushGossipFrequency time.Duration txPullGossiper gossip.Gossiper txPullGossipFrequency time.Duration + peers *p2p.Peers } func New( @@ -60,13 +61,14 @@ func New( vdrs, config.MaxValidatorSetStaleness, ) - + peers := &p2p.Peers{} p2pNetwork, err := p2p.NewNetwork( log, appSender, registerer, "p2p", validators, + peers, ) if err != nil { return nil, err @@ -189,6 +191,7 @@ func New( txPushGossipFrequency: config.PushGossipFrequency, txPullGossiper: txPullGossiper, txPullGossipFrequency: config.PullGossipFrequency, + peers: peers, }, nil } @@ -224,3 +227,7 @@ func (n *Network) IssueTxFromRPC(tx *txs.Tx) error { n.txPushGossiper.Add(tx) return nil } + +func (n *Network) Peers() *p2p.Peers { + return n.peers +} diff --git a/vms/platformvm/network/network_test.go b/vms/platformvm/network/network_test.go index 72a56163b5ef..186c1bbf5007 100644 --- a/vms/platformvm/network/network_test.go +++ b/vms/platformvm/network/network_test.go @@ -4,7 +4,6 @@ package network import ( - "context" "errors" "testing" "time" @@ -244,7 +243,7 @@ func TestNetworkIssueTxFromRPC(t *testing.T) { err = n.IssueTxFromRPC(tt.tx) require.ErrorIs(err, tt.expectedErr) - require.NoError(n.txPushGossiper.Gossip(context.Background())) + require.NoError(n.txPushGossiper.Gossip(t.Context())) }) } } diff --git a/vms/platformvm/network/warp_test.go b/vms/platformvm/network/warp_test.go index 7da438435857..02c7ff1c67a8 100644 --- a/vms/platformvm/network/warp_test.go +++ b/vms/platformvm/network/warp_test.go @@ -4,7 +4,6 @@ package network import ( - "context" "encoding/hex" "math" "strings" @@ -90,7 +89,7 @@ func TestSignatureRequestVerify(t *testing.T) { t.Run(test.name, func(t *testing.T) { s := signatureRequestVerifier{} err := s.Verify( - context.Background(), + t.Context(), must[*warp.UnsignedMessage](t)(warp.NewUnsignedMessage( constants.UnitTestID, constants.PlatformChainID, @@ -159,7 +158,7 @@ func TestSignatureRequestVerifySubnetToL1Conversion(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { err := s.Verify( - context.Background(), + t.Context(), must[*warp.UnsignedMessage](t)(warp.NewUnsignedMessage( constants.UnitTestID, constants.PlatformChainID, @@ -218,7 +217,7 @@ func TestSignatureRequestVerifyL1ValidatorRegistrationRegistered(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { err := s.Verify( - context.Background(), + t.Context(), must[*warp.UnsignedMessage](t)(warp.NewUnsignedMessage( constants.UnitTestID, constants.PlatformChainID, @@ -519,7 +518,7 @@ func TestSignatureRequestVerifyL1ValidatorRegistrationNotRegistered(t *testing.T for _, test := range tests { t.Run(test.name, func(t *testing.T) { err := s.Verify( - 
context.Background(), + t.Context(), must[*warp.UnsignedMessage](t)(warp.NewUnsignedMessage( constants.UnitTestID, constants.PlatformChainID, @@ -620,7 +619,7 @@ func TestSignatureRequestVerifyL1ValidatorWeight(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { err := s.Verify( - context.Background(), + t.Context(), must[*warp.UnsignedMessage](t)(warp.NewUnsignedMessage( constants.UnitTestID, constants.PlatformChainID, diff --git a/vms/platformvm/service.go b/vms/platformvm/service.go index d34fada80fc7..4197c1424894 100644 --- a/vms/platformvm/service.go +++ b/vms/platformvm/service.go @@ -868,7 +868,7 @@ func (s *Service) getPrimaryOrSubnetValidators(subnetID ids.ID, nodeIDs set.Set[ if err != nil { return nil, err } - isConnected := s.vm.uptimeManager.IsConnected(currentStaker.NodeID) + isConnected := s.vm.Network.Peers().Has(currentStaker.NodeID) connected = &isConnected uptime = ¤tUptime } diff --git a/vms/platformvm/service_test.go b/vms/platformvm/service_test.go index 210896a229cf..7428b0459c79 100644 --- a/vms/platformvm/service_test.go +++ b/vms/platformvm/service_test.go @@ -4,7 +4,6 @@ package platformvm import ( - "context" "encoding/json" "errors" "fmt" @@ -79,7 +78,7 @@ func TestGetProposedHeight(t *testing.T) { reply := api.GetHeightResponse{} require.NoError(service.GetProposedHeight(&http.Request{}, nil, &reply)) - minHeight, err := service.vm.GetMinimumHeight(context.Background()) + minHeight, err := service.vm.GetMinimumHeight(t.Context()) require.NoError(err) require.Equal(minHeight, uint64(reply.Height)) @@ -109,13 +108,13 @@ func TestGetProposedHeight(t *testing.T) { require.NoError(service.vm.Network.IssueTxFromRPC(tx)) service.vm.ctx.Lock.Lock() - block, err := service.vm.BuildBlock(context.Background()) + block, err := service.vm.BuildBlock(t.Context()) require.NoError(err) blk := block.(*blockexecutor.Block) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Accept(t.Context())) service.vm.ctx.Lock.Unlock() @@ -213,13 +212,13 @@ func TestGetTxStatus(t *testing.T) { require.NoError(service.vm.Network.IssueTxFromRPC(tx)) service.vm.ctx.Lock.Lock() - block, err := service.vm.BuildBlock(context.Background()) + block, err := service.vm.BuildBlock(t.Context()) require.NoError(err) blk := block.(*blockexecutor.Block) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Accept(t.Context())) service.vm.ctx.Lock.Unlock() @@ -343,22 +342,22 @@ func TestGetTx(t *testing.T) { require.NoError(service.vm.Network.IssueTxFromRPC(tx)) service.vm.ctx.Lock.Lock() - blk, err := service.vm.BuildBlock(context.Background()) + blk, err := service.vm.BuildBlock(t.Context()) require.NoError(err) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Accept(t.Context())) if blk, ok := blk.(snowman.OracleBlock); ok { // For proposal blocks, commit them - options, err := blk.Options(context.Background()) + options, err := blk.Options(t.Context()) if !errors.Is(err, snowman.ErrNotOracle) { require.NoError(err) commit := options[0].(*blockexecutor.Block) require.IsType(&block.BanffCommitBlock{}, commit.Block) - require.NoError(commit.Verify(context.Background())) - require.NoError(commit.Accept(context.Background())) + 
require.NoError(commit.Verify(t.Context())) + require.NoError(commit.Accept(t.Context())) } } @@ -640,7 +639,7 @@ func TestGetCurrentValidators(t *testing.T) { for _, validatorTx := range genesis.Validators[:len(genesis.Validators)-1] { validator := validatorTx.Unsigned.(*txs.AddValidatorTx) connectedIDs.Add(validator.NodeID()) - require.NoError(service.vm.Connected(context.Background(), validator.NodeID(), version.CurrentApp)) + require.NoError(service.vm.Connected(t.Context(), validator.NodeID(), version.CurrentApp)) } require.NoError(service.GetCurrentValidators(nil, &args, &response)) @@ -829,13 +828,13 @@ func TestGetValidatorsAt(t *testing.T) { require.NoError(service.vm.Network.IssueTxFromRPC(tx)) service.vm.ctx.Lock.Lock() - block, err := service.vm.BuildBlock(context.Background()) + block, err := service.vm.BuildBlock(t.Context()) require.NoError(err) blk := block.(*blockexecutor.Block) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Accept(t.Context())) service.vm.ctx.Lock.Unlock() newLastAccepted := service.vm.manager.LastAccepted() @@ -978,8 +977,8 @@ func TestGetBlock(t *testing.T) { blk := service.vm.manager.NewBlock(statelessBlock) - require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Verify(t.Context())) + require.NoError(blk.Accept(t.Context())) service.vm.ctx.Lock.Unlock() diff --git a/vms/platformvm/state/state_test.go b/vms/platformvm/state/state_test.go index bd77f4266390..19884365380f 100644 --- a/vms/platformvm/state/state_test.go +++ b/vms/platformvm/state/state_test.go @@ -5,7 +5,6 @@ package state import ( "bytes" - "context" "maps" "math" "math/rand" @@ -1038,14 +1037,14 @@ func TestState_ApplyValidatorDiffs(t *testing.T) { { primaryValidatorSet := copyValidatorSet(diff.expectedPrimaryValidatorSet) require.NoError(state.ApplyValidatorWeightDiffs( - context.Background(), + t.Context(), primaryValidatorSet, currentHeight, prevHeight+1, constants.PrimaryNetworkID, )) require.NoError(state.ApplyValidatorPublicKeyDiffs( - context.Background(), + t.Context(), primaryValidatorSet, currentHeight, prevHeight+1, @@ -1057,7 +1056,7 @@ func TestState_ApplyValidatorDiffs(t *testing.T) { { legacySubnetValidatorSet := copyValidatorSet(diff.expectedSubnetValidatorSet) require.NoError(state.ApplyValidatorWeightDiffs( - context.Background(), + t.Context(), legacySubnetValidatorSet, currentHeight, prevHeight+1, @@ -1075,7 +1074,7 @@ func TestState_ApplyValidatorDiffs(t *testing.T) { } require.NoError(state.ApplyValidatorPublicKeyDiffs( - context.Background(), + t.Context(), legacySubnetValidatorSet, currentHeight, prevHeight+1, @@ -1087,7 +1086,7 @@ func TestState_ApplyValidatorDiffs(t *testing.T) { { subnetValidatorSet := copyValidatorSet(diff.expectedSubnetValidatorSet) require.NoError(state.ApplyValidatorWeightDiffs( - context.Background(), + t.Context(), subnetValidatorSet, currentHeight, prevHeight+1, @@ -1095,7 +1094,7 @@ func TestState_ApplyValidatorDiffs(t *testing.T) { )) require.NoError(state.ApplyValidatorPublicKeyDiffs( - context.Background(), + t.Context(), subnetValidatorSet, currentHeight, prevHeight+1, @@ -1114,13 +1113,13 @@ func TestState_ApplyValidatorDiffs(t *testing.T) { allValidatorSets[subnetID] = copyValidatorSet(diff.expectedSubnetValidatorSet) } require.NoError(state.ApplyAllValidatorWeightDiffs( - context.Background(), + t.Context(), allValidatorSets, currentHeight, 
prevHeight+1, )) require.NoError(state.ApplyAllValidatorPublicKeyDiffs( - context.Background(), + t.Context(), allValidatorSets, currentHeight, prevHeight+1, @@ -2044,8 +2043,8 @@ func TestL1Validators(t *testing.T) { reloadedEndValidatorSet := reloadedState.validators.GetMap(subnetID) require.Equal(expectedEndValidatorSet, reloadedEndValidatorSet) - require.NoError(state.ApplyValidatorWeightDiffs(context.Background(), endValidatorSet, 1, 1, subnetID)) - require.NoError(state.ApplyValidatorPublicKeyDiffs(context.Background(), endValidatorSet, 1, 1, subnetID)) + require.NoError(state.ApplyValidatorWeightDiffs(t.Context(), endValidatorSet, 1, 1, subnetID)) + require.NoError(state.ApplyValidatorPublicKeyDiffs(t.Context(), endValidatorSet, 1, 1, subnetID)) initialValidatorSet := l1ValdiatorsToValidatorSet(initialL1Validators, subnetID) require.Equal(initialValidatorSet, endValidatorSet) @@ -2389,7 +2388,7 @@ func TestGetCurrentValidators(t *testing.T) { require.NoError(state.Commit()) for _, subnetID := range subnetIDs { - baseStakers, currentValidators, height, err := state.GetCurrentValidators(context.Background(), subnetID) + baseStakers, currentValidators, height, err := state.GetCurrentValidators(t.Context(), subnetID) require.NoError(err) require.Equal(uint64(0), height) require.Len(baseStakers, stakersLenBySubnetID[subnetID]) diff --git a/vms/platformvm/txs/executor/warp_verifier_test.go b/vms/platformvm/txs/executor/warp_verifier_test.go index cbb3284c7b2f..2a8fde323e3a 100644 --- a/vms/platformvm/txs/executor/warp_verifier_test.go +++ b/vms/platformvm/txs/executor/warp_verifier_test.go @@ -204,7 +204,7 @@ func TestVerifyWarpMessages(t *testing.T) { for _, test := range tests { t.Run(test.name, func(t *testing.T) { err := VerifyWarpMessages( - context.Background(), + t.Context(), constants.UnitTestID, state, 0, diff --git a/vms/platformvm/txs/fee/complexity_test.go b/vms/platformvm/txs/fee/complexity_test.go index 3dd49655780b..f6d01af701dc 100644 --- a/vms/platformvm/txs/fee/complexity_test.go +++ b/vms/platformvm/txs/fee/complexity_test.go @@ -136,11 +136,7 @@ func TestOutputComplexity(t *testing.T) { { name: "any can spend", out: &avax.TransferableOutput{ - Out: &secp256k1fx.TransferOutput{ - OutputOwners: secp256k1fx.OutputOwners{ - Addrs: make([]ids.ShortID, 0), - }, - }, + Out: &secp256k1fx.TransferOutput{}, }, expected: gas.Dimensions{ gas.Bandwidth: 60, @@ -236,15 +232,9 @@ func TestInputComplexity(t *testing.T) { { name: "any can spend", in: &avax.TransferableInput{ - In: &secp256k1fx.TransferInput{ - Input: secp256k1fx.Input{ - SigIndices: make([]uint32, 0), - }, - }, - }, - cred: &secp256k1fx.Credential{ - Sigs: make([][secp256k1.SignatureLen]byte, 0), + In: &secp256k1fx.TransferInput{}, }, + cred: &secp256k1fx.Credential{}, expected: gas.Dimensions{ gas.Bandwidth: 92, gas.DBRead: 1, @@ -433,10 +423,8 @@ func TestOwnerComplexity(t *testing.T) { expectedErr error }{ { - name: "any can spend", - owner: &secp256k1fx.OutputOwners{ - Addrs: make([]ids.ShortID, 0), - }, + name: "any can spend", + owner: &secp256k1fx.OutputOwners{}, expected: gas.Dimensions{ gas.Bandwidth: 16, }, @@ -500,12 +488,8 @@ func TestAuthComplexity(t *testing.T) { }{ { name: "any can spend", - auth: &secp256k1fx.Input{ - SigIndices: make([]uint32, 0), - }, - cred: &secp256k1fx.Credential{ - Sigs: make([][secp256k1.SignatureLen]byte, 0), - }, + auth: &secp256k1fx.Input{}, + cred: &secp256k1fx.Credential{}, expected: gas.Dimensions{ gas.Bandwidth: 8, }, diff --git a/vms/platformvm/txs/txstest/wallet.go 
b/vms/platformvm/txs/txstest/wallet.go index 4a9e74b60aac..c634d2be6c66 100644 --- a/vms/platformvm/txs/txstest/wallet.go +++ b/vms/platformvm/txs/txstest/wallet.go @@ -4,7 +4,6 @@ package txstest import ( - "context" "math" "testing" @@ -47,7 +46,7 @@ func NewWallet( for _, utxo := range pChainUTXOs { require.NoError(utxos.AddUTXO( - context.Background(), + t.Context(), constants.PlatformChainID, constants.PlatformChainID, utxo, @@ -68,7 +67,7 @@ func NewWallet( for _, utxo := range remoteChainUTXOs { require.NoError(utxos.AddUTXO( - context.Background(), + t.Context(), chainID, constants.PlatformChainID, utxo, diff --git a/vms/platformvm/validator_set_property_test.go b/vms/platformvm/validator_set_property_test.go index a05097047ef0..9147b5875a9d 100644 --- a/vms/platformvm/validator_set_property_test.go +++ b/vms/platformvm/validator_set_property_test.go @@ -78,7 +78,7 @@ func TestGetValidatorsSetProperty(t *testing.T) { } vm.ctx.Lock.Lock() defer func() { - _ = vm.Shutdown(context.Background()) + _ = vm.Shutdown(t.Context()) vm.ctx.Lock.Unlock() }() nodeID := ids.GenerateTestNodeID() @@ -155,7 +155,7 @@ func TestGetValidatorsSetProperty(t *testing.T) { snapshotHeights := maps.Keys(validatorSetByHeightAndSubnet) slices.Sort(snapshotHeights) for idx, snapShotHeight := range snapshotHeights { - lastAcceptedHeight, err := vm.GetCurrentHeight(context.Background()) + lastAcceptedHeight, err := vm.GetCurrentHeight(t.Context()) if err != nil { return err.Error() } @@ -169,7 +169,7 @@ func TestGetValidatorsSetProperty(t *testing.T) { // does not change and must be equal to snapshot at [snapShotHeight] for height := snapShotHeight; height < nextSnapShotHeight; height++ { for subnetID, validatorsSet := range validatorSetByHeightAndSubnet[snapShotHeight] { - res, err := vm.GetValidatorSet(context.Background(), height, subnetID) + res, err := vm.GetValidatorSet(t.Context(), height, subnetID) if err != nil { return fmt.Sprintf("failed GetValidatorSet at height %v: %v", height, err) } @@ -476,7 +476,7 @@ func TestTimestampListGenerator(t *testing.T) { } // nil out non subnet validators - subnetIndexes := make([]int, 0) + subnetIndexes := make([]int, 0, len(validatorsTimes)) for idx, ev := range validatorsTimes { if ev.eventType == startSubnetValidator { subnetIndexes = append(subnetIndexes, idx) @@ -527,7 +527,7 @@ func TestTimestampListGenerator(t *testing.T) { } // nil out non subnet validators - nonSubnetIndexes := make([]int, 0) + nonSubnetIndexes := make([]int, 0, len(validatorsTimes)) for idx, ev := range validatorsTimes { if ev.eventType != startSubnetValidator { nonSubnetIndexes = append(nonSubnetIndexes, idx) @@ -640,7 +640,7 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { } err := vm.Initialize( - context.Background(), + t.Context(), ctx, chainDB, genesistest.NewBytes(t, genesistest.Config{ @@ -658,7 +658,7 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { return nil, ids.Empty, err } - err = vm.SetState(context.Background(), snow.NormalOp) + err = vm.SetState(t.Context(), snow.NormalOp) if err != nil { return nil, ids.Empty, err } @@ -686,17 +686,17 @@ func buildVM(t *testing.T) (*VM, ids.ID, error) { return nil, ids.Empty, err } - blk, err := vm.Builder.BuildBlock(context.Background()) + blk, err := vm.Builder.BuildBlock(t.Context()) if err != nil { return nil, ids.Empty, err } - if err := blk.Verify(context.Background()); err != nil { + if err := blk.Verify(t.Context()); err != nil { return nil, ids.Empty, err } - if err := blk.Accept(context.Background()); err != nil { + if err 
:= blk.Accept(t.Context()); err != nil { return nil, ids.Empty, err } - if err := vm.SetPreference(context.Background(), vm.manager.LastAccepted()); err != nil { + if err := vm.SetPreference(t.Context(), vm.manager.LastAccepted()); err != nil { return nil, ids.Empty, err } diff --git a/vms/platformvm/validators/manager_benchmark_test.go b/vms/platformvm/validators/manager_benchmark_test.go index 7cdc66427410..5ff3c9bb9bb2 100644 --- a/vms/platformvm/validators/manager_benchmark_test.go +++ b/vms/platformvm/validators/manager_benchmark_test.go @@ -4,7 +4,6 @@ package validators import ( - "context" "math/rand" "testing" "time" @@ -87,7 +86,7 @@ func BenchmarkGetValidatorSet(b *testing.B) { require.NoError(addSubnetDelegator(s, subnetID, genesistest.DefaultValidatorStartTime, genesistest.DefaultValidatorEndTime, nodeIDs, currentHeight)) } - ctx := context.Background() + ctx := b.Context() height, err := m.GetCurrentHeight(ctx) require.NoError(err) require.Equal(currentHeight, height) diff --git a/vms/platformvm/validators/manager_test.go b/vms/platformvm/validators/manager_test.go index f6f6f8aa4b48..61c8e5487631 100644 --- a/vms/platformvm/validators/manager_test.go +++ b/vms/platformvm/validators/manager_test.go @@ -5,7 +5,6 @@ package validators_test import ( "bytes" - "context" "math" "testing" "time" @@ -125,7 +124,7 @@ func TestGetValidatorSet_AfterEtna(t *testing.T) { {}, // Subnet staker was removed at height 2 } for height, expected := range expectedValidators { - actual, err := m.GetValidatorSet(context.Background(), uint64(height), subnetID) + actual, err := m.GetValidatorSet(t.Context(), uint64(height), subnetID) require.NoError(err) require.Equal(expected, actual) } @@ -301,15 +300,15 @@ func TestGetWarpValidatorSets(t *testing.T) { }, // Subnet was removed at height 3 } for height, expected := range expectedValidators { - actual, err := m.GetWarpValidatorSets(context.Background(), uint64(height)) + actual, err := m.GetWarpValidatorSets(t.Context(), uint64(height)) require.NoError(err) require.Equal(expected, actual) - actualPrimaryNetwork, err := m.GetWarpValidatorSet(context.Background(), uint64(height), constants.PrimaryNetworkID) + actualPrimaryNetwork, err := m.GetWarpValidatorSet(t.Context(), uint64(height), constants.PrimaryNetworkID) require.NoError(err) require.Equal(expected[constants.PrimaryNetworkID], actualPrimaryNetwork) - actualSubnet, err := m.GetWarpValidatorSet(context.Background(), uint64(height), subnetID) + actualSubnet, err := m.GetWarpValidatorSet(t.Context(), uint64(height), subnetID) if err != nil { require.NotContains(expected, subnetID) continue diff --git a/vms/platformvm/vm_regression_test.go b/vms/platformvm/vm_regression_test.go index a0e0da84d8e5..511cb13406a8 100644 --- a/vms/platformvm/vm_regression_test.go +++ b/vms/platformvm/vm_regression_test.go @@ -344,12 +344,12 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { ctx := snowtest.Context(t, snowtest.PChainID) ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) ctx.Lock.Unlock() }() require.NoError(vm.Initialize( - context.Background(), + t.Context(), ctx, baseDB, genesistest.NewBytes(t, genesistest.Config{}), @@ -442,20 +442,20 @@ func TestUnverifiedParentPanicRegression(t *testing.T) { require.NoError(err) addSubnetBlk2 := vm.manager.NewBlock(statelessStandardBlk) - _, err = vm.ParseBlock(context.Background(), addSubnetBlk0.Bytes()) + _, err = vm.ParseBlock(t.Context(), addSubnetBlk0.Bytes()) 
require.NoError(err) - _, err = vm.ParseBlock(context.Background(), addSubnetBlk1.Bytes()) + _, err = vm.ParseBlock(t.Context(), addSubnetBlk1.Bytes()) require.NoError(err) - _, err = vm.ParseBlock(context.Background(), addSubnetBlk2.Bytes()) + _, err = vm.ParseBlock(t.Context(), addSubnetBlk2.Bytes()) require.NoError(err) - require.NoError(addSubnetBlk0.Verify(context.Background())) - require.NoError(addSubnetBlk0.Accept(context.Background())) + require.NoError(addSubnetBlk0.Verify(t.Context())) + require.NoError(addSubnetBlk0.Accept(t.Context())) // Doesn't matter what verify returns as long as it's not panicking. - _ = addSubnetBlk2.Verify(context.Background()) + _ = addSubnetBlk2.Verify(t.Context()) } func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { @@ -503,7 +503,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require.NoError(err) addValidatorStandardBlk := vm.manager.NewBlock(statelessBlk) - require.NoError(addValidatorStandardBlk.Verify(context.Background())) + require.NoError(addValidatorStandardBlk.Verify(t.Context())) // Verify that the new validator now in pending validator set { @@ -568,7 +568,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { // Because the shared memory UTXO hasn't been populated, this block is // currently invalid. - err = importBlk.Verify(context.Background()) + err = importBlk.Verify(t.Context()) require.ErrorIs(err, database.ErrNotFound) // Populate the shared memory UTXO. @@ -596,7 +596,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { // Because the shared memory UTXO has now been populated, the block should // pass verification. - require.NoError(importBlk.Verify(context.Background())) + require.NoError(importBlk.Verify(t.Context())) // Move chain time ahead to bring the new validator from the pending // validator set into the current validator set. @@ -616,7 +616,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { require.NoError(err) advanceTimeStandardBlk := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk) - require.NoError(advanceTimeStandardBlk.Verify(context.Background())) + require.NoError(advanceTimeStandardBlk.Verify(t.Context())) // Accept all the blocks allBlocks := []snowman.Block{ @@ -625,7 +625,7 @@ func TestRejectedStateRegressionInvalidValidatorTimestamp(t *testing.T) { advanceTimeStandardBlk, } for _, blk := range allBlocks { - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Accept(t.Context())) } // Force a reload of the state from the database. @@ -700,7 +700,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) addValidatorStandardBlk0 := vm.manager.NewBlock(statelessAddValidatorStandardBlk0) - require.NoError(addValidatorStandardBlk0.Verify(context.Background())) + require.NoError(addValidatorStandardBlk0.Verify(t.Context())) // Verify that first new validator now in pending validator set { @@ -729,7 +729,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) advanceTimeStandardBlk0 := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk0) - require.NoError(advanceTimeStandardBlk0.Verify(context.Background())) + require.NoError(advanceTimeStandardBlk0.Verify(t.Context())) // Verify that the first new validator is now in the current validator set. 
{ @@ -799,7 +799,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { importBlk := vm.manager.NewBlock(statelessImportBlk) // Because the shared memory UTXO hasn't been populated, this block is // currently invalid. - err = importBlk.Verify(context.Background()) + err = importBlk.Verify(t.Context()) require.ErrorIs(err, database.ErrNotFound) // Populate the shared memory UTXO. @@ -827,7 +827,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { // Because the shared memory UTXO has now been populated, the block should // pass verification. - require.NoError(importBlk.Verify(context.Background())) + require.NoError(importBlk.Verify(t.Context())) newValidatorStartTime1 := newValidatorStartTime0.Add(executor.SyncBound).Add(1 * time.Second) newValidatorEndTime1 := newValidatorStartTime1.Add(defaultMaxStakingDuration) @@ -862,7 +862,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { addValidatorStandardBlk1 := vm.manager.NewBlock(statelessAddValidatorStandardBlk1) - require.NoError(addValidatorStandardBlk1.Verify(context.Background())) + require.NoError(addValidatorStandardBlk1.Verify(t.Context())) // Verify that the second new validator now in pending validator set { @@ -891,7 +891,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { require.NoError(err) advanceTimeStandardBlk1 := vm.manager.NewBlock(statelessAdvanceTimeStandardBlk1) - require.NoError(advanceTimeStandardBlk1.Verify(context.Background())) + require.NoError(advanceTimeStandardBlk1.Verify(t.Context())) // Verify that the second new validator is now in the current validator set. { @@ -917,7 +917,7 @@ func TestRejectedStateRegressionInvalidValidatorReward(t *testing.T) { advanceTimeStandardBlk1, } for _, blk := range allBlocks { - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Accept(t.Context())) } // Force a reload of the state from the database. 
@@ -959,7 +959,7 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - currentHeight, err := vm.GetCurrentHeight(context.Background()) + currentHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(err) require.Equal(uint64(1), currentHeight) @@ -970,7 +970,7 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { genesistest.DefaultNodeIDs[3]: genesistest.DefaultValidatorWeight, genesistest.DefaultNodeIDs[4]: genesistest.DefaultValidatorWeight, } - validators, err := vm.GetValidatorSet(context.Background(), 1, constants.PrimaryNetworkID) + validators, err := vm.GetValidatorSet(t.Context(), 1, constants.PrimaryNetworkID) require.NoError(err) for nodeID, weight := range expectedValidators1 { require.Equal(weight, validators[nodeID].Weight) @@ -1014,16 +1014,16 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { ) require.NoError(err) addValidatorProposalBlk0 := vm.manager.NewBlock(statelessStandardBlk) - require.NoError(addValidatorProposalBlk0.Verify(context.Background())) - require.NoError(addValidatorProposalBlk0.Accept(context.Background())) - require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + require.NoError(addValidatorProposalBlk0.Verify(t.Context())) + require.NoError(addValidatorProposalBlk0.Accept(t.Context())) + require.NoError(vm.SetPreference(t.Context(), vm.manager.LastAccepted())) - currentHeight, err = vm.GetCurrentHeight(context.Background()) + currentHeight, err = vm.GetCurrentHeight(t.Context()) require.NoError(err) require.Equal(uint64(2), currentHeight) for i := uint64(1); i <= 2; i++ { - validators, err = vm.GetValidatorSet(context.Background(), i, constants.PrimaryNetworkID) + validators, err = vm.GetValidatorSet(t.Context(), i, constants.PrimaryNetworkID) require.NoError(err) for nodeID, weight := range expectedValidators1 { require.Equal(weight, validators[nodeID].Weight) @@ -1050,16 +1050,16 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { ) require.NoError(err) advanceTimeProposalBlk0 := vm.manager.NewBlock(statelessStandardBlk) - require.NoError(advanceTimeProposalBlk0.Verify(context.Background())) - require.NoError(advanceTimeProposalBlk0.Accept(context.Background())) - require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + require.NoError(advanceTimeProposalBlk0.Verify(t.Context())) + require.NoError(advanceTimeProposalBlk0.Accept(t.Context())) + require.NoError(vm.SetPreference(t.Context(), vm.manager.LastAccepted())) - currentHeight, err = vm.GetCurrentHeight(context.Background()) + currentHeight, err = vm.GetCurrentHeight(t.Context()) require.NoError(err) require.Equal(uint64(3), currentHeight) for i := uint64(1); i <= 2; i++ { - validators, err = vm.GetValidatorSet(context.Background(), i, constants.PrimaryNetworkID) + validators, err = vm.GetValidatorSet(t.Context(), i, constants.PrimaryNetworkID) require.NoError(err) for nodeID, weight := range expectedValidators1 { require.Equal(weight, validators[nodeID].Weight) @@ -1074,7 +1074,7 @@ func TestValidatorSetAtCacheOverwriteRegression(t *testing.T) { genesistest.DefaultNodeIDs[4]: genesistest.DefaultValidatorWeight, extraNodeID: vm.MaxValidatorStake, } - validators, err = vm.GetValidatorSet(context.Background(), 3, constants.PrimaryNetworkID) + validators, err = vm.GetValidatorSet(t.Context(), 3, constants.PrimaryNetworkID) require.NoError(err) for nodeID, weight := range expectedValidators2 { require.Equal(weight, 
validators[nodeID].Weight) @@ -1240,11 +1240,11 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t // Accept addSubnetValidatorTx require.NoError(buildAndAcceptStandardBlock(vm)) - addSubnetValidatorHeight, err := vm.GetCurrentHeight(context.Background()) + addSubnetValidatorHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(err) emptyValidatorSet, err := vm.GetValidatorSet( - context.Background(), + t.Context(), addSubnetValidatorHeight, subnetID, ) @@ -1269,7 +1269,7 @@ func TestRemovePermissionedValidatorDuringPendingToCurrentTransitionNotTracked(t require.NoError(buildAndAcceptStandardBlock(vm)) emptyValidatorSet, err = vm.GetValidatorSet( - context.Background(), + t.Context(), addSubnetValidatorHeight, subnetID, ) @@ -1512,7 +1512,7 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.NoError(t, err) - primaryStartHeight, err := vm.GetCurrentHeight(context.Background()) + primaryStartHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(t, err) t.Logf("primaryStartHeight: %d", primaryStartHeight) @@ -1542,7 +1542,7 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { _, err = vm.state.GetCurrentValidator(subnetID, nodeID) require.NoError(t, err) - subnetStartHeight, err := vm.GetCurrentHeight(context.Background()) + subnetStartHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(t, err) t.Logf("subnetStartHeight: %d", subnetStartHeight) @@ -1553,32 +1553,32 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { _, err = vm.state.GetCurrentValidator(subnetID, nodeID) require.ErrorIs(t, err, database.ErrNotFound) - subnetEndHeight, err := vm.GetCurrentHeight(context.Background()) + subnetEndHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(t, err) t.Logf("subnetEndHeight: %d", subnetEndHeight) // move time ahead, terminating primary network validator vm.clock.Set(primaryEndTime) - blk, err := vm.Builder.BuildBlock(context.Background()) // must be a proposal block rewarding the primary validator + blk, err := vm.Builder.BuildBlock(t.Context()) // must be a proposal block rewarding the primary validator require.NoError(t, err) - require.NoError(t, blk.Verify(context.Background())) + require.NoError(t, blk.Verify(t.Context())) proposalBlk := blk.(snowman.OracleBlock) - options, err := proposalBlk.Options(context.Background()) + options, err := proposalBlk.Options(t.Context()) require.NoError(t, err) commit := options[0].(*blockexecutor.Block) require.IsType(t, &block.BanffCommitBlock{}, commit.Block) - require.NoError(t, blk.Accept(context.Background())) - require.NoError(t, commit.Verify(context.Background())) - require.NoError(t, commit.Accept(context.Background())) - require.NoError(t, vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + require.NoError(t, blk.Accept(t.Context())) + require.NoError(t, commit.Verify(t.Context())) + require.NoError(t, commit.Accept(t.Context())) + require.NoError(t, vm.SetPreference(t.Context(), vm.manager.LastAccepted())) _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.ErrorIs(t, err, database.ErrNotFound) - primaryEndHeight, err := vm.GetCurrentHeight(context.Background()) + primaryEndHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(t, err) t.Logf("primaryEndHeight: %d", primaryEndHeight) @@ -1619,7 +1619,7 @@ func TestSubnetValidatorBLSKeyDiffAfterExpiry(t *testing.T) { _, err = 
vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.NoError(t, err) - primaryRestartHeight, err := vm.GetCurrentHeight(context.Background()) + primaryRestartHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(t, err) t.Logf("primaryRestartHeight: %d", primaryRestartHeight) @@ -1723,31 +1723,31 @@ func TestPrimaryNetworkValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.NoError(err) - primaryStartHeight, err := vm.GetCurrentHeight(context.Background()) + primaryStartHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(err) // move time ahead, terminating primary network validator vm.clock.Set(primaryEndTime1) - blk, err := vm.Builder.BuildBlock(context.Background()) // must be a proposal block rewarding the primary validator + blk, err := vm.Builder.BuildBlock(t.Context()) // must be a proposal block rewarding the primary validator require.NoError(err) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) proposalBlk := blk.(snowman.OracleBlock) - options, err := proposalBlk.Options(context.Background()) + options, err := proposalBlk.Options(t.Context()) require.NoError(err) commit := options[0].(*blockexecutor.Block) require.IsType(&block.BanffCommitBlock{}, commit.Block) - require.NoError(blk.Accept(context.Background())) - require.NoError(commit.Verify(context.Background())) - require.NoError(commit.Accept(context.Background())) - require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + require.NoError(blk.Accept(t.Context())) + require.NoError(commit.Verify(t.Context())) + require.NoError(commit.Accept(t.Context())) + require.NoError(vm.SetPreference(t.Context(), vm.manager.LastAccepted())) _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.ErrorIs(err, database.ErrNotFound) - primaryEndHeight, err := vm.GetCurrentHeight(context.Background()) + primaryEndHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(err) // reinsert primary validator with a different BLS key @@ -1855,7 +1855,7 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.NoError(err) - primaryStartHeight, err := vm.GetCurrentHeight(context.Background()) + primaryStartHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(err) // insert the subnet validator @@ -1884,7 +1884,7 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { _, err = vm.state.GetCurrentValidator(subnetID, nodeID) require.NoError(err) - subnetStartHeight, err := vm.GetCurrentHeight(context.Background()) + subnetStartHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(err) // move time ahead, terminating the subnet validator @@ -1894,31 +1894,31 @@ func TestSubnetValidatorPopulatedToEmptyBLSKeyDiff(t *testing.T) { _, err = vm.state.GetCurrentValidator(subnetID, nodeID) require.ErrorIs(err, database.ErrNotFound) - subnetEndHeight, err := vm.GetCurrentHeight(context.Background()) + subnetEndHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(err) // move time ahead, terminating primary network validator vm.clock.Set(primaryEndTime1) - blk, err := vm.Builder.BuildBlock(context.Background()) // must be a proposal block rewarding the primary validator + blk, err := vm.Builder.BuildBlock(t.Context()) // must be a proposal block rewarding the primary validator require.NoError(err) - 
require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) proposalBlk := blk.(snowman.OracleBlock) - options, err := proposalBlk.Options(context.Background()) + options, err := proposalBlk.Options(t.Context()) require.NoError(err) commit := options[0].(*blockexecutor.Block) require.IsType(&block.BanffCommitBlock{}, commit.Block) - require.NoError(blk.Accept(context.Background())) - require.NoError(commit.Verify(context.Background())) - require.NoError(commit.Accept(context.Background())) - require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + require.NoError(blk.Accept(t.Context())) + require.NoError(commit.Verify(t.Context())) + require.NoError(commit.Accept(t.Context())) + require.NoError(vm.SetPreference(t.Context(), vm.manager.LastAccepted())) _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.ErrorIs(err, database.ErrNotFound) - primaryEndHeight, err := vm.GetCurrentHeight(context.Background()) + primaryEndHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(err) // reinsert primary validator with a different BLS key @@ -1982,10 +1982,10 @@ func TestValidatorSetReturnsCopy(t *testing.T) { vm, _, _ := defaultVM(t, upgradetest.Latest) - validators1, err := vm.GetValidatorSet(context.Background(), 1, constants.PrimaryNetworkID) + validators1, err := vm.GetValidatorSet(t.Context(), 1, constants.PrimaryNetworkID) require.NoError(err) - validators2, err := vm.GetValidatorSet(context.Background(), 1, constants.PrimaryNetworkID) + validators2, err := vm.GetValidatorSet(t.Context(), 1, constants.PrimaryNetworkID) require.NoError(err) require.NotNil(validators1[genesistest.DefaultNodeIDs[0]]) @@ -2074,7 +2074,7 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { _, err = vm.state.GetCurrentValidator(subnetID, nodeID) require.NoError(err) - subnetStartHeight, err := vm.GetCurrentHeight(context.Background()) + subnetStartHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(err) // move time ahead, terminating the subnet validator @@ -2086,28 +2086,28 @@ func TestSubnetValidatorSetAfterPrimaryNetworkValidatorRemoval(t *testing.T) { // move time ahead, terminating primary network validator vm.clock.Set(primaryEndTime1) - blk, err := vm.Builder.BuildBlock(context.Background()) // must be a proposal block rewarding the primary validator + blk, err := vm.Builder.BuildBlock(t.Context()) // must be a proposal block rewarding the primary validator require.NoError(err) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) proposalBlk := blk.(snowman.OracleBlock) - options, err := proposalBlk.Options(context.Background()) + options, err := proposalBlk.Options(t.Context()) require.NoError(err) commit := options[0].(*blockexecutor.Block) require.IsType(&block.BanffCommitBlock{}, commit.Block) - require.NoError(blk.Accept(context.Background())) - require.NoError(commit.Verify(context.Background())) - require.NoError(commit.Accept(context.Background())) - require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + require.NoError(blk.Accept(t.Context())) + require.NoError(commit.Verify(t.Context())) + require.NoError(commit.Accept(t.Context())) + require.NoError(vm.SetPreference(t.Context(), vm.manager.LastAccepted())) _, err = vm.state.GetCurrentValidator(constants.PrimaryNetworkID, nodeID) require.ErrorIs(err, database.ErrNotFound) // Generating the validator set should not error when 
re-introducing a // subnet validator whose primary network validator was also removed. - _, err = vm.State.GetValidatorSet(context.Background(), subnetStartHeight, subnetID) + _, err = vm.State.GetValidatorSet(t.Context(), subnetStartHeight, subnetID) require.NoError(err) } @@ -2118,7 +2118,7 @@ func TestValidatorSetRaceCondition(t *testing.T) { defer vm.ctx.Lock.Unlock() nodeID := ids.GenerateTestNodeID() - require.NoError(vm.Connected(context.Background(), nodeID, version.CurrentApp)) + require.NoError(vm.Connected(t.Context(), nodeID, version.CurrentApp)) protocolAppRequestBytest, err := gossip.MarshalAppRequest( bloom.EmptyFilter.Marshal(), @@ -2133,14 +2133,14 @@ func TestValidatorSetRaceCondition(t *testing.T) { var ( eg errgroup.Group - ctx, cancel = context.WithCancel(context.Background()) + ctx, cancel = context.WithCancel(t.Context()) ) // keep 10 workers running for i := 0; i < 10; i++ { eg.Go(func() error { for ctx.Err() == nil { err := vm.AppRequest( - context.Background(), + t.Context(), nodeID, 0, time.Now().Add(time.Hour), @@ -2183,10 +2183,10 @@ func TestBanffStandardBlockWithNoChangesRemainsInvalid(t *testing.T) { vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := vm.LastAccepted(t.Context()) require.NoError(err) - lastAccepted, err := vm.GetBlock(context.Background(), lastAcceptedID) + lastAccepted, err := vm.GetBlock(t.Context(), lastAcceptedID) require.NoError(err) statelessBlk, err := block.NewBanffStandardBlock( @@ -2197,11 +2197,11 @@ func TestBanffStandardBlockWithNoChangesRemainsInvalid(t *testing.T) { ) require.NoError(err) - blk, err := vm.ParseBlock(context.Background(), statelessBlk.Bytes()) + blk, err := vm.ParseBlock(t.Context(), statelessBlk.Bytes()) require.NoError(err) for range 2 { - err = blk.Verify(context.Background()) + err = blk.Verify(t.Context()) require.ErrorIs(err, blockexecutor.ErrStandardBlockWithoutChanges) } } diff --git a/vms/platformvm/vm_test.go b/vms/platformvm/vm_test.go index 39ba3371687d..b774a2537647 100644 --- a/vms/platformvm/vm_test.go +++ b/vms/platformvm/vm_test.go @@ -172,7 +172,7 @@ func defaultVM(t *testing.T, f upgradetest.Fork) (*VM, database.Database, *mutab dynamicConfigBytes := []byte(`{"network":{"max-validator-set-staleness":0}}`) require.NoError(vm.Initialize( - context.Background(), + t.Context(), ctx, chainDB, genesistest.NewBytes(t, genesistest.Config{}), @@ -188,7 +188,7 @@ func defaultVM(t *testing.T, f upgradetest.Fork) (*VM, database.Database, *mutab Capacity: defaultDynamicFeeConfig.MaxCapacity, }) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + require.NoError(vm.SetState(t.Context(), snow.NormalOp)) wallet := newWallet(t, vm, walletConfig{ keys: []*secp256k1.PrivateKey{genesistest.DefaultFundedKeys[0]}, @@ -219,7 +219,7 @@ func defaultVM(t *testing.T, f upgradetest.Fork) (*VM, database.Database, *mutab vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }) return vm, db, msm @@ -254,7 +254,7 @@ func TestGenesis(t *testing.T) { defer vm.ctx.Lock.Unlock() // Ensure the genesis block has been accepted and stored - genesisBlockID, err := vm.LastAccepted(context.Background()) // lastAccepted should be ID of genesis block + genesisBlockID, err := vm.LastAccepted(t.Context()) // lastAccepted should be ID of genesis block require.NoError(err) // Ensure the genesis block can be retrieved @@ -400,10 +400,10 @@ func 
TestInvalidAddValidatorCommit(t *testing.T) { blkBytes := statelessBlk.Bytes() - parsedBlock, err := vm.ParseBlock(context.Background(), blkBytes) + parsedBlock, err := vm.ParseBlock(t.Context(), blkBytes) require.NoError(err) - err = parsedBlock.Verify(context.Background()) + err = parsedBlock.Verify(t.Context()) require.ErrorIs(err, txexecutor.ErrTimestampNotBeforeStartTime) txID := statelessBlk.Txs()[0].ID() @@ -448,11 +448,11 @@ func TestAddValidatorReject(t *testing.T) { require.NoError(vm.issueTxFromRPC(tx)) vm.ctx.Lock.Lock() - blk, err := vm.Builder.BuildBlock(context.Background()) + blk, err := vm.Builder.BuildBlock(t.Context()) require.NoError(err) - require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Reject(context.Background())) + require.NoError(blk.Verify(t.Context())) + require.NoError(blk.Reject(t.Context())) _, _, err = vm.state.GetTx(tx.ID()) require.ErrorIs(err, database.ErrNotFound) @@ -600,11 +600,11 @@ func TestAddSubnetValidatorReject(t *testing.T) { require.NoError(vm.issueTxFromRPC(tx)) vm.ctx.Lock.Lock() - blk, err := vm.Builder.BuildBlock(context.Background()) + blk, err := vm.Builder.BuildBlock(t.Context()) require.NoError(err) - require.NoError(blk.Verify(context.Background())) - require.NoError(blk.Reject(context.Background())) + require.NoError(blk.Verify(t.Context())) + require.NoError(blk.Reject(t.Context())) _, _, err = vm.state.GetTx(tx.ID()) require.ErrorIs(err, database.ErrNotFound) @@ -625,12 +625,12 @@ func TestRewardValidatorAccept(t *testing.T) { vm.clock.Set(genesistest.DefaultValidatorEndTime) // Advance time and create proposal to reward a genesis validator - blk, err := vm.Builder.BuildBlock(context.Background()) + blk, err := vm.Builder.BuildBlock(t.Context()) require.NoError(err) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) // Assert preferences are correct - options, err := blk.(smcon.OracleBlock).Options(context.Background()) + options, err := blk.(smcon.OracleBlock).Options(t.Context()) require.NoError(err) commit := options[0].(*blockexecutor.Block) @@ -643,8 +643,8 @@ func TestRewardValidatorAccept(t *testing.T) { require.IsType(&txs.RewardValidatorTx{}, rewardTx) // Verify options and accept commit block - require.NoError(commit.Verify(context.Background())) - require.NoError(abort.Verify(context.Background())) + require.NoError(commit.Verify(t.Context())) + require.NoError(abort.Verify(t.Context())) txID := blk.(block.Block).Txs()[0].ID() { onAbort, ok := vm.manager.GetState(abort.ID()) @@ -655,8 +655,8 @@ func TestRewardValidatorAccept(t *testing.T) { require.Equal(status.Aborted, txStatus) } - require.NoError(blk.Accept(context.Background())) - require.NoError(commit.Accept(context.Background())) + require.NoError(blk.Accept(t.Context())) + require.NoError(commit.Accept(t.Context())) // Verify that chain's timestamp has advanced timestamp := vm.state.GetTimestamp() @@ -693,13 +693,13 @@ func TestRewardValidatorReject(t *testing.T) { vm.clock.Set(genesistest.DefaultValidatorEndTime) // Advance time and create proposal to reward a genesis validator - blk, err := vm.Builder.BuildBlock(context.Background()) + blk, err := vm.Builder.BuildBlock(t.Context()) require.NoError(err) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) // Assert preferences are correct oracleBlk := blk.(smcon.OracleBlock) - options, err := oracleBlk.Options(context.Background()) + options, err := oracleBlk.Options(t.Context()) require.NoError(err) 
commit := options[0].(*blockexecutor.Block) @@ -713,8 +713,8 @@ func TestRewardValidatorReject(t *testing.T) { require.IsType(&txs.RewardValidatorTx{}, rewardTx) // Verify options and accept abort block - require.NoError(commit.Verify(context.Background())) - require.NoError(abort.Verify(context.Background())) + require.NoError(commit.Verify(t.Context())) + require.NoError(abort.Verify(t.Context())) txID := blk.(block.Block).Txs()[0].ID() { onAccept, ok := vm.manager.GetState(commit.ID()) @@ -725,8 +725,8 @@ func TestRewardValidatorReject(t *testing.T) { require.Equal(status.Committed, txStatus) } - require.NoError(blk.Accept(context.Background())) - require.NoError(abort.Accept(context.Background())) + require.NoError(blk.Accept(t.Context())) + require.NoError(abort.Accept(t.Context())) // Verify that chain's timestamp has advanced timestamp := vm.state.GetTimestamp() @@ -759,7 +759,7 @@ func TestUnneededBuildBlock(t *testing.T) { vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - _, err := vm.Builder.BuildBlock(context.Background()) + _, err := vm.Builder.BuildBlock(t.Context()) require.ErrorIs(err, blockbuilder.ErrNoPendingBlocks) } @@ -1004,16 +1004,16 @@ func TestOptimisticAtomicImport(t *testing.T) { blk := vm.manager.NewBlock(statelessBlk) - err = blk.Verify(context.Background()) + err = blk.Verify(t.Context()) require.ErrorIs(err, database.ErrNotFound) // erred due to missing shared memory UTXOs - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(t.Context(), snow.Bootstrapping)) - require.NoError(blk.Verify(context.Background())) // skips shared memory UTXO verification during bootstrapping + require.NoError(blk.Verify(t.Context())) // skips shared memory UTXO verification during bootstrapping - require.NoError(blk.Accept(context.Background())) + require.NoError(blk.Accept(t.Context())) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + require.NoError(vm.SetState(t.Context(), snow.NormalOp)) _, txStatus, err := vm.state.GetTx(tx.ID()) require.NoError(err) @@ -1051,7 +1051,7 @@ func TestRestartFullyAccepted(t *testing.T) { firstCtx.Lock.Lock() require.NoError(firstVM.Initialize( - context.Background(), + t.Context(), firstCtx, firstDB, genesisBytes, @@ -1061,7 +1061,7 @@ func TestRestartFullyAccepted(t *testing.T) { nil, )) - genesisID, err := firstVM.LastAccepted(context.Background()) + genesisID, err := firstVM.LastAccepted(t.Context()) require.NoError(err) // include a tx to make the block be accepted @@ -1104,10 +1104,10 @@ func TestRestartFullyAccepted(t *testing.T) { nextChainTime = nextChainTime.Add(2 * time.Second) firstVM.clock.Set(nextChainTime) - require.NoError(firstAdvanceTimeBlk.Verify(context.Background())) - require.NoError(firstAdvanceTimeBlk.Accept(context.Background())) + require.NoError(firstAdvanceTimeBlk.Verify(t.Context())) + require.NoError(firstAdvanceTimeBlk.Accept(t.Context())) - require.NoError(firstVM.Shutdown(context.Background())) + require.NoError(firstVM.Shutdown(t.Context())) firstCtx.Lock.Unlock() secondVM := &VM{Internal: config.Internal{ @@ -1125,13 +1125,13 @@ func TestRestartFullyAccepted(t *testing.T) { secondVM.clock.Set(initialClkTime) secondCtx.Lock.Lock() defer func() { - require.NoError(secondVM.Shutdown(context.Background())) + require.NoError(secondVM.Shutdown(t.Context())) secondCtx.Lock.Unlock() }() secondDB := prefixdb.New([]byte{}, db) require.NoError(secondVM.Initialize( - context.Background(), + t.Context(), secondCtx, secondDB, genesisBytes, @@ -1141,7 +1141,7 
@@ func TestRestartFullyAccepted(t *testing.T) { nil, )) - lastAccepted, err := secondVM.LastAccepted(context.Background()) + lastAccepted, err := secondVM.LastAccepted(t.Context()) require.NoError(err) require.Equal(genesisID, lastAccepted) } @@ -1173,7 +1173,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { ctx.Lock.Lock() require.NoError(vm.Initialize( - context.Background(), + t.Context(), ctx, vmDB, genesistest.NewBytes(t, genesistest.Config{}), @@ -1368,10 +1368,10 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { }) // Allow incoming messages to be routed to the new chain - chainRouter.AddChain(context.Background(), h) + chainRouter.AddChain(t.Context(), h) ctx.Lock.Unlock() - h.Start(context.Background(), false) + h.Start(t.Context(), false) ctx.Lock.Lock() @@ -1390,7 +1390,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { } peerTracker.Connected(vdrID, version.CurrentApp) - require.NoError(bootstrapper.Connected(context.Background(), vdrID, version.CurrentApp)) + require.NoError(bootstrapper.Connected(t.Context(), vdrID, version.CurrentApp)) // Create a valid block to remove the first genesis validator that is not // related to the VM. @@ -1428,7 +1428,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { return config.NodeIDs } - require.NoError(bootstrapper.AcceptedFrontier(context.Background(), vdrID, reqID, rewardValidatorBlk.ID())) + require.NoError(bootstrapper.AcceptedFrontier(t.Context(), vdrID, reqID, rewardValidatorBlk.ID())) // Report the validator removal as accepted. We should request the validator // removal block and any ancestors of it. @@ -1447,7 +1447,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { } frontier := set.Of(rewardValidatorBlk.ID()) - require.NoError(bootstrapper.Accepted(context.Background(), vdrID, reqID, frontier)) + require.NoError(bootstrapper.Accepted(t.Context(), vdrID, reqID, frontier)) // Provide the validator removal block. We should process this block and // then do another round of bootstrapping. @@ -1463,7 +1463,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { return config.NodeIDs } - require.NoError(bootstrapper.Ancestors(context.Background(), vdrID, reqID, [][]byte{rewardValidatorBlk.Bytes()})) + require.NoError(bootstrapper.Ancestors(t.Context(), vdrID, reqID, [][]byte{rewardValidatorBlk.Bytes()})) // We should again report the validator removal block as the last accepted // block. @@ -1477,7 +1477,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { return config.NodeIDs } - require.NoError(bootstrapper.AcceptedFrontier(context.Background(), vdrID, reqID, rewardValidatorBlk.ID())) + require.NoError(bootstrapper.AcceptedFrontier(t.Context(), vdrID, reqID, rewardValidatorBlk.ID())) // Again confirming the validator removal block as accepted should // transition us out of bootstrapping. At this point we should check for @@ -1485,7 +1485,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { externalSender.SendF = nil externalSender.CantSend = false - require.NoError(bootstrapper.Accepted(context.Background(), vdrID, reqID, frontier)) + require.NoError(bootstrapper.Accepted(t.Context(), vdrID, reqID, frontier)) // Verify the locally preferred option on the validator removal aligns with // our consensus preference. 
@@ -1501,7 +1501,7 @@ func TestBootstrapPartiallyAccepted(t *testing.T) { require.Equal(commitBlock.ID(), vm.manager.Preferred()) ctx.Lock.Unlock() - chainRouter.Shutdown(context.Background()) + chainRouter.Shutdown(t.Context()) } func TestUnverifiedParent(t *testing.T) { @@ -1522,12 +1522,12 @@ func TestUnverifiedParent(t *testing.T) { ctx := snowtest.Context(t, snowtest.PChainID) ctx.Lock.Lock() defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) ctx.Lock.Unlock() }() require.NoError(vm.Initialize( - context.Background(), + t.Context(), ctx, memdb.New(), genesistest.NewBytes(t, genesistest.Config{}), @@ -1572,7 +1572,7 @@ func TestUnverifiedParent(t *testing.T) { ) require.NoError(err) firstAdvanceTimeBlk := vm.manager.NewBlock(statelessBlk) - require.NoError(firstAdvanceTimeBlk.Verify(context.Background())) + require.NoError(firstAdvanceTimeBlk.Verify(t.Context())) // include a tx2 to make the block be accepted tx2 := &txs.Tx{Unsigned: &txs.ImportTx{ @@ -1605,7 +1605,7 @@ func TestUnverifiedParent(t *testing.T) { secondAdvanceTimeBlk := vm.manager.NewBlock(statelessSecondAdvanceTimeBlk) require.Equal(secondAdvanceTimeBlk.Parent(), firstAdvanceTimeBlk.ID()) - require.NoError(secondAdvanceTimeBlk.Verify(context.Background())) + require.NoError(secondAdvanceTimeBlk.Verify(t.Context())) } func TestMaxStakeAmount(t *testing.T) { @@ -1677,7 +1677,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { genesisBytes := genesistest.NewBytes(t, genesistest.Config{}) require.NoError(firstVM.Initialize( - context.Background(), + t.Context(), firstCtx, firstDB, genesisBytes, @@ -1691,8 +1691,8 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { firstVM.clock.Set(initialClkTime) // Set VM state to NormalOp, to start tracking validators' uptime - require.NoError(firstVM.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(firstVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(firstVM.SetState(t.Context(), snow.Bootstrapping)) + require.NoError(firstVM.SetState(t.Context(), snow.NormalOp)) // Fast forward clock so that validators meet 20% uptime required for reward durationForReward := genesistest.DefaultValidatorEndTime.Sub(genesistest.DefaultValidatorStartTime) * firstUptimePercentage / 100 @@ -1701,7 +1701,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { // Shutdown VM to stop all genesis validator uptime. 
// At this point they have been validating for the 20% uptime needed to be rewarded - require.NoError(firstVM.Shutdown(context.Background())) + require.NoError(firstVM.Shutdown(t.Context())) firstCtx.Lock.Unlock() // Restart the VM with a larger uptime requirement @@ -1718,7 +1718,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { secondCtx := snowtest.Context(t, snowtest.PChainID) secondCtx.Lock.Lock() defer func() { - require.NoError(secondVM.Shutdown(context.Background())) + require.NoError(secondVM.Shutdown(t.Context())) secondCtx.Lock.Unlock() }() @@ -1727,7 +1727,7 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { secondCtx.SharedMemory = m.NewSharedMemory(secondCtx.ChainID) require.NoError(secondVM.Initialize( - context.Background(), + t.Context(), secondCtx, secondDB, genesisBytes, @@ -1740,21 +1740,21 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { secondVM.clock.Set(vmStopTime) // Set VM state to NormalOp, to start tracking validators' uptime - require.NoError(secondVM.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(secondVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(secondVM.SetState(t.Context(), snow.Bootstrapping)) + require.NoError(secondVM.SetState(t.Context(), snow.NormalOp)) // after restart and change of uptime required for reward, push validators to their end of life secondVM.clock.Set(genesistest.DefaultValidatorEndTime) // evaluate a genesis validator for reward - blk, err := secondVM.Builder.BuildBlock(context.Background()) + blk, err := secondVM.Builder.BuildBlock(t.Context()) require.NoError(err) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) // Assert preferences are correct. // secondVM should prefer abort since uptime requirements are not met anymore oracleBlk := blk.(smcon.OracleBlock) - options, err := oracleBlk.Options(context.Background()) + options, err := oracleBlk.Options(t.Context()) require.NoError(err) abort := options[0].(*blockexecutor.Block) @@ -1769,11 +1769,11 @@ func TestUptimeDisallowedWithRestart(t *testing.T) { txID := blk.(block.Block).Txs()[0].ID() // Verify options and accept abort block - require.NoError(commit.Verify(context.Background())) - require.NoError(abort.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) - require.NoError(abort.Accept(context.Background())) - require.NoError(secondVM.SetPreference(context.Background(), secondVM.manager.LastAccepted())) + require.NoError(commit.Verify(t.Context())) + require.NoError(abort.Verify(t.Context())) + require.NoError(blk.Accept(t.Context())) + require.NoError(abort.Accept(t.Context())) + require.NoError(secondVM.SetPreference(t.Context(), secondVM.manager.LastAccepted())) // Verify that rewarded validator has been removed. 
// Note that test genesis has multiple validators @@ -1819,7 +1819,7 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { appSender := &enginetest.Sender{T: t} require.NoError(vm.Initialize( - context.Background(), + t.Context(), ctx, db, genesistest.NewBytes(t, genesistest.Config{}), @@ -1830,7 +1830,7 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { )) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) ctx.Lock.Unlock() }() @@ -1838,21 +1838,21 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { vm.clock.Set(initialClkTime) // Set VM state to NormalOp, to start tracking validators' uptime - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + require.NoError(vm.SetState(t.Context(), snow.Bootstrapping)) + require.NoError(vm.SetState(t.Context(), snow.NormalOp)) // Fast forward clock to time for genesis validators to leave vm.clock.Set(genesistest.DefaultValidatorEndTime) // evaluate a genesis validator for reward - blk, err := vm.Builder.BuildBlock(context.Background()) + blk, err := vm.Builder.BuildBlock(t.Context()) require.NoError(err) - require.NoError(blk.Verify(context.Background())) + require.NoError(blk.Verify(t.Context())) // Assert preferences are correct. // vm should prefer abort since uptime requirements are not met. oracleBlk := blk.(smcon.OracleBlock) - options, err := oracleBlk.Options(context.Background()) + options, err := oracleBlk.Options(t.Context()) require.NoError(err) abort := options[0].(*blockexecutor.Block) @@ -1867,11 +1867,11 @@ func TestUptimeDisallowedAfterNeverConnecting(t *testing.T) { txID := blk.(block.Block).Txs()[0].ID() // Verify options and accept abort block - require.NoError(commit.Verify(context.Background())) - require.NoError(abort.Verify(context.Background())) - require.NoError(blk.Accept(context.Background())) - require.NoError(abort.Accept(context.Background())) - require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + require.NoError(commit.Verify(t.Context())) + require.NoError(abort.Verify(t.Context())) + require.NoError(blk.Accept(t.Context())) + require.NoError(abort.Accept(t.Context())) + require.NoError(vm.SetPreference(t.Context(), vm.manager.LastAccepted())) // Verify that rewarded validator has been removed. 
// Note that test genesis has multiple validators @@ -1973,7 +1973,7 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require.NoError(err) lastAcceptedID := vm.state.GetLastAccepted() - lastAcceptedHeight, err := vm.GetCurrentHeight(context.Background()) + lastAcceptedHeight, err := vm.GetCurrentHeight(t.Context()) require.NoError(err) statelessBlock, err := block.NewBanffStandardBlock( vm.state.GetTimestamp(), @@ -1987,11 +1987,11 @@ func TestRemovePermissionedValidatorDuringAddPending(t *testing.T) { require.NoError(err) blockBytes := statelessBlock.Bytes() - block, err := vm.ParseBlock(context.Background(), blockBytes) + block, err := vm.ParseBlock(t.Context(), blockBytes) require.NoError(err) - require.NoError(block.Verify(context.Background())) - require.NoError(block.Accept(context.Background())) - require.NoError(vm.SetPreference(context.Background(), vm.manager.LastAccepted())) + require.NoError(block.Verify(t.Context())) + require.NoError(block.Accept(t.Context())) + require.NoError(vm.SetPreference(t.Context(), vm.manager.LastAccepted())) _, err = vm.state.GetPendingValidator(subnetID, nodeID) require.ErrorIs(err, database.ErrNotFound) @@ -2207,7 +2207,7 @@ func TestThrottleBlockBuildingUntilNormalOperationsStart(t *testing.T) { ctx.Lock.Lock() require.NoError(vm.Initialize( - context.Background(), + t.Context(), ctx, memdb.New(), genesistest.NewBytes(t, genesistest.Config{}), @@ -2227,10 +2227,10 @@ func TestThrottleBlockBuildingUntilNormalOperationsStart(t *testing.T) { vm.ctx.Lock.Lock() defer vm.ctx.Lock.Unlock() - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(t.Context(), snow.Bootstrapping)) // Advance the time so that the block builder would be willing to remove the // genesis validators. 
@@ -2239,7 +2239,7 @@ func TestThrottleBlockBuildingUntilNormalOperationsStart(t *testing.T) { ctx.Lock.Unlock() - impatientContext, cancel := context.WithTimeout(context.Background(), time.Millisecond*100) + impatientContext, cancel := context.WithTimeout(t.Context(), time.Millisecond*100) defer cancel() msg, err := vm.WaitForEvent(impatientContext) @@ -2247,10 +2247,10 @@ func TestThrottleBlockBuildingUntilNormalOperationsStart(t *testing.T) { require.Zero(msg) ctx.Lock.Lock() - require.NoError(vm.SetState(context.Background(), snow.NormalOp)) + require.NoError(vm.SetState(t.Context(), snow.NormalOp)) ctx.Lock.Unlock() - impatientContext, cancel = context.WithTimeout(context.Background(), time.Minute) + impatientContext, cancel = context.WithTimeout(t.Context(), time.Minute) defer cancel() msg, err = vm.WaitForEvent(impatientContext) diff --git a/vms/platformvm/warp/validator_test.go b/vms/platformvm/warp/validator_test.go index 779503a6890d..71c8fbe9f9eb 100644 --- a/vms/platformvm/warp/validator_test.go +++ b/vms/platformvm/warp/validator_test.go @@ -140,7 +140,7 @@ func TestGetCanonicalValidatorSet(t *testing.T) { state := tt.stateF(ctrl) - validators, err := GetCanonicalValidatorSetFromSubnetID(context.Background(), state, pChainHeight, subnetID) + validators, err := GetCanonicalValidatorSetFromSubnetID(t.Context(), state, pChainHeight, subnetID) require.ErrorIs(err, tt.expectedErr) if err != nil { return @@ -340,7 +340,7 @@ func BenchmarkGetCanonicalValidatorSet(b *testing.B) { b.Run(strconv.Itoa(size), func(b *testing.B) { for i := 0; i < b.N; i++ { - _, err := GetCanonicalValidatorSetFromSubnetID(context.Background(), validatorState, pChainHeight, subnetID) + _, err := GetCanonicalValidatorSetFromSubnetID(b.Context(), validatorState, pChainHeight, subnetID) require.NoError(b, err) } }) diff --git a/vms/proposervm/batched_vm_test.go b/vms/proposervm/batched_vm_test.go index 8f3fa6bc6b82..8338354d38bf 100644 --- a/vms/proposervm/batched_vm_test.go +++ b/vms/proposervm/batched_vm_test.go @@ -39,7 +39,7 @@ func TestCoreVMNotRemote(t *testing.T) { require := require.New(t) _, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() blkID := ids.Empty @@ -47,7 +47,7 @@ func TestCoreVMNotRemote(t *testing.T) { maxBlocksSize := 1000000 // a high value to get all built blocks maxBlocksRetrivalTime := time.Hour // a high value to get all built blocks _, err := proVM.GetAncestors( - context.Background(), + t.Context(), blkID, maxBlocksNum, maxBlocksSize, @@ -56,7 +56,7 @@ func TestCoreVMNotRemote(t *testing.T) { require.ErrorIs(err, block.ErrRemoteVMNotImplemented) var blks [][]byte - shouldBeEmpty, err := proVM.BatchedParseBlock(context.Background(), blks) + shouldBeEmpty, err := proVM.BatchedParseBlock(t.Context(), blks) require.NoError(err) require.Empty(shouldBeEmpty) } @@ -65,7 +65,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { require := require.New(t) coreVM, proRemoteVM := initTestRemoteProposerVM(t, upgradetest.NoUpgrades) defer func() { - require.NoError(proRemoteVM.Shutdown(context.Background())) + require.NoError(proRemoteVM.Shutdown(t.Context())) }() // Build some prefork blocks.... 
@@ -73,11 +73,11 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk1, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) // prepare build of next block - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk1.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreBlk1.ID(): @@ -91,11 +91,11 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk2, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) // prepare build of next block - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk2.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreBlk2.ID(): @@ -109,7 +109,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk3, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) // ...Call GetAncestors on them ... @@ -140,7 +140,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { maxBlocksSize := 1000000 // a high value to get all built blocks maxBlocksRetrivalTime := time.Hour // a high value to get all built blocks res, err := proRemoteVM.GetAncestors( - context.Background(), + t.Context(), reqBlkID, maxBlocksNum, maxBlocksSize, @@ -157,7 +157,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { // another good call reqBlkID = builtBlk1.ID() res, err = proRemoteVM.GetAncestors( - context.Background(), + t.Context(), reqBlkID, maxBlocksNum, maxBlocksSize, @@ -170,7 +170,7 @@ func TestGetAncestorsPreForkOnly(t *testing.T) { // a faulty call reqBlkID = ids.Empty res, err = proRemoteVM.GetAncestors( - context.Background(), + t.Context(), reqBlkID, maxBlocksNum, maxBlocksSize, @@ -184,7 +184,7 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { require := require.New(t) coreVM, proRemoteVM := initTestRemoteProposerVM(t, upgradetest.Latest) defer func() { - require.NoError(proRemoteVM.Shutdown(context.Background())) + require.NoError(proRemoteVM.Shutdown(t.Context())) }() // Build some post-Fork blocks.... 
@@ -192,35 +192,35 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk1, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) // prepare build of next block - require.NoError(builtBlk1.Verify(context.Background())) - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) + require.NoError(builtBlk1.Verify(t.Context())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk1.ID())) require.NoError(proRemoteVM.waitForProposerWindow()) coreBlk2 := snowmantest.BuildChild(coreBlk1) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk2, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) // prepare build of next block - require.NoError(builtBlk2.Verify(context.Background())) - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) + require.NoError(builtBlk2.Verify(t.Context())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk2.ID())) require.NoError(proRemoteVM.waitForProposerWindow()) coreBlk3 := snowmantest.BuildChild(coreBlk2) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk3, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(builtBlk3.Verify(context.Background())) - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) + require.NoError(builtBlk3.Verify(t.Context())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk3.ID())) // ...Call GetAncestors on them ... // Note: we assumed that if blkID is not known, that's NOT an error. @@ -265,7 +265,7 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { maxBlocksSize := 1000000 // a high value to get all built blocks maxBlocksRetrivalTime := time.Hour // a high value to get all built blocks res, err := proRemoteVM.GetAncestors( - context.Background(), + t.Context(), reqBlkID, maxBlocksNum, maxBlocksSize, @@ -282,7 +282,7 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { // another good call reqBlkID = builtBlk1.ID() res, err = proRemoteVM.GetAncestors( - context.Background(), + t.Context(), reqBlkID, maxBlocksNum, maxBlocksSize, @@ -295,7 +295,7 @@ func TestGetAncestorsPostForkOnly(t *testing.T) { // a faulty call reqBlkID = ids.Empty res, err = proRemoteVM.GetAncestors( - context.Background(), + t.Context(), reqBlkID, maxBlocksNum, maxBlocksSize, @@ -318,7 +318,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { // enable ProBlks in next future coreVM, proRemoteVM := initTestRemoteProposerVM(t, upgradetest.Latest, forkTime) defer func() { - require.NoError(proRemoteVM.Shutdown(context.Background())) + require.NoError(proRemoteVM.Shutdown(t.Context())) }() // Build some prefork blocks.... 
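// The repeated `defer func() { require.NoError(proRemoteVM.Shutdown(t.Context())) }()`
// pattern above is safe: deferred calls run while the test function is returning,
// before the framework cancels t.Context(); Go cancels that context only just before
// t.Cleanup callbacks run. A sketch of the distinction, with shutdown as a
// hypothetical stand-in for a context-aware call such as proRemoteVM.Shutdown:
package sketch

import (
	"context"
	"testing"
)

func shutdown(ctx context.Context) error {
	return ctx.Err()
}

func TestDeferSeesLiveContext(t *testing.T) {
	t.Cleanup(func() {
		// By this point the framework has already canceled t.Context(), so a
		// context-aware shutdown placed here would observe a canceled context.
		t.Logf("t.Context() during cleanup: %v", t.Context().Err())
	})

	defer func() {
		// Runs before cancellation: the test context is still live here.
		if err := shutdown(t.Context()); err != nil {
			t.Errorf("deferred shutdown: %v", err)
		}
	}()
}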
@@ -328,12 +328,12 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk1, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&preForkBlock{}, builtBlk1) // prepare build of next block - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk1.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk1.ID(): @@ -348,12 +348,12 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk2, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&preForkBlock{}, builtBlk2) // prepare build of next block - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk2.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk2.ID(): @@ -369,23 +369,23 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk3, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, builtBlk3) // prepare build of next block - require.NoError(builtBlk3.Verify(context.Background())) - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) + require.NoError(builtBlk3.Verify(t.Context())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk3.ID())) require.NoError(proRemoteVM.waitForProposerWindow()) coreBlk4 := snowmantest.BuildChild(coreBlk3) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk4, nil } - builtBlk4, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk4, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, builtBlk4) - require.NoError(builtBlk4.Verify(context.Background())) + require.NoError(builtBlk4.Verify(t.Context())) // ...Call GetAncestors on them ... // Note: we assumed that if blkID is not known, that's NOT an error. 
@@ -421,7 +421,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { maxBlocksSize := 1000000 // an high value to get all built blocks maxBlocksRetrivalTime := 10 * time.Minute // an high value to get all built blocks res, err := proRemoteVM.GetAncestors( - context.Background(), + t.Context(), reqBlkID, maxBlocksNum, maxBlocksSize, @@ -440,7 +440,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { reqBlkID = builtBlk4.ID() maxBlocksNum = 3 res, err = proRemoteVM.GetAncestors( - context.Background(), + t.Context(), reqBlkID, maxBlocksNum, maxBlocksSize, @@ -457,7 +457,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { // another good call reqBlkID = builtBlk1.ID() res, err = proRemoteVM.GetAncestors( - context.Background(), + t.Context(), reqBlkID, maxBlocksNum, maxBlocksSize, @@ -470,7 +470,7 @@ func TestGetAncestorsAtSnomanPlusPlusFork(t *testing.T) { // a faulty call reqBlkID = ids.Empty res, err = proRemoteVM.GetAncestors( - context.Background(), + t.Context(), reqBlkID, maxBlocksNum, maxBlocksSize, @@ -484,7 +484,7 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { require := require.New(t) coreVM, proRemoteVM := initTestRemoteProposerVM(t, upgradetest.NoUpgrades) defer func() { - require.NoError(proRemoteVM.Shutdown(context.Background())) + require.NoError(proRemoteVM.Shutdown(t.Context())) }() // Build some prefork blocks.... @@ -492,11 +492,11 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk1, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) // prepare build of next block - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk1.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { case coreBlk1.ID(): @@ -510,11 +510,11 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk2, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) // prepare build of next block - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk2.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk2.ID(): @@ -528,7 +528,7 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk3, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -566,7 +566,7 @@ func TestBatchedParseBlockPreForkOnly(t *testing.T) { builtBlk2.Bytes(), builtBlk3.Bytes(), } - res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) + res, err := proRemoteVM.BatchedParseBlock(t.Context(), bytesToParse) require.NoError(err) require.Len(res, 3) require.Equal(builtBlk1.ID(), res[0].ID()) @@ -625,7 +625,7 @@ func TestBatchedParseBlockParallel(t *testing.T) { } { t.Run(testCase.name, func(t *testing.T) { require := require.New(t) - blocks, err := 
vm.BatchedParseBlock(context.Background(), testCase.rawBlocks) + blocks, err := vm.BatchedParseBlock(t.Context(), testCase.rawBlocks) require.NoError(err) returnedBlockBytes := make([][]byte, len(blocks)) @@ -675,7 +675,7 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { require := require.New(t) coreVM, proRemoteVM := initTestRemoteProposerVM(t, upgradetest.Latest) defer func() { - require.NoError(proRemoteVM.Shutdown(context.Background())) + require.NoError(proRemoteVM.Shutdown(t.Context())) }() // Build some post-Fork blocks.... @@ -683,31 +683,31 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk1, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) // prepare build of next block - require.NoError(builtBlk1.Verify(context.Background())) - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) + require.NoError(builtBlk1.Verify(t.Context())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk1.ID())) require.NoError(proRemoteVM.waitForProposerWindow()) coreBlk2 := snowmantest.BuildChild(coreBlk1) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk2, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) // prepare build of next block - require.NoError(builtBlk2.Verify(context.Background())) - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) + require.NoError(builtBlk2.Verify(t.Context())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk2.ID())) require.NoError(proRemoteVM.waitForProposerWindow()) coreBlk3 := snowmantest.BuildChild(coreBlk2) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk3, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { @@ -745,7 +745,7 @@ func TestBatchedParseBlockPostForkOnly(t *testing.T) { builtBlk2.Bytes(), builtBlk3.Bytes(), } - res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) + res, err := proRemoteVM.BatchedParseBlock(t.Context(), bytesToParse) require.NoError(err) require.Len(res, 3) require.Equal(builtBlk1.ID(), res[0].ID()) @@ -766,7 +766,7 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { // enable ProBlks in next future coreVM, proRemoteVM := initTestRemoteProposerVM(t, upgradetest.Latest, forkTime) defer func() { - require.NoError(proRemoteVM.Shutdown(context.Background())) + require.NoError(proRemoteVM.Shutdown(t.Context())) }() // Build some prefork blocks.... 
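// In the table-driven TestBatchedParseBlockParallel hunk above, the `t` inside each
// t.Run callback is the subtest's *testing.T, so t.Context() there is scoped to that
// subtest rather than to the enclosing test. A sketch of that shape, with checkParse
// and the case names as hypothetical stand-ins:
package sketch

import (
	"context"
	"testing"
)

// checkParse stands in for a call like vm.BatchedParseBlock.
func checkParse(ctx context.Context, _ [][]byte) error {
	return ctx.Err()
}

func TestPerSubtestContext(t *testing.T) {
	for _, name := range []string{"no blocks", "some blocks"} {
		t.Run(name, func(t *testing.T) {
			// Scoped to this subtest: canceled just before this subtest's cleanup runs.
			if err := checkParse(t.Context(), nil); err != nil {
				t.Fatalf("checkParse: %v", err)
			}
		})
	}
}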
@@ -776,12 +776,12 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } - builtBlk1, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk1, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&preForkBlock{}, builtBlk1) // prepare build of next block - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk1.ID())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk1.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk1.ID(): @@ -796,12 +796,12 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } - builtBlk2, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk2, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&preForkBlock{}, builtBlk2) // prepare build of next block - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk2.ID())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk2.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch { case blkID == coreBlk2.ID(): @@ -817,23 +817,23 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk3, nil } - builtBlk3, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk3, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, builtBlk3) // prepare build of next block - require.NoError(builtBlk3.Verify(context.Background())) - require.NoError(proRemoteVM.SetPreference(context.Background(), builtBlk3.ID())) + require.NoError(builtBlk3.Verify(t.Context())) + require.NoError(proRemoteVM.SetPreference(t.Context(), builtBlk3.ID())) require.NoError(proRemoteVM.waitForProposerWindow()) coreBlk4 := snowmantest.BuildChild(coreBlk3) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk4, nil } - builtBlk4, err := proRemoteVM.BuildBlock(context.Background()) + builtBlk4, err := proRemoteVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, builtBlk4) - require.NoError(builtBlk4.Verify(context.Background())) + require.NoError(builtBlk4.Verify(t.Context())) coreVM.ParseBlockF = func(_ context.Context, b []byte) (snowman.Block, error) { switch { @@ -876,7 +876,7 @@ func TestBatchedParseBlockAtSnomanPlusPlusFork(t *testing.T) { builtBlk1.Bytes(), } - res, err := proRemoteVM.BatchedParseBlock(context.Background(), bytesToParse) + res, err := proRemoteVM.BatchedParseBlock(t.Context(), bytesToParse) require.NoError(err) require.Len(res, 4) require.Equal(builtBlk4.ID(), res[0].ID()) @@ -1003,7 +1003,7 @@ func initTestRemoteProposerVM( ctx.ValidatorState = valState require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly initialState, @@ -1016,7 +1016,7 @@ func initTestRemoteProposerVM( // Initialize shouldn't be called again coreVM.InitializeF = nil - require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) + require.NoError(proVM.SetState(t.Context(), snow.NormalOp)) + require.NoError(proVM.SetPreference(t.Context(), 
snowmantest.GenesisID)) return coreVM, proVM } diff --git a/vms/proposervm/block_test.go b/vms/proposervm/block_test.go index 09a900a60992..34d2754bc466 100644 --- a/vms/proposervm/block_test.go +++ b/vms/proposervm/block_test.go @@ -69,7 +69,7 @@ func TestPostForkCommonComponents_buildChild(t *testing.T) { }).Return(builtBlk, nil).AnyTimes() vdrState := validatorsmock.NewState(ctrl) - vdrState.EXPECT().GetMinimumHeight(context.Background()).Return(pChainHeight, nil).AnyTimes() + vdrState.EXPECT().GetMinimumHeight(t.Context()).Return(pChainHeight, nil).AnyTimes() windower := proposermock.NewWindower(ctrl) windower.EXPECT().ExpectedProposer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nodeID, nil).AnyTimes() @@ -100,7 +100,7 @@ func TestPostForkCommonComponents_buildChild(t *testing.T) { // Should call BuildBlockWithContext since proposervm is activated gotChild, err := blk.buildChild( - context.Background(), + t.Context(), parentID, parentTimestamp, pChainHeight, @@ -112,7 +112,7 @@ func TestPostForkCommonComponents_buildChild(t *testing.T) { func TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.ApricotPhase4, 0) defer func() { @@ -238,7 +238,7 @@ func TestPreDurangoValidatorNodeBlockBuiltDelaysTests(t *testing.T) { func TestPreDurangoNonValidatorNodeBlockBuiltDelaysTests(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.ApricotPhase4, 0) defer func() { @@ -376,7 +376,7 @@ func TestPreEtnaContextPChainHeight(t *testing.T) { }).Return(innerChildBlock, nil).AnyTimes() vdrState := validatorsmock.NewState(ctrl) - vdrState.EXPECT().GetMinimumHeight(context.Background()).Return(pChainHeight, nil).AnyTimes() + vdrState.EXPECT().GetMinimumHeight(t.Context()).Return(pChainHeight, nil).AnyTimes() windower := proposermock.NewWindower(ctrl) windower.EXPECT().ExpectedProposer(gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).Return(nodeID, nil).AnyTimes() @@ -404,7 +404,7 @@ func TestPreEtnaContextPChainHeight(t *testing.T) { // Should call BuildBlockWithContext since proposervm is activated gotChild, err := blk.buildChild( - context.Background(), + t.Context(), parentID, parentTimestamp, parentPChainHeght, @@ -420,7 +420,7 @@ func TestPreGraniteBlock_NonZeroEpoch(t *testing.T) { _, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() innerBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -446,14 +446,14 @@ func TestPreGraniteBlock_NonZeroEpoch(t *testing.T) { innerBlk: innerBlk, }, } - err = proBlk.Verify(context.Background()) + err = proBlk.Verify(t.Context()) require.ErrorIs(err, errEpochNotZero) } // Verify that post-fork blocks are validated to contain the correct epoch // information. 
func TestPostGraniteBlock_EpochMatches(t *testing.T) { - ctx := context.Background() + ctx := t.Context() coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { diff --git a/vms/proposervm/post_fork_block_test.go b/vms/proposervm/post_fork_block_test.go index e119f1259e13..d72a5d67689d 100644 --- a/vms/proposervm/post_fork_block_test.go +++ b/vms/proposervm/post_fork_block_test.go @@ -41,13 +41,13 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { } // test - _, err := proBlk.Options(context.Background()) + _, err := proBlk.Options(t.Context()) require.Equal(snowman.ErrNotOracle, err) // setup _, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() innerTestBlock := snowmantest.BuildChild(snowmantest.Genesis) @@ -79,7 +79,7 @@ func TestOracle_PostForkBlock_ImplementsInterface(t *testing.T) { } // test - _, err = proBlk.Options(context.Background()) + _, err = proBlk.Options(t.Context()) require.NoError(err) } @@ -89,7 +89,7 @@ func TestBlockVerify_PostForkBlock_PreDurango_ParentChecks(t *testing.T) { coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.ApricotPhase4, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() pChainHeight := uint64(100) @@ -123,11 +123,11 @@ func TestBlockVerify_PostForkBlock_PreDurango_ParentChecks(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(parentBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) + require.NoError(parentBlk.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), parentBlk.ID())) // .. create child block ... 
childCoreBlk := snowmantest.BuildChild(parentCoreBlk) @@ -153,7 +153,7 @@ func TestBlockVerify_PostForkBlock_PreDurango_ParentChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - err = childBlk.Verify(context.Background()) + err = childBlk.Verify(t.Context()) require.ErrorIs(err, database.ErrNotFound) } @@ -169,7 +169,7 @@ func TestBlockVerify_PostForkBlock_PreDurango_ParentChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } } @@ -178,7 +178,7 @@ func TestBlockVerify_PostForkBlock_PostDurango_ParentChecks(t *testing.T) { coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() pChainHeight := uint64(100) @@ -211,11 +211,11 @@ func TestBlockVerify_PostForkBlock_PostDurango_ParentChecks(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(parentBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) + require.NoError(parentBlk.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), parentBlk.ID())) require.NoError(proVM.waitForProposerWindow()) childCoreBlk := snowmantest.BuildChild(parentCoreBlk) @@ -250,7 +250,7 @@ func TestBlockVerify_PostForkBlock_PostDurango_ParentChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - err = childBlk.Verify(context.Background()) + err = childBlk.Verify(t.Context()) require.ErrorIs(err, database.ErrNotFound) } @@ -271,7 +271,7 @@ func TestBlockVerify_PostForkBlock_PostDurango_ParentChecks(t *testing.T) { childBlk.SignedBlock = childSlb proVM.Set(childSlb.Timestamp()) - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } } @@ -280,7 +280,7 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.ApricotPhase4, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // reduce validator state to allow proVM.ctx.NodeID to be easily selected as proposer @@ -333,11 +333,11 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(parentBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) + require.NoError(parentBlk.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), parentBlk.ID())) var ( parentTimestamp = parentBlk.Timestamp() @@ -370,11 +370,11 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - err = childBlk.Verify(context.Background()) + err = childBlk.Verify(t.Context()) require.ErrorIs(err, errTimeNotMonotonic) } - blkWinDelay, err := proVM.Delay(context.Background(), childCoreBlk.Height(), parentPChainHeight, proVM.ctx.NodeID, proposer.MaxVerifyWindows) + blkWinDelay, err := proVM.Delay(t.Context(), childCoreBlk.Height(), parentPChainHeight, proVM.ctx.NodeID, proposer.MaxVerifyWindows) require.NoError(err) { @@ -395,7 +395,7 @@ func 
TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - err = childBlk.Verify(context.Background()) + err = childBlk.Verify(t.Context()) require.ErrorIs(err, errProposerWindowNotStarted) } @@ -417,7 +417,7 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } { @@ -438,7 +438,7 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } { @@ -456,7 +456,7 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } { @@ -476,7 +476,7 @@ func TestBlockVerify_PostForkBlock_TimestampChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - err = childBlk.Verify(context.Background()) + err = childBlk.Verify(t.Context()) require.ErrorIs(err, errTimeTooAdvanced) } } @@ -486,7 +486,7 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() pChainHeight := uint64(100) @@ -523,11 +523,11 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(parentBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) + require.NoError(parentBlk.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), parentBlk.ID())) require.NoError(proVM.waitForProposerWindow()) childCoreBlk := snowmantest.BuildChild(parentCoreBlk) @@ -562,7 +562,7 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - err = childBlk.Verify(context.Background()) + err = childBlk.Verify(t.Context()) require.ErrorIs(err, errPChainHeightNotMonotonic) } @@ -581,7 +581,7 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } { @@ -599,10 +599,10 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } - currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) + currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(t.Context()) { // block P-Chain height can be equal to current P-Chain height childSlb, err := block.Build( @@ -618,7 +618,7 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { require.NoError(err) childBlk.SignedBlock = childSlb - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } { @@ -636,7 +636,7 @@ func TestBlockVerify_PostForkBlock_PChainHeightChecks(t *testing.T) { 
require.NoError(err) childBlk.SignedBlock = childSlb - err = childBlk.Verify(context.Background()) + err = childBlk.Verify(t.Context()) require.ErrorIs(err, errPChainHeightNotReached) } } @@ -646,7 +646,7 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.ApricotPhase4, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() pChainHeight := uint64(100) @@ -700,21 +700,21 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) } } - oracleBlk, err := proVM.BuildBlock(context.Background()) + oracleBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(oracleBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), oracleBlk.ID())) + require.NoError(oracleBlk.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), oracleBlk.ID())) // retrieve one option and verify block built on it require.IsType(&postForkBlock{}, oracleBlk) postForkOracleBlk := oracleBlk.(*postForkBlock) - opts, err := postForkOracleBlk.Options(context.Background()) + opts, err := postForkOracleBlk.Options(t.Context()) require.NoError(err) parentBlk := opts[0] - require.NoError(parentBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) + require.NoError(parentBlk.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), parentBlk.ID())) // set VM to be ready to build next block. We set it to generate unsigned blocks // for simplicity. @@ -743,7 +743,7 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) require.NoError(err) childBlk.SignedBlock = childSlb - err = childBlk.Verify(context.Background()) + err = childBlk.Verify(t.Context()) require.ErrorIs(err, errPChainHeightNotMonotonic) } @@ -759,7 +759,7 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) require.NoError(err) childBlk.SignedBlock = childSlb - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } { @@ -774,10 +774,10 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) require.NoError(err) childBlk.SignedBlock = childSlb - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } - currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) + currPChainHeight, _ := proVM.ctx.ValidatorState.GetCurrentHeight(t.Context()) { // block P-Chain height can be equal to current P-Chain height childSlb, err := block.BuildUnsigned( @@ -790,7 +790,7 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) require.NoError(err) childBlk.SignedBlock = childSlb - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } { @@ -804,7 +804,7 @@ func TestBlockVerify_PostForkBlockBuiltOnOption_PChainHeightChecks(t *testing.T) ) require.NoError(err) childBlk.SignedBlock = childSlb - err = childBlk.Verify(context.Background()) + err = childBlk.Verify(t.Context()) require.ErrorIs(err, errPChainHeightNotReached) } } @@ -816,7 +816,7 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { // Show that other verify call would not call coreBlk.Verify() coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer 
func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() pChainHeight := uint64(2000) @@ -849,18 +849,18 @@ func TestBlockVerify_PostForkBlock_CoreBlockVerifyIsCalledOnce(t *testing.T) { } } - builtBlk, err := proVM.BuildBlock(context.Background()) + builtBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(builtBlk.Verify(context.Background())) + require.NoError(builtBlk.Verify(t.Context())) // set error on coreBlock.Verify and recall Verify() coreBlk.VerifyV = errDuplicateVerify - require.NoError(builtBlk.Verify(context.Background())) + require.NoError(builtBlk.Verify(t.Context())) // rebuild a block with the same core block pChainHeight++ - _, err = proVM.BuildBlock(context.Background()) + _, err = proVM.BuildBlock(t.Context()) require.NoError(err) } @@ -871,7 +871,7 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { // setup coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() pChainHeight := uint64(2000) @@ -904,11 +904,11 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { } } - builtBlk, err := proVM.BuildBlock(context.Background()) + builtBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) // test - require.NoError(builtBlk.Accept(context.Background())) + require.NoError(builtBlk.Accept(t.Context())) coreVM.LastAcceptedF = snowmantest.MakeLastAcceptedBlockF( []*snowmantest.Block{ @@ -916,7 +916,7 @@ func TestBlockAccept_PostForkBlock_SetsLastAcceptedBlock(t *testing.T) { coreBlk, }, ) - acceptedID, err := proVM.LastAccepted(context.Background()) + acceptedID, err := proVM.LastAccepted(t.Context()) require.NoError(err) require.Equal(builtBlk.ID(), acceptedID) } @@ -926,7 +926,7 @@ func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() var minimumHeight uint64 @@ -942,19 +942,19 @@ func TestBlockAccept_PostForkBlock_TwoProBlocksWithSameCoreBlock_OneIsAccepted(t minimumHeight = snowmantest.GenesisHeight - proBlk1, err := proVM.BuildBlock(context.Background()) + proBlk1, err := proVM.BuildBlock(t.Context()) require.NoError(err) minimumHeight++ - proBlk2, err := proVM.BuildBlock(context.Background()) + proBlk2, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.NotEqual(proBlk2.ID(), proBlk1.ID()) // set proBlk1 as preferred - require.NoError(proBlk1.Accept(context.Background())) + require.NoError(proBlk1.Accept(t.Context())) require.Equal(snowtest.Accepted, coreBlk.Status) - acceptedID, err := proVM.LastAccepted(context.Background()) + acceptedID, err := proVM.LastAccepted(t.Context()) require.NoError(err) require.Equal(proBlk1.ID(), acceptedID) } @@ -965,7 +965,7 @@ func TestBlockReject_PostForkBlock_InnerBlockIsNotRejected(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -974,12 +974,12 @@ func TestBlockReject_PostForkBlock_InnerBlockIsNotRejected(t *testing.T) { return coreBlk, nil } - sb, err := proVM.BuildBlock(context.Background()) + sb, err := 
proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, sb) proBlk := sb.(*postForkBlock) - require.NoError(proBlk.Reject(context.Background())) + require.NoError(proBlk.Reject(t.Context())) } func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { @@ -987,7 +987,7 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // create post fork oracle block ... @@ -1032,22 +1032,22 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(parentBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) + require.NoError(parentBlk.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), parentBlk.ID())) // retrieve options ... require.IsType(&postForkBlock{}, parentBlk) postForkOracleBlk := parentBlk.(*postForkBlock) - opts, err := postForkOracleBlk.Options(context.Background()) + opts, err := postForkOracleBlk.Options(t.Context()) require.NoError(err) require.IsType(&postForkOption{}, opts[0]) // ... and verify them the first time - require.NoError(opts[0].Verify(context.Background())) - require.NoError(opts[1].Verify(context.Background())) + require.NoError(opts[0].Verify(t.Context())) + require.NoError(opts[1].Verify(t.Context())) // Build the child statelessChild, err := block.Build( @@ -1062,13 +1062,13 @@ func TestBlockVerify_PostForkBlock_ShouldBePostForkOption(t *testing.T) { ) require.NoError(err) - invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) + invalidChild, err := proVM.ParseBlock(t.Context(), statelessChild.Bytes()) if err != nil { // A failure to parse is okay here return } - err = invalidChild.Verify(context.Background()) + err = invalidChild.Verify(t.Context()) require.ErrorIs(err, errUnexpectedBlockType) } @@ -1077,7 +1077,7 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 5) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -1111,12 +1111,12 @@ func TestBlockVerify_PostForkBlock_PChainTooLow(t *testing.T) { ) require.NoError(err) - invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) + invalidChild, err := proVM.ParseBlock(t.Context(), statelessChild.Bytes()) if err != nil { // A failure to parse is okay here return } - err = invalidChild.Verify(context.Background()) + err = invalidChild.Verify(t.Context()) require.ErrorIs(err, errPChainHeightTooLow) } diff --git a/vms/proposervm/post_fork_option_test.go b/vms/proposervm/post_fork_option_test.go index 7a3020ef0eb9..fd2474edb918 100644 --- a/vms/proposervm/post_fork_option_test.go +++ b/vms/proposervm/post_fork_option_test.go @@ -41,7 +41,7 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // create post fork oracle block ... 
@@ -87,35 +87,35 @@ func TestBlockVerify_PostForkOption_ParentChecks(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(parentBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) + require.NoError(parentBlk.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), parentBlk.ID())) // retrieve options ... require.IsType(&postForkBlock{}, parentBlk) postForkOracleBlk := parentBlk.(*postForkBlock) - opts, err := postForkOracleBlk.Options(context.Background()) + opts, err := postForkOracleBlk.Options(t.Context()) require.NoError(err) require.IsType(&postForkOption{}, opts[0]) // ... and verify them - require.NoError(opts[0].Verify(context.Background())) - require.NoError(opts[1].Verify(context.Background())) + require.NoError(opts[0].Verify(t.Context())) + require.NoError(opts[1].Verify(t.Context())) // show we can build on options - require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) + require.NoError(proVM.SetPreference(t.Context(), opts[0].ID())) require.NoError(proVM.waitForProposerWindow()) childCoreBlk := snowmantest.BuildChild(preferredBlk) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return childCoreBlk, nil } - proChild, err := proVM.BuildBlock(context.Background()) + proChild, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, proChild) - require.NoError(proChild.Verify(context.Background())) + require.NoError(proChild.Verify(t.Context())) } // ProposerBlock.Accept tests section @@ -125,7 +125,7 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { // Verify an option once; then show that another verify call would not call coreBlk.Verify() coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // create post fork oracle block ... @@ -172,30 +172,30 @@ func TestBlockVerify_PostForkOption_CoreBlockVerifyIsCalledOnce(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(parentBlk.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parentBlk.ID())) + require.NoError(parentBlk.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), parentBlk.ID())) // retrieve options ... require.IsType(&postForkBlock{}, parentBlk) postForkOracleBlk := parentBlk.(*postForkBlock) - opts, err := postForkOracleBlk.Options(context.Background()) + opts, err := postForkOracleBlk.Options(t.Context()) require.NoError(err) require.IsType(&postForkOption{}, opts[0]) // ... and verify them the first time - require.NoError(opts[0].Verify(context.Background())) - require.NoError(opts[1].Verify(context.Background())) + require.NoError(opts[0].Verify(t.Context())) + require.NoError(opts[1].Verify(t.Context())) // set error on coreBlock.Verify and recall Verify() coreOpt0.VerifyV = errDuplicateVerify coreOpt1.VerifyV = errDuplicateVerify // ... and verify them again. 
They verify without call to innerBlk - require.NoError(opts[0].Verify(context.Background())) - require.NoError(opts[1].Verify(context.Background())) + require.NoError(opts[0].Verify(t.Context())) + require.NoError(opts[1].Verify(t.Context())) } func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { @@ -203,7 +203,7 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // create post fork oracle block ... @@ -248,11 +248,11 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) // accept oracle block - require.NoError(parentBlk.Accept(context.Background())) + require.NoError(parentBlk.Accept(t.Context())) coreVM.LastAcceptedF = snowmantest.MakeLastAcceptedBlockF( []*snowmantest.Block{ @@ -261,19 +261,19 @@ func TestBlockAccept_PostForkOption_SetsLastAcceptedBlock(t *testing.T) { }, oracleCoreBlk.opts[:], ) - acceptedID, err := proVM.LastAccepted(context.Background()) + acceptedID, err := proVM.LastAccepted(t.Context()) require.NoError(err) require.Equal(parentBlk.ID(), acceptedID) // accept one of the options require.IsType(&postForkBlock{}, parentBlk) postForkOracleBlk := parentBlk.(*postForkBlock) - opts, err := postForkOracleBlk.Options(context.Background()) + opts, err := postForkOracleBlk.Options(t.Context()) require.NoError(err) - require.NoError(opts[0].Accept(context.Background())) + require.NoError(opts[0].Accept(t.Context())) - acceptedID, err = proVM.LastAccepted(context.Background()) + acceptedID, err = proVM.LastAccepted(t.Context()) require.NoError(err) require.Equal(opts[0].ID(), acceptedID) } @@ -284,7 +284,7 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // create post fork oracle block ... 
@@ -329,20 +329,20 @@ func TestBlockReject_InnerBlockIsNotRejected(t *testing.T) { } } - builtBlk, err := proVM.BuildBlock(context.Background()) + builtBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) // reject oracle block - require.NoError(builtBlk.Reject(context.Background())) + require.NoError(builtBlk.Reject(t.Context())) require.NotEqual(snowtest.Rejected, oracleCoreBlk.Status) // reject an option require.IsType(&postForkBlock{}, builtBlk) postForkOracleBlk := builtBlk.(*postForkBlock) - opts, err := postForkOracleBlk.Options(context.Background()) + opts, err := postForkOracleBlk.Options(t.Context()) require.NoError(err) - require.NoError(opts[0].Reject(context.Background())) + require.NoError(opts[0].Reject(t.Context())) require.NotEqual(snowtest.Rejected, oracleCoreBlk.opts[0].Status) } @@ -352,7 +352,7 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { // Verify an option once; then show that another verify call would not call coreBlk.Verify() coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreTestBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -391,12 +391,12 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, parentBlk) postForkBlk := parentBlk.(*postForkBlock) - _, err = postForkBlk.Options(context.Background()) + _, err = postForkBlk.Options(t.Context()) require.Equal(snowman.ErrNotOracle, err) // Build the child @@ -406,13 +406,13 @@ func TestBlockVerify_PostForkOption_ParentIsNotOracleWithError(t *testing.T) { ) require.NoError(err) - invalidChild, err := proVM.ParseBlock(context.Background(), statelessChild.Bytes()) + invalidChild, err := proVM.ParseBlock(t.Context(), statelessChild.Bytes()) if err != nil { // A failure to parse is okay here return } - err = invalidChild.Verify(context.Background()) + err = invalidChild.Verify(t.Context()) require.ErrorIs(err, database.ErrNotFound) } @@ -469,21 +469,21 @@ func TestOptionTimestampValidity(t *testing.T) { } } - statefulBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) + statefulBlock, err := proVM.ParseBlock(t.Context(), statelessBlock.Bytes()) require.NoError(err) - require.NoError(statefulBlock.Verify(context.Background())) + require.NoError(statefulBlock.Verify(t.Context())) statefulOracleBlock, ok := statefulBlock.(snowman.OracleBlock) require.True(ok) - options, err := statefulOracleBlock.Options(context.Background()) + options, err := statefulOracleBlock.Options(t.Context()) require.NoError(err) option := options[0] - require.NoError(option.Verify(context.Background())) + require.NoError(option.Verify(t.Context())) - require.NoError(statefulBlock.Accept(context.Background())) + require.NoError(statefulBlock.Accept(t.Context())) coreVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { require.FailNow("called GetBlock when unable to handle the error") @@ -496,8 +496,8 @@ func TestOptionTimestampValidity(t *testing.T) { require.Equal(oracleBlkTime, option.Timestamp().UTC()) - require.NoError(option.Accept(context.Background())) - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(option.Accept(t.Context())) + require.NoError(proVM.Shutdown(t.Context())) // Restart the node. 
ctx := proVM.ctx @@ -559,7 +559,7 @@ func TestOptionTimestampValidity(t *testing.T) { } require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, db, nil, @@ -569,10 +569,10 @@ func TestOptionTimestampValidity(t *testing.T) { nil, )) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() - statefulOptionBlock, err := proVM.ParseBlock(context.Background(), option.Bytes()) + statefulOptionBlock, err := proVM.ParseBlock(t.Context(), option.Bytes()) require.NoError(err) require.LessOrEqual(statefulOptionBlock.Height(), proVM.lastAcceptedHeight) diff --git a/vms/proposervm/pre_fork_block_test.go b/vms/proposervm/pre_fork_block_test.go index 23f74d1260f4..cfcb941b0f82 100644 --- a/vms/proposervm/pre_fork_block_test.go +++ b/vms/proposervm/pre_fork_block_test.go @@ -37,7 +37,7 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { } // test - _, err := proBlk.Options(context.Background()) + _, err := proBlk.Options(t.Context()) require.Equal(snowman.ErrNotOracle, err) // setup @@ -46,7 +46,7 @@ func TestOracle_PreForkBlkImplementsInterface(t *testing.T) { } // test - _, err = proBlk.Options(context.Background()) + _, err = proBlk.Options(t.Context()) require.NoError(err) } @@ -55,7 +55,7 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.NoUpgrades, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // create pre fork oracle block ... @@ -87,18 +87,18 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) // retrieve options ... require.IsType(&preForkBlock{}, parentBlk) preForkOracleBlk := parentBlk.(*preForkBlock) - opts, err := preForkOracleBlk.Options(context.Background()) + opts, err := preForkOracleBlk.Options(t.Context()) require.NoError(err) - require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[0].Verify(t.Context())) // ... show a block can be built on top of an option - require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) + require.NoError(proVM.SetPreference(t.Context(), opts[0].ID())) lastCoreBlk := &TestOptionsBlock{ Block: *snowmantest.BuildChild(preferredTestBlk), @@ -107,7 +107,7 @@ func TestOracle_PreForkBlkCanBuiltOnPreForkOption(t *testing.T) { return lastCoreBlk, nil } - preForkChild, err := proVM.BuildBlock(context.Background()) + preForkChild, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&preForkBlock{}, preForkChild) } @@ -118,7 +118,7 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { activationTime := snowmantest.GenesisTimestamp.Add(10 * time.Second) coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0, activationTime) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // create pre fork oracle block pre activation time... @@ -158,18 +158,18 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) // retrieve options ... 
require.IsType(&preForkBlock{}, parentBlk) preForkOracleBlk := parentBlk.(*preForkBlock) - opts, err := preForkOracleBlk.Options(context.Background()) + opts, err := preForkOracleBlk.Options(t.Context()) require.NoError(err) - require.NoError(opts[0].Verify(context.Background())) + require.NoError(opts[0].Verify(t.Context())) // ... show a block can be built on top of an option - require.NoError(proVM.SetPreference(context.Background(), opts[0].ID())) + require.NoError(proVM.SetPreference(t.Context(), opts[0].ID())) lastCoreBlk := &TestOptionsBlock{ Block: *snowmantest.BuildChild(preferredBlk), @@ -178,7 +178,7 @@ func TestOracle_PostForkBlkCanBuiltOnPreForkOption(t *testing.T) { return lastCoreBlk, nil } - postForkChild, err := proVM.BuildBlock(context.Background()) + postForkChild, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, postForkChild) } @@ -189,7 +189,7 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { activationTime := snowmantest.GenesisTimestamp.Add(10 * time.Second) coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0, activationTime) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // create parent block ... @@ -218,7 +218,7 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { } } - parentBlk, err := proVM.BuildBlock(context.Background()) + parentBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) // .. create child block ... @@ -231,14 +231,14 @@ func TestBlockVerify_PreFork_ParentChecks(t *testing.T) { { // child block referring unknown parent does not verify childCoreBlk.ParentV = ids.Empty - err = childBlk.Verify(context.Background()) + err = childBlk.Verify(t.Context()) require.ErrorIs(err, database.ErrNotFound) } { // child block referring known parent does verify childCoreBlk.ParentV = parentBlk.ID() - require.NoError(childBlk.Verify(context.Background())) + require.NoError(childBlk.Verify(t.Context())) } } @@ -248,7 +248,7 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { activationTime := snowmantest.GenesisTimestamp.Add(10 * time.Second) coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0, activationTime) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() preActivationTime := activationTime.Add(-1 * time.Second) @@ -261,11 +261,11 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { } // preFork block verifies if parent is before fork activation time - preForkChild, err := proVM.BuildBlock(context.Background()) + preForkChild, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&preForkBlock{}, preForkChild) - require.NoError(preForkChild.Verify(context.Background())) + require.NoError(preForkChild.Verify(t.Context())) // postFork block does NOT verify if parent is before fork activation time postForkStatelessChild, err := statelessblock.Build( @@ -288,7 +288,7 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { } require.True(postForkChild.Timestamp().Before(activationTime)) - err = postForkChild.Verify(context.Background()) + err = postForkChild.Verify(t.Context()) require.ErrorIs(err, errProposersNotActivated) // once activation time is crossed postForkBlock are produced @@ -298,7 +298,7 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { coreVM.SetPreferenceF = func(context.Context, ids.ID) error { return nil } - 
require.NoError(proVM.SetPreference(context.Background(), preForkChild.ID())) + require.NoError(proVM.SetPreference(t.Context(), preForkChild.ID())) secondCoreBlk := snowmantest.BuildChild(coreBlk) secondCoreBlk.TimestampV = postActivationTime @@ -317,13 +317,13 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { } } - lastPreForkBlk, err := proVM.BuildBlock(context.Background()) + lastPreForkBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&preForkBlock{}, lastPreForkBlk) - require.NoError(lastPreForkBlk.Verify(context.Background())) + require.NoError(lastPreForkBlk.Verify(t.Context())) - require.NoError(proVM.SetPreference(context.Background(), lastPreForkBlk.ID())) + require.NoError(proVM.SetPreference(t.Context(), lastPreForkBlk.ID())) thirdCoreBlk := snowmantest.BuildChild(secondCoreBlk) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return thirdCoreBlk, nil @@ -342,11 +342,11 @@ func TestBlockVerify_BlocksBuiltOnPreForkGenesis(t *testing.T) { } } - firstPostForkBlk, err := proVM.BuildBlock(context.Background()) + firstPostForkBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, firstPostForkBlk) - require.NoError(firstPostForkBlk.Verify(context.Background())) + require.NoError(firstPostForkBlk.Verify(t.Context())) } func TestBlockVerify_BlocksBuiltOnPostForkGenesis(t *testing.T) { @@ -356,7 +356,7 @@ func TestBlockVerify_BlocksBuiltOnPostForkGenesis(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0, activationTime) proVM.Set(activationTime) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // build parent block after fork activation time ... 
@@ -366,18 +366,18 @@ func TestBlockVerify_BlocksBuiltOnPostForkGenesis(t *testing.T) { } // postFork block verifies if parent is after fork activation time - postForkChild, err := proVM.BuildBlock(context.Background()) + postForkChild, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, postForkChild) - require.NoError(postForkChild.Verify(context.Background())) + require.NoError(postForkChild.Verify(t.Context())) // preFork block does NOT verify if parent is after fork activation time preForkChild := preForkBlock{ Block: coreBlock, vm: proVM, } - err = preForkChild.Verify(context.Background()) + err = preForkChild.Verify(t.Context()) require.ErrorIs(err, errUnexpectedBlockType) } @@ -387,7 +387,7 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { // setup coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.NoUpgrades, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -415,11 +415,11 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { } } - builtBlk, err := proVM.BuildBlock(context.Background()) + builtBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) // test - require.NoError(builtBlk.Accept(context.Background())) + require.NoError(builtBlk.Accept(t.Context())) coreVM.LastAcceptedF = snowmantest.MakeLastAcceptedBlockF( []*snowmantest.Block{ @@ -427,7 +427,7 @@ func TestBlockAccept_PreFork_SetsLastAcceptedBlock(t *testing.T) { coreBlk, }, ) - acceptedID, err := proVM.LastAccepted(context.Background()) + acceptedID, err := proVM.LastAccepted(t.Context()) require.NoError(err) require.Equal(builtBlk.ID(), acceptedID) } @@ -438,7 +438,7 @@ func TestBlockReject_PreForkBlock_InnerBlockIsRejected(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.NoUpgrades, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -446,12 +446,12 @@ func TestBlockReject_PreForkBlock_InnerBlockIsRejected(t *testing.T) { return coreBlk, nil } - sb, err := proVM.BuildBlock(context.Background()) + sb, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&preForkBlock{}, sb) proBlk := sb.(*preForkBlock) - require.NoError(proBlk.Reject(context.Background())) + require.NoError(proBlk.Reject(t.Context())) require.Equal(snowtest.Rejected, coreBlk.Status) } @@ -461,7 +461,7 @@ func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { activationTime := snowmantest.GenesisTimestamp.Add(10 * time.Second) coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0, activationTime) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() postActivationTime := activationTime.Add(time.Second) @@ -506,20 +506,20 @@ func TestBlockVerify_ForkBlockIsOracleBlock(t *testing.T) { } } - firstBlock, err := proVM.ParseBlock(context.Background(), coreBlk.Bytes()) + firstBlock, err := proVM.ParseBlock(t.Context(), coreBlk.Bytes()) require.NoError(err) - require.NoError(firstBlock.Verify(context.Background())) + require.NoError(firstBlock.Verify(t.Context())) oracleBlock, ok := firstBlock.(snowman.OracleBlock) require.True(ok) - options, err := oracleBlock.Options(context.Background()) + options, err := oracleBlock.Options(t.Context()) require.NoError(err) - 
require.NoError(options[0].Verify(context.Background())) + require.NoError(options[0].Verify(t.Context())) - require.NoError(options[1].Verify(context.Background())) + require.NoError(options[1].Verify(t.Context())) } func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { @@ -528,7 +528,7 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { activationTime := snowmantest.GenesisTimestamp.Add(10 * time.Second) coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0, activationTime) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() postActivationTime := activationTime.Add(time.Second) @@ -573,10 +573,10 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { } } - firstBlock, err := proVM.ParseBlock(context.Background(), coreBlk.Bytes()) + firstBlock, err := proVM.ParseBlock(t.Context(), coreBlk.Bytes()) require.NoError(err) - require.NoError(firstBlock.Verify(context.Background())) + require.NoError(firstBlock.Verify(t.Context())) slb, err := statelessblock.Build( firstBlock.ID(), // refer unknown parent @@ -590,13 +590,13 @@ func TestBlockVerify_ForkBlockIsOracleBlockButChildrenAreSigned(t *testing.T) { ) require.NoError(err) - invalidChild, err := proVM.ParseBlock(context.Background(), slb.Bytes()) + invalidChild, err := proVM.ParseBlock(t.Context(), slb.Bytes()) if err != nil { // A failure to parse is okay here return } - err = invalidChild.Verify(context.Background()) + err = invalidChild.Verify(t.Context()) require.ErrorIs(err, errUnexpectedBlockType) } @@ -619,7 +619,7 @@ func TestPreForkBlock_BuildBlockWithContext(t *testing.T) { innerVM := blockmock.NewChainVM(ctrl) innerVM.EXPECT().BuildBlock(gomock.Any()).Return(builtBlk, nil).AnyTimes() vdrState := validatorsmock.NewState(ctrl) - vdrState.EXPECT().GetMinimumHeight(context.Background()).Return(pChainHeight, nil).AnyTimes() + vdrState.EXPECT().GetMinimumHeight(t.Context()).Return(pChainHeight, nil).AnyTimes() vm := &VM{ ChainVM: innerVM, @@ -635,7 +635,7 @@ func TestPreForkBlock_BuildBlockWithContext(t *testing.T) { } // Should call BuildBlock since proposervm won't have a P-chain height - gotChild, err := blk.buildChild(context.Background()) + gotChild, err := blk.buildChild(t.Context()) require.NoError(err) require.Equal(builtBlk, gotChild.(*postForkBlock).innerBlk) @@ -643,7 +643,7 @@ func TestPreForkBlock_BuildBlockWithContext(t *testing.T) { innerBlk.EXPECT().Timestamp().Return(time.Time{}) vm.Upgrades.ApricotPhase4Time = upgrade.UnscheduledActivationTime - gotChild, err = blk.buildChild(context.Background()) + gotChild, err = blk.buildChild(t.Context()) require.NoError(err) require.Equal(builtBlk, gotChild.(*preForkBlock).Block) } @@ -653,7 +653,7 @@ func TestPreForkBlock_NonZeroEpoch(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() firstBlockTime := snowmantest.GenesisTimestamp.Add(time.Second) @@ -711,6 +711,6 @@ func TestPreForkBlock_NonZeroEpoch(t *testing.T) { } // Verify that the child block is rejected due to non-zero epoch - err = postForkChild.Verify(context.Background()) + err = postForkChild.Verify(t.Context()) require.ErrorIs(err, errEpochNotZero) } diff --git a/vms/proposervm/proposer/windower_test.go b/vms/proposervm/proposer/windower_test.go index c24100dc38ed..81ba55ca4d56 100644 --- 
a/vms/proposervm/proposer/windower_test.go +++ b/vms/proposervm/proposer/windower_test.go @@ -56,15 +56,15 @@ func TestWindowerNoValidators(t *testing.T) { nodeID = ids.GenerateTestNodeID() slot uint64 = 1 ) - delay, err := w.Delay(context.Background(), chainHeight, pChainHeight, nodeID, MaxVerifyWindows) + delay, err := w.Delay(t.Context(), chainHeight, pChainHeight, nodeID, MaxVerifyWindows) require.NoError(err) require.Zero(delay) - proposer, err := w.ExpectedProposer(context.Background(), chainHeight, pChainHeight, slot) + proposer, err := w.ExpectedProposer(t.Context(), chainHeight, pChainHeight, slot) require.ErrorIs(err, ErrAnyoneCanPropose) require.Equal(ids.EmptyNodeID, proposer) - delay, err = w.MinDelayForProposer(context.Background(), chainHeight, pChainHeight, nodeID, slot) + delay, err = w.MinDelayForProposer(t.Context(), chainHeight, pChainHeight, nodeID, slot) require.ErrorIs(err, ErrAnyoneCanPropose) require.Zero(delay) }) @@ -93,11 +93,11 @@ func TestWindowerRepeatedValidator(t *testing.T) { w := New(vdrState, subnetID, randomChainID) - validatorDelay, err := w.Delay(context.Background(), 1, 0, validatorID, MaxVerifyWindows) + validatorDelay, err := w.Delay(t.Context(), 1, 0, validatorID, MaxVerifyWindows) require.NoError(err) require.Zero(validatorDelay) - nonValidatorDelay, err := w.Delay(context.Background(), 1, 0, nonValidatorID, MaxVerifyWindows) + nonValidatorDelay, err := w.Delay(t.Context(), 1, 0, nonValidatorID, MaxVerifyWindows) require.NoError(err) require.Equal(MaxVerifyDelay, nonValidatorDelay) } @@ -118,7 +118,7 @@ func TestDelayChangeByHeight(t *testing.T) { } for i, expectedDelay := range expectedDelays1 { vdrID := validatorIDs[i] - validatorDelay, err := w.Delay(context.Background(), 1, 0, vdrID, MaxVerifyWindows) + validatorDelay, err := w.Delay(t.Context(), 1, 0, vdrID, MaxVerifyWindows) require.NoError(err) require.Equal(expectedDelay, validatorDelay) } @@ -133,7 +133,7 @@ func TestDelayChangeByHeight(t *testing.T) { } for i, expectedDelay := range expectedDelays2 { vdrID := validatorIDs[i] - validatorDelay, err := w.Delay(context.Background(), 2, 0, vdrID, MaxVerifyWindows) + validatorDelay, err := w.Delay(t.Context(), 2, 0, vdrID, MaxVerifyWindows) require.NoError(err) require.Equal(expectedDelay, validatorDelay) } @@ -167,7 +167,7 @@ func TestDelayChangeByChain(t *testing.T) { } for i, expectedDelay := range expectedDelays0 { vdrID := validatorIDs[i] - validatorDelay, err := w0.Delay(context.Background(), 1, 0, vdrID, MaxVerifyWindows) + validatorDelay, err := w0.Delay(t.Context(), 1, 0, vdrID, MaxVerifyWindows) require.NoError(err) require.Equal(expectedDelay, validatorDelay) } @@ -182,7 +182,7 @@ func TestDelayChangeByChain(t *testing.T) { } for i, expectedDelay := range expectedDelays1 { vdrID := validatorIDs[i] - validatorDelay, err := w1.Delay(context.Background(), 1, 0, vdrID, MaxVerifyWindows) + validatorDelay, err := w1.Delay(t.Context(), 1, 0, vdrID, MaxVerifyWindows) require.NoError(err) require.Equal(expectedDelay, validatorDelay) } @@ -195,7 +195,7 @@ func TestExpectedProposerChangeByHeight(t *testing.T) { w := New(vdrState, subnetID, fixedChainID) var ( - dummyCtx = context.Background() + dummyCtx = t.Context() pChainHeight uint64 = 0 slot uint64 = 0 ) @@ -229,7 +229,7 @@ func TestExpectedProposerChangeByChain(t *testing.T) { validatorIDs, vdrState := makeValidators(t, 10) var ( - dummyCtx = context.Background() + dummyCtx = t.Context() chainHeight uint64 = 1 pChainHeight uint64 = 0 slot uint64 = 0 @@ -255,7 +255,7 @@ func 
TestExpectedProposerChangeBySlot(t *testing.T) { w := New(vdrState, subnetID, fixedChainID) var ( - dummyCtx = context.Background() + dummyCtx = t.Context() chainHeight uint64 = 1 pChainHeight uint64 = 0 ) @@ -308,7 +308,7 @@ func TestCoherenceOfExpectedProposerAndMinDelayForProposer(t *testing.T) { w := New(vdrState, subnetID, fixedChainID) var ( - dummyCtx = context.Background() + dummyCtx = t.Context() chainHeight uint64 = 1 pChainHeight uint64 = 0 ) @@ -332,7 +332,7 @@ func TestMinDelayForProposer(t *testing.T) { w := New(vdrState, subnetID, fixedChainID) var ( - dummyCtx = context.Background() + dummyCtx = t.Context() chainHeight uint64 = 1 pChainHeight uint64 = 0 slot uint64 = 0 @@ -366,7 +366,7 @@ func BenchmarkMinDelayForProposer(b *testing.B) { w := New(vdrState, subnetID, fixedChainID) var ( - dummyCtx = context.Background() + dummyCtx = b.Context() pChainHeight uint64 = 0 chainHeight uint64 = 1 nodeID = ids.GenerateTestNodeID() // Ensure to exhaust the search @@ -424,7 +424,7 @@ func TestProposerDistribution(t *testing.T) { w := New(vdrState, subnetID, fixedChainID) var ( - dummyCtx = context.Background() + dummyCtx = t.Context() pChainHeight uint64 = 0 numChainHeights uint64 = 100 numSlots uint64 = 100 diff --git a/vms/proposervm/service_test.go b/vms/proposervm/service_test.go index 1952add06e4f..9f2ab3edc351 100644 --- a/vms/proposervm/service_test.go +++ b/vms/proposervm/service_test.go @@ -4,7 +4,6 @@ package proposervm import ( - "context" "net/http" "net/http/httptest" "net/url" @@ -26,11 +25,11 @@ func TestConnectRPCService_GetProposedHeight(t *testing.T) { const pChainHeight = 123 _, _, vm, _ := initTestProposerVM(t, upgradetest.Latest, pChainHeight) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() // Test through the exported NewHTTPHandler API - handler, err := vm.NewHTTPHandler(context.Background()) + handler, err := vm.NewHTTPHandler(t.Context()) require.NoError(err) require.NotNil(handler) @@ -49,7 +48,7 @@ func TestConnectRPCService_GetProposedHeight(t *testing.T) { // Test the GetProposedHeight endpoint req := connect.NewRequest(&proposervm.GetProposedHeightRequest{}) - resp, err := client.GetProposedHeight(context.Background(), req) + resp, err := client.GetProposedHeight(t.Context(), req) require.NoError(err) require.NotNil(resp) require.NotNil(resp.Msg) @@ -64,7 +63,7 @@ func TestJSONRPCService_GetProposedHeight(t *testing.T) { const pChainHeight = 123 _, _, vm, _ := initTestProposerVM(t, upgradetest.Latest, pChainHeight) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() s := &jsonrpcService{vm: vm} diff --git a/vms/proposervm/state_syncable_vm_test.go b/vms/proposervm/state_syncable_vm_test.go index e252226dea7b..425b7ae8e71f 100644 --- a/vms/proposervm/state_syncable_vm_test.go +++ b/vms/proposervm/state_syncable_vm_test.go @@ -77,7 +77,7 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { ctx.NodeID = ids.NodeIDFromCert(pTestCert) require.NoError(vm.Initialize( - context.Background(), + t.Context(), ctx, prefixdb.New([]byte{}, memdb.New()), snowmantest.GenesisBytes, @@ -86,7 +86,7 @@ func helperBuildStateSyncTestObjects(t *testing.T) (*fullVM, *VM) { nil, nil, )) - require.NoError(vm.SetState(context.Background(), snow.StateSyncing)) + require.NoError(vm.SetState(t.Context(), snow.StateSyncing)) return innerVM, vm } @@ -96,14 +96,14 @@ func TestStateSyncEnabled(t *testing.T) { innerVM, vm := 
helperBuildStateSyncTestObjects(t) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() // ProposerVM State Sync disabled if innerVM State sync is disabled innerVM.StateSyncEnabledF = func(context.Context) (bool, error) { return false, nil } - enabled, err := vm.StateSyncEnabled(context.Background()) + enabled, err := vm.StateSyncEnabled(t.Context()) require.NoError(err) require.False(enabled) @@ -111,7 +111,7 @@ func TestStateSyncEnabled(t *testing.T) { innerVM.StateSyncEnabledF = func(context.Context) (bool, error) { return true, nil } - enabled, err = vm.StateSyncEnabled(context.Background()) + enabled, err = vm.StateSyncEnabled(t.Context()) require.NoError(err) require.True(enabled) } @@ -121,7 +121,7 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { innerVM, vm := helperBuildStateSyncTestObjects(t) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() innerSummary := &blocktest.StateSummary{ @@ -134,7 +134,7 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { innerVM.GetOngoingSyncStateSummaryF = func(context.Context) (block.StateSummary, error) { return nil, database.ErrNotFound } - summary, err := vm.GetOngoingSyncStateSummary(context.Background()) + summary, err := vm.GetOngoingSyncStateSummary(t.Context()) require.ErrorIs(err, database.ErrNotFound) require.Nil(summary) @@ -144,7 +144,7 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { } _, err = vm.GetForkHeight() require.ErrorIs(err, database.ErrNotFound) - summary, err = vm.GetOngoingSyncStateSummary(context.Background()) + summary, err = vm.GetOngoingSyncStateSummary(t.Context()) require.NoError(err) require.Equal(innerSummary.ID(), summary.ID()) require.Equal(innerSummary.Height(), summary.Height()) @@ -155,7 +155,7 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { return innerSummary, nil } require.NoError(vm.SetForkHeight(innerSummary.Height() + 1)) - summary, err = vm.GetOngoingSyncStateSummary(context.Background()) + summary, err = vm.GetOngoingSyncStateSummary(t.Context()) require.NoError(err) require.Equal(innerSummary.ID(), summary.ID()) require.Equal(innerSummary.Height(), summary.Height()) @@ -195,7 +195,7 @@ func TestStateSyncGetOngoingSyncStateSummary(t *testing.T) { } require.NoError(vm.acceptPostForkBlock(proBlk)) - summary, err = vm.GetOngoingSyncStateSummary(context.Background()) + summary, err = vm.GetOngoingSyncStateSummary(t.Context()) require.NoError(err) require.Equal(innerSummary.Height(), summary.Height()) } @@ -205,7 +205,7 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { innerVM, vm := helperBuildStateSyncTestObjects(t) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() innerSummary := &blocktest.StateSummary{ @@ -218,7 +218,7 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { innerVM.GetLastStateSummaryF = func(context.Context) (block.StateSummary, error) { return nil, database.ErrNotFound } - summary, err := vm.GetLastStateSummary(context.Background()) + summary, err := vm.GetLastStateSummary(t.Context()) require.ErrorIs(err, database.ErrNotFound) require.Nil(summary) @@ -228,7 +228,7 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { } _, err = vm.GetForkHeight() require.ErrorIs(err, database.ErrNotFound) - summary, err = vm.GetLastStateSummary(context.Background()) + summary, err = vm.GetLastStateSummary(t.Context()) require.NoError(err) 
require.Equal(innerSummary.ID(), summary.ID()) require.Equal(innerSummary.Height(), summary.Height()) @@ -239,7 +239,7 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { return innerSummary, nil } require.NoError(vm.SetForkHeight(innerSummary.Height() + 1)) - summary, err = vm.GetLastStateSummary(context.Background()) + summary, err = vm.GetLastStateSummary(t.Context()) require.NoError(err) require.Equal(innerSummary.ID(), summary.ID()) require.Equal(innerSummary.Height(), summary.Height()) @@ -279,7 +279,7 @@ func TestStateSyncGetLastStateSummary(t *testing.T) { } require.NoError(vm.acceptPostForkBlock(proBlk)) - summary, err = vm.GetLastStateSummary(context.Background()) + summary, err = vm.GetLastStateSummary(t.Context()) require.NoError(err) require.Equal(innerSummary.Height(), summary.Height()) } @@ -289,7 +289,7 @@ func TestStateSyncGetStateSummary(t *testing.T) { innerVM, vm := helperBuildStateSyncTestObjects(t) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() reqHeight := uint64(1969) @@ -303,7 +303,7 @@ func TestStateSyncGetStateSummary(t *testing.T) { innerVM.GetStateSummaryF = func(context.Context, uint64) (block.StateSummary, error) { return nil, database.ErrNotFound } - summary, err := vm.GetStateSummary(context.Background(), reqHeight) + summary, err := vm.GetStateSummary(t.Context(), reqHeight) require.ErrorIs(err, database.ErrNotFound) require.Nil(summary) @@ -314,7 +314,7 @@ func TestStateSyncGetStateSummary(t *testing.T) { } _, err = vm.GetForkHeight() require.ErrorIs(err, database.ErrNotFound) - summary, err = vm.GetStateSummary(context.Background(), reqHeight) + summary, err = vm.GetStateSummary(t.Context(), reqHeight) require.NoError(err) require.Equal(innerSummary.ID(), summary.ID()) require.Equal(innerSummary.Height(), summary.Height()) @@ -326,7 +326,7 @@ func TestStateSyncGetStateSummary(t *testing.T) { return innerSummary, nil } require.NoError(vm.SetForkHeight(innerSummary.Height() + 1)) - summary, err = vm.GetStateSummary(context.Background(), reqHeight) + summary, err = vm.GetStateSummary(t.Context(), reqHeight) require.NoError(err) require.Equal(innerSummary.ID(), summary.ID()) require.Equal(innerSummary.Height(), summary.Height()) @@ -366,7 +366,7 @@ func TestStateSyncGetStateSummary(t *testing.T) { } require.NoError(vm.acceptPostForkBlock(proBlk)) - summary, err = vm.GetStateSummary(context.Background(), reqHeight) + summary, err = vm.GetStateSummary(t.Context(), reqHeight) require.NoError(err) require.Equal(innerSummary.Height(), summary.Height()) } @@ -375,7 +375,7 @@ func TestParseStateSummary(t *testing.T) { require := require.New(t) innerVM, vm := helperBuildStateSyncTestObjects(t) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() reqHeight := uint64(1969) @@ -395,10 +395,10 @@ func TestParseStateSummary(t *testing.T) { // Get a pre fork block than parse it require.NoError(vm.SetForkHeight(innerSummary.Height() + 1)) - summary, err := vm.GetStateSummary(context.Background(), reqHeight) + summary, err := vm.GetStateSummary(t.Context(), reqHeight) require.NoError(err) - parsedSummary, err := vm.ParseStateSummary(context.Background(), summary.Bytes()) + parsedSummary, err := vm.ParseStateSummary(t.Context(), summary.Bytes()) require.NoError(err) require.Equal(summary.ID(), parsedSummary.ID()) require.Equal(summary.Height(), parsedSummary.Height()) @@ -438,10 +438,10 @@ func TestParseStateSummary(t *testing.T) { } 
require.NoError(vm.acceptPostForkBlock(proBlk)) require.NoError(vm.SetForkHeight(innerSummary.Height() - 1)) - summary, err = vm.GetStateSummary(context.Background(), reqHeight) + summary, err = vm.GetStateSummary(t.Context(), reqHeight) require.NoError(err) - parsedSummary, err = vm.ParseStateSummary(context.Background(), summary.Bytes()) + parsedSummary, err = vm.ParseStateSummary(t.Context(), summary.Bytes()) require.NoError(err) require.Equal(summary.ID(), parsedSummary.ID()) require.Equal(summary.Height(), parsedSummary.Height()) @@ -453,7 +453,7 @@ func TestStateSummaryAccept(t *testing.T) { innerVM, vm := helperBuildStateSyncTestObjects(t) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() reqHeight := uint64(1969) @@ -496,14 +496,14 @@ func TestStateSummaryAccept(t *testing.T) { return innerBlk, nil } - summary, err := vm.ParseStateSummary(context.Background(), statelessSummary.Bytes()) + summary, err := vm.ParseStateSummary(t.Context(), statelessSummary.Bytes()) require.NoError(err) // test Accept accepted innerSummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { return block.StateSyncStatic, nil } - status, err := summary.Accept(context.Background()) + status, err := summary.Accept(t.Context()) require.NoError(err) require.Equal(block.StateSyncStatic, status) @@ -511,7 +511,7 @@ func TestStateSummaryAccept(t *testing.T) { innerSummary.AcceptF = func(context.Context) (block.StateSyncMode, error) { return block.StateSyncSkipped, nil } - status, err = summary.Accept(context.Background()) + status, err = summary.Accept(t.Context()) require.NoError(err) require.Equal(block.StateSyncSkipped, status) } @@ -521,7 +521,7 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { innerVM, vm := helperBuildStateSyncTestObjects(t) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() reqHeight := uint64(1969) @@ -572,7 +572,7 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { } require.NoError(vm.acceptPostForkBlock(proBlk)) - summary, err := vm.GetStateSummary(context.Background(), reqHeight) + summary, err := vm.GetStateSummary(t.Context(), reqHeight) require.NoError(err) require.Equal(summary.Height(), reqHeight) @@ -588,14 +588,14 @@ func TestStateSummaryAcceptOlderBlock(t *testing.T) { calledInnerAccept = true return block.StateSyncStatic, nil } - status, err := summary.Accept(context.Background()) + status, err := summary.Accept(t.Context()) require.NoError(err) require.Equal(block.StateSyncStatic, status) require.True(calledInnerAccept) - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(t.Context(), snow.Bootstrapping)) require.Equal(summary.Height(), vm.lastAcceptedHeight) - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := vm.LastAccepted(t.Context()) require.NoError(err) require.Equal(proBlk.ID(), lastAcceptedID) } @@ -610,7 +610,7 @@ func TestStateSummaryAcceptOlderBlockSkipStateSync(t *testing.T) { innerVM, vm := helperBuildStateSyncTestObjects(t) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() // store post fork block associated with summary @@ -714,7 +714,7 @@ func TestStateSummaryAcceptOlderBlockSkipStateSync(t *testing.T) { } require.NoError(vm.acceptPostForkBlock(proBlk2)) - summary, err := vm.GetStateSummary(context.Background(), innerBlk1.Height()) + summary, err := 
vm.GetStateSummary(t.Context(), innerBlk1.Height()) require.NoError(err) require.Equal(innerBlk1.Height(), summary.Height()) @@ -723,14 +723,14 @@ func TestStateSummaryAcceptOlderBlockSkipStateSync(t *testing.T) { // notify the innerVM. // This can result in the ProposerVM and innerVM diverging their last accepted block. // These are re-aligned in SetState before transitioning to consensus. - status, err := summary.Accept(context.Background()) + status, err := summary.Accept(t.Context()) require.NoError(err) require.Equal(block.StateSyncSkipped, status) require.True(calledInnerAccept) - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(t.Context(), snow.Bootstrapping)) require.Equal(innerBlk2.Height(), vm.lastAcceptedHeight) - lastAcceptedID, err := vm.LastAccepted(context.Background()) + lastAcceptedID, err := vm.LastAccepted(t.Context()) require.NoError(err) require.Equal(proBlk2.ID(), lastAcceptedID) } diff --git a/vms/proposervm/vm_byzantine_test.go b/vms/proposervm/vm_byzantine_test.go index 97218123f3c0..5de330855636 100644 --- a/vms/proposervm/vm_byzantine_test.go +++ b/vms/proposervm/vm_byzantine_test.go @@ -34,7 +34,7 @@ func TestInvalidByzantineProposerParent(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() xBlock := snowmantest.BuildChild(snowmantest.Genesis) @@ -42,13 +42,13 @@ func TestInvalidByzantineProposerParent(t *testing.T) { return xBlock, nil } - aBlock, err := proVM.BuildBlock(context.Background()) + aBlock, err := proVM.BuildBlock(t.Context()) require.NoError(err) coreVM.BuildBlockF = nil - require.NoError(aBlock.Verify(context.Background())) - require.NoError(aBlock.Accept(context.Background())) + require.NoError(aBlock.Verify(t.Context())) + require.NoError(aBlock.Accept(t.Context())) yBlock := snowmantest.BuildChild(xBlock) coreVM.ParseBlockF = func(_ context.Context, blockBytes []byte) (snowman.Block, error) { @@ -58,14 +58,14 @@ func TestInvalidByzantineProposerParent(t *testing.T) { return yBlock, nil } - parsedBlock, err := proVM.ParseBlock(context.Background(), yBlock.Bytes()) + parsedBlock, err := proVM.ParseBlock(t.Context(), yBlock.Bytes()) if err != nil { // If there was an error parsing, then this is fine. 
return } // If there wasn't an error parsing - verify must return an error - err = parsedBlock.Verify(context.Background()) + err = parsedBlock.Verify(t.Context()) require.ErrorIs(err, errUnknownBlock) } @@ -84,7 +84,7 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) proVM.Set(snowmantest.GenesisTimestamp) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() xTestBlock := snowmantest.BuildChild(snowmantest.Genesis) @@ -128,24 +128,24 @@ func TestInvalidByzantineProposerOracleParent(t *testing.T) { } } - aBlockIntf, err := proVM.BuildBlock(context.Background()) + aBlockIntf, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, aBlockIntf) aBlock := aBlockIntf.(*postForkBlock) - opts, err := aBlock.Options(context.Background()) + opts, err := aBlock.Options(t.Context()) require.NoError(err) - require.NoError(aBlock.Verify(context.Background())) - require.NoError(opts[0].Verify(context.Background())) - require.NoError(opts[1].Verify(context.Background())) + require.NoError(aBlock.Verify(t.Context())) + require.NoError(opts[0].Verify(t.Context())) + require.NoError(opts[1].Verify(t.Context())) - wrappedXBlock, err := proVM.ParseBlock(context.Background(), xBlock.Bytes()) + wrappedXBlock, err := proVM.ParseBlock(t.Context(), xBlock.Bytes()) require.NoError(err) // This should never be invoked by the consensus engine. However, it is // enforced to fail verification as a failsafe. - err = wrappedXBlock.Verify(context.Background()) + err = wrappedXBlock.Verify(t.Context()) require.ErrorIs(err, errUnexpectedBlockType) } @@ -163,7 +163,7 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() xBlock := snowmantest.BuildChild(snowmantest.Genesis) @@ -197,18 +197,18 @@ func TestInvalidByzantineProposerPreForkParent(t *testing.T) { } } - aBlock, err := proVM.BuildBlock(context.Background()) + aBlock, err := proVM.BuildBlock(t.Context()) require.NoError(err) coreVM.BuildBlockF = nil - require.NoError(aBlock.Verify(context.Background())) + require.NoError(aBlock.Verify(t.Context())) - wrappedXBlock, err := proVM.ParseBlock(context.Background(), xBlock.Bytes()) + wrappedXBlock, err := proVM.ParseBlock(t.Context(), xBlock.Bytes()) require.NoError(err) // This should never be invoked by the consensus engine. However, it is // enforced to fail verification as a failsafe. 
- err = wrappedXBlock.Verify(context.Background()) + err = wrappedXBlock.Verify(t.Context()) require.ErrorIs(err, errUnexpectedBlockType) } @@ -227,7 +227,7 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) proVM.Set(snowmantest.GenesisTimestamp) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() xBlock := &TestOptionsBlock{ @@ -270,18 +270,18 @@ func TestBlockVerify_PostForkOption_FaultyParent(t *testing.T) { } } - aBlockIntf, err := proVM.BuildBlock(context.Background()) + aBlockIntf, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, aBlockIntf) aBlock := aBlockIntf.(*postForkBlock) - opts, err := aBlock.Options(context.Background()) + opts, err := aBlock.Options(t.Context()) require.NoError(err) - require.NoError(aBlock.Verify(context.Background())) - err = opts[0].Verify(context.Background()) + require.NoError(aBlock.Verify(t.Context())) + err = opts[0].Verify(t.Context()) require.ErrorIs(err, errInnerParentMismatch) - err = opts[1].Verify(context.Background()) + err = opts[1].Verify(t.Context()) require.ErrorIs(err, errInnerParentMismatch) } @@ -302,7 +302,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) proVM.Set(snowmantest.GenesisTimestamp) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // create an Oracle pre-fork block X @@ -315,7 +315,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - xInnerOptions, err := xBlock.Options(context.Background()) + xInnerOptions, err := xBlock.Options(t.Context()) require.NoError(err) xInnerOption := xInnerOptions[0] @@ -339,7 +339,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - require.NoError(bBlock.Verify(context.Background())) + require.NoError(bBlock.Verify(t.Context())) // generate O1 statelessOuterOption, err := block.BuildOption( @@ -356,17 +356,17 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - err = outerOption.Verify(context.Background()) + err = outerOption.Verify(t.Context()) require.ErrorIs(err, errUnexpectedBlockType) // generate A from X and O2 coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } - aBlock, err := proVM.BuildBlock(context.Background()) + aBlock, err := proVM.BuildBlock(t.Context()) require.NoError(err) coreVM.BuildBlockF = nil - require.NoError(aBlock.Verify(context.Background())) + require.NoError(aBlock.Verify(t.Context())) statelessOuterOption, err = block.BuildOption( aBlock.ID(), @@ -382,7 +382,7 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - require.NoError(outerOption.Verify(context.Background())) + require.NoError(outerOption.Verify(t.Context())) // create an Oracle pre-fork block Z // create post-fork block B from Y @@ -398,10 +398,10 @@ func TestBlockVerify_InvalidPostForkOption(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return zBlock, nil } - cBlock, err := proVM.BuildBlock(context.Background()) + cBlock, err := proVM.BuildBlock(t.Context()) require.NoError(err) coreVM.BuildBlockF = nil - require.NoError(cBlock.Verify(context.Background())) + require.NoError(cBlock.Verify(t.Context())) // generate O3 statelessOuterOption, err = block.BuildOption( @@ -418,7 +418,7 @@ func 
TestBlockVerify_InvalidPostForkOption(t *testing.T) { }, } - err = outerOption.Verify(context.Background()) + err = outerOption.Verify(t.Context()) require.ErrorIs(err, errInnerParentMismatch) } @@ -427,7 +427,7 @@ func TestGetBlock_MutatedSignature(t *testing.T) { coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // Make sure that we will be sampled to perform the proposals. @@ -475,12 +475,12 @@ func TestGetBlock_MutatedSignature(t *testing.T) { return coreBlk0, nil } - builtBlk0, err := proVM.BuildBlock(context.Background()) + builtBlk0, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(builtBlk0.Verify(context.Background())) + require.NoError(builtBlk0.Verify(t.Context())) - require.NoError(proVM.SetPreference(context.Background(), builtBlk0.ID())) + require.NoError(proVM.SetPreference(t.Context(), builtBlk0.ID())) // The second proposal block will need to be signed because the timestamp // hasn't moved forward @@ -493,13 +493,13 @@ func TestGetBlock_MutatedSignature(t *testing.T) { invalidBlkBytes, err := hex.DecodeString(invalidBlkBytesHex) require.NoError(err) - invalidBlk, err := proVM.ParseBlock(context.Background(), invalidBlkBytes) + invalidBlk, err := proVM.ParseBlock(t.Context(), invalidBlkBytes) if err != nil { // Not being able to parse an invalid block is fine. t.Skip(err) } - err = invalidBlk.Verify(context.Background()) + err = invalidBlk.Verify(t.Context()) require.ErrorIs(err, database.ErrNotFound) // Note that the invalidBlk.ID() is the same as the correct blk ID because @@ -511,12 +511,12 @@ func TestGetBlock_MutatedSignature(t *testing.T) { // GetBlock shouldn't really be able to succeed, as we don't have a valid // representation of [blkID] proVM.innerBlkCache.Flush() // So we don't get from the cache - fetchedBlk, err := proVM.GetBlock(context.Background(), blkID) + fetchedBlk, err := proVM.GetBlock(t.Context(), blkID) if err != nil { t.Skip(err) } // GetBlock returned, so it must have somehow gotten a valid representation // of [blkID]. 
- require.NoError(fetchedBlk.Verify(context.Background())) + require.NoError(fetchedBlk.Verify(t.Context())) } diff --git a/vms/proposervm/vm_test.go b/vms/proposervm/vm_test.go index 2dbcfb88ecfd..12f55286b987 100644 --- a/vms/proposervm/vm_test.go +++ b/vms/proposervm/vm_test.go @@ -201,7 +201,7 @@ func initTestProposerVM( db := prefixdb.New([]byte{0}, memdb.New()) require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, db, initialState, @@ -214,8 +214,8 @@ func initTestProposerVM( // Initialize shouldn't be called again coreVM.InitializeF = nil - require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) + require.NoError(proVM.SetState(t.Context(), snow.NormalOp)) + require.NoError(proVM.SetPreference(t.Context(), snowmantest.GenesisID)) proVM.Set(snowmantest.GenesisTimestamp) @@ -269,7 +269,7 @@ func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { // given the same core block, BuildBlock returns the same proposer block coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() skewedTimestamp := time.Now().Truncate(time.Second).Add(time.Millisecond) @@ -281,7 +281,7 @@ func TestBuildBlockTimestampAreRoundedToSeconds(t *testing.T) { } // test - builtBlk, err := proVM.BuildBlock(context.Background()) + builtBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.Equal(builtBlk.Timestamp().Truncate(time.Second), builtBlk.Timestamp()) @@ -293,7 +293,7 @@ func TestBuildBlockIsIdempotent(t *testing.T) { // given the same core block, BuildBlock returns the same proposer block coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -304,10 +304,10 @@ func TestBuildBlockIsIdempotent(t *testing.T) { // Mock the clock time to make sure that block timestamps will be equal proVM.Clock.Set(time.Now()) - builtBlk1, err := proVM.BuildBlock(context.Background()) + builtBlk1, err := proVM.BuildBlock(t.Context()) require.NoError(err) - builtBlk2, err := proVM.BuildBlock(context.Background()) + builtBlk2, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.Equal(builtBlk1.Bytes(), builtBlk2.Bytes()) @@ -319,7 +319,7 @@ func TestFirstProposerBlockIsBuiltOnTopOfGenesis(t *testing.T) { // setup coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -328,7 +328,7 @@ func TestFirstProposerBlockIsBuiltOnTopOfGenesis(t *testing.T) { } // test - snowBlock, err := proVM.BuildBlock(context.Background()) + snowBlock, err := proVM.BuildBlock(t.Context()) require.NoError(err) // checks @@ -344,7 +344,7 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // add two proBlks... 
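The hunks above and below all apply one mechanical change: test-scoped context.Background() calls become t.Context(), so each context's lifetime is tied to the test that created it. The following is a minimal, self-contained sketch of the pattern, assuming Go 1.24 or newer (where testing.T gained a Context method); fakeVM and its Shutdown method are hypothetical stand-ins, not the proposervm API.

package example

import (
	"context"
	"testing"

	"github.com/stretchr/testify/require"
)

// fakeVM is a hypothetical stand-in used only to illustrate the pattern;
// it is not the proposervm VM.
type fakeVM struct{}

func (*fakeVM) Shutdown(context.Context) error { return nil }

func TestShutdownUsesTestContext(t *testing.T) {
	vm := &fakeVM{}

	// Previously: require.NoError(t, vm.Shutdown(context.Background()))
	// t.Context() is canceled automatically as the test finishes, so the
	// call is bounded by the test's lifetime.
	require.NoError(t, vm.Shutdown(t.Context()))
}

Where a file's only remaining use of the context package was context.Background(), the import itself is dropped as well, as in vms/proposervm/service_test.go above.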
@@ -352,17 +352,17 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } - proBlk1, err := proVM.BuildBlock(context.Background()) + proBlk1, err := proVM.BuildBlock(t.Context()) require.NoError(err) coreBlk2 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } - proBlk2, err := proVM.BuildBlock(context.Background()) + proBlk2, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.NotEqual(proBlk2.ID(), proBlk1.ID()) - require.NoError(proBlk2.Verify(context.Background())) + require.NoError(proBlk2.Verify(t.Context())) // ...and set one as preferred var prefcoreBlk *snowmantest.Block @@ -391,7 +391,7 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { } } - require.NoError(proVM.SetPreference(context.Background(), proBlk2.ID())) + require.NoError(proVM.SetPreference(t.Context(), proBlk2.ID())) require.NoError(proVM.waitForProposerWindow()) // build block... @@ -400,7 +400,7 @@ func TestProposerBlocksAreBuiltOnPreferredProBlock(t *testing.T) { return coreBlk3, nil } - builtBlk, err := proVM.BuildBlock(context.Background()) + builtBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) // ...show that parent is the preferred one @@ -412,25 +412,25 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreBlk1 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } - proBlk1, err := proVM.BuildBlock(context.Background()) + proBlk1, err := proVM.BuildBlock(t.Context()) require.NoError(err) coreBlk2 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk2, nil } - proBlk2, err := proVM.BuildBlock(context.Background()) + proBlk2, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.NotEqual(proBlk1.ID(), proBlk2.ID()) - require.NoError(proBlk2.Verify(context.Background())) + require.NoError(proBlk2.Verify(t.Context())) // ...and set one as preferred var wronglyPreferredcoreBlk *snowmantest.Block @@ -459,7 +459,7 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { } } - require.NoError(proVM.SetPreference(context.Background(), proBlk2.ID())) + require.NoError(proVM.SetPreference(t.Context(), proBlk2.ID())) require.NoError(proVM.waitForProposerWindow()) // build block... 
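Benchmarks in this diff get the analogous treatment: earlier, in vms/proposervm/proposer/windower_test.go, BenchmarkMinDelayForProposer takes its dummy context from b.Context() instead of context.Background(). Below is a minimal sketch of that variant, again assuming Go 1.24 or newer (testing.B also exposes Context); doWork is a hypothetical helper standing in for a context-aware call such as MinDelayForProposer in that benchmark.

package example

import (
	"context"
	"testing"
)

// doWork is a hypothetical helper; it only reports whether the context has
// been canceled.
func doWork(ctx context.Context) error {
	return ctx.Err()
}

func BenchmarkDoWork(b *testing.B) {
	// b.Context() replaces context.Background(); it is canceled
	// automatically once the benchmark completes.
	ctx := b.Context()

	for i := 0; i < b.N; i++ {
		if err := doWork(ctx); err != nil {
			b.Fatal(err)
		}
	}
}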
@@ -468,10 +468,10 @@ func TestCoreBlocksMustBeBuiltOnPreferredCoreBlock(t *testing.T) { return coreBlk3, nil } - blk, err := proVM.BuildBlock(context.Background()) + blk, err := proVM.BuildBlock(t.Context()) require.NoError(err) - err = blk.Verify(context.Background()) + err = blk.Verify(t.Context()) require.ErrorIs(err, errInnerParentMismatch) } @@ -481,7 +481,7 @@ func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreVM.ParseBlockF = func(context.Context, []byte) (snowman.Block, error) { @@ -509,7 +509,7 @@ func TestCoreBlockFailureCauseProposerBlockParseFailure(t *testing.T) { } // test - _, err = proVM.ParseBlock(context.Background(), proBlk.Bytes()) + _, err = proVM.ParseBlock(t.Context(), proBlk.Bytes()) require.ErrorIs(err, errMarshallingFailed) } @@ -518,7 +518,7 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // create two Proposer blocks at the same height @@ -571,9 +571,9 @@ func TestTwoProBlocksWrappingSameCoreBlockCanBeParsed(t *testing.T) { require.NotEqual(proBlk1.ID(), proBlk2.ID()) // Show that both can be parsed and retrieved - parsedBlk1, err := proVM.ParseBlock(context.Background(), proBlk1.Bytes()) + parsedBlk1, err := proVM.ParseBlock(t.Context(), proBlk1.Bytes()) require.NoError(err) - parsedBlk2, err := proVM.ParseBlock(context.Background(), proBlk2.Bytes()) + parsedBlk2, err := proVM.ParseBlock(t.Context(), proBlk2.Bytes()) require.NoError(err) require.Equal(proBlk1.ID(), parsedBlk1.ID()) @@ -586,7 +586,7 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // one block is built from this proVM @@ -595,9 +595,9 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { return localcoreBlk, nil } - builtBlk, err := proVM.BuildBlock(context.Background()) + builtBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(builtBlk.Verify(context.Background())) + require.NoError(builtBlk.Verify(t.Context())) // another block with same parent comes from network and is parsed netcoreBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -615,7 +615,7 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { } } - pChainHeight, err := proVM.ctx.ValidatorState.GetCurrentHeight(context.Background()) + pChainHeight, err := proVM.ctx.ValidatorState.GetCurrentHeight(t.Context()) require.NoError(err) netSlb, err := statelessblock.BuildUnsigned( @@ -635,7 +635,7 @@ func TestTwoProBlocksWithSameParentCanBothVerify(t *testing.T) { } // prove that also block from network verifies - require.NoError(netProBlk.Verify(context.Background())) + require.NoError(netProBlk.Verify(t.Context())) } // Pre Fork tests section @@ -644,14 +644,14 @@ func TestPreFork_Initialize(t *testing.T) { _, _, proVM, _ := initTestProposerVM(t, upgradetest.NoUpgrades, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // checks - blkID, err := 
proVM.LastAccepted(context.Background()) + blkID, err := proVM.LastAccepted(t.Context()) require.NoError(err) - rtvdBlk, err := proVM.GetBlock(context.Background(), blkID) + rtvdBlk, err := proVM.GetBlock(t.Context(), blkID) require.NoError(err) require.IsType(&preForkBlock{}, rtvdBlk) @@ -663,7 +663,7 @@ func TestPreFork_BuildBlock(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.NoUpgrades, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -672,7 +672,7 @@ func TestPreFork_BuildBlock(t *testing.T) { } // test - builtBlk, err := proVM.BuildBlock(context.Background()) + builtBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&preForkBlock{}, builtBlk) require.Equal(coreBlk.ID(), builtBlk.ID()) @@ -682,7 +682,7 @@ func TestPreFork_BuildBlock(t *testing.T) { coreVM.GetBlockF = func(context.Context, ids.ID) (snowman.Block, error) { return coreBlk, nil } - storedBlk, err := proVM.GetBlock(context.Background(), builtBlk.ID()) + storedBlk, err := proVM.GetBlock(t.Context(), builtBlk.ID()) require.NoError(err) require.Equal(builtBlk.ID(), storedBlk.ID()) } @@ -692,7 +692,7 @@ func TestPreFork_ParseBlock(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.NoUpgrades, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -701,7 +701,7 @@ func TestPreFork_ParseBlock(t *testing.T) { return coreBlk, nil } - parsedBlk, err := proVM.ParseBlock(context.Background(), coreBlk.Bytes()) + parsedBlk, err := proVM.ParseBlock(t.Context(), coreBlk.Bytes()) require.NoError(err) require.IsType(&preForkBlock{}, parsedBlk) require.Equal(coreBlk.ID(), parsedBlk.ID()) @@ -711,7 +711,7 @@ func TestPreFork_ParseBlock(t *testing.T) { require.Equal(coreBlk.ID(), id) return coreBlk, nil } - storedBlk, err := proVM.GetBlock(context.Background(), parsedBlk.ID()) + storedBlk, err := proVM.GetBlock(t.Context(), parsedBlk.ID()) require.NoError(err) require.Equal(parsedBlk.ID(), storedBlk.ID()) } @@ -721,14 +721,14 @@ func TestPreFork_SetPreference(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.NoUpgrades, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreBlk0 := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk0, nil } - builtBlk, err := proVM.BuildBlock(context.Background()) + builtBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { @@ -751,13 +751,13 @@ func TestPreFork_SetPreference(t *testing.T) { return nil, errUnknownBlock } } - require.NoError(proVM.SetPreference(context.Background(), builtBlk.ID())) + require.NoError(proVM.SetPreference(t.Context(), builtBlk.ID())) coreBlk1 := snowmantest.BuildChild(coreBlk0) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } - nextBlk, err := proVM.BuildBlock(context.Background()) + nextBlk, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.Equal(builtBlk.ID(), nextBlk.Parent()) } @@ -845,7 +845,7 @@ func TestPostFork_SetPreference(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, defaultPChainHeight) 
defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() if test.hasSetPreferenceWithContext { @@ -888,7 +888,7 @@ func TestPostFork_SetPreference(t *testing.T) { } proVM.verifiedBlocks[postForkBlk.ID()] = postForkBlk - err := proVM.SetPreference(context.Background(), postForkBlk.ID()) + err := proVM.SetPreference(t.Context(), postForkBlk.ID()) require.ErrorIs(err, test.expectedError) }) } @@ -980,7 +980,7 @@ func TestExpiredBuildBlock(t *testing.T) { // make sure that DBs are compressed correctly require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, memdb.New(), nil, @@ -990,19 +990,19 @@ func TestExpiredBuildBlock(t *testing.T) { nil, )) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // Initialize shouldn't be called again coreVM.InitializeF = nil - require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) + require.NoError(proVM.SetState(t.Context(), snow.NormalOp)) + require.NoError(proVM.SetPreference(t.Context(), snowmantest.GenesisID)) // Notify the proposer VM of a new block on the inner block side events <- common.PendingTxs // The first notification will be read from the consensus engine - msg, err := proVM.WaitForEvent(context.Background()) + msg, err := proVM.WaitForEvent(t.Context()) require.NoError(err) require.Equal(common.PendingTxs, msg) @@ -1041,11 +1041,11 @@ func TestExpiredBuildBlock(t *testing.T) { proVM.Clock.Set(statelessBlock.Timestamp()) - parsedBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) + parsedBlock, err := proVM.ParseBlock(t.Context(), statelessBlock.Bytes()) require.NoError(err) - require.NoError(parsedBlock.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parsedBlock.ID())) + require.NoError(parsedBlock.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), parsedBlock.ID())) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { require.FailNow(fmt.Errorf("%w: BuildBlock", errUnexpectedCall).Error()) @@ -1054,7 +1054,7 @@ func TestExpiredBuildBlock(t *testing.T) { // Because we are now building on a different block, the proposer window // shouldn't have started. 
- _, err = proVM.BuildBlock(context.Background()) + _, err = proVM.BuildBlock(t.Context()) require.ErrorIs(err, errProposerWindowNotStarted) } @@ -1083,7 +1083,7 @@ func TestInnerBlockDeduplication(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() coreBlk := snowmantest.BuildChild(snowmantest.Genesis) @@ -1131,12 +1131,12 @@ func TestInnerBlockDeduplication(t *testing.T) { } } - parsedBlock0, err := proVM.ParseBlock(context.Background(), statelessBlock0.Bytes()) + parsedBlock0, err := proVM.ParseBlock(t.Context(), statelessBlock0.Bytes()) require.NoError(err) - require.NoError(parsedBlock0.Verify(context.Background())) + require.NoError(parsedBlock0.Verify(t.Context())) - require.NoError(proVM.SetPreference(context.Background(), parsedBlock0.ID())) + require.NoError(proVM.SetPreference(t.Context(), parsedBlock0.ID())) coreVM.GetBlockF = func(_ context.Context, blkID ids.ID) (snowman.Block, error) { switch blkID { @@ -1159,14 +1159,14 @@ func TestInnerBlockDeduplication(t *testing.T) { } } - parsedBlock1, err := proVM.ParseBlock(context.Background(), statelessBlock1.Bytes()) + parsedBlock1, err := proVM.ParseBlock(t.Context(), statelessBlock1.Bytes()) require.NoError(err) - require.NoError(parsedBlock1.Verify(context.Background())) + require.NoError(parsedBlock1.Verify(t.Context())) - require.NoError(proVM.SetPreference(context.Background(), parsedBlock1.ID())) + require.NoError(proVM.SetPreference(t.Context(), parsedBlock1.ID())) - require.NoError(parsedBlock1.Accept(context.Background())) + require.NoError(parsedBlock1.Accept(t.Context())) } func TestInnerVMRollback(t *testing.T) { @@ -1244,7 +1244,7 @@ func TestInnerVMRollback(t *testing.T) { ) require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, db, nil, @@ -1254,8 +1254,8 @@ func TestInnerVMRollback(t *testing.T) { nil, )) - require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) + require.NoError(proVM.SetState(t.Context(), snow.NormalOp)) + require.NoError(proVM.SetPreference(t.Context(), snowmantest.GenesisID)) coreBlk := snowmantest.BuildChild(snowmantest.Genesis) statelessBlock, err := statelessblock.BuildUnsigned( @@ -1290,23 +1290,23 @@ func TestInnerVMRollback(t *testing.T) { proVM.Clock.Set(statelessBlock.Timestamp()) - lastAcceptedID, err := proVM.LastAccepted(context.Background()) + lastAcceptedID, err := proVM.LastAccepted(t.Context()) require.NoError(err) require.Equal(snowmantest.GenesisID, lastAcceptedID) - parsedBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) + parsedBlock, err := proVM.ParseBlock(t.Context(), statelessBlock.Bytes()) require.NoError(err) - require.NoError(parsedBlock.Verify(context.Background())) - require.NoError(proVM.SetPreference(context.Background(), parsedBlock.ID())) - require.NoError(parsedBlock.Accept(context.Background())) + require.NoError(parsedBlock.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), parsedBlock.ID())) + require.NoError(parsedBlock.Accept(t.Context())) - lastAcceptedID, err = proVM.LastAccepted(context.Background()) + lastAcceptedID, err = proVM.LastAccepted(t.Context()) require.NoError(err) require.Equal(parsedBlock.ID(), lastAcceptedID) // Restart the node and have the inner VM rollback state. 
- require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) coreBlk.Status = snowtest.Undecided proVM = New( @@ -1322,7 +1322,7 @@ func TestInnerVMRollback(t *testing.T) { ) require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, db, nil, @@ -1332,10 +1332,10 @@ func TestInnerVMRollback(t *testing.T) { nil, )) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() - lastAcceptedID, err = proVM.LastAccepted(context.Background()) + lastAcceptedID, err = proVM.LastAccepted(t.Context()) require.NoError(err) require.Equal(snowmantest.GenesisID, lastAcceptedID) } @@ -1345,7 +1345,7 @@ func TestBuildBlockDuringWindow(t *testing.T) { coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.ApricotPhase4, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { @@ -1395,27 +1395,27 @@ func TestBuildBlockDuringWindow(t *testing.T) { proVM.Clock.Set(statelessBlock0.Timestamp()) - statefulBlock0, err := proVM.ParseBlock(context.Background(), statelessBlock0.Bytes()) + statefulBlock0, err := proVM.ParseBlock(t.Context(), statelessBlock0.Bytes()) require.NoError(err) - require.NoError(statefulBlock0.Verify(context.Background())) + require.NoError(statefulBlock0.Verify(t.Context())) - require.NoError(proVM.SetPreference(context.Background(), statefulBlock0.ID())) + require.NoError(proVM.SetPreference(t.Context(), statefulBlock0.ID())) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return coreBlk1, nil } - statefulBlock1, err := proVM.BuildBlock(context.Background()) + statefulBlock1, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(statefulBlock1.Verify(context.Background())) + require.NoError(statefulBlock1.Verify(t.Context())) - require.NoError(proVM.SetPreference(context.Background(), statefulBlock1.ID())) + require.NoError(proVM.SetPreference(t.Context(), statefulBlock1.ID())) - require.NoError(statefulBlock0.Accept(context.Background())) + require.NoError(statefulBlock0.Accept(t.Context())) - require.NoError(statefulBlock1.Accept(context.Background())) + require.NoError(statefulBlock1.Accept(t.Context())) } // Ensure that Accepting a PostForkBlock (A) containing core block (X) causes @@ -1431,7 +1431,7 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.ApricotPhase4, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // create pre-fork block X and post-fork block A @@ -1440,10 +1440,10 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } - aBlock, err := proVM.BuildBlock(context.Background()) + aBlock, err := proVM.BuildBlock(t.Context()) require.NoError(err) coreVM.BuildBlockF = nil - require.NoError(aBlock.Verify(context.Background())) + require.NoError(aBlock.Verify(t.Context())) // use a different way to construct pre-fork block Y and post-fork block B yBlock := snowmantest.BuildChild(snowmantest.Genesis) @@ -1465,7 +1465,7 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { }, } - require.NoError(bBlock.Verify(context.Background())) + require.NoError(bBlock.Verify(t.Context())) // append Z/C to Y/B zBlock := 
snowmantest.BuildChild(yBlock) @@ -1473,13 +1473,13 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return zBlock, nil } - require.NoError(proVM.SetPreference(context.Background(), bBlock.ID())) + require.NoError(proVM.SetPreference(t.Context(), bBlock.ID())) proVM.Set(proVM.Time().Add(proposer.MaxBuildDelay)) - cBlock, err := proVM.BuildBlock(context.Background()) + cBlock, err := proVM.BuildBlock(t.Context()) require.NoError(err) coreVM.BuildBlockF = nil - require.NoError(cBlock.Verify(context.Background())) + require.NoError(cBlock.Verify(t.Context())) require.Equal(bBlock.Parent(), aBlock.Parent()) require.Equal(yBlock.ID(), zBlock.Parent()) @@ -1488,7 +1488,7 @@ func TestTwoForks_OneIsAccepted(t *testing.T) { require.NotEqual(snowtest.Rejected, yBlock.Status) // accept A - require.NoError(aBlock.Accept(context.Background())) + require.NoError(aBlock.Accept(t.Context())) require.Equal(snowtest.Accepted, xBlock.Status) require.Equal(snowtest.Rejected, yBlock.Status) @@ -1500,7 +1500,7 @@ func TestTooFarAdvanced(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.ApricotPhase4, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() xBlock := snowmantest.BuildChild(snowmantest.Genesis) @@ -1509,9 +1509,9 @@ func TestTooFarAdvanced(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } - aBlock, err := proVM.BuildBlock(context.Background()) + aBlock, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(aBlock.Verify(context.Background())) + require.NoError(aBlock.Verify(t.Context())) ySlb, err := statelessblock.BuildUnsigned( aBlock.ID(), @@ -1530,7 +1530,7 @@ func TestTooFarAdvanced(t *testing.T) { }, } - err = bBlock.Verify(context.Background()) + err = bBlock.Verify(t.Context()) require.ErrorIs(err, errProposerWindowNotStarted) ySlb, err = statelessblock.BuildUnsigned( @@ -1551,7 +1551,7 @@ func TestTooFarAdvanced(t *testing.T) { }, } - err = bBlock.Verify(context.Background()) + err = bBlock.Verify(t.Context()) require.ErrorIs(err, errTimeTooAdvanced) } @@ -1571,7 +1571,7 @@ func TestTwoOptions_OneIsAccepted(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.ApricotPhase4, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() xTestBlock := snowmantest.BuildChild(snowmantest.Genesis) @@ -1586,23 +1586,23 @@ func TestTwoOptions_OneIsAccepted(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } - aBlockIntf, err := proVM.BuildBlock(context.Background()) + aBlockIntf, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, aBlockIntf) aBlock := aBlockIntf.(*postForkBlock) - opts, err := aBlock.Options(context.Background()) + opts, err := aBlock.Options(t.Context()) require.NoError(err) bBlock := opts[0] cBlock := opts[1] - require.NoError(aBlock.Verify(context.Background())) - require.NoError(bBlock.Verify(context.Background())) - require.NoError(cBlock.Verify(context.Background())) + require.NoError(aBlock.Verify(t.Context())) + require.NoError(bBlock.Verify(t.Context())) + require.NoError(cBlock.Verify(t.Context())) - require.NoError(aBlock.Accept(context.Background())) - require.NoError(bBlock.Accept(context.Background())) + require.NoError(aBlock.Accept(t.Context())) + 
require.NoError(bBlock.Accept(t.Context())) // the other pre-fork option should be rejected require.Equal(snowtest.Rejected, xBlock.opts[1].Status) @@ -1615,14 +1615,14 @@ func TestLaggedPChainHeight(t *testing.T) { coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() innerBlock := snowmantest.BuildChild(snowmantest.Genesis) coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return innerBlock, nil } - blockIntf, err := proVM.BuildBlock(context.Background()) + blockIntf, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, blockIntf) @@ -1731,7 +1731,7 @@ func TestRejectedHeightNotIndexed(t *testing.T) { ctx.ValidatorState = valState require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly initialState, @@ -1741,15 +1741,15 @@ func TestRejectedHeightNotIndexed(t *testing.T) { nil, )) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() // Initialize shouldn't be called again coreVM.InitializeF = nil - require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(proVM.SetState(t.Context(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) + require.NoError(proVM.SetPreference(t.Context(), snowmantest.GenesisID)) // create inner block X and outer block A xBlock := snowmantest.BuildChild(snowmantest.Genesis) @@ -1757,11 +1757,11 @@ func TestRejectedHeightNotIndexed(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } - aBlock, err := proVM.BuildBlock(context.Background()) + aBlock, err := proVM.BuildBlock(t.Context()) require.NoError(err) coreVM.BuildBlockF = nil - require.NoError(aBlock.Verify(context.Background())) + require.NoError(aBlock.Verify(t.Context())) // use a different way to construct inner block Y and outer block B yBlock := snowmantest.BuildChild(snowmantest.Genesis) @@ -1783,20 +1783,20 @@ func TestRejectedHeightNotIndexed(t *testing.T) { }, } - require.NoError(bBlock.Verify(context.Background())) + require.NoError(bBlock.Verify(t.Context())) // accept A - require.NoError(aBlock.Accept(context.Background())) + require.NoError(aBlock.Accept(t.Context())) coreHeights = append(coreHeights, xBlock.ID()) - blkID, err := proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) + blkID, err := proVM.GetBlockIDAtHeight(t.Context(), aBlock.Height()) require.NoError(err) require.Equal(aBlock.ID(), blkID) // reject B - require.NoError(bBlock.Reject(context.Background())) + require.NoError(bBlock.Reject(t.Context())) - blkID, err = proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) + blkID, err = proVM.GetBlockIDAtHeight(t.Context(), aBlock.Height()) require.NoError(err) require.Equal(aBlock.ID(), blkID) } @@ -1900,7 +1900,7 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { ctx.ValidatorState = valState require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly initialState, @@ -1910,15 +1910,15 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { nil, )) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + 
require.NoError(proVM.Shutdown(t.Context())) }() // Initialize shouldn't be called again coreVM.InitializeF = nil - require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(proVM.SetState(t.Context(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), snowmantest.GenesisID)) + require.NoError(proVM.SetPreference(t.Context(), snowmantest.GenesisID)) xTestBlock := snowmantest.BuildChild(snowmantest.Genesis) xBlock := &TestOptionsBlock{ @@ -1932,43 +1932,43 @@ func TestRejectedOptionHeightNotIndexed(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return xBlock, nil } - aBlockIntf, err := proVM.BuildBlock(context.Background()) + aBlockIntf, err := proVM.BuildBlock(t.Context()) require.NoError(err) require.IsType(&postForkBlock{}, aBlockIntf) aBlock := aBlockIntf.(*postForkBlock) - opts, err := aBlock.Options(context.Background()) + opts, err := aBlock.Options(t.Context()) require.NoError(err) - require.NoError(aBlock.Verify(context.Background())) + require.NoError(aBlock.Verify(t.Context())) bBlock := opts[0] - require.NoError(bBlock.Verify(context.Background())) + require.NoError(bBlock.Verify(t.Context())) cBlock := opts[1] - require.NoError(cBlock.Verify(context.Background())) + require.NoError(cBlock.Verify(t.Context())) // accept A - require.NoError(aBlock.Accept(context.Background())) + require.NoError(aBlock.Accept(t.Context())) coreHeights = append(coreHeights, xBlock.ID()) - blkID, err := proVM.GetBlockIDAtHeight(context.Background(), aBlock.Height()) + blkID, err := proVM.GetBlockIDAtHeight(t.Context(), aBlock.Height()) require.NoError(err) require.Equal(aBlock.ID(), blkID) // accept B - require.NoError(bBlock.Accept(context.Background())) + require.NoError(bBlock.Accept(t.Context())) coreHeights = append(coreHeights, xBlock.opts[0].ID()) - blkID, err = proVM.GetBlockIDAtHeight(context.Background(), bBlock.Height()) + blkID, err = proVM.GetBlockIDAtHeight(t.Context(), bBlock.Height()) require.NoError(err) require.Equal(bBlock.ID(), blkID) // reject C - require.NoError(cBlock.Reject(context.Background())) + require.NoError(cBlock.Reject(t.Context())) - blkID, err = proVM.GetBlockIDAtHeight(context.Background(), cBlock.Height()) + blkID, err = proVM.GetBlockIDAtHeight(t.Context(), cBlock.Height()) require.NoError(err) require.Equal(bBlock.ID(), blkID) } @@ -2016,7 +2016,7 @@ func TestVMInnerBlkCache(t *testing.T) { ctx.NodeID = ids.NodeIDFromCert(pTestCert) require.NoError(vm.Initialize( - context.Background(), + t.Context(), ctx, prefixdb.New([]byte{}, memdb.New()), // make sure that DBs are compressed correctly nil, @@ -2026,7 +2026,7 @@ func TestVMInnerBlkCache(t *testing.T) { nil, )) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() // Create a block near the tip (0). @@ -2049,7 +2049,7 @@ func TestVMInnerBlkCache(t *testing.T) { mockInnerBlkNearTip.EXPECT().Bytes().Return(blkNearTipInnerBytes).Times(1) innerVM.EXPECT().ParseBlock(gomock.Any(), blkNearTipInnerBytes).Return(mockInnerBlkNearTip, nil).Times(2) - _, err = vm.ParseBlock(context.Background(), blkNearTip.Bytes()) + _, err = vm.ParseBlock(t.Context(), blkNearTip.Bytes()) require.NoError(err) // Block should now be in cache because it's a post-fork block @@ -2067,7 +2067,7 @@ func TestVMInnerBlkCache(t *testing.T) { // Parse the block again. This time it shouldn't be cached // because it's not close to the tip. 
- _, err = vm.ParseBlock(context.Background(), blkNearTip.Bytes()) + _, err = vm.ParseBlock(t.Context(), blkNearTip.Bytes()) require.NoError(err) _, ok = vm.innerBlkCache.Get(blkNearTip.ID()) @@ -2128,7 +2128,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { snowCtx.NodeID = ids.NodeIDFromCert(pTestCert) require.NoError(vm.Initialize( - context.Background(), + t.Context(), snowCtx, db, nil, @@ -2138,7 +2138,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { nil, )) defer func() { - require.NoError(vm.Shutdown(context.Background())) + require.NoError(vm.Shutdown(t.Context())) }() { @@ -2148,7 +2148,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { WithVerifyContext: blockmock.NewWithVerifyContext(ctrl), } innerBlk.WithVerifyContext.EXPECT().ShouldVerifyWithContext(gomock.Any()).Return(true, nil).Times(2) - innerBlk.WithVerifyContext.EXPECT().VerifyWithContext(context.Background(), + innerBlk.WithVerifyContext.EXPECT().VerifyWithContext(t.Context(), &block.Context{ PChainHeight: pChainHeight, }, @@ -2163,7 +2163,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { blk.EXPECT().ID().Return(blkID).AnyTimes() require.NoError(vm.verifyAndRecordInnerBlk( - context.Background(), + t.Context(), &block.Context{ PChainHeight: pChainHeight, }, @@ -2173,14 +2173,14 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { // Call VerifyWithContext again but with a different P-Chain height blk.EXPECT().setInnerBlk(innerBlk).AnyTimes() pChainHeight++ - innerBlk.WithVerifyContext.EXPECT().VerifyWithContext(context.Background(), + innerBlk.WithVerifyContext.EXPECT().VerifyWithContext(t.Context(), &block.Context{ PChainHeight: pChainHeight, }, ).Return(nil) require.NoError(vm.verifyAndRecordInnerBlk( - context.Background(), + t.Context(), &block.Context{ PChainHeight: pChainHeight, }, @@ -2204,7 +2204,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { blkID := ids.GenerateTestID() blk.EXPECT().ID().Return(blkID).AnyTimes() require.NoError(vm.verifyAndRecordInnerBlk( - context.Background(), + t.Context(), &block.Context{ PChainHeight: 1, }, @@ -2225,7 +2225,7 @@ func TestVM_VerifyBlockWithContext(t *testing.T) { blk.EXPECT().getInnerBlk().Return(innerBlk).AnyTimes() blkID := ids.GenerateTestID() blk.EXPECT().ID().Return(blkID).AnyTimes() - require.NoError(vm.verifyAndRecordInnerBlk(context.Background(), nil, blk)) + require.NoError(vm.verifyAndRecordInnerBlk(t.Context(), nil, blk)) } } @@ -2301,7 +2301,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { ) require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, db, initialState, @@ -2311,11 +2311,11 @@ func TestHistoricalBlockDeletion(t *testing.T) { nil, )) - lastAcceptedID, err := proVM.LastAccepted(context.Background()) + lastAcceptedID, err := proVM.LastAccepted(t.Context()) require.NoError(err) - require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), lastAcceptedID)) + require.NoError(proVM.SetState(t.Context(), snow.NormalOp)) + require.NoError(proVM.SetPreference(t.Context(), lastAcceptedID)) issueBlock := func() { lastAcceptedBlock := acceptedBlocks[currentHeight] @@ -2324,12 +2324,12 @@ func TestHistoricalBlockDeletion(t *testing.T) { coreVM.BuildBlockF = func(context.Context) (snowman.Block, error) { return innerBlock, nil } - proBlock, err := proVM.BuildBlock(context.Background()) + proBlock, err := proVM.BuildBlock(t.Context()) require.NoError(err) - require.NoError(proBlock.Verify(context.Background())) - 
require.NoError(proVM.SetPreference(context.Background(), proBlock.ID())) - require.NoError(proBlock.Accept(context.Background())) + require.NoError(proBlock.Verify(t.Context())) + require.NoError(proVM.SetPreference(t.Context(), proBlock.ID())) + require.NoError(proBlock.Accept(t.Context())) acceptedBlocks = append(acceptedBlocks, innerBlock) currentHeight++ @@ -2337,14 +2337,14 @@ func TestHistoricalBlockDeletion(t *testing.T) { requireHeights := func(start, end uint64) { for i := start; i <= end; i++ { - _, err := proVM.GetBlockIDAtHeight(context.Background(), i) + _, err := proVM.GetBlockIDAtHeight(t.Context(), i) require.NoError(err) } } requireMissingHeights := func(start, end uint64) { for i := start; i <= end; i++ { - _, err := proVM.GetBlockIDAtHeight(context.Background(), i) + _, err := proVM.GetBlockIDAtHeight(t.Context(), i) require.ErrorIs(err, database.ErrNotFound) } } @@ -2374,7 +2374,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { issueBlock() requireHeights(0, currentHeight) - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) numHistoricalBlocks := uint64(2) proVM = New( @@ -2390,7 +2390,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { ) require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, db, initialState, @@ -2400,11 +2400,11 @@ func TestHistoricalBlockDeletion(t *testing.T) { nil, )) - lastAcceptedID, err = proVM.LastAccepted(context.Background()) + lastAcceptedID, err = proVM.LastAccepted(t.Context()) require.NoError(err) - require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), lastAcceptedID)) + require.NoError(proVM.SetState(t.Context(), snow.NormalOp)) + require.NoError(proVM.SetPreference(t.Context(), lastAcceptedID)) // Verify that old blocks were pruned during startup requireNumHeights(numHistoricalBlocks) @@ -2416,7 +2416,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { issueBlock() requireNumHeights(numHistoricalBlocks) - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) newNumHistoricalBlocks := numHistoricalBlocks + 2 proVM = New( @@ -2432,7 +2432,7 @@ func TestHistoricalBlockDeletion(t *testing.T) { ) require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, db, initialState, @@ -2442,14 +2442,14 @@ func TestHistoricalBlockDeletion(t *testing.T) { nil, )) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() - lastAcceptedID, err = proVM.LastAccepted(context.Background()) + lastAcceptedID, err = proVM.LastAccepted(t.Context()) require.NoError(err) - require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) - require.NoError(proVM.SetPreference(context.Background(), lastAcceptedID)) + require.NoError(proVM.SetState(t.Context(), snow.NormalOp)) + require.NoError(proVM.SetPreference(t.Context(), lastAcceptedID)) // The height index shouldn't be modified at this point requireNumHeights(numHistoricalBlocks) @@ -2471,7 +2471,7 @@ func TestGetPostDurangoSlotTimeWithNoValidators(t *testing.T) { coreVM, valState, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() valState.GetValidatorSetF = func(context.Context, uint64, ids.ID) (map[ids.NodeID]*validators.GetValidatorOutput, error) { @@ -2510,15 +2510,15 @@ func 
TestGetPostDurangoSlotTimeWithNoValidators(t *testing.T) { } } - statefulBlock, err := proVM.ParseBlock(context.Background(), statelessBlock.Bytes()) + statefulBlock, err := proVM.ParseBlock(t.Context(), statelessBlock.Bytes()) require.NoError(err) - require.NoError(statefulBlock.Verify(context.Background())) + require.NoError(statefulBlock.Verify(t.Context())) currentTime := proVM.Clock.Time().Truncate(time.Second) parentTimestamp := statefulBlock.Timestamp() slotTime, err := proVM.getPostDurangoSlotTime( - context.Background(), + t.Context(), statefulBlock.Height()+1, statelessBlock.PChainHeight(), proposer.TimeToSlot(parentTimestamp, currentTime), @@ -2576,12 +2576,12 @@ func TestLocalParse(t *testing.T) { vm := New(innerVM, conf) defer func() { - require.NoError(t, vm.Shutdown(context.Background())) + require.NoError(t, vm.Shutdown(t.Context())) }() db := prefixdb.New([]byte{}, memdb.New()) - _ = vm.Initialize(context.Background(), &snow.Context{ + _ = vm.Initialize(t.Context(), &snow.Context{ Log: logging.NoLog{}, ChainID: chainID, }, db, nil, nil, nil, nil, nil) @@ -2613,7 +2613,7 @@ func TestLocalParse(t *testing.T) { } for _, test := range tests { t.Run(test.name, func(t *testing.T) { - block, err := test.f(context.Background(), test.block) + block, err := test.f(t.Context(), test.block) require.NoError(t, err) require.IsType(t, test.resultingBlock, block) }) @@ -2621,7 +2621,7 @@ func TestLocalParse(t *testing.T) { } func TestTimestampMetrics(t *testing.T) { - ctx := context.Background() + ctx := t.Context() coreVM, _, proVM, _ := initTestProposerVM(t, upgradetest.ApricotPhase4, 0) @@ -2725,7 +2725,7 @@ func TestSelectChildPChainHeight(t *testing.T) { _, vdrState, proVM, _ := initTestProposerVM(t, upgradetest.Latest, 0) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() proVM.Clock.Set(test.time) @@ -2737,7 +2737,7 @@ func TestSelectChildPChainHeight(t *testing.T) { } actualPChainHeight, err := proVM.selectChildPChainHeight( - context.Background(), + t.Context(), test.minPChainHeight, ) require.NoError(err) @@ -2840,7 +2840,7 @@ func TestBootstrappingAheadOfPChainBuildBlockRegression(t *testing.T) { db := prefixdb.New([]byte{0}, memdb.New()) require.NoError(proVM.Initialize( - context.Background(), + t.Context(), ctx, db, nil, @@ -2850,10 +2850,10 @@ func TestBootstrappingAheadOfPChainBuildBlockRegression(t *testing.T) { nil, )) defer func() { - require.NoError(proVM.Shutdown(context.Background())) + require.NoError(proVM.Shutdown(t.Context())) }() - require.NoError(proVM.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(proVM.SetState(t.Context(), snow.Bootstrapping)) // During bootstrapping, the first post-fork block is verified against the // P-chain height, so we provide a valid height. 
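The hunks above and below repeat the one mechanical change applied throughout these proposervm tests: every call that previously built its own background context now takes the context owned by the running test. The short sketch below is illustrative only and is not part of the patch; doRPC is a hypothetical stand-in for any context-aware call exercised here (BuildBlock, ParseBlock, Shutdown, and so on). It shows the property the migration relies on: a test-scoped context (Go 1.24+) is canceled automatically when the test body finishes, just before cleanup functions run, so work accidentally left holding the context cannot silently outlive the test.

package example_test

import (
	"context"
	"errors"
	"testing"
	"time"
)

// doRPC is a hypothetical stand-in for any context-aware call made by the
// tests in this patch. It returns the context's error if the context is
// canceled before the simulated work completes.
func doRPC(ctx context.Context, work time.Duration) error {
	select {
	case <-ctx.Done():
		return ctx.Err()
	case <-time.After(work):
		return nil
	}
}

func TestContextScopedToTest(t *testing.T) {
	// The test-scoped context stays live for the duration of the test body.
	ctx := t.Context()
	if err := doRPC(ctx, 10*time.Millisecond); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}

	// By the time cleanup functions run, the context has been canceled, so
	// anything still holding it fails fast instead of leaking past the test.
	t.Cleanup(func() {
		if !errors.Is(ctx.Err(), context.Canceled) {
			t.Errorf("expected canceled context during cleanup, got %v", ctx.Err())
		}
	})
}
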
@@ -2868,11 +2868,11 @@ func TestBootstrappingAheadOfPChainBuildBlockRegression(t *testing.T) { ) require.NoError(err) - block1, err := proVM.ParseBlock(context.Background(), statelessBlock1.Bytes()) + block1, err := proVM.ParseBlock(t.Context(), statelessBlock1.Bytes()) require.NoError(err) - require.NoError(block1.Verify(context.Background())) - require.NoError(block1.Accept(context.Background())) + require.NoError(block1.Verify(t.Context())) + require.NoError(block1.Accept(t.Context())) // During bootstrapping, the additional post-fork blocks are not verified // against the local P-chain height, so even if we provide a height higher @@ -2891,23 +2891,23 @@ func TestBootstrappingAheadOfPChainBuildBlockRegression(t *testing.T) { ) require.NoError(err) - block2, err := proVM.ParseBlock(context.Background(), statelessBlock2.Bytes()) + block2, err := proVM.ParseBlock(t.Context(), statelessBlock2.Bytes()) require.NoError(err) - require.NoError(block2.Verify(context.Background())) - require.NoError(block2.Accept(context.Background())) + require.NoError(block2.Verify(t.Context())) + require.NoError(block2.Accept(t.Context())) - require.NoError(proVM.SetPreference(context.Background(), statelessBlock2.ID())) + require.NoError(proVM.SetPreference(t.Context(), statelessBlock2.ID())) // At this point, the VM has a last accepted block with a P-chain height // greater than our locally accepted P-chain. - require.NoError(proVM.SetState(context.Background(), snow.NormalOp)) + require.NoError(proVM.SetState(t.Context(), snow.NormalOp)) // If the inner VM requests building a block, the proposervm passes that // message to the consensus engine. This is really the source of the issue, // as the proposervm is not currently in a state where it can correctly // build any blocks. - msg, err := proVM.WaitForEvent(context.Background()) + msg, err := proVM.WaitForEvent(t.Context()) require.NoError(err) require.Equal(common.PendingTxs, msg) @@ -2919,6 +2919,6 @@ func TestBootstrappingAheadOfPChainBuildBlockRegression(t *testing.T) { } // Attempting to build a block now errors with an unexpected error. - _, err = proVM.BuildBlock(context.Background()) + _, err = proVM.BuildBlock(t.Context()) require.NoError(err) } diff --git a/vms/registry/vm_registry_test.go b/vms/registry/vm_registry_test.go index b955f11cefd4..685db431e38e 100644 --- a/vms/registry/vm_registry_test.go +++ b/vms/registry/vm_registry_test.go @@ -4,7 +4,6 @@ package registry import ( - "context" "testing" "github.com/stretchr/testify/require" @@ -57,7 +56,7 @@ func TestReload_Success(t *testing.T) { Times(1). Return(nil) - installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) + installedVMs, failedVMs, err := resources.vmRegistry.Reload(t.Context()) require.NoError(err) require.ElementsMatch([]ids.ID{id3, id4}, installedVMs) require.Empty(failedVMs) @@ -71,7 +70,7 @@ func TestReload_GetNewVMsFails(t *testing.T) { resources.mockVMGetter.EXPECT().Get().Times(1).Return(nil, nil, errTest) - installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) + installedVMs, failedVMs, err := resources.vmRegistry.Reload(t.Context()) require.ErrorIs(err, errTest) require.Empty(installedVMs) require.Empty(failedVMs) @@ -111,7 +110,7 @@ func TestReload_PartialRegisterFailure(t *testing.T) { Times(1). 
Return(nil) - installedVMs, failedVMs, err := resources.vmRegistry.Reload(context.Background()) + installedVMs, failedVMs, err := resources.vmRegistry.Reload(t.Context()) require.NoError(err) require.Len(failedVMs, 1) require.ErrorIs(failedVMs[id3], errTest) diff --git a/vms/rpcchainvm/batched_vm_test.go b/vms/rpcchainvm/batched_vm_test.go index 502c77aeeaae..6d2de81f4c1f 100644 --- a/vms/rpcchainvm/batched_vm_test.go +++ b/vms/rpcchainvm/batched_vm_test.go @@ -4,7 +4,6 @@ package rpcchainvm import ( - "context" "testing" "time" @@ -77,21 +76,21 @@ func TestBatchedParseBlockCaching(t *testing.T) { // Create and start the plugin vm := buildClientHelper(require, testKey) - defer vm.runtime.Stop(context.Background()) + defer vm.runtime.Stop(t.Context()) ctx := snowtest.Context(t, snowtest.CChainID) - require.NoError(vm.Initialize(context.Background(), ctx, memdb.New(), nil, nil, nil, nil, nil)) + require.NoError(vm.Initialize(t.Context(), ctx, memdb.New(), nil, nil, nil, nil, nil)) // Call should parse the first block - blk, err := vm.ParseBlock(context.Background(), blkBytes1) + blk, err := vm.ParseBlock(t.Context(), blkBytes1) require.NoError(err) require.Equal(blkID1, blk.ID()) require.IsType(&chain.BlockWrapper{}, blk) // Call should cache the first block and parse the second block - blks, err := vm.BatchedParseBlock(context.Background(), [][]byte{blkBytes1, blkBytes2}) + blks, err := vm.BatchedParseBlock(t.Context(), [][]byte{blkBytes1, blkBytes2}) require.NoError(err) require.Len(blks, 2) require.Equal(blkID1, blks[0].ID()) @@ -101,7 +100,7 @@ func TestBatchedParseBlockCaching(t *testing.T) { require.IsType(&chain.BlockWrapper{}, blks[1]) // Call should be fully cached and not result in a grpc call - blks, err = vm.BatchedParseBlock(context.Background(), [][]byte{blkBytes1, blkBytes2}) + blks, err = vm.BatchedParseBlock(t.Context(), [][]byte{blkBytes1, blkBytes2}) require.NoError(err) require.Len(blks, 2) require.Equal(blkID1, blks[0].ID()) diff --git a/vms/rpcchainvm/ghttp/gconn/gconn_test.go b/vms/rpcchainvm/ghttp/gconn/gconn_test.go index 144a658ede3e..7d36bafba077 100644 --- a/vms/rpcchainvm/ghttp/gconn/gconn_test.go +++ b/vms/rpcchainvm/ghttp/gconn/gconn_test.go @@ -40,7 +40,7 @@ func TestErrIOEOF(t *testing.T) { _ = server.Serve(listener) }() - grpcConn, err := grpc.DialContext(context.Background(), "bufnet", + grpcConn, err := grpc.DialContext(t.Context(), "bufnet", grpc.WithContextDialer(func(context.Context, string) (net.Conn, error) { return listener.Dial() }), @@ -82,7 +82,7 @@ func TestOSErrDeadlineExceeded(t *testing.T) { _ = server.Serve(listener) }() - grpcConn, err := grpc.DialContext(context.Background(), "bufnet", + grpcConn, err := grpc.DialContext(t.Context(), "bufnet", grpc.WithContextDialer(func(context.Context, string) (net.Conn, error) { return listener.Dial() }), diff --git a/vms/rpcchainvm/ghttp/greader/greader_test.go b/vms/rpcchainvm/ghttp/greader/greader_test.go index 9a3888d42dae..cf9304b30fa4 100644 --- a/vms/rpcchainvm/ghttp/greader/greader_test.go +++ b/vms/rpcchainvm/ghttp/greader/greader_test.go @@ -32,7 +32,7 @@ func TestErrIOEOF(t *testing.T) { _ = server.Serve(listener) }() - conn, err := grpc.DialContext(context.Background(), "bufnet", + conn, err := grpc.DialContext(t.Context(), "bufnet", grpc.WithContextDialer(func(context.Context, string) (net.Conn, error) { return listener.Dial() }), diff --git a/vms/rpcchainvm/grpcutils/client_test.go b/vms/rpcchainvm/grpcutils/client_test.go index 1ea8681693e1..02a395c8077e 100644 --- 
a/vms/rpcchainvm/grpcutils/client_test.go +++ b/vms/rpcchainvm/grpcutils/client_test.go @@ -4,7 +4,6 @@ package grpcutils import ( - "context" "fmt" "testing" "time" @@ -95,7 +94,7 @@ func TestWaitForReadyCallOption(t *testing.T) { _ = listener.Close() db := pb.NewDatabaseClient(conn) - _, err = db.Put(context.Background(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}, grpc.WaitForReady(false)) + _, err = db.Put(t.Context(), &pb.PutRequest{Key: []byte("foo"), Value: []byte("bar")}, grpc.WaitForReady(false)) s, ok := status.FromError(err) fmt.Printf("status: %v\n", s) require.True(ok) diff --git a/vms/rpcchainvm/state_syncable_vm_test.go b/vms/rpcchainvm/state_syncable_vm_test.go index fe3712daae69..f62150d8e252 100644 --- a/vms/rpcchainvm/state_syncable_vm_test.go +++ b/vms/rpcchainvm/state_syncable_vm_test.go @@ -303,28 +303,28 @@ func TestStateSyncEnabled(t *testing.T) { // Create and start the plugin vm := buildClientHelper(require, testKey) - defer vm.runtime.Stop(context.Background()) + defer vm.runtime.Stop(t.Context()) // test state sync not implemented // Note that enabled == false is returned rather than // common.ErrStateSyncableVMNotImplemented - enabled, err := vm.StateSyncEnabled(context.Background()) + enabled, err := vm.StateSyncEnabled(t.Context()) require.NoError(err) require.False(enabled) // test state sync disabled - enabled, err = vm.StateSyncEnabled(context.Background()) + enabled, err = vm.StateSyncEnabled(t.Context()) require.NoError(err) require.False(enabled) // test state sync enabled - enabled, err = vm.StateSyncEnabled(context.Background()) + enabled, err = vm.StateSyncEnabled(t.Context()) require.NoError(err) require.True(enabled) // test a non-special error. // TODO: retrieve exact error - _, err = vm.StateSyncEnabled(context.Background()) + _, err = vm.StateSyncEnabled(t.Context()) require.Error(err) //nolint:forbidigo // currently returns grpc errors } @@ -334,14 +334,14 @@ func TestGetOngoingSyncStateSummary(t *testing.T) { // Create and start the plugin vm := buildClientHelper(require, testKey) - defer vm.runtime.Stop(context.Background()) + defer vm.runtime.Stop(t.Context()) // test unimplemented case; this is just a guard - _, err := vm.GetOngoingSyncStateSummary(context.Background()) + _, err := vm.GetOngoingSyncStateSummary(t.Context()) require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful retrieval - summary, err := vm.GetOngoingSyncStateSummary(context.Background()) + summary, err := vm.GetOngoingSyncStateSummary(t.Context()) require.NoError(err) require.Equal(mockedSummary.ID(), summary.ID()) require.Equal(mockedSummary.Height(), summary.Height()) @@ -349,7 +349,7 @@ func TestGetOngoingSyncStateSummary(t *testing.T) { // test a non-special error. 
// TODO: retrieve exact error - _, err = vm.GetOngoingSyncStateSummary(context.Background()) + _, err = vm.GetOngoingSyncStateSummary(t.Context()) require.Error(err) //nolint:forbidigo // currently returns grpc errors } @@ -359,14 +359,14 @@ func TestGetLastStateSummary(t *testing.T) { // Create and start the plugin vm := buildClientHelper(require, testKey) - defer vm.runtime.Stop(context.Background()) + defer vm.runtime.Stop(t.Context()) // test unimplemented case; this is just a guard - _, err := vm.GetLastStateSummary(context.Background()) + _, err := vm.GetLastStateSummary(t.Context()) require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful retrieval - summary, err := vm.GetLastStateSummary(context.Background()) + summary, err := vm.GetLastStateSummary(t.Context()) require.NoError(err) require.Equal(mockedSummary.ID(), summary.ID()) require.Equal(mockedSummary.Height(), summary.Height()) @@ -374,7 +374,7 @@ func TestGetLastStateSummary(t *testing.T) { // test a non-special error. // TODO: retrieve exact error - _, err = vm.GetLastStateSummary(context.Background()) + _, err = vm.GetLastStateSummary(t.Context()) require.Error(err) //nolint:forbidigo // currently returns grpc errors } @@ -384,26 +384,26 @@ func TestParseStateSummary(t *testing.T) { // Create and start the plugin vm := buildClientHelper(require, testKey) - defer vm.runtime.Stop(context.Background()) + defer vm.runtime.Stop(t.Context()) // test unimplemented case; this is just a guard - _, err := vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) + _, err := vm.ParseStateSummary(t.Context(), mockedSummary.Bytes()) require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful parsing - summary, err := vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) + summary, err := vm.ParseStateSummary(t.Context(), mockedSummary.Bytes()) require.NoError(err) require.Equal(mockedSummary.ID(), summary.ID()) require.Equal(mockedSummary.Height(), summary.Height()) require.Equal(mockedSummary.Bytes(), summary.Bytes()) // test parsing nil summary - _, err = vm.ParseStateSummary(context.Background(), nil) + _, err = vm.ParseStateSummary(t.Context(), nil) require.Error(err) //nolint:forbidigo // currently returns grpc errors // test a non-special error. // TODO: retrieve exact error - _, err = vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) + _, err = vm.ParseStateSummary(t.Context(), mockedSummary.Bytes()) require.Error(err) //nolint:forbidigo // currently returns grpc errors } @@ -413,14 +413,14 @@ func TestGetStateSummary(t *testing.T) { // Create and start the plugin vm := buildClientHelper(require, testKey) - defer vm.runtime.Stop(context.Background()) + defer vm.runtime.Stop(t.Context()) // test unimplemented case; this is just a guard - _, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) + _, err := vm.GetStateSummary(t.Context(), mockedSummary.Height()) require.Equal(block.ErrStateSyncableVMNotImplemented, err) // test successful retrieval - summary, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) + summary, err := vm.GetStateSummary(t.Context(), mockedSummary.Height()) require.NoError(err) require.Equal(mockedSummary.ID(), summary.ID()) require.Equal(mockedSummary.Height(), summary.Height()) @@ -428,7 +428,7 @@ func TestGetStateSummary(t *testing.T) { // test a non-special error. 
// TODO: retrieve exact error - _, err = vm.GetStateSummary(context.Background(), mockedSummary.Height()) + _, err = vm.GetStateSummary(t.Context(), mockedSummary.Height()) require.Error(err) //nolint:forbidigo // currently returns grpc errors } @@ -438,25 +438,25 @@ func TestAcceptStateSummary(t *testing.T) { // Create and start the plugin vm := buildClientHelper(require, testKey) - defer vm.runtime.Stop(context.Background()) + defer vm.runtime.Stop(t.Context()) // retrieve the summary first - summary, err := vm.GetStateSummary(context.Background(), mockedSummary.Height()) + summary, err := vm.GetStateSummary(t.Context(), mockedSummary.Height()) require.NoError(err) // test status Summary - status, err := summary.Accept(context.Background()) + status, err := summary.Accept(t.Context()) require.NoError(err) require.Equal(block.StateSyncStatic, status) // test skipped Summary - status, err = summary.Accept(context.Background()) + status, err = summary.Accept(t.Context()) require.NoError(err) require.Equal(block.StateSyncSkipped, status) // test a non-special error. // TODO: retrieve exact error - _, err = summary.Accept(context.Background()) + _, err = summary.Accept(t.Context()) require.Error(err) //nolint:forbidigo // currently returns grpc errors } @@ -468,45 +468,45 @@ func TestLastAcceptedBlockPostStateSummaryAccept(t *testing.T) { // Create and start the plugin vm := buildClientHelper(require, testKey) - defer vm.runtime.Stop(context.Background()) + defer vm.runtime.Stop(t.Context()) // Step 1: initialize VM and check initial LastAcceptedBlock ctx := snowtest.Context(t, snowtest.CChainID) - require.NoError(vm.Initialize(context.Background(), ctx, prefixdb.New([]byte{}, memdb.New()), nil, nil, nil, nil, nil)) + require.NoError(vm.Initialize(t.Context(), ctx, prefixdb.New([]byte{}, memdb.New()), nil, nil, nil, nil, nil)) - blkID, err := vm.LastAccepted(context.Background()) + blkID, err := vm.LastAccepted(t.Context()) require.NoError(err) require.Equal(preSummaryBlk.ID(), blkID) - lastBlk, err := vm.GetBlock(context.Background(), blkID) + lastBlk, err := vm.GetBlock(t.Context(), blkID) require.NoError(err) require.Equal(preSummaryBlk.Height(), lastBlk.Height()) // Step 2: pick a state summary to an higher height and accept it - summary, err := vm.ParseStateSummary(context.Background(), mockedSummary.Bytes()) + summary, err := vm.ParseStateSummary(t.Context(), mockedSummary.Bytes()) require.NoError(err) - status, err := summary.Accept(context.Background()) + status, err := summary.Accept(t.Context()) require.NoError(err) require.Equal(block.StateSyncStatic, status) // State Sync accept does not duly update LastAccepted block information // since state sync can complete asynchronously - blkID, err = vm.LastAccepted(context.Background()) + blkID, err = vm.LastAccepted(t.Context()) require.NoError(err) - lastBlk, err = vm.GetBlock(context.Background(), blkID) + lastBlk, err = vm.GetBlock(t.Context(), blkID) require.NoError(err) require.Equal(preSummaryBlk.Height(), lastBlk.Height()) // Setting state to bootstrapping duly update last accepted block - require.NoError(vm.SetState(context.Background(), snow.Bootstrapping)) + require.NoError(vm.SetState(t.Context(), snow.Bootstrapping)) - blkID, err = vm.LastAccepted(context.Background()) + blkID, err = vm.LastAccepted(t.Context()) require.NoError(err) - lastBlk, err = vm.GetBlock(context.Background(), blkID) + lastBlk, err = vm.GetBlock(t.Context(), blkID) require.NoError(err) require.Equal(summary.Height(), lastBlk.Height()) } diff --git 
a/vms/rpcchainvm/vm_test.go b/vms/rpcchainvm/vm_test.go index fd23ee4edc37..36115c7b84a3 100644 --- a/vms/rpcchainvm/vm_test.go +++ b/vms/rpcchainvm/vm_test.go @@ -100,7 +100,7 @@ func TestHelperProcess(t *testing.T) { } mockedVM := TestServerPluginMap[testKey](t, true /*loadExpectations*/) - err := Serve(context.Background(), mockedVM) + err := Serve(t.Context(), mockedVM) if err != nil { os.Exit(1) } @@ -184,11 +184,9 @@ func TestRuntimeSubprocessBootstrap(t *testing.T) { listener, err := grpcutils.NewListener() require.NoError(err) - require.NoError(os.Setenv(runtime.EngineAddressKey, listener.Addr().String())) - - ctx, cancel := context.WithCancel(context.Background()) - defer cancel() + t.Setenv(runtime.EngineAddressKey, listener.Addr().String()) + ctx := t.Context() if test.serveVM { go func() { _ = Serve(ctx, vm) @@ -196,7 +194,7 @@ func TestRuntimeSubprocessBootstrap(t *testing.T) { } status, stopper, err := subprocess.Bootstrap( - context.Background(), + t.Context(), listener, helperProcess("dummy"), test.config, @@ -233,7 +231,7 @@ func TestNewHTTPHandler(t *testing.T) { _ = grpcServer.Serve(listener) }() - cc, err := grpc.DialContext(context.Background(), "bufnet", + cc, err := grpc.DialContext(t.Context(), "bufnet", grpc.WithContextDialer(func(context.Context, string) (net.Conn, error) { return listener.Dial() }), @@ -250,7 +248,7 @@ func TestNewHTTPHandler(t *testing.T) { logging.NoLog{}, ) - handler, err := client.NewHTTPHandler(context.Background()) + handler, err := client.NewHTTPHandler(t.Context()) require.NoError(err) w := httptest.NewRecorder() diff --git a/vms/rpcchainvm/with_context_vm_test.go b/vms/rpcchainvm/with_context_vm_test.go index d4135bc16f39..9018e8151b60 100644 --- a/vms/rpcchainvm/with_context_vm_test.go +++ b/vms/rpcchainvm/with_context_vm_test.go @@ -4,7 +4,6 @@ package rpcchainvm import ( - "context" "testing" "time" @@ -95,21 +94,21 @@ func TestContextVMSummary(t *testing.T) { // Create and start the plugin vm := buildClientHelper(require, testKey) - defer vm.runtime.Stop(context.Background()) + defer vm.runtime.Stop(t.Context()) ctx := snowtest.Context(t, snowtest.CChainID) - require.NoError(vm.Initialize(context.Background(), ctx, memdb.New(), nil, nil, nil, nil, nil)) + require.NoError(vm.Initialize(t.Context(), ctx, memdb.New(), nil, nil, nil, nil, nil)) - blkIntf, err := vm.BuildBlockWithContext(context.Background(), blockContext) + blkIntf, err := vm.BuildBlockWithContext(t.Context(), blockContext) require.NoError(err) blk, ok := blkIntf.(block.WithVerifyContext) require.True(ok) - shouldVerify, err := blk.ShouldVerifyWithContext(context.Background()) + shouldVerify, err := blk.ShouldVerifyWithContext(t.Context()) require.NoError(err) require.True(shouldVerify) - require.NoError(blk.VerifyWithContext(context.Background(), blockContext)) + require.NoError(blk.VerifyWithContext(t.Context(), blockContext)) } diff --git a/vms/txs/mempool/mempool_test.go b/vms/txs/mempool/mempool_test.go index 4977b6b4a8dc..7d53e259b72c 100644 --- a/vms/txs/mempool/mempool_test.go +++ b/vms/txs/mempool/mempool_test.go @@ -303,7 +303,7 @@ func TestBlockBuilderMaxMempoolSizeHandling(t *testing.T) { func TestWaitForEventCancelled(t *testing.T) { m := newMempool() - ctx, cancel := context.WithCancel(context.Background()) + ctx, cancel := context.WithCancel(t.Context()) cancel() _, err := m.WaitForEvent(ctx) @@ -320,7 +320,7 @@ func TestWaitForEventWithTx(t *testing.T) { errs <- m.Add(tx) }() - msg, err := m.WaitForEvent(context.Background()) + msg, err := 
m.WaitForEvent(t.Context()) require.NoError(err) require.Equal(common.PendingTxs, msg) require.NoError(<-errs) diff --git a/wallet/chain/p/builder/builder.go b/wallet/chain/p/builder/builder.go index 61d38746a6c2..04f29df3ee6e 100644 --- a/wallet/chain/p/builder/builder.go +++ b/wallet/chain/p/builder/builder.go @@ -1603,9 +1603,9 @@ func (b *builder) spend( // Initialize the return values with empty slices to preserve backward // compatibility of the json representation of transactions with no // inputs or outputs. - inputs: make([]*avax.TransferableInput, 0), - changeOutputs: make([]*avax.TransferableOutput, 0), - stakeOutputs: make([]*avax.TransferableOutput, 0), + inputs: []*avax.TransferableInput{}, + changeOutputs: []*avax.TransferableOutput{}, + stakeOutputs: []*avax.TransferableOutput{}, } utxosByLocktime := splitByLocktime(utxos, minIssuanceTime) diff --git a/wallet/subnet/primary/common/utxotest/utxotest.go b/wallet/subnet/primary/common/utxotest/utxotest.go index 797e80bf0c90..44583a99ad31 100644 --- a/wallet/subnet/primary/common/utxotest/utxotest.go +++ b/wallet/subnet/primary/common/utxotest/utxotest.go @@ -21,7 +21,7 @@ func NewDeterministicChainUTXOs(t *testing.T, utxoSets map[ids.ID][]*avax.UTXO) for subnetID, utxos := range utxoSets { for _, utxo := range utxos { require.NoError( - t, globalUTXOs.AddUTXO(context.Background(), subnetID, constants.PlatformChainID, utxo), + t, globalUTXOs.AddUTXO(t.Context(), subnetID, constants.PlatformChainID, utxo), ) } } diff --git a/x/archivedb/key_test.go b/x/archivedb/key_test.go index f0d8bc257a71..d7b20a0390d0 100644 --- a/x/archivedb/key_test.go +++ b/x/archivedb/key_test.go @@ -12,10 +12,10 @@ import ( ) func TestNaturalDescSortingForSameKey(t *testing.T) { - key0, _ := newDBKeyFromUser(make([]byte, 0), 0) - key1, _ := newDBKeyFromUser(make([]byte, 0), 1) - key2, _ := newDBKeyFromUser(make([]byte, 0), 2) - key3, _ := newDBKeyFromUser(make([]byte, 0), 3) + key0, _ := newDBKeyFromUser(nil, 0) + key1, _ := newDBKeyFromUser(nil, 1) + key2, _ := newDBKeyFromUser(nil, 2) + key3, _ := newDBKeyFromUser(nil, 3) entry := [][]byte{key0, key1, key2, key3} expected := [][]byte{key3, key2, key1, key0} diff --git a/x/blockdb/database.go b/x/blockdb/database.go index aa2ac3620d4e..14d6c70a9c4d 100644 --- a/x/blockdb/database.go +++ b/x/blockdb/database.go @@ -681,9 +681,12 @@ func (s *Database) recoverUnindexedBlocks(startOffset, endOffset uint64) error { zap.Uint64("endOffset", endOffset), ) - // Start scan from where the index left off. - currentScanOffset := startOffset - recoveredHeights := make([]BlockHeight, 0) + var ( + // Start scan from where the index left off. 
+ currentScanOffset = startOffset + numRecoveredHeights int + maxRecoveredHeight BlockHeight + ) for currentScanOffset < endOffset { bh, err := s.recoverBlockAtOffset(currentScanOffset, endOffset) if err != nil { @@ -706,7 +709,8 @@ func (s *Database) recoverUnindexedBlocks(startOffset, endOffset uint64) error { zap.Uint32("blockSize", bh.Size), zap.Uint64("dataOffset", currentScanOffset), ) - recoveredHeights = append(recoveredHeights, bh.Height) + numRecoveredHeights++ + maxRecoveredHeight = max(maxRecoveredHeight, bh.Height) blockTotalSize, err := safemath.Add(uint64(sizeOfBlockEntryHeader), uint64(bh.Size)) if err != nil { return fmt.Errorf("recovery: overflow in block size calculation: %w", err) @@ -720,14 +724,7 @@ func (s *Database) recoverUnindexedBlocks(startOffset, endOffset uint64) error { // Update the max block height if max recovered height is greater than // the current max height. - if len(recoveredHeights) > 0 { - maxRecoveredHeight := recoveredHeights[0] - for _, height := range recoveredHeights[1:] { - if height > maxRecoveredHeight { - maxRecoveredHeight = height - } - } - + if numRecoveredHeights > 0 { currentMaxHeight := s.maxBlockHeight.Load() if maxRecoveredHeight > currentMaxHeight || currentMaxHeight == unsetHeight { s.maxBlockHeight.Store(maxRecoveredHeight) @@ -740,7 +737,7 @@ func (s *Database) recoverUnindexedBlocks(startOffset, endOffset uint64) error { maxHeight := s.maxBlockHeight.Load() s.log.Info("Recovery: Scan finished", - zap.Int("recoveredBlocks", len(recoveredHeights)), + zap.Int("recoveredBlocks", numRecoveredHeights), zap.Uint64("finalNextWriteOffset", s.nextDataWriteOffset.Load()), zap.Uint64("maxBlockHeight", maxHeight), ) diff --git a/x/merkledb/db_test.go b/x/merkledb/db_test.go index ed21484cb111..395812165576 100644 --- a/x/merkledb/db_test.go +++ b/x/merkledb/db_test.go @@ -67,14 +67,14 @@ func Test_MerkleDB_GetValues_Safety(t *testing.T) { value := []byte{0, 1, 2} require.NoError(db.Put(keyBytes, value)) - gotValues, errs := db.GetValues(context.Background(), [][]byte{keyBytes}) + gotValues, errs := db.GetValues(t.Context(), [][]byte{keyBytes}) require.Len(errs, 1) require.NoError(errs[0]) require.Equal(value, gotValues[0]) gotValues[0][0]++ // editing the value array shouldn't affect the db - gotValues, errs = db.GetValues(context.Background(), [][]byte{keyBytes}) + gotValues, errs = db.GetValues(t.Context(), [][]byte{keyBytes}) require.Len(errs, 1) require.NoError(errs[0]) require.Equal(value, gotValues[0]) @@ -113,7 +113,7 @@ func Test_MerkleDB_DB_Load_Root_From_DB(t *testing.T) { defer baseDB.Close() db, err := New( - context.Background(), + t.Context(), baseDB, NewConfig(), ) @@ -130,24 +130,24 @@ func Test_MerkleDB_DB_Load_Root_From_DB(t *testing.T) { Value: hashing.ComputeHash256(k), }) } - view, err := db.NewView(context.Background(), ViewChanges{BatchOps: ops}) + view, err := db.NewView(t.Context(), ViewChanges{BatchOps: ops}) require.NoError(err) - require.NoError(view.CommitToDB(context.Background())) + require.NoError(view.CommitToDB(t.Context())) - root, err := db.GetMerkleRoot(context.Background()) + root, err := db.GetMerkleRoot(t.Context()) require.NoError(err) require.NoError(db.Close()) // reloading the db should set the root back to the one that was saved to [baseDB] db, err = New( - context.Background(), + t.Context(), baseDB, NewConfig(), ) require.NoError(err) - reloadedRoot, err := db.GetMerkleRoot(context.Background()) + reloadedRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(root, 
reloadedRoot) } @@ -162,7 +162,7 @@ func Test_MerkleDB_DB_Rebuild(t *testing.T) { config.IntermediateNodeCacheSize = uint(initialSize) db, err := newDB( - context.Background(), + t.Context(), memdb.New(), config, ) @@ -178,31 +178,31 @@ func Test_MerkleDB_DB_Rebuild(t *testing.T) { Value: hashing.ComputeHash256(k), }) } - view, err := db.NewView(context.Background(), ViewChanges{BatchOps: ops}) + view, err := db.NewView(t.Context(), ViewChanges{BatchOps: ops}) require.NoError(err) - require.NoError(view.CommitToDB(context.Background())) + require.NoError(view.CommitToDB(t.Context())) // Get root - root, err := db.GetMerkleRoot(context.Background()) + root, err := db.GetMerkleRoot(t.Context()) require.NoError(err) // Rebuild - require.NoError(db.rebuild(context.Background(), initialSize)) + require.NoError(db.rebuild(t.Context(), initialSize)) // Assert root is the same after rebuild - rebuiltRoot, err := db.GetMerkleRoot(context.Background()) + rebuiltRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(root, rebuiltRoot) // add variation where root has a value require.NoError(db.Put(nil, []byte{})) - root, err = db.GetMerkleRoot(context.Background()) + root, err = db.GetMerkleRoot(t.Context()) require.NoError(err) - require.NoError(db.rebuild(context.Background(), initialSize)) + require.NoError(db.rebuild(t.Context(), initialSize)) - rebuiltRoot, err = db.GetMerkleRoot(context.Background()) + rebuiltRoot, err = db.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(root, rebuiltRoot) } @@ -212,7 +212,7 @@ func Test_MerkleDB_Failed_Batch_Commit(t *testing.T) { memDB := memdb.New() db, err := New( - context.Background(), + t.Context(), memDB, NewConfig(), ) @@ -233,7 +233,7 @@ func Test_MerkleDB_Value_Cache(t *testing.T) { memDB := memdb.New() db, err := New( - context.Background(), + t.Context(), memDB, NewConfig(), ) @@ -270,7 +270,7 @@ func Test_MerkleDB_Invalidate_Siblings_On_Commit(t *testing.T) { require.NotNil(dbTrie) viewToCommit, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte{0}, Value: []byte{0}}, @@ -280,16 +280,16 @@ func Test_MerkleDB_Invalidate_Siblings_On_Commit(t *testing.T) { require.NoError(err) // Create siblings of viewToCommit - sibling1, err := dbTrie.NewView(context.Background(), ViewChanges{}) + sibling1, err := dbTrie.NewView(t.Context(), ViewChanges{}) require.NoError(err) - sibling2, err := dbTrie.NewView(context.Background(), ViewChanges{}) + sibling2, err := dbTrie.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.False(sibling1.(*view).isInvalid()) require.False(sibling2.(*view).isInvalid()) // Committing viewToCommit should invalidate siblings - require.NoError(viewToCommit.CommitToDB(context.Background())) + require.NoError(viewToCommit.CommitToDB(t.Context())) require.True(sibling1.(*view).isInvalid()) require.True(sibling2.(*view).isInvalid()) @@ -305,12 +305,12 @@ func Test_MerkleDB_CommitRangeProof_DeletesValuesInRange(t *testing.T) { // value that shouldn't be deleted require.NoError(db.Put([]byte("key6"), []byte("3"))) - startRoot, err := db.GetMerkleRoot(context.Background()) + startRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) // Get an empty proof proof, err := db.GetRangeProof( - context.Background(), + t.Context(), maybe.Nothing[[]byte](), maybe.Some([]byte("key3")), 10, @@ -328,10 +328,10 @@ func Test_MerkleDB_CommitRangeProof_DeletesValuesInRange(t *testing.T) { require.NoError(batch.Write()) // despite 
having no key/values in it, committing this proof should delete key1-key3. - _, err = db.CommitRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Some([]byte("key3")), proof) + _, err = db.CommitRangeProof(t.Context(), maybe.Nothing[[]byte](), maybe.Some([]byte("key3")), proof) require.NoError(err) - afterCommitRoot, err := db.GetMerkleRoot(context.Background()) + afterCommitRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(startRoot, afterCommitRoot) @@ -351,7 +351,7 @@ func Test_MerkleDB_CommitRangeProof_EmptyTrie(t *testing.T) { // Get a proof for the range [key1, key3]. proof, err := db1.GetRangeProof( - context.Background(), + t.Context(), maybe.Some([]byte("key1")), maybe.Some([]byte("key3")), 10, @@ -362,14 +362,14 @@ func Test_MerkleDB_CommitRangeProof_EmptyTrie(t *testing.T) { db2, err := getBasicDB() require.NoError(err) - _, err = db2.CommitRangeProof(context.Background(), maybe.Some([]byte("key1")), maybe.Some([]byte("key3")), proof) + _, err = db2.CommitRangeProof(t.Context(), maybe.Some([]byte("key1")), maybe.Some([]byte("key3")), proof) require.NoError(err) // [db2] should have the same key-value pairs as [db1]. - db2Root, err := db2.GetMerkleRoot(context.Background()) + db2Root, err := db2.GetMerkleRoot(t.Context()) require.NoError(err) - db1Root, err := db1.GetMerkleRoot(context.Background()) + db1Root, err := db1.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(db1Root, db2Root) @@ -389,7 +389,7 @@ func Test_MerkleDB_CommitRangeProof_TrieWithInitialValues(t *testing.T) { // Get a proof for the range [key1, key3]. proof, err := db1.GetRangeProof( - context.Background(), + t.Context(), maybe.Some([]byte("key1")), maybe.Some([]byte("key3")), 10, @@ -409,7 +409,7 @@ func Test_MerkleDB_CommitRangeProof_TrieWithInitialValues(t *testing.T) { // Commit the proof from [db1] to [db2] _, err = db2.CommitRangeProof( - context.Background(), + t.Context(), maybe.Some([]byte("key1")), maybe.Some([]byte("key3")), proof, @@ -419,10 +419,10 @@ func Test_MerkleDB_CommitRangeProof_TrieWithInitialValues(t *testing.T) { // [db2] should have the same key-value pairs as [db1]. // Note that "key25" was in the range covered by the proof, // so it's deleted from [db2]. - db2Root, err := db2.GetMerkleRoot(context.Background()) + db2Root, err := db2.GetMerkleRoot(t.Context()) require.NoError(err) - db1Root, err := db1.GetMerkleRoot(context.Background()) + db1Root, err := db1.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(db1Root, db2Root) @@ -436,7 +436,7 @@ func Test_MerkleDB_GetValues(t *testing.T) { writeBasicBatch(t, db) keys := [][]byte{{0}, {1}, {2}, {10}} - values, errors := db.GetValues(context.Background(), keys) + values, errors := db.GetValues(t.Context(), keys) require.Len(values, len(keys)) require.Len(errors, len(keys)) @@ -479,7 +479,7 @@ func Test_MerkleDB_HealthCheck(t *testing.T) { db, err := getBasicDB() require.NoError(err) - val, err := db.HealthCheck(context.Background()) + val, err := db.HealthCheck(t.Context()) require.NoError(err) require.Nil(val) } @@ -505,7 +505,7 @@ func TestDatabaseNewUntrackedView(t *testing.T) { require.Empty(db.childViews) // Commit the view - require.NoError(view.CommitToDB(context.Background())) + require.NoError(view.CommitToDB(t.Context())) // The untracked view should not be tracked by the parent database. require.Empty(db.childViews) @@ -520,7 +520,7 @@ func TestDatabaseNewViewFromBatchOpsTracked(t *testing.T) { // Create a new tracked view. 
view, err := db.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte{1}, Value: []byte{1}}, @@ -531,7 +531,7 @@ func TestDatabaseNewViewFromBatchOpsTracked(t *testing.T) { require.Len(db.childViews, 1) // Commit the view - require.NoError(view.CommitToDB(context.Background())) + require.NoError(view.CommitToDB(t.Context())) // The view should be tracked by the parent database. require.Contains(db.childViews, view) @@ -546,14 +546,14 @@ func TestDatabaseCommitChanges(t *testing.T) { dbRoot := db.getMerkleRoot() // Committing a nil view should be a no-op. - require.NoError(db.CommitToDB(context.Background())) + require.NoError(db.CommitToDB(t.Context())) require.Equal(dbRoot, db.getMerkleRoot()) // Root didn't change // Committing an invalid view should fail. - invalidView, err := db.NewView(context.Background(), ViewChanges{}) + invalidView, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) invalidView.(*view).invalidate() - err = invalidView.CommitToDB(context.Background()) + err = invalidView.CommitToDB(t.Context()) require.ErrorIs(err, ErrInvalid) // Add key-value pairs to the database @@ -564,7 +564,7 @@ func TestDatabaseCommitChanges(t *testing.T) { // Make a view and insert/delete a key-value pair. view1Intf, err := db.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: key3, Value: value3}, // New k-v pair @@ -575,17 +575,17 @@ func TestDatabaseCommitChanges(t *testing.T) { require.NoError(err) require.IsType(&view{}, view1Intf) view1 := view1Intf.(*view) - view1Root, err := view1.GetMerkleRoot(context.Background()) + view1Root, err := view1.GetMerkleRoot(t.Context()) require.NoError(err) // Make a second view - view2Intf, err := db.NewView(context.Background(), ViewChanges{}) + view2Intf, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view2Intf) view2 := view2Intf.(*view) // Make a view atop a view - view3Intf, err := view1.NewView(context.Background(), ViewChanges{}) + view3Intf, err := view1.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view3Intf) view3 := view3Intf.(*view) @@ -597,7 +597,7 @@ func TestDatabaseCommitChanges(t *testing.T) { // db // Commit view1 - require.NoError(view1.commitToDB(context.Background())) + require.NoError(view1.commitToDB(t.Context())) // Make sure the key-value pairs are correct. 
_, err = db.Get(key1) @@ -635,17 +635,17 @@ func TestDatabaseInvalidateChildrenExcept(t *testing.T) { require.NoError(err) // Create children - view1Intf, err := db.NewView(context.Background(), ViewChanges{}) + view1Intf, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view1Intf) view1 := view1Intf.(*view) - view2Intf, err := db.NewView(context.Background(), ViewChanges{}) + view2Intf, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view2Intf) view2 := view2Intf.(*view) - view3Intf, err := db.NewView(context.Background(), ViewChanges{}) + view3Intf, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view3Intf) view3 := view3Intf.(*view) @@ -739,11 +739,11 @@ func Test_MerkleDB_Random_Insert_Ordering(t *testing.T) { db, err := getBasicDB() require.NoError(err) - view1, err := db.NewView(context.Background(), ViewChanges{BatchOps: ops}) + view1, err := db.NewView(t.Context(), ViewChanges{BatchOps: ops}) require.NoError(err) // Get the root of the trie after applying [ops]. - view1Root, err := view1.GetMerkleRoot(context.Background()) + view1Root, err := view1.GetMerkleRoot(t.Context()) require.NoError(err) // Assert that the same operations applied in a different order @@ -754,10 +754,10 @@ func Test_MerkleDB_Random_Insert_Ordering(t *testing.T) { ops[i], ops[j] = ops[j], ops[i] }) - view2, err := db.NewView(context.Background(), ViewChanges{BatchOps: ops}) + view2, err := db.NewView(t.Context(), ViewChanges{BatchOps: ops}) require.NoError(err) - view2Root, err := view2.GetMerkleRoot(context.Background()) + view2Root, err := view2.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(view1Root, view2Root) @@ -1248,7 +1248,7 @@ func TestGetRangeProofAtRootEmptyRootID(t *testing.T) { require.NoError(err) _, err = db.GetRangeProofAtRoot( - context.Background(), + t.Context(), ids.Empty, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), @@ -1268,7 +1268,7 @@ func TestGetChangeProofEmptyRootID(t *testing.T) { rootID := db.getMerkleRoot() _, err = db.GetChangeProof( - context.Background(), + t.Context(), rootID, ids.Empty, maybe.Nothing[[]byte](), @@ -1283,7 +1283,7 @@ func TestCrashRecovery(t *testing.T) { baseDB := memdb.New() merkleDB, err := newDatabase( - context.Background(), + t.Context(), baseDB, NewConfig(), &mockMetrics{}, @@ -1295,13 +1295,13 @@ func TestCrashRecovery(t *testing.T) { require.NoError(merkleDBBatch.Put([]byte("expected?"), []byte("so"))) require.NoError(merkleDBBatch.Write()) - expectedRoot, err := merkleDB.GetMerkleRoot(context.Background()) + expectedRoot, err := merkleDB.GetMerkleRoot(t.Context()) require.NoError(err) // Do not `.Close()` the database to simulate a process crash. 
newMerkleDB, err := newDatabase( - context.Background(), + t.Context(), baseDB, NewConfig(), &mockMetrics{}, @@ -1316,7 +1316,7 @@ func TestCrashRecovery(t *testing.T) { require.NoError(err) require.Equal([]byte("so"), value) - rootAfterRecovery, err := newMerkleDB.GetMerkleRoot(context.Background()) + rootAfterRecovery, err := newMerkleDB.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(expectedRoot, rootAfterRecovery) } @@ -1334,7 +1334,7 @@ func BenchmarkCommitView(b *testing.B) { } } - ctx := context.Background() + ctx := b.Context() viewIntf, err := db.NewView(ctx, ViewChanges{BatchOps: ops}) require.NoError(b, err) @@ -1367,7 +1367,7 @@ func BenchmarkIteration(b *testing.B) { } } - ctx := context.Background() + ctx := b.Context() view, err := db.NewView(ctx, ViewChanges{BatchOps: ops}) require.NoError(b, err) @@ -1392,7 +1392,7 @@ func BenchmarkIteration(b *testing.B) { func Test_FindNextKey_InSync(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() now := time.Now().UnixNano() t.Logf("seed: %d", now) @@ -1443,7 +1443,7 @@ func Test_FindNextKey_InSync(t *testing.T) { func Test_FindNextKey_Deleted(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() dbToSync, err := New( ctx, memdb.New(), @@ -1459,7 +1459,7 @@ func Test_FindNextKey_Deleted(t *testing.T) { require.NoError(db.Put([]byte{0x13}, []byte{3})) // 0x12 was "deleted" and there should be no extra node in the proof since there was nothing with a common prefix - rangeProof, err := dbToSync.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Some([]byte{0x12}), 100) + rangeProof, err := dbToSync.GetRangeProof(t.Context(), maybe.Nothing[[]byte](), maybe.Some([]byte{0x12}), 100) require.NoError(err) nextKey, err := db.CommitRangeProof(ctx, maybe.Nothing[[]byte](), maybe.Some([]byte{0x20}), rangeProof) @@ -1467,7 +1467,7 @@ func Test_FindNextKey_Deleted(t *testing.T) { require.Equal(maybe.Some([]byte{0x13}), nextKey) // 0x11 was "deleted" and 0x11.0x11 should be in the exclusion proof - extraNodeProof, err := dbToSync.GetProof(context.Background(), []byte{0x11}) + extraNodeProof, err := dbToSync.GetProof(t.Context(), []byte{0x11}) require.NoError(err) rangeProof.EndProof = extraNodeProof.Path @@ -1481,7 +1481,7 @@ func Test_FindNextKey_Deleted(t *testing.T) { func Test_FindNextKey_BranchInLocal(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() db, err := New( ctx, memdb.New(), @@ -1503,7 +1503,7 @@ func Test_FindNextKey_BranchInLocal(t *testing.T) { func Test_FindNextKey_BranchInReceived(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() db, err := New( ctx, memdb.New(), @@ -1526,7 +1526,7 @@ func Test_FindNextKey_BranchInReceived(t *testing.T) { func Test_FindNextKey_ExtraValues(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() now := time.Now().UnixNano() t.Logf("seed: %d", now) r := rand.New(rand.NewSource(now)) // #nosec G404 @@ -1595,7 +1595,7 @@ func isPrefix(data []byte, prefix []byte) bool { func Test_FindNextKey_DifferentChild(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() now := time.Now().UnixNano() t.Logf("seed: %d", now) r := rand.New(rand.NewSource(now)) // #nosec G404 @@ -1638,7 +1638,7 @@ func Test_FindNextKey_DifferentChild(t *testing.T) { // way and comparing it to the actual result func TestFindNextKeyRandom(t *testing.T) { now := 
time.Now().UnixNano() - ctx := context.Background() + ctx := t.Context() t.Logf("seed: %d", now) rand := rand.New(rand.NewSource(now)) // #nosec G404 require := require.New(t) @@ -1729,7 +1729,7 @@ func TestFindNextKeyRandom(t *testing.T) { require.NoError(err) localProof, err := localDB.GetProof( - context.Background(), + t.Context(), lastReceivedKey, ) require.NoError(err) diff --git a/x/merkledb/history_test.go b/x/merkledb/history_test.go index 63967a77c147..3388a7f36f21 100644 --- a/x/merkledb/history_test.go +++ b/x/merkledb/history_test.go @@ -4,7 +4,6 @@ package merkledb import ( - "context" "math/rand" "testing" "time" @@ -22,7 +21,7 @@ func Test_History_Simple(t *testing.T) { require := require.New(t) db, err := newDB( - context.Background(), + t.Context(), memdb.New(), NewConfig(), ) @@ -35,36 +34,36 @@ func Test_History_Simple(t *testing.T) { require.NoError(err) require.Equal([]byte("value"), val) - origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + origProof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) + require.NoError(origProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) batch = db.NewBatch() require.NoError(batch.Put([]byte("key"), []byte("value0"))) require.NoError(batch.Write()) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + newProof, err := db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) + require.NoError(newProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) batch = db.NewBatch() require.NoError(batch.Put([]byte("key1"), []byte("value1"))) require.NoError(batch.Put([]byte("key8"), []byte("value8"))) require.NoError(batch.Write()) - newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + newProof, err = db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) + require.NoError(newProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) batch = db.NewBatch() require.NoError(batch.Put([]byte("k"), []byte("v"))) require.NoError(batch.Write()) - newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + newProof, err = db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - 
require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) + require.NoError(newProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) batch = db.NewBatch() require.NoError(batch.Delete([]byte("k"))) @@ -77,10 +76,10 @@ func Test_History_Simple(t *testing.T) { require.NoError(batch.Delete([]byte("key5"))) require.NoError(batch.Delete([]byte("key8"))) require.NoError(batch.Write()) - newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + newProof, err = db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) + require.NoError(newProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) } func Test_History_Large(t *testing.T) { @@ -94,7 +93,7 @@ func Test_History_Large(t *testing.T) { // after this loop. config.HistoryLength = uint(numIters) db, err := New( - context.Background(), + t.Context(), memdb.New(), config, ) @@ -133,17 +132,17 @@ func Test_History_Large(t *testing.T) { it.Release() require.NoError(batch.Write()) - root, err := db.GetMerkleRoot(context.Background()) + root, err := db.GetMerkleRoot(t.Context()) require.NoError(err) roots = append(roots, root) } for i := 0; i < numIters; i += numIters / 10 { - proof, err := db.GetRangeProofAtRoot(context.Background(), roots[i], maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 10) + proof, err := db.GetRangeProofAtRoot(t.Context(), roots[i], maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 10) require.NoError(err) require.NotNil(proof) - require.NoError(proof.Verify(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), roots[i], BranchFactorToTokenSize[config.BranchFactor], config.Hasher, len(proof.KeyChanges))) + require.NoError(proof.Verify(t.Context(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), roots[i], BranchFactorToTokenSize[config.BranchFactor], config.Hasher, len(proof.KeyChanges))) } } } @@ -155,7 +154,7 @@ func Test_History_Bad_GetValueChanges_Input(t *testing.T) { config.HistoryLength = 5 db, err := newDB( - context.Background(), + t.Context(), memdb.New(), config, ) @@ -221,7 +220,7 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { config.HistoryLength = 2 db, err := newDB( - context.Background(), + t.Context(), memdb.New(), config, ) @@ -233,11 +232,11 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { require.NoError(batch.Write()) origRootID := db.getMerkleRoot() - origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + origProof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) require.NoError(origProof.Verify( - context.Background(), + t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, @@ -252,11 +251,11 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { require.NoError(batch.Write()) // ensure that previous root is still present and generates a valid 
proof - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + newProof, err := db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) require.NoError(newProof.Verify( - context.Background(), + t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, @@ -271,7 +270,7 @@ func Test_History_Trigger_History_Queue_Looping(t *testing.T) { require.NoError(batch.Write()) // proof from first root shouldn't be generatable since it should have been removed from the history - _, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + _, err = db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.ErrorIs(err, sync.ErrInsufficientHistory) } @@ -281,7 +280,7 @@ func Test_History_Values_Lookup_Over_Queue_Break(t *testing.T) { config := NewConfig() config.HistoryLength = 4 db, err := newDB( - context.Background(), + t.Context(), memdb.New(), config, ) @@ -342,7 +341,7 @@ func Test_History_RepeatedRoot(t *testing.T) { require := require.New(t) db, err := newDB( - context.Background(), + t.Context(), memdb.New(), NewConfig(), ) @@ -353,21 +352,21 @@ func Test_History_RepeatedRoot(t *testing.T) { require.NoError(batch.Put([]byte("key3"), []byte("value3"))) require.NoError(batch.Write()) - origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + origProof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) + require.NoError(origProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) batch = db.NewBatch() require.NoError(batch.Put([]byte("key1"), []byte("other"))) require.NoError(batch.Put([]byte("key2"), []byte("other"))) require.NoError(batch.Put([]byte("key3"), []byte("other"))) require.NoError(batch.Write()) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + newProof, err := db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) + require.NoError(newProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) // revert state to be the same as in orig proof batch = db.NewBatch() @@ -376,17 +375,17 @@ func Test_History_RepeatedRoot(t *testing.T) { require.NoError(batch.Put([]byte("key3"), []byte("value3"))) require.NoError(batch.Write()) - newProof, err = db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + newProof, err = db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - 
require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) + require.NoError(newProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) } func Test_History_ExcessDeletes(t *testing.T) { require := require.New(t) db, err := newDB( - context.Background(), + t.Context(), memdb.New(), NewConfig(), ) @@ -395,11 +394,11 @@ func Test_History_ExcessDeletes(t *testing.T) { require.NoError(batch.Put([]byte("key"), []byte("value"))) require.NoError(batch.Write()) - origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + origProof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) + require.NoError(origProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) batch = db.NewBatch() require.NoError(batch.Delete([]byte("key1"))) @@ -408,17 +407,17 @@ func Test_History_ExcessDeletes(t *testing.T) { require.NoError(batch.Delete([]byte("key4"))) require.NoError(batch.Delete([]byte("key5"))) require.NoError(batch.Write()) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + newProof, err := db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) + require.NoError(newProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) } func Test_History_DontIncludeAllNodes(t *testing.T) { require := require.New(t) db, err := newDB( - context.Background(), + t.Context(), memdb.New(), NewConfig(), ) @@ -427,26 +426,26 @@ func Test_History_DontIncludeAllNodes(t *testing.T) { require.NoError(batch.Put([]byte("key"), []byte("value"))) require.NoError(batch.Write()) - origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + origProof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) + require.NoError(origProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) batch = db.NewBatch() require.NoError(batch.Put([]byte("z"), []byte("z"))) require.NoError(batch.Write()) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + newProof, err := db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) 
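Aside (illustrative, not part of the patch): the history tests above repeat one round trip many times — capture a root, keep writing, then request a proof at the old root and verify it against that same root. A condensed, package-internal sketch, with signatures mirrored from the surrounding hunks and the concrete *merkleDB type name assumed:

func historicalProofStillVerifies(t *testing.T, db *merkleDB) {
	require := require.New(t)

	origRootID := db.getMerkleRoot()

	// Later writes move the current root away from origRootID.
	batch := db.NewBatch()
	require.NoError(batch.Put([]byte("key8"), []byte("value8")))
	require.NoError(batch.Write())

	// A proof generated at the historical root must still verify against it.
	proof, err := db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10)
	require.NoError(err)
	require.NoError(proof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(proof.KeyChanges)))
}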
require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) + require.NoError(newProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) } func Test_History_Branching2Nodes(t *testing.T) { require := require.New(t) db, err := newDB( - context.Background(), + t.Context(), memdb.New(), NewConfig(), ) @@ -455,26 +454,26 @@ func Test_History_Branching2Nodes(t *testing.T) { require.NoError(batch.Put([]byte("key"), []byte("value"))) require.NoError(batch.Write()) - origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + origProof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) + require.NoError(origProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) batch = db.NewBatch() require.NoError(batch.Put([]byte("k"), []byte("v"))) require.NoError(batch.Write()) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + newProof, err := db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) + require.NoError(newProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) } func Test_History_Branching3Nodes(t *testing.T) { require := require.New(t) db, err := newDB( - context.Background(), + t.Context(), memdb.New(), NewConfig(), ) @@ -483,19 +482,19 @@ func Test_History_Branching3Nodes(t *testing.T) { require.NoError(batch.Put([]byte("key123"), []byte("value123"))) require.NoError(batch.Write()) - origProof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + origProof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(origProof) origRootID := db.rootID - require.NoError(origProof.Verify(context.Background(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) + require.NoError(origProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(origProof.KeyChanges))) batch = db.NewBatch() require.NoError(batch.Put([]byte("key321"), []byte("value321"))) require.NoError(batch.Write()) - newProof, err := db.GetRangeProofAtRoot(context.Background(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) + newProof, err := db.GetRangeProofAtRoot(t.Context(), origRootID, maybe.Some([]byte("k")), maybe.Some([]byte("key3")), 10) require.NoError(err) require.NotNil(newProof) - require.NoError(newProof.Verify(context.Background(), maybe.Some([]byte("k")), 
maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) + require.NoError(newProof.Verify(t.Context(), maybe.Some([]byte("k")), maybe.Some([]byte("key3")), origRootID, db.tokenSize, db.hasher, len(newProof.KeyChanges))) } func Test_History_MaxLength(t *testing.T) { @@ -504,7 +503,7 @@ func Test_History_MaxLength(t *testing.T) { config := NewConfig() config.HistoryLength = 2 db, err := newDB( - context.Background(), + t.Context(), memdb.New(), config, ) @@ -514,7 +513,7 @@ func Test_History_MaxLength(t *testing.T) { require.NoError(batch.Put([]byte("key"), []byte("value"))) require.NoError(batch.Write()) - oldRoot, err := db.GetMerkleRoot(context.Background()) + oldRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) batch = db.NewBatch() @@ -534,13 +533,13 @@ func Test_Change_List(t *testing.T) { require := require.New(t) db, err := newDB( - context.Background(), + t.Context(), memdb.New(), NewConfig(), ) require.NoError(err) - emptyRoot, err := db.GetMerkleRoot(context.Background()) + emptyRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) batch := db.NewBatch() @@ -550,7 +549,7 @@ func Test_Change_List(t *testing.T) { require.NoError(batch.Put([]byte("key23"), []byte("value23"))) require.NoError(batch.Put([]byte("key24"), []byte("value24"))) require.NoError(batch.Write()) - startRoot, err := db.GetMerkleRoot(context.Background()) + startRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) changes, err := db.history.getValueChanges(emptyRoot, startRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 100) @@ -601,7 +600,7 @@ func Test_Change_List(t *testing.T) { require.NoError(batch.Put([]byte("key29"), []byte("value29"))) require.NoError(batch.Write()) - endRoot, err := db.GetMerkleRoot(context.Background()) + endRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) changes, err = db.history.getValueChanges(startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 100) @@ -653,7 +652,7 @@ func Test_Change_List(t *testing.T) { require.NoError(batch.Put([]byte("key24"), []byte("value24new"))) require.NoError(batch.Write()) - endRoot, err = db.GetMerkleRoot(context.Background()) + endRoot, err = db.GetMerkleRoot(t.Context()) require.NoError(err) changes, err = db.history.getValueChanges(startRoot, endRoot, maybe.Some[[]byte]([]byte("key22")), maybe.Some[[]byte]([]byte("key31")), 8) @@ -837,20 +836,20 @@ func TestHistoryKeyChangeRollback(t *testing.T) { rootIDs := []ids.ID{} for _, batchOps := range keyChangesBatches { - view, err := db.NewView(context.Background(), ViewChanges{ + view, err := db.NewView(t.Context(), ViewChanges{ BatchOps: batchOps, }) require.NoError(err) - require.NoError(view.CommitToDB(context.Background())) + require.NoError(view.CommitToDB(t.Context())) - rootID, err := db.GetMerkleRoot(context.Background()) + rootID, err := db.GetMerkleRoot(t.Context()) require.NoError(err) rootIDs = append(rootIDs, rootID) } - changeProof, err := db.GetChangeProof(context.Background(), rootIDs[0], rootIDs[len(rootIDs)-1], maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 100) + changeProof, err := db.GetChangeProof(t.Context(), rootIDs[0], rootIDs[len(rootIDs)-1], maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 100) require.NoError(err) require.Equal([]KeyChange{ diff --git a/x/merkledb/metrics_test.go b/x/merkledb/metrics_test.go index 03115c0533c8..53687b33d1ed 100644 --- a/x/merkledb/metrics_test.go +++ b/x/merkledb/metrics_test.go @@ -4,7 +4,6 @@ package merkledb import ( - "context" 
"testing" "github.com/stretchr/testify/require" @@ -20,7 +19,7 @@ func Test_Metrics_Basic_Usage(t *testing.T) { config.Reg = nil db, err := newDB( - context.Background(), + t.Context(), memdb.New(), config, ) @@ -52,7 +51,7 @@ func Test_Metrics_Basic_Usage(t *testing.T) { func Test_Metrics_Initialize(t *testing.T) { db, err := New( - context.Background(), + t.Context(), memdb.New(), NewConfig(), ) diff --git a/x/merkledb/network_server_test.go b/x/merkledb/network_server_test.go index 191d3d8a2fc7..438b0227f0c3 100644 --- a/x/merkledb/network_server_test.go +++ b/x/merkledb/network_server_test.go @@ -4,7 +4,6 @@ package merkledb import ( - "context" "math/rand" "testing" "time" @@ -29,7 +28,7 @@ func Test_Server_GetRangeProof(t *testing.T) { smallTrieDB, err := generateTrieWithMinKeyLen(t, r, xsync.DefaultRequestKeyLimit, 1) require.NoError(t, err) - smallTrieRoot, err := smallTrieDB.GetMerkleRoot(context.Background()) + smallTrieRoot, err := smallTrieDB.GetMerkleRoot(t.Context()) require.NoError(t, err) tests := []struct { @@ -120,7 +119,7 @@ func Test_Server_GetRangeProof(t *testing.T) { handler := xsync.NewGetRangeProofHandler(smallTrieDB, rangeProofMarshaler) requestBytes, err := proto.Marshal(test.request) require.NoError(err) - responseBytes, err := handler.AppRequest(context.Background(), test.nodeID, time.Time{}, requestBytes) + responseBytes, err := handler.AppRequest(t.Context(), test.nodeID, time.Time{}, requestBytes) require.ErrorIs(err, test.expectedErr) if test.expectedErr != nil { return @@ -153,12 +152,12 @@ func Test_Server_GetChangeProof(t *testing.T) { r := rand.New(rand.NewSource(now)) // #nosec G404 serverDB, err := New( - context.Background(), + t.Context(), memdb.New(), newDefaultDBConfig(), ) require.NoError(t, err) - startRoot, err := serverDB.GetMerkleRoot(context.Background()) + startRoot, err := serverDB.GetMerkleRoot(t.Context()) require.NoError(t, err) // create changes @@ -190,14 +189,14 @@ func Test_Server_GetChangeProof(t *testing.T) { it.Release() view, err := serverDB.NewView( - context.Background(), + t.Context(), ViewChanges{BatchOps: ops}, ) require.NoError(t, err) - require.NoError(t, view.CommitToDB(context.Background())) + require.NoError(t, view.CommitToDB(t.Context())) } - endRoot, err := serverDB.GetMerkleRoot(context.Background()) + endRoot, err := serverDB.GetMerkleRoot(t.Context()) require.NoError(t, err) fakeRootID := ids.GenerateTestID() @@ -339,7 +338,7 @@ func Test_Server_GetChangeProof(t *testing.T) { requestBytes, err := proto.Marshal(test.request) require.NoError(err) - proofBytes, err := handler.AppRequest(context.Background(), test.nodeID, time.Time{}, requestBytes) + proofBytes, err := handler.AppRequest(t.Context(), test.nodeID, time.Time{}, requestBytes) require.ErrorIs(err, test.expectedErr) if test.expectedErr != nil { diff --git a/x/merkledb/proof_test.go b/x/merkledb/proof_test.go index 85ae3c39d295..60b195fceb8e 100644 --- a/x/merkledb/proof_test.go +++ b/x/merkledb/proof_test.go @@ -5,7 +5,6 @@ package merkledb import ( "bytes" - "context" "math/rand" "testing" "time" @@ -45,7 +44,7 @@ func Test_Proof_Empty(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - err := tt.proof.Verify(context.Background(), ids.Empty, 4, DefaultHasher) + err := tt.proof.Verify(t.Context(), ids.Empty, 4, DefaultHasher) require.ErrorIs(err, tt.wantErr) }) } @@ -60,11 +59,11 @@ func Test_Proof_Exclusion_Happy_Path(t *testing.T) { writeBasicBatch(t, db) for _, k := range []byte{5, 6, 7, 8} { - proof, err := 
db.GetProof(context.Background(), []byte{k}) + proof, err := db.GetProof(t.Context(), []byte{k}) require.NoError(err) require.NotNil(proof) - err = proof.Verify(context.Background(), db.getMerkleRoot(), db.tokenSize, db.hasher) + err = proof.Verify(t.Context(), db.getMerkleRoot(), db.tokenSize, db.hasher) require.NoError(err) } } @@ -78,13 +77,13 @@ func Test_Proof_Exclusion_Has_Proof_Value(t *testing.T) { writeBasicBatch(t, db) for _, k := range []byte{5, 6, 7, 8} { - proof, err := db.GetProof(context.Background(), []byte{k}) + proof, err := db.GetProof(t.Context(), []byte{k}) require.NoError(err) require.NotNil(proof) proof.Value = maybe.Some([]byte{}) - err = proof.Verify(context.Background(), db.getMerkleRoot(), db.tokenSize, db.hasher) + err = proof.Verify(t.Context(), db.getMerkleRoot(), db.tokenSize, db.hasher) require.ErrorIs(err, ErrExclusionProofUnexpectedValue) } } @@ -132,13 +131,13 @@ func Test_Proof_Inclusion(t *testing.T) { writeBasicBatch(t, db) for _, k := range []byte{0, 1, 2, 3, 4} { - proof, err := db.GetProof(context.Background(), []byte{k}) + proof, err := db.GetProof(t.Context(), []byte{k}) require.NoError(err) require.NotNil(proof) tt.modify(proof) - err = proof.Verify(context.Background(), db.getMerkleRoot(), db.tokenSize, db.hasher) + err = proof.Verify(t.Context(), db.getMerkleRoot(), db.tokenSize, db.hasher) require.ErrorIs(err, tt.wantErr) } }) @@ -200,13 +199,13 @@ func Test_Proof_Invalid_Proof(t *testing.T) { db := setup(t) - proof, err := db.GetProof(context.Background(), tt.key) + proof, err := db.GetProof(t.Context(), tt.key) require.NoError(err) require.NotNil(proof) tt.modify(proof) - err = proof.Verify(context.Background(), db.getMerkleRoot(), db.tokenSize, db.hasher) + err = proof.Verify(t.Context(), db.getMerkleRoot(), db.tokenSize, db.hasher) require.ErrorIs(err, tt.wantErr) }) } @@ -236,12 +235,12 @@ func Test_RangeProof_Extra_Value(t *testing.T) { require.NoError(err) require.Equal([]byte{2}, val) - proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte{1}), maybe.Some([]byte{5, 5}), 10) + proof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte{1}), maybe.Some([]byte{5, 5}), 10) require.NoError(err) require.NotNil(proof) require.NoError(proof.Verify( - context.Background(), + t.Context(), maybe.Some([]byte{1}), maybe.Some([]byte{5, 5}), db.rootID, @@ -253,7 +252,7 @@ func Test_RangeProof_Extra_Value(t *testing.T) { proof.KeyChanges = append(proof.KeyChanges, KeyChange{Key: []byte{5}, Value: maybe.Some([]byte{5})}) err = proof.Verify( - context.Background(), + t.Context(), maybe.Some([]byte{1}), maybe.Some([]byte{5, 5}), db.rootID, @@ -324,13 +323,13 @@ func Test_RangeProof_Verify_Bad_Data(t *testing.T) { require.NoError(err) writeBasicBatch(t, db) - proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), 50) + proof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), 50) require.NoError(err) require.NotNil(proof) tt.malform(proof) - err = proof.Verify(context.Background(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), db.getMerkleRoot(), db.tokenSize, db.hasher, len(proof.KeyChanges)) + err = proof.Verify(t.Context(), maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), db.getMerkleRoot(), db.tokenSize, db.hasher, len(proof.KeyChanges)) require.ErrorIs(err, tt.expectedErr) }) } @@ -342,13 +341,13 @@ func Test_RangeProof_MaxLength(t *testing.T) { dbTrie, err := getBasicDB() require.NoError(err) require.NotNil(dbTrie) - trie, err := 
dbTrie.NewView(context.Background(), ViewChanges{}) + trie, err := dbTrie.NewView(t.Context(), ViewChanges{}) require.NoError(err) - _, err = trie.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), -1) + _, err = trie.GetRangeProof(t.Context(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), -1) require.ErrorIs(err, ErrInvalidMaxLength) - _, err = trie.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 0) + _, err = trie.GetRangeProof(t.Context(), maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 0) require.ErrorIs(err, ErrInvalidMaxLength) } @@ -360,7 +359,7 @@ func Test_Proof_Path(t *testing.T) { require.NotNil(dbTrie) trie, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ { @@ -392,14 +391,14 @@ func Test_Proof_Path(t *testing.T) { ) require.NoError(err) - expectedRootID, err := trie.GetMerkleRoot(context.Background()) + expectedRootID, err := trie.GetMerkleRoot(t.Context()) require.NoError(err) - proof, err := trie.GetProof(context.Background(), []byte("key1")) + proof, err := trie.GetProof(t.Context(), []byte("key1")) require.NoError(err) require.NotNil(proof) - require.NoError(proof.Verify(context.Background(), expectedRootID, dbTrie.tokenSize, dbTrie.hasher)) + require.NoError(proof.Verify(t.Context(), expectedRootID, dbTrie.tokenSize, dbTrie.hasher)) require.Len(proof.Path, 3) @@ -583,7 +582,7 @@ func Test_RangeProof_Syntactic_Verify(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - err := tt.proof.Verify(context.Background(), tt.start, tt.end, ids.Empty, 4, DefaultHasher, len(tt.proof.KeyChanges)) + err := tt.proof.Verify(t.Context(), tt.start, tt.end, ids.Empty, 4, DefaultHasher, len(tt.proof.KeyChanges)) require.ErrorIs(t, err, tt.expectedErr) }) } @@ -596,7 +595,7 @@ func Test_RangeProof(t *testing.T) { require.NoError(err) writeBasicBatch(t, db) - proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte{3}), maybe.Some([]byte{3}), 3) + proof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte{3}), maybe.Some([]byte{3}), 3) require.NoError(err) require.NotNil(proof) require.Empty(proof.StartProof) @@ -604,7 +603,7 @@ func Test_RangeProof(t *testing.T) { require.Len(proof.KeyChanges, 1) require.NoError(proof.Verify( - context.Background(), + t.Context(), maybe.Some([]byte{3}), maybe.Some([]byte{3}), db.rootID, @@ -613,7 +612,7 @@ func Test_RangeProof(t *testing.T) { len(proof.KeyChanges), )) - proof, err = db.GetRangeProof(context.Background(), maybe.Some([]byte{1}), maybe.Some([]byte{3, 5}), 10) + proof, err = db.GetRangeProof(t.Context(), maybe.Some([]byte{1}), maybe.Some([]byte{3, 5}), 10) require.NoError(err) require.NotNil(proof) require.Len(proof.KeyChanges, 3) @@ -635,7 +634,7 @@ func Test_RangeProof(t *testing.T) { require.Equal([]byte{1}, proof.StartProof[0].Key.Bytes()) require.NoError(proof.Verify( - context.Background(), + t.Context(), maybe.Some([]byte{1}), maybe.Some([]byte{3, 5}), db.rootID, @@ -654,7 +653,7 @@ func Test_RangeProof_BadBounds(t *testing.T) { require.NoError(db.Put(nil, nil)) // non-nil start/end - proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte{4}), maybe.Some([]byte{3}), 50) + proof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte{4}), maybe.Some([]byte{3}), 50) require.ErrorIs(err, ErrStartAfterEnd) require.Nil(proof) } @@ -675,7 +674,7 @@ func Test_RangeProof_NilStart(t *testing.T) { require.NoError(err) require.Equal([]byte("value1"), val) - 
proof, err := db.GetRangeProof(context.Background(), maybe.Nothing[[]byte](), maybe.Some([]byte("key35")), 2) + proof, err := db.GetRangeProof(t.Context(), maybe.Nothing[[]byte](), maybe.Some([]byte("key35")), 2) require.NoError(err) require.NotNil(proof) @@ -691,7 +690,7 @@ func Test_RangeProof_NilStart(t *testing.T) { require.Equal(ToKey([]byte("key2")).Take(28), proof.EndProof[0].Key) require.NoError(proof.Verify( - context.Background(), + t.Context(), maybe.Nothing[[]byte](), maybe.Some([]byte("key35")), db.rootID, @@ -711,7 +710,7 @@ func Test_RangeProof_NilEnd(t *testing.T) { require.NoError(err) proof, err := db.GetRangeProof( // Should have keys [1], [2] - context.Background(), + t.Context(), maybe.Some([]byte{1}), maybe.Nothing[[]byte](), 2, @@ -733,7 +732,7 @@ func Test_RangeProof_NilEnd(t *testing.T) { require.Equal([]byte{2}, proof.EndProof[1].Key.Bytes()) require.NoError(proof.Verify( - context.Background(), + t.Context(), maybe.Some([]byte{1}), maybe.Nothing[[]byte](), db.rootID, @@ -758,7 +757,7 @@ func Test_RangeProof_EmptyValues(t *testing.T) { require.NoError(err) require.Equal([]byte("value1"), val) - proof, err := db.GetRangeProof(context.Background(), maybe.Some([]byte("key1")), maybe.Some([]byte("key2")), 10) + proof, err := db.GetRangeProof(t.Context(), maybe.Some([]byte("key1")), maybe.Some([]byte("key2")), 10) require.NoError(err) require.NotNil(proof) @@ -778,7 +777,7 @@ func Test_RangeProof_EmptyValues(t *testing.T) { require.Equal(ToKey([]byte("key2")), proof.EndProof[1].Key, db.tokenSize) require.NoError(proof.Verify( - context.Background(), + t.Context(), maybe.Some([]byte("key1")), maybe.Some([]byte("key2")), db.rootID, @@ -796,7 +795,7 @@ func Test_ChangeProof_Missing_History_For_EndRoot(t *testing.T) { config := NewConfig() db, err := newDatabase( - context.Background(), + t.Context(), memdb.New(), config, &mockMetrics{}, @@ -808,13 +807,13 @@ func Test_ChangeProof_Missing_History_For_EndRoot(t *testing.T) { key := make([]byte, 16) _, _ = rand.Read(key) require.NoError(db.Put(key, nil)) - root, err := db.GetMerkleRoot(context.Background()) + root, err := db.GetMerkleRoot(t.Context()) require.NoError(err) roots = append(roots, root) } _, err = db.GetChangeProof( - context.Background(), + t.Context(), roots[len(roots)-1], ids.GenerateTestID(), maybe.Nothing[[]byte](), @@ -825,7 +824,7 @@ func Test_ChangeProof_Missing_History_For_EndRoot(t *testing.T) { require.ErrorIs(err, xsync.ErrInsufficientHistory) _, err = db.GetChangeProof( - context.Background(), + t.Context(), roots[0], roots[len(roots)-1], maybe.Nothing[[]byte](), @@ -836,7 +835,7 @@ func Test_ChangeProof_Missing_History_For_EndRoot(t *testing.T) { require.ErrorIs(err, xsync.ErrInsufficientHistory) _, err = db.GetChangeProof( - context.Background(), + t.Context(), roots[1], roots[len(roots)-1], maybe.Nothing[[]byte](), @@ -852,16 +851,16 @@ func Test_ChangeProof_BadBounds(t *testing.T) { db, err := getBasicDB() require.NoError(err) - startRoot, err := db.GetMerkleRoot(context.Background()) + startRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) - require.NoError(db.PutContext(context.Background(), []byte{0}, []byte{0})) + require.NoError(db.PutContext(t.Context(), []byte{0}, []byte{0})) - endRoot, err := db.GetMerkleRoot(context.Background()) + endRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) // non-nil start/end - proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, maybe.Some([]byte("key4")), maybe.Some([]byte("key3")), 50) + proof, err := 
db.GetChangeProof(t.Context(), startRoot, endRoot, maybe.Some([]byte("key4")), maybe.Some([]byte("key3")), 50) require.ErrorIs(err, ErrStartAfterEnd) require.Nil(proof) } @@ -878,7 +877,7 @@ func Test_ChangeProof_Verify(t *testing.T) { require.NoError(batch.Put([]byte("key23"), []byte("value3"))) require.NoError(batch.Put([]byte("key24"), []byte("value4"))) require.NoError(batch.Write()) - startRoot, err := db.GetMerkleRoot(context.Background()) + startRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) // create a second db that has "synced" to the start root @@ -913,42 +912,42 @@ func Test_ChangeProof_Verify(t *testing.T) { require.NoError(batch.Delete([]byte("key22"))) require.NoError(batch.Write()) - endRoot, err := db.GetMerkleRoot(context.Background()) + endRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) // non-nil start/end - proof, err := db.GetChangeProof(context.Background(), startRoot, endRoot, maybe.Some([]byte("key21")), maybe.Some([]byte("key30")), 50) + proof, err := db.GetChangeProof(t.Context(), startRoot, endRoot, maybe.Some([]byte("key21")), maybe.Some([]byte("key30")), 50) require.NoError(err) require.NotNil(proof) - require.NoError(dbClone.VerifyChangeProof(context.Background(), proof, maybe.Some([]byte("key21")), maybe.Some([]byte("key30")), db.getMerkleRoot(), len(proof.KeyChanges))) + require.NoError(dbClone.VerifyChangeProof(t.Context(), proof, maybe.Some([]byte("key21")), maybe.Some([]byte("key30")), db.getMerkleRoot(), len(proof.KeyChanges))) // low maxLength - proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 5) + proof, err = db.GetChangeProof(t.Context(), startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 5) require.NoError(err) require.NotNil(proof) - require.NoError(dbClone.VerifyChangeProof(context.Background(), proof, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), db.getMerkleRoot(), len(proof.KeyChanges))) + require.NoError(dbClone.VerifyChangeProof(t.Context(), proof, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), db.getMerkleRoot(), len(proof.KeyChanges))) // nil start/end - proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 50) + proof, err = db.GetChangeProof(t.Context(), startRoot, endRoot, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), 50) require.NoError(err) require.NotNil(proof) - require.NoError(dbClone.VerifyChangeProof(context.Background(), proof, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), endRoot, len(proof.KeyChanges))) - nextKey, err := dbClone.CommitChangeProof(context.Background(), maybe.Nothing[[]byte](), proof) + require.NoError(dbClone.VerifyChangeProof(t.Context(), proof, maybe.Nothing[[]byte](), maybe.Nothing[[]byte](), endRoot, len(proof.KeyChanges))) + nextKey, err := dbClone.CommitChangeProof(t.Context(), maybe.Nothing[[]byte](), proof) require.NoError(err) require.True(nextKey.IsNothing()) - newRoot, err := dbClone.GetMerkleRoot(context.Background()) + newRoot, err := dbClone.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(endRoot, newRoot) - proof, err = db.GetChangeProof(context.Background(), startRoot, endRoot, maybe.Some([]byte("key20")), maybe.Some([]byte("key30")), 50) + proof, err = db.GetChangeProof(t.Context(), startRoot, endRoot, maybe.Some([]byte("key20")), maybe.Some([]byte("key30")), 50) require.NoError(err) require.NotNil(proof) - require.NoError(dbClone.VerifyChangeProof(context.Background(), 
proof, maybe.Some([]byte("key20")), maybe.Some([]byte("key30")), db.getMerkleRoot(), len(proof.KeyChanges))) + require.NoError(dbClone.VerifyChangeProof(t.Context(), proof, maybe.Some([]byte("key20")), maybe.Some([]byte("key30")), db.getMerkleRoot(), len(proof.KeyChanges))) } func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { @@ -994,12 +993,12 @@ func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { db, err := getBasicDB() require.NoError(err) - startRoot, err := db.GetMerkleRoot(context.Background()) + startRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) writeBasicBatch(t, db) - endRoot, err := db.GetMerkleRoot(context.Background()) + endRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) // create a second db that will be synced to the first db @@ -1007,7 +1006,7 @@ func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { require.NoError(err) proof, err := db.GetChangeProof( - context.Background(), + t.Context(), startRoot, endRoot, maybe.Some([]byte{2}), @@ -1020,7 +1019,7 @@ func Test_ChangeProof_Verify_Bad_Data(t *testing.T) { tt.malform(proof) err = dbClone.VerifyChangeProof( - context.Background(), + t.Context(), proof, maybe.Some([]byte{2}), maybe.Some([]byte{3, 0}), @@ -1196,7 +1195,7 @@ func Test_ChangeProof_Syntactic_Verify(t *testing.T) { db, err := getBasicDB() require.NoError(err) - err = db.VerifyChangeProof(context.Background(), tt.proof, tt.start, tt.end, ids.Empty, 10) + err = db.VerifyChangeProof(t.Context(), tt.proof, tt.start, tt.end, ids.Empty, 10) require.ErrorIs(err, tt.expectedErr) }) } @@ -1746,11 +1745,11 @@ func FuzzRangeProofInvariants(f *testing.F) { end = maybe.Some(endBytes) } - rootID, err := db.GetMerkleRoot(context.Background()) + rootID, err := db.GetMerkleRoot(t.Context()) require.NoError(err) rangeProof, err := db.GetRangeProof( - context.Background(), + t.Context(), start, end, int(maxProofLen), @@ -1762,7 +1761,7 @@ func FuzzRangeProofInvariants(f *testing.F) { require.NoError(err) require.NoError(rangeProof.Verify( - context.Background(), + t.Context(), start, end, rootID, @@ -1811,10 +1810,10 @@ func FuzzRangeProofInvariants(f *testing.F) { Value: value, } - rootID, err := db.GetMerkleRoot(context.Background()) + rootID, err := db.GetMerkleRoot(t.Context()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize, db.hasher)) + require.NoError(proof.Verify(t.Context(), rootID, db.tokenSize, db.hasher)) default: require.NotEmpty(rangeProof.EndProof) @@ -1826,10 +1825,10 @@ func FuzzRangeProofInvariants(f *testing.F) { Value: greatestKV.Value, } - rootID, err := db.GetMerkleRoot(context.Background()) + rootID, err := db.GetMerkleRoot(t.Context()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize, db.hasher)) + require.NoError(proof.Verify(t.Context(), rootID, db.tokenSize, db.hasher)) } }) } @@ -1861,16 +1860,16 @@ func FuzzProofVerification(f *testing.F) { } proof, err := db.GetProof( - context.Background(), + t.Context(), key, ) require.NoError(err) - rootID, err := db.GetMerkleRoot(context.Background()) + rootID, err := db.GetMerkleRoot(t.Context()) require.NoError(err) - require.NoError(proof.Verify(context.Background(), rootID, db.tokenSize, db.hasher)) + require.NoError(proof.Verify(t.Context(), rootID, db.tokenSize, db.hasher)) // Insert a new key-value pair newKey := make([]byte, 32) @@ -1902,14 +1901,14 @@ func FuzzChangeProofVerification(f *testing.F) { config := NewConfig() db, err := newDatabase( - context.Background(), + 
t.Context(), memdb.New(), config, &mockMetrics{}, ) require.NoError(err) - startRootID, err := db.GetMerkleRoot(context.Background()) + startRootID, err := db.GetMerkleRoot(t.Context()) require.NoError(err) // Insert a bunch of random key values. @@ -1922,7 +1921,7 @@ func FuzzChangeProofVerification(f *testing.F) { 0.25, ) - endRootID, err := db.GetMerkleRoot(context.Background()) + endRootID, err := db.GetMerkleRoot(t.Context()) require.NoError(err) // Make sure proof bounds are valid @@ -1945,7 +1944,7 @@ func FuzzChangeProofVerification(f *testing.F) { } changeProof, err := db.GetChangeProof( - context.Background(), + t.Context(), startRootID, endRootID, start, @@ -1955,7 +1954,7 @@ func FuzzChangeProofVerification(f *testing.F) { require.NoError(err) require.NoError(db.VerifyChangeProof( - context.Background(), + t.Context(), changeProof, start, end, @@ -2010,7 +2009,7 @@ func Benchmark_RangeProofs(b *testing.B) { } b.StartTimer() - proof, err := db.GetRangeProof(context.Background(), maybe.Some(start), maybe.Some(end), maxLength) + proof, err := db.GetRangeProof(b.Context(), maybe.Some(start), maybe.Some(end), maxLength) require.NoError(b, err) require.NotNil(b, proof) @@ -2076,7 +2075,7 @@ func Benchmark_ChangeProofs(b *testing.B) { b.StartTimer() proof, err := db.GetChangeProof( - context.Background(), + b.Context(), merkleRoots[startRootIdx], merkleRoots[endRootIdx], maybe.Some(start), diff --git a/x/merkledb/sync_test.go b/x/merkledb/sync_test.go index 4af792463130..3d8153c72afa 100644 --- a/x/merkledb/sync_test.go +++ b/x/merkledb/sync_test.go @@ -34,13 +34,13 @@ func Test_Creation(t *testing.T) { require := require.New(t) db, err := New( - context.Background(), + t.Context(), memdb.New(), newDefaultDBConfig(), ) require.NoError(err) - ctx := context.Background() + ctx := t.Context() syncer, err := xsync.NewManager( db, xsync.ManagerConfig[*RangeProof, *ChangeProof]{ @@ -55,8 +55,8 @@ func Test_Creation(t *testing.T) { ) require.NoError(err) require.NotNil(syncer) - require.NoError(syncer.Start(context.Background())) - require.NoError(syncer.Wait(context.Background())) + require.NoError(syncer.Start(t.Context())) + require.NoError(syncer.Wait(t.Context())) } // Tests that we are able to sync to the correct root while the server is @@ -79,7 +79,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.KeyChanges = append(response.KeyChanges, KeyChange{}) }) - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, handler) + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, handler) }, }, { @@ -89,7 +89,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.KeyChanges = response.KeyChanges[min(1, len(response.KeyChanges)):] }) - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, handler) + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, handler) }, }, { @@ -115,7 +115,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { } }) - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, handler) + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, handler) }, }, { @@ -126,7 +126,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { _ = slices.Delete(response.KeyChanges, i, min(len(response.KeyChanges), i+1)) }) - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, handler) + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, handler) }, }, { @@ -137,7 +137,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.EndProof = nil }) - return 
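Aside (illustrative, not part of the patch): the benchmark hunks above use the testing.B counterpart, b.Context(), which is likewise canceled as the benchmark finishes. A self-contained sketch with a hypothetical doWork helper:

package example

import (
	"context"
	"testing"
)

// doWork is a hypothetical stand-in for a context-taking call being benchmarked.
func doWork(ctx context.Context) error {
	return ctx.Err()
}

func BenchmarkWithContext(b *testing.B) {
	ctx := b.Context() // canceled as the benchmark finishes
	for i := 0; i < b.N; i++ {
		if err := doWork(ctx); err != nil {
			b.Fatal(err)
		}
	}
}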
p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, handler) + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, handler) }, }, { @@ -147,7 +147,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.EndProof = nil }) - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, handler) + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, handler) }, }, { @@ -159,13 +159,13 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.KeyChanges = nil }) - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, handler) + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, handler) }, }, { name: "range proof server flake", rangeProofClient: func(db MerkleDB) *p2p.Client { - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, &flakyHandler{ + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, &flakyHandler{ Handler: xsync.NewGetRangeProofHandler(db, rangeProofMarshaler), c: &counter{m: 2}, }) @@ -178,7 +178,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.KeyChanges = append(response.KeyChanges, make([]KeyChange, xsync.DefaultRequestKeyLimit)...) }) - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, handler) + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, handler) }, }, { @@ -188,7 +188,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.KeyChanges = response.KeyChanges[min(1, len(response.KeyChanges)):] }) - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, handler) + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, handler) }, }, { @@ -199,7 +199,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { _ = slices.Delete(response.KeyChanges, i, min(len(response.KeyChanges), i+1)) }) - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, handler) + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, handler) }, }, { @@ -210,13 +210,13 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { response.EndProof = nil }) - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, handler) + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, handler) }, }, { name: "change proof flaky server", changeProofClient: func(db MerkleDB) *p2p.Client { - return p2ptest.NewSelfClient(t, context.Background(), ids.EmptyNodeID, &flakyHandler{ + return p2ptest.NewSelfClient(t, t.Context(), ids.EmptyNodeID, &flakyHandler{ Handler: xsync.NewGetChangeProofHandler(db, rangeProofMarshaler, changeProofMarshaler), c: &counter{m: 2}, }) @@ -228,7 +228,7 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { t.Run(tt.name, func(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() dbToSync, err := generateTrie(t, r, 3*xsync.MaxKeyValuesLimit) require.NoError(err) @@ -306,10 +306,10 @@ func Test_Sync_Result_Correct_Root(t *testing.T) { require.NoError(syncer.Wait(ctx)) // We should have the same resulting root as the server - wantRoot, err := dbToSync.GetMerkleRoot(context.Background()) + wantRoot, err := dbToSync.GetMerkleRoot(t.Context()) require.NoError(err) - gotRoot, err := db.GetMerkleRoot(context.Background()) + gotRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(wantRoot, gotRoot) }) @@ -324,17 +324,17 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { r := rand.New(rand.NewSource(now)) // #nosec G404 dbToSync, err := generateTrie(t, r, 3*xsync.MaxKeyValuesLimit) 
require.NoError(err) - syncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + syncRoot, err := dbToSync.GetMerkleRoot(t.Context()) require.NoError(err) db, err := New( - context.Background(), + t.Context(), memdb.New(), newDefaultDBConfig(), ) require.NoError(err) - ctx := context.Background() + ctx := t.Context() syncer, err := xsync.NewManager( db, xsync.ManagerConfig[*RangeProof, *ChangeProof]{ @@ -376,11 +376,11 @@ func Test_Sync_Result_Correct_Root_With_Sync_Restart(t *testing.T) { require.NoError(err) require.NotNil(newSyncer) - require.NoError(newSyncer.Start(context.Background())) + require.NoError(newSyncer.Start(t.Context())) require.NoError(newSyncer.Error()) - require.NoError(newSyncer.Wait(context.Background())) + require.NoError(newSyncer.Wait(t.Context())) - newRoot, err := db.GetMerkleRoot(context.Background()) + newRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(syncRoot, newRoot) } @@ -397,7 +397,7 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { dbToSync, err := generateTrie(t, r, 3*xsync.MaxKeyValuesLimit) require.NoError(err) - firstSyncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + firstSyncRoot, err := dbToSync.GetMerkleRoot(t.Context()) require.NoError(err) for x := 0; x < 100; x++ { @@ -423,11 +423,11 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { it.Release() } - secondSyncRoot, err := dbToSync.GetMerkleRoot(context.Background()) + secondSyncRoot, err := dbToSync.GetMerkleRoot(t.Context()) require.NoError(err) db, err := New( - context.Background(), + t.Context(), memdb.New(), newDefaultDBConfig(), ) @@ -435,7 +435,7 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { actionHandler := &p2p.TestHandler{} - ctx := context.Background() + ctx := t.Context() syncer, err := xsync.NewManager( db, xsync.ManagerConfig[*RangeProof, *ChangeProof]{ @@ -469,18 +469,18 @@ func Test_Sync_Result_Correct_Root_Update_Root_During(t *testing.T) { return rangeProofHandler.AppRequest(ctx, nodeID, deadline, requestBytes) } - require.NoError(syncer.Start(context.Background())) - require.NoError(syncer.Wait(context.Background())) + require.NoError(syncer.Start(t.Context())) + require.NoError(syncer.Wait(t.Context())) require.NoError(syncer.Error()) - newRoot, err := db.GetMerkleRoot(context.Background()) + newRoot, err := db.GetMerkleRoot(t.Context()) require.NoError(err) require.Equal(secondSyncRoot, newRoot) } func Test_Sync_UpdateSyncTarget(t *testing.T) { require := require.New(t) - ctx := context.Background() + ctx := t.Context() now := time.Now().UnixNano() t.Logf("seed: %d", now) @@ -541,7 +541,7 @@ func generateTrieWithMinKeyLen(t *testing.T, r *rand.Rand, count int, minKeyLen require := require.New(t) db, err := New( - context.Background(), + t.Context(), memdb.New(), newDefaultDBConfig(), ) diff --git a/x/merkledb/trie_test.go b/x/merkledb/trie_test.go index 4d8b28adfad9..64c49b06caa9 100644 --- a/x/merkledb/trie_test.go +++ b/x/merkledb/trie_test.go @@ -49,7 +49,7 @@ func Test_GetValue_Safety(t *testing.T) { require.NoError(err) view, err := db.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte{0}, Value: []byte{0}}, @@ -58,13 +58,13 @@ func Test_GetValue_Safety(t *testing.T) { ) require.NoError(err) - trieVal, err := view.GetValue(context.Background(), []byte{0}) + trieVal, err := view.GetValue(t.Context(), []byte{0}) require.NoError(err) require.Equal([]byte{0}, trieVal) trieVal[0] = 1 // should still 
be []byte{0} after edit - trieVal, err = view.GetValue(context.Background(), []byte{0}) + trieVal, err = view.GetValue(t.Context(), []byte{0}) require.NoError(err) require.Equal([]byte{0}, trieVal) } @@ -76,7 +76,7 @@ func Test_GetValues_Safety(t *testing.T) { require.NoError(err) view, err := db.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte{0}, Value: []byte{0}}, @@ -85,7 +85,7 @@ func Test_GetValues_Safety(t *testing.T) { ) require.NoError(err) - trieVals, errs := view.GetValues(context.Background(), [][]byte{{0}}) + trieVals, errs := view.GetValues(t.Context(), [][]byte{{0}}) require.Len(errs, 1) require.NoError(errs[0]) require.Equal([]byte{0}, trieVals[0]) @@ -93,7 +93,7 @@ func Test_GetValues_Safety(t *testing.T) { require.Equal([]byte{1}, trieVals[0]) // should still be []byte{0} after edit - trieVals, errs = view.GetValues(context.Background(), [][]byte{{0}}) + trieVals, errs = view.GetValues(t.Context(), [][]byte{{0}}) require.Len(errs, 1) require.NoError(errs[0]) require.Equal([]byte{0}, trieVals[0]) @@ -105,7 +105,7 @@ func TestVisitPathToKey(t *testing.T) { db, err := getBasicDB() require.NoError(err) - trieIntf, err := db.NewView(context.Background(), ViewChanges{}) + trieIntf, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, trieIntf) trie := trieIntf.(*view) @@ -121,7 +121,7 @@ func TestVisitPathToKey(t *testing.T) { // Insert a key key1 := []byte{0} trieIntf, err = trie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: key1, Value: []byte("value")}, @@ -131,7 +131,7 @@ func TestVisitPathToKey(t *testing.T) { require.NoError(err) require.IsType(&view{}, trieIntf) trie = trieIntf.(*view) - require.NoError(trie.applyValueChanges(context.Background())) + require.NoError(trie.applyValueChanges(t.Context())) nodePath = make([]*node, 0, 1) require.NoError(visitPathToKey(trie, ToKey(key1), func(n *node) error { @@ -146,7 +146,7 @@ func TestVisitPathToKey(t *testing.T) { // Insert another key which is a child of the first key2 := []byte{0, 1} trieIntf, err = trie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: key2, Value: []byte("value")}, @@ -156,7 +156,7 @@ func TestVisitPathToKey(t *testing.T) { require.NoError(err) require.IsType(&view{}, trieIntf) trie = trieIntf.(*view) - require.NoError(trie.applyValueChanges(context.Background())) + require.NoError(trie.applyValueChanges(t.Context())) nodePath = make([]*node, 0, 2) require.NoError(visitPathToKey(trie, ToKey(key2), func(n *node) error { @@ -175,7 +175,7 @@ func TestVisitPathToKey(t *testing.T) { // Insert a key which shares no prefix with the others key3 := []byte{255} trieIntf, err = trie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: key3, Value: []byte("value")}, @@ -185,7 +185,7 @@ func TestVisitPathToKey(t *testing.T) { require.NoError(err) require.IsType(&view{}, trieIntf) trie = trieIntf.(*view) - require.NoError(trie.applyValueChanges(context.Background())) + require.NoError(trie.applyValueChanges(t.Context())) // Trie is: // [] @@ -247,7 +247,7 @@ func Test_Trie_ViewOnCommittedView(t *testing.T) { require.NotNil(dbTrie) committedTrie, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte{0}, Value: []byte{0}}, @@ -256,10 +256,10 @@ func Test_Trie_ViewOnCommittedView(t *testing.T) { ) require.NoError(err) - 
require.NoError(committedTrie.CommitToDB(context.Background())) + require.NoError(committedTrie.CommitToDB(t.Context())) view, err := committedTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte{1}, Value: []byte{1}}, @@ -267,12 +267,12 @@ func Test_Trie_ViewOnCommittedView(t *testing.T) { }, ) require.NoError(err) - require.NoError(view.CommitToDB(context.Background())) + require.NoError(view.CommitToDB(t.Context())) - val0, err := dbTrie.GetValue(context.Background(), []byte{0}) + val0, err := dbTrie.GetValue(t.Context(), []byte{0}) require.NoError(err) require.Equal([]byte{0}, val0) - val1, err := dbTrie.GetValue(context.Background(), []byte{1}) + val1, err := dbTrie.GetValue(t.Context(), []byte{1}) require.NoError(err) require.Equal([]byte{1}, val1) } @@ -284,17 +284,17 @@ func Test_Trie_WriteToDB(t *testing.T) { require.NoError(err) require.NotNil(dbTrie) - trieIntf1, err := dbTrie.NewView(context.Background(), ViewChanges{}) + trieIntf1, err := dbTrie.NewView(t.Context(), ViewChanges{}) require.NoError(err) trie1 := trieIntf1.(*view) // value hasn't been inserted so shouldn't exist - value, err := trie1.GetValue(context.Background(), []byte("key")) + value, err := trie1.GetValue(t.Context(), []byte("key")) require.ErrorIs(err, database.ErrNotFound) require.Nil(value) trieIntf2, err := trie1.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key"), Value: []byte("value")}, @@ -308,8 +308,8 @@ func Test_Trie_WriteToDB(t *testing.T) { require.NoError(err) require.Equal([]byte("value"), value) - require.NoError(trie1.CommitToDB(context.Background())) - require.NoError(trie2.CommitToDB(context.Background())) + require.NoError(trie1.CommitToDB(t.Context())) + require.NoError(trie2.CommitToDB(t.Context())) key := []byte("key") prefixedKey := make([]byte, len(key)+valueNodePrefixLen) @@ -349,7 +349,7 @@ func Test_Trie_Overwrite(t *testing.T) { require.NoError(err) require.NotNil(dbTrie) trie, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key"), Value: []byte("value0")}, @@ -363,7 +363,7 @@ func Test_Trie_Overwrite(t *testing.T) { require.Equal([]byte("value1"), value) trie, err = dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key"), Value: []byte("value2")}, @@ -384,7 +384,7 @@ func Test_Trie_Delete(t *testing.T) { require.NotNil(dbTrie) trie, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key"), Value: []byte("value0")}, @@ -398,7 +398,7 @@ func Test_Trie_Delete(t *testing.T) { require.Equal([]byte("value0"), value) trie, err = dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key"), Delete: true}, @@ -419,7 +419,7 @@ func Test_Trie_DeleteMissingKey(t *testing.T) { require.NoError(err) require.NotNil(trie) - require.NoError(trie.DeleteContext(context.Background(), []byte("key"))) + require.NoError(trie.DeleteContext(t.Context(), []byte("key"))) } func Test_Trie_ExpandOnKeyPath(t *testing.T) { @@ -429,7 +429,7 @@ func Test_Trie_ExpandOnKeyPath(t *testing.T) { require.NoError(err) require.NotNil(dbTrie) trieIntf, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key"), Value: []byte("value0")}, @@ -444,7 +444,7 @@ func Test_Trie_ExpandOnKeyPath(t 
*testing.T) { require.Equal([]byte("value0"), value) trieIntf, err = trie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key1"), Value: []byte("value1")}, @@ -463,7 +463,7 @@ func Test_Trie_ExpandOnKeyPath(t *testing.T) { require.Equal([]byte("value1"), value) trieIntf, err = trie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key12"), Value: []byte("value12")}, @@ -493,7 +493,7 @@ func Test_Trie_CompressedKeys(t *testing.T) { require.NoError(err) require.NotNil(dbTrie) trieIntf, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key12"), Value: []byte("value12")}, @@ -508,7 +508,7 @@ func Test_Trie_CompressedKeys(t *testing.T) { require.Equal([]byte("value12"), value) trieIntf, err = trie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key1"), Value: []byte("value1")}, @@ -527,7 +527,7 @@ func Test_Trie_CompressedKeys(t *testing.T) { require.Equal([]byte("value1"), value) trieIntf, err = trie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key"), Value: []byte("value")}, @@ -559,7 +559,7 @@ func Test_Trie_SplitBranch(t *testing.T) { // force a new node to generate with common prefix "key1" and have these two nodes as children trie, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key12"), Value: []byte("value12")}, @@ -588,7 +588,7 @@ func Test_Trie_HashCountOnBranch(t *testing.T) { key1, key2, keyPrefix := []byte("12"), []byte("1F"), []byte("1") view1, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: key1, Value: []byte("")}, @@ -602,7 +602,7 @@ func Test_Trie_HashCountOnBranch(t *testing.T) { // create new node with common prefix whose children // are key1, key2 view2, err := view1.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: key2, Value: []byte("")}, @@ -619,7 +619,7 @@ func Test_Trie_HashCountOnBranch(t *testing.T) { dbTrie.metrics.(*mockMetrics).hashCount = 0 // calculate the root - _, err = view2.GetMerkleRoot(context.Background()) + _, err = view2.GetMerkleRoot(t.Context()) require.NoError(err) // Make sure the root is an intermediate node with the expected common prefix. 
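// Minimal sketch (not part of this patch): the replacement pattern the usetesting
// linter enforces in the trie_test.go hunks above. Since Go 1.24, t.Context()
// returns a context that is canceled just before the test's cleanup functions run,
// so work started with it cannot outlive the test. doWork is a hypothetical
// stand-in for any context-aware call under test.
package example_test

import (
	"context"
	"testing"
)

func doWork(ctx context.Context) error {
	// A real call would do I/O or block on ctx; here we only report its state.
	return ctx.Err()
}

func TestDoWork(t *testing.T) {
	ctx := t.Context() // previously: ctx := context.Background()
	if err := doWork(ctx); err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
}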
@@ -641,7 +641,7 @@ func Test_Trie_HashCountOnDelete(t *testing.T) { require.NoError(err) trie, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("k"), Value: []byte("value0")}, @@ -655,12 +655,12 @@ func Test_Trie_HashCountOnDelete(t *testing.T) { require.NoError(err) require.NotNil(trie) - require.NoError(trie.CommitToDB(context.Background())) + require.NoError(trie.CommitToDB(t.Context())) oldCount := dbTrie.metrics.(*mockMetrics).hashCount // delete the middle values view, err := trie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("k"), Delete: true}, @@ -670,7 +670,7 @@ func Test_Trie_HashCountOnDelete(t *testing.T) { }, ) require.NoError(err) - require.NoError(view.CommitToDB(context.Background())) + require.NoError(view.CommitToDB(t.Context())) // trie is: // [key0] (first 28 bits) @@ -693,7 +693,7 @@ func Test_Trie_NoExistingResidual(t *testing.T) { require.NotNil(dbTrie) trie, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("k"), Value: []byte("1")}, @@ -731,7 +731,7 @@ func Test_Trie_BatchApply(t *testing.T) { require.NotNil(dbTrie) trie, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key1"), Value: []byte("value1")}, @@ -763,7 +763,7 @@ func Test_Trie_ChainDeletion(t *testing.T) { require.NoError(err) require.NotNil(trie) newTrie, err := trie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("k"), Value: []byte("value0")}, @@ -775,7 +775,7 @@ func Test_Trie_ChainDeletion(t *testing.T) { ) require.NoError(err) - require.NoError(newTrie.(*view).applyValueChanges(context.Background())) + require.NoError(newTrie.(*view).applyValueChanges(t.Context())) maybeRoot := newTrie.getRoot() require.NoError(err) require.True(maybeRoot.HasValue()) @@ -783,7 +783,7 @@ func Test_Trie_ChainDeletion(t *testing.T) { require.Len(maybeRoot.Value().children, 1) newTrie, err = newTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("k"), Delete: true}, @@ -794,7 +794,7 @@ func Test_Trie_ChainDeletion(t *testing.T) { }, ) require.NoError(err) - require.NoError(newTrie.(*view).applyValueChanges(context.Background())) + require.NoError(newTrie.(*view).applyValueChanges(t.Context())) // trie should be empty root := newTrie.getRoot() @@ -808,11 +808,11 @@ func Test_Trie_Invalidate_Siblings_On_Commit(t *testing.T) { require.NoError(err) require.NotNil(dbTrie) - view1, err := dbTrie.NewView(context.Background(), ViewChanges{}) + view1, err := dbTrie.NewView(t.Context(), ViewChanges{}) require.NoError(err) view2, err := view1.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte{0}, Value: []byte{0}}, @@ -822,16 +822,16 @@ func Test_Trie_Invalidate_Siblings_On_Commit(t *testing.T) { require.NoError(err) // Siblings of view2 - sibling1, err := view1.NewView(context.Background(), ViewChanges{}) + sibling1, err := view1.NewView(t.Context(), ViewChanges{}) require.NoError(err) - sibling2, err := view1.NewView(context.Background(), ViewChanges{}) + sibling2, err := view1.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.False(sibling1.(*view).isInvalid()) require.False(sibling2.(*view).isInvalid()) - require.NoError(view1.CommitToDB(context.Background())) - 
require.NoError(view2.CommitToDB(context.Background())) + require.NoError(view1.CommitToDB(t.Context())) + require.NoError(view2.CommitToDB(t.Context())) require.True(sibling1.(*view).isInvalid()) require.True(sibling2.(*view).isInvalid()) @@ -854,14 +854,14 @@ func Test_Trie_NodeCollapse(t *testing.T) { } trie, err := dbTrie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: kvs, }, ) require.NoError(err) - require.NoError(trie.(*view).applyValueChanges(context.Background())) + require.NoError(trie.(*view).applyValueChanges(t.Context())) for _, kv := range kvs { node, err := trie.getEditableNode(ToKey(kv.Key), true) @@ -881,14 +881,14 @@ func Test_Trie_NodeCollapse(t *testing.T) { } trie, err = trie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: deleteOps, }, ) require.NoError(err) - require.NoError(trie.(*view).applyValueChanges(context.Background())) + require.NoError(trie.(*view).applyValueChanges(t.Context())) for _, kv := range deletedKVs { _, err := trie.getEditableNode(ToKey(kv.Key), true) @@ -915,7 +915,7 @@ func Test_Trie_MultipleStates(t *testing.T) { rdb := memdb.New() defer rdb.Close() db, err := New( - context.Background(), + t.Context(), rdb, NewConfig(), ) @@ -933,7 +933,7 @@ func Test_Trie_MultipleStates(t *testing.T) { ops = append(ops, database.BatchOp{Key: k, Value: hashing.ComputeHash256(k)}) } root, err := db.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: ops, }, @@ -941,23 +941,23 @@ func Test_Trie_MultipleStates(t *testing.T) { require.NoError(err) // Get initial root - _, err = root.GetMerkleRoot(context.Background()) + _, err = root.GetMerkleRoot(t.Context()) require.NoError(err) if commitApproach == "before" { - require.NoError(root.CommitToDB(context.Background())) + require.NoError(root.CommitToDB(t.Context())) } // Populate additional states concurrentStates := []Trie{} for i := 0; i < 5; i++ { - newState, err := root.NewView(context.Background(), ViewChanges{}) + newState, err := root.NewView(t.Context(), ViewChanges{}) require.NoError(err) concurrentStates = append(concurrentStates, newState) } if commitApproach == "after" { - require.NoError(root.CommitToDB(context.Background())) + require.NoError(root.CommitToDB(t.Context())) } // Process ops @@ -976,7 +976,7 @@ func Test_Trie_MultipleStates(t *testing.T) { selectedKey := kv[r.Intn(len(kv))] var pastV []byte for index, state := range concurrentStates { - v, err := state.GetValue(context.Background(), selectedKey) + v, err := state.GetValue(t.Context(), selectedKey) require.NoError(err) if pastV == nil { pastV = v @@ -989,7 +989,7 @@ func Test_Trie_MultipleStates(t *testing.T) { } for index, state := range concurrentStates { concurrentStates[index], err = state.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: concurrentOps[index], }, @@ -1000,7 +1000,7 @@ func Test_Trie_MultipleStates(t *testing.T) { // Generate roots var pastRoot ids.ID for _, state := range concurrentStates { - mroot, err := state.GetMerkleRoot(context.Background()) + mroot, err := state.GetMerkleRoot(t.Context()) require.NoError(err) if pastRoot == ids.Empty { pastRoot = mroot @@ -1019,7 +1019,7 @@ func TestNewViewOnCommittedView(t *testing.T) { require.NoError(err) // Create a view - view1Intf, err := db.NewView(context.Background(), ViewChanges{BatchOps: []database.BatchOp{{Key: []byte{1}, Value: []byte{1}}}}) + view1Intf, err := db.NewView(t.Context(), ViewChanges{BatchOps: []database.BatchOp{{Key: []byte{1}, Value: []byte{1}}}}) 
require.NoError(err) require.IsType(&view{}, view1Intf) view1 := view1Intf.(*view) @@ -1033,7 +1033,7 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Equal(db, view1.parentTrie) // Commit the view - require.NoError(view1.CommitToDB(context.Background())) + require.NoError(view1.CommitToDB(t.Context())) // view1 (committed) // | @@ -1044,7 +1044,7 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Equal(db, view1.parentTrie) // Create a new view on the committed view - view2Intf, err := view1.NewView(context.Background(), ViewChanges{}) + view2Intf, err := view1.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view2Intf) view2 := view2Intf.(*view) @@ -1061,12 +1061,12 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Len(db.childViews, 2) // Make sure the new view has the right value - got, err := view2.GetValue(context.Background(), []byte{1}) + got, err := view2.GetValue(t.Context(), []byte{1}) require.NoError(err) require.Equal([]byte{1}, got) // Make another view - view3Intf, err := view2.NewView(context.Background(), ViewChanges{}) + view3Intf, err := view2.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view3Intf) view3 := view3Intf.(*view) @@ -1087,7 +1087,7 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Len(db.childViews, 2) // Commit view2 - require.NoError(view2.CommitToDB(context.Background())) + require.NoError(view2.CommitToDB(t.Context())) // view3 // | @@ -1105,7 +1105,7 @@ func TestNewViewOnCommittedView(t *testing.T) { require.Equal(db, view3.parentTrie) // Commit view3 - require.NoError(view3.CommitToDB(context.Background())) + require.NoError(view3.CommitToDB(t.Context())) // view3 being committed invalidates view2 require.True(view2.invalidated) @@ -1121,13 +1121,13 @@ func Test_View_NewView(t *testing.T) { require.NoError(err) // Create a view - view1Intf, err := db.NewView(context.Background(), ViewChanges{}) + view1Intf, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view1Intf) view1 := view1Intf.(*view) // Create a view atop view1 - view2Intf, err := view1.NewView(context.Background(), ViewChanges{}) + view2Intf, err := view1.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view2Intf) view2 := view2Intf.(*view) @@ -1144,10 +1144,10 @@ func Test_View_NewView(t *testing.T) { require.Len(view1.childViews, 1) // Commit view1 - require.NoError(view1.CommitToDB(context.Background())) + require.NoError(view1.CommitToDB(t.Context())) // Make another view atop view1 - view3Intf, err := view1.NewView(context.Background(), ViewChanges{}) + view3Intf, err := view1.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view3Intf) view3 := view3Intf.(*view) @@ -1167,7 +1167,7 @@ func Test_View_NewView(t *testing.T) { // Assert that NewPreallocatedView on an invalid view fails invalidView := &view{invalidated: true} - _, err = invalidView.NewView(context.Background(), ViewChanges{}) + _, err = invalidView.NewView(t.Context(), ViewChanges{}) require.ErrorIs(err, ErrInvalid) } @@ -1178,18 +1178,18 @@ func TestViewInvalidate(t *testing.T) { require.NoError(err) // Create a view - view1Intf, err := db.NewView(context.Background(), ViewChanges{}) + view1Intf, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view1Intf) view1 := view1Intf.(*view) // Create 2 views atop view1 - view2Intf, err := view1.NewView(context.Background(), ViewChanges{}) 
+ view2Intf, err := view1.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view2Intf) view2 := view2Intf.(*view) - view3Intf, err := view1.NewView(context.Background(), ViewChanges{}) + view3Intf, err := view1.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.IsType(&view{}, view3Intf) view3 := view3Intf.(*view) @@ -1217,7 +1217,7 @@ func Test_Trie_ConcurrentNewViewAndCommit(t *testing.T) { require.NotNil(trie) newTrie, err := trie.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: []byte("key"), Value: []byte("value0")}, @@ -1228,10 +1228,10 @@ func Test_Trie_ConcurrentNewViewAndCommit(t *testing.T) { eg := errgroup.Group{} eg.Go(func() error { - return newTrie.CommitToDB(context.Background()) + return newTrie.CommitToDB(t.Context()) }) - view, err := newTrie.NewView(context.Background(), ViewChanges{}) + view, err := newTrie.NewView(t.Context(), ViewChanges{}) require.NoError(err) require.NotNil(view) @@ -1256,7 +1256,7 @@ func TestTrieCommitToDBInvalid(t *testing.T) { { name: "invalid", trieFunc: func(require *require.Assertions, db *merkleDB) View { - nView, err := db.NewView(context.Background(), ViewChanges{}) + nView, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) // Invalidate the view @@ -1268,11 +1268,11 @@ func TestTrieCommitToDBInvalid(t *testing.T) { { name: "committed", trieFunc: func(require *require.Assertions, db *merkleDB) View { - view, err := db.NewView(context.Background(), ViewChanges{}) + view, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) // Commit the view - require.NoError(view.CommitToDB(context.Background())) + require.NoError(view.CommitToDB(t.Context())) return view }, expectedErr: ErrCommitted, @@ -1280,7 +1280,7 @@ func TestTrieCommitToDBInvalid(t *testing.T) { { name: "parent not database", trieFunc: func(require *require.Assertions, db *merkleDB) View { - nView, err := db.NewView(context.Background(), ViewChanges{}) + nView, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) // Change the parent @@ -1300,7 +1300,7 @@ func TestTrieCommitToDBInvalid(t *testing.T) { require.NoError(err) trie := tt.trieFunc(require, db) - err = trie.CommitToDB(context.Background()) + err = trie.CommitToDB(t.Context()) require.ErrorIs(err, tt.expectedErr) }) } @@ -1324,7 +1324,7 @@ func TestTrieCommitToDBValid(t *testing.T) { // Delete a key-value pair, modify a key-value pair, // and insert a new key-value pair view, err := db.NewView( - context.Background(), + t.Context(), ViewChanges{ BatchOps: []database.BatchOp{ {Key: key1, Delete: true}, @@ -1336,7 +1336,7 @@ func TestTrieCommitToDBValid(t *testing.T) { require.NoError(err) // Commit the view - require.NoError(view.CommitToDB(context.Background())) + require.NoError(view.CommitToDB(t.Context())) // Make sure the database has the right values _, err = db.Get(key1) diff --git a/x/merkledb/view_iterator_test.go b/x/merkledb/view_iterator_test.go index 12aa9ce3c623..3068b90269d0 100644 --- a/x/merkledb/view_iterator_test.go +++ b/x/merkledb/view_iterator_test.go @@ -5,7 +5,6 @@ package merkledb import ( "bytes" - "context" "math/rand" "slices" "testing" @@ -33,7 +32,7 @@ func Test_View_Iterator(t *testing.T) { require.NoError(db.Put(key1, value1)) require.NoError(db.Put(key2, value2)) - view, err := db.NewView(context.Background(), ViewChanges{}) + view, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) iterator := view.NewIterator() require.NotNil(iterator) @@ 
-65,7 +64,7 @@ func Test_View_Iterator_DBClosed(t *testing.T) { require.NoError(db.Put(key1, value1)) - view, err := db.NewView(context.Background(), ViewChanges{}) + view, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) iterator := view.NewIterator() require.NotNil(iterator) @@ -97,7 +96,7 @@ func Test_View_IteratorStart(t *testing.T) { require.NoError(db.Put(key1, value1)) require.NoError(db.Put(key2, value2)) - view, err := db.NewView(context.Background(), ViewChanges{}) + view, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) iterator := view.NewIteratorWithStart(key2) require.NotNil(iterator) @@ -134,7 +133,7 @@ func Test_View_IteratorPrefix(t *testing.T) { require.NoError(db.Put(key2, value2)) require.NoError(db.Put(key3, value3)) - view, err := db.NewView(context.Background(), ViewChanges{}) + view, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) iterator := view.NewIteratorWithPrefix([]byte("h")) require.NotNil(iterator) @@ -171,7 +170,7 @@ func Test_View_IteratorStartPrefix(t *testing.T) { require.NoError(db.Put(key2, value2)) require.NoError(db.Put(key3, value3)) - view, err := db.NewView(context.Background(), ViewChanges{}) + view, err := db.NewView(t.Context(), ViewChanges{}) require.NoError(err) iterator := view.NewIteratorWithStartAndPrefix(key1, []byte("h")) require.NotNil(iterator) @@ -231,7 +230,7 @@ func Test_View_Iterator_Random(t *testing.T) { ops = append(ops, database.BatchOp{Key: keyChanges[i].Key, Value: keyChanges[i].Value.Value()}) } - view1, err := db.NewView(context.Background(), ViewChanges{BatchOps: ops}) + view1, err := db.NewView(t.Context(), ViewChanges{BatchOps: ops}) require.NoError(err) ops = make([]database.BatchOp, 0, numKeyChanges/4) @@ -239,7 +238,7 @@ func Test_View_Iterator_Random(t *testing.T) { ops = append(ops, database.BatchOp{Key: keyChanges[i].Key, Value: keyChanges[i].Value.Value()}) } - view2, err := view1.NewView(context.Background(), ViewChanges{BatchOps: ops}) + view2, err := view1.NewView(t.Context(), ViewChanges{BatchOps: ops}) require.NoError(err) ops = make([]database.BatchOp, 0, numKeyChanges/4) @@ -247,7 +246,7 @@ func Test_View_Iterator_Random(t *testing.T) { ops = append(ops, database.BatchOp{Key: keyChanges[i].Key, Value: keyChanges[i].Value.Value()}) } - view3, err := view2.NewView(context.Background(), ViewChanges{BatchOps: ops}) + view3, err := view2.NewView(t.Context(), ViewChanges{BatchOps: ops}) require.NoError(err) // Might have introduced duplicates, so only expect the latest value. 
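// Minimal sketch (hypothetical file, not part of this patch): once every
// context.Background() call site in a test file has been rewritten to
// t.Context(), the "context" import becomes unused and is dropped, as in the
// view_iterator_test.go hunks above.
package example_test

import "testing" // "context" is no longer imported

func TestIteratorSketch(t *testing.T) {
	ctx := t.Context() // the only context the test needs
	select {
	case <-ctx.Done():
		t.Fatal("test context canceled prematurely")
	default:
	}
}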
diff --git a/x/merkledb/view_test.go b/x/merkledb/view_test.go index e8530799c12d..176eafdefe88 100644 --- a/x/merkledb/view_test.go +++ b/x/merkledb/view_test.go @@ -87,7 +87,7 @@ func Test_HashChangedNodes(t *testing.T) { for _, test := range hashChangedNodesTests { t.Run(test.name, func(t *testing.T) { view := makeViewForHashChangedNodes(t, test.numKeys, 16) - ctx := context.Background() + ctx := t.Context() view.hashChangedNodes(ctx) require.Equal(t, test.expectedRootHash, view.changes.rootID.String()) }) @@ -97,7 +97,7 @@ func Test_HashChangedNodes(t *testing.T) { func Benchmark_HashChangedNodes(b *testing.B) { for _, test := range hashChangedNodesTests { view := makeViewForHashChangedNodes(b, test.numKeys, 1) - ctx := context.Background() + ctx := b.Context() b.Run(test.name, func(b *testing.B) { for i := 0; i < b.N; i++ { view.hashChangedNodes(ctx) @@ -131,7 +131,7 @@ func BenchmarkView_NewIteratorWithStartAndPrefix(b *testing.B) { }) } - ctx := context.Background() + ctx := b.Context() view, err := db.NewView(ctx, ViewChanges{BatchOps: ops}) require.NoError(b, err)
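// Minimal sketch (not part of this patch): benchmarks follow the same rule via
// (*testing.B).Context, also added in Go 1.24, as in the view_test.go hunks
// above; the returned context is canceled before the benchmark's cleanup runs.
package example_test

import "testing"

func BenchmarkSketch(b *testing.B) {
	ctx := b.Context() // previously: ctx := context.Background()
	for i := 0; i < b.N; i++ {
		_ = ctx.Err() // stand-in for the context-aware call being measured
	}
}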