Skip to content
Merged
Show file tree
Hide file tree
Changes from 2 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion x/blockdb/README.md
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ BlockDB is a specialized database optimized for blockchain blocks.
- **Configurable Durability**: Optional `syncToDisk` mode guarantees immediate recoverability
- **Automatic Recovery**: Detects and recovers unindexed blocks after unclean shutdowns
- **Block Compression**: zstd compression for block data
- **In-Memory Cache**: LRU cache for recently accessed blocks

## Design

Expand Down Expand Up @@ -167,7 +168,6 @@ if err != nil {

## TODO

- Implement a block cache for recently accessed blocks
- Use a buffered pool to avoid allocations on reads and writes
- Add performance benchmarks
- Consider supporting missing data files (currently we error if any data files are missing)
16 changes: 16 additions & 0 deletions x/blockdb/config.go
Original file line number Diff line number Diff line change
Expand Up @@ -11,6 +11,9 @@ const DefaultMaxDataFileSize = 500 * 1024 * 1024 * 1024
// DefaultMaxDataFiles is the default maximum number of data file descriptors cached.
const DefaultMaxDataFiles = 10

// DefaultEntryCacheSize is the default size of the entry cache.
const DefaultEntryCacheSize = 256

// DatabaseConfig contains configuration parameters for BlockDB.
type DatabaseConfig struct {
// IndexDir is the directory where the index file is stored.
Expand All @@ -28,6 +31,9 @@ type DatabaseConfig struct {
// MaxDataFiles is the maximum number of data file descriptors cached.
MaxDataFiles int

// EntryCacheSize is the size of the entry cache (default: 256).
EntryCacheSize int

// CheckpointInterval defines how frequently (in blocks) the index file header is updated (default: 1024).
CheckpointInterval uint64

Expand All @@ -43,6 +49,7 @@ func DefaultConfig() DatabaseConfig {
MinimumHeight: 0,
MaxDataFileSize: DefaultMaxDataFileSize,
MaxDataFiles: DefaultMaxDataFiles,
EntryCacheSize: DefaultEntryCacheSize,
CheckpointInterval: 1024,
SyncToDisk: true,
}
Expand Down Expand Up @@ -91,6 +98,12 @@ func (c DatabaseConfig) WithMaxDataFiles(maxFiles int) DatabaseConfig {
return c
}

// WithEntryCacheSize returns a copy of the config whose EntryCacheSize is
// set to size; the receiver is not modified.
func (c DatabaseConfig) WithEntryCacheSize(size int) DatabaseConfig {
	updated := c
	updated.EntryCacheSize = size
	return updated
}

// WithCheckpointInterval returns a copy of the config with CheckpointInterval set to the given value.
func (c DatabaseConfig) WithCheckpointInterval(interval uint64) DatabaseConfig {
c.CheckpointInterval = interval
Expand All @@ -114,5 +127,8 @@ func (c DatabaseConfig) Validate() error {
if c.MaxDataFileSize == 0 {
return errors.New("MaxDataFileSize must be positive")
}
if c.EntryCacheSize < 1 {
return errors.New("EntryCacheSize cannot be less than 1")
}
return nil
}
30 changes: 24 additions & 6 deletions x/blockdb/database.go
Original file line number Diff line number Diff line change
Expand Up @@ -12,6 +12,7 @@ import (
"math"
"os"
"path/filepath"
"slices"
"sync"
"sync/atomic"

Expand Down Expand Up @@ -176,6 +177,7 @@ type Database struct {
log logging.Logger
closed bool
fileCache *lru.Cache[int, *os.File]
entryCache *lru.Cache[BlockHeight, BlockData]
compressor compression.Compressor

// closeMu prevents the database from being closed while in use and prevents
Expand Down Expand Up @@ -223,6 +225,7 @@ func New(config DatabaseConfig, log logging.Logger) (*Database, error) {
f.Close()
}
}),
entryCache: lru.NewCache[BlockHeight, BlockData](config.EntryCacheSize),
compressor: compressor,
}

Expand All @@ -231,6 +234,7 @@ func New(config DatabaseConfig, log logging.Logger) (*Database, error) {
zap.String("dataDir", config.DataDir),
zap.Uint64("maxDataFileSize", config.MaxDataFileSize),
zap.Int("maxDataFiles", config.MaxDataFiles),
zap.Int("entryCacheSize", config.EntryCacheSize),
)

if err := s.openAndInitializeIndex(); err != nil {
Expand Down Expand Up @@ -275,6 +279,7 @@ func (s *Database) Close() error {
}

s.closeFiles()
s.entryCache.Flush()

s.log.Info("Block database closed successfully")
return err
Expand Down Expand Up @@ -371,6 +376,7 @@ func (s *Database) Put(height BlockHeight, block BlockData) error {
)
return err
}
s.entryCache.Put(height, slices.Clone(block))

s.log.Debug("Block written successfully",
zap.Uint64("height", height),
Expand All @@ -385,12 +391,6 @@ func (s *Database) Put(height BlockHeight, block BlockData) error {
// It returns database.ErrNotFound if the block does not exist.
func (s *Database) readBlockIndex(height BlockHeight) (indexEntry, error) {
var entry indexEntry
if s.closed {
s.log.Error("Failed to read block index: database is closed",
zap.Uint64("height", height),
)
return entry, database.ErrClosed
}

// Skip the index entry read if we know the block is past the max height.
maxHeight := s.maxBlockHeight.Load()
Expand Down Expand Up @@ -436,6 +436,15 @@ func (s *Database) Get(height BlockHeight) (BlockData, error) {
s.closeMu.RLock()
defer s.closeMu.RUnlock()

if s.closed {
s.log.Error("Failed Get: database closed", zap.Uint64("height", height))
return nil, database.ErrClosed
}

if c, ok := s.entryCache.Get(height); ok {
return slices.Clone(c), nil
}

indexEntry, err := s.readBlockIndex(height)
if err != nil {
return nil, err
Expand Down Expand Up @@ -486,6 +495,7 @@ func (s *Database) Get(height BlockHeight) (BlockData, error) {
return nil, fmt.Errorf("checksum mismatch: calculated %d, stored %d", calculatedChecksum, bh.Checksum)
}

s.entryCache.Put(height, slices.Clone(decompressed))
return decompressed, nil
}

Expand All @@ -494,6 +504,14 @@ func (s *Database) Has(height BlockHeight) (bool, error) {
s.closeMu.RLock()
defer s.closeMu.RUnlock()

if s.closed {
s.log.Error("Failed Has: database closed", zap.Uint64("height", height))
return false, database.ErrClosed
}

if _, ok := s.entryCache.Get(height); ok {
return true, nil
}
_, err := s.readBlockIndex(height)
if err != nil {
if errors.Is(err, database.ErrNotFound) || errors.Is(err, ErrInvalidBlockHeight) {
Expand Down
119 changes: 119 additions & 0 deletions x/blockdb/entry_cache_test.go
Original file line number Diff line number Diff line change
@@ -0,0 +1,119 @@
// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package blockdb

import (
"slices"
"testing"

"github.com/stretchr/testify/require"
)

func TestCacheOnMiss(t *testing.T) {
	db, _ := newTestDatabase(t, DefaultConfig())
	const height = uint64(20)
	blk := randomBlock(t)
	require.NoError(t, db.Put(height, blk))

	// Force a miss by dropping the cached entry.
	db.entryCache.Evict(height)

	// A read after the miss should repopulate the cache.
	_, err := db.Get(height)
	require.NoError(t, err)

	_, ok := db.entryCache.Get(height)
	require.True(t, ok)
}

func TestCacheGet(t *testing.T) {
	db, _ := newTestDatabase(t, DefaultConfig())
	const height = uint64(30)
	blk := randomBlock(t)

	// Seed the cache only; nothing is written to the database.
	db.entryCache.Put(height, blk)

	// Get must therefore be served straight from the cache.
	got, err := db.Get(height)
	require.NoError(t, err)
	require.Equal(t, blk, got)
}

func TestCacheHas(t *testing.T) {
	db, _ := newTestDatabase(t, DefaultConfig())
	const height = uint64(40)
	blk := randomBlock(t)

	// Seed the cache only; the block never reaches the database.
	db.entryCache.Put(height, blk)

	// Has must report true purely from the cache.
	has, err := db.Has(height)
	require.NoError(t, err)
	require.True(t, has)
}

func TestCachePutStoresClone(t *testing.T) {
	db, _ := newTestDatabase(t, DefaultConfig())
	const height = uint64(40)
	original := randomBlock(t)
	passed := slices.Clone(original)
	require.NoError(t, db.Put(height, passed))

	// Mutating the caller's slice after Put must not leak into the cache.
	passed[0] = 99

	cached, ok := db.entryCache.Get(height)
	require.True(t, ok)
	require.Equal(t, original, cached)
}

func TestCacheGetReturnsClone(t *testing.T) {
	db, _ := newTestDatabase(t, DefaultConfig())
	const height = uint64(50)
	blk := randomBlock(t)
	require.NoError(t, db.Put(height, blk))

	// Mutate the slice handed back by Get.
	out, err := db.Get(height)
	require.NoError(t, err)
	out[0] = 99

	// The cached copy must be unaffected by that mutation.
	cached, ok := db.entryCache.Get(height)
	require.True(t, ok)
	require.Equal(t, blk, cached)

	// A fresh Get still observes the pristine data.
	out, err = db.Get(height)
	require.NoError(t, err)
	require.Equal(t, blk, out)
}

func TestCachePutOverridesSameHeight(t *testing.T) {
	db, _ := newTestDatabase(t, DefaultConfig())
	const height = uint64(60)

	first := randomBlock(t)
	require.NoError(t, db.Put(height, first))

	// The first block lands in the cache.
	cached, ok := db.entryCache.Get(height)
	require.True(t, ok)
	require.Equal(t, first, cached)

	// Writing a different block at the same height replaces the cached entry.
	second := randomBlock(t)
	require.NoError(t, db.Put(height, second))
	cached, ok = db.entryCache.Get(height)
	require.True(t, ok)
	require.Equal(t, second, cached)
	require.NotEqual(t, first, cached)

	// Get also observes the replacement.
	out, err := db.Get(height)
	require.NoError(t, err)
	require.Equal(t, second, out)
}
2 changes: 2 additions & 0 deletions x/blockdb/readblock_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ func TestReadOperations(t *testing.T) {
MaxDataFileSize: DefaultMaxDataFileSize,
CheckpointInterval: 1024,
MaxDataFiles: DefaultMaxDataFileSize,
EntryCacheSize: DefaultEntryCacheSize,
},
},
{
Expand All @@ -69,6 +70,7 @@ func TestReadOperations(t *testing.T) {
MaxDataFileSize: DefaultMaxDataFileSize,
CheckpointInterval: 1024,
MaxDataFiles: DefaultMaxDataFileSize,
EntryCacheSize: DefaultEntryCacheSize,
},
wantErr: ErrInvalidBlockHeight,
},
Expand Down
5 changes: 3 additions & 2 deletions x/blockdb/writeblock_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -4,6 +4,7 @@
package blockdb

import (
"bytes"
"math"
"os"
"strings"
Expand Down Expand Up @@ -38,7 +39,7 @@ func TestPutGet(t *testing.T) {
{
name: "nil block",
block: nil,
want: []byte{},
want: nil,
},
}
for _, tt := range tests {
Expand All @@ -49,7 +50,7 @@ func TestPutGet(t *testing.T) {

got, err := db.Get(0)
require.NoError(t, err)
require.Equal(t, tt.want, got)
require.True(t, bytes.Equal(tt.want, got))
})
}
}
Expand Down