
Commit c6240d3

feat(blockdb): add lru cache for block entries
1 parent edff599 commit c6240d3

6 files changed (+153 -9 lines)

x/blockdb/README.md

Lines changed: 1 addition & 1 deletion
@@ -10,6 +10,7 @@ BlockDB is a specialized database optimized for blockchain blocks.
 - **Configurable Durability**: Optional `syncToDisk` mode guarantees immediate recoverability
 - **Automatic Recovery**: Detects and recovers unindexed blocks after unclean shutdowns
 - **Block Compression**: zstd compression for block data
+- **In-Memory Cache**: LRU cache for recently accessed blocks
 
 ## Design
 
@@ -167,7 +168,6 @@ if err != nil {
 
 ## TODO
 
-- Implement a block cache for recently accessed blocks
 - Use a buffered pool to avoid allocations on reads and writes
 - Add performance benchmarks
 - Consider supporting missing data files (currently we error if any data files are missing)

x/blockdb/config.go

Lines changed: 16 additions & 0 deletions
@@ -11,6 +11,9 @@ const DefaultMaxDataFileSize = 500 * 1024 * 1024 * 1024
 // DefaultMaxDataFiles is the default maximum number of data files descriptors cached.
 const DefaultMaxDataFiles = 10
 
+// DefaultEntryCacheSize is the default size of the entry cache.
+const DefaultEntryCacheSize = 256
+
 // DatabaseConfig contains configuration parameters for BlockDB.
 type DatabaseConfig struct {
 	// IndexDir is the directory where the index file is stored.
@@ -28,6 +31,9 @@ type DatabaseConfig struct {
 	// MaxDataFiles is the maximum number of data files descriptors cached.
 	MaxDataFiles int
 
+	// EntryCacheSize is the size of the entry cache (default: 256).
+	EntryCacheSize int
+
 	// CheckpointInterval defines how frequently (in blocks) the index file header is updated (default: 1024).
 	CheckpointInterval uint64
 
@@ -43,6 +49,7 @@ func DefaultConfig() DatabaseConfig {
 		MinimumHeight:      0,
 		MaxDataFileSize:    DefaultMaxDataFileSize,
 		MaxDataFiles:       DefaultMaxDataFiles,
+		EntryCacheSize:     DefaultEntryCacheSize,
 		CheckpointInterval: 1024,
 		SyncToDisk:         true,
 	}
@@ -91,6 +98,12 @@ func (c DatabaseConfig) WithMaxDataFiles(maxFiles int) DatabaseConfig {
 	return c
 }
 
+// WithEntryCacheSize returns a copy of the config with EntryCacheSize set to the given value.
+func (c DatabaseConfig) WithEntryCacheSize(size int) DatabaseConfig {
+	c.EntryCacheSize = size
+	return c
+}
+
 // WithCheckpointInterval returns a copy of the config with CheckpointInterval set to the given value.
 func (c DatabaseConfig) WithCheckpointInterval(interval uint64) DatabaseConfig {
 	c.CheckpointInterval = interval
@@ -114,5 +127,8 @@ func (c DatabaseConfig) Validate() error {
 	if c.MaxDataFileSize == 0 {
 		return errors.New("MaxDataFileSize must be positive")
 	}
+	if c.EntryCacheSize < 1 {
+		return errors.New("EntryCacheSize cannot be less than 1")
+	}
 	return nil
 }
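For illustration, a minimal sketch of how a caller might tune the new setting through the builder-style option added here. The import path and the standalone program around it are assumptions, not taken from this commit:

package main

import (
	"fmt"

	"github.com/ava-labs/avalanchego/x/blockdb" // assumed import path, not shown in this commit
)

func main() {
	// Start from the defaults (EntryCacheSize = DefaultEntryCacheSize = 256)
	// and override the cache size with the new builder-style option.
	cfg := blockdb.DefaultConfig().WithEntryCacheSize(1024)

	// Validate rejects EntryCacheSize values below 1; other fields
	// (e.g. IndexDir/DataDir) may also need to be set before it passes.
	if err := cfg.Validate(); err != nil {
		fmt.Println("invalid config:", err)
		return
	}
	fmt.Println("entry cache size:", cfg.EntryCacheSize)
}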

x/blockdb/database.go

Lines changed: 24 additions & 6 deletions
@@ -12,6 +12,7 @@ import (
 	"math"
 	"os"
 	"path/filepath"
+	"slices"
 	"sync"
 	"sync/atomic"
 
@@ -176,6 +177,7 @@ type Database struct {
 	log        logging.Logger
 	closed     bool
 	fileCache  *lru.Cache[int, *os.File]
+	entryCache *lru.Cache[BlockHeight, BlockData]
 	compressor compression.Compressor
 
 	// closeMu prevents the database from being closed while in use and prevents
@@ -223,6 +225,7 @@ func New(config DatabaseConfig, log logging.Logger) (*Database, error) {
 				f.Close()
 			}
 		}),
+		entryCache: lru.NewCache[BlockHeight, BlockData](config.EntryCacheSize),
 		compressor: compressor,
 	}
 
@@ -231,6 +234,7 @@ func New(config DatabaseConfig, log logging.Logger) (*Database, error) {
 		zap.String("dataDir", config.DataDir),
 		zap.Uint64("maxDataFileSize", config.MaxDataFileSize),
 		zap.Int("maxDataFiles", config.MaxDataFiles),
+		zap.Int("entryCacheSize", config.EntryCacheSize),
 	)
 
 	if err := s.openAndInitializeIndex(); err != nil {
@@ -275,6 +279,7 @@ func (s *Database) Close() error {
 	}
 
 	s.closeFiles()
+	s.entryCache.Flush()
 
 	s.log.Info("Block database closed successfully")
 	return err
@@ -371,6 +376,7 @@ func (s *Database) Put(height BlockHeight, block BlockData) error {
 		)
 		return err
 	}
+	s.entryCache.Put(height, slices.Clone(block))
 
 	s.log.Debug("Block written successfully",
 		zap.Uint64("height", height),
@@ -385,12 +391,6 @@ func (s *Database) Put(height BlockHeight, block BlockData) error {
 // It returns database.ErrNotFound if the block does not exist.
 func (s *Database) readBlockIndex(height BlockHeight) (indexEntry, error) {
 	var entry indexEntry
-	if s.closed {
-		s.log.Error("Failed to read block index: database is closed",
-			zap.Uint64("height", height),
-		)
-		return entry, database.ErrClosed
-	}
 
 	// Skip the index entry read if we know the block is past the max height.
 	maxHeight := s.maxBlockHeight.Load()
@@ -436,6 +436,15 @@ func (s *Database) Get(height BlockHeight) (BlockData, error) {
 	s.closeMu.RLock()
 	defer s.closeMu.RUnlock()
 
+	if s.closed {
+		s.log.Error("Failed Get: database closed", zap.Uint64("height", height))
+		return nil, database.ErrClosed
+	}
+
+	if c, ok := s.entryCache.Get(height); ok {
+		return slices.Clone(c), nil
+	}
+
 	indexEntry, err := s.readBlockIndex(height)
 	if err != nil {
 		return nil, err
@@ -486,6 +495,7 @@ func (s *Database) Get(height BlockHeight) (BlockData, error) {
 		return nil, fmt.Errorf("checksum mismatch: calculated %d, stored %d", calculatedChecksum, bh.Checksum)
 	}
 
+	s.entryCache.Put(height, slices.Clone(decompressed))
 	return decompressed, nil
 }
 
@@ -494,6 +504,14 @@ func (s *Database) Has(height BlockHeight) (bool, error) {
 	s.closeMu.RLock()
 	defer s.closeMu.RUnlock()
 
+	if s.closed {
+		s.log.Error("Failed Has: database closed", zap.Uint64("height", height))
+		return false, database.ErrClosed
+	}
+
+	if _, ok := s.entryCache.Get(height); ok {
+		return true, nil
+	}
 	_, err := s.readBlockIndex(height)
 	if err != nil {
 		if errors.Is(err, database.ErrNotFound) || errors.Is(err, ErrInvalidBlockHeight) {
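Taken together, the Get changes implement a cache-aside read path: check the LRU first, fall back to the index and data file on a miss, then populate the cache, cloning the bytes in both directions so a caller can never mutate a cached entry. A self-contained sketch of that pattern under simplified assumptions (a plain map stands in for the repository's lru.Cache, and readFromDisk is a hypothetical loader):

package main

import (
	"fmt"
	"slices"
)

// cacheAsideGet sketches the read path added in this commit: check the cache,
// fall back to the slow path on a miss, then populate the cache. The map and
// readFromDisk are stand-ins, not the repository's types.
func cacheAsideGet(
	cache map[uint64][]byte,
	height uint64,
	readFromDisk func(uint64) ([]byte, error),
) ([]byte, error) {
	if cached, ok := cache[height]; ok {
		// Cache hit: hand back a copy so the caller cannot mutate the cached bytes.
		return slices.Clone(cached), nil
	}
	block, err := readFromDisk(height)
	if err != nil {
		return nil, err
	}
	// Store a copy for the same reason: the caller keeps the original slice.
	cache[height] = slices.Clone(block)
	return block, nil
}

func main() {
	cache := map[uint64][]byte{}
	disk := func(h uint64) ([]byte, error) { return []byte{byte(h)}, nil }

	got, _ := cacheAsideGet(cache, 7, disk) // miss: loads and caches a copy
	got[0] = 99                             // mutating the result...
	again, _ := cacheAsideGet(cache, 7, disk)
	fmt.Println(again[0]) // ...still prints 7: the cached copy was untouched
}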

x/blockdb/entry_cache_test.go

Lines changed: 107 additions & 0 deletions
@@ -0,0 +1,107 @@
+// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
+// See the file LICENSE for licensing terms.
+
+package blockdb
+
+import (
+	"slices"
+	"testing"
+
+	"github.com/stretchr/testify/require"
+)
+
+func TestCacheOnMiss(t *testing.T) {
+	db, _ := newTestDatabase(t, DefaultConfig())
+	height := uint64(20)
+	block := randomBlock(t)
+	require.NoError(t, db.Put(height, block))
+
+	// Evict the entry from cache to simulate a cache miss
+	db.entryCache.Evict(height)
+
+	// Read the block - should populate the cache on cache miss
+	_, err := db.Get(height)
+	require.NoError(t, err)
+
+	_, ok := db.entryCache.Get(height)
+	require.True(t, ok)
+}
+
+func TestCacheHas(t *testing.T) {
+	db, _ := newTestDatabase(t, DefaultConfig())
+	height := uint64(30)
+	block := randomBlock(t)
+	require.NoError(t, db.Put(height, block))
+
+	has, err := db.Has(height)
+	require.NoError(t, err)
+	require.True(t, has)
+
+	// Verify block is in cache
+	cached, ok := db.entryCache.Get(height)
+	require.True(t, ok)
+	require.Equal(t, block, cached)
+}
+
+func TestCachePutStoresClone(t *testing.T) {
+	db, _ := newTestDatabase(t, DefaultConfig())
+	height := uint64(40)
+	block := randomBlock(t)
+	clone := slices.Clone(block)
+	require.NoError(t, db.Put(height, clone))
+
+	// Modify the original block after Put
+	clone[0] = 99
+
+	// Cache should have the original unmodified data
+	cached, ok := db.entryCache.Get(height)
+	require.True(t, ok)
+	require.Equal(t, block, cached)
+}
+
+func TestCacheGetReturnsClone(t *testing.T) {
+	db, _ := newTestDatabase(t, DefaultConfig())
+	height := uint64(50)
+	block := randomBlock(t)
+	require.NoError(t, db.Put(height, block))
+
+	// Get the block and modify the returned data
+	data, err := db.Get(height)
+	require.NoError(t, err)
+	data[0] = 99
+
+	// Cache should still have the original unmodified data
+	cached, ok := db.entryCache.Get(height)
+	require.True(t, ok)
+	require.Equal(t, block, cached)
+
+	// Second Get should also return original data
+	data, err = db.Get(height)
+	require.NoError(t, err)
+	require.Equal(t, block, data)
+}
+
+func TestCachePutOverridesSameHeight(t *testing.T) {
+	db, _ := newTestDatabase(t, DefaultConfig())
+	height := uint64(60)
+	b1 := randomBlock(t)
+	require.NoError(t, db.Put(height, b1))
+
+	// Verify first block is in cache
+	cached, ok := db.entryCache.Get(height)
+	require.True(t, ok)
+	require.Equal(t, b1, cached)
+
+	// Put second block at same height and verify it overrides the first one
+	b2 := randomBlock(t)
+	require.NoError(t, db.Put(height, b2))
+	cached, ok = db.entryCache.Get(height)
+	require.True(t, ok)
+	require.Equal(t, b2, cached)
+	require.NotEqual(t, b1, cached)
+
+	// Get should also return the new block
+	data, err := db.Get(height)
+	require.NoError(t, err)
+	require.Equal(t, b2, data)
+}

x/blockdb/readblock_test.go

Lines changed: 2 additions & 0 deletions
@@ -51,6 +51,7 @@ func TestReadOperations(t *testing.T) {
 				MaxDataFileSize:    DefaultMaxDataFileSize,
 				CheckpointInterval: 1024,
 				MaxDataFiles:       DefaultMaxDataFileSize,
+				EntryCacheSize:     DefaultEntryCacheSize,
 			},
 		},
 		{
@@ -69,6 +70,7 @@ func TestReadOperations(t *testing.T) {
 				MaxDataFileSize:    DefaultMaxDataFileSize,
 				CheckpointInterval: 1024,
 				MaxDataFiles:       DefaultMaxDataFileSize,
+				EntryCacheSize:     DefaultEntryCacheSize,
 			},
 			wantErr: ErrInvalidBlockHeight,
 		},

x/blockdb/writeblock_test.go

Lines changed: 3 additions & 2 deletions
@@ -4,6 +4,7 @@
 package blockdb
 
 import (
+	"bytes"
 	"math"
 	"os"
 	"strings"
@@ -38,7 +39,7 @@ func TestPutGet(t *testing.T) {
 		{
 			name:  "nil block",
 			block: nil,
-			want:  []byte{},
+			want:  nil,
 		},
 	}
 	for _, tt := range tests {
@@ -49,7 +50,7 @@ func TestPutGet(t *testing.T) {
 
 			got, err := db.Get(0)
 			require.NoError(t, err)
-			require.Equal(t, tt.want, got)
+			require.True(t, bytes.Equal(tt.want, got))
 		})
 	}
 }
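The assertion switch matters because, with the cache in front of the read path, a nil block written via Put can come back from Get as nil rather than as the empty slice the disk path produced: require.Equal distinguishes a nil []byte from an empty one, while bytes.Equal treats them as equal. A stdlib-only illustration of that distinction, with reflect.DeepEqual standing in for the strict comparison:

package main

import (
	"bytes"
	"fmt"
	"reflect"
)

func main() {
	var nilSlice []byte
	empty := []byte{}

	// bytes.Equal considers a nil slice and an empty slice equal.
	fmt.Println(bytes.Equal(nilSlice, empty)) // true

	// Strict (deep) equality does not, which is why the old assertion
	// would fail once Get can return nil for a nil block.
	fmt.Println(reflect.DeepEqual(nilSlice, empty)) // false
}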
