Commit cd23453

eth/snap: track time spent on trie rebuilding
1 parent f96f82b commit cd23453
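
In outline, the change wraps each trie-rebuild call site in processStorageResponse and forwardAccountTask with a wall-clock timer, accumulates the elapsed time into a new trieRebuildTime field on the Syncer, and publishes the running total in microseconds through a gauge registered as "snap/sync/rebuild". A minimal, self-contained sketch of that pattern is shown below; the syncer type, the timeRebuild helper, and the demo metric name are illustrative stand-ins rather than code from the commit:

package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// Gauge registered once at package level, mirroring the commit's
// snap/sync/rebuild metric (a demo name is used here to avoid colliding with it).
var rebuildTimeGauge = metrics.NewRegisteredGauge("demo/sync/rebuild", nil)

// syncer stands in for the real Syncer; only the new field is modelled.
type syncer struct {
	trieRebuildTime time.Duration // running total across all rebuild call sites
}

// timeRebuild wraps one expensive trie-rebuild step (tr.commit, genTrie.update
// or genTrie.commit in the real code) and accumulates its wall-clock cost.
func (s *syncer) timeRebuild(rebuild func()) {
	start := time.Now()
	rebuild()
	s.trieRebuildTime += time.Since(start)
}

// report publishes the running total in microseconds, as forwardAccountTask does.
func (s *syncer) report() {
	rebuildTimeGauge.Update(s.trieRebuildTime.Microseconds())
}

func main() {
	s := new(syncer)
	s.timeRebuild(func() { time.Sleep(2 * time.Millisecond) }) // stand-in for tr.commit(true)
	s.timeRebuild(func() { time.Sleep(3 * time.Millisecond) }) // stand-in for task.genTrie.commit(...)
	s.report() // gauge now holds roughly 5000 (microseconds)
}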


eth/protocols/snap/sync.go

Lines changed: 25 additions & 2 deletions
@@ -38,6 +38,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/p2p/msgrate"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
@@ -106,6 +107,9 @@ var (
 	// storageConcurrency is the number of chunks to split a large contract
 	// storage trie into to allow concurrent retrievals.
 	storageConcurrency = 16
+
+	// trieRebuildTimeGauge tracks the cumulative time spent rebuilding trie intermediate nodes
+	trieRebuildTimeGauge = metrics.NewRegisteredGauge("snap/sync/rebuild", nil)
 )

 // ErrCancelled is returned from snap syncing if the operation was prematurely
@@ -502,8 +506,9 @@ type Syncer struct {
 	storageHealed      uint64             // Number of storage slots downloaded during the healing stage
 	storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage

-	startTime time.Time // Time instance when snapshot sync started
-	logTime   time.Time // Time instance when status was last reported
+	startTime       time.Time     // Time instance when snapshot sync started
+	logTime         time.Time     // Time instance when status was last reported
+	trieRebuildTime time.Duration // Total duration it took to rebuild trie intermediate nodes

 	pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
 	lock sync.RWMutex   // Protects fields that can change outside of sync (peers, reqs, root)
@@ -2202,28 +2207,37 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 				// Keep the left boundary as it's complete
 				tr = newPathTrie(account, false, s.db, batch)
 			}
+
+			start := time.Now()
 			for j := 0; j < len(res.hashes[i]); j++ {
 				tr.update(res.hashes[i][j][:], res.slots[i][j])
 			}
 			tr.commit(true)
+			s.trieRebuildTime += time.Since(start)
 		}
 		// Persist the received storage segments. These flat state maybe
 		// outdated during the sync, but it can be fixed later during the
 		// snapshot generation.
 		for j := 0; j < len(res.hashes[i]); j++ {
 			rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j])
+		}

+		start := time.Now()
+		for j := 0; j < len(res.hashes[i]); j++ {
 			// If we're storing large contracts, generate the trie nodes
 			// on the fly to not trash the gluing points
 			if i == len(res.hashes)-1 && res.subTask != nil {
 				res.subTask.genTrie.update(res.hashes[i][j][:], res.slots[i][j])
 			}
 		}
+		s.trieRebuildTime += time.Since(start)
 	}
 	// Large contracts could have generated new trie nodes, flush them to disk
 	if res.subTask != nil {
 		if res.subTask.done {
+			start := time.Now()
 			root := res.subTask.genTrie.commit(res.subTask.Last == common.MaxHash)
+			s.trieRebuildTime += time.Since(start)
 			if err := res.subTask.genBatch.Write(); err != nil {
 				log.Error("Failed to persist stack slots", "err", err)
 			}
@@ -2241,7 +2255,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 				}
 			}
 		} else if res.subTask.genBatch.ValueSize() > batchSizeThreshold {
+			start := time.Now()
 			res.subTask.genTrie.commit(false)
+			s.trieRebuildTime += time.Since(start)
 			if err := res.subTask.genBatch.Write(); err != nil {
 				log.Error("Failed to persist stack slots", "err", err)
 			}
@@ -2417,6 +2433,7 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
 		slim := types.SlimAccountRLP(*res.accounts[i])
 		rawdb.WriteAccountSnapshot(batch, hash, slim)

+		start := time.Now()
 		if !task.needHeal[i] {
 			// If the storage task is complete, drop it into the stack trie
 			// to generate account trie nodes for it
@@ -2433,6 +2450,7 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
 				panic(err) // Really shouldn't ever happen
 			}
 		}
+		s.trieRebuildTime += time.Since(start)
 	}
 	// Flush anything written just now and update the stats
 	if err := batch.Write(); err != nil {
@@ -2464,18 +2482,23 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
 	// flush after finalizing task.done. It's fine even if we crash and lose this
 	// write as it will only cause more data to be downloaded during heal.
 	if task.done {
+		start := time.Now()
 		task.genTrie.commit(task.Last == common.MaxHash)
+		s.trieRebuildTime += time.Since(start)
 		if err := task.genBatch.Write(); err != nil {
 			log.Error("Failed to persist stack account", "err", err)
 		}
 		task.genBatch.Reset()
 	} else if task.genBatch.ValueSize() > batchSizeThreshold {
+		start := time.Now()
 		task.genTrie.commit(false)
+		s.trieRebuildTime += time.Since(start)
 		if err := task.genBatch.Write(); err != nil {
 			log.Error("Failed to persist stack account", "err", err)
 		}
 		task.genBatch.Reset()
 	}
+	trieRebuildTimeGauge.Update(s.trieRebuildTime.Microseconds())
 	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "bytes", s.accountBytes-oldAccountBytes)
 }
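
Because a gauge carries a bare int64, the running total is exported in microseconds via s.trieRebuildTime.Microseconds(). When a reading of snap/sync/rebuild is pulled back out of whatever sink the node reports to, it can be turned into a time.Duration again; a small illustrative helper, not part of the commit:

package main

import (
	"fmt"
	"time"
)

// gaugeToDuration converts an exported snap/sync/rebuild reading (microseconds
// as int64, as written by trieRebuildTimeGauge.Update) back into a Duration.
func gaugeToDuration(micros int64) time.Duration {
	return time.Duration(micros) * time.Microsecond
}

func main() {
	// Example: a reading of 1500000 means 1.5s has been spent rebuilding tries so far.
	fmt.Println(gaugeToDuration(1500000)) // 1.5s
}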
24812504
