@@ -38,6 +38,7 @@ import (
 	"github.com/ethereum/go-ethereum/ethdb"
 	"github.com/ethereum/go-ethereum/event"
 	"github.com/ethereum/go-ethereum/log"
+	"github.com/ethereum/go-ethereum/metrics"
 	"github.com/ethereum/go-ethereum/p2p/msgrate"
 	"github.com/ethereum/go-ethereum/rlp"
 	"github.com/ethereum/go-ethereum/trie"
@@ -106,6 +107,9 @@ var (
 	// storageConcurrency is the number of chunks to split a large contract
 	// storage trie into to allow concurrent retrievals.
 	storageConcurrency = 16
+
+	// trieRebuildTimeGauge tracks the cumulative time spent rebuilding trie intermediate nodes during snap sync, in microseconds.
+	trieRebuildTimeGauge = metrics.NewRegisteredGauge("snap/sync/rebuild", nil)
 )
 
 // ErrCancelled is returned from snap syncing if the operation was prematurely
@@ -502,8 +506,9 @@ type Syncer struct {
 	storageHealed      uint64             // Number of storage slots downloaded during the healing stage
 	storageHealedBytes common.StorageSize // Number of raw storage bytes persisted to disk during the healing stage
 
-	startTime time.Time // Time instance when snapshot sync started
-	logTime   time.Time // Time instance when status was last reported
+	startTime       time.Time     // Time instance when snapshot sync started
+	logTime         time.Time     // Time instance when status was last reported
+	trieRebuildTime time.Duration // Total duration it took to rebuild trie intermediate nodes
 
 	pend sync.WaitGroup // Tracks network request goroutines for graceful shutdown
 	lock sync.RWMutex   // Protects fields that can change outside of sync (peers, reqs, root)
@@ -2202,28 +2207,37 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 				// Keep the left boundary as it's complete
 				tr = newPathTrie(account, false, s.db, batch)
 			}
+
+			start := time.Now()
 			for j := 0; j < len(res.hashes[i]); j++ {
 				tr.update(res.hashes[i][j][:], res.slots[i][j])
 			}
 			tr.commit(true)
+			s.trieRebuildTime += time.Since(start)
 		}
 		// Persist the received storage segments. These flat state maybe
 		// outdated during the sync, but it can be fixed later during the
 		// snapshot generation.
 		for j := 0; j < len(res.hashes[i]); j++ {
 			rawdb.WriteStorageSnapshot(batch, account, res.hashes[i][j], res.slots[i][j])
+		}
 
+		start := time.Now()
+		for j := 0; j < len(res.hashes[i]); j++ {
 			// If we're storing large contracts, generate the trie nodes
 			// on the fly to not trash the gluing points
 			if i == len(res.hashes)-1 && res.subTask != nil {
 				res.subTask.genTrie.update(res.hashes[i][j][:], res.slots[i][j])
 			}
 		}
+		s.trieRebuildTime += time.Since(start)
 	}
 	// Large contracts could have generated new trie nodes, flush them to disk
 	if res.subTask != nil {
 		if res.subTask.done {
+			start := time.Now()
 			root := res.subTask.genTrie.commit(res.subTask.Last == common.MaxHash)
+			s.trieRebuildTime += time.Since(start)
 			if err := res.subTask.genBatch.Write(); err != nil {
 				log.Error("Failed to persist stack slots", "err", err)
 			}
@@ -2241,7 +2255,9 @@ func (s *Syncer) processStorageResponse(res *storageResponse) {
 				}
 			}
 		} else if res.subTask.genBatch.ValueSize() > batchSizeThreshold {
+			start := time.Now()
 			res.subTask.genTrie.commit(false)
+			s.trieRebuildTime += time.Since(start)
 			if err := res.subTask.genBatch.Write(); err != nil {
 				log.Error("Failed to persist stack slots", "err", err)
 			}
@@ -2417,6 +2433,7 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
 		slim := types.SlimAccountRLP(*res.accounts[i])
 		rawdb.WriteAccountSnapshot(batch, hash, slim)
 
+		start := time.Now()
 		if !task.needHeal[i] {
 			// If the storage task is complete, drop it into the stack trie
 			// to generate account trie nodes for it
@@ -2433,6 +2450,7 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
 				panic(err) // Really shouldn't ever happen
 			}
 		}
+		s.trieRebuildTime += time.Since(start)
 	}
 	// Flush anything written just now and update the stats
 	if err := batch.Write(); err != nil {
@@ -2464,18 +2482,23 @@ func (s *Syncer) forwardAccountTask(task *accountTask) {
 	// flush after finalizing task.done. It's fine even if we crash and lose this
 	// write as it will only cause more data to be downloaded during heal.
 	if task.done {
+		start := time.Now()
 		task.genTrie.commit(task.Last == common.MaxHash)
+		s.trieRebuildTime += time.Since(start)
 		if err := task.genBatch.Write(); err != nil {
 			log.Error("Failed to persist stack account", "err", err)
 		}
 		task.genBatch.Reset()
 	} else if task.genBatch.ValueSize() > batchSizeThreshold {
+		start := time.Now()
 		task.genTrie.commit(false)
+		s.trieRebuildTime += time.Since(start)
 		if err := task.genBatch.Write(); err != nil {
 			log.Error("Failed to persist stack account", "err", err)
 		}
 		task.genBatch.Reset()
 	}
+	trieRebuildTimeGauge.Update(s.trieRebuildTime.Microseconds())
 	log.Debug("Persisted range of accounts", "accounts", len(res.accounts), "bytes", s.accountBytes-oldAccountBytes)
 }
 
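For context, below is a minimal, standalone sketch of the instrumentation pattern this diff applies: time a specific code path with time.Now/time.Since, accumulate the result into a time.Duration field, and publish the cumulative total to a gauge in microseconds. It relies only on calls already visible in the diff (metrics.NewRegisteredGauge and Gauge.Update); the tracker type, the rebuildSomething helper, and the "example/trie/rebuild" metric name are illustrative placeholders, not part of go-ethereum, and the sketch assumes a single goroutine updates the counter, as the syncer does.

// timing_sketch.go - illustrative only, not part of the patch.
package main

import (
	"time"

	"github.com/ethereum/go-ethereum/metrics"
)

// Gauge registered in the default metrics registry, mirroring how
// trieRebuildTimeGauge is declared in the diff above. The metric name
// here is hypothetical.
var rebuildTimeGauge = metrics.NewRegisteredGauge("example/trie/rebuild", nil)

// tracker accumulates the total time spent in instrumented sections,
// playing the role of Syncer.trieRebuildTime. Not safe for concurrent
// use; a single updating goroutine is assumed.
type tracker struct {
	rebuildTime time.Duration
}

// rebuildSomething stands in for the trie update/commit work being timed.
func rebuildSomething() {
	time.Sleep(2 * time.Millisecond)
}

func (t *tracker) step() {
	// Time one instrumented section and fold it into the running total,
	// as the diff does around the genTrie update/commit calls.
	start := time.Now()
	rebuildSomething()
	t.rebuildTime += time.Since(start)

	// Publish the cumulative total in microseconds, matching
	// trieRebuildTimeGauge.Update(s.trieRebuildTime.Microseconds()).
	rebuildTimeGauge.Update(t.rebuildTime.Microseconds())
}

func main() {
	t := &tracker{}
	for i := 0; i < 3; i++ {
		t.step()
	}
}

Note the design choice implied by the diff: the gauge carries a cumulative total (microseconds spent so far) rather than a per-interval rate, so whoever scrapes the metric sees the overall trie rebuild cost of the sync up to that point.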